vme_tsi148.c

  1. /*
  2. * Support for the Tundra TSI148 VME-PCI Bridge Chip
  3. *
  4. * Author: Martyn Welch <martyn.welch@ge.com>
  5. * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
  6. *
  7. * Based on work by Tom Armistead and Ajit Prem
  8. * Copyright 2004 Motorola Inc.
  9. *
  10. * This program is free software; you can redistribute it and/or modify it
  11. * under the terms of the GNU General Public License as published by the
  12. * Free Software Foundation; either version 2 of the License, or (at your
  13. * option) any later version.
  14. */
  15. #include <linux/module.h>
  16. #include <linux/moduleparam.h>
  17. #include <linux/mm.h>
  18. #include <linux/types.h>
  19. #include <linux/errno.h>
  20. #include <linux/proc_fs.h>
  21. #include <linux/pci.h>
  22. #include <linux/poll.h>
  23. #include <linux/dma-mapping.h>
  24. #include <linux/interrupt.h>
  25. #include <linux/spinlock.h>
  26. #include <linux/sched.h>
  27. #include <linux/slab.h>
  28. #include <linux/time.h>
  29. #include <linux/io.h>
  30. #include <linux/uaccess.h>
  31. #include <linux/byteorder/generic.h>
  32. #include <linux/vme.h>
  33. #include "../vme_bridge.h"
  34. #include "vme_tsi148.h"
  35. static int __init tsi148_init(void);
  36. static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
  37. static void tsi148_remove(struct pci_dev *);
  38. static void __exit tsi148_exit(void);
  39. /* Module parameter */
  40. static bool err_chk;
  41. static int geoid;
  42. static const char driver_name[] = "vme_tsi148";
  43. static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
  44. { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
  45. { },
  46. };
  47. static struct pci_driver tsi148_driver = {
  48. .name = driver_name,
  49. .id_table = tsi148_ids,
  50. .probe = tsi148_probe,
  51. .remove = tsi148_remove,
  52. };
  53. static void reg_join(unsigned int high, unsigned int low,
  54. unsigned long long *variable)
  55. {
  56. *variable = (unsigned long long)high << 32;
  57. *variable |= (unsigned long long)low;
  58. }
  59. static void reg_split(unsigned long long variable, unsigned int *high,
  60. unsigned int *low)
  61. {
  62. *low = (unsigned int)variable & 0xFFFFFFFF;
  63. *high = (unsigned int)(variable >> 32);
  64. }
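/*
 * Illustrative example (not part of the original source): reg_join() and
 * reg_split() simply combine and split the 64-bit values held in pairs of
 * 32-bit bridge registers, e.g.
 *
 *	reg_join(0x00000001, 0x80000000, &addr);  yields addr == 0x0000000180000000
 *	reg_split(addr, &high, &low);             yields high == 0x1, low == 0x80000000
 */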
  65. /*
  66. * Wakes up DMA queue.
  67. */
  68. static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
  69. int channel_mask)
  70. {
  71. u32 serviced = 0;
  72. if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
  73. wake_up(&bridge->dma_queue[0]);
  74. serviced |= TSI148_LCSR_INTC_DMA0C;
  75. }
  76. if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
  77. wake_up(&bridge->dma_queue[1]);
  78. serviced |= TSI148_LCSR_INTC_DMA1C;
  79. }
  80. return serviced;
  81. }
  82. /*
  83. * Call the location monitor callbacks for any triggered monitors.
  84. */
  85. static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
  86. {
  87. int i;
  88. u32 serviced = 0;
  89. for (i = 0; i < 4; i++) {
  90. if (stat & TSI148_LCSR_INTS_LMS[i]) {
  91. /* We only enable interrupts if the callback is set */
  92. bridge->lm_callback[i](i);
  93. serviced |= TSI148_LCSR_INTC_LMC[i];
  94. }
  95. }
  96. return serviced;
  97. }
  98. /*
  99. * Report mailbox interrupts.
  100. *
  101. * XXX This functionality is not exposed up through the API.
  102. */
  103. static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
  104. {
  105. int i;
  106. u32 val;
  107. u32 serviced = 0;
  108. struct tsi148_driver *bridge;
  109. bridge = tsi148_bridge->driver_priv;
  110. for (i = 0; i < 4; i++) {
  111. if (stat & TSI148_LCSR_INTS_MBS[i]) {
  112. val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
  113. dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
  114. ": 0x%x\n", i, val);
  115. serviced |= TSI148_LCSR_INTC_MBC[i];
  116. }
  117. }
  118. return serviced;
  119. }
  120. /*
  121. * Display error & status message when PERR (PCI) exception interrupt occurs.
  122. */
  123. static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
  124. {
  125. struct tsi148_driver *bridge;
  126. bridge = tsi148_bridge->driver_priv;
  127. dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
  128. "attributes: %08x\n",
  129. ioread32be(bridge->base + TSI148_LCSR_EDPAU),
  130. ioread32be(bridge->base + TSI148_LCSR_EDPAL),
  131. ioread32be(bridge->base + TSI148_LCSR_EDPAT));
  132. dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
  133. "completion reg: %08x\n",
  134. ioread32be(bridge->base + TSI148_LCSR_EDPXA),
  135. ioread32be(bridge->base + TSI148_LCSR_EDPXS));
  136. iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
  137. return TSI148_LCSR_INTC_PERRC;
  138. }
  139. /*
  140. * Save address and status when VME error interrupt occurs.
  141. */
  142. static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
  143. {
  144. unsigned int error_addr_high, error_addr_low;
  145. unsigned long long error_addr;
  146. u32 error_attrib;
  147. struct vme_bus_error *error;
  148. struct tsi148_driver *bridge;
  149. bridge = tsi148_bridge->driver_priv;
  150. error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
  151. error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
  152. error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
  153. reg_join(error_addr_high, error_addr_low, &error_addr);
  154. /* Check for exception register overflow (we have lost error data) */
  155. if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
  156. dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
  157. "Occurred\n");
  158. }
  159. error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
  160. if (error) {
  161. error->address = error_addr;
  162. error->attributes = error_attrib;
  163. list_add_tail(&error->list, &tsi148_bridge->vme_errors);
  164. } else {
  165. dev_err(tsi148_bridge->parent, "Unable to alloc memory for "
  166. "VMEbus Error reporting\n");
  167. dev_err(tsi148_bridge->parent, "VME Bus Error at address: "
  168. "0x%llx, attributes: %08x\n", error_addr, error_attrib);
  169. }
  170. /* Clear Status */
  171. iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
  172. return TSI148_LCSR_INTC_VERRC;
  173. }
  174. /*
  175. * Wake up IACK queue.
  176. */
  177. static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
  178. {
  179. wake_up(&bridge->iack_queue);
  180. return TSI148_LCSR_INTC_IACKC;
  181. }
  182. /*
  183. * Call the VME bus interrupt callback if provided.
  184. */
  185. static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
  186. u32 stat)
  187. {
  188. int vec, i, serviced = 0;
  189. struct tsi148_driver *bridge;
  190. bridge = tsi148_bridge->driver_priv;
  191. for (i = 7; i > 0; i--) {
  192. if (stat & (1 << i)) {
  193. /*
  194. * Note: Even though the registers are defined as
  195. * 32-bits in the spec, we only want to issue 8-bit
  196. * IACK cycles on the bus, read from offset 3.
  197. */
  198. vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
  199. vme_irq_handler(tsi148_bridge, i, vec);
  200. serviced |= (1 << i);
  201. }
  202. }
  203. return serviced;
  204. }
  205. /*
  206. * Top level interrupt handler. Clears appropriate interrupt status bits and
  207. * then calls appropriate sub handler(s).
  208. */
  209. static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
  210. {
  211. u32 stat, enable, serviced = 0;
  212. struct vme_bridge *tsi148_bridge;
  213. struct tsi148_driver *bridge;
  214. tsi148_bridge = ptr;
  215. bridge = tsi148_bridge->driver_priv;
  216. /* Determine which interrupts are unmasked and set */
  217. enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
  218. stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
  219. /* Only look at unmasked interrupts */
  220. stat &= enable;
  221. if (unlikely(!stat))
  222. return IRQ_NONE;
  223. /* Call subhandlers as appropriate */
  224. /* DMA irqs */
  225. if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
  226. serviced |= tsi148_DMA_irqhandler(bridge, stat);
  227. /* Location monitor irqs */
  228. if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
  229. TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
  230. serviced |= tsi148_LM_irqhandler(bridge, stat);
  231. /* Mail box irqs */
  232. if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
  233. TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
  234. serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
  235. /* PCI bus error */
  236. if (stat & TSI148_LCSR_INTS_PERRS)
  237. serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
  238. /* VME bus error */
  239. if (stat & TSI148_LCSR_INTS_VERRS)
  240. serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
  241. /* IACK irq */
  242. if (stat & TSI148_LCSR_INTS_IACKS)
  243. serviced |= tsi148_IACK_irqhandler(bridge);
  244. /* VME bus irqs */
  245. if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
  246. TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
  247. TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
  248. TSI148_LCSR_INTS_IRQ1S))
  249. serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
  250. /* Clear serviced interrupts */
  251. iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
  252. return IRQ_HANDLED;
  253. }
  254. static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
  255. {
  256. int result;
  257. unsigned int tmp;
  258. struct pci_dev *pdev;
  259. struct tsi148_driver *bridge;
  260. pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
  261. bridge = tsi148_bridge->driver_priv;
  262. /* Initialise list for VME bus errors */
  263. INIT_LIST_HEAD(&tsi148_bridge->vme_errors);
  264. mutex_init(&tsi148_bridge->irq_mtx);
  265. result = request_irq(pdev->irq,
  266. tsi148_irqhandler,
  267. IRQF_SHARED,
  268. driver_name, tsi148_bridge);
  269. if (result) {
  270. dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
  271. "vector %02X\n", pdev->irq);
  272. return result;
  273. }
  274. /* Enable and unmask interrupts */
  275. tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
  276. TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
  277. TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
  278. TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
  279. TSI148_LCSR_INTEO_IACKEO;
  280. /* This leaves the following interrupts masked.
  281. * TSI148_LCSR_INTEO_VIEEO
  282. * TSI148_LCSR_INTEO_SYSFLEO
  283. * TSI148_LCSR_INTEO_ACFLEO
  284. */
  285. /* Don't enable Location Monitor interrupts here - they will be
  286. * enabled when the location monitors are properly configured and
  287. * a callback has been attached.
  288. * TSI148_LCSR_INTEO_LM0EO
  289. * TSI148_LCSR_INTEO_LM1EO
  290. * TSI148_LCSR_INTEO_LM2EO
  291. * TSI148_LCSR_INTEO_LM3EO
  292. */
  293. /* Don't enable VME interrupts until a handler has been attached; otherwise
  294. * the bridge would acknowledge interrupts that nothing yet knows how to
  295. * handle properly.
  296. * TSI148_LCSR_INTEO_IRQ7EO
  297. * TSI148_LCSR_INTEO_IRQ6EO
  298. * TSI148_LCSR_INTEO_IRQ5EO
  299. * TSI148_LCSR_INTEO_IRQ4EO
  300. * TSI148_LCSR_INTEO_IRQ3EO
  301. * TSI148_LCSR_INTEO_IRQ2EO
  302. * TSI148_LCSR_INTEO_IRQ1EO
  303. */
  304. iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
  305. iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
  306. return 0;
  307. }
  308. static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
  309. struct pci_dev *pdev)
  310. {
  311. struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
  312. /* Turn off interrupts */
  313. iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
  314. iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
  315. /* Clear all interrupts */
  316. iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
  317. /* Detach interrupt handler */
  318. free_irq(pdev->irq, tsi148_bridge);
  319. }
  320. /*
  321. * Check to see if an IACK has been received, return true (1) or false (0).
  322. */
  323. static int tsi148_iack_received(struct tsi148_driver *bridge)
  324. {
  325. u32 tmp;
  326. tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
  327. if (tmp & TSI148_LCSR_VICR_IRQS)
  328. return 0;
  329. else
  330. return 1;
  331. }
  332. /*
  333. * Configure VME interrupt
  334. */
  335. static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
  336. int state, int sync)
  337. {
  338. struct pci_dev *pdev;
  339. u32 tmp;
  340. struct tsi148_driver *bridge;
  341. bridge = tsi148_bridge->driver_priv;
  342. /* We need to do the ordering differently for enabling and disabling */
  343. if (state == 0) {
  344. tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
  345. tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
  346. iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
  347. tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
  348. tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
  349. iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
  350. if (sync != 0) {
  351. pdev = container_of(tsi148_bridge->parent,
  352. struct pci_dev, dev);
  353. synchronize_irq(pdev->irq);
  354. }
  355. } else {
  356. tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
  357. tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
  358. iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
  359. tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
  360. tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
  361. iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
  362. }
  363. }
  364. /*
  365. * Generate a VME bus interrupt at the requested level & vector. Wait for
  366. * interrupt to be acked.
  367. */
  368. static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
  369. int statid)
  370. {
  371. u32 tmp;
  372. struct tsi148_driver *bridge;
  373. bridge = tsi148_bridge->driver_priv;
  374. mutex_lock(&bridge->vme_int);
  375. /* Read VICR register */
  376. tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
  377. /* Set Status/ID */
  378. tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
  379. (statid & TSI148_LCSR_VICR_STID_M);
  380. iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
  381. /* Assert VMEbus IRQ */
  382. tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
  383. iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
  384. /* XXX Consider implementing a timeout? */
  385. wait_event_interruptible(bridge->iack_queue,
  386. tsi148_iack_received(bridge));
  387. mutex_unlock(&bridge->vme_int);
  388. return 0;
  389. }
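/*
 * Usage sketch (illustrative only, assuming the in-kernel VME core API
 * declared in <linux/vme.h>): a VME device driver normally reaches this
 * routine through vme_irq_generate(), e.g.
 *
 *	int ret;
 *
 *	// Assert a level 3 interrupt with status/ID vector 0xaa and wait for IACK.
 *	ret = vme_irq_generate(vdev, 3, 0xaa);
 *	if (ret)
 *		dev_err(&vdev->dev, "failed to generate VME interrupt\n");
 */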
  390. /*
  391. * Find the first error in this address range
  392. */
  393. static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
  394. u32 aspace, unsigned long long address, size_t count)
  395. {
  396. struct list_head *err_pos;
  397. struct vme_bus_error *vme_err, *valid = NULL;
  398. unsigned long long bound;
  399. bound = address + count;
  400. /*
  401. * XXX We are currently not looking at the address space when parsing
  402. * for errors. This is because parsing the Address Modifier Codes
  403. * is going to be quite resource intensive to do properly. We
  404. * should be OK just looking at the addresses and this is certainly
  405. * much better than what we had before.
  406. */
  407. err_pos = NULL;
  408. /* Iterate through errors */
  409. list_for_each(err_pos, &tsi148_bridge->vme_errors) {
  410. vme_err = list_entry(err_pos, struct vme_bus_error, list);
  411. if ((vme_err->address >= address) &&
  412. (vme_err->address < bound)) {
  413. valid = vme_err;
  414. break;
  415. }
  416. }
  417. return valid;
  418. }
  419. /*
  420. * Clear errors in the provided address range.
  421. */
  422. static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
  423. u32 aspace, unsigned long long address, size_t count)
  424. {
  425. struct list_head *err_pos, *temp;
  426. struct vme_bus_error *vme_err;
  427. unsigned long long bound;
  428. bound = address + count;
  429. /*
  430. * XXX We are currently not looking at the address space when parsing
  431. * for errors. This is because parsing the Address Modifier Codes
  432. * is going to be quite resource intensive to do properly. We
  433. * should be OK just looking at the addresses and this is certainly
  434. * much better than what we had before.
  435. */
  436. err_pos = NULL;
  437. /* Iterate through errors */
  438. list_for_each_safe(err_pos, temp, &tsi148_bridge->vme_errors) {
  439. vme_err = list_entry(err_pos, struct vme_bus_error, list);
  440. if ((vme_err->address >= address) &&
  441. (vme_err->address < bound)) {
  442. list_del(err_pos);
  443. kfree(vme_err);
  444. }
  445. }
  446. }
  447. /*
  448. * Initialize a slave window with the requested attributes.
  449. */
  450. static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
  451. unsigned long long vme_base, unsigned long long size,
  452. dma_addr_t pci_base, u32 aspace, u32 cycle)
  453. {
  454. unsigned int i, addr = 0, granularity = 0;
  455. unsigned int temp_ctl = 0;
  456. unsigned int vme_base_low, vme_base_high;
  457. unsigned int vme_bound_low, vme_bound_high;
  458. unsigned int pci_offset_low, pci_offset_high;
  459. unsigned long long vme_bound, pci_offset;
  460. struct vme_bridge *tsi148_bridge;
  461. struct tsi148_driver *bridge;
  462. tsi148_bridge = image->parent;
  463. bridge = tsi148_bridge->driver_priv;
  464. i = image->number;
  465. switch (aspace) {
  466. case VME_A16:
  467. granularity = 0x10;
  468. addr |= TSI148_LCSR_ITAT_AS_A16;
  469. break;
  470. case VME_A24:
  471. granularity = 0x1000;
  472. addr |= TSI148_LCSR_ITAT_AS_A24;
  473. break;
  474. case VME_A32:
  475. granularity = 0x10000;
  476. addr |= TSI148_LCSR_ITAT_AS_A32;
  477. break;
  478. case VME_A64:
  479. granularity = 0x10000;
  480. addr |= TSI148_LCSR_ITAT_AS_A64;
  481. break;
  482. case VME_CRCSR:
  483. case VME_USER1:
  484. case VME_USER2:
  485. case VME_USER3:
  486. case VME_USER4:
  487. default:
  488. dev_err(tsi148_bridge->parent, "Invalid address space\n");
  489. return -EINVAL;
  490. break;
  491. }
  492. /* Convert 64-bit variables to 2x 32-bit variables */
  493. reg_split(vme_base, &vme_base_high, &vme_base_low);
  494. /*
  495. * Bound address is a valid address for the window, adjust
  496. * accordingly
  497. */
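/*
 * Worked example (illustrative): for an A24 window (granularity 0x1000) with
 * vme_base = 0x100000 and size = 0x20000, the bound computed below is
 * 0x100000 + 0x20000 - 0x1000 = 0x11F000, i.e. the start of the last valid
 * granule within the window.
 */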
  498. vme_bound = vme_base + size - granularity;
  499. reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
  500. pci_offset = (unsigned long long)pci_base - vme_base;
  501. reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
  502. if (vme_base_low & (granularity - 1)) {
  503. dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
  504. return -EINVAL;
  505. }
  506. if (vme_bound_low & (granularity - 1)) {
  507. dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
  508. return -EINVAL;
  509. }
  510. if (pci_offset_low & (granularity - 1)) {
  511. dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
  512. "alignment\n");
  513. return -EINVAL;
  514. }
  515. /* Disable while we are mucking around */
  516. temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
  517. TSI148_LCSR_OFFSET_ITAT);
  518. temp_ctl &= ~TSI148_LCSR_ITAT_EN;
  519. iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
  520. TSI148_LCSR_OFFSET_ITAT);
  521. /* Setup mapping */
  522. iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
  523. TSI148_LCSR_OFFSET_ITSAU);
  524. iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
  525. TSI148_LCSR_OFFSET_ITSAL);
  526. iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
  527. TSI148_LCSR_OFFSET_ITEAU);
  528. iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
  529. TSI148_LCSR_OFFSET_ITEAL);
  530. iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
  531. TSI148_LCSR_OFFSET_ITOFU);
  532. iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
  533. TSI148_LCSR_OFFSET_ITOFL);
  534. /* Setup 2eSST speeds */
  535. temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
  536. switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
  537. case VME_2eSST160:
  538. temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
  539. break;
  540. case VME_2eSST267:
  541. temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
  542. break;
  543. case VME_2eSST320:
  544. temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
  545. break;
  546. }
  547. /* Setup cycle types */
  548. temp_ctl &= ~(0x1F << 7);
  549. if (cycle & VME_BLT)
  550. temp_ctl |= TSI148_LCSR_ITAT_BLT;
  551. if (cycle & VME_MBLT)
  552. temp_ctl |= TSI148_LCSR_ITAT_MBLT;
  553. if (cycle & VME_2eVME)
  554. temp_ctl |= TSI148_LCSR_ITAT_2eVME;
  555. if (cycle & VME_2eSST)
  556. temp_ctl |= TSI148_LCSR_ITAT_2eSST;
  557. if (cycle & VME_2eSSTB)
  558. temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
  559. /* Setup address space */
  560. temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
  561. temp_ctl |= addr;
  562. temp_ctl &= ~0xF;
  563. if (cycle & VME_SUPER)
  564. temp_ctl |= TSI148_LCSR_ITAT_SUPR;
  565. if (cycle & VME_USER)
  566. temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
  567. if (cycle & VME_PROG)
  568. temp_ctl |= TSI148_LCSR_ITAT_PGM;
  569. if (cycle & VME_DATA)
  570. temp_ctl |= TSI148_LCSR_ITAT_DATA;
  571. /* Write ctl reg without enable */
  572. iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
  573. TSI148_LCSR_OFFSET_ITAT);
  574. if (enabled)
  575. temp_ctl |= TSI148_LCSR_ITAT_EN;
  576. iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
  577. TSI148_LCSR_OFFSET_ITAT);
  578. return 0;
  579. }
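/*
 * Usage sketch (illustrative only, assuming the VME core API in
 * <linux/vme.h> and a DMA-coherent buffer allocated elsewhere as buf_dma):
 * a device driver would typically configure a slave window like this:
 *
 *	struct vme_resource *res;
 *	int ret;
 *
 *	res = vme_slave_request(vdev, VME_A24, VME_SCT);
 *	if (!res)
 *		return -ENOMEM;
 *	// Map a 64KiB buffer at VME A24 address 0x200000.
 *	ret = vme_slave_set(res, 1, 0x200000, 0x10000, buf_dma, VME_A24,
 *			    VME_SCT | VME_SUPER | VME_USER | VME_DATA);
 */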
  580. /*
  581. * Get slave window configuration.
  582. */
  583. static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
  584. unsigned long long *vme_base, unsigned long long *size,
  585. dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
  586. {
  587. unsigned int i, granularity = 0, ctl = 0;
  588. unsigned int vme_base_low, vme_base_high;
  589. unsigned int vme_bound_low, vme_bound_high;
  590. unsigned int pci_offset_low, pci_offset_high;
  591. unsigned long long vme_bound, pci_offset;
  592. struct tsi148_driver *bridge;
  593. bridge = image->parent->driver_priv;
  594. i = image->number;
  595. /* Read registers */
  596. ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
  597. TSI148_LCSR_OFFSET_ITAT);
  598. vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
  599. TSI148_LCSR_OFFSET_ITSAU);
  600. vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
  601. TSI148_LCSR_OFFSET_ITSAL);
  602. vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
  603. TSI148_LCSR_OFFSET_ITEAU);
  604. vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
  605. TSI148_LCSR_OFFSET_ITEAL);
  606. pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
  607. TSI148_LCSR_OFFSET_ITOFU);
  608. pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
  609. TSI148_LCSR_OFFSET_ITOFL);
  610. /* Convert 64-bit variables to 2x 32-bit variables */
  611. reg_join(vme_base_high, vme_base_low, vme_base);
  612. reg_join(vme_bound_high, vme_bound_low, &vme_bound);
  613. reg_join(pci_offset_high, pci_offset_low, &pci_offset);
  614. *pci_base = (dma_addr_t)*vme_base + pci_offset;
  615. *enabled = 0;
  616. *aspace = 0;
  617. *cycle = 0;
  618. if (ctl & TSI148_LCSR_ITAT_EN)
  619. *enabled = 1;
  620. if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
  621. granularity = 0x10;
  622. *aspace |= VME_A16;
  623. }
  624. if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
  625. granularity = 0x1000;
  626. *aspace |= VME_A24;
  627. }
  628. if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
  629. granularity = 0x10000;
  630. *aspace |= VME_A32;
  631. }
  632. if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
  633. granularity = 0x10000;
  634. *aspace |= VME_A64;
  635. }
  636. /* Need granularity before we set the size */
  637. *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
  638. if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
  639. *cycle |= VME_2eSST160;
  640. if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
  641. *cycle |= VME_2eSST267;
  642. if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
  643. *cycle |= VME_2eSST320;
  644. if (ctl & TSI148_LCSR_ITAT_BLT)
  645. *cycle |= VME_BLT;
  646. if (ctl & TSI148_LCSR_ITAT_MBLT)
  647. *cycle |= VME_MBLT;
  648. if (ctl & TSI148_LCSR_ITAT_2eVME)
  649. *cycle |= VME_2eVME;
  650. if (ctl & TSI148_LCSR_ITAT_2eSST)
  651. *cycle |= VME_2eSST;
  652. if (ctl & TSI148_LCSR_ITAT_2eSSTB)
  653. *cycle |= VME_2eSSTB;
  654. if (ctl & TSI148_LCSR_ITAT_SUPR)
  655. *cycle |= VME_SUPER;
  656. if (ctl & TSI148_LCSR_ITAT_NPRIV)
  657. *cycle |= VME_USER;
  658. if (ctl & TSI148_LCSR_ITAT_PGM)
  659. *cycle |= VME_PROG;
  660. if (ctl & TSI148_LCSR_ITAT_DATA)
  661. *cycle |= VME_DATA;
  662. return 0;
  663. }
  664. /*
  665. * Allocate and map PCI Resource
  666. */
  667. static int tsi148_alloc_resource(struct vme_master_resource *image,
  668. unsigned long long size)
  669. {
  670. unsigned long long existing_size;
  671. int retval = 0;
  672. struct pci_dev *pdev;
  673. struct vme_bridge *tsi148_bridge;
  674. tsi148_bridge = image->parent;
  675. pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
  676. existing_size = (unsigned long long)(image->bus_resource.end -
  677. image->bus_resource.start);
  678. /* If the existing size is OK, return */
  679. if ((size != 0) && (existing_size == (size - 1)))
  680. return 0;
  681. if (existing_size != 0) {
  682. iounmap(image->kern_base);
  683. image->kern_base = NULL;
  684. kfree(image->bus_resource.name);
  685. release_resource(&image->bus_resource);
  686. memset(&image->bus_resource, 0, sizeof(struct resource));
  687. }
  688. /* Exit here if size is zero */
  689. if (size == 0)
  690. return 0;
  691. if (image->bus_resource.name == NULL) {
  692. image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
  693. if (image->bus_resource.name == NULL) {
  694. dev_err(tsi148_bridge->parent, "Unable to allocate "
  695. "memory for resource name\n");
  696. retval = -ENOMEM;
  697. goto err_name;
  698. }
  699. }
  700. sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
  701. image->number);
  702. image->bus_resource.start = 0;
  703. image->bus_resource.end = (unsigned long)size;
  704. image->bus_resource.flags = IORESOURCE_MEM;
  705. retval = pci_bus_alloc_resource(pdev->bus,
  706. &image->bus_resource, size, size, PCIBIOS_MIN_MEM,
  707. 0, NULL, NULL);
  708. if (retval) {
  709. dev_err(tsi148_bridge->parent, "Failed to allocate mem "
  710. "resource for window %d size 0x%lx start 0x%lx\n",
  711. image->number, (unsigned long)size,
  712. (unsigned long)image->bus_resource.start);
  713. goto err_resource;
  714. }
  715. image->kern_base = ioremap_nocache(
  716. image->bus_resource.start, size);
  717. if (image->kern_base == NULL) {
  718. dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
  719. retval = -ENOMEM;
  720. goto err_remap;
  721. }
  722. return 0;
  723. err_remap:
  724. release_resource(&image->bus_resource);
  725. err_resource:
  726. kfree(image->bus_resource.name);
  727. memset(&image->bus_resource, 0, sizeof(struct resource));
  728. err_name:
  729. return retval;
  730. }
  731. /*
  732. * Free and unmap PCI Resource
  733. */
  734. static void tsi148_free_resource(struct vme_master_resource *image)
  735. {
  736. iounmap(image->kern_base);
  737. image->kern_base = NULL;
  738. release_resource(&image->bus_resource);
  739. kfree(image->bus_resource.name);
  740. memset(&image->bus_resource, 0, sizeof(struct resource));
  741. }
  742. /*
  743. * Set the attributes of an outbound window.
  744. */
  745. static int tsi148_master_set(struct vme_master_resource *image, int enabled,
  746. unsigned long long vme_base, unsigned long long size, u32 aspace,
  747. u32 cycle, u32 dwidth)
  748. {
  749. int retval = 0;
  750. unsigned int i;
  751. unsigned int temp_ctl = 0;
  752. unsigned int pci_base_low, pci_base_high;
  753. unsigned int pci_bound_low, pci_bound_high;
  754. unsigned int vme_offset_low, vme_offset_high;
  755. unsigned long long pci_bound, vme_offset, pci_base;
  756. struct vme_bridge *tsi148_bridge;
  757. struct tsi148_driver *bridge;
  758. tsi148_bridge = image->parent;
  759. bridge = tsi148_bridge->driver_priv;
  760. /* Verify input data */
  761. if (vme_base & 0xFFFF) {
  762. dev_err(tsi148_bridge->parent, "Invalid VME Window "
  763. "alignment\n");
  764. retval = -EINVAL;
  765. goto err_window;
  766. }
  767. if ((size == 0) && (enabled != 0)) {
  768. dev_err(tsi148_bridge->parent, "Size must be non-zero for "
  769. "enabled windows\n");
  770. retval = -EINVAL;
  771. goto err_window;
  772. }
  773. spin_lock(&image->lock);
  774. /* Let's allocate the resource here rather than further up the stack as
  775. * it avoids pushing loads of bus dependent stuff up the stack. If size
  776. * is zero, any existing resource will be freed.
  777. */
  778. retval = tsi148_alloc_resource(image, size);
  779. if (retval) {
  780. spin_unlock(&image->lock);
  781. dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
  782. "resource\n");
  783. goto err_res;
  784. }
  785. if (size == 0) {
  786. pci_base = 0;
  787. pci_bound = 0;
  788. vme_offset = 0;
  789. } else {
  790. pci_base = (unsigned long long)image->bus_resource.start;
  791. /*
  792. * Bound address is a valid address for the window, adjust
  793. * according to window granularity.
  794. */
  795. pci_bound = pci_base + (size - 0x10000);
  796. vme_offset = vme_base - pci_base;
  797. }
  798. /* Convert 64-bit variables to 2x 32-bit variables */
  799. reg_split(pci_base, &pci_base_high, &pci_base_low);
  800. reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
  801. reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
  802. if (pci_base_low & 0xFFFF) {
  803. spin_unlock(&image->lock);
  804. dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
  805. retval = -EINVAL;
  806. goto err_gran;
  807. }
  808. if (pci_bound_low & 0xFFFF) {
  809. spin_unlock(&image->lock);
  810. dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
  811. retval = -EINVAL;
  812. goto err_gran;
  813. }
  814. if (vme_offset_low & 0xFFFF) {
  815. spin_unlock(&image->lock);
  816. dev_err(tsi148_bridge->parent, "Invalid VME Offset "
  817. "alignment\n");
  818. retval = -EINVAL;
  819. goto err_gran;
  820. }
  821. i = image->number;
  822. /* Disable while we are mucking around */
  823. temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  824. TSI148_LCSR_OFFSET_OTAT);
  825. temp_ctl &= ~TSI148_LCSR_OTAT_EN;
  826. iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
  827. TSI148_LCSR_OFFSET_OTAT);
  828. /* Setup 2eSST speeds */
  829. temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
  830. switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
  831. case VME_2eSST160:
  832. temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
  833. break;
  834. case VME_2eSST267:
  835. temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
  836. break;
  837. case VME_2eSST320:
  838. temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
  839. break;
  840. }
  841. /* Setup cycle types */
  842. if (cycle & VME_BLT) {
  843. temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
  844. temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
  845. }
  846. if (cycle & VME_MBLT) {
  847. temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
  848. temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
  849. }
  850. if (cycle & VME_2eVME) {
  851. temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
  852. temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
  853. }
  854. if (cycle & VME_2eSST) {
  855. temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
  856. temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
  857. }
  858. if (cycle & VME_2eSSTB) {
  859. dev_warn(tsi148_bridge->parent, "Currently not setting "
  860. "Broadcast Select Registers\n");
  861. temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
  862. temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
  863. }
  864. /* Setup data width */
  865. temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
  866. switch (dwidth) {
  867. case VME_D16:
  868. temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
  869. break;
  870. case VME_D32:
  871. temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
  872. break;
  873. default:
  874. spin_unlock(&image->lock);
  875. dev_err(tsi148_bridge->parent, "Invalid data width\n");
  876. retval = -EINVAL;
  877. goto err_dwidth;
  878. }
  879. /* Setup address space */
  880. temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
  881. switch (aspace) {
  882. case VME_A16:
  883. temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
  884. break;
  885. case VME_A24:
  886. temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
  887. break;
  888. case VME_A32:
  889. temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
  890. break;
  891. case VME_A64:
  892. temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
  893. break;
  894. case VME_CRCSR:
  895. temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
  896. break;
  897. case VME_USER1:
  898. temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
  899. break;
  900. case VME_USER2:
  901. temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
  902. break;
  903. case VME_USER3:
  904. temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
  905. break;
  906. case VME_USER4:
  907. temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
  908. break;
  909. default:
  910. spin_unlock(&image->lock);
  911. dev_err(tsi148_bridge->parent, "Invalid address space\n");
  912. retval = -EINVAL;
  913. goto err_aspace;
  914. break;
  915. }
  916. temp_ctl &= ~(3<<4);
  917. if (cycle & VME_SUPER)
  918. temp_ctl |= TSI148_LCSR_OTAT_SUP;
  919. if (cycle & VME_PROG)
  920. temp_ctl |= TSI148_LCSR_OTAT_PGM;
  921. /* Setup mapping */
  922. iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
  923. TSI148_LCSR_OFFSET_OTSAU);
  924. iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
  925. TSI148_LCSR_OFFSET_OTSAL);
  926. iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
  927. TSI148_LCSR_OFFSET_OTEAU);
  928. iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
  929. TSI148_LCSR_OFFSET_OTEAL);
  930. iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
  931. TSI148_LCSR_OFFSET_OTOFU);
  932. iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
  933. TSI148_LCSR_OFFSET_OTOFL);
  934. /* Write ctl reg without enable */
  935. iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
  936. TSI148_LCSR_OFFSET_OTAT);
  937. if (enabled)
  938. temp_ctl |= TSI148_LCSR_OTAT_EN;
  939. iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
  940. TSI148_LCSR_OFFSET_OTAT);
  941. spin_unlock(&image->lock);
  942. return 0;
  943. err_aspace:
  944. err_dwidth:
  945. err_gran:
  946. tsi148_free_resource(image);
  947. err_res:
  948. err_window:
  949. return retval;
  950. }
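/*
 * Usage sketch (illustrative only, assuming the VME core API in
 * <linux/vme.h>): an outbound (master) window is normally set up by a device
 * driver through the core, which ends up calling this function:
 *
 *	struct vme_resource *res;
 *	int ret;
 *
 *	res = vme_master_request(vdev, VME_A32, VME_SCT, VME_D32);
 *	if (!res)
 *		return -ENOMEM;
 *	// 64KiB window onto VME A32 address 0x10000000, D32 single cycles.
 *	ret = vme_master_set(res, 1, 0x10000000, 0x10000, VME_A32,
 *			     VME_SCT | VME_USER | VME_DATA, VME_D32);
 */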
  951. /*
  952. * Get the attributes of an outbound window.
  953. *
  954. * XXX Not parsing prefetch information.
  955. */
  956. static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
  957. unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
  958. u32 *cycle, u32 *dwidth)
  959. {
  960. unsigned int i, ctl;
  961. unsigned int pci_base_low, pci_base_high;
  962. unsigned int pci_bound_low, pci_bound_high;
  963. unsigned int vme_offset_low, vme_offset_high;
  964. unsigned long long pci_base, pci_bound, vme_offset;
  965. struct tsi148_driver *bridge;
  966. bridge = image->parent->driver_priv;
  967. i = image->number;
  968. ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  969. TSI148_LCSR_OFFSET_OTAT);
  970. pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  971. TSI148_LCSR_OFFSET_OTSAU);
  972. pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  973. TSI148_LCSR_OFFSET_OTSAL);
  974. pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  975. TSI148_LCSR_OFFSET_OTEAU);
  976. pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  977. TSI148_LCSR_OFFSET_OTEAL);
  978. vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  979. TSI148_LCSR_OFFSET_OTOFU);
  980. vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  981. TSI148_LCSR_OFFSET_OTOFL);
  982. /* Convert 64-bit variables to 2x 32-bit variables */
  983. reg_join(pci_base_high, pci_base_low, &pci_base);
  984. reg_join(pci_bound_high, pci_bound_low, &pci_bound);
  985. reg_join(vme_offset_high, vme_offset_low, &vme_offset);
  986. *vme_base = pci_base + vme_offset;
  987. *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
  988. *enabled = 0;
  989. *aspace = 0;
  990. *cycle = 0;
  991. *dwidth = 0;
  992. if (ctl & TSI148_LCSR_OTAT_EN)
  993. *enabled = 1;
  994. /* Setup address space */
  995. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
  996. *aspace |= VME_A16;
  997. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
  998. *aspace |= VME_A24;
  999. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
  1000. *aspace |= VME_A32;
  1001. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
  1002. *aspace |= VME_A64;
  1003. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
  1004. *aspace |= VME_CRCSR;
  1005. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
  1006. *aspace |= VME_USER1;
  1007. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
  1008. *aspace |= VME_USER2;
  1009. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
  1010. *aspace |= VME_USER3;
  1011. if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
  1012. *aspace |= VME_USER4;
  1013. /* Setup 2eSST speeds */
  1014. if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
  1015. *cycle |= VME_2eSST160;
  1016. if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
  1017. *cycle |= VME_2eSST267;
  1018. if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
  1019. *cycle |= VME_2eSST320;
  1020. /* Setup cycle types */
  1021. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
  1022. *cycle |= VME_SCT;
  1023. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
  1024. *cycle |= VME_BLT;
  1025. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
  1026. *cycle |= VME_MBLT;
  1027. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
  1028. *cycle |= VME_2eVME;
  1029. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
  1030. *cycle |= VME_2eSST;
  1031. if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
  1032. *cycle |= VME_2eSSTB;
  1033. if (ctl & TSI148_LCSR_OTAT_SUP)
  1034. *cycle |= VME_SUPER;
  1035. else
  1036. *cycle |= VME_USER;
  1037. if (ctl & TSI148_LCSR_OTAT_PGM)
  1038. *cycle |= VME_PROG;
  1039. else
  1040. *cycle |= VME_DATA;
  1041. /* Setup data width */
  1042. if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
  1043. *dwidth = VME_D16;
  1044. if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
  1045. *dwidth = VME_D32;
  1046. return 0;
  1047. }
  1048. static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
  1049. unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
  1050. u32 *cycle, u32 *dwidth)
  1051. {
  1052. int retval;
  1053. spin_lock(&image->lock);
  1054. retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
  1055. cycle, dwidth);
  1056. spin_unlock(&image->lock);
  1057. return retval;
  1058. }
  1059. static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
  1060. size_t count, loff_t offset)
  1061. {
  1062. int retval, enabled;
  1063. unsigned long long vme_base, size;
  1064. u32 aspace, cycle, dwidth;
  1065. struct vme_bus_error *vme_err = NULL;
  1066. struct vme_bridge *tsi148_bridge;
  1067. void *addr = image->kern_base + offset;
  1068. unsigned int done = 0;
  1069. unsigned int count32;
  1070. tsi148_bridge = image->parent;
  1071. spin_lock(&image->lock);
  1072. /* The following code handles VME address alignment. We cannot use
  1073. * memcpy_xxx directly here because it may cut small data transfers into
  1074. * 8-bit cycles, making D16 cycles impossible.
  1075. * On the other hand, the bridge itself assures that the maximum data
  1076. * cycle configured for the transfer is used and splits it
  1077. * automatically for non-aligned addresses, so we don't want the
  1078. * overhead of needlessly forcing small transfers for the entire cycle.
  1079. */
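/*
 * Worked example (illustrative): with the alignment handling below, a 10-byte
 * read starting at a kernel address whose low two bits are 0b01 is issued as
 * a single 8-bit access, then a 16-bit access, a 4-byte aligned block via
 * memcpy_fromio(), and finally a trailing 16-bit and 8-bit access
 * (1 + 2 + 4 + 2 + 1 bytes).
 */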
  1080. if ((uintptr_t)addr & 0x1) {
  1081. *(u8 *)buf = ioread8(addr);
  1082. done += 1;
  1083. if (done == count)
  1084. goto out;
  1085. }
  1086. if ((uintptr_t)(addr + done) & 0x2) {
  1087. if ((count - done) < 2) {
  1088. *(u8 *)(buf + done) = ioread8(addr + done);
  1089. done += 1;
  1090. goto out;
  1091. } else {
  1092. *(u16 *)(buf + done) = ioread16(addr + done);
  1093. done += 2;
  1094. }
  1095. }
  1096. count32 = (count - done) & ~0x3;
  1097. if (count32 > 0) {
  1098. memcpy_fromio(buf + done, addr + done, count32);
  1099. done += count32;
  1100. }
  1101. if ((count - done) & 0x2) {
  1102. *(u16 *)(buf + done) = ioread16(addr + done);
  1103. done += 2;
  1104. }
  1105. if ((count - done) & 0x1) {
  1106. *(u8 *)(buf + done) = ioread8(addr + done);
  1107. done += 1;
  1108. }
  1109. out:
  1110. retval = count;
  1111. if (!err_chk)
  1112. goto skip_chk;
  1113. __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
  1114. &dwidth);
  1115. vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
  1116. count);
  1117. if (vme_err != NULL) {
  1118. dev_err(image->parent->parent, "First VME read error detected "
  1119. "an at address 0x%llx\n", vme_err->address);
  1120. retval = vme_err->address - (vme_base + offset);
  1121. /* Clear down saved errors in this address range */
  1122. tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
  1123. count);
  1124. }
  1125. skip_chk:
  1126. spin_unlock(&image->lock);
  1127. return retval;
  1128. }
  1129. static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
  1130. size_t count, loff_t offset)
  1131. {
  1132. int retval = 0, enabled;
  1133. unsigned long long vme_base, size;
  1134. u32 aspace, cycle, dwidth;
  1135. void *addr = image->kern_base + offset;
  1136. unsigned int done = 0;
  1137. unsigned int count32;
  1138. struct vme_bus_error *vme_err = NULL;
  1139. struct vme_bridge *tsi148_bridge;
  1140. struct tsi148_driver *bridge;
  1141. tsi148_bridge = image->parent;
  1142. bridge = tsi148_bridge->driver_priv;
  1143. spin_lock(&image->lock);
  1144. /* Here we apply the same strategy as in the master_read
  1145. * function, in order to ensure D16 cycles are used when required.
  1146. */
  1147. if ((uintptr_t)addr & 0x1) {
  1148. iowrite8(*(u8 *)buf, addr);
  1149. done += 1;
  1150. if (done == count)
  1151. goto out;
  1152. }
  1153. if ((uintptr_t)(addr + done) & 0x2) {
  1154. if ((count - done) < 2) {
  1155. iowrite8(*(u8 *)(buf + done), addr + done);
  1156. done += 1;
  1157. goto out;
  1158. } else {
  1159. iowrite16(*(u16 *)(buf + done), addr + done);
  1160. done += 2;
  1161. }
  1162. }
  1163. count32 = (count - done) & ~0x3;
  1164. if (count32 > 0) {
  1165. memcpy_toio(addr + done, buf + done, count32);
  1166. done += count32;
  1167. }
  1168. if ((count - done) & 0x2) {
  1169. iowrite16(*(u16 *)(buf + done), addr + done);
  1170. done += 2;
  1171. }
  1172. if ((count - done) & 0x1) {
  1173. iowrite8(*(u8 *)(buf + done), addr + done);
  1174. done += 1;
  1175. }
  1176. out:
  1177. retval = count;
  1178. /*
  1179. * Writes are posted. We need to do a read on the VME bus to flush out
  1180. * all of the writes before we check for errors. We can't guarantee
  1181. * that reading the data we have just written is safe. It is believed
  1182. * that there isn't any read/write re-ordering, so we can read any
  1183. * location in VME space, so let's read the Device ID from the tsi148's
  1184. * own registers as mapped into CR/CSR space.
  1185. *
  1186. * We check for saved errors in the written address range/space.
  1187. */
  1188. if (!err_chk)
  1189. goto skip_chk;
  1190. /*
  1191. * Get window info first, to maximise the time that the buffers may
  1192. * flush on their own
  1193. */
  1194. __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
  1195. &dwidth);
  1196. ioread16(bridge->flush_image->kern_base + 0x7F000);
  1197. vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
  1198. count);
  1199. if (vme_err != NULL) {
  1200. dev_warn(tsi148_bridge->parent, "First VME write error detected"
  1201. " an at address 0x%llx\n", vme_err->address);
  1202. retval = vme_err->address - (vme_base + offset);
  1203. /* Clear down saved errors in this address range */
  1204. tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
  1205. count);
  1206. }
  1207. skip_chk:
  1208. spin_unlock(&image->lock);
  1209. return retval;
  1210. }
  1211. /*
  1212. * Perform an RMW cycle on the VME bus.
  1213. *
  1214. * Requires a previously configured master window, returns final value.
  1215. */
  1216. static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
  1217. unsigned int mask, unsigned int compare, unsigned int swap,
  1218. loff_t offset)
  1219. {
  1220. unsigned long long pci_addr;
  1221. unsigned int pci_addr_high, pci_addr_low;
  1222. u32 tmp, result;
  1223. int i;
  1224. struct tsi148_driver *bridge;
  1225. bridge = image->parent->driver_priv;
  1226. /* Find the PCI address that maps to the desired VME address */
  1227. i = image->number;
  1228. /* Locking as we can only do one of these at a time */
  1229. mutex_lock(&bridge->vme_rmw);
  1230. /* Lock image */
  1231. spin_lock(&image->lock);
  1232. pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  1233. TSI148_LCSR_OFFSET_OTSAU);
  1234. pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
  1235. TSI148_LCSR_OFFSET_OTSAL);
  1236. reg_join(pci_addr_high, pci_addr_low, &pci_addr);
  1237. reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
  1238. /* Configure registers */
  1239. iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
  1240. iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
  1241. iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
  1242. iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
  1243. iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
  1244. /* Enable RMW */
  1245. tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
  1246. tmp |= TSI148_LCSR_VMCTRL_RMWEN;
  1247. iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
  1248. /* Kick process off with a read to the required address. */
  1249. result = ioread32be(image->kern_base + offset);
  1250. /* Disable RMW */
  1251. tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
  1252. tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
  1253. iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
  1254. spin_unlock(&image->lock);
  1255. mutex_unlock(&bridge->vme_rmw);
  1256. return result;
  1257. }
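/*
 * Drivers normally reach the RMW cycle above through the VME core rather
 * than calling this directly; a rough sketch (names illustrative, error
 * handling omitted):
 *
 *	result = vme_master_rmw(resource, mask, compare, swap, offset);
 *
 * where "resource" is a master window that has already been requested and
 * configured over the VME address of interest.
 */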
  1258. static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
  1259. u32 aspace, u32 cycle, u32 dwidth)
  1260. {
  1261. u32 val;
  1262. val = be32_to_cpu(*attr);
  1263. /* Setup 2eSST speeds */
  1264. switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
  1265. case VME_2eSST160:
  1266. val |= TSI148_LCSR_DSAT_2eSSTM_160;
  1267. break;
  1268. case VME_2eSST267:
  1269. val |= TSI148_LCSR_DSAT_2eSSTM_267;
  1270. break;
  1271. case VME_2eSST320:
  1272. val |= TSI148_LCSR_DSAT_2eSSTM_320;
  1273. break;
  1274. }
  1275. /* Setup cycle types */
  1276. if (cycle & VME_SCT)
  1277. val |= TSI148_LCSR_DSAT_TM_SCT;
  1278. if (cycle & VME_BLT)
  1279. val |= TSI148_LCSR_DSAT_TM_BLT;
  1280. if (cycle & VME_MBLT)
  1281. val |= TSI148_LCSR_DSAT_TM_MBLT;
  1282. if (cycle & VME_2eVME)
  1283. val |= TSI148_LCSR_DSAT_TM_2eVME;
  1284. if (cycle & VME_2eSST)
  1285. val |= TSI148_LCSR_DSAT_TM_2eSST;
  1286. if (cycle & VME_2eSSTB) {
  1287. dev_err(dev, "Currently not setting Broadcast Select "
  1288. "Registers\n");
  1289. val |= TSI148_LCSR_DSAT_TM_2eSSTB;
  1290. }
  1291. /* Setup data width */
  1292. switch (dwidth) {
  1293. case VME_D16:
  1294. val |= TSI148_LCSR_DSAT_DBW_16;
  1295. break;
  1296. case VME_D32:
  1297. val |= TSI148_LCSR_DSAT_DBW_32;
  1298. break;
  1299. default:
  1300. dev_err(dev, "Invalid data width\n");
  1301. return -EINVAL;
  1302. }
  1303. /* Setup address space */
  1304. switch (aspace) {
  1305. case VME_A16:
  1306. val |= TSI148_LCSR_DSAT_AMODE_A16;
  1307. break;
  1308. case VME_A24:
  1309. val |= TSI148_LCSR_DSAT_AMODE_A24;
  1310. break;
  1311. case VME_A32:
  1312. val |= TSI148_LCSR_DSAT_AMODE_A32;
  1313. break;
  1314. case VME_A64:
  1315. val |= TSI148_LCSR_DSAT_AMODE_A64;
  1316. break;
  1317. case VME_CRCSR:
  1318. val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
  1319. break;
  1320. case VME_USER1:
  1321. val |= TSI148_LCSR_DSAT_AMODE_USER1;
  1322. break;
  1323. case VME_USER2:
  1324. val |= TSI148_LCSR_DSAT_AMODE_USER2;
  1325. break;
  1326. case VME_USER3:
  1327. val |= TSI148_LCSR_DSAT_AMODE_USER3;
  1328. break;
  1329. case VME_USER4:
  1330. val |= TSI148_LCSR_DSAT_AMODE_USER4;
  1331. break;
  1332. default:
  1333. dev_err(dev, "Invalid address space\n");
1334. return -EINVAL;
  1336. }
  1337. if (cycle & VME_SUPER)
  1338. val |= TSI148_LCSR_DSAT_SUP;
  1339. if (cycle & VME_PROG)
  1340. val |= TSI148_LCSR_DSAT_PGM;
  1341. *attr = cpu_to_be32(val);
  1342. return 0;
  1343. }
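/*
 * The helper below is the destination-side counterpart of
 * tsi148_dma_set_vme_src_attributes(): it maps the same generic VME_* cycle,
 * data width and address space flags onto the DDAT field of the big endian
 * DMA descriptor instead of the DSAT field.
 */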
  1344. static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
  1345. u32 aspace, u32 cycle, u32 dwidth)
  1346. {
  1347. u32 val;
  1348. val = be32_to_cpu(*attr);
  1349. /* Setup 2eSST speeds */
  1350. switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
  1351. case VME_2eSST160:
  1352. val |= TSI148_LCSR_DDAT_2eSSTM_160;
  1353. break;
  1354. case VME_2eSST267:
  1355. val |= TSI148_LCSR_DDAT_2eSSTM_267;
  1356. break;
  1357. case VME_2eSST320:
  1358. val |= TSI148_LCSR_DDAT_2eSSTM_320;
  1359. break;
  1360. }
  1361. /* Setup cycle types */
  1362. if (cycle & VME_SCT)
  1363. val |= TSI148_LCSR_DDAT_TM_SCT;
  1364. if (cycle & VME_BLT)
  1365. val |= TSI148_LCSR_DDAT_TM_BLT;
  1366. if (cycle & VME_MBLT)
  1367. val |= TSI148_LCSR_DDAT_TM_MBLT;
  1368. if (cycle & VME_2eVME)
  1369. val |= TSI148_LCSR_DDAT_TM_2eVME;
  1370. if (cycle & VME_2eSST)
  1371. val |= TSI148_LCSR_DDAT_TM_2eSST;
  1372. if (cycle & VME_2eSSTB) {
  1373. dev_err(dev, "Currently not setting Broadcast Select "
  1374. "Registers\n");
  1375. val |= TSI148_LCSR_DDAT_TM_2eSSTB;
  1376. }
  1377. /* Setup data width */
  1378. switch (dwidth) {
  1379. case VME_D16:
  1380. val |= TSI148_LCSR_DDAT_DBW_16;
  1381. break;
  1382. case VME_D32:
  1383. val |= TSI148_LCSR_DDAT_DBW_32;
  1384. break;
  1385. default:
  1386. dev_err(dev, "Invalid data width\n");
  1387. return -EINVAL;
  1388. }
  1389. /* Setup address space */
  1390. switch (aspace) {
  1391. case VME_A16:
  1392. val |= TSI148_LCSR_DDAT_AMODE_A16;
  1393. break;
  1394. case VME_A24:
  1395. val |= TSI148_LCSR_DDAT_AMODE_A24;
  1396. break;
  1397. case VME_A32:
  1398. val |= TSI148_LCSR_DDAT_AMODE_A32;
  1399. break;
  1400. case VME_A64:
  1401. val |= TSI148_LCSR_DDAT_AMODE_A64;
  1402. break;
  1403. case VME_CRCSR:
  1404. val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
  1405. break;
  1406. case VME_USER1:
  1407. val |= TSI148_LCSR_DDAT_AMODE_USER1;
  1408. break;
  1409. case VME_USER2:
  1410. val |= TSI148_LCSR_DDAT_AMODE_USER2;
  1411. break;
  1412. case VME_USER3:
  1413. val |= TSI148_LCSR_DDAT_AMODE_USER3;
  1414. break;
  1415. case VME_USER4:
  1416. val |= TSI148_LCSR_DDAT_AMODE_USER4;
  1417. break;
  1418. default:
  1419. dev_err(dev, "Invalid address space\n");
1420. return -EINVAL;
  1422. }
  1423. if (cycle & VME_SUPER)
  1424. val |= TSI148_LCSR_DDAT_SUP;
  1425. if (cycle & VME_PROG)
  1426. val |= TSI148_LCSR_DDAT_PGM;
  1427. *attr = cpu_to_be32(val);
  1428. return 0;
  1429. }
  1430. /*
  1431. * Add a link list descriptor to the list
  1432. *
  1433. * Note: DMA engine expects the DMA descriptor to be big endian.
  1434. */
  1435. static int tsi148_dma_list_add(struct vme_dma_list *list,
  1436. struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
  1437. {
  1438. struct tsi148_dma_entry *entry, *prev;
  1439. u32 address_high, address_low, val;
  1440. struct vme_dma_pattern *pattern_attr;
  1441. struct vme_dma_pci *pci_attr;
  1442. struct vme_dma_vme *vme_attr;
  1443. int retval = 0;
  1444. struct vme_bridge *tsi148_bridge;
  1445. tsi148_bridge = list->parent->parent;
  1446. /* Descriptor must be aligned on 64-bit boundaries */
  1447. entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
  1448. if (entry == NULL) {
  1449. dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
  1450. "dma resource structure\n");
  1451. retval = -ENOMEM;
  1452. goto err_mem;
  1453. }
  1454. /* Test descriptor alignment */
  1455. if ((unsigned long)&entry->descriptor & 0x7) {
  1456. dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
  1457. "byte boundary as required: %p\n",
  1458. &entry->descriptor);
  1459. retval = -EINVAL;
  1460. goto err_align;
  1461. }
  1462. /* Given we are going to fill out the structure, we probably don't
  1463. * need to zero it, but better safe than sorry for now.
  1464. */
  1465. memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
  1466. /* Fill out source part */
  1467. switch (src->type) {
  1468. case VME_DMA_PATTERN:
  1469. pattern_attr = src->private;
  1470. entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
  1471. val = TSI148_LCSR_DSAT_TYP_PAT;
  1472. /* Default behaviour is 32 bit pattern */
  1473. if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
  1474. val |= TSI148_LCSR_DSAT_PSZ;
  1475. /* It seems that the default behaviour is to increment */
  1476. if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
  1477. val |= TSI148_LCSR_DSAT_NIN;
  1478. entry->descriptor.dsat = cpu_to_be32(val);
  1479. break;
  1480. case VME_DMA_PCI:
  1481. pci_attr = src->private;
  1482. reg_split((unsigned long long)pci_attr->address, &address_high,
  1483. &address_low);
  1484. entry->descriptor.dsau = cpu_to_be32(address_high);
  1485. entry->descriptor.dsal = cpu_to_be32(address_low);
  1486. entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
  1487. break;
  1488. case VME_DMA_VME:
  1489. vme_attr = src->private;
  1490. reg_split((unsigned long long)vme_attr->address, &address_high,
  1491. &address_low);
  1492. entry->descriptor.dsau = cpu_to_be32(address_high);
  1493. entry->descriptor.dsal = cpu_to_be32(address_low);
  1494. entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
  1495. retval = tsi148_dma_set_vme_src_attributes(
  1496. tsi148_bridge->parent, &entry->descriptor.dsat,
  1497. vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
  1498. if (retval < 0)
  1499. goto err_source;
  1500. break;
  1501. default:
  1502. dev_err(tsi148_bridge->parent, "Invalid source type\n");
  1503. retval = -EINVAL;
1504. goto err_source;
  1506. }
  1507. /* Assume last link - this will be over-written by adding another */
  1508. entry->descriptor.dnlau = cpu_to_be32(0);
  1509. entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
  1510. /* Fill out destination part */
  1511. switch (dest->type) {
  1512. case VME_DMA_PCI:
  1513. pci_attr = dest->private;
  1514. reg_split((unsigned long long)pci_attr->address, &address_high,
  1515. &address_low);
  1516. entry->descriptor.ddau = cpu_to_be32(address_high);
  1517. entry->descriptor.ddal = cpu_to_be32(address_low);
  1518. entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
  1519. break;
  1520. case VME_DMA_VME:
  1521. vme_attr = dest->private;
  1522. reg_split((unsigned long long)vme_attr->address, &address_high,
  1523. &address_low);
  1524. entry->descriptor.ddau = cpu_to_be32(address_high);
  1525. entry->descriptor.ddal = cpu_to_be32(address_low);
  1526. entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
  1527. retval = tsi148_dma_set_vme_dest_attributes(
  1528. tsi148_bridge->parent, &entry->descriptor.ddat,
  1529. vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
  1530. if (retval < 0)
  1531. goto err_dest;
  1532. break;
  1533. default:
  1534. dev_err(tsi148_bridge->parent, "Invalid destination type\n");
  1535. retval = -EINVAL;
1536. goto err_dest;
  1538. }
  1539. /* Fill out count */
  1540. entry->descriptor.dcnt = cpu_to_be32((u32)count);
  1541. /* Add to list */
  1542. list_add_tail(&entry->list, &list->entries);
  1543. /* Fill out previous descriptors "Next Address" */
  1544. if (entry->list.prev != &list->entries) {
  1545. prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
  1546. list);
  1547. /* We need the bus address for the pointer */
  1548. entry->dma_handle = dma_map_single(tsi148_bridge->parent,
  1549. &entry->descriptor,
  1550. sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
  1551. reg_split((unsigned long long)entry->dma_handle, &address_high,
  1552. &address_low);
1553. prev->descriptor.dnlau = cpu_to_be32(address_high);
1554. prev->descriptor.dnlal = cpu_to_be32(address_low);
  1555. }
  1556. return 0;
  1557. err_dest:
  1558. err_source:
  1559. err_align:
  1560. kfree(entry);
  1561. err_mem:
  1562. return retval;
  1563. }
  1564. /*
1565. * Check to see if the provided DMA channel is busy: returns 0 while the
* channel is busy and 1 once it is idle, so it can be used directly as a
* wait_event() condition.
1566. */
  1567. static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
  1568. {
  1569. u32 tmp;
  1570. struct tsi148_driver *bridge;
  1571. bridge = tsi148_bridge->driver_priv;
  1572. tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
  1573. TSI148_LCSR_OFFSET_DSTA);
  1574. if (tmp & TSI148_LCSR_DSTA_BSY)
  1575. return 0;
  1576. else
  1577. return 1;
  1578. }
  1579. /*
  1580. * Execute a previously generated link list
  1581. *
  1582. * XXX Need to provide control register configuration.
  1583. */
  1584. static int tsi148_dma_list_exec(struct vme_dma_list *list)
  1585. {
  1586. struct vme_dma_resource *ctrlr;
  1587. int channel, retval = 0;
  1588. struct tsi148_dma_entry *entry;
  1589. u32 bus_addr_high, bus_addr_low;
  1590. u32 val, dctlreg = 0;
  1591. struct vme_bridge *tsi148_bridge;
  1592. struct tsi148_driver *bridge;
  1593. ctrlr = list->parent;
  1594. tsi148_bridge = ctrlr->parent;
  1595. bridge = tsi148_bridge->driver_priv;
  1596. mutex_lock(&ctrlr->mtx);
  1597. channel = ctrlr->number;
  1598. if (!list_empty(&ctrlr->running)) {
  1599. /*
  1600. * XXX We have an active DMA transfer and currently haven't
  1601. * sorted out the mechanism for "pending" DMA transfers.
  1602. * Return busy.
  1603. */
  1604. /* Need to add to pending here */
  1605. mutex_unlock(&ctrlr->mtx);
  1606. return -EBUSY;
  1607. } else {
  1608. list_add(&list->list, &ctrlr->running);
  1609. }
  1610. /* Get first bus address and write into registers */
  1611. entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
  1612. list);
  1613. entry->dma_handle = dma_map_single(tsi148_bridge->parent,
  1614. &entry->descriptor,
  1615. sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
  1616. mutex_unlock(&ctrlr->mtx);
  1617. reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);
  1618. iowrite32be(bus_addr_high, bridge->base +
  1619. TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
  1620. iowrite32be(bus_addr_low, bridge->base +
  1621. TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
  1622. dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
  1623. TSI148_LCSR_OFFSET_DCTL);
  1624. /* Start the operation */
  1625. iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
  1626. TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
  1627. wait_event_interruptible(bridge->dma_queue[channel],
  1628. tsi148_dma_busy(ctrlr->parent, channel));
  1629. /*
  1630. * Read status register, this register is valid until we kick off a
  1631. * new transfer.
  1632. */
  1633. val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
  1634. TSI148_LCSR_OFFSET_DSTA);
  1635. if (val & TSI148_LCSR_DSTA_VBE) {
  1636. dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
  1637. retval = -EIO;
  1638. }
  1639. /* Remove list from running list */
  1640. mutex_lock(&ctrlr->mtx);
  1641. list_del(&list->list);
  1642. mutex_unlock(&ctrlr->mtx);
  1643. return retval;
  1644. }
  1645. /*
  1646. * Clean up a previously generated link list
  1647. *
1648. * This is kept as a separate function as the chain may be reused.
  1649. */
  1650. static int tsi148_dma_list_empty(struct vme_dma_list *list)
  1651. {
  1652. struct list_head *pos, *temp;
  1653. struct tsi148_dma_entry *entry;
  1654. struct vme_bridge *tsi148_bridge = list->parent->parent;
  1655. /* detach and free each entry */
  1656. list_for_each_safe(pos, temp, &list->entries) {
  1657. list_del(pos);
  1658. entry = list_entry(pos, struct tsi148_dma_entry, list);
  1659. dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
  1660. sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
  1661. kfree(entry);
  1662. }
  1663. return 0;
  1664. }
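/*
 * The list add/exec/empty callbacks above back the VME core DMA list API.
 * A rough usage sketch from a consumer driver (names illustrative, error
 * handling and attribute freeing omitted):
 *
 *	list = vme_new_dma_list(resource);
 *	src = vme_dma_vme_attribute(vme_addr, VME_A32, VME_SCT, VME_D32);
 *	dest = vme_dma_pci_attribute(buf_bus_addr);
 *	vme_dma_list_add(list, src, dest, count);
 *	vme_dma_list_exec(list);
 *	vme_dma_list_free(list);
 */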
  1665. /*
  1666. * All 4 location monitors reside at the same base - this is therefore a
  1667. * system wide configuration.
  1668. *
  1669. * This does not enable the LM monitor - that should be done when the first
  1670. * callback is attached and disabled when the last callback is removed.
  1671. */
  1672. static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
  1673. u32 aspace, u32 cycle)
  1674. {
  1675. u32 lm_base_high, lm_base_low, lm_ctl = 0;
  1676. int i;
  1677. struct vme_bridge *tsi148_bridge;
  1678. struct tsi148_driver *bridge;
  1679. tsi148_bridge = lm->parent;
  1680. bridge = tsi148_bridge->driver_priv;
  1681. mutex_lock(&lm->mtx);
  1682. /* If we already have a callback attached, we can't move it! */
  1683. for (i = 0; i < lm->monitors; i++) {
  1684. if (bridge->lm_callback[i] != NULL) {
  1685. mutex_unlock(&lm->mtx);
  1686. dev_err(tsi148_bridge->parent, "Location monitor "
  1687. "callback attached, can't reset\n");
  1688. return -EBUSY;
  1689. }
  1690. }
  1691. switch (aspace) {
  1692. case VME_A16:
  1693. lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
  1694. break;
  1695. case VME_A24:
  1696. lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
  1697. break;
  1698. case VME_A32:
  1699. lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
  1700. break;
  1701. case VME_A64:
  1702. lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
  1703. break;
  1704. default:
  1705. mutex_unlock(&lm->mtx);
  1706. dev_err(tsi148_bridge->parent, "Invalid address space\n");
1707. return -EINVAL;
  1709. }
  1710. if (cycle & VME_SUPER)
1711. lm_ctl |= TSI148_LCSR_LMAT_SUPR;
  1712. if (cycle & VME_USER)
  1713. lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
  1714. if (cycle & VME_PROG)
  1715. lm_ctl |= TSI148_LCSR_LMAT_PGM;
  1716. if (cycle & VME_DATA)
  1717. lm_ctl |= TSI148_LCSR_LMAT_DATA;
  1718. reg_split(lm_base, &lm_base_high, &lm_base_low);
  1719. iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
  1720. iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
  1721. iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
  1722. mutex_unlock(&lm->mtx);
  1723. return 0;
  1724. }
1725. /* Get the configuration of the location monitor and return whether it is enabled
  1726. * or disabled.
  1727. */
  1728. static int tsi148_lm_get(struct vme_lm_resource *lm,
  1729. unsigned long long *lm_base, u32 *aspace, u32 *cycle)
  1730. {
  1731. u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
  1732. struct tsi148_driver *bridge;
  1733. bridge = lm->parent->driver_priv;
  1734. mutex_lock(&lm->mtx);
  1735. lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
  1736. lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
  1737. lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
  1738. reg_join(lm_base_high, lm_base_low, lm_base);
  1739. if (lm_ctl & TSI148_LCSR_LMAT_EN)
  1740. enabled = 1;
  1741. if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
  1742. *aspace |= VME_A16;
  1743. if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
  1744. *aspace |= VME_A24;
  1745. if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
  1746. *aspace |= VME_A32;
  1747. if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
  1748. *aspace |= VME_A64;
  1749. if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
  1750. *cycle |= VME_SUPER;
  1751. if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
  1752. *cycle |= VME_USER;
  1753. if (lm_ctl & TSI148_LCSR_LMAT_PGM)
  1754. *cycle |= VME_PROG;
  1755. if (lm_ctl & TSI148_LCSR_LMAT_DATA)
  1756. *cycle |= VME_DATA;
  1757. mutex_unlock(&lm->mtx);
  1758. return enabled;
  1759. }
  1760. /*
  1761. * Attach a callback to a specific location monitor.
  1762. *
  1763. * Callback will be passed the monitor triggered.
  1764. */
  1765. static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
  1766. void (*callback)(int))
  1767. {
  1768. u32 lm_ctl, tmp;
  1769. struct vme_bridge *tsi148_bridge;
  1770. struct tsi148_driver *bridge;
  1771. tsi148_bridge = lm->parent;
  1772. bridge = tsi148_bridge->driver_priv;
  1773. mutex_lock(&lm->mtx);
  1774. /* Ensure that the location monitor is configured - need PGM or DATA */
  1775. lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
  1776. if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
  1777. mutex_unlock(&lm->mtx);
  1778. dev_err(tsi148_bridge->parent, "Location monitor not properly "
  1779. "configured\n");
  1780. return -EINVAL;
  1781. }
  1782. /* Check that a callback isn't already attached */
  1783. if (bridge->lm_callback[monitor] != NULL) {
  1784. mutex_unlock(&lm->mtx);
  1785. dev_err(tsi148_bridge->parent, "Existing callback attached\n");
  1786. return -EBUSY;
  1787. }
  1788. /* Attach callback */
  1789. bridge->lm_callback[monitor] = callback;
  1790. /* Enable Location Monitor interrupt */
  1791. tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
  1792. tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
  1793. iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
  1794. tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
  1795. tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
  1796. iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
  1797. /* Ensure that global Location Monitor Enable set */
  1798. if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
  1799. lm_ctl |= TSI148_LCSR_LMAT_EN;
  1800. iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
  1801. }
  1802. mutex_unlock(&lm->mtx);
  1803. return 0;
  1804. }
  1805. /*
1806. * Detach a callback function from a specific location monitor.
  1807. */
  1808. static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
  1809. {
  1810. u32 lm_en, tmp;
  1811. struct tsi148_driver *bridge;
  1812. bridge = lm->parent->driver_priv;
  1813. mutex_lock(&lm->mtx);
  1814. /* Disable Location Monitor and ensure previous interrupts are clear */
  1815. lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
  1816. lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
  1817. iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
  1818. tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
  1819. tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
  1820. iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
  1821. iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
  1822. bridge->base + TSI148_LCSR_INTC);
  1823. /* Detach callback */
  1824. bridge->lm_callback[monitor] = NULL;
  1825. /* If all location monitors disabled, disable global Location Monitor */
  1826. if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
  1827. TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
  1828. tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
  1829. tmp &= ~TSI148_LCSR_LMAT_EN;
  1830. iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
  1831. }
  1832. mutex_unlock(&lm->mtx);
  1833. return 0;
  1834. }
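/*
 * A rough sketch of how the location monitor callbacks above are reached
 * from a consumer driver via the VME core (names illustrative, error
 * handling omitted):
 *
 *	lm = vme_lm_request(vdev);
 *	vme_lm_set(lm, lm_base, VME_A24, VME_USER | VME_DATA);
 *	vme_lm_attach(lm, monitor, callback);
 *	...
 *	vme_lm_detach(lm, monitor);
 *	vme_lm_free(lm);
 */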
  1835. /*
  1836. * Determine Geographical Addressing
  1837. */
  1838. static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
  1839. {
  1840. u32 slot = 0;
  1841. struct tsi148_driver *bridge;
  1842. bridge = tsi148_bridge->driver_priv;
  1843. if (!geoid) {
  1844. slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
  1845. slot = slot & TSI148_LCSR_VSTAT_GA_M;
  1846. } else
  1847. slot = geoid;
  1848. return (int)slot;
  1849. }
  1850. static void *tsi148_alloc_consistent(struct device *parent, size_t size,
  1851. dma_addr_t *dma)
  1852. {
  1853. struct pci_dev *pdev;
  1854. /* Find pci_dev container of dev */
  1855. pdev = container_of(parent, struct pci_dev, dev);
  1856. return pci_alloc_consistent(pdev, size, dma);
  1857. }
  1858. static void tsi148_free_consistent(struct device *parent, size_t size,
  1859. void *vaddr, dma_addr_t dma)
  1860. {
  1861. struct pci_dev *pdev;
  1862. /* Find pci_dev container of dev */
  1863. pdev = container_of(parent, struct pci_dev, dev);
  1864. pci_free_consistent(pdev, size, vaddr, dma);
  1865. }
  1866. static int __init tsi148_init(void)
  1867. {
  1868. return pci_register_driver(&tsi148_driver);
  1869. }
  1870. /*
  1871. * Configure CR/CSR space
  1872. *
  1873. * Access to the CR/CSR can be configured at power-up. The location of the
1874. * CR/CSR registers in the CR/CSR address space is determined by the board's
1875. * Auto-ID or Geographic address. This function ensures that the window is
1876. * enabled at an offset consistent with the board's geographic address.
1877. *
1878. * Each board has a 512kB window, with the highest 4kB being used for the
1879. * board's registers, which leaves a fixed-length 508kB window that must
  1880. * be mapped onto PCI memory.
  1881. */
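/*
 * In other words, with 0x80000 (512kB) per slot the window for slot N starts
 * at N * 0x80000 in CR/CSR space and the board's own registers sit in the top
 * 4kB of that window (offsets 0x7F000 - 0x7FFFF). This is the window set up
 * below for the flush image when err_chk is enabled, and the offset read by
 * tsi148_master_write() to flush posted writes.
 */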
  1882. static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
  1883. struct pci_dev *pdev)
  1884. {
  1885. u32 cbar, crat, vstat;
  1886. u32 crcsr_bus_high, crcsr_bus_low;
  1887. int retval;
  1888. struct tsi148_driver *bridge;
  1889. bridge = tsi148_bridge->driver_priv;
  1890. /* Allocate mem for CR/CSR image */
  1891. bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
  1892. &bridge->crcsr_bus);
  1893. if (bridge->crcsr_kernel == NULL) {
  1894. dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
  1895. "CR/CSR image\n");
  1896. return -ENOMEM;
  1897. }
  1898. memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
  1899. reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
  1900. iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
  1901. iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
  1902. /* Ensure that the CR/CSR is configured at the correct offset */
  1903. cbar = ioread32be(bridge->base + TSI148_CBAR);
  1904. cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
  1905. vstat = tsi148_slot_get(tsi148_bridge);
  1906. if (cbar != vstat) {
  1907. cbar = vstat;
  1908. dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
  1909. iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
  1910. }
  1911. dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
  1912. crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
1913. if (crat & TSI148_LCSR_CRAT_EN)
1914. dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
1915. else {
1916. dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
1917. iowrite32be(crat | TSI148_LCSR_CRAT_EN,
1918. bridge->base + TSI148_LCSR_CRAT);
}
  1919. /* If we want flushed, error-checked writes, set up a window
  1920. * over the CR/CSR registers. We read from here to safely flush
  1921. * through VME writes.
  1922. */
  1923. if (err_chk) {
  1924. retval = tsi148_master_set(bridge->flush_image, 1,
  1925. (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
  1926. VME_D16);
  1927. if (retval)
  1928. dev_err(tsi148_bridge->parent, "Configuring flush image"
  1929. " failed\n");
  1930. }
  1931. return 0;
  1932. }
  1933. static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
  1934. struct pci_dev *pdev)
  1935. {
  1936. u32 crat;
  1937. struct tsi148_driver *bridge;
  1938. bridge = tsi148_bridge->driver_priv;
  1939. /* Turn off CR/CSR space */
  1940. crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
  1941. iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
  1942. bridge->base + TSI148_LCSR_CRAT);
  1943. /* Free image */
  1944. iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
  1945. iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
  1946. pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
  1947. bridge->crcsr_bus);
  1948. }
  1949. static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  1950. {
  1951. int retval, i, master_num;
  1952. u32 data;
  1953. struct list_head *pos = NULL;
  1954. struct vme_bridge *tsi148_bridge;
  1955. struct tsi148_driver *tsi148_device;
  1956. struct vme_master_resource *master_image;
  1957. struct vme_slave_resource *slave_image;
  1958. struct vme_dma_resource *dma_ctrlr;
  1959. struct vme_lm_resource *lm;
  1960. /* If we want to support more than one of each bridge, we need to
  1961. * dynamically generate this so we get one per device
  1962. */
  1963. tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
  1964. if (tsi148_bridge == NULL) {
  1965. dev_err(&pdev->dev, "Failed to allocate memory for device "
  1966. "structure\n");
  1967. retval = -ENOMEM;
  1968. goto err_struct;
  1969. }
  1970. tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
  1971. if (tsi148_device == NULL) {
  1972. dev_err(&pdev->dev, "Failed to allocate memory for device "
  1973. "structure\n");
  1974. retval = -ENOMEM;
  1975. goto err_driver;
  1976. }
  1977. tsi148_bridge->driver_priv = tsi148_device;
  1978. /* Enable the device */
  1979. retval = pci_enable_device(pdev);
  1980. if (retval) {
  1981. dev_err(&pdev->dev, "Unable to enable device\n");
  1982. goto err_enable;
  1983. }
  1984. /* Map Registers */
  1985. retval = pci_request_regions(pdev, driver_name);
  1986. if (retval) {
  1987. dev_err(&pdev->dev, "Unable to reserve resources\n");
  1988. goto err_resource;
  1989. }
  1990. /* map registers in BAR 0 */
  1991. tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
  1992. 4096);
  1993. if (!tsi148_device->base) {
  1994. dev_err(&pdev->dev, "Unable to remap CRG region\n");
  1995. retval = -EIO;
  1996. goto err_remap;
  1997. }
  1998. /* Check to see if the mapping worked out */
  1999. data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
  2000. if (data != PCI_VENDOR_ID_TUNDRA) {
  2001. dev_err(&pdev->dev, "CRG region check failed\n");
  2002. retval = -EIO;
  2003. goto err_test;
  2004. }
  2005. /* Initialize wait queues & mutual exclusion flags */
  2006. init_waitqueue_head(&tsi148_device->dma_queue[0]);
  2007. init_waitqueue_head(&tsi148_device->dma_queue[1]);
  2008. init_waitqueue_head(&tsi148_device->iack_queue);
  2009. mutex_init(&tsi148_device->vme_int);
  2010. mutex_init(&tsi148_device->vme_rmw);
  2011. tsi148_bridge->parent = &pdev->dev;
  2012. strcpy(tsi148_bridge->name, driver_name);
  2013. /* Setup IRQ */
  2014. retval = tsi148_irq_init(tsi148_bridge);
  2015. if (retval != 0) {
  2016. dev_err(&pdev->dev, "Chip Initialization failed.\n");
  2017. goto err_irq;
  2018. }
  2019. /* If we are going to flush writes, we need to read from the VME bus.
2020. * We need to do this safely, thus we read the device's own CR/CSR
  2021. * register. To do this we must set up a window in CR/CSR space and
  2022. * hence have one less master window resource available.
  2023. */
  2024. master_num = TSI148_MAX_MASTER;
  2025. if (err_chk) {
  2026. master_num--;
  2027. tsi148_device->flush_image =
  2028. kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
  2029. if (tsi148_device->flush_image == NULL) {
  2030. dev_err(&pdev->dev, "Failed to allocate memory for "
  2031. "flush resource structure\n");
  2032. retval = -ENOMEM;
  2033. goto err_master;
  2034. }
  2035. tsi148_device->flush_image->parent = tsi148_bridge;
  2036. spin_lock_init(&tsi148_device->flush_image->lock);
  2037. tsi148_device->flush_image->locked = 1;
  2038. tsi148_device->flush_image->number = master_num;
  2039. tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
  2040. VME_A32 | VME_A64;
  2041. tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
  2042. VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
  2043. VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
  2044. VME_USER | VME_PROG | VME_DATA;
  2045. tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
  2046. memset(&tsi148_device->flush_image->bus_resource, 0,
  2047. sizeof(struct resource));
  2048. tsi148_device->flush_image->kern_base = NULL;
  2049. }
  2050. /* Add master windows to list */
  2051. INIT_LIST_HEAD(&tsi148_bridge->master_resources);
  2052. for (i = 0; i < master_num; i++) {
  2053. master_image = kmalloc(sizeof(struct vme_master_resource),
  2054. GFP_KERNEL);
  2055. if (master_image == NULL) {
  2056. dev_err(&pdev->dev, "Failed to allocate memory for "
  2057. "master resource structure\n");
  2058. retval = -ENOMEM;
  2059. goto err_master;
  2060. }
  2061. master_image->parent = tsi148_bridge;
  2062. spin_lock_init(&master_image->lock);
  2063. master_image->locked = 0;
  2064. master_image->number = i;
  2065. master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
  2066. VME_A64;
  2067. master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
  2068. VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
  2069. VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
  2070. VME_PROG | VME_DATA;
  2071. master_image->width_attr = VME_D16 | VME_D32;
  2072. memset(&master_image->bus_resource, 0,
  2073. sizeof(struct resource));
  2074. master_image->kern_base = NULL;
  2075. list_add_tail(&master_image->list,
  2076. &tsi148_bridge->master_resources);
  2077. }
  2078. /* Add slave windows to list */
  2079. INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
  2080. for (i = 0; i < TSI148_MAX_SLAVE; i++) {
  2081. slave_image = kmalloc(sizeof(struct vme_slave_resource),
  2082. GFP_KERNEL);
  2083. if (slave_image == NULL) {
  2084. dev_err(&pdev->dev, "Failed to allocate memory for "
  2085. "slave resource structure\n");
  2086. retval = -ENOMEM;
  2087. goto err_slave;
  2088. }
  2089. slave_image->parent = tsi148_bridge;
  2090. mutex_init(&slave_image->mtx);
  2091. slave_image->locked = 0;
  2092. slave_image->number = i;
  2093. slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
  2094. VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
  2095. VME_USER3 | VME_USER4;
  2096. slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
  2097. VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
  2098. VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
  2099. VME_PROG | VME_DATA;
  2100. list_add_tail(&slave_image->list,
  2101. &tsi148_bridge->slave_resources);
  2102. }
  2103. /* Add dma engines to list */
  2104. INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
  2105. for (i = 0; i < TSI148_MAX_DMA; i++) {
  2106. dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
  2107. GFP_KERNEL);
  2108. if (dma_ctrlr == NULL) {
  2109. dev_err(&pdev->dev, "Failed to allocate memory for "
  2110. "dma resource structure\n");
  2111. retval = -ENOMEM;
  2112. goto err_dma;
  2113. }
  2114. dma_ctrlr->parent = tsi148_bridge;
  2115. mutex_init(&dma_ctrlr->mtx);
  2116. dma_ctrlr->locked = 0;
  2117. dma_ctrlr->number = i;
  2118. dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
  2119. VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
  2120. VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
  2121. VME_DMA_PATTERN_TO_MEM;
  2122. INIT_LIST_HEAD(&dma_ctrlr->pending);
  2123. INIT_LIST_HEAD(&dma_ctrlr->running);
  2124. list_add_tail(&dma_ctrlr->list,
  2125. &tsi148_bridge->dma_resources);
  2126. }
  2127. /* Add location monitor to list */
  2128. INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
  2129. lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
  2130. if (lm == NULL) {
  2131. dev_err(&pdev->dev, "Failed to allocate memory for "
  2132. "location monitor resource structure\n");
  2133. retval = -ENOMEM;
  2134. goto err_lm;
  2135. }
  2136. lm->parent = tsi148_bridge;
  2137. mutex_init(&lm->mtx);
  2138. lm->locked = 0;
  2139. lm->number = 1;
  2140. lm->monitors = 4;
  2141. list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
  2142. tsi148_bridge->slave_get = tsi148_slave_get;
  2143. tsi148_bridge->slave_set = tsi148_slave_set;
  2144. tsi148_bridge->master_get = tsi148_master_get;
  2145. tsi148_bridge->master_set = tsi148_master_set;
  2146. tsi148_bridge->master_read = tsi148_master_read;
  2147. tsi148_bridge->master_write = tsi148_master_write;
  2148. tsi148_bridge->master_rmw = tsi148_master_rmw;
  2149. tsi148_bridge->dma_list_add = tsi148_dma_list_add;
  2150. tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
  2151. tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
  2152. tsi148_bridge->irq_set = tsi148_irq_set;
  2153. tsi148_bridge->irq_generate = tsi148_irq_generate;
  2154. tsi148_bridge->lm_set = tsi148_lm_set;
  2155. tsi148_bridge->lm_get = tsi148_lm_get;
  2156. tsi148_bridge->lm_attach = tsi148_lm_attach;
  2157. tsi148_bridge->lm_detach = tsi148_lm_detach;
  2158. tsi148_bridge->slot_get = tsi148_slot_get;
  2159. tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
  2160. tsi148_bridge->free_consistent = tsi148_free_consistent;
  2161. data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
  2162. dev_info(&pdev->dev, "Board is%s the VME system controller\n",
  2163. (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
  2164. if (!geoid)
  2165. dev_info(&pdev->dev, "VME geographical address is %d\n",
  2166. data & TSI148_LCSR_VSTAT_GA_M);
  2167. else
  2168. dev_info(&pdev->dev, "VME geographical address is set to %d\n",
  2169. geoid);
2170. dev_info(&pdev->dev, "VME write flush and error check is %s\n",
  2171. err_chk ? "enabled" : "disabled");
2172. retval = tsi148_crcsr_init(tsi148_bridge, pdev);
if (retval) {
  2173. dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
  2174. goto err_crcsr;
  2175. }
  2176. retval = vme_register_bridge(tsi148_bridge);
  2177. if (retval != 0) {
  2178. dev_err(&pdev->dev, "Chip Registration failed.\n");
  2179. goto err_reg;
  2180. }
  2181. pci_set_drvdata(pdev, tsi148_bridge);
  2182. /* Clear VME bus "board fail", and "power-up reset" lines */
  2183. data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
  2184. data &= ~TSI148_LCSR_VSTAT_BRDFL;
  2185. data |= TSI148_LCSR_VSTAT_CPURST;
  2186. iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
  2187. return 0;
  2188. err_reg:
  2189. tsi148_crcsr_exit(tsi148_bridge, pdev);
  2190. err_crcsr:
  2191. err_lm:
  2192. /* resources are stored in link list */
  2193. list_for_each(pos, &tsi148_bridge->lm_resources) {
  2194. lm = list_entry(pos, struct vme_lm_resource, list);
  2195. list_del(pos);
  2196. kfree(lm);
  2197. }
  2198. err_dma:
  2199. /* resources are stored in link list */
  2200. list_for_each(pos, &tsi148_bridge->dma_resources) {
  2201. dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
  2202. list_del(pos);
  2203. kfree(dma_ctrlr);
  2204. }
  2205. err_slave:
  2206. /* resources are stored in link list */
  2207. list_for_each(pos, &tsi148_bridge->slave_resources) {
  2208. slave_image = list_entry(pos, struct vme_slave_resource, list);
  2209. list_del(pos);
  2210. kfree(slave_image);
  2211. }
  2212. err_master:
  2213. /* resources are stored in link list */
  2214. list_for_each(pos, &tsi148_bridge->master_resources) {
  2215. master_image = list_entry(pos, struct vme_master_resource,
  2216. list);
  2217. list_del(pos);
  2218. kfree(master_image);
  2219. }
  2220. tsi148_irq_exit(tsi148_bridge, pdev);
  2221. err_irq:
  2222. err_test:
  2223. iounmap(tsi148_device->base);
  2224. err_remap:
  2225. pci_release_regions(pdev);
  2226. err_resource:
  2227. pci_disable_device(pdev);
  2228. err_enable:
  2229. kfree(tsi148_device);
  2230. err_driver:
  2231. kfree(tsi148_bridge);
  2232. err_struct:
  2233. return retval;
  2234. }
  2235. static void tsi148_remove(struct pci_dev *pdev)
  2236. {
  2237. struct list_head *pos = NULL;
  2238. struct list_head *tmplist;
  2239. struct vme_master_resource *master_image;
  2240. struct vme_slave_resource *slave_image;
  2241. struct vme_dma_resource *dma_ctrlr;
  2242. int i;
  2243. struct tsi148_driver *bridge;
  2244. struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
  2245. bridge = tsi148_bridge->driver_priv;
  2246. dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
  2247. /*
  2248. * Shutdown all inbound and outbound windows.
  2249. */
  2250. for (i = 0; i < 8; i++) {
  2251. iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
  2252. TSI148_LCSR_OFFSET_ITAT);
  2253. iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
  2254. TSI148_LCSR_OFFSET_OTAT);
  2255. }
  2256. /*
  2257. * Shutdown Location monitor.
  2258. */
  2259. iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
  2260. /*
  2261. * Shutdown CRG map.
  2262. */
  2263. iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
  2264. /*
  2265. * Clear error status.
  2266. */
  2267. iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
  2268. iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
  2269. iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
  2270. /*
  2271. * Remove VIRQ interrupt (if any)
  2272. */
  2273. if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
  2274. iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
  2275. /*
  2276. * Map all Interrupts to PCI INTA
  2277. */
  2278. iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
  2279. iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
  2280. tsi148_irq_exit(tsi148_bridge, pdev);
  2281. vme_unregister_bridge(tsi148_bridge);
  2282. tsi148_crcsr_exit(tsi148_bridge, pdev);
  2283. /* resources are stored in link list */
  2284. list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
  2285. dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
  2286. list_del(pos);
  2287. kfree(dma_ctrlr);
  2288. }
  2289. /* resources are stored in link list */
  2290. list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
  2291. slave_image = list_entry(pos, struct vme_slave_resource, list);
  2292. list_del(pos);
  2293. kfree(slave_image);
  2294. }
  2295. /* resources are stored in link list */
  2296. list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
  2297. master_image = list_entry(pos, struct vme_master_resource,
  2298. list);
  2299. list_del(pos);
  2300. kfree(master_image);
  2301. }
  2302. iounmap(bridge->base);
  2303. pci_release_regions(pdev);
  2304. pci_disable_device(pdev);
  2305. kfree(tsi148_bridge->driver_priv);
  2306. kfree(tsi148_bridge);
  2307. }
  2308. static void __exit tsi148_exit(void)
  2309. {
  2310. pci_unregister_driver(&tsi148_driver);
  2311. }
  2312. MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
  2313. module_param(err_chk, bool, 0);
  2314. MODULE_PARM_DESC(geoid, "Override geographical addressing");
  2315. module_param(geoid, int, 0);
  2316. MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
  2317. MODULE_LICENSE("GPL");
  2318. module_init(tsi148_init);
  2319. module_exit(tsi148_exit);