/* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
        chas williams <chas@cmf.nrl.navy.mil>
        eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
        4096 supported 'connections'
        group 0 is used for all traffic
        interrupt queue 0 is used for all interrupts
        aal0 support (based on work from ulrich.u.muller@nokia.com)

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#define USE_TASKLET
#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW          /* still confused about this */
#define USE_RBPS
#undef USE_RBPS_POOL            /* if memory is tight try this */
#undef USE_RBPL_POOL            /* if memory is tight try this */
#define USE_TPD_POOL
/* #undef CONFIG_ATM_HE_USE_SUNI */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)    printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)    printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)    do { } while (0)
#endif /* HE_DEBUG */

/* version definition */

static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static int disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static int irq_coalesce = 1;
static int sdh = 0;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
        CS_HIGH | CLK_HIGH,
        CS_LOW | CLK_LOW,
        CLK_HIGH,               /* 0 */
        CLK_LOW,
        CLK_HIGH,               /* 0 */
        CLK_LOW,
        CLK_HIGH,               /* 0 */
        CLK_LOW,
        CLK_HIGH,               /* 0 */
        CLK_LOW,
        CLK_HIGH,               /* 0 */
        CLK_LOW,
        CLK_HIGH,               /* 0 */
        CLK_LOW | SI_HIGH,
        CLK_HIGH | SI_HIGH,     /* 1 */
        CLK_LOW | SI_HIGH,
        CLK_HIGH | SI_HIGH      /* 1 */
};
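
/*
 * A note on the table above (our reading of the bit-bang sequence, not
 * from the manual): each CLK_LOW/CLK_HIGH pair clocks one bit of the
 * 8-bit READ opcode into the serial EEPROM on the SI line, MSB first,
 * with the data presumably sampled on the rising clock edge -- six zero
 * bits followed by two one bits, i.e. the 0000 0011b noted above.
 */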
/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
        CLK_LOW,
        CLK_HIGH,
        CLK_LOW,
        CLK_HIGH,
        CLK_LOW,
        CLK_HIGH,
        CLK_LOW,
        CLK_HIGH,
        CLK_LOW,
        CLK_HIGH,
        CLK_LOW,
        CLK_HIGH,
        CLK_LOW,
        CLK_HIGH,
        CLK_LOW,
        CLK_HIGH,
        CLK_LOW
};

static struct atmdev_ops he_ops =
{
        .open =         he_open,
        .close =        he_close,
        .ioctl =        he_ioctl,
        .send =         he_send,
        .phy_put =      he_phy_put,
        .phy_get =      he_phy_get,
        .proc_read =    he_proc_read,
        .owner =        THIS_MODULE
};

#define he_writel(dev, val, reg)        do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)              readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
                                                                unsigned flags)
{
        he_writel(he_dev, val, CON_DAT);
        (void) he_readl(he_dev, CON_DAT);       /* flush posted writes */
        he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
        while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}

#define he_writel_rcm(dev, val, reg)                            \
                        he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg)                            \
                        he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg)                           \
                        he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
        he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
        while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
        return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
                        he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
                        he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
                        he_readl_internal(dev, reg, CON_CTL_MBOX)
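
/*
 * All TCM, RCM and mailbox traffic funnels through the single
 * CON_DAT/CON_CTL register pair used above: park the data word in CON_DAT,
 * start the transfer by writing the target address plus the space select
 * (CON_CTL_TCM, CON_CTL_RCM or CON_CTL_MBOX) to CON_CTL, then spin until
 * CON_CTL_BUSY clears.  The extra he_readl() of CON_DAT in
 * he_writel_internal() forces the posted write out to the card before the
 * control word is issued.
 */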
/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)         (((vpi << (dev)->vcibits) | vci) & 0x1fff)
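
/*
 * Worked example (a hypothetical split): with 2 vpi bits and 10 vci bits
 * configured (the two must total HE_MAXCIDBITS, see he_start() below),
 * vpi 1/vci 32 yields cid = (1 << 10) | 32 = 0x420.  The 0x1fff mask
 * simply clips the result to the width of the connection id field.
 */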
/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
                he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
                he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
                he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
                he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
                he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
                he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

        /* from page 2-20
         *
         * NOTE While the transmit connection is active, bits 23 through 0
         *      of this register must not be written by the host.  Byte
         *      enables should be used during normal operation when writing
         *      the most significant byte.
         */

#define he_writel_tsr4_upper(dev, val, cid) \
                he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
                                                        CON_CTL_TCM \
                                                        | CON_BYTE_DISABLE_2 \
                                                        | CON_BYTE_DISABLE_1 \
                                                        | CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
                he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
                he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
                he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
                he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)

#define he_writel_tsr8(dev, val, cid) \
                he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
                he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
                he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
                he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)

#define he_writel_tsr12(dev, val, cid) \
                he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
                he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)

#define he_writel_tsr14(dev, val, cid) \
                he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
                he_writel_internal(dev, val, CONFIG_TSRD | cid, \
                                                        CON_CTL_TCM \
                                                        | CON_BYTE_DISABLE_2 \
                                                        | CON_BYTE_DISABLE_1 \
                                                        | CON_BYTE_DISABLE_0)

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
                he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
                he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
                he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
                he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
                he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
                he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
                he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
                he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
                he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
        struct hlist_head *head;
        struct atm_vcc *vcc;
        struct hlist_node *node;
        struct sock *s;
        short vpi;
        int vci;

        vpi = cid >> he_dev->vcibits;
        vci = cid & ((1 << he_dev->vcibits) - 1);
        head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];

        sk_for_each(s, node, head) {
                vcc = atm_sk(s);
                if (vcc->dev == he_dev->atm_dev &&
                    vcc->vci == vci && vcc->vpi == vpi &&
                    vcc->qos.rxtp.traffic_class != ATM_NONE) {
                        return vcc;
                }
        }
        return NULL;
}

static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
        struct atm_dev *atm_dev = NULL;
        struct he_dev *he_dev = NULL;
        int err = 0;

        printk(KERN_INFO "he: %s\n", version);

        if (pci_enable_device(pci_dev))
                return -EIO;
        if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK) != 0) {
                printk(KERN_WARNING "he: no suitable dma available\n");
                err = -EIO;
                goto init_one_failure;
        }

        atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
        if (!atm_dev) {
                err = -ENODEV;
                goto init_one_failure;
        }
        pci_set_drvdata(pci_dev, atm_dev);

        he_dev = (struct he_dev *) kmalloc(sizeof(struct he_dev),
                                                        GFP_KERNEL);
        if (!he_dev) {
                err = -ENOMEM;
                goto init_one_failure;
        }
        memset(he_dev, 0, sizeof(struct he_dev));

        he_dev->pci_dev = pci_dev;
        he_dev->atm_dev = atm_dev;
        he_dev->atm_dev->dev_data = he_dev;
        atm_dev->dev_data = he_dev;
        he_dev->number = atm_dev->number;
        if (he_start(atm_dev)) {
                he_stop(he_dev);
                err = -ENODEV;
                goto init_one_failure;
        }
        he_dev->next = NULL;
        if (he_devs)
                he_dev->next = he_devs;
        he_devs = he_dev;
        return 0;

init_one_failure:
        if (atm_dev)
                atm_dev_deregister(atm_dev);
        kfree(he_dev);
        pci_disable_device(pci_dev);
        return err;
}

static void __devexit
he_remove_one(struct pci_dev *pci_dev)
{
        struct atm_dev *atm_dev;
        struct he_dev *he_dev;

        atm_dev = pci_get_drvdata(pci_dev);
        he_dev = HE_DEV(atm_dev);

        /* need to remove from he_devs */

        he_stop(he_dev);
        atm_dev_deregister(atm_dev);
        kfree(he_dev);

        pci_set_drvdata(pci_dev, NULL);
        pci_disable_device(pci_dev);
}

static unsigned
rate_to_atmf(unsigned rate)             /* cps to atm forum format */
{
#define NONZERO (1 << 14)

        unsigned exp = 0;

        if (rate == 0)
                return 0;

        rate <<= 9;
        while (rate > 0x3ff) {
                ++exp;
                rate >>= 1;
        }

        return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
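
/*
 * Worked example (our arithmetic, not from the manual):
 * rate_to_atmf(353207) -- the OC-3 cell rate -- shifts left to 180841984,
 * then halves 18 times to land in range, leaving exp = 18 and a mantissa
 * of 689 & 0x1ff = 177.  The encoded value decodes back to
 * 2^18 * (512 + 177) / 512 = 352768 cps, the closest representable rate
 * not above the request.
 */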
static void __devinit
he_init_rx_lbfp0(struct he_dev *he_dev)
{
        unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
        unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
        unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
        unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

        lbufd_index = 0;
        lbm_offset = he_readl(he_dev, RCMLBM_BA);

        he_writel(he_dev, lbufd_index, RLBF0_H);

        for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
                lbufd_index += 2;
                lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

                he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
                he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

                if (++lbuf_count == lbufs_per_row) {
                        lbuf_count = 0;
                        row_offset += he_dev->bytes_per_row;
                }
                lbm_offset += 4;
        }

        he_writel(he_dev, lbufd_index - 2, RLBF0_T);
        he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}

static void __devinit
he_init_rx_lbfp1(struct he_dev *he_dev)
{
        unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
        unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
        unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
        unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

        lbufd_index = 1;
        lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

        he_writel(he_dev, lbufd_index, RLBF1_H);

        for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
                lbufd_index += 2;
                lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

                he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
                he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

                if (++lbuf_count == lbufs_per_row) {
                        lbuf_count = 0;
                        row_offset += he_dev->bytes_per_row;
                }
                lbm_offset += 4;
        }

        he_writel(he_dev, lbufd_index - 2, RLBF1_T);
        he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void __devinit
he_init_tx_lbfp(struct he_dev *he_dev)
{
        unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
        unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
        unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
        unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

        lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
        lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

        he_writel(he_dev, lbufd_index, TLBF_H);

        for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
                lbufd_index += 1;
                lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

                he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
                he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

                if (++lbuf_count == lbufs_per_row) {
                        lbuf_count = 0;
                        row_offset += he_dev->bytes_per_row;
                }
                lbm_offset += 2;
        }

        he_writel(he_dev, lbufd_index - 1, TLBF_T);
}
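
/*
 * Layout note for the three free-pool loops above: every receive lbuf
 * descriptor in RCM is two words (buffer address, then the index of the
 * next descriptor), with pool 0 on the even descriptor indices and pool 1
 * on the odd ones -- hence lbufd_index starting at 0 and 1 respectively
 * and stepping by 2 while lbm_offset steps by 4.  The transmit free pool
 * sits after both receive pools and is not interleaved with anything, so
 * it steps its index by 1 and its offset by 2.
 */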
static int __devinit
he_init_tpdrq(struct he_dev *he_dev)
{
        he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
                CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
        if (he_dev->tpdrq_base == NULL) {
                hprintk("failed to alloc tpdrq\n");
                return -ENOMEM;
        }
        memset(he_dev->tpdrq_base, 0,
                                CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

        he_dev->tpdrq_tail = he_dev->tpdrq_base;
        he_dev->tpdrq_head = he_dev->tpdrq_base;

        he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
        he_writel(he_dev, 0, TPDRQ_T);
        he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

        return 0;
}

static void __devinit
he_init_cs_block(struct he_dev *he_dev)
{
        unsigned clock, rate, delta;
        int reg;

        /* 5.1.7 cs block initialization */

        for (reg = 0; reg < 0x20; ++reg)
                he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

        /* rate grid timer reload values */

        clock = he_is622(he_dev) ? 66667000 : 50000000;
        rate = he_dev->atm_dev->link_rate;
        delta = rate / 16 / 2;

        for (reg = 0; reg < 0x10; ++reg) {
                /* 2.4 internal transmit function
                 *
                 * we initialize the first row in the rate grid.
                 * values are period (in clock cycles) of timer
                 */
                unsigned period = clock / rate;

                he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
                rate -= delta;
        }
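
        /*
         * Spot check (our arithmetic): a 155 card runs the core clock at
         * 50 MHz with link_rate = 353207 cps, so the first reload value
         * is 50000000 / 353207 = 141 clock cycles, and each later column
         * of row 0 is programmed for a rate one thirty-second of the
         * link rate slower.
         */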
        if (he_is622(he_dev)) {
                /* table 5.2 (4 cells per lbuf) */
                he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
                he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
                he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
                he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
                he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

                /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
                he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
                he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
                he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
                he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
                he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
                he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

                he_writel_mbox(he_dev, 0x4680, CS_RTATR);

                /* table 5.8 */
                he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
                he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
                he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
                he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
                he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
                he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

                /* table 5.9 */
                he_writel_mbox(he_dev, 0x5, CS_OTPPER);
                he_writel_mbox(he_dev, 0x14, CS_OTWPER);
        } else {
                /* table 5.1 (4 cells per lbuf) */
                he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
                he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
                he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
                he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
                he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

                /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
                he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
                he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
                he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
                he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
                he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
                he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

                he_writel_mbox(he_dev, 0x4680, CS_RTATR);

                /* table 5.8 */
                he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
                he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
                he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
                he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
                he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
                he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

                /* table 5.9 */
                he_writel_mbox(he_dev, 0x6, CS_OTPPER);
                he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
        }

        he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

        for (reg = 0; reg < 0x8; ++reg)
                he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
}

static int __devinit
he_init_cs_block_rcm(struct he_dev *he_dev)
{
        unsigned (*rategrid)[16][16];
        unsigned rate, delta;
        int i, j, reg;

        unsigned rate_atmf, exp, man;
        unsigned long long rate_cps;
        int mult, buf, buf_limit = 4;

        rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
        if (!rategrid)
                return -ENOMEM;

        /* initialize rate grid group table */

        for (reg = 0x0; reg < 0xff; ++reg)
                he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

        /* initialize rate controller groups */

        for (reg = 0x100; reg < 0x1ff; ++reg)
                he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

        /* initialize tNrm lookup table */

        /* the manual makes reference to a routine in a sample driver
           for proper configuration; fortunately, we only need this
           in order to support abr connection */

        /* initialize rate to group table */

        rate = he_dev->atm_dev->link_rate;
        delta = rate / 32;

        /*
         * 2.4 transmit internal functions
         *
         * we construct a copy of the rate grid used by the scheduler
         * in order to construct the rate to group table below
         */

        for (j = 0; j < 16; j++) {
                (*rategrid)[0][j] = rate;
                rate -= delta;
        }

        for (i = 1; i < 16; i++)
                for (j = 0; j < 16; j++)
                        if (i > 14)
                                (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
                        else
                                (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

        /*
         * 2.4 transmit internal function
         *
         * this table maps the upper 5 bits of exponent and mantissa
         * of the atm forum representation of the rate into an index
         * on rate grid
         */

        rate_atmf = 0;
        while (rate_atmf < 0x400) {
                man = (rate_atmf & 0x1f) << 4;
                exp = rate_atmf >> 5;

                /*
                        instead of '/ 512', use '>> 9' to prevent a call
                        to divdu3 on x86 platforms
                */
                rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;

                if (rate_cps < 10)
                        rate_cps = 10;  /* 2.2.1 minimum payload rate is 10 cps */

                for (i = 255; i > 0; i--)
                        if ((*rategrid)[i/16][i%16] >= rate_cps)
                                break;  /* pick nearest rate instead? */

                /*
                 * each table entry is 16 bits: (rate grid index (8 bits)
                 * and a buffer limit (8 bits)
                 * there are two table entries in each 32-bit register
                 */

#ifdef notdef
                buf = rate_cps * he_dev->tx_numbuffs /
                                (he_dev->atm_dev->link_rate * 2);
#else
                /* this is pretty, but avoids _divdu3 and is mostly correct */
                mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
                if (rate_cps > (272 * mult))
                        buf = 4;
                else if (rate_cps > (204 * mult))
                        buf = 3;
                else if (rate_cps > (136 * mult))
                        buf = 2;
                else if (rate_cps > (68 * mult))
                        buf = 1;
                else
                        buf = 0;
#endif
                if (buf > buf_limit)
                        buf = buf_limit;
                reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

                if (rate_atmf & 0x1)
                        he_writel_rcm(he_dev, reg,
                                CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

                ++rate_atmf;
        }

        kfree(rategrid);
        return 0;
}
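
/*
 * Packing note for the rate-to-group loop above: each entry is 16 bits
 * (rate grid index in the high byte, buffer limit in the low byte) and
 * two consecutive entries share one 32-bit RCM word, so 'reg' accumulates
 * the even entry in its upper half via the '<< 16' and the combined word
 * is only written out, at CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1),
 * once the odd entry has been merged in.
 */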
static int __devinit
he_init_group(struct he_dev *he_dev, int group)
{
        int i;

#ifdef USE_RBPS
        /* small buffer pool */
#ifdef USE_RBPS_POOL
        he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
                        CONFIG_RBPS_BUFSIZE, 8, 0);
        if (he_dev->rbps_pool == NULL) {
                hprintk("unable to create rbps pool\n");
                return -ENOMEM;
        }
#else /* !USE_RBPS_POOL */
        he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
                CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
        if (he_dev->rbps_pages == NULL) {
                hprintk("unable to create rbps pages\n");
                return -ENOMEM;
        }
#endif /* USE_RBPS_POOL */

        he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
                CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
        if (he_dev->rbps_base == NULL) {
                hprintk("failed to alloc rbps\n");
                return -ENOMEM;
        }
        memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
        he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);

        for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
                dma_addr_t dma_handle;
                void *cpuaddr;

#ifdef USE_RBPS_POOL
                cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
                if (cpuaddr == NULL)
                        return -ENOMEM;
#else
                cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
                dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
#endif

                he_dev->rbps_virt[i].virt = cpuaddr;
                he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
                he_dev->rbps_base[i].phys = dma_handle;
        }
        he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];

        he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
        he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
                                                G0_RBPS_T + (group * 32));
        he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
                                                G0_RBPS_BS + (group * 32));
        he_writel(he_dev,
                        RBP_THRESH(CONFIG_RBPS_THRESH) |
                                RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
                                RBP_INT_ENB,
                                                G0_RBPS_QI + (group * 32));
#else /* !USE_RBPS */
        he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
        he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
        he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
        he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
                                                G0_RBPS_BS + (group * 32));
#endif /* USE_RBPS */
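
        /*
         * Summary of the loop above (and its rbpl twin below), as we read
         * it: each he_rbp entry loans a buffer to the card -- 'phys' is
         * the dma address, 'status' carries RBP_LOANED, RBP_SMALLBUF for
         * this pool, and the pool index -- so a buffer handle coming back
         * on the rbrq can be mapped to its cpu address via rbps_virt[] or
         * rbpl_virt[].
         */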
        /* large buffer pool */
#ifdef USE_RBPL_POOL
        he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
                        CONFIG_RBPL_BUFSIZE, 8, 0);
        if (he_dev->rbpl_pool == NULL) {
                hprintk("unable to create rbpl pool\n");
                return -ENOMEM;
        }
#else /* !USE_RBPL_POOL */
        he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
                CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
        if (he_dev->rbpl_pages == NULL) {
                hprintk("unable to create rbpl pages\n");
                return -ENOMEM;
        }
#endif /* USE_RBPL_POOL */

        he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
                CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
        if (he_dev->rbpl_base == NULL) {
                hprintk("failed to alloc rbpl\n");
                return -ENOMEM;
        }
        memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
        he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);

        for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
                dma_addr_t dma_handle;
                void *cpuaddr;

#ifdef USE_RBPL_POOL
                cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
                if (cpuaddr == NULL)
                        return -ENOMEM;
#else
                cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
                dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
#endif

                he_dev->rbpl_virt[i].virt = cpuaddr;
                he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
                he_dev->rbpl_base[i].phys = dma_handle;
        }
        he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

        he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
        he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
                                                G0_RBPL_T + (group * 32));
        he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
                                                G0_RBPL_BS + (group * 32));
        he_writel(he_dev,
                        RBP_THRESH(CONFIG_RBPL_THRESH) |
                                RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
                                RBP_INT_ENB,
                                                G0_RBPL_QI + (group * 32));

        /* rx buffer ready queue */

        he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
                CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
        if (he_dev->rbrq_base == NULL) {
                hprintk("failed to allocate rbrq\n");
                return -ENOMEM;
        }
        memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

        he_dev->rbrq_head = he_dev->rbrq_base;
        he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
        he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
        he_writel(he_dev,
                RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
                                                G0_RBRQ_Q + (group * 16));
        if (irq_coalesce) {
                hprintk("coalescing interrupts\n");
                he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
                                                G0_RBRQ_I + (group * 16));
        } else
                he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
                                                G0_RBRQ_I + (group * 16));

        /* tx buffer ready queue */

        he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
                CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
        if (he_dev->tbrq_base == NULL) {
                hprintk("failed to allocate tbrq\n");
                return -ENOMEM;
        }
        memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

        he_dev->tbrq_head = he_dev->tbrq_base;

        he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
        he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
        he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
        he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

        return 0;
}
static int __devinit
he_init_irq(struct he_dev *he_dev)
{
        int i;

        /* 2.9.3.5  tail offset for each interrupt queue is located after the
                    end of the interrupt queue */

        he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
                (CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
        if (he_dev->irq_base == NULL) {
                hprintk("failed to allocate irq\n");
                return -ENOMEM;
        }
        he_dev->irq_tailoffset = (unsigned *)
                                        &he_dev->irq_base[CONFIG_IRQ_SIZE];
        *he_dev->irq_tailoffset = 0;
        he_dev->irq_head = he_dev->irq_base;
        he_dev->irq_tail = he_dev->irq_base;

        for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
                he_dev->irq_base[i].isw = ITYPE_INVALID;

        he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
        he_writel(he_dev,
                IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
                                                                IRQ0_HEAD);
        he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
        he_writel(he_dev, 0x0, IRQ0_DATA);

        he_writel(he_dev, 0x0, IRQ1_BASE);
        he_writel(he_dev, 0x0, IRQ1_HEAD);
        he_writel(he_dev, 0x0, IRQ1_CNTL);
        he_writel(he_dev, 0x0, IRQ1_DATA);

        he_writel(he_dev, 0x0, IRQ2_BASE);
        he_writel(he_dev, 0x0, IRQ2_HEAD);
        he_writel(he_dev, 0x0, IRQ2_CNTL);
        he_writel(he_dev, 0x0, IRQ2_DATA);

        he_writel(he_dev, 0x0, IRQ3_BASE);
        he_writel(he_dev, 0x0, IRQ3_HEAD);
        he_writel(he_dev, 0x0, IRQ3_CNTL);
        he_writel(he_dev, 0x0, IRQ3_DATA);

        /* 2.9.3.2 interrupt queue mapping registers */

        he_writel(he_dev, 0x0, GRP_10_MAP);
        he_writel(he_dev, 0x0, GRP_32_MAP);
        he_writel(he_dev, 0x0, GRP_54_MAP);
        he_writel(he_dev, 0x0, GRP_76_MAP);

        if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
                hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
                return -EINVAL;
        }

        he_dev->irq = he_dev->pci_dev->irq;

        return 0;
}
static int __devinit
he_start(struct atm_dev *dev)
{
        struct he_dev *he_dev;
        struct pci_dev *pci_dev;
        unsigned long membase;

        u16 command;
        u32 gen_cntl_0, host_cntl, lb_swap;
        u8 cache_size, timer;

        unsigned err;
        unsigned int status, reg;
        int i, group;

        he_dev = HE_DEV(dev);
        pci_dev = he_dev->pci_dev;

        membase = pci_resource_start(pci_dev, 0);
        HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

        /*
         * pci bus controller initialization
         */

        /* 4.3 pci bus controller-specific initialization */
        if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
                hprintk("can't read GEN_CNTL_0\n");
                return -EINVAL;
        }
        gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
        if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
                hprintk("can't write GEN_CNTL_0.\n");
                return -EINVAL;
        }

        if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
                hprintk("can't read PCI_COMMAND.\n");
                return -EINVAL;
        }

        command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
        if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
                hprintk("can't enable memory.\n");
                return -EINVAL;
        }

        if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
                hprintk("can't read cache line size?\n");
                return -EINVAL;
        }

        if (cache_size < 16) {
                cache_size = 16;
                if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
                        hprintk("can't set cache line size to %d\n", cache_size);
        }

        if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
                hprintk("can't read latency timer?\n");
                return -EINVAL;
        }

        /* from table 3.9
         *
         * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
         *
         * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
         * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
         *
         */
#define LAT_TIMER 209
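        /*
         * i.e. 1 + 16 + 1536/8 = 209 pci clocks on the 622's 64-bit bus,
         * and 1 + 16 + 768/4 gives the same figure for the 155 if it
         * bursts over a 32-bit bus (our assumption) -- which is why the
         * bracketed 192 clock cycles above covers both cards.
         */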
        if (timer < LAT_TIMER) {
                HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
                timer = LAT_TIMER;
                if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
                        hprintk("can't set latency timer to %d\n", timer);
        }

        if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
                hprintk("can't set up page mapping\n");
                return -EINVAL;
        }

        /* 4.4 card reset */
        he_writel(he_dev, 0x0, RESET_CNTL);
        he_writel(he_dev, 0xff, RESET_CNTL);

        udelay(16*1000);        /* 16 ms */
        status = he_readl(he_dev, RESET_CNTL);
        if ((status & BOARD_RST_STATUS) == 0) {
                hprintk("reset failed\n");
                return -EINVAL;
        }

        /* 4.5 set bus width */
        host_cntl = he_readl(he_dev, HOST_CNTL);
        if (host_cntl & PCI_BUS_SIZE64)
                gen_cntl_0 |= ENBL_64;
        else
                gen_cntl_0 &= ~ENBL_64;

        if (disable64 == 1) {
                hprintk("disabling 64-bit pci bus transfers\n");
                gen_cntl_0 &= ~ENBL_64;
        }

        if (gen_cntl_0 & ENBL_64)
                hprintk("64-bit transfers enabled\n");

        pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

        /* 4.7 read prom contents */
        for (i = 0; i < PROD_ID_LEN; ++i)
                he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

        he_dev->media = read_prom_byte(he_dev, MEDIA);

        for (i = 0; i < 6; ++i)
                dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

        hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
                he_dev->prod_id,
                he_dev->media & 0x40 ? "SM" : "MM",
                dev->esi[0],
                dev->esi[1],
                dev->esi[2],
                dev->esi[3],
                dev->esi[4],
                dev->esi[5]);
        he_dev->atm_dev->link_rate = he_is622(he_dev) ?
                                                ATM_OC12_PCR : ATM_OC3_PCR;

        /* 4.6 set host endianness */
        lb_swap = he_readl(he_dev, LB_SWAP);
        if (he_is622(he_dev))
                lb_swap &= ~XFER_SIZE;          /* 4 cells */
        else
                lb_swap |= XFER_SIZE;           /* 8 cells */
#ifdef __BIG_ENDIAN
        lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
        lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
                        DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
        he_writel(he_dev, lb_swap, LB_SWAP);

        /* 4.8 sdram controller initialization */
        he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

        /* 4.9 initialize rnum value */
        lb_swap |= SWAP_RNUM_MAX(0xf);
        he_writel(he_dev, lb_swap, LB_SWAP);

        /* 4.10 initialize the interrupt queues */
        if ((err = he_init_irq(he_dev)) != 0)
                return err;

#ifdef USE_TASKLET
        tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
#endif
        spin_lock_init(&he_dev->global_lock);

        /* 4.11 enable pci bus controller state machines */
        host_cntl |= (OUTFF_ENB | CMDFF_ENB |
                                QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
        he_writel(he_dev, host_cntl, HOST_CNTL);

        gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
        pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

        /*
         * atm network controller initialization
         */

        /* 5.1.1 generic configuration state */

        /*
         * local (cell) buffer memory map
         *
         *             HE155                          HE622
         *
         *        0 ____________1023      bytes   0 _______________________2047 bytes
         *         |            |                  |                   |   |
         *         |  utility   |                  |        rx0        |   |
         *        5|____________|            255   |___________________| u |
         *        6|            |            256   |                   | t |
         *         |            |                  |                   | i |
         *         |    rx0     |    row           |        tx         | l |
         *         |            |                  |                   | i |
         *         |            |            767   |___________________| t |
         *      517|____________|            768   |                   | y |
         * row  518|            |                  |        rx1        |   |
         *         |            |           1023   |___________________|___|
         *         |            |
         *         |     tx     |
         *         |            |
         *         |            |
         *     1535|____________|
         *     1536|            |
         *         |    rx1     |
         *     2047|____________|
         *
         */

        /* total 4096 connections */
        he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
        he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

        if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
                hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
                return -ENODEV;
        }

        if (nvpibits != -1) {
                he_dev->vpibits = nvpibits;
                he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
        }

        if (nvcibits != -1) {
                he_dev->vcibits = nvcibits;
                he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
        }
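
        /*
         * In other words the nvpibits/nvcibits module parameters only
         * move the vpi/vci boundary inside the fixed-width connection id;
         * with the 4096 connections noted at the top of the file the cid
         * is 12 bits, so e.g. nvpibits=2 leaves 10 bits of vci.
         */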
        if (he_is622(he_dev)) {
                he_dev->cells_per_row = 40;
                he_dev->bytes_per_row = 2048;
                he_dev->r0_numrows = 256;
                he_dev->tx_numrows = 512;
                he_dev->r1_numrows = 256;
                he_dev->r0_startrow = 0;
                he_dev->tx_startrow = 256;
                he_dev->r1_startrow = 768;
        } else {
                he_dev->cells_per_row = 20;
                he_dev->bytes_per_row = 1024;
                he_dev->r0_numrows = 512;
                he_dev->tx_numrows = 1018;
                he_dev->r1_numrows = 512;
                he_dev->r0_startrow = 6;
                he_dev->tx_startrow = 518;
                he_dev->r1_startrow = 1536;
        }

        he_dev->cells_per_lbuf = 4;
        he_dev->buffer_limit = 4;
        he_dev->r0_numbuffs = he_dev->r0_numrows *
                                he_dev->cells_per_row / he_dev->cells_per_lbuf;
        if (he_dev->r0_numbuffs > 2560)
                he_dev->r0_numbuffs = 2560;

        he_dev->r1_numbuffs = he_dev->r1_numrows *
                                he_dev->cells_per_row / he_dev->cells_per_lbuf;
        if (he_dev->r1_numbuffs > 2560)
                he_dev->r1_numbuffs = 2560;

        he_dev->tx_numbuffs = he_dev->tx_numrows *
                                he_dev->cells_per_row / he_dev->cells_per_lbuf;
        if (he_dev->tx_numbuffs > 5120)
                he_dev->tx_numbuffs = 5120;
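
        /*
         * Sanity check (our arithmetic, HE155 case): 512 rows * 20 cells
         * per row / 4 cells per lbuf = 2560 buffers for each receive
         * pool, exactly at the clamp, and 1018 * 20 / 4 = 5090 transmit
         * buffers, just under the 5120 limit.
         */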
  1036. /* 5.1.2 configure hardware dependent registers */
  1037. he_writel(he_dev,
  1038. SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
  1039. RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
  1040. (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
  1041. (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
  1042. LBARB);
  1043. he_writel(he_dev, BANK_ON |
  1044. (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
  1045. SDRAMCON);
  1046. he_writel(he_dev,
  1047. (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
  1048. RM_RW_WAIT(1), RCMCONFIG);
  1049. he_writel(he_dev,
  1050. (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
  1051. TM_RW_WAIT(1), TCMCONFIG);
  1052. he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
  1053. he_writel(he_dev,
  1054. (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
  1055. (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
  1056. RX_VALVP(he_dev->vpibits) |
  1057. RX_VALVC(he_dev->vcibits), RC_CONFIG);
  1058. he_writel(he_dev, DRF_THRESH(0x20) |
  1059. (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
  1060. TX_VCI_MASK(he_dev->vcibits) |
  1061. LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);
  1062. he_writel(he_dev, 0x0, TXAAL5_PROTO);
  1063. he_writel(he_dev, PHY_INT_ENB |
  1064. (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
  1065. RH_CONFIG);
  1066. /* 5.1.3 initialize connection memory */
  1067. for (i = 0; i < TCM_MEM_SIZE; ++i)
  1068. he_writel_tcm(he_dev, 0, i);
  1069. for (i = 0; i < RCM_MEM_SIZE; ++i)
  1070. he_writel_rcm(he_dev, 0, i);
  1071. /*
  1072. * transmit connection memory map
  1073. *
  1074. * tx memory
  1075. * 0x0 ___________________
  1076. * | |
  1077. * | |
  1078. * | TSRa |
  1079. * | |
  1080. * | |
  1081. * 0x8000|___________________|
  1082. * | |
  1083. * | TSRb |
  1084. * 0xc000|___________________|
  1085. * | |
  1086. * | TSRc |
  1087. * 0xe000|___________________|
  1088. * | TSRd |
  1089. * 0xf000|___________________|
  1090. * | tmABR |
  1091. * 0x10000|___________________|
  1092. * | |
  1093. * | tmTPD |
  1094. * |___________________|
  1095. * | |
  1096. * ....
  1097. * 0x1ffff|___________________|
  1098. *
  1099. *
  1100. */
  1101. he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
  1102. he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
  1103. he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
  1104. he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
  1105. he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
  1106. /*
  1107. * receive connection memory map
  1108. *
  1109. * 0x0 ___________________
  1110. * | |
  1111. * | |
  1112. * | RSRa |
  1113. * | |
  1114. * | |
  1115. * 0x8000|___________________|
  1116. * | |
  1117. * | rx0/1 |
  1118. * | LBM | link lists of local
  1119. * | tx | buffer memory
  1120. * | |
  1121. * 0xd000|___________________|
  1122. * | |
  1123. * | rmABR |
  1124. * 0xe000|___________________|
  1125. * | |
  1126. * | RSRb |
  1127. * |___________________|
  1128. * | |
  1129. * ....
  1130. * 0xffff|___________________|
  1131. */
  1132. he_writel(he_dev, 0x08000, RCMLBM_BA);
  1133. he_writel(he_dev, 0x0e000, RCMRSRB_BA);
  1134. he_writel(he_dev, 0x0d800, RCMABR_BA);
  1135. /* 5.1.4 initialize local buffer free pools linked lists */
  1136. he_init_rx_lbfp0(he_dev);
  1137. he_init_rx_lbfp1(he_dev);
  1138. he_writel(he_dev, 0x0, RLBC_H);
  1139. he_writel(he_dev, 0x0, RLBC_T);
  1140. he_writel(he_dev, 0x0, RLBC_H2);
  1141. he_writel(he_dev, 512, RXTHRSH); /* 10% of r0+r1 buffers */
  1142. he_writel(he_dev, 256, LITHRSH); /* 5% of r0+r1 buffers */
  1143. he_init_tx_lbfp(he_dev);
  1144. he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
  1145. /* 5.1.5 initialize intermediate receive queues */
    if (he_is622(he_dev)) {
        he_writel(he_dev, 0x000f, G0_INMQ_S);
        he_writel(he_dev, 0x200f, G0_INMQ_L);

        he_writel(he_dev, 0x001f, G1_INMQ_S);
        he_writel(he_dev, 0x201f, G1_INMQ_L);

        he_writel(he_dev, 0x002f, G2_INMQ_S);
        he_writel(he_dev, 0x202f, G2_INMQ_L);

        he_writel(he_dev, 0x003f, G3_INMQ_S);
        he_writel(he_dev, 0x203f, G3_INMQ_L);

        he_writel(he_dev, 0x004f, G4_INMQ_S);
        he_writel(he_dev, 0x204f, G4_INMQ_L);

        he_writel(he_dev, 0x005f, G5_INMQ_S);
        he_writel(he_dev, 0x205f, G5_INMQ_L);

        he_writel(he_dev, 0x006f, G6_INMQ_S);
        he_writel(he_dev, 0x206f, G6_INMQ_L);

        he_writel(he_dev, 0x007f, G7_INMQ_S);
        he_writel(he_dev, 0x207f, G7_INMQ_L);
    } else {
        he_writel(he_dev, 0x0000, G0_INMQ_S);
        he_writel(he_dev, 0x0008, G0_INMQ_L);

        he_writel(he_dev, 0x0001, G1_INMQ_S);
        he_writel(he_dev, 0x0009, G1_INMQ_L);

        he_writel(he_dev, 0x0002, G2_INMQ_S);
        he_writel(he_dev, 0x000a, G2_INMQ_L);

        he_writel(he_dev, 0x0003, G3_INMQ_S);
        he_writel(he_dev, 0x000b, G3_INMQ_L);

        he_writel(he_dev, 0x0004, G4_INMQ_S);
        he_writel(he_dev, 0x000c, G4_INMQ_L);

        he_writel(he_dev, 0x0005, G5_INMQ_S);
        he_writel(he_dev, 0x000d, G5_INMQ_L);

        he_writel(he_dev, 0x0006, G6_INMQ_S);
        he_writel(he_dev, 0x000e, G6_INMQ_L);

        he_writel(he_dev, 0x0007, G7_INMQ_S);
        he_writel(he_dev, 0x000f, G7_INMQ_L);
    }
    /* 5.1.6 application tunable parameters */

    he_writel(he_dev, 0x0, MCC);
    he_writel(he_dev, 0x0, OEC);
    he_writel(he_dev, 0x0, DCC);
    he_writel(he_dev, 0x0, CEC);

    /* 5.1.7 cs block initialization */

    he_init_cs_block(he_dev);

    /* 5.1.8 cs block connection memory initialization */

    if (he_init_cs_block_rcm(he_dev) < 0)
        return -ENOMEM;

    /* 5.1.10 initialize host structures */

    he_init_tpdrq(he_dev);
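    /*
     * tpd allocation comes in two flavors: with USE_TPD_POOL, tpds are
     * allocated one at a time from a pci_pool and tracked on the
     * outstanding_tpds list until the adapter returns them via the tbrq;
     * otherwise a single consistent block is carved into a fixed ring
     * indexed through the TPD_ADDR bits of the status word.
     */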
#ifdef USE_TPD_POOL
    he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
        sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
    if (he_dev->tpd_pool == NULL) {
        hprintk("unable to create tpd pci_pool\n");
        return -ENOMEM;
    }

    INIT_LIST_HEAD(&he_dev->outstanding_tpds);
#else
    he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
        CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
    if (!he_dev->tpd_base)
        return -ENOMEM;

    for (i = 0; i < CONFIG_NUMTPDS; ++i) {
        he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
        he_dev->tpd_base[i].inuse = 0;
    }

    he_dev->tpd_head = he_dev->tpd_base;
    he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
#endif
    if (he_init_group(he_dev, 0) != 0)
        return -ENOMEM;

    for (group = 1; group < HE_NUM_GROUPS; ++group) {
        he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
        he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
        he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
        he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
                        G0_RBPS_BS + (group * 32));

        he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
        he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
        he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
                        G0_RBPL_QI + (group * 32));
        he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

        he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
        he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
        he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
                        G0_RBRQ_Q + (group * 16));
        he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

        he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
        he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
        he_writel(he_dev, TBRQ_THRESH(0x1),
                        G0_TBRQ_THRESH + (group * 16));
        he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
    }
    /* host status page */

    he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
                sizeof(struct he_hsp), &he_dev->hsp_phys);
    if (he_dev->hsp == NULL) {
        hprintk("failed to allocate host status page\n");
        return -ENOMEM;
    }
    memset(he_dev->hsp, 0, sizeof(struct he_hsp));
    he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
    /* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
    suni_init(he_dev->atm_dev);
    if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
        he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

    if (sdh) {
        /* this really should be in suni.c but for now... */
        int val;

        val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
        val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
        he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
    }
    /* 5.1.12 enable transmit and receive */

    reg = he_readl_mbox(he_dev, CS_ERCTL0);
    reg |= TX_ENABLE|ER_ENABLE;
    he_writel_mbox(he_dev, reg, CS_ERCTL0);

    reg = he_readl(he_dev, RC_CONFIG);
    reg |= RX_ENABLE;
    he_writel(he_dev, reg, RC_CONFIG);

    for (i = 0; i < HE_NUM_CS_STPER; ++i) {
        he_dev->cs_stper[i].inuse = 0;
        he_dev->cs_stper[i].pcr = -1;
    }
    he_dev->total_bw = 0;

    /* atm linux initialization */

    he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
    he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

    he_dev->irq_peak = 0;
    he_dev->rbrq_peak = 0;
    he_dev->rbpl_peak = 0;
    he_dev->tbrq_peak = 0;

    HPRINTK("hell bent for leather!\n");

    return 0;
}
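/*
 * he_stop -- undo he_start: mask interrupts, disable the transmit and
 * receive engines, stop the phy, then release the irq and every
 * dma-consistent region before disabling pci bus mastering and
 * unmapping the register window.
 */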
static void
he_stop(struct he_dev *he_dev)
{
    u16 command;
    u32 gen_cntl_0, reg;
    struct pci_dev *pci_dev;

    pci_dev = he_dev->pci_dev;

    /* disable interrupts */

    if (he_dev->membase) {
        pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
        gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
        pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

#ifdef USE_TASKLET
        tasklet_disable(&he_dev->tasklet);
#endif

        /* disable recv and transmit */

        reg = he_readl_mbox(he_dev, CS_ERCTL0);
        reg &= ~(TX_ENABLE|ER_ENABLE);
        he_writel_mbox(he_dev, reg, CS_ERCTL0);

        reg = he_readl(he_dev, RC_CONFIG);
        reg &= ~(RX_ENABLE);
        he_writel(he_dev, reg, RC_CONFIG);
    }

#ifdef CONFIG_ATM_HE_USE_SUNI
    if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
        he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

    if (he_dev->irq)
        free_irq(he_dev->irq, he_dev);

    if (he_dev->irq_base)
        pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
            * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

    if (he_dev->hsp)
        pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
                        he_dev->hsp, he_dev->hsp_phys);
    if (he_dev->rbpl_base) {
#ifdef USE_RBPL_POOL
        int i;

        for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
            void *cpuaddr = he_dev->rbpl_virt[i].virt;
            dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

            pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
        }
#else
        pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
            * CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
#endif
        pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
            * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
    }

#ifdef USE_RBPL_POOL
    if (he_dev->rbpl_pool)
        pci_pool_destroy(he_dev->rbpl_pool);
#endif

#ifdef USE_RBPS
    if (he_dev->rbps_base) {
#ifdef USE_RBPS_POOL
        int i;

        for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
            void *cpuaddr = he_dev->rbps_virt[i].virt;
            dma_addr_t dma_handle = he_dev->rbps_base[i].phys;

            pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
        }
#else
        pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
            * CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
#endif
        pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
            * sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
    }

#ifdef USE_RBPS_POOL
    if (he_dev->rbps_pool)
        pci_pool_destroy(he_dev->rbps_pool);
#endif

#endif /* USE_RBPS */
    if (he_dev->rbrq_base)
        pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
                            he_dev->rbrq_base, he_dev->rbrq_phys);

    if (he_dev->tbrq_base)
        pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
                            he_dev->tbrq_base, he_dev->tbrq_phys);

    if (he_dev->tpdrq_base)
        pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
                            he_dev->tpdrq_base, he_dev->tpdrq_phys);
#ifdef USE_TPD_POOL
    if (he_dev->tpd_pool)
        pci_pool_destroy(he_dev->tpd_pool);
#else
    if (he_dev->tpd_base)
        pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
                            he_dev->tpd_base, he_dev->tpd_base_phys);
#endif

    if (he_dev->pci_dev) {
        pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
        command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
        pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
    }

    if (he_dev->membase)
        iounmap(he_dev->membase);
}
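/*
 * allocate a zeroed transmit packet descriptor.  returns NULL when
 * none are available -- callers drop the pdu in that case.
 */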
static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
#ifdef USE_TPD_POOL
    struct he_tpd *tpd;
    dma_addr_t dma_handle;

    tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);
    if (tpd == NULL)
        return NULL;

    tpd->status = TPD_ADDR(dma_handle);
    tpd->reserved = 0;
    tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
    tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
    tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

    return tpd;
#else
    int i;

    for (i = 0; i < CONFIG_NUMTPDS; ++i) {
        ++he_dev->tpd_head;
        if (he_dev->tpd_head > he_dev->tpd_end) {
            he_dev->tpd_head = he_dev->tpd_base;
        }

        if (!he_dev->tpd_head->inuse) {
            he_dev->tpd_head->inuse = 1;
            he_dev->tpd_head->status &= TPD_MASK;
            he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
            he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
            he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
            return he_dev->tpd_head;
        }
    }
    hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
    return NULL;
#endif
}
#define AAL5_LEN(buf,len)                       \
            ((((unsigned char *)(buf))[(len)-6] << 8) | \
                (((unsigned char *)(buf))[(len)-5]))

/* 2.10.1.2 receive
 *
 * aal5 packets can optionally return the tcp checksum in the lower
 * 16 bits of the crc (RSR0_TCP_CKSUM)
 */

#define TCP_CKSUM(buf,len)                      \
            ((((unsigned char *)(buf))[(len)-2] << 8) | \
                (((unsigned char *)(buf))[(len)-1]))
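/*
 * the aal5 trailer sits in the last 8 bytes of the pdu: uu, cpi, a
 * 16-bit length (bytes len-6/len-5, extracted by AAL5_LEN above) and
 * a 32-bit crc.
 *
 * he_service_rbrq drains the receive buffer return queue for one
 * group: entries between our cached head and the tail published in
 * the host status page are gathered into the per-vcc iovec list and,
 * on END_PDU, reassembled into an skb and pushed up the stack.  host
 * buffers go back to the adapter by clearing RBP_LOANED.  returns the
 * number of pdus assembled so the caller knows whether to replenish
 * the buffer pools.
 */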
static int
he_service_rbrq(struct he_dev *he_dev, int group)
{
    struct he_rbrq *rbrq_tail = (struct he_rbrq *)
                ((unsigned long)he_dev->rbrq_base |
                    he_dev->hsp->group[group].rbrq_tail);
    struct he_rbp *rbp = NULL;
    unsigned cid, lastcid = -1;
    unsigned buf_len = 0;
    struct sk_buff *skb;
    struct atm_vcc *vcc = NULL;
    struct he_vcc *he_vcc;
    struct he_iovec *iov;
    int pdus_assembled = 0;
    int updated = 0;

    read_lock(&vcc_sklist_lock);
    while (he_dev->rbrq_head != rbrq_tail) {
        ++updated;

        HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
            he_dev->rbrq_head, group,
            RBRQ_ADDR(he_dev->rbrq_head),
            RBRQ_BUFLEN(he_dev->rbrq_head),
            RBRQ_CID(he_dev->rbrq_head),
            RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
            RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
            RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
            RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
            RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
            RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");

#ifdef USE_RBPS
        if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
            rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
        else
#endif
            rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];

        buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
        cid = RBRQ_CID(he_dev->rbrq_head);

        if (cid != lastcid)
            vcc = __find_vcc(he_dev, cid);
        lastcid = cid;

        if (vcc == NULL) {
            hprintk("vcc == NULL (cid 0x%x)\n", cid);
            if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
                rbp->status &= ~RBP_LOANED;

            goto next_rbrq_entry;
        }

        he_vcc = HE_VCC(vcc);
        if (he_vcc == NULL) {
            hprintk("he_vcc == NULL (cid 0x%x)\n", cid);
            if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
                rbp->status &= ~RBP_LOANED;
            goto next_rbrq_entry;
        }

        if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
            hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
            atomic_inc(&vcc->stats->rx_drop);
            goto return_host_buffers;
        }

        he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
        he_vcc->iov_tail->iov_len = buf_len;
        he_vcc->pdu_len += buf_len;
        ++he_vcc->iov_tail;

        if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
            lastcid = -1;
            HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid);
            wake_up(&he_vcc->rx_waitq);
            goto return_host_buffers;
        }

#ifdef notdef
        if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
            hprintk("iovec full! cid 0x%x\n", cid);
            goto return_host_buffers;
        }
#endif
        if (!RBRQ_END_PDU(he_dev->rbrq_head))
            goto next_rbrq_entry;

        if (RBRQ_LEN_ERR(he_dev->rbrq_head)
                || RBRQ_CRC_ERR(he_dev->rbrq_head)) {
            HPRINTK("%s%s (%d.%d)\n",
                RBRQ_CRC_ERR(he_dev->rbrq_head)
                    ? "CRC_ERR " : "",
                RBRQ_LEN_ERR(he_dev->rbrq_head)
                    ? "LEN_ERR" : "",
                vcc->vpi, vcc->vci);
            atomic_inc(&vcc->stats->rx_err);
            goto return_host_buffers;
        }

        skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
                            GFP_ATOMIC);
        if (!skb) {
            HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
            goto return_host_buffers;
        }

        if (rx_skb_reserve > 0)
            skb_reserve(skb, rx_skb_reserve);

        __net_timestamp(skb);

        for (iov = he_vcc->iov_head;
                iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
            if (iov->iov_base & RBP_SMALLBUF)
                memcpy(skb_put(skb, iov->iov_len),
                    he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
            else
#endif
                memcpy(skb_put(skb, iov->iov_len),
                    he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
        }

        switch (vcc->qos.aal) {
        case ATM_AAL0:
            /* 2.10.1.5 raw cell receive */
            skb->len = ATM_AAL0_SDU;
            skb->tail = skb->data + skb->len;
            break;
        case ATM_AAL5:
            /* 2.10.1.2 aal5 receive */
            skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
            skb->tail = skb->data + skb->len;
#ifdef USE_CHECKSUM_HW
            if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = TCP_CKSUM(skb->data,
                            he_vcc->pdu_len);
            }
#endif
            break;
        }

#ifdef should_never_happen
        if (skb->len > vcc->qos.rxtp.max_sdu)
            hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
#endif

#ifdef notdef
        ATM_SKB(skb)->vcc = vcc;
#endif
        spin_unlock(&he_dev->global_lock);
        vcc->push(vcc, skb);
        spin_lock(&he_dev->global_lock);

        atomic_inc(&vcc->stats->rx);

return_host_buffers:
        ++pdus_assembled;

        for (iov = he_vcc->iov_head;
                iov < he_vcc->iov_tail; ++iov) {
#ifdef USE_RBPS
            if (iov->iov_base & RBP_SMALLBUF)
                rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
            else
#endif
                rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];

            rbp->status &= ~RBP_LOANED;
        }

        he_vcc->iov_tail = he_vcc->iov_head;
        he_vcc->pdu_len = 0;

next_rbrq_entry:
        he_dev->rbrq_head = (struct he_rbrq *)
                ((unsigned long) he_dev->rbrq_base |
                    RBRQ_MASK(++he_dev->rbrq_head));
    }
    read_unlock(&vcc_sklist_lock);

    if (updated) {
        if (updated > he_dev->rbrq_peak)
            he_dev->rbrq_peak = updated;

        he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
                        G0_RBRQ_H + (group * 16));
    }

    return pdus_assembled;
}
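/*
 * he_service_tbrq -- drain the transmit buffer return queue: find the
 * tpd the adapter just completed, unmap its dma buffers, pop (or free)
 * the skb and recycle the descriptor.  an EOS entry wakes any closer
 * sleeping on tx_waitq.
 */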
static void
he_service_tbrq(struct he_dev *he_dev, int group)
{
    struct he_tbrq *tbrq_tail = (struct he_tbrq *)
                ((unsigned long)he_dev->tbrq_base |
                    he_dev->hsp->group[group].tbrq_tail);
    struct he_tpd *tpd;
    int slot, updated = 0;
#ifdef USE_TPD_POOL
    struct he_tpd *__tpd;
#endif

    /* 2.1.6 transmit buffer return queue */

    while (he_dev->tbrq_head != tbrq_tail) {
        ++updated;

        HPRINTK("tbrq%d 0x%x%s%s\n",
            group,
            TBRQ_TPD(he_dev->tbrq_head),
            TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
            TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
#ifdef USE_TPD_POOL
        tpd = NULL;
        list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
            if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
                tpd = __tpd;
                list_del(&__tpd->entry);
                break;
            }
        }

        if (tpd == NULL) {
            hprintk("unable to locate tpd for dma buffer %x\n",
                        TBRQ_TPD(he_dev->tbrq_head));
            goto next_tbrq_entry;
        }
#else
        tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
#endif

        if (TBRQ_EOS(he_dev->tbrq_head)) {
            HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
                he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
            if (tpd->vcc)
                wake_up(&HE_VCC(tpd->vcc)->tx_waitq);

            goto next_tbrq_entry;
        }

        for (slot = 0; slot < TPD_MAXIOV; ++slot) {
            if (tpd->iovec[slot].addr)
                pci_unmap_single(he_dev->pci_dev,
                    tpd->iovec[slot].addr,
                    tpd->iovec[slot].len & TPD_LEN_MASK,
                            PCI_DMA_TODEVICE);
            if (tpd->iovec[slot].len & TPD_LST)
                break;
        }

        if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
            if (tpd->vcc && tpd->vcc->pop)
                tpd->vcc->pop(tpd->vcc, tpd->skb);
            else
                dev_kfree_skb_any(tpd->skb);
        }

next_tbrq_entry:
#ifdef USE_TPD_POOL
        if (tpd)
            pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
#else
        tpd->inuse = 0;
#endif
        he_dev->tbrq_head = (struct he_tbrq *)
                ((unsigned long) he_dev->tbrq_base |
                    TBRQ_MASK(++he_dev->tbrq_head));
    }

    if (updated) {
        if (updated > he_dev->tbrq_peak)
            he_dev->tbrq_peak = updated;

        he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
                        G0_TBRQ_H + (group * 16));
    }
}
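/*
 * he_service_rbpl -- return buffers to the large buffer free pool by
 * advancing the tail over entries the host has finished with
 * (RBP_LOANED clear).  note only group 0's registers are touched
 * here; he_service_rbps below is the small-buffer twin.
 */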
static void
he_service_rbpl(struct he_dev *he_dev, int group)
{
    struct he_rbp *newtail;
    struct he_rbp *rbpl_head;
    int moved = 0;

    rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
                    RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));

    for (;;) {
        newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
                        RBPL_MASK(he_dev->rbpl_tail+1));

        /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
        if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
            break;

        newtail->status |= RBP_LOANED;
        he_dev->rbpl_tail = newtail;
        ++moved;
    }

    if (moved)
        he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
}
#ifdef USE_RBPS
static void
he_service_rbps(struct he_dev *he_dev, int group)
{
    struct he_rbp *newtail;
    struct he_rbp *rbps_head;
    int moved = 0;

    rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
                    RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));

    for (;;) {
        newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
                        RBPS_MASK(he_dev->rbps_tail+1));

        /* table 3.42 -- rbps_tail should never be set to rbps_head */
        if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
            break;

        newtail->status |= RBP_LOANED;
        he_dev->rbps_tail = newtail;
        ++moved;
    }

    if (moved)
        he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
}
#endif /* USE_RBPS */
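/*
 * he_tasklet -- interrupt bottom half: walk the interrupt queue and
 * dispatch each event to its service routine.  without USE_TASKLET
 * this runs directly from the hard irq handler, which already holds
 * global_lock, hence the conditional locking here.
 */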
static void
he_tasklet(unsigned long data)
{
    unsigned long flags;
    struct he_dev *he_dev = (struct he_dev *) data;
    int group, type;
    int updated = 0;

    HPRINTK("tasklet (0x%lx)\n", data);
#ifdef USE_TASKLET
    spin_lock_irqsave(&he_dev->global_lock, flags);
#endif

    while (he_dev->irq_head != he_dev->irq_tail) {
        ++updated;

        type = ITYPE_TYPE(he_dev->irq_head->isw);
        group = ITYPE_GROUP(he_dev->irq_head->isw);

        switch (type) {
        case ITYPE_RBRQ_THRESH:
            HPRINTK("rbrq%d threshold\n", group);
            /* fall through */
        case ITYPE_RBRQ_TIMER:
            if (he_service_rbrq(he_dev, group)) {
                he_service_rbpl(he_dev, group);
#ifdef USE_RBPS
                he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
            }
            break;
        case ITYPE_TBRQ_THRESH:
            HPRINTK("tbrq%d threshold\n", group);
            /* fall through */
        case ITYPE_TPD_COMPLETE:
            he_service_tbrq(he_dev, group);
            break;
        case ITYPE_RBPL_THRESH:
            he_service_rbpl(he_dev, group);
            break;
        case ITYPE_RBPS_THRESH:
#ifdef USE_RBPS
            he_service_rbps(he_dev, group);
#endif /* USE_RBPS */
            break;
        case ITYPE_PHY:
            HPRINTK("phy interrupt\n");
#ifdef CONFIG_ATM_HE_USE_SUNI
            spin_unlock_irqrestore(&he_dev->global_lock, flags);
            if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
                he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
            spin_lock_irqsave(&he_dev->global_lock, flags);
#endif
            break;
        case ITYPE_OTHER:
            switch (type|group) {
            case ITYPE_PARITY:
                hprintk("parity error\n");
                break;
            case ITYPE_ABORT:
                hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
                break;
            }
            break;
        case ITYPE_TYPE(ITYPE_INVALID):
            /* see 8.1.1 -- check all queues */

            HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);

            he_service_rbrq(he_dev, 0);
            he_service_rbpl(he_dev, 0);
#ifdef USE_RBPS
            he_service_rbps(he_dev, 0);
#endif /* USE_RBPS */
            he_service_tbrq(he_dev, 0);
            break;
        default:
            hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
        }

        he_dev->irq_head->isw = ITYPE_INVALID;

        he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
    }

    if (updated) {
        if (updated > he_dev->irq_peak)
            he_dev->irq_peak = updated;

        he_writel(he_dev,
            IRQ_SIZE(CONFIG_IRQ_SIZE) |
            IRQ_THRESH(CONFIG_IRQ_THRESH) |
            IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
        (void) he_readl(he_dev, INT_FIFO);  /* 8.1.2 controller errata; flush posted writes */
    }
#ifdef USE_TASKLET
    spin_unlock_irqrestore(&he_dev->global_lock, flags);
#endif
}
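/*
 * hard interrupt handler: latch the queue tail the adapter dmas into
 * host memory, schedule (or run) the bottom half if anything is
 * pending, and ack the interrupt fifo.
 */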
static irqreturn_t
he_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
    unsigned long flags;
    struct he_dev *he_dev = (struct he_dev *) dev_id;
    int handled = 0;

    if (he_dev == NULL)
        return IRQ_NONE;

    spin_lock_irqsave(&he_dev->global_lock, flags);

    he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
                        (*he_dev->irq_tailoffset << 2));

    if (he_dev->irq_tail == he_dev->irq_head) {
        HPRINTK("tailoffset not updated?\n");
        he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
            ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
        (void) he_readl(he_dev, INT_FIFO);  /* 8.1.2 controller errata */
    }

#ifdef DEBUG
    if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
        hprintk("spurious (or shared) interrupt?\n");
#endif

    if (he_dev->irq_head != he_dev->irq_tail) {
        handled = 1;
#ifdef USE_TASKLET
        tasklet_schedule(&he_dev->tasklet);
#else
        he_tasklet((unsigned long) he_dev);
#endif
        he_writel(he_dev, INT_CLEAR_A, INT_FIFO);   /* clear interrupt */
        (void) he_readl(he_dev, INT_FIFO);          /* flush posted writes */
    }
    spin_unlock_irqrestore(&he_dev->global_lock, flags);
    return IRQ_RETVAL(handled);
}
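/*
 * post one tpd on the transmit packet descriptor ready queue (2.1.5).
 * caller must hold global_lock.  if the queue is full the pdu is
 * simply dropped (see the FIXME below).
 */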
static __inline__ void
__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
{
    struct he_tpdrq *new_tail;

    HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
                    tpd, cid, he_dev->tpdrq_tail);

    /* new_tail = he_dev->tpdrq_tail; */
    new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
                    TPDRQ_MASK(he_dev->tpdrq_tail+1));

    /*
     * check to see if we are about to set the tail == head
     * if true, update the head pointer from the adapter
     * to see if this is really the case (reading the queue
     * head for every enqueue would be unnecessarily slow)
     */

    if (new_tail == he_dev->tpdrq_head) {
        he_dev->tpdrq_head = (struct he_tpdrq *)
            (((unsigned long)he_dev->tpdrq_base) |
                TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));

        if (new_tail == he_dev->tpdrq_head) {
            int slot;

            hprintk("tpdrq full (cid 0x%x)\n", cid);
            /*
             * FIXME
             * push tpd onto a transmit backlog queue
             * after service_tbrq, service the backlog
             * for now, we just drop the pdu
             */
            for (slot = 0; slot < TPD_MAXIOV; ++slot) {
                if (tpd->iovec[slot].addr)
                    pci_unmap_single(he_dev->pci_dev,
                        tpd->iovec[slot].addr,
                        tpd->iovec[slot].len & TPD_LEN_MASK,
                                PCI_DMA_TODEVICE);
            }
            if (tpd->skb) {
                if (tpd->vcc->pop)
                    tpd->vcc->pop(tpd->vcc, tpd->skb);
                else
                    dev_kfree_skb_any(tpd->skb);
                atomic_inc(&tpd->vcc->stats->tx_err);
            }
#ifdef USE_TPD_POOL
            pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
#else
            tpd->inuse = 0;
#endif
            return;
        }
    }

    /* 2.1.5 transmit packet descriptor ready queue */
#ifdef USE_TPD_POOL
    list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
    he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
#else
    he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
                (TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
#endif
    he_dev->tpdrq_tail->cid = cid;
    wmb();

    he_dev->tpdrq_tail = new_tail;

    he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
    (void) he_readl(he_dev, TPDRQ_T);   /* flush posted writes */
}
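/*
 * he_open -- open a vcc: allocate per-connection state, then program
 * the transmit (tsr0-14) and receive (rsr0/1/4) connection state
 * registers for the requested traffic class.  cbr connections also
 * reserve one of the shared rate-controller (cs_stper) registers.
 */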
static int
he_open(struct atm_vcc *vcc)
{
    unsigned long flags;
    struct he_dev *he_dev = HE_DEV(vcc->dev);
    struct he_vcc *he_vcc;
    int err = 0;
    unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
    short vpi = vcc->vpi;
    int vci = vcc->vci;

    if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
        return 0;

    HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);

    set_bit(ATM_VF_ADDR, &vcc->flags);

    cid = he_mkcid(he_dev, vpi, vci);

    he_vcc = (struct he_vcc *) kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
    if (he_vcc == NULL) {
        hprintk("unable to allocate he_vcc during open\n");
        return -ENOMEM;
    }

    he_vcc->iov_tail = he_vcc->iov_head;
    he_vcc->pdu_len = 0;
    he_vcc->rc_index = -1;

    init_waitqueue_head(&he_vcc->rx_waitq);
    init_waitqueue_head(&he_vcc->tx_waitq);

    vcc->dev_data = he_vcc;

    if (vcc->qos.txtp.traffic_class != ATM_NONE) {
        int pcr_goal;

        pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
        if (pcr_goal == 0)
            pcr_goal = he_dev->atm_dev->link_rate;
        if (pcr_goal < 0)   /* means round down, technically */
            pcr_goal = -pcr_goal;

        HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);

        switch (vcc->qos.aal) {
        case ATM_AAL5:
            tsr0_aal = TSR0_AAL5;
            tsr4 = TSR4_AAL5;
            break;
        case ATM_AAL0:
            tsr0_aal = TSR0_AAL0_SDU;
            tsr4 = TSR4_AAL0_SDU;
            break;
        default:
            err = -EINVAL;
            goto open_failed;
        }

        spin_lock_irqsave(&he_dev->global_lock, flags);
        tsr0 = he_readl_tsr0(he_dev, cid);
        spin_unlock_irqrestore(&he_dev->global_lock, flags);

        if (TSR0_CONN_STATE(tsr0) != 0) {
            hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
            err = -EBUSY;
            goto open_failed;
        }

        switch (vcc->qos.txtp.traffic_class) {
        case ATM_UBR:
            /* 2.3.3.1 open connection ubr */
            tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
                TSR0_USE_WMIN | TSR0_UPDATE_GER;
            break;
        case ATM_CBR:
            /* 2.3.3.2 open connection cbr */

            /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
            if ((he_dev->total_bw + pcr_goal)
                > (he_dev->atm_dev->link_rate * 9 / 10))
            {
                err = -EBUSY;
                goto open_failed;
            }

            spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */

            /* find an unused cs_stper register */
            for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
                if (he_dev->cs_stper[reg].inuse == 0 ||
                    he_dev->cs_stper[reg].pcr == pcr_goal)
                    break;

            if (reg == HE_NUM_CS_STPER) {
                err = -EBUSY;
                spin_unlock_irqrestore(&he_dev->global_lock, flags);
                goto open_failed;
            }

            he_dev->total_bw += pcr_goal;

            he_vcc->rc_index = reg;
            ++he_dev->cs_stper[reg].inuse;
            he_dev->cs_stper[reg].pcr = pcr_goal;

            clock = he_is622(he_dev) ? 66667000 : 50000000;
            period = clock / pcr_goal;

            HPRINTK("rc_index = %d period = %d\n",
                            reg, period);

            he_writel_mbox(he_dev, rate_to_atmf(period/2),
                            CS_STPER0 + reg);
            spin_unlock_irqrestore(&he_dev->global_lock, flags);

            tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
                        TSR0_RC_INDEX(reg);
            break;
        default:
            err = -EINVAL;
            goto open_failed;
        }

        spin_lock_irqsave(&he_dev->global_lock, flags);

        he_writel_tsr0(he_dev, tsr0, cid);
        he_writel_tsr4(he_dev, tsr4 | 1, cid);
        he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
                    TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
        he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
        he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);

        he_writel_tsr3(he_dev, 0x0, cid);
        he_writel_tsr5(he_dev, 0x0, cid);
        he_writel_tsr6(he_dev, 0x0, cid);
        he_writel_tsr7(he_dev, 0x0, cid);
        he_writel_tsr8(he_dev, 0x0, cid);
        he_writel_tsr10(he_dev, 0x0, cid);
        he_writel_tsr11(he_dev, 0x0, cid);
        he_writel_tsr12(he_dev, 0x0, cid);
        he_writel_tsr13(he_dev, 0x0, cid);
        he_writel_tsr14(he_dev, 0x0, cid);
        (void) he_readl_tsr0(he_dev, cid);  /* flush posted writes */
        spin_unlock_irqrestore(&he_dev->global_lock, flags);
    }

    if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
        unsigned aal;

        HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
                        &HE_VCC(vcc)->rx_waitq);

        switch (vcc->qos.aal) {
        case ATM_AAL5:
            aal = RSR0_AAL5;
            break;
        case ATM_AAL0:
            aal = RSR0_RAWCELL;
            break;
        default:
            err = -EINVAL;
            goto open_failed;
        }

        spin_lock_irqsave(&he_dev->global_lock, flags);

        rsr0 = he_readl_rsr0(he_dev, cid);
        if (rsr0 & RSR0_OPEN_CONN) {
            spin_unlock_irqrestore(&he_dev->global_lock, flags);

            hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
            err = -EBUSY;
            goto open_failed;
        }

#ifdef USE_RBPS
        rsr1 = RSR1_GROUP(0);
        rsr4 = RSR4_GROUP(0);
#else /* !USE_RBPS */
        rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
        rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
#endif /* USE_RBPS */
        rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
                (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;

#ifdef USE_CHECKSUM_HW
        if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
            rsr0 |= RSR0_TCP_CKSUM;
#endif

        he_writel_rsr4(he_dev, rsr4, cid);
        he_writel_rsr1(he_dev, rsr1, cid);
        /* 5.1.11 last parameter initialized should be
           the open/closed indication in rsr0 */
        he_writel_rsr0(he_dev,
            rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
        (void) he_readl_rsr0(he_dev, cid);  /* flush posted writes */

        spin_unlock_irqrestore(&he_dev->global_lock, flags);
    }

open_failed:

    if (err) {
        kfree(he_vcc);
        clear_bit(ATM_VF_ADDR, &vcc->flags);
    }
    else
        set_bit(ATM_VF_READY, &vcc->flags);

    return err;
}
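/*
 * he_close -- the receive side issues RSR0_CLOSE_CONN and sleeps on
 * rx_waitq until the adapter signals completion; the transmit side
 * waits for outstanding packets to drain, flushes the connection and
 * enqueues a final EOS tpd whose return wakes tx_waitq (2.1.2,
 * 2.3.1.1).
 */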
static void
he_close(struct atm_vcc *vcc)
{
    unsigned long flags;
    DECLARE_WAITQUEUE(wait, current);
    struct he_dev *he_dev = HE_DEV(vcc->dev);
    struct he_tpd *tpd;
    unsigned cid;
    struct he_vcc *he_vcc = HE_VCC(vcc);
#define MAX_RETRY 30
    int retry = 0, sleep = 1, tx_inuse;

    HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);

    clear_bit(ATM_VF_READY, &vcc->flags);
    cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);

    if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
        int timeout;

        HPRINTK("close rx cid 0x%x\n", cid);

        /* 2.7.2.2 close receive operation */

        /* wait for previous close (if any) to finish */

        spin_lock_irqsave(&he_dev->global_lock, flags);
        while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
            HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
            udelay(250);
        }

        set_current_state(TASK_UNINTERRUPTIBLE);
        add_wait_queue(&he_vcc->rx_waitq, &wait);

        he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
        (void) he_readl_rsr0(he_dev, cid);  /* flush posted writes */
        he_writel_mbox(he_dev, cid, RXCON_CLOSE);
        spin_unlock_irqrestore(&he_dev->global_lock, flags);

        timeout = schedule_timeout(30*HZ);

        remove_wait_queue(&he_vcc->rx_waitq, &wait);
        set_current_state(TASK_RUNNING);

        if (timeout == 0)
            hprintk("close rx timeout cid 0x%x\n", cid);

        HPRINTK("close rx cid 0x%x complete\n", cid);
    }

    if (vcc->qos.txtp.traffic_class != ATM_NONE) {
        volatile unsigned tsr4, tsr0;
        int timeout;

        HPRINTK("close tx cid 0x%x\n", cid);

        /* 2.1.2
         *
         * ... the host must first stop queueing packets to the TPDRQ
         * on the connection to be closed, then wait for all outstanding
         * packets to be transmitted and their buffers returned to the
         * TBRQ. When the last packet on the connection arrives in the
         * TBRQ, the host issues the close command to the adapter.
         */

        while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
               (retry < MAX_RETRY)) {
            msleep(sleep);
            if (sleep < 250)
                sleep = sleep * 2;

            ++retry;
        }

        if (tx_inuse)
            hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);

        /* 2.3.1.1 generic close operations with flush */

        spin_lock_irqsave(&he_dev->global_lock, flags);
        he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
                    /* also clears TSR4_SESSION_ENDED */

        switch (vcc->qos.txtp.traffic_class) {
        case ATM_UBR:
            he_writel_tsr1(he_dev,
                TSR1_MCR(rate_to_atmf(200000))
                | TSR1_PCR(0), cid);
            break;
        case ATM_CBR:
            he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
            break;
        }
        (void) he_readl_tsr4(he_dev, cid);  /* flush posted writes */

        tpd = __alloc_tpd(he_dev);
        if (tpd == NULL) {
            hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
            goto close_tx_incomplete;
        }
        tpd->status |= TPD_EOS | TPD_INT;
        tpd->skb = NULL;
        tpd->vcc = vcc;
        wmb();

        set_current_state(TASK_UNINTERRUPTIBLE);
        add_wait_queue(&he_vcc->tx_waitq, &wait);
        __enqueue_tpd(he_dev, tpd, cid);
        spin_unlock_irqrestore(&he_dev->global_lock, flags);

        timeout = schedule_timeout(30*HZ);

        remove_wait_queue(&he_vcc->tx_waitq, &wait);
        set_current_state(TASK_RUNNING);

        spin_lock_irqsave(&he_dev->global_lock, flags);

        if (timeout == 0) {
            hprintk("close tx timeout cid 0x%x\n", cid);
            goto close_tx_incomplete;
        }

        while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
            HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
            udelay(250);
        }

        while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
            HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
            udelay(250);
        }

close_tx_incomplete:

        if (vcc->qos.txtp.traffic_class == ATM_CBR) {
            int reg = he_vcc->rc_index;

            HPRINTK("cs_stper reg = %d\n", reg);

            if (he_dev->cs_stper[reg].inuse == 0)
                hprintk("cs_stper[%d].inuse = 0!\n", reg);
            else
                --he_dev->cs_stper[reg].inuse;

            he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
        }
        spin_unlock_irqrestore(&he_dev->global_lock, flags);

        HPRINTK("close tx cid 0x%x complete\n", cid);
    }

    kfree(he_vcc);

    clear_bit(ATM_VF_ADDR, &vcc->flags);
}
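/*
 * he_send -- build a tpd for the skb (scatter/gather across page
 * fragments when USE_SCATTERGATHER is set, chaining additional tpds
 * if a packet needs more than TPD_MAXIOV iovecs), mark the final
 * buffer with TPD_LST and post it on the tpdrq.
 */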
static int
he_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
    unsigned long flags;
    struct he_dev *he_dev = HE_DEV(vcc->dev);
    unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
    struct he_tpd *tpd;
#ifdef USE_SCATTERGATHER
    int i, slot = 0;
#endif

#define HE_TPD_BUFSIZE 0xffff

    HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);

    if ((skb->len > HE_TPD_BUFSIZE) ||
        ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
        hprintk("buffer too large (or small) -- %d bytes\n", skb->len);
        if (vcc->pop)
            vcc->pop(vcc, skb);
        else
            dev_kfree_skb_any(skb);
        atomic_inc(&vcc->stats->tx_err);
        return -EINVAL;
    }

#ifndef USE_SCATTERGATHER
    if (skb_shinfo(skb)->nr_frags) {
        hprintk("no scatter/gather support\n");
        if (vcc->pop)
            vcc->pop(vcc, skb);
        else
            dev_kfree_skb_any(skb);
        atomic_inc(&vcc->stats->tx_err);
        return -EINVAL;
    }
#endif
    spin_lock_irqsave(&he_dev->global_lock, flags);

    tpd = __alloc_tpd(he_dev);
    if (tpd == NULL) {
        if (vcc->pop)
            vcc->pop(vcc, skb);
        else
            dev_kfree_skb_any(skb);
        atomic_inc(&vcc->stats->tx_err);
        spin_unlock_irqrestore(&he_dev->global_lock, flags);
        return -ENOMEM;
    }

    if (vcc->qos.aal == ATM_AAL5)
        tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
    else {
        char *pti_clp = (void *) (skb->data + 3);
        int clp, pti;

        pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
        clp = (*pti_clp & ATM_HDR_CLP);
        tpd->status |= TPD_CELLTYPE(pti);
        if (clp)
            tpd->status |= TPD_CLP;

        skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
    }

#ifdef USE_SCATTERGATHER
    tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
                skb->len - skb->data_len, PCI_DMA_TODEVICE);
    tpd->iovec[slot].len = skb->len - skb->data_len;
    ++slot;

    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

        if (slot == TPD_MAXIOV) {   /* queue tpd; start new tpd */
            tpd->vcc = vcc;
            tpd->skb = NULL;    /* not the last fragment
                           so don't ->push() yet */
            wmb();

            __enqueue_tpd(he_dev, tpd, cid);
            tpd = __alloc_tpd(he_dev);
            if (tpd == NULL) {
                if (vcc->pop)
                    vcc->pop(vcc, skb);
                else
                    dev_kfree_skb_any(skb);
                atomic_inc(&vcc->stats->tx_err);
                spin_unlock_irqrestore(&he_dev->global_lock, flags);
                return -ENOMEM;
            }
            tpd->status |= TPD_USERCELL;
            slot = 0;
        }

        tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
            (void *) page_address(frag->page) + frag->page_offset,
                frag->size, PCI_DMA_TODEVICE);
        tpd->iovec[slot].len = frag->size;
        ++slot;
    }

    tpd->iovec[slot - 1].len |= TPD_LST;
#else
    tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
    tpd->length0 = skb->len | TPD_LST;
#endif
    tpd->status |= TPD_INT;

    tpd->vcc = vcc;
    tpd->skb = skb;
    wmb();
    ATM_SKB(skb)->vcc = vcc;

    __enqueue_tpd(he_dev, tpd, cid);
    spin_unlock_irqrestore(&he_dev->global_lock, flags);

    atomic_inc(&vcc->stats->tx);

    return 0;
}
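/*
 * HE_GET_REG -- privileged register peek into pci, rcm, tcm or
 * mailbox space; anything else is handed to the phy driver when one
 * is attached.
 */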
static int
he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
{
    unsigned long flags;
    struct he_dev *he_dev = HE_DEV(atm_dev);
    struct he_ioctl_reg reg;
    int err = 0;

    switch (cmd) {
    case HE_GET_REG:
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;

        if (copy_from_user(&reg, arg,
                   sizeof(struct he_ioctl_reg)))
            return -EFAULT;

        spin_lock_irqsave(&he_dev->global_lock, flags);
        switch (reg.type) {
        case HE_REGTYPE_PCI:
            reg.val = he_readl(he_dev, reg.addr);
            break;
        case HE_REGTYPE_RCM:
            reg.val =
                he_readl_rcm(he_dev, reg.addr);
            break;
        case HE_REGTYPE_TCM:
            reg.val =
                he_readl_tcm(he_dev, reg.addr);
            break;
        case HE_REGTYPE_MBOX:
            reg.val =
                he_readl_mbox(he_dev, reg.addr);
            break;
        default:
            err = -EINVAL;
            break;
        }
        spin_unlock_irqrestore(&he_dev->global_lock, flags);
        if (err == 0)
            if (copy_to_user(arg, &reg,
                     sizeof(struct he_ioctl_reg)))
                return -EFAULT;
        break;
    default:
#ifdef CONFIG_ATM_HE_USE_SUNI
        if (atm_dev->phy && atm_dev->phy->ioctl)
            err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
#else /* CONFIG_ATM_HE_USE_SUNI */
        err = -EINVAL;
#endif /* CONFIG_ATM_HE_USE_SUNI */
        break;
    }

    return err;
}
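/*
 * suni framer register accessors -- framer registers are word-spaced
 * behind the FRAMER aperture, hence the addr*4.
 */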
static void
he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
{
    unsigned long flags;
    struct he_dev *he_dev = HE_DEV(atm_dev);

    HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);

    spin_lock_irqsave(&he_dev->global_lock, flags);
    he_writel(he_dev, val, FRAMER + (addr*4));
    (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
    spin_unlock_irqrestore(&he_dev->global_lock, flags);
}

static unsigned char
he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
{
    unsigned long flags;
    struct he_dev *he_dev = HE_DEV(atm_dev);
    unsigned reg;

    spin_lock_irqsave(&he_dev->global_lock, flags);
    reg = he_readl(he_dev, FRAMER + (addr*4));
    spin_unlock_irqrestore(&he_dev->global_lock, flags);

    HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
    return reg;
}
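/*
 * /proc/atm statistics: one line per call, selected by *pos.  the
 * error counters are accumulated into statics, presumably because the
 * adapter registers clear on read -- note this also makes the totals
 * global across adapters.
 */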
static int
he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
{
    unsigned long flags;
    struct he_dev *he_dev = HE_DEV(dev);
    int left, i;
#ifdef notdef
    struct he_rbrq *rbrq_tail;
    struct he_tpdrq *tpdrq_head;
    int rbpl_head, rbpl_tail, inuse;
#endif
    static long mcc = 0, oec = 0, dcc = 0, cec = 0;

    left = *pos;
    if (!left--)
        return sprintf(page, "%s\n", version);

    if (!left--)
        return sprintf(page, "%s%s\n\n",
            he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");

    if (!left--)
        return sprintf(page, "Mismatched Cells  VPI/VCI Not Open  Dropped Cells  RCM Dropped Cells\n");

    spin_lock_irqsave(&he_dev->global_lock, flags);
    mcc += he_readl(he_dev, MCC);
    oec += he_readl(he_dev, OEC);
    dcc += he_readl(he_dev, DCC);
    cec += he_readl(he_dev, CEC);
    spin_unlock_irqrestore(&he_dev->global_lock, flags);

    if (!left--)
        return sprintf(page, "%16ld  %16ld  %13ld  %17ld\n\n",
                            mcc, oec, dcc, cec);

    if (!left--)
        return sprintf(page, "irq_size = %d  inuse = ?  peak = %d\n",
                CONFIG_IRQ_SIZE, he_dev->irq_peak);

    if (!left--)
        return sprintf(page, "tpdrq_size = %d  inuse = ?\n",
                        CONFIG_TPDRQ_SIZE);

    if (!left--)
        return sprintf(page, "rbrq_size = %d  inuse = ?  peak = %d\n",
                CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);

    if (!left--)
        return sprintf(page, "tbrq_size = %d  peak = %d\n",
                    CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);

#ifdef notdef
    rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
    rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));

    inuse = rbpl_head - rbpl_tail;
    if (inuse < 0)
        inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
    inuse /= sizeof(struct he_rbp);

    if (!left--)
        return sprintf(page, "rbpl_size = %d  inuse = %d\n\n",
                        CONFIG_RBPL_SIZE, inuse);
#endif

    if (!left--)
        return sprintf(page, "rate controller periods (cbr)\n                 pcr  #vc\n");

    for (i = 0; i < HE_NUM_CS_STPER; ++i)
        if (!left--)
            return sprintf(page, "cs_stper%-2d  %8ld  %3d\n", i,
                        he_dev->cs_stper[i].pcr,
                        he_dev->cs_stper[i].inuse);

    if (!left--)
        return sprintf(page, "total bw (cbr): %d  (limit %d)\n",
            he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);

    return 0;
}
/* eeprom routines -- see 4.7 */

u8
read_prom_byte(struct he_dev *he_dev, int addr)
{
    u32 val = 0, tmp_read = 0;
    int i, j = 0;
    u8 byte_read = 0;

    val = readl(he_dev->membase + HOST_CNTL);
    val &= 0xFFFFE0FF;

    /* Turn on write enable */
    val |= 0x800;
    he_writel(he_dev, val, HOST_CNTL);

    /* Send READ instruction */
    for (i = 0; i < sizeof(readtab)/sizeof(readtab[0]); i++) {
        he_writel(he_dev, val | readtab[i], HOST_CNTL);
        udelay(EEPROM_DELAY);
    }

    /* Next, we need to send the byte address to read from */
    for (i = 7; i >= 0; i--) {
        he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
        udelay(EEPROM_DELAY);
        he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
        udelay(EEPROM_DELAY);
    }

    j = 0;

    val &= 0xFFFFF7FF;  /* Turn off write enable */
    he_writel(he_dev, val, HOST_CNTL);

    /* Now, we can read data from the EEPROM by clocking it in */
    for (i = 7; i >= 0; i--) {
        he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
        udelay(EEPROM_DELAY);
        tmp_read = he_readl(he_dev, HOST_CNTL);
        byte_read |= (unsigned char)
               ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
        he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
        udelay(EEPROM_DELAY);
    }

    he_writel(he_dev, val | ID_CS, HOST_CNTL);
    udelay(EEPROM_DELAY);

    return byte_read;
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
module_param(disable64, bool, 0);
MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
module_param(nvpibits, short, 0);
MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
module_param(nvcibits, short, 0);
MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
module_param(rx_skb_reserve, short, 0);
MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
module_param(irq_coalesce, bool, 0);
MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");

static struct pci_device_id he_pci_tbl[] = {
    { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
      0, 0, 0 },
    { 0, }
};

MODULE_DEVICE_TABLE(pci, he_pci_tbl);

static struct pci_driver he_driver = {
    .name =     "he",
    .probe =    he_init_one,
    .remove =   __devexit_p(he_remove_one),
    .id_table = he_pci_tbl,
};

static int __init he_init(void)
{
    return pci_register_driver(&he_driver);
}

static void __exit he_cleanup(void)
{
    pci_unregister_driver(&he_driver);
}

module_init(he_init);
module_exit(he_cleanup);