fore200e.c

  1. /*
  2. $Id: fore200e.c,v 1.5 2000/04/14 10:10:34 davem Exp $
  3. A FORE Systems 200E-series driver for ATM on Linux.
  4. Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
  5. Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
  6. This driver simultaneously supports PCA-200E and SBA-200E adapters
  7. on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
  8. This program is free software; you can redistribute it and/or modify
  9. it under the terms of the GNU General Public License as published by
  10. the Free Software Foundation; either version 2 of the License, or
  11. (at your option) any later version.
  12. This program is distributed in the hope that it will be useful,
  13. but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. GNU General Public License for more details.
  16. You should have received a copy of the GNU General Public License
  17. along with this program; if not, write to the Free Software
  18. Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19. */
  20. #include <linux/kernel.h>
  21. #include <linux/slab.h>
  22. #include <linux/init.h>
  23. #include <linux/capability.h>
  24. #include <linux/sched.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/bitops.h>
  27. #include <linux/pci.h>
  28. #include <linux/module.h>
  29. #include <linux/atmdev.h>
  30. #include <linux/sonet.h>
  31. #include <linux/atm_suni.h>
  32. #include <linux/dma-mapping.h>
  33. #include <linux/delay.h>
  34. #include <asm/io.h>
  35. #include <asm/string.h>
  36. #include <asm/page.h>
  37. #include <asm/irq.h>
  38. #include <asm/dma.h>
  39. #include <asm/byteorder.h>
  40. #include <asm/uaccess.h>
  41. #include <asm/atomic.h>
  42. #ifdef CONFIG_ATM_FORE200E_SBA
  43. #include <asm/idprom.h>
  44. #include <asm/sbus.h>
  45. #include <asm/openprom.h>
  46. #include <asm/oplib.h>
  47. #include <asm/pgtable.h>
  48. #endif
  49. #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
  50. #define FORE200E_USE_TASKLET
  51. #endif
  52. #if 0 /* enable the debugging code of the buffer supply queues */
  53. #define FORE200E_BSQ_DEBUG
  54. #endif
  55. #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
  56. #define FORE200E_52BYTE_AAL0_SDU
  57. #endif
  58. #include "fore200e.h"
  59. #include "suni.h"
  60. #define FORE200E_VERSION "0.3e"
  61. #define FORE200E "fore200e: "
  62. #if 0 /* override .config */
  63. #define CONFIG_ATM_FORE200E_DEBUG 1
  64. #endif
  65. #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
  66. #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
  67. printk(FORE200E format, ##args); } while (0)
  68. #else
  69. #define DPRINTK(level, format, args...) do {} while (0)
  70. #endif
  71. #define FORE200E_ALIGN(addr, alignment) \
  72. ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
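/* illustrative example (values not taken from the driver): with addr = 0x1003 and
   alignment = 0x10, ((0x1003 + 0xF) & ~0xF) - 0x1003 = 0x1010 - 0x1003 = 0xD, i.e.
   the macro yields the number of padding bytes needed to reach the next
   'alignment'-byte boundary, not the aligned address itself */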
  73. #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))
  74. #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
  75. #define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo))
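/* note: FORE200E_NEXT_ENTRY advances a circular queue index in place, wrapping back
   to 0 after (modulo - 1); e.g. with modulo = 4 the index cycles 0, 1, 2, 3, 0, ... */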
  76. #if 1
  77. #define ASSERT(expr) if (!(expr)) { \
  78. printk(FORE200E "assertion failed! %s[%d]: %s\n", \
  79. __FUNCTION__, __LINE__, #expr); \
  80. panic(FORE200E "%s", __FUNCTION__); \
  81. }
  82. #else
  83. #define ASSERT(expr) do {} while (0)
  84. #endif
  85. static const struct atmdev_ops fore200e_ops;
  86. static const struct fore200e_bus fore200e_bus[];
  87. static LIST_HEAD(fore200e_boards);
  88. MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
  89. MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
  90. MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
  91. static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
  92. { BUFFER_S1_NBR, BUFFER_L1_NBR },
  93. { BUFFER_S2_NBR, BUFFER_L2_NBR }
  94. };
  95. static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
  96. { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
  97. { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
  98. };
  99. #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
  100. static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
  101. #endif
  102. #if 0 /* currently unused */
  103. static int
  104. fore200e_fore2atm_aal(enum fore200e_aal aal)
  105. {
  106. switch(aal) {
  107. case FORE200E_AAL0: return ATM_AAL0;
  108. case FORE200E_AAL34: return ATM_AAL34;
  109. case FORE200E_AAL5: return ATM_AAL5;
  110. }
  111. return -EINVAL;
  112. }
  113. #endif
  114. static enum fore200e_aal
  115. fore200e_atm2fore_aal(int aal)
  116. {
  117. switch(aal) {
  118. case ATM_AAL0: return FORE200E_AAL0;
  119. case ATM_AAL34: return FORE200E_AAL34;
  120. case ATM_AAL1:
  121. case ATM_AAL2:
  122. case ATM_AAL5: return FORE200E_AAL5;
  123. }
  124. return -EINVAL;
  125. }
  126. static char*
  127. fore200e_irq_itoa(int irq)
  128. {
  129. static char str[8];
  130. sprintf(str, "%d", irq);
  131. return str;
  132. }
  133. static void*
  134. fore200e_kmalloc(int size, gfp_t flags)
  135. {
  136. void *chunk = kzalloc(size, flags);
  137. if (!chunk)
  138. printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags);
  139. return chunk;
  140. }
  141. static void
  142. fore200e_kfree(void* chunk)
  143. {
  144. kfree(chunk);
  145. }
  146. /* allocate and align a chunk of memory intended to hold the data being exchanged
  147. between the driver and the adapter (using streaming DVMA) */
  148. static int
  149. fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
  150. {
  151. unsigned long offset = 0;
  152. if (alignment <= sizeof(int))
  153. alignment = 0;
  154. chunk->alloc_size = size + alignment;
  155. chunk->align_size = size;
  156. chunk->direction = direction;
  157. chunk->alloc_addr = fore200e_kmalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
  158. if (chunk->alloc_addr == NULL)
  159. return -ENOMEM;
  160. if (alignment > 0)
  161. offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
  162. chunk->align_addr = chunk->alloc_addr + offset;
  163. chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
  164. return 0;
  165. }
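/* note: align_addr is the first address inside the kmalloc'ed region that satisfies
   the requested alignment, and only align_size bytes starting there are DMA-mapped;
   alloc_addr is kept solely so the whole region can be kfree'd later */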
  166. /* free a chunk of memory */
  167. static void
  168. fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
  169. {
  170. fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
  171. fore200e_kfree(chunk->alloc_addr);
  172. }
  173. static void
  174. fore200e_spin(int msecs)
  175. {
  176. unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
  177. while (time_before(jiffies, timeout));
  178. }
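/* note: fore200e_spin() busy-waits for roughly 'msecs' milliseconds without sleeping;
   in this driver it is only used for the short settling delays around board resets */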
  179. static int
  180. fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
  181. {
  182. unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
  183. int ok;
  184. mb();
  185. do {
  186. if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
  187. break;
  188. } while (time_before(jiffies, timeout));
  189. #if 1
  190. if (!ok) {
  191. printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
  192. *addr, val);
  193. }
  194. #endif
  195. return ok;
  196. }
  197. static int
  198. fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
  199. {
  200. unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
  201. int ok;
  202. do {
  203. if ((ok = (fore200e->bus->read(addr) == val)))
  204. break;
  205. } while (time_before(jiffies, timeout));
  206. #if 1
  207. if (!ok) {
  208. printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
  209. fore200e->bus->read(addr), val);
  210. }
  211. #endif
  212. return ok;
  213. }
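/* note: fore200e_poll() above spins on a status word located in host memory and
   updated by the adapter through DMA, whereas fore200e_io_poll() reads a word in the
   adapter's own memory space through the bus-specific read() accessor */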
  214. static void
  215. fore200e_free_rx_buf(struct fore200e* fore200e)
  216. {
  217. int scheme, magn, nbr;
  218. struct buffer* buffer;
  219. for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
  220. for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
  221. if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
  222. for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
  223. struct chunk* data = &buffer[ nbr ].data;
  224. if (data->alloc_addr != NULL)
  225. fore200e_chunk_free(fore200e, data);
  226. }
  227. }
  228. }
  229. }
  230. }
  231. static void
  232. fore200e_uninit_bs_queue(struct fore200e* fore200e)
  233. {
  234. int scheme, magn;
  235. for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
  236. for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
  237. struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
  238. struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
  239. if (status->alloc_addr)
  240. fore200e->bus->dma_chunk_free(fore200e, status);
  241. if (rbd_block->alloc_addr)
  242. fore200e->bus->dma_chunk_free(fore200e, rbd_block);
  243. }
  244. }
  245. }
  246. static int
  247. fore200e_reset(struct fore200e* fore200e, int diag)
  248. {
  249. int ok;
  250. fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
  251. fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
  252. fore200e->bus->reset(fore200e);
  253. if (diag) {
  254. ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
  255. if (ok == 0) {
  256. printk(FORE200E "device %s self-test failed\n", fore200e->name);
  257. return -ENODEV;
  258. }
  259. printk(FORE200E "device %s self-test passed\n", fore200e->name);
  260. fore200e->state = FORE200E_STATE_RESET;
  261. }
  262. return 0;
  263. }
  264. static void
  265. fore200e_shutdown(struct fore200e* fore200e)
  266. {
  267. printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
  268. fore200e->name, fore200e->phys_base,
  269. fore200e_irq_itoa(fore200e->irq));
  270. if (fore200e->state > FORE200E_STATE_RESET) {
  271. /* first, reset the board to prevent further interrupts or data transfers */
  272. fore200e_reset(fore200e, 0);
  273. }
  274. /* then, release all allocated resources */
  275. switch(fore200e->state) {
  276. case FORE200E_STATE_COMPLETE:
  277. kfree(fore200e->stats);
  278. case FORE200E_STATE_IRQ:
  279. free_irq(fore200e->irq, fore200e->atm_dev);
  280. case FORE200E_STATE_ALLOC_BUF:
  281. fore200e_free_rx_buf(fore200e);
  282. case FORE200E_STATE_INIT_BSQ:
  283. fore200e_uninit_bs_queue(fore200e);
  284. case FORE200E_STATE_INIT_RXQ:
  285. fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
  286. fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
  287. case FORE200E_STATE_INIT_TXQ:
  288. fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
  289. fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
  290. case FORE200E_STATE_INIT_CMDQ:
  291. fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
  292. case FORE200E_STATE_INITIALIZE:
  293. /* nothing to do for that state */
  294. case FORE200E_STATE_START_FW:
  295. /* nothing to do for that state */
  296. case FORE200E_STATE_LOAD_FW:
  297. /* nothing to do for that state */
  298. case FORE200E_STATE_RESET:
  299. /* nothing to do for that state */
  300. case FORE200E_STATE_MAP:
  301. fore200e->bus->unmap(fore200e);
  302. case FORE200E_STATE_CONFIGURE:
  303. /* nothing to do for that state */
  304. case FORE200E_STATE_REGISTER:
  305. /* XXX shouldn't we *start* by deregistering the device? */
  306. atm_dev_deregister(fore200e->atm_dev);
  307. case FORE200E_STATE_BLANK:
  308. /* nothing to do for that state */
  309. break;
  310. }
  311. }
  312. #ifdef CONFIG_ATM_FORE200E_PCA
  313. static u32 fore200e_pca_read(volatile u32 __iomem *addr)
  314. {
  315. /* on big-endian hosts, the board is configured to convert
  316. the endianness of slave RAM accesses */
  317. return le32_to_cpu(readl(addr));
  318. }
  319. static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
  320. {
  321. /* on big-endian hosts, the board is configured to convert
  322. the endianness of slave RAM accesses */
  323. writel(cpu_to_le32(val), addr);
  324. }
  325. static u32
  326. fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
  327. {
  328. u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
  329. DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
  330. virt_addr, size, direction, dma_addr);
  331. return dma_addr;
  332. }
  333. static void
  334. fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
  335. {
  336. DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
  337. dma_addr, size, direction);
  338. pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
  339. }
  340. static void
  341. fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
  342. {
  343. DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
  344. pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
  345. }
  346. static void
  347. fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
  348. {
  349. DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
  350. pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
  351. }
  352. /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
  353. (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
  354. static int
  355. fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
  356. int size, int nbr, int alignment)
  357. {
  358. /* returned chunks are page-aligned */
  359. chunk->alloc_size = size * nbr;
  360. chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
  361. chunk->alloc_size,
  362. &chunk->dma_addr);
  363. if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
  364. return -ENOMEM;
  365. chunk->align_addr = chunk->alloc_addr;
  366. return 0;
  367. }
  368. /* free a DMA consistent chunk of memory */
  369. static void
  370. fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
  371. {
  372. pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
  373. chunk->alloc_size,
  374. chunk->alloc_addr,
  375. chunk->dma_addr);
  376. }
  377. static int
  378. fore200e_pca_irq_check(struct fore200e* fore200e)
  379. {
  380. /* this is a 1 bit register */
  381. int irq_posted = readl(fore200e->regs.pca.psr);
  382. #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
  383. if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
  384. DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
  385. }
  386. #endif
  387. return irq_posted;
  388. }
  389. static void
  390. fore200e_pca_irq_ack(struct fore200e* fore200e)
  391. {
  392. writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
  393. }
  394. static void
  395. fore200e_pca_reset(struct fore200e* fore200e)
  396. {
  397. writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
  398. fore200e_spin(10);
  399. writel(0, fore200e->regs.pca.hcr);
  400. }
  401. static int __devinit
  402. fore200e_pca_map(struct fore200e* fore200e)
  403. {
  404. DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
  405. fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
  406. if (fore200e->virt_base == NULL) {
  407. printk(FORE200E "can't map device %s\n", fore200e->name);
  408. return -EFAULT;
  409. }
  410. DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
  411. /* gain access to the PCA specific registers */
  412. fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
  413. fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
  414. fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
  415. fore200e->state = FORE200E_STATE_MAP;
  416. return 0;
  417. }
  418. static void
  419. fore200e_pca_unmap(struct fore200e* fore200e)
  420. {
  421. DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
  422. if (fore200e->virt_base != NULL)
  423. iounmap(fore200e->virt_base);
  424. }
  425. static int __devinit
  426. fore200e_pca_configure(struct fore200e* fore200e)
  427. {
  428. struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
  429. u8 master_ctrl, latency;
  430. DPRINTK(2, "device %s being configured\n", fore200e->name);
  431. if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
  432. printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
  433. return -EIO;
  434. }
  435. pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
  436. master_ctrl = master_ctrl
  437. #if defined(__BIG_ENDIAN)
  438. /* request the PCA board to convert the endianness of slave RAM accesses */
  439. | PCA200E_CTRL_CONVERT_ENDIAN
  440. #endif
  441. #if 0
  442. | PCA200E_CTRL_DIS_CACHE_RD
  443. | PCA200E_CTRL_DIS_WRT_INVAL
  444. | PCA200E_CTRL_ENA_CONT_REQ_MODE
  445. | PCA200E_CTRL_2_CACHE_WRT_INVAL
  446. #endif
  447. | PCA200E_CTRL_LARGE_PCI_BURSTS;
  448. pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
  449. /* raise latency from 32 (default) to 192, as this seems to prevent NIC
  450. lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
  451. this may impact the performance of other PCI devices on the same bus, though */
  452. latency = 192;
  453. pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
  454. fore200e->state = FORE200E_STATE_CONFIGURE;
  455. return 0;
  456. }
  457. static int __init
  458. fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
  459. {
  460. struct host_cmdq* cmdq = &fore200e->host_cmdq;
  461. struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
  462. struct prom_opcode opcode;
  463. int ok;
  464. u32 prom_dma;
  465. FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
  466. opcode.opcode = OPCODE_GET_PROM;
  467. opcode.pad = 0;
  468. prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
  469. fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
  470. *entry->status = STATUS_PENDING;
  471. fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
  472. ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
  473. *entry->status = STATUS_FREE;
  474. fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
  475. if (ok == 0) {
  476. printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
  477. return -EIO;
  478. }
  479. #if defined(__BIG_ENDIAN)
  480. #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
  481. /* MAC address is stored as little-endian */
  482. swap_here(&prom->mac_addr[0]);
  483. swap_here(&prom->mac_addr[4]);
  484. #endif
  485. return 0;
  486. }
  487. static int
  488. fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
  489. {
  490. struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
  491. return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
  492. pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
  493. }
  494. #endif /* CONFIG_ATM_FORE200E_PCA */
  495. #ifdef CONFIG_ATM_FORE200E_SBA
  496. static u32
  497. fore200e_sba_read(volatile u32 __iomem *addr)
  498. {
  499. return sbus_readl(addr);
  500. }
  501. static void
  502. fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
  503. {
  504. sbus_writel(val, addr);
  505. }
  506. static u32
  507. fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
  508. {
  509. u32 dma_addr = sbus_map_single((struct sbus_dev*)fore200e->bus_dev, virt_addr, size, direction);
  510. DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
  511. virt_addr, size, direction, dma_addr);
  512. return dma_addr;
  513. }
  514. static void
  515. fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
  516. {
  517. DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
  518. dma_addr, size, direction);
  519. sbus_unmap_single((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
  520. }
  521. static void
  522. fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
  523. {
  524. DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
  525. sbus_dma_sync_single_for_cpu((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
  526. }
  527. static void
  528. fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
  529. {
  530. DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
  531. sbus_dma_sync_single_for_device((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
  532. }
  533. /* allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
  534. (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
  535. static int
  536. fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
  537. int size, int nbr, int alignment)
  538. {
  539. chunk->alloc_size = chunk->align_size = size * nbr;
  540. /* returned chunks are page-aligned */
  541. chunk->alloc_addr = sbus_alloc_consistent((struct sbus_dev*)fore200e->bus_dev,
  542. chunk->alloc_size,
  543. &chunk->dma_addr);
  544. if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
  545. return -ENOMEM;
  546. chunk->align_addr = chunk->alloc_addr;
  547. return 0;
  548. }
  549. /* free a DVMA consistent chunk of memory */
  550. static void
  551. fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
  552. {
  553. sbus_free_consistent((struct sbus_dev*)fore200e->bus_dev,
  554. chunk->alloc_size,
  555. chunk->alloc_addr,
  556. chunk->dma_addr);
  557. }
  558. static void
  559. fore200e_sba_irq_enable(struct fore200e* fore200e)
  560. {
  561. u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
  562. fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
  563. }
  564. static int
  565. fore200e_sba_irq_check(struct fore200e* fore200e)
  566. {
  567. return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
  568. }
  569. static void
  570. fore200e_sba_irq_ack(struct fore200e* fore200e)
  571. {
  572. u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
  573. fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
  574. }
  575. static void
  576. fore200e_sba_reset(struct fore200e* fore200e)
  577. {
  578. fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
  579. fore200e_spin(10);
  580. fore200e->bus->write(0, fore200e->regs.sba.hcr);
  581. }
  582. static int __init
  583. fore200e_sba_map(struct fore200e* fore200e)
  584. {
  585. struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
  586. unsigned int bursts;
  587. /* gain access to the SBA specific registers */
  588. fore200e->regs.sba.hcr = sbus_ioremap(&sbus_dev->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
  589. fore200e->regs.sba.bsr = sbus_ioremap(&sbus_dev->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
  590. fore200e->regs.sba.isr = sbus_ioremap(&sbus_dev->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
  591. fore200e->virt_base = sbus_ioremap(&sbus_dev->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
  592. if (fore200e->virt_base == NULL) {
  593. printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
  594. return -EFAULT;
  595. }
  596. DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
  597. fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
  598. /* get the supported DVMA burst sizes */
  599. bursts = prom_getintdefault(sbus_dev->bus->prom_node, "burst-sizes", 0x00);
  600. if (sbus_can_dma_64bit(sbus_dev))
  601. sbus_set_sbus64(sbus_dev, bursts);
  602. fore200e->state = FORE200E_STATE_MAP;
  603. return 0;
  604. }
  605. static void
  606. fore200e_sba_unmap(struct fore200e* fore200e)
  607. {
  608. sbus_iounmap(fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
  609. sbus_iounmap(fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
  610. sbus_iounmap(fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
  611. sbus_iounmap(fore200e->virt_base, SBA200E_RAM_LENGTH);
  612. }
  613. static int __init
  614. fore200e_sba_configure(struct fore200e* fore200e)
  615. {
  616. fore200e->state = FORE200E_STATE_CONFIGURE;
  617. return 0;
  618. }
  619. static struct fore200e* __init
  620. fore200e_sba_detect(const struct fore200e_bus* bus, int index)
  621. {
  622. struct fore200e* fore200e;
  623. struct sbus_bus* sbus_bus;
  624. struct sbus_dev* sbus_dev = NULL;
  625. unsigned int count = 0;
  626. for_each_sbus (sbus_bus) {
  627. for_each_sbusdev (sbus_dev, sbus_bus) {
  628. if (strcmp(sbus_dev->prom_name, SBA200E_PROM_NAME) == 0) {
  629. if (count >= index)
  630. goto found;
  631. count++;
  632. }
  633. }
  634. }
  635. return NULL;
  636. found:
  637. if (sbus_dev->num_registers != 4) {
  638. printk(FORE200E "this %s device has %d instead of 4 registers\n",
  639. bus->model_name, sbus_dev->num_registers);
  640. return NULL;
  641. }
  642. fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
  643. if (fore200e == NULL)
  644. return NULL;
  645. fore200e->bus = bus;
  646. fore200e->bus_dev = sbus_dev;
  647. fore200e->irq = sbus_dev->irqs[ 0 ];
  648. fore200e->phys_base = (unsigned long)sbus_dev;
  649. sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
  650. return fore200e;
  651. }
  652. static int __init
  653. fore200e_sba_prom_read(struct fore200e* fore200e, struct prom_data* prom)
  654. {
  655. struct sbus_dev* sbus_dev = (struct sbus_dev*) fore200e->bus_dev;
  656. int len;
  657. len = prom_getproperty(sbus_dev->prom_node, "macaddrlo2", &prom->mac_addr[ 4 ], 4);
  658. if (len < 0)
  659. return -EBUSY;
  660. len = prom_getproperty(sbus_dev->prom_node, "macaddrhi4", &prom->mac_addr[ 2 ], 4);
  661. if (len < 0)
  662. return -EBUSY;
  663. prom_getproperty(sbus_dev->prom_node, "serialnumber",
  664. (char*)&prom->serial_number, sizeof(prom->serial_number));
  665. prom_getproperty(sbus_dev->prom_node, "promversion",
  666. (char*)&prom->hw_revision, sizeof(prom->hw_revision));
  667. return 0;
  668. }
  669. static int
  670. fore200e_sba_proc_read(struct fore200e* fore200e, char *page)
  671. {
  672. struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
  673. return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", sbus_dev->slot, sbus_dev->prom_name);
  674. }
  675. #endif /* CONFIG_ATM_FORE200E_SBA */
  676. static void
  677. fore200e_tx_irq(struct fore200e* fore200e)
  678. {
  679. struct host_txq* txq = &fore200e->host_txq;
  680. struct host_txq_entry* entry;
  681. struct atm_vcc* vcc;
  682. struct fore200e_vc_map* vc_map;
  683. if (fore200e->host_txq.txing == 0)
  684. return;
  685. for (;;) {
  686. entry = &txq->host_entry[ txq->tail ];
  687. if ((*entry->status & STATUS_COMPLETE) == 0) {
  688. break;
  689. }
  690. DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
  691. entry, txq->tail, entry->vc_map, entry->skb);
  692. /* free copy of misaligned data */
  693. kfree(entry->data);
  694. /* remove DMA mapping */
  695. fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
  696. DMA_TO_DEVICE);
  697. vc_map = entry->vc_map;
  698. /* vcc closed since the time the entry was submitted for tx? */
  699. if ((vc_map->vcc == NULL) ||
  700. (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
  701. DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
  702. fore200e->atm_dev->number);
  703. dev_kfree_skb_any(entry->skb);
  704. }
  705. else {
  706. ASSERT(vc_map->vcc);
  707. /* vcc closed then immediately re-opened? */
  708. if (vc_map->incarn != entry->incarn) {
  709. /* when a vcc is closed, some PDUs may still be pending in the tx queue.
  710. if the same vcc is immediately re-opened, those pending PDUs must not
  711. be popped once their transmission completes, as they belong to the prior
  712. incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc would be
  713. decremented by the size of an unrelated skb, possibly driving
  714. sk->sk_wmem_alloc negative and ultimately freezing the vcc.
  715. we therefore bind each tx entry to the current incarnation of the vcc
  716. when the entry is submitted for tx. if, when the tx later completes,
  717. the incarnation number of the entry no longer matches that of the vcc,
  718. the vcc has been closed and then re-opened in the meantime.
  719. we thus simply drop the skb here. */
  720. DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
  721. fore200e->atm_dev->number);
  722. dev_kfree_skb_any(entry->skb);
  723. }
  724. else {
  725. vcc = vc_map->vcc;
  726. ASSERT(vcc);
  727. /* notify tx completion */
  728. if (vcc->pop) {
  729. vcc->pop(vcc, entry->skb);
  730. }
  731. else {
  732. dev_kfree_skb_any(entry->skb);
  733. }
  734. #if 1
  735. /* race fixed by the above incarnation mechanism, but... */
  736. if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
  737. atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
  738. }
  739. #endif
  740. /* check error condition */
  741. if (*entry->status & STATUS_ERROR)
  742. atomic_inc(&vcc->stats->tx_err);
  743. else
  744. atomic_inc(&vcc->stats->tx);
  745. }
  746. }
  747. *entry->status = STATUS_FREE;
  748. fore200e->host_txq.txing--;
  749. FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
  750. }
  751. }
  752. #ifdef FORE200E_BSQ_DEBUG
  753. int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
  754. {
  755. struct buffer* buffer;
  756. int count = 0;
  757. buffer = bsq->freebuf;
  758. while (buffer) {
  759. if (buffer->supplied) {
  760. printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
  761. where, scheme, magn, buffer->index);
  762. }
  763. if (buffer->magn != magn) {
  764. printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
  765. where, scheme, magn, buffer->index, buffer->magn);
  766. }
  767. if (buffer->scheme != scheme) {
  768. printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
  769. where, scheme, magn, buffer->index, buffer->scheme);
  770. }
  771. if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
  772. printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
  773. where, scheme, magn, buffer->index);
  774. }
  775. count++;
  776. buffer = buffer->next;
  777. }
  778. if (count != bsq->freebuf_count) {
  779. printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
  780. where, scheme, magn, count, bsq->freebuf_count);
  781. }
  782. return 0;
  783. }
  784. #endif
  785. static void
  786. fore200e_supply(struct fore200e* fore200e)
  787. {
  788. int scheme, magn, i;
  789. struct host_bsq* bsq;
  790. struct host_bsq_entry* entry;
  791. struct buffer* buffer;
  792. for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
  793. for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
  794. bsq = &fore200e->host_bsq[ scheme ][ magn ];
  795. #ifdef FORE200E_BSQ_DEBUG
  796. bsq_audit(1, bsq, scheme, magn);
  797. #endif
  798. while (bsq->freebuf_count >= RBD_BLK_SIZE) {
  799. DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
  800. RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
  801. entry = &bsq->host_entry[ bsq->head ];
  802. for (i = 0; i < RBD_BLK_SIZE; i++) {
  803. /* take the first buffer in the free buffer list */
  804. buffer = bsq->freebuf;
  805. if (!buffer) {
  806. printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
  807. scheme, magn, bsq->freebuf_count);
  808. return;
  809. }
  810. bsq->freebuf = buffer->next;
  811. #ifdef FORE200E_BSQ_DEBUG
  812. if (buffer->supplied)
  813. printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
  814. scheme, magn, buffer->index);
  815. buffer->supplied = 1;
  816. #endif
  817. entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
  818. entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer);
  819. }
  820. FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
  821. /* decrease accordingly the number of free rx buffers */
  822. bsq->freebuf_count -= RBD_BLK_SIZE;
  823. *entry->status = STATUS_PENDING;
  824. fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
  825. }
  826. }
  827. }
  828. }
  829. static int
  830. fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
  831. {
  832. struct sk_buff* skb;
  833. struct buffer* buffer;
  834. struct fore200e_vcc* fore200e_vcc;
  835. int i, pdu_len = 0;
  836. #ifdef FORE200E_52BYTE_AAL0_SDU
  837. u32 cell_header = 0;
  838. #endif
  839. ASSERT(vcc);
  840. fore200e_vcc = FORE200E_VCC(vcc);
  841. ASSERT(fore200e_vcc);
  842. #ifdef FORE200E_52BYTE_AAL0_SDU
  843. if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
  844. cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
  845. (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
  846. (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
  847. (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
  848. rpd->atm_header.clp;
  849. pdu_len = 4;
  850. }
  851. #endif
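/* note: with FORE200E_52BYTE_AAL0_SDU enabled, the 4 bytes of rebuilt ATM cell header
   accounted for above plus the 48-byte cell payload delivered by the adapter add up
   to the 52-byte AAL0 SDU that atmdump-like applications expect */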
  852. /* compute total PDU length */
  853. for (i = 0; i < rpd->nseg; i++)
  854. pdu_len += rpd->rsd[ i ].length;
  855. skb = alloc_skb(pdu_len, GFP_ATOMIC);
  856. if (skb == NULL) {
  857. DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
  858. atomic_inc(&vcc->stats->rx_drop);
  859. return -ENOMEM;
  860. }
  861. __net_timestamp(skb);
  862. #ifdef FORE200E_52BYTE_AAL0_SDU
  863. if (cell_header) {
  864. *((u32*)skb_put(skb, 4)) = cell_header;
  865. }
  866. #endif
  867. /* reassemble segments */
  868. for (i = 0; i < rpd->nseg; i++) {
  869. /* rebuild rx buffer address from rsd handle */
  870. buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
  871. /* Make device DMA transfer visible to CPU. */
  872. fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
  873. memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);
  874. /* Now let the device get at it again. */
  875. fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
  876. }
  877. DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
  878. if (pdu_len < fore200e_vcc->rx_min_pdu)
  879. fore200e_vcc->rx_min_pdu = pdu_len;
  880. if (pdu_len > fore200e_vcc->rx_max_pdu)
  881. fore200e_vcc->rx_max_pdu = pdu_len;
  882. fore200e_vcc->rx_pdu++;
  883. /* push PDU */
  884. if (atm_charge(vcc, skb->truesize) == 0) {
  885. DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
  886. vcc->itf, vcc->vpi, vcc->vci);
  887. dev_kfree_skb_any(skb);
  888. atomic_inc(&vcc->stats->rx_drop);
  889. return -ENOMEM;
  890. }
  891. ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
  892. vcc->push(vcc, skb);
  893. atomic_inc(&vcc->stats->rx);
  894. ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
  895. return 0;
  896. }
  897. static void
  898. fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
  899. {
  900. struct host_bsq* bsq;
  901. struct buffer* buffer;
  902. int i;
  903. for (i = 0; i < rpd->nseg; i++) {
  904. /* rebuild rx buffer address from rsd handle */
  905. buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
  906. bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
  907. #ifdef FORE200E_BSQ_DEBUG
  908. bsq_audit(2, bsq, buffer->scheme, buffer->magn);
  909. if (buffer->supplied == 0)
  910. printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
  911. buffer->scheme, buffer->magn, buffer->index);
  912. buffer->supplied = 0;
  913. #endif
  914. /* re-insert the buffer into the free buffer list */
  915. buffer->next = bsq->freebuf;
  916. bsq->freebuf = buffer;
  917. /* then increment the number of free rx buffers */
  918. bsq->freebuf_count++;
  919. }
  920. }
  921. static void
  922. fore200e_rx_irq(struct fore200e* fore200e)
  923. {
  924. struct host_rxq* rxq = &fore200e->host_rxq;
  925. struct host_rxq_entry* entry;
  926. struct atm_vcc* vcc;
  927. struct fore200e_vc_map* vc_map;
  928. for (;;) {
  929. entry = &rxq->host_entry[ rxq->head ];
  930. /* no more received PDUs */
  931. if ((*entry->status & STATUS_COMPLETE) == 0)
  932. break;
  933. vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
  934. if ((vc_map->vcc == NULL) ||
  935. (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
  936. DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
  937. fore200e->atm_dev->number,
  938. entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
  939. }
  940. else {
  941. vcc = vc_map->vcc;
  942. ASSERT(vcc);
  943. if ((*entry->status & STATUS_ERROR) == 0) {
  944. fore200e_push_rpd(fore200e, vcc, entry->rpd);
  945. }
  946. else {
  947. DPRINTK(2, "damaged PDU on %d.%d.%d\n",
  948. fore200e->atm_dev->number,
  949. entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
  950. atomic_inc(&vcc->stats->rx_err);
  951. }
  952. }
  953. FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
  954. fore200e_collect_rpd(fore200e, entry->rpd);
  955. /* rewrite the rpd address to ack the received PDU */
  956. fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
  957. *entry->status = STATUS_FREE;
  958. fore200e_supply(fore200e);
  959. }
  960. }
  961. #ifndef FORE200E_USE_TASKLET
  962. static void
  963. fore200e_irq(struct fore200e* fore200e)
  964. {
  965. unsigned long flags;
  966. spin_lock_irqsave(&fore200e->q_lock, flags);
  967. fore200e_rx_irq(fore200e);
  968. spin_unlock_irqrestore(&fore200e->q_lock, flags);
  969. spin_lock_irqsave(&fore200e->q_lock, flags);
  970. fore200e_tx_irq(fore200e);
  971. spin_unlock_irqrestore(&fore200e->q_lock, flags);
  972. }
  973. #endif
  974. static irqreturn_t
  975. fore200e_interrupt(int irq, void* dev)
  976. {
  977. struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
  978. if (fore200e->bus->irq_check(fore200e) == 0) {
  979. DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
  980. return IRQ_NONE;
  981. }
  982. DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
  983. #ifdef FORE200E_USE_TASKLET
  984. tasklet_schedule(&fore200e->tx_tasklet);
  985. tasklet_schedule(&fore200e->rx_tasklet);
  986. #else
  987. fore200e_irq(fore200e);
  988. #endif
  989. fore200e->bus->irq_ack(fore200e);
  990. return IRQ_HANDLED;
  991. }
  992. #ifdef FORE200E_USE_TASKLET
  993. static void
  994. fore200e_tx_tasklet(unsigned long data)
  995. {
  996. struct fore200e* fore200e = (struct fore200e*) data;
  997. unsigned long flags;
  998. DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
  999. spin_lock_irqsave(&fore200e->q_lock, flags);
  1000. fore200e_tx_irq(fore200e);
  1001. spin_unlock_irqrestore(&fore200e->q_lock, flags);
  1002. }
  1003. static void
  1004. fore200e_rx_tasklet(unsigned long data)
  1005. {
  1006. struct fore200e* fore200e = (struct fore200e*) data;
  1007. unsigned long flags;
  1008. DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
  1009. spin_lock_irqsave(&fore200e->q_lock, flags);
  1010. fore200e_rx_irq((struct fore200e*) data);
  1011. spin_unlock_irqrestore(&fore200e->q_lock, flags);
  1012. }
  1013. #endif
  1014. static int
  1015. fore200e_select_scheme(struct atm_vcc* vcc)
  1016. {
  1017. /* fairly balance the VCs over (identical) buffer schemes */
  1018. int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
  1019. DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
  1020. vcc->itf, vcc->vpi, vcc->vci, scheme);
  1021. return scheme;
  1022. }
  1023. static int
  1024. fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
  1025. {
  1026. struct host_cmdq* cmdq = &fore200e->host_cmdq;
  1027. struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
  1028. struct activate_opcode activ_opcode;
  1029. struct deactivate_opcode deactiv_opcode;
  1030. struct vpvc vpvc;
  1031. int ok;
  1032. enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);
  1033. FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
  1034. if (activate) {
  1035. FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
  1036. activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
  1037. activ_opcode.aal = aal;
  1038. activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
  1039. activ_opcode.pad = 0;
  1040. }
  1041. else {
  1042. deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
  1043. deactiv_opcode.pad = 0;
  1044. }
  1045. vpvc.vci = vcc->vci;
  1046. vpvc.vpi = vcc->vpi;
  1047. *entry->status = STATUS_PENDING;
  1048. if (activate) {
  1049. #ifdef FORE200E_52BYTE_AAL0_SDU
  1050. mtu = 48;
  1051. #endif
  1052. /* the MTU is not used by the cp, except in the case of AAL0 */
  1053. fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
  1054. fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
  1055. fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
  1056. }
  1057. else {
  1058. fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
  1059. fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
  1060. }
  1061. ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
  1062. *entry->status = STATUS_FREE;
  1063. if (ok == 0) {
  1064. printk(FORE200E "unable to %s VC %d.%d.%d\n",
  1065. activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
  1066. return -EIO;
  1067. }
  1068. DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
  1069. activate ? "open" : "clos");
  1070. return 0;
  1071. }
  1072. #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
  1073. static void
  1074. fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
  1075. {
  1076. if (qos->txtp.max_pcr < ATM_OC3_PCR) {
  1077. /* compute the data cells to idle cells ratio from the tx PCR */
  1078. rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
  1079. rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
  1080. }
  1081. else {
  1082. /* disable rate control */
  1083. rate->data_cells = rate->idle_cells = 0;
  1084. }
  1085. }
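/* illustrative example (values not taken from the driver): a CBR VC requesting
   max_pcr = ATM_OC3_PCR / 2 gets data_cells = 255 / 2 = 127 and
   idle_cells = 255 - 127 = 128, i.e. idle cells fill about half of each 255-cell
   window, throttling the VC to roughly half of the OC-3 line rate */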
  1086. static int
  1087. fore200e_open(struct atm_vcc *vcc)
  1088. {
  1089. struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
  1090. struct fore200e_vcc* fore200e_vcc;
  1091. struct fore200e_vc_map* vc_map;
  1092. unsigned long flags;
  1093. int vci = vcc->vci;
  1094. short vpi = vcc->vpi;
  1095. ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
  1096. ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
  1097. spin_lock_irqsave(&fore200e->q_lock, flags);
  1098. vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
  1099. if (vc_map->vcc) {
  1100. spin_unlock_irqrestore(&fore200e->q_lock, flags);
  1101. printk(FORE200E "VC %d.%d.%d already in use\n",
  1102. fore200e->atm_dev->number, vpi, vci);
  1103. return -EINVAL;
  1104. }
  1105. vc_map->vcc = vcc;
  1106. spin_unlock_irqrestore(&fore200e->q_lock, flags);
  1107. fore200e_vcc = fore200e_kmalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
  1108. if (fore200e_vcc == NULL) {
  1109. vc_map->vcc = NULL;
  1110. return -ENOMEM;
  1111. }
  1112. DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
  1113. "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
  1114. vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
  1115. fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
  1116. vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
  1117. fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
  1118. vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
  1119. /* pseudo-CBR bandwidth requested? */
  1120. if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
  1121. down(&fore200e->rate_sf);
  1122. if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
  1123. up(&fore200e->rate_sf);
  1124. fore200e_kfree(fore200e_vcc);
  1125. vc_map->vcc = NULL;
  1126. return -EAGAIN;
  1127. }
  1128. /* reserve bandwidth */
  1129. fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
  1130. up(&fore200e->rate_sf);
  1131. }
  1132. vcc->itf = vcc->dev->number;
  1133. set_bit(ATM_VF_PARTIAL,&vcc->flags);
  1134. set_bit(ATM_VF_ADDR, &vcc->flags);
  1135. vcc->dev_data = fore200e_vcc;
  1136. if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
  1137. vc_map->vcc = NULL;
  1138. clear_bit(ATM_VF_ADDR, &vcc->flags);
  1139. clear_bit(ATM_VF_PARTIAL,&vcc->flags);
  1140. vcc->dev_data = NULL;
  1141. fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
  1142. fore200e_kfree(fore200e_vcc);
  1143. return -EINVAL;
  1144. }
  1145. /* compute rate control parameters */
  1146. if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
  1147. fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
  1148. set_bit(ATM_VF_HASQOS, &vcc->flags);
  1149. DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
  1150. vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
  1151. vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
  1152. fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
  1153. }
  1154. fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
  1155. fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
  1156. fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
  1157. /* new incarnation of the vcc */
  1158. vc_map->incarn = ++fore200e->incarn_count;
  1159. /* VC unusable before this flag is set */
  1160. set_bit(ATM_VF_READY, &vcc->flags);
  1161. return 0;
  1162. }
  1163. static void
  1164. fore200e_close(struct atm_vcc* vcc)
  1165. {
  1166. struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
  1167. struct fore200e_vcc* fore200e_vcc;
  1168. struct fore200e_vc_map* vc_map;
  1169. unsigned long flags;
  1170. ASSERT(vcc);
  1171. ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
  1172. ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
  1173. DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
  1174. clear_bit(ATM_VF_READY, &vcc->flags);
  1175. fore200e_activate_vcin(fore200e, 0, vcc, 0);
  1176. spin_lock_irqsave(&fore200e->q_lock, flags);
  1177. vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
  1178. /* the vc is no longer considered as "in use" by fore200e_open() */
  1179. vc_map->vcc = NULL;
  1180. vcc->itf = vcc->vci = vcc->vpi = 0;
  1181. fore200e_vcc = FORE200E_VCC(vcc);
  1182. vcc->dev_data = NULL;
  1183. spin_unlock_irqrestore(&fore200e->q_lock, flags);
  1184. /* release reserved bandwidth, if any */
  1185. if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
  1186. down(&fore200e->rate_sf);
  1187. fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
  1188. up(&fore200e->rate_sf);
  1189. clear_bit(ATM_VF_HASQOS, &vcc->flags);
  1190. }
  1191. clear_bit(ATM_VF_ADDR, &vcc->flags);
  1192. clear_bit(ATM_VF_PARTIAL,&vcc->flags);
  1193. ASSERT(fore200e_vcc);
  1194. fore200e_kfree(fore200e_vcc);
  1195. }
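/* Transmit path constraints enforced below: the cp is handed a single-segment
   tpd, so the PDU must sit in one contiguous, 32-bit aligned, DMA-able buffer;
   misaligned skbs and AAL0 PDUs that are not a whole number of ATM_CELL_PAYLOAD
   bytes are therefore bounced through a freshly allocated copy (zero-padded in
   the AAL0 case). When the tx queue is saturated the driver reaps completed
   entries itself and retries a bounded number of times
   (CONFIG_ATM_FORE200E_TX_RETRY) before dropping the PDU with -ENOBUFS. */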
  1196. static int
  1197. fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
  1198. {
  1199. struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
  1200. struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
  1201. struct fore200e_vc_map* vc_map;
  1202. struct host_txq* txq = &fore200e->host_txq;
  1203. struct host_txq_entry* entry;
  1204. struct tpd* tpd;
  1205. struct tpd_haddr tpd_haddr;
  1206. int retry = CONFIG_ATM_FORE200E_TX_RETRY;
  1207. int tx_copy = 0;
  1208. int tx_len = skb->len;
  1209. u32* cell_header = NULL;
  1210. unsigned char* skb_data;
  1211. int skb_len;
  1212. unsigned char* data;
  1213. unsigned long flags;
  1214. ASSERT(vcc);
  1215. ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
  1216. ASSERT(fore200e);
  1217. ASSERT(fore200e_vcc);
  1218. if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1219. DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vci);
  1220. dev_kfree_skb_any(skb);
  1221. return -EINVAL;
  1222. }
  1223. #ifdef FORE200E_52BYTE_AAL0_SDU
  1224. if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
  1225. cell_header = (u32*) skb->data;
  1226. skb_data = skb->data + 4; /* skip 4-byte cell header */
  1227. skb_len = tx_len = skb->len - 4;
  1228. DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
  1229. }
  1230. else
  1231. #endif
  1232. {
  1233. skb_data = skb->data;
  1234. skb_len = skb->len;
  1235. }
  1236. if (((unsigned long)skb_data) & 0x3) {
  1237. DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
  1238. tx_copy = 1;
  1239. tx_len = skb_len;
  1240. }
  1241. if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
  1242. /* this simply NUKES the PCA board */
  1243. DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
  1244. tx_copy = 1;
  1245. tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
  1246. }
  1247. if (tx_copy) {
  1248. data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
  1249. if (data == NULL) {
  1250. if (vcc->pop) {
  1251. vcc->pop(vcc, skb);
  1252. }
  1253. else {
  1254. dev_kfree_skb_any(skb);
  1255. }
  1256. return -ENOMEM;
  1257. }
  1258. memcpy(data, skb_data, skb_len);
  1259. if (skb_len < tx_len)
  1260. memset(data + skb_len, 0x00, tx_len - skb_len);
  1261. }
  1262. else {
  1263. data = skb_data;
  1264. }
  1265. vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
  1266. ASSERT(vc_map->vcc == vcc);
  1267. retry_here:
  1268. spin_lock_irqsave(&fore200e->q_lock, flags);
  1269. entry = &txq->host_entry[ txq->head ];
  1270. if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
  1271. /* try to free completed tx queue entries */
  1272. fore200e_tx_irq(fore200e);
  1273. if (*entry->status != STATUS_FREE) {
  1274. spin_unlock_irqrestore(&fore200e->q_lock, flags);
  1275. /* retry once again? */
  1276. if (--retry > 0) {
  1277. udelay(50);
  1278. goto retry_here;
  1279. }
  1280. atomic_inc(&vcc->stats->tx_err);
  1281. fore200e->tx_sat++;
  1282. DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
  1283. fore200e->name, fore200e->cp_queues->heartbeat);
  1284. if (vcc->pop) {
  1285. vcc->pop(vcc, skb);
  1286. }
  1287. else {
  1288. dev_kfree_skb_any(skb);
  1289. }
  1290. if (tx_copy)
  1291. kfree(data);
  1292. return -ENOBUFS;
  1293. }
  1294. }
  1295. entry->incarn = vc_map->incarn;
  1296. entry->vc_map = vc_map;
  1297. entry->skb = skb;
  1298. entry->data = tx_copy ? data : NULL;
  1299. tpd = entry->tpd;
  1300. tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
  1301. tpd->tsd[ 0 ].length = tx_len;
  1302. FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
  1303. txq->txing++;
  1304. /* The dma_map call above implies a dma_sync so the device can use it,
  1305. * thus no explicit dma_sync call is necessary here.
  1306. */
  1307. DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
  1308. vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
  1309. tpd->tsd[0].length, skb_len);
  1310. if (skb_len < fore200e_vcc->tx_min_pdu)
  1311. fore200e_vcc->tx_min_pdu = skb_len;
  1312. if (skb_len > fore200e_vcc->tx_max_pdu)
  1313. fore200e_vcc->tx_max_pdu = skb_len;
  1314. fore200e_vcc->tx_pdu++;
  1315. /* set tx rate control information */
  1316. tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
  1317. tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
  1318. if (cell_header) {
  1319. tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
  1320. tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
  1321. tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
  1322. tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
  1323. tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
  1324. }
  1325. else {
  1326. /* set the ATM header, common to all cells conveying the PDU */
  1327. tpd->atm_header.clp = 0;
  1328. tpd->atm_header.plt = 0;
  1329. tpd->atm_header.vci = vcc->vci;
  1330. tpd->atm_header.vpi = vcc->vpi;
  1331. tpd->atm_header.gfc = 0;
  1332. }
  1333. tpd->spec.length = tx_len;
  1334. tpd->spec.nseg = 1;
  1335. tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
  1336. tpd->spec.intr = 1;
  1337. tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */
  1338. tpd_haddr.pad = 0;
  1339. tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
  1340. *entry->status = STATUS_PENDING;
  1341. fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
  1342. spin_unlock_irqrestore(&fore200e->q_lock, flags);
  1343. return 0;
  1344. }
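/* Command queue protocol, as used by fore200e_getstats() and the other command
   issuers: the command parameters are written into the cp resident entry first,
   the host status word is set to STATUS_PENDING, and the opcode word is written
   last -- the cp appears to treat that final write as the doorbell for a new
   command. Completion is then detected by polling the status word for
   STATUS_COMPLETE with a bounded number of attempts (400 here). */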
  1345. static int
  1346. fore200e_getstats(struct fore200e* fore200e)
  1347. {
  1348. struct host_cmdq* cmdq = &fore200e->host_cmdq;
  1349. struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
  1350. struct stats_opcode opcode;
  1351. int ok;
  1352. u32 stats_dma_addr;
  1353. if (fore200e->stats == NULL) {
  1354. fore200e->stats = fore200e_kmalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
  1355. if (fore200e->stats == NULL)
  1356. return -ENOMEM;
  1357. }
  1358. stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
  1359. sizeof(struct stats), DMA_FROM_DEVICE);
  1360. FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
  1361. opcode.opcode = OPCODE_GET_STATS;
  1362. opcode.pad = 0;
  1363. fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
  1364. *entry->status = STATUS_PENDING;
  1365. fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
  1366. ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
  1367. *entry->status = STATUS_FREE;
  1368. fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
  1369. if (ok == 0) {
  1370. printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
  1371. return -EIO;
  1372. }
  1373. return 0;
  1374. }
  1375. static int
  1376. fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
  1377. {
  1378. /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
  1379. DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
  1380. vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
  1381. return -EINVAL;
  1382. }
  1383. static int
  1384. fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
  1385. {
  1386. /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
  1387. DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
  1388. vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
  1389. return -EINVAL;
  1390. }
  1391. #if 0 /* currently unused */
  1392. static int
  1393. fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
  1394. {
  1395. struct host_cmdq* cmdq = &fore200e->host_cmdq;
  1396. struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
  1397. struct oc3_opcode opcode;
  1398. int ok;
  1399. u32 oc3_regs_dma_addr;
  1400. oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
  1401. FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
  1402. opcode.opcode = OPCODE_GET_OC3;
  1403. opcode.reg = 0;
  1404. opcode.value = 0;
  1405. opcode.mask = 0;
  1406. fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
  1407. *entry->status = STATUS_PENDING;
1408. fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
  1409. ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
  1410. *entry->status = STATUS_FREE;
  1411. fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
  1412. if (ok == 0) {
  1413. printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
  1414. return -EIO;
  1415. }
  1416. return 0;
  1417. }
  1418. #endif
  1419. static int
  1420. fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
  1421. {
  1422. struct host_cmdq* cmdq = &fore200e->host_cmdq;
  1423. struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
  1424. struct oc3_opcode opcode;
  1425. int ok;
  1426. DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
  1427. FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
  1428. opcode.opcode = OPCODE_SET_OC3;
  1429. opcode.reg = reg;
  1430. opcode.value = value;
  1431. opcode.mask = mask;
  1432. fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
  1433. *entry->status = STATUS_PENDING;
  1434. fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
  1435. ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
  1436. *entry->status = STATUS_FREE;
  1437. if (ok == 0) {
  1438. printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
  1439. return -EIO;
  1440. }
  1441. return 0;
  1442. }
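/* Loopback control: judging from the mapping below, the SUNI_MCT_DLE bit of the
   SUNI MCT register selects the diagnostic (local PHY) loopback and SUNI_MCT_LLE
   the line (remote PHY) loopback; ATM_LM_NONE clears both. The register is
   updated through the masked OC-3 write command implemented by
   fore200e_set_oc3(). */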
  1443. static int
  1444. fore200e_setloop(struct fore200e* fore200e, int loop_mode)
  1445. {
  1446. u32 mct_value, mct_mask;
  1447. int error;
  1448. if (!capable(CAP_NET_ADMIN))
  1449. return -EPERM;
  1450. switch (loop_mode) {
  1451. case ATM_LM_NONE:
  1452. mct_value = 0;
  1453. mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
  1454. break;
  1455. case ATM_LM_LOC_PHY:
  1456. mct_value = mct_mask = SUNI_MCT_DLE;
  1457. break;
  1458. case ATM_LM_RMT_PHY:
  1459. mct_value = mct_mask = SUNI_MCT_LLE;
  1460. break;
  1461. default:
  1462. return -EINVAL;
  1463. }
  1464. error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
  1465. if (error == 0)
  1466. fore200e->loop_mode = loop_mode;
  1467. return error;
  1468. }
  1469. static inline unsigned int
  1470. fore200e_swap(unsigned int in)
  1471. {
  1472. #if defined(__LITTLE_ENDIAN)
  1473. return swab32(in);
  1474. #else
  1475. return in;
  1476. #endif
  1477. }
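/* The statistics block filled in by the cp seems to hold its counters in
   big-endian byte order, hence fore200e_swap(): counters are byte-swapped on
   little-endian hosts and passed through unchanged on big-endian ones before
   being summed or copied to user space. */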
  1478. static int
  1479. fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
  1480. {
  1481. struct sonet_stats tmp;
  1482. if (fore200e_getstats(fore200e) < 0)
  1483. return -EIO;
  1484. tmp.section_bip = fore200e_swap(fore200e->stats->oc3.section_bip8_errors);
  1485. tmp.line_bip = fore200e_swap(fore200e->stats->oc3.line_bip24_errors);
  1486. tmp.path_bip = fore200e_swap(fore200e->stats->oc3.path_bip8_errors);
  1487. tmp.line_febe = fore200e_swap(fore200e->stats->oc3.line_febe_errors);
  1488. tmp.path_febe = fore200e_swap(fore200e->stats->oc3.path_febe_errors);
  1489. tmp.corr_hcs = fore200e_swap(fore200e->stats->oc3.corr_hcs_errors);
  1490. tmp.uncorr_hcs = fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors);
  1491. tmp.tx_cells = fore200e_swap(fore200e->stats->aal0.cells_transmitted) +
  1492. fore200e_swap(fore200e->stats->aal34.cells_transmitted) +
  1493. fore200e_swap(fore200e->stats->aal5.cells_transmitted);
  1494. tmp.rx_cells = fore200e_swap(fore200e->stats->aal0.cells_received) +
  1495. fore200e_swap(fore200e->stats->aal34.cells_received) +
  1496. fore200e_swap(fore200e->stats->aal5.cells_received);
  1497. if (arg)
  1498. return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
  1499. return 0;
  1500. }
  1501. static int
  1502. fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
  1503. {
  1504. struct fore200e* fore200e = FORE200E_DEV(dev);
  1505. DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
  1506. switch (cmd) {
  1507. case SONET_GETSTAT:
  1508. return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
  1509. case SONET_GETDIAG:
  1510. return put_user(0, (int __user *)arg) ? -EFAULT : 0;
  1511. case ATM_SETLOOP:
  1512. return fore200e_setloop(fore200e, (int)(unsigned long)arg);
  1513. case ATM_GETLOOP:
  1514. return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
  1515. case ATM_QUERYLOOP:
  1516. return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
  1517. }
  1518. return -ENOSYS; /* not implemented */
  1519. }
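/* Pseudo-CBR renegotiation below returns the old reservation and takes the new
   one in a single critical section protected by rate_sf. Worked example
   (hypothetical figures): with available_cell_rate = 100000 cells/s and a VC
   currently holding 50000, a request for 120000 passes the
   "available + old >= new" test and leaves 30000 cells/s available, whereas a
   request for 160000 would fail with -EAGAIN. */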
  1520. static int
  1521. fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
  1522. {
  1523. struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
  1524. struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
  1525. if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1526. DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
  1527. return -EINVAL;
  1528. }
  1529. DPRINTK(2, "change_qos %d.%d.%d, "
  1530. "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
  1531. "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
  1532. "available_cell_rate = %u",
  1533. vcc->itf, vcc->vpi, vcc->vci,
  1534. fore200e_traffic_class[ qos->txtp.traffic_class ],
  1535. qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
  1536. fore200e_traffic_class[ qos->rxtp.traffic_class ],
  1537. qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
  1538. flags, fore200e->available_cell_rate);
  1539. if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
  1540. down(&fore200e->rate_sf);
  1541. if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
  1542. up(&fore200e->rate_sf);
  1543. return -EAGAIN;
  1544. }
  1545. fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
  1546. fore200e->available_cell_rate -= qos->txtp.max_pcr;
  1547. up(&fore200e->rate_sf);
  1548. memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
  1549. /* update rate control parameters */
  1550. fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
  1551. set_bit(ATM_VF_HASQOS, &vcc->flags);
  1552. return 0;
  1553. }
  1554. return -EINVAL;
  1555. }
  1556. static int __devinit
  1557. fore200e_irq_request(struct fore200e* fore200e)
  1558. {
  1559. if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
  1560. printk(FORE200E "unable to reserve IRQ %s for device %s\n",
  1561. fore200e_irq_itoa(fore200e->irq), fore200e->name);
  1562. return -EBUSY;
  1563. }
  1564. printk(FORE200E "IRQ %s reserved for device %s\n",
  1565. fore200e_irq_itoa(fore200e->irq), fore200e->name);
  1566. #ifdef FORE200E_USE_TASKLET
  1567. tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
  1568. tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
  1569. #endif
  1570. fore200e->state = FORE200E_STATE_IRQ;
  1571. return 0;
  1572. }
  1573. static int __devinit
  1574. fore200e_get_esi(struct fore200e* fore200e)
  1575. {
  1576. struct prom_data* prom = fore200e_kmalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
  1577. int ok, i;
  1578. if (!prom)
  1579. return -ENOMEM;
  1580. ok = fore200e->bus->prom_read(fore200e, prom);
  1581. if (ok < 0) {
  1582. fore200e_kfree(prom);
  1583. return -EBUSY;
  1584. }
  1585. printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
  1586. fore200e->name,
  1587. (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
  1588. prom->serial_number & 0xFFFF,
  1589. prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
  1590. prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
  1591. for (i = 0; i < ESI_LEN; i++) {
  1592. fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
  1593. }
  1594. fore200e_kfree(prom);
  1595. return 0;
  1596. }
  1597. static int __devinit
  1598. fore200e_alloc_rx_buf(struct fore200e* fore200e)
  1599. {
  1600. int scheme, magn, nbr, size, i;
  1601. struct host_bsq* bsq;
  1602. struct buffer* buffer;
  1603. for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
  1604. for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
  1605. bsq = &fore200e->host_bsq[ scheme ][ magn ];
  1606. nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
  1607. size = fore200e_rx_buf_size[ scheme ][ magn ];
  1608. DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
  1609. /* allocate the array of receive buffers */
  1610. buffer = bsq->buffer = fore200e_kmalloc(nbr * sizeof(struct buffer), GFP_KERNEL);
  1611. if (buffer == NULL)
  1612. return -ENOMEM;
  1613. bsq->freebuf = NULL;
  1614. for (i = 0; i < nbr; i++) {
  1615. buffer[ i ].scheme = scheme;
  1616. buffer[ i ].magn = magn;
  1617. #ifdef FORE200E_BSQ_DEBUG
  1618. buffer[ i ].index = i;
  1619. buffer[ i ].supplied = 0;
  1620. #endif
  1621. /* allocate the receive buffer body */
  1622. if (fore200e_chunk_alloc(fore200e,
  1623. &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
  1624. DMA_FROM_DEVICE) < 0) {
  1625. while (i > 0)
  1626. fore200e_chunk_free(fore200e, &buffer[ --i ].data);
  1627. fore200e_kfree(buffer);
  1628. return -ENOMEM;
  1629. }
  1630. /* insert the buffer into the free buffer list */
  1631. buffer[ i ].next = bsq->freebuf;
  1632. bsq->freebuf = &buffer[ i ];
  1633. }
  1634. /* all the buffers are free, initially */
  1635. bsq->freebuf_count = nbr;
  1636. #ifdef FORE200E_BSQ_DEBUG
  1637. bsq_audit(3, bsq, scheme, magn);
  1638. #endif
  1639. }
  1640. }
  1641. fore200e->state = FORE200E_STATE_ALLOC_BUF;
  1642. return 0;
  1643. }
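/* All the cp/host queues set up below follow the same pattern: each host
   resident entry keeps a pointer to a status word living in DMA-able memory plus
   a pointer to its cp resident counterpart, and the DMA address of the status
   word is written into the cp entry so the cp can report completion directly
   into host memory. The buffer supply queues additionally carry blocks of
   receive buffer descriptors (rbd_block). */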
  1644. static int __devinit
  1645. fore200e_init_bs_queue(struct fore200e* fore200e)
  1646. {
  1647. int scheme, magn, i;
  1648. struct host_bsq* bsq;
  1649. struct cp_bsq_entry __iomem * cp_entry;
  1650. for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
  1651. for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
  1652. DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
  1653. bsq = &fore200e->host_bsq[ scheme ][ magn ];
  1654. /* allocate and align the array of status words */
  1655. if (fore200e->bus->dma_chunk_alloc(fore200e,
  1656. &bsq->status,
  1657. sizeof(enum status),
  1658. QUEUE_SIZE_BS,
  1659. fore200e->bus->status_alignment) < 0) {
  1660. return -ENOMEM;
  1661. }
  1662. /* allocate and align the array of receive buffer descriptors */
  1663. if (fore200e->bus->dma_chunk_alloc(fore200e,
  1664. &bsq->rbd_block,
  1665. sizeof(struct rbd_block),
  1666. QUEUE_SIZE_BS,
  1667. fore200e->bus->descr_alignment) < 0) {
  1668. fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
  1669. return -ENOMEM;
  1670. }
  1671. /* get the base address of the cp resident buffer supply queue entries */
  1672. cp_entry = fore200e->virt_base +
  1673. fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
  1674. /* fill the host resident and cp resident buffer supply queue entries */
  1675. for (i = 0; i < QUEUE_SIZE_BS; i++) {
  1676. bsq->host_entry[ i ].status =
  1677. FORE200E_INDEX(bsq->status.align_addr, enum status, i);
  1678. bsq->host_entry[ i ].rbd_block =
  1679. FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
  1680. bsq->host_entry[ i ].rbd_block_dma =
  1681. FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
  1682. bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
  1683. *bsq->host_entry[ i ].status = STATUS_FREE;
  1684. fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
  1685. &cp_entry[ i ].status_haddr);
  1686. }
  1687. }
  1688. }
  1689. fore200e->state = FORE200E_STATE_INIT_BSQ;
  1690. return 0;
  1691. }
  1692. static int __devinit
  1693. fore200e_init_rx_queue(struct fore200e* fore200e)
  1694. {
  1695. struct host_rxq* rxq = &fore200e->host_rxq;
  1696. struct cp_rxq_entry __iomem * cp_entry;
  1697. int i;
  1698. DPRINTK(2, "receive queue is being initialized\n");
  1699. /* allocate and align the array of status words */
  1700. if (fore200e->bus->dma_chunk_alloc(fore200e,
  1701. &rxq->status,
  1702. sizeof(enum status),
  1703. QUEUE_SIZE_RX,
  1704. fore200e->bus->status_alignment) < 0) {
  1705. return -ENOMEM;
  1706. }
  1707. /* allocate and align the array of receive PDU descriptors */
  1708. if (fore200e->bus->dma_chunk_alloc(fore200e,
  1709. &rxq->rpd,
  1710. sizeof(struct rpd),
  1711. QUEUE_SIZE_RX,
  1712. fore200e->bus->descr_alignment) < 0) {
  1713. fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
  1714. return -ENOMEM;
  1715. }
  1716. /* get the base address of the cp resident rx queue entries */
  1717. cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
  1718. /* fill the host resident and cp resident rx entries */
  1719. for (i=0; i < QUEUE_SIZE_RX; i++) {
  1720. rxq->host_entry[ i ].status =
  1721. FORE200E_INDEX(rxq->status.align_addr, enum status, i);
  1722. rxq->host_entry[ i ].rpd =
  1723. FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
  1724. rxq->host_entry[ i ].rpd_dma =
  1725. FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
  1726. rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
  1727. *rxq->host_entry[ i ].status = STATUS_FREE;
  1728. fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
  1729. &cp_entry[ i ].status_haddr);
  1730. fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
  1731. &cp_entry[ i ].rpd_haddr);
  1732. }
  1733. /* set the head entry of the queue */
  1734. rxq->head = 0;
  1735. fore200e->state = FORE200E_STATE_INIT_RXQ;
  1736. return 0;
  1737. }
  1738. static int __devinit
  1739. fore200e_init_tx_queue(struct fore200e* fore200e)
  1740. {
  1741. struct host_txq* txq = &fore200e->host_txq;
  1742. struct cp_txq_entry __iomem * cp_entry;
  1743. int i;
  1744. DPRINTK(2, "transmit queue is being initialized\n");
  1745. /* allocate and align the array of status words */
  1746. if (fore200e->bus->dma_chunk_alloc(fore200e,
  1747. &txq->status,
  1748. sizeof(enum status),
  1749. QUEUE_SIZE_TX,
  1750. fore200e->bus->status_alignment) < 0) {
  1751. return -ENOMEM;
  1752. }
  1753. /* allocate and align the array of transmit PDU descriptors */
  1754. if (fore200e->bus->dma_chunk_alloc(fore200e,
  1755. &txq->tpd,
  1756. sizeof(struct tpd),
  1757. QUEUE_SIZE_TX,
  1758. fore200e->bus->descr_alignment) < 0) {
  1759. fore200e->bus->dma_chunk_free(fore200e, &txq->status);
  1760. return -ENOMEM;
  1761. }
  1762. /* get the base address of the cp resident tx queue entries */
  1763. cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
  1764. /* fill the host resident and cp resident tx entries */
  1765. for (i=0; i < QUEUE_SIZE_TX; i++) {
  1766. txq->host_entry[ i ].status =
  1767. FORE200E_INDEX(txq->status.align_addr, enum status, i);
  1768. txq->host_entry[ i ].tpd =
  1769. FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
  1770. txq->host_entry[ i ].tpd_dma =
  1771. FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
  1772. txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
  1773. *txq->host_entry[ i ].status = STATUS_FREE;
  1774. fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
  1775. &cp_entry[ i ].status_haddr);
  1776. /* although there is a one-to-one mapping of tx queue entries and tpds,
  1777. we do not write here the DMA (physical) base address of each tpd into
  1778. the related cp resident entry, because the cp relies on this write
  1779. operation to detect that a new pdu has been submitted for tx */
  1780. }
  1781. /* set the head and tail entries of the queue */
  1782. txq->head = 0;
  1783. txq->tail = 0;
  1784. fore200e->state = FORE200E_STATE_INIT_TXQ;
  1785. return 0;
  1786. }
  1787. static int __devinit
  1788. fore200e_init_cmd_queue(struct fore200e* fore200e)
  1789. {
  1790. struct host_cmdq* cmdq = &fore200e->host_cmdq;
  1791. struct cp_cmdq_entry __iomem * cp_entry;
  1792. int i;
  1793. DPRINTK(2, "command queue is being initialized\n");
  1794. /* allocate and align the array of status words */
  1795. if (fore200e->bus->dma_chunk_alloc(fore200e,
  1796. &cmdq->status,
  1797. sizeof(enum status),
  1798. QUEUE_SIZE_CMD,
  1799. fore200e->bus->status_alignment) < 0) {
  1800. return -ENOMEM;
  1801. }
  1802. /* get the base address of the cp resident cmd queue entries */
  1803. cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
  1804. /* fill the host resident and cp resident cmd entries */
  1805. for (i=0; i < QUEUE_SIZE_CMD; i++) {
  1806. cmdq->host_entry[ i ].status =
  1807. FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
  1808. cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
  1809. *cmdq->host_entry[ i ].status = STATUS_FREE;
  1810. fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
  1811. &cp_entry[ i ].status_haddr);
  1812. }
  1813. /* set the head entry of the queue */
  1814. cmdq->head = 0;
  1815. fore200e->state = FORE200E_STATE_INIT_CMDQ;
  1816. return 0;
  1817. }
  1818. static void __init
  1819. fore200e_param_bs_queue(struct fore200e* fore200e,
  1820. enum buffer_scheme scheme, enum buffer_magn magn,
  1821. int queue_length, int pool_size, int supply_blksize)
  1822. {
  1823. struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
  1824. fore200e->bus->write(queue_length, &bs_spec->queue_length);
  1825. fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
  1826. fore200e->bus->write(pool_size, &bs_spec->pool_size);
  1827. fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
  1828. }
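/* Initialization handshake with the cp: the host fills the init area of the cp
   resident queue block (connection count, queue lengths, descriptor extensions
   and the buffer supply parameters written by fore200e_param_bs_queue() above),
   sets the init status to STATUS_PENDING, writes OPCODE_INITIALIZE and then
   polls that status for STATUS_COMPLETE. */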
  1829. static int __devinit
  1830. fore200e_initialize(struct fore200e* fore200e)
  1831. {
  1832. struct cp_queues __iomem * cpq;
  1833. int ok, scheme, magn;
  1834. DPRINTK(2, "device %s being initialized\n", fore200e->name);
  1835. init_MUTEX(&fore200e->rate_sf);
  1836. spin_lock_init(&fore200e->q_lock);
  1837. cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
  1838. /* enable cp to host interrupts */
  1839. fore200e->bus->write(1, &cpq->imask);
  1840. if (fore200e->bus->irq_enable)
  1841. fore200e->bus->irq_enable(fore200e);
  1842. fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
  1843. fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
  1844. fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len);
  1845. fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len);
  1846. fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
  1847. fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);
  1848. for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
  1849. for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
  1850. fore200e_param_bs_queue(fore200e, scheme, magn,
  1851. QUEUE_SIZE_BS,
  1852. fore200e_rx_buf_nbr[ scheme ][ magn ],
  1853. RBD_BLK_SIZE);
  1854. /* issue the initialize command */
  1855. fore200e->bus->write(STATUS_PENDING, &cpq->init.status);
  1856. fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
  1857. ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
  1858. if (ok == 0) {
  1859. printk(FORE200E "device %s initialization failed\n", fore200e->name);
  1860. return -ENODEV;
  1861. }
  1862. printk(FORE200E "device %s initialized\n", fore200e->name);
  1863. fore200e->state = FORE200E_STATE_INITIALIZE;
  1864. return 0;
  1865. }
  1866. static void __devinit
  1867. fore200e_monitor_putc(struct fore200e* fore200e, char c)
  1868. {
  1869. struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
  1870. #if 0
  1871. printk("%c", c);
  1872. #endif
  1873. fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
  1874. }
  1875. static int __devinit
  1876. fore200e_monitor_getc(struct fore200e* fore200e)
  1877. {
  1878. struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
  1879. unsigned long timeout = jiffies + msecs_to_jiffies(50);
  1880. int c;
  1881. while (time_before(jiffies, timeout)) {
  1882. c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
  1883. if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
  1884. fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
  1885. #if 0
  1886. printk("%c", c & 0xFF);
  1887. #endif
  1888. return c & 0xFF;
  1889. }
  1890. }
  1891. return -1;
  1892. }
  1893. static void __devinit
  1894. fore200e_monitor_puts(struct fore200e* fore200e, char* str)
  1895. {
  1896. while (*str) {
  1897. /* the i960 monitor doesn't accept any new character if it has something to say */
  1898. while (fore200e_monitor_getc(fore200e) >= 0);
  1899. fore200e_monitor_putc(fore200e, *str++);
  1900. }
  1901. while (fore200e_monitor_getc(fore200e) >= 0);
  1902. }
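/* The on-board boot monitor is driven through a polled "soft UART": bytes are
   exchanged one at a time via the send/recv words of the cp_monitor area, with
   FORE200E_CP_MONITOR_UART_AVAIL flagging a valid character.
   fore200e_start_fw() below simply types "go <start_offset>" at the monitor
   prompt and then waits for the firmware to report BSTAT_CP_RUNNING. */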
  1903. static int __devinit
  1904. fore200e_start_fw(struct fore200e* fore200e)
  1905. {
  1906. int ok;
  1907. char cmd[ 48 ];
  1908. struct fw_header* fw_header = (struct fw_header*) fore200e->bus->fw_data;
  1909. DPRINTK(2, "device %s firmware being started\n", fore200e->name);
  1910. #if defined(__sparc_v9__)
  1911. /* reported to be required by SBA cards on some sparc64 hosts */
  1912. fore200e_spin(100);
  1913. #endif
  1914. sprintf(cmd, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
  1915. fore200e_monitor_puts(fore200e, cmd);
  1916. ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000);
  1917. if (ok == 0) {
  1918. printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
  1919. return -ENODEV;
  1920. }
  1921. printk(FORE200E "device %s firmware started\n", fore200e->name);
  1922. fore200e->state = FORE200E_STATE_START_FW;
  1923. return 0;
  1924. }
  1925. static int __devinit
  1926. fore200e_load_fw(struct fore200e* fore200e)
  1927. {
  1928. u32* fw_data = (u32*) fore200e->bus->fw_data;
  1929. u32 fw_size = (u32) *fore200e->bus->fw_size / sizeof(u32);
  1930. struct fw_header* fw_header = (struct fw_header*) fw_data;
  1931. u32 __iomem *load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
  1932. DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
  1933. fore200e->name, load_addr, fw_size);
  1934. if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
  1935. printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
  1936. return -ENODEV;
  1937. }
  1938. for (; fw_size--; fw_data++, load_addr++)
  1939. fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
  1940. fore200e->state = FORE200E_STATE_LOAD_FW;
  1941. return 0;
  1942. }
  1943. static int __devinit
  1944. fore200e_register(struct fore200e* fore200e)
  1945. {
  1946. struct atm_dev* atm_dev;
  1947. DPRINTK(2, "device %s being registered\n", fore200e->name);
  1948. atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1,
  1949. NULL);
  1950. if (atm_dev == NULL) {
  1951. printk(FORE200E "unable to register device %s\n", fore200e->name);
  1952. return -ENODEV;
  1953. }
  1954. atm_dev->dev_data = fore200e;
  1955. fore200e->atm_dev = atm_dev;
  1956. atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
  1957. atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
  1958. fore200e->available_cell_rate = ATM_OC3_PCR;
  1959. fore200e->state = FORE200E_STATE_REGISTER;
  1960. return 0;
  1961. }
  1962. static int __devinit
  1963. fore200e_init(struct fore200e* fore200e)
  1964. {
  1965. if (fore200e_register(fore200e) < 0)
  1966. return -ENODEV;
  1967. if (fore200e->bus->configure(fore200e) < 0)
  1968. return -ENODEV;
  1969. if (fore200e->bus->map(fore200e) < 0)
  1970. return -ENODEV;
  1971. if (fore200e_reset(fore200e, 1) < 0)
  1972. return -ENODEV;
  1973. if (fore200e_load_fw(fore200e) < 0)
  1974. return -ENODEV;
  1975. if (fore200e_start_fw(fore200e) < 0)
  1976. return -ENODEV;
  1977. if (fore200e_initialize(fore200e) < 0)
  1978. return -ENODEV;
  1979. if (fore200e_init_cmd_queue(fore200e) < 0)
  1980. return -ENOMEM;
  1981. if (fore200e_init_tx_queue(fore200e) < 0)
  1982. return -ENOMEM;
  1983. if (fore200e_init_rx_queue(fore200e) < 0)
  1984. return -ENOMEM;
  1985. if (fore200e_init_bs_queue(fore200e) < 0)
  1986. return -ENOMEM;
  1987. if (fore200e_alloc_rx_buf(fore200e) < 0)
  1988. return -ENOMEM;
  1989. if (fore200e_get_esi(fore200e) < 0)
  1990. return -EIO;
  1991. if (fore200e_irq_request(fore200e) < 0)
  1992. return -EBUSY;
  1993. fore200e_supply(fore200e);
  1994. /* all done, board initialization is now complete */
  1995. fore200e->state = FORE200E_STATE_COMPLETE;
  1996. return 0;
  1997. }
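/* fore200e->state records how far fore200e_init() got; presumably
   fore200e_shutdown() (defined earlier in this file) uses it to undo only the
   steps that actually completed when initialization fails half-way or the
   module is unloaded. */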
  1998. static int __devinit
  1999. fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
  2000. {
  2001. const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data;
  2002. struct fore200e* fore200e;
  2003. int err = 0;
  2004. static int index = 0;
  2005. if (pci_enable_device(pci_dev)) {
  2006. err = -EINVAL;
  2007. goto out;
  2008. }
  2009. fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
  2010. if (fore200e == NULL) {
  2011. err = -ENOMEM;
  2012. goto out_disable;
  2013. }
  2014. fore200e->bus = bus;
  2015. fore200e->bus_dev = pci_dev;
  2016. fore200e->irq = pci_dev->irq;
  2017. fore200e->phys_base = pci_resource_start(pci_dev, 0);
2018. sprintf(fore200e->name, "%s-%d", bus->model_name, index);
  2019. pci_set_master(pci_dev);
  2020. printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
  2021. fore200e->bus->model_name,
  2022. fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
  2023. sprintf(fore200e->name, "%s-%d", bus->model_name, index);
  2024. err = fore200e_init(fore200e);
  2025. if (err < 0) {
  2026. fore200e_shutdown(fore200e);
  2027. goto out_free;
  2028. }
  2029. ++index;
  2030. pci_set_drvdata(pci_dev, fore200e);
  2031. out:
  2032. return err;
  2033. out_free:
  2034. kfree(fore200e);
  2035. out_disable:
  2036. pci_disable_device(pci_dev);
  2037. goto out;
  2038. }
  2039. static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev)
  2040. {
  2041. struct fore200e *fore200e;
  2042. fore200e = pci_get_drvdata(pci_dev);
  2043. fore200e_shutdown(fore200e);
  2044. kfree(fore200e);
  2045. pci_disable_device(pci_dev);
  2046. }
  2047. #ifdef CONFIG_ATM_FORE200E_PCA
  2048. static struct pci_device_id fore200e_pca_tbl[] = {
  2049. { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
  2050. 0, 0, (unsigned long) &fore200e_bus[0] },
  2051. { 0, }
  2052. };
  2053. MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
  2054. static struct pci_driver fore200e_pca_driver = {
  2055. .name = "fore_200e",
  2056. .probe = fore200e_pca_detect,
  2057. .remove = __devexit_p(fore200e_pca_remove_one),
  2058. .id_table = fore200e_pca_tbl,
  2059. };
  2060. #endif
  2061. static int __init
  2062. fore200e_module_init(void)
  2063. {
  2064. const struct fore200e_bus* bus;
  2065. struct fore200e* fore200e;
  2066. int index;
  2067. printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
  2068. /* for each configured bus interface */
  2069. for (bus = fore200e_bus; bus->model_name; bus++) {
  2070. /* detect all boards present on that bus */
  2071. for (index = 0; bus->detect && (fore200e = bus->detect(bus, index)); index++) {
  2072. printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
  2073. fore200e->bus->model_name,
  2074. fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
  2075. sprintf(fore200e->name, "%s-%d", bus->model_name, index);
  2076. if (fore200e_init(fore200e) < 0) {
  2077. fore200e_shutdown(fore200e);
  2078. break;
  2079. }
  2080. list_add(&fore200e->entry, &fore200e_boards);
  2081. }
  2082. }
  2083. #ifdef CONFIG_ATM_FORE200E_PCA
  2084. if (!pci_register_driver(&fore200e_pca_driver))
  2085. return 0;
  2086. #endif
  2087. if (!list_empty(&fore200e_boards))
  2088. return 0;
  2089. return -ENODEV;
  2090. }
  2091. static void __exit
  2092. fore200e_module_cleanup(void)
  2093. {
  2094. struct fore200e *fore200e, *next;
  2095. #ifdef CONFIG_ATM_FORE200E_PCA
  2096. pci_unregister_driver(&fore200e_pca_driver);
  2097. #endif
  2098. list_for_each_entry_safe(fore200e, next, &fore200e_boards, entry) {
  2099. fore200e_shutdown(fore200e);
  2100. kfree(fore200e);
  2101. }
  2102. DPRINTK(1, "module being removed\n");
  2103. }
  2104. static int
  2105. fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
  2106. {
  2107. struct fore200e* fore200e = FORE200E_DEV(dev);
  2108. struct fore200e_vcc* fore200e_vcc;
  2109. struct atm_vcc* vcc;
  2110. int i, len, left = *pos;
  2111. unsigned long flags;
  2112. if (!left--) {
  2113. if (fore200e_getstats(fore200e) < 0)
  2114. return -EIO;
  2115. len = sprintf(page,"\n"
  2116. " device:\n"
  2117. " internal name:\t\t%s\n", fore200e->name);
  2118. /* print bus-specific information */
  2119. if (fore200e->bus->proc_read)
  2120. len += fore200e->bus->proc_read(fore200e, page + len);
  2121. len += sprintf(page + len,
  2122. " interrupt line:\t\t%s\n"
  2123. " physical base address:\t0x%p\n"
  2124. " virtual base address:\t0x%p\n"
  2125. " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
  2126. " board serial number:\t\t%d\n\n",
  2127. fore200e_irq_itoa(fore200e->irq),
  2128. (void*)fore200e->phys_base,
  2129. fore200e->virt_base,
  2130. fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
  2131. fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
  2132. fore200e->esi[4] * 256 + fore200e->esi[5]);
  2133. return len;
  2134. }
  2135. if (!left--)
  2136. return sprintf(page,
  2137. " free small bufs, scheme 1:\t%d\n"
  2138. " free large bufs, scheme 1:\t%d\n"
  2139. " free small bufs, scheme 2:\t%d\n"
  2140. " free large bufs, scheme 2:\t%d\n",
  2141. fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
  2142. fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
  2143. fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
  2144. fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
  2145. if (!left--) {
  2146. u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
  2147. len = sprintf(page,"\n\n"
  2148. " cell processor:\n"
  2149. " heartbeat state:\t\t");
  2150. if (hb >> 16 != 0xDEAD)
  2151. len += sprintf(page + len, "0x%08x\n", hb);
  2152. else
  2153. len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
  2154. return len;
  2155. }
  2156. if (!left--) {
  2157. static const char* media_name[] = {
  2158. "unshielded twisted pair",
  2159. "multimode optical fiber ST",
  2160. "multimode optical fiber SC",
  2161. "single-mode optical fiber ST",
  2162. "single-mode optical fiber SC",
  2163. "unknown"
  2164. };
  2165. static const char* oc3_mode[] = {
  2166. "normal operation",
  2167. "diagnostic loopback",
  2168. "line loopback",
  2169. "unknown"
  2170. };
  2171. u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
  2172. u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
  2173. u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
  2174. u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
  2175. u32 oc3_index;
2176. if (media_index > 4) /* media_index is unsigned, so it cannot be negative */
  2177. media_index = 5;
  2178. switch (fore200e->loop_mode) {
  2179. case ATM_LM_NONE: oc3_index = 0;
  2180. break;
  2181. case ATM_LM_LOC_PHY: oc3_index = 1;
  2182. break;
  2183. case ATM_LM_RMT_PHY: oc3_index = 2;
  2184. break;
  2185. default: oc3_index = 3;
  2186. }
  2187. return sprintf(page,
  2188. " firmware release:\t\t%d.%d.%d\n"
  2189. " monitor release:\t\t%d.%d\n"
  2190. " media type:\t\t\t%s\n"
  2191. " OC-3 revision:\t\t0x%x\n"
  2192. " OC-3 mode:\t\t\t%s",
  2193. fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
  2194. mon960_release >> 16, mon960_release << 16 >> 16,
  2195. media_name[ media_index ],
  2196. oc3_revision,
  2197. oc3_mode[ oc3_index ]);
  2198. }
  2199. if (!left--) {
  2200. struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
  2201. return sprintf(page,
  2202. "\n\n"
  2203. " monitor:\n"
  2204. " version number:\t\t%d\n"
  2205. " boot status word:\t\t0x%08x\n",
  2206. fore200e->bus->read(&cp_monitor->mon_version),
  2207. fore200e->bus->read(&cp_monitor->bstat));
  2208. }
  2209. if (!left--)
  2210. return sprintf(page,
  2211. "\n"
  2212. " device statistics:\n"
  2213. " 4b5b:\n"
  2214. " crc_header_errors:\t\t%10u\n"
  2215. " framing_errors:\t\t%10u\n",
  2216. fore200e_swap(fore200e->stats->phy.crc_header_errors),
  2217. fore200e_swap(fore200e->stats->phy.framing_errors));
  2218. if (!left--)
  2219. return sprintf(page, "\n"
  2220. " OC-3:\n"
  2221. " section_bip8_errors:\t%10u\n"
  2222. " path_bip8_errors:\t\t%10u\n"
  2223. " line_bip24_errors:\t\t%10u\n"
  2224. " line_febe_errors:\t\t%10u\n"
  2225. " path_febe_errors:\t\t%10u\n"
  2226. " corr_hcs_errors:\t\t%10u\n"
  2227. " ucorr_hcs_errors:\t\t%10u\n",
  2228. fore200e_swap(fore200e->stats->oc3.section_bip8_errors),
  2229. fore200e_swap(fore200e->stats->oc3.path_bip8_errors),
  2230. fore200e_swap(fore200e->stats->oc3.line_bip24_errors),
  2231. fore200e_swap(fore200e->stats->oc3.line_febe_errors),
  2232. fore200e_swap(fore200e->stats->oc3.path_febe_errors),
  2233. fore200e_swap(fore200e->stats->oc3.corr_hcs_errors),
  2234. fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors));
  2235. if (!left--)
  2236. return sprintf(page,"\n"
  2237. " ATM:\t\t\t\t cells\n"
  2238. " TX:\t\t\t%10u\n"
  2239. " RX:\t\t\t%10u\n"
  2240. " vpi out of range:\t\t%10u\n"
  2241. " vpi no conn:\t\t%10u\n"
  2242. " vci out of range:\t\t%10u\n"
  2243. " vci no conn:\t\t%10u\n",
  2244. fore200e_swap(fore200e->stats->atm.cells_transmitted),
  2245. fore200e_swap(fore200e->stats->atm.cells_received),
  2246. fore200e_swap(fore200e->stats->atm.vpi_bad_range),
  2247. fore200e_swap(fore200e->stats->atm.vpi_no_conn),
  2248. fore200e_swap(fore200e->stats->atm.vci_bad_range),
  2249. fore200e_swap(fore200e->stats->atm.vci_no_conn));
  2250. if (!left--)
  2251. return sprintf(page,"\n"
  2252. " AAL0:\t\t\t cells\n"
  2253. " TX:\t\t\t%10u\n"
  2254. " RX:\t\t\t%10u\n"
  2255. " dropped:\t\t\t%10u\n",
  2256. fore200e_swap(fore200e->stats->aal0.cells_transmitted),
  2257. fore200e_swap(fore200e->stats->aal0.cells_received),
  2258. fore200e_swap(fore200e->stats->aal0.cells_dropped));
  2259. if (!left--)
  2260. return sprintf(page,"\n"
  2261. " AAL3/4:\n"
  2262. " SAR sublayer:\t\t cells\n"
  2263. " TX:\t\t\t%10u\n"
  2264. " RX:\t\t\t%10u\n"
  2265. " dropped:\t\t\t%10u\n"
  2266. " CRC errors:\t\t%10u\n"
  2267. " protocol errors:\t\t%10u\n\n"
  2268. " CS sublayer:\t\t PDUs\n"
  2269. " TX:\t\t\t%10u\n"
  2270. " RX:\t\t\t%10u\n"
  2271. " dropped:\t\t\t%10u\n"
  2272. " protocol errors:\t\t%10u\n",
  2273. fore200e_swap(fore200e->stats->aal34.cells_transmitted),
  2274. fore200e_swap(fore200e->stats->aal34.cells_received),
  2275. fore200e_swap(fore200e->stats->aal34.cells_dropped),
  2276. fore200e_swap(fore200e->stats->aal34.cells_crc_errors),
  2277. fore200e_swap(fore200e->stats->aal34.cells_protocol_errors),
  2278. fore200e_swap(fore200e->stats->aal34.cspdus_transmitted),
  2279. fore200e_swap(fore200e->stats->aal34.cspdus_received),
  2280. fore200e_swap(fore200e->stats->aal34.cspdus_dropped),
  2281. fore200e_swap(fore200e->stats->aal34.cspdus_protocol_errors));
  2282. if (!left--)
  2283. return sprintf(page,"\n"
  2284. " AAL5:\n"
  2285. " SAR sublayer:\t\t cells\n"
  2286. " TX:\t\t\t%10u\n"
  2287. " RX:\t\t\t%10u\n"
  2288. " dropped:\t\t\t%10u\n"
  2289. " congestions:\t\t%10u\n\n"
  2290. " CS sublayer:\t\t PDUs\n"
  2291. " TX:\t\t\t%10u\n"
  2292. " RX:\t\t\t%10u\n"
  2293. " dropped:\t\t\t%10u\n"
  2294. " CRC errors:\t\t%10u\n"
  2295. " protocol errors:\t\t%10u\n",
  2296. fore200e_swap(fore200e->stats->aal5.cells_transmitted),
  2297. fore200e_swap(fore200e->stats->aal5.cells_received),
  2298. fore200e_swap(fore200e->stats->aal5.cells_dropped),
  2299. fore200e_swap(fore200e->stats->aal5.congestion_experienced),
  2300. fore200e_swap(fore200e->stats->aal5.cspdus_transmitted),
  2301. fore200e_swap(fore200e->stats->aal5.cspdus_received),
  2302. fore200e_swap(fore200e->stats->aal5.cspdus_dropped),
  2303. fore200e_swap(fore200e->stats->aal5.cspdus_crc_errors),
  2304. fore200e_swap(fore200e->stats->aal5.cspdus_protocol_errors));
  2305. if (!left--)
  2306. return sprintf(page,"\n"
  2307. " AUX:\t\t allocation failures\n"
  2308. " small b1:\t\t\t%10u\n"
  2309. " large b1:\t\t\t%10u\n"
  2310. " small b2:\t\t\t%10u\n"
  2311. " large b2:\t\t\t%10u\n"
  2312. " RX PDUs:\t\t\t%10u\n"
  2313. " TX PDUs:\t\t\t%10lu\n",
  2314. fore200e_swap(fore200e->stats->aux.small_b1_failed),
  2315. fore200e_swap(fore200e->stats->aux.large_b1_failed),
  2316. fore200e_swap(fore200e->stats->aux.small_b2_failed),
  2317. fore200e_swap(fore200e->stats->aux.large_b2_failed),
  2318. fore200e_swap(fore200e->stats->aux.rpd_alloc_failed),
  2319. fore200e->tx_sat);
  2320. if (!left--)
  2321. return sprintf(page,"\n"
  2322. " receive carrier:\t\t\t%s\n",
  2323. fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
  2324. if (!left--) {
  2325. return sprintf(page,"\n"
  2326. " VCCs:\n address VPI VCI AAL "
  2327. "TX PDUs TX min/max size RX PDUs RX min/max size\n");
  2328. }
  2329. for (i = 0; i < NBR_CONNECT; i++) {
  2330. vcc = fore200e->vc_map[i].vcc;
  2331. if (vcc == NULL)
  2332. continue;
  2333. spin_lock_irqsave(&fore200e->q_lock, flags);
  2334. if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
  2335. fore200e_vcc = FORE200E_VCC(vcc);
  2336. ASSERT(fore200e_vcc);
  2337. len = sprintf(page,
  2338. " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
  2339. (u32)(unsigned long)vcc,
  2340. vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
  2341. fore200e_vcc->tx_pdu,
  2342. fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
  2343. fore200e_vcc->tx_max_pdu,
  2344. fore200e_vcc->rx_pdu,
  2345. fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
  2346. fore200e_vcc->rx_max_pdu);
  2347. spin_unlock_irqrestore(&fore200e->q_lock, flags);
  2348. return len;
  2349. }
  2350. spin_unlock_irqrestore(&fore200e->q_lock, flags);
  2351. }
  2352. return 0;
  2353. }
  2354. module_init(fore200e_module_init);
  2355. module_exit(fore200e_module_cleanup);
  2356. static const struct atmdev_ops fore200e_ops =
  2357. {
  2358. .open = fore200e_open,
  2359. .close = fore200e_close,
  2360. .ioctl = fore200e_ioctl,
  2361. .getsockopt = fore200e_getsockopt,
  2362. .setsockopt = fore200e_setsockopt,
  2363. .send = fore200e_send,
  2364. .change_qos = fore200e_change_qos,
  2365. .proc_read = fore200e_proc_read,
  2366. .owner = THIS_MODULE
  2367. };
  2368. #ifdef CONFIG_ATM_FORE200E_PCA
  2369. extern const unsigned char _fore200e_pca_fw_data[];
  2370. extern const unsigned int _fore200e_pca_fw_size;
  2371. #endif
  2372. #ifdef CONFIG_ATM_FORE200E_SBA
  2373. extern const unsigned char _fore200e_sba_fw_data[];
  2374. extern const unsigned int _fore200e_sba_fw_size;
  2375. #endif
  2376. static const struct fore200e_bus fore200e_bus[] = {
  2377. #ifdef CONFIG_ATM_FORE200E_PCA
  2378. { "PCA-200E", "pca200e", 32, 4, 32,
  2379. _fore200e_pca_fw_data, &_fore200e_pca_fw_size,
  2380. fore200e_pca_read,
  2381. fore200e_pca_write,
  2382. fore200e_pca_dma_map,
  2383. fore200e_pca_dma_unmap,
  2384. fore200e_pca_dma_sync_for_cpu,
  2385. fore200e_pca_dma_sync_for_device,
  2386. fore200e_pca_dma_chunk_alloc,
  2387. fore200e_pca_dma_chunk_free,
  2388. NULL,
  2389. fore200e_pca_configure,
  2390. fore200e_pca_map,
  2391. fore200e_pca_reset,
  2392. fore200e_pca_prom_read,
  2393. fore200e_pca_unmap,
  2394. NULL,
  2395. fore200e_pca_irq_check,
  2396. fore200e_pca_irq_ack,
  2397. fore200e_pca_proc_read,
  2398. },
  2399. #endif
  2400. #ifdef CONFIG_ATM_FORE200E_SBA
  2401. { "SBA-200E", "sba200e", 32, 64, 32,
  2402. _fore200e_sba_fw_data, &_fore200e_sba_fw_size,
  2403. fore200e_sba_read,
  2404. fore200e_sba_write,
  2405. fore200e_sba_dma_map,
  2406. fore200e_sba_dma_unmap,
  2407. fore200e_sba_dma_sync_for_cpu,
  2408. fore200e_sba_dma_sync_for_device,
  2409. fore200e_sba_dma_chunk_alloc,
  2410. fore200e_sba_dma_chunk_free,
  2411. fore200e_sba_detect,
  2412. fore200e_sba_configure,
  2413. fore200e_sba_map,
  2414. fore200e_sba_reset,
  2415. fore200e_sba_prom_read,
  2416. fore200e_sba_unmap,
  2417. fore200e_sba_irq_enable,
  2418. fore200e_sba_irq_check,
  2419. fore200e_sba_irq_ack,
  2420. fore200e_sba_proc_read,
  2421. },
  2422. #endif
  2423. {}
  2424. };
  2425. #ifdef MODULE_LICENSE
  2426. MODULE_LICENSE("GPL");
  2427. #endif