spider_net.c

  1. /*
  2. * Network device driver for Cell Processor-Based Blade and Celleb platform
  3. *
  4. * (C) Copyright IBM Corp. 2005
  5. * (C) Copyright 2006 TOSHIBA CORPORATION
  6. *
  7. * Authors : Utz Bacher <utz.bacher@de.ibm.com>
  8. * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published by
  12. * the Free Software Foundation; either version 2, or (at your option)
  13. * any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  18. * GNU General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License
  21. * along with this program; if not, write to the Free Software
  22. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  23. */
  24. #include <linux/compiler.h>
  25. #include <linux/crc32.h>
  26. #include <linux/delay.h>
  27. #include <linux/etherdevice.h>
  28. #include <linux/ethtool.h>
  29. #include <linux/firmware.h>
  30. #include <linux/if_vlan.h>
  31. #include <linux/in.h>
  32. #include <linux/init.h>
  33. #include <linux/ioport.h>
  34. #include <linux/ip.h>
  35. #include <linux/kernel.h>
  36. #include <linux/mii.h>
  37. #include <linux/module.h>
  38. #include <linux/netdevice.h>
  39. #include <linux/device.h>
  40. #include <linux/pci.h>
  41. #include <linux/skbuff.h>
  42. #include <linux/slab.h>
  43. #include <linux/tcp.h>
  44. #include <linux/types.h>
  45. #include <linux/vmalloc.h>
  46. #include <linux/wait.h>
  47. #include <linux/workqueue.h>
  48. #include <asm/bitops.h>
  49. #include <asm/pci-bridge.h>
  50. #include <net/checksum.h>
  51. #include "spider_net.h"
  52. MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
  53. "<Jens.Osterkamp@de.ibm.com>");
  54. MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
  55. MODULE_LICENSE("GPL");
  56. MODULE_VERSION(VERSION);
  57. static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
  58. static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
  59. module_param(rx_descriptors, int, 0444);
  60. module_param(tx_descriptors, int, 0444);
  61. MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
  62. "in rx chains");
  63. MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
  64. "in tx chain");
  65. char spider_net_driver_name[] = "spidernet";
  66. static struct pci_device_id spider_net_pci_tbl[] = {
  67. { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
  68. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
  69. { 0, }
  70. };
  71. MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
  72. /**
  73. * spider_net_read_reg - reads an SMMIO register of a card
  74. * @card: device structure
  75. * @reg: register to read from
  76. *
  77. * returns the content of the specified SMMIO register.
  78. */
  79. static inline u32
  80. spider_net_read_reg(struct spider_net_card *card, u32 reg)
  81. {
  82. /* We use the powerpc specific variants instead of readl_be() because
  83. * we know spidernet is not a real PCI device and we can thus avoid the
  84. * performance hit caused by the PCI workarounds.
  85. */
  86. return in_be32(card->regs + reg);
  87. }
  88. /**
  89. * spider_net_write_reg - writes to an SMMIO register of a card
  90. * @card: device structure
  91. * @reg: register to write to
  92. * @value: value to write into the specified SMMIO register
  93. */
  94. static inline void
  95. spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
  96. {
  97. /* We use the powerpc specific variants instead of writel_be() because
  98. * we know spidernet is not a real PCI device and we can thus avoid the
  99. * performance hit caused by the PCI workarounds.
  100. */
  101. out_be32(card->regs + reg, value);
  102. }
  103. /** spider_net_write_phy - write to phy register
  104. * @netdev: adapter to be written to
  105. * @mii_id: id of MII
  106. * @reg: PHY register
  107. * @val: value to be written to phy register
  108. *
  109. * spider_net_write_phy writes to an arbitrary PHY
  110. * register via the spider GPCWOPCMD register. We assume the queue does
  111. * not run full (not more than 15 commands outstanding).
  112. **/
  113. static void
  114. spider_net_write_phy(struct net_device *netdev, int mii_id,
  115. int reg, int val)
  116. {
  117. struct spider_net_card *card = netdev_priv(netdev);
  118. u32 writevalue;
  119. writevalue = ((u32)mii_id << 21) |
  120. ((u32)reg << 16) | ((u32)val);
  121. spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
  122. }
  123. /** spider_net_read_phy - read from phy register
  124. * @netdev: network device to be read from
  125. * @mii_id: id of MII
  126. * @reg: PHY register
  127. *
  128. * Returns value read from PHY register
  129. *
  130. * spider_net_read_phy reads from an arbitrary PHY
  131. * register via the spider GPCROPCMD register
  132. **/
  133. static int
  134. spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
  135. {
  136. struct spider_net_card *card = netdev_priv(netdev);
  137. u32 readvalue;
  138. readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
  139. spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
  140. /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
  141. * interrupt, as we poll for the completion of the read operation
  142. * in spider_net_read_phy. Should take about 50 us */
  143. do {
  144. readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
  145. } while (readvalue & SPIDER_NET_GPREXEC);
  146. readvalue &= SPIDER_NET_GPRDAT_MASK;
  147. return readvalue;
  148. }
  149. /**
  150. * spider_net_setup_aneg - initial auto-negotiation setup
  151. * @card: device structure
  152. **/
  153. static void
  154. spider_net_setup_aneg(struct spider_net_card *card)
  155. {
  156. struct mii_phy *phy = &card->phy;
  157. u32 advertise = 0;
  158. u16 bmsr, estat;
  159. bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
  160. estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
  161. if (bmsr & BMSR_10HALF)
  162. advertise |= ADVERTISED_10baseT_Half;
  163. if (bmsr & BMSR_10FULL)
  164. advertise |= ADVERTISED_10baseT_Full;
  165. if (bmsr & BMSR_100HALF)
  166. advertise |= ADVERTISED_100baseT_Half;
  167. if (bmsr & BMSR_100FULL)
  168. advertise |= ADVERTISED_100baseT_Full;
  169. if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
  170. advertise |= ADVERTISED_1000baseT_Full;
  171. if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
  172. advertise |= ADVERTISED_1000baseT_Half;
  173. mii_phy_probe(phy, phy->mii_id);
  174. phy->def->ops->setup_aneg(phy, advertise);
  175. }
  176. /**
  177. * spider_net_rx_irq_off - switch off rx irq on this spider card
  178. * @card: device structure
  179. *
  180. * switches off rx irq by masking them out in the GHIINTnMSK register
  181. */
  182. static void
  183. spider_net_rx_irq_off(struct spider_net_card *card)
  184. {
  185. u32 regvalue;
  186. regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
  187. spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
  188. }
  189. /**
  190. * spider_net_rx_irq_on - switch on rx irq on this spider card
  191. * @card: device structure
  192. *
  193. * switches on rx irq by enabling them in the GHIINTnMSK register
  194. */
  195. static void
  196. spider_net_rx_irq_on(struct spider_net_card *card)
  197. {
  198. u32 regvalue;
  199. regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
  200. spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
  201. }
  202. /**
  203. * spider_net_set_promisc - sets the unicast address or the promiscuous mode
  204. * @card: card structure
  205. *
  206. * spider_net_set_promisc sets the unicast destination address filter and
  207. * thus either allows for non-promisc mode or promisc mode
  208. */
  209. static void
  210. spider_net_set_promisc(struct spider_net_card *card)
  211. {
  212. u32 macu, macl;
  213. struct net_device *netdev = card->netdev;
  214. if (netdev->flags & IFF_PROMISC) {
  215. /* clear destination entry 0 */
  216. spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
  217. spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
  218. spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
  219. SPIDER_NET_PROMISC_VALUE);
  220. } else {
  221. macu = netdev->dev_addr[0];
  222. macu <<= 8;
  223. macu |= netdev->dev_addr[1];
  224. memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
  225. macu |= SPIDER_NET_UA_DESCR_VALUE;
  226. spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
  227. spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
  228. spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
  229. SPIDER_NET_NONPROMISC_VALUE);
  230. }
  231. }
  232. /**
  233. * spider_net_get_mac_address - read mac address from spider card
  234. * @netdev: interface device structure
  235. *
  236. * reads MAC address from GMACUNIMACU and GMACUNIMACL registers
  237. */
  238. static int
  239. spider_net_get_mac_address(struct net_device *netdev)
  240. {
  241. struct spider_net_card *card = netdev_priv(netdev);
  242. u32 macl, macu;
  243. macl = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACL);
  244. macu = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACU);
  245. netdev->dev_addr[0] = (macu >> 24) & 0xff;
  246. netdev->dev_addr[1] = (macu >> 16) & 0xff;
  247. netdev->dev_addr[2] = (macu >> 8) & 0xff;
  248. netdev->dev_addr[3] = macu & 0xff;
  249. netdev->dev_addr[4] = (macl >> 8) & 0xff;
  250. netdev->dev_addr[5] = macl & 0xff;
  251. if (!is_valid_ether_addr(&netdev->dev_addr[0]))
  252. return -EINVAL;
  253. return 0;
  254. }
  255. /**
  256. * spider_net_get_descr_status -- returns the status of a descriptor
  257. * @descr: descriptor to look at
  258. *
  259. * returns the status as in the dmac_cmd_status field of the descriptor
  260. */
  261. static inline int
  262. spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
  263. {
  264. return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
  265. }
  266. /**
  267. * spider_net_free_chain - free descriptor chain
  268. * @card: card structure
  269. * @chain: address of chain
  270. *
  271. */
  272. static void
  273. spider_net_free_chain(struct spider_net_card *card,
  274. struct spider_net_descr_chain *chain)
  275. {
  276. struct spider_net_descr *descr;
  277. descr = chain->ring;
  278. do {
  279. descr->bus_addr = 0;
  280. descr->hwdescr->next_descr_addr = 0;
  281. descr = descr->next;
  282. } while (descr != chain->ring);
  283. dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr),
  284. chain->hwring, chain->dma_addr);
  285. }
  286. /**
  287. * spider_net_init_chain - alloc and link descriptor chain
  288. * @card: card structure
  289. * @chain: address of chain
  290. *
  291. * We manage a circular list that mirrors the hardware structure,
  292. * except that the hardware uses bus addresses.
  293. *
  294. * Returns 0 on success, <0 on failure
  295. */
  296. static int
  297. spider_net_init_chain(struct spider_net_card *card,
  298. struct spider_net_descr_chain *chain)
  299. {
  300. int i;
  301. struct spider_net_descr *descr;
  302. struct spider_net_hw_descr *hwdescr;
  303. dma_addr_t buf;
  304. size_t alloc_size;
  305. alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
  306. chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
  307. &chain->dma_addr, GFP_KERNEL);
  308. if (!chain->hwring)
  309. return -ENOMEM;
  310. memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr));
  311. /* Set up the hardware pointers in each descriptor */
  312. descr = chain->ring;
  313. hwdescr = chain->hwring;
  314. buf = chain->dma_addr;
  315. for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
  316. hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
  317. hwdescr->next_descr_addr = 0;
  318. descr->hwdescr = hwdescr;
  319. descr->bus_addr = buf;
  320. descr->next = descr + 1;
  321. descr->prev = descr - 1;
  322. buf += sizeof(struct spider_net_hw_descr);
  323. }
  324. /* do actual circular list */
  325. (descr-1)->next = chain->ring;
  326. chain->ring->prev = descr-1;
  327. spin_lock_init(&chain->lock);
  328. chain->head = chain->ring;
  329. chain->tail = chain->ring;
  330. return 0;
  331. }
  332. /**
  333. * spider_net_free_rx_chain_contents - frees descr contents in rx chain
  334. * @card: card structure
  335. *
  336. * Unmaps and frees the skbs attached to the rx chain descriptors.
  337. */
  338. static void
  339. spider_net_free_rx_chain_contents(struct spider_net_card *card)
  340. {
  341. struct spider_net_descr *descr;
  342. descr = card->rx_chain.head;
  343. do {
  344. if (descr->skb) {
  345. pci_unmap_single(card->pdev, descr->hwdescr->buf_addr,
  346. SPIDER_NET_MAX_FRAME,
  347. PCI_DMA_BIDIRECTIONAL);
  348. dev_kfree_skb(descr->skb);
  349. descr->skb = NULL;
  350. }
  351. descr = descr->next;
  352. } while (descr != card->rx_chain.head);
  353. }
  354. /**
  355. * spider_net_prepare_rx_descr - Reinitialize RX descriptor
  356. * @card: card structure
  357. * @descr: descriptor to re-init
  358. *
  359. * Return 0 on success, <0 on failure.
  360. *
  361. * Allocates a new rx skb, iommu-maps it and attaches it to the
  362. * descriptor. Mark the descriptor as activated, ready-to-use.
  363. */
  364. static int
  365. spider_net_prepare_rx_descr(struct spider_net_card *card,
  366. struct spider_net_descr *descr)
  367. {
  368. struct spider_net_hw_descr *hwdescr = descr->hwdescr;
  369. dma_addr_t buf;
  370. int offset;
  371. int bufsize;
  372. /* we need to round up the buffer size to a multiple of 128 */
  373. bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
  374. (~(SPIDER_NET_RXBUF_ALIGN - 1));
  375. /* and we need to have it 128 byte aligned, therefore we allocate a
  376. * bit more */
  377. /* allocate an skb */
  378. descr->skb = netdev_alloc_skb(card->netdev,
  379. bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
  380. if (!descr->skb) {
  381. if (netif_msg_rx_err(card) && net_ratelimit())
  382. pr_err("Not enough memory to allocate rx buffer\n");
  383. card->spider_stats.alloc_rx_skb_error++;
  384. return -ENOMEM;
  385. }
  386. hwdescr->buf_size = bufsize;
  387. hwdescr->result_size = 0;
  388. hwdescr->valid_size = 0;
  389. hwdescr->data_status = 0;
  390. hwdescr->data_error = 0;
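/* Note: the computation below shifts skb->data up to the next
 * SPIDER_NET_RXBUF_ALIGN (128-byte) boundary; the extra ALIGN - 1
 * bytes allocated above leave room for this shift. */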
  391. offset = ((unsigned long)descr->skb->data) &
  392. (SPIDER_NET_RXBUF_ALIGN - 1);
  393. if (offset)
  394. skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
  395. /* iommu-map the skb */
  396. buf = pci_map_single(card->pdev, descr->skb->data,
  397. SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
  398. if (pci_dma_mapping_error(buf)) {
  399. dev_kfree_skb_any(descr->skb);
  400. descr->skb = NULL;
  401. if (netif_msg_rx_err(card) && net_ratelimit())
  402. pr_err("Could not iommu-map rx buffer\n");
  403. card->spider_stats.rx_iommu_map_error++;
  404. hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
  405. } else {
  406. hwdescr->buf_addr = buf;
  407. wmb();
  408. hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
  409. SPIDER_NET_DMAC_NOINTR_COMPLETE;
  410. }
  411. return 0;
  412. }
  413. /**
  414. * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
  415. * @card: card structure
  416. *
  417. * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
  418. * chip by writing to the appropriate register. DMA is enabled in
  419. * spider_net_enable_rxdmac.
  420. */
  421. static inline void
  422. spider_net_enable_rxchtails(struct spider_net_card *card)
  423. {
  424. /* assume chain is aligned correctly */
  425. spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
  426. card->rx_chain.tail->bus_addr);
  427. }
  428. /**
  429. * spider_net_enable_rxdmac - enables a receive DMA controller
  430. * @card: card structure
  431. *
  432. * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
  433. * in the GDADMACCNTR register
  434. */
  435. static inline void
  436. spider_net_enable_rxdmac(struct spider_net_card *card)
  437. {
  438. wmb();
  439. spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
  440. SPIDER_NET_DMA_RX_VALUE);
  441. }
  442. /**
  443. * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
  444. * @card: card structure
  445. *
  446. * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
  447. */
  448. static void
  449. spider_net_refill_rx_chain(struct spider_net_card *card)
  450. {
  451. struct spider_net_descr_chain *chain = &card->rx_chain;
  452. unsigned long flags;
  453. /* one context doing the refill (and a second context seeing that
  454. * and omitting it) is ok. If called by NAPI, we'll be called again
  455. * as spider_net_decode_one_descr is called several times. If some
  456. * interrupt calls us, the NAPI is about to clean up anyway. */
  457. if (!spin_trylock_irqsave(&chain->lock, flags))
  458. return;
  459. while (spider_net_get_descr_status(chain->head->hwdescr) ==
  460. SPIDER_NET_DESCR_NOT_IN_USE) {
  461. if (spider_net_prepare_rx_descr(card, chain->head))
  462. break;
  463. chain->head = chain->head->next;
  464. }
  465. spin_unlock_irqrestore(&chain->lock, flags);
  466. }
  467. /**
  468. * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
  469. * @card: card structure
  470. *
  471. * Returns 0 on success, <0 on failure.
  472. */
  473. static int
  474. spider_net_alloc_rx_skbs(struct spider_net_card *card)
  475. {
  476. struct spider_net_descr_chain *chain = &card->rx_chain;
  477. struct spider_net_descr *start = chain->tail;
  478. struct spider_net_descr *descr = start;
  479. /* Link up the hardware chain pointers */
  480. do {
  481. descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
  482. descr = descr->next;
  483. } while (descr != start);
  484. /* Put at least one buffer into the chain. If this fails,
  485. * we've got a problem. If not, spider_net_refill_rx_chain
  486. * will do the rest at the end of this function. */
  487. if (spider_net_prepare_rx_descr(card, chain->head))
  488. goto error;
  489. else
  490. chain->head = chain->head->next;
  491. /* This will allocate the rest of the rx buffers;
  492. * if some of them fail, it's business as usual later on. */
  493. spider_net_refill_rx_chain(card);
  494. spider_net_enable_rxdmac(card);
  495. return 0;
  496. error:
  497. spider_net_free_rx_chain_contents(card);
  498. return -ENOMEM;
  499. }
  500. /**
  501. * spider_net_get_multicast_hash - generates hash for multicast filter table
  502. * @addr: multicast address
  503. *
  504. * returns the hash value.
  505. *
  506. * spider_net_get_multicast_hash calculates a hash value for a given multicast
  507. * address, that is used to set the multicast filter tables
  508. */
  509. static u8
  510. spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
  511. {
  512. u32 crc;
  513. u8 hash;
  514. char addr_for_crc[ETH_ALEN] = { 0, };
  515. int i, bit;
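/* The loop below reverses both the byte order and the bit order of
 * the address before running it through crc32_be(); the 8-bit hash
 * is then formed from the top five CRC bits (hash bits 7..3) and the
 * lowest three CRC bits (hash bits 2..0). */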
  516. for (i = 0; i < ETH_ALEN * 8; i++) {
  517. bit = (addr[i / 8] >> (i % 8)) & 1;
  518. addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
  519. }
  520. crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
  521. hash = (crc >> 27);
  522. hash <<= 3;
  523. hash |= crc & 7;
  524. hash &= 0xff;
  525. return hash;
  526. }
  527. /**
  528. * spider_net_set_multi - sets multicast addresses and promisc flags
  529. * @netdev: interface device structure
  530. *
  531. * spider_net_set_multi configures multicast addresses as needed for the
  532. * netdev interface. It also sets up multicast, allmulti and promisc
  533. * flags appropriately
  534. */
  535. static void
  536. spider_net_set_multi(struct net_device *netdev)
  537. {
  538. struct dev_mc_list *mc;
  539. u8 hash;
  540. int i;
  541. u32 reg;
  542. struct spider_net_card *card = netdev_priv(netdev);
  543. unsigned long bitmask[SPIDER_NET_MULTICAST_HASHES / BITS_PER_LONG] =
  544. {0, };
  545. spider_net_set_promisc(card);
  546. if (netdev->flags & IFF_ALLMULTI) {
  547. for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) {
  548. set_bit(i, bitmask);
  549. }
  550. goto write_hash;
  551. }
  552. /* well, we know what the broadcast hash value is: it's 0xfd
  553. hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
  554. set_bit(0xfd, bitmask);
  555. for (mc = netdev->mc_list; mc; mc = mc->next) {
  556. hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr);
  557. set_bit(hash, bitmask);
  558. }
  559. write_hash:
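/* Each 32-bit GMRMHFILnR register appears to hold four hash-table
 * entries, one per byte; writing 0x08 into a byte enables the
 * corresponding hash bucket. */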
  560. for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
  561. reg = 0;
  562. if (test_bit(i * 4, bitmask))
  563. reg += 0x08;
  564. reg <<= 8;
  565. if (test_bit(i * 4 + 1, bitmask))
  566. reg += 0x08;
  567. reg <<= 8;
  568. if (test_bit(i * 4 + 2, bitmask))
  569. reg += 0x08;
  570. reg <<= 8;
  571. if (test_bit(i * 4 + 3, bitmask))
  572. reg += 0x08;
  573. spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
  574. }
  575. }
  576. /**
  577. * spider_net_disable_rxdmac - disables the receive DMA controller
  578. * @card: card structure
  579. *
  580. * spider_net_disable_rxdmac terminates processing on the DMA controller by
  581. * turning off DMA and issuing a force end
  582. */
  583. static void
  584. spider_net_disable_rxdmac(struct spider_net_card *card)
  585. {
  586. spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
  587. SPIDER_NET_DMA_RX_FEND_VALUE);
  588. }
  589. /**
  590. * spider_net_prepare_tx_descr - fill tx descriptor with skb data
  591. * @card: card structure
  592. * @descr: descriptor structure to fill out
  593. * @skb: packet to use
  594. *
  595. * returns 0 on success, <0 on failure.
  596. *
  597. * fills out the descriptor structure with skb data and len. Copies data,
  598. * if needed (32bit DMA!)
  599. */
  600. static int
  601. spider_net_prepare_tx_descr(struct spider_net_card *card,
  602. struct sk_buff *skb)
  603. {
  604. struct spider_net_descr_chain *chain = &card->tx_chain;
  605. struct spider_net_descr *descr;
  606. struct spider_net_hw_descr *hwdescr;
  607. dma_addr_t buf;
  608. unsigned long flags;
  609. buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
  610. if (pci_dma_mapping_error(buf)) {
  611. if (netif_msg_tx_err(card) && net_ratelimit())
  612. pr_err("could not iommu-map packet (%p, %i). "
  613. "Dropping packet\n", skb->data, skb->len);
  614. card->spider_stats.tx_iommu_map_error++;
  615. return -ENOMEM;
  616. }
  617. spin_lock_irqsave(&chain->lock, flags);
  618. descr = card->tx_chain.head;
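/* The ring counts as full when head would advance onto the
 * predecessor of tail, i.e. one descriptor is always kept free as a
 * guard between head and tail. */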
  619. if (descr->next == chain->tail->prev) {
  620. spin_unlock_irqrestore(&chain->lock, flags);
  621. pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE);
  622. return -ENOMEM;
  623. }
  624. hwdescr = descr->hwdescr;
  625. chain->head = descr->next;
  626. descr->skb = skb;
  627. hwdescr->buf_addr = buf;
  628. hwdescr->buf_size = skb->len;
  629. hwdescr->next_descr_addr = 0;
  630. hwdescr->data_status = 0;
  631. hwdescr->dmac_cmd_status =
  632. SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
  633. spin_unlock_irqrestore(&chain->lock, flags);
  634. if (skb->ip_summed == CHECKSUM_PARTIAL)
  635. switch (ip_hdr(skb)->protocol) {
  636. case IPPROTO_TCP:
  637. hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
  638. break;
  639. case IPPROTO_UDP:
  640. hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
  641. break;
  642. }
  643. /* Chain the bus address, so that the DMA engine finds this descr. */
  644. wmb();
  645. descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
  646. card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
  647. return 0;
  648. }
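/**
 * spider_net_set_low_watermark - set a "low watermark" in the tx chain
 * @card: card structure
 *
 * Walks the not-yet-released part of the tx chain and, if it is long
 * enough, sets the TXDESFLG on the descriptor roughly 3/4 of the way
 * in (presumably so the hardware signals when it reaches it), clearing
 * the flag on the previous watermark descriptor. Returns the number of
 * pending descriptors counted.
 */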
  649. static int
  650. spider_net_set_low_watermark(struct spider_net_card *card)
  651. {
  652. struct spider_net_descr *descr = card->tx_chain.tail;
  653. struct spider_net_hw_descr *hwdescr;
  654. unsigned long flags;
  655. int status;
  656. int cnt=0;
  657. int i;
  658. /* Measure the length of the queue. Measurement does not
  659. * need to be precise -- does not need a lock. */
  660. while (descr != card->tx_chain.head) {
  661. status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
  662. if (status == SPIDER_NET_DESCR_NOT_IN_USE)
  663. break;
  664. descr = descr->next;
  665. cnt++;
  666. }
  667. /* If TX queue is short, don't even bother with interrupts */
  668. if (cnt < card->tx_chain.num_desc/4)
  669. return cnt;
  670. /* Set low-watermark 3/4th's of the way into the queue. */
  671. descr = card->tx_chain.tail;
  672. cnt = (cnt*3)/4;
  673. for (i=0;i<cnt; i++)
  674. descr = descr->next;
  675. /* Set the new watermark, clear the old watermark */
  676. spin_lock_irqsave(&card->tx_chain.lock, flags);
  677. descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
  678. if (card->low_watermark && card->low_watermark != descr) {
  679. hwdescr = card->low_watermark->hwdescr;
  680. hwdescr->dmac_cmd_status =
  681. hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
  682. }
  683. card->low_watermark = descr;
  684. spin_unlock_irqrestore(&card->tx_chain.lock, flags);
  685. return cnt;
  686. }
  687. /**
  688. * spider_net_release_tx_chain - processes sent tx descriptors
  689. * @card: adapter structure
  690. * @brutal: if set, don't care about whether descriptor seems to be in use
  691. *
  692. * returns 0 if the tx ring is empty, otherwise 1.
  693. *
  694. * spider_net_release_tx_chain releases the tx descriptors that spider has
  695. * finished with (if non-brutal) or simply releases the tx descriptors (if brutal).
  696. * If some other context is calling this function, we return 1 so that we're
  697. * scheduled again (if we were scheduled) and will not lose the initiative.
  698. */
  699. static int
  700. spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
  701. {
  702. struct spider_net_descr_chain *chain = &card->tx_chain;
  703. struct spider_net_descr *descr;
  704. struct spider_net_hw_descr *hwdescr;
  705. struct sk_buff *skb;
  706. u32 buf_addr;
  707. unsigned long flags;
  708. int status;
  709. while (1) {
  710. spin_lock_irqsave(&chain->lock, flags);
  711. if (chain->tail == chain->head) {
  712. spin_unlock_irqrestore(&chain->lock, flags);
  713. return 0;
  714. }
  715. descr = chain->tail;
  716. hwdescr = descr->hwdescr;
  717. status = spider_net_get_descr_status(hwdescr);
  718. switch (status) {
  719. case SPIDER_NET_DESCR_COMPLETE:
  720. card->netdev_stats.tx_packets++;
  721. card->netdev_stats.tx_bytes += descr->skb->len;
  722. break;
  723. case SPIDER_NET_DESCR_CARDOWNED:
  724. if (!brutal) {
  725. spin_unlock_irqrestore(&chain->lock, flags);
  726. return 1;
  727. }
  728. /* fallthrough, if we release the descriptors
  729. * brutally (then we don't care about
  730. * SPIDER_NET_DESCR_CARDOWNED) */
  731. case SPIDER_NET_DESCR_RESPONSE_ERROR:
  732. case SPIDER_NET_DESCR_PROTECTION_ERROR:
  733. case SPIDER_NET_DESCR_FORCE_END:
  734. if (netif_msg_tx_err(card))
  735. pr_err("%s: forcing end of tx descriptor "
  736. "with status x%02x\n",
  737. card->netdev->name, status);
  738. card->netdev_stats.tx_errors++;
  739. break;
  740. default:
  741. card->netdev_stats.tx_dropped++;
  742. if (!brutal) {
  743. spin_unlock_irqrestore(&chain->lock, flags);
  744. return 1;
  745. }
  746. }
  747. chain->tail = descr->next;
  748. hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
  749. skb = descr->skb;
  750. descr->skb = NULL;
  751. buf_addr = hwdescr->buf_addr;
  752. spin_unlock_irqrestore(&chain->lock, flags);
  753. /* unmap the skb */
  754. if (skb) {
  755. pci_unmap_single(card->pdev, buf_addr, skb->len,
  756. PCI_DMA_TODEVICE);
  757. dev_kfree_skb(skb);
  758. }
  759. }
  760. return 0;
  761. }
  762. /**
  763. * spider_net_kick_tx_dma - enables TX DMA processing
  764. * @card: card structure
  765. * @descr: descriptor address to enable TX processing at
  766. *
  767. * This routine will start the transmit DMA running if
  768. * it is not already running. This routine need only be
  769. * called when queueing a new packet to an empty tx queue.
  770. * Writes the current tx chain head as start address
  771. * of the tx descriptor chain and enables the transmission
  772. * DMA engine.
  773. */
  774. static inline void
  775. spider_net_kick_tx_dma(struct spider_net_card *card)
  776. {
  777. struct spider_net_descr *descr;
  778. if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
  779. SPIDER_NET_TX_DMA_EN)
  780. goto out;
  781. descr = card->tx_chain.tail;
  782. for (;;) {
  783. if (spider_net_get_descr_status(descr->hwdescr) ==
  784. SPIDER_NET_DESCR_CARDOWNED) {
  785. spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
  786. descr->bus_addr);
  787. spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
  788. SPIDER_NET_DMA_TX_VALUE);
  789. break;
  790. }
  791. if (descr == card->tx_chain.head)
  792. break;
  793. descr = descr->next;
  794. }
  795. out:
  796. mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
  797. }
  798. /**
  799. * spider_net_xmit - transmits a frame over the device
  800. * @skb: packet to send out
  801. * @netdev: interface device structure
  802. *
  803. * returns 0 on success, !0 on failure
  804. */
  805. static int
  806. spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
  807. {
  808. int cnt;
  809. struct spider_net_card *card = netdev_priv(netdev);
  810. spider_net_release_tx_chain(card, 0);
  811. if (spider_net_prepare_tx_descr(card, skb) != 0) {
  812. card->netdev_stats.tx_dropped++;
  813. netif_stop_queue(netdev);
  814. return NETDEV_TX_BUSY;
  815. }
  816. cnt = spider_net_set_low_watermark(card);
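/* With only a few descriptors pending, kick the TX DMA engine
 * directly instead of relying on a watermark interrupt. */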
  817. if (cnt < 5)
  818. spider_net_kick_tx_dma(card);
  819. return NETDEV_TX_OK;
  820. }
  821. /**
  822. * spider_net_cleanup_tx_ring - cleans up the TX ring
  823. * @card: card structure
  824. *
  825. * spider_net_cleanup_tx_ring is called by either the tx_timer
  826. * or from the NAPI polling routine.
  827. * This routine releases resources associated with transmitted
  828. * packets, including updating the queue tail pointer.
  829. */
  830. static void
  831. spider_net_cleanup_tx_ring(struct spider_net_card *card)
  832. {
  833. if ((spider_net_release_tx_chain(card, 0) != 0) &&
  834. (card->netdev->flags & IFF_UP)) {
  835. spider_net_kick_tx_dma(card);
  836. netif_wake_queue(card->netdev);
  837. }
  838. }
  839. /**
  840. * spider_net_do_ioctl - called for device ioctls
  841. * @netdev: interface device structure
  842. * @ifr: request parameter structure for ioctl
  843. * @cmd: command code for ioctl
  844. *
  845. * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
  846. * -EOPNOTSUPP is returned if an unknown ioctl was requested
  847. */
  848. static int
  849. spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  850. {
  851. switch (cmd) {
  852. default:
  853. return -EOPNOTSUPP;
  854. }
  855. }
  856. /**
  857. * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
  858. * @descr: descriptor to process
  859. * @card: card structure
  860. *
  861. * Fills out skb structure and passes the data to the stack.
  862. * The descriptor state is not changed.
  863. */
  864. static void
  865. spider_net_pass_skb_up(struct spider_net_descr *descr,
  866. struct spider_net_card *card)
  867. {
  868. struct spider_net_hw_descr *hwdescr= descr->hwdescr;
  869. struct sk_buff *skb;
  870. struct net_device *netdev;
  871. u32 data_status, data_error;
  872. data_status = hwdescr->data_status;
  873. data_error = hwdescr->data_error;
  874. netdev = card->netdev;
  875. skb = descr->skb;
  876. skb_put(skb, hwdescr->valid_size);
  877. /* the card seems to add 2 bytes of junk in front
  878. * of the ethernet frame */
  879. #define SPIDER_MISALIGN 2
  880. skb_pull(skb, SPIDER_MISALIGN);
  881. skb->protocol = eth_type_trans(skb, netdev);
  882. /* checksum offload */
  883. if (card->options.rx_csum) {
  884. if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
  885. SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
  886. !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
  887. skb->ip_summed = CHECKSUM_UNNECESSARY;
  888. else
  889. skb->ip_summed = CHECKSUM_NONE;
  890. } else
  891. skb->ip_summed = CHECKSUM_NONE;
  892. if (data_status & SPIDER_NET_VLAN_PACKET) {
  893. /* further enhancements: HW-accel VLAN
  894. * vlan_hwaccel_receive_skb
  895. */
  896. }
  897. /* update netdevice statistics */
  898. card->netdev_stats.rx_packets++;
  899. card->netdev_stats.rx_bytes += skb->len;
  900. /* pass skb up to stack */
  901. netif_receive_skb(skb);
  902. }
  903. #ifdef DEBUG
  904. static void show_rx_chain(struct spider_net_card *card)
  905. {
  906. struct spider_net_descr_chain *chain = &card->rx_chain;
  907. struct spider_net_descr *start= chain->tail;
  908. struct spider_net_descr *descr= start;
  909. int status;
  910. int cnt = 0;
  911. int cstat = spider_net_get_descr_status(descr);
  912. printk(KERN_INFO "RX chain tail at descr=%ld\n",
  913. (start - card->descr) - card->tx_chain.num_desc);
  914. status = cstat;
  915. do
  916. {
  917. status = spider_net_get_descr_status(descr);
  918. if (cstat != status) {
  919. printk(KERN_INFO "Have %d descrs with stat=x%08x\n", cnt, cstat);
  920. cstat = status;
  921. cnt = 0;
  922. }
  923. cnt ++;
  924. descr = descr->next;
  925. } while (descr != start);
  926. printk(KERN_INFO "Last %d descrs with stat=x%08x\n", cnt, cstat);
  927. }
  928. #endif
  929. /**
  930. * spider_net_resync_head_ptr - Advance head ptr past empty descrs
  931. *
  932. * If the driver fails to keep up and empty the queue, then the
  933. * hardware will run out of room to put incoming packets. This
  934. * will cause the hardware to skip descrs that are full (instead
  935. * of halting/retrying). Thus, once the driver runs, it will need
  936. * to "catch up" to where the hardware chain pointer is at.
  937. */
  938. static void spider_net_resync_head_ptr(struct spider_net_card *card)
  939. {
  940. unsigned long flags;
  941. struct spider_net_descr_chain *chain = &card->rx_chain;
  942. struct spider_net_descr *descr;
  943. int i, status;
  944. /* Advance head pointer past any empty descrs */
  945. descr = chain->head;
  946. status = spider_net_get_descr_status(descr->hwdescr);
  947. if (status == SPIDER_NET_DESCR_NOT_IN_USE)
  948. return;
  949. spin_lock_irqsave(&chain->lock, flags);
  950. descr = chain->head;
  951. status = spider_net_get_descr_status(descr->hwdescr);
  952. for (i=0; i<chain->num_desc; i++) {
  953. if (status != SPIDER_NET_DESCR_CARDOWNED) break;
  954. descr = descr->next;
  955. status = spider_net_get_descr_status(descr->hwdescr);
  956. }
  957. chain->head = descr;
  958. spin_unlock_irqrestore(&chain->lock, flags);
  959. }
  960. static int spider_net_resync_tail_ptr(struct spider_net_card *card)
  961. {
  962. struct spider_net_descr_chain *chain = &card->rx_chain;
  963. struct spider_net_descr *descr;
  964. int i, status;
  965. /* Advance tail pointer past any empty and reaped descrs */
  966. descr = chain->tail;
  967. status = spider_net_get_descr_status(descr->hwdescr);
  968. for (i=0; i<chain->num_desc; i++) {
  969. if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
  970. (status != SPIDER_NET_DESCR_NOT_IN_USE)) break;
  971. descr = descr->next;
  972. status = spider_net_get_descr_status(descr->hwdescr);
  973. }
  974. chain->tail = descr;
  975. if ((i == chain->num_desc) || (i == 0))
  976. return 1;
  977. return 0;
  978. }
  979. /**
  980. * spider_net_decode_one_descr - processes an RX descriptor
  981. * @card: card structure
  982. *
  983. * Returns 1 if a packet has been sent to the stack, otherwise 0.
  984. *
  985. * Processes an RX descriptor by iommu-unmapping the data buffer
  986. * and passing the packet up to the stack. This function is called
  987. * in softirq context, e.g. either bottom half from interrupt or
  988. * NAPI polling context.
  989. */
  990. static int
  991. spider_net_decode_one_descr(struct spider_net_card *card)
  992. {
  993. struct spider_net_descr_chain *chain = &card->rx_chain;
  994. struct spider_net_descr *descr = chain->tail;
  995. struct spider_net_hw_descr *hwdescr = descr->hwdescr;
  996. int status;
  997. status = spider_net_get_descr_status(hwdescr);
  998. /* Nothing in the descriptor, or ring must be empty */
  999. if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
  1000. (status == SPIDER_NET_DESCR_NOT_IN_USE))
  1001. return 0;
  1002. /* descriptor definitively used -- move on tail */
  1003. chain->tail = descr->next;
  1004. /* unmap descriptor */
  1005. pci_unmap_single(card->pdev, hwdescr->buf_addr,
  1006. SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
  1007. if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
  1008. (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
  1009. (status == SPIDER_NET_DESCR_FORCE_END) ) {
  1010. if (netif_msg_rx_err(card))
  1011. pr_err("%s: dropping RX descriptor with state %d\n",
  1012. card->netdev->name, status);
  1013. card->netdev_stats.rx_dropped++;
  1014. goto bad_desc;
  1015. }
  1016. if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
  1017. (status != SPIDER_NET_DESCR_FRAME_END) ) {
  1018. if (netif_msg_rx_err(card))
  1019. pr_err("%s: RX descriptor with unknown state %d\n",
  1020. card->netdev->name, status);
  1021. card->spider_stats.rx_desc_unk_state++;
  1022. goto bad_desc;
  1023. }
  1024. /* The cases in which we throw away the packet immediately */
  1025. if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
  1026. if (netif_msg_rx_err(card))
  1027. pr_err("%s: error in received descriptor found, "
  1028. "data_status=x%08x, data_error=x%08x\n",
  1029. card->netdev->name,
  1030. hwdescr->data_status, hwdescr->data_error);
  1031. goto bad_desc;
  1032. }
  1033. if (hwdescr->dmac_cmd_status & 0xfcf4) {
  1034. pr_err("%s: bad status, cmd_status=x%08x\n",
  1035. card->netdev->name,
  1036. hwdescr->dmac_cmd_status);
  1037. pr_err("buf_addr=x%08x\n", hwdescr->buf_addr);
  1038. pr_err("buf_size=x%08x\n", hwdescr->buf_size);
  1039. pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
  1040. pr_err("result_size=x%08x\n", hwdescr->result_size);
  1041. pr_err("valid_size=x%08x\n", hwdescr->valid_size);
  1042. pr_err("data_status=x%08x\n", hwdescr->data_status);
  1043. pr_err("data_error=x%08x\n", hwdescr->data_error);
  1044. pr_err("which=%ld\n", descr - card->rx_chain.ring);
  1045. card->spider_stats.rx_desc_error++;
  1046. goto bad_desc;
  1047. }
  1048. /* Ok, we've got a packet in descr */
  1049. spider_net_pass_skb_up(descr, card);
  1050. descr->skb = NULL;
  1051. hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
  1052. return 1;
  1053. bad_desc:
  1054. dev_kfree_skb_irq(descr->skb);
  1055. descr->skb = NULL;
  1056. hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
  1057. return 0;
  1058. }
  1059. /**
  1060. * spider_net_poll - NAPI poll function called by the stack to return packets
  1061. * @netdev: interface device structure
  1062. * @budget: number of packets we can pass to the stack at most
  1063. *
  1064. * returns 0 if no more packets are available to the driver/stack. Returns 1
  1065. * if the quota is exceeded but the driver still has packets.
  1066. *
  1067. * spider_net_poll returns all packets from the rx descriptors to the stack
  1068. * (using netif_receive_skb). If all/enough packets are up, the driver
  1069. * reenables interrupts and returns 0. If not, 1 is returned.
  1070. */
  1071. static int
  1072. spider_net_poll(struct net_device *netdev, int *budget)
  1073. {
  1074. struct spider_net_card *card = netdev_priv(netdev);
  1075. int packets_to_do, packets_done = 0;
  1076. int no_more_packets = 0;
  1077. spider_net_cleanup_tx_ring(card);
  1078. packets_to_do = min(*budget, netdev->quota);
  1079. while (packets_to_do) {
  1080. if (spider_net_decode_one_descr(card)) {
  1081. packets_done++;
  1082. packets_to_do--;
  1083. } else {
  1084. /* no more packets for the stack */
  1085. no_more_packets = 1;
  1086. break;
  1087. }
  1088. }
  1089. if ((packets_done == 0) && (card->num_rx_ints != 0)) {
  1090. no_more_packets = spider_net_resync_tail_ptr(card);
  1091. spider_net_resync_head_ptr(card);
  1092. }
  1093. card->num_rx_ints = 0;
  1094. netdev->quota -= packets_done;
  1095. *budget -= packets_done;
  1096. spider_net_refill_rx_chain(card);
  1097. spider_net_enable_rxdmac(card);
  1098. /* if all packets are in the stack, enable interrupts and return 0 */
  1099. /* if not, return 1 */
  1100. if (no_more_packets) {
  1101. netif_rx_complete(netdev);
  1102. spider_net_rx_irq_on(card);
  1103. card->ignore_rx_ramfull = 0;
  1104. return 0;
  1105. }
  1106. return 1;
  1107. }
  1108. /**
  1109. * spider_net_get_stats - get interface statistics
  1110. * @netdev: interface device structure
  1111. *
  1112. * returns the interface statistics residing in the spider_net_card struct
  1113. */
  1114. static struct net_device_stats *
  1115. spider_net_get_stats(struct net_device *netdev)
  1116. {
  1117. struct spider_net_card *card = netdev_priv(netdev);
  1118. struct net_device_stats *stats = &card->netdev_stats;
  1119. return stats;
  1120. }
  1121. /**
  1122. * spider_net_change_mtu - changes the MTU of an interface
  1123. * @netdev: interface device structure
  1124. * @new_mtu: new MTU value
  1125. *
  1126. * returns 0 on success, <0 on failure
  1127. */
  1128. static int
  1129. spider_net_change_mtu(struct net_device *netdev, int new_mtu)
  1130. {
  1131. /* no need to re-alloc skbs or so -- the max mtu is about 2.3k
  1132. * and mtu is outbound only anyway */
  1133. if ( (new_mtu < SPIDER_NET_MIN_MTU ) ||
  1134. (new_mtu > SPIDER_NET_MAX_MTU) )
  1135. return -EINVAL;
  1136. netdev->mtu = new_mtu;
  1137. return 0;
  1138. }
  1139. /**
  1140. * spider_net_set_mac - sets the MAC of an interface
  1141. * @netdev: interface device structure
  1142. * @ptr: pointer to new MAC address
  1143. *
  1144. * Returns 0 on success, <0 on failure. Writes the new address to the
  1145. * card's unicast MAC registers and verifies that it was accepted.
  1146. */
  1147. static int
  1148. spider_net_set_mac(struct net_device *netdev, void *p)
  1149. {
  1150. struct spider_net_card *card = netdev_priv(netdev);
  1151. u32 macl, macu, regvalue;
  1152. struct sockaddr *addr = p;
  1153. if (!is_valid_ether_addr(addr->sa_data))
  1154. return -EADDRNOTAVAIL;
  1155. /* switch off GMACTPE and GMACRPE */
  1156. regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
  1157. regvalue &= ~((1 << 5) | (1 << 6));
  1158. spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
  1159. /* write mac */
  1160. macu = (addr->sa_data[0]<<24) + (addr->sa_data[1]<<16) +
  1161. (addr->sa_data[2]<<8) + (addr->sa_data[3]);
  1162. macl = (addr->sa_data[4]<<8) + (addr->sa_data[5]);
  1163. spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
  1164. spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
  1165. /* switch GMACTPE and GMACRPE back on */
  1166. regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
  1167. regvalue |= ((1 << 5) | (1 << 6));
  1168. spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
  1169. spider_net_set_promisc(card);
  1170. /* look up, whether we have been successful */
  1171. if (spider_net_get_mac_address(netdev))
  1172. return -EADDRNOTAVAIL;
  1173. if (memcmp(netdev->dev_addr,addr->sa_data,netdev->addr_len))
  1174. return -EADDRNOTAVAIL;
  1175. return 0;
  1176. }
  1177. /**
  1178. * spider_net_link_reset
  1179. * @netdev: net device structure
  1180. *
  1181. * This is called when the PHY_LINK signal is asserted. For the blade this is
  1182. * not connected so we should never get here.
  1183. *
  1184. */
  1185. static void
  1186. spider_net_link_reset(struct net_device *netdev)
  1187. {
  1188. struct spider_net_card *card = netdev_priv(netdev);
  1189. del_timer_sync(&card->aneg_timer);
  1190. /* clear interrupt, block further interrupts */
  1191. spider_net_write_reg(card, SPIDER_NET_GMACST,
  1192. spider_net_read_reg(card, SPIDER_NET_GMACST));
  1193. spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
  1194. /* reset phy and setup aneg */
  1195. spider_net_setup_aneg(card);
  1196. mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
  1197. }
  1198. /**
  1199. * spider_net_handle_error_irq - handles errors raised by an interrupt
  1200. * @card: card structure
  1201. * @status_reg: interrupt status register 0 (GHIINT0STS)
  1202. *
  1203. * spider_net_handle_error_irq treats or ignores all error conditions
  1204. * found when an interrupt is presented
  1205. */
  1206. static void
  1207. spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
  1208. {
  1209. u32 error_reg1, error_reg2;
  1210. u32 i;
  1211. int show_error = 1;
        error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
        error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);

        /* check GHIINT0STS ************************************/
        if (status_reg)
                for (i = 0; i < 32; i++)
                        if (status_reg & (1<<i))
                                switch (i)
        {
        /* let error_reg1 and error_reg2 evaluation decide what to do
        case SPIDER_NET_PHYINT:
        case SPIDER_NET_GMAC2INT:
        case SPIDER_NET_GMAC1INT:
        case SPIDER_NET_GFIFOINT:
        case SPIDER_NET_DMACINT:
        case SPIDER_NET_GSYSINT:
                break; */

        case SPIDER_NET_GIPSINT:
                show_error = 0;
                break;

        case SPIDER_NET_GPWOPCMPINT:
                /* PHY write operation completed */
                show_error = 0;
                break;
        case SPIDER_NET_GPROPCMPINT:
                /* PHY read operation completed */
                /* we don't use semaphores, as we poll for the completion
                 * of the read operation in spider_net_read_phy. Should take
                 * about 50 us */
                show_error = 0;
                break;
        case SPIDER_NET_GPWFFINT:
                /* PHY command queue full */
                if (netif_msg_intr(card))
                        pr_err("PHY write queue full\n");
                show_error = 0;
                break;

        /* case SPIDER_NET_GRMDADRINT: not used. print a message */
        /* case SPIDER_NET_GRMARPINT: not used. print a message */
        /* case SPIDER_NET_GRMMPINT: not used. print a message */

        case SPIDER_NET_GDTDEN0INT:
                /* someone has set TX_DMA_EN to 0 */
                show_error = 0;
                break;

        case SPIDER_NET_GDDDEN0INT: /* fallthrough */
        case SPIDER_NET_GDCDEN0INT: /* fallthrough */
        case SPIDER_NET_GDBDEN0INT: /* fallthrough */
        case SPIDER_NET_GDADEN0INT:
                /* someone has set RX_DMA_EN to 0 */
                show_error = 0;
                break;

        /* RX interrupts */
        case SPIDER_NET_GDDFDCINT:
        case SPIDER_NET_GDCFDCINT:
        case SPIDER_NET_GDBFDCINT:
        case SPIDER_NET_GDAFDCINT:
        /* case SPIDER_NET_GDNMINT: not used. print a message */
        /* case SPIDER_NET_GCNMINT: not used. print a message */
        /* case SPIDER_NET_GBNMINT: not used. print a message */
        /* case SPIDER_NET_GANMINT: not used. print a message */
        /* case SPIDER_NET_GRFNMINT: not used. print a message */
                show_error = 0;
                break;

        /* TX interrupts */
        case SPIDER_NET_GDTFDCINT:
                show_error = 0;
                break;
        case SPIDER_NET_GTTEDINT:
                show_error = 0;
                break;
        case SPIDER_NET_GDTDCEINT:
                /* chain end. If a descriptor should be sent, kick off
                 * tx dma
                if (card->tx_chain.tail != card->tx_chain.head)
                        spider_net_kick_tx_dma(card);
                */
                show_error = 0;
                break;

        /* case SPIDER_NET_G1TMCNTINT: not used. print a message */
        /* case SPIDER_NET_GFREECNTINT: not used. print a message */
        }

        /* check GHIINT1STS ************************************/
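        /* GHIINT1STS mostly reports per-DMA-queue conditions (RAM full,
         * chain end, invalid descriptors); they are recovered below by
         * resyncing the head pointer and refilling the rx chain */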
        if (error_reg1)
                for (i = 0; i < 32; i++)
                        if (error_reg1 & (1<<i))
                                switch (i)
        {
        case SPIDER_NET_GTMFLLINT:
                /* TX RAM full may happen during normal operation.
                 * Logging is not needed. */
                show_error = 0;
                break;
        case SPIDER_NET_GRFDFLLINT: /* fallthrough */
        case SPIDER_NET_GRFCFLLINT: /* fallthrough */
        case SPIDER_NET_GRFBFLLINT: /* fallthrough */
        case SPIDER_NET_GRFAFLLINT: /* fallthrough */
        case SPIDER_NET_GRMFLLINT:
                /* Could happen when rx chain is full */
                if (card->ignore_rx_ramfull == 0) {
                        card->ignore_rx_ramfull = 1;
                        spider_net_resync_head_ptr(card);
                        spider_net_refill_rx_chain(card);
                        spider_net_enable_rxdmac(card);
                        card->num_rx_ints++;
                        netif_rx_schedule(card->netdev);
                }
                show_error = 0;
                break;

        /* case SPIDER_NET_GTMSHTINT: problem, print a message */
        case SPIDER_NET_GDTINVDINT:
                /* allrighty. tx from previous descr ok */
                show_error = 0;
                break;

        /* chain end */
        case SPIDER_NET_GDDDCEINT: /* fallthrough */
        case SPIDER_NET_GDCDCEINT: /* fallthrough */
        case SPIDER_NET_GDBDCEINT: /* fallthrough */
        case SPIDER_NET_GDADCEINT:
                spider_net_resync_head_ptr(card);
                spider_net_refill_rx_chain(card);
                spider_net_enable_rxdmac(card);
                card->num_rx_ints++;
                netif_rx_schedule(card->netdev);
                show_error = 0;
                break;

        /* invalid descriptor */
        case SPIDER_NET_GDDINVDINT: /* fallthrough */
        case SPIDER_NET_GDCINVDINT: /* fallthrough */
        case SPIDER_NET_GDBINVDINT: /* fallthrough */
        case SPIDER_NET_GDAINVDINT:
                /* Could happen when rx chain is full */
                spider_net_resync_head_ptr(card);
                spider_net_refill_rx_chain(card);
                spider_net_enable_rxdmac(card);
                card->num_rx_ints++;
                netif_rx_schedule(card->netdev);
                show_error = 0;
                break;

        /* case SPIDER_NET_GDTRSERINT: problem, print a message */
        /* case SPIDER_NET_GDDRSERINT: problem, print a message */
        /* case SPIDER_NET_GDCRSERINT: problem, print a message */
        /* case SPIDER_NET_GDBRSERINT: problem, print a message */
        /* case SPIDER_NET_GDARSERINT: problem, print a message */
        /* case SPIDER_NET_GDSERINT: problem, print a message */
        /* case SPIDER_NET_GDTPTERINT: problem, print a message */
        /* case SPIDER_NET_GDDPTERINT: problem, print a message */
        /* case SPIDER_NET_GDCPTERINT: problem, print a message */
        /* case SPIDER_NET_GDBPTERINT: problem, print a message */
        /* case SPIDER_NET_GDAPTERINT: problem, print a message */
        default:
                show_error = 1;
                break;
        }

        /* check GHIINT2STS ************************************/
        if (error_reg2)
                for (i = 0; i < 32; i++)
                        if (error_reg2 & (1<<i))
                                switch (i)
        {
        /* there is nothing we can (want to) do at this time. Log a
         * message, we can switch on and off the specific values later on
        case SPIDER_NET_GPROPERINT:
        case SPIDER_NET_GMCTCRSNGINT:
        case SPIDER_NET_GMCTLCOLINT:
        case SPIDER_NET_GMCTTMOTINT:
        case SPIDER_NET_GMCRCAERINT:
        case SPIDER_NET_GMCRCALERINT:
        case SPIDER_NET_GMCRALNERINT:
        case SPIDER_NET_GMCROVRINT:
        case SPIDER_NET_GMCRRNTINT:
        case SPIDER_NET_GMCRRXERINT:
        case SPIDER_NET_GTITCSERINT:
        case SPIDER_NET_GTIFMTERINT:
        case SPIDER_NET_GTIPKTRVKINT:
        case SPIDER_NET_GTISPINGINT:
        case SPIDER_NET_GTISADNGINT:
        case SPIDER_NET_GTISPDNGINT:
        case SPIDER_NET_GRIFMTERINT:
        case SPIDER_NET_GRIPKTRVKINT:
        case SPIDER_NET_GRISPINGINT:
        case SPIDER_NET_GRISADNGINT:
        case SPIDER_NET_GRISPDNGINT:
                break;
        */
        default:
                break;
        }

        if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
                pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, "
                       "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
                       card->netdev->name,
                       status_reg, error_reg1, error_reg2);

        /* clear interrupt sources */
        spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
        spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
}

/**
 * spider_net_interrupt - interrupt handler for spider_net
 * @irq: interrupt number
 * @ptr: pointer to net_device
 *
 * returns IRQ_HANDLED if the interrupt was for the driver, or IRQ_NONE if
 * no interrupt raised by the card was found.
 *
 * This is the interrupt handler that turns off interrupts for this device
 * and makes the stack poll the driver
 */
static irqreturn_t
spider_net_interrupt(int irq, void *ptr)
{
        struct net_device *netdev = ptr;
        struct spider_net_card *card = netdev_priv(netdev);
        u32 status_reg;

        status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
        if (!status_reg)
                return IRQ_NONE;

        if (status_reg & SPIDER_NET_RXINT) {
                spider_net_rx_irq_off(card);
                netif_rx_schedule(netdev);
                card->num_rx_ints++;
        }
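
        /* TX completions are reaped from the NAPI poll routine as well,
         * so a TX interrupt only needs to schedule the poll */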
        if (status_reg & SPIDER_NET_TXINT)
                netif_rx_schedule(netdev);

        if (status_reg & SPIDER_NET_LINKINT)
                spider_net_link_reset(netdev);

        if (status_reg & SPIDER_NET_ERRINT)
                spider_net_handle_error_irq(card, status_reg);

        /* clear interrupt sources */
        spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);

        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * spider_net_poll_controller - artificial interrupt for netconsole etc.
 * @netdev: interface device structure
 *
 * see Documentation/networking/netconsole.txt
 */
static void
spider_net_poll_controller(struct net_device *netdev)
{
        disable_irq(netdev->irq);
        spider_net_interrupt(netdev->irq, netdev);
        enable_irq(netdev->irq);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * spider_net_init_card - initializes the card
 * @card: card structure
 *
 * spider_net_init_card initializes the card so that other registers can
 * be used
 */
static void
spider_net_init_card(struct spider_net_card *card)
{
        spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
                             SPIDER_NET_CKRCTRL_STOP_VALUE);
        spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
                             SPIDER_NET_CKRCTRL_RUN_VALUE);

        /* trigger ETOMOD signal */
        spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
                             spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
}

/**
 * spider_net_enable_card - enables the card by setting all kinds of regs
 * @card: card structure
 *
 * spider_net_enable_card sets a lot of SMMIO registers to enable the device
 */
static void
spider_net_enable_card(struct spider_net_card *card)
{
        int i;
        /* the following array consists of (register),(value) pairs
         * that are set in this function. A register of 0 ends the list */
        u32 regs[][2] = {
                { SPIDER_NET_GRESUMINTNUM, 0 },
                { SPIDER_NET_GREINTNUM, 0 },

                /* set interrupt frame number registers */
                /* clear the single DMA engine registers first */
                { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
                { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
                { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
                { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
                /* then set, what we really need */
                { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },

                /* timer counter registers and stuff */
                { SPIDER_NET_GFREECNNUM, 0 },
                { SPIDER_NET_GONETIMENUM, 0 },
                { SPIDER_NET_GTOUTFRMNUM, 0 },

                /* RX mode setting */
                { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
                /* TX mode setting */
                { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
                /* IPSEC mode setting */
                { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },

                { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },

                { SPIDER_NET_GMRWOLCTRL, 0 },
                { SPIDER_NET_GTESTMD, 0x10000000 },
                { SPIDER_NET_GTTQMSK, 0x00400040 },

                { SPIDER_NET_GMACINTEN, 0 },

                /* flow control stuff */
                { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
                { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },

                { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
                { 0, 0 }
        };

        i = 0;
        while (regs[i][0]) {
                spider_net_write_reg(card, regs[i][0], regs[i][1]);
                i++;
        }

        /* clear unicast filter table entries 1 to 14 */
        for (i = 1; i <= 14; i++) {
                spider_net_write_reg(card,
                                     SPIDER_NET_GMRUAFILnR + i * 8,
                                     0x00080000);
                spider_net_write_reg(card,
                                     SPIDER_NET_GMRUAFILnR + i * 8 + 4,
                                     0x00000000);
        }

        spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);

        spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);

        /* set chain tail address for RX chains and
         * enable DMA */
        spider_net_enable_rxchtails(card);
        spider_net_enable_rxdmac(card);
        spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);

        spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
                             SPIDER_NET_LENLMT_VALUE);
        spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
                             SPIDER_NET_OPMODE_VALUE);

        /* set interrupt mask registers */
        spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
                             SPIDER_NET_INT0_MASK_VALUE);
        spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
                             SPIDER_NET_INT1_MASK_VALUE);
        spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
                             SPIDER_NET_INT2_MASK_VALUE);

        spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
                             SPIDER_NET_GDTBSTA);
}

/**
 * spider_net_download_firmware - loads firmware into the adapter
 * @card: card structure
 * @firmware_ptr: pointer to firmware data
 *
 * spider_net_download_firmware loads the firmware data into the
 * adapter. It assumes the length etc. to be correct.
 */
static int
spider_net_download_firmware(struct spider_net_card *card,
                             const void *firmware_ptr)
{
        int sequencer, i;
        const u32 *fw_ptr = firmware_ptr;

        /* stop sequencers */
        spider_net_write_reg(card, SPIDER_NET_GSINIT,
                             SPIDER_NET_STOP_SEQ_VALUE);

        for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
             sequencer++) {
                spider_net_write_reg(card,
                                     SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
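                /* the program address has just been reset to 0; now feed
                 * the SPIDER_NET_FIRMWARE_SEQWORDS program words for this
                 * sequencer through the data register, one after another */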
                for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
                        spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
                                             sequencer * 8, *fw_ptr);
                        fw_ptr++;
                }
        }
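
        /* if GSINIT does not read back as zero at this point, the
         * download is considered to have failed */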
        if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
                return -EIO;

        spider_net_write_reg(card, SPIDER_NET_GSINIT,
                             SPIDER_NET_RUN_SEQ_VALUE);

        return 0;
}

/**
 * spider_net_init_firmware - reads in firmware parts
 * @card: card structure
 *
 * Returns 0 on success, <0 on failure
 *
 * spider_net_init_firmware opens the sequencer firmware and does some basic
 * checks. This function opens and releases the firmware structure. A call
 * to download the firmware is performed before the release.
 *
 * Firmware format
 * ===============
 * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
 * the program for each sequencer. Use the command
 *    tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
 *         Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
 *         Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
 *
 * to generate spider_fw.bin, if you have sequencer programs with something
 * like the following contents for each sequencer:
 *    <ONE LINE COMMENT>
 *    <FIRST 4-BYTES-WORD FOR SEQUENCER>
 *    <SECOND 4-BYTES-WORD FOR SEQUENCER>
 *    ...
 *    <1024th 4-BYTES-WORD FOR SEQUENCER>
 */
static int
spider_net_init_firmware(struct spider_net_card *card)
{
        struct firmware *firmware = NULL;
        struct device_node *dn;
        const u8 *fw_prop = NULL;
        int err = -ENOENT;
        int fw_size;

        if (request_firmware((const struct firmware **)&firmware,
                             SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
                if (firmware->size != SPIDER_NET_FIRMWARE_LEN) {
                        if (netif_msg_probe(card))
                                pr_err("Incorrect size of spidernet firmware in "
                                       "filesystem. Looking in host firmware...\n");
                        release_firmware(firmware);
                        goto try_host_fw;
                }

                err = spider_net_download_firmware(card, firmware->data);

                release_firmware(firmware);
                if (err)
                        goto try_host_fw;

                goto done;
        }

try_host_fw:
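        /* fall back to a firmware image provided via the Open Firmware
         * device tree ("firmware" property of the device node) */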
        dn = pci_device_to_OF_node(card->pdev);
        if (!dn)
                goto out_err;

        fw_prop = of_get_property(dn, "firmware", &fw_size);
        if (!fw_prop)
                goto out_err;

        if (fw_size != SPIDER_NET_FIRMWARE_LEN) {
                if (netif_msg_probe(card))
                        pr_err("Incorrect size of spidernet firmware in "
                               "host firmware\n");
                goto done;
        }

        err = spider_net_download_firmware(card, fw_prop);

done:
        return err;
out_err:
        if (netif_msg_probe(card))
                pr_err("Couldn't find spidernet firmware in filesystem "
                       "or host firmware\n");
        return err;
}

/**
 * spider_net_open - called upon ifconfig up
 * @netdev: interface device structure
 *
 * returns 0 on success, <0 on failure
 *
 * spider_net_open allocates all the descriptors and memory needed for
 * operation, sets up multicast list and enables interrupts
 */
int
spider_net_open(struct net_device *netdev)
{
        struct spider_net_card *card = netdev_priv(netdev);
        int result;

        result = spider_net_init_firmware(card);
        if (result)
                goto init_firmware_failed;

        /* start probing with copper */
        spider_net_setup_aneg(card);
        if (card->phy.def->phy_id)
                mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);

        result = spider_net_init_chain(card, &card->tx_chain);
        if (result)
                goto alloc_tx_failed;
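
        /* no TX descriptor has been designated as the low-watermark
         * descriptor yet */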
        card->low_watermark = NULL;

        result = spider_net_init_chain(card, &card->rx_chain);
        if (result)
                goto alloc_rx_failed;

        /* Allocate rx skbs */
        result = spider_net_alloc_rx_skbs(card);
        if (result)
                goto alloc_skbs_failed;

        spider_net_set_multi(netdev);

        /* further enhancement: setup hw vlan, if needed */

        result = -EBUSY;
        if (request_irq(netdev->irq, spider_net_interrupt,
                        IRQF_SHARED, netdev->name, netdev))
                goto register_int_failed;

        spider_net_enable_card(card);

        netif_start_queue(netdev);
        netif_carrier_on(netdev);
        netif_poll_enable(netdev);

        return 0;

register_int_failed:
        spider_net_free_rx_chain_contents(card);
alloc_skbs_failed:
        spider_net_free_chain(card, &card->rx_chain);
alloc_rx_failed:
        spider_net_free_chain(card, &card->tx_chain);
alloc_tx_failed:
        del_timer_sync(&card->aneg_timer);
init_firmware_failed:
        return result;
}

/**
 * spider_net_link_phy - bring the link up via the autonegotiation timer
 * @data: used for pointer to card structure
 *
 * Timer callback: cycles through the possible media until the PHY reports
 * link, then programs the GMAC for the negotiated speed; while the link is
 * still down it re-arms itself.
 */
static void spider_net_link_phy(unsigned long data)
{
        struct spider_net_card *card = (struct spider_net_card *)data;
        struct mii_phy *phy = &card->phy;

        /* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
        if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
                pr_info("%s: link is down, trying to bring it up\n",
                        card->netdev->name);
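
                /* step through the media: copper, then fiber with
                 * autonegotiation, then fiber without, then start over
                 * with copper */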
                switch (card->medium) {
                case BCM54XX_COPPER:
                        /* enable fiber with autonegotiation first */
                        if (phy->def->ops->enable_fiber)
                                phy->def->ops->enable_fiber(phy, 1);
                        card->medium = BCM54XX_FIBER;
                        break;

                case BCM54XX_FIBER:
                        /* fiber didn't come up, try to disable fiber autoneg */
                        if (phy->def->ops->enable_fiber)
                                phy->def->ops->enable_fiber(phy, 0);
                        card->medium = BCM54XX_UNKNOWN;
                        break;

                case BCM54XX_UNKNOWN:
                        /* copper and fiber (with and without autoneg)
                         * have all failed, retry from the beginning */
                        spider_net_setup_aneg(card);
                        card->medium = BCM54XX_COPPER;
                        break;
                }

                card->aneg_count = 0;
                mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
                return;
        }

        /* link still not up, try again later */
        if (!(phy->def->ops->poll_link(phy))) {
                card->aneg_count++;
                mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
                return;
        }

        /* link came up, get abilities */
        phy->def->ops->read_link(phy);

        spider_net_write_reg(card, SPIDER_NET_GMACST,
                             spider_net_read_reg(card, SPIDER_NET_GMACST));
        spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);

        if (phy->speed == 1000)
                spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
        else
                spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);

        card->aneg_count = 0;

        pr_debug("Found %s with %i Mbps, %s-duplex %sautoneg.\n",
                 phy->def->name, phy->speed, phy->duplex == 1 ? "Full" : "Half",
                 phy->autoneg == 1 ? "" : "no ");

        return;
}

/**
 * spider_net_setup_phy - setup PHY
 * @card: card structure
 *
 * returns 0 on success, <0 on failure
 *
 * spider_net_setup_phy is used as part of spider_net_probe.
 **/
static int
spider_net_setup_phy(struct spider_net_card *card)
{
        struct mii_phy *phy = &card->phy;

        spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
                             SPIDER_NET_DMASEL_VALUE);
        spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
                             SPIDER_NET_PHY_CTRL_VALUE);

        phy->dev = card->netdev;
        phy->mdio_read = spider_net_read_phy;
        phy->mdio_write = spider_net_write_phy;

        for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
                unsigned short id;
                id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
                if (id != 0x0000 && id != 0xffff) {
                        if (!mii_phy_probe(phy, phy->mii_id)) {
                                pr_info("Found %s.\n", phy->def->name);
                                break;
                        }
                }
        }
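
        /* not finding a PHY on any MII address is not treated as fatal
         * here; the caller proceeds either way */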
        return 0;
}

/**
 * spider_net_workaround_rxramfull - work around firmware bug
 * @card: card structure
 *
 * no return value
 **/
static void
spider_net_workaround_rxramfull(struct spider_net_card *card)
{
        int i, sequencer = 0;

        /* cancel reset */
        spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
                             SPIDER_NET_CKRCTRL_RUN_VALUE);

        /* empty sequencer data */
        for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
             sequencer++) {
                spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
                                     sequencer * 8, 0x0);
                for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
                        spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
                                             sequencer * 8, 0x0);
                }
        }

        /* set sequencer operation */
        spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);

        /* reset */
        spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
                             SPIDER_NET_CKRCTRL_STOP_VALUE);
}

/**
 * spider_net_stop - called upon ifconfig down
 * @netdev: interface device structure
 *
 * always returns 0
 */
int
spider_net_stop(struct net_device *netdev)
{
        struct spider_net_card *card = netdev_priv(netdev);

        netif_poll_disable(netdev);
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);
        del_timer_sync(&card->tx_timer);
        del_timer_sync(&card->aneg_timer);

        /* disable/mask all interrupts */
        spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
        spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
        spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
        spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);

        free_irq(netdev->irq, netdev);
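
        /* request a forced end (FEND) of the TX DMA transfer */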
        spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
                             SPIDER_NET_DMA_TX_FEND_VALUE);

        /* turn off DMA, force end */
        spider_net_disable_rxdmac(card);

        /* release chains */
        spider_net_release_tx_chain(card, 1);
        spider_net_free_rx_chain_contents(card);

        spider_net_free_chain(card, &card->tx_chain);
        spider_net_free_chain(card, &card->rx_chain);

        return 0;
}

/**
 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
 * function (to be called not in interrupt context)
 * @work: work queue entry embedded in the card structure
 *
 * called as task when tx hangs, resets interface (if interface is up)
 */
static void
spider_net_tx_timeout_task(struct work_struct *work)
{
        struct spider_net_card *card =
                container_of(work, struct spider_net_card, tx_timeout_task);
        struct net_device *netdev = card->netdev;

        if (!(netdev->flags & IFF_UP))
                goto out;

        netif_device_detach(netdev);
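
        /* fully reinitialize the hardware: stop the interface, apply the
         * rxramfull workaround, re-init the card and the PHY, then reopen */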
        spider_net_stop(netdev);

        spider_net_workaround_rxramfull(card);
        spider_net_init_card(card);

        if (spider_net_setup_phy(card))
                goto out;

        spider_net_open(netdev);
        spider_net_kick_tx_dma(card);
        netif_device_attach(netdev);

out:
        atomic_dec(&card->tx_timeout_task_counter);
}

/**
 * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
 * @netdev: interface device structure
 *
 * called if tx hangs; schedules a task that resets the interface
 */
static void
spider_net_tx_timeout(struct net_device *netdev)
{
        struct spider_net_card *card;

        card = netdev_priv(netdev);
        atomic_inc(&card->tx_timeout_task_counter);
        if (netdev->flags & IFF_UP)
                schedule_work(&card->tx_timeout_task);
        else
                atomic_dec(&card->tx_timeout_task_counter);
        card->spider_stats.tx_timeouts++;
}

/**
 * spider_net_setup_netdev_ops - initialization of net_device operations
 * @netdev: net_device structure
 *
 * fills out function pointers in the net_device structure
 */
static void
spider_net_setup_netdev_ops(struct net_device *netdev)
{
        netdev->open = &spider_net_open;
        netdev->stop = &spider_net_stop;
        netdev->hard_start_xmit = &spider_net_xmit;
        netdev->get_stats = &spider_net_get_stats;
        netdev->set_multicast_list = &spider_net_set_multi;
        netdev->set_mac_address = &spider_net_set_mac;
        netdev->change_mtu = &spider_net_change_mtu;
        netdev->do_ioctl = &spider_net_do_ioctl;
        /* tx watchdog */
        netdev->tx_timeout = &spider_net_tx_timeout;
        netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
        /* NAPI */
        netdev->poll = &spider_net_poll;
        netdev->weight = SPIDER_NET_NAPI_WEIGHT;
        /* HW VLAN */
#ifdef CONFIG_NET_POLL_CONTROLLER
        /* poll controller */
        netdev->poll_controller = &spider_net_poll_controller;
#endif /* CONFIG_NET_POLL_CONTROLLER */
        /* ethtool ops */
        netdev->ethtool_ops = &spider_net_ethtool_ops;
}

/**
 * spider_net_setup_netdev - initialization of net_device
 * @card: card structure
 *
 * Returns 0 on success or <0 on failure
 *
 * spider_net_setup_netdev initializes the net_device structure
 **/
static int
spider_net_setup_netdev(struct spider_net_card *card)
{
        int result;
        struct net_device *netdev = card->netdev;
        struct device_node *dn;
        struct sockaddr addr;
        const u8 *mac;

        SET_MODULE_OWNER(netdev);
        SET_NETDEV_DEV(netdev, &card->pdev->dev);

        pci_set_drvdata(card->pdev, netdev);

        init_timer(&card->tx_timer);
        card->tx_timer.function =
                (void (*)(unsigned long)) spider_net_cleanup_tx_ring;
        card->tx_timer.data = (unsigned long) card;
        netdev->irq = card->pdev->irq;

        card->aneg_count = 0;
        init_timer(&card->aneg_timer);
        card->aneg_timer.function = spider_net_link_phy;
        card->aneg_timer.data = (unsigned long) card;

        card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;

        spider_net_setup_netdev_ops(netdev);

        netdev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX;
        /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
         *              NETIF_F_HW_VLAN_FILTER */

        netdev->irq = card->pdev->irq;
        card->num_rx_ints = 0;
        card->ignore_rx_ramfull = 0;

        dn = pci_device_to_OF_node(card->pdev);
        if (!dn)
                return -EIO;

        mac = of_get_property(dn, "local-mac-address", NULL);
        if (!mac)
                return -EIO;
        memcpy(addr.sa_data, mac, ETH_ALEN);

        result = spider_net_set_mac(netdev, &addr);
        if ((result) && (netif_msg_probe(card)))
                pr_err("Failed to set MAC address: %i\n", result);

        result = register_netdev(netdev);
        if (result) {
                if (netif_msg_probe(card))
                        pr_err("Couldn't register net_device: %i\n", result);
                return result;
        }

        if (netif_msg_probe(card))
                pr_info("Initialized device %s.\n", netdev->name);

        return 0;
}

/**
 * spider_net_alloc_card - allocates net_device and card structure
 *
 * returns the card structure or NULL in case of errors
 *
 * the card and net_device structures are linked to each other
 */
static struct spider_net_card *
spider_net_alloc_card(void)
{
        struct net_device *netdev;
        struct spider_net_card *card;
        size_t alloc_size;
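
        /* the rx and tx descriptor rings are allocated together with the
         * card structure and live in card->darray */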
        alloc_size = sizeof(struct spider_net_card) +
                     (tx_descriptors + rx_descriptors) * sizeof(struct spider_net_descr);

        netdev = alloc_etherdev(alloc_size);
        if (!netdev)
                return NULL;

        card = netdev_priv(netdev);
        card->netdev = netdev;
        card->msg_enable = SPIDER_NET_DEFAULT_MSG;
        INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
        init_waitqueue_head(&card->waitq);
        atomic_set(&card->tx_timeout_task_counter, 0);

        card->rx_chain.num_desc = rx_descriptors;
        card->rx_chain.ring = card->darray;
        card->tx_chain.num_desc = tx_descriptors;
        card->tx_chain.ring = card->darray + rx_descriptors;

        return card;
}

/**
 * spider_net_undo_pci_setup - releases PCI resources
 * @card: card structure
 *
 * spider_net_undo_pci_setup releases the mapped regions
 */
static void
spider_net_undo_pci_setup(struct spider_net_card *card)
{
        iounmap(card->regs);
        pci_release_regions(card->pdev);
}

/**
 * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
 * @pdev: PCI device
 *
 * Returns the card structure or NULL if any errors occur
 *
 * spider_net_setup_pci_dev initializes pdev and together with the
 * functions called in spider_net_open configures the device so that
 * data can be transferred over it
 * The net_device structure is attached to the card structure, if the
 * function returns without error.
 **/
static struct spider_net_card *
spider_net_setup_pci_dev(struct pci_dev *pdev)
{
        struct spider_net_card *card;
        unsigned long mmio_start, mmio_len;

        if (pci_enable_device(pdev)) {
                pr_err("Couldn't enable PCI device\n");
                return NULL;
        }

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                pr_err("Couldn't find proper PCI device base address.\n");
                goto out_disable_dev;
        }

        if (pci_request_regions(pdev, spider_net_driver_name)) {
                pr_err("Couldn't obtain PCI resources, aborting.\n");
                goto out_disable_dev;
        }

        pci_set_master(pdev);

        card = spider_net_alloc_card();
        if (!card) {
                pr_err("Couldn't allocate net_device structure, aborting.\n");
                goto out_release_regions;
        }
        card->pdev = pdev;

        /* fetch base address and length of first resource */
        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);

        card->netdev->mem_start = mmio_start;
        card->netdev->mem_end = mmio_start + mmio_len;
        card->regs = ioremap(mmio_start, mmio_len);

        if (!card->regs) {
                pr_err("Couldn't ioremap the PCI registers, aborting.\n");
                goto out_release_regions;
        }

        return card;

out_release_regions:
        pci_release_regions(pdev);
out_disable_dev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return NULL;
}

/**
 * spider_net_probe - initialization of a device
 * @pdev: PCI device
 * @ent: entry in the device id list
 *
 * Returns 0 on success, <0 on failure
 *
 * spider_net_probe initializes pdev and registers a net_device
 * structure for it. After that, the device can be ifconfig'ed up
 **/
static int __devinit
spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int err = -EIO;
        struct spider_net_card *card;

        card = spider_net_setup_pci_dev(pdev);
        if (!card)
                goto out;

        spider_net_workaround_rxramfull(card);
        spider_net_init_card(card);

        err = spider_net_setup_phy(card);
        if (err)
                goto out_undo_pci;

        err = spider_net_setup_netdev(card);
        if (err)
                goto out_undo_pci;

        return 0;

out_undo_pci:
        spider_net_undo_pci_setup(card);
        free_netdev(card->netdev);
out:
        return err;
}

/**
 * spider_net_remove - removal of a device
 * @pdev: PCI device
 *
 * spider_net_remove is called to remove the device and unregisters the
 * net_device
 **/
static void __devexit
spider_net_remove(struct pci_dev *pdev)
{
        struct net_device *netdev;
        struct spider_net_card *card;

        netdev = pci_get_drvdata(pdev);
        card = netdev_priv(netdev);
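
        /* wait until any scheduled tx-timeout reset task has finished
         * before tearing the device down */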
        wait_event(card->waitq,
                   atomic_read(&card->tx_timeout_task_counter) == 0);

        unregister_netdev(netdev);

        /* switch off card */
        spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
                             SPIDER_NET_CKRCTRL_STOP_VALUE);
        spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
                             SPIDER_NET_CKRCTRL_RUN_VALUE);

        spider_net_undo_pci_setup(card);
        free_netdev(netdev);
}

static struct pci_driver spider_net_driver = {
        .name = spider_net_driver_name,
        .id_table = spider_net_pci_tbl,
        .probe = spider_net_probe,
        .remove = __devexit_p(spider_net_remove)
};

/**
 * spider_net_init - init function when the driver is loaded
 *
 * spider_net_init registers the device driver
 */
static int __init spider_net_init(void)
{
        printk(KERN_INFO "Spidernet version %s.\n", VERSION);

        if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
                rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
                pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
        }
        if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
                rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
                pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
        }
        if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
                tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
                pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
        }
        if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
                tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
                pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
        }

        return pci_register_driver(&spider_net_driver);
}

/**
 * spider_net_cleanup - exit function when driver is unloaded
 *
 * spider_net_cleanup unregisters the device driver
 */
static void __exit spider_net_cleanup(void)
{
        pci_unregister_driver(&spider_net_driver);
}

module_init(spider_net_init);
module_exit(spider_net_cleanup);