spider_net.c

  1. /*
  2. * Network device driver for Cell Processor-Based Blade
  3. *
  4. * (C) Copyright IBM Corp. 2005
  5. *
  6. * Authors : Utz Bacher <utz.bacher@de.ibm.com>
  7. * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; either version 2, or (at your option)
  12. * any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program; if not, write to the Free Software
  21. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  22. */
  23. #include <linux/compiler.h>
  24. #include <linux/crc32.h>
  25. #include <linux/delay.h>
  26. #include <linux/etherdevice.h>
  27. #include <linux/ethtool.h>
  28. #include <linux/firmware.h>
  29. #include <linux/if_vlan.h>
  30. #include <linux/in.h>
  31. #include <linux/init.h>
  32. #include <linux/ioport.h>
  33. #include <linux/ip.h>
  34. #include <linux/kernel.h>
  35. #include <linux/mii.h>
  36. #include <linux/module.h>
  37. #include <linux/netdevice.h>
  38. #include <linux/device.h>
  39. #include <linux/pci.h>
  40. #include <linux/skbuff.h>
  41. #include <linux/slab.h>
  42. #include <linux/tcp.h>
  43. #include <linux/types.h>
  44. #include <linux/vmalloc.h>
  45. #include <linux/wait.h>
  46. #include <linux/workqueue.h>
  47. #include <asm/bitops.h>
  48. #include <asm/pci-bridge.h>
  49. #include <net/checksum.h>
  50. #include "spider_net.h"
  51. MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
  52. "<Jens.Osterkamp@de.ibm.com>");
  53. MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
  54. MODULE_LICENSE("GPL");
  55. MODULE_VERSION(VERSION);
  56. static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
  57. static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
  58. module_param(rx_descriptors, int, 0444);
  59. module_param(tx_descriptors, int, 0444);
  60. MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
  61. "in rx chains");
  62. MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
  63. "in tx chain");
  64. char spider_net_driver_name[] = "spidernet";
  65. static struct pci_device_id spider_net_pci_tbl[] = {
  66. { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
  67. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
  68. { 0, }
  69. };
  70. MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
  71. /**
  72. * spider_net_read_reg - reads an SMMIO register of a card
  73. * @card: device structure
  74. * @reg: register to read from
  75. *
  76. * returns the content of the specified SMMIO register.
  77. */
  78. static inline u32
  79. spider_net_read_reg(struct spider_net_card *card, u32 reg)
  80. {
  81. /* We use the powerpc specific variants instead of readl_be() because
  82. * we know spidernet is not a real PCI device and we can thus avoid the
  83. * performance hit caused by the PCI workarounds.
  84. */
  85. return in_be32(card->regs + reg);
  86. }
  87. /**
  88. * spider_net_write_reg - writes to an SMMIO register of a card
  89. * @card: device structure
  90. * @reg: register to write to
  91. * @value: value to write into the specified SMMIO register
  92. */
  93. static inline void
  94. spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
  95. {
  96. /* We use the powerpc specific variants instead of writel_be() because
  97. * we know spidernet is not a real PCI device and we can thus avoid the
  98. * performance hit caused by the PCI workarounds.
  99. */
  100. out_be32(card->regs + reg, value);
  101. }
  102. /** spider_net_write_phy - write to phy register
  103. * @netdev: adapter to be written to
  104. * @mii_id: id of MII
  105. * @reg: PHY register
  106. * @val: value to be written to phy register
  107. *
  108. * spider_net_write_phy writes to an arbitrary PHY
  109. * register via the spider GPCWOPCMD register. We assume the queue does
  110. * not run full (not more than 15 commands outstanding).
  111. **/
  112. static void
  113. spider_net_write_phy(struct net_device *netdev, int mii_id,
  114. int reg, int val)
  115. {
  116. struct spider_net_card *card = netdev_priv(netdev);
  117. u32 writevalue;
  118. writevalue = ((u32)mii_id << 21) |
  119. ((u32)reg << 16) | ((u32)val);
  120. spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
  121. }
  122. /** spider_net_read_phy - read from phy register
  123. * @netdev: network device to be read from
  124. * @mii_id: id of MII
  125. * @reg: PHY register
  126. *
  127. * Returns value read from PHY register
  128. *
  129. * spider_net_read_phy reads from an arbitrary PHY
  130. * register via the spider GPCROPCMD register
  131. **/
  132. static int
  133. spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
  134. {
  135. struct spider_net_card *card = netdev_priv(netdev);
  136. u32 readvalue;
  137. readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
  138. spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
  139. /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
  140. * interrupt, as we poll for the completion of the read operation
  141. * in spider_net_read_phy. Should take about 50 us */
  142. do {
  143. readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
  144. } while (readvalue & SPIDER_NET_GPREXEC);
  145. readvalue &= SPIDER_NET_GPRDAT_MASK;
  146. return readvalue;
  147. }
  148. /**
  149. * spider_net_rx_irq_off - switch off rx irq on this spider card
  150. * @card: device structure
  151. *
  152. * switches off rx irq by masking them out in the GHIINTnMSK register
  153. */
  154. static void
  155. spider_net_rx_irq_off(struct spider_net_card *card)
  156. {
  157. u32 regvalue;
  158. regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
  159. spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
  160. }
  161. /**
  162. * spider_net_rx_irq_on - switch on rx irq on this spider card
  163. * @card: device structure
  164. *
  165. * switches on rx irq by enabling them in the GHIINTnMSK register
  166. */
  167. static void
  168. spider_net_rx_irq_on(struct spider_net_card *card)
  169. {
  170. u32 regvalue;
  171. regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
  172. spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
  173. }
  174. /**
  175. * spider_net_set_promisc - sets the unicast address or the promiscuous mode
  176. * @card: card structure
  177. *
  178. * spider_net_set_promisc sets the unicast destination address filter and
  179. * thus either allows for non-promisc mode or promisc mode
  180. */
  181. static void
  182. spider_net_set_promisc(struct spider_net_card *card)
  183. {
  184. u32 macu, macl;
  185. struct net_device *netdev = card->netdev;
  186. if (netdev->flags & IFF_PROMISC) {
  187. /* clear destination entry 0 */
  188. spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
  189. spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
  190. spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
  191. SPIDER_NET_PROMISC_VALUE);
  192. } else {
  193. macu = netdev->dev_addr[0];
  194. macu <<= 8;
  195. macu |= netdev->dev_addr[1];
  196. memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
  197. macu |= SPIDER_NET_UA_DESCR_VALUE;
  198. spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
  199. spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
  200. spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
  201. SPIDER_NET_NONPROMISC_VALUE);
  202. }
  203. }
  204. /**
  205. * spider_net_get_mac_address - read mac address from spider card
  206. * @netdev: interface device structure
  207. *
  208. * reads MAC address from GMACUNIMACU and GMACUNIMACL registers
  209. */
  210. static int
  211. spider_net_get_mac_address(struct net_device *netdev)
  212. {
  213. struct spider_net_card *card = netdev_priv(netdev);
  214. u32 macl, macu;
  215. macl = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACL);
  216. macu = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACU);
  217. netdev->dev_addr[0] = (macu >> 24) & 0xff;
  218. netdev->dev_addr[1] = (macu >> 16) & 0xff;
  219. netdev->dev_addr[2] = (macu >> 8) & 0xff;
  220. netdev->dev_addr[3] = macu & 0xff;
  221. netdev->dev_addr[4] = (macl >> 8) & 0xff;
  222. netdev->dev_addr[5] = macl & 0xff;
  223. if (!is_valid_ether_addr(&netdev->dev_addr[0]))
  224. return -EINVAL;
  225. return 0;
  226. }
  227. /**
  228. * spider_net_get_descr_status -- returns the status of a descriptor
  229. * @descr: descriptor to look at
  230. *
  231. * returns the status as in the dmac_cmd_status field of the descriptor
  232. */
  233. static inline int
  234. spider_net_get_descr_status(struct spider_net_descr *descr)
  235. {
  236. return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
  237. }
  238. /**
  239. * spider_net_free_chain - free descriptor chain
  240. * @card: card structure
  241. * @chain: address of chain
  242. *
  243. */
  244. static void
  245. spider_net_free_chain(struct spider_net_card *card,
  246. struct spider_net_descr_chain *chain)
  247. {
  248. struct spider_net_descr *descr;
  249. descr = chain->ring;
  250. do {
  251. descr->bus_addr = 0;
  252. descr->next_descr_addr = 0;
  253. descr = descr->next;
  254. } while (descr != chain->ring);
  255. dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_descr),
  256. chain->ring, chain->dma_addr);
  257. }
  258. /**
  259. * spider_net_init_chain - alloc and link descriptor chain
  260. * @card: card structure
  261. * @chain: address of chain
  262. *
  263. * We manage a circular list that mirrors the hardware structure,
  264. * except that the hardware uses bus addresses.
  265. *
  266. * Returns 0 on success, <0 on failure
  267. */
  268. static int
  269. spider_net_init_chain(struct spider_net_card *card,
  270. struct spider_net_descr_chain *chain)
  271. {
  272. int i;
  273. struct spider_net_descr *descr;
  274. dma_addr_t buf;
  275. size_t alloc_size;
  276. alloc_size = chain->num_desc * sizeof (struct spider_net_descr);
  277. chain->ring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
  278. &chain->dma_addr, GFP_KERNEL);
  279. if (!chain->ring)
  280. return -ENOMEM;
  281. descr = chain->ring;
  282. memset(descr, 0, alloc_size);
  283. /* Set up the hardware pointers in each descriptor */
  284. buf = chain->dma_addr;
  285. for (i=0; i < chain->num_desc; i++, descr++) {
  286. descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
  287. descr->bus_addr = buf;
  288. descr->next_descr_addr = 0;
  289. descr->next = descr + 1;
  290. descr->prev = descr - 1;
  291. buf += sizeof(struct spider_net_descr);
  292. }
  293. /* do actual circular list */
  294. (descr-1)->next = chain->ring;
  295. chain->ring->prev = descr-1;
  296. spin_lock_init(&chain->lock);
  297. chain->head = chain->ring;
  298. chain->tail = chain->ring;
  299. return 0;
  300. }
  301. /**
  302. * spider_net_free_rx_chain_contents - frees descr contents in rx chain
  303. * @card: card structure
  304. *
  305. * Frees the skbs and unmaps the rx buffers attached to the descriptors in the chain.
  306. */
  307. static void
  308. spider_net_free_rx_chain_contents(struct spider_net_card *card)
  309. {
  310. struct spider_net_descr *descr;
  311. descr = card->rx_chain.head;
  312. do {
  313. if (descr->skb) {
  314. dev_kfree_skb(descr->skb);
  315. pci_unmap_single(card->pdev, descr->buf_addr,
  316. SPIDER_NET_MAX_FRAME,
  317. PCI_DMA_BIDIRECTIONAL);
  318. }
  319. descr = descr->next;
  320. } while (descr != card->rx_chain.head);
  321. }
  322. /**
  323. * spider_net_prepare_rx_descr - Reinitialize RX descriptor
  324. * @card: card structure
  325. * @descr: descriptor to re-init
  326. *
  327. * Return 0 on success, <0 on failure.
  328. *
  329. * Allocates a new rx skb, iommu-maps it and attaches it to the
  330. * descriptor. Mark the descriptor as activated, ready-to-use.
  331. */
  332. static int
  333. spider_net_prepare_rx_descr(struct spider_net_card *card,
  334. struct spider_net_descr *descr)
  335. {
  336. dma_addr_t buf;
  337. int offset;
  338. int bufsize;
  339. /* we need to round up the buffer size to a multiple of 128 */
  340. bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
  341. (~(SPIDER_NET_RXBUF_ALIGN - 1));
  342. /* and we need to have it 128 byte aligned, therefore we allocate a
  343. * bit more */
  344. /* allocate an skb */
  345. descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
  346. if (!descr->skb) {
  347. if (netif_msg_rx_err(card) && net_ratelimit())
  348. pr_err("Not enough memory to allocate rx buffer\n");
  349. card->spider_stats.alloc_rx_skb_error++;
  350. return -ENOMEM;
  351. }
  352. descr->buf_size = bufsize;
  353. descr->result_size = 0;
  354. descr->valid_size = 0;
  355. descr->data_status = 0;
  356. descr->data_error = 0;
  357. offset = ((unsigned long)descr->skb->data) &
  358. (SPIDER_NET_RXBUF_ALIGN - 1);
  359. if (offset)
  360. skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
  361. /* iommu-map the skb */
  362. buf = pci_map_single(card->pdev, descr->skb->data,
  363. SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
  364. descr->buf_addr = buf;
  365. if (pci_dma_mapping_error(buf)) {
  366. dev_kfree_skb_any(descr->skb);
  367. if (netif_msg_rx_err(card) && net_ratelimit())
  368. pr_err("Could not iommu-map rx buffer\n");
  369. card->spider_stats.rx_iommu_map_error++;
  370. descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
  371. } else {
  372. wmb();
  373. descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
  374. SPIDER_NET_DMAC_NOINTR_COMPLETE;
  375. }
  376. return 0;
  377. }
  378. /**
  379. * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
  380. * @card: card structure
  381. *
  382. * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
  383. * chip by writing to the appropriate register. DMA is enabled in
  384. * spider_net_enable_rxdmac.
  385. */
  386. static inline void
  387. spider_net_enable_rxchtails(struct spider_net_card *card)
  388. {
  389. /* assume chain is aligned correctly */
  390. spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
  391. card->rx_chain.tail->bus_addr);
  392. }
  393. /**
  394. * spider_net_enable_rxdmac - enables a receive DMA controller
  395. * @card: card structure
  396. *
  397. * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
  398. * in the GDADMACCNTR register
  399. */
  400. static inline void
  401. spider_net_enable_rxdmac(struct spider_net_card *card)
  402. {
  403. wmb();
  404. spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
  405. SPIDER_NET_DMA_RX_VALUE);
  406. }
  407. /**
  408. * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
  409. * @card: card structure
  410. *
  411. * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
  412. */
  413. static void
  414. spider_net_refill_rx_chain(struct spider_net_card *card)
  415. {
  416. struct spider_net_descr_chain *chain = &card->rx_chain;
  417. unsigned long flags;
  418. /* one context doing the refill (and a second context seeing that
  419. * and omitting it) is ok. If called by NAPI, we'll be called again
  420. * as spider_net_decode_one_descr is called several times. If some
  421. * interrupt calls us, the NAPI is about to clean up anyway. */
  422. if (!spin_trylock_irqsave(&chain->lock, flags))
  423. return;
  424. while (spider_net_get_descr_status(chain->head) ==
  425. SPIDER_NET_DESCR_NOT_IN_USE) {
  426. if (spider_net_prepare_rx_descr(card, chain->head))
  427. break;
  428. chain->head = chain->head->next;
  429. }
  430. spin_unlock_irqrestore(&chain->lock, flags);
  431. }
  432. /**
  433. * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
  434. * @card: card structure
  435. *
  436. * Returns 0 on success, <0 on failure.
  437. */
  438. static int
  439. spider_net_alloc_rx_skbs(struct spider_net_card *card)
  440. {
  441. int result;
  442. struct spider_net_descr_chain *chain;
  443. result = -ENOMEM;
  444. chain = &card->rx_chain;
  445. /* Put at least one buffer into the chain. if this fails,
  446. * we've got a problem. If not, spider_net_refill_rx_chain
  447. * will do the rest at the end of this function. */
  448. if (spider_net_prepare_rx_descr(card, chain->head))
  449. goto error;
  450. else
  451. chain->head = chain->head->next;
  452. /* This will allocate the rest of the rx buffers;
  453. * if not, it's business as usual later on. */
  454. spider_net_refill_rx_chain(card);
  455. spider_net_enable_rxdmac(card);
  456. return 0;
  457. error:
  458. spider_net_free_rx_chain_contents(card);
  459. return result;
  460. }
  461. /**
  462. * spider_net_get_multicast_hash - generates hash for multicast filter table
  463. * @addr: multicast address
  464. *
  465. * returns the hash value.
  466. *
  467. * spider_net_get_multicast_hash calculates a hash value for a given multicast
  468. * address, that is used to set the multicast filter tables
  469. */
  470. static u8
  471. spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
  472. {
  473. u32 crc;
  474. u8 hash;
  475. char addr_for_crc[ETH_ALEN] = { 0, };
  476. int i, bit;
  477. for (i = 0; i < ETH_ALEN * 8; i++) {
  478. bit = (addr[i / 8] >> (i % 8)) & 1;
  479. addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
  480. }
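/* The loop above copies the address byte- and bit-reversed; the 8-bit hash
 * below is then built from bits 31:27 and 2:0 of the big-endian CRC. */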
  481. crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
  482. hash = (crc >> 27);
  483. hash <<= 3;
  484. hash |= crc & 7;
  485. hash &= 0xff;
  486. return hash;
  487. }
  488. /**
  489. * spider_net_set_multi - sets multicast addresses and promisc flags
  490. * @netdev: interface device structure
  491. *
  492. * spider_net_set_multi configures multicast addresses as needed for the
  493. * netdev interface. It also sets up multicast, allmulti and promisc
  494. * flags appropriately
  495. */
  496. static void
  497. spider_net_set_multi(struct net_device *netdev)
  498. {
  499. struct dev_mc_list *mc;
  500. u8 hash;
  501. int i;
  502. u32 reg;
  503. struct spider_net_card *card = netdev_priv(netdev);
  504. unsigned long bitmask[SPIDER_NET_MULTICAST_HASHES / BITS_PER_LONG] =
  505. {0, };
  506. spider_net_set_promisc(card);
  507. if (netdev->flags & IFF_ALLMULTI) {
  508. for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) {
  509. set_bit(i, bitmask);
  510. }
  511. goto write_hash;
  512. }
  513. /* well, we know what the broadcast hash value is: it's 0xfd
  514. hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
  515. set_bit(0xfd, bitmask);
  516. for (mc = netdev->mc_list; mc; mc = mc->next) {
  517. hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr);
  518. set_bit(hash, bitmask);
  519. }
  520. write_hash:
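/* Each GMRMHFILnR register packs four hash-table entries, one per byte
 * (entry i*4 in the top byte); bit 3 (0x08) of a byte marks that hash
 * value as accepted by the multicast filter. */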
  521. for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
  522. reg = 0;
  523. if (test_bit(i * 4, bitmask))
  524. reg += 0x08;
  525. reg <<= 8;
  526. if (test_bit(i * 4 + 1, bitmask))
  527. reg += 0x08;
  528. reg <<= 8;
  529. if (test_bit(i * 4 + 2, bitmask))
  530. reg += 0x08;
  531. reg <<= 8;
  532. if (test_bit(i * 4 + 3, bitmask))
  533. reg += 0x08;
  534. spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
  535. }
  536. }
  537. /**
  538. * spider_net_disable_rxdmac - disables the receive DMA controller
  539. * @card: card structure
  540. *
  541. * spider_net_disable_rxdmac terminates processing on the DMA controller by
  542. * turning off DMA and issuing a force end
  543. */
  544. static void
  545. spider_net_disable_rxdmac(struct spider_net_card *card)
  546. {
  547. spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
  548. SPIDER_NET_DMA_RX_FEND_VALUE);
  549. }
  550. /**
  551. * spider_net_prepare_tx_descr - fill tx descriptor with skb data
  552. * @card: card structure
  553. * @descr: descriptor structure to fill out
  554. * @skb: packet to use
  555. *
  556. * returns 0 on success, <0 on failure.
  557. *
  558. * fills out the descriptor structure with skb data and len. Copies data,
  559. * if needed (32bit DMA!)
  560. */
  561. static int
  562. spider_net_prepare_tx_descr(struct spider_net_card *card,
  563. struct sk_buff *skb)
  564. {
  565. struct spider_net_descr *descr;
  566. dma_addr_t buf;
  567. unsigned long flags;
  568. buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
  569. if (pci_dma_mapping_error(buf)) {
  570. if (netif_msg_tx_err(card) && net_ratelimit())
  571. pr_err("could not iommu-map packet (%p, %i). "
  572. "Dropping packet\n", skb->data, skb->len);
  573. card->spider_stats.tx_iommu_map_error++;
  574. return -ENOMEM;
  575. }
  576. spin_lock_irqsave(&card->tx_chain.lock, flags);
  577. descr = card->tx_chain.head;
  578. card->tx_chain.head = descr->next;
  579. descr->buf_addr = buf;
  580. descr->buf_size = skb->len;
  581. descr->next_descr_addr = 0;
  582. descr->skb = skb;
  583. descr->data_status = 0;
  584. descr->dmac_cmd_status =
  585. SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
  586. spin_unlock_irqrestore(&card->tx_chain.lock, flags);
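/* For IPv4 TCP/UDP frames, flag the descriptor so the hardware generates
 * the L4 checksum (tx checksum offload). */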
  587. if (skb->protocol == htons(ETH_P_IP))
  588. switch (skb->nh.iph->protocol) {
  589. case IPPROTO_TCP:
  590. descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
  591. break;
  592. case IPPROTO_UDP:
  593. descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
  594. break;
  595. }
  596. /* Chain the bus address, so that the DMA engine finds this descr. */
  597. descr->prev->next_descr_addr = descr->bus_addr;
  598. card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
  599. return 0;
  600. }
  601. static int
  602. spider_net_set_low_watermark(struct spider_net_card *card)
  603. {
  604. unsigned long flags;
  605. int status;
  606. int cnt=0;
  607. int i;
  608. struct spider_net_descr *descr = card->tx_chain.tail;
  609. /* Measure the length of the queue. Measurement does not
  610. * need to be precise -- does not need a lock. */
  611. while (descr != card->tx_chain.head) {
  612. status = descr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
  613. if (status == SPIDER_NET_DESCR_NOT_IN_USE)
  614. break;
  615. descr = descr->next;
  616. cnt++;
  617. }
  618. /* If TX queue is short, don't even bother with interrupts */
  619. if (cnt < card->tx_chain.num_desc/4)
  620. return cnt;
  621. /* Set low-watermark 3/4th's of the way into the queue. */
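/* The descriptor flagged with TXDESFLG below is expected to raise a TX
 * interrupt when the hardware completes it, so the driver is notified
 * once the queue has drained this far. */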
  622. descr = card->tx_chain.tail;
  623. cnt = (cnt*3)/4;
  624. for (i=0;i<cnt; i++)
  625. descr = descr->next;
  626. /* Set the new watermark, clear the old watermark */
  627. spin_lock_irqsave(&card->tx_chain.lock, flags);
  628. descr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
  629. if (card->low_watermark && card->low_watermark != descr)
  630. card->low_watermark->dmac_cmd_status =
  631. card->low_watermark->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
  632. card->low_watermark = descr;
  633. spin_unlock_irqrestore(&card->tx_chain.lock, flags);
  634. return cnt;
  635. }
  636. /**
  637. * spider_net_release_tx_chain - processes sent tx descriptors
  638. * @card: adapter structure
  639. * @brutal: if set, don't care about whether descriptor seems to be in use
  640. *
  641. * returns 0 if the tx ring is empty, otherwise 1.
  642. *
  643. * spider_net_release_tx_chain releases the tx descriptors that spider has
  644. * finished with (if non-brutal) or simply release tx descriptors (if brutal).
  645. * If some other context is calling this function, we return 1 so that we're
  646. * scheduled again (if we were scheduled) and will not lose initiative.
  647. */
  648. static int
  649. spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
  650. {
  651. struct spider_net_descr_chain *chain = &card->tx_chain;
  652. struct spider_net_descr *descr;
  653. struct sk_buff *skb;
  654. u32 buf_addr;
  655. unsigned long flags;
  656. int status;
  657. while (chain->tail != chain->head) {
  658. spin_lock_irqsave(&chain->lock, flags);
  659. descr = chain->tail;
  660. status = spider_net_get_descr_status(descr);
  661. switch (status) {
  662. case SPIDER_NET_DESCR_COMPLETE:
  663. card->netdev_stats.tx_packets++;
  664. card->netdev_stats.tx_bytes += descr->skb->len;
  665. break;
  666. case SPIDER_NET_DESCR_CARDOWNED:
  667. if (!brutal) {
  668. spin_unlock_irqrestore(&chain->lock, flags);
  669. return 1;
  670. }
  671. /* fallthrough, if we release the descriptors
  672. * brutally (then we don't care about
  673. * SPIDER_NET_DESCR_CARDOWNED) */
  674. case SPIDER_NET_DESCR_RESPONSE_ERROR:
  675. case SPIDER_NET_DESCR_PROTECTION_ERROR:
  676. case SPIDER_NET_DESCR_FORCE_END:
  677. if (netif_msg_tx_err(card))
  678. pr_err("%s: forcing end of tx descriptor "
  679. "with status x%02x\n",
  680. card->netdev->name, status);
  681. card->netdev_stats.tx_errors++;
  682. break;
  683. default:
  684. card->netdev_stats.tx_dropped++;
  685. if (!brutal) {
  686. spin_unlock_irqrestore(&chain->lock, flags);
  687. return 1;
  688. }
  689. }
  690. chain->tail = descr->next;
  691. descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
  692. skb = descr->skb;
  693. buf_addr = descr->buf_addr;
  694. spin_unlock_irqrestore(&chain->lock, flags);
  695. /* unmap the skb */
  696. if (skb) {
  697. pci_unmap_single(card->pdev, buf_addr, skb->len,
  698. PCI_DMA_TODEVICE);
  699. dev_kfree_skb(skb);
  700. }
  701. }
  702. return 0;
  703. }
  704. /**
  705. * spider_net_kick_tx_dma - enables TX DMA processing
  706. * @card: card structure
  707. * @descr: descriptor address to enable TX processing at
  708. *
  709. * This routine will start the transmit DMA running if
  710. * it is not already running. This routine need only be
  711. * called when queueing a new packet to an empty tx queue.
  712. * Writes the current tx chain head as start address
  713. * of the tx descriptor chain and enables the transmission
  714. * DMA engine.
  715. */
  716. static inline void
  717. spider_net_kick_tx_dma(struct spider_net_card *card)
  718. {
  719. struct spider_net_descr *descr;
  720. if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
  721. SPIDER_NET_TX_DMA_EN)
  722. goto out;
  723. descr = card->tx_chain.tail;
  724. for (;;) {
  725. if (spider_net_get_descr_status(descr) ==
  726. SPIDER_NET_DESCR_CARDOWNED) {
  727. spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
  728. descr->bus_addr);
  729. spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
  730. SPIDER_NET_DMA_TX_VALUE);
  731. break;
  732. }
  733. if (descr == card->tx_chain.head)
  734. break;
  735. descr = descr->next;
  736. }
  737. out:
  738. mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
  739. }
  740. /**
  741. * spider_net_xmit - transmits a frame over the device
  742. * @skb: packet to send out
  743. * @netdev: interface device structure
  744. *
  745. * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
  746. */
  747. static int
  748. spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
  749. {
  750. int cnt;
  751. struct spider_net_card *card = netdev_priv(netdev);
  752. struct spider_net_descr_chain *chain = &card->tx_chain;
  753. spider_net_release_tx_chain(card, 0);
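/* If the ring is nearly full (head is about to catch up with tail) or the
 * descriptor cannot be prepared, drop the packet and stop the queue. */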
  754. if ((chain->head->next == chain->tail->prev) ||
  755. (spider_net_prepare_tx_descr(card, skb) != 0)) {
  756. card->netdev_stats.tx_dropped++;
  757. netif_stop_queue(netdev);
  758. return NETDEV_TX_BUSY;
  759. }
  760. cnt = spider_net_set_low_watermark(card);
  761. if (cnt < 5)
  762. spider_net_kick_tx_dma(card);
  763. return NETDEV_TX_OK;
  764. }
  765. /**
  766. * spider_net_cleanup_tx_ring - cleans up the TX ring
  767. * @card: card structure
  768. *
  769. * spider_net_cleanup_tx_ring is called by either the tx_timer
  770. * or from the NAPI polling routine.
  771. * This routine releases resources associated with transmitted
  772. * packets, including updating the queue tail pointer.
  773. */
  774. static void
  775. spider_net_cleanup_tx_ring(struct spider_net_card *card)
  776. {
  777. if ((spider_net_release_tx_chain(card, 0) != 0) &&
  778. (card->netdev->flags & IFF_UP)) {
  779. spider_net_kick_tx_dma(card);
  780. netif_wake_queue(card->netdev);
  781. }
  782. }
  783. /**
  784. * spider_net_do_ioctl - called for device ioctls
  785. * @netdev: interface device structure
  786. * @ifr: request parameter structure for ioctl
  787. * @cmd: command code for ioctl
  788. *
  789. * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
  790. * -EOPNOTSUPP is returned, if an unknown ioctl was requested
  791. */
  792. static int
  793. spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  794. {
  795. switch (cmd) {
  796. default:
  797. return -EOPNOTSUPP;
  798. }
  799. }
  800. /**
  801. * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
  802. * @descr: descriptor to process
  803. * @card: card structure
  804. *
  805. * Fills out skb structure and passes the data to the stack.
  806. * The descriptor state is not changed.
  807. */
  808. static void
  809. spider_net_pass_skb_up(struct spider_net_descr *descr,
  810. struct spider_net_card *card)
  811. {
  812. struct sk_buff *skb;
  813. struct net_device *netdev;
  814. u32 data_status, data_error;
  815. data_status = descr->data_status;
  816. data_error = descr->data_error;
  817. netdev = card->netdev;
  818. skb = descr->skb;
  819. skb->dev = netdev;
  820. skb_put(skb, descr->valid_size);
  821. /* the card seems to add 2 bytes of junk in front
  822. * of the ethernet frame */
  823. #define SPIDER_MISALIGN 2
  824. skb_pull(skb, SPIDER_MISALIGN);
  825. skb->protocol = eth_type_trans(skb, netdev);
  826. /* checksum offload */
  827. if (card->options.rx_csum) {
  828. if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
  829. SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
  830. !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
  831. skb->ip_summed = CHECKSUM_UNNECESSARY;
  832. else
  833. skb->ip_summed = CHECKSUM_NONE;
  834. } else
  835. skb->ip_summed = CHECKSUM_NONE;
  836. if (data_status & SPIDER_NET_VLAN_PACKET) {
  837. /* further enhancements: HW-accel VLAN
  838. * vlan_hwaccel_receive_skb
  839. */
  840. }
  841. /* pass skb up to stack */
  842. netif_receive_skb(skb);
  843. /* update netdevice statistics */
  844. card->netdev_stats.rx_packets++;
  845. card->netdev_stats.rx_bytes += skb->len;
  846. }
  847. /**
  848. * spider_net_decode_one_descr - processes an rx descriptor
  849. * @card: card structure
  850. *
  851. * Returns 1 if a packet has been sent to the stack, otherwise 0
  852. *
  853. * Processes an rx descriptor by iommu-unmapping the data buffer and passing
  854. * the packet up to the stack. This function is called in softirq
  855. * context, e.g. either bottom half from interrupt or NAPI polling context
  856. */
  857. static int
  858. spider_net_decode_one_descr(struct spider_net_card *card)
  859. {
  860. struct spider_net_descr_chain *chain = &card->rx_chain;
  861. struct spider_net_descr *descr = chain->tail;
  862. int status;
  863. status = spider_net_get_descr_status(descr);
  864. /* Nothing in the descriptor, or ring must be empty */
  865. if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
  866. (status == SPIDER_NET_DESCR_NOT_IN_USE))
  867. return 0;
  868. /* descriptor definitively used -- move on tail */
  869. chain->tail = descr->next;
  870. /* unmap descriptor */
  871. pci_unmap_single(card->pdev, descr->buf_addr,
  872. SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
  873. if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
  874. (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
  875. (status == SPIDER_NET_DESCR_FORCE_END) ) {
  876. if (netif_msg_rx_err(card))
  877. pr_err("%s: dropping RX descriptor with state %d\n",
  878. card->netdev->name, status);
  879. card->netdev_stats.rx_dropped++;
  880. goto bad_desc;
  881. }
  882. if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
  883. (status != SPIDER_NET_DESCR_FRAME_END) ) {
  884. if (netif_msg_rx_err(card))
  885. pr_err("%s: RX descriptor with unkown state %d\n",
  886. card->netdev->name, status);
  887. card->spider_stats.rx_desc_unk_state++;
  888. goto bad_desc;
  889. }
  890. /* The cases we'll throw away the packet immediately */
  891. if (descr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
  892. if (netif_msg_rx_err(card))
  893. pr_err("%s: error in received descriptor found, "
  894. "data_status=x%08x, data_error=x%08x\n",
  895. card->netdev->name,
  896. descr->data_status, descr->data_error);
  897. goto bad_desc;
  898. }
  899. /* Ok, we've got a packet in descr */
  900. spider_net_pass_skb_up(descr, card);
  901. descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
  902. return 1;
  903. bad_desc:
  904. dev_kfree_skb_irq(descr->skb);
  905. descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
  906. return 0;
  907. }
  908. /**
  909. * spider_net_poll - NAPI poll function called by the stack to return packets
  910. * @netdev: interface device structure
  911. * @budget: number of packets we can pass to the stack at most
  912. *
  913. * returns 0 if no more packets are available to the driver/stack. Returns 1
  914. * if the quota is exceeded but the driver still has packets pending.
  915. *
  916. * spider_net_poll returns all packets from the rx descriptors to the stack
  917. * (using netif_receive_skb). If all/enough packets are up, the driver
  918. * reenables interrupts and returns 0. If not, 1 is returned.
  919. */
  920. static int
  921. spider_net_poll(struct net_device *netdev, int *budget)
  922. {
  923. struct spider_net_card *card = netdev_priv(netdev);
  924. int packets_to_do, packets_done = 0;
  925. int no_more_packets = 0;
  926. spider_net_cleanup_tx_ring(card);
  927. packets_to_do = min(*budget, netdev->quota);
  928. while (packets_to_do) {
  929. if (spider_net_decode_one_descr(card)) {
  930. packets_done++;
  931. packets_to_do--;
  932. } else {
  933. /* no more packets for the stack */
  934. no_more_packets = 1;
  935. break;
  936. }
  937. }
  938. netdev->quota -= packets_done;
  939. *budget -= packets_done;
  940. spider_net_refill_rx_chain(card);
  941. spider_net_enable_rxdmac(card);
  942. /* if all packets are in the stack, enable interrupts and return 0 */
  943. /* if not, return 1 */
  944. if (no_more_packets) {
  945. netif_rx_complete(netdev);
  946. spider_net_rx_irq_on(card);
  947. return 0;
  948. }
  949. return 1;
  950. }
  951. /**
  952. * spider_net_vlan_rx_reg - initializes VLAN structures in the driver and card
  953. * @netdev: interface device structure
  954. * @grp: vlan_group structure that is registered (NULL on destroying interface)
  955. */
  956. static void
  957. spider_net_vlan_rx_reg(struct net_device *netdev, struct vlan_group *grp)
  958. {
  959. /* further enhancement... yet to do */
  960. return;
  961. }
  962. /**
  963. * spider_net_vlan_rx_add - adds VLAN id to the card filter
  964. * @netdev: interface device structure
  965. * @vid: VLAN id to add
  966. */
  967. static void
  968. spider_net_vlan_rx_add(struct net_device *netdev, uint16_t vid)
  969. {
  970. /* further enhancement... yet to do */
  971. /* add vid to card's VLAN filter table */
  972. return;
  973. }
  974. /**
  975. * spider_net_vlan_rx_kill - removes VLAN id from the card filter
  976. * @netdev: interface device structure
  977. * @vid: VLAN id to remove
  978. */
  979. static void
  980. spider_net_vlan_rx_kill(struct net_device *netdev, uint16_t vid)
  981. {
  982. /* further enhancement... yet to do */
  983. /* remove vid from card's VLAN filter table */
  984. }
  985. /**
  986. * spider_net_get_stats - get interface statistics
  987. * @netdev: interface device structure
  988. *
  989. * returns the interface statistics residing in the spider_net_card struct
  990. */
  991. static struct net_device_stats *
  992. spider_net_get_stats(struct net_device *netdev)
  993. {
  994. struct spider_net_card *card = netdev_priv(netdev);
  995. struct net_device_stats *stats = &card->netdev_stats;
  996. return stats;
  997. }
  998. /**
  999. * spider_net_change_mtu - changes the MTU of an interface
  1000. * @netdev: interface device structure
  1001. * @new_mtu: new MTU value
  1002. *
  1003. * returns 0 on success, <0 on failure
  1004. */
  1005. static int
  1006. spider_net_change_mtu(struct net_device *netdev, int new_mtu)
  1007. {
  1008. /* no need to re-alloc skbs or so -- the max mtu is about 2.3k
  1009. * and mtu is outbound only anyway */
  1010. if ( (new_mtu < SPIDER_NET_MIN_MTU ) ||
  1011. (new_mtu > SPIDER_NET_MAX_MTU) )
  1012. return -EINVAL;
  1013. netdev->mtu = new_mtu;
  1014. return 0;
  1015. }
  1016. /**
  1017. * spider_net_set_mac - sets the MAC of an interface
  1018. * @netdev: interface device structure
  1019. * @p: pointer to a struct sockaddr holding the new MAC address
  1020. *
  1021. * Returns 0 on success, <0 on failure. Reprograms the unicast MAC address
  1022. * registers and verifies the change by reading the address back.
  1023. */
  1024. static int
  1025. spider_net_set_mac(struct net_device *netdev, void *p)
  1026. {
  1027. struct spider_net_card *card = netdev_priv(netdev);
  1028. u32 macl, macu, regvalue;
  1029. struct sockaddr *addr = p;
  1030. if (!is_valid_ether_addr(addr->sa_data))
  1031. return -EADDRNOTAVAIL;
  1032. /* switch off GMACTPE and GMACRPE */
  1033. regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
  1034. regvalue &= ~((1 << 5) | (1 << 6));
  1035. spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
  1036. /* write mac */
  1037. macu = (addr->sa_data[0]<<24) + (addr->sa_data[1]<<16) +
  1038. (addr->sa_data[2]<<8) + (addr->sa_data[3]);
  1039. macl = (addr->sa_data[4]<<8) + (addr->sa_data[5]);
  1040. spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
  1041. spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
  1042. /* switch GMACTPE and GMACRPE back on */
  1043. regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
  1044. regvalue |= ((1 << 5) | (1 << 6));
  1045. spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
  1046. spider_net_set_promisc(card);
  1047. /* check whether we have been successful */
  1048. if (spider_net_get_mac_address(netdev))
  1049. return -EADDRNOTAVAIL;
  1050. if (memcmp(netdev->dev_addr,addr->sa_data,netdev->addr_len))
  1051. return -EADDRNOTAVAIL;
  1052. return 0;
  1053. }
  1054. /**
  1055. * spider_net_handle_error_irq - handles errors raised by an interrupt
  1056. * @card: card structure
  1057. * @status_reg: interrupt status register 0 (GHIINT0STS)
  1058. *
  1059. * spider_net_handle_error_irq treats or ignores all error conditions
  1060. * found when an interrupt is presented
  1061. */
  1062. static void
  1063. spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
  1064. {
  1065. u32 error_reg1, error_reg2;
  1066. u32 i;
  1067. int show_error = 1;
  1068. error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
  1069. error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
  1070. /* check GHIINT0STS ************************************/
  1071. if (status_reg)
  1072. for (i = 0; i < 32; i++)
  1073. if (status_reg & (1<<i))
  1074. switch (i)
  1075. {
  1076. /* let error_reg1 and error_reg2 evaluation decide, what to do
  1077. case SPIDER_NET_PHYINT:
  1078. case SPIDER_NET_GMAC2INT:
  1079. case SPIDER_NET_GMAC1INT:
  1080. case SPIDER_NET_GFIFOINT:
  1081. case SPIDER_NET_DMACINT:
  1082. case SPIDER_NET_GSYSINT:
  1083. break; */
  1084. case SPIDER_NET_GIPSINT:
  1085. show_error = 0;
  1086. break;
  1087. case SPIDER_NET_GPWOPCMPINT:
  1088. /* PHY write operation completed */
  1089. show_error = 0;
  1090. break;
  1091. case SPIDER_NET_GPROPCMPINT:
  1092. /* PHY read operation completed */
  1093. /* we don't use semaphores, as we poll for the completion
  1094. * of the read operation in spider_net_read_phy. Should take
  1095. * about 50 us */
  1096. show_error = 0;
  1097. break;
  1098. case SPIDER_NET_GPWFFINT:
  1099. /* PHY command queue full */
  1100. if (netif_msg_intr(card))
  1101. pr_err("PHY write queue full\n");
  1102. show_error = 0;
  1103. break;
  1104. /* case SPIDER_NET_GRMDADRINT: not used. print a message */
  1105. /* case SPIDER_NET_GRMARPINT: not used. print a message */
  1106. /* case SPIDER_NET_GRMMPINT: not used. print a message */
  1107. case SPIDER_NET_GDTDEN0INT:
  1108. /* someone has set TX_DMA_EN to 0 */
  1109. show_error = 0;
  1110. break;
  1111. case SPIDER_NET_GDDDEN0INT: /* fallthrough */
  1112. case SPIDER_NET_GDCDEN0INT: /* fallthrough */
  1113. case SPIDER_NET_GDBDEN0INT: /* fallthrough */
  1114. case SPIDER_NET_GDADEN0INT:
  1115. /* someone has set RX_DMA_EN to 0 */
  1116. show_error = 0;
  1117. break;
  1118. /* RX interrupts */
  1119. case SPIDER_NET_GDDFDCINT:
  1120. case SPIDER_NET_GDCFDCINT:
  1121. case SPIDER_NET_GDBFDCINT:
  1122. case SPIDER_NET_GDAFDCINT:
  1123. /* case SPIDER_NET_GDNMINT: not used. print a message */
  1124. /* case SPIDER_NET_GCNMINT: not used. print a message */
  1125. /* case SPIDER_NET_GBNMINT: not used. print a message */
  1126. /* case SPIDER_NET_GANMINT: not used. print a message */
  1127. /* case SPIDER_NET_GRFNMINT: not used. print a message */
  1128. show_error = 0;
  1129. break;
  1130. /* TX interrupts */
  1131. case SPIDER_NET_GDTFDCINT:
  1132. show_error = 0;
  1133. break;
  1134. case SPIDER_NET_GTTEDINT:
  1135. show_error = 0;
  1136. break;
  1137. case SPIDER_NET_GDTDCEINT:
  1138. /* chain end. If a descriptor should be sent, kick off
  1139. * tx dma
  1140. if (card->tx_chain.tail != card->tx_chain.head)
  1141. spider_net_kick_tx_dma(card);
  1142. */
  1143. show_error = 0;
  1144. break;
  1145. /* case SPIDER_NET_G1TMCNTINT: not used. print a message */
  1146. /* case SPIDER_NET_GFREECNTINT: not used. print a message */
  1147. }
  1148. /* check GHIINT1STS ************************************/
  1149. if (error_reg1)
  1150. for (i = 0; i < 32; i++)
  1151. if (error_reg1 & (1<<i))
  1152. switch (i)
  1153. {
  1154. case SPIDER_NET_GTMFLLINT:
  1155. if (netif_msg_intr(card) && net_ratelimit())
  1156. pr_err("Spider TX RAM full\n");
  1157. show_error = 0;
  1158. break;
  1159. case SPIDER_NET_GRFDFLLINT: /* fallthrough */
  1160. case SPIDER_NET_GRFCFLLINT: /* fallthrough */
  1161. case SPIDER_NET_GRFBFLLINT: /* fallthrough */
  1162. case SPIDER_NET_GRFAFLLINT: /* fallthrough */
  1163. case SPIDER_NET_GRMFLLINT:
  1164. if (netif_msg_intr(card) && net_ratelimit())
  1165. pr_err("Spider RX RAM full, incoming packets "
  1166. "might be discarded!\n");
  1167. spider_net_rx_irq_off(card);
  1168. netif_rx_schedule(card->netdev);
  1169. show_error = 0;
  1170. break;
  1171. /* case SPIDER_NET_GTMSHTINT: problem, print a message */
  1172. case SPIDER_NET_GDTINVDINT:
  1173. /* allrighty. tx from previous descr ok */
  1174. show_error = 0;
  1175. break;
  1176. /* chain end */
  1177. case SPIDER_NET_GDDDCEINT: /* fallthrough */
  1178. case SPIDER_NET_GDCDCEINT: /* fallthrough */
  1179. case SPIDER_NET_GDBDCEINT: /* fallthrough */
  1180. case SPIDER_NET_GDADCEINT:
  1181. if (netif_msg_intr(card) && net_ratelimit())
  1182. pr_err("got descriptor chain end interrupt, "
  1183. "restarting DMAC %c.\n",
  1184. 'D'-(i-SPIDER_NET_GDDDCEINT)/3);
  1185. spider_net_refill_rx_chain(card);
  1186. spider_net_enable_rxdmac(card);
  1187. show_error = 0;
  1188. break;
  1189. /* invalid descriptor */
  1190. case SPIDER_NET_GDDINVDINT: /* fallthrough */
  1191. case SPIDER_NET_GDCINVDINT: /* fallthrough */
  1192. case SPIDER_NET_GDBINVDINT: /* fallthrough */
  1193. case SPIDER_NET_GDAINVDINT:
  1194. /* could happen when rx chain is full */
  1195. spider_net_refill_rx_chain(card);
  1196. spider_net_enable_rxdmac(card);
  1197. show_error = 0;
  1198. break;
  1199. /* case SPIDER_NET_GDTRSERINT: problem, print a message */
  1200. /* case SPIDER_NET_GDDRSERINT: problem, print a message */
  1201. /* case SPIDER_NET_GDCRSERINT: problem, print a message */
  1202. /* case SPIDER_NET_GDBRSERINT: problem, print a message */
  1203. /* case SPIDER_NET_GDARSERINT: problem, print a message */
  1204. /* case SPIDER_NET_GDSERINT: problem, print a message */
  1205. /* case SPIDER_NET_GDTPTERINT: problem, print a message */
  1206. /* case SPIDER_NET_GDDPTERINT: problem, print a message */
  1207. /* case SPIDER_NET_GDCPTERINT: problem, print a message */
  1208. /* case SPIDER_NET_GDBPTERINT: problem, print a message */
  1209. /* case SPIDER_NET_GDAPTERINT: problem, print a message */
  1210. default:
  1211. show_error = 1;
  1212. break;
  1213. }
  1214. /* check GHIINT2STS ************************************/
  1215. if (error_reg2)
  1216. for (i = 0; i < 32; i++)
  1217. if (error_reg2 & (1<<i))
  1218. switch (i)
  1219. {
  1220. /* there is nothing we can (want to) do at this time. Log a
  1221. * message, we can switch on and off the specific values later on
  1222. case SPIDER_NET_GPROPERINT:
  1223. case SPIDER_NET_GMCTCRSNGINT:
  1224. case SPIDER_NET_GMCTLCOLINT:
  1225. case SPIDER_NET_GMCTTMOTINT:
  1226. case SPIDER_NET_GMCRCAERINT:
  1227. case SPIDER_NET_GMCRCALERINT:
  1228. case SPIDER_NET_GMCRALNERINT:
  1229. case SPIDER_NET_GMCROVRINT:
  1230. case SPIDER_NET_GMCRRNTINT:
  1231. case SPIDER_NET_GMCRRXERINT:
  1232. case SPIDER_NET_GTITCSERINT:
  1233. case SPIDER_NET_GTIFMTERINT:
  1234. case SPIDER_NET_GTIPKTRVKINT:
  1235. case SPIDER_NET_GTISPINGINT:
  1236. case SPIDER_NET_GTISADNGINT:
  1237. case SPIDER_NET_GTISPDNGINT:
  1238. case SPIDER_NET_GRIFMTERINT:
  1239. case SPIDER_NET_GRIPKTRVKINT:
  1240. case SPIDER_NET_GRISPINGINT:
  1241. case SPIDER_NET_GRISADNGINT:
  1242. case SPIDER_NET_GRISPDNGINT:
  1243. break;
  1244. */
  1245. default:
  1246. break;
  1247. }
  1248. if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
  1249. pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, "
  1250. "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
  1251. card->netdev->name,
  1252. status_reg, error_reg1, error_reg2);
  1253. /* clear interrupt sources */
  1254. spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
  1255. spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
  1256. }
  1257. /**
  1258. * spider_net_interrupt - interrupt handler for spider_net
  1259. * @irq: interrupt number
  1260. * @ptr: pointer to net_device
  1262. *
  1263. * returns IRQ_HANDLED if the interrupt was raised by the card, or IRQ_NONE
  1264. * if no interrupt from the card was found.
  1265. *
  1266. * This is the interrupt handler, that turns off
  1267. * interrupts for this device and makes the stack poll the driver
  1268. */
  1269. static irqreturn_t
  1270. spider_net_interrupt(int irq, void *ptr)
  1271. {
  1272. struct net_device *netdev = ptr;
  1273. struct spider_net_card *card = netdev_priv(netdev);
  1274. u32 status_reg;
  1275. status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
  1276. if (!status_reg)
  1277. return IRQ_NONE;
  1278. if (status_reg & SPIDER_NET_RXINT ) {
  1279. spider_net_rx_irq_off(card);
  1280. netif_rx_schedule(netdev);
  1281. }
  1282. if (status_reg & SPIDER_NET_TXINT)
  1283. netif_rx_schedule(netdev);
  1284. if (status_reg & SPIDER_NET_ERRINT )
  1285. spider_net_handle_error_irq(card, status_reg);
  1286. /* clear interrupt sources */
  1287. spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
  1288. return IRQ_HANDLED;
  1289. }
  1290. #ifdef CONFIG_NET_POLL_CONTROLLER
  1291. /**
  1292. * spider_net_poll_controller - artificial interrupt for netconsole etc.
  1293. * @netdev: interface device structure
  1294. *
  1295. * see Documentation/networking/netconsole.txt
  1296. */
  1297. static void
  1298. spider_net_poll_controller(struct net_device *netdev)
  1299. {
  1300. disable_irq(netdev->irq);
  1301. spider_net_interrupt(netdev->irq, netdev);
  1302. enable_irq(netdev->irq);
  1303. }
  1304. #endif /* CONFIG_NET_POLL_CONTROLLER */
/**
 * spider_net_init_card - initializes the card
 * @card: card structure
 *
 * spider_net_init_card initializes the card so that other registers can
 * be used
 */
static void
spider_net_init_card(struct spider_net_card *card)
{
	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
			     SPIDER_NET_CKRCTRL_STOP_VALUE);
	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
			     SPIDER_NET_CKRCTRL_RUN_VALUE);
}
/**
 * spider_net_enable_card - enables the card by setting all kinds of regs
 * @card: card structure
 *
 * spider_net_enable_card sets a lot of SMMIO registers to enable the device
 */
static void
spider_net_enable_card(struct spider_net_card *card)
{
	int i;
	/* the following array consists of (register),(value) pairs
	 * that are set in this function. A register of 0 ends the list */
	u32 regs[][2] = {
		{ SPIDER_NET_GRESUMINTNUM, 0 },
		{ SPIDER_NET_GREINTNUM, 0 },

		/* set interrupt frame number registers */
		/* clear the single DMA engine registers first */
		{ SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
		{ SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
		{ SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
		{ SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
		/* then set what we really need */
		{ SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },

		/* timer counter registers and stuff */
		{ SPIDER_NET_GFREECNNUM, 0 },
		{ SPIDER_NET_GONETIMENUM, 0 },
		{ SPIDER_NET_GTOUTFRMNUM, 0 },

		/* RX mode setting */
		{ SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
		/* TX mode setting */
		{ SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
		/* IPSEC mode setting */
		{ SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },

		{ SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },

		{ SPIDER_NET_GMRWOLCTRL, 0 },
		{ SPIDER_NET_GTESTMD, 0x10000000 },
		{ SPIDER_NET_GTTQMSK, 0x00400040 },

		{ SPIDER_NET_GMACINTEN, 0 },

		/* flow control stuff */
		{ SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
		{ SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },

		{ SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
		{ 0, 0}
	};

	i = 0;
	while (regs[i][0]) {
		spider_net_write_reg(card, regs[i][0], regs[i][1]);
		i++;
	}

	/* clear unicast filter table entries 1 to 14 */
	for (i = 1; i <= 14; i++) {
		spider_net_write_reg(card,
				     SPIDER_NET_GMRUAFILnR + i * 8,
				     0x00080000);
		spider_net_write_reg(card,
				     SPIDER_NET_GMRUAFILnR + i * 8 + 4,
				     0x00000000);
	}

	spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);

	spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);

	/* set chain tail address for RX chains and
	 * enable DMA */
	spider_net_enable_rxchtails(card);
	spider_net_enable_rxdmac(card);

	spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);

	spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
			     SPIDER_NET_LENLMT_VALUE);
	spider_net_write_reg(card, SPIDER_NET_GMACMODE,
			     SPIDER_NET_MACMODE_VALUE);
	spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
			     SPIDER_NET_OPMODE_VALUE);

	/* set interrupt mask registers */
	spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
			     SPIDER_NET_INT0_MASK_VALUE);
	spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
			     SPIDER_NET_INT1_MASK_VALUE);
	spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
			     SPIDER_NET_INT2_MASK_VALUE);

	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
			     SPIDER_NET_GDTBSTA);
}
/**
 * spider_net_open - called upon ifconfig up
 * @netdev: interface device structure
 *
 * returns 0 on success, <0 on failure
 *
 * spider_net_open allocates all the descriptors and memory needed for
 * operation, sets up multicast list and enables interrupts
 */
int
spider_net_open(struct net_device *netdev)
{
	struct spider_net_card *card = netdev_priv(netdev);
	struct spider_net_descr *descr;
	int result;

	result = spider_net_init_chain(card, &card->tx_chain);
	if (result)
		goto alloc_tx_failed;
	card->low_watermark = NULL;

	result = spider_net_init_chain(card, &card->rx_chain);
	if (result)
		goto alloc_rx_failed;

	/* Make a ring of bus addresses */
	descr = card->rx_chain.ring;
	do {
		descr->next_descr_addr = descr->next->bus_addr;
		descr = descr->next;
	} while (descr != card->rx_chain.ring);

	/* Allocate rx skbs */
	if (spider_net_alloc_rx_skbs(card))
		goto alloc_skbs_failed;

	spider_net_set_multi(netdev);

	/* further enhancement: setup hw vlan, if needed */

	result = -EBUSY;
	if (request_irq(netdev->irq, spider_net_interrupt,
			IRQF_SHARED, netdev->name, netdev))
		goto register_int_failed;

	spider_net_enable_card(card);

	netif_start_queue(netdev);
	netif_carrier_on(netdev);
	netif_poll_enable(netdev);

	return 0;

register_int_failed:
	spider_net_free_rx_chain_contents(card);
alloc_skbs_failed:
	spider_net_free_chain(card, &card->rx_chain);
alloc_rx_failed:
	spider_net_free_chain(card, &card->tx_chain);
alloc_tx_failed:
	return result;
}
/**
 * spider_net_setup_phy - setup PHY
 * @card: card structure
 *
 * returns 0 on success, <0 on failure
 *
 * spider_net_setup_phy is used as part of spider_net_probe.
 * Sets the PHY to 1000 Mbps
 **/
static int
spider_net_setup_phy(struct spider_net_card *card)
{
	struct mii_phy *phy = &card->phy;

	spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
			     SPIDER_NET_DMASEL_VALUE);
	spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
			     SPIDER_NET_PHY_CTRL_VALUE);
	phy->mii_id = 1;
	phy->dev = card->netdev;
	phy->mdio_read = spider_net_read_phy;
	phy->mdio_write = spider_net_write_phy;

	mii_phy_probe(phy, phy->mii_id);

	if (phy->def->ops->setup_forced)
		phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL);

	phy->def->ops->enable_fiber(phy);

	phy->def->ops->read_link(phy);
	pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name,
		phy->speed, phy->duplex == 1 ? "Full" : "Half");

	return 0;
}
/**
 * spider_net_download_firmware - loads firmware into the adapter
 * @card: card structure
 * @firmware_ptr: pointer to firmware data
 *
 * spider_net_download_firmware loads the firmware data into the
 * adapter. It assumes the length etc. to be correct.
 */
static int
spider_net_download_firmware(struct spider_net_card *card,
			     const void *firmware_ptr)
{
	int sequencer, i;
	const u32 *fw_ptr = firmware_ptr;

	/* stop sequencers */
	spider_net_write_reg(card, SPIDER_NET_GSINIT,
			     SPIDER_NET_STOP_SEQ_VALUE);

	for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
	     sequencer++) {
		spider_net_write_reg(card,
				     SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
		for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
			spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
					     sequencer * 8, *fw_ptr);
			fw_ptr++;
		}
	}

	if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
		return -EIO;

	spider_net_write_reg(card, SPIDER_NET_GSINIT,
			     SPIDER_NET_RUN_SEQ_VALUE);

	return 0;
}
/**
 * spider_net_init_firmware - reads in firmware parts
 * @card: card structure
 *
 * Returns 0 on success, <0 on failure
 *
 * spider_net_init_firmware opens the sequencer firmware and does some basic
 * checks. This function opens and releases the firmware structure. A call
 * to download the firmware is performed before the release.
 *
 * Firmware format
 * ===============
 * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
 * the program for each sequencer. Use the command
 *    tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
 *         Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
 *         Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
 *
 * to generate spider_fw.bin, if you have sequencer programs with something
 * like the following contents for each sequencer:
 *    <ONE LINE COMMENT>
 *    <FIRST 4-BYTES-WORD FOR SEQUENCER>
 *    <SECOND 4-BYTES-WORD FOR SEQUENCER>
 *    ...
 *    <1024th 4-BYTES-WORD FOR SEQUENCER>
 * (a user-space sketch equivalent to this pipeline follows this function)
 */
static int
spider_net_init_firmware(struct spider_net_card *card)
{
	struct firmware *firmware = NULL;
	struct device_node *dn;
	const u8 *fw_prop = NULL;
	int err = -ENOENT;
	int fw_size;

	if (request_firmware((const struct firmware **)&firmware,
			     SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
		if (firmware->size != SPIDER_NET_FIRMWARE_LEN) {
			if (netif_msg_probe(card))
				pr_err("Incorrect size of spidernet firmware in "
				       "filesystem. Looking in host firmware...\n");
			release_firmware(firmware);
			goto try_host_fw;
		}

		err = spider_net_download_firmware(card, firmware->data);

		release_firmware(firmware);
		if (err)
			goto try_host_fw;

		goto done;
	}

try_host_fw:
	dn = pci_device_to_OF_node(card->pdev);
	if (!dn)
		goto out_err;

	fw_prop = get_property(dn, "firmware", &fw_size);
	if (!fw_prop)
		goto out_err;

	if (fw_size != SPIDER_NET_FIRMWARE_LEN) {
		if (netif_msg_probe(card))
			pr_err("Incorrect size of spidernet firmware in "
			       "host firmware\n");
		goto done;
	}

	err = spider_net_download_firmware(card, fw_prop);

done:
	return err;
out_err:
	if (netif_msg_probe(card))
		pr_err("Couldn't find spidernet firmware in filesystem "
		       "or host firmware\n");
	return err;
}
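/*
 * Illustrative only (editor-added sketch, not part of the driver): a small
 * user-space program that assembles spider_fw.bin from the six sequencer
 * dumps, equivalent to the tail/xxd pipeline described in the comment
 * above.  The input file names and the one-comment-line-per-dump layout are
 * assumptions taken from that comment; the sketch is kept out of the kernel
 * build with #if 0.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
	static const char *seq_files[6] = {
		"Seq_code1_0x088.txt", "Seq_code2_0x090.txt",
		"Seq_code3_0x098.txt", "Seq_code4_0x0A0.txt",
		"Seq_code5_0x0A8.txt", "Seq_code6_0x0B0.txt",
	};
	FILE *out = fopen("spider_fw.bin", "wb");
	int i;

	if (!out)
		return 1;

	for (i = 0; i < 6; i++) {
		FILE *in = fopen(seq_files[i], "r");
		char line[128];
		int words = 0;

		if (!in)
			return 1;
		/* skip the one-line comment at the top of each dump */
		if (!fgets(line, sizeof(line), in))
			return 1;
		/* remaining lines: one 32-bit word in hex per line,
		 * written out byte for byte as xxd -r -p would do */
		while (words < 1024 && fgets(line, sizeof(line), in)) {
			uint32_t word = strtoul(line, NULL, 16);
			unsigned char buf[4] = {
				word >> 24, word >> 16, word >> 8, word
			};
			if (fwrite(buf, 1, 4, out) != 4)
				return 1;
			words++;
		}
		fclose(in);
	}
	fclose(out);
	return 0;
}
#endif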
/**
 * spider_net_workaround_rxramfull - work around firmware bug
 * @card: card structure
 *
 * no return value
 **/
static void
spider_net_workaround_rxramfull(struct spider_net_card *card)
{
	int i, sequencer = 0;

	/* cancel reset */
	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
			     SPIDER_NET_CKRCTRL_RUN_VALUE);

	/* empty sequencer data */
	for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
	     sequencer++) {
		spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
				     sequencer * 8, 0x0);
		for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
			spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
					     sequencer * 8, 0x0);
		}
	}

	/* set sequencer operation */
	spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);

	/* reset */
	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
			     SPIDER_NET_CKRCTRL_STOP_VALUE);
}
/**
 * spider_net_stop - called upon ifconfig down
 * @netdev: interface device structure
 *
 * always returns 0
 */
int
spider_net_stop(struct net_device *netdev)
{
	struct spider_net_card *card = netdev_priv(netdev);

	netif_poll_disable(netdev);
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	del_timer_sync(&card->tx_timer);

	/* disable/mask all interrupts */
	spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
	spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
	spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);

	/* free_irq(netdev->irq, netdev);*/
	free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev);

	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
			     SPIDER_NET_DMA_TX_FEND_VALUE);

	/* turn off DMA, force end */
	spider_net_disable_rxdmac(card);

	/* release chains */
	spider_net_release_tx_chain(card, 1);
	spider_net_free_rx_chain_contents(card);

	spider_net_free_chain(card, &card->tx_chain);
	spider_net_free_chain(card, &card->rx_chain);

	return 0;
}
/**
 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
 * function (must not be called from interrupt context)
 * @work: pointer to the tx_timeout_task member of the card structure
 *
 * called as task when tx hangs, resets interface (if interface is up)
 */
static void
spider_net_tx_timeout_task(struct work_struct *work)
{
	struct spider_net_card *card =
		container_of(work, struct spider_net_card, tx_timeout_task);
	struct net_device *netdev = card->netdev;

	if (!(netdev->flags & IFF_UP))
		goto out;

	netif_device_detach(netdev);
	spider_net_stop(netdev);
	spider_net_workaround_rxramfull(card);
	spider_net_init_card(card);

	if (spider_net_setup_phy(card))
		goto out;
	if (spider_net_init_firmware(card))
		goto out;

	spider_net_open(netdev);
	spider_net_kick_tx_dma(card);
	netif_device_attach(netdev);

out:
	atomic_dec(&card->tx_timeout_task_counter);
}
/**
 * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
 * @netdev: interface device structure
 *
 * called if tx hangs. Schedules a task that resets the interface
 */
static void
spider_net_tx_timeout(struct net_device *netdev)
{
	struct spider_net_card *card;

	card = netdev_priv(netdev);
	atomic_inc(&card->tx_timeout_task_counter);
	if (netdev->flags & IFF_UP)
		schedule_work(&card->tx_timeout_task);
	else
		atomic_dec(&card->tx_timeout_task_counter);
	card->spider_stats.tx_timeouts++;
}
/**
 * spider_net_setup_netdev_ops - initialization of net_device operations
 * @netdev: net_device structure
 *
 * fills out function pointers in the net_device structure
 */
static void
spider_net_setup_netdev_ops(struct net_device *netdev)
{
	netdev->open = &spider_net_open;
	netdev->stop = &spider_net_stop;
	netdev->hard_start_xmit = &spider_net_xmit;
	netdev->get_stats = &spider_net_get_stats;
	netdev->set_multicast_list = &spider_net_set_multi;
	netdev->set_mac_address = &spider_net_set_mac;
	netdev->change_mtu = &spider_net_change_mtu;
	netdev->do_ioctl = &spider_net_do_ioctl;
	/* tx watchdog */
	netdev->tx_timeout = &spider_net_tx_timeout;
	netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
	/* NAPI */
	netdev->poll = &spider_net_poll;
	netdev->weight = SPIDER_NET_NAPI_WEIGHT;
	/* HW VLAN */
	netdev->vlan_rx_register = &spider_net_vlan_rx_reg;
	netdev->vlan_rx_add_vid = &spider_net_vlan_rx_add;
	netdev->vlan_rx_kill_vid = &spider_net_vlan_rx_kill;
#ifdef CONFIG_NET_POLL_CONTROLLER
	/* poll controller */
	netdev->poll_controller = &spider_net_poll_controller;
#endif /* CONFIG_NET_POLL_CONTROLLER */
	/* ethtool ops */
	netdev->ethtool_ops = &spider_net_ethtool_ops;
}
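/*
 * Editor's note, for comparison only: on kernel trees that provide
 * struct net_device_ops (an assumption about newer kernels than this
 * listing), the same hooks would typically be grouped into one const
 * ops table and attached via netdev->netdev_ops.  A minimal sketch,
 * kept out of the build with #if 0:
 */
#if 0
static const struct net_device_ops spider_net_netdev_ops = {
	.ndo_open		= spider_net_open,
	.ndo_stop		= spider_net_stop,
	.ndo_start_xmit		= spider_net_xmit,
	.ndo_set_mac_address	= spider_net_set_mac,
	.ndo_do_ioctl		= spider_net_do_ioctl,
	.ndo_change_mtu		= spider_net_change_mtu,
	.ndo_tx_timeout		= spider_net_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= spider_net_poll_controller,
#endif
};

/* spider_net_setup_netdev_ops() would then reduce to roughly:
 *	netdev->netdev_ops = &spider_net_netdev_ops;
 *	netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
 *	netdev->ethtool_ops = &spider_net_ethtool_ops;
 */
#endif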
/**
 * spider_net_setup_netdev - initialization of net_device
 * @card: card structure
 *
 * Returns 0 on success or <0 on failure
 *
 * spider_net_setup_netdev initializes the net_device structure
 **/
static int
spider_net_setup_netdev(struct spider_net_card *card)
{
	int result;
	struct net_device *netdev = card->netdev;
	struct device_node *dn;
	struct sockaddr addr;
	const u8 *mac;

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &card->pdev->dev);

	pci_set_drvdata(card->pdev, netdev);

	init_timer(&card->tx_timer);
	card->tx_timer.function =
		(void (*)(unsigned long)) spider_net_cleanup_tx_ring;
	card->tx_timer.data = (unsigned long) card;
	netdev->irq = card->pdev->irq;

	card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;

	card->tx_chain.num_desc = tx_descriptors;
	card->rx_chain.num_desc = rx_descriptors;

	spider_net_setup_netdev_ops(netdev);

	netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX;
	/* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
	 *		NETIF_F_HW_VLAN_FILTER */

	dn = pci_device_to_OF_node(card->pdev);
	if (!dn)
		return -EIO;

	mac = get_property(dn, "local-mac-address", NULL);
	if (!mac)
		return -EIO;
	memcpy(addr.sa_data, mac, ETH_ALEN);

	result = spider_net_set_mac(netdev, &addr);
	if ((result) && (netif_msg_probe(card)))
		pr_err("Failed to set MAC address: %i\n", result);

	result = register_netdev(netdev);
	if (result) {
		if (netif_msg_probe(card))
			pr_err("Couldn't register net_device: %i\n", result);
		return result;
	}

	if (netif_msg_probe(card))
		pr_info("Initialized device %s.\n", netdev->name);

	return 0;
}
/**
 * spider_net_alloc_card - allocates net_device and card structure
 *
 * returns the card structure or NULL in case of errors
 *
 * the card and net_device structures are linked to each other
 */
static struct spider_net_card *
spider_net_alloc_card(void)
{
	struct net_device *netdev;
	struct spider_net_card *card;

	netdev = alloc_etherdev(sizeof(struct spider_net_card));
	if (!netdev)
		return NULL;

	card = netdev_priv(netdev);
	card->netdev = netdev;
	card->msg_enable = SPIDER_NET_DEFAULT_MSG;
	INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
	init_waitqueue_head(&card->waitq);
	atomic_set(&card->tx_timeout_task_counter, 0);

	return card;
}
/**
 * spider_net_undo_pci_setup - releases PCI resources
 * @card: card structure
 *
 * spider_net_undo_pci_setup releases the mapped regions
 */
static void
spider_net_undo_pci_setup(struct spider_net_card *card)
{
	iounmap(card->regs);
	pci_release_regions(card->pdev);
}
/**
 * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
 * @pdev: PCI device
 *
 * Returns the card structure or NULL if any errors occur
 *
 * spider_net_setup_pci_dev initializes pdev and together with the
 * functions called in spider_net_open configures the device so that
 * data can be transferred over it
 * The net_device structure is attached to the card structure, if the
 * function returns without error.
 **/
static struct spider_net_card *
spider_net_setup_pci_dev(struct pci_dev *pdev)
{
	struct spider_net_card *card;
	unsigned long mmio_start, mmio_len;

	if (pci_enable_device(pdev)) {
		pr_err("Couldn't enable PCI device\n");
		return NULL;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("Couldn't find proper PCI device base address.\n");
		goto out_disable_dev;
	}

	if (pci_request_regions(pdev, spider_net_driver_name)) {
		pr_err("Couldn't obtain PCI resources, aborting.\n");
		goto out_disable_dev;
	}

	pci_set_master(pdev);

	card = spider_net_alloc_card();
	if (!card) {
		pr_err("Couldn't allocate net_device structure, "
		       "aborting.\n");
		goto out_release_regions;
	}
	card->pdev = pdev;

	/* fetch base address and length of first resource */
	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	card->netdev->mem_start = mmio_start;
	card->netdev->mem_end = mmio_start + mmio_len;
	card->regs = ioremap(mmio_start, mmio_len);

	if (!card->regs) {
		pr_err("Couldn't ioremap the PCI resource, aborting.\n");
		goto out_free_netdev;
	}

	return card;

out_free_netdev:
	free_netdev(card->netdev);
out_release_regions:
	pci_release_regions(pdev);
out_disable_dev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return NULL;
}
/**
 * spider_net_probe - initialization of a device
 * @pdev: PCI device
 * @ent: entry in the device id list
 *
 * Returns 0 on success, <0 on failure
 *
 * spider_net_probe initializes pdev and registers a net_device
 * structure for it. After that, the device can be ifconfig'ed up
 **/
static int __devinit
spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = -EIO;
	struct spider_net_card *card;

	card = spider_net_setup_pci_dev(pdev);
	if (!card)
		goto out;

	spider_net_workaround_rxramfull(card);
	spider_net_init_card(card);

	err = spider_net_setup_phy(card);
	if (err)
		goto out_undo_pci;

	err = spider_net_init_firmware(card);
	if (err)
		goto out_undo_pci;

	err = spider_net_setup_netdev(card);
	if (err)
		goto out_undo_pci;

	return 0;

out_undo_pci:
	spider_net_undo_pci_setup(card);
	free_netdev(card->netdev);
out:
	return err;
}
/**
 * spider_net_remove - removal of a device
 * @pdev: PCI device
 *
 * spider_net_remove is called to remove the device and unregisters the
 * net_device
 **/
static void __devexit
spider_net_remove(struct pci_dev *pdev)
{
	struct net_device *netdev;
	struct spider_net_card *card;

	netdev = pci_get_drvdata(pdev);
	card = netdev_priv(netdev);

	wait_event(card->waitq,
		   atomic_read(&card->tx_timeout_task_counter) == 0);

	unregister_netdev(netdev);

	/* switch off card */
	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
			     SPIDER_NET_CKRCTRL_STOP_VALUE);
	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
			     SPIDER_NET_CKRCTRL_RUN_VALUE);

	spider_net_undo_pci_setup(card);
	free_netdev(netdev);
}
static struct pci_driver spider_net_driver = {
	.name = spider_net_driver_name,
	.id_table = spider_net_pci_tbl,
	.probe = spider_net_probe,
	.remove = __devexit_p(spider_net_remove)
};
/**
 * spider_net_init - init function when the driver is loaded
 *
 * spider_net_init registers the device driver
 */
static int __init spider_net_init(void)
{
	printk(KERN_INFO "Spidernet version %s.\n", VERSION);

	if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
		rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
		pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
	}
	if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
		rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
		pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
	}
	if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
		tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
		pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
	}
	if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
		tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
		pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
	}

	return pci_register_driver(&spider_net_driver);
}
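/*
 * Editor's note, illustrative only: on trees that provide the clamp()
 * helper from linux/kernel.h (an assumption about newer kernels than this
 * listing), the four range checks above could be collapsed as sketched
 * below, at the cost of losing the "adjusting ... descriptors" messages:
 *
 *	rx_descriptors = clamp(rx_descriptors,
 *			       SPIDER_NET_RX_DESCRIPTORS_MIN,
 *			       SPIDER_NET_RX_DESCRIPTORS_MAX);
 *	tx_descriptors = clamp(tx_descriptors,
 *			       SPIDER_NET_TX_DESCRIPTORS_MIN,
 *			       SPIDER_NET_TX_DESCRIPTORS_MAX);
 */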
/**
 * spider_net_cleanup - exit function when driver is unloaded
 *
 * spider_net_cleanup unregisters the device driver
 */
static void __exit spider_net_cleanup(void)
{
	pci_unregister_driver(&spider_net_driver);
}

module_init(spider_net_init);
module_exit(spider_net_cleanup);