/*
 * rrunner.c: Linux driver for the Essential RoadRunner HIPPI board.
 *
 * Copyright (C) 1998-2002 by Jes Sorensen, <jes@wildopensource.com>.
 *
 * Thanks to Essential Communication for providing us with hardware
 * and very comprehensive documentation without which I would not have
 * been able to write this driver. A special thank you to John Gibbon
 * for sorting out the legal issues, with the NDA, allowing the code to
 * be released under the GPL.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Thanks to Jayaram Bhat from ODS/Essential for fixing some of the
 * stupid bugs in my code.
 *
 * Softnet support and various other patches from Val Henson of
 * ODS/Essential.
 *
 * PCI DMA mapping code partly based on work by Francois Romieu.
 */
#define DEBUG 1
#define RX_DMA_SKBUFF 1
#define PKT_COPY_THRESHOLD 512

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/hippidevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#define rr_if_busy(dev)		netif_queue_stopped(dev)
#define rr_if_running(dev)	netif_running(dev)

#include "rrunner.h"

#define RUN_AT(x) (jiffies + (x))

MODULE_AUTHOR("Jes Sorensen <jes@wildopensource.com>");
MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver");
MODULE_LICENSE("GPL");

static char version[] __devinitdata = "rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n";
/*
 * Implementation notes:
 *
 * The DMA engine only allows for DMA within physical 64KB chunks of
 * memory. The current approach of the driver (and stack) is to use
 * linear blocks of memory for the skbuffs. However, as the data block
 * is always the first part of the skb and skbs are 2^n aligned, we
 * are guaranteed to get the whole block within one 64KB-aligned 64KB
 * chunk.
 *
 * In the long term, relying on being able to allocate 64KB linear
 * chunks of memory is not feasible, and the skb handling code and the
 * stack will need to know about I/O vectors or something similar.
 */
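
/*
 * A worked example of the containment argument above (illustrative
 * only, assuming a 2^n-aligned allocation no larger than 64KB): a
 * 32KB data block allocated on a 32KB boundary, say at 0x10008000,
 * ends at 0x1000ffff and so never straddles a 64KB boundary; only a
 * block larger than its own alignment could cross one.
 */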

/*
 * These are checked at init time to see if they are at least 256KB
 * and increased to 256KB if they are not. This is done to avoid ending
 * up with socket buffers smaller than the MTU size.
 */
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

static int __devinit rr_init_one(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *dev;
	static int version_disp;
	u8 pci_latency;
	struct rr_private *rrpriv;
	void *tmpptr;
	dma_addr_t ring_dma;
	int ret = -ENOMEM;

	dev = alloc_hippi_dev(sizeof(struct rr_private));
	if (!dev)
		goto out3;

	ret = pci_enable_device(pdev);
	if (ret) {
		ret = -ENODEV;
		goto out2;
	}

	rrpriv = netdev_priv(dev);

	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, "rrunner")) {
		ret = -EIO;
		goto out;
	}

	pci_set_drvdata(pdev, dev);

	rrpriv->pci_dev = pdev;

	spin_lock_init(&rrpriv->lock);

	dev->irq = pdev->irq;
	dev->open = &rr_open;
	dev->hard_start_xmit = &rr_start_xmit;
	dev->stop = &rr_close;
	dev->do_ioctl = &rr_ioctl;

	dev->base_addr = pci_resource_start(pdev, 0);

	/* display version info if adapter is found */
	if (!version_disp) {
		/* set display flag to TRUE so that
		 * we only display this string ONCE */
		version_disp = 1;
		printk(version);
	}

	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
	if (pci_latency <= 0x58) {
		pci_latency = 0x58;
		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, pci_latency);
	}

	pci_set_master(pdev);

	printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
	       "at 0x%08lx, irq %i, PCI latency %i\n", dev->name,
	       dev->base_addr, dev->irq, pci_latency);

	/*
	 * Remap the regs into kernel space.
	 */
	rrpriv->regs = ioremap(dev->base_addr, 0x1000);

	if (!rrpriv->regs) {
		printk(KERN_ERR "%s: Unable to map I/O register, "
		       "RoadRunner will be disabled.\n", dev->name);
		ret = -EIO;
		goto out;
	}

	tmpptr = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	rrpriv->tx_ring = tmpptr;
	rrpriv->tx_ring_dma = ring_dma;

	if (!tmpptr) {
		ret = -ENOMEM;
		goto out;
	}

	tmpptr = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	rrpriv->rx_ring = tmpptr;
	rrpriv->rx_ring_dma = ring_dma;

	if (!tmpptr) {
		ret = -ENOMEM;
		goto out;
	}

	tmpptr = pci_alloc_consistent(pdev, EVT_RING_SIZE, &ring_dma);
	rrpriv->evt_ring = tmpptr;
	rrpriv->evt_ring_dma = ring_dma;

	if (!tmpptr) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Don't access any register before this point!
	 */
#ifdef __BIG_ENDIAN
	writel(readl(&rrpriv->regs->HostCtrl) | NO_SWAP,
	       &rrpriv->regs->HostCtrl);
#endif
	/*
	 * Need to add a case for little-endian 64-bit hosts here.
	 */

	rr_init(dev);

	dev->base_addr = 0;

	ret = register_netdev(dev);
	if (ret)
		goto out;
	return 0;

 out:
	if (rrpriv->evt_ring)
		pci_free_consistent(pdev, EVT_RING_SIZE, rrpriv->evt_ring,
				    rrpriv->evt_ring_dma);
	if (rrpriv->rx_ring)
		pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring,
				    rrpriv->rx_ring_dma);
	if (rrpriv->tx_ring)
		pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring,
				    rrpriv->tx_ring_dma);
	if (rrpriv->regs)
		iounmap(rrpriv->regs);
	if (pdev) {
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	}
 out2:
	free_netdev(dev);
 out3:
	return ret;
}

static void __devexit rr_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct rr_private *rr = netdev_priv(dev);

		if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)) {
			printk(KERN_ERR "%s: trying to unload running NIC\n",
			       dev->name);
			writel(HALT_NIC, &rr->regs->HostCtrl);
		}

		pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
				    rr->evt_ring_dma);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
				    rr->rx_ring_dma);
		pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
				    rr->tx_ring_dma);
		unregister_netdev(dev);
		iounmap(rr->regs);
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

/*
 * Commands are considered to be slow, thus there is no reason to
 * inline this.
 */
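/*
 * Note on the command ring, inferred from how the code here uses it
 * (the RoadRunner documentation is authoritative): the host is the
 * producer, commands are posted through the CmdRing registers, and
 * the producer index in cmd_ctrl.pi moves downwards, wrapping modulo
 * CMD_RING_ENTRIES - which is why rr_init1() starts it at 15 and
 * rr_issue_cmd() decrements it after each post.
 */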
static void rr_issue_cmd(struct rr_private *rrpriv, struct cmd *cmd)
{
	struct rr_regs __iomem *regs;
	u32 idx;

	regs = rrpriv->regs;

	/*
	 * This is temporary - it will go away in the final version.
	 * We probably also want to make this function inline.
	 */
	if (readl(&regs->HostCtrl) & NIC_HALTED) {
		printk("issuing command for halted NIC, code 0x%x, "
		       "HostCtrl %08x\n", cmd->code, readl(&regs->HostCtrl));
		if (readl(&regs->Mode) & FATAL_ERR)
			printk("error codes Fail1 %02x, Fail2 %02x\n",
			       readl(&regs->Fail1), readl(&regs->Fail2));
	}

	idx = rrpriv->info->cmd_ctrl.pi;

	writel(*(u32*)(cmd), &regs->CmdRing[idx]);
	wmb();

	idx = (idx - 1) % CMD_RING_ENTRIES;
	rrpriv->info->cmd_ctrl.pi = idx;
	wmb();

	if (readl(&regs->Mode) & FATAL_ERR)
		printk("error code %02x\n", readl(&regs->Fail1));
}

/*
 * Reset the board in a sensible manner. The NIC is already halted
 * when we get here and a spin-lock is held.
 */
static int rr_reset(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	struct eeprom *hw = NULL;
	u32 start_pc;
	int i;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	rr_load_firmware(dev);

	writel(0x01000000, &regs->TX_state);
	writel(0xff800000, &regs->RX_state);
	writel(0, &regs->AssistState);
	writel(CLEAR_INTA, &regs->LocalCtrl);
	writel(0x01, &regs->BrkPt);
	writel(0, &regs->Timer);
	writel(0, &regs->TimerRef);
	writel(RESET_DMA, &regs->DmaReadState);
	writel(RESET_DMA, &regs->DmaWriteState);
	writel(0, &regs->DmaWriteHostHi);
	writel(0, &regs->DmaWriteHostLo);
	writel(0, &regs->DmaReadHostHi);
	writel(0, &regs->DmaReadHostLo);
	writel(0, &regs->DmaReadLen);
	writel(0, &regs->DmaWriteLen);
	writel(0, &regs->DmaWriteLcl);
	writel(0, &regs->DmaWriteIPchecksum);
	writel(0, &regs->DmaReadLcl);
	writel(0, &regs->DmaReadIPchecksum);
	writel(0, &regs->PciState);
#if (BITS_PER_LONG == 64) && defined __LITTLE_ENDIAN
	writel(SWAP_DATA | PTR64BIT | PTR_WD_SWAP, &regs->Mode);
#elif (BITS_PER_LONG == 64)
	writel(SWAP_DATA | PTR64BIT | PTR_WD_NOSWAP, &regs->Mode);
#else
	writel(SWAP_DATA | PTR32BIT | PTR_WD_NOSWAP, &regs->Mode);
#endif

#if 0
	/*
	 * Don't worry, this is just black magic.
	 */
	writel(0xdf000, &regs->RxBase);
	writel(0xdf000, &regs->RxPrd);
	writel(0xdf000, &regs->RxCon);
	writel(0xce000, &regs->TxBase);
	writel(0xce000, &regs->TxPrd);
	writel(0xce000, &regs->TxCon);
	writel(0, &regs->RxIndPro);
	writel(0, &regs->RxIndCon);
	writel(0, &regs->RxIndRef);
	writel(0, &regs->TxIndPro);
	writel(0, &regs->TxIndCon);
	writel(0, &regs->TxIndRef);
	writel(0xcc000, &regs->pad10[0]);
	writel(0, &regs->DrCmndPro);
	writel(0, &regs->DrCmndCon);
	writel(0, &regs->DwCmndPro);
	writel(0, &regs->DwCmndCon);
	writel(0, &regs->DwCmndRef);
	writel(0, &regs->DrDataPro);
	writel(0, &regs->DrDataCon);
	writel(0, &regs->DrDataRef);
	writel(0, &regs->DwDataPro);
	writel(0, &regs->DwDataCon);
	writel(0, &regs->DwDataRef);
#endif

	writel(0xffffffff, &regs->MbEvent);
	writel(0, &regs->Event);

	writel(0, &regs->TxPi);
	writel(0, &regs->IpRxPi);

	writel(0, &regs->EvtCon);
	writel(0, &regs->EvtPrd);

	rrpriv->info->evt_ctrl.pi = 0;

	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRing[i]);

	/*
	 * Why 32 ? is this not cache line size dependent?
	 */
	writel(RBURST_64|WBURST_64, &regs->PciState);
	wmb();

	start_pc = rr_read_eeprom_word(rrpriv, &hw->rncd_info.FwStart);

#if (DEBUG > 1)
	printk("%s: Executing firmware at address 0x%06x\n",
	       dev->name, start_pc);
#endif

	writel(start_pc + 0x800, &regs->Pc);
	wmb();
	udelay(5);
	writel(start_pc, &regs->Pc);
	wmb();

	return 0;
}

/*
 * Read a string from the EEPROM.
 */
static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
				   unsigned long offset,
				   unsigned char *buf,
				   unsigned long length)
{
	struct rr_regs __iomem *regs = rrpriv->regs;
	u32 misc, io, host, i;

	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	misc = readl(&regs->LocalCtrl);
	writel(0, &regs->LocalCtrl);
	host = readl(&regs->HostCtrl);
	writel(host | HALT_NIC, &regs->HostCtrl);
	mb();

	for (i = 0; i < length; i++) {
		writel((EEPROM_BASE + ((offset + i) << 3)), &regs->WinBase);
		mb();
		buf[i] = (readl(&regs->WinData) >> 24) & 0xff;
		mb();
	}

	writel(host, &regs->HostCtrl);
	writel(misc, &regs->LocalCtrl);
	writel(io, &regs->ExtIo);
	mb();
	return i;
}

/*
 * Shortcut to read one word (4 bytes) out of the EEPROM and convert
 * it to our CPU byte-order.
 */
static u32 rr_read_eeprom_word(struct rr_private *rrpriv,
			       void *offset)
{
	u32 word;

	if (rr_read_eeprom(rrpriv, (unsigned long)offset,
			   (char *)&word, 4) == 4)
		return be32_to_cpu(word);
	return 0;
}
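
#if 0
/*
 * Illustrative (not compiled) sketch of how the EEPROM helpers are
 * used elsewhere in this file: each EEPROM byte occupies one 8-byte
 * window slot starting at EEPROM_BASE, so rr_read_eeprom() turns a
 * byte offset into a window address with "(offset + i) << 3", and
 * rr_read_eeprom_word() fetches a big-endian 32-bit word by offset.
 * rr_init() and rr_load_firmware() read the SRAM size word at EEPROM
 * offset 8 exactly like this:
 */
static u32 example_read_sram_size(struct rr_private *rrpriv)
{
	return rr_read_eeprom_word(rrpriv, (void *)8);
}
#endif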

/*
 * Write a string to the EEPROM.
 *
 * This is only called when the firmware is not running.
 */
static unsigned int write_eeprom(struct rr_private *rrpriv,
				 unsigned long offset,
				 unsigned char *buf,
				 unsigned long length)
{
	struct rr_regs __iomem *regs = rrpriv->regs;
	u32 misc, io, data, i, j, ready, error = 0;

	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	misc = readl(&regs->LocalCtrl);
	writel(ENABLE_EEPROM_WRITE, &regs->LocalCtrl);
	mb();

	for (i = 0; i < length; i++) {
		writel((EEPROM_BASE + ((offset + i) << 3)), &regs->WinBase);
		mb();
		data = buf[i] << 24;
		/*
		 * Only try to write the data if it is not the same
		 * value already.
		 */
		if ((readl(&regs->WinData) & 0xff000000) != data) {
			writel(data, &regs->WinData);
			ready = 0;
			j = 0;
			mb();

			while (!ready) {
				udelay(20);
				if ((readl(&regs->WinData) & 0xff000000) ==
				    data)
					ready = 1;
				mb();
				if (j++ > 5000) {
					/* Give up after ~100ms of polling
					 * (5000 iterations * 20us). */
					printk("data mismatch: %08x, "
					       "WinData %08x\n", data,
					       readl(&regs->WinData));
					ready = 1;
					error = 1;
				}
			}
		}
	}

	writel(misc, &regs->LocalCtrl);
	writel(io, &regs->ExtIo);
	mb();

	return error;
}

static int __devinit rr_init(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	struct eeprom *hw = NULL;
	u32 sram_size, rev;
	DECLARE_MAC_BUF(mac);

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	rev = readl(&regs->FwRev);
	rrpriv->fw_rev = rev;
	if (rev > 0x00020024)
		printk(" Firmware revision: %i.%i.%i\n", (rev >> 16),
		       ((rev >> 8) & 0xff), (rev & 0xff));
	else if (rev >= 0x00020000) {
		printk(" Firmware revision: %i.%i.%i (2.0.37 or "
		       "later is recommended)\n", (rev >> 16),
		       ((rev >> 8) & 0xff), (rev & 0xff));
	} else {
		printk(" Firmware revision too old: %i.%i.%i, please "
		       "upgrade to 2.0.37 or later.\n",
		       (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff));
	}

#if (DEBUG > 2)
	printk(" Maximum receive rings %i\n", readl(&regs->MaxRxRng));
#endif

	/*
	 * Read the hardware address from the eeprom. The HW address
	 * is not really necessary for HIPPI but awfully convenient.
	 * The pointer arithmetic to put it in dev_addr is ugly, but
	 * Donald Becker does it this way for the GigE version of this
	 * card and it's shorter and more portable than any
	 * other method I've seen. -VAL
	 */

	*(u16 *)(dev->dev_addr) =
		htons(rr_read_eeprom_word(rrpriv, &hw->manf.BoardULA));
	*(u32 *)(dev->dev_addr+2) =
		htonl(rr_read_eeprom_word(rrpriv, &hw->manf.BoardULA[4]));

	printk(" MAC: %s\n", print_mac(mac, dev->dev_addr));

	sram_size = rr_read_eeprom_word(rrpriv, (void *)8);
	printk(" SRAM size 0x%06x\n", sram_size);

	if (sysctl_rmem_max < 262144) {
		printk(" Receive socket buffer limit too low (%i), "
		       "setting to 262144\n", sysctl_rmem_max);
		sysctl_rmem_max = 262144;
	}

	if (sysctl_wmem_max < 262144) {
		printk(" Transmit socket buffer limit too low (%i), "
		       "setting to 262144\n", sysctl_wmem_max);
		sysctl_wmem_max = 262144;
	}

	return 0;
}
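
/*
 * Bring-up helper for rr_open() and the watchdog restart path (a
 * summary of the code below, not of any external spec): halt the NIC,
 * point it at the shared rx_ctrl/info blocks, initialize the event,
 * command and TX ring control structures, reset the board and reload
 * firmware, populate the receive ring with freshly mapped skbs, then
 * issue C_START_FW and wait up to five seconds for the firmware to
 * report itself running via the E_NIC_UP event.
 */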
static int rr_init1(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	unsigned long myjif, flags;
	struct cmd cmd;
	u32 hostctrl;
	int ecode = 0;
	short i;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	spin_lock_irqsave(&rrpriv->lock, flags);

	hostctrl = readl(&regs->HostCtrl);
	writel(hostctrl | HALT_NIC | RR_CLEAR_INT, &regs->HostCtrl);
	wmb();

	if (hostctrl & PARITY_ERR) {
		printk("%s: Parity error halting NIC - this is serious!\n",
		       dev->name);
		spin_unlock_irqrestore(&rrpriv->lock, flags);
		ecode = -EFAULT;
		goto error;
	}

	set_rxaddr(regs, rrpriv->rx_ctrl_dma);
	set_infoaddr(regs, rrpriv->info_dma);

	rrpriv->info->evt_ctrl.entry_size = sizeof(struct event);
	rrpriv->info->evt_ctrl.entries = EVT_RING_ENTRIES;
	rrpriv->info->evt_ctrl.mode = 0;
	rrpriv->info->evt_ctrl.pi = 0;
	set_rraddr(&rrpriv->info->evt_ctrl.rngptr, rrpriv->evt_ring_dma);

	rrpriv->info->cmd_ctrl.entry_size = sizeof(struct cmd);
	rrpriv->info->cmd_ctrl.entries = CMD_RING_ENTRIES;
	rrpriv->info->cmd_ctrl.mode = 0;
	rrpriv->info->cmd_ctrl.pi = 15;

	for (i = 0; i < CMD_RING_ENTRIES; i++) {
		writel(0, &regs->CmdRing[i]);
	}

	for (i = 0; i < TX_RING_ENTRIES; i++) {
		rrpriv->tx_ring[i].size = 0;
		set_rraddr(&rrpriv->tx_ring[i].addr, 0);
		rrpriv->tx_skbuff[i] = NULL;
	}
	rrpriv->info->tx_ctrl.entry_size = sizeof(struct tx_desc);
	rrpriv->info->tx_ctrl.entries = TX_RING_ENTRIES;
	rrpriv->info->tx_ctrl.mode = 0;
	rrpriv->info->tx_ctrl.pi = 0;
	set_rraddr(&rrpriv->info->tx_ctrl.rngptr, rrpriv->tx_ring_dma);

	/*
	 * Set dirty_tx before we start receiving interrupts, otherwise
	 * the interrupt handler might think it is supposed to process
	 * tx ints before we are up and running, which may cause a null
	 * pointer access in the int handler.
	 */
	rrpriv->tx_full = 0;
	rrpriv->cur_rx = 0;
	rrpriv->dirty_rx = rrpriv->dirty_tx = 0;

	rr_reset(dev);

	/* Tuning values */
	writel(0x5000, &regs->ConRetry);
	writel(0x100, &regs->ConRetryTmr);
	writel(0x500000, &regs->ConTmout);
	writel(0x60, &regs->IntrTmr);
	writel(0x500000, &regs->TxDataMvTimeout);
	writel(0x200000, &regs->RxDataMvTimeout);
	writel(0x80, &regs->WriteDmaThresh);
	writel(0x80, &regs->ReadDmaThresh);

	rrpriv->fw_running = 0;
	wmb();

	hostctrl &= ~(HALT_NIC | INVALID_INST_B | PARITY_ERR);
	writel(hostctrl, &regs->HostCtrl);
	wmb();
	spin_unlock_irqrestore(&rrpriv->lock, flags);

	for (i = 0; i < RX_RING_ENTRIES; i++) {
		struct sk_buff *skb;
		dma_addr_t addr;

		rrpriv->rx_ring[i].mode = 0;
		skb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_WARNING "%s: Unable to allocate memory "
			       "for receive ring - halting NIC\n", dev->name);
			ecode = -ENOMEM;
			goto error;
		}
		rrpriv->rx_skbuff[i] = skb;
		addr = pci_map_single(rrpriv->pci_dev, skb->data,
				      dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
		/*
		 * Sanity test to see if we conflict with the DMA
		 * limitations of the Roadrunner.
		 */
		if ((((unsigned long)skb->data) & 0xfff) > ~65320)
			printk("skb alloc error\n");

		set_rraddr(&rrpriv->rx_ring[i].addr, addr);
		rrpriv->rx_ring[i].size = dev->mtu + HIPPI_HLEN;
	}

	rrpriv->rx_ctrl[4].entry_size = sizeof(struct rx_desc);
	rrpriv->rx_ctrl[4].entries = RX_RING_ENTRIES;
	rrpriv->rx_ctrl[4].mode = 8;
	rrpriv->rx_ctrl[4].pi = 0;
	wmb();
	set_rraddr(&rrpriv->rx_ctrl[4].rngptr, rrpriv->rx_ring_dma);

	udelay(1000);

	/*
	 * Now start the FirmWare.
	 */
	cmd.code = C_START_FW;
	cmd.ring = 0;
	cmd.index = 0;

	rr_issue_cmd(rrpriv, &cmd);

	/*
	 * Give the FirmWare time to chew on the `get running' command.
	 */
	myjif = jiffies + 5 * HZ;
	while (time_before(jiffies, myjif) && !rrpriv->fw_running)
		cpu_relax();

	netif_start_queue(dev);

	return ecode;

 error:
	/*
	 * We might have gotten here because we are out of memory,
	 * make sure we release everything we allocated before failing.
	 */
	for (i = 0; i < RX_RING_ENTRIES; i++) {
		struct sk_buff *skb = rrpriv->rx_skbuff[i];

		if (skb) {
			pci_unmap_single(rrpriv->pci_dev,
					 rrpriv->rx_ring[i].addr.addrlo,
					 dev->mtu + HIPPI_HLEN,
					 PCI_DMA_FROMDEVICE);
			rrpriv->rx_ring[i].size = 0;
			set_rraddr(&rrpriv->rx_ring[i].addr, 0);
			dev_kfree_skb(skb);
			rrpriv->rx_skbuff[i] = NULL;
		}
	}
	return ecode;
}

/*
 * All events are considered to be slow (RX/TX ints do not generate
 * events) and are handled here, outside the main interrupt handler,
 * to reduce the size of the handler.
 */
static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 tmp;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	while (prodidx != eidx) {
		switch (rrpriv->evt_ring[eidx].code) {
		case E_NIC_UP:
			tmp = readl(&regs->FwRev);
			printk(KERN_INFO "%s: Firmware revision %i.%i.%i "
			       "up and running\n", dev->name,
			       (tmp >> 16), ((tmp >> 8) & 0xff), (tmp & 0xff));
			rrpriv->fw_running = 1;
			writel(RX_RING_ENTRIES - 1, &regs->IpRxPi);
			wmb();
			break;
		case E_LINK_ON:
			printk(KERN_INFO "%s: Optical link ON\n", dev->name);
			break;
		case E_LINK_OFF:
			printk(KERN_INFO "%s: Optical link OFF\n", dev->name);
			break;
		case E_RX_IDLE:
			printk(KERN_WARNING "%s: RX data not moving\n",
			       dev->name);
			goto drop;
		case E_WATCHDOG:
			printk(KERN_INFO "%s: The watchdog is here to see "
			       "us\n", dev->name);
			break;
		case E_INTERN_ERR:
			printk(KERN_ERR "%s: HIPPI Internal NIC error\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_HOST_ERR:
			printk(KERN_ERR "%s: Host software error\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		/*
		 * TX events.
		 */
		case E_CON_REJ:
			printk(KERN_WARNING "%s: Connection rejected\n",
			       dev->name);
			dev->stats.tx_aborted_errors++;
			break;
		case E_CON_TMOUT:
			printk(KERN_WARNING "%s: Connection timeout\n",
			       dev->name);
			break;
		case E_DISC_ERR:
			printk(KERN_WARNING "%s: HIPPI disconnect error\n",
			       dev->name);
			dev->stats.tx_aborted_errors++;
			break;
		case E_INT_PRTY:
			printk(KERN_ERR "%s: HIPPI Internal Parity error\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_TX_IDLE:
			printk(KERN_WARNING "%s: Transmitter idle\n",
			       dev->name);
			break;
		case E_TX_LINK_DROP:
			printk(KERN_WARNING "%s: Link lost during transmit\n",
			       dev->name);
			dev->stats.tx_aborted_errors++;
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_TX_INV_RNG:
			printk(KERN_ERR "%s: Invalid send ring block\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_TX_INV_BUF:
			printk(KERN_ERR "%s: Invalid send buffer address\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_TX_INV_DSC:
			printk(KERN_ERR "%s: Invalid descriptor address\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		/*
		 * RX events.
		 */
		case E_RX_RNG_OUT:
			printk(KERN_INFO "%s: Receive ring full\n", dev->name);
			break;
		case E_RX_PAR_ERR:
			printk(KERN_WARNING "%s: Receive parity error\n",
			       dev->name);
			goto drop;
		case E_RX_LLRC_ERR:
			printk(KERN_WARNING "%s: Receive LLRC error\n",
			       dev->name);
			goto drop;
		case E_PKT_LN_ERR:
			printk(KERN_WARNING "%s: Receive packet length "
			       "error\n", dev->name);
			goto drop;
		case E_DTA_CKSM_ERR:
			printk(KERN_WARNING "%s: Data checksum error\n",
			       dev->name);
			goto drop;
		case E_SHT_BST:
			printk(KERN_WARNING "%s: Unexpected short burst "
			       "error\n", dev->name);
			goto drop;
		case E_STATE_ERR:
			printk(KERN_WARNING "%s: Recv. state transition "
			       "error\n", dev->name);
			goto drop;
		case E_UNEXP_DATA:
			printk(KERN_WARNING "%s: Unexpected data error\n",
			       dev->name);
			goto drop;
		case E_LST_LNK_ERR:
			printk(KERN_WARNING "%s: Link lost error\n",
			       dev->name);
			goto drop;
		case E_FRM_ERR:
			printk(KERN_WARNING "%s: Framing error\n",
			       dev->name);
			goto drop;
		case E_FLG_SYN_ERR:
			printk(KERN_WARNING "%s: Flag sync. lost during "
			       "packet\n", dev->name);
			goto drop;
		case E_RX_INV_BUF:
			printk(KERN_ERR "%s: Invalid receive buffer "
			       "address\n", dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_RX_INV_DSC:
			printk(KERN_ERR "%s: Invalid receive descriptor "
			       "address\n", dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_RNG_BLK:
			printk(KERN_ERR "%s: Invalid ring block\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		drop:
			/* Label packet to be dropped.
			 * Actual dropping occurs in rx
			 * handling.
			 *
			 * The index of packet we get to drop is
			 * the index of the packet following
			 * the bad packet. -kbf
			 */
			{
				u16 index = rrpriv->evt_ring[eidx].index;

				index = (index + (RX_RING_ENTRIES - 1)) %
					RX_RING_ENTRIES;
				rrpriv->rx_ring[index].mode |=
					(PACKET_BAD | PACKET_END);
			}
			break;
		default:
			printk(KERN_WARNING "%s: Unhandled event 0x%02x\n",
			       dev->name, rrpriv->evt_ring[eidx].code);
		}
		eidx = (eidx + 1) % EVT_RING_ENTRIES;
	}

	rrpriv->info->evt_ctrl.pi = eidx;
	wmb();
	return eidx;
}
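
/*
 * Receive path (a summary of the code below): packets shorter than
 * PKT_COPY_THRESHOLD are copied into a freshly allocated skb so the
 * large DMA buffer stays on the ring; larger packets are passed up
 * directly and replaced by a newly allocated and mapped buffer. In
 * both cases the descriptor is re-armed with the full MTU-sized
 * length before the ring index advances.
 */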
static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
{
	struct rr_private *rrpriv = netdev_priv(dev);
	struct rr_regs __iomem *regs = rrpriv->regs;

	do {
		struct rx_desc *desc;
		u32 pkt_len;

		desc = &(rrpriv->rx_ring[index]);
		pkt_len = desc->size;
#if (DEBUG > 2)
		printk("index %i, rxlimit %i\n", index, rxlimit);
		printk("len %x, mode %x\n", pkt_len, desc->mode);
#endif
		if ((rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD) {
			dev->stats.rx_dropped++;
			goto defer;
		}

		if (pkt_len > 0) {
			struct sk_buff *skb, *rx_skb;

			rx_skb = rrpriv->rx_skbuff[index];

			if (pkt_len < PKT_COPY_THRESHOLD) {
				skb = alloc_skb(pkt_len, GFP_ATOMIC);
				if (skb == NULL) {
					printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len);
					dev->stats.rx_dropped++;
					goto defer;
				} else {
					pci_dma_sync_single_for_cpu(rrpriv->pci_dev,
								    desc->addr.addrlo,
								    pkt_len,
								    PCI_DMA_FROMDEVICE);

					memcpy(skb_put(skb, pkt_len),
					       rx_skb->data, pkt_len);

					pci_dma_sync_single_for_device(rrpriv->pci_dev,
								       desc->addr.addrlo,
								       pkt_len,
								       PCI_DMA_FROMDEVICE);
				}
			} else {
				struct sk_buff *newskb;

				newskb = alloc_skb(dev->mtu + HIPPI_HLEN,
						   GFP_ATOMIC);
				if (newskb) {
					dma_addr_t addr;

					pci_unmap_single(rrpriv->pci_dev,
							 desc->addr.addrlo, dev->mtu +
							 HIPPI_HLEN, PCI_DMA_FROMDEVICE);
					skb = rx_skb;
					skb_put(skb, pkt_len);
					rrpriv->rx_skbuff[index] = newskb;
					addr = pci_map_single(rrpriv->pci_dev,
							      newskb->data,
							      dev->mtu + HIPPI_HLEN,
							      PCI_DMA_FROMDEVICE);
					set_rraddr(&desc->addr, addr);
				} else {
					printk("%s: Out of memory, deferring "
					       "packet\n", dev->name);
					dev->stats.rx_dropped++;
					goto defer;
				}
			}
			skb->protocol = hippi_type_trans(skb, dev);

			netif_rx(skb);		/* send it up */

			dev->last_rx = jiffies;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
	defer:
		desc->mode = 0;
		desc->size = dev->mtu + HIPPI_HLEN;

		/* Hand buffers back to the NIC in batches of eight. */
		if ((index & 7) == 7)
			writel(index, &regs->IpRxPi);

		index = (index + 1) % RX_RING_ENTRIES;
	} while (index != rxlimit);

	rrpriv->cur_rx = index;
	wmb();
}
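
/*
 * The EvtPrd register packs three 8-bit indices, as the unpacking
 * below shows: bits 7:0 hold the event producer, bits 15:8 the TX
 * consumer, and bits 23:16 the RX producer limit. The acknowledgement
 * written back to EvtCon repacks the updated indices the same way.
 */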
static irqreturn_t rr_interrupt(int irq, void *dev_id)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	struct net_device *dev = (struct net_device *)dev_id;
	u32 prodidx, rxindex, eidx, txcsmr, rxlimit, txcon;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	if (!(readl(&regs->HostCtrl) & RR_INT))
		return IRQ_NONE;

	spin_lock(&rrpriv->lock);

	prodidx = readl(&regs->EvtPrd);
	txcsmr = (prodidx >> 8) & 0xff;
	rxlimit = (prodidx >> 16) & 0xff;
	prodidx &= 0xff;

#if (DEBUG > 2)
	printk("%s: interrupt, prodidx = %i, eidx = %i\n", dev->name,
	       prodidx, rrpriv->info->evt_ctrl.pi);
#endif
	/*
	 * Order here is important. We must handle events
	 * before doing anything else in order to catch
	 * such things as LLRC errors, etc -kbf
	 */
	eidx = rrpriv->info->evt_ctrl.pi;
	if (prodidx != eidx)
		eidx = rr_handle_event(dev, prodidx, eidx);

	rxindex = rrpriv->cur_rx;
	if (rxindex != rxlimit)
		rx_int(dev, rxlimit, rxindex);

	txcon = rrpriv->dirty_tx;
	if (txcsmr != txcon) {
		do {
			/* Due to occasional firmware TX producer/consumer out
			 * of sync. error need to check entry in ring -kbf
			 */
			if (rrpriv->tx_skbuff[txcon]) {
				struct tx_desc *desc;
				struct sk_buff *skb;

				desc = &(rrpriv->tx_ring[txcon]);
				skb = rrpriv->tx_skbuff[txcon];

				dev->stats.tx_packets++;
				dev->stats.tx_bytes += skb->len;

				pci_unmap_single(rrpriv->pci_dev,
						 desc->addr.addrlo, skb->len,
						 PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(skb);

				rrpriv->tx_skbuff[txcon] = NULL;
				desc->size = 0;
				set_rraddr(&rrpriv->tx_ring[txcon].addr, 0);
				desc->mode = 0;
			}
			txcon = (txcon + 1) % TX_RING_ENTRIES;
		} while (txcsmr != txcon);

		wmb();

		rrpriv->dirty_tx = txcon;
		if (rrpriv->tx_full && rr_if_busy(dev) &&
		    (((rrpriv->info->tx_ctrl.pi + 1) % TX_RING_ENTRIES)
		     != rrpriv->dirty_tx)) {
			rrpriv->tx_full = 0;
			netif_wake_queue(dev);
		}
	}

	eidx |= ((txcsmr << 8) | (rxlimit << 16));
	writel(eidx, &regs->EvtCon);
	wmb();

	spin_unlock(&rrpriv->lock);
	return IRQ_HANDLED;
}

static inline void rr_raz_tx(struct rr_private *rrpriv,
			     struct net_device *dev)
{
	int i;

	for (i = 0; i < TX_RING_ENTRIES; i++) {
		struct sk_buff *skb = rrpriv->tx_skbuff[i];

		if (skb) {
			struct tx_desc *desc = &(rrpriv->tx_ring[i]);

			pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
					 skb->len, PCI_DMA_TODEVICE);
			desc->size = 0;
			set_rraddr(&desc->addr, 0);
			dev_kfree_skb(skb);
			rrpriv->tx_skbuff[i] = NULL;
		}
	}
}

static inline void rr_raz_rx(struct rr_private *rrpriv,
			     struct net_device *dev)
{
	int i;

	for (i = 0; i < RX_RING_ENTRIES; i++) {
		struct sk_buff *skb = rrpriv->rx_skbuff[i];

		if (skb) {
			struct rx_desc *desc = &(rrpriv->rx_ring[i]);

			pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
					 dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
			desc->size = 0;
			set_rraddr(&desc->addr, 0);
			dev_kfree_skb(skb);
			rrpriv->rx_skbuff[i] = NULL;
		}
	}
}

static void rr_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct rr_private *rrpriv = netdev_priv(dev);
	struct rr_regs __iomem *regs = rrpriv->regs;
	unsigned long flags;

	if (readl(&regs->HostCtrl) & NIC_HALTED) {
		printk("%s: Restarting nic\n", dev->name);
		memset(rrpriv->rx_ctrl, 0, 256 * sizeof(struct ring_ctrl));
		memset(rrpriv->info, 0, sizeof(struct rr_info));
		wmb();

		rr_raz_tx(rrpriv, dev);
		rr_raz_rx(rrpriv, dev);

		if (rr_init1(dev)) {
			spin_lock_irqsave(&rrpriv->lock, flags);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			spin_unlock_irqrestore(&rrpriv->lock, flags);
		}
	}
	rrpriv->timer.expires = RUN_AT(5*HZ);
	add_timer(&rrpriv->timer);
}

static int rr_open(struct net_device *dev)
{
	struct rr_private *rrpriv = netdev_priv(dev);
	struct pci_dev *pdev = rrpriv->pci_dev;
	struct rr_regs __iomem *regs;
	int ecode = 0;
	unsigned long flags;
	dma_addr_t dma_addr;

	regs = rrpriv->regs;

	if (rrpriv->fw_rev < 0x00020000) {
		printk(KERN_WARNING "%s: trying to configure device with "
		       "obsolete firmware\n", dev->name);
		ecode = -EBUSY;
		goto error;
	}

	rrpriv->rx_ctrl = pci_alloc_consistent(pdev,
					       256 * sizeof(struct ring_ctrl),
					       &dma_addr);
	if (!rrpriv->rx_ctrl) {
		ecode = -ENOMEM;
		goto error;
	}
	rrpriv->rx_ctrl_dma = dma_addr;
	memset(rrpriv->rx_ctrl, 0, 256 * sizeof(struct ring_ctrl));

	rrpriv->info = pci_alloc_consistent(pdev, sizeof(struct rr_info),
					    &dma_addr);
	if (!rrpriv->info) {
		ecode = -ENOMEM;
		goto error;
	}
	rrpriv->info_dma = dma_addr;
	memset(rrpriv->info, 0, sizeof(struct rr_info));
	wmb();

	spin_lock_irqsave(&rrpriv->lock, flags);
	writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
	readl(&regs->HostCtrl);
	spin_unlock_irqrestore(&rrpriv->lock, flags);

	if (request_irq(dev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
		printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
		       dev->name, dev->irq);
		ecode = -EAGAIN;
		goto error;
	}

	if ((ecode = rr_init1(dev)))
		goto error;

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	init_timer(&rrpriv->timer);
	rrpriv->timer.expires = RUN_AT(5*HZ);		/* 5 sec. watchdog */
	rrpriv->timer.data = (unsigned long)dev;
	rrpriv->timer.function = &rr_timer;		/* timer handler */
	add_timer(&rrpriv->timer);

	netif_start_queue(dev);

	return ecode;

 error:
	spin_lock_irqsave(&rrpriv->lock, flags);
	writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
	spin_unlock_irqrestore(&rrpriv->lock, flags);

	if (rrpriv->info) {
		pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
				    rrpriv->info_dma);
		rrpriv->info = NULL;
	}
	if (rrpriv->rx_ctrl) {
		pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
				    rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
		rrpriv->rx_ctrl = NULL;
	}
	netif_stop_queue(dev);

	return ecode;
}

static void rr_dump(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 index, cons;
	short i;
	int len;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	printk("%s: dumping NIC TX rings\n", dev->name);

	printk("RxPrd %08x, TxPrd %02x, EvtPrd %08x, TxPi %02x, TxCtrlPi %02x\n",
	       readl(&regs->RxPrd), readl(&regs->TxPrd),
	       readl(&regs->EvtPrd), readl(&regs->TxPi),
	       rrpriv->info->tx_ctrl.pi);

	printk("Error code 0x%x\n", readl(&regs->Fail1));

	index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % EVT_RING_ENTRIES;
	cons = rrpriv->dirty_tx;
	printk("TX ring index %i, TX consumer %i\n",
	       index, cons);

	if (rrpriv->tx_skbuff[index]) {
		len = min_t(int, 0x80, rrpriv->tx_skbuff[index]->len);
		printk("skbuff for index %i is valid - dumping data (0x%x bytes - DMA len 0x%x)\n", index, len, rrpriv->tx_ring[index].size);
		for (i = 0; i < len; i++) {
			if (!(i & 7))
				printk("\n");
			printk("%02x ", (unsigned char) rrpriv->tx_skbuff[index]->data[i]);
		}
		printk("\n");
	}

	if (rrpriv->tx_skbuff[cons]) {
		len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len);
		printk("skbuff for cons %i is valid - dumping data (0x%x bytes - skbuff len 0x%x)\n", cons, len, rrpriv->tx_skbuff[cons]->len);
		printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %08lx, truesize 0x%x\n",
		       rrpriv->tx_ring[cons].mode,
		       rrpriv->tx_ring[cons].size,
		       (unsigned long long) rrpriv->tx_ring[cons].addr.addrlo,
		       (unsigned long)rrpriv->tx_skbuff[cons]->data,
		       (unsigned int)rrpriv->tx_skbuff[cons]->truesize);
		for (i = 0; i < len; i++) {
			if (!(i & 7))
				printk("\n");
			printk("%02x ", (unsigned char) rrpriv->tx_skbuff[cons]->data[i]);
		}
		printk("\n");
	}

	printk("dumping TX ring info:\n");
	for (i = 0; i < TX_RING_ENTRIES; i++)
		printk("mode 0x%x, size 0x%x, phys-addr %08Lx\n",
		       rrpriv->tx_ring[i].mode,
		       rrpriv->tx_ring[i].size,
		       (unsigned long long) rrpriv->tx_ring[i].addr.addrlo);
}

static int rr_close(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	unsigned long flags;
	u32 tmp;
	short i;

	netif_stop_queue(dev);

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	/*
	 * Lock to make sure we are not cleaning up while another CPU
	 * is handling interrupts.
	 */
	spin_lock_irqsave(&rrpriv->lock, flags);

	tmp = readl(&regs->HostCtrl);
	if (tmp & NIC_HALTED) {
		printk("%s: NIC already halted\n", dev->name);
		rr_dump(dev);
	} else {
		tmp |= HALT_NIC | RR_CLEAR_INT;
		writel(tmp, &regs->HostCtrl);
		readl(&regs->HostCtrl);
	}

	rrpriv->fw_running = 0;

	del_timer_sync(&rrpriv->timer);

	writel(0, &regs->TxPi);
	writel(0, &regs->IpRxPi);

	writel(0, &regs->EvtCon);
	writel(0, &regs->EvtPrd);
	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRing[i]);

	rrpriv->info->tx_ctrl.entries = 0;
	rrpriv->info->cmd_ctrl.pi = 0;
	rrpriv->info->evt_ctrl.pi = 0;
	rrpriv->rx_ctrl[4].entries = 0;

	rr_raz_tx(rrpriv, dev);
	rr_raz_rx(rrpriv, dev);

	pci_free_consistent(rrpriv->pci_dev, 256 * sizeof(struct ring_ctrl),
			    rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
	rrpriv->rx_ctrl = NULL;

	pci_free_consistent(rrpriv->pci_dev, sizeof(struct rr_info),
			    rrpriv->info, rrpriv->info_dma);
	rrpriv->info = NULL;

	free_irq(dev->irq, dev);
	spin_unlock_irqrestore(&rrpriv->lock, flags);

	return 0;
}
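
/*
 * Transmit path notes (describing the code below): every outgoing
 * packet is prefixed with an 8-byte HIPPI iField block - a zero word
 * followed by the ifield taken from the skb's hippi_cb - so the
 * descriptor length is skb->len + 8, and each packet occupies a
 * single descriptor marked PACKET_START | PACKET_END.
 */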
static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rr_private *rrpriv = netdev_priv(dev);
	struct rr_regs __iomem *regs = rrpriv->regs;
	struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
	struct ring_ctrl *txctrl;
	unsigned long flags;
	u32 index, len = skb->len;
	u32 *ifield;
	struct sk_buff *new_skb;

	if (readl(&regs->Mode) & FATAL_ERR)
		printk("error codes Fail1 %02x, Fail2 %02x\n",
		       readl(&regs->Fail1), readl(&regs->Fail2));

	/*
	 * We probably need to deal with tbusy here to prevent overruns.
	 */
	if (skb_headroom(skb) < 8) {
		printk("incoming skb too small - reallocating\n");
		if (!(new_skb = dev_alloc_skb(len + 8))) {
			dev_kfree_skb(skb);
			netif_wake_queue(dev);
			return -EBUSY;
		}
		skb_reserve(new_skb, 8);
		skb_put(new_skb, len);
		skb_copy_from_linear_data(skb, new_skb->data, len);
		dev_kfree_skb(skb);
		skb = new_skb;
	}

	ifield = (u32 *)skb_push(skb, 8);

	ifield[0] = 0;
	ifield[1] = hcb->ifield;

	/*
	 * We don't need the lock before we are actually going to start
	 * fiddling with the control blocks.
	 */
	spin_lock_irqsave(&rrpriv->lock, flags);

	txctrl = &rrpriv->info->tx_ctrl;

	index = txctrl->pi;

	rrpriv->tx_skbuff[index] = skb;
	set_rraddr(&rrpriv->tx_ring[index].addr, pci_map_single(
		rrpriv->pci_dev, skb->data, len + 8, PCI_DMA_TODEVICE));
	rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */
	rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END;
	txctrl->pi = (index + 1) % TX_RING_ENTRIES;
	wmb();
	writel(txctrl->pi, &regs->TxPi);

	if (txctrl->pi == rrpriv->dirty_tx) {
		rrpriv->tx_full = 1;
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&rrpriv->lock, flags);

	dev->trans_start = jiffies;
	return 0;
}

/*
 * Read the firmware out of the EEPROM and put it into the SRAM
 * (or from user space - later).
 *
 * This operation requires the NIC to be halted and is performed with
 * interrupts disabled and with the spinlock held.
 */
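/*
 * Layout of the firmware image in EEPROM, as the loader below walks
 * it (see the RoadRunner documentation for the authoritative
 * description): hw->rncd_info.AddrRunCodeSegs points at a word
 * holding the number of segments, followed by one {SRAM address,
 * length in words, EEPROM data pointer} triplet per segment. EEPROM
 * pointers are byte addresses; masking with 0x1fffff and shifting
 * right by 3 converts them to the offsets used by
 * rr_read_eeprom_word().
 */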
static int rr_load_firmware(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	unsigned long eptr, segptr;
	int i, j;
	u32 localctrl, sptr, len, tmp;
	u32 p2len, p2size, nr_seg, revision, io, sram_size;
	struct eeprom *hw = NULL;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!(readl(&regs->HostCtrl) & NIC_HALTED)) {
		printk("%s: Trying to load firmware to a running NIC.\n",
		       dev->name);
		return -EBUSY;
	}

	localctrl = readl(&regs->LocalCtrl);
	writel(0, &regs->LocalCtrl);

	writel(0, &regs->EvtPrd);
	writel(0, &regs->RxPrd);
	writel(0, &regs->TxPrd);

	/*
	 * First wipe the entire SRAM, otherwise we might run into all
	 * kinds of trouble ... sigh, this took almost all afternoon
	 * to track down ;-(
	 */
	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	sram_size = rr_read_eeprom_word(rrpriv, (void *)8);

	for (i = 200; i < sram_size / 4; i++) {
		writel(i * 4, &regs->WinBase);
		mb();
		writel(0, &regs->WinData);
		mb();
	}
	writel(io, &regs->ExtIo);
	mb();

	eptr = (unsigned long)rr_read_eeprom_word(rrpriv,
						  &hw->rncd_info.AddrRunCodeSegs);
	eptr = ((eptr & 0x1fffff) >> 3);

	p2len = rr_read_eeprom_word(rrpriv, (void *)(0x83*4));
	p2len = (p2len << 2);
	p2size = rr_read_eeprom_word(rrpriv, (void *)(0x84*4));
	p2size = ((p2size & 0x1fffff) >> 3);

	if ((eptr < p2size) || (eptr > (p2size + p2len))) {
		printk("%s: eptr is invalid\n", dev->name);
		goto out;
	}

	revision = rr_read_eeprom_word(rrpriv, &hw->manf.HeaderFmt);

	if (revision != 1) {
		printk("%s: invalid firmware format (%i)\n",
		       dev->name, revision);
		goto out;
	}

	nr_seg = rr_read_eeprom_word(rrpriv, (void *)eptr);
	eptr += 4;
#if (DEBUG > 1)
	printk("%s: nr_seg %i\n", dev->name, nr_seg);
#endif

	for (i = 0; i < nr_seg; i++) {
		sptr = rr_read_eeprom_word(rrpriv, (void *)eptr);
		eptr += 4;
		len = rr_read_eeprom_word(rrpriv, (void *)eptr);
		eptr += 4;
		segptr = (unsigned long)rr_read_eeprom_word(rrpriv, (void *)eptr);
		segptr = ((segptr & 0x1fffff) >> 3);
		eptr += 4;
#if (DEBUG > 1)
		printk("%s: segment %i, sram address %06x, length %04x, segptr %06x\n",
		       dev->name, i, sptr, len, segptr);
#endif
		for (j = 0; j < len; j++) {
			tmp = rr_read_eeprom_word(rrpriv, (void *)segptr);
			writel(sptr, &regs->WinBase);
			mb();
			writel(tmp, &regs->WinData);
			mb();
			segptr += 4;
			sptr += 4;
		}
	}

 out:
	writel(localctrl, &regs->LocalCtrl);
	mb();
	return 0;
}

static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rr_private *rrpriv;
	unsigned char *image, *oldimage;
	unsigned long flags;
	unsigned int i;
	int error = -EOPNOTSUPP;

	rrpriv = netdev_priv(dev);

	switch (cmd) {
	case SIOCRRGFW:
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;

		image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
		if (!image) {
			printk(KERN_ERR "%s: Unable to allocate memory "
			       "for EEPROM image\n", dev->name);
			return -ENOMEM;
		}

		if (rrpriv->fw_running) {
			printk("%s: Firmware already running\n", dev->name);
			error = -EPERM;
			goto gf_out;
		}

		spin_lock_irqsave(&rrpriv->lock, flags);
		i = rr_read_eeprom(rrpriv, 0, image, EEPROM_BYTES);
		spin_unlock_irqrestore(&rrpriv->lock, flags);
		if (i != EEPROM_BYTES) {
			printk(KERN_ERR "%s: Error reading EEPROM\n",
			       dev->name);
			error = -EFAULT;
			goto gf_out;
		}
		error = copy_to_user(rq->ifr_data, image, EEPROM_BYTES);
		if (error)
			error = -EFAULT;
	gf_out:
		kfree(image);
		return error;

	case SIOCRRPFW:
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;

		image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
		oldimage = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
		if (!image || !oldimage) {
			printk(KERN_ERR "%s: Unable to allocate memory "
			       "for EEPROM image\n", dev->name);
			error = -ENOMEM;
			goto wf_out;
		}

		error = copy_from_user(image, rq->ifr_data, EEPROM_BYTES);
		if (error) {
			error = -EFAULT;
			goto wf_out;
		}

		if (rrpriv->fw_running) {
			printk("%s: Firmware already running\n", dev->name);
			error = -EPERM;
			goto wf_out;
		}

		printk("%s: Updating EEPROM firmware\n", dev->name);

		spin_lock_irqsave(&rrpriv->lock, flags);
		error = write_eeprom(rrpriv, 0, image, EEPROM_BYTES);
		if (error)
			printk(KERN_ERR "%s: Error writing EEPROM\n",
			       dev->name);

		i = rr_read_eeprom(rrpriv, 0, oldimage, EEPROM_BYTES);
		spin_unlock_irqrestore(&rrpriv->lock, flags);

		if (i != EEPROM_BYTES)
			printk(KERN_ERR "%s: Error reading back EEPROM "
			       "image\n", dev->name);

		error = memcmp(image, oldimage, EEPROM_BYTES);
		if (error) {
			printk(KERN_ERR "%s: Error verifying EEPROM image\n",
			       dev->name);
			error = -EFAULT;
		}
	wf_out:
		kfree(oldimage);
		kfree(image);
		return error;

	case SIOCRRID:
		/* Magic ID: 0x52523032 is ASCII "RR02". */
		return put_user(0x52523032, (int __user *)rq->ifr_data);
	default:
		return error;
	}
}

static struct pci_device_id rr_pci_tbl[] = {
	{ PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
	  PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, rr_pci_tbl);

static struct pci_driver rr_driver = {
	.name		= "rrunner",
	.id_table	= rr_pci_tbl,
	.probe		= rr_init_one,
	.remove		= __devexit_p(rr_remove_one),
};

static int __init rr_init_module(void)
{
	return pci_register_driver(&rr_driver);
}

static void __exit rr_cleanup_module(void)
{
	pci_unregister_driver(&rr_driver);
}

module_init(rr_init_module);
module_exit(rr_cleanup_module);

/*
 * Local variables:
 * compile-command: "gcc -D__KERNEL__ -I../../include -Wall -Wstrict-prototypes -O2 -pipe -fomit-frame-pointer -fno-strength-reduce -m486 -malign-loops=2 -malign-jumps=2 -malign-functions=2 -DMODULE -DMODVERSIONS -include ../../include/linux/modversions.h -c rrunner.c"
 * End:
 */