/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME		"b44"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"0.95"
#define DRV_MODULE_RELDATE	"Aug 3, 2004"

#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			60
#define B44_MAX_MTU			1500

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)
#define B44_DMA_MASK			0x3fffffff

#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ		(1536 + bp->rx_offset + 64)
#define TX_PKT_BUF_SZ		(B44_MAX_MTU + ETH_HLEN + 8)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)
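
/* A worked example of the ring accounting above, assuming the default
 * tx_pending of 511: with tx_cons == 2 and tx_prod == 10,
 * TX_BUFFS_AVAIL() yields 2 + 511 - 10 == 503 free descriptors.  The
 * queue is stopped in b44_start_xmit() when this drops below 1, and is
 * woken again by b44_tx() once more than B44_TX_WAKEUP_THRESH
 * (512 / 4 == 128) slots are free.
 */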

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

static struct pci_device_id b44_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ }	/* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);
static void b44_init_hw(struct b44 *);

static int b44_poll(struct net_device *dev, int *budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void b44_poll_controller(struct net_device *dev);
#endif

static int dma_desc_align_mask;
static int dma_desc_sync_size;

static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(&pdev->dev, dma_base,
					 offset & dma_desc_align_mask,
					 dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
				      offset & dma_desc_align_mask,
				      dma_desc_sync_size, dir);
}
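
/* The two helpers above sync a single descriptor, rounded down to the
 * alignment mask (presumably set up at module init, outside this
 * excerpt).  They only matter for the B44_FLAG_{RX,TX}_RING_HACK case
 * below, where a ring lives in a streaming DMA mapping rather than a
 * coherent one, so every CPU-side descriptor update must be flushed to
 * the device, and device writes made visible to the CPU, by hand.
 */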

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return readl(bp->regs + reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	writel(val, bp->regs + reg);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
		       "%lx to %s.\n",
		       bp->dev->name,
		       bit, reg,
		       (clear ? "clear" : "set"));
		return -ENODEV;
	}
	return 0;
}
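
/* Note that b44_wait_bit() polls in udelay(10) steps, so its timeout
 * argument is in units of 10 usec: the 100000 used by the SSB helpers
 * below is roughly one second, the 100 used for MDIO and CAM accesses
 * about one millisecond.
 */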

/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SB_PCI_DMA		0x40000000	/* Client Mode PCI memory access space (1 GB) */
#define BCM4400_PCI_CORE_ADDR	0x18002000	/* Address of PCI core on BCM4400 cards */

static u32 ssb_get_core_rev(struct b44 *bp)
{
	return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
	u32 bar_orig, pci_rev, val;

	pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
	pci_rev = ssb_get_core_rev(bp);

	val = br32(bp, B44_SBINTVEC);
	val |= cores;
	bw32(bp, B44_SBINTVEC, val);

	val = br32(bp, SSB_PCI_TRANS_2);
	val |= SSB_PCI_PREF | SSB_PCI_BURST;
	bw32(bp, SSB_PCI_TRANS_2, val);

	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

	return pci_rev;
}

static void ssb_core_disable(struct b44 *bp)
{
	if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
		return;

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
				SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}

static void ssb_core_reset(struct b44 *bp)
{
	u32 val;

	ssb_core_disable(bp);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	/* Clear SERR if set, this is a hw bug workaround.  */
	if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
		bw32(bp, B44_SBTMSHIGH, 0);

	val = br32(bp, B44_SBIMSTATE);
	if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
		bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}
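
/* The disable/reset sequence above follows the usual Sonics backplane
 * pattern: set REJECT and wait for the core to go non-busy so no
 * backplane transactions are in flight, assert RESET with the clock
 * forced (FGC), clear any latched SERR/timeout error state, then drop
 * FGC and finally leave only the clock running.
 */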

static int ssb_core_unit(struct b44 *bp)
{
#if 0
	u32 val = br32(bp, B44_SBADMATCH0);
	u32 type, base;

	type = val & SBADMATCH0_TYPE_MASK;
	switch (type) {
	case 0:
		base = val & SBADMATCH0_BS0_MASK;
		break;

	case 1:
		base = val & SBADMATCH0_BS1_MASK;
		break;

	case 2:
	default:
		base = val & SBADMATCH0_BS2_MASK;
		break;
	}
#endif
	return 0;
}

static int ssb_is_core_up(struct b44 *bp)
{
	return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
		== SBTMSLOW_CLOCK);
}

static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
				(index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
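
/* CAM data layout used above: for a MAC address a0:a1:a2:a3:a4:a5,
 * CAM_DATA_HI holds the valid bit plus a0/a1 in its low 16 bits and
 * CAM_DATA_LO holds a2..a5.  E.g. 00:10:18:aa:bb:cc is written as
 * DATA_HI = CAM_DATA_HI_VALID | 0x0010, DATA_LO = 0x18aabbcc.
 */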

static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}

static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
				 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
				 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
				 (reg << MDIO_DATA_RA_SHIFT) |
				 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}

static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
				 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
				 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
				 (reg << MDIO_DATA_RA_SHIFT) |
				 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
				 (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
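
/* The two MDIO routines above assemble a standard clause-22 management
 * frame in B44_MDIO_DATA (start bits, read/write opcode, PHY address,
 * register address, turnaround bits and, for writes, 16 bits of data)
 * and then poll the EMAC_INT_MII bit for completion via b44_wait_bit().
 */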

/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = b44_readphy(bp, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			  int val)
{
	struct b44 *bp = netdev_priv(dev);
	b44_writephy(bp, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
			       bp->dev->name);
			err = -ENODEV;
		}
	}

	return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = bp->flags & (B44_FLAG_TX_PAUSE |
				      B44_FLAG_RX_PAUSE);

	if (local & ADVERTISE_PAUSE_CAP) {
		if (local & ADVERTISE_PAUSE_ASYM) {
			if (remote & LPA_PAUSE_CAP)
				pause_enab |= (B44_FLAG_TX_PAUSE |
					       B44_FLAG_RX_PAUSE);
			else if (remote & LPA_PAUSE_ASYM)
				pause_enab |= B44_FLAG_RX_PAUSE;
		} else {
			if (remote & LPA_PAUSE_CAP)
				pause_enab |= (B44_FLAG_TX_PAUSE |
					       B44_FLAG_RX_PAUSE);
		}
	} else if (local & ADVERTISE_PAUSE_ASYM) {
		if ((remote & LPA_PAUSE_CAP) &&
		    (remote & LPA_PAUSE_ASYM))
			pause_enab |= B44_FLAG_TX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}
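
/* Pause resolution implemented by b44_set_flow_ctrl() above, following
 * the usual 802.3 autoneg rules:
 *
 *	local CAP        + remote CAP          -> TX and RX pause
 *	local CAP + ASYM + remote ASYM only    -> RX pause only
 *	local ASYM only  + remote CAP and ASYM -> TX pause only
 *	anything else                          -> no pause added
 */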

static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}

static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u32 *val;

	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}
	val = &bp->hw_stats.rx_good_octets;
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}
}
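
/* b44_stats_update() relies on the u32 counters in struct b44_hw_stats
 * being laid out in exactly the order of the MIB registers from
 * B44_TX_GOOD_O..B44_TX_PAUSE and B44_RX_GOOD_O..B44_RX_NPAUSE.  The
 * hardware counters clear on read (MIB_CTRL_CLR_ON_READ is set at
 * init), hence the "+=" accumulation.
 */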

static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
	} else {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
		       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
		printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
		       "%s for RX.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
		       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
			       bp->dev->name);
		if (bmsr & BMSR_JCD)
			printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
			       bp->dev->name);
	}
}

static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	bp->timer.expires = jiffies + HZ;
	add_timer(&bp->timer);
}

static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		if (unlikely(skb == NULL))
			BUG();

		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 skb->len,
				 PCI_DMA_TODEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
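
/* Resulting receive buffer layout; rx_offset covers the rx_header plus
 * (presumably) a small pad that keeps the IP header 4-byte aligned:
 *
 *	mapping                            mapping + rx_offset
 *	|<--- rx_header (chip) --->|<-pad->|<--- packet data --->|
 */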

static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = dev_alloc_skb(RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data,
				 RX_PKT_BUF_SZ,
				 PCI_DMA_FROMDEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
		/* Sigh... */
		pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		skb = __dev_alloc_skb(RX_PKT_BUF_SZ, GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = pci_map_single(bp->pdev, skb->data,
					 RX_PKT_BUF_SZ,
					 PCI_DMA_FROMDEVICE);
		if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
			pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
	}

	skb->dev = bp->dev;
	skb_reserve(skb, bp->rx_offset);

	rh = (struct rx_header *)
		(skb->data - bp->rx_offset);
	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
					     dest_idx * sizeof(struct dma_desc),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	u32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map  = &bp->rx_buffers[dest_idx];
	src_desc  = &bp->rx_ring[src_idx];
	src_map   = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
					  src_idx * sizeof(struct dma_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
					     dest_idx * sizeof(struct dma_desc),
					     DMA_BIDIRECTIONAL);

	pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
				       RX_PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
}
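
/* RX strategy of b44_rx() below: frames longer than RX_COPY_THRESHOLD
 * have their skb handed straight up the stack and a fresh buffer
 * allocated into the ring ("buffer flip"), while short frames are
 * copied into a small freshly allocated skb and the original buffer is
 * recycled in place, avoiding a full-size allocation per packet.
 */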

static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = pci_unmap_addr(rp, mapping);
		struct rx_header *rh;
		u16 len;

		pci_dma_sync_single_for_cpu(bp->pdev, map,
					    RX_PKT_BUF_SZ,
					    PCI_DMA_FROMDEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			pci_unmap_single(bp->pdev, map,
					 skb_size, PCI_DMA_FROMDEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + bp->rx_offset);
			skb_pull(skb, bp->rx_offset);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = bp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			memcpy(copy_skb->data, skb->data + bp->rx_offset, len);

			skb = copy_skb;
		}
		skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		bp->dev->last_rx = jiffies;
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}

static int b44_poll(struct net_device *netdev, int *budget)
{
	struct b44 *bp = netdev_priv(netdev);
	int done;

	spin_lock_irq(&bp->lock);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	spin_unlock_irq(&bp->lock);

	done = 1;
	if (bp->istat & ISTAT_RX) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = b44_rx(bp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;

		if (work_done >= orig_budget)
			done = 0;
	}

	if (bp->istat & ISTAT_ERRORS) {
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp);
		netif_wake_queue(bp->dev);
		spin_unlock_irq(&bp->lock);
		done = 1;
	}

	if (done) {
		netif_rx_complete(netdev);
		b44_enable_ints(bp);
	}

	return (done ? 0 : 1);
}
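
/* Old-style NAPI contract honored by b44_poll() above: decrement both
 * *budget and netdev->quota by the amount of RX work done, return 1 to
 * stay on the poll list, or call netif_rx_complete(), re-enable
 * interrupts and return 0 once all outstanding work is finished.
 */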

static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	unsigned long flags;
	u32 istat, imask;
	int handled = 0;

	spin_lock_irqsave(&bp->lock, flags);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* ??? What the fuck is the purpose of the interrupt mask
	 * ??? register if we have to mask it out by hand anyways?
	 */
	istat &= imask;
	if (istat) {
		handled = 1;
		if (netif_rx_schedule_prep(dev)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__netif_rx_schedule(dev);
		} else {
			printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
			       dev->name);
		}

		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
	return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}

static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct sk_buff *bounce_skb;
	dma_addr_t mapping;
	u32 len, entry, ctrl;

	len = skb->len;
	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&bp->lock);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (mapping + len > B44_DMA_MASK) {
		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

		bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
					     GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			spin_unlock_irq(&bp->lock);
			return NETDEV_TX_BUSY;
		}

		mapping = pci_map_single(bp->pdev, bounce_skb->data,
					 len, PCI_DMA_TODEVICE);
		if (mapping + len > B44_DMA_MASK) {
			pci_unmap_single(bp->pdev, mapping,
					 len, PCI_DMA_TODEVICE);
			dev_kfree_skb_any(bounce_skb);
			spin_unlock_irq(&bp->lock);
			return NETDEV_TX_BUSY;
		}

		memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	spin_unlock_irq(&bp->lock);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 RX_PKT_BUF_SZ,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 rp->skb->len,
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES,
					   DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES,
					   DMA_TO_DEVICE);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
					 DMA_TABLE_BYTES,
					 DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
					    bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
					 DMA_TABLE_BYTES,
					 DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
					    bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
	int size;

	size = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kmalloc(size, GFP_KERNEL);
	if (!bp->rx_buffers)
		goto out_err;
	memset(bp->rx_buffers, 0, size);

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kmalloc(size, GFP_KERNEL);
	if (!bp->tx_buffers)
		goto out_err;
	memset(bp->tx_buffers, 0, size);

	size = DMA_TABLE_BYTES;
	bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
	if (!bp->rx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		if (!(rx_ring = kmalloc(size, GFP_KERNEL)))
			goto out_err;

		memset(rx_ring, 0, size);
		rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);

		if (rx_ring_dma + size > B44_DMA_MASK) {
			dma_unmap_single(&bp->pdev->dev, rx_ring_dma,
					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
	if (!bp->tx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		if (!(tx_ring = kmalloc(size, GFP_KERNEL)))
			goto out_err;

		memset(tx_ring, 0, size);
		tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);

		if (tx_ring_dma + size > B44_DMA_MASK) {
			dma_unmap_single(&bp->pdev->dev, tx_ring_dma,
					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}
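
/* Everything above is driven by the chip's 30-bit DMA limitation
 * (B44_DMA_MASK): if a coherent ring allocation fails or lands above
 * 1GB, the ring is redone as a kmalloc() plus streaming mapping and
 * flagged with B44_FLAG_{RX,TX}_RING_HACK, while packet buffers that
 * map too high are bounced through GFP_DMA skbs in b44_alloc_rx_skb()
 * and b44_start_xmit().
 */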

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
	if (ssb_is_core_up(bp)) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	} else {
		ssb_pci_setup(bp, (bp->core_unit == 0 ?
				   SBINTVEC_ENET0 :
				   SBINTVEC_ENET1));
	}

	ssb_core_reset(bp);

	b44_clear_stats(bp);

	/* Make PHY accessible. */
	bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
				 (0x0d & MDIO_CTRL_MAXF_MASK)));
	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	b44_chip_reset(bp);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (netif_running(dev))
		return -EBUSY;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);
	__b44_set_mac_addr(bp);
	spin_unlock_irq(&bp->lock);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp)
{
	u32 val;

	b44_chip_reset(bp);
	b44_phy_reset(bp);
	b44_setup_phy(bp);

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
	bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
	bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				  (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
	bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

	bw32(bp, B44_DMARX_PTR, bp->rx_pending);
	bp->rx_prod = bp->rx_pending;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp);
	if (err)
		return err;

	err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
	if (err)
		goto err_out_free;

	spin_lock_irq(&bp->lock);

	b44_init_rings(bp);
	b44_init_hw(bp);
	bp->flags |= B44_FLAG_INIT_COMPLETE;

	netif_carrier_off(dev);
	b44_check_phy(bp);

	spin_unlock_irq(&bp->lock);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);

	return 0;

err_out_free:
	b44_free_consistent(bp);
	return err;
}

#if 0
/*static*/ void b44_dump_state(struct b44 *bp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;

	pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
	printk("DEBUG: PCI status [%04x]\n", val16);
}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev, NULL);
	enable_irq(dev->irq);
}
#endif

static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

#if 0
	b44_dump_state(bp);
#endif
	b44_halt(bp);
	b44_free_rings(bp);
	bp->flags &= ~B44_FLAG_INIT_COMPLETE;
	netif_carrier_off(bp->dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	b44_free_consistent(bp);

	return 0;
}

static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats. */
	nstat->rx_packets = hwstat->rx_pkts;
	nstat->tx_packets = hwstat->tx_pkts;
	nstat->rx_bytes   = hwstat->rx_octets;
	nstat->tx_bytes   = hwstat->tx_octets;
	nstat->tx_errors  = (hwstat->tx_jabber_pkts +
			     hwstat->tx_oversize_pkts +
			     hwstat->tx_underruns +
			     hwstat->tx_excessive_cols +
			     hwstat->tx_late_cols);
	nstat->multicast  = hwstat->tx_multicast_pkts;
	nstat->collisions = hwstat->tx_total_cols;

	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize);
	nstat->rx_over_errors   = hwstat->rx_missed_pkts;
	nstat->rx_frame_errors  = hwstat->rx_align_errs;
	nstat->rx_crc_errors    = hwstat->rx_crc_errs;
	nstat->rx_errors        = (hwstat->rx_jabber_pkts +
				   hwstat->rx_oversize_pkts +
				   hwstat->rx_missed_pkts +
				   hwstat->rx_crc_align_errs +
				   hwstat->rx_undersize +
				   hwstat->rx_crc_errs +
				   hwstat->rx_align_errs +
				   hwstat->rx_symbol_errs);

	nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
	/* Carrier lost counter seems to be broken for some devices */
	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif

	return nstat;
}

static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct dev_mc_list *mclist;
	int i, num_ents;

	num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
	mclist = dev->mc_list;
	for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
		__b44_cam_write(bp, mclist->dmi_addr, i + 1);
	}
	return i + 1;
}

static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;
	int i = 0;
	unsigned char zero[6] = { 0, 0, 0, 0, 0, 0 };

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if (dev->flags & IFF_PROMISC) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		__b44_set_mac_addr(bp);

		if (dev->flags & IFF_ALLMULTI)
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		for (; i < 64; i++) {
			__b44_cam_write(bp, zero, i);
		}
		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}

static u32 b44_get_msglevel(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}

static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct pci_dev *pci_dev = bp->pdev;

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(pci_dev));
}

static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}

static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
		return -EAGAIN;
	cmd->supported = (SUPPORTED_Autoneg);
	cmd->supported |= (SUPPORTED_100baseT_Half |
			   SUPPORTED_100baseT_Full |
			   SUPPORTED_10baseT_Half |
			   SUPPORTED_10baseT_Full |
			   SUPPORTED_MII);

	cmd->advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = 0;
	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
		return -EAGAIN;

	/* We do not support gigabit. */
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (cmd->advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((cmd->speed != SPEED_100 &&
		    cmd->speed != SPEED_10) ||
		   (cmd->duplex != DUPLEX_HALF &&
		    cmd->duplex != DUPLEX_FULL)) {
		return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		bp->flags &= ~B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (cmd->advertising & ADVERTISED_10baseT_Half)
			bp->flags |= B44_FLAG_ADV_10HALF;
		if (cmd->advertising & ADVERTISED_10baseT_Full)
			bp->flags |= B44_FLAG_ADV_10FULL;
		if (cmd->advertising & ADVERTISED_100baseT_Half)
			bp->flags |= B44_FLAG_ADV_100HALF;
		if (cmd->advertising & ADVERTISED_100baseT_Full)
			bp->flags |= B44_FLAG_ADV_100FULL;
	} else {
		bp->flags |= B44_FLAG_FORCE_LINK;
		if (cmd->speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}
static void b44_get_ringparam(struct net_device *dev,
                              struct ethtool_ringparam *ering)
{
        struct b44 *bp = netdev_priv(dev);

        ering->rx_max_pending = B44_RX_RING_SIZE - 1;
        ering->rx_pending = bp->rx_pending;

        /* XXX ethtool lacks a tx_max_pending, oops... */
}
static int b44_set_ringparam(struct net_device *dev,
                             struct ethtool_ringparam *ering)
{
        struct b44 *bp = netdev_priv(dev);

        if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
            (ering->rx_mini_pending != 0) ||
            (ering->rx_jumbo_pending != 0) ||
            (ering->tx_pending > B44_TX_RING_SIZE - 1))
                return -EINVAL;

        spin_lock_irq(&bp->lock);

        bp->rx_pending = ering->rx_pending;
        bp->tx_pending = ering->tx_pending;
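        /* New ring sizes only take effect across a full restart:
         * halt the chip, rebuild the rings at the new sizes, then
         * bring the hardware back up.
         */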
        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp);
        netif_wake_queue(bp->dev);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}
static void b44_get_pauseparam(struct net_device *dev,
                               struct ethtool_pauseparam *epause)
{
        struct b44 *bp = netdev_priv(dev);

        epause->autoneg =
                (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
        epause->rx_pause =
                (bp->flags & B44_FLAG_RX_PAUSE) != 0;
        epause->tx_pause =
                (bp->flags & B44_FLAG_TX_PAUSE) != 0;
}
static int b44_set_pauseparam(struct net_device *dev,
                              struct ethtool_pauseparam *epause)
{
        struct b44 *bp = netdev_priv(dev);

        spin_lock_irq(&bp->lock);
        if (epause->autoneg)
                bp->flags |= B44_FLAG_PAUSE_AUTO;
        else
                bp->flags &= ~B44_FLAG_PAUSE_AUTO;
        if (epause->rx_pause)
                bp->flags |= B44_FLAG_RX_PAUSE;
        else
                bp->flags &= ~B44_FLAG_RX_PAUSE;
        if (epause->tx_pause)
                bp->flags |= B44_FLAG_TX_PAUSE;
        else
                bp->flags &= ~B44_FLAG_TX_PAUSE;
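        /* If pause is to be autonegotiated, restart the chip so the
         * link is renegotiated with the new pause advertisement;
         * otherwise just program the flow-control registers directly.
         */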
        if (bp->flags & B44_FLAG_PAUSE_AUTO) {
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp);
        } else {
                __b44_set_flow_ctrl(bp, bp->flags);
        }
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}
static struct ethtool_ops b44_ethtool_ops = {
        .get_drvinfo            = b44_get_drvinfo,
        .get_settings           = b44_get_settings,
        .set_settings           = b44_set_settings,
        .nway_reset             = b44_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_ringparam          = b44_get_ringparam,
        .set_ringparam          = b44_set_ringparam,
        .get_pauseparam         = b44_get_pauseparam,
        .set_pauseparam         = b44_set_pauseparam,
        .get_msglevel           = b44_get_msglevel,
        .set_msglevel           = b44_set_msglevel,
        .get_perm_addr          = ethtool_op_get_perm_addr,
};
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct b44 *bp = netdev_priv(dev);
        int err;

        spin_lock_irq(&bp->lock);
        err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
        spin_unlock_irq(&bp->lock);

        return err;
}
/* Read 128 bytes of EEPROM. */
static int b44_read_eeprom(struct b44 *bp, u8 *data)
{
        long i;
        u16 *ptr = (u16 *) data;

        for (i = 0; i < 128; i += 2)
                ptr[i / 2] = readw(bp->regs + 4096 + i);

        return 0;
}
static int __devinit b44_get_invariants(struct b44 *bp)
{
        u8 eeprom[128];
        int err;

        err = b44_read_eeprom(bp, &eeprom[0]);
        if (err)
                goto out;
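        /* The MAC address appears byte-swapped within each 16-bit word,
         * presumably because the EEPROM is read as little-endian
         * halfwords while the address is stored big-endian -- an
         * inference from the pairwise swap below, not documented in
         * the original.
         */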
        bp->dev->dev_addr[0] = eeprom[79];
        bp->dev->dev_addr[1] = eeprom[78];
        bp->dev->dev_addr[2] = eeprom[81];
        bp->dev->dev_addr[3] = eeprom[80];
        bp->dev->dev_addr[4] = eeprom[83];
        bp->dev->dev_addr[5] = eeprom[82];
        memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

        bp->phy_addr = eeprom[90] & 0x1f;

        /* With this, plus the rx_header prepended to the data by the
         * hardware, we'll land the ethernet header on a 2-byte boundary.
         */
        bp->rx_offset = 30;
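        /* Worked out: assuming the 28-byte rx_header this driver uses,
         * 30 = 28 + 2, so the 14-byte Ethernet header ends at offset 44
         * and the IP header that follows starts 32-bit aligned.
         */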
        bp->imask = IMASK_DEF;

        bp->core_unit = ssb_core_unit(bp);
        bp->dma_offset = SB_PCI_DMA;

        /* XXX - really required?
           bp->flags |= B44_FLAG_BUGGY_TXPTR;
         */
out:
        return err;
}
static int __devinit b44_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
{
        static int b44_version_printed = 0;
        unsigned long b44reg_base, b44reg_len;
        struct net_device *dev;
        struct b44 *bp;
        int err, i;

        if (b44_version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        err = pci_enable_device(pdev);
        if (err) {
                printk(KERN_ERR PFX "Cannot enable PCI device, "
                       "aborting.\n");
                return err;
        }

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                printk(KERN_ERR PFX "Cannot find proper PCI device "
                       "base address, aborting.\n");
                err = -ENODEV;
                goto err_out_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (err) {
                printk(KERN_ERR PFX "Cannot obtain PCI resources, "
                       "aborting.\n");
                goto err_out_disable_pdev;
        }

        pci_set_master(pdev);

        err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
        if (err) {
                printk(KERN_ERR PFX "No usable DMA configuration, "
                       "aborting.\n");
                goto err_out_free_res;
        }

        err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
        if (err) {
                printk(KERN_ERR PFX "No usable DMA configuration, "
                       "aborting.\n");
                goto err_out_free_res;
        }

        b44reg_base = pci_resource_start(pdev, 0);
        b44reg_len = pci_resource_len(pdev, 0);

        dev = alloc_etherdev(sizeof(*bp));
        if (!dev) {
                printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
                err = -ENOMEM;
                goto err_out_free_res;
        }

        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        /* No interesting netdevice features in this card... */
        dev->features |= 0;

        bp = netdev_priv(dev);
        bp->pdev = pdev;
        bp->dev = dev;
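        /* When b44_debug is >= 0, it is treated as a count of NETIF_MSG_*
         * levels to enable: (1 << n) - 1 sets the n lowest message bits,
         * e.g. b44_debug=3 yields 0x7 (drv, probe and link messages).
         */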
        if (b44_debug >= 0)
                bp->msg_enable = (1 << b44_debug) - 1;
        else
                bp->msg_enable = B44_DEF_MSG_ENABLE;

        spin_lock_init(&bp->lock);

        bp->regs = ioremap(b44reg_base, b44reg_len);
        if (!bp->regs) {
                printk(KERN_ERR PFX "Cannot map device registers, "
                       "aborting.\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }
        bp->rx_pending = B44_DEF_RX_RING_PENDING;
        bp->tx_pending = B44_DEF_TX_RING_PENDING;

        dev->open = b44_open;
        dev->stop = b44_close;
        dev->hard_start_xmit = b44_start_xmit;
        dev->get_stats = b44_get_stats;
        dev->set_multicast_list = b44_set_rx_mode;
        dev->set_mac_address = b44_set_mac_addr;
        dev->do_ioctl = b44_ioctl;
        dev->tx_timeout = b44_tx_timeout;
        dev->poll = b44_poll;
        dev->weight = 64;
        dev->watchdog_timeo = B44_TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = b44_poll_controller;
#endif
        dev->change_mtu = b44_change_mtu;
        dev->irq = pdev->irq;
        SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

        err = b44_get_invariants(bp);
        if (err) {
                printk(KERN_ERR PFX "Problem fetching invariants of chip, "
                       "aborting.\n");
                goto err_out_iounmap;
        }

        bp->mii_if.dev = dev;
        bp->mii_if.mdio_read = b44_mii_read;
        bp->mii_if.mdio_write = b44_mii_write;
        bp->mii_if.phy_id = bp->phy_addr;
        bp->mii_if.phy_id_mask = 0x1f;
        bp->mii_if.reg_num_mask = 0x1f;

        /* By default, advertise all speed/duplex settings. */
        bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
                      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

        /* By default, auto-negotiate PAUSE. */
        bp->flags |= B44_FLAG_PAUSE_AUTO;

        err = register_netdev(dev);
        if (err) {
                printk(KERN_ERR PFX "Cannot register net device, "
                       "aborting.\n");
                goto err_out_iounmap;
        }

        pci_set_drvdata(pdev, dev);

        pci_save_state(bp->pdev);

        printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
        for (i = 0; i < 6; i++)
                printk("%2.2x%c", dev->dev_addr[i],
                       i == 5 ? '\n' : ':');

        return 0;
err_out_iounmap:
        iounmap(bp->regs);

err_out_free_dev:
        free_netdev(dev);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}
static void __devexit b44_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct b44 *bp = netdev_priv(dev);

                unregister_netdev(dev);
                iounmap(bp->regs);
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}
static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct b44 *bp = netdev_priv(dev);

        if (!netif_running(dev))
                return 0;

        del_timer_sync(&bp->timer);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        netif_carrier_off(bp->dev);
        netif_device_detach(bp->dev);
        b44_free_rings(bp);

        spin_unlock_irq(&bp->lock);
        free_irq(dev->irq, dev);
        pci_disable_device(pdev);
        return 0;
}
static int b44_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct b44 *bp = netdev_priv(dev);

        pci_restore_state(pdev);
        pci_enable_device(pdev);
        pci_set_master(pdev);

        if (!netif_running(dev))
                return 0;

        if (request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev))
                printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);

        spin_lock_irq(&bp->lock);

        b44_init_rings(bp);
        b44_init_hw(bp);
        netif_device_attach(bp->dev);
        spin_unlock_irq(&bp->lock);

        bp->timer.expires = jiffies + HZ;
        add_timer(&bp->timer);

        b44_enable_ints(bp);
        return 0;
}
static struct pci_driver b44_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = b44_pci_tbl,
        .probe          = b44_init_one,
        .remove         = __devexit_p(b44_remove_one),
        .suspend        = b44_suspend,
        .resume         = b44_resume,
};
static int __init b44_init(void)
{
        unsigned int dma_desc_align_size = dma_get_cache_alignment();

        /* Setup parameters for syncing RX/TX DMA descriptors */
        dma_desc_align_mask = ~(dma_desc_align_size - 1);
        dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size,
                                   sizeof(struct dma_desc));
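        /* For example, with 32-byte cache lines dma_desc_align_size is
         * 32, so dma_desc_align_mask becomes ~31 (0xffffffe0) and
         * descriptors are synced in at least cache-line-sized,
         * line-aligned chunks.  (Illustrative values only; the actual
         * alignment is machine-specific.)
         */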
        return pci_module_init(&b44_driver);
}

static void __exit b44_cleanup(void)
{
        pci_unregister_driver(&b44_driver);
}

module_init(b44_init);
module_exit(b44_cleanup);