octeon_mgmt.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009 Cavium Networks
 */

#include <linux/capability.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <linux/spinlock.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
        "Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/*
 * Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128

/* Allow 8 bytes for vlan and FCS. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
union mgmt_port_ring_entry {
        u64 d64;
        struct {
                u64 reserved_62_63:2;
                /* Length of the buffer/packet in bytes */
                u64 len:14;
                /* For TX, signals that the packet should be timestamped */
                u64 tstamp:1;
                /* The RX error code */
                u64 code:7;
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
                /* Physical address of the buffer */
                u64 addr:40;
        } s;
};
struct octeon_mgmt {
        struct net_device *netdev;
        int port;
        int irq;
        u64 *tx_ring;
        dma_addr_t tx_ring_handle;
        unsigned int tx_next;
        unsigned int tx_next_clean;
        unsigned int tx_current_fill;
        /* The tx_list lock also protects the ring related variables */
        struct sk_buff_head tx_list;

        /* RX variables only touched in napi_poll.  No locking necessary. */
        u64 *rx_ring;
        dma_addr_t rx_ring_handle;
        unsigned int rx_next;
        unsigned int rx_next_fill;
        unsigned int rx_current_fill;
        struct sk_buff_head rx_list;

        spinlock_t lock;
        unsigned int last_duplex;
        unsigned int last_link;
        struct device *dev;
        struct napi_struct napi;
        struct tasklet_struct tx_clean_tasklet;
        struct phy_device *phydev;
};
static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
        int port = p->port;
        union cvmx_mixx_intena mix_intena;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
        mix_intena.s.ithena = enable ? 1 : 0;
        cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
        int port = p->port;
        union cvmx_mixx_intena mix_intena;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
        mix_intena.s.othena = enable ? 1 : 0;
        cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
        spin_unlock_irqrestore(&p->lock, flags);
}

static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_rx_irq(p, 1);
}

static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_rx_irq(p, 0);
}

static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_tx_irq(p, 1);
}

static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_tx_irq(p, 0);
}
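
/*
 * Never fill a descriptor ring completely: the fill level is capped at
 * ring_size - 8 entries.  (The size of this slack margin is a driver
 * choice; the value 8 is not otherwise documented in this file.)
 */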
static unsigned int ring_max_fill(unsigned int ring_size)
{
        return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
        return ring_size * sizeof(union mgmt_port_ring_entry);
}
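
/*
 * Allocate skbs and post them to the RX descriptor ring until the ring
 * reaches its fill target.  Each newly posted buffer is announced to the
 * hardware by writing 1 to MIX_IRING2 ("ring the bell").
 */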
static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;

        while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
                unsigned int size;
                union mgmt_port_ring_entry re;
                struct sk_buff *skb;

                /* CN56XX pass 1 needs 8 bytes of padding.  */
                size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

                skb = netdev_alloc_skb(netdev, size);
                if (!skb)
                        break;
                skb_reserve(skb, NET_IP_ALIGN);
                __skb_queue_tail(&p->rx_list, skb);

                re.d64 = 0;
                re.s.len = size;
                re.s.addr = dma_map_single(p->dev, skb->data,
                                           size,
                                           DMA_FROM_DEVICE);

                /* Put it in the ring.  */
                p->rx_ring[p->rx_next_fill] = re.d64;
                dma_sync_single_for_device(p->dev, p->rx_ring_handle,
                                           ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                                           DMA_BIDIRECTIONAL);
                p->rx_next_fill =
                        (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
                p->rx_current_fill++;
                /* Ring the bell.  */
                cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
        }
}
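
/*
 * Reclaim TX descriptors that the hardware has finished with.  MIX_ORCNT
 * reports how many entries have completed; for each one, unmap the buffer,
 * free the skb and write 1 back to MIX_ORCNT to acknowledge it.
 */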
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
        int port = p->port;
        union cvmx_mixx_orcnt mix_orcnt;
        union mgmt_port_ring_entry re;
        struct sk_buff *skb;
        int cleaned = 0;
        unsigned long flags;

        mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
        while (mix_orcnt.s.orcnt) {
                dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
                                        ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                                        DMA_BIDIRECTIONAL);

                spin_lock_irqsave(&p->tx_list.lock, flags);

                re.d64 = p->tx_ring[p->tx_next_clean];
                p->tx_next_clean =
                        (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
                skb = __skb_dequeue(&p->tx_list);

                mix_orcnt.u64 = 0;
                mix_orcnt.s.orcnt = 1;

                /* Acknowledge to hardware that we have the buffer.  */
                cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64);
                p->tx_current_fill--;

                spin_unlock_irqrestore(&p->tx_list.lock, flags);

                dma_unmap_single(p->dev, re.s.addr, re.s.len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
                cleaned++;

                mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
        }

        if (cleaned && netif_queue_stopped(p->netdev))
                netif_wake_queue(p->netdev);
}

static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
        struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
        octeon_mgmt_clean_tx_buffers(p);
        octeon_mgmt_enable_tx_irq(p);
}
static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        unsigned long flags;
        u64 drop, bad;

        /* These reads also clear the count registers.  */
        drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port));
        bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port));

        if (drop || bad) {
                /* Do an atomic update. */
                spin_lock_irqsave(&p->lock, flags);
                netdev->stats.rx_errors += bad;
                netdev->stats.rx_dropped += drop;
                spin_unlock_irqrestore(&p->lock, flags);
        }
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        unsigned long flags;
        union cvmx_agl_gmx_txx_stat0 s0;
        union cvmx_agl_gmx_txx_stat1 s1;

        /* These reads also clear the count registers.  */
        s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port));
        s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port));

        if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
                /* Do an atomic update. */
                spin_lock_irqsave(&p->lock, flags);
                netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
                netdev->stats.collisions += s1.s.scol + s1.s.mcol;
                spin_unlock_irqrestore(&p->lock, flags);
        }
}
/*
 * Dequeue a receive skb and its corresponding ring entry.  The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
                                         struct sk_buff **pskb)
{
        union mgmt_port_ring_entry re;

        dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
                                ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                                DMA_BIDIRECTIONAL);

        re.d64 = p->rx_ring[p->rx_next];
        p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
        p->rx_current_fill--;
        *pskb = __skb_dequeue(&p->rx_list);

        dma_unmap_single(p->dev, re.s.addr,
                         ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
                         DMA_FROM_DEVICE);

        return re.d64;
}
static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
        int port = p->port;
        struct net_device *netdev = p->netdev;
        union cvmx_mixx_ircnt mix_ircnt;
        union mgmt_port_ring_entry re;
        struct sk_buff *skb;
        struct sk_buff *skb2;
        struct sk_buff *skb_new;
        union mgmt_port_ring_entry re2;
        int rc = 1;

        re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
        if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
                /* A good packet, send it up. */
                skb_put(skb, re.s.len);
good:
                skb->protocol = eth_type_trans(skb, netdev);
                netdev->stats.rx_packets++;
                netdev->stats.rx_bytes += skb->len;
                netdev->last_rx = jiffies;
                netif_receive_skb(skb);
                rc = 0;
        } else if (re.s.code == RING_ENTRY_CODE_MORE) {
                /*
                 * Packet split across skbs.  This can happen if we
                 * increase the MTU.  Buffers that are already in the
                 * rx ring can then end up being too small.  As the rx
                 * ring is refilled, buffers sized for the new MTU
                 * will be used and we should go back to the normal
                 * non-split case.
                 */
                skb_put(skb, re.s.len);
                do {
                        re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
                        if (re2.s.code != RING_ENTRY_CODE_MORE
                            && re2.s.code != RING_ENTRY_CODE_DONE)
                                goto split_error;
                        skb_put(skb2, re2.s.len);
                        skb_new = skb_copy_expand(skb, 0, skb2->len,
                                                  GFP_ATOMIC);
                        if (!skb_new)
                                goto split_error;
                        if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
                                          skb2->len))
                                goto split_error;
                        skb_put(skb_new, skb2->len);
                        dev_kfree_skb_any(skb);
                        dev_kfree_skb_any(skb2);
                        skb = skb_new;
                } while (re2.s.code == RING_ENTRY_CODE_MORE);
                goto good;
        } else {
                /* Some other error, discard it. */
                dev_kfree_skb_any(skb);
                /*
                 * Error statistics are accumulated in
                 * octeon_mgmt_update_rx_stats.
                 */
        }
        goto done;
split_error:
        /* Discard the whole mess. */
        dev_kfree_skb_any(skb);
        dev_kfree_skb_any(skb2);
        while (re2.s.code == RING_ENTRY_CODE_MORE) {
                re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
                dev_kfree_skb_any(skb2);
        }
        netdev->stats.rx_errors++;
done:
        /* Tell the hardware we processed a packet.  */
        mix_ircnt.u64 = 0;
        mix_ircnt.s.ircnt = 1;
        cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
        return rc;
}
static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
        int port = p->port;
        unsigned int work_done = 0;
        union cvmx_mixx_ircnt mix_ircnt;
        int rc;

        mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
        while (work_done < budget && mix_ircnt.s.ircnt) {

                rc = octeon_mgmt_receive_one(p);
                if (!rc)
                        work_done++;

                /* Check for more packets. */
                mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
        }

        octeon_mgmt_rx_fill_ring(p->netdev);

        return work_done;
}

static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
        struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
        struct net_device *netdev = p->netdev;
        unsigned int work_done = 0;

        work_done = octeon_mgmt_receive_packets(p, budget);

        if (work_done < budget) {
                /* We stopped because no more packets were available. */
                napi_complete(napi);
                octeon_mgmt_enable_rx_irq(p);
        }
        octeon_mgmt_update_rx_stats(netdev);

        return work_done;
}
/* Reset the hardware to clean state.  */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
        union cvmx_mixx_ctl mix_ctl;
        union cvmx_mixx_bist mix_bist;
        union cvmx_agl_gmx_bist agl_gmx_bist;

        mix_ctl.u64 = 0;
        cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
        do {
                mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port));
        } while (mix_ctl.s.busy);
        mix_ctl.s.reset = 1;
        cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
        cvmx_read_csr(CVMX_MIXX_CTL(p->port));
        cvmx_wait(64);

        mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port));
        if (mix_bist.u64)
                dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
                         (unsigned long long)mix_bist.u64);

        agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
        if (agl_gmx_bist.u64)
                dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
                         (unsigned long long)agl_gmx_bist.u64);
}
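
/*
 * Scratch state for building the destination-MAC CAM: cam[i] accumulates
 * byte i of each added address (8 bits per CAM entry), and cam_mask has
 * one bit set per valid entry.
 */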
struct octeon_mgmt_cam_state {
        u64 cam[6];
        u64 cam_mask;
        int cam_index;
};

static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
                                      unsigned char *addr)
{
        int i;

        for (i = 0; i < 6; i++)
                cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
        cs->cam_mask |= (1ULL << cs->cam_index);
        cs->cam_index++;
}
static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        int i;
        union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
        union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
        unsigned long flags;
        unsigned int prev_packet_enable;
        unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
        unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
        struct octeon_mgmt_cam_state cam_state;
        struct dev_addr_list *list;
        struct list_head *pos;
        int available_cam_entries;

        memset(&cam_state, 0, sizeof(cam_state));

        if ((netdev->flags & IFF_PROMISC) || netdev->dev_addrs.count > 7) {
                cam_mode = 0;
                available_cam_entries = 8;
        } else {
                /*
                 * One CAM entry for the primary address, leaves seven
                 * for the secondary addresses.
                 */
                available_cam_entries = 7 - netdev->dev_addrs.count;
        }

        if (netdev->flags & IFF_MULTICAST) {
                if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI)
                    || netdev->mc_count > available_cam_entries)
                        multicast_mode = 2; /* 2 - Accept all multicast.  */
                else
                        multicast_mode = 0; /* 0 - Use CAM.  */
        }

        if (cam_mode == 1) {
                /* Add primary address. */
                octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
                list_for_each(pos, &netdev->dev_addrs.list) {
                        struct netdev_hw_addr *hw_addr;
                        hw_addr = list_entry(pos, struct netdev_hw_addr, list);
                        octeon_mgmt_cam_state_add(&cam_state, hw_addr->addr);
                }
        }
        if (multicast_mode == 0) {
                i = netdev->mc_count;
                list = netdev->mc_list;
                while (i--) {
                        octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
                        list = list->next;
                }
        }

        spin_lock_irqsave(&p->lock, flags);

        /* Disable packet I/O. */
        agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
        prev_packet_enable = agl_gmx_prtx.s.en;
        agl_gmx_prtx.s.en = 0;
        cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

        adr_ctl.u64 = 0;
        adr_ctl.s.cam_mode = cam_mode;
        adr_ctl.s.mcst = multicast_mode;
        adr_ctl.s.bcst = 1;     /* Allow broadcast */

        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64);

        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask);

        /* Restore packet I/O. */
        agl_gmx_prtx.s.en = prev_packet_enable;
        cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
        spin_unlock_irqrestore(&p->lock, flags);
}
static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
        struct sockaddr *sa = addr;

        if (!is_valid_ether_addr(sa->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);

        octeon_mgmt_set_rx_filtering(netdev);

        return 0;
}

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;

        /*
         * Limit the MTU to make sure the ethernet packets are between
         * 64 bytes and 16383 bytes.
         */
        if (size_without_fcs < 64 || size_without_fcs > 16383) {
                dev_warn(p->dev, "MTU must be between %d and %d.\n",
                         64 - OCTEON_MGMT_RX_HEADROOM,
                         16383 - OCTEON_MGMT_RX_HEADROOM);
                return -EINVAL;
        }

        netdev->mtu = new_mtu;

        cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port),
                       (size_without_fcs + 7) & 0xfff8);

        return 0;
}
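
/*
 * Single interrupt handler for the port.  RX threshold interrupts are
 * handed off to NAPI and TX threshold interrupts to the TX-clean
 * tasklet; the corresponding interrupt source stays masked until the
 * deferred work re-enables it.
 */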
static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        union cvmx_mixx_isr mixx_isr;

        mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));

        /* Clear any pending interrupts */
        cvmx_write_csr(CVMX_MIXX_ISR(port),
                       cvmx_read_csr(CVMX_MIXX_ISR(port)));
        cvmx_read_csr(CVMX_MIXX_ISR(port));

        if (mixx_isr.s.irthresh) {
                octeon_mgmt_disable_rx_irq(p);
                napi_schedule(&p->napi);
        }
        if (mixx_isr.s.orthresh) {
                octeon_mgmt_disable_tx_irq(p);
                tasklet_schedule(&p->tx_clean_tasklet);
        }

        return IRQ_HANDLED;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
                             struct ifreq *rq, int cmd)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        if (!netif_running(netdev))
                return -EINVAL;

        if (!p->phydev)
                return -EINVAL;

        return phy_mii_ioctl(p->phydev, if_mii(rq), cmd);
}
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        union cvmx_agl_gmx_prtx_cfg prtx_cfg;
        unsigned long flags;
        int link_changed = 0;

        spin_lock_irqsave(&p->lock, flags);
        if (p->phydev->link) {
                if (!p->last_link)
                        link_changed = 1;
                if (p->last_duplex != p->phydev->duplex) {
                        p->last_duplex = p->phydev->duplex;
                        prtx_cfg.u64 =
                                cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
                        prtx_cfg.s.duplex = p->phydev->duplex;
                        cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port),
                                       prtx_cfg.u64);
                }
        } else {
                if (p->last_link)
                        link_changed = -1;
        }
        p->last_link = p->phydev->link;
        spin_unlock_irqrestore(&p->lock, flags);

        if (link_changed != 0) {
                if (link_changed > 0) {
                        netif_carrier_on(netdev);
                        pr_info("%s: Link is up - %d/%s\n", netdev->name,
                                p->phydev->speed,
                                DUPLEX_FULL == p->phydev->duplex ?
                                        "Full" : "Half");
                } else {
                        netif_carrier_off(netdev);
                        pr_info("%s: Link is down\n", netdev->name);
                }
        }
}

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        char phy_id[20];

        if (octeon_is_simulation()) {
                /* No PHYs in the simulator. */
                netif_carrier_on(netdev);
                return 0;
        }

        snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port);

        p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
                                PHY_INTERFACE_MODE_MII);

        if (IS_ERR(p->phydev)) {
                p->phydev = NULL;
                return -1;
        }

        phy_start_aneg(p->phydev);

        return 0;
}
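
/*
 * Bring the interface up: allocate and map both descriptor rings, take
 * the MIX/AGL blocks out of reset, program the MAC address, MTU,
 * interrupt watermarks and RX frame controls, then attach the PHY and
 * enable NAPI.
 */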
static int octeon_mgmt_open(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        union cvmx_mixx_ctl mix_ctl;
        union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
        union cvmx_mixx_oring1 oring1;
        union cvmx_mixx_iring1 iring1;
        union cvmx_agl_gmx_prtx_cfg prtx_cfg;
        union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
        union cvmx_mixx_irhwm mix_irhwm;
        union cvmx_mixx_orhwm mix_orhwm;
        union cvmx_mixx_intena mix_intena;
        struct sockaddr sa;

        /* Allocate ring buffers.  */
        p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                             GFP_KERNEL);
        if (!p->tx_ring)
                return -ENOMEM;
        p->tx_ring_handle =
                dma_map_single(p->dev, p->tx_ring,
                               ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                               DMA_BIDIRECTIONAL);
        p->tx_next = 0;
        p->tx_next_clean = 0;
        p->tx_current_fill = 0;

        p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                             GFP_KERNEL);
        if (!p->rx_ring)
                goto err_nomem;
        p->rx_ring_handle =
                dma_map_single(p->dev, p->rx_ring,
                               ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                               DMA_BIDIRECTIONAL);

        p->rx_next = 0;
        p->rx_next_fill = 0;
        p->rx_current_fill = 0;

        octeon_mgmt_reset_hw(p);

        mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));

        /* Bring it out of reset if needed. */
        if (mix_ctl.s.reset) {
                mix_ctl.s.reset = 0;
                cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
                do {
                        mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
                } while (mix_ctl.s.reset);
        }

        agl_gmx_inf_mode.u64 = 0;
        agl_gmx_inf_mode.s.en = 1;
        cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

        oring1.u64 = 0;
        oring1.s.obase = p->tx_ring_handle >> 3;
        oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
        cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);

        iring1.u64 = 0;
        iring1.s.ibase = p->rx_ring_handle >> 3;
        iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
        cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);

        /* Disable packet I/O. */
        prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
        prtx_cfg.s.en = 0;
        cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

        memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
        octeon_mgmt_set_mac_address(netdev, &sa);

        octeon_mgmt_change_mtu(netdev, netdev->mtu);

        /*
         * Enable the port HW.  Packets are not allowed until
         * cvmx_mgmt_port_enable() is called.
         */
        mix_ctl.u64 = 0;
        mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
        mix_ctl.s.en = 1;           /* Enable the port */
        mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
        /* MII CB-request FIFO programmable high watermark */
        mix_ctl.s.mrq_hwm = 1;
        cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);

        if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
            || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
                /*
                 * Force compensation values, as they are not
                 * determined properly by HW
                 */
                union cvmx_agl_gmx_drv_ctl drv_ctl;

                drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
                if (port) {
                        drv_ctl.s.byp_en1 = 1;
                        drv_ctl.s.nctl1 = 6;
                        drv_ctl.s.pctl1 = 6;
                } else {
                        drv_ctl.s.byp_en = 1;
                        drv_ctl.s.nctl = 6;
                        drv_ctl.s.pctl = 6;
                }
                cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
        }

        octeon_mgmt_rx_fill_ring(netdev);

        /* Clear statistics. */
        /* Clear on read. */
        cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
        cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);

        cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
        cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
        cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);

        /* Clear any pending interrupts */
        cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));

        if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
                        netdev)) {
                dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
                goto err_noirq;
        }

        /* Interrupt every single RX packet */
        mix_irhwm.u64 = 0;
        mix_irhwm.s.irhwm = 0;
        cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);

        /* Interrupt when we have 5 or more packets to clean.  */
        mix_orhwm.u64 = 0;
        mix_orhwm.s.orhwm = 5;
        cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);

        /* Enable receive and transmit interrupts */
        mix_intena.u64 = 0;
        mix_intena.s.ithena = 1;
        mix_intena.s.othena = 1;
        cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);

        /* Enable packet I/O. */
        rxx_frm_ctl.u64 = 0;
        rxx_frm_ctl.s.pre_align = 1;
        /*
         * When set, disables the length check for non-min sized pkts
         * with padding in the client data.
         */
        rxx_frm_ctl.s.pad_len = 1;
        /* When set, disables the length check for VLAN pkts */
        rxx_frm_ctl.s.vlan_len = 1;
        /* When set, PREAMBLE checking is less strict */
        rxx_frm_ctl.s.pre_free = 1;
        /* Control Pause Frames can match station SMAC */
        rxx_frm_ctl.s.ctl_smac = 0;
        /* Control Pause Frames can match globally assign Multicast address */
        rxx_frm_ctl.s.ctl_mcst = 1;
        /* Forward pause information to TX block */
        rxx_frm_ctl.s.ctl_bck = 1;
        /* Drop Control Pause Frames */
        rxx_frm_ctl.s.ctl_drp = 1;
        /* Strip off the preamble */
        rxx_frm_ctl.s.pre_strp = 1;
        /*
         * This port is configured to send PREAMBLE+SFD to begin every
         * frame.  GMX checks that the PREAMBLE is sent correctly.
         */
        rxx_frm_ctl.s.pre_chk = 1;
        cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);

        /* Enable the AGL block */
        agl_gmx_inf_mode.u64 = 0;
        agl_gmx_inf_mode.s.en = 1;
        cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

        /* Configure the port duplex and enables */
        prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
        prtx_cfg.s.tx_en = 1;
        prtx_cfg.s.rx_en = 1;
        prtx_cfg.s.en = 1;
        p->last_duplex = 1;
        prtx_cfg.s.duplex = p->last_duplex;
        cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

        p->last_link = 0;
        netif_carrier_off(netdev);

        if (octeon_mgmt_init_phy(netdev)) {
                dev_err(p->dev, "Cannot initialize PHY.\n");
                goto err_noirq;
        }

        netif_wake_queue(netdev);
        napi_enable(&p->napi);

        return 0;
err_noirq:
        octeon_mgmt_reset_hw(p);
        dma_unmap_single(p->dev, p->rx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->rx_ring);
err_nomem:
        dma_unmap_single(p->dev, p->tx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->tx_ring);
        return -ENOMEM;
}
static int octeon_mgmt_stop(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        napi_disable(&p->napi);
        netif_stop_queue(netdev);

        if (p->phydev)
                phy_disconnect(p->phydev);

        netif_carrier_off(netdev);

        octeon_mgmt_reset_hw(p);

        free_irq(p->irq, netdev);

        /* dma_unmap is a nop on Octeon, so just free everything.  */
        skb_queue_purge(&p->tx_list);
        skb_queue_purge(&p->rx_list);

        dma_unmap_single(p->dev, p->rx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->rx_ring);

        dma_unmap_single(p->dev, p->tx_ring_handle,
                         ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                         DMA_BIDIRECTIONAL);
        kfree(p->tx_ring);

        return 0;
}
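
/*
 * Queue one skb for transmission.  The descriptor carries the DMA
 * address and length of the skb data, and writing 1 to MIX_ORING2 tells
 * the hardware a new TX descriptor is available.  The skb stays on
 * tx_list until the completion path reclaims it.
 */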
static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int port = p->port;
        union mgmt_port_ring_entry re;
        unsigned long flags;

        re.d64 = 0;
        re.s.len = skb->len;
        re.s.addr = dma_map_single(p->dev, skb->data,
                                   skb->len,
                                   DMA_TO_DEVICE);

        spin_lock_irqsave(&p->tx_list.lock, flags);

        if (unlikely(p->tx_current_fill >=
                     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
                spin_unlock_irqrestore(&p->tx_list.lock, flags);

                dma_unmap_single(p->dev, re.s.addr, re.s.len,
                                 DMA_TO_DEVICE);

                netif_stop_queue(netdev);
                return NETDEV_TX_BUSY;
        }

        __skb_queue_tail(&p->tx_list, skb);

        /* Put it in the ring.  */
        p->tx_ring[p->tx_next] = re.d64;
        p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
        p->tx_current_fill++;

        spin_unlock_irqrestore(&p->tx_list.lock, flags);

        dma_sync_single_for_device(p->dev, p->tx_ring_handle,
                                   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                                   DMA_BIDIRECTIONAL);

        netdev->stats.tx_packets++;
        netdev->stats.tx_bytes += skb->len;

        /* Ring the bell.  */
        cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);

        netdev->trans_start = jiffies;
        octeon_mgmt_clean_tx_buffers(p);
        octeon_mgmt_update_tx_stats(netdev);

        return NETDEV_TX_OK;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void octeon_mgmt_poll_controller(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        octeon_mgmt_receive_packets(p, 16);
        octeon_mgmt_update_rx_stats(netdev);
}
#endif
static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
                                    struct ethtool_drvinfo *info)
{
        strncpy(info->driver, DRV_NAME, sizeof(info->driver));
        strncpy(info->version, DRV_VERSION, sizeof(info->version));
        strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
        strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
        info->n_stats = 0;
        info->testinfo_len = 0;
        info->regdump_len = 0;
        info->eedump_len = 0;
}

static int octeon_mgmt_get_settings(struct net_device *netdev,
                                    struct ethtool_cmd *cmd)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        if (p->phydev)
                return phy_ethtool_gset(p->phydev, cmd);

        return -EINVAL;
}

static int octeon_mgmt_set_settings(struct net_device *netdev,
                                    struct ethtool_cmd *cmd)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (p->phydev)
                return phy_ethtool_sset(p->phydev, cmd);

        return -EINVAL;
}

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
        .get_drvinfo = octeon_mgmt_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_settings = octeon_mgmt_get_settings,
        .set_settings = octeon_mgmt_set_settings
};

static const struct net_device_ops octeon_mgmt_ops = {
        .ndo_open =             octeon_mgmt_open,
        .ndo_stop =             octeon_mgmt_stop,
        .ndo_start_xmit =       octeon_mgmt_xmit,
        .ndo_set_rx_mode =      octeon_mgmt_set_rx_filtering,
        .ndo_set_multicast_list = octeon_mgmt_set_rx_filtering,
        .ndo_set_mac_address =  octeon_mgmt_set_mac_address,
        .ndo_do_ioctl =         octeon_mgmt_ioctl,
        .ndo_change_mtu =       octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller =  octeon_mgmt_poll_controller,
#endif
};
static int __init octeon_mgmt_probe(struct platform_device *pdev)
{
        struct resource *res_irq;
        struct net_device *netdev;
        struct octeon_mgmt *p;
        int i;

        netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
        if (netdev == NULL)
                return -ENOMEM;

        dev_set_drvdata(&pdev->dev, netdev);
        p = netdev_priv(netdev);
        netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
                       OCTEON_MGMT_NAPI_WEIGHT);

        p->netdev = netdev;
        p->dev = &pdev->dev;

        p->port = pdev->id;
        snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

        res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!res_irq)
                goto err;

        p->irq = res_irq->start;
        spin_lock_init(&p->lock);

        skb_queue_head_init(&p->tx_list);
        skb_queue_head_init(&p->rx_list);
        tasklet_init(&p->tx_clean_tasklet,
                     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

        netdev->netdev_ops = &octeon_mgmt_ops;
        netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

        /* The mgmt ports get the first N MACs.  */
        for (i = 0; i < 6; i++)
                netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
        netdev->dev_addr[5] += p->port;

        if (p->port >= octeon_bootinfo->mac_addr_count)
                dev_err(&pdev->dev,
                        "Error %s: Using MAC outside of the assigned range: "
                        "%02x:%02x:%02x:%02x:%02x:%02x\n", netdev->name,
                        netdev->dev_addr[0], netdev->dev_addr[1],
                        netdev->dev_addr[2], netdev->dev_addr[3],
                        netdev->dev_addr[4], netdev->dev_addr[5]);

        if (register_netdev(netdev))
                goto err;

        dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
        return 0;
err:
        free_netdev(netdev);
        return -ENOENT;
}

static int __exit octeon_mgmt_remove(struct platform_device *pdev)
{
        struct net_device *netdev = dev_get_drvdata(&pdev->dev);

        unregister_netdev(netdev);
        free_netdev(netdev);
        return 0;
}

static struct platform_driver octeon_mgmt_driver = {
        .driver = {
                .name   = "octeon_mgmt",
                .owner  = THIS_MODULE,
        },
        .probe  = octeon_mgmt_probe,
        .remove = __exit_p(octeon_mgmt_remove),
};

extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
{
        /* Force our mdiobus driver module to be loaded first. */
        octeon_mdiobus_force_mod_depencency();
        return platform_driver_register(&octeon_mgmt_driver);
}

static void __exit octeon_mgmt_mod_exit(void)
{
        platform_driver_unregister(&octeon_mgmt_driver);
}

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);