/* ibm_emac_mal.c */
/*
 * ibm_emac_mal.c
 *
 * Armin Kuster akuster@mvista.com
 * June, 2002
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
  14. #include <linux/config.h>
  15. #include <linux/module.h>
  16. #include <linux/kernel.h>
  17. #include <linux/errno.h>
  18. #include <linux/netdevice.h>
  19. #include <linux/init.h>
  20. #include <linux/dma-mapping.h>
  21. #include <asm/io.h>
  22. #include <asm/irq.h>
  23. #include <asm/ocp.h>
  24. #include "ibm_emac_mal.h"
// Locking: Should we share a lock with the client?  The client could provide
// a lock pointer (optionally) in the commac structure... I don't think this
// is really necessary though.

/* This lock protects the commac list.  On today's UP implementations, it's
 * really only used as IRQ protection in mal_{register,unregister}_commac().
 */
static DEFINE_RWLOCK(mal_list_lock);
  32. int mal_register_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
  33. {
  34. unsigned long flags;
  35. write_lock_irqsave(&mal_list_lock, flags);
  36. /* Don't let multiple commacs claim the same channel */
  37. if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
  38. (mal->rx_chan_mask & commac->rx_chan_mask)) {
  39. write_unlock_irqrestore(&mal_list_lock, flags);
  40. return -EBUSY;
  41. }
  42. mal->tx_chan_mask |= commac->tx_chan_mask;
  43. mal->rx_chan_mask |= commac->rx_chan_mask;
  44. list_add(&commac->list, &mal->commac);
  45. write_unlock_irqrestore(&mal_list_lock, flags);
  46. return 0;
  47. }
  48. int mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
  49. {
  50. unsigned long flags;
  51. write_lock_irqsave(&mal_list_lock, flags);
  52. mal->tx_chan_mask &= ~commac->tx_chan_mask;
  53. mal->rx_chan_mask &= ~commac->rx_chan_mask;
  54. list_del_init(&commac->list);
  55. write_unlock_irqrestore(&mal_list_lock, flags);
  56. return 0;
  57. }
/*
 * Program the receive channel buffer size register (MALRCBSn) for one
 * RX channel.
 *
 * channel: RX channel number; channels above 0 are only valid when the
 *          platform defines the corresponding DCRN_MALRCBSn register.
 * size:    value written verbatim to the DCR.  NOTE(review): the MAL
 *          hardware encodes this in its own units (not raw bytes per the
 *          MAL manual) -- confirm against callers.
 *
 * Returns 0 on success, -EINVAL if the channel has no RCBS register.
 */
int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size)
{
	switch (channel) {
	case 0:
		set_mal_dcrn(mal, DCRN_MALRCBS0, size);
		break;
#ifdef DCRN_MALRCBS1
	case 1:
		set_mal_dcrn(mal, DCRN_MALRCBS1, size);
		break;
#endif
#ifdef DCRN_MALRCBS2
	case 2:
		set_mal_dcrn(mal, DCRN_MALRCBS2, size);
		break;
#endif
#ifdef DCRN_MALRCBS3
	case 3:
		set_mal_dcrn(mal, DCRN_MALRCBS3, size);
		break;
#endif
	default:
		return -EINVAL;
	}
	return 0;
}
  84. static irqreturn_t mal_serr(int irq, void *dev_instance, struct pt_regs *regs)
  85. {
  86. struct ibm_ocp_mal *mal = dev_instance;
  87. unsigned long mal_error;
  88. /*
  89. * This SERR applies to one of the devices on the MAL, here we charge
  90. * it against the first EMAC registered for the MAL.
  91. */
  92. mal_error = get_mal_dcrn(mal, DCRN_MALESR);
  93. printk(KERN_ERR "%s: System Error (MALESR=%lx)\n",
  94. "MAL" /* FIXME: get the name right */ , mal_error);
  95. /* FIXME: decipher error */
  96. /* DIXME: distribute to commacs, if possible */
  97. /* Clear the error status register */
  98. set_mal_dcrn(mal, DCRN_MALESR, mal_error);
  99. return IRQ_HANDLED;
  100. }
  101. static irqreturn_t mal_txeob(int irq, void *dev_instance, struct pt_regs *regs)
  102. {
  103. struct ibm_ocp_mal *mal = dev_instance;
  104. struct list_head *l;
  105. unsigned long isr;
  106. isr = get_mal_dcrn(mal, DCRN_MALTXEOBISR);
  107. set_mal_dcrn(mal, DCRN_MALTXEOBISR, isr);
  108. read_lock(&mal_list_lock);
  109. list_for_each(l, &mal->commac) {
  110. struct mal_commac *mc = list_entry(l, struct mal_commac, list);
  111. if (isr & mc->tx_chan_mask) {
  112. mc->ops->txeob(mc->dev, isr & mc->tx_chan_mask);
  113. }
  114. }
  115. read_unlock(&mal_list_lock);
  116. return IRQ_HANDLED;
  117. }
  118. static irqreturn_t mal_rxeob(int irq, void *dev_instance, struct pt_regs *regs)
  119. {
  120. struct ibm_ocp_mal *mal = dev_instance;
  121. struct list_head *l;
  122. unsigned long isr;
  123. isr = get_mal_dcrn(mal, DCRN_MALRXEOBISR);
  124. set_mal_dcrn(mal, DCRN_MALRXEOBISR, isr);
  125. read_lock(&mal_list_lock);
  126. list_for_each(l, &mal->commac) {
  127. struct mal_commac *mc = list_entry(l, struct mal_commac, list);
  128. if (isr & mc->rx_chan_mask) {
  129. mc->ops->rxeob(mc->dev, isr & mc->rx_chan_mask);
  130. }
  131. }
  132. read_unlock(&mal_list_lock);
  133. return IRQ_HANDLED;
  134. }
  135. static irqreturn_t mal_txde(int irq, void *dev_instance, struct pt_regs *regs)
  136. {
  137. struct ibm_ocp_mal *mal = dev_instance;
  138. struct list_head *l;
  139. unsigned long deir;
  140. deir = get_mal_dcrn(mal, DCRN_MALTXDEIR);
  141. /* FIXME: print which MAL correctly */
  142. printk(KERN_WARNING "%s: Tx descriptor error (MALTXDEIR=%lx)\n",
  143. "MAL", deir);
  144. read_lock(&mal_list_lock);
  145. list_for_each(l, &mal->commac) {
  146. struct mal_commac *mc = list_entry(l, struct mal_commac, list);
  147. if (deir & mc->tx_chan_mask) {
  148. mc->ops->txde(mc->dev, deir & mc->tx_chan_mask);
  149. }
  150. }
  151. read_unlock(&mal_list_lock);
  152. return IRQ_HANDLED;
  153. }
  154. /*
  155. * This interrupt should be very rare at best. This occurs when
  156. * the hardware has a problem with the receive descriptors. The manual
  157. * states that it occurs when the hardware cannot the receive descriptor
  158. * empty bit is not set. The recovery mechanism will be to
  159. * traverse through the descriptors, handle any that are marked to be
  160. * handled and reinitialize each along the way. At that point the driver
  161. * will be restarted.
  162. */
  163. static irqreturn_t mal_rxde(int irq, void *dev_instance, struct pt_regs *regs)
  164. {
  165. struct ibm_ocp_mal *mal = dev_instance;
  166. struct list_head *l;
  167. unsigned long deir;
  168. deir = get_mal_dcrn(mal, DCRN_MALRXDEIR);
  169. /*
  170. * This really is needed. This case encountered in stress testing.
  171. */
  172. if (deir == 0)
  173. return IRQ_HANDLED;
  174. /* FIXME: print which MAL correctly */
  175. printk(KERN_WARNING "%s: Rx descriptor error (MALRXDEIR=%lx)\n",
  176. "MAL", deir);
  177. read_lock(&mal_list_lock);
  178. list_for_each(l, &mal->commac) {
  179. struct mal_commac *mc = list_entry(l, struct mal_commac, list);
  180. if (deir & mc->rx_chan_mask) {
  181. mc->ops->rxde(mc->dev, deir & mc->rx_chan_mask);
  182. }
  183. }
  184. read_unlock(&mal_list_lock);
  185. return IRQ_HANDLED;
  186. }
  187. static int __init mal_probe(struct ocp_device *ocpdev)
  188. {
  189. struct ibm_ocp_mal *mal = NULL;
  190. struct ocp_func_mal_data *maldata;
  191. int err = 0;
  192. maldata = (struct ocp_func_mal_data *)ocpdev->def->additions;
  193. if (maldata == NULL) {
  194. printk(KERN_ERR "mal%d: Missing additional datas !\n",
  195. ocpdev->def->index);
  196. return -ENODEV;
  197. }
  198. mal = kmalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL);
  199. if (mal == NULL) {
  200. printk(KERN_ERR
  201. "mal%d: Out of memory allocating MAL structure !\n",
  202. ocpdev->def->index);
  203. return -ENOMEM;
  204. }
  205. memset(mal, 0, sizeof(*mal));
  206. switch (ocpdev->def->index) {
  207. case 0:
  208. mal->dcrbase = DCRN_MAL_BASE;
  209. break;
  210. #ifdef DCRN_MAL1_BASE
  211. case 1:
  212. mal->dcrbase = DCRN_MAL1_BASE;
  213. break;
  214. #endif
  215. default:
  216. BUG();
  217. }
  218. /**************************/
  219. INIT_LIST_HEAD(&mal->commac);
  220. set_mal_dcrn(mal, DCRN_MALRXCARR, 0xFFFFFFFF);
  221. set_mal_dcrn(mal, DCRN_MALTXCARR, 0xFFFFFFFF);
  222. set_mal_dcrn(mal, DCRN_MALCR, MALCR_MMSR); /* 384 */
  223. /* FIXME: Add delay */
  224. /* Set the MAL configuration register */
  225. set_mal_dcrn(mal, DCRN_MALCR,
  226. MALCR_PLBB | MALCR_OPBBL | MALCR_LEA |
  227. MALCR_PLBLT_DEFAULT);
  228. /* It would be nice to allocate buffers separately for each
  229. * channel, but we can't because the channels share the upper
  230. * 13 bits of address lines. Each channels buffer must also
  231. * be 4k aligned, so we allocate 4k for each channel. This is
  232. * inefficient FIXME: do better, if possible */
  233. mal->tx_virt_addr = dma_alloc_coherent(&ocpdev->dev,
  234. MAL_DT_ALIGN *
  235. maldata->num_tx_chans,
  236. &mal->tx_phys_addr, GFP_KERNEL);
  237. if (mal->tx_virt_addr == NULL) {
  238. printk(KERN_ERR
  239. "mal%d: Out of memory allocating MAL descriptors !\n",
  240. ocpdev->def->index);
  241. err = -ENOMEM;
  242. goto fail;
  243. }
  244. /* God, oh, god, I hate DCRs */
  245. set_mal_dcrn(mal, DCRN_MALTXCTP0R, mal->tx_phys_addr);
  246. #ifdef DCRN_MALTXCTP1R
  247. if (maldata->num_tx_chans > 1)
  248. set_mal_dcrn(mal, DCRN_MALTXCTP1R,
  249. mal->tx_phys_addr + MAL_DT_ALIGN);
  250. #endif /* DCRN_MALTXCTP1R */
  251. #ifdef DCRN_MALTXCTP2R
  252. if (maldata->num_tx_chans > 2)
  253. set_mal_dcrn(mal, DCRN_MALTXCTP2R,
  254. mal->tx_phys_addr + 2 * MAL_DT_ALIGN);
  255. #endif /* DCRN_MALTXCTP2R */
  256. #ifdef DCRN_MALTXCTP3R
  257. if (maldata->num_tx_chans > 3)
  258. set_mal_dcrn(mal, DCRN_MALTXCTP3R,
  259. mal->tx_phys_addr + 3 * MAL_DT_ALIGN);
  260. #endif /* DCRN_MALTXCTP3R */
  261. #ifdef DCRN_MALTXCTP4R
  262. if (maldata->num_tx_chans > 4)
  263. set_mal_dcrn(mal, DCRN_MALTXCTP4R,
  264. mal->tx_phys_addr + 4 * MAL_DT_ALIGN);
  265. #endif /* DCRN_MALTXCTP4R */
  266. #ifdef DCRN_MALTXCTP5R
  267. if (maldata->num_tx_chans > 5)
  268. set_mal_dcrn(mal, DCRN_MALTXCTP5R,
  269. mal->tx_phys_addr + 5 * MAL_DT_ALIGN);
  270. #endif /* DCRN_MALTXCTP5R */
  271. #ifdef DCRN_MALTXCTP6R
  272. if (maldata->num_tx_chans > 6)
  273. set_mal_dcrn(mal, DCRN_MALTXCTP6R,
  274. mal->tx_phys_addr + 6 * MAL_DT_ALIGN);
  275. #endif /* DCRN_MALTXCTP6R */
  276. #ifdef DCRN_MALTXCTP7R
  277. if (maldata->num_tx_chans > 7)
  278. set_mal_dcrn(mal, DCRN_MALTXCTP7R,
  279. mal->tx_phys_addr + 7 * MAL_DT_ALIGN);
  280. #endif /* DCRN_MALTXCTP7R */
  281. mal->rx_virt_addr = dma_alloc_coherent(&ocpdev->dev,
  282. MAL_DT_ALIGN *
  283. maldata->num_rx_chans,
  284. &mal->rx_phys_addr, GFP_KERNEL);
  285. set_mal_dcrn(mal, DCRN_MALRXCTP0R, mal->rx_phys_addr);
  286. #ifdef DCRN_MALRXCTP1R
  287. if (maldata->num_rx_chans > 1)
  288. set_mal_dcrn(mal, DCRN_MALRXCTP1R,
  289. mal->rx_phys_addr + MAL_DT_ALIGN);
  290. #endif /* DCRN_MALRXCTP1R */
  291. #ifdef DCRN_MALRXCTP2R
  292. if (maldata->num_rx_chans > 2)
  293. set_mal_dcrn(mal, DCRN_MALRXCTP2R,
  294. mal->rx_phys_addr + 2 * MAL_DT_ALIGN);
  295. #endif /* DCRN_MALRXCTP2R */
  296. #ifdef DCRN_MALRXCTP3R
  297. if (maldata->num_rx_chans > 3)
  298. set_mal_dcrn(mal, DCRN_MALRXCTP3R,
  299. mal->rx_phys_addr + 3 * MAL_DT_ALIGN);
  300. #endif /* DCRN_MALRXCTP3R */
  301. err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal);
  302. if (err)
  303. goto fail;
  304. err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE ", mal);
  305. if (err)
  306. goto fail;
  307. err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
  308. if (err)
  309. goto fail;
  310. err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal);
  311. if (err)
  312. goto fail;
  313. err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
  314. if (err)
  315. goto fail;
  316. set_mal_dcrn(mal, DCRN_MALIER,
  317. MALIER_DE | MALIER_NE | MALIER_TE |
  318. MALIER_OPBE | MALIER_PLBE);
  319. /* Advertise me to the rest of the world */
  320. ocp_set_drvdata(ocpdev, mal);
  321. printk(KERN_INFO "mal%d: Initialized, %d tx channels, %d rx channels\n",
  322. ocpdev->def->index, maldata->num_tx_chans,
  323. maldata->num_rx_chans);
  324. return 0;
  325. fail:
  326. /* FIXME: dispose requested IRQs ! */
  327. if (err && mal)
  328. kfree(mal);
  329. return err;
  330. }
/*
 * OCP removal hook: tear down the MAL instance created by mal_probe().
 * Frees the five MAL IRQs and both descriptor rings, then the MAL
 * structure itself.
 */
static void __exit mal_remove(struct ocp_device *ocpdev)
{
	struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev);
	struct ocp_func_mal_data *maldata = ocpdev->def->additions;

	/* probe guarantees additions were present, so this can't fire
	 * unless the OCP core handed us a device we never probed */
	BUG_ON(!maldata);

	ocp_set_drvdata(ocpdev, NULL);

	/* FIXME: shut down the MAL, deal with dependency with emac */

	free_irq(maldata->serr_irq, mal);
	free_irq(maldata->txde_irq, mal);
	free_irq(maldata->txeob_irq, mal);
	free_irq(maldata->rxde_irq, mal);
	free_irq(maldata->rxeob_irq, mal);

	if (mal->tx_virt_addr)
		dma_free_coherent(&ocpdev->dev,
				  MAL_DT_ALIGN * maldata->num_tx_chans,
				  mal->tx_virt_addr, mal->tx_phys_addr);

	if (mal->rx_virt_addr)
		dma_free_coherent(&ocpdev->dev,
				  MAL_DT_ALIGN * maldata->num_rx_chans,
				  mal->rx_virt_addr, mal->rx_phys_addr);

	kfree(mal);
}
/* Device-ID table: match any vendor's MAL function; terminated by an
 * entry with an invalid vendor. */
static struct ocp_device_id mal_ids[] = {
	{.vendor = OCP_ANY_ID,.function = OCP_FUNC_MAL},
	{.vendor = OCP_VENDOR_INVALID}
};

/* OCP driver structure binding the probe/remove hooks to the ID table */
static struct ocp_driver mal_driver = {
	.name = "mal",
	.id_table = mal_ids,

	.probe = mal_probe,
	.remove = mal_remove,
};
  364. static int __init init_mals(void)
  365. {
  366. int rc;
  367. rc = ocp_register_driver(&mal_driver);
  368. if (rc < 0) {
  369. ocp_unregister_driver(&mal_driver);
  370. return -ENODEV;
  371. }
  372. return 0;
  373. }
/* Module exit: unregister the MAL OCP driver (triggers mal_remove()
 * for each bound device). */
static void __exit exit_mals(void)
{
	ocp_unregister_driver(&mal_driver);
}

module_init(init_mals);
module_exit(exit_mals);