/*********************************************************************
 *
 * Filename:      w83977af_ir.c
 * Version:       1.0
 * Description:   FIR driver for the Winbond W83977AF Super I/O chip
 * Status:        Experimental.
 * Author:        Paul VanderSpek
 * Created at:    Wed Nov  4 11:46:16 1998
 * Modified at:   Fri Jan 28 12:10:59 2000
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
 * Copyright (c) 1998-1999 Rebel.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * Neither Paul VanderSpek nor Rebel.com admit liability nor provide
 * warranty for any of this software. This material is provided "AS-IS"
 * and at no charge.
 *
 * If you find bugs in this file, it's very likely that the same bug
 * will also be in pc87108.c, since the implementations are quite
 * similar.
 *
 * Notice that all functions that need to access the chip in _any_
 * way must save the BSR register on entry and restore it on exit.
 * It is _very_ important to follow this policy!
 *
 *     __u8 bank;
 *
 *     bank = inb(iobase + BSR);
 *
 *     do_your_stuff_here();
 *
 *     outb(bank, iobase + BSR);
 *
 ********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include "w83977af.h"
#include "w83977af_ir.h"

#ifdef CONFIG_ARCH_NETWINDER            /* Adjust to NetWinder differences */
#undef CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
#endif

#define CONFIG_USE_W977_PNP /* Currently needed */
#define PIO_MAX_SPEED 115200

static char *driver_name = "w83977af_ir";
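/*
 * qos_mtt_bits below uses the IrLAP min-turn-time bit encoding (bit 0 = 10 ms,
 * bit 1 = 5 ms, bit 2 = 1 ms, ...), so 0x07 advertises 1 ms as the smallest
 * supported turnaround time.
 */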
static int qos_mtt_bits = 0x07; /* 1 ms or more */

#define CHIP_IO_EXTENT 8

static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
#ifdef CONFIG_ARCH_NETWINDER            /* Adjust to NetWinder differences */
static unsigned int irq[] = { 6, 0, 0, 0 };
#else
static unsigned int irq[] = { 11, 0, 0, 0 };
#endif
static unsigned int dma[] = { 1, 0, 0, 0 };
static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
static unsigned int efio = W977_EFIO_BASE;

static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL };

/* Some prototypes */
static int  w83977af_open(int i, unsigned int iobase, unsigned int irq,
                          unsigned int dma);
static int  w83977af_close(struct w83977af_ir *self);
static int  w83977af_probe(int iobase, int irq, int dma);
static int  w83977af_dma_receive(struct w83977af_ir *self);
static int  w83977af_dma_receive_complete(struct w83977af_ir *self);
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
                                      struct net_device *dev);
static int  w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
static int  w83977af_is_receiving(struct w83977af_ir *self);
static int  w83977af_net_open(struct net_device *dev);
static int  w83977af_net_close(struct net_device *dev);
static int  w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
/*
 * Function w83977af_init ()
 *
 *    Initialize chip. Just try to find out how many chips we are dealing
 *    with and where they are.
 */
static int __init w83977af_init(void)
{
        int i;

        IRDA_DEBUG(0, "%s()\n", __func__);

        for (i = 0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
                if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
                        return 0;
        }
        return -ENODEV;
}

/*
 * Function w83977af_cleanup ()
 *
 *    Close all configured chips
 *
 */
static void __exit w83977af_cleanup(void)
{
        int i;

        IRDA_DEBUG(4, "%s()\n", __func__);

        for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
                if (dev_self[i])
                        w83977af_close(dev_self[i]);
        }
}

static const struct net_device_ops w83977_netdev_ops = {
        .ndo_open       = w83977af_net_open,
        .ndo_stop       = w83977af_net_close,
        .ndo_start_xmit = w83977af_hard_xmit,
        .ndo_do_ioctl   = w83977af_net_ioctl,
};
/*
 * Function w83977af_open (iobase, irq)
 *
 *    Open driver instance
 *
 */
static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
                         unsigned int dma)
{
        struct net_device *dev;
        struct w83977af_ir *self;
        int err;

        IRDA_DEBUG(0, "%s()\n", __func__);

        /* Lock the port that we need */
        if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
                IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
                           __func__, iobase);
                return -ENODEV;
        }

        if (w83977af_probe(iobase, irq, dma) == -1) {
                err = -1;
                goto err_out;
        }

        /*
         * Allocate new instance of the driver
         */
        dev = alloc_irdadev(sizeof(struct w83977af_ir));
        if (dev == NULL) {
                printk(KERN_ERR "IrDA: Can't allocate memory for "
                       "IrDA control block!\n");
                err = -ENOMEM;
                goto err_out;
        }

        self = netdev_priv(dev);
        spin_lock_init(&self->lock);

        /* Initialize IO */
        self->io.fir_base  = iobase;
        self->io.irq       = irq;
        self->io.fir_ext   = CHIP_IO_EXTENT;
        self->io.dma       = dma;
        self->io.fifo_size = 32;

        /* Initialize QoS for this device */
        irda_init_max_qos_capabilies(&self->qos);

        /* The only value we must override is the baud rate */
        /* FIXME: The HP HDLS-1100 does not support 1152000! */
        self->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 | IR_57600 |
                IR_115200 | IR_576000 | IR_1152000 | (IR_4000000 << 8);
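        /*
         * IR_4000000 sits in the second byte of the 16-bit baud-rate field,
         * which is why it is shifted left by 8 above.
         */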
        /* The HP HDLS-1100 needs 1 ms according to the specs */
        self->qos.min_turn_time.bits = qos_mtt_bits;
        irda_qos_bits_to_value(&self->qos);

        /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
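        /*
         * With the 2048 byte maximum frame size and the largest IrLAP window
         * of 7 frames this works out to (2048 + 6) * 7 + 6 = 14384, which is
         * the rx_buff size used below.
         */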
        self->rx_buff.truesize = 14384;
        self->tx_buff.truesize = 4000;

        /* Allocate memory if needed */
        self->rx_buff.head =
                dma_alloc_coherent(NULL, self->rx_buff.truesize,
                                   &self->rx_buff_dma, GFP_KERNEL);
        if (self->rx_buff.head == NULL) {
                err = -ENOMEM;
                goto err_out1;
        }

        memset(self->rx_buff.head, 0, self->rx_buff.truesize);

        self->tx_buff.head =
                dma_alloc_coherent(NULL, self->tx_buff.truesize,
                                   &self->tx_buff_dma, GFP_KERNEL);
        if (self->tx_buff.head == NULL) {
                err = -ENOMEM;
                goto err_out2;
        }

        memset(self->tx_buff.head, 0, self->tx_buff.truesize);

        self->rx_buff.in_frame = FALSE;
        self->rx_buff.state = OUTSIDE_FRAME;
        self->tx_buff.data = self->tx_buff.head;
        self->rx_buff.data = self->rx_buff.head;

        self->netdev = dev;

        dev->netdev_ops = &w83977_netdev_ops;

        err = register_netdev(dev);
        if (err) {
                IRDA_ERROR("%s(), register_netdevice() failed!\n", __func__);
                goto err_out3;
        }
        IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);

        /* Need to store self somewhere */
        dev_self[i] = self;

        return 0;
err_out3:
        dma_free_coherent(NULL, self->tx_buff.truesize,
                          self->tx_buff.head, self->tx_buff_dma);
err_out2:
        dma_free_coherent(NULL, self->rx_buff.truesize,
                          self->rx_buff.head, self->rx_buff_dma);
err_out1:
        free_netdev(dev);
err_out:
        release_region(iobase, CHIP_IO_EXTENT);
        return err;
}
/*
 * Function w83977af_close (self)
 *
 *    Close driver instance
 *
 */
static int w83977af_close(struct w83977af_ir *self)
{
        int iobase;

        IRDA_DEBUG(0, "%s()\n", __func__);

        iobase = self->io.fir_base;

#ifdef CONFIG_USE_W977_PNP
        /* enter PnP configuration mode */
        w977_efm_enter(efio);

        w977_select_device(W977_DEVICE_IR, efio);

        /* Deactivate device */
        w977_write_reg(0x30, 0x00, efio);

        w977_efm_exit(efio);
#endif /* CONFIG_USE_W977_PNP */

        /* Remove netdevice */
        unregister_netdev(self->netdev);

        /* Release the PORT that this driver is using */
        IRDA_DEBUG(0, "%s(), Releasing Region %03x\n",
                   __func__, self->io.fir_base);
        release_region(self->io.fir_base, self->io.fir_ext);

        if (self->tx_buff.head)
                dma_free_coherent(NULL, self->tx_buff.truesize,
                                  self->tx_buff.head, self->tx_buff_dma);

        if (self->rx_buff.head)
                dma_free_coherent(NULL, self->rx_buff.truesize,
                                  self->rx_buff.head, self->rx_buff_dma);

        free_netdev(self->netdev);

        return 0;
}
static int w83977af_probe(int iobase, int irq, int dma)
{
        int version;
        int i;

        for (i = 0; i < 2; i++) {
                IRDA_DEBUG(0, "%s()\n", __func__);
#ifdef CONFIG_USE_W977_PNP
                /* Enter PnP configuration mode */
                w977_efm_enter(efbase[i]);

                w977_select_device(W977_DEVICE_IR, efbase[i]);

                /* Configure PnP port, IRQ, and DMA channel */
                w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
                w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);

                w977_write_reg(0x70, irq, efbase[i]);
#ifdef CONFIG_ARCH_NETWINDER
                /* Netwinder uses 1 higher than Linux */
                w977_write_reg(0x74, dma + 1, efbase[i]);
#else
                w977_write_reg(0x74, dma, efbase[i]);
#endif /* CONFIG_ARCH_NETWINDER */
                w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */

                /* Set append hardware CRC, enable IR bank selection */
                w977_write_reg(0xf0, APEDCRC | ENBNKSEL, efbase[i]);

                /* Activate device */
                w977_write_reg(0x30, 0x01, efbase[i]);

                w977_efm_exit(efbase[i]);
#endif /* CONFIG_USE_W977_PNP */
                /* Disable Advanced mode */
                switch_bank(iobase, SET2);
                outb(0x00, iobase + 2);
                /* Turn on UART (global) interrupts */
                switch_bank(iobase, SET0);
                outb(HCR_EN_IRQ, iobase + HCR);

                /* Switch to advanced mode */
                switch_bank(iobase, SET2);
                outb(inb(iobase + ADCR1) | ADCR1_ADV_SL, iobase + ADCR1);

                /* Set default IR-mode */
                switch_bank(iobase, SET0);
                outb(HCR_SIR, iobase + HCR);

                /* Read the Advanced IR ID */
                switch_bank(iobase, SET3);
                version = inb(iobase + AUID);

                /* The AUID high nibble should read 0x1 for this chip */
                if (0x10 == (version & 0xf0)) {
                        efio = efbase[i];

                        /* Set FIFO size to 32 */
                        switch_bank(iobase, SET2);
                        outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);

                        /* Set FIFO threshold to TX17, RX16 */
                        switch_bank(iobase, SET0);
                        outb(UFR_RXTL | UFR_TXTL | UFR_TXF_RST | UFR_RXF_RST |
                             UFR_EN_FIFO, iobase + UFR);

                        /* Receiver frame length */
                        switch_bank(iobase, SET4);
                        outb(2048 & 0xff, iobase + 6);
                        outb((2048 >> 8) & 0x1f, iobase + 7);
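                        /*
                         * 2048 bytes is the maximum IrLAP data size, which is
                         * also what the (2048 + 6) * 7 + 6 DMA buffer sizing
                         * in w83977af_open() assumes.
                         */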
                        /*
                         * Init HP HSDL-1100 transceiver.
                         *
                         * Set IRX_MSL since we have two receive paths, IRRX
                         * and IRRXH. Clear IRSL0D since we want IRSL0 to be
                         * an input pin used for IRRXH.
                         *
                         *   IRRX  pin 37 connected to receiver
                         *   IRTX  pin 38 connected to transmitter
                         *   FIRRX pin 39 connected to receiver (IRSL0)
                         *   CIRRX pin 40 connected to pin 37
                         */
                        switch_bank(iobase, SET7);
                        outb(0x40, iobase + 7);

                        IRDA_MESSAGE("W83977AF (IR) driver loaded. "
                                     "Version: 0x%02x\n", version);

                        return 0;
                } else {
                        /* Try next extended function register address */
                        IRDA_DEBUG(0, "%s(), Wrong chip version", __func__);
                }
        }
        return -1;
}
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
{
        int ir_mode = HCR_SIR;
        int iobase;
        __u8 set;

        iobase = self->io.fir_base;

        /* Update accounting for new speed */
        self->io.speed = speed;

        /* Save current bank */
        set = inb(iobase + SSR);

        /* Disable interrupts */
        switch_bank(iobase, SET0);
        outb(0, iobase + ICR);

        /* Select Set 2 */
        switch_bank(iobase, SET2);
        outb(0x00, iobase + ABHL);
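        /*
         * ABHL/ABLL appear to hold the baud-rate divisor relative to
         * 115200 bps: 115200 / 12 = 9600, / 6 = 19200, / 3 = 38400,
         * / 2 = 57600 and / 1 = 115200, which is where the values in
         * the switch below come from.
         */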
        switch (speed) {
        case 9600:   outb(0x0c, iobase + ABLL); break;
        case 19200:  outb(0x06, iobase + ABLL); break;
        case 38400:  outb(0x03, iobase + ABLL); break;
        case 57600:  outb(0x02, iobase + ABLL); break;
        case 115200: outb(0x01, iobase + ABLL); break;
        case 576000:
                ir_mode = HCR_MIR_576;
                IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
                break;
        case 1152000:
                ir_mode = HCR_MIR_1152;
                IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__);
                break;
        case 4000000:
                ir_mode = HCR_FIR;
                IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__);
                break;
        default:
                ir_mode = HCR_FIR;
                IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __func__, speed);
                break;
        }

        /* Set speed mode */
        switch_bank(iobase, SET0);
        outb(ir_mode, iobase + HCR);

        /* set FIFO size to 32 */
        switch_bank(iobase, SET2);
        outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);

        /* set FIFO threshold to TX17, RX16 */
        switch_bank(iobase, SET0);
        outb(0x00, iobase + UFR);        /* Reset */
        outb(UFR_EN_FIFO, iobase + UFR); /* First we must enable FIFO */
        outb(0xa7, iobase + UFR);

        netif_wake_queue(self->netdev);

        /* Enable some interrupts so we can receive frames */
        switch_bank(iobase, SET0);
        if (speed > PIO_MAX_SPEED) {
                outb(ICR_EFSFI, iobase + ICR);
                w83977af_dma_receive(self);
        } else
                outb(ICR_ERBRI, iobase + ICR);

        /* Restore SSR */
        outb(set, iobase + SSR);
}
/*
 * Function w83977af_hard_xmit (skb, dev)
 *
 *    Sets up a DMA transfer to send the current frame.
 *
 */
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        struct w83977af_ir *self;
        __s32 speed;
        int iobase;
        __u8 set;
        int mtt;

        self = netdev_priv(dev);

        iobase = self->io.fir_base;

        IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__, jiffies,
                   (int)skb->len);

        /* Lock transmit buffer */
        netif_stop_queue(dev);

        /* Check if we need to change the speed */
        speed = irda_get_next_speed(skb);
        if ((speed != self->io.speed) && (speed != -1)) {
                /* Check for empty frame */
                if (!skb->len) {
                        w83977af_change_speed(self, speed);
                        dev->trans_start = jiffies;
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
                } else
                        self->new_speed = speed;
        }

        /* Save current set */
        set = inb(iobase + SSR);

        /* Decide if we should use PIO or DMA transfer */
        if (self->io.speed > PIO_MAX_SPEED) {
                self->tx_buff.data = self->tx_buff.head;
                skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
                self->tx_buff.len = skb->len;

                mtt = irda_get_mtt(skb);
                IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__, jiffies, mtt);
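                /* irda_get_mtt() reports the required turnaround delay in microseconds */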
                if (mtt)
                        udelay(mtt);

                /* Enable DMA interrupt */
                switch_bank(iobase, SET0);
                outb(ICR_EDMAI, iobase + ICR);
                w83977af_dma_write(self, iobase);
        } else {
                self->tx_buff.data = self->tx_buff.head;
                self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
                                                   self->tx_buff.truesize);

                /* Add interrupt on tx low level (will fire immediately) */
                switch_bank(iobase, SET0);
                outb(ICR_ETXTHI, iobase + ICR);
        }
        dev->trans_start = jiffies;
        dev_kfree_skb(skb);

        /* Restore set register */
        outb(set, iobase + SSR);

        return NETDEV_TX_OK;
}
/*
 * Function w83977af_dma_write (self, iobase)
 *
 *    Send frame using DMA
 *
 */
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
        __u8 set;
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
        unsigned long flags;
        __u8 hcr;
#endif
        IRDA_DEBUG(4, "%s(), len=%d\n", __func__, self->tx_buff.len);

        /* Save current set */
        set = inb(iobase + SSR);

        /* Disable DMA */
        switch_bank(iobase, SET0);
        outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);

        /* Choose transmit DMA channel */
        switch_bank(iobase, SET2);
        outb(ADCR1_D_CHSW | /*ADCR1_DMA_F|*/ ADCR1_ADV_SL, iobase + ADCR1);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
        spin_lock_irqsave(&self->lock, flags);

        disable_dma(self->io.dma);
        clear_dma_ff(self->io.dma);
        set_dma_mode(self->io.dma, DMA_MODE_READ);
        set_dma_addr(self->io.dma, self->tx_buff_dma);
        set_dma_count(self->io.dma, self->tx_buff.len);
#else
        irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
                       DMA_MODE_WRITE);
#endif
        self->io.direction = IO_XMIT;

        /* Enable DMA */
        switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
        hcr = inb(iobase + HCR);
        outb(hcr | HCR_EN_DMA, iobase + HCR);
        enable_dma(self->io.dma);
        spin_unlock_irqrestore(&self->lock, flags);
#else
        outb(inb(iobase + HCR) | HCR_EN_DMA | HCR_TX_WT, iobase + HCR);
#endif
        /* Restore set register */
        outb(set, iobase + SSR);
}
/*
 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
 *
 *    Fill the transmit FIFO from buf using PIO and return the number of
 *    bytes actually written.
 *
 */
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
{
        int actual = 0;
        __u8 set;

        IRDA_DEBUG(4, "%s()\n", __func__);

        /* Save current bank */
        set = inb(iobase + SSR);

        switch_bank(iobase, SET0);
        if (!(inb_p(iobase + USR) & USR_TSRE)) {
                IRDA_DEBUG(4,
                           "%s(), warning, FIFO not empty yet!\n", __func__);

                fifo_size -= 17;
                IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
                           __func__, fifo_size);
        }

        /* Fill FIFO with current frame */
        while ((fifo_size-- > 0) && (actual < len)) {
                /* Transmit next byte */
                outb(buf[actual++], iobase + TBR);
        }

        IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
                   __func__, fifo_size, actual, len);

        /* Restore bank */
        outb(set, iobase + SSR);

        return actual;
}
/*
 * Function w83977af_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished, so do the necessary bookkeeping.
 *
 */
static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
{
        int iobase;
        __u8 set;

        IRDA_DEBUG(4, "%s(%ld)\n", __func__, jiffies);

        IRDA_ASSERT(self != NULL, return;);

        iobase = self->io.fir_base;

        /* Save current set */
        set = inb(iobase + SSR);

        /* Disable DMA */
        switch_bank(iobase, SET0);
        outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);

        /* Check for underrun! */
        if (inb(iobase + AUDR) & AUDR_UNDR) {
                IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__);

                self->netdev->stats.tx_errors++;
                self->netdev->stats.tx_fifo_errors++;

                /* Clear bit, by writing 1 to it */
                outb(AUDR_UNDR, iobase + AUDR);
        } else
                self->netdev->stats.tx_packets++;

        if (self->new_speed) {
                w83977af_change_speed(self, self->new_speed);
                self->new_speed = 0;
        }

        /* Unlock tx_buff and request another frame */
        /* Tell the network layer, that we want more frames */
        netif_wake_queue(self->netdev);

        /* Restore set */
        outb(set, iobase + SSR);
}
/*
 * Function w83977af_dma_receive (self)
 *
 *    Get ready for receiving a frame. The device will initiate a DMA
 *    if it starts to receive a frame.
 *
 */
static int w83977af_dma_receive(struct w83977af_ir *self)
{
        int iobase;
        __u8 set;
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
        unsigned long flags;
        __u8 hcr;
#endif
        IRDA_ASSERT(self != NULL, return -1;);

        IRDA_DEBUG(4, "%s\n", __func__);

        iobase = self->io.fir_base;

        /* Save current set */
        set = inb(iobase + SSR);

        /* Disable DMA */
        switch_bank(iobase, SET0);
        outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);

        /* Choose DMA Rx, DMA Fairness, and Advanced mode */
        switch_bank(iobase, SET2);
        outb((inb(iobase + ADCR1) & ~ADCR1_D_CHSW) /*|ADCR1_DMA_F*/ | ADCR1_ADV_SL,
             iobase + ADCR1);

        self->io.direction = IO_RECV;
        self->rx_buff.data = self->rx_buff.head;

#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
        spin_lock_irqsave(&self->lock, flags);

        disable_dma(self->io.dma);
        clear_dma_ff(self->io.dma);
        set_dma_mode(self->io.dma, DMA_MODE_READ);
        set_dma_addr(self->io.dma, self->rx_buff_dma);
        set_dma_count(self->io.dma, self->rx_buff.truesize);
#else
        irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
                       DMA_MODE_READ);
#endif
        /*
         * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
         * important that we don't reset the Tx FIFO since it might not
         * be finished transmitting yet
         */
        switch_bank(iobase, SET0);
        outb(UFR_RXTL | UFR_TXTL | UFR_RXF_RST | UFR_EN_FIFO, iobase + UFR);
        self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;

        /* Enable DMA */
        switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
        hcr = inb(iobase + HCR);
        outb(hcr | HCR_EN_DMA, iobase + HCR);
        enable_dma(self->io.dma);
        spin_unlock_irqrestore(&self->lock, flags);
#else
        outb(inb(iobase + HCR) | HCR_EN_DMA, iobase + HCR);
#endif
        /* Restore set */
        outb(set, iobase + SSR);

        return 0;
}
/*
 * Function w83977af_dma_receive_complete (self)
 *
 *    Finished with receiving a frame
 *
 */
static int w83977af_dma_receive_complete(struct w83977af_ir *self)
{
        struct sk_buff *skb;
        struct st_fifo *st_fifo;
        int len;
        int iobase;
        __u8 set;
        __u8 status;

        IRDA_DEBUG(4, "%s\n", __func__);

        st_fifo = &self->st_fifo;

        iobase = self->io.fir_base;

        /* Save current set */
        set = inb(iobase + SSR);

        iobase = self->io.fir_base;

        /* Read status FIFO */
        switch_bank(iobase, SET5);
        while ((status = inb(iobase + FS_FO)) & FS_FO_FSFDR) {
                st_fifo->entries[st_fifo->tail].status = status;

                st_fifo->entries[st_fifo->tail].len  = inb(iobase + RFLFL);
                st_fifo->entries[st_fifo->tail].len |= inb(iobase + RFLFH) << 8;

                st_fifo->tail++;
                st_fifo->len++;
        }

        while (st_fifo->len) {
                /* Get first entry */
                status = st_fifo->entries[st_fifo->head].status;
                len    = st_fifo->entries[st_fifo->head].len;
                st_fifo->head++;
                st_fifo->len--;

                /* Check for errors */
                if (status & FS_FO_ERR_MSK) {
                        if (status & FS_FO_LST_FR) {
                                /* Add number of lost frames to stats */
                                self->netdev->stats.rx_errors += len;
                        } else {
                                /* Skip frame */
                                self->netdev->stats.rx_errors++;

                                self->rx_buff.data += len;

                                if (status & FS_FO_MX_LEX)
                                        self->netdev->stats.rx_length_errors++;

                                if (status & FS_FO_PHY_ERR)
                                        self->netdev->stats.rx_frame_errors++;

                                if (status & FS_FO_CRC_ERR)
                                        self->netdev->stats.rx_crc_errors++;
                        }
                        /* The errors below can be reported in both cases */
                        if (status & FS_FO_RX_OV)
                                self->netdev->stats.rx_fifo_errors++;

                        if (status & FS_FO_FSF_OV)
                                self->netdev->stats.rx_fifo_errors++;
                } else {
                        /* Check if we have transferred all data to memory */
                        switch_bank(iobase, SET0);
                        if (inb(iobase + USR) & USR_RDR) {
                                udelay(80); /* Should be enough!? */
                        }

                        skb = dev_alloc_skb(len + 1);
                        if (skb == NULL) {
                                printk(KERN_INFO
                                       "%s(), memory squeeze, dropping frame.\n",
                                       __func__);
                                /* Restore set register */
                                outb(set, iobase + SSR);

                                return FALSE;
                        }

                        /* Align to 20 bytes */
                        skb_reserve(skb, 1);

                        /* Copy frame without CRC */
                        if (self->io.speed < 4000000) {
                                skb_put(skb, len - 2);
                                skb_copy_to_linear_data(skb,
                                                        self->rx_buff.data,
                                                        len - 2);
                        } else {
                                skb_put(skb, len - 4);
                                skb_copy_to_linear_data(skb,
                                                        self->rx_buff.data,
                                                        len - 4);
                        }

                        /* Move to next frame */
                        self->rx_buff.data += len;
                        self->netdev->stats.rx_packets++;

                        skb->dev = self->netdev;
                        skb_reset_mac_header(skb);
                        skb->protocol = htons(ETH_P_IRDA);
                        netif_rx(skb);
                }
        }
        /* Restore set register */
        outb(set, iobase + SSR);

        return TRUE;
}
/*
 * Function w83977af_pio_receive (self)
 *
 *    Receive all data in receiver FIFO
 *
 */
static void w83977af_pio_receive(struct w83977af_ir *self)
{
        __u8 byte = 0x00;
        int iobase;

        IRDA_DEBUG(4, "%s()\n", __func__);

        IRDA_ASSERT(self != NULL, return;);

        iobase = self->io.fir_base;

        /* Receive all characters in Rx FIFO */
        do {
                byte = inb(iobase + RBR);
                async_unwrap_char(self->netdev, &self->netdev->stats,
                                  &self->rx_buff, byte);
        } while (inb(iobase + USR) & USR_RDR); /* Data available */
}
/*
 * Function w83977af_sir_interrupt (self, eir)
 *
 *    Handle SIR interrupt
 *
 */
static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
{
        int actual;
        __u8 new_icr = 0;
        __u8 set;
        int iobase;

        IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__, isr);

        iobase = self->io.fir_base;
        /* Transmit FIFO low on data */
        if (isr & ISR_TXTH_I) {
                /* Write data left in transmit buffer */
                actual = w83977af_pio_write(self->io.fir_base,
                                            self->tx_buff.data,
                                            self->tx_buff.len,
                                            self->io.fifo_size);

                self->tx_buff.data += actual;
                self->tx_buff.len  -= actual;

                self->io.direction = IO_XMIT;

                /* Check if finished */
                if (self->tx_buff.len > 0) {
                        new_icr |= ICR_ETXTHI;
                } else {
                        set = inb(iobase + SSR);
                        switch_bank(iobase, SET0);
                        outb(AUDR_SFEND, iobase + AUDR);
                        outb(set, iobase + SSR);

                        self->netdev->stats.tx_packets++;

                        /* Feed me more packets */
                        netif_wake_queue(self->netdev);
                        new_icr |= ICR_ETBREI;
                }
        }
        /* Check if transmission has completed */
        if (isr & ISR_TXEMP_I) {
                /* Check if we need to change the speed? */
                if (self->new_speed) {
                        IRDA_DEBUG(2, "%s(), Changing speed!\n", __func__);
                        w83977af_change_speed(self, self->new_speed);
                        self->new_speed = 0;
                }

                /* Turn around and get ready to receive some data */
                self->io.direction = IO_RECV;
                new_icr |= ICR_ERBRI;
        }

        /* Rx FIFO threshold or timeout */
        if (isr & ISR_RXTH_I) {
                w83977af_pio_receive(self);

                /* Keep receiving */
                new_icr |= ICR_ERBRI;
        }
        return new_icr;
}
/*
 * Function w83977af_fir_interrupt (self, eir)
 *
 *    Handle MIR/FIR interrupt
 *
 */
static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
{
        __u8 new_icr = 0;
        __u8 set;
        int iobase;

        iobase = self->io.fir_base;
        set = inb(iobase + SSR);

        /* End of frame detected in FIFO */
        if (isr & (ISR_FEND_I | ISR_FSF_I)) {
                if (w83977af_dma_receive_complete(self)) {
                        /* Wait for next status FIFO interrupt */
                        new_icr |= ICR_EFSFI;
                } else {
                        /* DMA not finished yet */

                        /* Set timer value, resolution 1 ms */
                        switch_bank(iobase, SET4);
                        outb(0x01, iobase + TMRL); /* 1 ms */
                        outb(0x00, iobase + TMRH);

                        /* Start timer */
                        outb(IR_MSL_EN_TMR, iobase + IR_MSL);

                        new_icr |= ICR_ETMRI;
                }
        }
        /* Timer finished */
        if (isr & ISR_TMR_I) {
                /* Disable timer */
                switch_bank(iobase, SET4);
                outb(0, iobase + IR_MSL);

                /* Clear timer event */
                /* switch_bank(iobase, SET0); */
                /* outb(ASCR_CTE, iobase+ASCR); */

                /* Check if this is a TX timer interrupt */
                if (self->io.direction == IO_XMIT) {
                        w83977af_dma_write(self, iobase);

                        new_icr |= ICR_EDMAI;
                } else {
                        /* Check if DMA has now finished */
                        w83977af_dma_receive_complete(self);

                        new_icr |= ICR_EFSFI;
                }
        }
        /* Finished with DMA */
        if (isr & ISR_DMA_I) {
                w83977af_dma_xmit_complete(self);

                /* Check if there are more frames to be transmitted */
                /* if (irda_device_txqueue_empty(self)) { */

                /*
                 * Prepare for receive
                 *
                 * ** Netwinder Tx DMA likes that we do this anyway **
                 */
                w83977af_dma_receive(self);
                new_icr = ICR_EFSFI;
                /* } */
        }

        /* Restore set */
        outb(set, iobase + SSR);

        return new_icr;
}
/*
 * Function w83977af_interrupt (irq, dev_id, regs)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 *
 */
static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct w83977af_ir *self;
        __u8 set, icr, isr;
        int iobase;

        self = netdev_priv(dev);

        iobase = self->io.fir_base;

        /* Save current bank */
        set = inb(iobase + SSR);
        switch_bank(iobase, SET0);

        icr = inb(iobase + ICR);
        isr = inb(iobase + ISR) & icr; /* Mask out the interesting ones */

        outb(0, iobase + ICR); /* Disable interrupts */

        if (isr) {
                /* Dispatch interrupt handler for the current speed */
                if (self->io.speed > PIO_MAX_SPEED)
                        icr = w83977af_fir_interrupt(self, isr);
                else
                        icr = w83977af_sir_interrupt(self, isr);
        }

        outb(icr, iobase + ICR); /* Restore (new) interrupts */
        outb(set, iobase + SSR); /* Restore bank register */
        return IRQ_RETVAL(isr);
}
/*
 * Function w83977af_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame
 *
 */
static int w83977af_is_receiving(struct w83977af_ir *self)
{
        int status = FALSE;
        int iobase;
        __u8 set;

        IRDA_ASSERT(self != NULL, return FALSE;);

        if (self->io.speed > 115200) {
                iobase = self->io.fir_base;

                /* Check if rx FIFO is not empty */
                set = inb(iobase + SSR);
                switch_bank(iobase, SET2);
                if ((inb(iobase + RXFDTH) & 0x3f) != 0) {
                        /* We are receiving something */
                        status = TRUE;
                }
                outb(set, iobase + SSR);
        } else
                status = (self->rx_buff.state != OUTSIDE_FRAME);

        return status;
}
/*
 * Function w83977af_net_open (dev)
 *
 *    Start the device
 *
 */
static int w83977af_net_open(struct net_device *dev)
{
        struct w83977af_ir *self;
        int iobase;
        char hwname[32];
        __u8 set;

        IRDA_DEBUG(0, "%s()\n", __func__);

        IRDA_ASSERT(dev != NULL, return -1;);
        self = netdev_priv(dev);

        IRDA_ASSERT(self != NULL, return 0;);

        iobase = self->io.fir_base;

        if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
                        (void *)dev)) {
                return -EAGAIN;
        }
        /*
         * Always allocate the DMA channel after the IRQ,
         * and clean up on failure.
         */
        if (request_dma(self->io.dma, dev->name)) {
                free_irq(self->io.irq, dev);
                return -EAGAIN;
        }

        /* Save current set */
        set = inb(iobase + SSR);

        /* Enable some interrupts so we can receive frames again */
        switch_bank(iobase, SET0);
        if (self->io.speed > 115200) {
                outb(ICR_EFSFI, iobase + ICR);
                w83977af_dma_receive(self);
        } else
                outb(ICR_ERBRI, iobase + ICR);

        /* Restore bank register */
        outb(set, iobase + SSR);

        /* Ready to play! */
        netif_start_queue(dev);

        /* Give self a hardware name */
        sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);

        /*
         * Open new IrLAP layer instance, now that everything should be
         * initialized properly
         */
        self->irlap = irlap_open(dev, &self->qos, hwname);

        return 0;
}
/*
 * Function w83977af_net_close (dev)
 *
 *    Stop the device
 *
 */
static int w83977af_net_close(struct net_device *dev)
{
        struct w83977af_ir *self;
        int iobase;
        __u8 set;

        IRDA_DEBUG(0, "%s()\n", __func__);

        IRDA_ASSERT(dev != NULL, return -1;);

        self = netdev_priv(dev);

        IRDA_ASSERT(self != NULL, return 0;);

        iobase = self->io.fir_base;

        /* Stop device */
        netif_stop_queue(dev);

        /* Stop and remove instance of IrLAP */
        if (self->irlap)
                irlap_close(self->irlap);
        self->irlap = NULL;

        disable_dma(self->io.dma);

        /* Save current set */
        set = inb(iobase + SSR);

        /* Disable interrupts */
        switch_bank(iobase, SET0);
        outb(0, iobase + ICR);

        free_irq(self->io.irq, dev);
        free_dma(self->io.dma);

        /* Restore bank register */
        outb(set, iobase + SSR);

        return 0;
}
/*
 * Function w83977af_net_ioctl (dev, rq, cmd)
 *
 *    Process IOCTL commands for this device
 *
 */
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct if_irda_req *irq = (struct if_irda_req *)rq;
        struct w83977af_ir *self;
        unsigned long flags;
        int ret = 0;

        IRDA_ASSERT(dev != NULL, return -1;);

        self = netdev_priv(dev);

        IRDA_ASSERT(self != NULL, return -1;);

        IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);

        spin_lock_irqsave(&self->lock, flags);

        switch (cmd) {
        case SIOCSBANDWIDTH: /* Set bandwidth */
                if (!capable(CAP_NET_ADMIN)) {
                        ret = -EPERM;
                        goto out;
                }
                w83977af_change_speed(self, irq->ifr_baudrate);
                break;
        case SIOCSMEDIABUSY: /* Set media busy */
                if (!capable(CAP_NET_ADMIN)) {
                        ret = -EPERM;
                        goto out;
                }
                irda_device_set_media_busy(self->netdev, TRUE);
                break;
        case SIOCGRECEIVING: /* Check if we are receiving right now */
                irq->ifr_receiving = w83977af_is_receiving(self);
                break;
        default:
                ret = -EOPNOTSUPP;
        }
out:
        spin_unlock_irqrestore(&self->lock, flags);
        return ret;
}
MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
MODULE_LICENSE("GPL");

module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "Base I/O addresses");
module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(irq, "IRQ lines");
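/*
 * Example (assuming the default ISA resources above): the module can be
 * loaded with explicit resources, e.g. "modprobe w83977af_ir io=0x180 irq=11".
 */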
/*
 * Function init_module (void)
 *
 *
 *
 */
module_init(w83977af_init);

/*
 * Function cleanup_module (void)
 *
 *
 *
 */
module_exit(w83977af_cleanup);