bfin_sir.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824
  1. /*
  2. * Blackfin Infra-red Driver
  3. *
  4. * Copyright 2006-2009 Analog Devices Inc.
  5. *
  6. * Enter bugs at http://blackfin.uclinux.org/
  7. *
  8. * Licensed under the GPL-2 or later.
  9. *
  10. */
  11. #include "bfin_sir.h"
  12. #ifdef CONFIG_SIR_BFIN_DMA
  13. #define DMA_SIR_RX_XCNT 10
  14. #define DMA_SIR_RX_YCNT (PAGE_SIZE / DMA_SIR_RX_XCNT)
  15. #define DMA_SIR_RX_FLUSH_JIFS (HZ * 4 / 250)
  16. #endif
  17. #if ANOMALY_05000447
  18. static int max_rate = 57600;
  19. #else
  20. static int max_rate = 115200;
  21. #endif
  22. static void turnaround_delay(unsigned long last_jif, int mtt)
  23. {
  24. long ticks;
  25. mtt = mtt < 10000 ? 10000 : mtt;
  26. ticks = 1 + mtt / (USEC_PER_SEC / HZ);
  27. schedule_timeout_uninterruptible(ticks);
  28. }
/*
 * Pull the MMIO base, IRQ number and DMA channel numbers for one SIR
 * port out of the platform device resources and record the system clock.
 */
static void __devinit bfin_sir_init_ports(struct bfin_sir_port *sp, struct platform_device *pdev)
{
	int i;
	struct resource *res;

	for (i = 0; i < pdev->num_resources; i++) {
		res = &pdev->resource[i];
		/* NOTE(review): switches on the raw flags word, so this
		 * relies on the board file setting no extra IORESOURCE_*
		 * bits; resource_type() would be more robust - confirm.
		 */
		switch (res->flags) {
		case IORESOURCE_MEM:
			sp->membase = (void __iomem *)res->start;
			break;
		case IORESOURCE_IRQ:
			sp->irq = res->start;
			break;
		case IORESOURCE_DMA:
			/* Board convention: start = RX chan, end = TX chan. */
			sp->rx_dma_channel = res->start;
			sp->tx_dma_channel = res->end;
			break;
		default:
			break;
		}
	}

	sp->clk = get_sclk();
#ifdef CONFIG_SIR_BFIN_DMA
	sp->tx_done = 1;	/* no DMA TX in flight yet */
	init_timer(&(sp->rx_dma_timer));
#endif
}
  56. static void bfin_sir_stop_tx(struct bfin_sir_port *port)
  57. {
  58. #ifdef CONFIG_SIR_BFIN_DMA
  59. disable_dma(port->tx_dma_channel);
  60. #endif
  61. while (!(SIR_UART_GET_LSR(port) & THRE)) {
  62. cpu_relax();
  63. continue;
  64. }
  65. SIR_UART_STOP_TX(port);
  66. }
/* Enable the UART transmitter. */
static void bfin_sir_enable_tx(struct bfin_sir_port *port)
{
	SIR_UART_ENABLE_TX(port);
}
/* Disable the UART receiver. */
static void bfin_sir_stop_rx(struct bfin_sir_port *port)
{
	SIR_UART_STOP_RX(port);
}
/* Enable the UART receiver. */
static void bfin_sir_enable_rx(struct bfin_sir_port *port)
{
	SIR_UART_ENABLE_RX(port);
}
/*
 * Program the UART divisor for one of the supported SIR baud rates and
 * re-assert IrDA mode.  Returns 0 on success, -EINVAL for an
 * unsupported speed (IREN/RPOLC are still re-asserted in that case so
 * RX interrupts keep working).
 */
static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
{
	int ret = -EINVAL;
	unsigned int quot;
	unsigned short val, lsr, lcr;
	/* Bit time (us) of the PREVIOUS speed; static so it survives
	 * between calls and paces the TEMT drain poll below.
	 */
	static int utime;
	int count = 10;

	lcr = WLS(8);	/* 8 data bits, no parity */

	switch (speed) {
	case 9600:
	case 19200:
	case 38400:
	case 57600:
	case 115200:

		/* +8*speed rounds the divisor to nearest; the
		 * ANOMALY_05000230 term compensates on affected parts.
		 */
		quot = (port->clk + (8 * speed)) / (16 * speed)\
						- ANOMALY_05000230;

		/* Wait (at most 10 polls) for the transmitter to drain
		 * before resetting the UART.
		 */
		do {
			udelay(utime);
			lsr = SIR_UART_GET_LSR(port);
		} while (!(lsr & TEMT) && count--);

		/* The useconds for 1 bits to transmit */
		utime = 1000000 / speed + 1;

		/* Clear UCEN bit to reset the UART state machine
		 * and control registers
		 */
		val = SIR_UART_GET_GCTL(port);
		val &= ~UCEN;
		SIR_UART_PUT_GCTL(port, val);

		/* Set DLAB in LCR to Access THR RBR IER */
		SIR_UART_SET_DLAB(port);
		SSYNC();

		SIR_UART_PUT_DLL(port, quot & 0xFF);
		SIR_UART_PUT_DLH(port, (quot >> 8) & 0xFF);
		SSYNC();

		/* Clear DLAB in LCR */
		SIR_UART_CLEAR_DLAB(port);
		SSYNC();

		SIR_UART_PUT_LCR(port, lcr);

		/* Re-enable the UART state machine. */
		val = SIR_UART_GET_GCTL(port);
		val |= UCEN;
		SIR_UART_PUT_GCTL(port, val);

		ret = 0;
		break;
	default:
		printk(KERN_WARNING "bfin_sir: Invalid speed %d\n", speed);
		break;
	}

	val = SIR_UART_GET_GCTL(port);
	/* If not add the 'RPOLC', we can't catch the receive interrupt.
	 * It's related with the HW layout and the IR transiver.
	 */
	val |= IREN | RPOLC;
	SIR_UART_PUT_GCTL(port, val);
	return ret;
}
/*
 * Report whether the port is currently inside an incoming IrDA frame:
 * RX interrupts must be enabled and the async unwrap state machine
 * must not be between frames.
 */
static int bfin_sir_is_receiving(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	if (!(SIR_UART_GET_IER(port) & ERBFI))
		return 0;
	return self->rx_buff.state != OUTSIDE_FRAME;
}
  142. #ifdef CONFIG_SIR_BFIN_PIO
/*
 * PIO transmit path: push one byte per THRE interrupt.  When the wrap
 * buffer is drained, update stats, apply any deferred speed change,
 * switch the port back to RX and wake the netdev queue.
 */
static void bfin_sir_tx_chars(struct net_device *dev)
{
	unsigned int chr;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	if (self->tx_buff.len != 0) {
		chr = *(self->tx_buff.data);
		SIR_UART_PUT_CHAR(port, chr);
		self->tx_buff.data++;
		self->tx_buff.len--;
	} else {
		self->stats.tx_packets++;
		/* data has advanced past head, so the difference is the
		 * number of bytes actually sent for this frame. */
		self->stats.tx_bytes += self->tx_buff.data - self->tx_buff.head;
		if (self->newspeed) {
			bfin_sir_set_speed(port, self->newspeed);
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		bfin_sir_stop_tx(port);
		bfin_sir_enable_rx(port);
		/* I'm hungry! */
		netif_wake_queue(dev);
	}
}
/*
 * PIO receive path: read one byte from the UART and feed it to the
 * IrDA SIR unwrap state machine, time-stamping the last RX.
 */
static void bfin_sir_rx_chars(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	unsigned char ch;

	SIR_UART_CLEAR_LSR(port);
	ch = SIR_UART_GET_CHAR(port);
	async_unwrap_char(dev, &self->stats, &self->rx_buff, ch);
	dev->last_rx = jiffies;
}
/* RX IRQ handler (PIO mode): drain every byte the UART has ready. */
static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	spin_lock(&self->lock);
	while ((SIR_UART_GET_LSR(port) & DR))
		bfin_sir_rx_chars(dev);
	spin_unlock(&self->lock);

	return IRQ_HANDLED;
}
/* TX IRQ handler (PIO mode): send the next byte when THR is empty. */
static irqreturn_t bfin_sir_tx_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	spin_lock(&self->lock);
	if (SIR_UART_GET_LSR(port) & THRE)
		bfin_sir_tx_chars(dev);
	spin_unlock(&self->lock);

	return IRQ_HANDLED;
}
  199. #endif /* CONFIG_SIR_BFIN_PIO */
  200. #ifdef CONFIG_SIR_BFIN_DMA
/*
 * DMA transmit path: start a one-shot linear DMA over the wrapped TX
 * buffer.  A zero-length buffer means "speed change only": apply the
 * new speed, go back to RX and wake the queue without touching DMA.
 * Guarded by tx_done so only one transfer is in flight at a time.
 */
static void bfin_sir_dma_tx_chars(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	if (!port->tx_done)
		return;
	port->tx_done = 0;

	if (self->tx_buff.len == 0) {
		self->stats.tx_packets++;
		if (self->newspeed) {
			bfin_sir_set_speed(port, self->newspeed);
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		bfin_sir_enable_rx(port);
		port->tx_done = 1;
		netif_wake_queue(dev);
		return;
	}

	/* Write the buffer back to memory so the DMA engine sees it. */
	blackfin_dcache_flush_range((unsigned long)(self->tx_buff.data),
		(unsigned long)(self->tx_buff.data+self->tx_buff.len));

	set_dma_config(port->tx_dma_channel,
		set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
			INTR_ON_BUF, DIMENSION_LINEAR, DATA_SIZE_8,
			DMA_SYNC_RESTART));
	set_dma_start_addr(port->tx_dma_channel,
		(unsigned long)(self->tx_buff.data));
	set_dma_x_count(port->tx_dma_channel, self->tx_buff.len);
	set_dma_x_modify(port->tx_dma_channel, 1);
	enable_dma(port->tx_dma_channel);
}
/*
 * TX DMA completion interrupt: once the channel has stopped, account
 * the frame, apply any deferred speed change, return to RX and mark
 * the port ready for the next DMA transmit.
 */
static irqreturn_t bfin_sir_dma_tx_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	spin_lock(&self->lock);
	if (!(get_dma_curr_irqstat(port->tx_dma_channel) & DMA_RUN)) {
		clear_dma_irqstat(port->tx_dma_channel);
		bfin_sir_stop_tx(port);

		self->stats.tx_packets++;
		self->stats.tx_bytes += self->tx_buff.len;
		self->tx_buff.len = 0;
		if (self->newspeed) {
			bfin_sir_set_speed(port, self->newspeed);
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		bfin_sir_enable_rx(port);
		/* I'm hungry! */
		netif_wake_queue(dev);
		port->tx_done = 1;
	}
	spin_unlock(&self->lock);

	return IRQ_HANDLED;
}
/*
 * Feed the bytes between head and tail of the RX DMA ring to the IrDA
 * unwrap state machine.  Caller holds self->lock and updates head.
 */
static void bfin_sir_dma_rx_chars(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	int i;

	SIR_UART_CLEAR_LSR(port);

	for (i = port->rx_dma_buf.head; i < port->rx_dma_buf.tail; i++)
		async_unwrap_char(dev, &self->stats, &self->rx_buff, port->rx_dma_buf.buf[i]);
}
/*
 * RX flush timer: the DMA only interrupts per full row, so bytes of a
 * partially filled row would otherwise sit in the ring.  Compute the
 * current DMA write position from the hardware x-count and flush any
 * bytes that arrived since the last interrupt.
 */
void bfin_sir_rx_dma_timeout(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	int x_pos, pos;
	unsigned long flags;

	spin_lock_irqsave(&self->lock, flags);
	x_pos = DMA_SIR_RX_XCNT - get_dma_curr_xcount(port->rx_dma_channel);
	if (x_pos == DMA_SIR_RX_XCNT)
		x_pos = 0;

	pos = port->rx_dma_nrows * DMA_SIR_RX_XCNT + x_pos;

	if (pos > port->rx_dma_buf.tail) {
		port->rx_dma_buf.tail = pos;
		bfin_sir_dma_rx_chars(dev);
		port->rx_dma_buf.head = port->rx_dma_buf.tail;
	}
	spin_unlock_irqrestore(&self->lock, flags);
}
/*
 * RX DMA row interrupt: one full row of the 2D autobuffer has been
 * filled.  Advance tail to the row boundary, unwrap the new bytes,
 * wrap the row counter at the end of the buffer, and re-arm the flush
 * timer for any partial row that follows.
 */
static irqreturn_t bfin_sir_dma_rx_int(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	unsigned short irqstat;

	spin_lock(&self->lock);

	port->rx_dma_nrows++;
	port->rx_dma_buf.tail = DMA_SIR_RX_XCNT * port->rx_dma_nrows;
	bfin_sir_dma_rx_chars(dev);
	if (port->rx_dma_nrows >= DMA_SIR_RX_YCNT) {
		/* Autobuffer wrapped back to the start of the ring. */
		port->rx_dma_nrows = 0;
		port->rx_dma_buf.tail = 0;
	}
	port->rx_dma_buf.head = port->rx_dma_buf.tail;

	irqstat = get_dma_curr_irqstat(port->rx_dma_channel);
	clear_dma_irqstat(port->rx_dma_channel);
	spin_unlock(&self->lock);

	mod_timer(&port->rx_dma_timer, jiffies + DMA_SIR_RX_FLUSH_JIFS);
	return IRQ_HANDLED;
}
  305. #endif /* CONFIG_SIR_BFIN_DMA */
  306. static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
  307. {
  308. #ifdef CONFIG_SIR_BFIN_DMA
  309. dma_addr_t dma_handle;
  310. #endif /* CONFIG_SIR_BFIN_DMA */
  311. if (request_dma(port->rx_dma_channel, "BFIN_UART_RX") < 0) {
  312. dev_warn(&dev->dev, "Unable to attach SIR RX DMA channel\n");
  313. return -EBUSY;
  314. }
  315. if (request_dma(port->tx_dma_channel, "BFIN_UART_TX") < 0) {
  316. dev_warn(&dev->dev, "Unable to attach SIR TX DMA channel\n");
  317. free_dma(port->rx_dma_channel);
  318. return -EBUSY;
  319. }
  320. #ifdef CONFIG_SIR_BFIN_DMA
  321. set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev);
  322. set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev);
  323. port->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA);
  324. port->rx_dma_buf.head = 0;
  325. port->rx_dma_buf.tail = 0;
  326. port->rx_dma_nrows = 0;
  327. set_dma_config(port->rx_dma_channel,
  328. set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
  329. INTR_ON_ROW, DIMENSION_2D,
  330. DATA_SIZE_8, DMA_SYNC_RESTART));
  331. set_dma_x_count(port->rx_dma_channel, DMA_SIR_RX_XCNT);
  332. set_dma_x_modify(port->rx_dma_channel, 1);
  333. set_dma_y_count(port->rx_dma_channel, DMA_SIR_RX_YCNT);
  334. set_dma_y_modify(port->rx_dma_channel, 1);
  335. set_dma_start_addr(port->rx_dma_channel, (unsigned long)port->rx_dma_buf.buf);
  336. enable_dma(port->rx_dma_channel);
  337. port->rx_dma_timer.data = (unsigned long)(dev);
  338. port->rx_dma_timer.function = (void *)bfin_sir_rx_dma_timeout;
  339. #else
  340. if (request_irq(port->irq, bfin_sir_rx_int, IRQF_DISABLED, "BFIN_SIR_RX", dev)) {
  341. dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
  342. return -EBUSY;
  343. }
  344. if (request_irq(port->irq+1, bfin_sir_tx_int, IRQF_DISABLED, "BFIN_SIR_TX", dev)) {
  345. dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
  346. free_irq(port->irq, dev);
  347. return -EBUSY;
  348. }
  349. #endif
  350. return 0;
  351. }
  352. static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev)
  353. {
  354. unsigned short val;
  355. bfin_sir_stop_rx(port);
  356. SIR_UART_DISABLE_INTS(port);
  357. val = SIR_UART_GET_GCTL(port);
  358. val &= ~(UCEN | IREN | RPOLC);
  359. SIR_UART_PUT_GCTL(port, val);
  360. #ifdef CONFIG_SIR_BFIN_DMA
  361. disable_dma(port->tx_dma_channel);
  362. disable_dma(port->rx_dma_channel);
  363. del_timer(&(port->rx_dma_timer));
  364. dma_free_coherent(NULL, PAGE_SIZE, port->rx_dma_buf.buf, 0);
  365. #else
  366. free_irq(port->irq+1, dev);
  367. free_irq(port->irq, dev);
  368. #endif
  369. free_dma(port->tx_dma_channel);
  370. free_dma(port->rx_dma_channel);
  371. }
  372. #ifdef CONFIG_PM
/*
 * Platform suspend hook: if the interface is up, finish any pending
 * transmit work, release the port resources and detach the netdev.
 */
static int bfin_sir_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct bfin_sir_port *sir_port;
	struct net_device *dev;
	struct bfin_sir_self *self;

	sir_port = platform_get_drvdata(pdev);
	if (!sir_port)
		return 0;

	dev = sir_port->dev;
	self = netdev_priv(dev);
	if (self->open) {
		flush_work(&self->work);
		bfin_sir_shutdown(self->sir_port, dev);
		netif_device_detach(dev);
	}

	return 0;
}
/*
 * Platform resume hook: re-acquire the port resources and restart at
 * 9600 baud (the IrDA negotiation baseline); any pending speed change
 * is dropped since the link restarts from scratch.
 */
static int bfin_sir_resume(struct platform_device *pdev)
{
	struct bfin_sir_port *sir_port;
	struct net_device *dev;
	struct bfin_sir_self *self;
	struct bfin_sir_port *port;

	sir_port = platform_get_drvdata(pdev);
	if (!sir_port)
		return 0;

	dev = sir_port->dev;
	self = netdev_priv(dev);
	port = self->sir_port;
	if (self->open) {
		if (self->newspeed) {
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		/* NOTE(review): bfin_sir_startup() return value ignored;
		 * a failed resume leaves the device attached but dead.
		 */
		bfin_sir_startup(port, dev);
		bfin_sir_set_speed(port, 9600);
		bfin_sir_enable_rx(port);
		netif_device_attach(dev);
	}

	return 0;
}
  414. #else
  415. #define bfin_sir_suspend NULL
  416. #define bfin_sir_resume NULL
  417. #endif
/*
 * Deferred transmit (workqueue context, may sleep): wait out the media
 * turnaround time, reset the IR mode bits to re-arm the transceiver,
 * then start the transmit (DMA kick in the DMA build; in PIO mode the
 * TX interrupt drives the bytes out once TX is enabled).
 */
static void bfin_sir_send_work(struct work_struct *work)
{
	struct bfin_sir_self *self = container_of(work, struct bfin_sir_self, work);
	struct net_device *dev = self->sir_port->dev;
	struct bfin_sir_port *port = self->sir_port;
	unsigned short val;
	int tx_cnt = 10;

	/* Give a busy receiver up to 10 turnaround delays to finish. */
	while (bfin_sir_is_receiving(dev) && --tx_cnt)
		turnaround_delay(dev->last_rx, self->mtt);

	bfin_sir_stop_rx(port);

	/* To avoid losting RX interrupt, we reset IR function before
	 * sending data. We also can set the speed, which will
	 * reset all the UART.
	 */
	val = SIR_UART_GET_GCTL(port);
	val &= ~(IREN | RPOLC);
	SIR_UART_PUT_GCTL(port, val);
	SSYNC();
	val |= IREN | RPOLC;
	SIR_UART_PUT_GCTL(port, val);
	SSYNC();
	/* bfin_sir_set_speed(port, self->speed); */

#ifdef CONFIG_SIR_BFIN_DMA
	bfin_sir_dma_tx_chars(dev);
#endif
	bfin_sir_enable_tx(port);
	dev->trans_start = jiffies;
}
  446. static int bfin_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
  447. {
  448. struct bfin_sir_self *self = netdev_priv(dev);
  449. int speed = irda_get_next_speed(skb);
  450. netif_stop_queue(dev);
  451. self->mtt = irda_get_mtt(skb);
  452. if (speed != self->speed && speed != -1)
  453. self->newspeed = speed;
  454. self->tx_buff.data = self->tx_buff.head;
  455. if (skb->len == 0)
  456. self->tx_buff.len = 0;
  457. else
  458. self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, self->tx_buff.truesize);
  459. schedule_work(&self->work);
  460. dev_kfree_skb(skb);
  461. return 0;
  462. }
/*
 * IrDA ioctl handler: set bandwidth, flag media busy, or query
 * whether a frame is being received.  Returns 0 or a negative errno.
 */
static int bfin_sir_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
	struct if_irda_req *rq = (struct if_irda_req *)ifreq;
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;
	int ret = 0;

	switch (cmd) {
	case SIOCSBANDWIDTH:
		if (capable(CAP_NET_ADMIN)) {
			if (self->open) {
				ret = bfin_sir_set_speed(port, rq->ifr_baudrate);
				bfin_sir_enable_rx(port);
			} else {
				/* Interface down: silently ignore. */
				dev_warn(&dev->dev, "SIOCSBANDWIDTH: !netif_running\n");
				ret = 0;
			}
		}
		break;

	case SIOCSMEDIABUSY:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			irda_device_set_media_busy(dev, TRUE);
			ret = 0;
		}
		break;

	case SIOCGRECEIVING:
		rq->ifr_receiving = bfin_sir_is_receiving(dev);
		break;

	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
  497. static struct net_device_stats *bfin_sir_stats(struct net_device *dev)
  498. {
  499. struct bfin_sir_self *self = netdev_priv(dev);
  500. return &self->stats;
  501. }
  502. static int bfin_sir_open(struct net_device *dev)
  503. {
  504. struct bfin_sir_self *self = netdev_priv(dev);
  505. struct bfin_sir_port *port = self->sir_port;
  506. int err = -ENOMEM;
  507. self->newspeed = 0;
  508. self->speed = 9600;
  509. spin_lock_init(&self->lock);
  510. err = bfin_sir_startup(port, dev);
  511. if (err)
  512. goto err_startup;
  513. bfin_sir_set_speed(port, 9600);
  514. self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME);
  515. if (!self->irlap)
  516. goto err_irlap;
  517. INIT_WORK(&self->work, bfin_sir_send_work);
  518. /*
  519. * Now enable the interrupt then start the queue
  520. */
  521. self->open = 1;
  522. bfin_sir_enable_rx(port);
  523. netif_start_queue(dev);
  524. return 0;
  525. err_irlap:
  526. self->open = 0;
  527. bfin_sir_shutdown(port, dev);
  528. err_startup:
  529. return err;
  530. }
/*
 * ndo_stop: finish pending TX work, release the port, drop any
 * half-received skb, close IrLAP and stop the queue.
 */
static int bfin_sir_stop(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);

	flush_work(&self->work);
	bfin_sir_shutdown(self->sir_port, dev);

	if (self->rxskb) {
		dev_kfree_skb(self->rxskb);
		self->rxskb = NULL;
	}

	/* Stop IrLAP */
	if (self->irlap) {
		irlap_close(self->irlap);
		self->irlap = NULL;
	}

	netif_stop_queue(dev);
	self->open = 0;

	return 0;
}
  549. static int bfin_sir_init_iobuf(iobuff_t *io, int size)
  550. {
  551. io->head = kmalloc(size, GFP_KERNEL);
  552. if (!io->head)
  553. return -ENOMEM;
  554. io->truesize = size;
  555. io->in_frame = FALSE;
  556. io->state = OUTSIDE_FRAME;
  557. io->data = io->head;
  558. return 0;
  559. }
/* net_device callbacks for the IrDA interface. */
static const struct net_device_ops bfin_sir_ndo = {
	.ndo_open	= bfin_sir_open,
	.ndo_stop	= bfin_sir_stop,
	.ndo_start_xmit	= bfin_sir_hard_xmit,
	.ndo_do_ioctl	= bfin_sir_ioctl,
	.ndo_get_stats	= bfin_sir_stats,
};
/*
 * Probe one Blackfin SIR port: claim the peripheral pins, allocate the
 * port structure and IrDA net_device, set up the RX/TX wrap buffers
 * and QoS, then register the net device.
 */
static int __devinit bfin_sir_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct bfin_sir_self *self;
	unsigned int baudrate_mask;
	struct bfin_sir_port *sir_port;
	int err;

	/* The board file's pin list for each port encodes the port id
	 * in its 4th entry; reject out-of-range or mismatched ids.
	 */
	if (pdev->id >= 0 && pdev->id < ARRAY_SIZE(per) && \
				per[pdev->id][3] == pdev->id) {
		err = peripheral_request_list(per[pdev->id], DRIVER_NAME);
		if (err)
			return err;
	} else {
		dev_err(&pdev->dev, "Invalid pdev id, please check board file\n");
		return -ENODEV;
	}

	err = -ENOMEM;
	sir_port = kmalloc(sizeof(*sir_port), GFP_KERNEL);
	if (!sir_port)
		goto err_mem_0;

	bfin_sir_init_ports(sir_port, pdev);

	dev = alloc_irdadev(sizeof(*self));
	if (!dev)
		goto err_mem_1;

	self = netdev_priv(dev);
	self->dev = &pdev->dev;
	self->sir_port = sir_port;
	sir_port->dev = dev;

	err = bfin_sir_init_iobuf(&self->rx_buff, IRDA_SKB_MAX_MTU);
	if (err)
		goto err_mem_2;
	err = bfin_sir_init_iobuf(&self->tx_buff, IRDA_SIR_MAX_FRAME);
	if (err)
		goto err_mem_3;

	dev->netdev_ops = &bfin_sir_ndo;
	dev->irq = sir_port->irq;

	irda_init_max_qos_capabilies(&self->qos);

	baudrate_mask = IR_9600;

	/* Deliberate fallthrough: each max_rate case also enables every
	 * lower rate.
	 */
	switch (max_rate) {
	case 115200:
		baudrate_mask |= IR_115200;
	case 57600:
		baudrate_mask |= IR_57600;
	case 38400:
		baudrate_mask |= IR_38400;
	case 19200:
		baudrate_mask |= IR_19200;
	case 9600:
		break;
	default:
		dev_warn(&pdev->dev, "Invalid maximum baud rate, using 9600\n");
	}

	self->qos.baud_rate.bits &= baudrate_mask;
	self->qos.min_turn_time.bits = 1; /* 10 ms or more */

	irda_qos_bits_to_value(&self->qos);

	err = register_netdev(dev);

	if (err) {
		/* Unwind ladder: the labels live inside this branch so
		 * the gotos above release exactly what was acquired
		 * before the failure.
		 */
		kfree(self->tx_buff.head);
err_mem_3:
		kfree(self->rx_buff.head);
err_mem_2:
		free_netdev(dev);
err_mem_1:
		kfree(sir_port);
err_mem_0:
		peripheral_free_list(per[pdev->id]);
	} else
		platform_set_drvdata(pdev, sir_port);

	return err;
}
/*
 * Remove one SIR port: unregister the net device and free everything
 * probe allocated (wrap buffers, net_device, port structure).
 */
static int __devexit bfin_sir_remove(struct platform_device *pdev)
{
	struct bfin_sir_port *sir_port;
	struct net_device *dev = NULL;
	struct bfin_sir_self *self;

	sir_port = platform_get_drvdata(pdev);
	if (!sir_port)
		return 0;

	dev = sir_port->dev;
	self = netdev_priv(dev);
	unregister_netdev(dev);
	kfree(self->tx_buff.head);
	kfree(self->rx_buff.head);
	free_netdev(dev);
	kfree(sir_port);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
/* Platform driver glue; one instance per board-declared SIR port. */
static struct platform_driver bfin_ir_driver = {
	.probe   = bfin_sir_probe,
	.remove  = __devexit_p(bfin_sir_remove),
	.suspend = bfin_sir_suspend,
	.resume  = bfin_sir_resume,
	.driver  = {
		.name = DRIVER_NAME,
	},
};
/* Module entry: register the platform driver. */
static int __init bfin_sir_init(void)
{
	return platform_driver_register(&bfin_ir_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit bfin_sir_exit(void)
{
	platform_driver_unregister(&bfin_ir_driver);
}
  672. module_init(bfin_sir_init);
  673. module_exit(bfin_sir_exit);
  674. module_param(max_rate, int, 0);
  675. MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");
  676. MODULE_AUTHOR("Graf Yang <graf.yang@analog.com>");
  677. MODULE_DESCRIPTION("Blackfin IrDA driver");
  678. MODULE_LICENSE("GPL");