caif_hsi.c

/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
 *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
 * License terms: GNU General Public License (GPL) version 2.
 */

#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF HSI driver");
/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
				(((pow)-((x)&((pow)-1)))))
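
/*
 * Example (illustrative only): with an alignment of 4, PAD_POW2(5, 4) == 3
 * and PAD_POW2(8, 4) == 0, i.e. the result is the number of bytes needed to
 * round 'x' up to the next multiple of 'pow'.
 */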

/*
 * HSI padding options.
 * Warning: must be a power of 2 (& operation used) and can not be zero!
 */
static int hsi_head_align = 4;
module_param(hsi_head_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");

static int hsi_tail_align = 4;
module_param(hsi_tail_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");

/*
 * HSI link layer flowcontrol thresholds.
 * Warning: A high threshold value might increase throughput but it will at
 * the same time prevent channel prioritization and increase the risk of
 * flooding the modem. The high threshold should be above the low.
 */
static int hsi_high_threshold = 100;
module_param(hsi_high_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");

static int hsi_low_threshold = 50;
module_param(hsi_low_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_low_threshold, "HSI low threshold (FLOW ON).");

#define ON 1
#define OFF 0

/*
 * Threshold values for the HSI packet queue. Flowcontrol will be asserted
 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
 * de-asserted before the number of packets drops below LOW_WATER_MARK.
 */
#define LOW_WATER_MARK   hsi_low_threshold
#define HIGH_WATER_MARK  hsi_high_threshold

static LIST_HEAD(cfhsi_list);
static spinlock_t cfhsi_list_lock;

static void cfhsi_inactivity_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Schedule power down work queue. */
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}

static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = skb_dequeue(&cfhsi->qhead);
		if (!skb)
			break;

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
	spin_unlock_bh(&cfhsi->lock);
}

static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
	if (ret) {
		dev_warn(&cfhsi->ndev->dev,
			"%s: can't wake up HSI interface: %d.\n",
			__func__, ret);
		return ret;
	}

	do {
		ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
				&fifo_occupancy);
		if (ret) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exiting normally. */
			break;

		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->dev);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			!test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

		if (ret < 0) {
			dev_warn(&cfhsi->ndev->dev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			dev_warn(&cfhsi->ndev->dev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

	return ret;
}
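
/*
 * Build an outgoing HSI transfer in the TX buffer: one descriptor, an
 * optional embedded CAIF frame and up to CFHSI_MAX_PKTS padded payload
 * frames pulled from the TX queue. Returns the total transfer length in
 * bytes (descriptor included), or 0 if the queue was empty.
 */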
static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

	skb = skb_dequeue(&cfhsi->qhead);
	if (!skb)
		return 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad = 0;
		int tpad = 0;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);
			consume_skb(skb);
			skb = NULL;
		}
	} else
		/* Clear offset. */
		desc->offset = 0;

	/* Create payload CAIF frames. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad = 0;
		int tpad = 0;

		if (!skb)
			skb = skb_dequeue(&cfhsi->qhead);

		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	skb = skb_peek(&cfhsi->qhead);
	if (skb)
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}

static void cfhsi_tx_done_work(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_desc *desc = NULL;
	int len = 0;
	int res;

	cfhsi = container_of(work, struct cfhsi, tx_done_work);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	desc = (struct cfhsi_desc *)cfhsi->tx_buf;

	do {
		/*
		 * Send flow on if flow off has been previously signalled
		 * and number of packets is below low water mark.
		 */
		spin_lock_bh(&cfhsi->lock);
		if (cfhsi->flow_off_sent &&
				cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
				cfhsi->cfdev.flowctrl) {
			cfhsi->flow_off_sent = 0;
			cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
		}
		spin_unlock_bh(&cfhsi->lock);

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		if (!len) {
			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
			/* Start inactivity timer. */
			mod_timer(&cfhsi->timer,
					jiffies + CFHSI_INACTIVITY_TOUT);
			break;
		}

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
		}
	} while (res < 0);
}

static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	queue_work(cfhsi->wq, &cfhsi->tx_done_work);
}
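
/*
 * Parse a received descriptor: deliver the embedded CAIF frame (if any) to
 * the network stack and return the length of the payload transfer that
 * should follow, or 0 if the descriptor is invalid or carries no payload.
 */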
static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		return 0;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		int len = 0, retries = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_KERNEL);
		while (!skb) {
			retries++;
			schedule_timeout(1);
			skb = alloc_skb(len + 1, GFP_KERNEL);
			if (skb) {
				printk(KERN_WARNING "%s: slept for %u "
						"before getting memory\n",
						__func__, retries);
				break;
			}
			if (retries > HZ) {
				printk(KERN_ERR "%s: slept for 1HZ and "
						"did not get memory\n",
						__func__);
				cfhsi->ndev->stats.rx_dropped++;
				goto drop_frame;
			}
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We are called from an arch-specific platform device.
		 * Unfortunately we don't know what context we're
		 * running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

drop_frame:
	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	if (xfer_sz % 4) {
		dev_err(&cfhsi->ndev->dev,
				"%s: Invalid payload len: %d, ignored.\n",
				__func__, xfer_sz);
		xfer_sz = 0;
	}

	return xfer_sz;
}
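
/*
 * Parse a received payload area: walk the frame lengths in the preceding
 * descriptor, strip the head padding of each CAIF frame and deliver the
 * frame to the network stack. Returns the number of payload bytes consumed,
 * or -EINVAL on a malformed descriptor.
 */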
static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
			__func__);
		return -EINVAL;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		u8 *pcffrm = NULL;
		int len = 0, retries = 0;

		if (WARN_ON(desc->cffrm_len[nfrms] > CFHSI_MAX_PAYLOAD_SZ)) {
			dev_err(&cfhsi->ndev->dev, "%s: Invalid payload.\n",
				__func__);
			return -EINVAL;
		}

		/* CAIF frame starts after head padding. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_KERNEL);
		while (!skb) {
			retries++;
			schedule_timeout(1);
			skb = alloc_skb(len + 1, GFP_KERNEL);
			if (skb) {
				printk(KERN_WARNING "%s: slept for %u "
						"before getting memory\n",
						__func__, retries);
				break;
			}
			if (retries > HZ) {
				printk(KERN_ERR "%s: slept for 1HZ "
						"and did not get memory\n",
						__func__);
				cfhsi->ndev->stats.rx_dropped++;
				goto drop_frame;
			}
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We're called from a platform device,
		 * and don't know the context we're running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

drop_frame:
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}

static void cfhsi_rx_done_work(struct work_struct *work)
{
	int res;
	int desc_pld_len = 0;
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_desc *desc = NULL;

	cfhsi = container_of(work, struct cfhsi, rx_done_work);
	desc = (struct cfhsi_desc *)cfhsi->rx_buf;

	dev_dbg(&cfhsi->ndev->dev, "%s: Kick timer if pending.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Update inactivity timer if pending. */
	mod_timer_pending(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);

	if (cfhsi->rx_state == CFHSI_RX_STATE_DESC) {
		desc_pld_len = cfhsi_rx_desc(desc, cfhsi);
	} else {
		int pld_len;

		pld_len = cfhsi_rx_pld(desc, cfhsi);

		if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) {
			struct cfhsi_desc *piggy_desc;
			piggy_desc = (struct cfhsi_desc *)
				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
						pld_len);

			/* Extract piggy-backed descriptor. */
			desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi);

			/*
			 * Copy needed information from the piggy-backed
			 * descriptor to the descriptor at the start.
			 */
			memcpy((u8 *)desc, (u8 *)piggy_desc,
					CFHSI_DESC_SHORT_SZ);
		}
	}

	if (desc_pld_len) {
		cfhsi->rx_state = CFHSI_RX_STATE_PAYLOAD;
		cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ;
		cfhsi->rx_len = desc_pld_len;
	} else {
		cfhsi->rx_state = CFHSI_RX_STATE_DESC;
		cfhsi->rx_ptr = cfhsi->rx_buf;
		cfhsi->rx_len = CFHSI_DESC_SZ;
	}
	clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);

	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
		/* Set up new transfer. */
		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
			__func__);
		res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len,
				cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
				__func__, res);
			cfhsi->ndev->stats.rx_errors++;
			cfhsi->ndev->stats.rx_dropped++;
		}
	}
}

static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	set_bit(CFHSI_PENDING_RX, &cfhsi->bits);

	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
		wake_up_interruptible(&cfhsi->flush_fifo_wait);
	else
		queue_work(cfhsi->wq, &cfhsi->rx_done_work);
}
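
/*
 * Wake-up work: assert the wake line, wait for the peer to acknowledge
 * (CFHSI_WAKE_UP_ACK), then resume reception and, if the TX queue is
 * non-empty, start a new TX transfer.
 */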
static void cfhsi_wake_up(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	int res;
	int len;
	long ret;

	cfhsi = container_of(work, struct cfhsi, wake_up_work);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
		/* This happens when wakeup is requested by
		 * both ends at the same time. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		return;
	}

	/* Activate wake line. */
	cfhsi->dev->cfhsi_wake_up(cfhsi->dev);

	dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
		__func__);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKEUP_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
					test_bit(CFHSI_WAKE_UP_ACK,
							&cfhsi->bits), ret);
	if (unlikely(ret < 0)) {
		/* Interrupted by signal. */
		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	} else if (!ret) {
		/* Wakeup timeout. */
		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
			__func__);
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
		return;
	}
	dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
		__func__);

	/* Mark the link awake and clear the pending wake-up request. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

	/* Resume read operation. */
	if (!test_bit(CFHSI_PENDING_RX, &cfhsi->bits)) {
		dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
			__func__);
		res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr,
				cfhsi->rx_len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
				__func__, res);
		}
	}

	/* Clear power up acknowledgment. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

	spin_lock_bh(&cfhsi->lock);

	/* Resume transmit if queue is not empty. */
	if (!skb_peek(&cfhsi->qhead)) {
		dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
			__func__);
		/* Start inactivity timer. */
		mod_timer(&cfhsi->timer,
				jiffies + CFHSI_INACTIVITY_TOUT);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
		__func__);

	spin_unlock_bh(&cfhsi->lock);

	/* Create HSI frame. */
	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

	if (likely(len > 0)) {
		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		dev_err(&cfhsi->ndev->dev,
			"%s: Failed to create HSI frame: %d.\n",
			__func__, len);
	}
}
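
/*
 * Wake-down work, scheduled by the inactivity timer: if the RX FIFO is
 * empty, cancel pending RX, deassert the wake line and wait for the peer to
 * acknowledge (CFHSI_WAKE_DOWN_ACK); otherwise just restart the inactivity
 * timer.
 */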
static void cfhsi_wake_down(struct work_struct *work)
{
	long ret;
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy;

	cfhsi = container_of(work, struct cfhsi, wake_down_work);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Check if there is something in FIFO. */
	if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
							&fifo_occupancy)))
		fifo_occupancy = 0;

	if (fifo_occupancy) {
		dev_dbg(&cfhsi->ndev->dev,
				"%s: %u words in RX FIFO, restart timer.\n",
				__func__, (unsigned) fifo_occupancy);
		spin_lock_bh(&cfhsi->lock);
		mod_timer(&cfhsi->timer,
				jiffies + CFHSI_INACTIVITY_TOUT);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	/* Cancel pending RX requests. */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);

	/* Deactivate wake line. */
	cfhsi->dev->cfhsi_wake_down(cfhsi->dev);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKEUP_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
					test_bit(CFHSI_WAKE_DOWN_ACK,
							&cfhsi->bits),
					ret);
	if (ret < 0) {
		/* Interrupted by signal. */
		dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
			__func__, ret);
		return;
	} else if (!ret) {
		/* Timeout. */
		dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
			__func__);
	}

	/* Clear power down acknowledgment. */
	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Check if there is something in FIFO. */
	if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
							&fifo_occupancy)))
		fifo_occupancy = 0;

	if (fifo_occupancy) {
		dev_dbg(&cfhsi->ndev->dev,
				"%s: %u words in RX FIFO, wakeup forced.\n",
				__func__, (unsigned) fifo_occupancy);
		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
	} else
		dev_dbg(&cfhsi->ndev->dev, "%s: Done.\n",
			__func__);
}

static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_up_wait);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Schedule wake up work queue if the peer initiates. */
	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}

static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(drv, struct cfhsi, drv);
	dev_dbg(&cfhsi->ndev->dev, "%s.\n",
		__func__);

	/* Initiating low power is only permitted by the host (us). */
	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_down_wait);
}
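
/*
 * Transmit entry point: queue the skb, signal CAIF flow-off when the queue
 * grows above the high water mark, and either start a transfer directly
 * (the link is still awake, i.e. the inactivity timer was running) or
 * schedule the wake-up work to power the link up first.
 */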
static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cfhsi *cfhsi = NULL;
	int start_xfer = 0;
	int timer_active;

	if (!dev)
		return -EINVAL;

	cfhsi = netdev_priv(dev);

	spin_lock_bh(&cfhsi->lock);

	skb_queue_tail(&cfhsi->qhead, skb);

	/* Sanity check; xmit should not be called after unregister_netdev */
	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_abort_tx(cfhsi);
		return -EINVAL;
	}

	/* Send flow off if number of packets is above high water mark. */
	if (!cfhsi->flow_off_sent &&
		cfhsi->qhead.qlen > cfhsi->q_high_mark &&
		cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	}

	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
		start_xfer = 1;
	}

	spin_unlock_bh(&cfhsi->lock);

	if (!start_xfer)
		return 0;

	/* Delete inactivity timer if started. */
#ifdef CONFIG_SMP
	timer_active = del_timer_sync(&cfhsi->timer);
#else
	timer_active = del_timer(&cfhsi->timer);
#endif /* CONFIG_SMP */

	if (timer_active) {
		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
		int len;
		int res;

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		BUG_ON(!len);

		/* Set up new transfer. */
		res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
		if (WARN_ON(res < 0)) {
			dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		/* Schedule wake up work queue if we initiate. */
		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
	}

	return 0;
}

static int cfhsi_open(struct net_device *dev)
{
	netif_wake_queue(dev);

	return 0;
}

static int cfhsi_close(struct net_device *dev)
{
	netif_stop_queue(dev);

	return 0;
}

static const struct net_device_ops cfhsi_ops = {
	.ndo_open = cfhsi_open,
	.ndo_stop = cfhsi_close,
	.ndo_start_xmit = cfhsi_xmit
};

static void cfhsi_setup(struct net_device *dev)
{
	struct cfhsi *cfhsi = netdev_priv(dev);

	dev->features = 0;
	dev->netdev_ops = &cfhsi_ops;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CFHSI_MAX_PAYLOAD_SZ;
	dev->tx_queue_len = 0;
	dev->destructor = free_netdev;
	skb_queue_head_init(&cfhsi->qhead);
	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
	cfhsi->cfdev.use_frag = false;
	cfhsi->cfdev.use_stx = false;
	cfhsi->cfdev.use_fcs = false;
	cfhsi->ndev = dev;
}

int cfhsi_probe(struct platform_device *pdev)
{
	struct cfhsi *cfhsi = NULL;
	struct net_device *ndev;
	struct cfhsi_dev *dev;
	int res;

	ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
	if (!ndev) {
		dev_err(&pdev->dev, "%s: alloc_netdev failed.\n",
			__func__);
		return -ENODEV;
	}

	cfhsi = netdev_priv(ndev);
	cfhsi->ndev = ndev;
	cfhsi->pdev = pdev;

	/* Initialize state variables. */
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	cfhsi->rx_state = CFHSI_RX_STATE_DESC;

	/* Set flow info. */
	cfhsi->flow_off_sent = 0;
	cfhsi->q_low_mark = LOW_WATER_MARK;
	cfhsi->q_high_mark = HIGH_WATER_MARK;

	/* Assign the HSI device. */
	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
	cfhsi->dev = dev;

	/* Assign the driver to this HSI device. */
	dev->drv = &cfhsi->drv;

	/*
	 * Allocate a TX buffer with the size of an HSI packet descriptor
	 * and the necessary room for CAIF payload frames.
	 */
	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
	if (!cfhsi->tx_buf) {
		dev_err(&ndev->dev, "%s: Failed to allocate TX buffer.\n",
			__func__);
		res = -ENODEV;
		goto err_alloc_tx;
	}

	/*
	 * Allocate a RX buffer with the size of two HSI packet descriptors and
	 * the necessary room for CAIF payload frames.
	 */
	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_buf) {
		dev_err(&ndev->dev, "%s: Failed to allocate RX buffer.\n",
			__func__);
		res = -ENODEV;
		goto err_alloc_rx;
	}

	/* Initialize receive variables. */
	cfhsi->rx_ptr = cfhsi->rx_buf;
	cfhsi->rx_len = CFHSI_DESC_SZ;

	/* Initialize spin locks. */
	spin_lock_init(&cfhsi->lock);

	/* Set up the driver. */
	cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
	cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;

	/* Initialize the work queues. */
	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
	INIT_WORK(&cfhsi->rx_done_work, cfhsi_rx_done_work);
	INIT_WORK(&cfhsi->tx_done_work, cfhsi_tx_done_work);

	/* Clear all bit fields. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);

	/* Create work thread. */
	cfhsi->wq = create_singlethread_workqueue(pdev->name);
	if (!cfhsi->wq) {
		dev_err(&ndev->dev, "%s: Failed to create work queue.\n",
			__func__);
		res = -ENODEV;
		goto err_create_wq;
	}

	/* Initialize wait queues. */
	init_waitqueue_head(&cfhsi->wake_up_wait);
	init_waitqueue_head(&cfhsi->wake_down_wait);
	init_waitqueue_head(&cfhsi->flush_fifo_wait);

	/* Setup the inactivity timer. */
	init_timer(&cfhsi->timer);
	cfhsi->timer.data = (unsigned long)cfhsi;
	cfhsi->timer.function = cfhsi_inactivity_tout;

	/* Add CAIF HSI device to list. */
	spin_lock(&cfhsi_list_lock);
	list_add_tail(&cfhsi->list, &cfhsi_list);
	spin_unlock(&cfhsi_list_lock);

	/* Activate HSI interface. */
	res = cfhsi->dev->cfhsi_up(cfhsi->dev);
	if (res) {
		dev_err(&cfhsi->ndev->dev,
			"%s: can't activate HSI interface: %d.\n",
			__func__, res);
		goto err_activate;
	}

	/* Flush FIFO. */
	res = cfhsi_flush_fifo(cfhsi);
	if (res) {
		dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n",
			__func__, res);
		goto err_net_reg;
	}

	cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
	cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;

	/* Register network device. */
	res = register_netdev(ndev);
	if (res) {
		dev_err(&ndev->dev, "%s: Registration error: %d.\n",
			__func__, res);
		goto err_net_reg;
	}

	netif_stop_queue(ndev);

	return res;

 err_net_reg:
	cfhsi->dev->cfhsi_down(cfhsi->dev);
 err_activate:
	destroy_workqueue(cfhsi->wq);
 err_create_wq:
	kfree(cfhsi->rx_buf);
 err_alloc_rx:
	kfree(cfhsi->tx_buf);
 err_alloc_tx:
	free_netdev(ndev);

	return res;
}

static void cfhsi_shutdown(struct cfhsi *cfhsi, bool remove_platform_dev)
{
	u8 *tx_buf, *rx_buf;

	/* Stop TXing. */
	netif_tx_stop_all_queues(cfhsi->ndev);

	/* Going to shut down driver. */
	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	if (remove_platform_dev) {
		/* Flush workqueue. */
		flush_workqueue(cfhsi->wq);

		/* Notify device. */
		platform_device_unregister(cfhsi->pdev);
	}

	/* Flush workqueue. */
	flush_workqueue(cfhsi->wq);

	/* Delete timer if pending. */
#ifdef CONFIG_SMP
	del_timer_sync(&cfhsi->timer);
#else
	del_timer(&cfhsi->timer);
#endif /* CONFIG_SMP */

	/* Cancel pending RX request (if any). */
	cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);

	/* Flush again and destroy workqueue. */
	destroy_workqueue(cfhsi->wq);

	/* Store buffers: will be freed later. */
	tx_buf = cfhsi->tx_buf;
	rx_buf = cfhsi->rx_buf;

	/* Flush transmit queues. */
	cfhsi_abort_tx(cfhsi);

	/* Deactivate interface. */
	cfhsi->dev->cfhsi_down(cfhsi->dev);

	/* Finally unregister the network device. */
	unregister_netdev(cfhsi->ndev);

	/* Free buffers. */
	kfree(tx_buf);
	kfree(rx_buf);
}

int cfhsi_remove(struct platform_device *pdev)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_dev *dev;

	dev = (struct cfhsi_dev *)pdev->dev.platform_data;
	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);
		/* Find the corresponding device. */
		if (cfhsi->dev == dev) {
			/* Remove from list. */
			list_del(list_node);
			spin_unlock(&cfhsi_list_lock);

			/* Shutdown driver. */
			cfhsi_shutdown(cfhsi, false);

			return 0;
		}
	}
	spin_unlock(&cfhsi_list_lock);
	return -ENODEV;
}

struct platform_driver cfhsi_plat_drv = {
	.probe = cfhsi_probe,
	.remove = cfhsi_remove,
	.driver = {
		.name = "cfhsi",
		.owner = THIS_MODULE,
	},
};

static void __exit cfhsi_exit_module(void)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi = NULL;

	spin_lock(&cfhsi_list_lock);
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);

		/* Remove from list. */
		list_del(list_node);
		spin_unlock(&cfhsi_list_lock);

		/* Shutdown driver. */
		cfhsi_shutdown(cfhsi, true);

		spin_lock(&cfhsi_list_lock);
	}
	spin_unlock(&cfhsi_list_lock);

	/* Unregister platform driver. */
	platform_driver_unregister(&cfhsi_plat_drv);
}

static int __init cfhsi_init_module(void)
{
	int result;

	/* Initialize spin lock. */
	spin_lock_init(&cfhsi_list_lock);

	/* Register platform driver. */
	result = platform_driver_register(&cfhsi_plat_drv);
	if (result) {
		printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
			result);
		goto err_dev_register;
	}

	return result;

 err_dev_register:
	return result;
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);