fmdrv_common.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677
  1. /*
  2. * FM Driver for Connectivity chip of Texas Instruments.
  3. *
  4. * This sub-module of FM driver is common for FM RX and TX
  5. * functionality. This module is responsible for:
  6. * 1) Forming group of Channel-8 commands to perform particular
  7. * functionality (eg., frequency set require more than
  8. * one Channel-8 command to be sent to the chip).
  9. * 2) Sending each Channel-8 command to the chip and reading
  10. * response back over Shared Transport.
  11. * 3) Managing TX and RX Queues and Tasklets.
  12. * 4) Handling FM Interrupt packet and taking appropriate action.
  13. * 5) Loading FM firmware to the chip (common, FM TX, and FM RX
  14. * firmware files based on mode selection)
  15. *
  16. * Copyright (C) 2011 Texas Instruments
  17. * Author: Raja Mani <raja_mani@ti.com>
  18. * Author: Manjunatha Halli <manjunatha_halli@ti.com>
  19. *
  20. * This program is free software; you can redistribute it and/or modify
  21. * it under the terms of the GNU General Public License version 2 as
  22. * published by the Free Software Foundation.
  23. *
  24. * This program is distributed in the hope that it will be useful,
  25. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  26. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  27. * GNU General Public License for more details.
  28. *
  29. * You should have received a copy of the GNU General Public License
  30. * along with this program; if not, write to the Free Software
  31. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  32. *
  33. */
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/ti_wilink_st.h>

#include "fmdrv.h"
#include "fmdrv_v4l2.h"
#include "fmdrv_common.h"
#include "fmdrv_rx.h"
#include "fmdrv_tx.h"
/*
 * Region info: per-region band limits and channel spacing.
 * Indexed by region number (0 = Europe/US, 1 = Japan), matching the
 * default_radio_region module parameter and the fm_band field.
 * Frequencies are in kHz.
 */
static struct region_info region_configs[] = {
	/* Europe/US */
	{
		.chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
		.bot_freq = 87500,	/* 87.5 MHz */
		.top_freq = 108000,	/* 108 MHz */
		.fm_band = 0,
	},
	/* Japan */
	{
		.chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
		.bot_freq = 76000,	/* 76 MHz */
		.top_freq = 90000,	/* 90 MHz */
		.fm_band = 1,
	},
};
  60. /* Band selection */
  61. static u8 default_radio_region; /* Europe/US */
  62. module_param(default_radio_region, byte, 0);
  63. MODULE_PARM_DESC(default_radio_region, "Region: 0=Europe/US, 1=Japan");
  64. /* RDS buffer blocks */
  65. static u32 default_rds_buf = 300;
  66. module_param(default_rds_buf, uint, 0444);
  67. MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
  68. /* Radio Nr */
  69. static u32 radio_nr = -1;
  70. module_param(radio_nr, int, 0444);
  71. MODULE_PARM_DESC(radio_nr, "Radio Nr");
/*
 * FM irq handlers forward declaration.
 * These are the stages of the interrupt-handling state machine; the
 * definitions below are invoked in sequence through int_handler_table[].
 */
static void fm_irq_send_flag_getcmd(struct fmdev *);
static void fm_irq_handle_flag_getcmd_resp(struct fmdev *);
static void fm_irq_handle_hw_malfunction(struct fmdev *);
static void fm_irq_handle_rds_start(struct fmdev *);
static void fm_irq_send_rdsdata_getcmd(struct fmdev *);
static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *);
static void fm_irq_handle_rds_finish(struct fmdev *);
static void fm_irq_handle_tune_op_ended(struct fmdev *);
static void fm_irq_handle_power_enb(struct fmdev *);
static void fm_irq_handle_low_rssi_start(struct fmdev *);
static void fm_irq_afjump_set_pi(struct fmdev *);
static void fm_irq_handle_set_pi_resp(struct fmdev *);
static void fm_irq_afjump_set_pimask(struct fmdev *);
static void fm_irq_handle_set_pimask_resp(struct fmdev *);
static void fm_irq_afjump_setfreq(struct fmdev *);
static void fm_irq_handle_setfreq_resp(struct fmdev *);
static void fm_irq_afjump_enableint(struct fmdev *);
static void fm_irq_afjump_enableint_resp(struct fmdev *);
static void fm_irq_start_afjump(struct fmdev *);
static void fm_irq_handle_start_afjump_resp(struct fmdev *);
static void fm_irq_afjump_rd_freq(struct fmdev *);
static void fm_irq_afjump_rd_freq_resp(struct fmdev *);
static void fm_irq_handle_low_rssi_finish(struct fmdev *);
static void fm_irq_send_intmsk_cmd(struct fmdev *);
static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *);
/*
 * When FM common module receives interrupt packet, following handlers
 * will be executed one after another to service the interrupt(s).
 *
 * NOTE: the order and count of these indices MUST stay in sync with
 * int_handler_table[] below -- each enumerator is used as a direct
 * index into that table (irq_info.stage).
 */
enum fmc_irq_handler_index {
	FM_SEND_FLAG_GETCMD_IDX,
	FM_HANDLE_FLAG_GETCMD_RESP_IDX,

	/* HW malfunction irq handler */
	FM_HW_MAL_FUNC_IDX,

	/* RDS threshold reached irq handler */
	FM_RDS_START_IDX,
	FM_RDS_SEND_RDS_GETCMD_IDX,
	FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX,
	FM_RDS_FINISH_IDX,

	/* Tune operation ended irq handler */
	FM_HW_TUNE_OP_ENDED_IDX,

	/* TX power enable irq handler */
	FM_HW_POWER_ENB_IDX,

	/* Low RSSI irq handler (AF jump sequence) */
	FM_LOW_RSSI_START_IDX,
	FM_AF_JUMP_SETPI_IDX,
	FM_AF_JUMP_HANDLE_SETPI_RESP_IDX,
	FM_AF_JUMP_SETPI_MASK_IDX,
	FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX,
	FM_AF_JUMP_SET_AF_FREQ_IDX,
	FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX,
	FM_AF_JUMP_ENABLE_INT_IDX,
	FM_AF_JUMP_ENABLE_INT_RESP_IDX,
	FM_AF_JUMP_START_AFJUMP_IDX,
	FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX,
	FM_AF_JUMP_RD_FREQ_IDX,
	FM_AF_JUMP_RD_FREQ_RESP_IDX,
	FM_LOW_RSSI_FINISH_IDX,

	/* Interrupt process post action */
	FM_SEND_INTMSK_CMD_IDX,
	FM_HANDLE_INTMSK_CMD_RESP_IDX,
};
/*
 * FM interrupt handler table.
 * Entry order MUST match enum fmc_irq_handler_index above; the state
 * machine calls handlers[irq_info.stage] directly.
 */
static int_handler_prototype int_handler_table[] = {
	fm_irq_send_flag_getcmd,
	fm_irq_handle_flag_getcmd_resp,
	fm_irq_handle_hw_malfunction,
	fm_irq_handle_rds_start, /* RDS threshold reached irq handler */
	fm_irq_send_rdsdata_getcmd,
	fm_irq_handle_rdsdata_getcmd_resp,
	fm_irq_handle_rds_finish,
	fm_irq_handle_tune_op_ended,
	fm_irq_handle_power_enb, /* TX power enable irq handler */
	fm_irq_handle_low_rssi_start,
	fm_irq_afjump_set_pi,
	fm_irq_handle_set_pi_resp,
	fm_irq_afjump_set_pimask,
	fm_irq_handle_set_pimask_resp,
	fm_irq_afjump_setfreq,
	fm_irq_handle_setfreq_resp,
	fm_irq_afjump_enableint,
	fm_irq_afjump_enableint_resp,
	fm_irq_start_afjump,
	fm_irq_handle_start_afjump_resp,
	fm_irq_afjump_rd_freq,
	fm_irq_afjump_rd_freq_resp,
	fm_irq_handle_low_rssi_finish,
	fm_irq_send_intmsk_cmd, /* Interrupt process post action */
	fm_irq_handle_intmsk_cmd_resp
};
/* Write function hook into the ST (shared transport) driver; used by
 * send_tasklet() to push FM Channel-8 packets to the chip. Presumably
 * assigned when this driver registers with ST -- registration code is
 * not visible in this chunk; verify before relying on it being set. */
long (*g_st_write) (struct sk_buff *skb);

/* Completed when ST registration finishes -- completer not visible here;
 * TODO(review): confirm against the ST registration callback. */
static struct completion wait_for_fmdrv_reg_comp;
  165. static inline void fm_irq_call(struct fmdev *fmdev)
  166. {
  167. fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
  168. }
  169. /* Continue next function in interrupt handler table */
  170. static inline void fm_irq_call_stage(struct fmdev *fmdev, u8 stage)
  171. {
  172. fmdev->irq_info.stage = stage;
  173. fm_irq_call(fmdev);
  174. }
  175. static inline void fm_irq_timeout_stage(struct fmdev *fmdev, u8 stage)
  176. {
  177. fmdev->irq_info.stage = stage;
  178. mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
  179. }
#ifdef FM_DUMP_TXRX_PKT
/*
 * To dump outgoing FM Channel-8 packets (debug build only).
 * Prints the command header fields, then up to the first 14 payload
 * bytes; a leading '*' marks fire-and-forget commands (no completion).
 */
inline void dump_tx_skb_data(struct sk_buff *skb)
{
	int len, len_org;
	u8 index;
	struct fm_cmd_msg_hdr *cmd_hdr;

	cmd_hdr = (struct fm_cmd_msg_hdr *)skb->data;
	printk(KERN_INFO "<<%shdr:%02x len:%02x opcode:%02x type:%s dlen:%02x",
		fm_cb(skb)->completion ? " " : "*", cmd_hdr->hdr,
		cmd_hdr->len, cmd_hdr->op,
		cmd_hdr->rd_wr ? "RD" : "WR", cmd_hdr->dlen);

	len_org = skb->len - FM_CMD_MSG_HDR_SIZE;
	if (len_org > 0) {
		printk("\n   data(%d): ", cmd_hdr->dlen);
		len = min(len_org, 14);		/* cap the hexdump at 14 bytes */
		for (index = 0; index < len; index++)
			printk("%x ",
			       skb->data[FM_CMD_MSG_HDR_SIZE + index]);
		printk("%s", (len_org > 14) ? ".." : "");	/* truncation marker */
	}
	printk("\n");
}
/*
 * To dump incoming FM Channel-8 packets (debug build only).
 * Prints the event header fields, then up to the first 14 payload bytes.
 */
inline void dump_rx_skb_data(struct sk_buff *skb)
{
	int len, len_org;
	u8 index;
	struct fm_event_msg_hdr *evt_hdr;

	evt_hdr = (struct fm_event_msg_hdr *)skb->data;
	printk(KERN_INFO ">> hdr:%02x len:%02x sts:%02x numhci:%02x "
	    "opcode:%02x type:%s dlen:%02x", evt_hdr->hdr, evt_hdr->len,
	    evt_hdr->status, evt_hdr->num_fm_hci_cmds, evt_hdr->op,
	    (evt_hdr->rd_wr) ? "RD" : "WR", evt_hdr->dlen);

	len_org = skb->len - FM_EVT_MSG_HDR_SIZE;
	if (len_org > 0) {
		printk("\n   data(%d): ", evt_hdr->dlen);
		len = min(len_org, 14);		/* cap the hexdump at 14 bytes */
		for (index = 0; index < len; index++)
			printk("%x ",
			       skb->data[FM_EVT_MSG_HDR_SIZE + index]);
		printk("%s", (len_org > 14) ? ".." : "");	/* truncation marker */
	}
	printk("\n");
}
#endif
/*
 * Copy the selected region's band limits/channel spacing into RX state.
 * NOTE(review): region_to_set is used as a raw index into
 * region_configs[] (2 entries) with no bounds check -- callers are
 * presumably validated upstream; confirm before passing wider values.
 */
void fmc_update_region_info(struct fmdev *fmdev, u8 region_to_set)
{
	fmdev->rx.region = region_configs[region_to_set];
}
/*
 * FM common sub-module will schedule this tasklet whenever it receives
 * FM packet from ST driver.
 *
 * Drains fmdev->rx_q and dispatches each packet:
 *  - FM interrupt packets start (or mark pending) the irq handler
 *    state machine;
 *  - responses matching the last sent opcode wake the synchronous
 *    waiter (fmc_send_cmd) or feed the current irq stage handler.
 */
static void recv_tasklet(unsigned long arg)
{
	struct fmdev *fmdev;
	struct fm_irq *irq_info;
	struct fm_event_msg_hdr *evt_hdr;
	struct sk_buff *skb;
	u8 num_fm_hci_cmds;
	unsigned long flags;

	fmdev = (struct fmdev *)arg;
	irq_info = &fmdev->irq_info;

	/* Process all packets in the RX queue */
	while ((skb = skb_dequeue(&fmdev->rx_q))) {
		/* Too short to even hold an event header? Drop it. */
		if (skb->len < sizeof(struct fm_event_msg_hdr)) {
			fmerr("skb(%p) has only %d bytes, "
				"at least need %zu bytes to decode\n", skb,
				skb->len, sizeof(struct fm_event_msg_hdr));
			kfree_skb(skb);
			continue;
		}

		evt_hdr = (void *)skb->data;
		num_fm_hci_cmds = evt_hdr->num_fm_hci_cmds;

		/* FM interrupt packet? */
		if (evt_hdr->op == FM_INTERRUPT) {
			/* FM interrupt handler started already? */
			if (!test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
				set_bit(FM_INTTASK_RUNNING, &fmdev->flag);
				if (irq_info->stage != 0) {
					fmerr("Inval stage resetting to zero\n");
					irq_info->stage = 0;
				}

				/*
				 * Execute first function in interrupt handler
				 * table.
				 */
				irq_info->handlers[irq_info->stage](fmdev);
			} else {
				/* Already servicing one; remember to re-run */
				set_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag);
			}
			kfree_skb(skb);
		}
		/* Anyone waiting for this with completion handler? */
		else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp != NULL) {
			/* Park the skb for the waiter; it frees the skb. */
			spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
			fmdev->resp_skb = skb;
			spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
			complete(fmdev->resp_comp);

			fmdev->resp_comp = NULL;
			atomic_set(&fmdev->tx_cnt, 1);	/* restore TX credit */
		}
		/* Is this for interrupt handler? */
		else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp == NULL) {
			if (fmdev->resp_skb != NULL)
				fmerr("Response SKB ptr not NULL\n");

			spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
			fmdev->resp_skb = skb;
			spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

			/* Execute interrupt handler where state index points */
			irq_info->handlers[irq_info->stage](fmdev);

			kfree_skb(skb);
			atomic_set(&fmdev->tx_cnt, 1);	/* restore TX credit */
		} else {
			/* NOTE(review): message says "purging" but the skb is
			 * not freed on this path -- looks like a leak; verify. */
			fmerr("Nobody claimed SKB(%p),purging\n", skb);
		}

		/*
		 * Check flow control field. If Num_FM_HCI_Commands field is
		 * not zero, schedule FM TX tasklet.
		 */
		if (num_fm_hci_cmds && atomic_read(&fmdev->tx_cnt))
			if (!skb_queue_empty(&fmdev->tx_q))
				tasklet_schedule(&fmdev->tx_task);
	}
}
  306. /* FM send tasklet: is scheduled when FM packet has to be sent to chip */
  307. static void send_tasklet(unsigned long arg)
  308. {
  309. struct fmdev *fmdev;
  310. struct sk_buff *skb;
  311. int len;
  312. fmdev = (struct fmdev *)arg;
  313. if (!atomic_read(&fmdev->tx_cnt))
  314. return;
  315. /* Check, is there any timeout happenned to last transmitted packet */
  316. if ((jiffies - fmdev->last_tx_jiffies) > FM_DRV_TX_TIMEOUT) {
  317. fmerr("TX timeout occurred\n");
  318. atomic_set(&fmdev->tx_cnt, 1);
  319. }
  320. /* Send queued FM TX packets */
  321. skb = skb_dequeue(&fmdev->tx_q);
  322. if (!skb)
  323. return;
  324. atomic_dec(&fmdev->tx_cnt);
  325. fmdev->pre_op = fm_cb(skb)->fm_op;
  326. if (fmdev->resp_comp != NULL)
  327. fmerr("Response completion handler is not NULL\n");
  328. fmdev->resp_comp = fm_cb(skb)->completion;
  329. /* Write FM packet to ST driver */
  330. len = g_st_write(skb);
  331. if (len < 0) {
  332. kfree_skb(skb);
  333. fmdev->resp_comp = NULL;
  334. fmerr("TX tasklet failed to send skb(%p)\n", skb);
  335. atomic_set(&fmdev->tx_cnt, 1);
  336. } else {
  337. fmdev->last_tx_jiffies = jiffies;
  338. }
  339. }
/*
 * Queues FM Channel-8 packet to FM TX queue and schedules FM TX tasklet for
 * transmission.
 *
 * @fm_op:   FM opcode; must be below FM_INTERRUPT.
 * @type:    read/write flag stored in the command header.
 * @payload: optional command payload. WARNING: on the normal (non-firmware)
 *           path the first u16 of the caller's buffer is byte-swapped IN
 *           PLACE to big endian below -- the caller's copy is modified.
 * @wait_completion: completion the RX tasklet signals when the matching
 *           response arrives; NULL for irq-state-machine driven commands.
 *
 * Returns 0 on success or a negative errno value (carried in a u32 --
 * historical signature; callers test for non-zero).
 */
static u32 fm_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
		int payload_len, struct completion *wait_completion)
{
	struct sk_buff *skb;
	struct fm_cmd_msg_hdr *hdr;
	int size;

	if (fm_op >= FM_INTERRUPT) {
		fmerr("Invalid fm opcode - %d\n", fm_op);
		return -EINVAL;
	}
	/* Firmware download packets carry their own header in the payload */
	if (test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) && payload == NULL) {
		fmerr("Payload data is NULL during fw download\n");
		return -EINVAL;
	}
	if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag))
		size =
		    FM_CMD_MSG_HDR_SIZE + ((payload == NULL) ? 0 : payload_len);
	else
		size = payload_len;

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb) {
		fmerr("No memory to create new SKB\n");
		return -ENOMEM;
	}
	/*
	 * Don't fill FM header info for the commands which come from
	 * FM firmware file.
	 */
	if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) ||
			test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
		/* Fill command header info */
		hdr = (struct fm_cmd_msg_hdr *)skb_put(skb, FM_CMD_MSG_HDR_SIZE);
		hdr->hdr = FM_PKT_LOGICAL_CHAN_NUMBER;	/* 0x08 */

		/* 3 (fm_opcode,rd_wr,dlen) + payload len) */
		hdr->len = ((payload == NULL) ? 0 : payload_len) + 3;

		/* FM opcode */
		hdr->op = fm_op;

		/* read/write type */
		hdr->rd_wr = type;
		hdr->dlen = payload_len;
		fm_cb(skb)->fm_op = fm_op;

		/*
		 * If firmware download has finished and the command is
		 * not a read command then payload is != NULL - a write
		 * command with u16 payload - convert to be16
		 */
		if (payload != NULL)
			*(u16 *)payload = cpu_to_be16(*(u16 *)payload);

	} else if (payload != NULL) {
		/* Firmware path: opcode lives at byte 2 of the raw packet */
		fm_cb(skb)->fm_op = *((u8 *)payload + 2);
	}
	if (payload != NULL)
		memcpy(skb_put(skb, payload_len), payload, payload_len);

	fm_cb(skb)->completion = wait_completion;
	skb_queue_tail(&fmdev->tx_q, skb);
	tasklet_schedule(&fmdev->tx_task);

	return 0;
}
  402. /* Sends FM Channel-8 command to the chip and waits for the response */
  403. u32 fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
  404. unsigned int payload_len, void *response, int *response_len)
  405. {
  406. struct sk_buff *skb;
  407. struct fm_event_msg_hdr *evt_hdr;
  408. unsigned long flags;
  409. u32 ret;
  410. init_completion(&fmdev->maintask_comp);
  411. ret = fm_send_cmd(fmdev, fm_op, type, payload, payload_len,
  412. &fmdev->maintask_comp);
  413. if (ret)
  414. return ret;
  415. ret = wait_for_completion_timeout(&fmdev->maintask_comp, FM_DRV_TX_TIMEOUT);
  416. if (!ret) {
  417. fmerr("Timeout(%d sec),didn't get reg"
  418. "completion signal from RX tasklet\n",
  419. jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
  420. return -ETIMEDOUT;
  421. }
  422. if (!fmdev->resp_skb) {
  423. fmerr("Reponse SKB is missing\n");
  424. return -EFAULT;
  425. }
  426. spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
  427. skb = fmdev->resp_skb;
  428. fmdev->resp_skb = NULL;
  429. spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
  430. evt_hdr = (void *)skb->data;
  431. if (evt_hdr->status != 0) {
  432. fmerr("Received event pkt status(%d) is not zero\n",
  433. evt_hdr->status);
  434. kfree_skb(skb);
  435. return -EIO;
  436. }
  437. /* Send response data to caller */
  438. if (response != NULL && response_len != NULL && evt_hdr->dlen) {
  439. /* Skip header info and copy only response data */
  440. skb_pull(skb, sizeof(struct fm_event_msg_hdr));
  441. memcpy(response, skb->data, evt_hdr->dlen);
  442. *response_len = evt_hdr->dlen;
  443. } else if (response_len != NULL && evt_hdr->dlen == 0) {
  444. *response_len = 0;
  445. }
  446. kfree_skb(skb);
  447. return 0;
  448. }
  449. /* --- Helper functions used in FM interrupt handlers ---*/
  450. static inline u32 check_cmdresp_status(struct fmdev *fmdev,
  451. struct sk_buff **skb)
  452. {
  453. struct fm_event_msg_hdr *fm_evt_hdr;
  454. unsigned long flags;
  455. del_timer(&fmdev->irq_info.timer);
  456. spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
  457. *skb = fmdev->resp_skb;
  458. fmdev->resp_skb = NULL;
  459. spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
  460. fm_evt_hdr = (void *)(*skb)->data;
  461. if (fm_evt_hdr->status != 0) {
  462. fmerr("irq: opcode %x response status is not zero "
  463. "Initiating irq recovery process\n",
  464. fm_evt_hdr->op);
  465. mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
  466. return -1;
  467. }
  468. return 0;
  469. }
  470. static inline void fm_irq_common_cmd_resp_helper(struct fmdev *fmdev, u8 stage)
  471. {
  472. struct sk_buff *skb;
  473. if (!check_cmdresp_status(fmdev, &skb))
  474. fm_irq_call_stage(fmdev, stage);
  475. }
  476. /*
  477. * Interrupt process timeout handler.
  478. * One of the irq handler did not get proper response from the chip. So take
  479. * recovery action here. FM interrupts are disabled in the beginning of
  480. * interrupt process. Therefore reset stage index to re-enable default
  481. * interrupts. So that next interrupt will be processed as usual.
  482. */
  483. static void int_timeout_handler(unsigned long data)
  484. {
  485. struct fmdev *fmdev;
  486. struct fm_irq *fmirq;
  487. fmdbg("irq: timeout,trying to re-enable fm interrupts\n");
  488. fmdev = (struct fmdev *)data;
  489. fmirq = &fmdev->irq_info;
  490. fmirq->retry++;
  491. if (fmirq->retry > FM_IRQ_TIMEOUT_RETRY_MAX) {
  492. /* Stop recovery action (interrupt reenable process) and
  493. * reset stage index & retry count values */
  494. fmirq->stage = 0;
  495. fmirq->retry = 0;
  496. fmerr("Recovery action failed during"
  497. "irq processing, max retry reached\n");
  498. return;
  499. }
  500. fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
  501. }
  502. /* --------- FM interrupt handlers ------------*/
  503. static void fm_irq_send_flag_getcmd(struct fmdev *fmdev)
  504. {
  505. u16 flag;
  506. /* Send FLAG_GET command , to know the source of interrupt */
  507. if (!fm_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, sizeof(flag), NULL))
  508. fm_irq_timeout_stage(fmdev, FM_HANDLE_FLAG_GETCMD_RESP_IDX);
  509. }
/*
 * Parse the FLAG_GET response: cache the (big endian) flag register in
 * irq_info.flag and move on to the per-event handlers.
 */
static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	struct fm_event_msg_hdr *fm_evt_hdr;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	fm_evt_hdr = (void *)skb->data;

	/* Skip header info and copy only response data */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	/* NOTE(review): copies dlen bytes from the chip into a u16 field;
	 * assumes the chip reports dlen == 2 for FLAG_GET -- confirm. */
	memcpy(&fmdev->irq_info.flag, skb->data, fm_evt_hdr->dlen);

	fmdev->irq_info.flag = be16_to_cpu(fmdev->irq_info.flag);
	fmdbg("irq: flag register(0x%x)\n", fmdev->irq_info.flag);

	/* Continue next function in interrupt handler table */
	fm_irq_call_stage(fmdev, FM_HW_MAL_FUNC_IDX);
}
  525. static void fm_irq_handle_hw_malfunction(struct fmdev *fmdev)
  526. {
  527. if (fmdev->irq_info.flag & FM_MAL_EVENT & fmdev->irq_info.mask)
  528. fmerr("irq: HW MAL int received - do nothing\n");
  529. /* Continue next function in interrupt handler table */
  530. fm_irq_call_stage(fmdev, FM_RDS_START_IDX);
  531. }
  532. static void fm_irq_handle_rds_start(struct fmdev *fmdev)
  533. {
  534. if (fmdev->irq_info.flag & FM_RDS_EVENT & fmdev->irq_info.mask) {
  535. fmdbg("irq: rds threshold reached\n");
  536. fmdev->irq_info.stage = FM_RDS_SEND_RDS_GETCMD_IDX;
  537. } else {
  538. /* Continue next function in interrupt handler table */
  539. fmdev->irq_info.stage = FM_HW_TUNE_OP_ENDED_IDX;
  540. }
  541. fm_irq_call(fmdev);
  542. }
  543. static void fm_irq_send_rdsdata_getcmd(struct fmdev *fmdev)
  544. {
  545. /* Send the command to read RDS data from the chip */
  546. if (!fm_send_cmd(fmdev, RDS_DATA_GET, REG_RD, NULL,
  547. (FM_RX_RDS_FIFO_THRESHOLD * 3), NULL))
  548. fm_irq_timeout_stage(fmdev, FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX);
  549. }
/*
 * Keeps track of current RX channel AF (Alternate Frequency).
 *
 * Called with each AF code extracted from RDS group 0A. An AF count
 * code (FM_RDS_1_AF_FOLLOWS..FM_RDS_25_AF_FOLLOWS) resets the cache and
 * records how many AFs to expect; a frequency code is range-checked for
 * the active band, converted to kHz, and appended to af_cache[] if not
 * already present and the list limit has not been reached.
 */
static void fm_rx_update_af_cache(struct fmdev *fmdev, u8 af)
{
	struct tuned_station_info *stat_info = &fmdev->rx.stat_info;
	u8 reg_idx = fmdev->rx.region.fm_band;
	u8 index;
	u32 freq;

	/* First AF indicates the number of AF follows. Reset the list */
	if ((af >= FM_RDS_1_AF_FOLLOWS) && (af <= FM_RDS_25_AF_FOLLOWS)) {
		fmdev->rx.stat_info.af_list_max = (af - FM_RDS_1_AF_FOLLOWS + 1);
		fmdev->rx.stat_info.afcache_size = 0;
		fmdbg("No of expected AF : %d\n", fmdev->rx.stat_info.af_list_max);
		return;
	}

	/* Reject AF codes outside the valid range for the current band */
	if (af < FM_RDS_MIN_AF)
		return;
	if (reg_idx == FM_BAND_EUROPE_US && af > FM_RDS_MAX_AF)
		return;
	if (reg_idx == FM_BAND_JAPAN && af > FM_RDS_MAX_AF_JAPAN)
		return;

	/* AF code N means bot_freq + N * 100 kHz */
	freq = fmdev->rx.region.bot_freq + (af * 100);
	if (freq == fmdev->rx.freq) {
		fmdbg("Current freq(%d) is matching with received AF(%d)\n",
				fmdev->rx.freq, freq);
		return;
	}
	/* Do check in AF cache */
	for (index = 0; index < stat_info->afcache_size; index++) {
		if (stat_info->af_cache[index] == freq)
			break;	/* already cached */
	}
	/* Reached the limit of the list - ignore the next AF */
	if (index == stat_info->af_list_max) {
		fmdbg("AF cache is full\n");
		return;
	}
	/*
	 * If we reached the end of the list then this AF is not
	 * in the list - add it.
	 */
	if (index == stat_info->afcache_size) {
		fmdbg("Storing AF %d to cache index %d\n", freq, index);
		stat_info->af_cache[index] = freq;
		stat_info->afcache_size++;
	}
}
  596. /*
  597. * Converts RDS buffer data from big endian format
  598. * to little endian format.
  599. */
  600. static void fm_rdsparse_swapbytes(struct fmdev *fmdev,
  601. struct fm_rdsdata_format *rds_format)
  602. {
  603. u8 byte1;
  604. u8 index = 0;
  605. u8 *rds_buff;
  606. /*
  607. * Since in Orca the 2 RDS Data bytes are in little endian and
  608. * in Dolphin they are in big endian, the parsing of the RDS data
  609. * is chip dependent
  610. */
  611. if (fmdev->asci_id != 0x6350) {
  612. rds_buff = &rds_format->data.groupdatabuff.buff[0];
  613. while (index + 1 < FM_RX_RDS_INFO_FIELD_MAX) {
  614. byte1 = rds_buff[index];
  615. rds_buff[index] = rds_buff[index + 1];
  616. rds_buff[index + 1] = byte1;
  617. index += 2;
  618. }
  619. }
  620. }
/*
 * Parse the RDS_DATA_GET response.
 *
 * Two passes over the 3-byte RDS blocks in the response:
 *  1) decode block sequence A..D, cache the PI code, and on a complete
 *     group extract AFs from group type 0A;
 *  2) repack the raw blocks into the V4L2 RDS ring buffer and wake any
 *     reader blocked on read_queue.
 */
static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	struct fm_rdsdata_format rds_fmt;
	struct fm_rds *rds = &fmdev->rx.rds;
	unsigned long group_idx, flags;
	u8 *rds_data, meta_data, tmpbuf[3];
	u8 type, blk_idx;
	u16 cur_picode;
	u32 rds_len;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/* Skip header info */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	rds_data = skb->data;
	rds_len = skb->len;

	/* Parse the RDS data */
	while (rds_len >= FM_RDS_BLK_SIZE) {
		meta_data = rds_data[2];	/* status/offset byte */
		/* Get the type: 0=A, 1=B, 2=C, 3=C', 4=D, 5=E */
		type = (meta_data & 0x07);

		/* Transform the blk type into index sequence (0, 1, 2, 3, 4) */
		blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
		fmdbg("Block index:%d(%s)\n", blk_idx,
				(meta_data & FM_RDS_STATUS_ERR_MASK) ? "Bad" : "Ok");

		/* Stop on a corrupted block */
		if ((meta_data & FM_RDS_STATUS_ERR_MASK) != 0)
			break;

		if (blk_idx < FM_RDS_BLK_IDX_A || blk_idx > FM_RDS_BLK_IDX_D) {
			fmdbg("Block sequence mismatch\n");
			rds->last_blk_idx = -1;
			break;
		}

		/* Skip checkword (control) byte and copy only data byte */
		memcpy(&rds_fmt.data.groupdatabuff.
				buff[blk_idx * (FM_RDS_BLK_SIZE - 1)],
				rds_data, (FM_RDS_BLK_SIZE - 1));

		rds->last_blk_idx = blk_idx;

		/* If completed a whole group then handle it */
		if (blk_idx == FM_RDS_BLK_IDX_D) {
			fmdbg("Good block received\n");
			fm_rdsparse_swapbytes(fmdev, &rds_fmt);

			/*
			 * Extract PI code and store in local cache.
			 * We need this during AF switch processing.
			 */
			cur_picode = be16_to_cpu(rds_fmt.data.groupgeneral.pidata);
			if (fmdev->rx.stat_info.picode != cur_picode)
				fmdev->rx.stat_info.picode = cur_picode;

			fmdbg("picode:%d\n", cur_picode);

			/* Group type is the top 5 bits of block B */
			group_idx = (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
			fmdbg("(fmdrv):Group:%ld%s\n", group_idx/2,
					(group_idx % 2) ? "B" : "A");

			group_idx = 1 << (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
			if (group_idx == FM_RDS_GROUP_TYPE_MASK_0A) {
				/* Group 0A carries two AF codes */
				fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[0]);
				fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[1]);
			}
		}
		rds_len -= FM_RDS_BLK_SIZE;
		rds_data += FM_RDS_BLK_SIZE;
	}

	/* Copy raw rds data to internal rds buffer */
	rds_data = skb->data;
	rds_len = skb->len;

	spin_lock_irqsave(&fmdev->rds_buff_lock, flags);
	while (rds_len > 0) {
		/*
		 * Fill RDS buffer as per V4L2 specification.
		 * Store control byte
		 */
		type = (rds_data[2] & 0x07);
		blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
		tmpbuf[2] = blk_idx;	/* Offset name */
		tmpbuf[2] |= blk_idx << 3;	/* Received offset */

		/* Store data byte */
		tmpbuf[0] = rds_data[0];
		tmpbuf[1] = rds_data[1];

		memcpy(&rds->buff[rds->wr_idx], &tmpbuf, FM_RDS_BLK_SIZE);
		rds->wr_idx = (rds->wr_idx + FM_RDS_BLK_SIZE) % rds->buf_size;

		/* Check for overflow & start over */
		if (rds->wr_idx == rds->rd_idx) {
			fmdbg("RDS buffer overflow\n");
			rds->wr_idx = 0;
			rds->rd_idx = 0;
			break;
		}
		rds_len -= FM_RDS_BLK_SIZE;
		rds_data += FM_RDS_BLK_SIZE;
	}
	spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);

	/* Wakeup read queue */
	if (rds->wr_idx != rds->rd_idx)
		wake_up_interruptible(&rds->read_queue);

	fm_irq_call_stage(fmdev, FM_RDS_FINISH_IDX);
}
/* IRQ stage handler: RDS processing done; advance to the tune-op-ended stage. */
static void fm_irq_handle_rds_finish(struct fmdev *fmdev)
{
	fm_irq_call_stage(fmdev, FM_HW_TUNE_OP_ENDED_IDX);
}
/*
 * IRQ stage handler: tune-ended / band-limit events.
 * If an unmasked FR (frequency ready) or BL (band limit) event is pending:
 * either continue an in-progress AF switch (read back the tuned frequency
 * next) or complete the waiting mainline task. Otherwise fall through to
 * the power-enable stage.
 */
static void fm_irq_handle_tune_op_ended(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & (FM_FR_EVENT | FM_BL_EVENT) & fmdev->
	    irq_info.mask) {
		fmdbg("irq: tune ended/bandlimit reached\n");
		if (test_and_clear_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag)) {
			/* The tune was issued by the AF jump path; verify it */
			fmdev->irq_info.stage = FM_AF_JUMP_RD_FREQ_IDX;
		} else {
			/* Normal tune/seek finished; wake up the caller */
			complete(&fmdev->maintask_comp);
			fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;
		}
	} else
		fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;

	fm_irq_call(fmdev);
}
/*
 * IRQ stage handler: power enable/disable event.
 * Completes the mainline task waiting on the power transition, then
 * advances to the low-RSSI stage.
 */
static void fm_irq_handle_power_enb(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_POW_ENB_EVENT) {
		fmdbg("irq: Power Enabled/Disabled\n");
		complete(&fmdev->maintask_comp);
	}

	fm_irq_call_stage(fmdev, FM_LOW_RSSI_START_IDX);
}
/*
 * IRQ stage handler: low RSSI event.
 * Starts an AF (alternate frequency) jump only when all preconditions hold:
 * AF switching enabled, an unmasked low-level event pending, a valid tuned
 * frequency, and a non-empty AF cache. Otherwise continues with the normal
 * interrupt-mask restore stage.
 */
static void fm_irq_handle_low_rssi_start(struct fmdev *fmdev)
{
	if ((fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON) &&
	    (fmdev->irq_info.flag & FM_LEV_EVENT & fmdev->irq_info.mask) &&
	    (fmdev->rx.freq != FM_UNDEFINED_FREQ) &&
	    (fmdev->rx.stat_info.afcache_size != 0)) {
		fmdbg("irq: rssi level has fallen below threshold level\n");

		/* Disable further low RSSI interrupts */
		fmdev->irq_info.mask &= ~FM_LEV_EVENT;

		/* Start scanning the AF cache from its first entry */
		fmdev->rx.afjump_idx = 0;
		fmdev->rx.freq_before_jump = fmdev->rx.freq;
		fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
	} else {
		/* Continue next function in interrupt handler table */
		fmdev->irq_info.stage = FM_SEND_INTMSK_CMD_IDX;
	}

	fm_irq_call(fmdev);
}
/* IRQ stage handler: program the cached PI code into the chip before an AF jump. */
static void fm_irq_afjump_set_pi(struct fmdev *fmdev)
{
	u16 payload;

	/* Set PI code - must be updated if the AF list is not empty */
	payload = fmdev->rx.stat_info.picode;
	if (!fm_send_cmd(fmdev, RDS_PI_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_RESP_IDX);
}
/* IRQ stage handler: PI-set response received; move on to PI-mask stage. */
static void fm_irq_handle_set_pi_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SETPI_MASK_IDX);
}
/*
 * Set PI mask.
 * 0xFFFF = Enable PI code matching
 * 0x0000 = Disable PI code matching
 */
static void fm_irq_afjump_set_pimask(struct fmdev *fmdev)
{
	u16 payload;

	/* PI matching is disabled here (0x0000), per the table above */
	payload = 0x0000;
	if (!fm_send_cmd(fmdev, RDS_PI_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX);
}
/* IRQ stage handler: PI-mask response received; move on to AF frequency set. */
static void fm_irq_handle_set_pimask_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SET_AF_FREQ_IDX);
}
  789. static void fm_irq_afjump_setfreq(struct fmdev *fmdev)
  790. {
  791. u16 frq_index;
  792. u16 payload;
  793. fmdbg("Swtich to %d KHz\n", fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx]);
  794. frq_index = (fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx] -
  795. fmdev->rx.region.bot_freq) / FM_FREQ_MUL;
  796. payload = frq_index;
  797. if (!fm_send_cmd(fmdev, AF_FREQ_SET, REG_WR, &payload, sizeof(payload), NULL))
  798. fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX);
  799. }
/* IRQ stage handler: AF frequency-set response received; enable the FR interrupt next. */
static void fm_irq_handle_setfreq_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_ENABLE_INT_IDX);
}
/* IRQ stage handler: unmask only the FR interrupt so the AF tune completion is seen. */
static void fm_irq_afjump_enableint(struct fmdev *fmdev)
{
	u16 payload;

	/* Enable FR (tuning operation ended) interrupt */
	payload = FM_FR_EVENT;
	if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_ENABLE_INT_RESP_IDX);
}
/* IRQ stage handler: interrupt-mask response received; start the AF jump itself. */
static void fm_irq_afjump_enableint_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_START_AFJUMP_IDX);
}
/* IRQ stage handler: put the tuner into AF-jump mode to tune the programmed AF. */
static void fm_irq_start_afjump(struct fmdev *fmdev)
{
	u16 payload;

	payload = FM_TUNER_AF_JUMP_MODE;
	if (!fm_send_cmd(fmdev, TUNER_MODE_SET, REG_WR, &payload,
			sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX);
}
/*
 * IRQ stage handler: AF-jump started.
 * Marks the AF switch as in-progress and releases the interrupt task;
 * the next FR event re-enters the state machine at the flag-get stage.
 */
static void fm_irq_handle_start_afjump_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;
	set_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag);
	clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
}
/*
 * IRQ stage handler: request the currently tuned frequency after an AF jump.
 * 'payload' is never written; it exists only so sizeof(payload) gives the
 * expected response length for the read command.
 */
static void fm_irq_afjump_rd_freq(struct fmdev *fmdev)
{
	u16 payload;

	if (!fm_send_cmd(fmdev, FREQ_SET, REG_RD, NULL, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_RD_FREQ_RESP_IDX);
}
/*
 * IRQ stage handler: frequency read-back after an AF jump.
 * Decides whether the jump succeeded (chip now on the requested AF and no
 * longer on the pre-jump frequency). On failure, retries with the next AF
 * cache entry until the cache is exhausted.
 */
static void fm_irq_afjump_rd_freq_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	u16 read_freq;
	u32 curr_freq, jumped_freq;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/* Skip header info and copy only response data */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	memcpy(&read_freq, skb->data, sizeof(read_freq));
	read_freq = be16_to_cpu(read_freq);
	/* Chip reports a channel index; convert back to an absolute frequency */
	curr_freq = fmdev->rx.region.bot_freq + ((u32)read_freq * FM_FREQ_MUL);

	jumped_freq = fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx];

	/* If the frequency was changed the jump succeeded */
	if ((curr_freq != fmdev->rx.freq_before_jump) && (curr_freq == jumped_freq)) {
		fmdbg("Successfully switched to alternate freq %d\n", curr_freq);
		fmdev->rx.freq = curr_freq;
		fm_rx_reset_rds_cache(fmdev);

		/* AF feature is on, enable low level RSSI interrupt */
		if (fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON)
			fmdev->irq_info.mask |= FM_LEV_EVENT;

		fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
	} else {		/* jump to the next freq in the AF list */
		fmdev->rx.afjump_idx++;

		/* If we reached the end of the list - stop searching */
		if (fmdev->rx.afjump_idx >= fmdev->rx.stat_info.afcache_size) {
			fmdbg("AF switch processing failed\n");
			fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
		} else {	/* AF List is not over - try next one */
			fmdbg("Trying next freq in AF cache\n");
			fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
		}
	}
	fm_irq_call(fmdev);
}
/* IRQ stage handler: AF/low-RSSI processing finished; restore interrupt mask next. */
static void fm_irq_handle_low_rssi_finish(struct fmdev *fmdev)
{
	fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
}
/* IRQ stage handler: write the driver's current interrupt mask back to the chip. */
static void fm_irq_send_intmsk_cmd(struct fmdev *fmdev)
{
	u16 payload;

	/* Re-enable FM interrupts */
	payload = fmdev->irq_info.mask;

	if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
			sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_HANDLE_INTMSK_CMD_RESP_IDX);
}
/*
 * IRQ stage handler: final stage of the interrupt state machine.
 * Resets the stage index and either services a pending interrupt
 * immediately or marks the interrupt task as idle.
 */
static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;

	if (check_cmdresp_status(fmdev, &skb))
		return;
	/*
	 * This is last function in interrupt table to be executed.
	 * So, reset stage index to 0.
	 */
	fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;

	/* Start processing any pending interrupt */
	if (test_and_clear_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag))
		fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
	else
		clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
}
/*
 * Returns availability of RDS data in internal buffer.
 * Registers the caller on the RDS read wait queue for poll(), then
 * returns 0 when data is buffered (rd_idx != wr_idx) or -EAGAIN when empty.
 */
u32 fmc_is_rds_data_available(struct fmdev *fmdev, struct file *file,
				struct poll_table_struct *pts)
{
	poll_wait(file, &fmdev->rx.rds.read_queue, pts);
	if (fmdev->rx.rds.rd_idx != fmdev->rx.rds.wr_idx)
		return 0;

	return -EAGAIN;
}
/*
 * Copies RDS data from internal buffer to user buffer.
 * Blocks (unless O_NONBLOCK) until the IRQ path queues RDS blocks and wakes
 * read_queue, then drains whole FM_RDS_BLK_SIZE blocks from the ring buffer.
 * Returns the number of bytes copied, or a negative errno.
 * NOTE(review): the u32 return type carries negative errnos; callers must
 * interpret the value as signed - confirm against the V4L2 read path.
 */
u32 fmc_transfer_rds_from_internal_buff(struct fmdev *fmdev, struct file *file,
		u8 __user *buf, size_t count)
{
	u32 block_count;
	unsigned long flags;
	int ret;

	if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
		if (file->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;

		ret = wait_event_interruptible(fmdev->rx.rds.read_queue,
				(fmdev->rx.rds.wr_idx != fmdev->rx.rds.rd_idx));
		if (ret)
			return -EINTR;
	}

	/* Calculate block count from byte count */
	count /= 3;	/* NOTE(review): 3 is presumably FM_RDS_BLK_SIZE - confirm */
	block_count = 0;
	ret = 0;

	/* Copy RDS blocks from the internal buffer under buffer lock */
	spin_lock_irqsave(&fmdev->rds_buff_lock, flags);

	while (block_count < count) {
		if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx)
			break;

		/* NOTE(review): copy_to_user() may fault while this spinlock is
		 * held with IRQs off - verify this is safe in this context */
		if (copy_to_user(buf, &fmdev->rx.rds.buff[fmdev->rx.rds.rd_idx],
					FM_RDS_BLK_SIZE))
			break;

		/* Advance read index with wrap-around */
		fmdev->rx.rds.rd_idx += FM_RDS_BLK_SIZE;
		if (fmdev->rx.rds.rd_idx >= fmdev->rx.rds.buf_size)
			fmdev->rx.rds.rd_idx = 0;

		block_count++;
		buf += FM_RDS_BLK_SIZE;
		ret += FM_RDS_BLK_SIZE;	/* bytes copied so far */
	}
	spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);
	return ret;
}
  948. u32 fmc_set_freq(struct fmdev *fmdev, u32 freq_to_set)
  949. {
  950. switch (fmdev->curr_fmmode) {
  951. case FM_MODE_RX:
  952. return fm_rx_set_freq(fmdev, freq_to_set);
  953. case FM_MODE_TX:
  954. return fm_tx_set_freq(fmdev, freq_to_set);
  955. default:
  956. return -EINVAL;
  957. }
  958. }
/*
 * Reads the currently tuned frequency into *cur_tuned_frq.
 * Returns -EPERM when no RX frequency has ever been set, -ENOMEM for a
 * NULL output pointer, -EINVAL for an unknown mode.
 * NOTE(review): the rx.freq guard runs even in TX mode, and before the
 * NULL-pointer check - confirm this error-priority is intended by callers.
 */
u32 fmc_get_freq(struct fmdev *fmdev, u32 *cur_tuned_frq)
{
	if (fmdev->rx.freq == FM_UNDEFINED_FREQ) {
		fmerr("RX frequency is not set\n");
		return -EPERM;
	}
	if (cur_tuned_frq == NULL) {
		fmerr("Invalid memory\n");
		return -ENOMEM;
	}

	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		*cur_tuned_frq = fmdev->rx.freq;
		return 0;

	case FM_MODE_TX:
		*cur_tuned_frq = 0;	/* TODO : Change this later */
		return 0;

	default:
		return -EINVAL;
	}
}
  980. u32 fmc_set_region(struct fmdev *fmdev, u8 region_to_set)
  981. {
  982. switch (fmdev->curr_fmmode) {
  983. case FM_MODE_RX:
  984. return fm_rx_set_region(fmdev, region_to_set);
  985. case FM_MODE_TX:
  986. return fm_tx_set_region(fmdev, region_to_set);
  987. default:
  988. return -EINVAL;
  989. }
  990. }
  991. u32 fmc_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
  992. {
  993. switch (fmdev->curr_fmmode) {
  994. case FM_MODE_RX:
  995. return fm_rx_set_mute_mode(fmdev, mute_mode_toset);
  996. case FM_MODE_TX:
  997. return fm_tx_set_mute_mode(fmdev, mute_mode_toset);
  998. default:
  999. return -EINVAL;
  1000. }
  1001. }
  1002. u32 fmc_set_stereo_mono(struct fmdev *fmdev, u16 mode)
  1003. {
  1004. switch (fmdev->curr_fmmode) {
  1005. case FM_MODE_RX:
  1006. return fm_rx_set_stereo_mono(fmdev, mode);
  1007. case FM_MODE_TX:
  1008. return fm_tx_set_stereo_mono(fmdev, mode);
  1009. default:
  1010. return -EINVAL;
  1011. }
  1012. }
  1013. u32 fmc_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
  1014. {
  1015. switch (fmdev->curr_fmmode) {
  1016. case FM_MODE_RX:
  1017. return fm_rx_set_rds_mode(fmdev, rds_en_dis);
  1018. case FM_MODE_TX:
  1019. return fm_tx_set_rds_mode(fmdev, rds_en_dis);
  1020. default:
  1021. return -EINVAL;
  1022. }
  1023. }
  1024. /* Sends power off command to the chip */
  1025. static u32 fm_power_down(struct fmdev *fmdev)
  1026. {
  1027. u16 payload;
  1028. u32 ret;
  1029. if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
  1030. fmerr("FM core is not ready\n");
  1031. return -EPERM;
  1032. }
  1033. if (fmdev->curr_fmmode == FM_MODE_OFF) {
  1034. fmdbg("FM chip is already in OFF state\n");
  1035. return 0;
  1036. }
  1037. payload = 0x0;
  1038. ret = fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
  1039. sizeof(payload), NULL, NULL);
  1040. if (ret < 0)
  1041. return ret;
  1042. return fmc_release(fmdev);
  1043. }
  1044. /* Reads init command from FM firmware file and loads to the chip */
  1045. static u32 fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
  1046. {
  1047. const struct firmware *fw_entry;
  1048. struct bts_header *fw_header;
  1049. struct bts_action *action;
  1050. struct bts_action_delay *delay;
  1051. u8 *fw_data;
  1052. int ret, fw_len, cmd_cnt;
  1053. cmd_cnt = 0;
  1054. set_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);
  1055. ret = request_firmware(&fw_entry, fw_name,
  1056. &fmdev->radio_dev->dev);
  1057. if (ret < 0) {
  1058. fmerr("Unable to read firmware(%s) content\n", fw_name);
  1059. return ret;
  1060. }
  1061. fmdbg("Firmware(%s) length : %d bytes\n", fw_name, fw_entry->size);
  1062. fw_data = (void *)fw_entry->data;
  1063. fw_len = fw_entry->size;
  1064. fw_header = (struct bts_header *)fw_data;
  1065. if (fw_header->magic != FM_FW_FILE_HEADER_MAGIC) {
  1066. fmerr("%s not a legal TI firmware file\n", fw_name);
  1067. ret = -EINVAL;
  1068. goto rel_fw;
  1069. }
  1070. fmdbg("FW(%s) magic number : 0x%x\n", fw_name, fw_header->magic);
  1071. /* Skip file header info , we already verified it */
  1072. fw_data += sizeof(struct bts_header);
  1073. fw_len -= sizeof(struct bts_header);
  1074. while (fw_data && fw_len > 0) {
  1075. action = (struct bts_action *)fw_data;
  1076. switch (action->type) {
  1077. case ACTION_SEND_COMMAND: /* Send */
  1078. if (fmc_send_cmd(fmdev, 0, 0, action->data,
  1079. action->size, NULL, NULL))
  1080. goto rel_fw;
  1081. cmd_cnt++;
  1082. break;
  1083. case ACTION_DELAY: /* Delay */
  1084. delay = (struct bts_action_delay *)action->data;
  1085. mdelay(delay->msec);
  1086. break;
  1087. }
  1088. fw_data += (sizeof(struct bts_action) + (action->size));
  1089. fw_len -= (sizeof(struct bts_action) + (action->size));
  1090. }
  1091. fmdbg("Firmware commands(%d) loaded to chip\n", cmd_cnt);
  1092. rel_fw:
  1093. release_firmware(fw_entry);
  1094. clear_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);
  1095. return ret;
  1096. }
  1097. /* Loads default RX configuration to the chip */
  1098. static u32 load_default_rx_configuration(struct fmdev *fmdev)
  1099. {
  1100. int ret;
  1101. ret = fm_rx_set_volume(fmdev, FM_DEFAULT_RX_VOLUME);
  1102. if (ret < 0)
  1103. return ret;
  1104. return fm_rx_set_rssi_threshold(fmdev, FM_DEFAULT_RSSI_THRESHOLD);
  1105. }
  1106. /* Does FM power on sequence */
  1107. static u32 fm_power_up(struct fmdev *fmdev, u8 mode)
  1108. {
  1109. u16 payload, asic_id, asic_ver;
  1110. int resp_len, ret;
  1111. u8 fw_name[50];
  1112. if (mode >= FM_MODE_ENTRY_MAX) {
  1113. fmerr("Invalid firmware download option\n");
  1114. return -EINVAL;
  1115. }
  1116. /*
  1117. * Initialize FM common module. FM GPIO toggling is
  1118. * taken care in Shared Transport driver.
  1119. */
  1120. ret = fmc_prepare(fmdev);
  1121. if (ret < 0) {
  1122. fmerr("Unable to prepare FM Common\n");
  1123. return ret;
  1124. }
  1125. payload = FM_ENABLE;
  1126. if (fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
  1127. sizeof(payload), NULL, NULL))
  1128. goto rel;
  1129. /* Allow the chip to settle down in Channel-8 mode */
  1130. msleep(20);
  1131. if (fmc_send_cmd(fmdev, ASIC_ID_GET, REG_RD, NULL,
  1132. sizeof(asic_id), &asic_id, &resp_len))
  1133. goto rel;
  1134. if (fmc_send_cmd(fmdev, ASIC_VER_GET, REG_RD, NULL,
  1135. sizeof(asic_ver), &asic_ver, &resp_len))
  1136. goto rel;
  1137. fmdbg("ASIC ID: 0x%x , ASIC Version: %d\n",
  1138. be16_to_cpu(asic_id), be16_to_cpu(asic_ver));
  1139. sprintf(fw_name, "%s_%x.%d.bts", FM_FMC_FW_FILE_START,
  1140. be16_to_cpu(asic_id), be16_to_cpu(asic_ver));
  1141. ret = fm_download_firmware(fmdev, fw_name);
  1142. if (ret < 0) {
  1143. fmdbg("Failed to download firmware file %s\n", fw_name);
  1144. goto rel;
  1145. }
  1146. sprintf(fw_name, "%s_%x.%d.bts", (mode == FM_MODE_RX) ?
  1147. FM_RX_FW_FILE_START : FM_TX_FW_FILE_START,
  1148. be16_to_cpu(asic_id), be16_to_cpu(asic_ver));
  1149. ret = fm_download_firmware(fmdev, fw_name);
  1150. if (ret < 0) {
  1151. fmdbg("Failed to download firmware file %s\n", fw_name);
  1152. goto rel;
  1153. } else
  1154. return ret;
  1155. rel:
  1156. return fmc_release(fmdev);
  1157. }
/*
 * Set FM Modes(TX, RX, OFF).
 * Switching between TX and RX requires an intermediate power-down; after a
 * successful switch into RX, the default RX configuration is loaded.
 */
u32 fmc_set_mode(struct fmdev *fmdev, u8 fm_mode)
{
	int ret = 0;

	if (fm_mode >= FM_MODE_ENTRY_MAX) {
		fmerr("Invalid FM mode\n");
		return -EINVAL;
	}
	if (fmdev->curr_fmmode == fm_mode) {
		fmdbg("Already fm is in mode(%d)\n", fm_mode);
		return ret;
	}

	switch (fm_mode) {
	case FM_MODE_OFF:	/* OFF Mode */
		ret = fm_power_down(fmdev);
		if (ret < 0) {
			fmerr("Failed to set OFF mode\n");
			return ret;
		}
		break;

	case FM_MODE_TX:	/* TX Mode */
	case FM_MODE_RX:	/* RX Mode */
		/* Power down before switching to TX or RX mode */
		if (fmdev->curr_fmmode != FM_MODE_OFF) {
			ret = fm_power_down(fmdev);
			if (ret < 0) {
				fmerr("Failed to set OFF mode\n");
				return ret;
			}
			/* Give the chip time to settle before powering back up */
			msleep(30);
		}
		ret = fm_power_up(fmdev, fm_mode);
		if (ret < 0) {
			fmerr("Failed to load firmware\n");
			return ret;
		}
	}
	fmdev->curr_fmmode = fm_mode;

	/* Set default configuration */
	if (fmdev->curr_fmmode == FM_MODE_RX) {
		fmdbg("Loading default rx configuration..\n");
		ret = load_default_rx_configuration(fmdev);
		if (ret < 0)
			fmerr("Failed to load default values\n");
	}

	return ret;
}
/*
 * Returns current FM mode (TX, RX, OFF) through *fmmode.
 * -EPERM when the core is not up, -ENOMEM for a NULL output pointer.
 */
u32 fmc_get_mode(struct fmdev *fmdev, u8 *fmmode)
{
	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmerr("FM core is not ready\n");
		return -EPERM;
	}
	if (fmmode == NULL) {
		fmerr("Invalid memory\n");
		return -ENOMEM;
	}

	*fmmode = fmdev->curr_fmmode;
	return 0;
}
/*
 * Called by ST layer when FM packet is available.
 * Validates the skb and its channel marker (stashed in cb[0] by ST),
 * re-prepends the channel byte, queues the packet on rx_q and kicks
 * the RX tasklet. Runs in ST's (atomic) receive context.
 */
static long fm_st_receive(void *arg, struct sk_buff *skb)
{
	struct fmdev *fmdev;

	fmdev = (struct fmdev *)arg;

	if (skb == NULL) {
		fmerr("Invalid SKB received from ST\n");
		return -EFAULT;
	}
	if (skb->cb[0] != FM_PKT_LOGICAL_CHAN_NUMBER) {
		fmerr("Received SKB (%p) is not FM Channel 8 pkt\n", skb);
		return -EINVAL;
	}

	/* Restore the channel-number byte in front of the payload */
	memcpy(skb_push(skb, 1), &skb->cb[0], 1);
	skb_queue_tail(&fmdev->rx_q, skb);
	tasklet_schedule(&fmdev->rx_task);

	return 0;
}
/*
 * Called by ST layer to indicate protocol registration completion
 * status. Stores the status for fmc_prepare() and signals the global
 * registration completion it is waiting on.
 */
static void fm_st_reg_comp_cb(void *arg, char data)
{
	struct fmdev *fmdev;

	fmdev = (struct fmdev *)arg;
	fmdev->streg_cbdata = data;
	complete(&wait_for_fmdrv_reg_comp);
}
/*
 * This function will be called from FM V4L2 open function.
 * Register with ST driver and initialize driver data:
 * locks, TX/RX queues and tasklets, the command-response timeout timer,
 * region info and default RX state. Returns 0 on success or a negative
 * errno (timeout / registration failure / missing ST write hook).
 */
u32 fmc_prepare(struct fmdev *fmdev)
{
	static struct st_proto_s fm_st_proto;
	u32 ret;

	if (test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmdbg("FM Core is already up\n");
		return 0;
	}

	/* Register FM as a client protocol with the TI Shared Transport layer */
	memset(&fm_st_proto, 0, sizeof(fm_st_proto));
	fm_st_proto.type = ST_FM;
	fm_st_proto.recv = fm_st_receive;
	fm_st_proto.match_packet = NULL;
	fm_st_proto.reg_complete_cb = fm_st_reg_comp_cb;
	fm_st_proto.write = NULL; /* TI ST driver will fill write pointer */
	fm_st_proto.priv_data = fmdev;

	ret = st_register(&fm_st_proto);
	if (ret == -EINPROGRESS) {
		/* ST completes registration asynchronously: wait for
		 * fm_st_reg_comp_cb() to fire, or time out. */
		init_completion(&wait_for_fmdrv_reg_comp);
		fmdev->streg_cbdata = -EINPROGRESS;
		fmdbg("%s waiting for ST reg completion signal\n", __func__);

		ret = wait_for_completion_timeout(&wait_for_fmdrv_reg_comp,
				FM_ST_REG_TIMEOUT);
		if (!ret) {
			fmerr("Timeout(%d sec), didn't get reg "
					"completion signal from ST\n",
					jiffies_to_msecs(FM_ST_REG_TIMEOUT) / 1000);
			return -ETIMEDOUT;
		}
		if (fmdev->streg_cbdata != 0) {
			fmerr("ST reg comp CB called with error "
					"status %d\n", fmdev->streg_cbdata);
			return -EAGAIN;
		}
		ret = 0;
	} else if (ret == -1) {
		fmerr("st_register failed %d\n", ret);
		return -EAGAIN;
	}

	/* ST fills in its write hook on successful registration */
	if (fm_st_proto.write != NULL) {
		g_st_write = fm_st_proto.write;
	} else {
		fmerr("Failed to get ST write func pointer\n");
		ret = st_unregister(ST_FM);
		if (ret < 0)
			fmerr("st_unregister failed %d\n", ret);
		return -EAGAIN;
	}

	spin_lock_init(&fmdev->rds_buff_lock);
	spin_lock_init(&fmdev->resp_skb_lock);

	/* Initialize TX queue and TX tasklet */
	skb_queue_head_init(&fmdev->tx_q);
	tasklet_init(&fmdev->tx_task, send_tasklet, (unsigned long)fmdev);

	/* Initialize RX Queue and RX tasklet */
	skb_queue_head_init(&fmdev->rx_q);
	tasklet_init(&fmdev->rx_task, recv_tasklet, (unsigned long)fmdev);

	fmdev->irq_info.stage = 0;
	atomic_set(&fmdev->tx_cnt, 1);	/* single outstanding command credit */
	fmdev->resp_comp = NULL;

	/* Arm the command-response timeout handler */
	init_timer(&fmdev->irq_info.timer);
	fmdev->irq_info.timer.function = &int_timeout_handler;
	fmdev->irq_info.timer.data = (unsigned long)fmdev;
	/*TODO: add FM_STIC_EVENT later */
	fmdev->irq_info.mask = FM_MAL_EVENT;

	/* Region info */
	memcpy(&fmdev->rx.region, &region_configs[default_radio_region],
			sizeof(struct region_info));

	/* Default RX state: unmuted, RDS/AF off, no frequency tuned yet */
	fmdev->rx.mute_mode = FM_MUTE_OFF;
	fmdev->rx.rf_depend_mute = FM_RX_RF_DEPENDENT_MUTE_OFF;
	fmdev->rx.rds.flag = FM_RDS_DISABLE;
	fmdev->rx.freq = FM_UNDEFINED_FREQ;
	fmdev->rx.rds_mode = FM_RDS_SYSTEM_RDS;
	fmdev->rx.af_mode = FM_RX_RDS_AF_SWITCH_MODE_OFF;
	fmdev->irq_info.retry = 0;

	fm_rx_reset_rds_cache(fmdev);
	init_waitqueue_head(&fmdev->rx.rds.read_queue);

	fm_rx_reset_station_info(fmdev);
	set_bit(FM_CORE_READY, &fmdev->flag);

	return ret;
}
/*
 * This function will be called from FM V4L2 release function.
 * Unregister from ST driver: wake any pending RDS readers, kill the
 * tasklets, purge the queues and reset the RX state.
 */
u32 fmc_release(struct fmdev *fmdev)
{
	u32 ret;

	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmdbg("FM Core is already down\n");
		return 0;
	}
	/* Service pending read */
	wake_up_interruptible(&fmdev->rx.rds.read_queue);

	tasklet_kill(&fmdev->tx_task);
	tasklet_kill(&fmdev->rx_task);

	skb_queue_purge(&fmdev->tx_q);
	skb_queue_purge(&fmdev->rx_q);

	fmdev->resp_comp = NULL;
	fmdev->rx.freq = 0;

	ret = st_unregister(ST_FM);

	if (ret < 0)
		fmerr("Failed to de-register FM from ST %d\n", ret);
	else
		fmdbg("Successfully unregistered from ST\n");

	clear_bit(FM_CORE_READY, &fmdev->flag);
	return ret;
}
  1358. /*
  1359. * Module init function. Ask FM V4L module to register video device.
  1360. * Allocate memory for FM driver context and RX RDS buffer.
  1361. */
  1362. static int __init fm_drv_init(void)
  1363. {
  1364. struct fmdev *fmdev = NULL;
  1365. u32 ret = -ENOMEM;
  1366. fmdbg("FM driver version %s\n", FM_DRV_VERSION);
  1367. fmdev = kzalloc(sizeof(struct fmdev), GFP_KERNEL);
  1368. if (NULL == fmdev) {
  1369. fmerr("Can't allocate operation structure memory\n");
  1370. return ret;
  1371. }
  1372. fmdev->rx.rds.buf_size = default_rds_buf * FM_RDS_BLK_SIZE;
  1373. fmdev->rx.rds.buff = kzalloc(fmdev->rx.rds.buf_size, GFP_KERNEL);
  1374. if (NULL == fmdev->rx.rds.buff) {
  1375. fmerr("Can't allocate rds ring buffer\n");
  1376. goto rel_dev;
  1377. }
  1378. ret = fm_v4l2_init_video_device(fmdev, radio_nr);
  1379. if (ret < 0)
  1380. goto rel_rdsbuf;
  1381. fmdev->irq_info.handlers = int_handler_table;
  1382. fmdev->curr_fmmode = FM_MODE_OFF;
  1383. fmdev->tx_data.pwr_lvl = FM_PWR_LVL_DEF;
  1384. fmdev->tx_data.preemph = FM_TX_PREEMPH_50US;
  1385. return ret;
  1386. rel_rdsbuf:
  1387. kfree(fmdev->rx.rds.buff);
  1388. rel_dev:
  1389. kfree(fmdev);
  1390. return ret;
  1391. }
  1392. /* Module exit function. Ask FM V4L module to unregister video device */
  1393. static void __exit fm_drv_exit(void)
  1394. {
  1395. struct fmdev *fmdev = NULL;
  1396. fmdev = fm_v4l2_deinit_video_device();
  1397. if (fmdev != NULL) {
  1398. kfree(fmdev->rx.rds.buff);
  1399. kfree(fmdev);
  1400. }
  1401. }
  1402. module_init(fm_drv_init);
  1403. module_exit(fm_drv_exit);
  1404. /* ------------- Module Info ------------- */
  1405. MODULE_AUTHOR("Manjunatha Halli <manjunatha_halli@ti.com>");
  1406. MODULE_DESCRIPTION("FM Driver for TI's Connectivity chip. " FM_DRV_VERSION);
  1407. MODULE_VERSION(FM_DRV_VERSION);
  1408. MODULE_LICENSE("GPL");