cx18-mailbox.c

/*
 * cx18 mailbox functions
 *
 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
 * Copyright (C) 2008 Andy Walls <awalls@radix.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 * 02111-1307 USA
 */
#include <stdarg.h>

#include "cx18-driver.h"
#include "cx18-io.h"
#include "cx18-scb.h"
#include "cx18-irq.h"
#include "cx18-mailbox.h"
#include "cx18-queue.h"
#include "cx18-streams.h"

static const char *rpu_str[] = { "APU", "CPU", "EPU", "HPU" };

#define API_FAST (1 << 2)	/* Short timeout */
#define API_SLOW (1 << 3)	/* Additional 300ms timeout */

struct cx18_api_info {
	u32 cmd;
	u8 flags;		/* Flags, see above */
	u8 rpu;			/* Processing unit */
	const char *name;	/* The name of the command */
};

#define API_ENTRY(rpu, x, f) { (x), (f), (rpu), #x }
static const struct cx18_api_info api_info[] = {
	/* MPEG encoder API */
	API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
	API_ENTRY(CPU, CX18_EPU_DEBUG, 0),
	API_ENTRY(CPU, CX18_CREATE_TASK, 0),
	API_ENTRY(CPU, CX18_DESTROY_TASK, 0),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_START, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_STOP, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_PAUSE, 0),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_RESUME, 0),
	API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_IN, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RATE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RESOLUTION, 0),
	API_ENTRY(CPU, CX18_CPU_SET_FILTER_PARAM, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_MEDIAN_CORING, 0),
	API_ENTRY(CPU, CX18_CPU_SET_INDEXTABLE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PARAMETERS, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_MUTE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_MUTE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_MISC_PARAMETERS, 0),
	API_ENTRY(CPU, CX18_CPU_SET_RAW_VBI_PARAM, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_SET_CAPTURE_LINE_NO, 0),
	API_ENTRY(CPU, CX18_CPU_SET_COPYRIGHT, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PID, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_PID, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VER_CROP_LINE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_GOP_STRUCTURE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SCENE_CHANGE_DETECTION, 0),
	API_ENTRY(CPU, CX18_CPU_SET_ASPECT_RATIO, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SKIP_INPUT_FRAME, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SLICED_VBI_PARAM, 0),
	API_ENTRY(CPU, CX18_CPU_SET_USERDATA_PLACE_HOLDER, 0),
	API_ENTRY(CPU, CX18_CPU_GET_ENC_PTS, 0),
	API_ENTRY(CPU, CX18_CPU_DE_SET_MDL_ACK, 0),
	API_ENTRY(CPU, CX18_CPU_DE_SET_MDL, API_FAST),
	API_ENTRY(CPU, CX18_APU_RESETAI, API_FAST),
	API_ENTRY(CPU, CX18_CPU_DE_RELEASE_MDL, API_SLOW),
	API_ENTRY(0, 0, 0),
};
static const struct cx18_api_info *find_api_info(u32 cmd)
{
	int i;

	for (i = 0; api_info[i].cmd; i++)
		if (api_info[i].cmd == cmd)
			return &api_info[i];
	return NULL;
}
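
/*
 * Illustrative sketch (kept under #if 0, not part of the driver): one way
 * the api_info[] table above can be consulted.  The helper name
 * cx18_cmd_is_slow() is made up for illustration; a NULL return from
 * find_api_info() is how unknown commands are detected in cx18_api_call()
 * further below.
 */
#if 0
static bool cx18_cmd_is_slow(u32 cmd)
{
	const struct cx18_api_info *info = find_api_info(cmd);

	/* e.g. CX18_CPU_CAPTURE_START and CX18_CPU_CAPTURE_STOP are API_SLOW */
	return info != NULL && (info->flags & API_SLOW) != 0;
}
#endif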
static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
{
	char argstr[MAX_MB_ARGUMENTS*11+1];
	char *p;
	int i;

	if (!(cx18_debug & CX18_DBGFLG_API))
		return;

	for (i = 0, p = argstr; i < MAX_MB_ARGUMENTS; i++, p += 11) {
		/* kernel snprintf() appends '\0' always */
		snprintf(p, 12, " %#010x", mb->args[i]);
	}
	CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s"
		       "\n", name, mb->request, mb->ack, mb->cmd, mb->error,
		       argstr);
}
/*
 * Functions that run in a work_queue work handling context
 */
static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	u32 handle, mdl_ack_count, id;
	struct cx18_mailbox *mb;
	struct cx18_mdl_ack *mdl_ack;
	struct cx18_stream *s;
	struct cx18_buffer *buf;
	int i;

	mb = &order->mb;
	handle = mb->args[0];
	s = cx18_handle_to_stream(cx, handle);

	if (s == NULL) {
		CX18_WARN("Got DMA done notification for unknown/inactive"
			  " handle %d, %s mailbox seq no %d\n", handle,
			  (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ?
			  "stale" : "good", mb->request);
		return;
	}

	mdl_ack_count = mb->args[2];
	mdl_ack = order->mdl_ack;
	for (i = 0; i < mdl_ack_count; i++, mdl_ack++) {
		id = mdl_ack->id;
		/*
		 * Simple integrity check for processing a stale (and possibly
		 * inconsistent mailbox): make sure the buffer id is in the
		 * valid range for the stream.
		 *
		 * We go through the trouble of dealing with stale mailboxes
		 * because most of the time, the mailbox data is still valid and
		 * unchanged (and in practice the firmware ping-pongs the
		 * two mdl_ack buffers so mdl_acks are not stale).
		 *
		 * There are occasions when we get a half changed mailbox,
		 * which this check catches for a handle & id mismatch.  If the
		 * handle and id do correspond, the worst case is that we
		 * completely lost the old buffer, but pick up the new buffer
		 * early (but the new mdl_ack is guaranteed to be good in this
		 * case as the firmware wouldn't point us to a new mdl_ack until
		 * it's filled in).
		 *
		 * cx18_queue_get_buf() will detect the lost buffers
		 * and send them back to q_free for fw rotation eventually.
		 */
		if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) &&
		    !(id >= s->mdl_offset &&
		      id < (s->mdl_offset + s->buffers))) {
			CX18_WARN("Fell behind! Ignoring stale mailbox with "
				  " inconsistent data. Lost buffer for mailbox "
				  "seq no %d\n", mb->request);
			break;
		}
		buf = cx18_queue_get_buf(s, id, mdl_ack->data_used);

		CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name, id);
		if (buf == NULL) {
			CX18_WARN("Could not find buf %d for stream %s\n",
				  id, s->name);
			/* Put as many buffers as possible back into fw use */
			cx18_stream_load_fw_queue(s);
			continue;
		}

		if (s->type == CX18_ENC_STREAM_TYPE_TS && s->dvb.enabled) {
			CX18_DEBUG_HI_DMA("TS recv bytesused = %d\n",
					  buf->bytesused);
			dvb_dmx_swfilter(&s->dvb.demux, buf->buf,
					 buf->bytesused);
		}
		/* Put as many buffers as possible back into fw use */
		cx18_stream_load_fw_queue(s);
		/* Put back TS buffer, since it was removed from all queues */
		if (s->type == CX18_ENC_STREAM_TYPE_TS)
			cx18_stream_put_buf_fw(s, buf);
	}
	wake_up(&cx->dma_waitq);
	if (s->id != -1)
		wake_up(&s->waitq);
}
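
/*
 * Illustrative sketch (kept under #if 0, not part of the driver): the
 * id-range integrity check used above for stale mailboxes, written out as a
 * standalone predicate.  The helper name mdl_id_in_range() is made up for
 * illustration.
 */
#if 0
static bool mdl_id_in_range(u32 id, u32 mdl_offset, u32 buffers)
{
	/* e.g. mdl_offset = 17, buffers = 6 accepts only ids 17..22 */
	return id >= mdl_offset && id < (mdl_offset + buffers);
}
#endif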
static void epu_debug(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	char *p;
	char *str = order->str;

	CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str);
	p = strchr(str, '.');
	if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str)
		CX18_INFO("FW version: %s\n", p - 1);
}
static void epu_cmd(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	switch (order->rpu) {
	case CPU:
	{
		switch (order->mb.cmd) {
		case CX18_EPU_DMA_DONE:
			epu_dma_done(cx, order);
			break;
		case CX18_EPU_DEBUG:
			epu_debug(cx, order);
			break;
		default:
			CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
				  order->mb.cmd);
			break;
		}
		break;
	}
	case APU:
		CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
			  order->mb.cmd);
		break;
	default:
		break;
	}
}
static
void free_epu_work_order(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	atomic_set(&order->pending, 0);
}

void cx18_epu_work_handler(struct work_struct *work)
{
	struct cx18_epu_work_order *order =
			container_of(work, struct cx18_epu_work_order, work);
	struct cx18 *cx = order->cx;

	epu_cmd(cx, order);
	free_epu_work_order(cx, order);
}
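
/*
 * Illustrative sketch (kept under #if 0, not part of this file): the
 * standard workqueue wiring that ties a work order to the handler above.
 * The real initialization lives elsewhere in the driver; the helper name
 * example_init_work_orders() is made up for illustration.
 */
#if 0
static void example_init_work_orders(struct cx18 *cx)
{
	int i;

	for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) {
		cx->epu_work_order[i].cx = cx;
		atomic_set(&cx->epu_work_order[i].pending, 0);
		INIT_WORK(&cx->epu_work_order[i].work, cx18_epu_work_handler);
	}
}
#endif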
/*
 * Functions that run in an interrupt handling context
 */
static void mb_ack_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	struct cx18_mailbox __iomem *ack_mb;
	u32 ack_irq, req;

	switch (order->rpu) {
	case APU:
		ack_irq = IRQ_EPU_TO_APU_ACK;
		ack_mb = &cx->scb->apu2epu_mb;
		break;
	case CPU:
		ack_irq = IRQ_EPU_TO_CPU_ACK;
		ack_mb = &cx->scb->cpu2epu_mb;
		break;
	default:
		CX18_WARN("Unhandled RPU (%d) for command %x ack\n",
			  order->rpu, order->mb.cmd);
		return;
	}

	req = order->mb.request;
	/* Don't ack if the RPU has gotten impatient and timed us out */
	if (req != cx18_readl(cx, &ack_mb->request) ||
	    req == cx18_readl(cx, &ack_mb->ack)) {
		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
				"incoming %s to EPU mailbox (sequence no. %u) "
				"while processing\n",
				rpu_str[order->rpu], rpu_str[order->rpu], req);
		order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
		return;
	}
	cx18_writel(cx, req, &ack_mb->ack);
	cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq);
	return;
}
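
/*
 * Illustrative sketch (kept under #if 0, not part of the driver): the
 * "is this mailbox still ours to ack?" test performed above, written out as
 * a standalone predicate.  A mailbox is treated as stale while processing
 * when the RPU has already replaced the request or self-ack'ed it.  The
 * helper name mb_still_unacked() is made up for illustration.
 */
#if 0
static bool mb_still_unacked(u32 req, u32 mb_request, u32 mb_ack)
{
	return req == mb_request && req != mb_ack;
}
#endif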
static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	u32 handle, mdl_ack_offset, mdl_ack_count;
	struct cx18_mailbox *mb;

	mb = &order->mb;
	handle = mb->args[0];
	mdl_ack_offset = mb->args[1];
	mdl_ack_count = mb->args[2];

	if (handle == CX18_INVALID_TASK_HANDLE ||
	    mdl_ack_count == 0 || mdl_ack_count > CX18_MAX_MDL_ACKS) {
		if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
			mb_ack_irq(cx, order);
		return -1;
	}

	cx18_memcpy_fromio(cx, order->mdl_ack, cx->enc_mem + mdl_ack_offset,
			   sizeof(struct cx18_mdl_ack) * mdl_ack_count);

	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
		mb_ack_irq(cx, order);
	return 1;
}
static
int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	u32 str_offset;
	char *str = order->str;

	str[0] = '\0';
	str_offset = order->mb.args[1];
	if (str_offset) {
		cx18_setup_page(cx, str_offset);
		cx18_memcpy_fromio(cx, str, cx->enc_mem + str_offset, 252);
		str[252] = '\0';
		cx18_setup_page(cx, SCB_OFFSET);
	}

	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
		mb_ack_irq(cx, order);

	return str_offset ? 1 : 0;
}
static inline
int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	int ret = -1;

	switch (order->rpu) {
	case CPU:
	{
		switch (order->mb.cmd) {
		case CX18_EPU_DMA_DONE:
			ret = epu_dma_done_irq(cx, order);
			break;
		case CX18_EPU_DEBUG:
			ret = epu_debug_irq(cx, order);
			break;
		default:
			CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
				  order->mb.cmd);
			break;
		}
		break;
	}
	case APU:
		CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
			  order->mb.cmd);
		break;
	default:
		break;
	}
	return ret;
}
static inline
struct cx18_epu_work_order *alloc_epu_work_order_irq(struct cx18 *cx)
{
	int i;
	struct cx18_epu_work_order *order = NULL;

	for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) {
		/*
		 * We only need "pending" atomic to inspect its contents,
		 * and need not do a check and set because:
		 * 1. Any work handler thread only clears "pending" and only
		 *    on one, particular work order at a time, per handler
		 *    thread.
		 * 2. "pending" is only set here, and we're serialized because
		 *    we're called in an IRQ handler context.
		 */
		if (atomic_read(&cx->epu_work_order[i].pending) == 0) {
			order = &cx->epu_work_order[i];
			atomic_set(&order->pending, 1);
			break;
		}
	}
	return order;
}
void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
{
	struct cx18_mailbox __iomem *mb;
	struct cx18_mailbox *order_mb;
	struct cx18_epu_work_order *order;
	int submit;

	switch (rpu) {
	case CPU:
		mb = &cx->scb->cpu2epu_mb;
		break;
	case APU:
		mb = &cx->scb->apu2epu_mb;
		break;
	default:
		return;
	}

	order = alloc_epu_work_order_irq(cx);
	if (order == NULL) {
		CX18_WARN("Unable to find blank work order form to schedule "
			  "incoming mailbox command processing\n");
		return;
	}

	order->flags = 0;
	order->rpu = rpu;
	order_mb = &order->mb;

	/* mb->cmd and mb->args[0] through mb->args[2] */
	cx18_memcpy_fromio(cx, &order_mb->cmd, &mb->cmd, 4 * sizeof(u32));
	/* mb->request and mb->ack. N.B. we want to read mb->ack last */
	cx18_memcpy_fromio(cx, &order_mb->request, &mb->request,
			   2 * sizeof(u32));

	if (order_mb->request == order_mb->ack) {
		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
				"incoming %s to EPU mailbox (sequence no. %u)"
				"\n",
				rpu_str[rpu], rpu_str[rpu], order_mb->request);
		dump_mb(cx, order_mb, "incoming");
		order->flags = CX18_F_EWO_MB_STALE_UPON_RECEIPT;
	}

	/*
	 * Individual EPU command processing is responsible for ack-ing
	 * a non-stale mailbox as soon as possible
	 */
	submit = epu_cmd_irq(cx, order);
	if (submit > 0) {
		queue_work(cx->work_queue, &order->work);
	}
}
/*
 * Functions called from a non-interrupt, non work_queue context
 */
static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
	const struct cx18_api_info *info = find_api_info(cmd);
	u32 state, irq, req, ack, err;
	struct cx18_mailbox __iomem *mb;
	u32 __iomem *xpu_state;
	wait_queue_head_t *waitq;
	struct mutex *mb_lock;
	long int timeout, ret;
	int i;

	if (info == NULL) {
		CX18_WARN("unknown cmd %x\n", cmd);
		return -EINVAL;
	}

	if (cmd == CX18_CPU_DE_SET_MDL)
		CX18_DEBUG_HI_API("%s\n", info->name);
	else
		CX18_DEBUG_API("%s\n", info->name);

	switch (info->rpu) {
	case APU:
		waitq = &cx->mb_apu_waitq;
		mb_lock = &cx->epu2apu_mb_lock;
		irq = IRQ_EPU_TO_APU;
		mb = &cx->scb->epu2apu_mb;
		xpu_state = &cx->scb->apu_state;
		break;
	case CPU:
		waitq = &cx->mb_cpu_waitq;
		mb_lock = &cx->epu2cpu_mb_lock;
		irq = IRQ_EPU_TO_CPU;
		mb = &cx->scb->epu2cpu_mb;
		xpu_state = &cx->scb->cpu_state;
		break;
	default:
		CX18_WARN("Unknown RPU (%d) for API call\n", info->rpu);
		return -EINVAL;
	}

	mutex_lock(mb_lock);
	/*
	 * Wait for an in-use mailbox to complete
	 *
	 * If the XPU is responding with Ack's, the mailbox shouldn't be in
	 * a busy state, since we serialize access to it on our end.
	 *
	 * If the wait for ack after sending a previous command was interrupted
	 * by a signal, we may get here and find a busy mailbox. After waiting,
	 * mark it "not busy" from our end, if the XPU hasn't ack'ed it still.
	 */
	state = cx18_readl(cx, xpu_state);
	req = cx18_readl(cx, &mb->request);
	timeout = msecs_to_jiffies(10);
	ret = wait_event_timeout(*waitq,
				 (ack = cx18_readl(cx, &mb->ack)) == req,
				 timeout);
	if (req != ack) {
		/* waited long enough, make the mbox "not busy" from our end */
		cx18_writel(cx, req, &mb->ack);
		CX18_ERR("mbox was found stuck busy when setting up for %s; "
			 "clearing busy and trying to proceed\n", info->name);
	} else if (ret != timeout)
		CX18_DEBUG_API("waited %u msecs for busy mbox to be acked\n",
			       jiffies_to_msecs(timeout-ret));

	/* Build the outgoing mailbox */
	req = ((req & 0xfffffffe) == 0xfffffffe) ? 1 : req + 1;

	cx18_writel(cx, cmd, &mb->cmd);
	for (i = 0; i < args; i++)
		cx18_writel(cx, data[i], &mb->args[i]);
	cx18_writel(cx, 0, &mb->error);
	cx18_writel(cx, req, &mb->request);
	cx18_writel(cx, req - 1, &mb->ack); /* ensure ack & req are distinct */

	/*
	 * Notify the XPU and wait for it to send an Ack back
	 */
	timeout = msecs_to_jiffies((info->flags & API_FAST) ? 10 : 20);

	CX18_DEBUG_HI_IRQ("sending interrupt SW1: %x to send %s\n",
			  irq, info->name);
	cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq);

	ret = wait_event_timeout(
		       *waitq,
		       cx18_readl(cx, &mb->ack) == cx18_readl(cx, &mb->request),
		       timeout);
	if (ret == 0) {
		/* Timed out */
		mutex_unlock(mb_lock);
		CX18_DEBUG_WARN("sending %s timed out waiting %d msecs for RPU "
				"acknowledgement\n",
				info->name, jiffies_to_msecs(timeout));
		return -EINVAL;
	}

	if (ret != timeout)
		CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n",
				  jiffies_to_msecs(timeout-ret), info->name);

	/* Collect data returned by the XPU */
	for (i = 0; i < MAX_MB_ARGUMENTS; i++)
		data[i] = cx18_readl(cx, &mb->args[i]);
	err = cx18_readl(cx, &mb->error);
	mutex_unlock(mb_lock);

	/*
	 * Wait for XPU to perform extra actions for the caller in some cases.
	 * e.g. CX18_CPU_DE_RELEASE_MDL will cause the CPU to send all buffers
	 * back in a burst shortly thereafter
	 */
	if (info->flags & API_SLOW)
		cx18_msleep_timeout(300, 0);

	if (err)
		CX18_DEBUG_API("mailbox error %08x for command %s\n", err,
			       info->name);
	return err ? -EIO : 0;
}
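
/*
 * Illustrative sketch (kept under #if 0, not part of the driver): the
 * request sequence number update used in cx18_api_call() above, written out
 * on its own.  The helper name next_mb_seq() is made up for illustration.
 */
#if 0
static u32 next_mb_seq(u32 req)
{
	/* e.g. 5 -> 6, 0xfffffffe -> 1, 0xffffffff -> 1; never yields 0 */
	return ((req & 0xfffffffe) == 0xfffffffe) ? 1 : req + 1;
}
#endif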
int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
	return cx18_api_call(cx, cmd, args, data);
}
static int cx18_set_filter_param(struct cx18_stream *s)
{
	struct cx18 *cx = s->cx;
	u32 mode;
	int ret;

	mode = (cx->filter_mode & 1) ? 2 : (cx->spatial_strength ? 1 : 0);
	ret = cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
			s->handle, 1, mode, cx->spatial_strength);
	mode = (cx->filter_mode & 2) ? 2 : (cx->temporal_strength ? 1 : 0);
	ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
				    s->handle, 0, mode, cx->temporal_strength);
	ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
				    s->handle, 2, cx->filter_mode >> 2, 0);
	return ret;
}
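
/*
 * Illustrative sketch (kept under #if 0, not part of the driver): how
 * cx->filter_mode is packed before cx18_set_filter_param() above unpacks it;
 * the packing itself is done in the CX2341X_ENC_SET_DNR_FILTER_MODE case of
 * cx18_api_func() below.  The helper name example_pack_filter_mode() is made
 * up for illustration.
 */
#if 0
static u32 example_pack_filter_mode(u32 spatial_temporal_bits, u32 median_type)
{
	/* bits 0-1: spatial/temporal selections; bits 2+: median filter type */
	return (spatial_temporal_bits & 3) | (median_type << 2);
}
#endif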
int cx18_api_func(void *priv, u32 cmd, int in, int out,
		  u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct cx18_api_func_private *api_priv = priv;
	struct cx18 *cx = api_priv->cx;
	struct cx18_stream *s = api_priv->s;

	switch (cmd) {
	case CX2341X_ENC_SET_OUTPUT_PORT:
		return 0;
	case CX2341X_ENC_SET_FRAME_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_IN, 6,
				 s->handle, 0, 0, 0, 0, data[0]);
	case CX2341X_ENC_SET_FRAME_SIZE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RESOLUTION, 3,
				 s->handle, data[1], data[0]);
	case CX2341X_ENC_SET_STREAM_TYPE:
		return cx18_vapi(cx, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_ASPECT_RATIO:
		return cx18_vapi(cx, CX18_CPU_SET_ASPECT_RATIO, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_GOP_PROPERTIES:
		return cx18_vapi(cx, CX18_CPU_SET_GOP_STRUCTURE, 3,
				 s->handle, data[0], data[1]);
	case CX2341X_ENC_SET_GOP_CLOSURE:
		return 0;
	case CX2341X_ENC_SET_AUDIO_PROPERTIES:
		return cx18_vapi(cx, CX18_CPU_SET_AUDIO_PARAMETERS, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_MUTE_AUDIO:
		return cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_BIT_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RATE, 5,
				 s->handle, data[0], data[1], data[2], data[3]);
	case CX2341X_ENC_MUTE_VIDEO:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_FRAME_DROP_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_SKIP_INPUT_FRAME, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_MISC:
		return cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 4,
				 s->handle, data[0], data[1], data[2]);
	case CX2341X_ENC_SET_DNR_FILTER_MODE:
		cx->filter_mode = (data[0] & 3) | (data[1] << 2);
		return cx18_set_filter_param(s);
	case CX2341X_ENC_SET_DNR_FILTER_PROPS:
		cx->spatial_strength = data[0];
		cx->temporal_strength = data[1];
		return cx18_set_filter_param(s);
	case CX2341X_ENC_SET_SPATIAL_FILTER_TYPE:
		return cx18_vapi(cx, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 3,
				 s->handle, data[0], data[1]);
	case CX2341X_ENC_SET_CORING_LEVELS:
		return cx18_vapi(cx, CX18_CPU_SET_MEDIAN_CORING, 5,
				 s->handle, data[0], data[1], data[2], data[3]);
	}
	CX18_WARN("Unknown cmd %x\n", cmd);
	return 0;
}
int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS],
		     u32 cmd, int args, ...)
{
	va_list ap;
	int i;

	va_start(ap, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(ap, u32);
	va_end(ap);

	return cx18_api(cx, cmd, args, data);
}
int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...)
{
	u32 data[MAX_MB_ARGUMENTS];
	va_list ap;
	int i;

	if (cx == NULL) {
		CX18_ERR("cx == NULL (cmd=%x)\n", cmd);
		return 0;
	}
	if (args > MAX_MB_ARGUMENTS) {
		CX18_ERR("args too big (cmd=%x)\n", cmd);
		args = MAX_MB_ARGUMENTS;
	}

	va_start(ap, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(ap, u32);
	va_end(ap);

	return cx18_api(cx, cmd, args, data);
}
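
/*
 * Illustrative sketch (kept under #if 0, not part of the driver): how other
 * parts of the driver are expected to issue mailbox commands through the
 * varargs wrappers above.  The specific command, argument count, and helper
 * name example_get_enc_pts() are chosen only for illustration; the returned
 * mailbox arguments are available in data[] because cx18_api_call() copies
 * all MAX_MB_ARGUMENTS of them back.
 */
#if 0
static int example_get_enc_pts(struct cx18 *cx, u32 handle, u32 pts_words[2])
{
	u32 data[MAX_MB_ARGUMENTS];
	int ret;

	ret = cx18_vapi_result(cx, data, CX18_CPU_GET_ENC_PTS, 1, handle);
	if (ret == 0) {
		/* assumes the PTS comes back in the first two result words */
		pts_words[0] = data[0];
		pts_words[1] = data[1];
	}
	return ret;
}
#endif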