cx18-mailbox.c

/*
 *  cx18 mailbox functions
 *
 *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 *  02111-1307  USA
 */

#include <stdarg.h>

#include "cx18-driver.h"
#include "cx18-io.h"
#include "cx18-scb.h"
#include "cx18-irq.h"
#include "cx18-mailbox.h"
#include "cx18-queue.h"
#include "cx18-streams.h"

static const char *rpu_str[] = { "APU", "CPU", "EPU", "HPU" };

#define API_FAST	(1 << 2)	/* Short timeout */
#define API_SLOW	(1 << 3)	/* Additional 300ms timeout */

struct cx18_api_info {
	u32 cmd;
	u8 flags;		/* Flags, see above */
	u8 rpu;			/* Processing unit */
	const char *name;	/* The name of the command */
};

#define API_ENTRY(rpu, x, f) { (x), (f), (rpu), #x }

static const struct cx18_api_info api_info[] = {
	/* MPEG encoder API */
	API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
	API_ENTRY(CPU, CX18_EPU_DEBUG, 0),
	API_ENTRY(CPU, CX18_CREATE_TASK, 0),
	API_ENTRY(CPU, CX18_DESTROY_TASK, 0),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_START, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_STOP, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_PAUSE, 0),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_RESUME, 0),
	API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_IN, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RATE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RESOLUTION, 0),
	API_ENTRY(CPU, CX18_CPU_SET_FILTER_PARAM, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_MEDIAN_CORING, 0),
	API_ENTRY(CPU, CX18_CPU_SET_INDEXTABLE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PARAMETERS, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_MUTE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_MUTE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_MISC_PARAMETERS, 0),
	API_ENTRY(CPU, CX18_CPU_SET_RAW_VBI_PARAM, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_SET_CAPTURE_LINE_NO, 0),
	API_ENTRY(CPU, CX18_CPU_SET_COPYRIGHT, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PID, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_PID, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VER_CROP_LINE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_GOP_STRUCTURE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SCENE_CHANGE_DETECTION, 0),
	API_ENTRY(CPU, CX18_CPU_SET_ASPECT_RATIO, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SKIP_INPUT_FRAME, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SLICED_VBI_PARAM, 0),
	API_ENTRY(CPU, CX18_CPU_SET_USERDATA_PLACE_HOLDER, 0),
	API_ENTRY(CPU, CX18_CPU_GET_ENC_PTS, 0),
	API_ENTRY(CPU, CX18_CPU_DE_SET_MDL_ACK, 0),
	API_ENTRY(CPU, CX18_CPU_DE_SET_MDL, API_FAST),
	API_ENTRY(CPU, CX18_APU_RESETAI, API_FAST),
	API_ENTRY(CPU, CX18_CPU_DE_RELEASE_MDL, API_SLOW),
	API_ENTRY(0, 0, 0),
};

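/* Look up a command in api_info[]; the table is terminated by a 0 cmd. */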
static const struct cx18_api_info *find_api_info(u32 cmd)
{
	int i;

	for (i = 0; api_info[i].cmd; i++)
		if (api_info[i].cmd == cmd)
			return &api_info[i];
	return NULL;
}

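/*
 * Dump a mailbox's request/ack/cmd/error fields and arguments to the log.
 * Only emits output when the CX18_DBGFLG_API debug flag is enabled.
 */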
static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
{
	char argstr[MAX_MB_ARGUMENTS*11+1];
	char *p;
	int i;

	if (!(cx18_debug & CX18_DBGFLG_API))
		return;

	for (i = 0, p = argstr; i < MAX_MB_ARGUMENTS; i++, p += 11) {
		/* kernel snprintf() always appends a '\0' */
		snprintf(p, 12, " %#010x", mb->args[i]);
	}
	CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s\n",
		       name, mb->request, mb->ack, mb->cmd, mb->error, argstr);
}

/*
 * Functions that run in a work_queue work handling context
 */

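/*
 * Handle a DMA done notification from the encoder: return each acknowledged
 * MDL buffer to its stream queue and, for an enabled TS stream, pass the
 * data straight to the DVB demux and hand the buffer back to the firmware.
 */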
static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	u32 handle, mdl_ack_count;
	struct cx18_mailbox *mb;
	struct cx18_mdl_ack *mdl_ack;
	struct cx18_stream *s;
	struct cx18_buffer *buf;
	int i;

	mb = &order->mb;
	handle = mb->args[0];
	s = cx18_handle_to_stream(cx, handle);
	if (s == NULL) {
		CX18_WARN("Got DMA done notification for unknown/inactive"
			  " handle %d\n", handle);
		return;
	}

	mdl_ack_count = mb->args[2];
	mdl_ack = order->mdl_ack;
	for (i = 0; i < mdl_ack_count; i++, mdl_ack++) {
		buf = cx18_queue_get_buf(s, mdl_ack->id, mdl_ack->data_used);
		CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name,
				  mdl_ack->id);
		if (buf == NULL) {
			CX18_WARN("Could not find buf %d for stream %s\n",
				  mdl_ack->id, s->name);
			continue;
		}

		cx18_buf_sync_for_cpu(s, buf);
		if (s->type == CX18_ENC_STREAM_TYPE_TS && s->dvb.enabled) {
			CX18_DEBUG_HI_DMA("TS recv bytesused = %d\n",
					  buf->bytesused);
			dvb_dmx_swfilter(&s->dvb.demux, buf->buf,
					 buf->bytesused);
			cx18_buf_sync_for_device(s, buf);
			if (s->handle != CX18_INVALID_TASK_HANDLE &&
			    test_bit(CX18_F_S_STREAMING, &s->s_flags))
				cx18_vapi(cx,
					  CX18_CPU_DE_SET_MDL, 5, s->handle,
					  (void __iomem *)
					  &cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
					  1, buf->id, s->buf_size);
		} else
			set_bit(CX18_F_B_NEED_BUF_SWAP, &buf->b_flags);
	}
	wake_up(&cx->dma_waitq);
	if (s->id != -1)
		wake_up(&s->waitq);
}

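/*
 * Log a debug string sent by the encoder firmware.  Before the firmware
 * loaded flag is set, a string containing a '.' is also reported as the
 * firmware version.
 */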
static void epu_debug(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	char *p;
	char *str = order->str;

	CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str);
	p = strchr(str, '.');
	if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str)
		CX18_INFO("FW version: %s\n", p - 1);
}

static void epu_cmd(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	switch (order->rpu) {
	case CPU:
	{
		switch (order->mb.cmd) {
		case CX18_EPU_DMA_DONE:
			epu_dma_done(cx, order);
			break;
		case CX18_EPU_DEBUG:
			epu_debug(cx, order);
			break;
		default:
			CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
				  order->mb.cmd);
			break;
		}
		break;
	}
	case APU:
		CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
			  order->mb.cmd);
		break;
	default:
		break;
	}
}

static
void free_epu_work_order(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	atomic_set(&order->pending, 0);
}

void cx18_epu_work_handler(struct work_struct *work)
{
	struct cx18_epu_work_order *order =
			container_of(work, struct cx18_epu_work_order, work);
	struct cx18 *cx = order->cx;

	epu_cmd(cx, order);
	free_epu_work_order(cx, order);
}

/*
 * Functions that run in an interrupt handling context
 */

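/*
 * Ack an incoming mailbox from the CPU or APU by writing the request number
 * back to the mailbox's ack field and raising the matching SW2 ack
 * interrupt.  If the sender has already timed us out (request changed or
 * self-ack'ed), flag the work order as stale instead.
 */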
static void mb_ack_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	struct cx18_mailbox __iomem *ack_mb;
	u32 ack_irq, req;

	switch (order->rpu) {
	case APU:
		ack_irq = IRQ_EPU_TO_APU_ACK;
		ack_mb = &cx->scb->apu2epu_mb;
		break;
	case CPU:
		ack_irq = IRQ_EPU_TO_CPU_ACK;
		ack_mb = &cx->scb->cpu2epu_mb;
		break;
	default:
		CX18_WARN("Unhandled RPU (%d) for command %x ack\n",
			  order->rpu, order->mb.cmd);
		return;
	}

	req = order->mb.request;
	/* Don't ack if the RPU has gotten impatient and timed us out */
	if (req != cx18_readl(cx, &ack_mb->request) ||
	    req == cx18_readl(cx, &ack_mb->ack)) {
		CX18_WARN("Possibly falling behind: %s self-ack'ed our incoming"
			  " %s to EPU mailbox (sequence no. %u) while "
			  "processing\n",
			  rpu_str[order->rpu], rpu_str[order->rpu], req);
		order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
		return;
	}
	cx18_writel(cx, req, &ack_mb->ack);
	cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq);
	return;
}

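/*
 * IRQ-context half of DMA done handling: validate the handle and MDL ack
 * count, copy the MDL ack list out of encoder memory into the work order,
 * and ack a non-stale mailbox.  Returns 1 if the work order should be
 * queued for the work handler, -1 otherwise.
 */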
static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	u32 handle, mdl_ack_offset, mdl_ack_count;
	struct cx18_mailbox *mb;

	mb = &order->mb;
	handle = mb->args[0];
	mdl_ack_offset = mb->args[1];
	mdl_ack_count = mb->args[2];

	if (handle == CX18_INVALID_TASK_HANDLE ||
	    mdl_ack_count == 0 || mdl_ack_count > CX18_MAX_MDL_ACKS) {
		if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
			mb_ack_irq(cx, order);
		return -1;
	}

	cx18_memcpy_fromio(cx, order->mdl_ack, cx->enc_mem + mdl_ack_offset,
			   sizeof(struct cx18_mdl_ack) * mdl_ack_count);
	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
		mb_ack_irq(cx, order);
	return 1;
}

static
int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	u32 str_offset;
	char *str = order->str;

	str[0] = '\0';
	str_offset = order->mb.args[1];
	if (str_offset) {
		cx18_setup_page(cx, str_offset);
		cx18_memcpy_fromio(cx, str, cx->enc_mem + str_offset, 252);
		str[252] = '\0';
		cx18_setup_page(cx, SCB_OFFSET);
	}

	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
		mb_ack_irq(cx, order);

	return str_offset ? 1 : 0;
}

static inline
int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	int ret = -1;

	switch (order->rpu) {
	case CPU:
	{
		switch (order->mb.cmd) {
		case CX18_EPU_DMA_DONE:
			ret = epu_dma_done_irq(cx, order);
			break;
		case CX18_EPU_DEBUG:
			ret = epu_debug_irq(cx, order);
			break;
		default:
			CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
				  order->mb.cmd);
			break;
		}
		break;
	}
	case APU:
		CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
			  order->mb.cmd);
		break;
	default:
		break;
	}
	return ret;
}

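/*
 * Grab a free work order slot from cx->epu_work_order[] and mark it pending.
 * Returns NULL if all CX18_MAX_EPU_WORK_ORDERS slots are in use.
 */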
static inline
struct cx18_epu_work_order *alloc_epu_work_order_irq(struct cx18 *cx)
{
	int i;
	struct cx18_epu_work_order *order = NULL;

	for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) {
		/*
		 * We only need the "pending" atomic to inspect its contents,
		 * and need not do a check and set, because:
		 * 1. Any work handler thread only clears "pending" and only
		 *    on one, particular work order at a time, per handler
		 *    thread.
		 * 2. "pending" is only set here, and we're serialized because
		 *    we're called in an IRQ handler context.
		 */
		if (atomic_read(&cx->epu_work_order[i].pending) == 0) {
			order = &cx->epu_work_order[i];
			atomic_set(&order->pending, 1);
			break;
		}
	}
	return order;
}

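/*
 * Entry point from the interrupt handler for an incoming CPU or APU mailbox:
 * snapshot the mailbox into a work order, detect a stale (self-ack'ed)
 * mailbox, run the IRQ-safe part of the command, and queue the remainder
 * for the work handler when needed.
 */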
void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
{
	struct cx18_mailbox __iomem *mb;
	struct cx18_mailbox *order_mb;
	struct cx18_epu_work_order *order;
	int submit;

	switch (rpu) {
	case CPU:
		mb = &cx->scb->cpu2epu_mb;
		break;
	case APU:
		mb = &cx->scb->apu2epu_mb;
		break;
	default:
		return;
	}

	order = alloc_epu_work_order_irq(cx);
	if (order == NULL) {
		CX18_WARN("Unable to find blank work order form to schedule "
			  "incoming mailbox command processing\n");
		return;
	}

	order->flags = 0;
	order->rpu = rpu;
	order_mb = &order->mb;
	cx18_memcpy_fromio(cx, order_mb, mb, sizeof(struct cx18_mailbox));

	if (order_mb->request == order_mb->ack) {
		CX18_WARN("Possibly falling behind: %s self-ack'ed our incoming"
			  " %s to EPU mailbox (sequence no. %u)\n",
			  rpu_str[rpu], rpu_str[rpu], order_mb->request);
		dump_mb(cx, order_mb, "incoming");
		order->flags = CX18_F_EWO_MB_STALE_UPON_RECEIPT;
	}

	/*
	 * Individual EPU command processing is responsible for ack-ing
	 * a non-stale mailbox as soon as possible
	 */
	submit = epu_cmd_irq(cx, order);
	if (submit > 0)
		queue_work(cx18_work_queue, &order->work);
}

/*
 * Functions called from a non-interrupt, non work_queue context
 */
static void cx18_api_log_ack_delay(struct cx18 *cx, int msecs)
{
	if (msecs > CX18_MAX_MB_ACK_DELAY)
		msecs = CX18_MAX_MB_ACK_DELAY;
	atomic_inc(&cx->mbox_stats.mb_ack_delay[msecs]);
}

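/*
 * Send one command to the APU or CPU mailbox and wait for its ack.
 * Access is serialized per mailbox by mb_lock; the request number is bumped
 * for every command (wrapping back to 1 near the top of the u32 range) so
 * a matching ack can be recognized.  Returned arguments are copied back
 * into data[] and a non-zero mailbox error becomes -EIO.
 */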
static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
	const struct cx18_api_info *info = find_api_info(cmd);
	u32 state, irq, req, ack, err;
	struct cx18_mailbox __iomem *mb;
	u32 __iomem *xpu_state;
	wait_queue_head_t *waitq;
	struct mutex *mb_lock;
	long int timeout, ret;
	int i;

	if (info == NULL) {
		CX18_WARN("unknown cmd %x\n", cmd);
		return -EINVAL;
	}

	if (cmd == CX18_CPU_DE_SET_MDL)
		CX18_DEBUG_HI_API("%s\n", info->name);
	else
		CX18_DEBUG_API("%s\n", info->name);

	switch (info->rpu) {
	case APU:
		waitq = &cx->mb_apu_waitq;
		mb_lock = &cx->epu2apu_mb_lock;
		irq = IRQ_EPU_TO_APU;
		mb = &cx->scb->epu2apu_mb;
		xpu_state = &cx->scb->apu_state;
		break;
	case CPU:
		waitq = &cx->mb_cpu_waitq;
		mb_lock = &cx->epu2cpu_mb_lock;
		irq = IRQ_EPU_TO_CPU;
		mb = &cx->scb->epu2cpu_mb;
		xpu_state = &cx->scb->cpu_state;
		break;
	default:
		CX18_WARN("Unknown RPU (%d) for API call\n", info->rpu);
		return -EINVAL;
	}

	mutex_lock(mb_lock);
	/*
	 * Wait for an in-use mailbox to complete
	 *
	 * If the XPU is responding with Ack's, the mailbox shouldn't be in
	 * a busy state, since we serialize access to it on our end.
	 *
	 * If the wait for ack after sending a previous command was interrupted
	 * by a signal, we may get here and find a busy mailbox.  After waiting,
	 * mark it "not busy" from our end, if the XPU hasn't ack'ed it still.
	 */
	state = cx18_readl(cx, xpu_state);
	req = cx18_readl(cx, &mb->request);
	timeout = msecs_to_jiffies(20); /* 1 field at 50 Hz vertical refresh */
	ret = wait_event_timeout(*waitq,
				 (ack = cx18_readl(cx, &mb->ack)) == req,
				 timeout);
	if (req != ack) {
		/* waited long enough, make the mbox "not busy" from our end */
		cx18_writel(cx, req, &mb->ack);
		CX18_ERR("mbox was found stuck busy when setting up for %s; "
			 "clearing busy and trying to proceed\n", info->name);
	} else if (ret != timeout)
		CX18_DEBUG_API("waited %u usecs for busy mbox to be acked\n",
			       jiffies_to_usecs(timeout-ret));

	/* Build the outgoing mailbox */
	req = ((req & 0xfffffffe) == 0xfffffffe) ? 1 : req + 1;

	cx18_writel(cx, cmd, &mb->cmd);
	for (i = 0; i < args; i++)
		cx18_writel(cx, data[i], &mb->args[i]);
	cx18_writel(cx, 0, &mb->error);
	cx18_writel(cx, req, &mb->request);
	cx18_writel(cx, req - 1, &mb->ack); /* ensure ack & req are distinct */

	/*
	 * Notify the XPU and wait for it to send an Ack back
	 * 21 ms = ~ 0.5 frames at a frame rate of 24 fps
	 * 42 ms = ~ 1 frame at a frame rate of 24 fps
	 */
	timeout = msecs_to_jiffies((info->flags & API_FAST) ? 21 : 42);

	CX18_DEBUG_HI_IRQ("sending interrupt SW1: %x to send %s\n",
			  irq, info->name);
	cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq);

	ret = wait_event_timeout(
		       *waitq,
		       cx18_readl(cx, &mb->ack) == cx18_readl(cx, &mb->request),
		       timeout);
	if (ret == 0) {
		/* Timed out */
		mutex_unlock(mb_lock);
		i = jiffies_to_msecs(timeout);
		cx18_api_log_ack_delay(cx, i);
		CX18_WARN("sending %s timed out waiting %d msecs for RPU "
			  "acknowledgement\n", info->name, i);
		return -EINVAL;
	} else if (ret < 0) {
		/* Interrupted */
		mutex_unlock(mb_lock);
		CX18_WARN("sending %s was interrupted waiting for RPU "
			  "acknowledgement\n", info->name);
		return -EINTR;
	}

	i = jiffies_to_msecs(timeout-ret);
	cx18_api_log_ack_delay(cx, i);
	if (ret != timeout)
		CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n",
				  i, info->name);

	/* Collect data returned by the XPU */
	for (i = 0; i < MAX_MB_ARGUMENTS; i++)
		data[i] = cx18_readl(cx, &mb->args[i]);
	err = cx18_readl(cx, &mb->error);
	mutex_unlock(mb_lock);

	/*
	 * Wait for XPU to perform extra actions for the caller in some cases.
	 * e.g. CX18_CPU_DE_RELEASE_MDL will cause the CPU to send all buffers
	 * back in a burst shortly thereafter
	 */
	if (info->flags & API_SLOW)
		cx18_msleep_timeout(300, 0);

	if (err)
		CX18_DEBUG_API("mailbox error %08x for command %s\n", err,
			       info->name);
	return err ? -EIO : 0;
}

int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
	return cx18_api_call(cx, cmd, args, data);
}

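/*
 * Push the current DNR settings to the firmware as three
 * CX18_CPU_SET_FILTER_PARAM calls: spatial strength (param 1), temporal
 * strength (param 0), and the remaining filter_mode bits (param 2).
 */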
static int cx18_set_filter_param(struct cx18_stream *s)
{
	struct cx18 *cx = s->cx;
	u32 mode;
	int ret;

	mode = (cx->filter_mode & 1) ? 2 : (cx->spatial_strength ? 1 : 0);
	ret = cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
			s->handle, 1, mode, cx->spatial_strength);
	mode = (cx->filter_mode & 2) ? 2 : (cx->temporal_strength ? 1 : 0);
	ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
			s->handle, 0, mode, cx->temporal_strength);
	ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
			s->handle, 2, cx->filter_mode >> 2, 0);
	return ret;
}

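/*
 * cx2341x compatibility shim: translate CX2341X_ENC_* mailbox commands from
 * the common cx2341x module into the corresponding CX18_CPU_* API calls on
 * the MPEG encoding stream's task handle.
 */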
int cx18_api_func(void *priv, u32 cmd, int in, int out,
		  u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct cx18 *cx = priv;
	struct cx18_stream *s = &cx->streams[CX18_ENC_STREAM_TYPE_MPG];

	switch (cmd) {
	case CX2341X_ENC_SET_OUTPUT_PORT:
		return 0;
	case CX2341X_ENC_SET_FRAME_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_IN, 6,
				 s->handle, 0, 0, 0, 0, data[0]);
	case CX2341X_ENC_SET_FRAME_SIZE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RESOLUTION, 3,
				 s->handle, data[1], data[0]);
	case CX2341X_ENC_SET_STREAM_TYPE:
		return cx18_vapi(cx, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_ASPECT_RATIO:
		return cx18_vapi(cx, CX18_CPU_SET_ASPECT_RATIO, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_GOP_PROPERTIES:
		return cx18_vapi(cx, CX18_CPU_SET_GOP_STRUCTURE, 3,
				 s->handle, data[0], data[1]);
	case CX2341X_ENC_SET_GOP_CLOSURE:
		return 0;
	case CX2341X_ENC_SET_AUDIO_PROPERTIES:
		return cx18_vapi(cx, CX18_CPU_SET_AUDIO_PARAMETERS, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_MUTE_AUDIO:
		return cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_BIT_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RATE, 5,
				 s->handle, data[0], data[1], data[2], data[3]);
	case CX2341X_ENC_MUTE_VIDEO:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_FRAME_DROP_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_SKIP_INPUT_FRAME, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_MISC:
		return cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 4,
				 s->handle, data[0], data[1], data[2]);
	case CX2341X_ENC_SET_DNR_FILTER_MODE:
		cx->filter_mode = (data[0] & 3) | (data[1] << 2);
		return cx18_set_filter_param(s);
	case CX2341X_ENC_SET_DNR_FILTER_PROPS:
		cx->spatial_strength = data[0];
		cx->temporal_strength = data[1];
		return cx18_set_filter_param(s);
	case CX2341X_ENC_SET_SPATIAL_FILTER_TYPE:
		return cx18_vapi(cx, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 3,
				 s->handle, data[0], data[1]);
	case CX2341X_ENC_SET_CORING_LEVELS:
		return cx18_vapi(cx, CX18_CPU_SET_MEDIAN_CORING, 5,
				 s->handle, data[0], data[1], data[2], data[3]);
	}
	CX18_WARN("Unknown cmd %x\n", cmd);
	return 0;
}

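/*
 * Like cx18_vapi(), but the caller supplies the data[] array, so the
 * argument values written back by the firmware remain available afterwards.
 */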
int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS],
		     u32 cmd, int args, ...)
{
	va_list ap;
	int i;

	va_start(ap, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(ap, u32);
	va_end(ap);
	return cx18_api(cx, cmd, args, data);
}

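/*
 * Convenience varargs wrapper around cx18_api() for commands whose returned
 * arguments are not needed, e.g.:
 *
 *	cx18_vapi(cx, CX18_CPU_SET_ASPECT_RATIO, 2, s->handle, data[0]);
 *
 * At most MAX_MB_ARGUMENTS arguments are passed; any extras are ignored.
 */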
int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...)
{
	u32 data[MAX_MB_ARGUMENTS];
	va_list ap;
	int i;

	if (cx == NULL) {
		CX18_ERR("cx == NULL (cmd=%x)\n", cmd);
		return 0;
	}
	if (args > MAX_MB_ARGUMENTS) {
		CX18_ERR("args too big (cmd=%x)\n", cmd);
		args = MAX_MB_ARGUMENTS;
	}
	va_start(ap, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(ap, u32);
	va_end(ap);
	return cx18_api(cx, cmd, args, data);
}