@@ -131,7 +131,7 @@ static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
  * Functions that run in a work_queue work handling context
  */
 
-static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
+static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
 {
 	u32 handle, mdl_ack_count, id;
 	struct cx18_mailbox *mb;
@@ -213,7 +213,7 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
 		wake_up(&s->waitq);
 }
 
-static void epu_debug(struct cx18 *cx, struct cx18_epu_work_order *order)
+static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order)
 {
 	char *p;
 	char *str = order->str;
@@ -224,7 +224,7 @@ static void epu_debug(struct cx18 *cx, struct cx18_epu_work_order *order)
 		CX18_INFO("FW version: %s\n", p - 1);
 }
 
-static void epu_cmd(struct cx18 *cx, struct cx18_epu_work_order *order)
+static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order)
 {
 	switch (order->rpu) {
 	case CPU:
@@ -253,18 +253,18 @@ static void epu_cmd(struct cx18 *cx, struct cx18_epu_work_order *order)
 }
 
 static
-void free_epu_work_order(struct cx18 *cx, struct cx18_epu_work_order *order)
+void free_in_work_order(struct cx18 *cx, struct cx18_in_work_order *order)
 {
 	atomic_set(&order->pending, 0);
 }
 
-void cx18_epu_work_handler(struct work_struct *work)
+void cx18_in_work_handler(struct work_struct *work)
 {
-	struct cx18_epu_work_order *order =
-	    container_of(work, struct cx18_epu_work_order, work);
+	struct cx18_in_work_order *order =
+	    container_of(work, struct cx18_in_work_order, work);
 	struct cx18 *cx = order->cx;
 	epu_cmd(cx, order);
-	free_epu_work_order(cx, order);
+	free_in_work_order(cx, order);
 }
 
 
@@ -272,7 +272,7 @@ void cx18_epu_work_handler(struct work_struct *work)
  * Functions that run in an interrupt handling context
  */
 
-static void mb_ack_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
+static void mb_ack_irq(struct cx18 *cx, struct cx18_in_work_order *order)
 {
 	struct cx18_mailbox __iomem *ack_mb;
 	u32 ack_irq, req;
@@ -308,7 +308,7 @@ static void mb_ack_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
 	return;
 }
 
-static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
+static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
 {
 	u32 handle, mdl_ack_offset, mdl_ack_count;
 	struct cx18_mailbox *mb;
@@ -334,7 +334,7 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
 }
 
 static
-int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
+int epu_debug_irq(struct cx18 *cx, struct cx18_in_work_order *order)
 {
 	u32 str_offset;
 	char *str = order->str;
@@ -355,7 +355,7 @@ int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
 }
 
 static inline
-int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
+int epu_cmd_irq(struct cx18 *cx, struct cx18_in_work_order *order)
 {
 	int ret = -1;
 
@@ -387,12 +387,12 @@ int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
 }
 
 static inline
-struct cx18_epu_work_order *alloc_epu_work_order_irq(struct cx18 *cx)
+struct cx18_in_work_order *alloc_in_work_order_irq(struct cx18 *cx)
 {
 	int i;
-	struct cx18_epu_work_order *order = NULL;
+	struct cx18_in_work_order *order = NULL;
 
-	for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) {
+	for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++) {
 		/*
 		 * We only need "pending" atomic to inspect its contents,
 		 * and need not do a check and set because:
@@ -401,8 +401,8 @@ struct cx18_epu_work_order *alloc_epu_work_order_irq(struct cx18 *cx)
 		 * 2. "pending" is only set here, and we're serialized because
 		 * we're called in an IRQ handler context.
 		 */
-		if (atomic_read(&cx->epu_work_order[i].pending) == 0) {
-			order = &cx->epu_work_order[i];
+		if (atomic_read(&cx->in_work_order[i].pending) == 0) {
+			order = &cx->in_work_order[i];
 			atomic_set(&order->pending, 1);
 			break;
 		}
@@ -414,7 +414,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
 {
 	struct cx18_mailbox __iomem *mb;
 	struct cx18_mailbox *order_mb;
-	struct cx18_epu_work_order *order;
+	struct cx18_in_work_order *order;
 	int submit;
 
 	switch (rpu) {
@@ -428,7 +428,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
 		return;
 	}
 
-	order = alloc_epu_work_order_irq(cx);
+	order = alloc_in_work_order_irq(cx);
 	if (order == NULL) {
 		CX18_WARN("Unable to find blank work order form to schedule "
 			  "incoming mailbox command processing\n");
@@ -461,7 +461,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
 	 */
 	submit = epu_cmd_irq(cx, order);
 	if (submit > 0) {
-		queue_work(cx->work_queue, &order->work);
+		queue_work(cx->in_work_queue, &order->work);
 	}
 }
 