@@ -55,49 +55,46 @@ int bridge_msg_create(struct msg_mgr **msg_man,
struct io_mgr *hio_mgr;
int status = 0;

- if (!msg_man || !msg_callback || !hdev_obj) {
- status = -EFAULT;
- goto func_end;
- }
+ if (!msg_man || !msg_callback || !hdev_obj)
+ return -EFAULT;
+
dev_get_io_mgr(hdev_obj, &hio_mgr);
- if (!hio_mgr) {
- status = -EFAULT;
- goto func_end;
- }
+ if (!hio_mgr)
+ return -EFAULT;
+
*msg_man = NULL;
/* Allocate msg_ctrl manager object */
msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL);
+ if (!msg_mgr_obj)
+ return -ENOMEM;

- if (msg_mgr_obj) {
- msg_mgr_obj->on_exit = msg_callback;
- msg_mgr_obj->hio_mgr = hio_mgr;
- /* List of MSG_QUEUEs */
- INIT_LIST_HEAD(&msg_mgr_obj->queue_list);
- /* Queues of message frames for messages to the DSP. Message
- * frames will only be added to the free queue when a
- * msg_queue object is created. */
- INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list);
- INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list);
- spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
-
- /* Create an event to be used by bridge_msg_put() in waiting
- * for an available free frame from the message manager. */
- msg_mgr_obj->sync_event =
- kzalloc(sizeof(struct sync_object), GFP_KERNEL);
- if (!msg_mgr_obj->sync_event)
- status = -ENOMEM;
- else
- sync_init_event(msg_mgr_obj->sync_event);
-
- if (!status)
- *msg_man = msg_mgr_obj;
- else
- delete_msg_mgr(msg_mgr_obj);
-
- } else {
- status = -ENOMEM;
+ msg_mgr_obj->on_exit = msg_callback;
+ msg_mgr_obj->hio_mgr = hio_mgr;
+ /* List of MSG_QUEUEs */
+ INIT_LIST_HEAD(&msg_mgr_obj->queue_list);
+ /*
+ * Queues of message frames for messages to the DSP. Message
+ * frames will only be added to the free queue when a
+ * msg_queue object is created.
+ */
+ INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list);
+ INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list);
+ spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
+
+ /*
+ * Create an event to be used by bridge_msg_put() in waiting
+ * for an available free frame from the message manager.
+ */
+ msg_mgr_obj->sync_event =
+ kzalloc(sizeof(struct sync_object), GFP_KERNEL);
+ if (!msg_mgr_obj->sync_event) {
+ kfree(msg_mgr_obj);
+ return -ENOMEM;
}
-func_end:
+ sync_init_event(msg_mgr_obj->sync_event);
+
+ *msg_man = msg_mgr_obj;
+
return status;
}
@@ -106,8 +103,7 @@ func_end:
* Create a msg_queue for sending/receiving messages to/from a node
* on the DSP.
*/
-int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
- struct msg_queue **msgq,
+int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, struct msg_queue **msgq,
u32 msgq_id, u32 max_msgs, void *arg)
{
u32 i;
@@ -115,18 +111,15 @@ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
struct msg_queue *msg_q;
int status = 0;

- if (!hmsg_mgr || msgq == NULL) {
- status = -EFAULT;
- goto func_end;
- }
+ if (!hmsg_mgr || msgq == NULL)
+ return -EFAULT;

*msgq = NULL;
/* Allocate msg_queue object */
msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
- if (!msg_q) {
- status = -ENOMEM;
- goto func_end;
- }
+ if (!msg_q)
+ return -ENOMEM;
+
msg_q->max_msgs = max_msgs;
msg_q->hmsg_mgr = hmsg_mgr;
msg_q->arg = arg; /* Node handle */
@@ -137,78 +130,68 @@ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,

/* Create event that will be signalled when a message from
* the DSP is available. */
- if (!status) {
- msg_q->sync_event = kzalloc(sizeof(struct sync_object),
- GFP_KERNEL);
- if (msg_q->sync_event)
- sync_init_event(msg_q->sync_event);
- else
- status = -ENOMEM;
+ msg_q->sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
+ if (!msg_q->sync_event) {
+ status = -ENOMEM;
+ goto out_err;
+
}
+ sync_init_event(msg_q->sync_event);

/* Create a notification list for message ready notification. */
- if (!status) {
- msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
- GFP_KERNEL);
- if (msg_q->ntfy_obj)
- ntfy_init(msg_q->ntfy_obj);
- else
- status = -ENOMEM;
+ msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
+ if (!msg_q->ntfy_obj) {
+ status = -ENOMEM;
+ goto out_err;
}
+ ntfy_init(msg_q->ntfy_obj);

/* Create events that will be used to synchronize cleanup
* when the object is deleted. sync_done will be set to
* unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
* will be set by the unblocked thread to signal that it
* is unblocked and will no longer reference the object. */
- if (!status) {
- msg_q->sync_done = kzalloc(sizeof(struct sync_object),
- GFP_KERNEL);
- if (msg_q->sync_done)
- sync_init_event(msg_q->sync_done);
- else
- status = -ENOMEM;
+ msg_q->sync_done = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
+ if (!msg_q->sync_done) {
+ status = -ENOMEM;
+ goto out_err;
}
+ sync_init_event(msg_q->sync_done);

- if (!status) {
- msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object),
- GFP_KERNEL);
- if (msg_q->sync_done_ack)
- sync_init_event(msg_q->sync_done_ack);
- else
- status = -ENOMEM;
+ msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
+ if (!msg_q->sync_done_ack) {
+ status = -ENOMEM;
+ goto out_err;
}
+ sync_init_event(msg_q->sync_done_ack);
- if (!status) {
- /* Enter critical section */
- spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
- /* Initialize message frames and put in appropriate queues */
- for (i = 0; i < max_msgs && !status; i++) {
- status = add_new_msg(&hmsg_mgr->msg_free_list);
- if (!status) {
- num_allocated++;
- status = add_new_msg(&msg_q->msg_free_list);
- }
- }
- if (status) {
- /* Stay inside CS to prevent others from taking any
- * of the newly allocated message frames. */
- delete_msg_queue(msg_q, num_allocated);
- } else {
- list_add_tail(&msg_q->list_elem,
- &hmsg_mgr->queue_list);
- *msgq = msg_q;
- /* Signal that free frames are now available */
- if (!list_empty(&hmsg_mgr->msg_free_list))
- sync_set_event(hmsg_mgr->sync_event);
-
+ /* Enter critical section */
+ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+ /* Initialize message frames and put in appropriate queues */
+ for (i = 0; i < max_msgs && !status; i++) {
+ status = add_new_msg(&hmsg_mgr->msg_free_list);
+ if (!status) {
+ num_allocated++;
+ status = add_new_msg(&msg_q->msg_free_list);
}
- /* Exit critical section */
+ }
+ if (status) {
spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
- } else {
- delete_msg_queue(msg_q, 0);
+ goto out_err;
}
-func_end:
+
+ list_add_tail(&msg_q->list_elem, &hmsg_mgr->queue_list);
+ *msgq = msg_q;
+ /* Signal that free frames are now available */
+ if (!list_empty(&hmsg_mgr->msg_free_list))
+ sync_set_event(hmsg_mgr->sync_event);
+
+ /* Exit critical section */
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+
+ return 0;
+out_err:
+ delete_msg_queue(msg_q, num_allocated);
return status;
}
@@ -232,7 +215,7 @@ void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
u32 io_msg_pend;

if (!msg_queue_obj || !msg_queue_obj->hmsg_mgr)
- goto func_end;
+ return;

hmsg_mgr = msg_queue_obj->hmsg_mgr;
msg_queue_obj->done = true;
@@ -252,10 +235,7 @@ void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
if (list_empty(&hmsg_mgr->msg_free_list))
sync_reset_event(hmsg_mgr->sync_event);
-
spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
-func_end:
- return;
}

/*
@@ -267,19 +247,15 @@ int bridge_msg_get(struct msg_queue *msg_queue_obj,
{
struct msg_frame *msg_frame_obj;
struct msg_mgr *hmsg_mgr;
- bool got_msg = false;
struct sync_object *syncs[2];
u32 index;
int status = 0;

- if (!msg_queue_obj || pmsg == NULL) {
- status = -ENOMEM;
- goto func_end;
- }
+ if (!msg_queue_obj || pmsg == NULL)
+ return -ENOMEM;

hmsg_mgr = msg_queue_obj->hmsg_mgr;

- /* Enter critical section */
spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
/* If a message is already there, get it */
if (!list_empty(&msg_queue_obj->msg_used_list)) {
@@ -291,59 +267,54 @@ int bridge_msg_get(struct msg_queue *msg_queue_obj,
&msg_queue_obj->msg_free_list);
if (list_empty(&msg_queue_obj->msg_used_list))
sync_reset_event(msg_queue_obj->sync_event);
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ return 0;
+ }

- got_msg = true;
- } else {
- if (msg_queue_obj->done)
- status = -EPERM;
- else
- msg_queue_obj->io_msg_pend++;
-
+ if (msg_queue_obj->done) {
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ return -EPERM;
}
- /* Exit critical section */
+ msg_queue_obj->io_msg_pend++;
spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
- if (!status && !got_msg) {
- /* Wait til message is available, timeout, or done. We don't
- * have to schedule the DPC, since the DSP will send messages
- * when they are available. */
- syncs[0] = msg_queue_obj->sync_event;
- syncs[1] = msg_queue_obj->sync_done;
- status = sync_wait_on_multiple_events(syncs, 2, utimeout,
- &index);
- /* Enter critical section */
- spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
- if (msg_queue_obj->done) {
- msg_queue_obj->io_msg_pend--;
- /* Exit critical section */
- spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
- /* Signal that we're not going to access msg_queue_obj
- * anymore, so it can be deleted. */
- (void)sync_set_event(msg_queue_obj->sync_done_ack);
- status = -EPERM;
- } else {
- if (!status && !list_empty(&msg_queue_obj->
- msg_used_list)) {
- /* Get msg from used list */
- msg_frame_obj = list_first_entry(
- &msg_queue_obj->msg_used_list,
- struct msg_frame, list_elem);
- list_del(&msg_frame_obj->list_elem);
- /* Copy message into pmsg and put frame on the
- * free list */
- *pmsg = msg_frame_obj->msg_data.msg;
- list_add_tail(&msg_frame_obj->list_elem,
- &msg_queue_obj->msg_free_list);
- }
- msg_queue_obj->io_msg_pend--;
- /* Reset the event if there are still queued messages */
- if (!list_empty(&msg_queue_obj->msg_used_list))
- sync_set_event(msg_queue_obj->sync_event);
-
- /* Exit critical section */
- spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
- }
+
+ /*
+ * Wait til message is available, timeout, or done. We don't
+ * have to schedule the DPC, since the DSP will send messages
+ * when they are available.
+ */
+ syncs[0] = msg_queue_obj->sync_event;
+ syncs[1] = msg_queue_obj->sync_done;
+ status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index);
+
+ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+ if (msg_queue_obj->done) {
+ msg_queue_obj->io_msg_pend--;
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ /*
+ * Signal that we're not going to access msg_queue_obj
+ * anymore, so it can be deleted.
+ */
+ sync_set_event(msg_queue_obj->sync_done_ack);
+ return -EPERM;
}
-func_end:
+ if (!status && !list_empty(&msg_queue_obj->msg_used_list)) {
+ /* Get msg from used list */
+ msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list,
+ struct msg_frame, list_elem);
+ list_del(&msg_frame_obj->list_elem);
+ /* Copy message into pmsg and put frame on the free list */
+ *pmsg = msg_frame_obj->msg_data.msg;
+ list_add_tail(&msg_frame_obj->list_elem,
+ &msg_queue_obj->msg_free_list);
+ }
+ msg_queue_obj->io_msg_pend--;
+ /* Reset the event if there are still queued messages */
+ if (!list_empty(&msg_queue_obj->msg_used_list))
+ sync_set_event(msg_queue_obj->sync_event);
+
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+
return status;
}
@@ -356,15 +327,13 @@ int bridge_msg_put(struct msg_queue *msg_queue_obj,
{
struct msg_frame *msg_frame_obj;
struct msg_mgr *hmsg_mgr;
- bool put_msg = false;
struct sync_object *syncs[2];
u32 index;
- int status = 0;
+ int status;
+
+ if (!msg_queue_obj || !pmsg || !msg_queue_obj->hmsg_mgr)
+ return -EFAULT;

- if (!msg_queue_obj || !pmsg || !msg_queue_obj->hmsg_mgr) {
- status = -ENOMEM;
- goto func_end;
- }
hmsg_mgr = msg_queue_obj->hmsg_mgr;

spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
@@ -380,7 +349,7 @@ int bridge_msg_put(struct msg_queue *msg_queue_obj,
list_add_tail(&msg_frame_obj->list_elem,
&hmsg_mgr->msg_used_list);
hmsg_mgr->msgs_pending++;
- put_msg = true;
+
if (list_empty(&hmsg_mgr->msg_free_list))
sync_reset_event(hmsg_mgr->sync_event);
@@ -388,70 +357,70 @@ int bridge_msg_put(struct msg_queue *msg_queue_obj,
spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
/* Schedule a DPC, to do the actual data transfer: */
iosm_schedule(hmsg_mgr->hio_mgr);
- } else {
- if (msg_queue_obj->done)
- status = -EPERM;
- else
- msg_queue_obj->io_msg_pend++;
+ return 0;
+ }

+ if (msg_queue_obj->done) {
spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ return -EPERM;
}
- if (!status && !put_msg) {
- /* Wait til a free message frame is available, timeout,
- * or done */
- syncs[0] = hmsg_mgr->sync_event;
- syncs[1] = msg_queue_obj->sync_done;
- status = sync_wait_on_multiple_events(syncs, 2, utimeout,
- &index);
- if (status)
- goto func_end;
- /* Enter critical section */
- spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
- if (msg_queue_obj->done) {
- msg_queue_obj->io_msg_pend--;
- /* Exit critical section */
- spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
- /* Signal that we're not going to access msg_queue_obj
- * anymore, so it can be deleted. */
- (void)sync_set_event(msg_queue_obj->sync_done_ack);
- status = -EPERM;
- } else {
- if (list_empty(&hmsg_mgr->msg_free_list)) {
- status = -EFAULT;
- goto func_cont;
- }
- /* Get msg from free list */
- msg_frame_obj = list_first_entry(
- &hmsg_mgr->msg_free_list,
- struct msg_frame, list_elem);
- list_del(&msg_frame_obj->list_elem);
- /*
- * Copy message into pmsg and put frame on the
- * used list.
- */
- msg_frame_obj->msg_data.msg = *pmsg;
- msg_frame_obj->msg_data.msgq_id =
- msg_queue_obj->msgq_id;
- list_add_tail(&msg_frame_obj->list_elem,
- &hmsg_mgr->msg_used_list);
- hmsg_mgr->msgs_pending++;
- /*
- * Schedule a DPC, to do the actual
- * data transfer.
- */
- iosm_schedule(hmsg_mgr->hio_mgr);
-
- msg_queue_obj->io_msg_pend--;
- /* Reset event if there are still frames available */
- if (!list_empty(&hmsg_mgr->msg_free_list))
- sync_set_event(hmsg_mgr->sync_event);
-func_cont:
- /* Exit critical section */
- spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
- }
+ msg_queue_obj->io_msg_pend++;
+
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+
+ /* Wait til a free message frame is available, timeout, or done */
+ syncs[0] = hmsg_mgr->sync_event;
+ syncs[1] = msg_queue_obj->sync_done;
+ status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index);
+ if (status)
+ return status;
+
+ /* Enter critical section */
+ spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
+ if (msg_queue_obj->done) {
+ msg_queue_obj->io_msg_pend--;
+ /* Exit critical section */
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ /*
+ * Signal that we're not going to access msg_queue_obj
+ * anymore, so it can be deleted.
+ */
+ sync_set_event(msg_queue_obj->sync_done_ack);
+ return -EPERM;
}
-func_end:
- return status;
+
+ if (list_empty(&hmsg_mgr->msg_free_list)) {
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+ return -EFAULT;
+ }
+
+ /* Get msg from free list */
+ msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list,
+ struct msg_frame, list_elem);
+ /*
+ * Copy message into pmsg and put frame on the
+ * used list.
+ */
+ list_del(&msg_frame_obj->list_elem);
+ msg_frame_obj->msg_data.msg = *pmsg;
+ msg_frame_obj->msg_data.msgq_id = msg_queue_obj->msgq_id;
+ list_add_tail(&msg_frame_obj->list_elem, &hmsg_mgr->msg_used_list);
+ hmsg_mgr->msgs_pending++;
+ /*
+ * Schedule a DPC, to do the actual
+ * data transfer.
+ */
+ iosm_schedule(hmsg_mgr->hio_mgr);
+
+ msg_queue_obj->io_msg_pend--;
+ /* Reset event if there are still frames available */
+ if (!list_empty(&hmsg_mgr->msg_free_list))
+ sync_set_event(hmsg_mgr->sync_event);
+
+ /* Exit critical section */
+ spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
+
+ return 0;
}

/*
@@ -518,16 +487,14 @@ void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id)
static int add_new_msg(struct list_head *msg_list)
{
struct msg_frame *pmsg;
- int status = 0;

pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
- if (pmsg != NULL) {
- list_add_tail(&pmsg->list_elem, msg_list);
- } else {
- status = -ENOMEM;
- }
+ if (!pmsg)
+ return -ENOMEM;

- return status;
+ list_add_tail(&pmsg->list_elem, msg_list);
+
+ return 0;
}

/*
@@ -536,17 +503,13 @@ static int add_new_msg(struct list_head *msg_list)
static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
{
if (!hmsg_mgr)
- goto func_end;
+ return;

/* FIXME: free elements from queue_list? */
free_msg_list(&hmsg_mgr->msg_free_list);
free_msg_list(&hmsg_mgr->msg_used_list);
-
kfree(hmsg_mgr->sync_event);
-
kfree(hmsg_mgr);
-func_end:
- return;
}

/*
@@ -559,7 +522,7 @@ static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp)
u32 i;

if (!msg_queue_obj || !msg_queue_obj->hmsg_mgr)
- goto func_end;
+ return;

hmsg_mgr = msg_queue_obj->hmsg_mgr;
@@ -586,9 +549,6 @@ static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp)
kfree(msg_queue_obj->sync_done_ack);

kfree(msg_queue_obj);
-func_end:
- return;
-
}

/*
@@ -599,13 +559,10 @@ static void free_msg_list(struct list_head *msg_list)
struct msg_frame *pmsg, *tmp;

if (!msg_list)
- goto func_end;
+ return;

list_for_each_entry_safe(pmsg, tmp, msg_list, list_elem) {
list_del(&pmsg->list_elem);
kfree(pmsg);
}
-
-func_end:
- return;
}