@@ -35,8 +35,9 @@
 #include <linux/slab.h>
 
 #include "binder.h"
+#include "binder_trace.h"
 
-static DEFINE_MUTEX(binder_lock);
+static DEFINE_MUTEX(binder_main_lock);
 static DEFINE_MUTEX(binder_deferred_lock);
 static DEFINE_MUTEX(binder_mmap_lock);
 
@@ -411,6 +412,19 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
 	return retval;
 }
 
+static inline void binder_lock(const char *tag)
+{
+	trace_binder_lock(tag);
+	mutex_lock(&binder_main_lock);
+	trace_binder_locked(tag);
+}
+
+static inline void binder_unlock(const char *tag)
+{
+	trace_binder_unlock(tag);
+	mutex_unlock(&binder_main_lock);
+}
+
 static void binder_set_nice(long nice)
 {
 	long min_nice;
@@ -537,6 +551,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 	if (end <= start)
 		return 0;
 
+	trace_binder_update_page_range(proc, allocate, start, end);
+
 	if (vma)
 		mm = NULL;
 	else
@@ -1461,6 +1477,9 @@ static void binder_transaction(struct binder_proc *proc,
 	t->code = tr->code;
 	t->flags = tr->flags;
 	t->priority = task_nice(current);
+
+	trace_binder_transaction(reply, t, target_node);
+
 	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
 		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
 	if (t->buffer == NULL) {
@@ -1471,6 +1490,7 @@ static void binder_transaction(struct binder_proc *proc,
 	t->buffer->debug_id = t->debug_id;
 	t->buffer->transaction = t;
 	t->buffer->target_node = target_node;
+	trace_binder_transaction_alloc_buf(t->buffer);
 	if (target_node)
 		binder_inc_node(target_node, 1, 0, NULL);
 
@@ -1543,6 +1563,7 @@ static void binder_transaction(struct binder_proc *proc,
 			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
 				       &thread->todo);
 
+			trace_binder_transaction_node_to_ref(t, node, ref);
 			binder_debug(BINDER_DEBUG_TRANSACTION,
 				     " node %d u%p -> ref %d desc %d\n",
 				     node->debug_id, node->ptr, ref->debug_id,
@@ -1567,6 +1588,7 @@ static void binder_transaction(struct binder_proc *proc,
 			fp->binder = ref->node->ptr;
 			fp->cookie = ref->node->cookie;
 			binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
+			trace_binder_transaction_ref_to_node(t, ref);
 			binder_debug(BINDER_DEBUG_TRANSACTION,
 				     " ref %d desc %d -> node %d u%p\n",
 				     ref->debug_id, ref->desc, ref->node->debug_id,
@@ -1580,6 +1602,8 @@ static void binder_transaction(struct binder_proc *proc,
 			}
 			fp->handle = new_ref->desc;
 			binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
+			trace_binder_transaction_ref_to_ref(t, ref,
+							    new_ref);
 			binder_debug(BINDER_DEBUG_TRANSACTION,
 				     " ref %d desc %d -> ref %d desc %d (node %d)\n",
 				     ref->debug_id, ref->desc, new_ref->debug_id,
@@ -1619,6 +1643,7 @@ static void binder_transaction(struct binder_proc *proc,
 				goto err_get_unused_fd_failed;
 			}
 			task_fd_install(target_proc, target_fd, file);
+			trace_binder_transaction_fd(t, fp->handle, target_fd);
 			binder_debug(BINDER_DEBUG_TRANSACTION,
 				     " fd %ld -> %d\n", fp->handle, target_fd);
 			/* TODO: fput? */
@@ -1667,6 +1692,7 @@ err_binder_new_node_failed:
 err_bad_object_type:
 err_bad_offset:
 err_copy_data_failed:
+	trace_binder_transaction_failed_buffer_release(t->buffer);
 	binder_transaction_buffer_release(target_proc, t->buffer, offp);
 	t->buffer->transaction = NULL;
 	binder_free_buf(target_proc, t->buffer);
@@ -1712,6 +1738,7 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
 		if (get_user(cmd, (uint32_t __user *)ptr))
 			return -EFAULT;
 		ptr += sizeof(uint32_t);
+		trace_binder_command(cmd);
 		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
 			binder_stats.bc[_IOC_NR(cmd)]++;
 			proc->stats.bc[_IOC_NR(cmd)]++;
@@ -1881,6 +1908,7 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
 			else
 				list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
 		}
+		trace_binder_transaction_buffer_release(buffer);
 		binder_transaction_buffer_release(proc, buffer, NULL);
 		binder_free_buf(proc, buffer);
 		break;
@@ -2089,6 +2117,7 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
 void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
 		    uint32_t cmd)
 {
+	trace_binder_return(cmd);
 	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
 		binder_stats.br[_IOC_NR(cmd)]++;
 		proc->stats.br[_IOC_NR(cmd)]++;
@@ -2152,7 +2181,12 @@ retry:
 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
 	if (wait_for_proc_work)
 		proc->ready_threads++;
-	mutex_unlock(&binder_lock);
+
+	binder_unlock(__func__);
+
+	trace_binder_wait_for_work(wait_for_proc_work,
+				   !!thread->transaction_stack,
+				   !list_empty(&thread->todo));
 	if (wait_for_proc_work) {
 		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
 					BINDER_LOOPER_STATE_ENTERED))) {
@@ -2176,7 +2210,9 @@ retry:
 		} else
 			ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
 	}
-	mutex_lock(&binder_lock);
+
+	binder_lock(__func__);
+
 	if (wait_for_proc_work)
 		proc->ready_threads--;
 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
@@ -2367,6 +2403,7 @@ retry:
 			return -EFAULT;
 		ptr += sizeof(tr);
 
+		trace_binder_transaction_received(t);
 		binder_stat_br(proc, thread, cmd);
 		binder_debug(BINDER_DEBUG_TRANSACTION,
 			     "binder: %d:%d %s %d %d:%d, cmd %d"
@@ -2520,12 +2557,14 @@ static unsigned int binder_poll(struct file *filp,
 	struct binder_thread *thread = NULL;
 	int wait_for_proc_work;
 
-	mutex_lock(&binder_lock);
+	binder_lock(__func__);
+
 	thread = binder_get_thread(proc);
 
 	wait_for_proc_work = thread->transaction_stack == NULL &&
 		list_empty(&thread->todo) && thread->return_error == BR_OK;
-	mutex_unlock(&binder_lock);
+
+	binder_unlock(__func__);
 
 	if (wait_for_proc_work) {
 		if (binder_has_proc_work(proc, thread))
@@ -2553,11 +2592,13 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 	/*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
 
+	trace_binder_ioctl(cmd, arg);
+
 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
 	if (ret)
-		return ret;
+		goto err_unlocked;
 
-	mutex_lock(&binder_lock);
+	binder_lock(__func__);
 	thread = binder_get_thread(proc);
 	if (thread == NULL) {
 		ret = -ENOMEM;
@@ -2582,6 +2623,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 		if (bwr.write_size > 0) {
 			ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
+			trace_binder_write_done(ret);
 			if (ret < 0) {
 				bwr.read_consumed = 0;
 				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
@@ -2591,6 +2633,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		}
 		if (bwr.read_size > 0) {
 			ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
+			trace_binder_read_done(ret);
 			if (!list_empty(&proc->todo))
 				wake_up_interruptible(&proc->wait);
 			if (ret < 0) {
@@ -2666,10 +2709,12 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 err:
 	if (thread)
 		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
-	mutex_unlock(&binder_lock);
+	binder_unlock(__func__);
 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
 	if (ret && ret != -ERESTARTSYS)
 		pr_info("binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
+err_unlocked:
+	trace_binder_ioctl_done(ret);
 	return ret;
 }
 
@@ -2815,13 +2860,16 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	INIT_LIST_HEAD(&proc->todo);
 	init_waitqueue_head(&proc->wait);
 	proc->default_priority = task_nice(current);
-	mutex_lock(&binder_lock);
+
+	binder_lock(__func__);
+
 	binder_stats_created(BINDER_STAT_PROC);
 	hlist_add_head(&proc->proc_node, &binder_procs);
 	proc->pid = current->group_leader->pid;
 	INIT_LIST_HEAD(&proc->delivered_death);
 	filp->private_data = proc;
-	mutex_unlock(&binder_lock);
+
+	binder_unlock(__func__);
 
 	if (binder_debugfs_dir_entry_proc) {
 		char strbuf[11];
@@ -3001,7 +3049,7 @@ static void binder_deferred_func(struct work_struct *work)
 
 	int defer;
 	do {
-		mutex_lock(&binder_lock);
+		binder_lock(__func__);
 		mutex_lock(&binder_deferred_lock);
 		if (!hlist_empty(&binder_deferred_list)) {
 			proc = hlist_entry(binder_deferred_list.first,
@@ -3028,7 +3076,7 @@ static void binder_deferred_func(struct work_struct *work)
 		if (defer & BINDER_DEFERRED_RELEASE)
 			binder_deferred_release(proc); /* frees proc */
 
-		mutex_unlock(&binder_lock);
+		binder_unlock(__func__);
 		if (files)
 			put_files_struct(files);
 	} while (proc);
@@ -3369,7 +3417,7 @@ static int binder_state_show(struct seq_file *m, void *unused)
 	int do_lock = !binder_debug_no_lock;
 
 	if (do_lock)
-		mutex_lock(&binder_lock);
+		binder_lock(__func__);
 
 	seq_puts(m, "binder state:\n");
 
@@ -3381,7 +3429,7 @@ static int binder_state_show(struct seq_file *m, void *unused)
 	hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
 		print_binder_proc(m, proc, 1);
 	if (do_lock)
-		mutex_unlock(&binder_lock);
+		binder_unlock(__func__);
 	return 0;
 }
 
@@ -3392,7 +3440,7 @@ static int binder_stats_show(struct seq_file *m, void *unused)
 	int do_lock = !binder_debug_no_lock;
 
 	if (do_lock)
-		mutex_lock(&binder_lock);
+		binder_lock(__func__);
 
 	seq_puts(m, "binder stats:\n");
 
@@ -3401,7 +3449,7 @@ static int binder_stats_show(struct seq_file *m, void *unused)
 	hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
 		print_binder_proc_stats(m, proc);
 	if (do_lock)
-		mutex_unlock(&binder_lock);
+		binder_unlock(__func__);
 	return 0;
 }
 
@@ -3412,13 +3460,13 @@ static int binder_transactions_show(struct seq_file *m, void *unused)
 	int do_lock = !binder_debug_no_lock;
 
 	if (do_lock)
-		mutex_lock(&binder_lock);
+		binder_lock(__func__);
 
 	seq_puts(m, "binder transactions:\n");
 	hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
 		print_binder_proc(m, proc, 0);
 	if (do_lock)
-		mutex_unlock(&binder_lock);
+		binder_unlock(__func__);
 	return 0;
 }
 
@@ -3428,11 +3476,11 @@ static int binder_proc_show(struct seq_file *m, void *unused)
 	int do_lock = !binder_debug_no_lock;
 
 	if (do_lock)
-		mutex_lock(&binder_lock);
+		binder_lock(__func__);
 	seq_puts(m, "binder proc state:\n");
 	print_binder_proc(m, proc, 1);
 	if (do_lock)
-		mutex_unlock(&binder_lock);
+		binder_unlock(__func__);
 	return 0;
 }
 
@@ -3527,4 +3575,7 @@ static int __init binder_init(void)
 
 device_initcall(binder_init);
 
+#define CREATE_TRACE_POINTS
+#include "binder_trace.h"
+
 MODULE_LICENSE("GPL v2");
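
The tracepoints called above (trace_binder_lock, trace_binder_locked, trace_binder_unlock, trace_binder_ioctl, and so on) are declared in binder_trace.h, which is not part of this patch excerpt. For orientation only, below is a minimal, hypothetical sketch of how such a header is typically written with the kernel's tracepoint macros; the event class names and field layouts are assumptions, not the actual binder_trace.h contents.

/*
 * Hypothetical sketch of binder_trace.h (assumption: the real header
 * declares more events and may use different fields).  The lock/unlock
 * events used by binder_lock()/binder_unlock() can share one event class
 * keyed on the caller-supplied tag string.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM binder

#if !defined(_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _BINDER_TRACE_H

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(binder_lock_class,
	TP_PROTO(const char *tag),
	TP_ARGS(tag),
	TP_STRUCT__entry(
		__field(const char *, tag)
	),
	TP_fast_assign(
		__entry->tag = tag;
	),
	TP_printk("tag=%s", __entry->tag)
);

#define DEFINE_BINDER_LOCK_EVENT(name)		\
DEFINE_EVENT(binder_lock_class, name,		\
	TP_PROTO(const char *tag),		\
	TP_ARGS(tag))

DEFINE_BINDER_LOCK_EVENT(binder_lock);
DEFINE_BINDER_LOCK_EVENT(binder_locked);
DEFINE_BINDER_LOCK_EVENT(binder_unlock);

/* Simple value events such as trace_binder_ioctl() follow the same pattern. */
TRACE_EVENT(binder_ioctl,
	TP_PROTO(unsigned int cmd, unsigned long arg),
	TP_ARGS(cmd, arg),
	TP_STRUCT__entry(
		__field(unsigned int, cmd)
		__field(unsigned long, arg)
	),
	TP_fast_assign(
		__entry->cmd = cmd;
		__entry->arg = arg;
	),
	TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
);

#endif /* _BINDER_TRACE_H */

/* This part must stay outside the include guard. */
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE binder_trace
#include <trace/define_trace.h>

The reason the final hunk adds "#define CREATE_TRACE_POINTS" before including binder_trace.h at the bottom of binder.c is that trace/define_trace.h emits the tracepoint definitions only when that macro is set, so the bodies are instantiated exactly once in binder.c while every other inclusion of the header merely declares them.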