@@ -158,14 +158,14 @@
  */
 #define PROTECT_CTX(c, f) \
 	do {  \
-		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, current->pid)); \
+		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
 		spin_lock_irqsave(&(c)->ctx_lock, f); \
-		DPRINT(("spinlocked ctx %p by [%d]\n", c, current->pid)); \
+		DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
 	} while(0)
 
 #define UNPROTECT_CTX(c, f) \
 	do { \
-		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, current->pid)); \
+		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
 		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
 	} while(0)
 
@@ -227,12 +227,12 @@
 #ifdef PFM_DEBUGGING
 #define DPRINT(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
+		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
 	} while (0)
 
 #define DPRINT_ovfl(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
+		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
 	} while (0)
 #endif
 
@@ -913,7 +913,7 @@ pfm_mask_monitoring(struct task_struct *task)
 	unsigned long mask, val, ovfl_mask;
 	int i;
 
-	DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));
+	DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
 
 	ovfl_mask = pmu_conf->ovfl_val;
 	/*
@@ -992,12 +992,12 @@ pfm_restore_monitoring(struct task_struct *task)
 	ovfl_mask = pmu_conf->ovfl_val;
 
 	if (task != current) {
-		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid);
+		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
 		return;
 	}
 	if (ctx->ctx_state != PFM_CTX_MASKED) {
 		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
-			task->pid, current->pid, ctx->ctx_state);
+			task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
 		return;
 	}
 	psr = pfm_get_psr();
@@ -1051,7 +1051,8 @@ pfm_restore_monitoring(struct task_struct *task)
 		if ((mask & 0x1) == 0UL) continue;
 		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
 		ia64_set_pmc(i, ctx->th_pmcs[i]);
-		DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, ctx->th_pmcs[i]));
+		DPRINT(("[%d] pmc[%d]=0x%lx\n",
+			task_pid_nr(task), i, ctx->th_pmcs[i]));
 	}
 	ia64_srlz_d();
 
@@ -1370,7 +1371,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 
 error_conflict:
 	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
-  		pfm_sessions.pfs_sys_session[cpu]->pid,
+  		task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
 		cpu));
 abort:
 	UNLOCK_PFS(flags);
@@ -1442,7 +1443,7 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz
 
 	/* sanity checks */
 	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
-		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm);
+		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
 		return -EINVAL;
 	}
 
@@ -1459,7 +1460,7 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz
 
 	up_write(&task->mm->mmap_sem);
 	if (r !=0) {
-		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size);
+		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
 	}
 
 	DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
@@ -1501,7 +1502,7 @@ pfm_free_smpl_buffer(pfm_context_t *ctx)
 	return 0;
 
 invalid_free:
-	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid);
+	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
 	return -EINVAL;
 }
 #endif
@@ -1547,13 +1548,13 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 	unsigned long flags;
 	DECLARE_WAITQUEUE(wait, current);
 	if (PFM_IS_FILE(filp) == 0) {
-		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
 		return -EINVAL;
 	}
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EINVAL;
 	}
 
@@ -1607,7 +1608,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 
 		PROTECT_CTX(ctx, flags);
 	}
-	DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret));
+	DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
 
@@ -1616,7 +1617,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 	ret = -EINVAL;
 	msg = pfm_get_next_msg(ctx);
 	if (msg == NULL) {
-		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid);
+		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
 		goto abort_locked;
 	}
 
@@ -1647,13 +1648,13 @@ pfm_poll(struct file *filp, poll_table * wait)
 	unsigned int mask = 0;
 
 	if (PFM_IS_FILE(filp) == 0) {
-		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
 		return 0;
 	}
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
 		return 0;
 	}
 
@@ -1692,7 +1693,7 @@ pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
 	ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
 
 	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
-		current->pid,
+		task_pid_nr(current),
 		fd,
 		on,
 		ctx->ctx_async_queue, ret));
@@ -1707,13 +1708,13 @@ pfm_fasync(int fd, struct file *filp, int on)
 	int ret;
 
 	if (PFM_IS_FILE(filp) == 0) {
-		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
 		return -EBADF;
 	}
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
 	}
 	/*
@@ -1759,7 +1760,7 @@ pfm_syswide_force_stop(void *info)
 	if (owner != ctx->ctx_task) {
 		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
 			smp_processor_id(),
-			owner->pid, ctx->ctx_task->pid);
+			task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
 		return;
 	}
 	if (GET_PMU_CTX() != ctx) {
@@ -1769,7 +1770,7 @@ pfm_syswide_force_stop(void *info)
 		return;
 	}
 
-	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid));
+	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
 	/*
 	 * the context is already protected in pfm_close(), we simply
 	 * need to mask interrupts to avoid a PMU interrupt race on
@@ -1821,7 +1822,7 @@ pfm_flush(struct file *filp, fl_owner_t id)
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
 	}
 
@@ -1969,7 +1970,7 @@ pfm_close(struct inode *inode, struct file *filp)
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
 	}
 
@@ -2066,7 +2067,7 @@ pfm_close(struct inode *inode, struct file *filp)
 		 */
 		ctx->ctx_state = PFM_CTX_ZOMBIE;
 
-		DPRINT(("zombie ctx for [%d]\n", task->pid));
+		DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
 		/*
 		 * cannot free the context on the spot. deferred until
 		 * the task notices the ZOMBIE state
@@ -2472,7 +2473,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t
 	/* invoke and lock buffer format, if found */
 	fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
 	if (fmt == NULL) {
-		DPRINT(("[%d] cannot find buffer format\n", task->pid));
+		DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
 		return -EINVAL;
 	}
 
@@ -2483,7 +2484,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t
 
 	ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
 
-	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret));
+	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
 
 	if (ret) goto error;
 
@@ -2605,23 +2606,23 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	 * no kernel task or task not owner by caller
 	 */
 	if (task->mm == NULL) {
-		DPRINT(("task [%d] has not memory context (kernel thread)\n", task->pid));
+		DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task)));
 		return -EPERM;
 	}
 	if (pfm_bad_permissions(task)) {
-		DPRINT(("no permission to attach to [%d]\n", task->pid));
+		DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
 		return -EPERM;
 	}
 	/*
 	 * cannot block in self-monitoring mode
 	 */
 	if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
-		DPRINT(("cannot load a blocking context on self for [%d]\n", task->pid));
+		DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
 		return -EINVAL;
 	}
 
 	if (task->exit_state == EXIT_ZOMBIE) {
-		DPRINT(("cannot attach to zombie task [%d]\n", task->pid));
+		DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
 		return -EBUSY;
 	}
 
@@ -2631,7 +2632,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	if (task == current) return 0;
 
 	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
-		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
+		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
 		return -EBUSY;
 	}
 	/*
@@ -3512,7 +3513,7 @@ pfm_use_debug_registers(struct task_struct *task)
 
 	if (pmu_conf->use_rr_dbregs == 0) return 0;
 
-	DPRINT(("called for [%d]\n", task->pid));
+	DPRINT(("called for [%d]\n", task_pid_nr(task)));
 
 	/*
 	 * do it only once
@@ -3543,7 +3544,7 @@ pfm_use_debug_registers(struct task_struct *task)
 	DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
 		  pfm_sessions.pfs_ptrace_use_dbregs,
 		  pfm_sessions.pfs_sys_use_dbregs,
-		  task->pid, ret));
+		  task_pid_nr(task), ret));
 
 	UNLOCK_PFS(flags);
 
@@ -3568,7 +3569,7 @@ pfm_release_debug_registers(struct task_struct *task)
 
 	LOCK_PFS(flags);
 	if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
-		printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
+		printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
 		ret = -1;
 	}  else {
 		pfm_sessions.pfs_ptrace_use_dbregs--;
@@ -3620,7 +3621,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 	/* sanity check */
 	if (unlikely(task == NULL)) {
-		printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid);
+		printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
 		return -EINVAL;
 	}
 
@@ -3629,7 +3630,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		fmt = ctx->ctx_buf_fmt;
 
 		DPRINT(("restarting self %d ovfl=0x%lx\n",
-			task->pid,
+			task_pid_nr(task),
 			ctx->ctx_ovfl_regs[0]));
 
 		if (CTX_HAS_SMPL(ctx)) {
@@ -3653,11 +3654,11 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 			pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
 
 		if (rst_ctrl.bits.mask_monitoring == 0) {
-			DPRINT(("resuming monitoring for [%d]\n", task->pid));
+			DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
 
 			if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
 		} else {
-			DPRINT(("keeping monitoring stopped for [%d]\n", task->pid));
+			DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
 
 			// cannot use pfm_stop_monitoring(task, regs);
 		}
@@ -3714,10 +3715,10 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 * "self-monitoring".
 	 */
 	if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
-		DPRINT(("unblocking [%d] \n", task->pid));
+		DPRINT(("unblocking [%d] \n", task_pid_nr(task)));
 		complete(&ctx->ctx_restart_done);
 	} else {
-		DPRINT(("[%d] armed exit trap\n", task->pid));
+		DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
 
 		ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
 
@@ -3805,7 +3806,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
 	 * don't bother if we are loaded and task is being debugged
 	 */
 	if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
-		DPRINT(("debug registers already in use for [%d]\n", task->pid));
+		DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
 		return -EBUSY;
 	}
 
@@ -3846,7 +3847,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
 	 * is shared by all processes running on it
 	 */
 	if (first_time && can_access_pmu) {
-		DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid));
+		DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
 		for (i=0; i < pmu_conf->num_ibrs; i++) {
 			ia64_set_ibr(i, 0UL);
 			ia64_dv_serialize_instruction();
@@ -4035,7 +4036,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		return -EBUSY;
 	}
 	DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
-		PFM_CTX_TASK(ctx)->pid,
+		task_pid_nr(PFM_CTX_TASK(ctx)),
 		state,
 		is_system));
 	/*
@@ -4093,7 +4094,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		 * monitoring disabled in kernel at next reschedule
 		 */
 		ctx->ctx_saved_psr_up = 0;
-		DPRINT(("task=[%d]\n", task->pid));
+		DPRINT(("task=[%d]\n", task_pid_nr(task)));
 	}
 	return 0;
 }
@@ -4298,11 +4299,12 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 	if (is_system) {
 		if (pfm_sessions.pfs_ptrace_use_dbregs) {
-			DPRINT(("cannot load [%d] dbregs in use\n", task->pid));
+			DPRINT(("cannot load [%d] dbregs in use\n",
+				  task_pid_nr(task)));
 			ret = -EBUSY;
 		} else {
 			pfm_sessions.pfs_sys_use_dbregs++;
-			DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs));
+			DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
 			set_dbregs = 1;
 		}
 	}
@@ -4394,7 +4396,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 		/* allow user level control */
 		ia64_psr(regs)->sp = 0;
-		DPRINT(("clearing psr.sp for [%d]\n", task->pid));
+		DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
 
 		SET_LAST_CPU(ctx, smp_processor_id());
 		INC_ACTIVATION();
@@ -4429,7 +4431,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		 */
 		SET_PMU_OWNER(task, ctx);
 
-		DPRINT(("context loaded on PMU for [%d]\n", task->pid));
+		DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
 	} else {
 		/*
 		 * when not current, task MUST be stopped, so this is safe
@@ -4493,7 +4495,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	int prev_state, is_system;
 	int ret;
 
-	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
+	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
 
 	prev_state = ctx->ctx_state;
 	is_system  = ctx->ctx_fl_system;
@@ -4568,7 +4570,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 		 */
 		ia64_psr(regs)->sp = 1;
 
-		DPRINT(("setting psr.sp for [%d]\n", task->pid));
+		DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
 	}
 	/*
 	 * save PMDs to context
@@ -4608,7 +4610,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	ctx->ctx_fl_can_restart  = 0;
 	ctx->ctx_fl_going_zombie = 0;
 
-	DPRINT(("disconnected [%d] from context\n", task->pid));
+	DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
 
 	return 0;
 }
@@ -4631,7 +4633,7 @@ pfm_exit_thread(struct task_struct *task)
 
 	PROTECT_CTX(ctx, flags);
 
-	DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid));
+	DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
 
 	state = ctx->ctx_state;
 	switch(state) {
@@ -4640,13 +4642,13 @@ pfm_exit_thread(struct task_struct *task)
 		 * only comes to this function if pfm_context is not NULL, i.e., cannot
 		 * be in unloaded state
 		 */
-		printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
+		printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
 		break;
 	case PFM_CTX_LOADED:
 	case PFM_CTX_MASKED:
 		ret = pfm_context_unload(ctx, NULL, 0, regs);
 		if (ret) {
-			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
+			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
 		}
 		DPRINT(("ctx unloaded for current state was %d\n", state));
 
@@ -4655,12 +4657,12 @@ pfm_exit_thread(struct task_struct *task)
 	case PFM_CTX_ZOMBIE:
 		ret = pfm_context_unload(ctx, NULL, 0, regs);
 		if (ret) {
-			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
+			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
 		}
 		free_ok = 1;
 		break;
 	default:
-		printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state);
+		printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
 		break;
 	}
 	UNPROTECT_CTX(ctx, flags);
@@ -4744,7 +4746,7 @@ recheck:
 	DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
 		ctx->ctx_fd,
 		state,
-		task->pid,
+		task_pid_nr(task),
 		task->state, PFM_CMD_STOPPED(cmd)));
 
 	/*
@@ -4791,7 +4793,7 @@ recheck:
 	 */
 	if (PFM_CMD_STOPPED(cmd)) {
 		if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
-			DPRINT(("[%d] task not in stopped state\n", task->pid));
+			DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
 			return -EBUSY;
 		}
 		/*
@@ -4884,7 +4886,7 @@ restart_args:
 	 * limit abuse to min page size
 	 */
 	if (unlikely(sz > PFM_MAX_ARGSIZE)) {
-		printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", current->pid, sz);
+		printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
 		return -E2BIG;
 	}
 
@@ -5031,11 +5033,11 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
 {
 	int ret;
 
-	DPRINT(("entering for [%d]\n", current->pid));
+	DPRINT(("entering for [%d]\n", task_pid_nr(current)));
 
 	ret = pfm_context_unload(ctx, NULL, 0, regs);
 	if (ret) {
-		printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", current->pid, ret);
+		printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret);
 	}
 
 	/*
@@ -5072,7 +5074,7 @@ pfm_handle_work(void)
 
 	ctx = PFM_GET_CTX(current);
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid);
+		printk(KERN_ERR "perfmon: [%d] has no PFM context\n", task_pid_nr(current));
 		return;
 	}
 
@@ -5269,7 +5271,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 	DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
 		     "used_pmds=0x%lx\n",
 		     pmc0,
-		     task ? task->pid: -1,
+		     task ? task_pid_nr(task): -1,
 		     (regs ? regs->cr_iip : 0),
 		     CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
 		     ctx->ctx_used_pmds[0]));
@@ -5458,7 +5460,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 	}
 
 	DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
-			GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
+			GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
 			PFM_GET_WORK_PENDING(task),
 			ctx->ctx_fl_trap_reason,
 			ovfl_pmds,
@@ -5483,7 +5485,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 sanity_check:
 	printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
 		smp_processor_id(),
-		task ? task->pid : -1,
+		task ? task_pid_nr(task) : -1,
 		pmc0);
 	return;
 
@@ -5516,7 +5518,7 @@ stop_monitoring:
 	 *
 	 * Overall pretty hairy stuff....
 	 */
-	DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task->pid: -1));
+	DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
 	pfm_clear_psr_up();
 	ia64_psr(regs)->up = 0;
 	ia64_psr(regs)->sp = 1;
@@ -5577,13 +5579,13 @@ pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
 
 report_spurious1:
 	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
-		this_cpu, task->pid);
+		this_cpu, task_pid_nr(task));
 	pfm_unfreeze_pmu();
 	return -1;
 report_spurious2:
 	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
 		this_cpu,
-		task->pid);
+		task_pid_nr(task));
 	pfm_unfreeze_pmu();
 	return -1;
 }
@@ -5870,7 +5872,8 @@ pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
 	ia64_psr(regs)->sp = 1;
 
 	if (GET_PMU_OWNER() == task) {
-		DPRINT(("cleared ownership for [%d]\n", ctx->ctx_task->pid));
+		DPRINT(("cleared ownership for [%d]\n",
+			task_pid_nr(ctx->ctx_task)));
 		SET_PMU_OWNER(NULL, NULL);
 	}
 
@@ -5882,7 +5885,7 @@ pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
 		task->thread.pfm_context  = NULL;
 		task->thread.flags       &= ~IA64_THREAD_PM_VALID;
 
-		DPRINT(("force cleanup for [%d]\n", task->pid));
+		DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
 	}
 
 
@@ -6426,7 +6429,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
 
 		if (PMD_IS_COUNTING(i)) {
 			DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
-				task->pid,
+				task_pid_nr(task),
 				i,
 				ctx->ctx_pmds[i].val,
 				val & ovfl_val));
@@ -6448,11 +6451,11 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
 			 */
 			if (pmc0 & (1UL << i)) {
 				val += 1 + ovfl_val;
-				DPRINT(("[%d] pmd[%d] overflowed\n", task->pid, i));
+				DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
 			}
 		}
 
-		DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val));
+		DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
 
 		if (is_self) ctx->th_pmds[i] = pmd_val;
 
@@ -6793,14 +6796,14 @@ dump_pmu_state(const char *from)
 	printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
 		this_cpu,
 		from,
-		current->pid,
+		task_pid_nr(current),
 		regs->cr_iip,
 		current->comm);
 
 	task = GET_PMU_OWNER();
 	ctx  = GET_PMU_CTX();
 
-	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task->pid : -1, ctx);
+	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
 
 	psr = pfm_get_psr();
 
@@ -6848,7 +6851,7 @@ pfm_inherit(struct task_struct *task, struct pt_regs *regs)
 {
 	struct thread_struct *thread;
 
-	DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task->pid));
+	DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
 
 	thread = &task->thread;
 
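
Every hunk above is the same mechanical substitution: task->pid and
current->pid replaced by task_pid_nr(task) and task_pid_nr(current). With PID
namespaces, reading ->pid directly is ambiguous; task_pid_nr() makes it
explicit that these diagnostics report the global pid as seen from the initial
namespace. A minimal sketch of what the helpers distinguish, assuming only
<linux/sched.h>; pid_report() is a hypothetical example, not code from this
file, and task_pid_vnr() is the namespace-relative counterpart:

#include <linux/kernel.h>	/* printk() */
#include <linux/sched.h>	/* struct task_struct, task_pid_nr(), task_pid_vnr() */

static void pid_report(struct task_struct *task)
{
	/* Global pid: unique system-wide, as seen from the initial pid
	 * namespace. This is what the printk/DPRINT call sites above want. */
	pid_t global = task_pid_nr(task);

	/* Virtual pid: what the task sees inside its own pid namespace,
	 * e.g. 1 for the first process of a container. */
	pid_t local = task_pid_vnr(task);

	printk(KERN_INFO "%s: global pid %d, namespace-local pid %d\n",
	       task->comm, global, local);
}

For a task outside any child pid namespace the two values coincide, so the
conversion is behaviour-neutral here; the point is to keep every caller going
through the namespace-aware helpers.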