[ltt-dev] [patch] add tracepoints to trace activate/deactivate task
Peter Zijlstra
peterz at infradead.org
Mon Dec 8 14:54:10 EST 2008
On Mon, 2008-12-08 at 14:49 -0500, Jason Baron wrote:
> hi,
>
> I thought it would be useful to track when a task is
> 'activated/deactivated'. This case is different from wakeup/wait, in that a
> task can be activated and deactivated when the scheduler re-balances
> tasks, the allowable cpuset changes, or cpu hotplug occurs. Using these
> patches I can more precisely figure out when a task becomes runnable and
> why.
Then I still do not agree with it, because it does not expose the event that
caused the change.
If you want the cpu allowed mask, put a tracepoint there. If you want
migration information (didn't we have that?), then put one there, etc.
> -Jason
>
> Signed-off-by: Jason Baron <jbaron at redhat.com>
>
>
> diff --git a/include/trace/sched.h b/include/trace/sched.h
> index fe4767d..d1fd8a3 100644
> --- a/include/trace/sched.h
> +++ b/include/trace/sched.h
> @@ -4,6 +4,8 @@
> #include <linux/sched.h>
> #include <linux/tracepoint.h>
>
> +struct rq;
> +
> DECLARE_TRACE(sched_kthread_stop,
> TPPROTO(struct task_struct *t),
> TPARGS(t));
> @@ -44,5 +46,13 @@ DECLARE_TRACE(sched_signal_send,
> DECLARE_TRACE(sched_kthread_create,
> TPPROTO(void *fn, int pid),
> TPARGS(fn, pid));
> +DECLARE_TRACE(sched_activate_task,
> + TPPROTO(struct task_struct *p, struct rq *rq),
> + TPARGS(p, rq));
> +DECLARE_TRACE(sched_deactivate_task,
> + TPPROTO(struct task_struct *p, struct rq *rq),
> + TPARGS(p, rq));
> +
> +
>
> #endif
> diff --git a/kernel/sched.c b/kernel/sched.c
> index cc7f048..7b707cf 100644
> --- a/kernel/sched.c
> +++ b/kernel/sched.c
> @@ -122,6 +122,8 @@ DEFINE_TRACE(sched_wakeup);
> DEFINE_TRACE(sched_wakeup_new);
> DEFINE_TRACE(sched_switch);
> DEFINE_TRACE(sched_migrate_task);
> +DEFINE_TRACE(sched_activate_task);
> +DEFINE_TRACE(sched_deactivate_task);
>
> #ifdef CONFIG_SMP
> /*
> @@ -1716,6 +1718,7 @@ static int effective_prio(struct task_struct *p)
> */
> static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
> {
> + trace_sched_activate_task(p, rq);
> if (task_contributes_to_load(p))
> rq->nr_uninterruptible--;
>
> @@ -1728,6 +1731,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
> */
> static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
> {
> + trace_sched_deactivate_task(p, rq);
> if (task_contributes_to_load(p))
> rq->nr_uninterruptible++;
>
> @@ -2420,6 +2424,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
> * Let the scheduling class do new task startup
> * management (if any):
> */
> + trace_sched_activate_task(p, rq);
> p->sched_class->task_new(rq, p);
> inc_nr_running(rq);
> }
> diff --git a/ltt/probes/kernel-trace.c b/ltt/probes/kernel-trace.c
> index e241499..c27e671 100644
> --- a/ltt/probes/kernel-trace.c
> +++ b/ltt/probes/kernel-trace.c
> @@ -372,6 +372,19 @@ void probe_kernel_vprintk(unsigned long retaddr, char *buf, int len)
> }
> }
>
> +void probe_activate_task(struct task_struct *p, struct rq *rq)
> +{
> + trace_mark_tp(kernel_activate_task, sched_activate_task, probe_activate_task,
> + "pid %d state %ld cpu_id %u", p->pid, p->state, task_cpu(p));
> +}
> +
> +void probe_deactivate_task(struct task_struct *p, struct rq *rq)
> +{
> + trace_mark_tp(kernel_deactivate_task, sched_deactivate_task,
> + probe_deactivate_task, "pid %d state %ld cpu_id %u",
> + p->pid, p->state, task_cpu(p));
> +}
> +
> #ifdef CONFIG_MODULES
> void probe_kernel_module_free(struct module *mod)
> {
More information about the lttng-dev
mailing list