mirror of
				git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
				synced 2025-09-04 20:19:47 +08:00 
			
		
		
		
	 c73464b1c8
			
		
	
	
		c73464b1c8
		
	
	
	
	
		
			
			__trace_sched_switch_state() is the last remaining PREEMPT_ACTIVE user, move trace_sched_switch() from prepare_task_switch() to __schedule() and propagate the @preempt argument. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Steven Rostedt <rostedt@goodmis.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Mike Galbraith <efault@gmx.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
		
			
				
	
	
		
			103 lines
		
	
	
		
			2.2 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			103 lines
		
	
	
		
			2.2 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>

#include "trace.h"

/*
 * Number of tracers currently wanting cmdline recording via the
 * sched_switch/sched_wakeup probes.  When it drops to zero the
 * probes are unregistered.
 */
static int			sched_ref;
/* Serializes sched_ref updates with probe (un)registration. */
static DEFINE_MUTEX(sched_register_mutex);
 | |
| static void
 | |
| probe_sched_switch(void *ignore, bool preempt,
 | |
| 		   struct task_struct *prev, struct task_struct *next)
 | |
| {
 | |
| 	if (unlikely(!sched_ref))
 | |
| 		return;
 | |
| 
 | |
| 	tracing_record_cmdline(prev);
 | |
| 	tracing_record_cmdline(next);
 | |
| }
 | |
| 
 | |
| static void
 | |
| probe_sched_wakeup(void *ignore, struct task_struct *wakee)
 | |
| {
 | |
| 	if (unlikely(!sched_ref))
 | |
| 		return;
 | |
| 
 | |
| 	tracing_record_cmdline(current);
 | |
| }
 | |
| 
 | |
| static int tracing_sched_register(void)
 | |
| {
 | |
| 	int ret;
 | |
| 
 | |
| 	ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
 | |
| 	if (ret) {
 | |
| 		pr_info("wakeup trace: Couldn't activate tracepoint"
 | |
| 			" probe to kernel_sched_wakeup\n");
 | |
| 		return ret;
 | |
| 	}
 | |
| 
 | |
| 	ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
 | |
| 	if (ret) {
 | |
| 		pr_info("wakeup trace: Couldn't activate tracepoint"
 | |
| 			" probe to kernel_sched_wakeup_new\n");
 | |
| 		goto fail_deprobe;
 | |
| 	}
 | |
| 
 | |
| 	ret = register_trace_sched_switch(probe_sched_switch, NULL);
 | |
| 	if (ret) {
 | |
| 		pr_info("sched trace: Couldn't activate tracepoint"
 | |
| 			" probe to kernel_sched_switch\n");
 | |
| 		goto fail_deprobe_wake_new;
 | |
| 	}
 | |
| 
 | |
| 	return ret;
 | |
| fail_deprobe_wake_new:
 | |
| 	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
 | |
| fail_deprobe:
 | |
| 	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
 | |
| 	return ret;
 | |
| }
 | |
| 
 | |
| static void tracing_sched_unregister(void)
 | |
| {
 | |
| 	unregister_trace_sched_switch(probe_sched_switch, NULL);
 | |
| 	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
 | |
| 	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
 | |
| }
 | |
| 
 | |
| static void tracing_start_sched_switch(void)
 | |
| {
 | |
| 	mutex_lock(&sched_register_mutex);
 | |
| 	if (!(sched_ref++))
 | |
| 		tracing_sched_register();
 | |
| 	mutex_unlock(&sched_register_mutex);
 | |
| }
 | |
| 
 | |
| static void tracing_stop_sched_switch(void)
 | |
| {
 | |
| 	mutex_lock(&sched_register_mutex);
 | |
| 	if (!(--sched_ref))
 | |
| 		tracing_sched_unregister();
 | |
| 	mutex_unlock(&sched_register_mutex);
 | |
| }
 | |
| 
 | |
/*
 * Public entry point: enable pid -> comm recording for tracer
 * output.  Thin wrapper over the refcounted start helper.
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
 | |
| 
 | |
/*
 * Public entry point: drop the cmdline-recording reference taken
 * by tracing_start_cmdline_record().
 */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
 |