From faad38492814112e3e7ce94d90123bbe301fff33 Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Sun, 10 May 2015 01:18:03 +0200
Subject: sched / idle: Call idle_set_state() from cpuidle_enter_state()

Introduce a wrapper function around idle_set_state() called
sched_idle_set_state() that will pass this_rq() to it as the
first argument and make cpuidle_enter_state() call the new
function before and after entering the target state.

At the same time, remove direct invocations of idle_set_state()
from call_cpuidle().

This will allow the invocation of default_idle_call() to be
moved from call_cpuidle() to cpuidle_enter_state() safely
and call_cpuidle() to be simplified a bit as a result.

Signed-off-by: Rafael J. Wysocki
Reviewed-by: Preeti U Murthy
Tested-by: Preeti U Murthy
Tested-by: Sudeep Holla
Acked-by: Kevin Hilman
---
 drivers/cpuidle/cpuidle.c |  6 ++++++
 include/linux/cpuidle.h   |  3 +++
 kernel/sched/idle.c       | 15 +++++++++------
 3 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 597f88443bdc..9306dd5f460e 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -170,6 +170,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	if (broadcast && tick_broadcast_enter())
 		return -EBUSY;
 
+	/* Take note of the planned idle state. */
+	sched_idle_set_state(target_state);
+
 	trace_cpu_idle_rcuidle(index, dev->cpu);
 	time_start = ktime_get();
 
@@ -178,6 +181,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	time_end = ktime_get();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
+	/* The cpu is no longer idle or about to enter idle. */
+	sched_idle_set_state(NULL);
+
 	if (broadcast) {
 		if (WARN_ON_ONCE(!irqs_disabled()))
 			local_irq_disable();
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 9c5e89254796..301eaaab40e3 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -200,6 +200,9 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
 			struct cpuidle_device *dev) {return NULL; }
 #endif
 
+/* kernel/sched/idle.c */
+extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
 void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
 #else
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 9c919b42f846..5d9f549fffa8 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -15,6 +15,15 @@
 
 #include "sched.h"
 
+/**
+ * sched_idle_set_state - Record idle state for the current CPU.
+ * @idle_state: State to record.
+ */
+void sched_idle_set_state(struct cpuidle_state *idle_state)
+{
+	idle_set_state(this_rq(), idle_state);
+}
+
 static int __read_mostly cpu_idle_force_poll;
 
 void cpu_idle_poll_ctrl(bool enable)
@@ -100,9 +109,6 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		return -EBUSY;
 	}
 
-	/* Take note of the planned idle state. */
-	idle_set_state(this_rq(), &drv->states[next_state]);
-
 	/*
 	 * Enter the idle state previously returned by the governor decision.
 	 * This function will block until an interrupt occurs and will take
@@ -110,9 +116,6 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 	 */
 	entered_state = cpuidle_enter(drv, dev, next_state);
 
-	/* The cpu is no longer idle or about to enter idle. */
-	idle_set_state(this_rq(), NULL);
-
 	if (entered_state == -EBUSY)
 		default_idle_call();
 
-- 
cgit v1.2.3
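
For readers who want to see the pattern in isolation, below is a minimal
standalone sketch of the control flow this patch establishes: the state-entry
path records the target idle state through a small wrapper before entering
idle and clears it on exit. This is a userspace approximation, not kernel
code; the struct and helper names (toy_idle_state, enter_state,
this_rq_idle_state) are invented for illustration, and only
sched_idle_set_state() mirrors the wrapper added in kernel/sched/idle.c.

/*
 * Illustrative userspace sketch of the bookkeeping pattern added by this
 * patch. All names except sched_idle_set_state() are invented here.
 */
#include <stdio.h>
#include <stddef.h>

struct toy_idle_state {
	const char *name;
};

/* Stand-in for the per-runqueue pointer that idle_set_state() updates. */
static struct toy_idle_state *this_rq_idle_state;

/* Wrapper: record (or clear) the idle state for the "current CPU". */
static void sched_idle_set_state(struct toy_idle_state *idle_state)
{
	this_rq_idle_state = idle_state;
}

/* Rough analogue of cpuidle_enter_state(): bookend the low-level entry. */
static int enter_state(struct toy_idle_state *target_state)
{
	/* Take note of the planned idle state. */
	sched_idle_set_state(target_state);

	printf("entering %s (recorded: %s)\n",
	       target_state->name, this_rq_idle_state->name);

	/* The cpu is no longer idle or about to enter idle. */
	sched_idle_set_state(NULL);

	return 0;
}

int main(void)
{
	struct toy_idle_state c1 = { .name = "C1" };

	enter_state(&c1);
	printf("after exit, recorded state: %s\n",
	       this_rq_idle_state ? this_rq_idle_state->name : "(none)");
	return 0;
}

Keeping the set/clear pair inside the state-entry function rather than in its
caller is what, per the changelog, later allows the default_idle_call()
fallback to move from call_cpuidle() into cpuidle_enter_state() as well.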