Diffstat (limited to 'kernel/time/tick-broadcast.c')
-rw-r--r--	kernel/time/tick-broadcast.c	99
1 file changed, 73 insertions(+), 26 deletions(-)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 31463d370b94..cb01cd8f919b 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -175,6 +175,8 @@ static void tick_do_periodic_broadcast(void)
*/
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
+ ktime_t next;
+
tick_do_periodic_broadcast();
/*
@@ -185,10 +187,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
/*
* Setup the next period for devices, which do not have
- * periodic mode:
+ * periodic mode. We read dev->next_event first and add to it
+ * when the event already expired. clockevents_program_event()
+ * sets dev->next_event only when the event is really
+ * programmed to the device.
*/
- for (;;) {
- ktime_t next = ktime_add(dev->next_event, tick_period);
+ for (next = dev->next_event; ;) {
+ next = ktime_add(next, tick_period);
if (!clockevents_program_event(dev, next, ktime_get()))
return;
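The rewritten loop matters when the handler runs more than one tick_period after dev->next_event: clockevents_program_event() updates dev->next_event only on success, so the old loop recomputed the same expired expiry on every failed attempt and could spin forever. A minimal userspace sketch of the fixed pattern, where program_event(), the period, and the timestamps are hypothetical stand-ins for clockevents_program_event() and real ktime arithmetic:

#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime_t;

static ktime_t now = 21;	/* simulated clock, several periods late */

/* Stand-in for clockevents_program_event(): fails when 'expires'
 * is already in the past, succeeds otherwise. */
static int program_event(ktime_t expires)
{
	return expires <= now ? -1 : 0;
}

int main(void)
{
	const ktime_t tick_period = 4;
	ktime_t next = 10;	/* dev->next_event at entry */

	/* Accumulate into the local 'next' exactly as the patched loop
	 * does: 14 and 18 fail, 22 is in the future and is accepted.
	 * Re-reading the unchanged next_event would retry 14 forever. */
	for (;;) {
		next += tick_period;
		if (!program_event(next))
			break;
	}
	printf("programmed for %lld\n", (long long)next);	/* 22 */
	return 0;
}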
@@ -205,7 +210,7 @@ static void tick_do_broadcast_on_off(void *why)
struct clock_event_device *bc, *dev;
struct tick_device *td;
unsigned long flags, *reason = why;
- int cpu;
+ int cpu, bc_stopped;
spin_lock_irqsave(&tick_broadcast_lock, flags);
@@ -223,14 +228,16 @@ static void tick_do_broadcast_on_off(void *why)
if (!tick_device_is_functional(dev))
goto out;
+ bc_stopped = cpus_empty(tick_broadcast_mask);
+
switch (*reason) {
case CLOCK_EVT_NOTIFY_BROADCAST_ON:
case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
if (!cpu_isset(cpu, tick_broadcast_mask)) {
cpu_set(cpu, tick_broadcast_mask);
- if (td->mode == TICKDEV_MODE_PERIODIC)
- clockevents_set_mode(dev,
- CLOCK_EVT_MODE_SHUTDOWN);
+ if (tick_broadcast_device.mode ==
+ TICKDEV_MODE_PERIODIC)
+ clockevents_shutdown(dev);
}
if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
tick_broadcast_force = 1;
@@ -239,15 +246,17 @@ static void tick_do_broadcast_on_off(void *why)
if (!tick_broadcast_force &&
cpu_isset(cpu, tick_broadcast_mask)) {
cpu_clear(cpu, tick_broadcast_mask);
- if (td->mode == TICKDEV_MODE_PERIODIC)
+ if (tick_broadcast_device.mode ==
+ TICKDEV_MODE_PERIODIC)
tick_setup_periodic(dev, 0);
}
break;
}
- if (cpus_empty(tick_broadcast_mask))
- clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
- else {
+ if (cpus_empty(tick_broadcast_mask)) {
+ if (!bc_stopped)
+ clockevents_shutdown(bc);
+ } else if (bc_stopped) {
if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
tick_broadcast_start_periodic(bc);
else
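bc_stopped snapshots whether tick_broadcast_mask was empty before this notification. The broadcast device is then shut down only on a non-empty to empty transition and started only on an empty to non-empty one, so redundant ON/OFF events for a CPU already in (or absent from) the mask no longer reprogram a correctly configured device. A compact sketch of that transition logic, with a plain unsigned long standing in for cpumask_t and printf for the real start/shutdown paths:

#include <stdio.h>

static unsigned long broadcast_mask;

static void broadcast_on_off(int cpu, int on)
{
	int bc_stopped = (broadcast_mask == 0);

	if (on)
		broadcast_mask |= 1UL << cpu;
	else
		broadcast_mask &= ~(1UL << cpu);

	/* Touch the broadcast device only on mask transitions. */
	if (broadcast_mask == 0) {
		if (!bc_stopped)
			printf("cpu%d: shutdown broadcast device\n", cpu);
	} else if (bc_stopped) {
		printf("cpu%d: start broadcast device\n", cpu);
	}
}

int main(void)
{
	broadcast_on_off(0, 1);	/* empty -> {0}: start    */
	broadcast_on_off(1, 1);	/* {0} -> {0,1}: no-op    */
	broadcast_on_off(0, 0);	/* {0,1} -> {1}: no-op    */
	broadcast_on_off(1, 0);	/* {1} -> empty: shutdown */
	return 0;
}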
@@ -298,7 +307,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
if (bc && cpus_empty(tick_broadcast_mask))
- clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+ clockevents_shutdown(bc);
}
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
@@ -313,7 +322,7 @@ void tick_suspend_broadcast(void)
bc = tick_broadcast_device.evtdev;
if (bc)
- clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
+ clockevents_shutdown(bc);
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
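Every clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN) call in this file becomes clockevents_shutdown(). The helper itself is added elsewhere in the series (in kernel/time/clockevents.c, not part of this diff); presumably it pairs the mode switch with invalidating the pending expiry, roughly:

void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
	/* A stopped device has no pending event. */
	dev->next_event.tv64 = KTIME_MAX;
}

Resetting next_event here would keep a stale expiry from being trusted when the device is later reprogrammed.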
@@ -364,16 +373,8 @@ cpumask_t *tick_get_broadcast_oneshot_mask(void)
static int tick_broadcast_set_event(ktime_t expires, int force)
{
struct clock_event_device *bc = tick_broadcast_device.evtdev;
- ktime_t now = ktime_get();
- int res;
-
- for(;;) {
- res = clockevents_program_event(bc, expires, now);
- if (!res || !force)
- return res;
- now = ktime_get();
- expires = ktime_add(now, ktime_set(0, bc->min_delta_ns));
- }
+
+ return tick_dev_program_event(bc, expires, force);
}
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
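The retry loop deleted above moves into tick_dev_program_event(), which this file now shares with the per-cpu tick code. The helper is not part of this diff; judging from the removed code, a faithful sketch would be:

static int tick_dev_program_event(struct clock_event_device *dev,
				  ktime_t expires, int force)
{
	ktime_t now = ktime_get();

	for (;;) {
		int ret = clockevents_program_event(dev, expires, now);

		if (!ret || !force)
			return ret;
		/*
		 * The expiry was already in the past: retry with the
		 * smallest delta the device can program.
		 */
		now = ktime_get();
		expires = ktime_add(now, ktime_set(0, dev->min_delta_ns));
	}
}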
@@ -491,14 +492,52 @@ static void tick_broadcast_clear_oneshot(int cpu)
cpu_clear(cpu, tick_broadcast_oneshot_mask);
}
+static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires)
+{
+ struct tick_device *td;
+ int cpu;
+
+ for_each_cpu_mask_nr(cpu, *mask) {
+ td = &per_cpu(tick_cpu_device, cpu);
+ if (td->evtdev)
+ td->evtdev->next_event = expires;
+ }
+}
+
/**
* tick_broadcast_setup_oneshot - setup the broadcast device
*/
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
- bc->event_handler = tick_handle_oneshot_broadcast;
- clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
- bc->next_event.tv64 = KTIME_MAX;
+ /* Set it up only once! */
+ if (bc->event_handler != tick_handle_oneshot_broadcast) {
+ int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
+ int cpu = smp_processor_id();
+ cpumask_t mask;
+
+ bc->event_handler = tick_handle_oneshot_broadcast;
+ clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+
+ /* Take the do_timer update */
+ tick_do_timer_cpu = cpu;
+
+ /*
+ * We must be careful here. There might be other CPUs
+ * waiting for periodic broadcast. We need to set the
+ * oneshot_mask bits for those and program the
+ * broadcast device to fire.
+ */
+ mask = tick_broadcast_mask;
+ cpu_clear(cpu, mask);
+ cpus_or(tick_broadcast_oneshot_mask,
+ tick_broadcast_oneshot_mask, mask);
+
+ if (was_periodic && !cpus_empty(mask)) {
+ tick_broadcast_init_next_event(&mask, tick_next_period);
+ tick_broadcast_set_event(tick_next_period, 1);
+ } else
+ bc->next_event.tv64 = KTIME_MAX;
+ }
}
/*
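The guarded setup path handles the periodic to oneshot handover: any CPU still parked in tick_broadcast_mask (other than the one doing the switch) is waiting for a periodic broadcast wakeup, so it inherits tick_next_period as its next_event, joins the oneshot mask, and the broadcast device is force-programmed for that same expiry so nobody sleeps through the mode change. A userspace sketch of the handover, with bitmasks in place of cpumask_t and printf in place of the real programming call:

#include <stdio.h>

#define NR_CPUS		8

static unsigned long broadcast_mask;	/* waiting for periodic bc */
static unsigned long oneshot_mask;	/* tracked by oneshot bc   */
static long long next_event[NR_CPUS];	/* per-cpu expiry          */

/* Model of the was_periodic branch in tick_broadcast_setup_oneshot():
 * every waiter except the switching CPU inherits tick_next_period
 * and moves into the oneshot mask. */
static void setup_oneshot(int this_cpu, long long tick_next_period)
{
	unsigned long mask = broadcast_mask & ~(1UL << this_cpu);
	int cpu;

	oneshot_mask |= mask;
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask & (1UL << cpu))
			next_event[cpu] = tick_next_period;
	if (mask)
		printf("program broadcast device for %lld\n",
		       tick_next_period);
}

int main(void)
{
	broadcast_mask = (1UL << 1) | (1UL << 2);  /* cpus 1,2 in deep idle */
	setup_oneshot(0, 1000);
	printf("oneshot_mask=%#lx cpu1=%lld cpu2=%lld\n",
	       oneshot_mask, next_event[1], next_event[2]);
	return 0;
}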
@@ -538,4 +577,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
+/*
+ * Check whether the broadcast device is in oneshot mode
+ */
+int tick_broadcast_oneshot_active(void)
+{
+ return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
+}
+
#endif
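tick_broadcast_oneshot_active() gives the common tick code a way to ask whether the broadcast device already runs in oneshot mode: a per-cpu device should not be switched into hardware periodic mode while the oneshot broadcast logic expects to reprogram it. A plausible caller, sketched along the lines of tick_setup_periodic() in kernel/time/tick-common.c (a hypothetical reconstruction, not part of this diff):

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		/* Stay in oneshot mode; the broadcast code will
		 * (re)program dev->next_event tick by tick. */
	}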