author     Linus Torvalds <torvalds@linux-foundation.org>  2009-09-15 09:39:44 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-15 09:39:44 -0700
commit     ada3fa15057205b7d3f727bba5cd26b5912e350f (patch)
tree       60962fc9e4021b92f484d1a58e72cd3906d4f3db /kernel
parent     2f82af08fcc7dc01a7e98a49a5995a77e32a2925 (diff)
parent     5579fd7e6aed8860ea0c8e3f11897493153b10ad (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (46 commits)
  powerpc64: convert to dynamic percpu allocator
  sparc64: use embedding percpu first chunk allocator
  percpu: kill lpage first chunk allocator
  x86,percpu: use embedding for 64bit NUMA and page for 32bit NUMA
  percpu: update embedding first chunk allocator to handle sparse units
  percpu: use group information to allocate vmap areas sparsely
  vmalloc: implement pcpu_get_vm_areas()
  vmalloc: separate out insert_vmalloc_vm()
  percpu: add chunk->base_addr
  percpu: add pcpu_unit_offsets[]
  percpu: introduce pcpu_alloc_info and pcpu_group_info
  percpu: move pcpu_lpage_build_unit_map() and pcpul_lpage_dump_cfg() upward
  percpu: add @align to pcpu_fc_alloc_fn_t
  percpu: make @dyn_size mandatory for pcpu_setup_first_chunk()
  percpu: drop @static_size from first chunk allocators
  percpu: generalize first chunk allocator selection
  percpu: build first chunk allocators selectively
  percpu: rename 4k first chunk allocator to page
  percpu: improve boot messages
  percpu: fix pcpu_reclaim() locking
  ...

Fix trivial conflict as pointed out by Tejun Heo in kernel/sched.c
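For context, a minimal usage sketch of the dynamic percpu allocator interface that the conversions above standardize on. Only the percpu calls (alloc_percpu, per_cpu_ptr, free_percpu, for_each_possible_cpu) are real kernel API; the struct and variable names are made up for illustration.

#include <linux/errno.h>
#include <linux/percpu.h>

/* Made-up example type. */
struct pkt_stats {
        unsigned long rx;
        unsigned long tx;
};

static struct pkt_stats *pkt_stats;     /* handle to the per-CPU storage */

static int pkt_stats_init(void)
{
        int cpu;

        pkt_stats = alloc_percpu(struct pkt_stats);     /* one copy per possible CPU */
        if (!pkt_stats)
                return -ENOMEM;

        /* alloc_percpu() already returns zeroed memory; this loop is only
         * here to show how per_cpu_ptr() addresses a given CPU's copy. */
        for_each_possible_cpu(cpu) {
                struct pkt_stats *s = per_cpu_ptr(pkt_stats, cpu);

                s->rx = 0;
                s->tx = 0;
        }
        return 0;
}

static void pkt_stats_exit(void)
{
        free_percpu(pkt_stats);         /* releases all per-CPU copies */
}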
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/module.c              | 6
-rw-r--r--  kernel/perf_counter.c        | 6
-rw-r--r--  kernel/sched.c               | 4
-rw-r--r--  kernel/trace/trace_events.c  | 6
4 files changed, 11 insertions, 11 deletions
diff --git a/kernel/module.c b/kernel/module.c
index 46580edff0cb..05ce49ced8f6 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -369,7 +369,7 @@ EXPORT_SYMBOL_GPL(find_module);
#ifdef CONFIG_SMP
-#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
static void *percpu_modalloc(unsigned long size, unsigned long align,
const char *name)
@@ -394,7 +394,7 @@ static void percpu_modfree(void *freeme)
free_percpu(freeme);
}
-#else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
/* Number of blocks used and allocated. */
static unsigned int pcpu_num_used, pcpu_num_allocated;
@@ -540,7 +540,7 @@ static int percpu_modinit(void)
}
__initcall(percpu_modinit);
-#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
static unsigned int find_pcpusec(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
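The module.c hunks above invert the sense of the config guard: the dynamic-allocator path is now taken by default, and only architectures still on the old static setup define CONFIG_HAVE_LEGACY_PER_CPU_AREA. A simplified sketch of the resulting shape of these helpers (the real functions carry extra alignment clamping and warning printks not shown here):

#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
/* Default path: module percpu areas come from the dynamic allocator. */
static void *percpu_modalloc(unsigned long size, unsigned long align,
                             const char *name)
{
        return __alloc_percpu(size, align);
}

static void percpu_modfree(void *freeme)
{
        free_percpu(freeme);
}
#else  /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
/* Legacy path: allocations are carved out of a static per-CPU reserve,
 * tracked via pcpu_num_used/pcpu_num_allocated as in the hunks above. */
#endif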
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index e7f60f8e31ed..8cb94a52d1bb 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -106,16 +106,16 @@ hw_perf_group_sched_in(struct perf_counter *group_leader,
void __weak perf_counter_print_debug(void) { }
-static DEFINE_PER_CPU(int, disable_count);
+static DEFINE_PER_CPU(int, perf_disable_count);
void __perf_disable(void)
{
- __get_cpu_var(disable_count)++;
+ __get_cpu_var(perf_disable_count)++;
}
bool __perf_enable(void)
{
- return !--__get_cpu_var(disable_count);
+ return !--__get_cpu_var(perf_disable_count);
}
void perf_disable(void)
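The perf_counter.c change above gives the per-CPU counter a perf-specific name. The underlying pattern is a per-CPU nesting counter; an illustrative fragment with made-up names follows (like the real code, it assumes the caller already runs with preemption disabled, since __get_cpu_var() addresses the current CPU's copy):

static DEFINE_PER_CPU(int, foo_nest_count);

static void foo_disable(void)
{
        __get_cpu_var(foo_nest_count)++;        /* enter one nesting level */
}

static bool foo_enable(void)
{
        /* true only when the outermost nesting level is released */
        return !--__get_cpu_var(foo_nest_count);
}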
diff --git a/kernel/sched.c b/kernel/sched.c
index e27a53685ed9..d9db3fb17573 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -295,12 +295,12 @@ struct task_group root_task_group;
/* Default task group's sched entity on each cpu */
static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
/* Default task group's cfs_rq on each cpu */
-static DEFINE_PER_CPU(struct cfs_rq, init_tg_cfs_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
#endif /* CONFIG_RT_GROUP_SCHED */
#else /* !CONFIG_USER_SCHED */
#define root_task_group init_task_group
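In sched.c the explicit ____cacheline_aligned_in_smp attribute is replaced by DEFINE_PER_CPU_SHARED_ALIGNED(), which keeps the cacheline alignment but groups such variables in the dedicated shared-aligned per-CPU subsection rather than aligning them in the middle of the generic section. An illustrative comparison with a made-up type:

/* Illustrative only: 'struct hot_data' is made up. */
struct hot_data {
        unsigned long nr_events;
        u64 last_stamp;
};

/* Old spelling: generic per-CPU section plus an explicit alignment attribute. */
static DEFINE_PER_CPU(struct hot_data, hot_old) ____cacheline_aligned_in_smp;

/* New spelling: same alignment, placed in the shared-aligned subsection. */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct hot_data, hot_new);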
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 78b1ed230177..97e2c4d2e9eb 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1432,7 +1432,7 @@ static __init void event_trace_self_tests(void)
#ifdef CONFIG_FUNCTION_TRACER
-static DEFINE_PER_CPU(atomic_t, test_event_disable);
+static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
@@ -1449,7 +1449,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
pc = preempt_count();
resched = ftrace_preempt_disable();
cpu = raw_smp_processor_id();
- disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
+ disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
if (disabled != 1)
goto out;
@@ -1468,7 +1468,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
out:
- atomic_dec(&per_cpu(test_event_disable, cpu));
+ atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
ftrace_preempt_enable(resched);
}
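The trace_events.c hunks rename the per-CPU recursion guard used by the function-tracer self-test. The guard pattern itself, where only the outermost entry on a given CPU does any work, looks roughly like the sketch below; the names are made up, and plain preempt_disable() stands in for the ftrace_preempt_disable() helper seen in the hunks above.

static DEFINE_PER_CPU(atomic_t, my_probe_disable);

static void my_probe(void)
{
        int cpu, disabled;

        preempt_disable();              /* keep us on this CPU */
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&per_cpu(my_probe_disable, cpu));
        if (disabled != 1)
                goto out;               /* already active on this CPU: avoid recursion */

        /* ... do the actual work ... */

out:
        atomic_dec(&per_cpu(my_probe_disable, cpu));
        preempt_enable();
}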