Diffstat (limited to 'include/trace')
-rw-r--r--   include/trace/events/ext4.h    101
-rw-r--r--   include/trace/events/jbd2.h     28
-rw-r--r--   include/trace/events/kvm.h      41
-rw-r--r--   include/trace/events/lock.h     29
-rw-r--r--   include/trace/ftrace.h         289
-rw-r--r--   include/trace/syscall.h         14
6 files changed, 279 insertions(+), 223 deletions(-)
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index d0b6cd3afb2f..2aa6aa3e8f61 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -874,6 +874,107 @@ TRACE_EVENT(ext4_forget,
__entry->mode, __entry->is_metadata, __entry->block)
);
+TRACE_EVENT(ext4_da_update_reserve_space,
+ TP_PROTO(struct inode *inode, int used_blocks),
+
+ TP_ARGS(inode, used_blocks),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( umode_t, mode )
+ __field( __u64, i_blocks )
+ __field( int, used_blocks )
+ __field( int, reserved_data_blocks )
+ __field( int, reserved_meta_blocks )
+ __field( int, allocated_meta_blocks )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
+ __entry->i_blocks = inode->i_blocks;
+ __entry->used_blocks = used_blocks;
+ __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+ __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
+ __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
+ ),
+
+ TP_printk("dev %s ino %lu mode 0%o i_blocks %llu used_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
+ jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ __entry->mode, (unsigned long long) __entry->i_blocks,
+ __entry->used_blocks, __entry->reserved_data_blocks,
+ __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
+);
+
+TRACE_EVENT(ext4_da_reserve_space,
+ TP_PROTO(struct inode *inode, int md_needed),
+
+ TP_ARGS(inode, md_needed),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( umode_t, mode )
+ __field( __u64, i_blocks )
+ __field( int, md_needed )
+ __field( int, reserved_data_blocks )
+ __field( int, reserved_meta_blocks )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
+ __entry->i_blocks = inode->i_blocks;
+ __entry->md_needed = md_needed;
+ __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+ __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
+ ),
+
+ TP_printk("dev %s ino %lu mode 0%o i_blocks %llu md_needed %d reserved_data_blocks %d reserved_meta_blocks %d",
+ jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ __entry->mode, (unsigned long long) __entry->i_blocks,
+ __entry->md_needed, __entry->reserved_data_blocks,
+ __entry->reserved_meta_blocks)
+);
+
+TRACE_EVENT(ext4_da_release_space,
+ TP_PROTO(struct inode *inode, int freed_blocks),
+
+ TP_ARGS(inode, freed_blocks),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( umode_t, mode )
+ __field( __u64, i_blocks )
+ __field( int, freed_blocks )
+ __field( int, reserved_data_blocks )
+ __field( int, reserved_meta_blocks )
+ __field( int, allocated_meta_blocks )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
+ __entry->i_blocks = inode->i_blocks;
+ __entry->freed_blocks = freed_blocks;
+ __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+ __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
+ __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
+ ),
+
+ TP_printk("dev %s ino %lu mode 0%o i_blocks %llu freed_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d",
+ jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
+ __entry->mode, (unsigned long long) __entry->i_blocks,
+ __entry->freed_blocks, __entry->reserved_data_blocks,
+ __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
+);
+
+
#endif /* _TRACE_EXT4_H */
/* This part must be outside protection */
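Each TRACE_EVENT() added above also generates a trace_<name>() stub that the ext4 delayed-allocation code can call. The fragment below is only an illustrative sketch of such a call site (the wrapper name is invented and the snippet is not compilable on its own; it assumes the usual kernel headers plus the definitions from this file):

/*
 * Sketch of a caller firing the new event; trace_ext4_da_reserve_space()
 * is the hook generated from the TRACE_EVENT(ext4_da_reserve_space, ...)
 * definition above. It compiles to a no-op unless the event is enabled.
 */
static void ext4_da_reserve_space_trace_sketch(struct inode *inode,
						int md_needed)
{
	trace_ext4_da_reserve_space(inode, md_needed);
}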
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index 96b370a050de..bf16545cc977 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -199,6 +199,34 @@ TRACE_EVENT(jbd2_checkpoint_stats,
__entry->forced_to_close, __entry->written, __entry->dropped)
);
+TRACE_EVENT(jbd2_cleanup_journal_tail,
+
+ TP_PROTO(journal_t *journal, tid_t first_tid,
+ unsigned long block_nr, unsigned long freed),
+
+ TP_ARGS(journal, first_tid, block_nr, freed),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( tid_t, tail_sequence )
+ __field( tid_t, first_tid )
+ __field(unsigned long, block_nr )
+ __field(unsigned long, freed )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = journal->j_fs_dev->bd_dev;
+ __entry->tail_sequence = journal->j_tail_sequence;
+ __entry->first_tid = first_tid;
+ __entry->block_nr = block_nr;
+ __entry->freed = freed;
+ ),
+
+ TP_printk("dev %s from %u to %u offset %lu freed %lu",
+ jbd2_dev_to_name(__entry->dev), __entry->tail_sequence,
+ __entry->first_tid, __entry->block_nr, __entry->freed)
+);
+
#endif /* _TRACE_JBD2_H */
/* This part must be outside protection */
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index dbe108455275..b17d49dfc3ef 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -145,6 +145,47 @@ TRACE_EVENT(kvm_mmio,
__entry->len, __entry->gpa, __entry->val)
);
+#define kvm_fpu_load_symbol \
+ {0, "unload"}, \
+ {1, "load"}
+
+TRACE_EVENT(kvm_fpu,
+ TP_PROTO(int load),
+ TP_ARGS(load),
+
+ TP_STRUCT__entry(
+ __field( u32, load )
+ ),
+
+ TP_fast_assign(
+ __entry->load = load;
+ ),
+
+ TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
+);
+
+TRACE_EVENT(kvm_age_page,
+ TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
+ TP_ARGS(hva, slot, ref),
+
+ TP_STRUCT__entry(
+ __field( u64, hva )
+ __field( u64, gfn )
+ __field( u8, referenced )
+ ),
+
+ TP_fast_assign(
+ __entry->hva = hva;
+ __entry->gfn =
+ slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
+ __entry->referenced = ref;
+ ),
+
+ TP_printk("hva %llx gfn %llx %s",
+ __entry->hva, __entry->gfn,
+ __entry->referenced ? "YOUNG" : "OLD")
+);
+
#endif /* _TRACE_KVM_MAIN_H */
/* This part must be outside protection */
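The gfn logged by kvm_age_page is derived from the host virtual address of the page being aged. A standalone userspace sketch of that arithmetic follows (the slot values and the 4 KiB PAGE_SHIFT are made-up assumptions for illustration):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumes 4 KiB pages */

int main(void)
{
	/* stand-ins for slot->base_gfn, slot->userspace_addr and the hva */
	uint64_t base_gfn       = 0x100;
	uint64_t userspace_addr = 0x7f0000000000ULL;
	uint64_t hva            = 0x7f0000042000ULL;
	int referenced          = 1;

	/* same formula as the TP_fast_assign() block above */
	uint64_t gfn = base_gfn + ((hva - userspace_addr) >> PAGE_SHIFT);

	printf("hva %llx gfn %llx %s\n",
	       (unsigned long long)hva, (unsigned long long)gfn,
	       referenced ? "YOUNG" : "OLD");
	return 0;
}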
diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
index a870ba125aa8..5c1dcfc16c60 100644
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h
@@ -20,14 +20,17 @@ TRACE_EVENT(lock_acquire,
TP_STRUCT__entry(
__field(unsigned int, flags)
__string(name, lock->name)
+ __field(void *, lockdep_addr)
),
TP_fast_assign(
__entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0);
__assign_str(name, lock->name);
+ __entry->lockdep_addr = lock;
),
- TP_printk("%s%s%s", (__entry->flags & 1) ? "try " : "",
+ TP_printk("%p %s%s%s", __entry->lockdep_addr,
+ (__entry->flags & 1) ? "try " : "",
(__entry->flags & 2) ? "read " : "",
__get_str(name))
);
@@ -40,13 +43,16 @@ TRACE_EVENT(lock_release,
TP_STRUCT__entry(
__string(name, lock->name)
+ __field(void *, lockdep_addr)
),
TP_fast_assign(
__assign_str(name, lock->name);
+ __entry->lockdep_addr = lock;
),
- TP_printk("%s", __get_str(name))
+ TP_printk("%p %s",
+ __entry->lockdep_addr, __get_str(name))
);
#ifdef CONFIG_LOCK_STAT
@@ -59,13 +65,16 @@ TRACE_EVENT(lock_contended,
TP_STRUCT__entry(
__string(name, lock->name)
+ __field(void *, lockdep_addr)
),
TP_fast_assign(
__assign_str(name, lock->name);
+ __entry->lockdep_addr = lock;
),
- TP_printk("%s", __get_str(name))
+ TP_printk("%p %s",
+ __entry->lockdep_addr, __get_str(name))
);
TRACE_EVENT(lock_acquired,
@@ -75,16 +84,18 @@ TRACE_EVENT(lock_acquired,
TP_STRUCT__entry(
__string(name, lock->name)
- __field(unsigned long, wait_usec)
- __field(unsigned long, wait_nsec_rem)
+ __field(s64, wait_nsec)
+ __field(void *, lockdep_addr)
),
+
TP_fast_assign(
__assign_str(name, lock->name);
- __entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC);
- __entry->wait_usec = (unsigned long) waittime;
+ __entry->wait_nsec = waittime;
+ __entry->lockdep_addr = lock;
),
- TP_printk("%s (%lu.%03lu us)", __get_str(name), __entry->wait_usec,
- __entry->wait_nsec_rem)
+ TP_printk("%p %s (%llu ns)", __entry->lockdep_addr,
+ __get_str(name),
+ __entry->wait_nsec)
);
#endif
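The lock_acquired change above stops splitting the wait time into microseconds plus a remainder via do_div() and records the raw nanosecond value instead. A standalone sketch contrasting the two representations (the wait value is made up; NSEC_PER_USEC mirrors the kernel's 1000):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000ULL

int main(void)
{
	uint64_t waittime = 1234567;	/* example wait time, in ns */

	/* old fields: wait_usec plus wait_nsec_rem, computed with do_div() */
	unsigned long wait_usec     = (unsigned long)(waittime / NSEC_PER_USEC);
	unsigned long wait_nsec_rem = (unsigned long)(waittime % NSEC_PER_USEC);

	/* new field: a single signed 64-bit nanosecond count */
	int64_t wait_nsec = (int64_t)waittime;

	printf("old: (%lu.%03lu us)\n", wait_usec, wait_nsec_rem);
	printf("new: (%lld ns)\n", (long long)wait_nsec);
	return 0;
}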
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index c6fe03e902ca..ea6f9d4a20e9 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -65,7 +65,8 @@
};
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
- static struct ftrace_event_call event_##name
+ static struct ftrace_event_call \
+ __attribute__((__aligned__(4))) event_##name
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
@@ -131,130 +132,6 @@
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
- * Setup the showing format of trace point.
- *
- * int
- * ftrace_format_##call(struct trace_seq *s)
- * {
- * struct ftrace_raw_##call field;
- * int ret;
- *
- * ret = trace_seq_printf(s, #type " " #item ";"
- * " offset:%u; size:%u;\n",
- * offsetof(struct ftrace_raw_##call, item),
- * sizeof(field.type));
- *
- * }
- */
-
-#undef TP_STRUCT__entry
-#define TP_STRUCT__entry(args...) args
-
-#undef __field
-#define __field(type, item) \
- ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
- "offset:%u;\tsize:%u;\tsigned:%u;\n", \
- (unsigned int)offsetof(typeof(field), item), \
- (unsigned int)sizeof(field.item), \
- (unsigned int)is_signed_type(type)); \
- if (!ret) \
- return 0;
-
-#undef __field_ext
-#define __field_ext(type, item, filter_type) __field(type, item)
-
-#undef __array
-#define __array(type, item, len) \
- ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
- "offset:%u;\tsize:%u;\tsigned:%u;\n", \
- (unsigned int)offsetof(typeof(field), item), \
- (unsigned int)sizeof(field.item), \
- (unsigned int)is_signed_type(type)); \
- if (!ret) \
- return 0;
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) \
- ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
- "offset:%u;\tsize:%u;\tsigned:%u;\n", \
- (unsigned int)offsetof(typeof(field), \
- __data_loc_##item), \
- (unsigned int)sizeof(field.__data_loc_##item), \
- (unsigned int)is_signed_type(type)); \
- if (!ret) \
- return 0;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __entry
-#define __entry REC
-
-#undef __print_symbolic
-#undef __get_dynamic_array
-#undef __get_str
-
-#undef TP_printk
-#define TP_printk(fmt, args...) "\"%s\", %s\n", fmt, __stringify(args)
-
-#undef TP_fast_assign
-#define TP_fast_assign(args...) args
-
-#undef TP_perf_assign
-#define TP_perf_assign(args...)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
-static int \
-ftrace_format_setup_##call(struct ftrace_event_call *unused, \
- struct trace_seq *s) \
-{ \
- struct ftrace_raw_##call field __attribute__((unused)); \
- int ret = 0; \
- \
- tstruct; \
- \
- return ret; \
-} \
- \
-static int \
-ftrace_format_##call(struct ftrace_event_call *unused, \
- struct trace_seq *s) \
-{ \
- int ret = 0; \
- \
- ret = ftrace_format_setup_##call(unused, s); \
- if (!ret) \
- return ret; \
- \
- ret = trace_seq_printf(s, "\nprint fmt: " print); \
- \
- return ret; \
-}
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, name, proto, args)
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
-static int \
-ftrace_format_##name(struct ftrace_event_call *unused, \
- struct trace_seq *s) \
-{ \
- int ret = 0; \
- \
- ret = ftrace_format_setup_##template(unused, s); \
- if (!ret) \
- return ret; \
- \
- trace_seq_printf(s, "\nprint fmt: " print); \
- \
- return ret; \
-}
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-/*
* Stage 3 of the trace events.
*
* Override the macros in <trace/trace_events.h> to include the following:
@@ -323,7 +200,7 @@ ftrace_format_##name(struct ftrace_event_call *unused, \
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static enum print_line_t \
+static notrace enum print_line_t \
ftrace_raw_output_id_##call(int event_id, const char *name, \
struct trace_iterator *iter, int flags) \
{ \
@@ -356,7 +233,7 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
-static enum print_line_t \
+static notrace enum print_line_t \
ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \
{ \
return ftrace_raw_output_id_##template(event_##name.id, \
@@ -365,7 +242,7 @@ ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
-static enum print_line_t \
+static notrace enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
{ \
struct trace_seq *s = &iter->seq; \
@@ -431,7 +308,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
-static int \
+static int notrace \
ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
{ \
struct ftrace_raw_##call field; \
@@ -479,7 +356,7 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static inline int ftrace_get_offsets_##call( \
+static inline notrace int ftrace_get_offsets_##call( \
struct ftrace_data_offsets_##call *__data_offsets, proto) \
{ \
int __data_size = 0; \
@@ -499,7 +376,7 @@ static inline int ftrace_get_offsets_##call( \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
/*
* Generate the functions needed for tracepoint perf_event support.
@@ -524,16 +401,18 @@ static inline int ftrace_get_offsets_##call( \
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
\
-static void ftrace_profile_##name(proto); \
+static void perf_trace_##name(proto); \
\
-static int ftrace_profile_enable_##name(struct ftrace_event_call *unused)\
+static notrace int \
+perf_trace_enable_##name(struct ftrace_event_call *unused) \
{ \
- return register_trace_##name(ftrace_profile_##name); \
+ return register_trace_##name(perf_trace_##name); \
} \
\
-static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
+static notrace void \
+perf_trace_disable_##name(struct ftrace_event_call *unused) \
{ \
- unregister_trace_##name(ftrace_profile_##name); \
+ unregister_trace_##name(perf_trace_##name); \
}
#undef DEFINE_EVENT_PRINT
@@ -542,7 +421,7 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif
+#endif /* CONFIG_PERF_EVENTS */
/*
* Stage 4 of the trace events.
@@ -622,20 +501,19 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
* .raw_init = trace_event_raw_init,
* .regfunc = ftrace_reg_event_<call>,
* .unregfunc = ftrace_unreg_event_<call>,
- * .show_format = ftrace_format_<call>,
* }
*
*/
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
-#define _TRACE_PROFILE_INIT(call) \
- .profile_enable = ftrace_profile_enable_##call, \
- .profile_disable = ftrace_profile_disable_##call,
+#define _TRACE_PERF_INIT(call) \
+ .perf_event_enable = perf_trace_enable_##call, \
+ .perf_event_disable = perf_trace_disable_##call,
#else
-#define _TRACE_PROFILE_INIT(call)
-#endif
+#define _TRACE_PERF_INIT(call)
+#endif /* CONFIG_PERF_EVENTS */
#undef __entry
#define __entry entry
@@ -657,10 +535,17 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
#define __assign_str(dst, src) \
strcpy(__get_str(dst), src);
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
\
-static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
+static notrace void \
+ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
proto) \
{ \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
@@ -697,17 +582,19 @@ static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
\
-static void ftrace_raw_event_##call(proto) \
+static notrace void ftrace_raw_event_##call(proto) \
{ \
ftrace_raw_event_id_##template(&event_##call, args); \
} \
\
-static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
+static notrace int \
+ftrace_raw_reg_event_##call(struct ftrace_event_call *unused) \
{ \
return register_trace_##call(ftrace_raw_event_##call); \
} \
\
-static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
+static notrace void \
+ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused) \
{ \
unregister_trace_##call(ftrace_raw_event_##call); \
} \
@@ -722,8 +609,20 @@ static struct trace_event ftrace_event_type_##call = { \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __get_dynamic_array
+#undef __get_str
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
+
#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static const char print_fmt_##call[] = print;
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
@@ -737,14 +636,16 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
.raw_init = trace_event_raw_init, \
.regfunc = ftrace_raw_reg_event_##call, \
.unregfunc = ftrace_raw_unreg_event_##call, \
- .show_format = ftrace_format_##template, \
+ .print_fmt = print_fmt_##template, \
.define_fields = ftrace_define_fields_##template, \
- _TRACE_PROFILE_INIT(call) \
+ _TRACE_PERF_INIT(call) \
}
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
\
+static const char print_fmt_##call[] = print; \
+ \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
@@ -754,20 +655,20 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
.raw_init = trace_event_raw_init, \
.regfunc = ftrace_raw_reg_event_##call, \
.unregfunc = ftrace_raw_unreg_event_##call, \
- .show_format = ftrace_format_##call, \
+ .print_fmt = print_fmt_##call, \
.define_fields = ftrace_define_fields_##template, \
- _TRACE_PROFILE_INIT(call) \
+ _TRACE_PERF_INIT(call) \
}
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
- * Define the insertion callback to profile events
+ * Define the insertion callback to perf events
*
* The job is very similar to ftrace_raw_event_<call> except that we don't
* insert in the ring buffer but in a perf counter.
*
- * static void ftrace_profile_<call>(proto)
+ * static void ftrace_perf_<call>(proto)
* {
* struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
* struct ftrace_event_call *event_call = &event_<call>;
@@ -798,9 +699,9 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
* __cpu = smp_processor_id();
*
* if (in_nmi())
- * trace_buf = rcu_dereference(perf_trace_buf_nmi);
+ * trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
* else
- * trace_buf = rcu_dereference(perf_trace_buf);
+ * trace_buf = rcu_dereference_sched(perf_trace_buf);
*
* if (!trace_buf)
* goto end;
@@ -835,7 +736,17 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
* }
*/
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
+
+#undef __entry
+#define __entry entry
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field) \
+ ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
+
+#undef __get_str
+#define __get_str(field) (char *)__get_dynamic_array(field)
#undef __perf_addr
#define __perf_addr(a) __addr = (a)
@@ -845,81 +756,49 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static void \
-ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \
+static notrace void \
+perf_trace_templ_##call(struct ftrace_event_call *event_call, \
proto) \
{ \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
- extern int perf_swevent_get_recursion_context(void); \
- extern void perf_swevent_put_recursion_context(int rctx); \
- extern void perf_tp_event(int, u64, u64, void *, int); \
struct ftrace_raw_##call *entry; \
u64 __addr = 0, __count = 1; \
unsigned long irq_flags; \
- struct trace_entry *ent; \
+ struct pt_regs *__regs; \
int __entry_size; \
int __data_size; \
- char *trace_buf; \
- char *raw_data; \
- int __cpu; \
int rctx; \
- int pc; \
- \
- pc = preempt_count(); \
\
__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
sizeof(u64)); \
__entry_size -= sizeof(u32); \
\
- if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \
+ if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
"profile buffer not large enough")) \
return; \
- \
- local_irq_save(irq_flags); \
- \
- rctx = perf_swevent_get_recursion_context(); \
- if (rctx < 0) \
- goto end_recursion; \
- \
- __cpu = smp_processor_id(); \
- \
- if (in_nmi()) \
- trace_buf = rcu_dereference(perf_trace_buf_nmi); \
- else \
- trace_buf = rcu_dereference(perf_trace_buf); \
- \
- if (!trace_buf) \
- goto end; \
- \
- raw_data = per_cpu_ptr(trace_buf, __cpu); \
- \
- *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
- entry = (struct ftrace_raw_##call *)raw_data; \
- ent = &entry->ent; \
- tracing_generic_entry_update(ent, irq_flags, pc); \
- ent->type = event_call->id; \
- \
+ entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
+ __entry_size, event_call->id, &rctx, &irq_flags); \
+ if (!entry) \
+ return; \
tstruct \
\
{ assign; } \
\
- perf_tp_event(event_call->id, __addr, __count, entry, \
- __entry_size); \
+ __regs = &__get_cpu_var(perf_trace_regs); \
+ perf_fetch_caller_regs(__regs, 2); \
\
-end: \
- perf_swevent_put_recursion_context(rctx); \
-end_recursion: \
- local_irq_restore(irq_flags); \
+ perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
+ __count, irq_flags, __regs); \
}
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
-static void ftrace_profile_##call(proto) \
+static notrace void perf_trace_##call(proto) \
{ \
struct ftrace_event_call *event_call = &event_##call; \
\
- ftrace_profile_templ_##template(event_call, args); \
+ perf_trace_templ_##template(event_call, args); \
}
#undef DEFINE_EVENT_PRINT
@@ -927,7 +806,7 @@ static void ftrace_profile_##call(proto) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_PERF_EVENTS */
#undef _TRACE_PROFILE_INIT
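With __entry redefined to REC and TP_printk() redefined to stringify its arguments, the print_fmt_<call> strings that replace the removed ftrace_format_<call>() callbacks come out as plain C string constants. A hedged worked example (the event name "sample" and its "load" field are hypothetical, not part of this patch):

/*
 * For a hypothetical event declared with
 *	TP_printk("load %d", __entry->load)
 * the DECLARE_EVENT_CLASS() stage above would emit roughly:
 */
static const char print_fmt_sample[] = "\"load %d\", REC->load";
/* .print_fmt in the ftrace_event_call then points at this string. */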
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 961fda3556bb..e5e5f48dbfb3 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -34,10 +34,6 @@ struct syscall_metadata {
extern unsigned long arch_syscall_addr(int nr);
extern int init_syscall_trace(struct ftrace_event_call *call);
-extern int syscall_enter_format(struct ftrace_event_call *call,
- struct trace_seq *s);
-extern int syscall_exit_format(struct ftrace_event_call *call,
- struct trace_seq *s);
extern int syscall_enter_define_fields(struct ftrace_event_call *call);
extern int syscall_exit_define_fields(struct ftrace_event_call *call);
extern int reg_event_syscall_enter(struct ftrace_event_call *call);
@@ -49,12 +45,12 @@ ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags);
enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
#endif
-#ifdef CONFIG_EVENT_PROFILE
-int prof_sysenter_enable(struct ftrace_event_call *call);
-void prof_sysenter_disable(struct ftrace_event_call *call);
-int prof_sysexit_enable(struct ftrace_event_call *call);
-void prof_sysexit_disable(struct ftrace_event_call *call);
+#ifdef CONFIG_PERF_EVENTS
+int perf_sysenter_enable(struct ftrace_event_call *call);
+void perf_sysenter_disable(struct ftrace_event_call *call);
+int perf_sysexit_enable(struct ftrace_event_call *call);
+void perf_sysexit_disable(struct ftrace_event_call *call);
#endif
#endif /* _TRACE_SYSCALL_H */