-rw-r--r--   arch/arc/kernel/unaligned.c |  2 ++
-rw-r--r--   arch/arc/mm/fault.c         | 12 ++++++++++--
2 files changed, 12 insertions, 2 deletions
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 7ff5b5c183bb..74db59b6f392 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -12,6 +12,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/perf_event.h>
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
 #include <asm/disasm.h>
@@ -253,6 +254,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 		}
 	}
 
+	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
 	return 0;
 
 fault:
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 563cb27e37f5..6a2e006cbcce 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -14,6 +14,7 @@
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
 #include <linux/kdebug.h>
+#include <linux/perf_event.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu.h>
 
@@ -139,13 +140,20 @@ good_area:
 		return;
 	}
 
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
 	if (likely(!(fault & VM_FAULT_ERROR))) {
 		if (flags & FAULT_FLAG_ALLOW_RETRY) {
 			/* To avoid updating stats twice for retry case */
-			if (fault & VM_FAULT_MAJOR)
+			if (fault & VM_FAULT_MAJOR) {
 				tsk->maj_flt++;
-			else
+				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+					      regs, address);
+			} else {
 				tsk->min_flt++;
+				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+					      regs, address);
+			}
 
 			if (fault & VM_FAULT_RETRY) {
 				flags &= ~FAULT_FLAG_ALLOW_RETRY;
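
The patch above wires the ARC page-fault and misaligned-access handlers into the generic perf software counters, so PERF_COUNT_SW_PAGE_FAULTS, PERF_COUNT_SW_PAGE_FAULTS_MAJ/MIN and PERF_COUNT_SW_ALIGNMENT_FAULTS become visible to the usual tooling, for example "perf stat -e page-faults,minor-faults,major-faults,alignment-faults -- <cmd>". As a rough illustration only, and not part of the patch, the userspace sketch below opens the PERF_COUNT_SW_PAGE_FAULTS counter for the calling thread through the perf_event_open() syscall; the mmap loop is just a placeholder workload that forces some minor faults.

/*
 * Sketch (assumption: any Linux target with perf_event_open support).
 * Counts PERF_COUNT_SW_PAGE_FAULTS for the calling thread, i.e. the
 * events emitted by the perf_sw_event() calls added in the patch above.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	/* no glibc wrapper exists, so go through syscall() */
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long page = sysconf(_SC_PAGESIZE);
	uint64_t count;
	char *buf;
	int fd, i;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_PAGE_FAULTS;	/* the counter bumped in do_page_fault() */
	attr.disabled = 1;				/* start stopped, enable explicitly below */

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this thread, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	/* placeholder workload: fault in 64 anonymous pages */
	buf = mmap(NULL, 64 * page, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	for (i = 0; i < 64; i++)
		buf[i * page] = 1;

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("page faults: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}

Because these are software events maintained by the core kernel rather than PMU hardware counters, they need no architecture-specific PMU support; the handlers only have to call perf_sw_event() at the points the patch touches.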