From 67516844625f45f0ce148a01c27bf41f591872b2 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 9 Jul 2013 18:56:31 +0200 Subject: perf: Remove the 'match' callback for auxiliary events processing It gives the following benefits: - only one function pointer is passed along the way - the 'match' function is called within output function and could be inlined by the compiler Suggested-by: Peter Zijlstra Signed-off-by: Jiri Olsa Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1373388991-9711-1-git-send-email-jolsa@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 79 ++++++++++++++++++++++++++-------------------------- 1 file changed, 39 insertions(+), 40 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index eba8fb5834ae..708ab70ca442 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4680,12 +4680,10 @@ perf_event_read_event(struct perf_event *event, perf_output_end(&handle); } -typedef int (perf_event_aux_match_cb)(struct perf_event *event, void *data); typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data); static void perf_event_aux_ctx(struct perf_event_context *ctx, - perf_event_aux_match_cb match, perf_event_aux_output_cb output, void *data) { @@ -4696,15 +4694,12 @@ perf_event_aux_ctx(struct perf_event_context *ctx, continue; if (!event_filter_match(event)) continue; - if (match(event, data)) - output(event, data); + output(event, data); } } static void -perf_event_aux(perf_event_aux_match_cb match, - perf_event_aux_output_cb output, - void *data, +perf_event_aux(perf_event_aux_output_cb output, void *data, struct perf_event_context *task_ctx) { struct perf_cpu_context *cpuctx; @@ -4717,7 +4712,7 @@ perf_event_aux(perf_event_aux_match_cb match, cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); if (cpuctx->unique_pmu != pmu) goto next; - perf_event_aux_ctx(&cpuctx->ctx, match, output, data); + perf_event_aux_ctx(&cpuctx->ctx, output, data); if (task_ctx) goto next; ctxn = pmu->task_ctx_nr; @@ -4725,14 +4720,14 @@ perf_event_aux(perf_event_aux_match_cb match, goto next; ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); if (ctx) - perf_event_aux_ctx(ctx, match, output, data); + perf_event_aux_ctx(ctx, output, data); next: put_cpu_ptr(pmu->pmu_cpu_context); } if (task_ctx) { preempt_disable(); - perf_event_aux_ctx(task_ctx, match, output, data); + perf_event_aux_ctx(task_ctx, output, data); preempt_enable(); } rcu_read_unlock(); @@ -4759,6 +4754,12 @@ struct perf_task_event { } event_id; }; +static int perf_event_task_match(struct perf_event *event) +{ + return event->attr.comm || event->attr.mmap || + event->attr.mmap_data || event->attr.task; +} + static void perf_event_task_output(struct perf_event *event, void *data) { @@ -4768,6 +4769,9 @@ static void perf_event_task_output(struct perf_event *event, struct task_struct *task = task_event->task; int ret, size = task_event->event_id.header.size; + if (!perf_event_task_match(event)) + return; + perf_event_header__init_id(&task_event->event_id.header, &sample, event); ret = perf_output_begin(&handle, event, @@ -4790,13 +4794,6 @@ out: task_event->event_id.header.size = size; } -static int perf_event_task_match(struct perf_event *event, - void *data __maybe_unused) -{ - return event->attr.comm || event->attr.mmap || - event->attr.mmap_data || event->attr.task; -} - static void perf_event_task(struct task_struct 
*task, struct perf_event_context *task_ctx, int new) @@ -4825,8 +4822,7 @@ static void perf_event_task(struct task_struct *task, }, }; - perf_event_aux(perf_event_task_match, - perf_event_task_output, + perf_event_aux(perf_event_task_output, &task_event, task_ctx); } @@ -4853,6 +4849,11 @@ struct perf_comm_event { } event_id; }; +static int perf_event_comm_match(struct perf_event *event) +{ + return event->attr.comm; +} + static void perf_event_comm_output(struct perf_event *event, void *data) { @@ -4862,6 +4863,9 @@ static void perf_event_comm_output(struct perf_event *event, int size = comm_event->event_id.header.size; int ret; + if (!perf_event_comm_match(event)) + return; + perf_event_header__init_id(&comm_event->event_id.header, &sample, event); ret = perf_output_begin(&handle, event, comm_event->event_id.header.size); @@ -4883,12 +4887,6 @@ out: comm_event->event_id.header.size = size; } -static int perf_event_comm_match(struct perf_event *event, - void *data __maybe_unused) -{ - return event->attr.comm; -} - static void perf_event_comm_event(struct perf_comm_event *comm_event) { char comm[TASK_COMM_LEN]; @@ -4903,8 +4901,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; - perf_event_aux(perf_event_comm_match, - perf_event_comm_output, + perf_event_aux(perf_event_comm_output, comm_event, NULL); } @@ -4967,6 +4964,17 @@ struct perf_mmap_event { } event_id; }; +static int perf_event_mmap_match(struct perf_event *event, + void *data) +{ + struct perf_mmap_event *mmap_event = data; + struct vm_area_struct *vma = mmap_event->vma; + int executable = vma->vm_flags & VM_EXEC; + + return (!executable && event->attr.mmap_data) || + (executable && event->attr.mmap); +} + static void perf_event_mmap_output(struct perf_event *event, void *data) { @@ -4976,6 +4984,9 @@ static void perf_event_mmap_output(struct perf_event *event, int size = mmap_event->event_id.header.size; int ret; + if (!perf_event_mmap_match(event, data)) + return; + perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); ret = perf_output_begin(&handle, event, mmap_event->event_id.header.size); @@ -4996,17 +5007,6 @@ out: mmap_event->event_id.header.size = size; } -static int perf_event_mmap_match(struct perf_event *event, - void *data) -{ - struct perf_mmap_event *mmap_event = data; - struct vm_area_struct *vma = mmap_event->vma; - int executable = vma->vm_flags & VM_EXEC; - - return (!executable && event->attr.mmap_data) || - (executable && event->attr.mmap); -} - static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) { struct vm_area_struct *vma = mmap_event->vma; @@ -5070,8 +5070,7 @@ got_name: mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; - perf_event_aux(perf_event_mmap_match, - perf_event_mmap_output, + perf_event_aux(perf_event_mmap_output, mmap_event, NULL); -- cgit v1.2.3 From fd4363fff3d96795d3feb1b3fb48ce590f186bdd Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Fri, 12 Jul 2013 11:21:48 +0200 Subject: x86: Introduce int3 (breakpoint)-based instruction patching Introduce a method for run-time instruction patching on a live SMP kernel based on int3 breakpoint, completely avoiding the need for stop_machine(). 
The way this is achieved: - add a int3 trap to the address that will be patched - sync cores - update all but the first byte of the patched range - sync cores - replace the first byte (int3) by the first byte of replacing opcode - sync cores According to http://lkml.indiana.edu/hypermail/linux/kernel/1001.1/01530.html synchronization after replacing "all but first" instructions should not be necessary (on Intel hardware), as the syncing after the subsequent patching of the first byte provides enough safety. But there's not only Intel HW out there, and we'd rather be on a safe side. If any CPU instruction execution would collide with the patching, it'd be trapped by the int3 breakpoint and redirected to the provided "handler" (which would typically mean just skipping over the patched region, acting as "nop" has been there, in case we are doing nop -> jump and jump -> nop transitions). Ftrace has been using this very technique since 08d636b ("ftrace/x86: Have arch x86_64 use breakpoints instead of stop machine") for ages already, and jump labels are another obvious potential user of this. Based on activities of Masami Hiramatsu a few years ago. Reviewed-by: Steven Rostedt Reviewed-by: Masami Hiramatsu Signed-off-by: Jiri Kosina Link: http://lkml.kernel.org/r/alpine.LNX.2.00.1307121102440.29788@pobox.suse.cz Signed-off-by: H. Peter Anvin --- kernel/kprobes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 6e33498d665c..b58b490fa439 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1709,7 +1709,7 @@ EXPORT_SYMBOL_GPL(unregister_kprobes); static struct notifier_block kprobe_exceptions_nb = { .notifier_call = kprobe_exceptions_notify, - .priority = 0x7fffffff /* we need to be notified first */ + .priority = 0x7ffffff0 /* High priority, but not first. */ }; unsigned long __weak arch_deref_entry_point(void *entry) -- cgit v1.2.3 From 17f41571bb2c4a398785452ac2718a6c5d77180e Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Tue, 23 Jul 2013 10:09:28 +0200 Subject: kprobes/x86: Call out into INT3 handler directly instead of using notifier In fd4363fff3d96 ("x86: Introduce int3 (breakpoint)-based instruction patching"), the mechanism that was introduced for notifying alternatives code from int3 exception handler that and exception occured was die_notifier. This is however problematic, as early code might be using jump labels even before the notifier registration has been performed, which will then lead to an oops due to unhandled exception. 
One of such occurences has been encountered by Fengguang: int3: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC Modules linked in: CPU: 1 PID: 0 Comm: swapper/1 Not tainted 3.11.0-rc1-01429-g04bf576 #8 task: ffff88000da1b040 ti: ffff88000da1c000 task.ti: ffff88000da1c000 RIP: 0010:[] [] ttwu_do_wakeup+0x28/0x225 RSP: 0000:ffff88000dd03f10 EFLAGS: 00000006 RAX: 0000000000000000 RBX: ffff88000dd12940 RCX: ffffffff81769c40 RDX: 0000000000000002 RSI: 0000000000000000 RDI: 0000000000000001 RBP: ffff88000dd03f28 R08: ffffffff8176a8c0 R09: 0000000000000002 R10: ffffffff810ff484 R11: ffff88000dd129e8 R12: ffff88000dbc90c0 R13: ffff88000dbc90c0 R14: ffff88000da1dfd8 R15: ffff88000da1dfd8 FS: 0000000000000000(0000) GS:ffff88000dd00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 00000000ffffffff CR3: 0000000001c88000 CR4: 00000000000006e0 Stack: ffff88000dd12940 ffff88000dbc90c0 ffff88000da1dfd8 ffff88000dd03f48 ffffffff81109e2b ffff88000dd12940 0000000000000000 ffff88000dd03f68 ffffffff81109e9e 0000000000000000 0000000000012940 ffff88000dd03f98 Call Trace: [] ttwu_do_activate.constprop.56+0x6d/0x79 [] sched_ttwu_pending+0x67/0x84 [] scheduler_ipi+0x15a/0x2b0 [] smp_reschedule_interrupt+0x38/0x41 [] reschedule_interrupt+0x6d/0x80 [] ? __atomic_notifier_call_chain+0x5/0xc1 [] ? native_safe_halt+0xd/0x16 [] default_idle+0x147/0x282 [] arch_cpu_idle+0x3d/0x5d [] cpu_idle_loop+0x46d/0x5db [] cpu_startup_entry+0x84/0x84 [] start_secondary+0x3c8/0x3d5 [...] Fix this by directly calling poke_int3_handler() from the int3 exception handler (analogically to what ftrace has been doing already), instead of relying on notifier, registration of which might not have yet been finalized by the time of the first trap. Reported-and-tested-by: Fengguang Wu Signed-off-by: Jiri Kosina Acked-by: Masami Hiramatsu Cc: H. Peter Anvin Cc: Fengguang Wu Cc: Steven Rostedt Link: http://lkml.kernel.org/r/alpine.LNX.2.00.1307231007490.14024@pobox.suse.cz Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/kprobes.c b/kernel/kprobes.c index b58b490fa439..6e33498d665c 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1709,7 +1709,7 @@ EXPORT_SYMBOL_GPL(unregister_kprobes); static struct notifier_block kprobe_exceptions_nb = { .notifier_call = kprobe_exceptions_notify, - .priority = 0x7ffffff0 /* High priority, but not first. */ + .priority = 0x7fffffff /* we need to be notified first */ }; unsigned long __weak arch_deref_entry_point(void *entry) -- cgit v1.2.3 From a5cdd40c9877e9aba704c020fd65d26b5cfecf18 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 16 Jul 2013 17:09:07 +0200 Subject: perf: Update perf_event_type documentation Due to a discussion with Adrian I had a good look at the perf_event_type record layout and found the documentation to be somewhat unclear. 
Cc: Adrian Hunter Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20130716150907.GL23818@dyad.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- kernel/events/core.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 5e2bce90b477..127411400116 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4462,20 +4462,6 @@ void perf_output_sample(struct perf_output_handle *handle, } } - if (!event->attr.watermark) { - int wakeup_events = event->attr.wakeup_events; - - if (wakeup_events) { - struct ring_buffer *rb = handle->rb; - int events = local_inc_return(&rb->events); - - if (events >= wakeup_events) { - local_sub(wakeup_events, &rb->events); - local_inc(&rb->wakeup); - } - } - } - if (sample_type & PERF_SAMPLE_BRANCH_STACK) { if (data->br_stack) { size_t size; @@ -4511,16 +4497,31 @@ void perf_output_sample(struct perf_output_handle *handle, } } - if (sample_type & PERF_SAMPLE_STACK_USER) + if (sample_type & PERF_SAMPLE_STACK_USER) { perf_output_sample_ustack(handle, data->stack_user_size, data->regs_user.regs); + } if (sample_type & PERF_SAMPLE_WEIGHT) perf_output_put(handle, data->weight); if (sample_type & PERF_SAMPLE_DATA_SRC) perf_output_put(handle, data->data_src.val); + + if (!event->attr.watermark) { + int wakeup_events = event->attr.wakeup_events; + + if (wakeup_events) { + struct ring_buffer *rb = handle->rb; + int events = local_inc_return(&rb->events); + + if (events >= wakeup_events) { + local_sub(wakeup_events, &rb->events); + local_inc(&rb->wakeup); + } + } + } } void perf_prepare_sample(struct perf_event_header *header, -- cgit v1.2.3 From 685207963be973fbb73550db6edaf920a283e1a7 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Mon, 15 Jul 2013 17:49:19 +0400 Subject: sched: Move h_load calculation to task_h_load() The bad thing about update_h_load(), which computes hierarchical load factor for task groups, is that it is called for each task group in the system before every load balancer run, and since rebalance can be triggered very often, this function can eat really a lot of cpu time if there are many cpu cgroups in the system. Although the situation was improved significantly by commit a35b646 ('sched, cgroup: Reduce rq->lock hold times for large cgroup hierarchies'), the problem still can arise under some kinds of loads, e.g. when cpus are switching from idle to busy and back very frequently. For instance, when I start 1000 of processes that wake up every millisecond on my 8 cpus host, 'top' and 'perf top' show: Cpu(s): 17.8%us, 24.3%sy, 0.0%ni, 57.9%id, 0.0%wa, 0.0%hi, 0.0%si Events: 243K cycles 7.57% [kernel] [k] __schedule 7.08% [kernel] [k] timerqueue_add 6.13% libc-2.12.so [.] usleep Then if I create 10000 *idle* cpu cgroups (no processes in them), cpu usage increases significantly although the 'wakers' are still executing in the root cpu cgroup: Cpu(s): 19.1%us, 48.7%sy, 0.0%ni, 31.6%id, 0.0%wa, 0.0%hi, 0.7%si Events: 230K cycles 24.56% [kernel] [k] tg_load_down 5.76% [kernel] [k] __schedule This happens because this particular kind of load triggers 'new idle' rebalance very frequently, which requires calling update_h_load(), which, in turn, calls tg_load_down() for every *idle* cpu cgroup even though it is absolutely useless, because idle cpu cgroups have no tasks to pull. This patch tries to improve the situation by making h_load calculation proceed only when h_load is really necessary. 
To achieve this, it substitutes update_h_load() with update_cfs_rq_h_load(), which computes h_load only for a given cfs_rq and all its ascendants, and makes the load balancer call this function whenever it considers if a task should be pulled, i.e. it moves h_load calculations directly to task_h_load(). For h_load of the same cfs_rq not to be updated multiple times (in case several tasks in the same cgroup are considered during the same balance run), the patch keeps the time of the last h_load update for each cfs_rq and breaks calculation when it finds h_load to be uptodate. The benefit of it is that h_load is computed only for those cfs_rq's, which really need it, in particular all idle task groups are skipped. Although this, in fact, moves h_load calculation under rq lock, it should not affect latency much, because the amount of work done under rq lock while trying to pull tasks is limited by sched_nr_migrate. After the patch applied with the setup described above (1000 wakers in the root cgroup and 10000 idle cgroups), I get: Cpu(s): 16.9%us, 24.8%sy, 0.0%ni, 58.4%id, 0.0%wa, 0.0%hi, 0.0%si Events: 242K cycles 7.57% [kernel] [k] __schedule 6.70% [kernel] [k] timerqueue_add 5.93% libc-2.12.so [.] usleep Signed-off-by: Vladimir Davydov Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1373896159-1278-1-git-send-email-vdavydov@parallels.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 58 ++++++++++++++++++++++++---------------------------- kernel/sched/sched.h | 7 +++---- 2 files changed, 30 insertions(+), 35 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index bb456f44b7b1..765d87acdf05 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4171,47 +4171,48 @@ static void update_blocked_averages(int cpu) } /* - * Compute the cpu's hierarchical load factor for each task group. + * Compute the hierarchical load factor for cfs_rq and all its ascendants. * This needs to be done in a top-down fashion because the load of a child * group is a fraction of its parents load. 
*/ -static int tg_load_down(struct task_group *tg, void *data) +static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) { - unsigned long load; - long cpu = (long)data; - - if (!tg->parent) { - load = cpu_rq(cpu)->avg.load_avg_contrib; - } else { - load = tg->parent->cfs_rq[cpu]->h_load; - load = div64_ul(load * tg->se[cpu]->avg.load_avg_contrib, - tg->parent->cfs_rq[cpu]->runnable_load_avg + 1); - } - - tg->cfs_rq[cpu]->h_load = load; - - return 0; -} - -static void update_h_load(long cpu) -{ - struct rq *rq = cpu_rq(cpu); + struct rq *rq = rq_of(cfs_rq); + struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; unsigned long now = jiffies; + unsigned long load; - if (rq->h_load_throttle == now) + if (cfs_rq->last_h_load_update == now) return; - rq->h_load_throttle = now; + cfs_rq->h_load_next = NULL; + for_each_sched_entity(se) { + cfs_rq = cfs_rq_of(se); + cfs_rq->h_load_next = se; + if (cfs_rq->last_h_load_update == now) + break; + } - rcu_read_lock(); - walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); - rcu_read_unlock(); + if (!se) { + cfs_rq->h_load = rq->avg.load_avg_contrib; + cfs_rq->last_h_load_update = now; + } + + while ((se = cfs_rq->h_load_next) != NULL) { + load = cfs_rq->h_load; + load = div64_ul(load * se->avg.load_avg_contrib, + cfs_rq->runnable_load_avg + 1); + cfs_rq = group_cfs_rq(se); + cfs_rq->h_load = load; + cfs_rq->last_h_load_update = now; + } } static unsigned long task_h_load(struct task_struct *p) { struct cfs_rq *cfs_rq = task_cfs_rq(p); + update_cfs_rq_h_load(cfs_rq); return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load, cfs_rq->runnable_load_avg + 1); } @@ -4220,10 +4221,6 @@ static inline void update_blocked_averages(int cpu) { } -static inline void update_h_load(long cpu) -{ -} - static unsigned long task_h_load(struct task_struct *p) { return p->se.avg.load_avg_contrib; @@ -5108,7 +5105,6 @@ redo: env.src_rq = busiest; env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); - update_h_load(env.src_cpu); more_balance: local_irq_save(flags); double_rq_lock(env.dst_rq, busiest); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index ef0a7b2439dd..5e129efb84ce 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -285,7 +285,6 @@ struct cfs_rq { /* Required to track per-cpu representation of a task_group */ u32 tg_runnable_contrib; unsigned long tg_load_contrib; -#endif /* CONFIG_FAIR_GROUP_SCHED */ /* * h_load = weight * f(tg) @@ -294,6 +293,9 @@ struct cfs_rq { * this group. */ unsigned long h_load; + u64 last_h_load_update; + struct sched_entity *h_load_next; +#endif /* CONFIG_FAIR_GROUP_SCHED */ #endif /* CONFIG_SMP */ #ifdef CONFIG_FAIR_GROUP_SCHED @@ -429,9 +431,6 @@ struct rq { #ifdef CONFIG_FAIR_GROUP_SCHED /* list of leaf cfs_rq on this cpu: */ struct list_head leaf_cfs_rq_list; -#ifdef CONFIG_SMP - unsigned long h_load_throttle; -#endif /* CONFIG_SMP */ #endif /* CONFIG_FAIR_GROUP_SCHED */ #ifdef CONFIG_RT_GROUP_SCHED -- cgit v1.2.3 From 62470419e993f8d9d93db0effd3af4296ecb79a5 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Thu, 4 Jul 2013 12:55:51 +0800 Subject: sched: Implement smarter wake-affine logic The wake-affine scheduler feature is currently always trying to pull the wakee close to the waker. In theory this should be beneficial if the waker's CPU caches hot data for the wakee, and it's also beneficial in the extreme ping-pong high context switch rate case. Testing shows it can benefit hackbench up to 15%. However, the feature is somewhat blind, from which some workloads such as pgbench suffer. 
It's also time-consuming algorithmically. Testing shows it can damage pgbench up to 50% - far more than the benefit it brings in the best case. So wake-affine should be smarter and it should realize when to stop its thankless effort at trying to find a suitable CPU to wake on. This patch introduces 'wakee_flips', which will be increased each time the task flips (switches) its wakee target. So a high 'wakee_flips' value means the task has more than one wakee, and the bigger the number, the higher the wakeup frequency. Now when making the decision on whether to pull or not, pay attention to the wakee with a high 'wakee_flips', pulling such a task may benefit the wakee. Also imply that the waker will face cruel competition later, it could be very cruel or very fast depends on the story behind 'wakee_flips', waker therefore suffers. Furthermore, if waker also has a high 'wakee_flips', that implies that multiple tasks rely on it, then waker's higher latency will damage all of them, so pulling wakee seems to be a bad deal. Thus, when 'waker->wakee_flips / wakee->wakee_flips' becomes higher and higher, the cost of pulling seems to be worse and worse. The patch therefore helps the wake-affine feature to stop its pulling work when: wakee->wakee_flips > factor && waker->wakee_flips > (factor * wakee->wakee_flips) The 'factor' here is the number of CPUs in the current CPU's NUMA node, so a bigger node will lead to more pulling since the trial becomes more severe. After applying the patch, pgbench shows up to 40% improvements and no regressions. Tested with 12 cpu x86 server and tip 3.10.0-rc7. The percentages in the final column highlight the areas with the biggest wins, all other areas improved as well: pgbench base smart | db_size | clients | tps | | tps | +---------+---------+-------+ +-------+ | 22 MB | 1 | 10598 | | 10796 | | 22 MB | 2 | 21257 | | 21336 | | 22 MB | 4 | 41386 | | 41622 | | 22 MB | 8 | 51253 | | 57932 | | 22 MB | 12 | 48570 | | 54000 | | 22 MB | 16 | 46748 | | 55982 | +19.75% | 22 MB | 24 | 44346 | | 55847 | +25.93% | 22 MB | 32 | 43460 | | 54614 | +25.66% | 7484 MB | 1 | 8951 | | 9193 | | 7484 MB | 2 | 19233 | | 19240 | | 7484 MB | 4 | 37239 | | 37302 | | 7484 MB | 8 | 46087 | | 50018 | | 7484 MB | 12 | 42054 | | 48763 | | 7484 MB | 16 | 40765 | | 51633 | +26.66% | 7484 MB | 24 | 37651 | | 52377 | +39.11% | 7484 MB | 32 | 37056 | | 51108 | +37.92% | 15 GB | 1 | 8845 | | 9104 | | 15 GB | 2 | 19094 | | 19162 | | 15 GB | 4 | 36979 | | 36983 | | 15 GB | 8 | 46087 | | 49977 | | 15 GB | 12 | 41901 | | 48591 | | 15 GB | 16 | 40147 | | 50651 | +26.16% | 15 GB | 24 | 37250 | | 52365 | +40.58% | 15 GB | 32 | 36470 | | 50015 | +37.14% Signed-off-by: Michael Wang Cc: Mike Galbraith Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/51D50057.9000809@linux.vnet.ibm.com [ Improved the changelog. ] Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) (limited to 'kernel') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 765d87acdf05..860063a8c849 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3017,6 +3017,23 @@ static unsigned long cpu_avg_load_per_task(int cpu) return 0; } +static void record_wakee(struct task_struct *p) +{ + /* + * Rough decay (wiping) for cost saving, don't worry + * about the boundary, really active task won't care + * about the loss. 
+ */ + if (jiffies > current->wakee_flip_decay_ts + HZ) { + current->wakee_flips = 0; + current->wakee_flip_decay_ts = jiffies; + } + + if (current->last_wakee != p) { + current->last_wakee = p; + current->wakee_flips++; + } +} static void task_waking_fair(struct task_struct *p) { @@ -3037,6 +3054,7 @@ static void task_waking_fair(struct task_struct *p) #endif se->vruntime -= min_vruntime; + record_wakee(p); } #ifdef CONFIG_FAIR_GROUP_SCHED @@ -3155,6 +3173,28 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu, #endif +static int wake_wide(struct task_struct *p) +{ + int factor = nr_cpus_node(cpu_to_node(smp_processor_id())); + + /* + * Yeah, it's the switching-frequency, could means many wakee or + * rapidly switch, use factor here will just help to automatically + * adjust the loose-degree, so bigger node will lead to more pull. + */ + if (p->wakee_flips > factor) { + /* + * wakee is somewhat hot, it needs certain amount of cpu + * resource, so if waker is far more hot, prefer to leave + * it alone. + */ + if (current->wakee_flips > (factor * p->wakee_flips)) + return 1; + } + + return 0; +} + static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) { s64 this_load, load; @@ -3164,6 +3204,13 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) unsigned long weight; int balanced; + /* + * If we wake multiple tasks be careful to not bounce + * ourselves around too much. + */ + if (wake_wide(p)) + return 0; + idx = sd->wake_idx; this_cpu = smp_processor_id(); prev_cpu = task_cpu(p); -- cgit v1.2.3 From 7d9ffa8961482232d964173cccba6e14d2d543b2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 4 Jul 2013 12:56:46 +0800 Subject: sched: Micro-optimize the smart wake-affine logic Smart wake-affine is using node-size as the factor currently, but the overhead of the mask operation is high. Thus, this patch introduce the 'sd_llc_size' percpu variable, which will record the highest cache-share domain size, and make it to be the new factor, in order to reduce the overhead and make it more reasonable. Tested-by: Davidlohr Bueso Tested-by: Michael Wang Signed-off-by: Peter Zijlstra Acked-by: Michael Wang Cc: Mike Galbraith Link: http://lkml.kernel.org/r/51D5008E.6030102@linux.vnet.ibm.com [ Tidied up the changelog. ] Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 7 ++++++- kernel/sched/fair.c | 2 +- kernel/sched/sched.h | 1 + 3 files changed, 8 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b7c32cb7bfeb..6df0fbe53767 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5083,18 +5083,23 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu) * two cpus are in the same cache domain, see cpus_share_cache(). 
*/ DEFINE_PER_CPU(struct sched_domain *, sd_llc); +DEFINE_PER_CPU(int, sd_llc_size); DEFINE_PER_CPU(int, sd_llc_id); static void update_top_cache_domain(int cpu) { struct sched_domain *sd; int id = cpu; + int size = 1; sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); - if (sd) + if (sd) { id = cpumask_first(sched_domain_span(sd)); + size = cpumask_weight(sched_domain_span(sd)); + } rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); + per_cpu(sd_llc_size, cpu) = size; per_cpu(sd_llc_id, cpu) = id; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 860063a8c849..f237437446e5 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3175,7 +3175,7 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu, static int wake_wide(struct task_struct *p) { - int factor = nr_cpus_node(cpu_to_node(smp_processor_id())); + int factor = this_cpu_read(sd_llc_size); /* * Yeah, it's the switching-frequency, could means many wakee or diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 5e129efb84ce..4c1cb8029feb 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -594,6 +594,7 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag) } DECLARE_PER_CPU(struct sched_domain *, sd_llc); +DECLARE_PER_CPU(int, sd_llc_size); DECLARE_PER_CPU(int, sd_llc_id); struct sched_group_power { -- cgit v1.2.3 From 6050cb0b0b366092d1383bc23d7b16cd26db00f0 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 23 Jul 2013 02:30:59 +0200 Subject: perf: Fix branch stack refcount leak on callchain init failure On callchain buffers allocation failure, free_event() is called and all the accounting performed in perf_event_alloc() for that event is cancelled. But if the event has branch stack sampling, it is unaccounted as well from the branch stack sampling events refcounts. This is a bug because this accounting is performed after the callchain buffer allocation. As a result, the branch stack sampling events refcount can become negative. To fix this, move the branch stack event accounting before the callchain buffer allocation. 
Reported-by: Peter Zijlstra Signed-off-by: Frederic Weisbecker Cc: Jiri Olsa Cc: Namhyung Kim Cc: Arnaldo Carvalho de Melo Cc: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1374539466-4799-2-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 127411400116..f35aa7e69e2d 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6567,6 +6567,12 @@ done: atomic_inc(&nr_comm_events); if (event->attr.task) atomic_inc(&nr_task_events); + if (has_branch_stack(event)) { + static_key_slow_inc(&perf_sched_events.key); + if (!(event->attach_state & PERF_ATTACH_TASK)) + atomic_inc(&per_cpu(perf_branch_stack_events, + event->cpu)); + } if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { err = get_callchain_buffers(); if (err) { @@ -6574,12 +6580,6 @@ done: return ERR_PTR(err); } } - if (has_branch_stack(event)) { - static_key_slow_inc(&perf_sched_events.key); - if (!(event->attach_state & PERF_ATTACH_TASK)) - atomic_inc(&per_cpu(perf_branch_stack_events, - event->cpu)); - } } return event; -- cgit v1.2.3 From 90983b16078ab0fdc58f0dab3e8e3da79c9579a2 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 23 Jul 2013 02:31:00 +0200 Subject: perf: Sanitize get_callchain_buffer() In case of allocation failure, get_callchain_buffer() keeps the refcount incremented for the current event. As a result, when get_callchain_buffers() returns an error, we must cleanup what it did by cancelling its last refcount with a call to put_callchain_buffers(). This is a hack in order to be able to call free_event() after that failure. The original purpose of that was to simplify the failure path. But this error handling is actually counter intuitive, ugly and not very easy to follow because one expect to see the resources used to perform a service to be cleaned by the callee if case of failure, not by the caller. So lets clean this up by cancelling the refcount from get_callchain_buffer() in case of failure. And correctly free the event accordingly in perf_event_alloc(). 
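
The calling convention argued for here can be shown with a small standalone sketch (plain C, hypothetical names, not taken from the kernel tree): the helper that takes a reference also drops it on its own failure, so the caller's error path only has to release what the caller itself set up.

  #include <errno.h>
  #include <stdlib.h>

  /* Hypothetical refcounted resource, for illustration only. */
  struct resource {
          int refcount;
          void *buf;
  };

  /*
   * Callee-cleans-up style: if the allocation fails, the helper rolls back
   * the reference it took itself instead of leaving that to the caller.
   */
  static int resource_get(struct resource *r)
  {
          r->refcount++;

          if (!r->buf) {
                  r->buf = malloc(4096);
                  if (!r->buf) {
                          r->refcount--;  /* undo our own side effect */
                          return -ENOMEM;
                  }
          }
          return 0;
  }
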
Signed-off-by: Frederic Weisbecker Cc: Jiri Olsa Cc: Namhyung Kim Cc: Arnaldo Carvalho de Melo Cc: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1374539466-4799-3-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- kernel/events/callchain.c | 2 ++ kernel/events/core.c | 41 +++++++++++++++++++++-------------------- 2 files changed, 23 insertions(+), 20 deletions(-) (limited to 'kernel') diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index c77206184b8b..76a8bc5f6265 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -117,6 +117,8 @@ int get_callchain_buffers(void) err = alloc_callchain_buffers(); exit: mutex_unlock(&callchain_mutex); + if (err) + atomic_dec(&nr_callchain_events); return err; } diff --git a/kernel/events/core.c b/kernel/events/core.c index f35aa7e69e2d..3b998626b7a0 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6457,7 +6457,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, struct pmu *pmu; struct perf_event *event; struct hw_perf_event *hwc; - long err; + long err = -EINVAL; if ((unsigned)cpu >= nr_cpu_ids) { if (!task || cpu != -1) @@ -6540,25 +6540,23 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, * we currently do not support PERF_FORMAT_GROUP on inherited events */ if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) - goto done; + goto err_ns; pmu = perf_init_event(event); - -done: - err = 0; if (!pmu) - err = -EINVAL; - else if (IS_ERR(pmu)) + goto err_ns; + else if (IS_ERR(pmu)) { err = PTR_ERR(pmu); - - if (err) { - if (event->ns) - put_pid_ns(event->ns); - kfree(event); - return ERR_PTR(err); + goto err_ns; } if (!event->parent) { + if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { + err = get_callchain_buffers(); + if (err) + goto err_pmu; + } + if (event->attach_state & PERF_ATTACH_TASK) static_key_slow_inc(&perf_sched_events.key); if (event->attr.mmap || event->attr.mmap_data) @@ -6573,16 +6571,19 @@ done: atomic_inc(&per_cpu(perf_branch_stack_events, event->cpu)); } - if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { - err = get_callchain_buffers(); - if (err) { - free_event(event); - return ERR_PTR(err); - } - } } return event; + +err_pmu: + if (event->destroy) + event->destroy(event); +err_ns: + if (event->ns) + put_pid_ns(event->ns); + kfree(event); + + return ERR_PTR(err); } static int perf_copy_attr(struct perf_event_attr __user *uattr, -- cgit v1.2.3 From 766d6c076928191d75ad5b0d0f58f52b1e7682d8 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 23 Jul 2013 02:31:01 +0200 Subject: perf: Factor out event accounting code to account_event()/__free_event() Gather all the event accounting code to a single place, once all the prerequisites are completed. This simplifies the refcounting. 
Original-patch-by: Peter Zijlstra Signed-off-by: Frederic Weisbecker Cc: Jiri Olsa Cc: Namhyung Kim Cc: Arnaldo Carvalho de Melo Cc: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1374539466-4799-4-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 79 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 47 insertions(+), 32 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 3b998626b7a0..158fd5789e58 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3128,6 +3128,21 @@ static void free_event_rcu(struct rcu_head *head) static void ring_buffer_put(struct ring_buffer *rb); static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb); +static void __free_event(struct perf_event *event) +{ + if (!event->parent) { + if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) + put_callchain_buffers(); + } + + if (event->destroy) + event->destroy(event); + + if (event->ctx) + put_ctx(event->ctx); + + call_rcu(&event->rcu_head, free_event_rcu); +} static void free_event(struct perf_event *event) { irq_work_sync(&event->pending); @@ -3141,8 +3156,6 @@ static void free_event(struct perf_event *event) atomic_dec(&nr_comm_events); if (event->attr.task) atomic_dec(&nr_task_events); - if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) - put_callchain_buffers(); if (is_cgroup_event(event)) { atomic_dec(&per_cpu(perf_cgroup_events, event->cpu)); static_key_slow_dec_deferred(&perf_sched_events); @@ -3180,13 +3193,8 @@ static void free_event(struct perf_event *event) if (is_cgroup_event(event)) perf_detach_cgroup(event); - if (event->destroy) - event->destroy(event); - - if (event->ctx) - put_ctx(event->ctx); - call_rcu(&event->rcu_head, free_event_rcu); + __free_event(event); } int perf_event_release_kernel(struct perf_event *event) @@ -6443,6 +6451,29 @@ unlock: return pmu; } +static void account_event(struct perf_event *event) +{ + if (event->attach_state & PERF_ATTACH_TASK) + static_key_slow_inc(&perf_sched_events.key); + if (event->attr.mmap || event->attr.mmap_data) + atomic_inc(&nr_mmap_events); + if (event->attr.comm) + atomic_inc(&nr_comm_events); + if (event->attr.task) + atomic_inc(&nr_task_events); + if (has_branch_stack(event)) { + static_key_slow_inc(&perf_sched_events.key); + if (!(event->attach_state & PERF_ATTACH_TASK)) + atomic_inc(&per_cpu(perf_branch_stack_events, + event->cpu)); + } + + if (is_cgroup_event(event)) { + atomic_inc(&per_cpu(perf_cgroup_events, event->cpu)); + static_key_slow_inc(&perf_sched_events.key); + } +} + /* * Allocate and initialize a event structure */ @@ -6556,21 +6587,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, if (err) goto err_pmu; } - - if (event->attach_state & PERF_ATTACH_TASK) - static_key_slow_inc(&perf_sched_events.key); - if (event->attr.mmap || event->attr.mmap_data) - atomic_inc(&nr_mmap_events); - if (event->attr.comm) - atomic_inc(&nr_comm_events); - if (event->attr.task) - atomic_inc(&nr_task_events); - if (has_branch_stack(event)) { - static_key_slow_inc(&perf_sched_events.key); - if (!(event->attach_state & PERF_ATTACH_TASK)) - atomic_inc(&per_cpu(perf_branch_stack_events, - event->cpu)); - } } return event; @@ -6865,17 +6881,14 @@ SYSCALL_DEFINE5(perf_event_open, if (flags & PERF_FLAG_PID_CGROUP) { err = perf_cgroup_connect(pid, event, &attr, group_leader); - if (err) - goto err_alloc; - /* - * one more event: - * - that has cgroup constraint on event->cpu - * - that may 
need work on context switch - */ - atomic_inc(&per_cpu(perf_cgroup_events, event->cpu)); - static_key_slow_inc(&perf_sched_events.key); + if (err) { + __free_event(event); + goto err_task; + } } + account_event(event); + /* * Special case software events and allow them to be part of * any hardware group. @@ -7071,6 +7084,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, goto err; } + account_event(event); + ctx = find_get_context(event->pmu, task, cpu); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); -- cgit v1.2.3 From 4beb31f3657348a8b702dd014d01c520e522012f Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 23 Jul 2013 02:31:02 +0200 Subject: perf: Split the per-cpu accounting part of the event accounting code This way we can use the per-cpu handling seperately. This is going to be used by to fix the event migration code accounting. Original-patch-by: Peter Zijlstra Signed-off-by: Frederic Weisbecker Cc: Jiri Olsa Cc: Namhyung Kim Cc: Arnaldo Carvalho de Melo Cc: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1374539466-4799-5-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 87 +++++++++++++++++++++++++++++++++------------------- 1 file changed, 55 insertions(+), 32 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 158fd5789e58..3a4b73aebc42 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3128,6 +3128,40 @@ static void free_event_rcu(struct rcu_head *head) static void ring_buffer_put(struct ring_buffer *rb); static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb); +static void unaccount_event_cpu(struct perf_event *event, int cpu) +{ + if (event->parent) + return; + + if (has_branch_stack(event)) { + if (!(event->attach_state & PERF_ATTACH_TASK)) + atomic_dec(&per_cpu(perf_branch_stack_events, cpu)); + } + if (is_cgroup_event(event)) + atomic_dec(&per_cpu(perf_cgroup_events, cpu)); +} + +static void unaccount_event(struct perf_event *event) +{ + if (event->parent) + return; + + if (event->attach_state & PERF_ATTACH_TASK) + static_key_slow_dec_deferred(&perf_sched_events); + if (event->attr.mmap || event->attr.mmap_data) + atomic_dec(&nr_mmap_events); + if (event->attr.comm) + atomic_dec(&nr_comm_events); + if (event->attr.task) + atomic_dec(&nr_task_events); + if (is_cgroup_event(event)) + static_key_slow_dec_deferred(&perf_sched_events); + if (has_branch_stack(event)) + static_key_slow_dec_deferred(&perf_sched_events); + + unaccount_event_cpu(event, event->cpu); +} + static void __free_event(struct perf_event *event) { if (!event->parent) { @@ -3147,29 +3181,7 @@ static void free_event(struct perf_event *event) { irq_work_sync(&event->pending); - if (!event->parent) { - if (event->attach_state & PERF_ATTACH_TASK) - static_key_slow_dec_deferred(&perf_sched_events); - if (event->attr.mmap || event->attr.mmap_data) - atomic_dec(&nr_mmap_events); - if (event->attr.comm) - atomic_dec(&nr_comm_events); - if (event->attr.task) - atomic_dec(&nr_task_events); - if (is_cgroup_event(event)) { - atomic_dec(&per_cpu(perf_cgroup_events, event->cpu)); - static_key_slow_dec_deferred(&perf_sched_events); - } - - if (has_branch_stack(event)) { - static_key_slow_dec_deferred(&perf_sched_events); - /* is system-wide event */ - if (!(event->attach_state & PERF_ATTACH_TASK)) { - atomic_dec(&per_cpu(perf_branch_stack_events, - event->cpu)); - } - } - } + unaccount_event(event); if (event->rb) { struct ring_buffer *rb; @@ 
-6451,8 +6463,24 @@ unlock: return pmu; } +static void account_event_cpu(struct perf_event *event, int cpu) +{ + if (event->parent) + return; + + if (has_branch_stack(event)) { + if (!(event->attach_state & PERF_ATTACH_TASK)) + atomic_inc(&per_cpu(perf_branch_stack_events, cpu)); + } + if (is_cgroup_event(event)) + atomic_inc(&per_cpu(perf_cgroup_events, cpu)); +} + static void account_event(struct perf_event *event) { + if (event->parent) + return; + if (event->attach_state & PERF_ATTACH_TASK) static_key_slow_inc(&perf_sched_events.key); if (event->attr.mmap || event->attr.mmap_data) @@ -6461,17 +6489,12 @@ static void account_event(struct perf_event *event) atomic_inc(&nr_comm_events); if (event->attr.task) atomic_inc(&nr_task_events); - if (has_branch_stack(event)) { + if (has_branch_stack(event)) static_key_slow_inc(&perf_sched_events.key); - if (!(event->attach_state & PERF_ATTACH_TASK)) - atomic_inc(&per_cpu(perf_branch_stack_events, - event->cpu)); - } - - if (is_cgroup_event(event)) { - atomic_inc(&per_cpu(perf_cgroup_events, event->cpu)); + if (is_cgroup_event(event)) static_key_slow_inc(&perf_sched_events.key); - } + + account_event_cpu(event, event->cpu); } /* -- cgit v1.2.3 From 9a545de019b536771feefb76f85e5038b65c2190 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 23 Jul 2013 02:31:03 +0200 Subject: perf: Migrate per cpu event accounting When an event is migrated, move the event per-cpu accounting accordingly so that branch stack and cgroup events work correctly on the new CPU. Original-patch-by: Peter Zijlstra Signed-off-by: Frederic Weisbecker Cc: Jiri Olsa Cc: Namhyung Kim Cc: Arnaldo Carvalho de Melo Cc: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1374539466-4799-6-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 3a4b73aebc42..63bdec9fdd21 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7145,6 +7145,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) list_for_each_entry_safe(event, tmp, &src_ctx->event_list, event_entry) { perf_remove_from_context(event); + unaccount_event_cpu(event, src_cpu); put_ctx(src_ctx); list_add(&event->event_entry, &events); } @@ -7157,6 +7158,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) list_del(&event->event_entry); if (event->state >= PERF_EVENT_STATE_OFF) event->state = PERF_EVENT_STATE_INACTIVE; + account_event_cpu(event, dst_cpu); perf_install_in_context(dst_ctx, event, dst_cpu); get_ctx(dst_ctx); } -- cgit v1.2.3 From ba8a75c16e292c0a3a87406a77508cbbc6cf4ee2 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 23 Jul 2013 02:31:04 +0200 Subject: perf: Account freq events per cpu This is going to be used by the full dynticks subsystem as a finer-grained information to know when to keep and when to stop the tick. 
Original-patch-by: Peter Zijlstra Signed-off-by: Frederic Weisbecker Cc: Jiri Olsa Cc: Namhyung Kim Cc: Arnaldo Carvalho de Melo Cc: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1374539466-4799-7-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 63bdec9fdd21..3fe385aa93e6 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -141,6 +141,7 @@ enum event_type_t { struct static_key_deferred perf_sched_events __read_mostly; static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events); +static DEFINE_PER_CPU(atomic_t, perf_freq_events); static atomic_t nr_mmap_events __read_mostly; static atomic_t nr_comm_events __read_mostly; @@ -3139,6 +3140,9 @@ static void unaccount_event_cpu(struct perf_event *event, int cpu) } if (is_cgroup_event(event)) atomic_dec(&per_cpu(perf_cgroup_events, cpu)); + + if (event->attr.freq) + atomic_dec(&per_cpu(perf_freq_events, cpu)); } static void unaccount_event(struct perf_event *event) @@ -6474,6 +6478,9 @@ static void account_event_cpu(struct perf_event *event, int cpu) } if (is_cgroup_event(event)) atomic_inc(&per_cpu(perf_cgroup_events, cpu)); + + if (event->attr.freq) + atomic_inc(&per_cpu(perf_freq_events, cpu)); } static void account_event(struct perf_event *event) -- cgit v1.2.3 From d84153d6c96f61aa06429586284639f32debf03e Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 23 Jul 2013 02:31:05 +0200 Subject: perf: Implement finer grained full dynticks kick Currently the full dynticks subsystem keep the tick alive as long as there are perf events running. This prevents the tick from being stopped as long as features such that the lockup detectors are running. As a temporary fix, the lockup detector is disabled by default when full dynticks is built but this is not a long term viable solution. To fix this, only keep the tick alive when an event configured with a frequency rather than a period is running on the CPU, or when an event throttles on the CPU. These are the only purposes of the perf tick, especially now that the rotation of flexible events is handled from a seperate hrtimer. The tick can be shutdown the rest of the time. 
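
The frequency-versus-period distinction the tick now hinges on is visible in how userspace opens the event. A minimal sketch, not part of the patch (assumes Linux with perf_event_open(2); error handling omitted, and the 4000 Hz / 100000-cycle values are arbitrary):

  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <string.h>
  #include <unistd.h>

  static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                  int cpu, int group_fd, unsigned long flags)
  {
          return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
  }

  int main(void)
  {
          struct perf_event_attr attr;
          int fd;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_HARDWARE;
          attr.config = PERF_COUNT_HW_CPU_CYCLES;

          /* Frequency mode: the kernel keeps re-adjusting the period to
           * approach ~4000 samples/s; per the changelog above, such events
           * keep the tick alive on the CPU they run on. */
          attr.freq = 1;
          attr.sample_freq = 4000;
          fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
          close(fd);

          /* Period mode: a fixed period needs no periodic adjustment, so it
           * does not prevent the CPU from stopping the tick. */
          attr.freq = 0;
          attr.sample_period = 100000;
          fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
          close(fd);

          return 0;
  }
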
Original-patch-by: Peter Zijlstra Signed-off-by: Frederic Weisbecker Cc: Jiri Olsa Cc: Namhyung Kim Cc: Arnaldo Carvalho de Melo Cc: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1374539466-4799-8-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 3fe385aa93e6..916cf1f593b4 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -870,12 +870,8 @@ static void perf_pmu_rotate_start(struct pmu *pmu) WARN_ON(!irqs_disabled()); - if (list_empty(&cpuctx->rotation_list)) { - int was_empty = list_empty(head); + if (list_empty(&cpuctx->rotation_list)) list_add(&cpuctx->rotation_list, head); - if (was_empty) - tick_nohz_full_kick(); - } } static void get_ctx(struct perf_event_context *ctx) @@ -1875,6 +1871,9 @@ static int __perf_install_in_context(void *info) perf_pmu_enable(cpuctx->ctx.pmu); perf_ctx_unlock(cpuctx, task_ctx); + if (atomic_read(&__get_cpu_var(perf_freq_events))) + tick_nohz_full_kick(); + return 0; } @@ -2812,10 +2811,11 @@ done: #ifdef CONFIG_NO_HZ_FULL bool perf_event_can_stop_tick(void) { - if (list_empty(&__get_cpu_var(rotation_list))) - return true; - else + if (atomic_read(&__get_cpu_var(perf_freq_events)) || + __this_cpu_read(perf_throttled_count)) return false; + else + return true; } #endif @@ -5202,6 +5202,7 @@ static int __perf_event_overflow(struct perf_event *event, __this_cpu_inc(perf_throttled_count); hwc->interrupts = MAX_INTERRUPTS; perf_log_throttle(event, 0); + tick_nohz_full_kick(); ret = 1; } } -- cgit v1.2.3 From 93786a5f6aeb9c032c1c240246c5aabcf457b38f Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 23 Jul 2013 02:31:06 +0200 Subject: watchdog: Make it work under full dynticks A perf event can be used without forcing the tick to stay alive if it doesn't use a frequency but a sample period and if it doesn't throttle (raise storm of events). Since the lockup detector neither use a perf event frequency nor should ever throttle due to its high period, it can now run concurrently with the full dynticks feature. So remove the hack that disabled the watchdog. Signed-off-by: Frederic Weisbecker Cc: Jiri Olsa Cc: Namhyung Kim Cc: Arnaldo Carvalho de Melo Cc: Stephane Eranian Cc: Don Zickus Cc: Srivatsa S. Bhat Cc: Anish Singh Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1374539466-4799-9-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'kernel') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 1241d8c91d5e..51c4f34d258e 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -553,14 +553,6 @@ void __init lockup_detector_init(void) { set_sample_period(); -#ifdef CONFIG_NO_HZ_FULL - if (watchdog_user_enabled) { - watchdog_user_enabled = 0; - pr_warning("Disabled lockup detectors by default for full dynticks\n"); - pr_warning("You can reactivate it with 'sysctl -w kernel.watchdog=1'\n"); - } -#endif - if (watchdog_user_enabled) watchdog_enable_all_cpus(); } -- cgit v1.2.3 From cf4957f17f2a89984915ea808876d9c82225b862 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 24 Oct 2012 13:37:58 +0200 Subject: perf: Add PERF_EVENT_IOC_ID ioctl to return event ID The only way to get the event ID is by reading the event fd, followed by parsing the ID value out of the returned data. 
While this is ok for current read format used by perf tool, it is not ok when we use PERF_FORMAT_GROUP format. With this format the data are returned for the whole group and there's no way to find out what ID belongs to our fd (if we are not group leader event). Adding a simple ioctl that returns event primary ID for given fd. Signed-off-by: Jiri Olsa Acked-by: Namhyung Kim Acked-by: Peter Zijlstra Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/n/tip-v1bn5cto707jn0bon34afqr1@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- kernel/events/core.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 916cf1f593b4..5200b608b481 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3568,6 +3568,15 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case PERF_EVENT_IOC_PERIOD: return perf_event_period(event, (u64 __user *)arg); + case PERF_EVENT_IOC_ID: + { + u64 id = primary_event_id(event); + + if (copy_to_user((void __user *)arg, &id, sizeof(id))) + return -EFAULT; + return 0; + } + case PERF_EVENT_IOC_SET_OUTPUT: { int ret; -- cgit v1.2.3 From 6f5ab0019fd328b50a8488c9e5193fc1dbd8d6ed Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 15 Oct 2012 20:13:45 +0200 Subject: perf: Do not get values from disabled counters in group format read It's possible some of the counters in the group could be disabled when sampling member of the event group is reading the rest via PERF_SAMPLE_READ sample type processing. Disabled counters could then produce wrong numbers. Fixing that by reading only enabled counters for PERF_SAMPLE_READ sample type processing. Signed-off-by: Jiri Olsa Acked-by: Namhyung Kim Acked-by: Peter Zijlstra Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/n/tip-wwkjb0bbcuslnz0klrmqi26r@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- kernel/events/core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 5200b608b481..e82e70025d42 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4388,7 +4388,8 @@ static void perf_output_read_group(struct perf_output_handle *handle, list_for_each_entry(sub, &leader->sibling_list, group_entry) { n = 0; - if (sub != event) + if ((sub != event) && + (sub->state == PERF_EVENT_STATE_ACTIVE)) sub->pmu->read(sub); values[n++] = perf_event_count(sub); -- cgit v1.2.3 From fc3b86d673e41ac66b4ba5b75a90c2fcafb90089 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 2 Aug 2013 18:29:54 +0200 Subject: perf: Roll back callchain buffer refcount under the callchain mutex When we fail to allocate the callchain buffers, we roll back the refcount we did and return from get_callchain_buffers(). However we take the refcount and allocate under the callchain lock but the rollback is done outside the lock. As a result, while we roll back, some concurrent callchain user may call get_callchain_buffers(), see the non-zero refcount and give up because the buffers are NULL without itself retrying the allocation. The consequences aren't that bad but that behaviour looks weird enough and it's better to give their chances to the following callchain users where we failed. 
Reported-by: Jiri Olsa Signed-off-by: Frederic Weisbecker Acked-by: Jiri Olsa Cc: Namhyung Kim Cc: Arnaldo Carvalho de Melo Cc: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1375460996-16329-2-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- kernel/events/callchain.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index 76a8bc5f6265..97b67df8fbfe 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -116,10 +116,11 @@ int get_callchain_buffers(void) err = alloc_callchain_buffers(); exit: - mutex_unlock(&callchain_mutex); if (err) atomic_dec(&nr_callchain_events); + mutex_unlock(&callchain_mutex); + return err; } -- cgit v1.2.3 From 948b26b6ddd08a57cb95ebb0dc96fde2edd5c383 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 2 Aug 2013 18:29:55 +0200 Subject: perf: Account freq events globally Freq events may not always be affine to a particular CPU. As such, account_event_cpu() may crash if we account per cpu a freq event that has event->cpu == -1. To solve this, lets account freq events globally. In practice this doesn't change much the picture because perf tools create per-task perf events with one event per CPU by default. Profiling a single CPU is usually a corner case so there is no much point in optimizing things that way. Reported-by: Jiri Olsa Suggested-by: Peter Zijlstra Signed-off-by: Frederic Weisbecker Tested-by: Jiri Olsa Cc: Namhyung Kim Cc: Arnaldo Carvalho de Melo Cc: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1375460996-16329-3-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index e82e70025d42..2e675e830976 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -141,11 +141,11 @@ enum event_type_t { struct static_key_deferred perf_sched_events __read_mostly; static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events); -static DEFINE_PER_CPU(atomic_t, perf_freq_events); static atomic_t nr_mmap_events __read_mostly; static atomic_t nr_comm_events __read_mostly; static atomic_t nr_task_events __read_mostly; +static atomic_t nr_freq_events __read_mostly; static LIST_HEAD(pmus); static DEFINE_MUTEX(pmus_lock); @@ -1871,9 +1871,6 @@ static int __perf_install_in_context(void *info) perf_pmu_enable(cpuctx->ctx.pmu); perf_ctx_unlock(cpuctx, task_ctx); - if (atomic_read(&__get_cpu_var(perf_freq_events))) - tick_nohz_full_kick(); - return 0; } @@ -2811,7 +2808,7 @@ done: #ifdef CONFIG_NO_HZ_FULL bool perf_event_can_stop_tick(void) { - if (atomic_read(&__get_cpu_var(perf_freq_events)) || + if (atomic_read(&nr_freq_events) || __this_cpu_read(perf_throttled_count)) return false; else @@ -3140,9 +3137,6 @@ static void unaccount_event_cpu(struct perf_event *event, int cpu) } if (is_cgroup_event(event)) atomic_dec(&per_cpu(perf_cgroup_events, cpu)); - - if (event->attr.freq) - atomic_dec(&per_cpu(perf_freq_events, cpu)); } static void unaccount_event(struct perf_event *event) @@ -3158,6 +3152,8 @@ static void unaccount_event(struct perf_event *event) atomic_dec(&nr_comm_events); if (event->attr.task) atomic_dec(&nr_task_events); + if (event->attr.freq) + atomic_dec(&nr_freq_events); if (is_cgroup_event(event)) 
static_key_slow_dec_deferred(&perf_sched_events); if (has_branch_stack(event)) @@ -6489,9 +6485,6 @@ static void account_event_cpu(struct perf_event *event, int cpu) } if (is_cgroup_event(event)) atomic_inc(&per_cpu(perf_cgroup_events, cpu)); - - if (event->attr.freq) - atomic_inc(&per_cpu(perf_freq_events, cpu)); } static void account_event(struct perf_event *event) @@ -6507,6 +6500,10 @@ static void account_event(struct perf_event *event) atomic_inc(&nr_comm_events); if (event->attr.task) atomic_inc(&nr_task_events); + if (event->attr.freq) { + if (atomic_inc_return(&nr_freq_events) == 1) + tick_nohz_full_kick_all(); + } if (has_branch_stack(event)) static_key_slow_inc(&perf_sched_events.key); if (is_cgroup_event(event)) -- cgit v1.2.3 From 5ec4c599a52362896c3e7c6a31ba6145dca9c6f5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 2 Aug 2013 21:16:30 +0200 Subject: perf: Do not compute time values unnecessarily We should not be calling calc_timer_values() for events that do not actually have an mmap()'ed userpage. Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20130802191630.GT27162@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- kernel/events/core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 2e675e830976..928fae7ca8c7 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3670,6 +3670,10 @@ void perf_event_update_userpage(struct perf_event *event) u64 enabled, running, now; rcu_read_lock(); + rb = rcu_dereference(event->rb); + if (!rb) + goto unlock; + /* * compute total_time_enabled, total_time_running * based on snapshot values taken when the event @@ -3680,12 +3684,8 @@ void perf_event_update_userpage(struct perf_event *event) * NMI context */ calc_timer_values(event, &now, &enabled, &running); - rb = rcu_dereference(event->rb); - if (!rb) - goto unlock; userpg = rb->user_page; - /* * Disable preemption so as to not let the corresponding user-space * spin too long if we get preempted. -- cgit v1.2.3 From ff3d527cebc1fa3707c617bfe9e74f53fcfb0955 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 27 Aug 2013 11:23:07 +0300 Subject: perf: make events stream always parsable The event stream is not always parsable because the format of a sample is dependent on the sample_type of the selected event. When there is more than one selected event and the sample_types are not the same then parsing becomes problematic. A sample can be matched to its selected event using the ID that is allocated when the event is opened. Unfortunately, to get the ID from the sample means first parsing it. This patch adds a new sample format bit PERF_SAMPLE_IDENTIFER that puts the ID at a fixed position so that the ID can be retrieved without parsing the sample. For sample events, that is the first position immediately after the header. For non-sample events, that is the last position. In this respect parsing samples requires that the sample_type and ID values are recorded. For example, perf tools records struct perf_event_attr and the IDs within the perf.data file. Those must be read first before it is possible to parse samples found later in the perf.data file. 
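Assuming every event in the session sets PERF_SAMPLE_IDENTIFIER, and sample_id_all is set so that non-sample records carry the ID trailer at all, a userspace reader could perform the fixed-position lookup along these lines (the helper below is a sketch for illustration, not code from any existing tool):

	#include <stdint.h>
	#include <linux/perf_event.h>

	/* Return the event ID of a record without parsing its body. */
	static uint64_t record_id(const struct perf_event_header *hdr)
	{
		const uint64_t *w = (const void *)(hdr + 1);
		size_t n = (hdr->size - sizeof(*hdr)) / sizeof(uint64_t);

		if (hdr->type == PERF_RECORD_SAMPLE)
			return w[0];		/* first u64 after the header */

		return w[n - 1];		/* last u64 of the record */
	}

The returned ID can then be matched against the attr/ID tables recorded in the perf.data header to select the right sample_type for a full parse.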
Signed-off-by: Adrian Hunter Tested-by: Stephane Eranian Acked-by: Peter Zijlstra Cc: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/r/1377591794-30553-6-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- kernel/events/core.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 928fae7ca8c7..15d0f2418e54 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1213,6 +1213,9 @@ static void perf_event__id_header_size(struct perf_event *event) if (sample_type & PERF_SAMPLE_TIME) size += sizeof(data->time); + if (sample_type & PERF_SAMPLE_IDENTIFIER) + size += sizeof(data->id); + if (sample_type & PERF_SAMPLE_ID) size += sizeof(data->id); @@ -4280,7 +4283,7 @@ static void __perf_event_header__init_id(struct perf_event_header *header, if (sample_type & PERF_SAMPLE_TIME) data->time = perf_clock(); - if (sample_type & PERF_SAMPLE_ID) + if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) data->id = primary_event_id(event); if (sample_type & PERF_SAMPLE_STREAM_ID) @@ -4319,6 +4322,9 @@ static void __perf_event__output_id_sample(struct perf_output_handle *handle, if (sample_type & PERF_SAMPLE_CPU) perf_output_put(handle, data->cpu_entry); + + if (sample_type & PERF_SAMPLE_IDENTIFIER) + perf_output_put(handle, data->id); } void perf_event__output_id_sample(struct perf_event *event, @@ -4432,6 +4438,9 @@ void perf_output_sample(struct perf_output_handle *handle, perf_output_put(handle, *header); + if (sample_type & PERF_SAMPLE_IDENTIFIER) + perf_output_put(handle, data->id); + if (sample_type & PERF_SAMPLE_IP) perf_output_put(handle, data->ip); -- cgit v1.2.3 From ae23bff1d71f8b416ed740bc458df67355c77c92 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 24 Aug 2013 16:45:54 +0200 Subject: perf: Prevent race in unthrottling code The current throttling code triggers WARN below via following workload (only hit on AMD machine with 48 CPUs): # while [ 1 ]; do perf record perf bench sched messaging; done WARNING: at arch/x86/kernel/cpu/perf_event.c:1054 x86_pmu_start+0xc6/0x100() SNIP Call Trace: [] dump_stack+0x19/0x1b [] warn_slowpath_common+0x61/0x80 [] warn_slowpath_null+0x1a/0x20 [] x86_pmu_start+0xc6/0x100 [] perf_adjust_freq_unthr_context.part.75+0x182/0x1a0 [] perf_event_task_tick+0xc8/0xf0 [] scheduler_tick+0xd1/0x140 [] update_process_times+0x66/0x80 [] tick_sched_handle.isra.15+0x25/0x60 [] tick_sched_timer+0x41/0x60 [] __run_hrtimer+0x74/0x1d0 [] ? tick_sched_handle.isra.15+0x60/0x60 [] hrtimer_interrupt+0xf7/0x240 [] smp_apic_timer_interrupt+0x69/0x9c [] apic_timer_interrupt+0x6d/0x80 [] ? __perf_event_task_sched_in+0x184/0x1a0 [] ? kfree_skbmem+0x37/0x90 [] ? __slab_free+0x1ac/0x30f [] ? kfree+0xfd/0x130 [] kmem_cache_free+0x1b2/0x1d0 [] kfree_skbmem+0x37/0x90 [] consume_skb+0x34/0x80 [] unix_stream_recvmsg+0x4e7/0x820 [] sock_aio_read.part.7+0x116/0x130 [] ? __perf_sw_event+0x19c/0x1e0 [] sock_aio_read+0x21/0x30 [] do_sync_read+0x80/0xb0 [] vfs_read+0x145/0x170 [] SyS_read+0x49/0xa0 [] ? __audit_syscall_exit+0x1f6/0x2a0 [] system_call_fastpath+0x16/0x1b ---[ end trace 622b7e226c4a766a ]--- The reason is a race in perf_event_task_tick() throttling code. 
The race flow (simplified code):

  - perf_throttled_count is a per cpu variable and is the CPU throttling flag, here starting with 0

  - perf_throttled_seq is the sequence/domain for the allowed count of interrupts within the tick, gets increased each tick

  on a single CPU (CPU bounded event):

    ... workload

    perf_event_task_tick:
    |
    | T0 inc(perf_throttled_seq)
    | T1 needs_unthr = xchg(perf_throttled_count, 0) == 0

         tick gets interrupted:

           ... event gets throttled under new seq ...

           T2 last NMI comes, event is throttled - inc(perf_throttled_count)

         back to tick:
    |
    | perf_adjust_freq_unthr_context:
    |   |
    |   | T3 unthrottling is skipped for event (needs_unthr == 0)
    |   | T4 event is stopped and started via freq adjustment
    |
    tick ends

    ... workload
    ... no sample is hit for event ...

    perf_event_task_tick:
    |
    | T5 needs_unthr = xchg(perf_throttled_count, 0) != 0 (from T2)
    | T6 unthrottling is done on event (interrupts == MAX_INTERRUPTS)
    |    event is already started (from T4) -> WARN

Fixing this by not checking needs_unthr again and thus checking all events for unthrottling.

Signed-off-by: Jiri Olsa Reported-by: Jan Stancek Suggested-by: Peter Zijlstra Cc: Corey Ashford Cc: Frederic Weisbecker Cc: Namhyung Kim Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Andi Kleen Cc: Stephane Eranian Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1377355554-8934-1-git-send-email-jolsa@redhat.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index f86599e8c123..258eaaffe95a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2712,7 +2712,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, hwc = &event->hw; - if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) { + if (hwc->interrupts == MAX_INTERRUPTS) { hwc->interrupts = 0; perf_log_throttle(event, 1); event->pmu->start(event, 0); -- cgit v1.2.3 From 13d7a2410fa637f450a29ecb515ac318ee40c741 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Wed, 21 Aug 2013 12:10:24 +0200 Subject: perf: Add attr->mmap2 attribute to an event Adds a new PERF_RECORD_MMAP2 record type which is in essence an expanded version of PERF_RECORD_MMAP. Used to request mmap records with more information about the mapping, including device major, minor and the inode number and generation for mappings associated with files or shared memory segments. Works for code and data (with attr->mmap_data set). Existing PERF_RECORD_MMAP record is unmodified by this patch. Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra Cc: Al Viro Link: http://lkml.kernel.org/r/1377079825-19057-2-git-send-email-eranian@google.com [ Added Al to the Cc:. Are the ino, maj/min exports of vma->vm_file OK?
] Signed-off-by: Ingo Molnar --- kernel/events/core.c | 46 ++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 42 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/events/core.c b/kernel/events/core.c index 15d0f2418e54..c7ee497c39a7 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4776,7 +4776,7 @@ next: /* * task tracking -- fork/exit * - * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task + * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task */ struct perf_task_event { @@ -4796,8 +4796,9 @@ struct perf_task_event { static int perf_event_task_match(struct perf_event *event) { - return event->attr.comm || event->attr.mmap || - event->attr.mmap_data || event->attr.task; + return event->attr.comm || event->attr.mmap || + event->attr.mmap2 || event->attr.mmap_data || + event->attr.task; } static void perf_event_task_output(struct perf_event *event, @@ -4992,6 +4993,9 @@ struct perf_mmap_event { const char *file_name; int file_size; + int maj, min; + u64 ino; + u64 ino_generation; struct { struct perf_event_header header; @@ -5012,7 +5016,7 @@ static int perf_event_mmap_match(struct perf_event *event, int executable = vma->vm_flags & VM_EXEC; return (!executable && event->attr.mmap_data) || - (executable && event->attr.mmap); + (executable && (event->attr.mmap || event->attr.mmap2)); } static void perf_event_mmap_output(struct perf_event *event, @@ -5027,6 +5031,13 @@ static void perf_event_mmap_output(struct perf_event *event, if (!perf_event_mmap_match(event, data)) return; + if (event->attr.mmap2) { + mmap_event->event_id.header.type = PERF_RECORD_MMAP2; + mmap_event->event_id.header.size += sizeof(mmap_event->maj); + mmap_event->event_id.header.size += sizeof(mmap_event->min); + mmap_event->event_id.header.size += sizeof(mmap_event->ino); + } + perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); ret = perf_output_begin(&handle, event, mmap_event->event_id.header.size); @@ -5037,6 +5048,14 @@ static void perf_event_mmap_output(struct perf_event *event, mmap_event->event_id.tid = perf_event_tid(event, current); perf_output_put(&handle, mmap_event->event_id); + + if (event->attr.mmap2) { + perf_output_put(&handle, mmap_event->maj); + perf_output_put(&handle, mmap_event->min); + perf_output_put(&handle, mmap_event->ino); + perf_output_put(&handle, mmap_event->ino_generation); + } + __output_copy(&handle, mmap_event->file_name, mmap_event->file_size); @@ -5051,6 +5070,8 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) { struct vm_area_struct *vma = mmap_event->vma; struct file *file = vma->vm_file; + int maj = 0, min = 0; + u64 ino = 0, gen = 0; unsigned int size; char tmp[16]; char *buf = NULL; @@ -5059,6 +5080,8 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) memset(tmp, 0, sizeof(tmp)); if (file) { + struct inode *inode; + dev_t dev; /* * d_path works from the end of the rb backwards, so we * need to add enough zero bytes after the string to handle @@ -5074,6 +5097,13 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) name = strncpy(tmp, "//toolong", sizeof(tmp)); goto got_name; } + inode = file_inode(vma->vm_file); + dev = inode->i_sb->s_dev; + ino = inode->i_ino; + gen = inode->i_generation; + maj = MAJOR(dev); + min = MINOR(dev); + } else { if (arch_vma_name(mmap_event->vma)) { name = strncpy(tmp, arch_vma_name(mmap_event->vma), @@ -5104,6 +5134,10 @@ got_name: mmap_event->file_name = name; 
mmap_event->file_size = size; + mmap_event->maj = maj; + mmap_event->min = min; + mmap_event->ino = ino; + mmap_event->ino_generation = gen; if (!(vma->vm_flags & VM_EXEC)) mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA; @@ -5140,6 +5174,10 @@ void perf_event_mmap(struct vm_area_struct *vma) .len = vma->vm_end - vma->vm_start, .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, }, + /* .maj (attr_mmap2 only) */ + /* .min (attr_mmap2 only) */ + /* .ino (attr_mmap2 only) */ + /* .ino_generation (attr_mmap2 only) */ }; perf_event_mmap_event(&mmap_event); -- cgit v1.2.3
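Read together with the perf_event_mmap_output() changes above, a record requested with attr->mmap2 comes out in roughly the layout below; this struct is an illustrative sketch that mirrors the perf_output_put() order in this patch, not a declaration taken from the uapi header:

	#include <stdint.h>
	#include <linux/perf_event.h>

	/* Sketch of the PERF_RECORD_MMAP2 body as emitted above. */
	struct mmap2_record_sketch {
		struct perf_event_header header;	/* header.type == PERF_RECORD_MMAP2 */
		uint32_t	pid, tid;
		uint64_t	addr;			/* vma->vm_start */
		uint64_t	len;			/* vma->vm_end - vma->vm_start */
		uint64_t	pgoff;			/* (u64)vma->vm_pgoff << PAGE_SHIFT */
		uint32_t	maj, min;		/* device of the backing inode, 0 if anonymous */
		uint64_t	ino;			/* inode number, 0 if anonymous */
		uint64_t	ino_generation;
		char		filename[];		/* NUL-terminated mapping name */
		/* followed by the usual sample_id trailer when sample_id_all is set */
	};

Tools that only set attr->mmap continue to receive the unchanged PERF_RECORD_MMAP record, so both record types can coexist in the same stream.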