path: root/tools/perf/builtin-record.c
author     Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>  2022-01-17 21:34:27 +0300
committer  Arnaldo Carvalho de Melo <acme@redhat.com>             2022-02-10 16:25:53 -0300
commit     3217e9fecf118d5dcabdd68d91e0c6afcb4c3e1b (patch)
tree       9f60c3d41c7070266ef94a51bda19ec9154b3d13 /tools/perf/builtin-record.c
parent     perf record: Stop threads in the end of trace streaming (diff)
perf record: Start threads in the beginning of trace streaming
Start threads in the detached state, because their management is implemented via messaging in order to avoid any scaling issues.

Block signals prior to thread start so that only the main tool thread is notified of external async signals during data collection.

The thread affinity mask is used to assign the eligible CPUs for each thread to run on. Wait for and synchronize with thread start using the thread ack pipe.

Reviewed-by: Riccardo Mancini <rickyman7@gmail.com>
Signed-off-by: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
Tested-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Riccardo Mancini <rickyman7@gmail.com>
Acked-by: Namhyung Kim <namhyung@gmail.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Antonov <alexander.antonov@linux.intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Budankov <abudankov@huawei.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/95784dd9f7c81ee408eab27b50b4c09ad4cf7be6.1642440724.git.alexey.v.bayduraev@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
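The message above describes the pattern the patch applies: block signals in the parent, spawn each worker detached and pinned to its CPU set, then wait on an ack pipe before proceeding. A minimal standalone sketch of that pattern, not the perf code itself (worker_ctx, worker and start_worker are made-up names, and error handling is reduced to the essentials):

/* sketch.c - build with: cc -pthread sketch.c */
#define _GNU_SOURCE
#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct worker_ctx {
        int ack[2];        /* ack pipe: worker writes, parent reads */
        cpu_set_t cpus;    /* CPUs the worker is allowed to run on */
};

static void *worker(void *arg)
{
        struct worker_ctx *ctx = arg;
        char msg = 'R';    /* "ready" notification */

        /* Tell the parent we are up before entering the work loop. */
        if (write(ctx->ack[1], &msg, sizeof(msg)) == -1)
                fprintf(stderr, "worker: ack failed: %s\n", strerror(errno));

        /* ... the data collection loop would run here ... */
        return NULL;
}

static int start_worker(struct worker_ctx *ctx)
{
        pthread_attr_t attrs;
        sigset_t full, saved;
        pthread_t handle;
        char msg;
        int ret = 0;

        /* Block all signals so only the parent thread sees async signals. */
        sigfillset(&full);
        if (sigprocmask(SIG_SETMASK, &full, &saved))
                return -1;

        pthread_attr_init(&attrs);
        /* Detached: the worker is managed via messages, never joined. */
        pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED);
#ifdef __linux__
        /* Restrict the worker to its assigned CPUs (GNU extension). */
        pthread_attr_setaffinity_np(&attrs, sizeof(ctx->cpus), &ctx->cpus);
#endif
        if (pthread_create(&handle, &attrs, worker, ctx)) {
                ret = -1;
        } else if (read(ctx->ack[0], &msg, sizeof(msg)) != sizeof(msg)) {
                /* Handshake: do not proceed until the worker says it is up. */
                ret = -1;
        }

        pthread_attr_destroy(&attrs);
        /* Restore the original signal mask in the parent. */
        if (sigprocmask(SIG_SETMASK, &saved, NULL))
                ret = -1;
        return ret;
}

int main(void)
{
        struct worker_ctx ctx;

        if (pipe(ctx.ack))
                return 1;
        CPU_ZERO(&ctx.cpus);
        CPU_SET(0, &ctx.cpus);   /* pin the worker to CPU 0 */
        /* In real code the parent would now stream data and later
         * message the worker to stop; here we only do the handshake. */
        return start_worker(&ctx) ? 1 : 0;
}

In the patch below, the same handshake is carried by thread->pipes.ack, the worker body is record__thread(), and each per-thread affinity mask comes from thread_data[t].mask->affinity.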
Diffstat (limited to 'tools/perf/builtin-record.c')
-rw-r--r--  tools/perf/builtin-record.c  121
1 file changed, 120 insertions(+), 1 deletion(-)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 0e65b80927b7..517520ae1520 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -170,6 +170,11 @@ static inline pid_t gettid(void)
}
#endif
+static int record__threads_enabled(struct record *rec)
+{
+ return rec->opts.threads_spec;
+}
+
static bool switch_output_signal(struct record *rec)
{
return rec->switch_output.signal &&
@@ -1473,6 +1478,68 @@ static void record__thread_munmap_filtered(struct fdarray *fda, int fd,
perf_mmap__put(map);
}
+static void *record__thread(void *arg)
+{
+ enum thread_msg msg = THREAD_MSG__READY;
+ bool terminate = false;
+ struct fdarray *pollfd;
+ int err, ctlfd_pos;
+
+ thread = arg;
+ thread->tid = gettid();
+
+ err = write(thread->pipes.ack[1], &msg, sizeof(msg));
+ if (err == -1)
+ pr_warning("threads[%d]: failed to notify on start: %s\n",
+ thread->tid, strerror(errno));
+
+ pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu());
+
+ pollfd = &thread->pollfd;
+ ctlfd_pos = thread->ctlfd_pos;
+
+ for (;;) {
+ unsigned long long hits = thread->samples;
+
+ if (record__mmap_read_all(thread->rec, false) < 0 || terminate)
+ break;
+
+ if (hits == thread->samples) {
+
+ err = fdarray__poll(pollfd, -1);
+ /*
+ * Propagate error, only if there's any. Ignore positive
+ * number of returned events and interrupt error.
+ */
+ if (err > 0 || (err < 0 && errno == EINTR))
+ err = 0;
+ thread->waking++;
+
+ if (fdarray__filter(pollfd, POLLERR | POLLHUP,
+ record__thread_munmap_filtered, NULL) == 0)
+ break;
+ }
+
+ if (pollfd->entries[ctlfd_pos].revents & POLLHUP) {
+ terminate = true;
+ close(thread->pipes.msg[0]);
+ thread->pipes.msg[0] = -1;
+ pollfd->entries[ctlfd_pos].fd = -1;
+ pollfd->entries[ctlfd_pos].events = 0;
+ }
+
+ pollfd->entries[ctlfd_pos].revents = 0;
+ }
+ record__mmap_read_all(thread->rec, true);
+
+ err = write(thread->pipes.ack[1], &msg, sizeof(msg));
+ if (err == -1)
+ pr_warning("threads[%d]: failed to notify on termination: %s\n",
+ thread->tid, strerror(errno));
+
+ return NULL;
+}
+
static void record__init_features(struct record *rec)
{
struct perf_session *session = rec->session;
@@ -1916,13 +1983,65 @@ static int record__terminate_thread(struct record_thread *thread_data)
static int record__start_threads(struct record *rec)
{
+ int t, tt, err, ret = 0, nr_threads = rec->nr_threads;
struct record_thread *thread_data = rec->thread_data;
+ sigset_t full, mask;
+ pthread_t handle;
+ pthread_attr_t attrs;
thread = &thread_data[0];
+ if (!record__threads_enabled(rec))
+ return 0;
+
+ sigfillset(&full);
+ if (sigprocmask(SIG_SETMASK, &full, &mask)) {
+ pr_err("Failed to block signals on threads start: %s\n", strerror(errno));
+ return -1;
+ }
+
+ pthread_attr_init(&attrs);
+ pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED);
+
+ for (t = 1; t < nr_threads; t++) {
+ enum thread_msg msg = THREAD_MSG__UNDEFINED;
+
+#ifdef HAVE_PTHREAD_ATTR_SETAFFINITY_NP
+ pthread_attr_setaffinity_np(&attrs,
+ MMAP_CPU_MASK_BYTES(&(thread_data[t].mask->affinity)),
+ (cpu_set_t *)(thread_data[t].mask->affinity.bits));
+#endif
+ if (pthread_create(&handle, &attrs, record__thread, &thread_data[t])) {
+ for (tt = 1; tt < t; tt++)
+ record__terminate_thread(&thread_data[t]);
+ pr_err("Failed to start threads: %s\n", strerror(errno));
+ ret = -1;
+ goto out_err;
+ }
+
+ err = read(thread_data[t].pipes.ack[0], &msg, sizeof(msg));
+ if (err > 0)
+ pr_debug2("threads[%d]: sent %s\n", rec->thread_data[t].tid,
+ thread_msg_tags[msg]);
+ else
+ pr_warning("threads[%d]: failed to receive start notification from %d\n",
+ thread->tid, rec->thread_data[t].tid);
+ }
+
+ sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity),
+ (cpu_set_t *)thread->mask->affinity.bits);
+
pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu());
- return 0;
+out_err:
+ pthread_attr_destroy(&attrs);
+
+ if (sigprocmask(SIG_SETMASK, &mask, NULL)) {
+ pr_err("Failed to unblock signals on threads start: %s\n", strerror(errno));
+ ret = -1;
+ }
+
+ return ret;
}
static int record__stop_threads(struct record *rec)