author    Linus Torvalds <torvalds@linux-foundation.org>    2024-03-14 16:31:23 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2024-03-14 16:31:23 -0700
commit    1bbeaf83dd7b5e3628b98bec66ff8fe2646e14aa (patch)
tree      a391eed8ae206613b48e02e56e6ad5c4432d8767 /tools/perf/builtin-sched.c
parent    Merge tag 'trace-ring-buffer-v6.8-rc7-2' of git://git.kernel.org/pub/scm/linu... (diff)
parent    perf annotate: Add comments in the data structures (diff)
Merge tag 'perf-tools-for-v6.9-2024-03-13' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools
Pull perf tools updates from Namhyung Kim:

 "perf stat:

   - Support a new 'cluster' aggregation mode for shared resources,
     depending on the hardware configuration:

        $ sudo perf stat -a --per-cluster -e cycles,instructions sleep 1

         Performance counter stats for 'system wide':

        S0-D0-CLS0    2        85,051,822      cycles
        S0-D0-CLS0    2        73,909,908      instructions     #  0.87  insn per cycle
        S0-D0-CLS2    2        93,365,918      cycles
        S0-D0-CLS2    2        83,006,158      instructions     #  0.89  insn per cycle
        S0-D0-CLS4    2       104,157,523      cycles
        S0-D0-CLS4    2        53,234,396      instructions     #  0.51  insn per cycle
        S0-D0-CLS6    2        65,891,079      cycles
        S0-D0-CLS6    2        41,478,273      instructions     #  0.63  insn per cycle

              1.002407989 seconds time elapsed

   - Various fixes and cleanups for event metrics, including NaN handling

  perf script:

   - Use libcapstone, if available, to disassemble the instructions.
     This enables 'perf script -F disasm' and 'perf script
     --insn-trace=disasm' (for Intel-PT):

        $ perf script -F event,ip,disasm
        cycles:P:  ffffffffa988d428     wrmsr
        cycles:P:  ffffffffa9839d25     movq %rax, %r14
        cycles:P:  ffffffffa9cdcaf0     endbr64
        cycles:P:  ffffffffa988d428     wrmsr
        cycles:P:  ffffffffa988d428     wrmsr
        cycles:P:  ffffffffaa401f86     iretq
        cycles:P:  ffffffffa99c4de5     movq 0x30(%rcx), %r8
        cycles:P:  ffffffffa988d428     wrmsr
        cycles:P:  ffffffffaa401f86     iretq
        cycles:P:  ffffffffa9907983     movl 0x68(%rbx), %eax
        cycles:P:  ffffffffa988d428     wrmsr

   - Expose sample ID / stream ID to python scripts

  perf test:

   - Add more perf test cases from Redhat internal test suites. This
     time it adds the base infra and a few perf probe tests. More to
     come. :)

   - Add 'perf test -p' for parallel execution and fix some issues found
     by the parallel test

   - Support the symbol test printing symbols in a given (active)
     module:

        $ perf test -F -v Symbols --dso /lib/modules/$(uname -r)/kernel/fs/ext4/ext4.ko
        --- start ---
        Testing /lib/modules/6.5.13-1rodete2-amd64/kernel/fs/ext4/ext4.ko
        Overlapping symbols:
         7a990-7a9a0 l __pfx_ext4_exit_fs
         7a990-7a9a0 g __pfx_cleanup_module
        Overlapping symbols:
         7a9a0-7aa1c l ext4_exit_fs
         7a9a0-7aa1c g cleanup_module
        ...

  JSON metric updates:

   - A new round of Intel metric updates

   - Support the Power11 PVR (compatible with Power10)

   - Fix cache latency events on Zen 4 to set SliceId properly

  Internal:

   - Fix reference counting for the 'map' data structure, tireless work
     from Ian!

   - More memory optimization for struct thread and the annotate
     histogram. Now 'perf report' (TUI) and 'perf annotate' should be
     much lighter-weight in terms of memory footprint

   - Support cross-arch perf register access. Clean up the build
     configuration so that it can detect arch-register support at
     runtime. This allows parsing register data in samples recorded on
     a different arch

  Others:

   - Sync task state in 'perf sched' to the kernel using trace event
     fields. The task states have been changed, so tools cannot assume
     a fixed encoding

   - Clean up 'perf mem' to generalize the arch-specific events

   - Add support for local and global variables to data type profiling.
     This should increase the success rate of type resolution with
     DWARF

   - Add the short option -H for --hierarchy in 'perf report' and 'perf
     top'"

* tag 'perf-tools-for-v6.9-2024-03-13' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools: (154 commits)
  perf annotate: Add comments in the data structures
  perf annotate: Remove sym_hist.addr[] array
  perf annotate: Calculate instruction overhead using hashmap
  perf annotate: Add a hashmap for symbol histogram
  perf threads: Reduce table size from 256 to 8
  perf threads: Switch from rbtree to hashmap
  perf threads: Move threads to its own files
  perf machine: Move machine's threads into its own abstraction
  perf machine: Move fprintf to for_each loop and a callback
  perf trace: Ignore thread hashing in summary
  perf report: Sort child tasks by tid
  perf vendor events amd: Fix Zen 4 cache latency events
  perf version: Display availability of OpenCSD support
  perf vendor events intel: Add umasks/occ_sel to PCU events.
  perf map: Fix map reference count issues
  libperf evlist: Avoid out-of-bounds access
  perf lock contention: Account contending locks too
  perf metrics: Fix segv for metrics with no events
  perf metrics: Fix metric matching
  perf pmu: Fix a potential memory leak in perf_pmu__lookup()
  ...
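[Editor's note] The "Sync task state" item above is the change that lands in
this file: builtin-sched.c used to decode sched_switch's prev_state with a
private copy of the kernel's task-state bits. Below is a minimal sketch of
the scheme being removed; old_task_state_char() is lifted from the deleted
task_state_char()/TASK_STATE_TO_CHAR_STR code in the diff, while the main()
demo is added here purely for illustration:

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	static char old_task_state_char(int state)
	{
		/* fixed table, copied from the removed TASK_STATE_TO_CHAR_STR */
		static const char state_to_char[] = "RSDTtZXxKWP";
		/* index of the lowest set bit; 0 means TASK_RUNNING */
		unsigned int bit = state ? ffs(state) : 0;

		return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
	}

	int main(void)
	{
		/* with the encoding the removed #defines assumed, 1 is
		 * TASK_INTERRUPTIBLE and 2 is TASK_UNINTERRUPTIBLE,
		 * so this prints "S D" */
		printf("%c %c\n", old_task_state_char(1), old_task_state_char(2));
		return 0;
	}

The table only stays correct while the kernel never renumbers its state
bits, which is exactly the assumption the commit message says no longer
holds. The replacement helper, evsel__taskstate(), returns the state
character derived from the trace event fields themselves, so the hunks
below drop both the bitmask #defines and the table lookups.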
Diffstat (limited to 'tools/perf/builtin-sched.c')
-rw-r--r--  tools/perf/builtin-sched.c  220
1 file changed, 114 insertions(+), 106 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index dd6065afbbaf..b248c433529a 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -92,24 +92,6 @@ struct sched_atom {
struct task_desc *wakee;
};
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
-
-/* task state bitmask, copied from include/linux/sched.h */
-#define TASK_RUNNING 0
-#define TASK_INTERRUPTIBLE 1
-#define TASK_UNINTERRUPTIBLE 2
-#define __TASK_STOPPED 4
-#define __TASK_TRACED 8
-/* in tsk->exit_state */
-#define EXIT_DEAD 16
-#define EXIT_ZOMBIE 32
-#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
-/* in tsk->state again */
-#define TASK_DEAD 64
-#define TASK_WAKEKILL 128
-#define TASK_WAKING 256
-#define TASK_PARKED 512
-
enum thread_state {
THREAD_SLEEPING = 0,
THREAD_WAIT_CPU,
@@ -266,7 +248,7 @@ struct thread_runtime {
u64 total_preempt_time;
u64 total_delay_time;
- int last_state;
+ char last_state;
char shortname[3];
bool comm_changed;
@@ -436,7 +418,7 @@ static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *t
}
static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
- u64 timestamp, u64 task_state __maybe_unused)
+ u64 timestamp, const char task_state __maybe_unused)
{
struct sched_atom *event = get_new_event(task, timestamp);
@@ -860,7 +842,7 @@ static int replay_switch_event(struct perf_sched *sched,
*next_comm = evsel__strval(evsel, sample, "next_comm");
const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
next_pid = evsel__intval(evsel, sample, "next_pid");
- const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
+ const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
struct task_desc *prev, __maybe_unused *next;
u64 timestamp0, timestamp = sample->time;
int cpu = sample->cpu;
@@ -1050,13 +1032,6 @@ static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
return 0;
}
-static char sched_out_state(u64 prev_state)
-{
- const char *str = TASK_STATE_TO_CHAR_STR;
-
- return str[prev_state];
-}
-
static int
add_sched_out_event(struct work_atoms *atoms,
char run_state,
@@ -1132,7 +1107,7 @@ static int latency_switch_event(struct perf_sched *sched,
{
const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
next_pid = evsel__intval(evsel, sample, "next_pid");
- const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
+ const char prev_state = evsel__taskstate(evsel, sample, "prev_state");
struct work_atoms *out_events, *in_events;
struct thread *sched_out, *sched_in;
u64 timestamp0, timestamp = sample->time;
@@ -1168,7 +1143,7 @@ static int latency_switch_event(struct perf_sched *sched,
goto out_put;
}
}
- if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
+ if (add_sched_out_event(out_events, prev_state, timestamp))
return -1;
in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
@@ -2033,24 +2008,12 @@ static void timehist_header(struct perf_sched *sched)
printf("\n");
}
-static char task_state_char(struct thread *thread, int state)
-{
- static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
- unsigned bit = state ? ffs(state) : 0;
-
- /* 'I' for idle */
- if (thread__tid(thread) == 0)
- return 'I';
-
- return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
-}
-
static void timehist_print_sample(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
struct addr_location *al,
struct thread *thread,
- u64 t, int state)
+ u64 t, const char state)
{
struct thread_runtime *tr = thread__priv(thread);
const char *next_comm = evsel__strval(evsel, sample, "next_comm");
@@ -2091,7 +2054,7 @@ static void timehist_print_sample(struct perf_sched *sched,
print_sched_time(tr->dt_run, 6);
if (sched->show_state)
- printf(" %5c ", task_state_char(thread, state));
+ printf(" %5c ", thread__tid(thread) == 0 ? 'I' : state);
if (sched->show_next) {
snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
@@ -2163,9 +2126,9 @@ static void timehist_update_runtime_stats(struct thread_runtime *r,
else if (r->last_time) {
u64 dt_wait = tprev - r->last_time;
- if (r->last_state == TASK_RUNNING)
+ if (r->last_state == 'R')
r->dt_preempt = dt_wait;
- else if (r->last_state == TASK_UNINTERRUPTIBLE)
+ else if (r->last_state == 'D')
r->dt_iowait = dt_wait;
else
r->dt_sleep = dt_wait;
@@ -2590,7 +2553,7 @@ static int timehist_sched_change_event(struct perf_tool *tool,
struct thread_runtime *tr = NULL;
u64 tprev, t = sample->time;
int rc = 0;
- int state = evsel__intval(evsel, sample, "prev_state");
+ const char state = evsel__taskstate(evsel, sample, "prev_state");
addr_location__init(&al);
if (machine__resolve(machine, &al, sample) < 0) {
@@ -3204,14 +3167,44 @@ static void perf_sched__merge_lat(struct perf_sched *sched)
}
}
+static int setup_cpus_switch_event(struct perf_sched *sched)
+{
+ unsigned int i;
+
+ sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched)));
+ if (!sched->cpu_last_switched)
+ return -1;
+
+ sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid)));
+ if (!sched->curr_pid) {
+ zfree(&sched->cpu_last_switched);
+ return -1;
+ }
+
+ for (i = 0; i < MAX_CPUS; i++)
+ sched->curr_pid[i] = -1;
+
+ return 0;
+}
+
+static void free_cpus_switch_event(struct perf_sched *sched)
+{
+ zfree(&sched->curr_pid);
+ zfree(&sched->cpu_last_switched);
+}
+
static int perf_sched__lat(struct perf_sched *sched)
{
+ int rc = -1;
struct rb_node *next;
setup_pager();
+ if (setup_cpus_switch_event(sched))
+ return rc;
+
if (perf_sched__read_events(sched))
- return -1;
+ goto out_free_cpus_switch_event;
perf_sched__merge_lat(sched);
perf_sched__sort_lat(sched);
@@ -3240,13 +3233,15 @@ static int perf_sched__lat(struct perf_sched *sched)
print_bad_events(sched);
printf("\n");
- return 0;
+ rc = 0;
+
+out_free_cpus_switch_event:
+ free_cpus_switch_event(sched);
+ return rc;
}
static int setup_map_cpus(struct perf_sched *sched)
{
- struct perf_cpu_map *map;
-
sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF);
if (sched->map.comp) {
@@ -3255,16 +3250,15 @@ static int setup_map_cpus(struct perf_sched *sched)
return -1;
}
- if (!sched->map.cpus_str)
- return 0;
-
- map = perf_cpu_map__new(sched->map.cpus_str);
- if (!map) {
- pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
- return -1;
+ if (sched->map.cpus_str) {
+ sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str);
+ if (!sched->map.cpus) {
+ pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
+ zfree(&sched->map.comp_cpus);
+ return -1;
+ }
}
- sched->map.cpus = map;
return 0;
}
@@ -3304,33 +3298,69 @@ static int setup_color_cpus(struct perf_sched *sched)
static int perf_sched__map(struct perf_sched *sched)
{
+ int rc = -1;
+
+ sched->curr_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_thread)));
+ if (!sched->curr_thread)
+ return rc;
+
+ if (setup_cpus_switch_event(sched))
+ goto out_free_curr_thread;
+
if (setup_map_cpus(sched))
- return -1;
+ goto out_free_cpus_switch_event;
if (setup_color_pids(sched))
- return -1;
+ goto out_put_map_cpus;
if (setup_color_cpus(sched))
- return -1;
+ goto out_put_color_pids;
setup_pager();
if (perf_sched__read_events(sched))
- return -1;
+ goto out_put_color_cpus;
+
+ rc = 0;
print_bad_events(sched);
- return 0;
+
+out_put_color_cpus:
+ perf_cpu_map__put(sched->map.color_cpus);
+
+out_put_color_pids:
+ perf_thread_map__put(sched->map.color_pids);
+
+out_put_map_cpus:
+ zfree(&sched->map.comp_cpus);
+ perf_cpu_map__put(sched->map.cpus);
+
+out_free_cpus_switch_event:
+ free_cpus_switch_event(sched);
+
+out_free_curr_thread:
+ zfree(&sched->curr_thread);
+ return rc;
}
static int perf_sched__replay(struct perf_sched *sched)
{
+ int ret;
unsigned long i;
+ mutex_init(&sched->start_work_mutex);
+ mutex_init(&sched->work_done_wait_mutex);
+
+ ret = setup_cpus_switch_event(sched);
+ if (ret)
+ goto out_mutex_destroy;
+
calibrate_run_measurement_overhead(sched);
calibrate_sleep_measurement_overhead(sched);
test_calibrations(sched);
- if (perf_sched__read_events(sched))
- return -1;
+ ret = perf_sched__read_events(sched);
+ if (ret)
+ goto out_free_cpus_switch_event;
printf("nr_run_events: %ld\n", sched->nr_run_events);
printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
@@ -3355,7 +3385,14 @@ static int perf_sched__replay(struct perf_sched *sched)
sched->thread_funcs_exit = true;
destroy_tasks(sched);
- return 0;
+
+out_free_cpus_switch_event:
+ free_cpus_switch_event(sched);
+
+out_mutex_destroy:
+ mutex_destroy(&sched->start_work_mutex);
+ mutex_destroy(&sched->work_done_wait_mutex);
+ return ret;
}
static void setup_sorting(struct perf_sched *sched, const struct option *options,
@@ -3590,28 +3627,7 @@ int cmd_sched(int argc, const char **argv)
.switch_event = replay_switch_event,
.fork_event = replay_fork_event,
};
- unsigned int i;
- int ret = 0;
-
- mutex_init(&sched.start_work_mutex);
- mutex_init(&sched.work_done_wait_mutex);
- sched.curr_thread = calloc(MAX_CPUS, sizeof(*sched.curr_thread));
- if (!sched.curr_thread) {
- ret = -ENOMEM;
- goto out;
- }
- sched.cpu_last_switched = calloc(MAX_CPUS, sizeof(*sched.cpu_last_switched));
- if (!sched.cpu_last_switched) {
- ret = -ENOMEM;
- goto out;
- }
- sched.curr_pid = malloc(MAX_CPUS * sizeof(*sched.curr_pid));
- if (!sched.curr_pid) {
- ret = -ENOMEM;
- goto out;
- }
- for (i = 0; i < MAX_CPUS; i++)
- sched.curr_pid[i] = -1;
+ int ret;
argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
@@ -3622,9 +3638,9 @@ int cmd_sched(int argc, const char **argv)
* Aliased to 'perf script' for now:
*/
if (!strcmp(argv[0], "script")) {
- ret = cmd_script(argc, argv);
+ return cmd_script(argc, argv);
} else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
- ret = __cmd_record(argc, argv);
+ return __cmd_record(argc, argv);
} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
sched.tp_handler = &lat_ops;
if (argc > 1) {
@@ -3633,7 +3649,7 @@ int cmd_sched(int argc, const char **argv)
usage_with_options(latency_usage, latency_options);
}
setup_sorting(&sched, latency_options, latency_usage);
- ret = perf_sched__lat(&sched);
+ return perf_sched__lat(&sched);
} else if (!strcmp(argv[0], "map")) {
if (argc) {
argc = parse_options(argc, argv, map_options, map_usage, 0);
@@ -3642,7 +3658,7 @@ int cmd_sched(int argc, const char **argv)
}
sched.tp_handler = &map_ops;
setup_sorting(&sched, latency_options, latency_usage);
- ret = perf_sched__map(&sched);
+ return perf_sched__map(&sched);
} else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
sched.tp_handler = &replay_ops;
if (argc) {
@@ -3650,7 +3666,7 @@ int cmd_sched(int argc, const char **argv)
if (argc)
usage_with_options(replay_usage, replay_options);
}
- ret = perf_sched__replay(&sched);
+ return perf_sched__replay(&sched);
} else if (!strcmp(argv[0], "timehist")) {
if (argc) {
argc = parse_options(argc, argv, timehist_options,
@@ -3666,24 +3682,16 @@ int cmd_sched(int argc, const char **argv)
parse_options_usage(NULL, timehist_options, "w", true);
if (sched.show_next)
parse_options_usage(NULL, timehist_options, "n", true);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
ret = symbol__validate_sym_arguments();
if (ret)
- goto out;
+ return ret;
- ret = perf_sched__timehist(&sched);
+ return perf_sched__timehist(&sched);
} else {
usage_with_options(sched_usage, sched_options);
}
-out:
- free(sched.curr_pid);
- free(sched.cpu_last_switched);
- free(sched.curr_thread);
- mutex_destroy(&sched.start_work_mutex);
- mutex_destroy(&sched.work_done_wait_mutex);
-
- return ret;
+ return 0;
}
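[Editor's note] Beyond the task-state decoding, the diff's other theme is
moving the MAX_CPUS-sized allocations out of cmd_sched() and into the
subcommands that need them, paired with goto-based unwinding so that every
exit path releases exactly what was acquired. The following is a
self-contained sketch of that idiom in the same shape as perf_sched__map();
run_with_resources() and do_work() are hypothetical stand-ins, not perf
functions:

	#include <stdlib.h>

	static int do_work(void *a, void *b)
	{
		(void)a;
		(void)b;
		return 0;	/* pretend the real processing succeeded */
	}

	static int run_with_resources(void)
	{
		int rc = -1;
		void *a, *b;

		a = calloc(1, 64);
		if (!a)
			return rc;	/* nothing acquired yet, plain return */

		b = calloc(1, 64);
		if (!b)
			goto out_free_a;	/* unwind only what exists so far */

		if (do_work(a, b))
			goto out_free_b;

		rc = 0;	/* success falls through the same unwind path */

	out_free_b:
		free(b);
	out_free_a:
		free(a);
		return rc;
	}

	int main(void)
	{
		return run_with_resources();
	}

As in perf_sched__map() and perf_sched__replay() above, the success path
deliberately shares the unwind labels with the failure paths, so each
resource is released exactly once no matter where the function exits.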