From d119357d0743548f90243315de905cf6a79e5221 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 14 May 2023 18:06:12 -0700 Subject: rcu-tasks: Treat only synchronous grace periods urgently The performance requirements on RCU Tasks, and in particular on RCU Tasks Trace, have evolved over time as the workloads have evolved. The current implementation is designed to provide low grace-period latencies, and also to accommodate short-duration floods of callbacks. However, current workloads can also provide a constant background callback-queuing rate of a few hundred call_rcu_tasks_trace() invocations per second. This results in continuous back-to-back RCU Tasks Trace grace periods, which in turn can consume the better part of 10% of a CPU. One could take the attitude that there are several tens of other CPUs on the systems running such workloads, but energy efficiency is a thing. On these systems, although asynchronous grace-period requests happen every few milliseconds, synchronous grace-period requests are quite rare. This commit therefore arranges for grace periods to be initiated immediately in response to calls to synchronize_rcu_tasks*() and also to calls to synchronize_rcu_mult() that are passed one of the call_rcu_tasks*() functions. These are recognized by the tell-tale wakeme_after_rcu callback function. In other cases, callbacks are gathered up for up to about 250 milliseconds before a grace period is initiated. This results in more than an order of magnitude reduction in RCU Tasks Trace grace periods, with a corresponding reduction in consumption of CPU time. Reported-by: Alexei Starovoitov Reported-by: Martin KaFai Lau Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 81 ++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 73 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index b770add3f843..c88d7fe29d92 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -25,6 +25,8 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp); * @cblist: Callback list. * @lock: Lock protecting per-CPU callback list. * @rtp_jiffies: Jiffies counter value for statistics. + * @lazy_timer: Timer to unlazify callbacks. + * @urgent_gp: Number of additional non-lazy grace periods. * @rtp_n_lock_retries: Rough lock-contention statistic. * @rtp_work: Work queue for invoking callbacks. * @rtp_irq_work: IRQ work queue for deferred wakeups. @@ -38,6 +40,8 @@ struct rcu_tasks_percpu { raw_spinlock_t __private lock; unsigned long rtp_jiffies; unsigned long rtp_n_lock_retries; + struct timer_list lazy_timer; + unsigned int urgent_gp; struct work_struct rtp_work; struct irq_work rtp_irq_work; struct rcu_head barrier_q_head; @@ -51,7 +55,6 @@ struct rcu_tasks_percpu { * @cbs_wait: RCU wait allowing a new callback to get kthread's attention. * @cbs_gbl_lock: Lock protecting callback list. * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone. - * @kthread_ptr: This flavor's grace-period/callback-invocation kthread. * @gp_func: This flavor's grace-period-wait function. * @gp_state: Grace period's most recent state transition (debugging). * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping. @@ -61,6 +64,8 @@ struct rcu_tasks_percpu { * @tasks_gp_seq: Number of grace periods completed since boot. * @n_ipis: Number of IPIs sent to encourage grace periods to end. * @n_ipis_fails: Number of IPI-send failures. + * @kthread_ptr: This flavor's grace-period/callback-invocation kthread. 
+ * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy. * @pregp_func: This flavor's pre-grace-period function (optional). * @pertask_func: This flavor's per-task scan function (optional). * @postscan_func: This flavor's post-task scan function (optional). @@ -92,6 +97,7 @@ struct rcu_tasks { unsigned long n_ipis; unsigned long n_ipis_fails; struct task_struct *kthread_ptr; + unsigned long lazy_jiffies; rcu_tasks_gp_func_t gp_func; pregp_func_t pregp_func; pertask_func_t pertask_func; @@ -127,6 +133,7 @@ static struct rcu_tasks rt_name = \ .gp_func = gp, \ .call_func = call, \ .rtpcpu = &rt_name ## __percpu, \ + .lazy_jiffies = DIV_ROUND_UP(HZ, 4), \ .name = n, \ .percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \ .percpu_enqueue_lim = 1, \ @@ -276,6 +283,33 @@ static void cblist_init_generic(struct rcu_tasks *rtp) data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust); } +// Compute wakeup time for lazy callback timer. +static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp) +{ + return jiffies + rtp->lazy_jiffies; +} + +// Timer handler that unlazifies lazy callbacks. +static void call_rcu_tasks_generic_timer(struct timer_list *tlp) +{ + unsigned long flags; + bool needwake = false; + struct rcu_tasks *rtp; + struct rcu_tasks_percpu *rtpcp = from_timer(rtpcp, tlp, lazy_timer); + + rtp = rtpcp->rtpp; + raw_spin_lock_irqsave_rcu_node(rtpcp, flags); + if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) { + if (!rtpcp->urgent_gp) + rtpcp->urgent_gp = 1; + needwake = true; + mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp)); + } + raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); + if (needwake) + rcuwait_wake_up(&rtp->cbs_wait); +} + // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic(). static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp) { @@ -292,6 +326,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, { int chosen_cpu; unsigned long flags; + bool havekthread = smp_load_acquire(&rtp->kthread_ptr); int ideal_cpu; unsigned long j; bool needadjust = false; @@ -321,7 +356,15 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, cblist_init_generic(rtp); raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. 
} - needwake = rcu_segcblist_empty(&rtpcp->cblist); + needwake = func == wakeme_after_rcu; + if (havekthread && !timer_pending(&rtpcp->lazy_timer)) { + if (rtp->lazy_jiffies) + mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp)); + else + needwake = rcu_segcblist_empty(&rtpcp->cblist); + } + if (needwake) + rtpcp->urgent_gp = 3; rcu_segcblist_enqueue(&rtpcp->cblist, rhp); raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); if (unlikely(needadjust)) { @@ -415,9 +458,14 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp) } rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); - if (rcu_segcblist_pend_cbs(&rtpcp->cblist)) + if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) { + if (rtp->lazy_jiffies) + rtpcp->urgent_gp--; needgpcb |= 0x3; - if (!rcu_segcblist_empty(&rtpcp->cblist)) + } else if (rcu_segcblist_empty(&rtpcp->cblist)) { + rtpcp->urgent_gp = 0; + } + if (rcu_segcblist_ready_cbs(&rtpcp->cblist)) needgpcb |= 0x1; raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); } @@ -549,11 +597,19 @@ static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot) // RCU-tasks kthread that detects grace periods and invokes callbacks. static int __noreturn rcu_tasks_kthread(void *arg) { + int cpu; struct rcu_tasks *rtp = arg; + for_each_possible_cpu(cpu) { + struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); + + timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0); + rtpcp->urgent_gp = 1; + } + /* Run on housekeeping CPUs by default. Sysadm can move if desired. */ housekeeping_affine(current, HK_TYPE_RCU); - WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start! + smp_store_release(&rtp->kthread_ptr, current); // Let GPs start! /* * Each pass through the following loop makes one check for @@ -635,16 +691,22 @@ static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s) { int cpu; bool havecbs = false; + bool haveurgent = false; + bool haveurgentcbs = false; for_each_possible_cpu(cpu) { struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); - if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) { + if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) havecbs = true; + if (data_race(rtpcp->urgent_gp)) + haveurgent = true; + if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp)) + haveurgentcbs = true; + if (havecbs && haveurgent && haveurgentcbs) break; - } } - pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n", + pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n", rtp->kname, tasks_gp_state_getname(rtp), data_race(rtp->gp_state), jiffies - data_race(rtp->gp_jiffies), @@ -652,6 +714,9 @@ static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s) data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis), ".k"[!!data_race(rtp->kthread_ptr)], ".C"[havecbs], + ".u"[haveurgent], + ".U"[haveurgentcbs], + rtp->lazy_jiffies, s); } #endif // #ifndef CONFIG_TINY_RCU -- cgit v1.2.3 From 5ae769c611e7d77c1d945cb132140deb93a32d49 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 17 May 2023 05:20:03 -0700 Subject: rcu-tasks: Remove redundant #ifdef CONFIG_TASKS_RCU The kernel/rcu/tasks.h file has a #endif immediately followed by an #ifdef of that same CONFIG_TASKS_RCU Kconfig option. This commit therefore removes the redundant #endif/#ifdef pair, merging the two conditional blocks into one. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tasks.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index c88d7fe29d92..7eae67fbe47c 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -146,9 +146,7 @@ static struct rcu_tasks rt_name = \ #ifdef CONFIG_TASKS_RCU /* Track exiting tasks in order to allow them to be waited for. */ DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu); -#endif -#ifdef CONFIG_TASKS_RCU /* Report delay in synchronize_srcu() completion in rcu_tasks_postscan(). */ static void tasks_rcu_exit_srcu_stall(struct timer_list *unused); static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall); -- cgit v1.2.3 From 450d461aa629024c0f01022122838b5243fe0f2a Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 17 May 2023 06:40:23 -0700 Subject: rcu-tasks: Add kernel boot parameters for callback laziness This commit adds kernel boot parameters for callback laziness, allowing the RCU Tasks flavors to be individually adjusted. Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'kernel') diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 7eae67fbe47c..28e986627e3f 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -1083,11 +1083,16 @@ void rcu_barrier_tasks(void) } EXPORT_SYMBOL_GPL(rcu_barrier_tasks); +int rcu_tasks_lazy_ms = -1; +module_param(rcu_tasks_lazy_ms, int, 0444); + static int __init rcu_spawn_tasks_kthread(void) { cblist_init_generic(&rcu_tasks); rcu_tasks.gp_sleep = HZ / 10; rcu_tasks.init_fract = HZ / 10; + if (rcu_tasks_lazy_ms >= 0) + rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms); rcu_tasks.pregp_func = rcu_tasks_pregp_step; rcu_tasks.pertask_func = rcu_tasks_pertask; rcu_tasks.postscan_func = rcu_tasks_postscan; @@ -1236,10 +1241,15 @@ void rcu_barrier_tasks_rude(void) } EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude); +int rcu_tasks_rude_lazy_ms = -1; +module_param(rcu_tasks_rude_lazy_ms, int, 0444); + static int __init rcu_spawn_tasks_rude_kthread(void) { cblist_init_generic(&rcu_tasks_rude); rcu_tasks_rude.gp_sleep = HZ / 10; + if (rcu_tasks_rude_lazy_ms >= 0) + rcu_tasks_rude.lazy_jiffies = msecs_to_jiffies(rcu_tasks_rude_lazy_ms); rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude); return 0; } @@ -1856,6 +1866,9 @@ void rcu_barrier_tasks_trace(void) } EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace); +int rcu_tasks_trace_lazy_ms = -1; +module_param(rcu_tasks_trace_lazy_ms, int, 0444); + static int __init rcu_spawn_tasks_trace_kthread(void) { cblist_init_generic(&rcu_tasks_trace); @@ -1870,6 +1883,8 @@ static int __init rcu_spawn_tasks_trace_kthread(void) if (rcu_tasks_trace.init_fract <= 0) rcu_tasks_trace.init_fract = 1; } + if (rcu_tasks_trace_lazy_ms >= 0) + rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms); rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step; rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan; rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace; -- cgit v1.2.3 From db13710a0365ba66f8f232eb3bdfaa1a10d820f7 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 26 Jun 2023 13:45:13 -0700 Subject: rcu-tasks: Cancel callback laziness if too many callbacks The various RCU Tasks flavors now do lazy grace periods when there are only asynchronous grace period requests. By default, the system will let 250 milliseconds elapse after the first call_rcu_tasks*() callback is queued before starting a grace period. 
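A purely illustrative sketch of this asynchronous (and thus lazy-eligible) path, not taken from the patch: my_obj, my_obj_free_cb(), and my_obj_retire() are hypothetical names, and the header choice assumes the usual <linux/rcupdate_trace.h> declaration of call_rcu_tasks_trace().

        /* Hypothetical example of an asynchronous RCU Tasks Trace user. */
        #include <linux/container_of.h>
        #include <linux/rcupdate_trace.h>
        #include <linux/slab.h>

        struct my_obj {
                struct rcu_head rh;     /* For call_rcu_tasks_trace(). */
                int data;
        };

        static void my_obj_free_cb(struct rcu_head *rhp)
        {
                kfree(container_of(rhp, struct my_obj, rh));
        }

        static void my_obj_retire(struct my_obj *p)
        {
                /*
                 * Asynchronous request: with laziness in effect, the grace
                 * period backing this callback might not start for up to
                 * about 250 milliseconds (lazy_jiffies) by default.
                 */
                call_rcu_tasks_trace(&p->rh, my_obj_free_cb);
        }

Synchronous requests behave differently, as described next.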
In contrast, synchronous grace period requests such as synchronize_rcu_tasks*() will start a grace period immediately. However, invoking one of the call_rcu_tasks*() functions in a too-tight loop can result in a callback flood, which in turn can exhaust memory if grace periods are delayed for too long. This commit therefore sets a limit: when any CPU's callback list expands to contain rcupdate.rcu_task_lazy_lim callbacks (defaulting to 32, set to -1 to disable), the grace-period kthread will be awakened, thus cancelling any ongoing laziness and getting out in front of the potential callback flood. Cc: Alexei Starovoitov Cc: Martin KaFai Lau Cc: Andrii Nakryiko Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 28e986627e3f..291d536b1789 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -176,6 +176,8 @@ static int rcu_task_contend_lim __read_mostly = 100; module_param(rcu_task_contend_lim, int, 0444); static int rcu_task_collapse_lim __read_mostly = 10; module_param(rcu_task_collapse_lim, int, 0444); +static int rcu_task_lazy_lim __read_mostly = 32; +module_param(rcu_task_lazy_lim, int, 0444); /* RCU tasks grace-period state for debugging. */ #define RTGS_INIT 0 @@ -354,8 +356,9 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, cblist_init_generic(rtp); raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. } - needwake = func == wakeme_after_rcu; - if (havekthread && !timer_pending(&rtpcp->lazy_timer)) { + needwake = (func == wakeme_after_rcu) || + (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim); + if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) { if (rtp->lazy_jiffies) mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp)); else -- cgit v1.2.3 From f5063e8948dad7f31adb007284a5d5038ae31bb8 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Fri, 7 Jul 2023 13:53:55 -0400 Subject: refscale: Fix uninitialized use of wait_queue_head_t Running the refscale test occasionally crashes the kernel with the following error: [ 8569.952896] BUG: unable to handle page fault for address: ffffffffffffffe8 [ 8569.952900] #PF: supervisor read access in kernel mode [ 8569.952902] #PF: error_code(0x0000) - not-present page [ 8569.952904] PGD c4b048067 P4D c4b049067 PUD c4b04b067 PMD 0 [ 8569.952910] Oops: 0000 [#1] PREEMPT_RT SMP NOPTI [ 8569.952916] Hardware name: Dell Inc. PowerEdge R750/0WMWCR, BIOS 1.2.4 05/28/2021 [ 8569.952917] RIP: 0010:prepare_to_wait_event+0x101/0x190 : [ 8569.952940] Call Trace: [ 8569.952941] [ 8569.952944] ref_scale_reader+0x380/0x4a0 [refscale] [ 8569.952959] kthread+0x10e/0x130 [ 8569.952966] ret_from_fork+0x1f/0x30 [ 8569.952973] The likely cause is that init_waitqueue_head() is called after the call to the torture_create_kthread() function that creates the ref_scale_reader kthread. Although this init_waitqueue_head() call will very likely complete before this kthread is created and starts running, it is possible that the calling kthread will be delayed between the calls to torture_create_kthread() and init_waitqueue_head(). In this case, the new kthread will use the waitqueue head before it is properly initialized, which is not good for the kernel's health and well-being. The above crash happened here: static inline void __add_wait_queue(...) 
{ : if (!(wq->flags & WQ_FLAG_PRIORITY)) <=== Crash here The offset of flags from list_head entry in wait_queue_entry is -0x18. If reader_tasks[i].wq.head.next is NULL as allocated reader_task structure is zero initialized, the instruction will try to access address 0xffffffffffffffe8, which is exactly the fault address listed above. This commit therefore invokes init_waitqueue_head() before creating the kthread. Fixes: 653ed64b01dc ("refperf: Add a test to measure performance of read-side synchronization") Signed-off-by: Waiman Long Reviewed-by: Qiuxu Zhuo Reviewed-by: Davidlohr Bueso Acked-by: Joel Fernandes (Google) Signed-off-by: Paul E. McKenney --- kernel/rcu/refscale.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c index 1970ce5f22d4..71d138573856 100644 --- a/kernel/rcu/refscale.c +++ b/kernel/rcu/refscale.c @@ -1107,12 +1107,11 @@ ref_scale_init(void) VERBOSE_SCALEOUT("Starting %d reader threads", nreaders); for (i = 0; i < nreaders; i++) { + init_waitqueue_head(&reader_tasks[i].wq); firsterr = torture_create_kthread(ref_scale_reader, (void *)i, reader_tasks[i].task); if (torture_init_error(firsterr)) goto unwind; - - init_waitqueue_head(&(reader_tasks[i].wq)); } // Main Task -- cgit v1.2.3 From b5a2801fc05350b3b81e6fdcefddd6cfdbf8b238 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 11 Jul 2023 18:01:38 -0700 Subject: refscale: Add a "jiffies" test This commit adds a "jiffies" test to refscale, allowing use of jiffies to be compared to ktime_get_real_fast_ns(). On my x86 laptop, jiffies is more than 20x faster. (Though for many uses, the tens-of-nanoseconds overhead of ktime_get_real_fast_ns() will be just fine.) Signed-off-by: Paul E. McKenney --- kernel/rcu/refscale.c | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c index 71d138573856..91a0fd0d4d9a 100644 --- a/kernel/rcu/refscale.c +++ b/kernel/rcu/refscale.c @@ -528,6 +528,38 @@ static struct ref_scale_ops clock_ops = { .name = "clock" }; +static void ref_jiffies_section(const int nloops) +{ + u64 x = 0; + int i; + + preempt_disable(); + for (i = nloops; i >= 0; i--) + x += jiffies; + preempt_enable(); + stopopts = x; +} + +static void ref_jiffies_delay_section(const int nloops, const int udl, const int ndl) +{ + u64 x = 0; + int i; + + preempt_disable(); + for (i = nloops; i >= 0; i--) { + x += jiffies; + un_delay(udl, ndl); + } + preempt_enable(); + stopopts = x; +} + +static struct ref_scale_ops jiffies_ops = { + .readsection = ref_jiffies_section, + .delaysection = ref_jiffies_delay_section, + .name = "jiffies" +}; + //////////////////////////////////////////////////////////////////////// // // Methods leveraging SLAB_TYPESAFE_BY_RCU. @@ -1047,7 +1079,7 @@ ref_scale_init(void) int firsterr = 0; static struct ref_scale_ops *scale_ops[] = { &rcu_ops, &srcu_ops, RCU_TRACE_OPS RCU_TASKS_OPS &refcnt_ops, &rwlock_ops, - &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops, + &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops, &jiffies_ops, &typesafe_ref_ops, &typesafe_lock_ops, &typesafe_seqlock_ops, }; -- cgit v1.2.3 From 2226f3dc05a988ce81308bfc3d125f5d0b0d592c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 16 May 2023 07:02:01 -0700 Subject: rcuscale: Permit blocking delays between writers Some workloads do isolated RCU work, and this can affect operation latencies. 
This commit therefore adds a writer_holdoff_jiffies module parameter that causes writers to block for the specified number of jiffies between each pair of consecutive write-side operations. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcuscale.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel') diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index d1221731c7cf..4277037e0996 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -93,6 +93,7 @@ torture_param(bool, shutdown, RCUSCALE_SHUTDOWN, "Shutdown at end of scalability tests."); torture_param(int, verbose, 1, "Enable verbose debugging printk()s"); torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable"); +torture_param(int, writer_holdoff_jiffies, 0, "Holdoff (jiffies) between GPs, zero to disable"); torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() scale test?"); torture_param(int, kfree_mult, 1, "Multiple of kfree_obj size to allocate."); torture_param(int, kfree_by_call_rcu, 0, "Use call_rcu() to emulate kfree_rcu()?"); @@ -414,6 +415,7 @@ rcu_scale_writer(void *arg) struct rcu_head *rhp = NULL; bool started = false, done = false, alldone = false; u64 t; + DEFINE_TORTURE_RANDOM(tr); u64 *wdp; u64 *wdpp = writer_durations[me]; @@ -448,6 +450,8 @@ rcu_scale_writer(void *arg) do { if (writer_holdoff) udelay(writer_holdoff); + if (writer_holdoff_jiffies) + schedule_timeout_idle(torture_random(&tr) % writer_holdoff_jiffies + 1); wdp = &wdpp[i]; *wdp = ktime_get_mono_fast_ns(); if (gp_async) { -- cgit v1.2.3 From ee7516a16350ce2c1d45314a8c79bc93750fca8f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 16 May 2023 08:21:17 -0700 Subject: rcuscale: Fix gp_async_max typo: s/reader/writer/ Signed-off-by: Paul E. McKenney --- kernel/rcu/rcuscale.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index 4277037e0996..15edd8c82933 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -84,7 +84,7 @@ MODULE_AUTHOR("Paul E. McKenney "); #endif torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives"); -torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader"); +torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer"); torture_param(bool, gp_exp, false, "Use expedited GP wait primitives"); torture_param(int, holdoff, 10, "Holdoff time before test start (s)"); torture_param(int, nreaders, -1, "Number of RCU reader threads"); -- cgit v1.2.3 From 7221f493c5ff8bee5c39de1e43cb8a8997c2cda5 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 16 May 2023 08:22:31 -0700 Subject: rcuscale: Add minruntime module parameter By default, rcuscale collects only 100 points of data per writer, while arranging for all kthreads to be actively collecting (if not recording) data during the time that any kthread might be recording. This works well, but does not allow much time to bring external performance tools to bear. This commit therefore adds a minruntime module parameter that specifies a minimum data-collection interval in seconds. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcuscale.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index 15edd8c82933..7c5bab5a4f19 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -87,6 +87,7 @@ torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives"); torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer"); torture_param(bool, gp_exp, false, "Use expedited GP wait primitives"); torture_param(int, holdoff, 10, "Holdoff time before test start (s)"); +torture_param(int, minruntime, 0, "Minimum run time (s)"); torture_param(int, nreaders, -1, "Number of RCU reader threads"); torture_param(int, nwriters, -1, "Number of RCU updater threads"); torture_param(bool, shutdown, RCUSCALE_SHUTDOWN, @@ -411,6 +412,7 @@ rcu_scale_writer(void *arg) { int i = 0; int i_max; + unsigned long jdone; long me = (long)arg; struct rcu_head *rhp = NULL; bool started = false, done = false, alldone = false; @@ -447,6 +449,7 @@ rcu_scale_writer(void *arg) } } + jdone = jiffies + minruntime * HZ; do { if (writer_holdoff) udelay(writer_holdoff); @@ -479,7 +482,7 @@ retry: if (!started && atomic_read(&n_rcu_scale_writer_started) >= nrealwriters) started = true; - if (!done && i >= MIN_MEAS) { + if (!done && i >= MIN_MEAS && time_after(jiffies, jdone)) { done = true; sched_set_normal(current, 0); pr_alert("%s%s rcu_scale_writer %ld has %d measurements\n", -- cgit v1.2.3 From c68465dfaac3a36e2a72030f834811e651da8097 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 16 May 2023 13:45:16 -0700 Subject: rcuscale: Print out full set of module parameters Signed-off-by: Paul E. McKenney --- kernel/rcu/rcuscale.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index 7c5bab5a4f19..18f1b0e13604 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -525,8 +525,8 @@ static void rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag) { pr_alert("%s" SCALE_FLAG - "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n", - scale_type, tag, nrealreaders, nrealwriters, verbose, shutdown); + "--- %s: gp_async=%d gp_async_max=%d gp_exp=%d holdoff=%d minruntime=%d nreaders=%d nwriters=%d writer_holdoff=%d writer_holdoff_jiffies=%d verbose=%d shutdown=%d\n", + scale_type, tag, gp_async, gp_async_max, gp_exp, holdoff, minruntime, nrealreaders, nrealwriters, writer_holdoff, writer_holdoff_jiffies, verbose, shutdown); } /* -- cgit v1.2.3 From bb7bad3dae42b669cfe471716f0073ac611e7f75 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 16 May 2023 16:04:52 -0700 Subject: rcuscale: Print out full set of kfree_rcu parameters Signed-off-by: Paul E. 
McKenney Cc: Uladzislau Rezki (Sony) --- kernel/rcu/rcuscale.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel') diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index 18f1b0e13604..821a3e65c54a 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -708,6 +708,10 @@ kfree_scale_init(void) unsigned long jif_start; unsigned long orig_jif; + pr_alert("%s" SCALE_FLAG + "--- kfree_rcu_test: kfree_mult=%d kfree_by_call_rcu=%d kfree_nthreads=%d kfree_alloc_num=%d kfree_loops=%d kfree_rcu_test_double=%d kfree_rcu_test_single=%d\n", + scale_type, kfree_mult, kfree_by_call_rcu, kfree_nthreads, kfree_alloc_num, kfree_loops, kfree_rcu_test_double, kfree_rcu_test_single); + // Also, do a quick self-test to ensure laziness is as much as // expected. if (kfree_by_call_rcu && !IS_ENABLED(CONFIG_RCU_LAZY)) { -- cgit v1.2.3 From 5f8e3202696f5e94d3fc54cfed75d270ae843ee9 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 17 May 2023 05:01:28 -0700 Subject: rcuscale: Measure grace-period kthread CPU time This commit adds the ability to output the CPU time consumed by the grace-period kthread for the RCU variant under test. The CPU time is whatever is in the designated task's current->stime field, and thus is controlled by whatever CPU-time accounting scheme is in effect. This output appears in microseconds as follows on the console: rcu_scale: Grace-period kthread CPU time: 42367.037 [ paulmck: Apply feedback from Stephen Rothwell and kernel test robot. ] Signed-off-by: Paul E. McKenney Tested-by: Yujie Liu --- kernel/rcu/rcuscale.c | 21 +++++++++++++++++++++ kernel/rcu/tasks.h | 6 ++++++ 2 files changed, 27 insertions(+) (limited to 'kernel') diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index 821a3e65c54a..7fba3ab66e35 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -141,6 +141,7 @@ struct rcu_scale_ops { void (*gp_barrier)(void); void (*sync)(void); void (*exp_sync)(void); + struct task_struct *(*rso_gp_kthread)(void); const char *name; }; @@ -336,6 +337,7 @@ static struct rcu_scale_ops tasks_tracing_ops = { .gp_barrier = rcu_barrier_tasks_trace, .sync = synchronize_rcu_tasks_trace, .exp_sync = synchronize_rcu_tasks_trace, + .rso_gp_kthread = get_rcu_tasks_trace_gp_kthread, .name = "tasks-tracing" }; @@ -563,6 +565,8 @@ static struct task_struct **kfree_reader_tasks; static int kfree_nrealthreads; static atomic_t n_kfree_scale_thread_started; static atomic_t n_kfree_scale_thread_ended; +static struct task_struct *kthread_tp; +static u64 kthread_stime; struct kfree_obj { char kfree_obj[8]; @@ -808,6 +812,18 @@ rcu_scale_cleanup(void) if (gp_exp && gp_async) SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!"); + // If built-in, just report all of the GP kthread's CPU time. 
+ if (IS_BUILTIN(CONFIG_RCU_SCALE_TEST) && !kthread_tp && cur_ops->rso_gp_kthread) + kthread_tp = cur_ops->rso_gp_kthread(); + if (kthread_tp) { + u32 ns; + u64 us; + + kthread_stime = kthread_tp->stime - kthread_stime; + us = div_u64_rem(kthread_stime, 1000, &ns); + pr_info("rcu_scale: Grace-period kthread CPU time: %llu.%03u us\n", us, ns); + show_rcu_gp_kthreads(); + } if (kfree_rcu_test) { kfree_scale_cleanup(); return; @@ -921,6 +937,11 @@ rcu_scale_init(void) if (cur_ops->init) cur_ops->init(); + if (cur_ops->rso_gp_kthread) { + kthread_tp = cur_ops->rso_gp_kthread(); + if (kthread_tp) + kthread_stime = kthread_tp->stime; + } if (kfree_rcu_test) return kfree_scale_init(); diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index b770add3f843..990a6cf5fa35 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -1830,6 +1830,12 @@ void show_rcu_tasks_trace_gp_kthread(void) EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread); #endif // !defined(CONFIG_TINY_RCU) +struct task_struct *get_rcu_tasks_trace_gp_kthread(void) +{ + return rcu_tasks_trace.kthread_ptr; +} +EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread); + #else /* #ifdef CONFIG_TASKS_TRACE_RCU */ static void exit_tasks_rcu_finish_trace(struct task_struct *t) { } #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */ -- cgit v1.2.3 From 271a8467a5f7ab7654f06541d2483b5340bb7192 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 2 Jun 2023 14:38:44 -0700 Subject: rcuscale: Measure RCU Tasks Trace grace-period kthread CPU time This commit causes RCU Tasks Trace to output the CPU time consumed by its grace-period kthread. The CPU time is whatever is in the designated task's current->stime field, and thus is controlled by whatever CPU-time accounting scheme is in effect. This output appears in microseconds as follows on the console: rcu_scale: Grace-period kthread CPU time: 42367.037 [ paulmck: Apply Willy Tarreau feedback. ] Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcu.h | 3 +++ kernel/rcu/rcuscale.c | 1 + kernel/rcu/tasks.h | 6 ++++++ 3 files changed, 10 insertions(+) (limited to 'kernel') diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 98c1544cf572..d58bc0e86769 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -505,6 +505,9 @@ void rcu_async_relax(void); void rcupdate_announce_bootup_oddness(void); #ifdef CONFIG_TASKS_RCU_GENERIC void show_rcu_tasks_gp_kthreads(void); +# ifdef CONFIG_TASKS_RCU +struct task_struct *get_rcu_tasks_gp_kthread(void); +# endif // # ifdef CONFIG_TASKS_RCU #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ static inline void show_rcu_tasks_gp_kthreads(void) {} #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index 7fba3ab66e35..35e06c86acc9 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -298,6 +298,7 @@ static struct rcu_scale_ops tasks_ops = { .gp_barrier = rcu_barrier_tasks, .sync = synchronize_rcu_tasks, .exp_sync = synchronize_rcu_tasks, + .rso_gp_kthread = get_rcu_tasks_gp_kthread, .name = "tasks" }; diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 990a6cf5fa35..0f03de023097 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -1042,6 +1042,12 @@ void show_rcu_tasks_classic_gp_kthread(void) EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread); #endif // !defined(CONFIG_TINY_RCU) +struct task_struct *get_rcu_tasks_gp_kthread(void) +{ + return rcu_tasks.kthread_ptr; +} +EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread); + /* * Contribute to protect against tasklist scan blind spot while the * task is exiting and may be removed from the tasklist. See -- cgit v1.2.3 From a15ec57cfcf83858aced058e00a7feb824322fa4 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 3 Jun 2023 09:07:44 -0700 Subject: rcuscale: Add RCU Tasks Rude testing Add a "tasks-rude" option to the rcuscale.scale_type module parameter. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcu.h | 3 +++ kernel/rcu/rcuscale.c | 31 ++++++++++++++++++++++++++++++- kernel/rcu/tasks.h | 7 +++++++ 3 files changed, 40 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index d58bc0e86769..9829d8161b21 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -508,6 +508,9 @@ void show_rcu_tasks_gp_kthreads(void); # ifdef CONFIG_TASKS_RCU struct task_struct *get_rcu_tasks_gp_kthread(void); # endif // # ifdef CONFIG_TASKS_RCU +# ifdef CONFIG_TASKS_RUDE_RCU +struct task_struct *get_rcu_tasks_rude_gp_kthread(void); +# endif // # ifdef CONFIG_TASKS_RUDE_RCU #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ static inline void show_rcu_tasks_gp_kthreads(void) {} #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index 35e06c86acc9..5ce3b4e7ce71 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -310,6 +310,35 @@ static struct rcu_scale_ops tasks_ops = { #endif // #else // #ifdef CONFIG_TASKS_RCU +#ifdef CONFIG_TASKS_RUDE_RCU + +/* + * Definitions for RCU-tasks-rude scalability testing. 
+ */ + +static struct rcu_scale_ops tasks_rude_ops = { + .ptype = RCU_TASKS_RUDE_FLAVOR, + .init = rcu_sync_scale_init, + .readlock = tasks_scale_read_lock, + .readunlock = tasks_scale_read_unlock, + .get_gp_seq = rcu_no_completed, + .gp_diff = rcu_seq_diff, + .async = call_rcu_tasks_rude, + .gp_barrier = rcu_barrier_tasks_rude, + .sync = synchronize_rcu_tasks_rude, + .exp_sync = synchronize_rcu_tasks_rude, + .rso_gp_kthread = get_rcu_tasks_rude_gp_kthread, + .name = "tasks-rude" +}; + +#define TASKS_RUDE_OPS &tasks_rude_ops, + +#else // #ifdef CONFIG_TASKS_RUDE_RCU + +#define TASKS_RUDE_OPS + +#endif // #else // #ifdef CONFIG_TASKS_RUDE_RCU + #ifdef CONFIG_TASKS_TRACE_RCU /* @@ -913,7 +942,7 @@ rcu_scale_init(void) long i; int firsterr = 0; static struct rcu_scale_ops *scale_ops[] = { - &rcu_ops, &srcu_ops, &srcud_ops, TASKS_OPS TASKS_TRACING_OPS + &rcu_ops, &srcu_ops, &srcud_ops, TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS }; if (!torture_init_begin(scale_type, verbose)) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 0f03de023097..0372c5cbc83b 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -1194,6 +1194,13 @@ void show_rcu_tasks_rude_gp_kthread(void) } EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread); #endif // !defined(CONFIG_TINY_RCU) + +struct task_struct *get_rcu_tasks_rude_gp_kthread(void) +{ + return rcu_tasks_rude.kthread_ptr; +} +EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread); + #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */ //////////////////////////////////////////////////////////////////////// -- cgit v1.2.3 From e0a34641eb551e3c51e1588ef7874f2de2844a06 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 9 Jun 2023 14:05:14 +0200 Subject: rcuscale: fix building with RCU_TINY Both the CONFIG_TASKS_RCU and CONFIG_TASKS_RUDE_RCU options are broken when RCU_TINY is enabled as well, as some functions are missing a declaration. In file included from kernel/rcu/update.c:649: kernel/rcu/tasks.h:1271:21: error: no previous prototype for 'get_rcu_tasks_rude_gp_kthread' [-Werror=missing-prototypes] 1271 | struct task_struct *get_rcu_tasks_rude_gp_kthread(void) | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~ kernel/rcu/rcuscale.c:330:27: error: 'get_rcu_tasks_rude_gp_kthread' undeclared here (not in a function); did you mean 'get_rcu_tasks_trace_gp_kthread'? 330 | .rso_gp_kthread = get_rcu_tasks_rude_gp_kthread, | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | get_rcu_tasks_trace_gp_kthread In file included from /home/arnd/arm-soc/kernel/rcu/update.c:649: kernel/rcu/tasks.h:1113:21: error: no previous prototype for 'get_rcu_tasks_gp_kthread' [-Werror=missing-prototypes] 1113 | struct task_struct *get_rcu_tasks_gp_kthread(void) | ^~~~~~~~~~~~~~~~~~~~~~~~ Also, building with CONFIG_TASKS_RUDE_RCU but not CONFIG_TASKS_RCU is broken because of some missing stub functions: kernel/rcu/rcuscale.c:322:27: error: 'tasks_scale_read_lock' undeclared here (not in a function); did you mean 'srcu_scale_read_lock'? 322 | .readlock = tasks_scale_read_lock, | ^~~~~~~~~~~~~~~~~~~~~ | srcu_scale_read_lock kernel/rcu/rcuscale.c:323:27: error: 'tasks_scale_read_unlock' undeclared here (not in a function); did you mean 'srcu_scale_read_unlock'? 323 | .readunlock = tasks_scale_read_unlock, | ^~~~~~~~~~~~~~~~~~~~~~~ | srcu_scale_read_unlock Move the declarations outside of the RCU_TINY #ifdef and duplicate the shared stub functions to address all of the above. 
Fixes: 88d7ff818f0ce ("rcuscale: Add RCU Tasks Rude testing") Fixes: 755f1c5eb416b ("rcuscale: Measure RCU Tasks Trace grace-period kthread CPU time") Signed-off-by: Arnd Bergmann Signed-off-by: Paul E. McKenney --- kernel/rcu/rcu.h | 14 ++++++++------ kernel/rcu/rcuscale.c | 13 +++++++++++-- 2 files changed, 19 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 9829d8161b21..5befd8780dcd 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -505,18 +505,20 @@ void rcu_async_relax(void); void rcupdate_announce_bootup_oddness(void); #ifdef CONFIG_TASKS_RCU_GENERIC void show_rcu_tasks_gp_kthreads(void); -# ifdef CONFIG_TASKS_RCU -struct task_struct *get_rcu_tasks_gp_kthread(void); -# endif // # ifdef CONFIG_TASKS_RCU -# ifdef CONFIG_TASKS_RUDE_RCU -struct task_struct *get_rcu_tasks_rude_gp_kthread(void); -# endif // # ifdef CONFIG_TASKS_RUDE_RCU #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ static inline void show_rcu_tasks_gp_kthreads(void) {} #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ void rcu_request_urgent_qs_task(struct task_struct *t); #endif /* #else #ifdef CONFIG_TINY_RCU */ +#ifdef CONFIG_TASKS_RCU +struct task_struct *get_rcu_tasks_gp_kthread(void); +#endif // # ifdef CONFIG_TASKS_RCU + +#ifdef CONFIG_TASKS_RUDE_RCU +struct task_struct *get_rcu_tasks_rude_gp_kthread(void); +#endif // # ifdef CONFIG_TASKS_RUDE_RCU + #define RCU_SCHEDULER_INACTIVE 0 #define RCU_SCHEDULER_INIT 1 #define RCU_SCHEDULER_RUNNING 2 diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index 5ce3b4e7ce71..a0eae1900708 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -316,11 +316,20 @@ static struct rcu_scale_ops tasks_ops = { * Definitions for RCU-tasks-rude scalability testing. */ +static int tasks_rude_scale_read_lock(void) +{ + return 0; +} + +static void tasks_rude_scale_read_unlock(int idx) +{ +} + static struct rcu_scale_ops tasks_rude_ops = { .ptype = RCU_TASKS_RUDE_FLAVOR, .init = rcu_sync_scale_init, - .readlock = tasks_scale_read_lock, - .readunlock = tasks_scale_read_unlock, + .readlock = tasks_rude_scale_read_lock, + .readunlock = tasks_rude_scale_read_unlock, .get_gp_seq = rcu_no_completed, .gp_diff = rcu_seq_diff, .async = call_rcu_tasks_rude, -- cgit v1.2.3 From e60c122a1614b4f65b29a7bef9d83b9fd30e937a Mon Sep 17 00:00:00 2001 From: Zqiang Date: Fri, 16 Jun 2023 15:39:26 +0800 Subject: rcuscale: Move rcu_scale_writer() schedule_timeout_uninterruptible() to _idle() The rcuscale.holdoff module parameter can be used to delay the start of rcu_scale_writer() kthread. However, the hung-task timeout will trigger when the timeout specified by rcuscale.holdoff is greater than hung_task_timeout_secs: runqemu kvm nographic slirp qemuparams="-smp 4 -m 2048M" bootparams="rcuscale.shutdown=0 rcuscale.holdoff=300" [ 247.071753] INFO: task rcu_scale_write:59 blocked for more than 122 seconds. [ 247.072529] Not tainted 6.4.0-rc1-00134-gb9ed6de8d4ff #7 [ 247.073400] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [ 247.074331] task:rcu_scale_write state:D stack:30144 pid:59 ppid:2 flags:0x00004000 [ 247.075346] Call Trace: [ 247.075660] [ 247.075965] __schedule+0x635/0x1280 [ 247.076448] ? __pfx___schedule+0x10/0x10 [ 247.076967] ? schedule_timeout+0x2dc/0x4d0 [ 247.077471] ? __pfx_lock_release+0x10/0x10 [ 247.078018] ? enqueue_timer+0xe2/0x220 [ 247.078522] schedule+0x84/0x120 [ 247.078957] schedule_timeout+0x2e1/0x4d0 [ 247.079447] ? __pfx_schedule_timeout+0x10/0x10 [ 247.080032] ? 
__pfx_rcu_scale_writer+0x10/0x10 [ 247.080591] ? __pfx_process_timeout+0x10/0x10 [ 247.081163] ? __pfx_sched_set_fifo_low+0x10/0x10 [ 247.081760] ? __pfx_rcu_scale_writer+0x10/0x10 [ 247.082287] rcu_scale_writer+0x6b1/0x7f0 [ 247.082773] ? mark_held_locks+0x29/0xa0 [ 247.083252] ? __pfx_rcu_scale_writer+0x10/0x10 [ 247.083865] ? __pfx_rcu_scale_writer+0x10/0x10 [ 247.084412] kthread+0x179/0x1c0 [ 247.084759] ? __pfx_kthread+0x10/0x10 [ 247.085098] ret_from_fork+0x2c/0x50 [ 247.085433] This commit therefore replaces schedule_timeout_uninterruptible() with schedule_timeout_idle(). Signed-off-by: Zqiang Signed-off-by: Paul E. McKenney --- kernel/rcu/rcuscale.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c index a0eae1900708..ffdb30495e3c 100644 --- a/kernel/rcu/rcuscale.c +++ b/kernel/rcu/rcuscale.c @@ -469,7 +469,7 @@ rcu_scale_writer(void *arg) sched_set_fifo_low(current); if (holdoff) - schedule_timeout_uninterruptible(holdoff * HZ); + schedule_timeout_idle(holdoff * HZ); /* * Wait until rcu_end_inkernel_boot() is called for normal GP tests -- cgit v1.2.3 From 9cafe974cf46425c18cf78971400ae12e416e166 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 15 May 2023 09:18:20 -0700 Subject: rcutorture: Dump grace-period state upon rtort_pipe_count incidents The rtort_pipe_count WARN() indicates that grace periods were unable to invoke all callbacks during a stutter_wait() interval. But it is sometimes helpful to have a bit more information as to why. This commit therefore invokes show_rcu_gp_kthreads() immediately before that WARN() in order to dump out some relevant information. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 147551c23baf..d291a1438c30 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1581,6 +1581,7 @@ rcu_torture_writer(void *arg) rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) { tracing_off(); + show_rcu_gp_kthreads(); WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); rcu_ftrace_dump(DUMP_ALL); } -- cgit v1.2.3 From e2a0b786c50cf4f0c3abfcd05888ae583c98cf25 Mon Sep 17 00:00:00 2001 From: Connor O'Brien Date: Fri, 2 Jun 2023 22:02:09 +0000 Subject: torture: Support randomized shuffling for proxy exec testing Currently shuffling sets the same cpu affinities for all tasks, which makes us less likely to hit paths involving migrating blocked tasks onto a cpu where they can't run. This patch adds an element of randomness to allow affinities of different writer tasks to diverge. This has helped uncover issues in testing with Proxy Execution Cc: "Paul E. McKenney" Cc: Josh Triplett Cc: Joel Fernandes Cc: Juri Lelli Cc: Valentin Schneider Cc: Dietmar Eggemann Cc: kernel-team@android.com Signed-off-by: Connor O'Brien Signed-off-by: John Stultz Acked-by: Davidlohr Bueso Signed-off-by: Paul E. 
McKenney --- kernel/torture.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/torture.c b/kernel/torture.c index 1a0519b836ac..8be83fdc6be1 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -54,6 +54,9 @@ module_param(verbose_sleep_frequency, int, 0444); static int verbose_sleep_duration = 1; module_param(verbose_sleep_duration, int, 0444); +static int random_shuffle; +module_param(random_shuffle, int, 0444); + static char *torture_type; static int verbose; @@ -518,6 +521,7 @@ static void torture_shuffle_task_unregister_all(void) */ static void torture_shuffle_tasks(void) { + DEFINE_TORTURE_RANDOM(rand); struct shuffle_task *stp; cpumask_setall(shuffle_tmp_mask); @@ -537,8 +541,10 @@ static void torture_shuffle_tasks(void) cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask); mutex_lock(&shuffle_task_mutex); - list_for_each_entry(stp, &shuffle_task_list, st_l) - set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask); + list_for_each_entry(stp, &shuffle_task_list, st_l) { + if (!random_shuffle || torture_random(&rand) & 0x1) + set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask); + } mutex_unlock(&shuffle_task_mutex); cpus_read_unlock(); -- cgit v1.2.3 From c924bf5a43e47d5591d527d39b474f8ca9e63c0e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 23 May 2023 02:22:10 -0700 Subject: rcu: Clarify rcu_is_watching() kernel-doc comment Make it clear that this function always returns either true or false without other planned failure modes. Reported-by: Masami Hiramatsu Signed-off-by: Paul E. McKenney Reviewed-by: Joel Fernandes (Google) --- kernel/rcu/tree.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 1449cb69a0e0..f788981272f8 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -677,12 +677,16 @@ static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp) } /** - * rcu_is_watching - see if RCU thinks that the current CPU is not idle + * rcu_is_watching - RCU read-side critical sections permitted on current CPU? * - * Return true if RCU is watching the running CPU, which means that this - * CPU can safely enter RCU read-side critical sections. In other words, - * if the current CPU is not in its idle loop or is in an interrupt or - * NMI handler, return true. + * Return @true if RCU is watching the running CPU and @false otherwise. + * An @true return means that this CPU can safely enter RCU read-side + * critical sections. + * + * Although calls to rcu_is_watching() from most parts of the kernel + * will return @true, there are important exceptions. For example, if the + * current CPU is deep within its idle loop, in kernel entry/exit code, + * or offline, rcu_is_watching() will return @false. * * Make notrace because it can be called by the internal functions of * ftrace, and making this notrace removes unnecessary recursion calls. -- cgit v1.2.3 From cb88f7f51bc6f351a529ff61d0a706c6eae1417a Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 28 Jul 2023 13:33:02 -0700 Subject: rcu-tasks: Permit use of debug-objects with RCU Tasks flavors Currently, cblist_init_generic() holds a raw spinlock when invoking INIT_WORK(). This fails in kernels built with CONFIG_DEBUG_OBJECTS=y due to memory allocation being forbidden while holding a raw spinlock. 
But the only reason for holding the raw spinlock is to synchronize with early boot calls to call_rcu_tasks(), call_rcu_tasks_rude(), and, last but not least, call_rcu_tasks_trace(). These calls also invoke cblist_init_generic() in order to support early boot queueing of callbacks. Except that there are no early boot calls to any of these three functions, and the BPF guys confirm that they have no plans to add any such calls. This commit therefore removes the synchronization and adds a WARN_ON_ONCE() to catch the case of now-prohibited early boot RCU Tasks callback queueing. If early boot queueing is needed, an "initialized" flag may be added to the rcu_tasks structure. Then queueing a callback before this flag is set would initialize the callback list (if needed) and queue the callback. The decision as to where to queue the callback given the possibility of non-zero boot CPUs is left as an exercise for the reader. Reported-by: Jakub Kicinski Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 291d536b1789..a4cd4ea2402c 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -236,7 +236,7 @@ static const char *tasks_gp_state_getname(struct rcu_tasks *rtp) #endif /* #ifndef CONFIG_TINY_RCU */ // Initialize per-CPU callback lists for the specified flavor of -// Tasks RCU. +// Tasks RCU. Do not enqueue callbacks before this function is invoked. static void cblist_init_generic(struct rcu_tasks *rtp) { int cpu; @@ -244,7 +244,6 @@ static void cblist_init_generic(struct rcu_tasks *rtp) int lim; int shift; - raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); if (rcu_task_enqueue_lim < 0) { rcu_task_enqueue_lim = 1; rcu_task_cb_adjust = true; @@ -267,17 +266,16 @@ static void cblist_init_generic(struct rcu_tasks *rtp) WARN_ON_ONCE(!rtpcp); if (cpu) raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock)); - raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. + local_irq_save(flags); // serialize initialization if (rcu_segcblist_empty(&rtpcp->cblist)) rcu_segcblist_init(&rtpcp->cblist); + local_irq_restore(flags); INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq); rtpcp->cpu = cpu; rtpcp->rtpp = rtp; if (!rtpcp->rtp_blkd_tasks.next) INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks); - raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled. } - raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust); @@ -351,11 +349,9 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids) needadjust = true; // Defer adjustment to avoid deadlock. } - if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) { - raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled. - cblist_init_generic(rtp); - raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. - } + // Queuing callbacks before initialization not yet supported. + if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist))) + rcu_segcblist_init(&rtpcp->cblist); needwake = (func == wakeme_after_rcu) || (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim); if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) { if (rtp->lazy_jiffies) mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp)); else -- cgit v1.2.3 From 9d0cce2bc3874dd03f7471ec00ae4acb5a77e43c Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Tue, 1 Aug 2023 12:11:18 -0700 Subject: rcu-tasks: Fix boot-time RCU tasks debug-only deadlock In kernels built with CONFIG_PROVE_RCU=y (for example, lockdep kernels), the following sequence of events can occur: o rcu_init_tasks_generic() is invoked just before init is spawned. It invokes rcu_spawn_tasks_kthread() and friends. o rcu_spawn_tasks_kthread() invokes rcu_spawn_tasks_kthread_generic(), which uses kthread_run() to create the needed kthread. o Control returns to rcu_init_tasks_generic(), which, because this is a CONFIG_PROVE_RCU=y kernel, invokes the version of the rcu_tasks_initiate_self_tests() function that actually does something, including invoking synchronize_rcu_tasks(), which in turn invokes synchronize_rcu_tasks_generic(). o synchronize_rcu_tasks_generic() sees that the ->kthread_ptr is still NULL, because the newly spawned kthread has not yet started. o The new kthread starts, preempting synchronize_rcu_tasks_generic() just after its check. This kthread invokes rcu_tasks_one_gp(), which acquires ->tasks_gp_mutex, and, seeing no work, blocks in rcuwait_wait_event(). Note that this step requires either a preemptible kernel or a fault-injection-style sleep at the beginning of mutex_lock(). o synchronize_rcu_tasks_generic() resumes and invokes rcu_tasks_one_gp(). o rcu_tasks_one_gp() attempts to acquire ->tasks_gp_mutex, which is still held by the newly spawned kthread's rcu_tasks_one_gp() function. Deadlock. Because the only reason for ->tasks_gp_mutex is to handle pre-kthread synchronous grace periods, this commit avoids this deadlock by having rcu_tasks_one_gp() momentarily release ->tasks_gp_mutex while invoking rcuwait_wait_event(). This allows the call to rcu_tasks_one_gp() from synchronize_rcu_tasks_generic() to proceed. Note that it is not necessary to release the mutex anywhere else in rcu_tasks_one_gp() because rcuwait_wait_event() is the only function that can block indefinitely. Reported-by: Guenter Roeck Reported-by: Roy Hopkins Reported-by: Peter Zijlstra Signed-off-by: Paul E. McKenney Tested-by: Roy Hopkins --- kernel/rcu/tasks.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index a4cd4ea2402c..5572f4a22996 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -570,10 +570,12 @@ static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot) if (unlikely(midboot)) { needgpcb = 0x2; } else { + mutex_unlock(&rtp->tasks_gp_mutex); set_tasks_gp_state(rtp, RTGS_WAIT_CBS); rcuwait_wait_event(&rtp->cbs_wait, (needgpcb = rcu_tasks_need_gpcb(rtp)), TASK_IDLE); + mutex_lock(&rtp->tasks_gp_mutex); } if (needgpcb & 0x2) { -- cgit v1.2.3 From 67d5404d274376890d6d095a10e6565854918f8e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 19 Jul 2023 15:50:07 -0700 Subject: torture: Add a kthread-creation callback to _torture_create_kthread() This commit adds a kthread-creation callback to the _torture_create_kthread() function, which allows callers of a new torture_create_kthread_cb() macro to specify a function to be invoked after the kthread is created but before it is awakened for the first time. Signed-off-by: Paul E. 
McKenney Cc: Dietmar Eggemann Cc: Josh Triplett Cc: Juri Lelli Cc: Valentin Schneider Cc: Dietmar Eggemann Cc: kernel-team@android.com Reviewed-by: Joel Fernandes (Google) Acked-by: John Stultz --- kernel/torture.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/torture.c b/kernel/torture.c index 8be83fdc6be1..b88a1a86d9da 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -932,7 +932,7 @@ EXPORT_SYMBOL_GPL(torture_kthread_stopping); * it starts, you will need to open-code your own. */ int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m, - char *f, struct task_struct **tp) + char *f, struct task_struct **tp, void (*cbf)(struct task_struct *tp)) { int ret = 0; @@ -944,6 +944,10 @@ int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m, *tp = NULL; return ret; } + + if (cbf) + cbf(*tp); + wake_up_process(*tp); // Process is sleeping, so ordering provided. torture_shuffle_task_register(*tp); return ret; -- cgit v1.2.3 From 5d248bb39fe1388943acb6510f8f48fa5570e0ec Mon Sep 17 00:00:00 2001 From: Dietmar Eggemann Date: Fri, 2 Jun 2023 22:02:10 +0000 Subject: torture: Add lock_torture writer_fifo module parameter This commit adds a module parameter that causes the locktorture writer to run at real-time priority. To use it: insmod /lib/modules/torture.ko random_shuffle=1 insmod /lib/modules/locktorture.ko torture_type=mutex_lock rt_boost=1 rt_boost_factor=50 nested_locks=3 writer_fifo=1 ^^^^^^^^^^^^^ A predecessor to this patch has been helpful to uncover issues with the proxy-execution series. [ paulmck: Remove locktorture-specific code from kernel/torture.c. ] Cc: "Paul E. McKenney" Cc: Josh Triplett Cc: Joel Fernandes Cc: Juri Lelli Cc: Valentin Schneider Cc: kernel-team@android.com Signed-off-by: Dietmar Eggemann [jstultz: Include header change to build, reword commit message] Signed-off-by: John Stultz Acked-by: Davidlohr Bueso Signed-off-by: Paul E. McKenney --- kernel/locking/locktorture.c | 12 +++++++----- kernel/torture.c | 3 ++- 2 files changed, 9 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index 949d3deae506..270c7f80ce84 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -45,6 +45,7 @@ torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable"); torture_param(int, rt_boost, 2, "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types."); torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens."); +torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority"); torture_param(int, verbose, 1, "Enable verbose debugging printk()s"); torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)"); /* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" 
errors */ @@ -809,7 +810,8 @@ static int lock_torture_writer(void *arg) bool skip_main_lock; VERBOSE_TOROUT_STRING("lock_torture_writer task started"); - set_user_nice(current, MAX_NICE); + if (!rt_task(current)) + set_user_nice(current, MAX_NICE); do { if ((torture_random(&rand) & 0xfffff) == 0) @@ -1015,8 +1017,7 @@ static void lock_torture_cleanup(void) if (writer_tasks) { for (i = 0; i < cxt.nrealwriters_stress; i++) - torture_stop_kthread(lock_torture_writer, - writer_tasks[i]); + torture_stop_kthread(lock_torture_writer, writer_tasks[i]); kfree(writer_tasks); writer_tasks = NULL; } @@ -1244,8 +1245,9 @@ static int __init lock_torture_init(void) goto create_reader; /* Create writer. */ - firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i], - writer_tasks[i]); + firsterr = torture_create_kthread_cb(lock_torture_writer, &cxt.lwsa[i], + writer_tasks[i], + writer_fifo ? sched_set_fifo : NULL); if (torture_init_error(firsterr)) goto unwind; diff --git a/kernel/torture.c b/kernel/torture.c index b88a1a86d9da..a1ac493488e2 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "rcu/rcu.h" MODULE_LICENSE("GPL"); @@ -734,7 +735,7 @@ bool stutter_wait(const char *title) cond_resched_tasks_rcu_qs(); spt = READ_ONCE(stutter_pause_test); for (; spt; spt = READ_ONCE(stutter_pause_test)) { - if (!ret) { + if (!ret && !rt_task(current)) { sched_set_normal(current, MAX_NICE); ret = true; } -- cgit v1.2.3 From 872948c665f50a1446e8a34b1ed57bb0b3a9ca4a Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 26 Jun 2023 14:59:29 -0700 Subject: torture: Make torture_hrtimeout_*() use TASK_IDLE Given that it is expected that more code will use torture_hrtimeout_*(), including for longer timeouts, make it use TASK_IDLE instead of TASK_UNINTERRUPTIBLE. Signed-off-by: Paul E. McKenney --- kernel/torture.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/torture.c b/kernel/torture.c index a1ac493488e2..68c831a43a58 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -93,7 +93,7 @@ int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_s if (trsp) hto += (torture_random(trsp) >> 3) % fuzzt_ns; - set_current_state(TASK_UNINTERRUPTIBLE); + set_current_state(TASK_IDLE); return schedule_hrtimeout(&hto, HRTIMER_MODE_REL); } EXPORT_SYMBOL_GPL(torture_hrtimeout_ns); -- cgit v1.2.3 From 3f0c06e1cba6207703733474bc7b55292bc86c6a Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 26 Jun 2023 15:19:56 -0700 Subject: torture: Move torture_onoff() timeouts to hrtimers In order to gain better race coverage, move the CPU-hotplug-related timed waits in torture_onoff() to torture_hrtimeout_jiffies(). Signed-off-by: Paul E. 
From dea81dcfd3497e75eb23e7543434f88c34289d31 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Mon, 26 Jun 2023 16:27:02 -0700
Subject: torture: Move torture_shuffle() timeouts to hrtimers

In order to gain better race coverage, move the CPU-migration timed
waits in torture_shuffle() to torture_hrtimeout_jiffies().

Signed-off-by: Paul E. McKenney
---
 kernel/torture.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/torture.c b/kernel/torture.c
index 2c441d5a1bdd..c5311154bc4d 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -557,9 +557,11 @@ static void torture_shuffle_tasks(void)
 */
 static int torture_shuffle(void *arg)
 {
+    DEFINE_TORTURE_RANDOM(rand);
+
     VERBOSE_TOROUT_STRING("torture_shuffle task started");
     do {
-        schedule_timeout_interruptible(shuffle_interval);
+        torture_hrtimeout_jiffies(shuffle_interval, &rand);
         torture_shuffle_tasks();
         torture_shutdown_absorb("torture_shuffle");
     } while (!torture_must_stop());
-- cgit v1.2.3

From 10af43671e8bf4ac153c4991a17cdf57bc6d2cfe Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Mon, 26 Jun 2023 16:52:35 -0700
Subject: torture: Move stutter_wait() timeouts to hrtimers

In order to gain better race coverage, move the test start/stop waits
in stutter_wait() to torture_hrtimeout_jiffies().

Signed-off-by: Paul E. McKenney
---
 kernel/torture.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/torture.c b/kernel/torture.c
index c5311154bc4d..984651de4b55 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -742,7 +742,7 @@ bool stutter_wait(const char *title)
             ret = true;
         }
         if (spt == 1) {
-            schedule_timeout_interruptible(1);
+            torture_hrtimeout_jiffies(1, NULL);
         } else if (spt == 2) {
             while (READ_ONCE(stutter_pause_test)) {
                 if (!(i++ & 0xffff))
@@ -750,7 +750,7 @@ bool stutter_wait(const char *title)
                 cond_resched();
             }
         } else {
-            schedule_timeout_interruptible(round_jiffies_relative(HZ));
+            torture_hrtimeout_jiffies(round_jiffies_relative(HZ), NULL);
         }
         torture_shutdown_absorb(title);
     }
-- cgit v1.2.3

From 6cab60ceb1d32b795ae22fea5719fb6150e4cda9 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Wed, 12 Jul 2023 14:56:51 -0700
Subject: torture: Stop right-shifting torture_random() return values

Now that torture_random() uses swahw32(), its callers no longer see
not-so-random low-order bits, as these are now swapped up into the upper
16 bits of the torture_random() function's return value. This commit
therefore removes the right-shifting of torture_random() return values.

Signed-off-by: Paul E. McKenney
---
 kernel/torture.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/torture.c b/kernel/torture.c
index 984651de4b55..b28b05bbef02 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -92,7 +92,7 @@ int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_s
     ktime_t hto = baset_ns;

     if (trsp)
-        hto += (torture_random(trsp) >> 3) % fuzzt_ns;
+        hto += torture_random(trsp) % fuzzt_ns;
     set_current_state(TASK_IDLE);
     return schedule_hrtimeout(&hto, HRTIMER_MODE_REL);
 }
@@ -362,7 +362,7 @@ torture_onoff(void *arg)
             torture_hrtimeout_jiffies(HZ / 10, &rand);
             continue;
         }
-        cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
+        cpu = torture_random(&rand) % (maxcpu + 1);
         if (!torture_offline(cpu,
                              &n_offline_attempts, &n_offline_successes,
                              &sum_offline, &min_offline, &max_offline))
-- cgit v1.2.3
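The reason the right shifts can go away: swahw32() swaps the two 16-bit halves of the generator's 32-bit output, so the previously weak low-order bits end up in the high half and callers can use the result directly, even modulo a small number. Here is a standalone sketch of the idea; swap_halfwords() and weak_lcg() are illustrative stand-ins, not the kernel's swahw32() or torture_random().

#include <stdint.h>
#include <stdio.h>

/* Swap the 16-bit halves of a 32-bit value, as the kernel's swahw32() does. */
static uint32_t swap_halfwords(uint32_t x)
{
    return (x << 16) | (x >> 16);
}

/* Toy linear congruential generator; like many LCGs, its low-order bits
 * are far less random than its high-order bits. */
static uint32_t weak_lcg(uint32_t *state)
{
    *state = *state * 1664525u + 1013904223u;
    return *state;
}

int main(void)
{
    uint32_t state = 12345;
    int i;

    for (i = 0; i < 8; i++) {
        uint32_t raw = weak_lcg(&state);
        uint32_t mixed = swap_halfwords(raw);

        /* Old callers right-shifted "raw" before taking a modulus;
         * after the half-word swap the strong bits are already low. */
        printf("raw %% 4 = %u, mixed %% 4 = %u\n",
               (unsigned int)(raw % 4), (unsigned int)(mixed % 4));
    }
    return 0;
}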
From bc19e86e285f679d39e1671e72e2f02ac04de999 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Wed, 12 Jul 2023 16:16:37 -0700
Subject: rcutorture: Stop right-shifting torture_random() return values

Now that torture_random() uses swahw32(), its callers no longer see
not-so-random low-order bits, as these are now swapped up into the upper
16 bits of the torture_random() function's return value. This commit
therefore removes the right-shifting of torture_random() return values.

Signed-off-by: Paul E. McKenney
---
 kernel/rcu/rcutorture.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'kernel')

diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index d291a1438c30..ade42d6a9d9b 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1877,7 +1877,7 @@ static int rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
 {
     int mask = rcutorture_extend_mask_max();
-    unsigned long randmask1 = torture_random(trsp) >> 8;
+    unsigned long randmask1 = torture_random(trsp);
     unsigned long randmask2 = randmask1 >> 3;
     unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
     unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
@@ -1936,7 +1936,7 @@ rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
     if (!((mask - 1) & mask))
         return rtrsp;  /* Current RCU reader not extendable. */
     /* Bias towards larger numbers of loops. */
-    i = (torture_random(trsp) >> 3);
+    i = torture_random(trsp);
     i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
     for (j = 0; j < i; j++) {
         mask = rcutorture_extend_mask(*readstate, trsp);
@@ -2137,7 +2137,7 @@ static int rcu_nocb_toggle(void *arg)
         toggle_fuzz = NSEC_PER_USEC;
     do {
         r = torture_random(&rand);
-        cpu = (r >> 4) % (maxcpu + 1);
+        cpu = (r >> 1) % (maxcpu + 1);
         if (r & 0x1) {
             rcu_nocb_cpu_offload(cpu);
             atomic_long_inc(&n_nocb_offload);
-- cgit v1.2.3

From 343640cb5b4ed349b3656ee4b100b36e2ae7e2da Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Mon, 26 Jun 2023 18:37:05 -0700
Subject: rcu: Mark __rcu_irq_enter_check_tick() ->rcu_urgent_qs load

The rcu_request_urgent_qs_task() function does a cross-CPU store to
->rcu_urgent_qs, so this commit therefore marks the load in
__rcu_irq_enter_check_tick() with READ_ONCE().

Signed-off-by: Paul E. McKenney
Reviewed-by: Joel Fernandes (Google)
---
 kernel/rcu/tree.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f788981272f8..cb1caefa8bd0 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -632,7 +632,7 @@ void __rcu_irq_enter_check_tick(void)
     // prevents self-deadlock. So we can safely recheck under the lock.
     // Note that the nohz_full state currently cannot change.
     raw_spin_lock_rcu_node(rdp->mynode);
-    if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
+    if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
         // A nohz_full CPU is in the kernel and RCU needs a
         // quiescent state. Turn on the tick!
         WRITE_ONCE(rdp->rcu_forced_tick, true);
-- cgit v1.2.3
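The pattern completed here is a marked cross-CPU store paired with a marked load: WRITE_ONCE() on the writer side and now READ_ONCE() in __rcu_irq_enter_check_tick(), which keeps the compiler from tearing, fusing, or re-loading the access. A rough userspace analogue using C11 relaxed atomics follows; the thread and variable names are made up for illustration and are not the kernel's.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/*
 * The remote thread's store (WRITE_ONCE() in the kernel) pairs with the
 * local marked load (READ_ONCE()).  C11 relaxed atomics give the same
 * "single, untorn access" guarantee without imposing extra ordering.
 */
static _Atomic int urgent_qs;

static void *request_urgent_qs(void *arg)
{
    (void)arg;
    atomic_store_explicit(&urgent_qs, 1, memory_order_relaxed);
    return NULL;
}

int main(void)
{
    pthread_t tid;

    if (pthread_create(&tid, NULL, request_urgent_qs, NULL))
        return 1;
    pthread_join(tid, NULL);

    if (atomic_load_explicit(&urgent_qs, memory_order_relaxed))
        printf("urgent quiescent state requested\n");
    return 0;
}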
From 3292ba0229dbe5f3e79b78b230354ada2888dc29 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Wed, 5 Jul 2023 16:20:58 -0700
Subject: rcu: Make the rcu_nocb_poll boot parameter usable via boot config
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The rcu_nocb_poll kernel boot parameter is defined via early_param(),
whose parsing functions are invoked from parse_early_param() which is in
turn invoked by setup_arch(), which is very early indeed.  It is invoked
so early that the console output timestamps read 0.000000, in other
words, before time begins.

This use of early_param() means that the rcu_nocb_poll kernel boot
parameter cannot usefully be embedded into the kernel image.  Yes, you
can embed it, but setup_boot_config() is invoked from start_kernel()
too late for it to be parsed.

But it makes no sense to parse this parameter so early.  After all, it
cannot do anything until the rcuog kthreads are created, which is long
after rcu_init() time, let alone setup_boot_config() time.

This commit therefore switches the rcu_nocb_poll kernel boot parameter
from early_param() to __setup(), which allows boot-config parsing of
this parameter, in turn allowing it to be embedded into the kernel image.

Signed-off-by: Paul E. McKenney
Reviewed-by: Joel Fernandes (Google)
---
 kernel/rcu/tree_nocb.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 43229d2b0c44..5598212d1f27 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -77,9 +77,9 @@ __setup("rcu_nocbs", rcu_nocb_setup);
 static int __init parse_rcu_nocb_poll(char *arg)
 {
     rcu_nocb_poll = true;
-    return 0;
+    return 1;
 }
-early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
+__setup("rcu_nocb_poll", parse_rcu_nocb_poll);

 /*
  * Don't bother bypassing ->cblist if the call_rcu() rate is low.
-- cgit v1.2.3
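For reference, the return-value change in the hunk above follows from the differing handler conventions: an early_param() handler returns 0 on success, while a __setup() handler returns 1 to indicate that it consumed the argument. Below is a kernel-style sketch of a flag parsed via __setup(), using a made-up parameter name; it is a fragment for illustration, not a complete, buildable module.

#include <linux/init.h>
#include <linux/types.h>

static bool my_poll_flag;   /* would be consulted later by the subsystem */

/*
 * __setup() handlers return 1 to say "argument consumed"; returning 0
 * would cause the string to be handed to init as a command-line argument.
 * An early_param() handler, by contrast, returns 0 on success.
 */
static int __init parse_my_poll_flag(char *arg)
{
    my_poll_flag = true;
    return 1;
}
__setup("my_poll_flag", parse_my_poll_flag);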