[PATCH v13 04/17] preempt: Introduce __preempt_count_{sub, add}_return()

From: Lyude Paul

Introduce __preempt_count_{sub, add}_return(), variants of the
preempt_count operations that return the new value of preempt_count().
This can avoid a read of preempt_count() after changing preempt_count()
on certain architectures.

Signed-off-by: Boqun Feng
---
V10:
* Add commit message I forgot
* Rebase against latest pcpu_hot changes
V11:
* Remove CONFIG_PROFILE_ALL_BRANCHES workaround from
  __preempt_count_add_return()

 arch/arm64/include/asm/preempt.h | 18 ++++++++++++++++++
 arch/s390/include/asm/preempt.h  | 10 ++++++++++
 arch/x86/include/asm/preempt.h   | 10 ++++++++++
 include/asm-generic/preempt.h    | 14 ++++++++++++++
 4 files changed, 52 insertions(+)

diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 932ea4b620428..0dd8221d1bef7 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -55,6 +55,24 @@ static inline void __preempt_count_sub(int val)
 	WRITE_ONCE(current_thread_info()->preempt.count, pc);
 }
 
+static inline int __preempt_count_add_return(int val)
+{
+	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
+	pc += val;
+	WRITE_ONCE(current_thread_info()->preempt.count, pc);
+
+	return pc;
+}
+
+static inline int __preempt_count_sub_return(int val)
+{
+	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
+	pc -= val;
+	WRITE_ONCE(current_thread_info()->preempt.count, pc);
+
+	return pc;
+}
+
 static inline bool __preempt_count_dec_and_test(void)
 {
 	struct thread_info *ti = current_thread_info();
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index 6ccd033acfe52..5ae366e26c57d 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -98,6 +98,16 @@ static __always_inline bool should_resched(int preempt_offset)
 	return unlikely(READ_ONCE(get_lowcore()->preempt_count) == preempt_offset);
 }
 
+static __always_inline int __preempt_count_add_return(int val)
+{
+	return val + __atomic_add(val, &get_lowcore()->preempt_count);
+}
+
+static __always_inline int __preempt_count_sub_return(int val)
+{
+	return __preempt_count_add_return(-val);
+}
+
 #define init_task_preempt_count(p)	do { } while (0)
 /* Deferred to CPU bringup time */
 #define init_idle_preempt_count(p, cpu)	do { } while (0)
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 578441db09f0b..1220656f3370b 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -85,6 +85,16 @@ static __always_inline void __preempt_count_sub(int val)
 	raw_cpu_add_4(__preempt_count, -val);
 }
 
+static __always_inline int __preempt_count_add_return(int val)
+{
+	return raw_cpu_add_return_4(__preempt_count, val);
+}
+
+static __always_inline int __preempt_count_sub_return(int val)
+{
+	return raw_cpu_add_return_4(__preempt_count, -val);
+}
+
 /*
  * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
  * a decrement which hits zero means we have no preempt_count and should
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 51f8f3881523a..c8683c046615d 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -59,6 +59,20 @@ static __always_inline void __preempt_count_sub(int val)
 	*preempt_count_ptr() -= val;
 }
 
+static __always_inline int __preempt_count_add_return(int val)
+{
+	*preempt_count_ptr() += val;
+
+	return *preempt_count_ptr();
+}
+
+static __always_inline int __preempt_count_sub_return(int val)
+{
+	*preempt_count_ptr() -= val;
+
+	return *preempt_count_ptr();
+}
+
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
 	/*
-- 
2.51.0
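
(A minimal usage sketch, not part of the patch: hypothetical_nesting_enter()
is an invented example showing what the _return variants buy a caller; only
__preempt_count_add_return(), PREEMPT_DISABLE_OFFSET and PREEMPT_MASK come
from the kernel itself.)

	/*
	 * Bump the preemption nesting level and act on the 0 -> 1
	 * transition. The new count is returned by the same per-CPU
	 * operation that performed the update, so no separate
	 * preempt_count() read is needed afterwards.
	 */
	static inline void hypothetical_nesting_enter(void)
	{
		int newcount = __preempt_count_add_return(PREEMPT_DISABLE_OFFSET);

		/* PREEMPT_MASK isolates the preempt-disable nesting bits. */
		if ((newcount & PREEMPT_MASK) == PREEMPT_DISABLE_OFFSET) {
			/* First nesting level on this CPU: one-time work. */
		}
	}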