diff --git a/include/linux/sched.h b/include/linux/sched.h
index 431cb079f6a9..cf3d130319cb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -758,10 +758,8 @@ struct task_struct {
 #endif
 	unsigned int			__state;
 
-#ifdef CONFIG_PREEMPT_RT
 	/* saved state for "spinlock sleepers" */
-	unsigned int			saved_state;
-#endif
+	/* moved to ANDROID_KABI_USE(1, unsigned int saved_state) */
 
 	/*
 	 * This begins the randomizable portion of task_struct. Only
@@ -1548,7 +1546,7 @@ struct task_struct {
 	 */
 	union rv_task_monitor		rv[RV_PER_TASK_MONITORS];
 #endif
-	ANDROID_KABI_RESERVE(1);
+	ANDROID_KABI_USE(1, unsigned int saved_state);
 	ANDROID_KABI_RESERVE(2);
 	ANDROID_KABI_RESERVE(3);
 	ANDROID_KABI_RESERVE(4);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c033651150cc..0f1a8b497672 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2255,17 +2255,15 @@ int __task_state_match(struct task_struct *p, unsigned int state)
 	if (READ_ONCE(p->__state) & state)
 		return 1;
 
-#ifdef CONFIG_PREEMPT_RT
 	if (READ_ONCE(p->saved_state) & state)
 		return -1;
-#endif
+
 	return 0;
 }
 
 static __always_inline
 int task_state_match(struct task_struct *p, unsigned int state)
 {
-#ifdef CONFIG_PREEMPT_RT
 	int match;
 
 	/*
@@ -2277,9 +2275,6 @@ int task_state_match(struct task_struct *p, unsigned int state)
 	raw_spin_unlock_irq(&p->pi_lock);
 
 	return match;
-#else
-	return __task_state_match(p, state);
-#endif
 }
 
 /*
@@ -4068,7 +4063,6 @@ bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
 
 	*success = !!(match = __task_state_match(p, state));
 
-#ifdef CONFIG_PREEMPT_RT
 	/*
 	 * Saved state preserves the task state across blocking on
 	 * an RT lock. If the state matches, set p::saved_state to
@@ -4084,7 +4078,7 @@ bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
 	 */
 	if (match < 0)
 		p->saved_state = TASK_RUNNING;
-#endif
+
 	return match > 0;
 }
 