BACKPORT: FROMGIT: sched/core: Remove ifdeffery for saved_state
In preparation for freezer to also use saved_state, remove the
CONFIG_PREEMPT_RT compilation guard around saved_state.
On the arm64 platform I tested, which did not have CONFIG_PREEMPT_RT enabled,
applying this patch caused no statistically significant performance deviation.
Test methodology:
perf bench sched message -g 40 -l 40
Signed-off-by: Elliot Berman <quic_eberman@quicinc.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit fa14aa2c23d31eb39bc615feb920f28d32d2a87e
https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core)
Bug: 292064955
Bug: 304294320
Change-Id: I9c11ab7ce31ba3b48b304229898d4c7c18a6cb2c
[eberman: Use KABI reservation to preserve CRC/ABI of struct task_struct and
preserve raw_spin_(un)lock instead of new guard(...) syntax in task_state_match]
Signed-off-by: Elliot Berman <quic_eberman@quicinc.com>
(cherry picked from commit 457e65696a)
This commit is contained in:
parent
c8bbfb7e22
commit
8095efce6c
2 changed files with 4 additions and 12 deletions
|
|
@ -758,10 +758,8 @@ struct task_struct {
|
|||
#endif
|
||||
unsigned int __state;
|
||||
|
||||
#ifdef CONFIG_PREEMPT_RT
|
||||
/* saved state for "spinlock sleepers" */
|
||||
unsigned int saved_state;
|
||||
#endif
|
||||
/* moved to ANDROID_KABI_USE(1, unsigned int saved_state) */
|
||||
|
||||
/*
|
||||
* This begins the randomizable portion of task_struct. Only
|
||||
|
|
@ -1548,7 +1546,7 @@ struct task_struct {
|
|||
*/
|
||||
union rv_task_monitor rv[RV_PER_TASK_MONITORS];
|
||||
#endif
|
||||
ANDROID_KABI_RESERVE(1);
|
||||
ANDROID_KABI_USE(1, unsigned int saved_state);
|
||||
ANDROID_KABI_RESERVE(2);
|
||||
ANDROID_KABI_RESERVE(3);
|
||||
ANDROID_KABI_RESERVE(4);
|
||||
|
|
|
|||
|
|
@ -2255,17 +2255,15 @@ int __task_state_match(struct task_struct *p, unsigned int state)
|
|||
if (READ_ONCE(p->__state) & state)
|
||||
return 1;
|
||||
|
||||
#ifdef CONFIG_PREEMPT_RT
|
||||
if (READ_ONCE(p->saved_state) & state)
|
||||
return -1;
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static __always_inline
|
||||
int task_state_match(struct task_struct *p, unsigned int state)
|
||||
{
|
||||
#ifdef CONFIG_PREEMPT_RT
|
||||
int match;
|
||||
|
||||
/*
|
||||
|
|
@ -2277,9 +2275,6 @@ int task_state_match(struct task_struct *p, unsigned int state)
|
|||
raw_spin_unlock_irq(&p->pi_lock);
|
||||
|
||||
return match;
|
||||
#else
|
||||
return __task_state_match(p, state);
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -4068,7 +4063,6 @@ bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
|
|||
|
||||
*success = !!(match = __task_state_match(p, state));
|
||||
|
||||
#ifdef CONFIG_PREEMPT_RT
|
||||
/*
|
||||
* Saved state preserves the task state across blocking on
|
||||
* an RT lock. If the state matches, set p::saved_state to
|
||||
|
|
@ -4084,7 +4078,7 @@ bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
|
|||
*/
|
||||
if (match < 0)
|
||||
p->saved_state = TASK_RUNNING;
|
||||
#endif
|
||||
|
||||
return match > 0;
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue