Revert "Revert "sbitmap: Use single per-bitmap counting to wake up queued tags""

This reverts commit 8ec4245b45.

The reverted commit was preserving the ABI, but that is not needed anymore
at this point in time.

Change-Id: I82776674a83f38800e3144d025631e4256cc53f4
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>

commit 02fb5b0cc5
parent 345103eb06
Author: Greg Kroah-Hartman <gregkh@google.com>
Date:   2023-03-16 18:38:39 +00:00

2 changed files with 43 additions and 107 deletions

diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h

@@ -86,11 +86,6 @@ struct sbitmap {
  * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
  */
 struct sbq_wait_state {
-        /**
-         * @wait_cnt: Number of frees remaining before we wake up.
-         */
-        atomic_t wait_cnt;
-
         /**
          * @wait: Wait queue.
          */
@@ -138,6 +133,17 @@ struct sbitmap_queue {
          * sbitmap_queue_get_shallow()
          */
         unsigned int min_shallow_depth;
+
+        /**
+         * @completion_cnt: Number of bits cleared passed to the
+         * wakeup function.
+         */
+        atomic_t completion_cnt;
+
+        /**
+         * @wakeup_cnt: Number of thread wake ups issued.
+         */
+        atomic_t wakeup_cnt;
 };
 
 /**
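
The two new counters replace the per-waitqueue wait_cnt: completions and
wakeups are now accounted once per bitmap, and their difference is the
number of completions that have not yet been paid out as wakeups. A
minimal user-space sketch of that invariant (hypothetical values, not
kernel code):

#include <assert.h>

int main(void)
{
        unsigned int completion_cnt = 21;       /* bits cleared so far */
        unsigned int wakeup_cnt = 16;           /* wakeups already issued */
        unsigned int wake_batch = 8;

        /* five completions have not yet been converted into wakeups... */
        unsigned int pending = completion_cnt - wakeup_cnt;

        /* ...which is less than a full batch, so no wakeup is due yet */
        assert(pending < wake_batch);
        return 0;
}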

diff --git a/lib/sbitmap.c b/lib/sbitmap.c
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c

@@ -434,6 +434,8 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
         sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
         atomic_set(&sbq->wake_index, 0);
         atomic_set(&sbq->ws_active, 0);
+        atomic_set(&sbq->completion_cnt, 0);
+        atomic_set(&sbq->wakeup_cnt, 0);
 
         sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
         if (!sbq->ws) {
@@ -441,40 +443,21 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
                 return -ENOMEM;
         }
 
-        for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
+        for (i = 0; i < SBQ_WAIT_QUEUES; i++)
                 init_waitqueue_head(&sbq->ws[i].wait);
-                atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
-        }
 
         return 0;
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
 
-static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
-                                                     unsigned int wake_batch)
-{
-        int i;
-
-        if (sbq->wake_batch != wake_batch) {
-                WRITE_ONCE(sbq->wake_batch, wake_batch);
-                /*
-                 * Pairs with the memory barrier in sbitmap_queue_wake_up()
-                 * to ensure that the batch size is updated before the wait
-                 * counts.
-                 */
-                smp_mb();
-                for (i = 0; i < SBQ_WAIT_QUEUES; i++)
-                        atomic_set(&sbq->ws[i].wait_cnt, 1);
-        }
-}
-
 static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
                                             unsigned int depth)
 {
         unsigned int wake_batch;
 
         wake_batch = sbq_calc_wake_batch(sbq, depth);
-        __sbitmap_queue_update_wake_batch(sbq, wake_batch);
+        if (sbq->wake_batch != wake_batch)
+                WRITE_ONCE(sbq->wake_batch, wake_batch);
 }
 
 void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
@@ -488,7 +471,8 @@ void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
 
         wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
                         min_batch, SBQ_WAKE_BATCH);
-        __sbitmap_queue_update_wake_batch(sbq, wake_batch);
+
+        WRITE_ONCE(sbq->wake_batch, wake_batch);
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);
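
For a feel of the batch sizing, the clamp above can be evaluated
standalone. The sketch below assumes the mainline defaults of
SBQ_WAIT_QUEUES == 8 and SBQ_WAKE_BATCH == 8, and a min_batch of 1:

#include <stdio.h>

#define SBQ_WAIT_QUEUES 8       /* assumed mainline default */
#define SBQ_WAKE_BATCH  8       /* assumed mainline default */

static unsigned int clamp_val(unsigned int v, unsigned int lo, unsigned int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        /* per-user depth -> batch size */
        for (unsigned int depth = 1; depth <= 128; depth *= 2)
                printf("depth %3u -> wake_batch %u\n", depth,
                       clamp_val(depth / SBQ_WAIT_QUEUES, 1, SBQ_WAKE_BATCH));
        return 0;
}

Under these assumptions the batch ramps from 1 for shallow maps up to 8,
and stays clamped at 8 for any deeper map.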
@@ -585,7 +569,7 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
         for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
                 struct sbq_wait_state *ws = &sbq->ws[wake_index];
 
-                if (waitqueue_active(&ws->wait) && atomic_read(&ws->wait_cnt)) {
+                if (waitqueue_active(&ws->wait)) {
                         if (wake_index != atomic_read(&sbq->wake_index))
                                 atomic_set(&sbq->wake_index, wake_index);
                         return ws;
@@ -597,83 +581,31 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
         return NULL;
 }
 
-static bool __sbq_wake_up(struct sbitmap_queue *sbq, int *nr)
-{
-        struct sbq_wait_state *ws;
-        unsigned int wake_batch;
-        int wait_cnt, cur, sub;
-        bool ret;
-
-        if (*nr <= 0)
-                return false;
-
-        ws = sbq_wake_ptr(sbq);
-        if (!ws)
-                return false;
-
-        cur = atomic_read(&ws->wait_cnt);
-        do {
-                /*
-                 * For concurrent callers of this, callers should call this
-                 * function again to wakeup a new batch on a different 'ws'.
-                 */
-                if (cur == 0)
-                        return true;
-                sub = min(*nr, cur);
-                wait_cnt = cur - sub;
-        } while (!atomic_try_cmpxchg(&ws->wait_cnt, &cur, wait_cnt));
-
-        /*
-         * If we decremented queue without waiters, retry to avoid lost
-         * wakeups.
-         */
-        if (wait_cnt > 0)
-                return !waitqueue_active(&ws->wait);
-
-        *nr -= sub;
-
-        /*
-         * When wait_cnt == 0, we have to be particularly careful as we are
-         * responsible to reset wait_cnt regardless whether we've actually
-         * woken up anybody. But in case we didn't wakeup anybody, we still
-         * need to retry.
-         */
-        ret = !waitqueue_active(&ws->wait);
-        wake_batch = READ_ONCE(sbq->wake_batch);
-
-        /*
-         * Wake up first in case that concurrent callers decrease wait_cnt
-         * while waitqueue is empty.
-         */
-        wake_up_nr(&ws->wait, wake_batch);
-
-        /*
-         * Pairs with the memory barrier in sbitmap_queue_resize() to
-         * ensure that we see the batch size update before the wait
-         * count is reset.
-         *
-         * Also pairs with the implicit barrier between decrementing wait_cnt
-         * and checking for waitqueue_active() to make sure waitqueue_active()
-         * sees result of the wakeup if atomic_dec_return() has seen the result
-         * of atomic_set().
-         */
-        smp_mb__before_atomic();
-
-        /*
-         * Increase wake_index before updating wait_cnt, otherwise concurrent
-         * callers can see valid wait_cnt in old waitqueue, which can cause
-         * invalid wakeup on the old waitqueue.
-         */
-        sbq_index_atomic_inc(&sbq->wake_index);
-        atomic_set(&ws->wait_cnt, wake_batch);
-
-        return ret || *nr;
-}
-
 void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
 {
-        while (__sbq_wake_up(sbq, &nr))
-                ;
+        unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
+        struct sbq_wait_state *ws = NULL;
+        unsigned int wakeups;
+
+        if (!atomic_read(&sbq->ws_active))
+                return;
+
+        atomic_add(nr, &sbq->completion_cnt);
+        wakeups = atomic_read(&sbq->wakeup_cnt);
+
+        do {
+                if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
+                        return;
+
+                if (!ws) {
+                        ws = sbq_wake_ptr(sbq);
+                        if (!ws)
+                                return;
+                }
+        } while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
+                                &wakeups, wakeups + wake_batch));
+
+        wake_up_nr(&ws->wait, wake_batch);
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
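
The restored sbitmap_queue_wake_up() lets any completer race to claim a
whole batch: each caller adds its completions to completion_cnt, and only
the caller whose cmpxchg advances wakeup_cnt by wake_batch performs the
wake_up_nr(). A runnable user-space model of that claim loop (toy_sbq and
toy_wake_up are invented names; C11 atomics stand in for the kernel's
atomic_t):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_sbq {
        atomic_uint completion_cnt;     /* bits cleared so far */
        atomic_uint wakeup_cnt;         /* wakeups issued so far */
        unsigned int wake_batch;
};

/* Mirrors the cmpxchg loop above: returns true when this caller has
 * claimed responsibility for waking one full batch of waiters. */
static bool toy_wake_up(struct toy_sbq *q, unsigned int nr)
{
        atomic_fetch_add(&q->completion_cnt, nr);
        unsigned int wakeups = atomic_load(&q->wakeup_cnt);

        do {
                /* unsigned subtraction is wraparound-safe, as in the kernel */
                if (atomic_load(&q->completion_cnt) - wakeups < q->wake_batch)
                        return false;   /* no full batch outstanding yet */
        } while (!atomic_compare_exchange_weak(&q->wakeup_cnt, &wakeups,
                                wakeups + q->wake_batch));

        return true;    /* the kernel would call wake_up_nr() here */
}

int main(void)
{
        struct toy_sbq q = { .wake_batch = 8 };

        for (unsigned int i = 1; i <= 24; i++)
                if (toy_wake_up(&q, 1))
                        printf("completion %u claims a batch wakeup\n", i);
        return 0;
}

Compared with the removed __sbq_wake_up(), there is no per-waitqueue
counter to decrement and reset, which is what all of the deleted barrier
commentary existed to protect.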
@@ -790,9 +722,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
         seq_puts(m, "ws={\n");
         for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
                 struct sbq_wait_state *ws = &sbq->ws[i];
-
-                seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
-                           atomic_read(&ws->wait_cnt),
+                seq_printf(m, "\t{.wait=%s},\n",
                            waitqueue_active(&ws->wait) ? "active" : "inactive");
         }
         seq_puts(m, "}\n");