sbitmap: add helper to clear a batch of tags
ANBZ: #11744
commit 1aec5e4a29 upstream.
sbitmap currently only supports clearing tags one-by-one, add a helper
that allows the caller to pass in an array of tags to clear.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
[joe: use sbitmap_queue in sbitmap_update_cpu_hint()]
Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Reviewed-by: Ferry Meng <mengferry@linux.alibaba.com>
Link: https://gitee.com/anolis/cloud-kernel/pulls/4160
This commit is contained in:
parent
ddd4974732
commit
67e5b27fc9
|
@ -490,6 +490,17 @@ void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
|
|||
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
|
||||
unsigned int cpu);
|
||||
|
||||
/**
 * sbitmap_queue_clear_batch() - Free a batch of allocated bits from a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @offset: offset for each tag in array
 * @tags: array of tags
 * @nr_tags: number of tags in array
 */
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
				int *tags, int nr_tags);
|
||||
|
||||
static inline int sbq_index_inc(int index)
|
||||
{
|
||||
return (index + 1) & (SBQ_WAIT_QUEUES - 1);
|
||||
|
|
|
@ -559,6 +559,47 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
|
||||
|
||||
static inline void sbitmap_update_cpu_hint(struct sbitmap_queue *sbq, int cpu,
|
||||
int tag)
|
||||
{
|
||||
if (likely(!sbq->round_robin && tag < sbq->sb.depth))
|
||||
*per_cpu_ptr(sbq->alloc_hint, cpu) = tag;
|
||||
}
|
||||
|
||||
/*
 * Free a batch of tags in one go. Bits that land in the same bitmap word
 * are accumulated into a single mask so each word is cleared with one
 * atomic_long_andnot() instead of one atomic op per tag.
 *
 * NOTE(review): tags[nr_tags - 1] is read unconditionally, so this
 * assumes nr_tags > 0 — callers must not pass an empty batch.
 */
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
				int *tags, int nr_tags)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned long *addr = NULL;	/* word currently being accumulated */
	unsigned long mask = 0;		/* bits to clear in *addr */
	int i;

	/* Order the bit clears against the waiter wakeup below,
	 * mirroring the barrier usage in sbitmap_queue_clear(). */
	smp_mb__before_atomic();
	for (i = 0; i < nr_tags; i++) {
		const int tag = tags[i] - offset;
		unsigned long *this_addr;

		/* since we're clearing a batch, skip the deferred map */
		this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
		if (!addr) {
			addr = this_addr;
		} else if (addr != this_addr) {
			/* Crossed into a new word: flush the bits
			 * accumulated for the previous word first. */
			atomic_long_andnot(mask, (atomic_long_t *) addr);
			mask = 0;
			addr = this_addr;
		}
		mask |= (1UL << SB_NR_TO_BIT(sb, tag));
	}

	/* Flush the bits accumulated for the final word. */
	if (mask)
		atomic_long_andnot(mask, (atomic_long_t *) addr);

	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);
	/* Seed the current CPU's alloc hint with the last tag freed. */
	sbitmap_update_cpu_hint(sbq, raw_smp_processor_id(),
					tags[nr_tags - 1] - offset);
}
|
||||
|
||||
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
|
||||
unsigned int cpu)
|
||||
{
|
||||
|
@ -584,8 +625,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
|
|||
smp_mb__after_atomic();
|
||||
sbitmap_queue_wake_up(sbq);
|
||||
|
||||
if (likely(!sbq->round_robin && nr < sbq->sb.depth))
|
||||
*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
|
||||
sbitmap_update_cpu_hint(sbq, cpu, nr);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
|
||||
|
||||
|
|
Loading…
Reference in New Issue