From 88459642cba452630326b9cab1c651e09577d4e4 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Sat, 17 Sep 2016 08:38:44 -0600 Subject: blk-mq: abstract tag allocation out into sbitmap library This is a generally useful data structure, so make it available to anyone else who might want to use it. It's also a nice cleanup separating the allocation logic from the rest of the tag handling logic. The code is behind a new Kconfig option, CONFIG_SBITMAP, which is only selected by CONFIG_BLOCK for now. This should be a complete noop functionality-wise. Signed-off-by: Omar Sandoval Signed-off-by: Jens Axboe --- lib/Kconfig | 3 + lib/Makefile | 2 + lib/sbitmap.c | 301 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 306 insertions(+) create mode 100644 lib/sbitmap.c (limited to 'lib') diff --git a/lib/Kconfig b/lib/Kconfig index d79909dc01ec..942fb8091a86 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -550,4 +550,7 @@ config STACKDEPOT bool select STACKTRACE +config SBITMAP + bool + endmenu diff --git a/lib/Makefile b/lib/Makefile index cfa68eb269e4..2cbfd2904994 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -228,3 +228,5 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o obj-$(CONFIG_UBSAN) += ubsan.o UBSAN_SANITIZE_ubsan.o := n + +obj-$(CONFIG_SBITMAP) += sbitmap.o diff --git a/lib/sbitmap.c b/lib/sbitmap.c new file mode 100644 index 000000000000..dfc084ac6937 --- /dev/null +++ b/lib/sbitmap.c @@ -0,0 +1,301 @@ +/* + * Copyright (C) 2016 Facebook + * Copyright (C) 2013-2014 Jens Axboe + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include + +int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, + gfp_t flags, int node) +{ + unsigned int bits_per_word; + unsigned int i; + + if (shift < 0) { + shift = ilog2(BITS_PER_LONG); + /* + * If the bitmap is small, shrink the number of bits per word so + * we spread over a few cachelines, at least. If less than 4 + * bits, just forget about it, it's not going to work optimally + * anyway. 
+ */ + if (depth >= 4) { + while ((4U << shift) > depth) + shift--; + } + } + bits_per_word = 1U << shift; + if (bits_per_word > BITS_PER_LONG) + return -EINVAL; + + sb->shift = shift; + sb->depth = depth; + sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word); + + if (depth == 0) { + sb->map = NULL; + return 0; + } + + sb->map = kzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node); + if (!sb->map) + return -ENOMEM; + + for (i = 0; i < sb->map_nr; i++) { + sb->map[i].depth = min(depth, bits_per_word); + depth -= sb->map[i].depth; + } + return 0; +} +EXPORT_SYMBOL_GPL(sbitmap_init_node); + +void sbitmap_resize(struct sbitmap *sb, unsigned int depth) +{ + unsigned int bits_per_word = 1U << sb->shift; + unsigned int i; + + sb->depth = depth; + sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word); + + for (i = 0; i < sb->map_nr; i++) { + sb->map[i].depth = min(depth, bits_per_word); + depth -= sb->map[i].depth; + } +} +EXPORT_SYMBOL_GPL(sbitmap_resize); + +static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint, + bool wrap) +{ + unsigned int orig_hint = hint; + int nr; + + while (1) { + nr = find_next_zero_bit(&word->word, word->depth, hint); + if (unlikely(nr >= word->depth)) { + /* + * We started with an offset, and we didn't reset the + * offset to 0 in a failure case, so start from 0 to + * exhaust the map. + */ + if (orig_hint && hint && wrap) { + hint = orig_hint = 0; + continue; + } + return -1; + } + + if (!test_and_set_bit(nr, &word->word)) + break; + + hint = nr + 1; + if (hint >= word->depth - 1) + hint = 0; + } + + return nr; +} + +int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin) +{ + unsigned int i, index; + int nr = -1; + + index = SB_NR_TO_INDEX(sb, alloc_hint); + + for (i = 0; i < sb->map_nr; i++) { + nr = __sbitmap_get_word(&sb->map[index], + SB_NR_TO_BIT(sb, alloc_hint), + !round_robin); + if (nr != -1) { + nr += index << sb->shift; + break; + } + + /* Jump to next index. */ + index++; + alloc_hint = index << sb->shift; + + if (index >= sb->map_nr) { + index = 0; + alloc_hint = 0; + } + } + + return nr; +} +EXPORT_SYMBOL_GPL(sbitmap_get); + +bool sbitmap_any_bit_set(const struct sbitmap *sb) +{ + unsigned int i; + + for (i = 0; i < sb->map_nr; i++) { + if (sb->map[i].word) + return true; + } + return false; +} +EXPORT_SYMBOL_GPL(sbitmap_any_bit_set); + +bool sbitmap_any_bit_clear(const struct sbitmap *sb) +{ + unsigned int i; + + for (i = 0; i < sb->map_nr; i++) { + const struct sbitmap_word *word = &sb->map[i]; + unsigned long ret; + + ret = find_first_zero_bit(&word->word, word->depth); + if (ret < word->depth) + return true; + } + return false; +} +EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear); + +unsigned int sbitmap_weight(const struct sbitmap *sb) +{ + unsigned int i, weight; + + for (i = 0; i < sb->map_nr; i++) { + const struct sbitmap_word *word = &sb->map[i]; + + weight += bitmap_weight(&word->word, word->depth); + } + return weight; +} +EXPORT_SYMBOL_GPL(sbitmap_weight); + +static unsigned int sbq_calc_wake_batch(unsigned int depth) +{ + unsigned int wake_batch; + + /* + * For each batch, we wake up one queue. We need to make sure that our + * batch size is small enough that the full depth of the bitmap is + * enough to wake up all of the queues. 
+ */ + wake_batch = SBQ_WAKE_BATCH; + if (wake_batch > depth / SBQ_WAIT_QUEUES) + wake_batch = max(1U, depth / SBQ_WAIT_QUEUES); + + return wake_batch; +} + +int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, + int shift, gfp_t flags, int node) +{ + int ret; + int i; + + ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node); + if (ret) + return ret; + + sbq->wake_batch = sbq_calc_wake_batch(depth); + atomic_set(&sbq->wake_index, 0); + + sbq->ws = kzalloc(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags); + if (!sbq->ws) { + sbitmap_free(&sbq->sb); + return -ENOMEM; + } + + for (i = 0; i < SBQ_WAIT_QUEUES; i++) { + init_waitqueue_head(&sbq->ws[i].wait); + atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch); + } + return 0; +} +EXPORT_SYMBOL_GPL(sbitmap_queue_init_node); + +void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth) +{ + sbq->wake_batch = sbq_calc_wake_batch(depth); + sbitmap_resize(&sbq->sb, depth); +} +EXPORT_SYMBOL_GPL(sbitmap_queue_resize); + +static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq) +{ + int i, wake_index; + + wake_index = atomic_read(&sbq->wake_index); + for (i = 0; i < SBQ_WAIT_QUEUES; i++) { + struct sbq_wait_state *ws = &sbq->ws[wake_index]; + + if (waitqueue_active(&ws->wait)) { + int o = atomic_read(&sbq->wake_index); + + if (wake_index != o) + atomic_cmpxchg(&sbq->wake_index, o, wake_index); + return ws; + } + + wake_index = sbq_index_inc(wake_index); + } + + return NULL; +} + +static void sbq_wake_up(struct sbitmap_queue *sbq) +{ + struct sbq_wait_state *ws; + int wait_cnt; + + /* Ensure that the wait list checks occur after clear_bit(). */ + smp_mb(); + + ws = sbq_wake_ptr(sbq); + if (!ws) + return; + + wait_cnt = atomic_dec_return(&ws->wait_cnt); + if (unlikely(wait_cnt < 0)) + wait_cnt = atomic_inc_return(&ws->wait_cnt); + if (wait_cnt == 0) { + atomic_add(sbq->wake_batch, &ws->wait_cnt); + sbq_index_atomic_inc(&sbq->wake_index); + wake_up(&ws->wait); + } +} + +void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr) +{ + sbitmap_clear_bit(&sbq->sb, nr); + sbq_wake_up(sbq); +} +EXPORT_SYMBOL_GPL(sbitmap_queue_clear); + +void sbitmap_queue_wake_all(struct sbitmap_queue *sbq) +{ + int i, wake_index; + + /* + * Make sure all changes prior to this are visible from other CPUs. + */ + smp_mb(); + wake_index = atomic_read(&sbq->wake_index); + for (i = 0; i < SBQ_WAIT_QUEUES; i++) { + struct sbq_wait_state *ws = &sbq->ws[wake_index]; + + if (waitqueue_active(&ws->wait)) + wake_up(&ws->wait); + + wake_index = sbq_index_inc(wake_index); + } +} +EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all); -- cgit v1.2.3 From 48e28166a7b608e19a6aea3acadd81cdfe660f6b Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Sat, 17 Sep 2016 01:28:22 -0700 Subject: sbitmap: allocate wait queues on a specific node The original bt_alloc() we converted from was using kzalloc(), not kzalloc_node(), to allocate the wait queues. This was probably an oversight, so fix it for sbitmap_queue_init_node(). 
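As an illustrative usage sketch (not part of the patch; the device pointer is hypothetical), a NUMA-aware caller would now get both the bitmap words and the wait queues allocated on its device's home node. The five-argument signature shown is the one at this point in the series:

        struct sbitmap_queue sbq;
        int node = dev_to_node(&pdev->dev);     /* pdev: hypothetical PCI device */
        int ret;

        /* 128 tags; shift = -1 lets the library pick bits-per-word. */
        ret = sbitmap_queue_init_node(&sbq, 128, -1, GFP_KERNEL, node);
        if (ret)
                return ret;     /* -ENOMEM if any allocation failed */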
Signed-off-by: Omar Sandoval Signed-off-by: Jens Axboe --- lib/sbitmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/sbitmap.c b/lib/sbitmap.c index dfc084ac6937..4d8e97e470ee 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -208,7 +208,7 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, sbq->wake_batch = sbq_calc_wake_batch(depth); atomic_set(&sbq->wake_index, 0); - sbq->ws = kzalloc(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags); + sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node); if (!sbq->ws) { sbitmap_free(&sbq->sb); return -ENOMEM; -- cgit v1.2.3 From 40aabb67464d5aad9ca3d2a5fedee56e2ff45aa0 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Sat, 17 Sep 2016 01:28:23 -0700 Subject: sbitmap: push per-cpu last_tag into sbitmap_queue Allocating your own per-cpu allocation hint separately makes for an awkward API. Instead, allocate the per-cpu hint as part of the struct sbitmap_queue. There's no point for a struct sbitmap_queue without the cache, but you can still use a bare struct sbitmap. Signed-off-by: Omar Sandoval Signed-off-by: Jens Axboe --- lib/sbitmap.c | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 4d8e97e470ee..1651ad9d5530 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -205,11 +205,18 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, if (ret) return ret; + sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags); + if (!sbq->alloc_hint) { + sbitmap_free(&sbq->sb); + return -ENOMEM; + } + sbq->wake_batch = sbq_calc_wake_batch(depth); atomic_set(&sbq->wake_index, 0); sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node); if (!sbq->ws) { + free_percpu(sbq->alloc_hint); sbitmap_free(&sbq->sb); return -ENOMEM; } @@ -229,6 +236,29 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth) } EXPORT_SYMBOL_GPL(sbitmap_queue_resize); +int __sbitmap_queue_get(struct sbitmap_queue *sbq, bool round_robin) +{ + unsigned int hint; + int nr; + + hint = this_cpu_read(*sbq->alloc_hint); + nr = sbitmap_get(&sbq->sb, hint, round_robin); + + if (nr == -1) { + /* If the map is full, a hint won't do us much good. */ + this_cpu_write(*sbq->alloc_hint, 0); + } else if (nr == hint || unlikely(round_robin)) { + /* Only update the hint if we used it. */ + hint = nr + 1; + if (hint >= sbq->sb.depth - 1) + hint = 0; + this_cpu_write(*sbq->alloc_hint, hint); + } + + return nr; +} +EXPORT_SYMBOL_GPL(__sbitmap_queue_get); + static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq) { int i, wake_index; @@ -273,10 +303,13 @@ static void sbq_wake_up(struct sbitmap_queue *sbq) } } -void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr) +void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, + bool round_robin, unsigned int cpu) { sbitmap_clear_bit(&sbq->sb, nr); sbq_wake_up(sbq); + if (likely(!round_robin)) + *per_cpu_ptr(sbq->alloc_hint, cpu) = nr; } EXPORT_SYMBOL_GPL(sbitmap_queue_clear); -- cgit v1.2.3 From f4a644db86669d938c71f19560aebf69d4720d63 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Sat, 17 Sep 2016 01:28:24 -0700 Subject: sbitmap: push alloc policy into sbitmap_queue Again, there's no point in passing this in every time. Make it part of struct sbitmap_queue and clean up the API. 
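A hedged sketch of the resulting call pattern (caller code is illustrative, not from the patch): the allocation policy is chosen once at init time and no longer threaded through every get and clear.

        ret = sbitmap_queue_init_node(&sbq, depth, -1, false /* round_robin */,
                                      GFP_KERNEL, node);
        if (ret)
                return ret;

        nr = __sbitmap_queue_get(&sbq);         /* no per-call policy argument */
        if (nr >= 0) {
                /* ... use tag nr ... */
                sbitmap_queue_clear(&sbq, nr, cpu);     /* cpu that allocated nr */
        }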
Signed-off-by: Omar Sandoval Signed-off-by: Jens Axboe --- lib/sbitmap.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'lib') diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 1651ad9d5530..be55f744b771 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -196,7 +196,7 @@ static unsigned int sbq_calc_wake_batch(unsigned int depth) } int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, - int shift, gfp_t flags, int node) + int shift, bool round_robin, gfp_t flags, int node) { int ret; int i; @@ -225,6 +225,8 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, init_waitqueue_head(&sbq->ws[i].wait); atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch); } + + sbq->round_robin = round_robin; return 0; } EXPORT_SYMBOL_GPL(sbitmap_queue_init_node); @@ -236,18 +238,18 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth) } EXPORT_SYMBOL_GPL(sbitmap_queue_resize); -int __sbitmap_queue_get(struct sbitmap_queue *sbq, bool round_robin) +int __sbitmap_queue_get(struct sbitmap_queue *sbq) { unsigned int hint; int nr; hint = this_cpu_read(*sbq->alloc_hint); - nr = sbitmap_get(&sbq->sb, hint, round_robin); + nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin); if (nr == -1) { /* If the map is full, a hint won't do us much good. */ this_cpu_write(*sbq->alloc_hint, 0); - } else if (nr == hint || unlikely(round_robin)) { + } else if (nr == hint || unlikely(sbq->round_robin)) { /* Only update the hint if we used it. */ hint = nr + 1; if (hint >= sbq->sb.depth - 1) @@ -304,11 +306,11 @@ static void sbq_wake_up(struct sbitmap_queue *sbq) } void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, - bool round_robin, unsigned int cpu) + unsigned int cpu) { sbitmap_clear_bit(&sbq->sb, nr); sbq_wake_up(sbq); - if (likely(!round_robin)) + if (likely(!sbq->round_robin)) *per_cpu_ptr(sbq->alloc_hint, cpu) = nr; } EXPORT_SYMBOL_GPL(sbitmap_queue_clear); -- cgit v1.2.3 From 98d95416dbfaf4910caadfb4ddc75e4aacbdff8c Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Sat, 17 Sep 2016 01:28:25 -0700 Subject: sbitmap: randomize initial alloc_hint values In order to get good cache behavior from a sbitmap, we want each CPU to stick to its own cacheline(s) as much as possible. This might happen naturally as the bitmap gets filled up and the alloc_hint values spread out, but we really want this behavior from the start. blk-mq apparently intended to do this, but the code to do this was never wired up. Get rid of the dead code and make it part of the sbitmap library. Signed-off-by: Omar Sandoval Signed-off-by: Jens Axboe --- lib/sbitmap.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'lib') diff --git a/lib/sbitmap.c b/lib/sbitmap.c index be55f744b771..928b82a733f2 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -15,6 +15,7 @@ * along with this program. If not, see . 
*/ +#include #include int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, @@ -211,6 +212,11 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, return -ENOMEM; } + if (depth && !round_robin) { + for_each_possible_cpu(i) + *per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth; + } + sbq->wake_batch = sbq_calc_wake_batch(depth); atomic_set(&sbq->wake_index, 0); -- cgit v1.2.3 From 05fd095d53b979878f016c3a7080d3683cc89d72 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Sat, 17 Sep 2016 01:28:26 -0700 Subject: sbitmap: re-initialize allocation hints after resize After a struct sbitmap_queue is resized smaller, the allocation hints may still be set to bits beyond the new depth of the bitmap. This means that, for example, if the number of blk-mq tags is reduced through sysfs, more requests than the nominal queue depth may be in flight. It's tempting to fix this at resize time by doing a one-time reinitialization of the hints, but this can race with __sbitmap_queue_get() updating the hint. Instead, check the hint before we use it. This caused no measurable performance difference in my synthetic benchmarks. Signed-off-by: Omar Sandoval Signed-off-by: Jens Axboe --- lib/sbitmap.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 928b82a733f2..f736c52a712c 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -246,10 +246,15 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_resize); int __sbitmap_queue_get(struct sbitmap_queue *sbq) { - unsigned int hint; + unsigned int hint, depth; int nr; hint = this_cpu_read(*sbq->alloc_hint); + depth = READ_ONCE(sbq->sb.depth); + if (unlikely(hint >= depth)) { + hint = depth ? prandom_u32() % depth : 0; + this_cpu_write(*sbq->alloc_hint, hint); + } nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin); if (nr == -1) { @@ -258,7 +263,7 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq) } else if (nr == hint || unlikely(sbq->round_robin)) { /* Only update the hint if we used it. */ hint = nr + 1; - if (hint >= sbq->sb.depth - 1) + if (hint >= depth - 1) hint = 0; this_cpu_write(*sbq->alloc_hint, hint); } -- cgit v1.2.3 From 5c64a8df0ca88c79c9cb74674c2481e5f7ede511 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Sat, 17 Sep 2016 12:20:54 -0700 Subject: sbitmap: don't update the allocation hint on clear after resize If we have a bunch of high-numbered bits allocated and then we resize the struct sbitmap_queue, when those bits get cleared, we'll update the hint and then have to re-randomize it repeatedly. Avoid that by checking that the cleared bit is still a valid hint. No measurable performance difference in the common case. 
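To make this concrete, a hypothetical sequence the check defends against (illustrative, not from the patch):

        sbitmap_queue_resize(&sbq, 64);         /* depth shrinks: 128 -> 64 */
        sbitmap_queue_clear(&sbq, 100, cpu);    /* frees a tag allocated pre-resize */

Without the nr < sb.depth test in the diff below, the clear would write 100 into that cpu's alloc_hint; every later __sbitmap_queue_get() on that cpu would then find the hint out of range and re-randomize it, repeating that work on each such free.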
Signed-off-by: Omar Sandoval Signed-off-by: Jens Axboe --- lib/sbitmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/sbitmap.c b/lib/sbitmap.c index f736c52a712c..e40808921544 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -321,7 +321,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, { sbitmap_clear_bit(&sbq->sb, nr); sbq_wake_up(sbq); - if (likely(!sbq->round_robin)) + if (likely(!sbq->round_robin && nr < sbq->sb.depth)) *per_cpu_ptr(sbq->alloc_hint, cpu) = nr; } EXPORT_SYMBOL_GPL(sbitmap_queue_clear); -- cgit v1.2.3 From 60658e0dc1df058607990278fdf9d831e0c2c71a Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 19 Sep 2016 14:34:08 +0100 Subject: sbitmap: initialize weight to zero Variable weight is not initialized before being used to accumulate the weight sum, so the result is undefined. Initialize it to zero. Found by static analysis with cppcheck: [lib/sbitmap.c:177]: (error) Uninitialized variable: weight Signed-off-by: Colin Ian King Signed-off-by: Jens Axboe --- lib/sbitmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/sbitmap.c b/lib/sbitmap.c index e40808921544..2cecf05c82fd 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -169,7 +169,7 @@ EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear); unsigned int sbitmap_weight(const struct sbitmap *sb) { - unsigned int i, weight; + unsigned int i, weight = 0; for (i = 0; i < sb->map_nr; i++) { const struct sbitmap_word *word = &sb->map[i]; -- cgit v1.2.3
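Taken together, the series leaves the library with roughly the following usage shape. This is a minimal, hedged sketch, not code from any of the patches: the caller names are hypothetical, error handling is abbreviated, and teardown is assumed to go through sbitmap_queue_free(), which lives in the accompanying include/linux/sbitmap.h rather than in the code shown in this log.

        #include <linux/sbitmap.h>

        static struct sbitmap_queue tags;

        static int tags_init(int numa_node)
        {
                /* 256 tags, default shift, per-cpu hint caching (round_robin off). */
                return sbitmap_queue_init_node(&tags, 256, -1, false,
                                               GFP_KERNEL, numa_node);
        }

        static int tag_get(void)
        {
                return __sbitmap_queue_get(&tags);      /* -1 when all tags are in use */
        }

        static void tag_put(int nr, unsigned int cpu)
        {
                /* Clears the bit, wakes sleepers in batches, refreshes cpu's hint. */
                sbitmap_queue_clear(&tags, nr, cpu);
        }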