path: root/block/blk-map.c
author	Jens Axboe <axboe@kernel.dk>	2022-08-05 16:44:34 -0600
committer	Jens Axboe <axboe@kernel.dk>	2022-08-22 10:07:56 -0600
commit	e88811bc43b971e06fa82d4e421e21b3c999c1a7 (patch)
tree	63b3e0072fbd45bd78ec3b04c0973db6e61c6347 /block/blk-map.c
parent	8af870aa5b84729d7a39f84226be6f84e4943d8f (diff)
block: use on-stack page vec for <= UIO_FASTIOV
Avoid a kmalloc+kfree for each page array if we only have a few pages to map. An alloc+free for each IO is quite expensive, and it's pretty pointless if we're only dealing with one or a few vecs. Use UIO_FASTIOV, like we do in other spots, to set a sane limit for how big of an IO we want to avoid allocations for.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-map.c')
 block/blk-map.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/block/blk-map.c b/block/blk-map.c
index 16153fdc478f..f3768876d618 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -268,12 +268,19 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 	}
 
 	while (iov_iter_count(iter)) {
-		struct page **pages;
+		struct page **pages, *stack_pages[UIO_FASTIOV];
 		ssize_t bytes;
 		size_t offs, added = 0;
 		int npages;
 
-		bytes = iov_iter_get_pages_alloc2(iter, &pages, LONG_MAX, &offs);
+		if (nr_vecs <= ARRAY_SIZE(stack_pages)) {
+			pages = stack_pages;
+			bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
+						    nr_vecs, &offs);
+		} else {
+			bytes = iov_iter_get_pages_alloc2(iter, &pages,
+							  LONG_MAX, &offs);
+		}
 		if (unlikely(bytes <= 0)) {
 			ret = bytes ? bytes : -EFAULT;
 			goto out_unmap;
@@ -309,7 +316,8 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 		 */
 		while (j < npages)
 			put_page(pages[j++]);
-		kvfree(pages);
+		if (pages != stack_pages)
+			kvfree(pages);
 		/* couldn't stuff something into bio? */
 		if (bytes) {
 			iov_iter_revert(iter, bytes);
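
For context, here is a minimal userspace sketch of the stack-first, heap-fallback pattern the patch applies. It is illustrative only: FASTVEC, process_pages(), and map_pages() are hypothetical names, and plain malloc()/free() stand in for the kernel's allocation and kvfree() cleanup.

#include <stdio.h>
#include <stdlib.h>

/* Analogous to UIO_FASTIOV, which the kernel defines as 8. */
#define FASTVEC 8

/* Stand-in for the real per-page work (hypothetical). */
static void process_pages(void **pages, size_t n)
{
	for (size_t i = 0; i < n; i++)
		printf("page %zu at %p\n", i, pages[i]);
}

/* Hypothetical helper showing the stack-first, heap-fallback pattern. */
static int map_pages(void **src, size_t nr_vecs)
{
	void *stack_pages[FASTVEC];
	void **pages;

	if (nr_vecs <= FASTVEC) {
		/* Small request: no allocation at all. */
		pages = stack_pages;
	} else {
		/* Large request: pay for a single allocation. */
		pages = malloc(nr_vecs * sizeof(*pages));
		if (!pages)
			return -1;
	}

	for (size_t i = 0; i < nr_vecs; i++)
		pages[i] = src[i];
	process_pages(pages, nr_vecs);

	/* Mirror the patch's cleanup: only free what came from the heap. */
	if (pages != stack_pages)
		free(pages);
	return 0;
}

int main(void)
{
	void *small[4] = { 0 }, *large[16] = { 0 };

	map_pages(small, 4);	/* served from the stack array */
	map_pages(large, 16);	/* falls back to malloc() */
	return 0;
}

Note the design choice the patch makes at cleanup time: comparing the pointer against the stack array (pages != stack_pages) tells us whether a heap allocation happened, so no separate flag needs to be carried through the loop.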