From 4d0d98b60eba726e0a4f3e6617628b070c444707 Mon Sep 17 00:00:00 2001
From: Wanlong Gao
Date: Mon, 13 Jun 2011 10:45:38 +0200
Subject: block: fix the comment error in blkdev.h

There is no function rq_init in block/blk-core.c; the function is
blk_rq_init().

Signed-off-by: Wanlong Gao
Signed-off-by: Jens Axboe
---
 include/linux/blkdev.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux/blkdev.h')

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ae9091a68480..4ce6e68da2bd 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -73,7 +73,7 @@ enum rq_cmd_type_bits {

 /*
  * try to put the fields that are referenced together in the same cacheline.
- * if you modify this structure, be sure to check block/blk-core.c:rq_init()
+ * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
  * as well!
  */
 struct request {
--
cgit v1.2.3

From 55c022bbddb2c056b5dff1bd1b1758d31b6d64c9 Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Fri, 8 Jul 2011 08:19:20 +0200
Subject: block: avoid building too big plug list

When I test an fio script with a big I/O depth, the total throughput drops
compared to a relatively small I/O depth. The reason is that the thread
accumulates a lot of requests in its plug list, which causes delays (surely
this depends on CPU speed). We'd better have a threshold for requests: once
the threshold is reached, there is no further request merging, and queue lock
contention isn't severe when pushing the per-task requests to the queue, so
the main advantages of block plugging no longer apply. We can force a plug
list flush in this case. With this, my test throughput actually increases and
is almost equal to that of a small I/O depth. Another side effect is that the
IRQ-off time in blk_flush_plug_list() decreases for a big I/O depth.

BLK_MAX_REQUEST_COUNT is chosen arbitrarily; 16 is enough to reduce lock
contention for me, but I'm open here, 32 is fine in my tests too.

Signed-off-by: Shaohua Li
Signed-off-by: Jens Axboe
---
 include/linux/blkdev.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'include/linux/blkdev.h')

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 36f2e2b99ae3..92edb9601242 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -862,7 +862,10 @@ struct blk_plug {
 	struct list_head list;
 	struct list_head cb_list;
 	unsigned int should_sort;
+	unsigned int count;
 };
+#define BLK_MAX_REQUEST_COUNT 16
+
 struct blk_plug_cb {
 	struct list_head list;
 	void (*callback)(struct blk_plug_cb *);
--
cgit v1.2.3
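To make the effect of the new count field concrete, here is a small stand-alone
C model of the "flush once the plug grows too large" idea. It is only a sketch:
the struct, helper names and printouts below are invented for illustration and
are not the actual submission path in block/blk-core.c.

/*
 * Stand-alone model of the "flush the plug at a threshold" idea from the
 * patch above.  NOT kernel code: the types and helpers are simplified
 * stand-ins so the control flow can be compiled and run on its own.
 */
#include <stdio.h>

#define BLK_MAX_REQUEST_COUNT 16        /* value added by the patch */

struct plug {
        unsigned int count;             /* requests queued but not yet issued */
};

/* Pretend to push everything queued so far down to the driver. */
static void flush_plug_list(struct plug *plug)
{
        printf("flushing %u plugged requests\n", plug->count);
        plug->count = 0;
}

/* Queue one request; force a flush once the plug has grown too large. */
static void queue_request(struct plug *plug)
{
        if (plug->count >= BLK_MAX_REQUEST_COUNT)
                flush_plug_list(plug);
        plug->count++;
}

int main(void)
{
        struct plug plug = { .count = 0 };

        for (int i = 0; i < 40; i++)    /* simulate a deep I/O queue */
                queue_request(&plug);

        flush_plug_list(&plug);         /* final "unplug" on exit */
        return 0;
}

The point is simply that once BLK_MAX_REQUEST_COUNT requests have been plugged,
the whole batch is pushed down immediately instead of waiting for the task to
unplug, which caps both the latency added by plugging and the IRQ-off time of
the eventual flush.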
From 316cc67d5e03801a5ee4ac660a4dfe9e02aed475 Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Fri, 8 Jul 2011 08:19:21 +0200
Subject: block: document blk_plug list access

I was often confused about why preemption is not disabled when changing the
blk_plug list. It would be better to add a comment here in case others have
similar concerns.

Signed-off-by: Shaohua Li
Signed-off-by: Jens Axboe
---
 include/linux/blkdev.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'include/linux/blkdev.h')

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 92edb9601242..6dcea6885a5d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -857,6 +857,12 @@ struct request_queue *blk_alloc_queue(gfp_t);
 struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);

+/*
+ * Note: Code in between changing the blk_plug list/cb_list or element of such
+ * lists is preemptable, but such code can't do sleep (or be very careful),
+ * otherwise data is corrupted. For details, please check schedule() where
+ * blk_schedule_flush_plug() is called.
+ */
 struct blk_plug {
 	unsigned long magic;
 	struct list_head list;
--
cgit v1.2.3

From d7b7630130e52361af66ce3b994696e2357ba7de Mon Sep 17 00:00:00 2001
From: Richard Kennedy
Date: Wed, 13 Jul 2011 21:17:23 +0200
Subject: block: reorder request_queue to remove 64 bit alignment padding

Reorder request_queue to remove 16 bytes of alignment padding in 64-bit
builds. On my config this shrinks the structure from 1608 to 1592 bytes and
therefore needs one fewer cacheline. Also trivially move the opening bracket
{ onto the same line as the structure name to make it easier to grep.

Signed-off-by: Richard Kennedy
Acked-by: Tejun Heo
Signed-off-by: Jens Axboe
---
 include/linux/blkdev.h | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

(limited to 'include/linux/blkdev.h')

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6dcea6885a5d..c0cd9a2f22ef 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -260,8 +260,7 @@ struct queue_limits {
 	unsigned char		discard_zeroes_data;
 };

-struct request_queue
-{
+struct request_queue {
 	/*
 	 * Together with queue_head for cacheline sharing
 	 */
@@ -304,14 +303,14 @@ struct request_queue
 	void			*queuedata;

 	/*
-	 * queue needs bounce pages for pages above this limit
+	 * various queue flags, see QUEUE_* below
 	 */
-	gfp_t			bounce_gfp;
+	unsigned long		queue_flags;

 	/*
-	 * various queue flags, see QUEUE_* below
+	 * queue needs bounce pages for pages above this limit
 	 */
-	unsigned long		queue_flags;
+	gfp_t			bounce_gfp;

 	/*
 	 * protects queue structures from reentrancy. ->__queue_lock should
@@ -334,8 +333,8 @@ struct request_queue
 	unsigned int		nr_congestion_off;
 	unsigned int		nr_batching;

-	void			*dma_drain_buffer;
 	unsigned int		dma_drain_size;
+	void			*dma_drain_buffer;
 	unsigned int		dma_pad_mask;
 	unsigned int		dma_alignment;
--
cgit v1.2.3
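The padding that the patch above removes is a general property of C struct
layout, not something specific to request_queue. The following stand-alone C
snippet (example types only, nothing taken from blkdev.h) shows how placing a
pointer between two 32-bit fields forces the compiler to insert padding on a
64-bit build, and how reordering the members recovers the space:

/*
 * Generic illustration of alignment padding; these structs are examples
 * only, not the real request_queue layout.
 */
#include <stdio.h>

struct padded {                 /* pointer splits the two 4-byte ints */
        unsigned int a;         /* 4 bytes + 4 bytes of padding (LP64) */
        void *p;                /* 8 bytes, must be 8-byte aligned */
        unsigned int b;         /* 4 bytes + 4 bytes of tail padding */
};

struct packed_by_order {        /* same members, grouped by size */
        void *p;                /* 8 bytes */
        unsigned int a;         /* 4 bytes */
        unsigned int b;         /* 4 bytes, no padding needed */
};

int main(void)
{
        /* Typically prints 24 vs 16 on a 64-bit build. */
        printf("padded:          %zu bytes\n", sizeof(struct padded));
        printf("packed_by_order: %zu bytes\n", sizeof(struct packed_by_order));
        return 0;
}

The same reasoning is behind swapping queue_flags with bounce_gfp and moving
dma_drain_buffer after dma_drain_size: unsigned int fields are paired up so
that each pointer-sized member lands on a natural 8-byte boundary without
filler bytes.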
From 5757a6d76cdf6dda2a492c09b985c015e86779b1 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Sat, 23 Jul 2011 20:44:25 +0200
Subject: block: strict rq_affinity

Some systems benefit from completions always being steered to the strict
requester CPU rather than the looser "per-socket" steering that
blk_cpu_to_group() attempts by default. This is because the first CPU in the
group mask ends up being completely overloaded with work, while the others
(including the original submitter) have power left to spare.

Allow the strict mode to be set by writing '2' to the sysfs control file.
This is identical to the scheme used for the nomerges file, where '2' is a
more aggressive setting than just being turned on:

  echo 2 > /sys/block/<dev>/queue/rq_affinity

Cc: Christoph Hellwig
Cc: Roland Dreier
Tested-by: Dave Jiang
Signed-off-by: Dan Williams
Signed-off-by: Jens Axboe
---
 include/linux/blkdev.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'include/linux/blkdev.h')

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c0cd9a2f22ef..0e67c45b3bc9 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -392,7 +392,7 @@ struct request_queue {
 #define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP	9	/* force complete on same CPU */
+#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
 #define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
 #define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
 #define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
@@ -402,6 +402,7 @@ struct request_queue {
 #define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
 #define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
 #define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
+#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */

 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
--
cgit v1.2.3
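To spell out what the two flags mean for completion steering, here is a small
stand-alone C sketch of the decision. It only models the semantics described
in the commit message and is not the real completion code: cpu_to_group()
below is a fake two-CPUs-per-group mapping, and the boolean parameters merely
stand in for QUEUE_FLAG_SAME_COMP and QUEUE_FLAG_SAME_FORCE.

/*
 * Illustration of the completion-steering decision implied by the two
 * queue flags.  NOT the real block-layer code.
 */
#include <stdbool.h>
#include <stdio.h>

/* Pretend every pair of CPUs shares a group (e.g. a socket). */
static int cpu_to_group(int cpu)
{
        return cpu & ~1;                /* first CPU of the pair */
}

/* Pick the CPU that should run the completion for one request. */
static int completion_cpu(int submit_cpu, int irq_cpu,
                          bool same_comp, bool same_force)
{
        if (!same_comp)
                return irq_cpu;                 /* complete where the IRQ landed */
        if (same_force)
                return submit_cpu;              /* strict: the exact requester CPU */
        return cpu_to_group(submit_cpu);        /* loose: any CPU in the group */
}

int main(void)
{
        /* Request submitted on CPU 3, IRQ delivered on CPU 6. */
        printf("rq_affinity=0 -> CPU %d\n", completion_cpu(3, 6, false, false));
        printf("rq_affinity=1 -> CPU %d\n", completion_cpu(3, 6, true, false));
        printf("rq_affinity=2 -> CPU %d\n", completion_cpu(3, 6, true, true));
        return 0;
}

Under the scheme this patch describes, writing '1' to rq_affinity corresponds
to the first flag only, while writing '2' additionally sets SAME_FORCE and so
steers the completion back to the exact submitting CPU.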