author		Jeff Garzik <jgarzik@pretzel.yyz.us>	2005-06-26 23:42:30 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-06-26 23:42:30 -0400
commit		f45727d52d1581e9ff4df9d1a12a60789ad2d1eb (patch)
tree		773ae25f98542e6d382c688f7e85e8137d065614 /drivers/block
parent		4c925f452cfd16c690209e96821ee094e09a2404 (diff)
parent		5696c1944a33b4434a9a1ebb6383b906afd43a10 (diff)
Merge /spare/repo/netdev-2.6/ branch 'ieee80211'
Diffstat (limited to 'drivers/block')
-rw-r--r--	drivers/block/aoe/aoechr.c	10
-rw-r--r--	drivers/block/as-iosched.c	12
-rw-r--r--	drivers/block/cciss.c	12
-rw-r--r--	drivers/block/cfq-iosched.c	13
-rw-r--r--	drivers/block/deadline-iosched.c	12
-rw-r--r--	drivers/block/elevator.c	22
-rw-r--r--	drivers/block/genhd.c	27
-rw-r--r--	drivers/block/ioctl.c	74
-rw-r--r--	drivers/block/ll_rw_blk.c	208
-rw-r--r--	drivers/block/loop.c	81
-rw-r--r--	drivers/block/paride/pd.c	2
-rw-r--r--	drivers/block/paride/pg.c	14
-rw-r--r--	drivers/block/paride/pt.c	20
-rw-r--r--	drivers/block/pktcdvd.c	39
-rw-r--r--	drivers/block/sx8.c	4
-rw-r--r--	drivers/block/ub.c	600
16 files changed, 638 insertions, 512 deletions
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 14aeca3e2e8c..45a243096187 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -36,7 +36,7 @@ static int emsgs_head_idx, emsgs_tail_idx;
static struct semaphore emsgs_sema;
static spinlock_t emsgs_lock;
static int nblocked_emsgs_readers;
-static struct class_simple *aoe_class;
+static struct class *aoe_class;
static struct aoe_chardev chardevs[] = {
{ MINOR_ERR, "err" },
{ MINOR_DISCOVER, "discover" },
@@ -218,13 +218,13 @@ aoechr_init(void)
}
sema_init(&emsgs_sema, 0);
spin_lock_init(&emsgs_lock);
- aoe_class = class_simple_create(THIS_MODULE, "aoe");
+ aoe_class = class_create(THIS_MODULE, "aoe");
if (IS_ERR(aoe_class)) {
unregister_chrdev(AOE_MAJOR, "aoechr");
return PTR_ERR(aoe_class);
}
for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
- class_simple_device_add(aoe_class,
+ class_device_create(aoe_class,
MKDEV(AOE_MAJOR, chardevs[i].minor),
NULL, chardevs[i].name);
@@ -237,8 +237,8 @@ aoechr_exit(void)
int i;
for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
- class_simple_device_remove(MKDEV(AOE_MAJOR, chardevs[i].minor));
- class_simple_destroy(aoe_class);
+ class_device_destroy(aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor));
+ class_destroy(aoe_class);
unregister_chrdev(AOE_MAJOR, "aoechr");
}
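
The aoechr conversion above (and the pg and pt conversions further down) all follow the same class_simple to class migration: class_create() replaces class_simple_create(), class_device_create()/class_device_destroy() replace the _simple_ add/remove calls, and teardown now needs the class pointer as well as the dev_t. A minimal sketch of the full lifecycle for a hypothetical single-minor character driver (EX_MAJOR and ex_fops are placeholders, not from this patch):

	#include <linux/device.h>
	#include <linux/fs.h>
	#include <linux/module.h>

	#define EX_MAJOR 240				/* placeholder major */
	static struct file_operations ex_fops;		/* placeholder fops */
	static struct class *ex_class;			/* was: struct class_simple * */

	static int __init ex_init(void)
	{
		int err;

		err = register_chrdev(EX_MAJOR, "ex", &ex_fops);
		if (err < 0)
			return err;

		ex_class = class_create(THIS_MODULE, "ex");
		if (IS_ERR(ex_class)) {
			unregister_chrdev(EX_MAJOR, "ex");
			return PTR_ERR(ex_class);
		}

		/* one sysfs class device per minor; no parent struct device */
		class_device_create(ex_class, MKDEV(EX_MAJOR, 0), NULL, "ex0");
		return 0;
	}

	static void __exit ex_exit(void)
	{
		/* destroy now takes the class, not just the dev_t */
		class_device_destroy(ex_class, MKDEV(EX_MAJOR, 0));
		class_destroy(ex_class);
		unregister_chrdev(EX_MAJOR, "ex");
	}

	module_init(ex_init);
	module_exit(ex_exit);
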
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
index a9575bb58a5e..3410b4d294b9 100644
--- a/drivers/block/as-iosched.c
+++ b/drivers/block/as-iosched.c
@@ -1871,20 +1871,22 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
if (!arq_pool)
return -ENOMEM;
- ad = kmalloc(sizeof(*ad), GFP_KERNEL);
+ ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
if (!ad)
return -ENOMEM;
memset(ad, 0, sizeof(*ad));
ad->q = q; /* Identify what queue the data belongs to */
- ad->hash = kmalloc(sizeof(struct list_head)*AS_HASH_ENTRIES,GFP_KERNEL);
+ ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES,
+ GFP_KERNEL, q->node);
if (!ad->hash) {
kfree(ad);
return -ENOMEM;
}
- ad->arq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, arq_pool);
+ ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
+ mempool_free_slab, arq_pool, q->node);
if (!ad->arq_pool) {
kfree(ad->hash);
kfree(ad);
@@ -2044,7 +2046,7 @@ as_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
struct as_fs_entry *entry = to_as(attr);
if (!entry->show)
- return 0;
+ return -EIO;
return entry->show(e->elevator_data, page);
}
@@ -2057,7 +2059,7 @@ as_attr_store(struct kobject *kobj, struct attribute *attr,
struct as_fs_entry *entry = to_as(attr);
if (!entry->store)
- return -EINVAL;
+ return -EIO;
return entry->store(e->elevator_data, page, length);
}
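
The anticipatory scheduler's per-queue state now comes from the queue's home node (deadline-iosched below gets the identical treatment). A sketch of the pattern, assuming q->node has been populated by blk_init_queue_node() as introduced in the ll_rw_blk.c part of this diff:

	/* allocate the scheduler's hot per-queue data node-local to the
	 * device; q->node is -1 ("no preference") for queues created
	 * through the plain blk_init_queue() wrapper */
	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
	if (!ad)
		return -ENOMEM;

	/* the per-queue request mempool follows the same placement */
	ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
				mempool_free_slab, arq_pool, q->node);
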
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 8f7c1a1ed7f4..abde27027c06 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -41,6 +41,7 @@
#include <asm/uaccess.h>
#include <asm/io.h>
+#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/completion.h>
@@ -126,8 +127,6 @@ static struct board_type products[] = {
#define MAX_CTLR_ORIG 8
-#define CCISS_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */
-
static ctlr_info_t *hba[MAX_CTLR];
static void do_cciss_request(request_queue_t *q);
@@ -2393,11 +2392,6 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
return( -1);
}
- if (pci_set_dma_mask(pdev, CCISS_DMA_MASK ) != 0)
- {
- printk(KERN_ERR "cciss: Unable to set DMA mask\n");
- return(-1);
- }
subsystem_vendor_id = pdev->subsystem_vendor;
subsystem_device_id = pdev->subsystem_device;
@@ -2747,9 +2741,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
hba[i]->pdev = pdev;
/* configure PCI DMA stuff */
- if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL))
+ if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
printk("cciss: using DAC cycles\n");
- else if (!pci_set_dma_mask(pdev, 0xffffffff))
+ else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
printk("cciss: not using DAC cycles\n");
else {
printk("cciss: no suitable DMA available\n");
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index 0ef7a0065ece..3ac47dde64da 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -1202,13 +1202,16 @@ retry:
if (new_cfqq) {
cfqq = new_cfqq;
new_cfqq = NULL;
- } else if (gfp_mask & __GFP_WAIT) {
+ } else {
spin_unlock_irq(cfqd->queue->queue_lock);
new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
spin_lock_irq(cfqd->queue->queue_lock);
+
+ if (!new_cfqq && !(gfp_mask & __GFP_WAIT))
+ goto out;
+
goto retry;
- } else
- goto out;
+ }
memset(cfqq, 0, sizeof(*cfqq));
@@ -1772,7 +1775,7 @@ cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
struct cfq_fs_entry *entry = to_cfq(attr);
if (!entry->show)
- return 0;
+ return -EIO;
return entry->show(e->elevator_data, page);
}
@@ -1785,7 +1788,7 @@ cfq_attr_store(struct kobject *kobj, struct attribute *attr,
struct cfq_fs_entry *entry = to_cfq(attr);
if (!entry->store)
- return -EINVAL;
+ return -EIO;
return entry->store(e->elevator_data, page, length);
}
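
The cfq change reworks cfq_get_queue()'s allocation so the queue lock is always dropped around kmem_cache_alloc(); previously a caller without __GFP_WAIT never even attempted an atomic allocation. Paraphrased control flow, with the hash lookup and initialization elided:

	retry:
		/* ... look up cfqq in the hash under queue_lock ... */
		if (new_cfqq) {
			/* use the object allocated on a previous pass */
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else {
			/* drop the lock: the allocation may sleep if
			 * gfp_mask allows it */
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
			spin_lock_irq(cfqd->queue->queue_lock);

			/* atomic allocation failed: bail out, don't spin */
			if (!new_cfqq && !(gfp_mask & __GFP_WAIT))
				goto out;

			/* re-check the hash; another CPU may have raced us */
			goto retry;
		}
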
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
index d63d34c671f7..4bc2fea73273 100644
--- a/drivers/block/deadline-iosched.c
+++ b/drivers/block/deadline-iosched.c
@@ -711,18 +711,20 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
if (!drq_pool)
return -ENOMEM;
- dd = kmalloc(sizeof(*dd), GFP_KERNEL);
+ dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
if (!dd)
return -ENOMEM;
memset(dd, 0, sizeof(*dd));
- dd->hash = kmalloc(sizeof(struct list_head)*DL_HASH_ENTRIES,GFP_KERNEL);
+ dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
+ GFP_KERNEL, q->node);
if (!dd->hash) {
kfree(dd);
return -ENOMEM;
}
- dd->drq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, drq_pool);
+ dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
+ mempool_free_slab, drq_pool, q->node);
if (!dd->drq_pool) {
kfree(dd->hash);
kfree(dd);
@@ -886,7 +888,7 @@ deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
struct deadline_fs_entry *entry = to_deadline(attr);
if (!entry->show)
- return 0;
+ return -EIO;
return entry->show(e->elevator_data, page);
}
@@ -899,7 +901,7 @@ deadline_attr_store(struct kobject *kobj, struct attribute *attr,
struct deadline_fs_entry *entry = to_deadline(attr);
if (!entry->store)
- return -EINVAL;
+ return -EIO;
return entry->store(e->elevator_data, page, length);
}
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
index 6b79b4314622..f831f08f839c 100644
--- a/drivers/block/elevator.c
+++ b/drivers/block/elevator.c
@@ -220,11 +220,6 @@ void elevator_exit(elevator_t *e)
kfree(e);
}
-static int elevator_global_init(void)
-{
- return 0;
-}
-
int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
elevator_t *e = q->elevator;
@@ -291,6 +286,13 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
}
/*
+ * the request is prepped and may have some resources allocated.
+ * allowing unprepped requests to pass this one may cause resource
+ * deadlock. turn on softbarrier.
+ */
+ rq->flags |= REQ_SOFTBARRIER;
+
+ /*
* if iosched has an explicit requeue hook, then use that. otherwise
* just put the request at the front of the queue
*/
@@ -322,7 +324,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
int nrq = q->rq.count[READ] + q->rq.count[WRITE]
- q->in_flight;
- if (nrq == q->unplug_thresh)
+ if (nrq >= q->unplug_thresh)
__generic_unplug_device(q);
}
} else
@@ -386,6 +388,12 @@ struct request *elv_next_request(request_queue_t *q)
if (ret == BLKPREP_OK) {
break;
} else if (ret == BLKPREP_DEFER) {
+ /*
+ * the request may have been (partially) prepped.
+ * we need to keep this request in the front to
+ * avoid resource deadlock. turn on softbarrier.
+ */
+ rq->flags |= REQ_SOFTBARRIER;
rq = NULL;
break;
} else if (ret == BLKPREP_KILL) {
@@ -692,8 +700,6 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
return len;
}
-module_init(elevator_global_init);
-
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_requeue_request);
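
Besides the REQ_SOFTBARRIER additions (explained by the new comments above), the unplug trigger changes from == to >=: nrq is recomputed from rq.count[] and in_flight, all of which can move by more than one between evaluations, so an exact-equality test can step over the threshold and never fire. Condensed from the hunk:

	int nrq = q->rq.count[READ] + q->rq.count[WRITE] - q->in_flight;

	/* '>=' catches the case where nrq jumps past unplug_thresh
	 * without ever being exactly equal to it */
	if (nrq >= q->unplug_thresh)
		__generic_unplug_device(q);
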
diff --git a/drivers/block/genhd.c b/drivers/block/genhd.c
index 8bbe01d4b487..47fd3659a061 100644
--- a/drivers/block/genhd.c
+++ b/drivers/block/genhd.c
@@ -40,7 +40,7 @@ static inline int major_to_index(int major)
#ifdef CONFIG_PROC_FS
/* get block device names in somewhat random order */
-int get_blkdev_list(char *p)
+int get_blkdev_list(char *p, int used)
{
struct blk_major_name *n;
int i, len;
@@ -49,10 +49,18 @@ int get_blkdev_list(char *p)
down(&block_subsys_sem);
for (i = 0; i < ARRAY_SIZE(major_names); i++) {
- for (n = major_names[i]; n; n = n->next)
+ for (n = major_names[i]; n; n = n->next) {
+ /*
+ * If the current string plus the 5 extra characters

+ * in the line would run us off the page, then we're done
+ */
+ if ((len + used + strlen(n->name) + 5) >= PAGE_SIZE)
+ goto page_full;
len += sprintf(p+len, "%3d %s\n",
n->major, n->name);
+ }
}
+page_full:
up(&block_subsys_sem);
return len;
@@ -322,7 +330,7 @@ static ssize_t disk_attr_show(struct kobject *kobj, struct attribute *attr,
struct gendisk *disk = to_disk(kobj);
struct disk_attribute *disk_attr =
container_of(attr,struct disk_attribute,attr);
- ssize_t ret = 0;
+ ssize_t ret = -EIO;
if (disk_attr->show)
ret = disk_attr->show(disk,page);
@@ -582,10 +590,16 @@ struct seq_operations diskstats_op = {
.show = diskstats_show
};
-
struct gendisk *alloc_disk(int minors)
{
- struct gendisk *disk = kmalloc(sizeof(struct gendisk), GFP_KERNEL);
+ return alloc_disk_node(minors, -1);
+}
+
+struct gendisk *alloc_disk_node(int minors, int node_id)
+{
+ struct gendisk *disk;
+
+ disk = kmalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
if (disk) {
memset(disk, 0, sizeof(struct gendisk));
if (!init_disk_stats(disk)) {
@@ -594,7 +608,7 @@ struct gendisk *alloc_disk(int minors)
}
if (minors > 1) {
int size = (minors - 1) * sizeof(struct hd_struct *);
- disk->part = kmalloc(size, GFP_KERNEL);
+ disk->part = kmalloc_node(size, GFP_KERNEL, node_id);
if (!disk->part) {
kfree(disk);
return NULL;
@@ -610,6 +624,7 @@ struct gendisk *alloc_disk(int minors)
}
EXPORT_SYMBOL(alloc_disk);
+EXPORT_SYMBOL(alloc_disk_node);
struct kobject *get_disk(struct gendisk *disk)
{
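
Two genhd changes: get_blkdev_list() gains a 'used' argument and stops before overrunning its page (the 5 extra characters per line are the up-to-three-digit major, the separating space, and the trailing newline of each "%3d %s\n" entry), and alloc_disk() becomes a thin wrapper around the new alloc_disk_node(), which places the gendisk and its partition array on a caller-chosen node. A hedged usage sketch (node_id would typically come from the underlying device):

	struct gendisk *disk;

	/* 16 minors: the whole disk plus up to 15 partitions;
	 * a node_id of -1 means "no placement preference" */
	disk = alloc_disk_node(16, node_id);
	if (!disk)
		return -ENOMEM;
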
diff --git a/drivers/block/ioctl.c b/drivers/block/ioctl.c
index 6d7bcc9da9e7..6e278474f9a8 100644
--- a/drivers/block/ioctl.c
+++ b/drivers/block/ioctl.c
@@ -133,11 +133,9 @@ static int put_u64(unsigned long arg, u64 val)
return put_user(val, (u64 __user *)arg);
}
-int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
- unsigned long arg)
+static int blkdev_locked_ioctl(struct file *file, struct block_device *bdev,
+ unsigned cmd, unsigned long arg)
{
- struct block_device *bdev = inode->i_bdev;
- struct gendisk *disk = bdev->bd_disk;
struct backing_dev_info *bdi;
int ret, n;
@@ -190,36 +188,72 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
return put_ulong(arg, bdev->bd_inode->i_size >> 9);
case BLKGETSIZE64:
return put_u64(arg, bdev->bd_inode->i_size);
+ }
+ return -ENOIOCTLCMD;
+}
+
+static int blkdev_driver_ioctl(struct inode *inode, struct file *file,
+ struct gendisk *disk, unsigned cmd, unsigned long arg)
+{
+ int ret;
+ if (disk->fops->unlocked_ioctl)
+ return disk->fops->unlocked_ioctl(file, cmd, arg);
+
+ if (disk->fops->ioctl) {
+ lock_kernel();
+ ret = disk->fops->ioctl(inode, file, cmd, arg);
+ unlock_kernel();
+ return ret;
+ }
+
+ return -ENOTTY;
+}
+
+int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
+ unsigned long arg)
+{
+ struct block_device *bdev = inode->i_bdev;
+ struct gendisk *disk = bdev->bd_disk;
+ int ret, n;
+
+ switch(cmd) {
case BLKFLSBUF:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- if (disk->fops->ioctl) {
- ret = disk->fops->ioctl(inode, file, cmd, arg);
- /* -EINVAL to handle old uncorrected drivers */
- if (ret != -EINVAL && ret != -ENOTTY)
- return ret;
- }
+
+ ret = blkdev_driver_ioctl(inode, file, disk, cmd, arg);
+ /* -EINVAL to handle old uncorrected drivers */
+ if (ret != -EINVAL && ret != -ENOTTY)
+ return ret;
+
+ lock_kernel();
fsync_bdev(bdev);
invalidate_bdev(bdev, 0);
+ unlock_kernel();
return 0;
+
case BLKROSET:
- if (disk->fops->ioctl) {
- ret = disk->fops->ioctl(inode, file, cmd, arg);
- /* -EINVAL to handle old uncorrected drivers */
- if (ret != -EINVAL && ret != -ENOTTY)
- return ret;
- }
+ ret = blkdev_driver_ioctl(inode, file, disk, cmd, arg);
+ /* -EINVAL to handle old uncorrected drivers */
+ if (ret != -EINVAL && ret != -ENOTTY)
+ return ret;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (get_user(n, (int __user *)(arg)))
return -EFAULT;
+ lock_kernel();
set_device_ro(bdev, n);
+ unlock_kernel();
return 0;
- default:
- if (disk->fops->ioctl)
- return disk->fops->ioctl(inode, file, cmd, arg);
}
- return -ENOTTY;
+
+ lock_kernel();
+ ret = blkdev_locked_ioctl(file, bdev, cmd, arg);
+ unlock_kernel();
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ return blkdev_driver_ioctl(inode, file, disk, cmd, arg);
}
/* Most of the generic ioctls are handled in the normal fallback path.
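
The net effect of the blkdev_ioctl() split is a fixed dispatch order: BLKFLSBUF and BLKROSET are handled first (driver override, then the generic behaviour under the BKL), the remaining generic block ioctls run via blkdev_locked_ioctl() inside lock_kernel(), and anything it declines with -ENOIOCTLCMD falls through to blkdev_driver_ioctl(), which prefers a BKL-free ->unlocked_ioctl over the legacy ->ioctl. A sketch of how a driver opts in; the unlocked_ioctl signature here is assumed from the call site above:

	static int ex_unlocked_ioctl(struct file *file, unsigned cmd,
				     unsigned long arg)
	{
		/* called without the BKL: the driver does its own locking */
		switch (cmd) {
		/* ... driver-private commands ... */
		}
		return -ENOTTY;
	}

	static struct block_device_operations ex_bdops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= ex_unlocked_ioctl,	/* preferred over .ioctl */
	};
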
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 11ef9d9ea139..60e64091de1b 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
+#include <linux/blkdev.h>
/*
* for max sense size
@@ -36,6 +37,7 @@
static void blk_unplug_work(void *data);
static void blk_unplug_timeout(unsigned long data);
+static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
/*
* For the allocated request tables
@@ -716,7 +718,7 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag)
{
struct blk_queue_tag *bqt = q->queue_tags;
- if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
+ if (unlikely(bqt == NULL || tag >= bqt->max_depth))
return NULL;
return bqt->tag_index[tag];
@@ -774,9 +776,9 @@ EXPORT_SYMBOL(blk_queue_free_tags);
static int
init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
{
- int bits, i;
struct request **tag_index;
unsigned long *tag_map;
+ int nr_ulongs;
if (depth > q->nr_requests * 2) {
depth = q->nr_requests * 2;
@@ -788,24 +790,17 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
if (!tag_index)
goto fail;
- bits = (depth / BLK_TAGS_PER_LONG) + 1;
- tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC);
+ nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
+ tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
if (!tag_map)
goto fail;
memset(tag_index, 0, depth * sizeof(struct request *));
- memset(tag_map, 0, bits * sizeof(unsigned long));
+ memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
tags->max_depth = depth;
- tags->real_max_depth = bits * BITS_PER_LONG;
tags->tag_index = tag_index;
tags->tag_map = tag_map;
- /*
- * set the upper bits if the depth isn't a multiple of the word size
- */
- for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
- __set_bit(i, tag_map);
-
return 0;
fail:
kfree(tag_index);
@@ -870,32 +865,24 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth)
struct blk_queue_tag *bqt = q->queue_tags;
struct request **tag_index;
unsigned long *tag_map;
- int bits, max_depth;
+ int max_depth, nr_ulongs;
if (!bqt)
return -ENXIO;
/*
- * don't bother sizing down
- */
- if (new_depth <= bqt->real_max_depth) {
- bqt->max_depth = new_depth;
- return 0;
- }
-
- /*
* save the old state info, so we can copy it back
*/
tag_index = bqt->tag_index;
tag_map = bqt->tag_map;
- max_depth = bqt->real_max_depth;
+ max_depth = bqt->max_depth;
if (init_tag_map(q, bqt, new_depth))
return -ENOMEM;
memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
- bits = max_depth / BLK_TAGS_PER_LONG;
- memcpy(bqt->tag_map, tag_map, bits * sizeof(unsigned long));
+ nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
+ memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
kfree(tag_index);
kfree(tag_map);
@@ -925,11 +912,16 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
BUG_ON(tag == -1);
- if (unlikely(tag >= bqt->real_max_depth))
+ if (unlikely(tag >= bqt->max_depth))
+ /*
+ * This can happen after tag depth has been reduced.
+ * FIXME: how about a warning or info message here?
+ */
return;
if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
- printk("attempt to clear non-busy tag (%d)\n", tag);
+ printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
+ __FUNCTION__, tag);
return;
}
@@ -938,7 +930,8 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
rq->tag = -1;
if (unlikely(bqt->tag_index[tag] == NULL))
- printk("tag %d is missing\n", tag);
+ printk(KERN_ERR "%s: tag %d is missing\n",
+ __FUNCTION__, tag);
bqt->tag_index[tag] = NULL;
bqt->busy--;
@@ -967,24 +960,20 @@ EXPORT_SYMBOL(blk_queue_end_tag);
int blk_queue_start_tag(request_queue_t *q, struct request *rq)
{
struct blk_queue_tag *bqt = q->queue_tags;
- unsigned long *map = bqt->tag_map;
- int tag = 0;
+ int tag;
if (unlikely((rq->flags & REQ_QUEUED))) {
printk(KERN_ERR
- "request %p for device [%s] already tagged %d",
- rq, rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
+ "%s: request %p for device [%s] already tagged %d",
+ __FUNCTION__, rq,
+ rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
BUG();
}
- for (map = bqt->tag_map; *map == -1UL; map++) {
- tag += BLK_TAGS_PER_LONG;
-
- if (tag >= bqt->max_depth)
- return 1;
- }
+ tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
+ if (tag >= bqt->max_depth)
+ return 1;
- tag += ffz(*map);
__set_bit(tag, bqt->tag_map);
rq->flags |= REQ_QUEUED;
@@ -1020,7 +1009,8 @@ void blk_queue_invalidate_tags(request_queue_t *q)
rq = list_entry_rq(tmp);
if (rq->tag == -1) {
- printk("bad tag found on list\n");
+ printk(KERN_ERR
+ "%s: bad tag found on list\n", __FUNCTION__);
list_del_init(&rq->queuelist);
rq->flags &= ~REQ_QUEUED;
} else
@@ -1148,7 +1138,7 @@ new_hw_segment:
}
-int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
struct bio *nxt)
{
if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
@@ -1169,9 +1159,7 @@ int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
return 0;
}
-EXPORT_SYMBOL(blk_phys_contig_segment);
-
-int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
struct bio *nxt)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
@@ -1187,8 +1175,6 @@ int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
return 1;
}
-EXPORT_SYMBOL(blk_hw_contig_segment);
-
/*
* map a request to scatterlist, return number of sg entries setup. Caller
* must make sure sg can hold rq->nr_phys_segments entries
@@ -1358,8 +1344,8 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,
static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
struct request *next)
{
- int total_phys_segments = req->nr_phys_segments +next->nr_phys_segments;
- int total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
+ int total_phys_segments;
+ int total_hw_segments;
/*
* First check if the either of the requests are re-queued
@@ -1369,7 +1355,7 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
return 0;
/*
- * Will it become to large?
+ * Will it become too large?
*/
if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
return 0;
@@ -1450,7 +1436,7 @@ EXPORT_SYMBOL(blk_remove_plug);
*/
void __generic_unplug_device(request_queue_t *q)
{
- if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
+ if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)))
return;
if (!blk_remove_plug(q))
@@ -1645,7 +1631,8 @@ static int blk_init_free_list(request_queue_t *q)
init_waitqueue_head(&rl->wait[WRITE]);
init_waitqueue_head(&rl->drain);
- rl->rq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, request_cachep);
+ rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
+ mempool_free_slab, request_cachep, q->node);
if (!rl->rq_pool)
return -ENOMEM;
@@ -1657,8 +1644,15 @@ static int __make_request(request_queue_t *, struct bio *);
request_queue_t *blk_alloc_queue(int gfp_mask)
{
- request_queue_t *q = kmem_cache_alloc(requestq_cachep, gfp_mask);
+ return blk_alloc_queue_node(gfp_mask, -1);
+}
+EXPORT_SYMBOL(blk_alloc_queue);
+request_queue_t *blk_alloc_queue_node(int gfp_mask, int node_id)
+{
+ request_queue_t *q;
+
+ q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
if (!q)
return NULL;
@@ -1671,8 +1665,7 @@ request_queue_t *blk_alloc_queue(int gfp_mask)
return q;
}
-
-EXPORT_SYMBOL(blk_alloc_queue);
+EXPORT_SYMBOL(blk_alloc_queue_node);
/**
* blk_init_queue - prepare a request queue for use with a block device
@@ -1705,13 +1698,22 @@ EXPORT_SYMBOL(blk_alloc_queue);
* blk_init_queue() must be paired with a blk_cleanup_queue() call
* when the block device is deactivated (such as at module unload).
**/
+
request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
- request_queue_t *q = blk_alloc_queue(GFP_KERNEL);
+ return blk_init_queue_node(rfn, lock, -1);
+}
+EXPORT_SYMBOL(blk_init_queue);
+
+request_queue_t *
+blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
+{
+ request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
if (!q)
return NULL;
+ q->node = node_id;
if (blk_init_free_list(q))
goto out_init;
@@ -1754,12 +1756,11 @@ out_init:
kmem_cache_free(requestq_cachep, q);
return NULL;
}
-
-EXPORT_SYMBOL(blk_init_queue);
+EXPORT_SYMBOL(blk_init_queue_node);
int blk_get_queue(request_queue_t *q)
{
- if (!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+ if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
atomic_inc(&q->refcnt);
return 0;
}
@@ -1821,7 +1822,7 @@ static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
* is the behaviour we want though - once it gets a wakeup it should be given
* a nice run.
*/
-void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
+static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
{
if (!ioc || ioc_batching(q, ioc))
return;
@@ -1838,7 +1839,6 @@ static void __freed_request(request_queue_t *q, int rw)
clear_queue_congested(q, rw);
if (rl->count[rw] + 1 <= q->nr_requests) {
- smp_mb();
if (waitqueue_active(&rl->wait[rw]))
wake_up(&rl->wait[rw]);
@@ -1966,7 +1966,6 @@ static struct request *get_request_wait(request_queue_t *q, int rw)
DEFINE_WAIT(wait);
struct request *rq;
- generic_unplug_device(q);
do {
struct request_list *rl = &q->rq;
@@ -1978,6 +1977,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw)
if (!rq) {
struct io_context *ioc;
+ generic_unplug_device(q);
io_schedule();
/*
@@ -2038,7 +2038,6 @@ EXPORT_SYMBOL(blk_requeue_request);
* @rq: request to be inserted
* @at_head: insert request at head or tail of queue
* @data: private data
- * @reinsert: true if request it a reinsertion of previously processed one
*
* Description:
* Many block devices need to execute commands asynchronously, so they don't
@@ -2053,8 +2052,9 @@ EXPORT_SYMBOL(blk_requeue_request);
* host that is unable to accept a particular command.
*/
void blk_insert_request(request_queue_t *q, struct request *rq,
- int at_head, void *data, int reinsert)
+ int at_head, void *data)
{
+ int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
unsigned long flags;
/*
@@ -2071,20 +2071,12 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
/*
* If command is tagged, release the tag
*/
- if (reinsert)
- blk_requeue_request(q, rq);
- else {
- int where = ELEVATOR_INSERT_BACK;
-
- if (at_head)
- where = ELEVATOR_INSERT_FRONT;
+ if (blk_rq_tagged(rq))
+ blk_queue_end_tag(q, rq);
- if (blk_rq_tagged(rq))
- blk_queue_end_tag(q, rq);
+ drive_stat_acct(rq, rq->nr_sectors, 1);
+ __elv_add_request(q, rq, where, 0);
- drive_stat_acct(rq, rq->nr_sectors, 1);
- __elv_add_request(q, rq, where, 0);
- }
if (blk_queue_plugged(q))
__generic_unplug_device(q);
else
@@ -2259,45 +2251,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
EXPORT_SYMBOL(blkdev_issue_flush);
-/**
- * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
- * @q: device queue
- * @disk: gendisk
- * @error_sector: error offset
- *
- * Description:
- * Devices understanding the SCSI command set, can use this function as
- * a helper for issuing a cache flush. Note: driver is required to store
- * the error offset (in case of error flushing) in ->sector of struct
- * request.
- */
-int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
- int ret;
-
- rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
- rq->sector = 0;
- memset(rq->cmd, 0, sizeof(rq->cmd));
- rq->cmd[0] = 0x35;
- rq->cmd_len = 12;
- rq->data = NULL;
- rq->data_len = 0;
- rq->timeout = 60 * HZ;
-
- ret = blk_execute_rq(q, disk, rq);
-
- if (ret && error_sector)
- *error_sector = rq->sector;
-
- blk_put_request(rq);
- return ret;
-}
-
-EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
-
-void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
+static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
{
int rw = rq_data_dir(rq);
@@ -2556,16 +2510,6 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq)
EXPORT_SYMBOL(blk_attempt_remerge);
-/*
- * Non-locking blk_attempt_remerge variant.
- */
-void __blk_attempt_remerge(request_queue_t *q, struct request *rq)
-{
- attempt_back_merge(q, rq);
-}
-
-EXPORT_SYMBOL(__blk_attempt_remerge);
-
static int __make_request(request_queue_t *q, struct bio *bio)
{
struct request *req, *freereq = NULL;
@@ -2589,7 +2533,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
spin_lock_prefetch(q->queue_lock);
barrier = bio_barrier(bio);
- if (barrier && (q->ordered == QUEUE_ORDERED_NONE)) {
+ if (unlikely(barrier) && (q->ordered == QUEUE_ORDERED_NONE)) {
err = -EOPNOTSUPP;
goto end_io;
}
@@ -2690,7 +2634,7 @@ get_rq:
/*
* REQ_BARRIER implies no merging, but lets make it explicit
*/
- if (barrier)
+ if (unlikely(barrier))
req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
req->errors = 0;
@@ -2814,7 +2758,7 @@ static inline void block_wait_queue_running(request_queue_t *q)
{
DEFINE_WAIT(wait);
- while (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) {
+ while (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) {
struct request_list *rl = &q->rq;
prepare_to_wait_exclusive(&rl->drain, &wait,
@@ -2923,7 +2867,7 @@ end_io:
goto end_io;
}
- if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))
+ if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
goto end_io;
block_wait_queue_running(q);
@@ -2976,7 +2920,7 @@ void submit_bio(int rw, struct bio *bio)
EXPORT_SYMBOL(submit_bio);
-void blk_recalc_rq_segments(struct request *rq)
+static void blk_recalc_rq_segments(struct request *rq)
{
struct bio *bio, *prevbio = NULL;
int nr_phys_segs, nr_hw_segs;
@@ -3018,7 +2962,7 @@ void blk_recalc_rq_segments(struct request *rq)
rq->nr_hw_segments = nr_hw_segs;
}
-void blk_recalc_rq_sectors(struct request *rq, int nsect)
+static void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
if (blk_fs_request(rq)) {
rq->hard_sector += nsect;
@@ -3582,7 +3526,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
q = container_of(kobj, struct request_queue, kobj);
if (!entry->show)
- return 0;
+ return -EIO;
return entry->show(q, page);
}
@@ -3596,7 +3540,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
q = container_of(kobj, struct request_queue, kobj);
if (!entry->store)
- return -EINVAL;
+ return -EIO;
return entry->store(q, page, length);
}
@@ -3606,7 +3550,7 @@ static struct sysfs_ops queue_sysfs_ops = {
.store = queue_attr_store,
};
-struct kobj_type queue_ktype = {
+static struct kobj_type queue_ktype = {
.sysfs_ops = &queue_sysfs_ops,
.default_attrs = default_attrs,
};
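
The tagging rework drops real_max_depth entirely: the bitmap is sized to exactly ALIGN(depth, BITS_PER_LONG) bits with no pre-set guard bits, so tag allocation bounds the search by max_depth instead of poisoning the tail of the map. The allocate/lookup pair, condensed from the hunks above:

	/* map sized in whole longs; bits beyond max_depth are simply
	 * never scanned, so they need no initialization */
	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);

	/* blk_queue_start_tag: first clear bit wins, bounded by max_depth */
	tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
	if (tag >= bqt->max_depth)
		return 1;		/* queue full, no tag available */
	__set_bit(tag, bqt->tag_map);
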
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 6f011d0d8e97..b35e08876dd4 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -472,17 +472,11 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
*/
static void loop_add_bio(struct loop_device *lo, struct bio *bio)
{
- unsigned long flags;
-
- spin_lock_irqsave(&lo->lo_lock, flags);
if (lo->lo_biotail) {
lo->lo_biotail->bi_next = bio;
lo->lo_biotail = bio;
} else
lo->lo_bio = lo->lo_biotail = bio;
- spin_unlock_irqrestore(&lo->lo_lock, flags);
-
- up(&lo->lo_bh_mutex);
}
/*
@@ -492,14 +486,12 @@ static struct bio *loop_get_bio(struct loop_device *lo)
{
struct bio *bio;
- spin_lock_irq(&lo->lo_lock);
if ((bio = lo->lo_bio)) {
if (bio == lo->lo_biotail)
lo->lo_biotail = NULL;
lo->lo_bio = bio->bi_next;
bio->bi_next = NULL;
}
- spin_unlock_irq(&lo->lo_lock);
return bio;
}
@@ -509,35 +501,28 @@ static int loop_make_request(request_queue_t *q, struct bio *old_bio)
struct loop_device *lo = q->queuedata;
int rw = bio_rw(old_bio);
- if (!lo)
- goto out;
+ if (rw == READA)
+ rw = READ;
+
+ BUG_ON(!lo || (rw != READ && rw != WRITE));
spin_lock_irq(&lo->lo_lock);
if (lo->lo_state != Lo_bound)
- goto inactive;
- atomic_inc(&lo->lo_pending);
- spin_unlock_irq(&lo->lo_lock);
-
- if (rw == WRITE) {
- if (lo->lo_flags & LO_FLAGS_READ_ONLY)
- goto err;
- } else if (rw == READA) {
- rw = READ;
- } else if (rw != READ) {
- printk(KERN_ERR "loop: unknown command (%x)\n", rw);
- goto err;
- }
+ goto out;
+ if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY)))
+ goto out;
+ lo->lo_pending++;
loop_add_bio(lo, old_bio);
+ spin_unlock_irq(&lo->lo_lock);
+ up(&lo->lo_bh_mutex);
return 0;
-err:
- if (atomic_dec_and_test(&lo->lo_pending))
- up(&lo->lo_bh_mutex);
+
out:
+ if (lo->lo_pending == 0)
+ up(&lo->lo_bh_mutex);
+ spin_unlock_irq(&lo->lo_lock);
bio_io_error(old_bio, old_bio->bi_size);
return 0;
-inactive:
- spin_unlock_irq(&lo->lo_lock);
- goto out;
}
/*
@@ -560,13 +545,11 @@ static void do_loop_switch(struct loop_device *, struct switch_request *);
static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
{
- int ret;
-
if (unlikely(!bio->bi_bdev)) {
do_loop_switch(lo, bio->bi_private);
bio_put(bio);
} else {
- ret = do_bio_filebacked(lo, bio);
+ int ret = do_bio_filebacked(lo, bio);
bio_endio(bio, bio->bi_size, ret);
}
}
@@ -594,7 +577,7 @@ static int loop_thread(void *data)
set_user_nice(current, -20);
lo->lo_state = Lo_bound;
- atomic_inc(&lo->lo_pending);
+ lo->lo_pending = 1;
/*
* up sem, we are running
@@ -602,26 +585,37 @@ static int loop_thread(void *data)
up(&lo->lo_sem);
for (;;) {
- down_interruptible(&lo->lo_bh_mutex);
+ int pending;
+
/*
- * could be upped because of tear-down, not because of
- * pending work
+ * interruptible just to not contribute to load avg
*/
- if (!atomic_read(&lo->lo_pending))
+ if (down_interruptible(&lo->lo_bh_mutex))
+ continue;
+
+ spin_lock_irq(&lo->lo_lock);
+
+ /*
+ * could be upped because of tear-down, not pending work
+ */
+ if (unlikely(!lo->lo_pending)) {
+ spin_unlock_irq(&lo->lo_lock);
break;
+ }
bio = loop_get_bio(lo);
- if (!bio) {
- printk("loop: missing bio\n");
- continue;
- }
+ lo->lo_pending--;
+ pending = lo->lo_pending;
+ spin_unlock_irq(&lo->lo_lock);
+
+ BUG_ON(!bio);
loop_handle_bio(lo, bio);
/*
* upped both for pending work and tear-down, lo_pending
* will hit zero then
*/
- if (atomic_dec_and_test(&lo->lo_pending))
+ if (unlikely(!pending))
break;
}
@@ -900,7 +894,8 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
spin_lock_irq(&lo->lo_lock);
lo->lo_state = Lo_rundown;
- if (atomic_dec_and_test(&lo->lo_pending))
+ lo->lo_pending--;
+ if (!lo->lo_pending)
up(&lo->lo_bh_mutex);
spin_unlock_irq(&lo->lo_lock);
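
In loop.c, lo_pending moves from an atomic_t to a plain int guarded by lo_lock, and the bio-list helpers drop their own locking in favour of the caller's. The submit path becomes, in outline:

	spin_lock_irq(&lo->lo_lock);
	if (lo->lo_state != Lo_bound)
		goto out;		/* error the bio; wake thread if idle */
	lo->lo_pending++;		/* plain int: lo_lock is held */
	loop_add_bio(lo, bio);		/* list ops rely on the caller's lock */
	spin_unlock_irq(&lo->lo_lock);
	up(&lo->lo_bh_mutex);		/* wake loop_thread outside the lock */
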
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 202a5a74ad37..fa49d62626ba 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -723,7 +723,7 @@ static int pd_special_command(struct pd_unit *disk,
rq.ref_count = 1;
rq.waiting = &wait;
rq.end_io = blk_end_sync_rq;
- blk_insert_request(disk->gd->queue, &rq, 0, func, 0);
+ blk_insert_request(disk->gd->queue, &rq, 0, func);
wait_for_completion(&wait);
rq.waiting = NULL;
if (rq.errors)
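
blk_insert_request() loses its fifth 'reinsert' argument throughout the tree (pd.c here, sx8.c below); callers that actually want reinsertion are expected to use blk_requeue_request() directly. The call-site conversion is mechanical:

	/* before: the trailing 0 meant "not a reinsertion" */
	blk_insert_request(disk->gd->queue, &rq, 0, func, 0);

	/* after */
	blk_insert_request(disk->gd->queue, &rq, 0, func);
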
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index dbeb107bb971..84d8e291ed96 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -222,7 +222,7 @@ static int pg_identify(struct pg *dev, int log);
static char pg_scratch[512]; /* scratch block buffer */
-static struct class_simple *pg_class;
+static struct class *pg_class;
/* kernel glue structures */
@@ -666,7 +666,7 @@ static int __init pg_init(void)
err = -1;
goto out;
}
- pg_class = class_simple_create(THIS_MODULE, "pg");
+ pg_class = class_create(THIS_MODULE, "pg");
if (IS_ERR(pg_class)) {
err = PTR_ERR(pg_class);
goto out_chrdev;
@@ -675,7 +675,7 @@ static int __init pg_init(void)
for (unit = 0; unit < PG_UNITS; unit++) {
struct pg *dev = &devices[unit];
if (dev->present) {
- class_simple_device_add(pg_class, MKDEV(major, unit),
+ class_device_create(pg_class, MKDEV(major, unit),
NULL, "pg%u", unit);
err = devfs_mk_cdev(MKDEV(major, unit),
S_IFCHR | S_IRUSR | S_IWUSR, "pg/%u",
@@ -688,8 +688,8 @@ static int __init pg_init(void)
goto out;
out_class:
- class_simple_device_remove(MKDEV(major, unit));
- class_simple_destroy(pg_class);
+ class_device_destroy(pg_class, MKDEV(major, unit));
+ class_destroy(pg_class);
out_chrdev:
unregister_chrdev(major, "pg");
out:
@@ -703,11 +703,11 @@ static void __exit pg_exit(void)
for (unit = 0; unit < PG_UNITS; unit++) {
struct pg *dev = &devices[unit];
if (dev->present) {
- class_simple_device_remove(MKDEV(major, unit));
+ class_device_destroy(pg_class, MKDEV(major, unit));
devfs_remove("pg/%u", unit);
}
}
- class_simple_destroy(pg_class);
+ class_destroy(pg_class);
devfs_remove("pg");
unregister_chrdev(major, name);
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 8fbd6922fe0d..5fe8ee86f095 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -242,7 +242,7 @@ static struct file_operations pt_fops = {
};
/* sysfs class support */
-static struct class_simple *pt_class;
+static struct class *pt_class;
static inline int status_reg(struct pi_adapter *pi)
{
@@ -963,7 +963,7 @@ static int __init pt_init(void)
err = -1;
goto out;
}
- pt_class = class_simple_create(THIS_MODULE, "pt");
+ pt_class = class_create(THIS_MODULE, "pt");
if (IS_ERR(pt_class)) {
err = PTR_ERR(pt_class);
goto out_chrdev;
@@ -972,29 +972,29 @@ static int __init pt_init(void)
devfs_mk_dir("pt");
for (unit = 0; unit < PT_UNITS; unit++)
if (pt[unit].present) {
- class_simple_device_add(pt_class, MKDEV(major, unit),
+ class_device_create(pt_class, MKDEV(major, unit),
NULL, "pt%d", unit);
err = devfs_mk_cdev(MKDEV(major, unit),
S_IFCHR | S_IRUSR | S_IWUSR,
"pt/%d", unit);
if (err) {
- class_simple_device_remove(MKDEV(major, unit));
+ class_device_destroy(pt_class, MKDEV(major, unit));
goto out_class;
}
- class_simple_device_add(pt_class, MKDEV(major, unit + 128),
+ class_device_create(pt_class, MKDEV(major, unit + 128),
NULL, "pt%dn", unit);
err = devfs_mk_cdev(MKDEV(major, unit + 128),
S_IFCHR | S_IRUSR | S_IWUSR,
"pt/%dn", unit);
if (err) {
- class_simple_device_remove(MKDEV(major, unit + 128));
+ class_device_destroy(pt_class, MKDEV(major, unit + 128));
goto out_class;
}
}
goto out;
out_class:
- class_simple_destroy(pt_class);
+ class_destroy(pt_class);
out_chrdev:
unregister_chrdev(major, "pt");
out:
@@ -1006,12 +1006,12 @@ static void __exit pt_exit(void)
int unit;
for (unit = 0; unit < PT_UNITS; unit++)
if (pt[unit].present) {
- class_simple_device_remove(MKDEV(major, unit));
+ class_device_destroy(pt_class, MKDEV(major, unit));
devfs_remove("pt/%d", unit);
- class_simple_device_remove(MKDEV(major, unit + 128));
+ class_device_destroy(pt_class, MKDEV(major, unit + 128));
devfs_remove("pt/%dn", unit);
}
- class_simple_destroy(pt_class);
+ class_destroy(pt_class);
devfs_remove("pt");
unregister_chrdev(major, name);
for (unit = 0; unit < PT_UNITS; unit++)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index bc56770bcc90..7b838342f0a3 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -467,14 +467,12 @@ static int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsign
* Queue a bio for processing by the low-level CD device. Must be called
* from process context.
*/
-static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio, int high_prio_read)
+static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
spin_lock(&pd->iosched.lock);
if (bio_data_dir(bio) == READ) {
pkt_add_list_last(bio, &pd->iosched.read_queue,
&pd->iosched.read_queue_tail);
- if (high_prio_read)
- pd->iosched.high_prio_read = 1;
} else {
pkt_add_list_last(bio, &pd->iosched.write_queue,
&pd->iosched.write_queue_tail);
@@ -490,15 +488,16 @@ static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio, int high_p
* requirements for CDRW drives:
* - A cache flush command must be inserted before a read request if the
* previous request was a write.
- * - Switching between reading and writing is slow, so don't it more often
+ * - Switching between reading and writing is slow, so don't do it more often
* than necessary.
+ * - Optimize for throughput at the expense of latency. This means that streaming
+ * writes will never be interrupted by a read, but if the drive has to seek
+ * before the next write, switch to reading instead if there are any pending
+ * read requests.
* - Set the read speed according to current usage pattern. When only reading
* from the device, it's best to use the highest possible read speed, but
* when switching often between reading and writing, it's better to have the
* same read and write speeds.
- * - Reads originating from user space should have higher priority than reads
- * originating from pkt_gather_data, because some process is usually waiting
- * on reads of the first kind.
*/
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{
@@ -512,21 +511,24 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
for (;;) {
struct bio *bio;
- int reads_queued, writes_queued, high_prio_read;
+ int reads_queued, writes_queued;
spin_lock(&pd->iosched.lock);
reads_queued = (pd->iosched.read_queue != NULL);
writes_queued = (pd->iosched.write_queue != NULL);
- if (!reads_queued)
- pd->iosched.high_prio_read = 0;
- high_prio_read = pd->iosched.high_prio_read;
spin_unlock(&pd->iosched.lock);
if (!reads_queued && !writes_queued)
break;
if (pd->iosched.writing) {
- if (high_prio_read || (!writes_queued && reads_queued)) {
+ int need_write_seek = 1;
+ spin_lock(&pd->iosched.lock);
+ bio = pd->iosched.write_queue;
+ spin_unlock(&pd->iosched.lock);
+ if (bio && (bio->bi_sector == pd->iosched.last_write))
+ need_write_seek = 0;
+ if (need_write_seek && reads_queued) {
if (atomic_read(&pd->cdrw.pending_bios) > 0) {
VPRINTK("pktcdvd: write, waiting\n");
break;
@@ -559,8 +561,10 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
if (bio_data_dir(bio) == READ)
pd->iosched.successive_reads += bio->bi_size >> 10;
- else
+ else {
pd->iosched.successive_reads = 0;
+ pd->iosched.last_write = bio->bi_sector + bio_sectors(bio);
+ }
if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
if (pd->read_speed == pd->write_speed) {
pd->read_speed = MAX_SPEED;
@@ -765,7 +769,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
atomic_inc(&pkt->io_wait);
bio->bi_rw = READ;
- pkt_queue_bio(pd, bio, 0);
+ pkt_queue_bio(pd, bio);
frames_read++;
}
@@ -1062,7 +1066,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
atomic_set(&pkt->io_wait, 1);
pkt->w_bio->bi_rw = WRITE;
- pkt_queue_bio(pd, pkt->w_bio, 0);
+ pkt_queue_bio(pd, pkt->w_bio);
}
static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
@@ -1247,8 +1251,7 @@ static int kcdrwd(void *foobar)
VPRINTK("kcdrwd: wake up\n");
/* make swsusp happy with our thread */
- if (current->flags & PF_FREEZE)
- refrigerator(PF_FREEZE);
+ try_to_freeze();
list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
if (!pkt->sleep_time)
@@ -2120,7 +2123,7 @@ static int pkt_make_request(request_queue_t *q, struct bio *bio)
cloned_bio->bi_private = psd;
cloned_bio->bi_end_io = pkt_end_io_read_cloned;
pd->stats.secs_r += bio->bi_size >> 9;
- pkt_queue_bio(pd, cloned_bio, 1);
+ pkt_queue_bio(pd, cloned_bio);
return 0;
}
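
With high_prio_read gone, pktcdvd's read/write switching is driven purely by seek cost: a queued write that starts exactly where the previous one ended (tracked in the new iosched.last_write) keeps streaming, and the drive only turns to reads when the next write would require a seek anyway. The heuristic, condensed from the hunk above:

	int need_write_seek = 1;

	spin_lock(&pd->iosched.lock);
	bio = pd->iosched.write_queue;
	spin_unlock(&pd->iosched.lock);
	if (bio && (bio->bi_sector == pd->iosched.last_write))
		need_write_seek = 0;	/* sequential: keep streaming writes */

	if (need_write_seek && reads_queued) {
		/* we must seek anyway, so drain pending reads first */
	}
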
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 797f5988c2b5..5ed3a6379452 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -614,7 +614,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
spin_unlock_irq(&host->lock);
DPRINTK("blk_insert_request, tag == %u\n", idx);
- blk_insert_request(host->oob_q, crq->rq, 1, crq, 0);
+ blk_insert_request(host->oob_q, crq->rq, 1, crq);
return 0;
@@ -653,7 +653,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
crq->msg_bucket = (u32) rc;
DPRINTK("blk_insert_request, tag == %u\n", idx);
- blk_insert_request(host->oob_q, crq->rq, 1, crq, 0);
+ blk_insert_request(host->oob_q, crq->rq, 1, crq);
return 0;
}
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index ce42889f98fb..685f061e69b2 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -8,13 +8,12 @@
* and is not licensed separately. See file COPYING for details.
*
* TODO (sorted by decreasing priority)
+ * -- Kill first_open (Al Viro fixed the block layer now)
* -- Do resets with usb_device_reset (needs a thread context, use khubd)
* -- set readonly flag for CDs, set removable flag for CF readers
* -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
- * -- support pphaneuf's SDDR-75 with two LUNs (also broken capacity...)
* -- special case some senses, e.g. 3a/0 -> no media present, reduce retries
* -- verify the 13 conditions and do bulk resets
- * -- normal pool of commands instead of cmdv[]?
* -- kill last_pipe and simply do two-state clearing on both pipes
* -- verify protocol (bulk) from USB descriptors (maybe...)
* -- highmem and sg
@@ -49,7 +48,14 @@
#define US_SC_SCSI 0x06 /* Transparent */
/*
+ * This many LUNs per USB device.
+ * Every one of them takes a host, see UB_MAX_HOSTS.
*/
+#define UB_MAX_LUNS 9
+
+/*
+ */
+
#define UB_MINORS_PER_MAJOR 8
#define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */
@@ -65,7 +71,7 @@ struct bulk_cb_wrap {
u32 Tag; /* unique per command id */
__le32 DataTransferLength; /* size of data */
u8 Flags; /* direction in bit 0 */
- u8 Lun; /* LUN normally 0 */
+ u8 Lun; /* LUN */
u8 Length; /* of the CDB */
u8 CDB[UB_MAX_CDB_SIZE]; /* max command */
};
@@ -168,6 +174,7 @@ struct ub_scsi_cmd {
unsigned int len; /* Requested length */
// struct scatterlist sgv[UB_MAX_REQ_SG];
+ struct ub_lun *lun;
void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
void *back;
};
@@ -252,25 +259,47 @@ struct ub_scsi_cmd_queue {
};
/*
- * The UB device instance.
+ * The block device instance (one per LUN).
+ */
+struct ub_lun {
+ struct ub_dev *udev;
+ struct list_head link;
+ struct gendisk *disk;
+ int id; /* Host index */
+ int num; /* LUN number */
+ char name[16];
+
+ int changed; /* Media was changed */
+ int removable;
+ int readonly;
+ int first_open; /* Kludge. See ub_bd_open. */
+
+ /* Use Ingo's mempool if or when we have more than one command. */
+ /*
+ * Currently we never need more than one command for the whole device.
+ * However, giving every LUN a command is a cheap and automatic way
+ * to enforce fairness between them.
+ */
+ int cmda[1];
+ struct ub_scsi_cmd cmdv[1];
+
+ struct ub_capacity capacity;
+};
+
+/*
+ * The USB device instance.
*/
struct ub_dev {
spinlock_t lock;
- int id; /* Number among ub's */
atomic_t poison; /* The USB device is disconnected */
int openc; /* protected by ub_lock! */
/* kref is too implicit for our taste */
unsigned int tagcnt;
- int changed; /* Media was changed */
- int removable;
- int readonly;
- int first_open; /* Kludge. See ub_bd_open. */
- char name[8];
+ char name[12];
struct usb_device *dev;
struct usb_interface *intf;
- struct ub_capacity capacity;
- struct gendisk *disk;
+ struct list_head luns;
unsigned int send_bulk_pipe; /* cached pipe values */
unsigned int recv_bulk_pipe;
@@ -279,10 +308,6 @@ struct ub_dev {
struct tasklet_struct tasklet;
- /* XXX Use Ingo's mempool (once we have more than one) */
- int cmda[1];
- struct ub_scsi_cmd cmdv[1];
-
struct ub_scsi_cmd_queue cmd_queue;
struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */
unsigned char top_sense[UB_SENSE_SIZE];
@@ -301,9 +326,9 @@ struct ub_dev {
/*
*/
static void ub_cleanup(struct ub_dev *sc);
-static int ub_bd_rq_fn_1(struct ub_dev *sc, struct request *rq);
-static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
- struct request *rq);
+static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq);
+static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
+ struct ub_scsi_cmd *cmd, struct request *rq);
static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
struct request *rq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
@@ -320,8 +345,10 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
int stalled_pipe);
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
-static int ub_sync_tur(struct ub_dev *sc);
-static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret);
+static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
+static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
+ struct ub_capacity *ret);
+static int ub_probe_lun(struct ub_dev *sc, int lnum);
/*
*/
@@ -342,6 +369,7 @@ MODULE_DEVICE_TABLE(usb, ub_usb_ids);
*/
#define UB_MAX_HOSTS 26
static char ub_hostv[UB_MAX_HOSTS];
+
static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */
/*
@@ -402,10 +430,12 @@ static void ub_cmdtr_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
}
}
-static ssize_t ub_diag_show(struct device *dev, char *page)
+static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr, char *page)
{
struct usb_interface *intf;
struct ub_dev *sc;
+ struct list_head *p;
+ struct ub_lun *lun;
int cnt;
unsigned long flags;
int nc, nh;
@@ -421,9 +451,15 @@ static ssize_t ub_diag_show(struct device *dev, char *page)
spin_lock_irqsave(&sc->lock, flags);
cnt += sprintf(page + cnt,
- "qlen %d qmax %d changed %d removable %d readonly %d\n",
- sc->cmd_queue.qlen, sc->cmd_queue.qmax,
- sc->changed, sc->removable, sc->readonly);
+ "qlen %d qmax %d\n",
+ sc->cmd_queue.qlen, sc->cmd_queue.qmax);
+
+ list_for_each (p, &sc->luns) {
+ lun = list_entry(p, struct ub_lun, link);
+ cnt += sprintf(page + cnt,
+ "lun %u changed %d removable %d readonly %d\n",
+ lun->num, lun->changed, lun->removable, lun->readonly);
+ }
if ((nc = sc->tr.cur + 1) == SCMD_TRACE_SZ) nc = 0;
for (j = 0; j < SCMD_TRACE_SZ; j++) {
@@ -523,53 +559,63 @@ static void ub_put(struct ub_dev *sc)
*/
static void ub_cleanup(struct ub_dev *sc)
{
+ struct list_head *p;
+ struct ub_lun *lun;
request_queue_t *q;
- /* I don't think queue can be NULL. But... Stolen from sx8.c */
- if ((q = sc->disk->queue) != NULL)
- blk_cleanup_queue(q);
+ while (!list_empty(&sc->luns)) {
+ p = sc->luns.next;
+ lun = list_entry(p, struct ub_lun, link);
+ list_del(p);
- /*
- * If we zero disk->private_data BEFORE put_disk, we have to check
- * for NULL all over the place in open, release, check_media and
- * revalidate, because the block level semaphore is well inside the
- * put_disk. But we cannot zero after the call, because *disk is gone.
- * The sd.c is blatantly racy in this area.
- */
- /* disk->private_data = NULL; */
- put_disk(sc->disk);
- sc->disk = NULL;
+ /* I don't think queue can be NULL. But... Stolen from sx8.c */
+ if ((q = lun->disk->queue) != NULL)
+ blk_cleanup_queue(q);
+ /*
+ * If we zero disk->private_data BEFORE put_disk, we have
+ * to check for NULL all over the place in open, release,
+ * check_media and revalidate, because the block level
+ * semaphore is well inside the put_disk.
+ * But we cannot zero after the call, because *disk is gone.
+ * The sd.c is blatantly racy in this area.
+ */
+ /* disk->private_data = NULL; */
+ put_disk(lun->disk);
+ lun->disk = NULL;
+
+ ub_id_put(lun->id);
+ kfree(lun);
+ }
- ub_id_put(sc->id);
kfree(sc);
}
/*
* The "command allocator".
*/
-static struct ub_scsi_cmd *ub_get_cmd(struct ub_dev *sc)
+static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
{
struct ub_scsi_cmd *ret;
- if (sc->cmda[0])
+ if (lun->cmda[0])
return NULL;
- ret = &sc->cmdv[0];
- sc->cmda[0] = 1;
+ ret = &lun->cmdv[0];
+ lun->cmda[0] = 1;
return ret;
}
-static void ub_put_cmd(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
{
- if (cmd != &sc->cmdv[0]) {
+ if (cmd != &lun->cmdv[0]) {
printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
- sc->name, cmd);
+ lun->name, cmd);
return;
}
- if (!sc->cmda[0]) {
- printk(KERN_WARNING "%s: releasing a free cmd\n", sc->name);
+ if (!lun->cmda[0]) {
+ printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
return;
}
- sc->cmda[0] = 0;
+ lun->cmda[0] = 0;
}
/*
@@ -630,29 +676,30 @@ static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
static void ub_bd_rq_fn(request_queue_t *q)
{
- struct ub_dev *sc = q->queuedata;
+ struct ub_lun *lun = q->queuedata;
struct request *rq;
while ((rq = elv_next_request(q)) != NULL) {
- if (ub_bd_rq_fn_1(sc, rq) != 0) {
+ if (ub_bd_rq_fn_1(lun, rq) != 0) {
blk_stop_queue(q);
break;
}
}
}
-static int ub_bd_rq_fn_1(struct ub_dev *sc, struct request *rq)
+static int ub_bd_rq_fn_1(struct ub_lun *lun, struct request *rq)
{
+ struct ub_dev *sc = lun->udev;
struct ub_scsi_cmd *cmd;
int rc;
- if (atomic_read(&sc->poison) || sc->changed) {
+ if (atomic_read(&sc->poison) || lun->changed) {
blkdev_dequeue_request(rq);
ub_end_rq(rq, 0);
return 0;
}
- if ((cmd = ub_get_cmd(sc)) == NULL)
+ if ((cmd = ub_get_cmd(lun)) == NULL)
return -1;
memset(cmd, 0, sizeof(struct ub_scsi_cmd));
@@ -661,32 +708,30 @@ static int ub_bd_rq_fn_1(struct ub_dev *sc, struct request *rq)
if (blk_pc_request(rq)) {
rc = ub_cmd_build_packet(sc, cmd, rq);
} else {
- rc = ub_cmd_build_block(sc, cmd, rq);
+ rc = ub_cmd_build_block(sc, lun, cmd, rq);
}
if (rc != 0) {
- ub_put_cmd(sc, cmd);
+ ub_put_cmd(lun, cmd);
ub_end_rq(rq, 0);
- blk_start_queue(sc->disk->queue);
return 0;
}
-
cmd->state = UB_CMDST_INIT;
+ cmd->lun = lun;
cmd->done = ub_rw_cmd_done;
cmd->back = rq;
cmd->tag = sc->tagcnt++;
if ((rc = ub_submit_scsi(sc, cmd)) != 0) {
- ub_put_cmd(sc, cmd);
+ ub_put_cmd(lun, cmd);
ub_end_rq(rq, 0);
- blk_start_queue(sc->disk->queue);
return 0;
}
return 0;
}
-static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
- struct request *rq)
+static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
+ struct ub_scsi_cmd *cmd, struct request *rq)
{
int ub_dir;
#if 0 /* We use rq->buffer for now */
@@ -707,7 +752,7 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
sg = &cmd->sgv[0];
n_elem = blk_rq_map_sg(q, rq, sg);
if (n_elem <= 0) {
- ub_put_cmd(sc, cmd);
+ ub_put_cmd(lun, cmd);
ub_end_rq(rq, 0);
blk_start_queue(q);
return 0; /* request with no s/g entries? */
@@ -716,7 +761,7 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
if (n_elem != 1) { /* Paranoia */
printk(KERN_WARNING "%s: request with %d segments\n",
sc->name, n_elem);
- ub_put_cmd(sc, cmd);
+ ub_put_cmd(lun, cmd);
ub_end_rq(rq, 0);
blk_start_queue(q);
return 0;
@@ -748,8 +793,8 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
* The call to blk_queue_hardsect_size() guarantees that request
* is aligned, but it is given in terms of 512 byte units, always.
*/
- block = rq->sector >> sc->capacity.bshift;
- nblks = rq->nr_sectors >> sc->capacity.bshift;
+ block = rq->sector >> lun->capacity.bshift;
+ nblks = rq->nr_sectors >> lun->capacity.bshift;
cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10;
/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
@@ -803,7 +848,8 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct request *rq = cmd->back;
- struct gendisk *disk = sc->disk;
+ struct ub_lun *lun = cmd->lun;
+ struct gendisk *disk = lun->disk;
request_queue_t *q = disk->queue;
int uptodate;
@@ -818,7 +864,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
else
uptodate = 0;
- ub_put_cmd(sc, cmd);
+ ub_put_cmd(lun, cmd);
ub_end_rq(rq, uptodate);
blk_start_queue(q);
}
@@ -887,7 +933,7 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
bcb->Tag = cmd->tag; /* Endianness is not important */
bcb->DataTransferLength = cpu_to_le32(cmd->len);
bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
- bcb->Lun = 0; /* No multi-LUN yet */
+ bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
bcb->Length = cmd->cdb_len;
/* copy the command payload */
@@ -1002,9 +1048,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
* The control pipe clears itself - nothing to do.
* XXX Might try to reset the device here and retry.
*/
- printk(KERN_NOTICE "%s: "
- "stall on control pipe for device %u\n",
- sc->name, sc->dev->devnum);
+ printk(KERN_NOTICE "%s: stall on control pipe\n",
+ sc->name);
goto Bad_End;
}
@@ -1025,9 +1070,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
* The control pipe clears itself - nothing to do.
* XXX Might try to reset the device here and retry.
*/
- printk(KERN_NOTICE "%s: "
- "stall on control pipe for device %u\n",
- sc->name, sc->dev->devnum);
+ printk(KERN_NOTICE "%s: stall on control pipe\n",
+ sc->name);
goto Bad_End;
}
@@ -1046,9 +1090,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
if (rc != 0) {
printk(KERN_NOTICE "%s: "
- "unable to submit clear for device %u"
- " (code %d)\n",
- sc->name, sc->dev->devnum, rc);
+ "unable to submit clear (%d)\n",
+ sc->name, rc);
/*
* This is typically ENOMEM or some other such shit.
* Retrying is pointless. Just do Bad End on it...
@@ -1107,9 +1150,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
if (rc != 0) {
printk(KERN_NOTICE "%s: "
- "unable to submit clear for device %u"
- " (code %d)\n",
- sc->name, sc->dev->devnum, rc);
+ "unable to submit clear (%d)\n",
+ sc->name, rc);
/*
* This is typically ENOMEM or some other such shit.
* Retrying is pointless. Just do Bad End on it...
@@ -1140,9 +1182,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
if (rc != 0) {
printk(KERN_NOTICE "%s: "
- "unable to submit clear for device %u"
- " (code %d)\n",
- sc->name, sc->dev->devnum, rc);
+ "unable to submit clear (%d)\n",
+ sc->name, rc);
/*
* This is typically ENOMEM or some other such shit.
* Retrying is pointless. Just do Bad End on it...
@@ -1164,9 +1205,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
* encounter such a thing, try to read the CSW again.
*/
if (++cmd->stat_count >= 4) {
- printk(KERN_NOTICE "%s: "
- "unable to get CSW on device %u\n",
- sc->name, sc->dev->devnum);
+ printk(KERN_NOTICE "%s: unable to get CSW\n",
+ sc->name);
goto Bad_End;
}
__ub_state_stat(sc, cmd);
@@ -1207,10 +1247,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
*/
if (++cmd->stat_count >= 4) {
printk(KERN_NOTICE "%s: "
- "tag mismatch orig 0x%x reply 0x%x "
- "on device %u\n",
- sc->name, cmd->tag, bcs->Tag,
- sc->dev->devnum);
+ "tag mismatch orig 0x%x reply 0x%x\n",
+ sc->name, cmd->tag, bcs->Tag);
goto Bad_End;
}
__ub_state_stat(sc, cmd);
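The tag checked above travels back in the Command Status Wrapper; a sketch of
the 13-byte CSW per the Bulk-Only spec (struct name illustrative):

	struct bulk_cs_wrap_sketch {
		__le32	Signature;	/* 'USBS' */
		__u32	Tag;		/* must match the Tag of the CBW */
		__le32	Residue;	/* bytes not transferred */
		__u8	Status;		/* 0 = pass, 1 = fail, 2 = phase error */
	};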
@@ -1244,8 +1282,8 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
} else {
printk(KERN_WARNING "%s: "
- "wrong command state %d on device %u\n",
- sc->name, cmd->state, sc->dev->devnum);
+ "wrong command state %d\n",
+ sc->name, cmd->state);
goto Bad_End;
}
return;
@@ -1288,7 +1326,6 @@ static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
/* XXX Clear stalls */
- printk("%s: CSW #%d submit failed (%d)\n", sc->name, cmd->tag, rc); /* P3 */
ub_complete(&sc->work_done);
ub_state_done(sc, cmd, rc);
return;
@@ -1333,6 +1370,7 @@ static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
scmd->state = UB_CMDST_INIT;
scmd->data = sc->top_sense;
scmd->len = UB_SENSE_SIZE;
+ scmd->lun = cmd->lun;
scmd->done = ub_top_sense_done;
scmd->back = cmd;
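The auto-sense command built here is a plain 6-byte REQUEST SENSE; a sketch of
how such a CDB is typically filled (the hunk's context elides the driver's own
lines doing this):

	memset(scmd->cdb, 0, sizeof(scmd->cdb));
	scmd->cdb[0] = REQUEST_SENSE;		/* opcode 0x03 */
	scmd->cdb[4] = UB_SENSE_SIZE;		/* allocation length, byte 4 */
	scmd->cdb_len = 6;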
@@ -1411,14 +1449,14 @@ static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
}
if (cmd != scmd->back) {
printk(KERN_WARNING "%s: "
- "sense done for wrong command 0x%x on device %u\n",
- sc->name, cmd->tag, sc->dev->devnum);
+ "sense done for wrong command 0x%x\n",
+ sc->name, cmd->tag);
return;
}
if (cmd->state != UB_CMDST_SENSE) {
printk(KERN_WARNING "%s: "
- "sense done with bad cmd state %d on device %u\n",
- sc->name, cmd->state, sc->dev->devnum);
+ "sense done with bad cmd state %d\n",
+ sc->name, cmd->state);
return;
}
@@ -1429,68 +1467,32 @@ static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
ub_scsi_urb_compl(sc, cmd);
}
-#if 0
-/* Determine what the maximum LUN supported is */
-int usb_stor_Bulk_max_lun(struct us_data *us)
-{
- int result;
-
- /* issue the command */
- result = usb_stor_control_msg(us, us->recv_ctrl_pipe,
- US_BULK_GET_MAX_LUN,
- USB_DIR_IN | USB_TYPE_CLASS |
- USB_RECIP_INTERFACE,
- 0, us->ifnum, us->iobuf, 1, HZ);
-
- /*
- * Some devices (i.e. Iomega Zip100) need this -- apparently
- * the bulk pipes get STALLed when the GetMaxLUN request is
- * processed. This is, in theory, harmless to all other devices
- * (regardless of if they stall or not).
- */
- if (result < 0) {
- usb_stor_clear_halt(us, us->recv_bulk_pipe);
- usb_stor_clear_halt(us, us->send_bulk_pipe);
- }
-
- US_DEBUGP("GetMaxLUN command result is %d, data is %d\n",
- result, us->iobuf[0]);
-
- /* if we have a successful request, return the result */
- if (result == 1)
- return us->iobuf[0];
-
- /* return the default -- no LUNs */
- return 0;
-}
-#endif
-
/*
* This is called from a process context.
*/
-static void ub_revalidate(struct ub_dev *sc)
+static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
{
- sc->readonly = 0; /* XXX Query this from the device */
+ lun->readonly = 0; /* XXX Query this from the device */
- sc->capacity.nsec = 0;
- sc->capacity.bsize = 512;
- sc->capacity.bshift = 0;
+ lun->capacity.nsec = 0;
+ lun->capacity.bsize = 512;
+ lun->capacity.bshift = 0;
- if (ub_sync_tur(sc) != 0)
+ if (ub_sync_tur(sc, lun) != 0)
return; /* Not ready */
- sc->changed = 0;
+ lun->changed = 0;
- if (ub_sync_read_cap(sc, &sc->capacity) != 0) {
+ if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
/*
* The retry here means something is wrong, either with the
* device, with the transport, or with our code.
* We keep this because sd.c has retries for capacity.
*/
- if (ub_sync_read_cap(sc, &sc->capacity) != 0) {
- sc->capacity.nsec = 0;
- sc->capacity.bsize = 512;
- sc->capacity.bshift = 0;
+ if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
+ lun->capacity.nsec = 0;
+ lun->capacity.bsize = 512;
+ lun->capacity.bshift = 0;
}
}
}
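For context, the capacity triple reset above normally comes from an 8-byte
READ CAPACITY(10) response: two big-endian 32-bit words, last LBA then block
length. A hedged sketch of the decode, with p standing in for the response
buffer and assuming a power-of-two block size as the driver requires:

	u32 last_lba = be32_to_cpu(*(__be32 *)(p + 0));
	u32 bsize    = be32_to_cpu(*(__be32 *)(p + 4));
	unsigned int bshift = 0;

	while ((512u << bshift) < bsize)
		bshift++;		/* 512->0, 1024->1, 2048->2, 4096->3 */
	lun->capacity.bsize  = bsize;
	lun->capacity.bshift = bshift;
	lun->capacity.nsec   = (last_lba + 1) << bshift; /* total 512-byte sectors */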
@@ -1503,12 +1505,15 @@ static void ub_revalidate(struct ub_dev *sc)
static int ub_bd_open(struct inode *inode, struct file *filp)
{
struct gendisk *disk = inode->i_bdev->bd_disk;
+ struct ub_lun *lun;
struct ub_dev *sc;
unsigned long flags;
int rc;
- if ((sc = disk->private_data) == NULL)
+ if ((lun = disk->private_data) == NULL)
return -ENXIO;
+ sc = lun->udev;
+
spin_lock_irqsave(&ub_lock, flags);
if (atomic_read(&sc->poison)) {
spin_unlock_irqrestore(&ub_lock, flags);
@@ -1529,15 +1534,15 @@ static int ub_bd_open(struct inode *inode, struct file *filp)
* The bottom line is, Al Viro says that we should not allow
* bdev->bd_invalidated to be set when doing add_disk no matter what.
*/
- if (sc->first_open) {
- if (sc->changed) {
- sc->first_open = 0;
+ if (lun->first_open) {
+ lun->first_open = 0;
+ if (lun->changed) {
rc = -ENOMEDIUM;
goto err_open;
}
}
- if (sc->removable || sc->readonly)
+ if (lun->removable || lun->readonly)
check_disk_change(inode->i_bdev);
/*
@@ -1545,12 +1550,12 @@ static int ub_bd_open(struct inode *inode, struct file *filp)
* under some pretty murky conditions (a failure of READ CAPACITY).
* We may need it one day.
*/
- if (sc->removable && sc->changed && !(filp->f_flags & O_NDELAY)) {
+ if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) {
rc = -ENOMEDIUM;
goto err_open;
}
- if (sc->readonly && (filp->f_mode & FMODE_WRITE)) {
+ if (lun->readonly && (filp->f_mode & FMODE_WRITE)) {
rc = -EROFS;
goto err_open;
}
@@ -1567,7 +1572,8 @@ err_open:
static int ub_bd_release(struct inode *inode, struct file *filp)
{
struct gendisk *disk = inode->i_bdev->bd_disk;
- struct ub_dev *sc = disk->private_data;
+ struct ub_lun *lun = disk->private_data;
+ struct ub_dev *sc = lun->udev;
ub_put(sc);
return 0;
@@ -1597,20 +1603,14 @@ static int ub_bd_ioctl(struct inode *inode, struct file *filp,
*/
static int ub_bd_revalidate(struct gendisk *disk)
{
- struct ub_dev *sc = disk->private_data;
-
- ub_revalidate(sc);
- /* This is pretty much a long term P3 */
- if (!atomic_read(&sc->poison)) { /* Cover sc->dev */
- printk(KERN_INFO "%s: device %u capacity nsec %ld bsize %u\n",
- sc->name, sc->dev->devnum,
- sc->capacity.nsec, sc->capacity.bsize);
- }
+ struct ub_lun *lun = disk->private_data;
+
+ ub_revalidate(lun->udev, lun);
/* XXX Support sector size switching like in sr.c */
- blk_queue_hardsect_size(disk->queue, sc->capacity.bsize);
- set_capacity(disk, sc->capacity.nsec);
- // set_disk_ro(sdkp->disk, sc->readonly);
+ blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
+ set_capacity(disk, lun->capacity.nsec);
+ // set_disk_ro(sdkp->disk, lun->readonly);
return 0;
}
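A worked example of the two calls above, for a hypothetical LUN with
2048-byte blocks holding 512 MiB of media (READ CAPACITY reported
last LBA 262143, block length 2048):

	lun->capacity.bshift = 2;			/* 2048 == 512 << 2 */
	lun->capacity.nsec   = (262143 + 1) << 2;	/* 1048576 sectors of 512 bytes */
	blk_queue_hardsect_size(disk->queue, 2048);
	set_capacity(disk, 1048576);			/* gendisk counts 512-byte units */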
@@ -1626,9 +1626,9 @@ static int ub_bd_revalidate(struct gendisk *disk)
*/
static int ub_bd_media_changed(struct gendisk *disk)
{
- struct ub_dev *sc = disk->private_data;
+ struct ub_lun *lun = disk->private_data;
- if (!sc->removable)
+ if (!lun->removable)
return 0;
/*
@@ -1640,12 +1640,12 @@ static int ub_bd_media_changed(struct gendisk *disk)
 * will fail, then the block layer discards the data. Since we never
* spin drives up, such devices simply cannot be used with ub anyway.
*/
- if (ub_sync_tur(sc) != 0) {
- sc->changed = 1;
+ if (ub_sync_tur(lun->udev, lun) != 0) {
+ lun->changed = 1;
return 1;
}
- return sc->changed;
+ return lun->changed;
}
static struct block_device_operations ub_bd_fops = {
@@ -1669,7 +1669,7 @@ static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
/*
* Test if the device has a check condition on it, synchronously.
*/
-static int ub_sync_tur(struct ub_dev *sc)
+static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
{
struct ub_scsi_cmd *cmd;
enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
@@ -1688,6 +1688,7 @@ static int ub_sync_tur(struct ub_dev *sc)
cmd->cdb_len = 6;
cmd->dir = UB_DIR_NONE;
cmd->state = UB_CMDST_INIT;
+ cmd->lun = lun; /* This may be NULL, but that's ok */
cmd->done = ub_probe_done;
cmd->back = &compl;
@@ -1718,7 +1719,8 @@ err_alloc:
/*
* Read the SCSI capacity synchronously (for probing).
*/
-static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret)
+static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
+ struct ub_capacity *ret)
{
struct ub_scsi_cmd *cmd;
char *p;
@@ -1743,6 +1745,7 @@ static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret)
cmd->state = UB_CMDST_INIT;
cmd->data = p;
cmd->len = 8;
+ cmd->lun = lun;
cmd->done = ub_probe_done;
cmd->back = &compl;
@@ -1812,6 +1815,90 @@ static void ub_probe_timeout(unsigned long arg)
}
/*
+ * Get the number of LUNs via the Bulk GetMaxLUN command.
+ */
+static int ub_sync_getmaxlun(struct ub_dev *sc)
+{
+ int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
+ unsigned char *p;
+ enum { ALLOC_SIZE = 1 };
+ struct usb_ctrlrequest *cr;
+ struct completion compl;
+ struct timer_list timer;
+ int nluns;
+ int rc;
+
+ init_completion(&compl);
+
+ rc = -ENOMEM;
+ if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
+ goto err_alloc;
+ *p = 55;	/* sentinel: spot a device that completes without writing data */
+
+ cr = &sc->work_cr;
+ cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ cr->bRequest = US_BULK_GET_MAX_LUN;
+ cr->wValue = cpu_to_le16(0);
+ cr->wIndex = cpu_to_le16(ifnum);
+ cr->wLength = cpu_to_le16(1);
+
+ usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
+ (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
+ sc->work_urb.transfer_flags = 0;
+ sc->work_urb.actual_length = 0;
+ sc->work_urb.error_count = 0;
+ sc->work_urb.status = 0;
+
+ if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
+ if (rc == -EPIPE) {
+ printk("%s: Stall at GetMaxLUN, using 1 LUN\n",
+ sc->name); /* P3 */
+ } else {
+ printk(KERN_WARNING
+ "%s: Unable to submit GetMaxLUN (%d)\n",
+ sc->name, rc);
+ }
+ goto err_submit;
+ }
+
+ init_timer(&timer);
+ timer.function = ub_probe_timeout;
+ timer.data = (unsigned long) &compl;
+ timer.expires = jiffies + UB_CTRL_TIMEOUT;
+ add_timer(&timer);
+
+ wait_for_completion(&compl);
+
+ del_timer_sync(&timer);
+ usb_kill_urb(&sc->work_urb);
+
+ if (sc->work_urb.actual_length != 1) {
+ printk("%s: GetMaxLUN returned %d bytes\n", sc->name,
+ sc->work_urb.actual_length); /* P3 */
+ nluns = 0;
+ } else {
+ if ((nluns = *p) == 55) {
+ nluns = 0;
+ } else {
+ /* GetMaxLUN returns the highest LUN number; the count is one more */
+ nluns += 1;
+ if (nluns > UB_MAX_LUNS)
+ nluns = UB_MAX_LUNS;
+ }
+ printk("%s: GetMaxLUN returned %d, using %d LUNs\n", sc->name,
+ *p, nluns); /* P3 */
+ }
+
+ kfree(p);
+ return nluns;
+
+err_submit:
+ kfree(p);
+err_alloc:
+ return rc;
+}
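To make the sentinel dance concrete: the single data byte of GetMaxLUN is the
highest LUN number, not a count, and the preloaded 55 only flags a device that
completed the transfer without writing the byte. A worked example for a
hypothetical four-LUN device:

	*p = 55;			/* sentinel before the URB is submitted */
	/* ... transfer completes with actual_length == 1 and *p == 3 ... */
	nluns = *p + 1;			/* highest LUN 3 -> 4 units */
	if (nluns > UB_MAX_LUNS)	/* clamp to the driver's limit */
		nluns = UB_MAX_LUNS;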
+
+/*
* Clear initial stalls.
*/
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
@@ -1897,8 +1984,8 @@ static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
}
if (ep_in == NULL || ep_out == NULL) {
- printk(KERN_NOTICE "%s: device %u failed endpoint check\n",
- sc->name, sc->dev->devnum);
+ printk(KERN_NOTICE "%s: failed endpoint check\n",
+ sc->name);
return -EIO;
}
@@ -1921,8 +2008,7 @@ static int ub_probe(struct usb_interface *intf,
const struct usb_device_id *dev_id)
{
struct ub_dev *sc;
- request_queue_t *q;
- struct gendisk *disk;
+ int nluns;
int rc;
int i;
@@ -1931,6 +2017,7 @@ static int ub_probe(struct usb_interface *intf,
goto err_core;
memset(sc, 0, sizeof(struct ub_dev));
spin_lock_init(&sc->lock);
+ INIT_LIST_HEAD(&sc->luns);
usb_init_urb(&sc->work_urb);
tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
atomic_set(&sc->poison, 0);
@@ -1942,19 +2029,16 @@ static int ub_probe(struct usb_interface *intf,
ub_init_completion(&sc->work_done);
sc->work_done.done = 1; /* A little yuk, but oh well... */
- rc = -ENOSR;
- if ((sc->id = ub_id_get()) == -1)
- goto err_id;
- snprintf(sc->name, 8, DRV_NAME "%c", sc->id + 'a');
-
sc->dev = interface_to_usbdev(intf);
sc->intf = intf;
// sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
-
usb_set_intfdata(intf, sc);
usb_get_dev(sc->dev);
// usb_get_intf(sc->intf); /* Do we need this? */
+ snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
+ sc->dev->bus->busnum, sc->dev->devnum);
+
/* XXX Verify that we can handle the device (from descriptors) */
ub_get_pipes(sc, sc->dev, intf);
@@ -1992,35 +2076,88 @@ static int ub_probe(struct usb_interface *intf,
 * In any case it's not our business how revalidation is implemented.
*/
for (i = 0; i < 3; i++) { /* Retries for benh's key */
- if ((rc = ub_sync_tur(sc)) <= 0) break;
+ if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
if (rc != 0x6) break;	/* 0x6: UNIT ATTENTION sense key */
msleep(10);
}
- sc->removable = 1; /* XXX Query this from the device */
- sc->changed = 1; /* ub_revalidate clears only */
- sc->first_open = 1;
+ nluns = 1;
+ for (i = 0; i < 3; i++) {
+ if ((rc = ub_sync_getmaxlun(sc)) < 0) {
+ /*
+ * Some devices (i.e. Iomega Zip100) need this --
+ * apparently the bulk pipes get STALLed when the
+ * GetMaxLUN request is processed.
+ * XXX I have a ZIP-100, verify it does this.
+ */
+ if (rc == -EPIPE) {
+ ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
+ ub_probe_clear_stall(sc, sc->send_bulk_pipe);
+ }
+ break;
+ }
+ if (rc != 0) {
+ nluns = rc;
+ break;
+ }
+ msleep(100);
+ }
- ub_revalidate(sc);
- /* This is pretty much a long term P3 */
- printk(KERN_INFO "%s: device %u capacity nsec %ld bsize %u\n",
- sc->name, sc->dev->devnum, sc->capacity.nsec, sc->capacity.bsize);
+ for (i = 0; i < nluns; i++) {
+ ub_probe_lun(sc, i);
+ }
+ return 0;
+
+ /* device_remove_file(&sc->intf->dev, &dev_attr_diag); */
+err_diag:
+ usb_set_intfdata(intf, NULL);
+ // usb_put_intf(sc->intf);
+ usb_put_dev(sc->dev);
+ kfree(sc);
+err_core:
+ return rc;
+}
+
+static int ub_probe_lun(struct ub_dev *sc, int lnum)
+{
+ struct ub_lun *lun;
+ request_queue_t *q;
+ struct gendisk *disk;
+ int rc;
+
+ rc = -ENOMEM;
+ if ((lun = kmalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
+ goto err_alloc;
+ memset(lun, 0, sizeof(struct ub_lun));
+ lun->num = lnum;
+
+ rc = -ENOSR;
+ if ((lun->id = ub_id_get()) == -1)
+ goto err_id;
+
+ lun->udev = sc;
+ list_add(&lun->link, &sc->luns);
+
+ snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
+ lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num); /* e.g. "uba(2.3.0)" */
+
+ lun->removable = 1; /* XXX Query this from the device */
+ lun->changed = 1; /* ub_revalidate clears only */
+ lun->first_open = 1;
+ ub_revalidate(sc, lun);
- /*
- * Just one disk per sc currently, but maybe more.
- */
rc = -ENOMEM;
if ((disk = alloc_disk(UB_MINORS_PER_MAJOR)) == NULL)
goto err_diskalloc;
- sc->disk = disk;
- sprintf(disk->disk_name, DRV_NAME "%c", sc->id + 'a');
- sprintf(disk->devfs_name, DEVFS_NAME "/%c", sc->id + 'a');
+ lun->disk = disk;
+ sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
+ sprintf(disk->devfs_name, DEVFS_NAME "/%c", lun->id + 'a');
disk->major = UB_MAJOR;
- disk->first_minor = sc->id * UB_MINORS_PER_MAJOR;
+ disk->first_minor = lun->id * UB_MINORS_PER_MAJOR;
disk->fops = &ub_bd_fops;
- disk->private_data = sc;
- disk->driverfs_dev = &intf->dev;
+ disk->private_data = lun;
+ disk->driverfs_dev = &sc->intf->dev; /* XXX Many to one ok? */
rc = -ENOMEM;
if ((q = blk_init_queue(ub_bd_rq_fn, &sc->lock)) == NULL)
@@ -2028,28 +2165,17 @@ static int ub_probe(struct usb_interface *intf,
disk->queue = q;
- // blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
+ blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
- // blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
+ blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */
blk_queue_max_sectors(q, UB_MAX_SECTORS);
- blk_queue_hardsect_size(q, sc->capacity.bsize);
-
- /*
- * This is a serious infraction, caused by a deficiency in the
- * USB sg interface (usb_sg_wait()). We plan to remove this once
- * we get mileage on the driver and can justify a change to USB API.
- * See blk_queue_bounce_limit() to understand this part.
- *
- * XXX And I still need to be aware of the DMA mask in the HC.
- */
- q->bounce_pfn = blk_max_low_pfn;
- q->bounce_gfp = GFP_NOIO;
+ blk_queue_hardsect_size(q, lun->capacity.bsize);
- q->queuedata = sc;
+ q->queuedata = lun;
- set_capacity(disk, sc->capacity.nsec);
- if (sc->removable)
+ set_capacity(disk, lun->capacity.nsec);
+ if (lun->removable)
disk->flags |= GENHD_FL_REMOVABLE;
add_disk(disk);
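The bounce-limit change above deserves a note: BLK_BOUNCE_HIGH expresses
through the block API the same constraint the removed open-coded lines set
directly, because usb_sg_wait() still cannot take highmem pages. Roughly:

	/* What blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH) amounts to here: */
	q->bounce_pfn = blk_max_low_pfn;	/* bounce pages above lowmem */
	q->bounce_gfp = GFP_NOIO;		/* allocate bounce pages without doing I/O */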
@@ -2059,22 +2185,20 @@ static int ub_probe(struct usb_interface *intf,
err_blkqinit:
put_disk(disk);
err_diskalloc:
- device_remove_file(&sc->intf->dev, &dev_attr_diag);
-err_diag:
- usb_set_intfdata(intf, NULL);
- // usb_put_intf(sc->intf);
- usb_put_dev(sc->dev);
- ub_id_put(sc->id);
+ list_del(&lun->link);
+ ub_id_put(lun->id);
err_id:
- kfree(sc);
-err_core:
+ kfree(lun);
+err_alloc:
return rc;
}
static void ub_disconnect(struct usb_interface *intf)
{
struct ub_dev *sc = usb_get_intfdata(intf);
- struct gendisk *disk = sc->disk;
+ struct list_head *p;
+ struct ub_lun *lun;
+ struct gendisk *disk;
unsigned long flags;
/*
@@ -2124,14 +2248,18 @@ static void ub_disconnect(struct usb_interface *intf)
/*
* Unregister the upper layer.
*/
- if (disk->flags & GENHD_FL_UP)
- del_gendisk(disk);
- /*
- * I wish I could do:
- * set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
- * As it is, we rely on our internal poisoning and let
- * the upper levels to spin furiously failing all the I/O.
- */
+ list_for_each (p, &sc->luns) {
+ lun = list_entry(p, struct ub_lun, link);
+ disk = lun->disk;
+ if (disk->flags & GENHD_FL_UP)
+ del_gendisk(disk);
+ /*
+ * I wish I could do:
+ * set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+ * As it is, we rely on our internal poisoning and let
+ * the upper levels spin furiously, failing all the I/O.
+ */
+ }
/*
* Taking a lock on a structure which is about to be freed
@@ -2182,8 +2310,8 @@ static int __init ub_init(void)
{
int rc;
- /* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu\n",
- sizeof(struct ub_scsi_cmd), sizeof(struct ub_dev));
+ /* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu ub_lun %zu\n",
+ sizeof(struct ub_scsi_cmd), sizeof(struct ub_dev), sizeof(struct ub_lun));
if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
goto err_regblkdev;