struct dm_dev_internal *dd;
list_for_each_entry (dd, l, list)
- if (dd->dm_dev.bdev->bd_dev == dev)
+ if (dd->dm_dev.bd_dev == dev)
return dd;
return NULL;
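Note: the lookup above now compares the cached bd_dev instead of dereferencing dd->dm_dev.bdev, so it keeps working while no block device is attached. This assumes the rest of the patch extends struct dm_dev with that field; a minimal sketch of the assumed layout (the existing struct plus the new member, not shown in this excerpt):

	struct dm_dev {
		struct block_device *bdev;	/* may be NULL until the device is opened */
		dev_t bd_dev;			/* assumed new field: cached device number */
		fmode_t mode;
		char name[16];
	};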
struct request_queue *q;
struct queue_limits *limits = data;
struct block_device *bdev = dev->bdev;
- sector_t dev_size =
- i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t dev_size;
unsigned short logical_block_size_sectors =
limits->logical_block_size >> SECTOR_SHIFT;
char b[BDEVNAME_SIZE];
+ if (!bdev)
+ return 0;
+
+ dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+
/*
* Some devices exist without request functions,
* such as loop devices not yet bound to backing files.
dd_new.dm_dev.mode |= new_mode;
dd_new.dm_dev.bdev = NULL;
+ if (!dd->dm_dev.bdev)
+ return 0;
+
r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
if (r)
return r;
* Add a device to the list, or just increment the usage count if
* it's already present.
*/
-int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
- struct dm_dev **result)
+int __dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+ struct dm_dev **result, bool open_may_fail)
{
int r;
dev_t uninitialized_var(dev);
dd->dm_dev.mode = mode;
dd->dm_dev.bdev = NULL;
- if ((r = open_dev(dd, dev, t->md))) {
+ r = open_dev(dd, dev, t->md);
+ if (r && !open_may_fail) {
kfree(dd);
return r;
}
format_dev_t(dd->dm_dev.name, dev);
+ dd->dm_dev.bd_dev = dev;
atomic_set(&dd->count, 0);
list_add(&dd->list, &t->devices);
+ } else if (!dd->dm_dev.bdev) {
+ dd->dm_dev.mode = mode;
+ r = open_dev(dd, dev, t->md);
+ if (r && !open_may_fail)
+ return r;
} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
r = upgrade_mode(dd, mode, t->md);
if (r)
*result = &dd->dm_dev;
return 0;
}
+EXPORT_SYMBOL_GPL(__dm_get_device);
+
+int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+ struct dm_dev **result)
+{
+ return __dm_get_device(ti, path, mode, result, false);
+}
EXPORT_SYMBOL(dm_get_device);
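For illustration, a hypothetical target constructor that wants to tolerate a missing underlying device would presumably call the new entry point with open_may_fail set; this is only a usage sketch, not part of the patch:

	/* Hypothetical caller: dev->bdev may legitimately be NULL afterwards. */
	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
	{
		struct dm_dev *dev;
		int r;

		r = __dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
				    &dev, true);
		if (r)
			return r;

		ti->private = dev;
		return 0;
	}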
static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
struct queue_limits *limits = data;
struct block_device *bdev = dev->bdev;
- struct request_queue *q = bdev_get_queue(bdev);
+ struct request_queue *q;
char b[BDEVNAME_SIZE];
+ if (!bdev)
+ return 0;
+
+ q = bdev_get_queue(bdev);
if (unlikely(!q)) {
DMWARN("%s: Cannot set limits for nonexistent device %s",
dm_device_name(ti->table->md), bdevname(bdev, b));
*/
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
- struct dm_dev_internal *dd;
+ struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
+ dm_dev);
- if (!d)
- return;
-
- dd = container_of(d, struct dm_dev_internal, dm_dev);
if (atomic_dec_and_test(&dd->count)) {
close_dev(dd, ti->table->md);
list_del(&dd->list);
/* Non-request-stackable devices can't be used for request-based dm */
devices = dm_table_get_devices(t);
list_for_each_entry(dd, devices, list) {
+ if (!dd->dm_dev.bdev)
+ continue;
if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
DMWARN("table load rejected: including"
" non-request-stackable devices");
struct gendisk *prev_disk = NULL, *template_disk = NULL;
list_for_each_entry(dd, devices, list) {
+ if (!dd->dm_dev.bdev)
+ continue;
template_disk = dd->dm_dev.bdev->bd_disk;
if (!blk_get_integrity(template_disk))
goto no_integrity;
sector_t start, sector_t len, void *data)
{
unsigned flush = (*(unsigned *)data);
- struct request_queue *q = bdev_get_queue(dev->bdev);
+ struct request_queue *q = dm_dev_get_queue(dev);
return q && (q->flush_flags & flush);
}
static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
+ struct request_queue *q = dm_dev_get_queue(dev);
return q && blk_queue_nonrot(q);
}
static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
+ struct request_queue *q = dm_dev_get_queue(dev);
return q && !blk_queue_add_random(q);
}
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
+ struct request_queue *q = dm_dev_get_queue(dev);
return q && !q->limits.max_write_same_sectors;
}
static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
+ struct request_queue *q = dm_dev_get_queue(dev);
return q && blk_queue_discard(q);
}
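The hunks above switch these callbacks from bdev_get_queue(dev->bdev) to a dm_dev_get_queue() helper whose definition is not visible in this excerpt; given the q && checks in every caller, it is presumably a NULL-safe wrapper along these lines (an assumption, the real definition lives elsewhere in the patch):

	/* Assumed helper: return the underlying queue, or NULL if no bdev is attached. */
	static inline struct request_queue *dm_dev_get_queue(struct dm_dev *dev)
	{
		return dev->bdev ? bdev_get_queue(dev->bdev) : NULL;
	}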
int r = 0;
list_for_each_entry(dd, devices, list) {
- struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
+ struct request_queue *q;
char b[BDEVNAME_SIZE];
+ if (!dd->dm_dev.bdev)
+ continue;
+
+ q = bdev_get_queue(dd->dm_dev.bdev);
if (likely(q))
r |= bdi_congested(&q->backing_dev_info, bdi_bits);
else