@@ -250,7 +250,8 @@ static void free_devices(struct list_head *devices)
 	struct list_head *tmp, *next;
 
 	list_for_each_safe(tmp, next, devices) {
-		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+		struct dm_dev_internal *dd =
+		    list_entry(tmp, struct dm_dev_internal, list);
 		kfree(dd);
 	}
 }
@@ -327,12 +328,12 @@ static int lookup_device(const char *path, dev_t *dev)
 /*
  * See if we've already got a device in the list.
  */
-static struct dm_dev *find_device(struct list_head *l, dev_t dev)
+static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
 {
-	struct dm_dev *dd;
+	struct dm_dev_internal *dd;
 
 	list_for_each_entry (dd, l, list)
-		if (dd->bdev->bd_dev == dev)
+		if (dd->dm_dev.bdev->bd_dev == dev)
 			return dd;
 
 	return NULL;
@@ -341,45 +342,47 @@ static struct dm_dev *find_device(struct list_head *l, dev_t dev)
 /*
  * Open a device so we can use it as a map destination.
  */
-static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
+static int open_dev(struct dm_dev_internal *d, dev_t dev,
+		    struct mapped_device *md)
 {
 	static char *_claim_ptr = "I belong to device-mapper";
 	struct block_device *bdev;
 
 	int r;
 
-	BUG_ON(d->bdev);
+	BUG_ON(d->dm_dev.bdev);
 
-	bdev = open_by_devnum(dev, d->mode);
+	bdev = open_by_devnum(dev, d->dm_dev.mode);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
 	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
 	if (r)
 		blkdev_put(bdev);
 	else
-		d->bdev = bdev;
+		d->dm_dev.bdev = bdev;
 	return r;
 }
 
 /*
  * Close a device that we've been using.
  */
-static void close_dev(struct dm_dev *d, struct mapped_device *md)
+static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
 {
-	if (!d->bdev)
+	if (!d->dm_dev.bdev)
 		return;
 
-	bd_release_from_disk(d->bdev, dm_disk(md));
-	blkdev_put(d->bdev);
-	d->bdev = NULL;
+	bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
+	blkdev_put(d->dm_dev.bdev);
+	d->dm_dev.bdev = NULL;
 }
 
 /*
  * If possible, this checks an area of a destination device is valid.
  */
-static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
+static int check_device_area(struct dm_dev_internal *dd, sector_t start,
+			     sector_t len)
 {
-	sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
+	sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;
 
 	if (!dev_size)
 		return 1;
@@ -392,16 +395,17 @@ static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
  * careful to leave things as they were if we fail to reopen the
  * device.
  */
-static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
+static int upgrade_mode(struct dm_dev_internal *dd, int new_mode,
+			struct mapped_device *md)
 {
 	int r;
-	struct dm_dev dd_copy;
-	dev_t dev = dd->bdev->bd_dev;
+	struct dm_dev_internal dd_copy;
+	dev_t dev = dd->dm_dev.bdev->bd_dev;
 
 	dd_copy = *dd;
 
-	dd->mode |= new_mode;
-	dd->bdev = NULL;
+	dd->dm_dev.mode |= new_mode;
+	dd->dm_dev.bdev = NULL;
 	r = open_dev(dd, dev, md);
 	if (!r)
 		close_dev(&dd_copy, md);
@@ -421,7 +425,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 {
 	int r;
 	dev_t uninitialized_var(dev);
-	struct dm_dev *dd;
+	struct dm_dev_internal *dd;
 	unsigned int major, minor;
 
 	BUG_ON(!t);
@@ -443,20 +447,20 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 		if (!dd)
 			return -ENOMEM;
 
-		dd->mode = mode;
-		dd->bdev = NULL;
+		dd->dm_dev.mode = mode;
+		dd->dm_dev.bdev = NULL;
 
 		if ((r = open_dev(dd, dev, t->md))) {
 			kfree(dd);
 			return r;
 		}
 
-		format_dev_t(dd->name, dev);
+		format_dev_t(dd->dm_dev.name, dev);
 
 		atomic_set(&dd->count, 0);
 		list_add(&dd->list, &t->devices);
 
-	} else if (dd->mode != (mode | dd->mode)) {
+	} else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
 		r = upgrade_mode(dd, mode, t->md);
 		if (r)
 			return r;
@@ -465,11 +469,11 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
 
 	if (!check_device_area(dd, start, len)) {
 		DMWARN("device %s too small for target", path);
-		dm_put_device(ti, dd);
+		dm_put_device(ti, &dd->dm_dev);
 		return -EINVAL;
 	}
 
-	*result = dd;
+	*result = &dd->dm_dev;
 
 	return 0;
 }
@@ -478,6 +482,13 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 	struct io_restrictions *rs = &ti->limits;
+	char b[BDEVNAME_SIZE];
+
+	if (unlikely(!q)) {
+		DMWARN("%s: Cannot set limits for nonexistent device %s",
+		       dm_device_name(ti->table->md), bdevname(bdev, b));
+		return;
+	}
 
 	/*
 	 * Combine the device limits low.
@@ -540,8 +551,11 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
 /*
  * Decrement a devices use count and remove it if necessary.
  */
-void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
+void dm_put_device(struct dm_target *ti, struct dm_dev *d)
 {
+	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
+						  dm_dev);
+
 	if (atomic_dec_and_test(&dd->count)) {
 		close_dev(dd, ti->table->md);
 		list_del(&dd->list);
@@ -937,13 +951,20 @@ int dm_table_resume_targets(struct dm_table *t)
 
 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 {
-	struct dm_dev *dd;
+	struct dm_dev_internal *dd;
 	struct list_head *devices = dm_table_get_devices(t);
 	int r = 0;
 
 	list_for_each_entry(dd, devices, list) {
-		struct request_queue *q = bdev_get_queue(dd->bdev);
-		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
+		char b[BDEVNAME_SIZE];
+
+		if (likely(q))
+			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+		else
+			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
+				     dm_device_name(t->md),
+				     bdevname(dd->dm_dev.bdev, b));
 	}
 
 	return r;
@@ -951,13 +972,19 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 
 void dm_table_unplug_all(struct dm_table *t)
 {
-	struct dm_dev *dd;
+	struct dm_dev_internal *dd;
 	struct list_head *devices = dm_table_get_devices(t);
 
 	list_for_each_entry(dd, devices, list) {
-		struct request_queue *q = bdev_get_queue(dd->bdev);
-
-		blk_unplug(q);
+		struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
+		char b[BDEVNAME_SIZE];
+
+		if (likely(q))
+			blk_unplug(q);
+		else
+			DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
+				     dm_device_name(t->md),
+				     bdevname(dd->dm_dev.bdev, b));
 	}
 }
 
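
A note on the pattern the hunks above rely on: the public struct dm_dev that
targets hold is embedded inside a private struct dm_dev_internal, and
dm_put_device() uses container_of() to step from the public pointer back to
the wrapper that carries the list linkage and use count.  Below is a minimal
userspace sketch of that pattern only.  The wrapper's field set is inferred
from the accesses in this diff (list, count, dm_dev); the stand-in member
types, put_device() and main() are illustrative and are not the kernel's
definitions, which live in the kernel's dm headers.

/*
 * Userspace sketch of the wrapper + container_of() pattern, assuming the
 * simplified layouts below.
 */
#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): step from a pointer to a member
 * back to the struct that embeds it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for the public handle that targets see (struct dm_dev). */
struct dm_dev {
	void *bdev;		/* struct block_device * in the kernel */
	int mode;
	char name[16];
};

/* Stand-in for the private wrapper (struct dm_dev_internal): the diff only
 * shows that it carries the list linkage, the use count and the embedded
 * dm_dev, so the member types here are simplified. */
struct dm_dev_internal {
	struct dm_dev_internal *next;	/* plays the role of list_head list */
	int count;			/* plays the role of atomic_t count */
	struct dm_dev dm_dev;		/* embedded public part */
};

/* Mirrors what dm_put_device() does after this patch: take the public
 * pointer, step back to the wrapper, then drop the reference. */
static void put_device(struct dm_dev *d)
{
	struct dm_dev_internal *dd =
		container_of(d, struct dm_dev_internal, dm_dev);

	if (--dd->count == 0)
		printf("last user of %s gone; would close and free\n",
		       dd->dm_dev.name);
}

int main(void)
{
	struct dm_dev_internal dd = {
		.count = 1,
		.dm_dev = { .mode = 1, .name = "8:16" },
	};

	/* A caller only ever holds &dd.dm_dev, never &dd itself. */
	put_device(&dd.dm_dev);
	return 0;
}

The point of the split is that callers keep holding only &dd->dm_dev, so the
table code can reshape its private bookkeeping without changing the interface
that targets compile against.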