static struct metadata *init_meta_inram(void *init_param, bool *unformatted)
{
- uint64_t smap_size;
+ uint64_t smap_size, tmp;
struct metadata *md;
struct init_param_inram *p = (struct init_param_inram *)init_param;
return ERR_PTR(-ENOMEM);
}
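+ /*
+  * Use do_div() instead of a plain 64-bit '/' so this also builds on
+  * 32-bit targets: do_div() divides its dividend in place and returns
+  * the remainder.
+  */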
+ tmp = smap_size;
+ (void)do_div(tmp, 1024 * 1024);
DMINFO("Space allocated for pbn reference count map: %llu.%06llu MB\n",
- smap_size / (1024 * 1024),
- smap_size - ((smap_size /
- (1024 * 1024)) * (1024 * 1024)));
+ tmp, smap_size - (tmp * (1024 * 1024)));
memset(md->smap, 0, smap_size);
* Space Management Functions *
********************************************************/
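+ /*
+  * Advance a circular index by one, wrapping at smax.
+  * dm_sector_div64() divides the dividend in place and returns the
+  * remainder, i.e. (current_head + 1) % smax, without a native 64-bit
+  * modulo.
+  */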
+static uint64_t next_head(uint64_t current_head, uint64_t smax)
+{
+ current_head += 1;
+ return dm_sector_div64(current_head, smax);
+}
+
static int alloc_data_block_inram(struct metadata *md, uint64_t *blockn)
{
uint64_t head, tail;
if (!md->smap[head]) {
md->smap[head] = 1;
*blockn = head;
- md->allocptr = (head + 1) % md->smax;
+ md->allocptr = next_head(head, md->smax);
return 0;
}
- head = (head + 1) % md->smax;
+ head = next_head(head, md->smax);
} while (head != tail);
bool unformatted)
{
struct kvstore_inram *kvs;
- uint64_t kvstore_size;
+ uint64_t kvstore_size, tmp;
if (!vsize || !ksize || !kmax)
return ERR_PTR(-ENOTSUPP);
return ERR_PTR(-ENOMEM);
}
+ tmp = kvstore_size;
+ (void)do_div(tmp, 1024 * 1024);
DMINFO("Space allocated for linear key value store: %llu.%06llu MB\n",
- kvstore_size / (1024 * 1024),
- kvstore_size - ((kvstore_size / (1024 * 1024))
- * (1024 * 1024)));
+ tmp, kvstore_size - (tmp * (1024 * 1024)));
memset(kvs->store, EMPTY_ENTRY, kvstore_size);
********************************************************/
static int kvs_delete_sparse_inram(struct kvstore *kvs,
- void *key, int32_t ksize)
+ void *key, int32_t ksize)
{
uint64_t idxhead = *((uint64_t *)key);
uint32_t entry_size, head, tail;
kvinram = container_of(kvs, struct kvstore_inram, ckvs);
entry_size = kvs->vsize + kvs->ksize;
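+ /* do_div() returns the 32-bit remainder: head = idxhead % kmax. */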
- head = idxhead % kvinram->kmax;
+ head = do_div(idxhead, kvinram->kmax);
tail = head;
do {
goto doesnotexist;
if (memcmp(ptr, key, kvs->ksize))
- head = (head + 1) % kvinram->kmax;
+ head = next_head(head, kvinram->kmax);
else {
memset(ptr, DELETED_ENTRY, entry_size);
return 0;
* < 0 - error on lookup
*/
static int kvs_lookup_sparse_inram(struct kvstore *kvs, void *key,
- int32_t ksize, void *value, int32_t *vsize)
+ int32_t ksize, void *value, int32_t *vsize)
{
uint64_t idxhead = *((uint64_t *)key);
uint32_t entry_size, head, tail;
kvinram = container_of(kvs, struct kvstore_inram, ckvs);
entry_size = kvs->vsize + kvs->ksize;
- head = idxhead % kvinram->kmax;
+ head = do_div(idxhead, kvinram->kmax);
tail = head;
do {
return 0;
if (memcmp(ptr, key, kvs->ksize))
- head = (head + 1) % kvinram->kmax;
+ head = next_head(head, kvinram->kmax);
else {
memcpy(value, ptr + kvs->ksize, kvs->vsize);
return 1;
- }
+ }
} while (head != tail);
}
static int kvs_insert_sparse_inram(struct kvstore *kvs, void *key,
- int32_t ksize, void *value, int32_t vsize)
+ int32_t ksize, void *value, int32_t vsize)
{
uint64_t idxhead = *((uint64_t *)key);
uint32_t entry_size, head, tail;
kvinram = container_of(kvs, struct kvstore_inram, ckvs);
entry_size = kvs->vsize + kvs->ksize;
- head = idxhead % kvinram->kmax;
+ head = do_div(idxhead, kvinram->kmax);
tail = head;
do {
return 0;
}
- head = (head + 1) % kvinram->kmax;
+ head = next_head(head, kvinram->kmax);
} while (head != tail);
goto out;
}
- head = (head + 1) % kvinram->kmax;
+ head = next_head(head, kvinram->kmax);
} while (head);
out:
bool unformatted)
{
struct kvstore_inram *kvs;
- uint64_t kvstore_size;
+ uint64_t kvstore_size, tmp;
if (!vsize || !ksize || !knummax)
return ERR_PTR(-ENOTSUPP);
if (!kvs)
return ERR_PTR(-ENOMEM);
- knummax += knummax * HASHTABLE_OVERPROV / 100;
+ knummax += (knummax * HASHTABLE_OVERPROV) / 100;
kvstore_size = (knummax * (vsize + ksize));
return ERR_PTR(-ENOMEM);
}
+ tmp = kvstore_size;
+ (void)do_div(tmp, 1024 * 1024);
DMINFO("Space allocated for sparse key value store: %llu.%06llu MB\n",
- kvstore_size / (1024 * 1024),
- kvstore_size - ((kvstore_size / (1024 * 1024))
- * (1024 * 1024)));
+ tmp, kvstore_size - (tmp * (1024 * 1024)));
memset(kvs->store, EMPTY_ENTRY, kvstore_size);
#define DMD_IO_SIZE 4096
-static uint64_t compute_sector(struct bio *bio,
+static sector_t compute_sector(struct bio *bio,
struct dedup_config *dc)
{
- uint64_t to_be_lbn;
+ sector_t to_be_lbn;
to_be_lbn = bio->bi_iter.bi_sector;
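+ /* Round the sector down to the start of its block. */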
- to_be_lbn /= dc->sectors_per_block;
+ (void)sector_div(to_be_lbn, dc->sectors_per_block);
to_be_lbn *= dc->sectors_per_block;
return to_be_lbn;
}
static int fetch_whole_block(struct dedup_config *dc,
- uint64_t pbn, struct page_list *pl)
+ uint64_t pbn, struct page_list *pl)
{
struct dm_io_request iorq;
struct dm_io_region where;
}
static int merge_data(struct dedup_config *dc, struct page *page,
- struct bio *bio)
+ struct bio *bio)
{
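+ /* Local copy: sector_div() below modifies its dividend in place. */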
+ sector_t bi_sector = bio->bi_iter.bi_sector;
void *src_page_vaddr, *dest_page_vaddr;
int position, err = 0;
/* Relative offset in terms of sector size */
- position = (bio->bi_iter.bi_sector % dc->sectors_per_block);
+ position = sector_div(bi_sector, dc->sectors_per_block);
if (!page || !bio->bi_io_vec->bv_page) {
err = -EINVAL;
struct bio *prepare_bio_on_write(struct dedup_config *dc, struct bio *bio)
{
int r;
- uint64_t lbn_sector;
- uint64_t lbn;
+ sector_t lbn;
uint32_t vsize;
struct lbn_pbn_value lbnpbn_value;
struct bio *clone;
- lbn_sector = compute_sector(bio, dc);
- lbn = lbn_sector / dc->sectors_per_block;
+ lbn = compute_sector(bio, dc);
+ (void)sector_div(lbn, dc->sectors_per_block);
/* check for old or new lbn and fetch the appropriate pbn */
r = dc->kvs_lbn_pbn->kvs_lookup(dc->kvs_lbn_pbn, (void *)&lbn,
- sizeof(lbn), (void *)&lbnpbn_value, &vsize);
+ sizeof(lbn), (void *)&lbnpbn_value, &vsize);
if (r == 0)
clone = prepare_bio_without_pbn(dc, bio);
else if (r == 1)
- clone = prepare_bio_with_pbn(dc, bio, lbnpbn_value.pbn
- * dc->sectors_per_block);
+ clone = prepare_bio_with_pbn(dc, bio,
+ lbnpbn_value.pbn * dc->sectors_per_block);
else
BUG();
static uint64_t bio_lbn(struct dedup_config *dc, struct bio *bio)
{
- return bio->bi_iter.bi_sector / dc->sectors_per_block;
+ sector_t lbn = bio->bi_iter.bi_sector;
+
+ sector_div(lbn, dc->sectors_per_block);
+
+ return lbn;
}
-static void do_io(struct dedup_config *dc, struct bio *bio,
- uint64_t pbn)
+static void do_io(struct dedup_config *dc, struct bio *bio, uint64_t pbn)
{
int offset;
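+ /*
+  * sector_div() leaves the quotient in bi_iter.bi_sector; that is
+  * harmless here because the sector is reassigned on the next line.
+  */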
- offset = (sector_t) bio->bi_iter.bi_sector % dc->sectors_per_block;
+ offset = sector_div(bio->bi_iter.bi_sector, dc->sectors_per_block);
bio->bi_iter.bi_sector = (sector_t)pbn * dc->sectors_per_block + offset;
bio->bi_bdev = dc->data_dev->bdev;
void *iparam;
struct metadata *md = NULL;
- uint64_t data_size;
+ sector_t data_size;
int r;
int crypto_key_size;
dc->block_size = da.block_size;
dc->sectors_per_block = to_sector(da.block_size);
- dc->lblocks = ti->len / dc->sectors_per_block;
+ data_size = ti->len;
+ (void)sector_div(data_size, dc->sectors_per_block);
+ dc->lblocks = data_size;
- data_size = i_size_read(da.data_dev->bdev->bd_inode);
- dc->pblocks = data_size / da.block_size;
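+ /*
+  * i_size_read() returns bytes: shift down to 512-byte sectors, then
+  * divide by sectors_per_block to get the device size in blocks.
+  */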
+ data_size = i_size_read(da.data_dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ (void)sector_div(data_size, dc->sectors_per_block);
+ dc->pblocks = data_size;
/* Meta-data backend specific part */
if (da.backend == BKND_INRAM) {
static int mark_and_sweep(struct dedup_config *dc)
{
int err = 0;
- uint64_t data_size = 0;
+ sector_t data_size = 0;
uint64_t bitmap_size = 0;
struct mark_and_sweep_data ms_data;
BUG_ON(!dc);
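+ /* Device size in blocks: bytes >> SECTOR_SHIFT, then sector_div(). */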
- data_size = i_size_read(dc->data_dev->bdev->bd_inode);
- bitmap_size = data_size / dc->block_size;
+ data_size = i_size_read(dc->data_dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ (void)sector_div(data_size, dc->sectors_per_block);
+ bitmap_size = data_size;
memset(&ms_data, 0, sizeof(struct mark_and_sweep_data));