Example #1
static struct block_place get_degraded_block(u64 block, int total_disks, int block_size, u64 device_length)
{
    struct block_place degraded_place;
    u64 block_pos;
    u64 empty_zone_offset;

    block_pos = block;

    // Compute the device number within the empty (spare) zone
    degraded_place.device_number = sector_div(block_pos, (total_disks - 1));
    // Skip over the degraded disk
    if (degraded_place.device_number >= DEGRADED_DISK)
        degraded_place.device_number++;

    // Next, compute the sector number inside the empty zone,
    // starting from the offset of the zone itself.
    empty_zone_offset = device_length;

    // p_blocks + e_block = 3, so the data spans (total_disks - 3) disks.
    sector_div(empty_zone_offset, (total_disks - 3));
    // empty_zone_offset now equals one real disk's capacity
    // (including the empty and parity blocks)

    sector_div(empty_zone_offset, total_disks); // per-disk offset of the empty zone

    degraded_place.sector = empty_zone_offset + block_pos * block_size;

    return degraded_place;
}
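All the examples on this page lean on the same contract: sector_div(n, base) divides the 64-bit dividend n in place and returns the remainder. A minimal userspace sketch of that contract (the macro below is a stand-in relying on GCC statement expressions, not the kernel implementation):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's sector_div(): divide in place, return remainder. */
#define sector_div(n, base) ({			\
	uint32_t _rem = (n) % (base);		\
	(n) /= (base);				\
	_rem;					\
})

int main(void)
{
	uint64_t block = 1000;
	/* As in get_degraded_block() with total_disks = 7: 6 surviving columns. */
	uint32_t device = sector_div(block, 6);

	printf("device=%u, block=%llu\n", device, (unsigned long long)block);
	/* prints: device=4, block=166 */
	return 0;
}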
Example #2
static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
							struct ppa_addr r)
{
	struct ppa_addr l;
	int secs, pgs, blks, luns;
	sector_t ppa = r.ppa;

	l.ppa = 0;

	div_u64_rem(ppa, dev->sec_per_pg, &secs);
	l.g.sec = secs;

	sector_div(ppa, dev->sec_per_pg);
	div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
	l.g.pg = pgs;

	sector_div(ppa, dev->pgs_per_blk);
	div_u64_rem(ppa, dev->blks_per_lun, &blks);
	l.g.blk = blks;

	sector_div(ppa, dev->blks_per_lun);
	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
	l.g.lun = luns;

	sector_div(ppa, dev->luns_per_chnl);
	l.g.ch = ppa;

	return l;
}
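The routine above peels a linear sector address into the digits of a mixed-radix number (sector, page, block, LUN, channel). Going back is just the recomposition; a hypothetical inverse (not part of the source, assuming the same struct nvm_dev geometry fields) might look like:

static sector_t generic_to_linear_addr(struct nvm_dev *dev, struct ppa_addr g)
{
	sector_t ppa;

	/* Fold the channel/LUN/block/page/sector digits back into one index. */
	ppa = g.g.ch;
	ppa = ppa * dev->luns_per_chnl + g.g.lun;
	ppa = ppa * dev->blks_per_lun + g.g.blk;
	ppa = ppa * dev->pgs_per_blk + g.g.pg;
	ppa = ppa * dev->sec_per_pg + g.g.sec;

	return ppa;
}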
Example #3
static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
	sector_t size = gb * 1024 * 1024 * 1024ULL;
	sector_t blksize;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cgrps = 1;
	id->cap = 0x2;
	id->dom = 0x1;

	id->ppaf.blk_offset = 0;
	id->ppaf.blk_len = 16;
	id->ppaf.pg_offset = 16;
	id->ppaf.pg_len = 16;
	id->ppaf.sect_offset = 32;
	id->ppaf.sect_len = 8;
	id->ppaf.pln_offset = 40;
	id->ppaf.pln_len = 8;
	id->ppaf.lun_offset = 48;
	id->ppaf.lun_len = 8;
	id->ppaf.ch_offset = 56;
	id->ppaf.ch_len = 8;

	sector_div(size, bs); /* convert size to pages */
	size >>= 8; /* convert pages to blocks (256 pages per block) */
	grp = &id->groups[0];
	grp->mtype = 0;
	grp->fmtype = 0;
	grp->num_ch = 1;
	grp->num_pg = 256;
	blksize = size;
	size >>= 16;
	grp->num_lun = size + 1;
	sector_div(blksize, grp->num_lun);
	grp->num_blk = blksize;
	grp->num_pln = 1;

	grp->fpg_sz = bs;
	grp->csecs = bs;
	grp->trdt = 25000;
	grp->trdm = 25000;
	grp->tprt = 500000;
	grp->tprm = 500000;
	grp->tbet = 1500000;
	grp->tbem = 1500000;
	grp->mpos = 0x010101; /* single plane rwe */
	grp->cpar = hw_queue_depth;

	return 0;
}
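To see the shifts at work, take illustrative values of gb = 250 and bs = 512: size becomes 250 GiB / 512 = 524,288,000 pages; size >>= 8 leaves 2,048,000 blocks of 256 pages each; size >>= 16 then gives num_lun = 31 + 1 = 32; and num_blk = 2,048,000 / 32 = 64,000 blocks per LUN.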
Example #4
/**
 *	ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
 *	@sdev: SCSI device for which BIOS geometry is to be determined
 *	@bdev: block device associated with @sdev
 *	@capacity: capacity of SCSI device
 *	@geom: location to which geometry will be output
 *
 *	Generic bios head/sector/cylinder calculator
 *	used by sd. Most BIOSes nowadays expect a XXX/255/63 (CHS)
 *	mapping. Some situations may arise where the disk is not
 *	bootable if this is not used.
 *
 *	LOCKING:
 *	Defined by the SCSI layer.  We don't really care.
 *
 *	RETURNS:
 *	Zero.
 */
int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
		       sector_t capacity, int geom[])
{
	geom[0] = 255;
	geom[1] = 63;
	sector_div(capacity, 255*63);
	geom[2] = capacity;

	return 0;
}
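A worked instance (numbers illustrative, not from the source): a disk of 976,773,168 sectors (~465 GiB) yields 976,773,168 / (255 × 63) = 60,801 cylinders.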
Example #5
int pt_getgeo(struct block_device * block_device, struct hd_geometry * hg)
{
	hg->heads = 255;
	hg->sectors = 63;

	hg->cylinders = get_capacity(block_device->bd_disk);
	sector_div(hg->cylinders, hg->heads * hg->sectors);

	return 0;
}
Example #6
File: raid0.c Project: mhei/linux
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
    int i, c, err;
    sector_t curr_zone_end, sectors;
    struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
    struct strip_zone *zone;
    int cnt;
    char b[BDEVNAME_SIZE];
    char b2[BDEVNAME_SIZE];
    struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
    unsigned short blksize = 512;

    *private_conf = ERR_PTR(-ENOMEM);
    if (!conf)
        return -ENOMEM;
    rdev_for_each(rdev1, mddev) {
        pr_debug("md/raid0:%s: looking at %s\n",
                 mdname(mddev),
                 bdevname(rdev1->bdev, b));
        c = 0;

        /* round size to chunk_size */
        sectors = rdev1->sectors;
        sector_div(sectors, mddev->chunk_sectors);
        rdev1->sectors = sectors * mddev->chunk_sectors;

        blksize = max(blksize, queue_logical_block_size(
                          rdev1->bdev->bd_disk->queue));

        rdev_for_each(rdev2, mddev) {
            pr_debug("md/raid0:%s:   comparing %s(%llu)"
                     " with %s(%llu)\n",
                     mdname(mddev),
                     bdevname(rdev1->bdev,b),
                     (unsigned long long)rdev1->sectors,
                     bdevname(rdev2->bdev,b2),
                     (unsigned long long)rdev2->sectors);
            if (rdev2 == rdev1) {
                pr_debug("md/raid0:%s:   END\n",
                         mdname(mddev));
                break;
            }
            if (rdev2->sectors == rdev1->sectors) {
                /*
                 * Not unique, don't count it as a new
                 * group
                 */
                pr_debug("md/raid0:%s:   EQUAL\n",
                         mdname(mddev));
                c = 1;
                break;
            }
            pr_debug("md/raid0:%s:   NOT EQUAL\n",
                     mdname(mddev));
        }
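The round-to-chunk step above truncates each member device to a whole number of chunks: illustratively, a 1,000,000-sector rdev with chunk_sectors = 128 is trimmed to 7,812 × 128 = 999,936 sectors.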
Example #7
static struct recover_stripe raid6_recover(struct insane_c *ctx, u64 block, int device_number) {
    struct recover_stripe result;
   
    int block_place, counter, device, total_disks, chunk_size;

    u64 onotole;

    total_disks = raid6_alg.ndisks;
    chunk_size = ctx->chunk_size;

    // place of block in current stripe
    onotole = block + device_number;

    block_place = sector_div(onotole, total_disks);

    // starting block
    onotole = block;
    device = sector_div(onotole, total_disks);
    if (device != 0)
        device = total_disks - device;
    else
        device = 0;

    counter = 0;
    // we should read (total_disks - 2) blocks to recover
    while (counter < total_disks - 2) {
        if (device != device_number) {
            result.read_sector[counter] = block * chunk_size;
            result.read_device[counter] = device;
            counter++;
        }
        device++;
        device %= total_disks; /* wrap around (plain modulo; sector_div() needs a 64-bit dividend) */
    }

    result.write_device = device_number;
    result.write_sector = block * chunk_size;

    result.quantity = total_disks - 2;
    return result;
}
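A concrete trace (values assumed for illustration): with total_disks = 7, block = 10 and device_number = 3, the starting device is 7 - (10 mod 7) = 4, and the loop collects reads from devices 4, 5, 6, 0 and 1, five in total (total_disks - 2), skipping the failed device 3.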
Example #8
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	sector_t provisioned;

	pblk->over_pct = 20;

	provisioned = nr_free_blks;
	provisioned *= (100 - pblk->over_pct);
	sector_div(provisioned, 100);

	/* Internally pblk manages all free blocks, but all calculations based
	 * on user capacity consider only provisioned blocks
	 */
	pblk->rl.total_blocks = nr_free_blks;
	pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
	pblk->capacity = provisioned * geo->sec_per_blk;
	atomic_set(&pblk->rl.free_blocks, nr_free_blks);
}
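For example, with nr_free_blks = 1000 and the hard-coded 20% over-provision, provisioned = 1000 × 80 / 100 = 800, so pblk advertises 800 × sec_per_blk sectors of user capacity while still managing all 1000 blocks internally.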
Example #9
/*
 * Check the amount of free space and suspend/resume accordingly.
 */
static int check_free_space(struct bsd_acct_struct *acct, struct file *file)
{
	struct kstatfs sbuf;
	int res;
	int act;
	sector_t resume;
	sector_t suspend;

	spin_lock(&acct_lock);
	res = acct->active;
	if (!file || !acct->needcheck)
		goto out;
	spin_unlock(&acct_lock);

	/* May block */
	if (vfs_statfs(file->f_path.dentry, &sbuf))
		return res;
	suspend = sbuf.f_blocks * SUSPEND;
	resume = sbuf.f_blocks * RESUME;

	sector_div(suspend, 100);
	sector_div(resume, 100);

	if (sbuf.f_bavail <= suspend)
		act = -1;
	else if (sbuf.f_bavail >= resume)
		act = 1;
	else
		act = 0;

	/*
	 * If some joker switched acct->file under us we'd better be
	 * silent and _not_ touch anything.
	 */
	spin_lock(&acct_lock);
	if (file != acct->file) {
		if (act)
			res = act>0;
		goto out;
	}

	if (acct->active) {
		if (act < 0) {
			acct->active = 0;
			printk(KERN_INFO "Process accounting paused\n");
		}
	} else {
		if (act > 0) {
			acct->active = 1;
			printk(KERN_INFO "Process accounting resumed\n");
		}
	}

	del_timer(&acct->timer);
	acct->needcheck = 0;
	acct->timer.expires = jiffies + ACCT_TIMEOUT*HZ;
	add_timer(&acct->timer);
	res = acct->active;
out:
	spin_unlock(&acct_lock);
	return res;
}
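SUSPEND and RESUME are percentages of the filesystem's total block count (2 and 4 in mainline), so the two sector_div() calls convert them into absolute block thresholds: accounting pauses once free space drops to 2% and resumes above 4%.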
Example #10
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size	= sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
		rv = -ENOMEM;
		goto out_cleanup_blk_queue;
	}

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);
	return 0;

out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}
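Note that set_capacity() expects a count of 512-byte sectors, so the gb * 2^30 / bs arithmetic only produces a sector count with the default bs of 512; a larger logical block size shrinks the advertised capacity accordingly.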
Example #11
// Sector and device mapping callback
static struct parity_places algorithm_raid6(struct insane_c *ctx, u64 block, sector_t *sector, int *device_number)
{
	struct parity_places parity;

	u64 position;
	u64 i, Y;

	u64 data_block; // Data block number
	u64 local_gap;	// Parity blocks skipped in current lane
	u64 lane;
	u64 block_start, block_offset;

	int block_size;
	int total_disks;

	block_size = ctx->chunk_size;
	total_disks = raid6_alg.ndisks;

	data_block = *device_number + block * total_disks;

	lane = data_block;
	position = sector_div(lane, total_disks - raid6_alg.p_blocks); 

	i = lane;
	Y = sector_div(i, total_disks);

	local_gap = 2;

/* Now we have a "square" of blocks.
 * Position is the horizontal coordinate, Y the vertical coordinate.
 *
 * We would like to see something like this (D - data block, S - syndrome).
 *			_
 *	DDDDDSS  |
 *	DDDDSSD  |
 *	DDDSSDD  |
 *	DDSSDDD   > Square 1  
 *	DSSDDDD  |
 *	SSDDDDD  |
 *	SDDDDDS _|
 *	DDDDDSS  |
 *	DDDDSSD  |
 *	DDDSSDD  |
 *	DDSSDDD   > Square 2
 *	DSSDDDD  |
 *	SSDDDDD  |
 *	SDDDDDS _|
 *
 *
 * Local gap - how many syndromes we should skip in current stripe.
 * For example, in this masterpiece scheme local gap equals: 
 *	0 in the top left corner; 
 *	1 in the last stripe; 
 *	2 in all other positions.
 *
 * So, local_gap_scheme looks like:
 *	
 *	0000022
 *	0000222
 *	0002222
 *	0022222
 *	0222222
 *	2222222
 *	1111111
 *
 * The default local gap is 2.
 * First we decrease the local gap in the last stripe (inner clause),
 * then we decrease it in the top left corner (outer clause).
 */
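	/*
	 * Concrete instance of the checks below: with total_disks = 7 the
	 * diagonal test is position + Y < 5, so position = 1, Y = 2 keeps
	 * local_gap = 0; any block in the last stripe (Y = 6) gets
	 * local_gap = 1; and position = 4, Y = 3 keeps the default of 2.
	 */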

	// If we are in the last stripe of the square, we skip 1 syndrome in the current lane
	if( Y == (total_disks - 1) )
		local_gap = 1;

	// If we haven't crossed the square's diagonal, we skip no
	// syndromes in the current lane
	if( position + Y < (total_disks - 2) )
		local_gap = 0;

	// Remap block accounting all gaps
	position = data_block + local_gap + (2 * lane);

	// Remap device number
	*device_number = sector_div(position, total_disks);

	// For sequential writing: check the position of the current block
	parity.last_block = false;
	if (ctx->io_pattern == SEQUENTIAL)
	{
		if (*device_number + (2 - local_gap) == (total_disks - 1))
			parity.last_block = true;
	}

	// Get offset in block and remap sector
	block_offset = sector_div(*sector, block_size);
	block_start = position * block_size;
	*sector = block_start + block_offset;

	// Now work out where our syndromes are.
	parity.start_device = 0;
	parity.start_sector = block_start;
	
	// Parities have same sector, different devices
	parity.sector_number[0] = block_start;
	parity.sector_number[1] = block_start;

	parity.device_number[1] = total_disks - 1 - Y;
	if( Y < total_disks - 1 )
		parity.device_number[0] = parity.device_number[1] - 1;
	else
		parity.device_number[0] = total_disks - 1;

	parity.device_number[2] = -1;
	return parity;
}
Example #12
int nbdx_register_block_device(struct nbdx_file *nbdx_file)
{
	sector_t size = nbdx_file->stbuf.st_size;
	int page_size = PAGE_SIZE;
	int err = 0;

	pr_debug("%s called\n", __func__);

	nbdx_file->major = nbdx_major;

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
	nbdx_mq_reg.nr_hw_queues = submit_queues;

	nbdx_file->queue = blk_mq_init_queue(&nbdx_mq_reg, nbdx_file);
#else
	nbdx_file->tag_set.ops = &nbdx_mq_ops;
	nbdx_file->tag_set.nr_hw_queues = submit_queues;
	nbdx_file->tag_set.queue_depth = NBDX_QUEUE_DEPTH;
	nbdx_file->tag_set.numa_node = NUMA_NO_NODE;
	nbdx_file->tag_set.cmd_size	= sizeof(struct raio_io_u);
	nbdx_file->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	nbdx_file->tag_set.driver_data = nbdx_file;

	err = blk_mq_alloc_tag_set(&nbdx_file->tag_set);
	if (err)
		goto out;

	nbdx_file->queue = blk_mq_init_queue(&nbdx_file->tag_set);
#endif
	if (IS_ERR(nbdx_file->queue)) {
		pr_err("%s: Failed to allocate blk queue ret=%ld\n",
		       __func__, PTR_ERR(nbdx_file->queue));
		err = PTR_ERR(nbdx_file->queue);
		goto blk_mq_init;
	}

	nbdx_file->queue->queuedata = nbdx_file;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nbdx_file->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nbdx_file->queue);

	nbdx_file->disk = alloc_disk_node(1, NUMA_NO_NODE);
	if (!nbdx_file->disk) {
		pr_err("%s: Failed to allocate disk node\n", __func__);
		err = -ENOMEM;
		goto alloc_disk;
	}

	nbdx_file->disk->major = nbdx_file->major;
	nbdx_file->disk->first_minor = nbdx_file->index;
	nbdx_file->disk->fops = &nbdx_ops;
	nbdx_file->disk->queue = nbdx_file->queue;
	nbdx_file->disk->private_data = nbdx_file;
	blk_queue_logical_block_size(nbdx_file->queue, NBDX_SECT_SIZE);
	blk_queue_physical_block_size(nbdx_file->queue, NBDX_SECT_SIZE);
	page_size /= NBDX_SECT_SIZE;	/* sectors per page; sector_div() needs a 64-bit dividend */
	blk_queue_max_hw_sectors(nbdx_file->queue, page_size * MAX_SGL_LEN);
	sector_div(size, NBDX_SECT_SIZE);
	set_capacity(nbdx_file->disk, size);
	sscanf(nbdx_file->dev_name, "%s", nbdx_file->disk->disk_name);
	add_disk(nbdx_file->disk);
	goto out;

alloc_disk:
	blk_cleanup_queue(nbdx_file->queue);
blk_mq_init:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
	blk_mq_free_tag_set(&nbdx_file->tag_set);
#endif
out:
	return err;
}
Example #13
static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
{
	sector_t r = from_oblock(b);
	(void) sector_div(r, mq->cache_blocks_per_hotspot_block);
	return to_oblock(r);
}
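For instance, with mq->cache_blocks_per_hotspot_block = 16 (a value chosen purely for illustration), origin block 37 lands in hotspot block 37 / 16 = 2; the (void) cast discards the remainder that sector_div() returns.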
Example #14
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb)
		return -ENOMEM;

	spin_lock_init(&nullb->lock);

	if (setup_queues(nullb))
		goto err;

	if (queue_mode == NULL_Q_MQ) {
		null_mq_reg.numa_node = home_node;
		null_mq_reg.queue_depth = hw_queue_depth;

		if (use_per_node_hctx) {
			null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
			null_mq_reg.ops->free_hctx = null_free_hctx;

			null_mq_reg.nr_hw_queues = nr_online_nodes;
		} else {
			null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
			null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;

			null_mq_reg.nr_hw_queues = submit_queues;
		}

		nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		blk_queue_make_request(nullb->q, null_queue_bio);
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		if (nullb->q)
			blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
	}

	if (!nullb->q)
		goto queue_fail;

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
queue_fail:
		if (queue_mode == NULL_Q_MQ)
			blk_mq_free_queue(nullb->q);
		else
			blk_cleanup_queue(nullb->q);
		cleanup_queues(nullb);
err:
		kfree(nullb);
		return -ENOMEM;
	}

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);
	return 0;
}
Example #15
/*
 * Algorithm of RAID6E with degraded drive
 */
static struct parity_places algorithm_raid6e( struct insane_c *ctx, u64 block, sector_t *sector, int *device_number)
{
    struct parity_places parity;
    struct block_place degraded_place;

    u64 i, Y;
    u64 position;
    u64 local_gap;

    u64 data_block;
    u64 lane;
    u64 block_offset, block_start;

    int block_size;
    int total_disks;
    sector_t device_length;


    block_size = ctx->chunk_size;
    total_disks = raid6e_alg.ndisks;
    device_length = ctx->ti->len;

    data_block = *device_number + block * total_disks;
    lane = data_block;

    // Normal case: everything proceeds as in RAID 6
    position = sector_div(lane, total_disks - raid6e_alg.p_blocks);
    i = lane;
    Y = sector_div(i, total_disks);

    local_gap = 2;

    // If we are in last stripe in square then we skip 1 syndrome in current lane
    if (Y == (total_disks - 1))
        local_gap = 1;

    // If we didn't cross square diagonal then we don't skip syndromes in
    // current lane
    if (position + Y < (total_disks - raid6e_alg.p_blocks))
        local_gap = 0;

    // Remap block accounting all gaps
    position = data_block + local_gap + (raid6e_alg.p_blocks * lane);

    // Remap device_number
    *device_number = sector_div(position, total_disks);

    // For sequential writing: let's check number of current block
    parity.last_block = false;
    if (ctx->io_pattern == SEQUENTIAL)
    {
        if (*device_number + (2 - local_gap) == (total_disks - 1))
            parity.last_block = true;
    }

    // Get offset in block and remap sector
    block_offset = sector_div(*sector, block_size);
    block_start = position * block_size;
    *sector = block_start + block_offset;

    // Now work out where our syndromes are

    parity.start_device = 0;
    parity.start_sector = block_start;

    parity.sector_number[0] = block_start;
    parity.sector_number[1] = block_start;

    parity.device_number[1] = total_disks - 1 - Y;

    if (Y < total_disks - 1)
        parity.device_number[0] = parity.device_number[1] - 1;
    else
        parity.device_number[0] = total_disks - 1;

    if (*device_number == DEGRADED_DISK) {
        degraded_place = get_degraded_block(position, total_disks, block_size, device_length);
        *device_number = degraded_place.device_number;
        *sector = degraded_place.sector + block_offset;
    }
    else if (parity.device_number[0] == DEGRADED_DISK) {
        degraded_place = get_degraded_block(position, total_disks, block_size, device_length);
        parity.device_number[0] = degraded_place.device_number;
        parity.sector_number[0] = degraded_place.sector;
    }
    else if (parity.device_number[1] == DEGRADED_DISK) {
        degraded_place = get_degraded_block(position, total_disks, block_size, device_length);
        parity.device_number[1] = degraded_place.device_number;
        parity.sector_number[1] = degraded_place.sector;
    }

    parity.device_number[2] = -1;
    return parity;
}
Example #16
/*
 * Construct a striped mapping.
 * <number of stripes> <chunk size> [<dev_path> <offset>]+
 */
static int vm_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct vm_c *vc;
	sector_t width, tmp_len;
	uint32_t vms;
	uint32_t chunk_size;
	int r;
	unsigned long long i;

	if (argc < 2) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	if (kstrtouint(argv[0], 10, &vms) || !vms) {
		ti->error = "Invalid stripe count";
		return -EINVAL;
	}

	if (kstrtouint(argv[1], 10, &chunk_size) || !chunk_size) {
		ti->error = "Invalid chunk_size";
		return -EINVAL;
	}

	width = ti->len;
	if (sector_div(width, vms)) {
		ti->error = "Target length not divisible by "
		    "number of stripes";
		return -EINVAL;
	}

	tmp_len = width;
	if (sector_div(tmp_len, chunk_size)) {
		ti->error = "Target length not divisible by "
		    "chunk size";
		return -EINVAL;
	}

	/*
	 * Do we have enough arguments for that many stripes ?
	 */
	if (argc != (2 + 2 * vms)) {
		ti->error = "Not enough destinations "
			"specified";
		return -EINVAL;
	}

	vc = alloc_context(vms);
	if (!vc) {
		ti->error = "Memory allocation for striped context "
		    "failed";
		return -ENOMEM;
	}

	INIT_WORK(&vc->trigger_event, trigger_event);

	/* Set pointer to dm target; used in trigger_event */
	vc->ti = ti;
	vc->vms = vms;
	vc->vm_width = width;

	if (vms & (vms - 1))
		vc->vms_shift = -1;
	else
		vc->vms_shift = __ffs(vms);

	r = dm_set_target_max_io_len(ti, chunk_size);
	if (r) {
		kfree(vc);
		return r;
	}

	ti->num_flush_bios = vms;
	ti->num_discard_bios = vms;
	ti->num_write_same_bios = vms;

	vc->chunk_size = chunk_size;
	if (chunk_size & (chunk_size - 1))
		vc->chunk_size_shift = -1;
	else
		vc->chunk_size_shift = __ffs(chunk_size);

	/*
	 * Get the stripe destinations.
	 */
	for (i = 0; i < vms; i++) {
		argv += 2;

		r = get_vm(ti, vc, i, argv);
		if (r < 0) {
			ti->error = "Couldn't parse stripe destination";
			while (i--)
				dm_put_device(ti, vc->vm[i].dev);
			kfree(vc);
			return r;
		}
		atomic_set(&(vc->vm[i].error_count), 0);
	}

	/* volume manager initialization */
	vc->wp = 0;	/* current write point: 0 selects the NVMe device */
	vc->ws = kmalloc(sizeof(unsigned long long) * vc->vms, GFP_KERNEL);
	for (i = 0; i < vc->vms; i++)
		vc->ws[i] = 0;
	vc->gp_list = kmalloc(sizeof(char) * vc->vms, GFP_KERNEL);
	vc->num_gp = 0;
	vc->io_client = dm_io_client_create();
	vc->gs = NULL;
	vc->overload = 0;
	for (i = 0; i < vc->vms; i++)
		vc->gp_list[i] = Clean_Weight;	/* 0 means clean */
	{
		unsigned long long tem, disk_size;

		tem = 0;
		for (i = 0; i < vms; i++) {
			struct block_device *cur_bdev = vc->vm[i].dev->bdev;

			/* device size in 512-byte sectors */
			vc->vm[i].end_sector = i_size_read(cur_bdev->bd_inode) >> 9;
			printk("vm%llu start_sector %llu, end_sector %llu, target_offset %llu\n",
			       i, (unsigned long long)vc->vm[i].physical_start,
			       (unsigned long long)vc->vm[i].end_sector,
			       (unsigned long long)dm_target_offset(ti, vc->ws[i]));
			disk_size = vc->vm[i].end_sector * 512;
			do_div(disk_size, (unsigned long long)vc->vm[i].dev->bdev->bd_block_size);
			tem += disk_size;
		}
		vc->num_entry = tem;	/* the entry count equals the block count */
	}
	printk("num entry is %llu, node size is %lu, req mem is %llu\n", vc->num_entry, sizeof(struct flag_nodes), sizeof(struct flag_nodes) * vc->num_entry);
	
	/* flag set initialization */
	vc->fs = kmalloc(sizeof(struct flag_set), GFP_KERNEL);
	vc->fs->node_buf = kmem_cache_create("dirty_data_buf", sizeof(struct flag_nodes),
			0, (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD), NULL);

	vc->fs->table = vmalloc(sizeof(struct flag_nodes *) * vc->num_entry);
	for (i = 0; i < vc->num_entry; i++) {
		/* pre-allocate every node (a late-alloc variant would leave these NULL) */
		vc->fs->table[i] = kmem_cache_alloc(vc->fs->node_buf, GFP_KERNEL);
		vc->fs->table[i]->msector = -1;
		vc->fs->table[i]->wp = -1;
	}
	vc->num_map_block = 0;	/* was: vc->num_entry * sizeof(struct flag_nodes) / 4096 */

	vc->fs->reverse_table = vmalloc(sizeof(struct reverse_nodes*) * vc->vms);
	vc->d_num = kmalloc(sizeof(unsigned long long) * vc->vms, GFP_KERNEL);
	for (i = 0; i < vc->vms; i++) {
		unsigned long long j;
		unsigned long long r_table_size = vc->vm[i].end_sector + 7;
		unsigned long long phy_sect = vc->vm[i].physical_start;

		do_div(phy_sect, 8);
		do_div(r_table_size, 8);
		printk("r_table_size = %llu\n", r_table_size);
		vc->vm[i].num_dirty = r_table_size - phy_sect;
		vc->d_num[i] = vc->vm[i].num_dirty;
		vc->fs->reverse_table[i] = vmalloc(sizeof(struct reverse_nodes) * r_table_size);
		for (j = 0; j < r_table_size; j++) {
			vc->fs->reverse_table[i][j].index = -1;
			vc->fs->reverse_table[i][j].dirty = 1;
			vc->fs->reverse_table[i][j].size = -1;
		}
	}

	for (i = 0; i < vc->vms; i++) {
		unsigned int minor = atom(vc->vm[i].dev->name);
		unsigned int major = atoj(vc->vm[i].dev->name);

		printk("dev name is %s\t", vc->vm[i].dev->name);
		if (major != 2600)
			vc->vm[i].main_dev = minor >> minor_shift;
		else
			vc->vm[i].main_dev = minor - 1;
		vc->vm[i].maj_dev = major;
		printk("main %u, maj %u\n", vc->vm[i].main_dev, vc->vm[i].maj_dev);
	}