Example #1
static void cf_request(struct request_queue *q)
{
	struct cf_device *cf;
	struct request *req;
	unsigned block, count;
	int rw, err;

	DPRINTK(DEBUG_CF_REQUEST, "%s: q %p", __FUNCTION__, q);

	req = blk_fetch_request(q);
	while (req) {
		err = -EIO;
		DPRINTK(DEBUG_CF_REQUEST, "%s:%d req %p", __FUNCTION__, __LINE__, req);

		if (!blk_fs_request(req))
			goto done;

		block = blk_rq_pos(req);
		count = blk_rq_cur_sectors(req);
		rw = rq_data_dir(req);
		cf = req->rq_disk->private_data;

		DPRINTK(DEBUG_CF_REQUEST, "req %p block %d count %d rw %c\n", req, block, count, (rw == READ)?'R':'W');

		if (block+count > get_capacity(req->rq_disk)) {
			printk("%s: %u+%u is larger than %llu\n", __FUNCTION__, block, count, get_capacity(req->rq_disk));
			goto done;
		}

		/* Grab the R/W semaphore to prevent more than
		 * one request from trying to R/W at the same time */
		err = down_interruptible(&cf->rw_sem);
		if (err)
			break;

		if (rw == READ)
			err = cf_read_sectors(cf, req->buffer, block, count);
		else
			err = cf_write_sectors(cf, req->buffer, block, count);
		up(&cf->rw_sem);

	done:
		DPRINTK(DEBUG_CF_REQUEST, "%s: blk_end_request_cur(%p, %d)\n", __FUNCTION__, req, err);
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
	DPRINTK(DEBUG_CF_REQUEST, "end\n");
	cf_in_request--;
}
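Nearly every legacy request-fn driver in this listing repeats the same pattern: fetch a request, bounds-check it against get_capacity(), do the transfer, and complete with __blk_end_request_cur(). A minimal sketch of that shared skeleton, assuming a hypothetical my_transfer() helper that returns 0 or a negative errno:

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Hypothetical: performs the I/O for one chunk, returns 0 or -errno. */
static int my_transfer(void *priv, sector_t start, unsigned int count, int dir);

static void my_request(struct request_queue *q)
{
	struct request *req = blk_fetch_request(q);

	while (req) {
		int err = -EIO;
		sector_t start = blk_rq_pos(req);
		unsigned int count = blk_rq_cur_sectors(req);

		/* Reject requests that run past the end of the disk. */
		if (start + count <= get_capacity(req->rq_disk))
			err = my_transfer(req->rq_disk->private_data,
					  start, count, rq_data_dir(req));

		/* __blk_end_request_cur() returns false once the request is
		 * fully completed; only then fetch the next one. */
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}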
Example #2
/*
 * The driver enables interrupts as much as possible.  In order to do this,
 * (a) the device-interrupt is disabled before entering hd_request(),
 * and (b) the timeout-interrupt is disabled before the sti().
 *
 * Interrupts are still masked (by default) whenever we are exchanging
 * data/cmds with a drive, because some drives seem to have very poor
 * tolerance for latency during I/O. The IDE driver has support to unmask
 * interrupts for non-broken hardware, so use that driver if required.
 */
static void hd_request(void)
{
	unsigned int block, nsect, sec, track, head, cyl;
	struct hd_i_struct *disk;
	struct request *req;

	if (do_hd)
		return;
repeat:
	del_timer(&device_timer);

	if (!hd_req) {
		hd_req = blk_fetch_request(hd_queue);
		if (!hd_req) {
			do_hd = NULL;
			return;
		}
	}
	req = hd_req;

	if (reset) {
		reset_hd();
		return;
	}
	disk = req->rq_disk->private_data;
	block = blk_rq_pos(req);
	nsect = blk_rq_sectors(req);
	if (block >= get_capacity(req->rq_disk) ||
	    ((block+nsect) > get_capacity(req->rq_disk))) {
		printk("%s: bad access: block=%d, count=%d\n",
			req->rq_disk->disk_name, block, nsect);
		hd_end_request_cur(-EIO);
		goto repeat;
	}

	if (disk->special_op) {
		if (do_special_op(disk, req))
			goto repeat;
		return;
	}
	sec   = block % disk->sect + 1;
	track = block / disk->sect;
	head  = track % disk->head;
	cyl   = track / disk->head;
#ifdef DEBUG
	printk("%s: %sing: CHS=%d/%d/%d, sectors=%d, buffer=%p\n",
		req->rq_disk->disk_name,
		rq_data_dir(req) == READ ? "read" : "writ",
		cyl, head, sec, nsect, req->buffer);
#endif
Example #3
static int mmc_blk_prep_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	int stat = BLKPREP_OK;

	/*
	 * If we have no device, we haven't finished initialising.
	 */
	if (!md || !mq->card) {
		printk(KERN_ERR "%s: killing request - no device/host\n",
		       req->rq_disk->disk_name);
		return BLKPREP_KILL;	/* md may be NULL below, so bail out now */
	}

	if (md->suspended) {
		blk_plug_device(md->queue.queue);
		stat = BLKPREP_DEFER;
	}

	/*
	 * Check for excessive requests.
	 */
	if (req->sector + req->nr_sectors > get_capacity(req->rq_disk)) {
		printk("bad request size\n");
		stat = BLKPREP_KILL;
	}

	return stat;
}
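For reference: under the legacy prep_rq contract, BLKPREP_OK lets the request be dispatched, BLKPREP_KILL completes it with -EIO, and BLKPREP_DEFER leaves it on the queue so it can be retried later, e.g. once the device resumes.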
Example #4
wbfs_t *wbfs_try_open_partition(char *partitionLetter, int reset)
{
	HANDLE handle;
	char drivePath[8] = "\\\\?\\Z:";
	
	u32 sector_size, sector_count;
	
	if (strlen(partitionLetter) != 1)
	{
		wbfs_error("bad drive name");
		return NULL;
	}

	drivePath[4] = partitionLetter[0];
	
	if (!get_capacity(drivePath, &sector_size, &sector_count))
	{
		return NULL;
	}
	
	handle = CreateFile(drivePath, GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_EXISTING, FILE_FLAG_NO_BUFFERING, NULL);
	
	if (handle == INVALID_HANDLE_VALUE)
	{
		return NULL;
	}
	
	return wbfs_open_partition(read_sector, write_sector, close_handle, handle, sector_size, sector_count, 0, reset);
}
Example #5
static int card_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
/**ltl
 * Function: request handler for the RAM-backed block device.
 * Parameters: q - the request queue.
 * Return value: none.
 * Note: this function must not call blk_end_request_all(), because
 *       blk_end_request_all() takes the request-queue lock, so calling it here would deadlock.
 *       Q: why would calling blk_end_request_all() deadlock?
 *       A: the block layer invokes this request_fn with q->queue_lock already held;
 *          blk_end_request_all() tries to acquire that same lock again, so it would
 *          deadlock on itself. __blk_end_request_all() is the variant that assumes
 *          the caller already holds the lock.
 */
static void mem_block_requeut_fn(struct request_queue* q)
{
	struct request* req = NULL;
	while (NULL != (req = blk_fetch_request(q)))
	{
		if(blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(req->rq_disk))
		{
			__blk_end_request_all(req, -EIO); /* must not be replaced with blk_end_request_all */
			continue;
		}

		switch(rq_data_dir(req))
		{
			case READ:
                {
				    memcpy(req->buffer,g_mem_buf + (blk_rq_pos(req) << 9),blk_rq_cur_bytes(req));
				    __blk_end_request_all(req, 0); /* must not be replaced with blk_end_request_all */
				    break;
                }
			case WRITE:
                {
				    memcpy(g_mem_buf + (blk_rq_pos(req) << 9),req->buffer,blk_rq_cur_bytes(req));
				    __blk_end_request_all(req, 0); /* must not be replaced with blk_end_request_all */
				    break;
                }
			default:
				__blk_end_request_all(req, -EIO); /* must not be replaced with blk_end_request_all */
				break;
		}
	}
  //  BLK_PLog("req:0x%p",req);
}
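The comment above reduces to the usual locking rule for this era of the block layer: the request_fn runs with q->queue_lock held, so it must use the double-underscore completion helpers, while code that does not hold the lock uses the plain ones. A short sketch of the convention (handle_one() is a hypothetical helper returning 0 or -EIO):

#include <linux/blkdev.h>

static int handle_one(struct request *req);	/* hypothetical: returns 0 or -EIO */

/* request_fn: the block layer calls this with q->queue_lock already held,
 * so only the __-prefixed completion helpers are safe here. */
static void my_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL)
		__blk_end_request_all(req, handle_one(req));
}

/* Process context, lock not held: the plain helper acquires q->queue_lock
 * internally, which is exactly why it must not be called from inside the
 * request_fn above. */
static void my_complete_later(struct request *req, int error)
{
	blk_end_request_all(req, error);
}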
static void mem_block_requeut_fn(struct request_queue* q)
{
	struct request* req = NULL;
	while (NULL != (req = elv_next_request(q)))
	{
		if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
		{
			end_request(req, 0);
			LogPath();
			continue;
		}
		// Log("sector:%d,current_nr_sectors:%d",req->sector,req->current_nr_sectors);
		switch (rq_data_dir(req))
		{
			case READ:
			{
				memcpy(req->buffer, g_mem_buf + (req->sector << 9), req->current_nr_sectors << 9);
				end_request(req, 1);
				break;
			}
			case WRITE:
			{
				memcpy(g_mem_buf + (req->sector << 9), req->buffer, req->current_nr_sectors << 9);
				end_request(req, 1);
				break;
			}
			default:
				Log("[Error] Unknown request...");
				break;
		}
	}
}
Example #8
/**
 * Resize disk.
 *
 * @gd disk.
 * @new_size new size [logical block].
 *
 * RETURN:
 *   true in success, or false.
 */
bool resize_disk(struct gendisk *gd, u64 new_size)
{
	struct block_device *bdev;
	u64 old_size;

	ASSERT(gd);

	old_size = get_capacity(gd);
	if (old_size == new_size) {
		return true;
	}
	set_capacity(gd, new_size);

	bdev = bdget_disk(gd, 0);
	if (!bdev) {
		LOGe("bdget_disk failed.\n");
		return false;
	}
	mutex_lock(&bdev->bd_mutex);
	if (old_size > new_size) {
		LOGn("Shrink disk should discard block cache.\n");
		check_disk_size_change(gd, bdev);
		/* This should be implemented in check_disk_size_change(). */
		bdev->bd_invalidated = 0;
	} else {
		i_size_write(bdev->bd_inode,
			(loff_t)new_size * LOGICAL_BLOCK_SIZE);
	}
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	return true;
}
Example #9
// after a raw write user can set NEW_YS size
// if you know the size before the write use assign()
void input_buffer::add_size(uint i) 
{ 
    if (error_ == 0 && check(size_ + i-1, get_capacity()) == 0)
        size_ += i;
    else
        error_ = -1;
}
bool ConcurrentCircularBuffer::bl_consume(uint size, char* dest, CommandInitiator* curr_thread) 
{
  uint  capacity_mask = m_capacity_mask;
  uint  capacity      = get_capacity();
  uint  chunk;
  while (size > 0) {
    chunk = size;
    if (chunk > capacity) chunk = capacity;
    uint  rd = m_rd_ptr;
    m_rd_wr_lock.lock();
    if (m_size < chunk) {
      m_blocked_consumer = curr_thread;
      m_waited_bytes = chunk;
      m_rd_wr_lock.unlock();
      if (!curr_thread->yield()) return false;
      m_rd_wr_lock.lock();
    }
    copy_to(dest, rd, chunk, capacity);
    m_size -= chunk;
    dest   += chunk;
    m_rd_ptr = (rd+chunk) & capacity_mask; // single reader ensured:no need to protect...
    size -= chunk;
    m_rd_wr_lock.unlock();
  }
  return true;
}
Example #11
static int blk_report_zones(struct gendisk *disk, sector_t sector,
			    struct blk_zone *zones, unsigned int *nr_zones,
			    gfp_t gfp_mask)
{
	struct request_queue *q = disk->queue;
	unsigned int z = 0, n, nrz = *nr_zones;
	sector_t capacity = get_capacity(disk);
	int ret;

	while (z < nrz && sector < capacity) {
		n = nrz - z;
		ret = disk->fops->report_zones(disk, sector, &zones[z], &n,
					       gfp_mask);
		if (ret)
			return ret;
		if (!n)
			break;
		sector += blk_queue_zone_sectors(q) * n;
		z += n;
	}

	WARN_ON(z > *nr_zones);
	*nr_zones = z;

	return 0;
}
Example #12
static int cyasblkdev_blk_ioctl(
			struct block_device *bdev,
			fmode_t mode,
			unsigned int cmd,
			unsigned long arg
			)
{
	DBGPRN_FUNC_NAME;

	if (cmd == HDIO_GETGEO) {
		/*for now  we only process geometry IOCTL*/
		struct hd_geometry geo;

		memset(&geo, 0, sizeof(struct hd_geometry));

		geo.cylinders	= get_capacity(bdev->bd_disk) / (4 * 16);
		geo.heads	= 4;
		geo.sectors	= 16;
		geo.start	= get_start_sect(bdev);

		/* copy to user space */
		return copy_to_user((void __user *)arg, &geo, sizeof(geo))
			? -EFAULT : 0;
	}

	return -ENOTTY;
}
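For completeness, this is how user space consumes the geometry the driver fabricates here; a minimal sketch, with the device path purely illustrative:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	struct hd_geometry geo;
	int fd = open("/dev/cyasblkdev0", O_RDONLY);	/* illustrative node */

	if (fd < 0 || ioctl(fd, HDIO_GETGEO, &geo) < 0) {
		perror("HDIO_GETGEO");
		return 1;
	}
	printf("C/H/S = %u/%u/%u, start = %lu\n",
	       (unsigned)geo.cylinders, (unsigned)geo.heads,
	       (unsigned)geo.sectors, (unsigned long)geo.start);
	close(fd);
	return 0;
}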
Example #13
static int __bind(struct mapped_device *md, struct dm_table *t)
{
	request_queue_t *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (md->suspended_bdev)
		__set_size(md, size);
	if (size == 0)
		return 0;

	dm_table_get(t);
	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}
Example #14
static int partinfo_show_proc(struct seq_file *m, void *v)
{
	dev_t devt;
	int partno;
	struct disk_part_iter piter;
	struct gendisk *disk;
	struct hd_struct *part;
	u64 last = 0;

	devt = blk_lookup_devt("mmcblk0", 0);
	disk = get_gendisk(devt, &partno);

	seq_printf(m, "%-16s %-16s\t%-16s\n", "Name", "Start", "Size");

	if (!disk || get_capacity(disk) == 0)
		return 0;

	disk_part_iter_init(&piter, disk, 0);
	seq_printf(m, "%-16s 0x%016llx\t0x%016llx\n", "pgpt", 0ULL, 512 * 1024ULL);

	while ((part = disk_part_iter_next(&piter))) {
		seq_printf(m, "%-16s 0x%016llx\t0x%016llx\n",
			part->info ? (char *)(part->info->volname) : "unknown",
			(u64)part->start_sect * 512,
			(u64)part->nr_sects * 512);
		last = (part->start_sect + part->nr_sects) * 512;
	}

	seq_printf(m, "%-16s 0x%016llx\t0x%016llx\n", "sgpt", last, 512 * 1024ULL);
	disk_part_iter_exit(&piter);

	return 0;
}
Example #15
static int show_partition(struct seq_file *part, void *v)
{
	struct gendisk *sgp = v;
	int n;
	char buf[BDEVNAME_SIZE];

	if (&sgp->kobj.entry == block_subsys.kset.list.next)
		seq_puts(part, "major minor  #blocks  name\n\n");

	/* Don't show non-partitionable removable devices or empty devices */
	if (!get_capacity(sgp) ||
			(sgp->minors == 1 && (sgp->flags & GENHD_FL_REMOVABLE)))
		return 0;
	if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
		return 0;

	/* show the full disk and all non-0 size partitions of it */
	seq_printf(part, "%4d  %4d %10llu %s\n",
		sgp->major, sgp->first_minor,
		(unsigned long long)get_capacity(sgp) >> 1,
		disk_name(sgp, 0, buf));
	for (n = 0; n < sgp->minors - 1; n++) {
		if (!sgp->part[n])
			continue;
		if (sgp->part[n]->nr_sects == 0)
			continue;
		seq_printf(part, "%4d  %4d %10llu %s\n",
			sgp->major, n + 1 + sgp->first_minor,
			(unsigned long long)sgp->part[n]->nr_sects >> 1 ,
			disk_name(sgp, n + 1, buf));
	}

	return 0;
}
Example #16
static ssize_t upgrade_proc_read(struct file *file, char __user *buf, 
            size_t count, loff_t *ppos)
{
    dev_t devt;
    int partno;
    struct gendisk *disk;

    struct partition_package *package;
    int len; 
    ssize_t ret;

    devt = blk_lookup_devt("mmcblk0", 0);
    disk = get_gendisk(devt, &partno);

    if (!disk || get_capacity(disk) == 0)
        return 0;

    package = alloc_partition_package(disk, &len);
    if (!package) {
        ret = -ENOMEM;
        part_err("upgrade_proc_read: fail to malloc package\n");
        goto fail_malloc;
    }

    get_partition_package(disk, package);

    ret = simple_read_from_buffer(buf, count, ppos, package, len);

    kfree(package);

fail_malloc:
    return ret;
}
Example #17
static int show_partition(struct seq_file *part, void *v)
{
	struct gendisk *sgp = v;
	int n;
	char buf[64];

	if (&sgp->kobj.entry == block_subsys.kset.list.next)
		seq_puts(part, "major minor  #blocks  name\n\n");

	/* Don't show non-partitionable devices or empty devices */
	if (!get_capacity(sgp) || sgp->minors == 1)
		return 0;

	/* show the full disk and all non-0 size partitions of it */
	seq_printf(part, "%4d  %4d %10llu %s\n",
		sgp->major, sgp->first_minor,
		(unsigned long long)get_capacity(sgp) >> 1,
		disk_name(sgp, 0, buf));
	for (n = 0; n < sgp->minors - 1; n++) {
		if (sgp->part[n].nr_sects == 0)
			continue;
		seq_printf(part, "%4d  %4d %10llu %s\n",
			sgp->major, n + 1 + sgp->first_minor,
			(unsigned long long)sgp->part[n].nr_sects >> 1 ,
			disk_name(sgp, n + 1, buf));
	}

	return 0;
}
Example #18
static int micro_batt_get_property(struct power_supply *b,
                                   enum power_supply_property psp,
                                   union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		val->intval = get_status(b);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
		val->intval = 4700000;
		break;
	case POWER_SUPPLY_PROP_CAPACITY:
		val->intval = get_capacity(b);
		break;
	case POWER_SUPPLY_PROP_TEMP:
		val->intval = micro_battery.temperature;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		val->intval = micro_battery.voltage;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
Example #19
/*
 * Loop through every CONFIG_MMC_BLOCK_MINORS'th minor device for
 * MMC_BLOCK_MAJOR, get the struct gendisk for each device. Returns
 * nr of found disks. Populate mmc_disks.
 */
static int scan_mmc_devices(struct gendisk *mmc_disks[])
{
	dev_t devnr;
	int i, j = 0, part;
	struct gendisk *mmc_devices[256 / CONFIG_MMC_BLOCK_MINORS];

	memset(&mmc_devices, 0, sizeof(mmc_devices));

	for (i = 0; i * CONFIG_MMC_BLOCK_MINORS < 256; i++) {
		devnr = MKDEV(MMC_BLOCK_MAJOR, i * CONFIG_MMC_BLOCK_MINORS);
		mmc_devices[i] = get_gendisk(devnr, &part);

		/* Invalid capacity of device, do not add to list */
		if (!mmc_devices[i] || !get_capacity(mmc_devices[i]))
			continue;

		mmc_disks[j] = mmc_devices[i];
		j++;

		if (j == PERF_MMC_HOSTS)
			break;
	}

	return j;
}
Example #20
/* west bridge block device prep request function */
static int cyasblkdev_blk_prep_rq(
					struct cyasblkdev_queue *bq,
					struct request *req
					)
{
	struct cyasblkdev_blk_data *bd = bq->data;
	int stat = BLKPREP_OK;

	DBGPRN_FUNC_NAME;

	/* If we have no device, we haven't finished initialising. */
	if (!bd || !bd->dev_handle) {
		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(KERN_ERR
			"cyasblkdev %s: killing request - no device/host\n",
			req->rq_disk->disk_name);
		#endif
		return BLKPREP_KILL;	/* bd may be NULL below, so bail out now */
	}

	if (bd->suspended) {
		blk_plug_device(bd->queue.queue);
		stat = BLKPREP_DEFER;
	}

	/* Check for excessive requests.*/
	if (blk_rq_pos(req) + blk_rq_sectors(req) > get_capacity(req->rq_disk)) {
		cy_as_hal_print_message("cyasblkdev: bad request address\n");
		stat = BLKPREP_KILL;
	}

	return stat;
}
Example #21
struct hd_struct *get_part(char *name)
{
    dev_t devt;
    int partno;
    struct disk_part_iter piter;
    struct gendisk *disk;
    struct hd_struct *part = NULL; 
    
    if (!name)
        return part;

    devt = blk_lookup_devt("mmcblk0", 0);
    disk = get_gendisk(devt, &partno);

    if (!disk || get_capacity(disk) == 0)
        return NULL;

	disk_part_iter_init(&piter, disk, 0);
	while ((part = disk_part_iter_next(&piter))) {
        if (part->info && !strcmp(part->info->volname, name)) {
            get_device(part_to_dev(part));
            break;
        }
	}
	disk_part_iter_exit(&piter);
    
    return part;
}
Example #22
/*
 * Common request path.  Rather than registering a custom make_request()
 * function we use the generic Linux version.  This is done because it allows
 * us to easily merge read requests which would otherwise be performed
 * synchronously by the DMU.  This is less critical in the write case where the
 * DMU will perform the correct merging within a transaction group.  Using
 * the generic make_request() also lets us leverage the fact that the
 * elevator will ensure correct ordering with regard to barrier IOs.  On
 * the downside it means that in the write case we end up doing request
 * merging twice: once in the elevator and once in the DMU.
 *
 * The request handler is called under a spin lock so all the real work
 * is handed off to be done in the context of the zvol taskq.  This function
 * simply performs basic request sanity checking and hands off the request.
 */
static void
zvol_request(struct request_queue *q)
{
	zvol_state_t *zv = q->queuedata;
	struct request *req;
	unsigned int size;

	while ((req = blk_fetch_request(q)) != NULL) {
		size = blk_rq_bytes(req);

		if (size != 0 && blk_rq_pos(req) + blk_rq_sectors(req) >
		    get_capacity(zv->zv_disk)) {
			printk(KERN_INFO
			       "%s: bad access: block=%llu, count=%lu\n",
			       req->rq_disk->disk_name,
			       (long long unsigned)blk_rq_pos(req),
			       (long unsigned)blk_rq_sectors(req));
			__blk_end_request(req, -EIO, size);
			continue;
		}

		if (!blk_fs_request(req)) {
			printk(KERN_INFO "%s: non-fs cmd\n",
			       req->rq_disk->disk_name);
			__blk_end_request(req, -EIO, size);
			continue;
		}

		switch (rq_data_dir(req)) {
		case READ:
			zvol_dispatch(zvol_read, req);
			break;
		case WRITE:
			if (unlikely(get_disk_ro(zv->zv_disk)) ||
			    unlikely(zv->zv_flags & ZVOL_RDONLY)) {
				__blk_end_request(req, -EROFS, size);
				break;
			}

#ifdef HAVE_BLK_QUEUE_DISCARD
			if (req->cmd_flags & VDEV_REQ_DISCARD) {
				zvol_dispatch(zvol_discard, req);
				break;
			}
#endif /* HAVE_BLK_QUEUE_DISCARD */

			zvol_dispatch(zvol_write, req);
			break;
		default:
			printk(KERN_INFO "%s: unknown cmd: %d\n",
			       req->rq_disk->disk_name, (int)rq_data_dir(req));
			__blk_end_request(req, -EIO, size);
			break;
		}
	}
}
Example #23
// Validate a MAP packet.
//
// If the require_checksum argument is true, then the packet must contain a (valid) checksum
// in the outermost encapsulation to be considered valid.
// If the remove_checksums argument is true, then any checksums present will be removed
// once validated. (Note that the process aborts after encountering the first packet error.)
bool MAP::MAPPacket::validate(HeaderOffset_t headerOffset, bool require_checksum, bool remove_checksums){
// Make sure packet includes at least an initial header
// Make sure packet includes an outermost checksum, if requested.
  Data_t* header = get_header(headerOffset);
  if(   header == NULL
     || ( require_checksum && !get_checksumPresent(*header) )
    ) return false;

  DEBUGprint_MAP("MPPval: cap=%d, size=%d\n", get_capacity(), get_size());

// Validate (possibly nested) MAP header structure.
  // This requires a pass through the entire header structure.
  // Some processing time could perhaps be saved by using the same
  // pass to feed all the headers through the checksum engine,
  // especially if the packet data is stored in something slower
  // than RAM.
  Data_t* data_ptr = get_data(header);
  if(data_ptr == NULL)
    return false;

  // Stop point
  Data_t *stop_ptr = back();

  // Stop at actual data (no more MAP headers).
  while(header != NULL){
  // If header indicates a checksum, validate it.
    if(get_checksumPresent(*header)){
       DEBUGprint_MAP("MPPval: val crc\n");
    // Validate the checksum
      if(! validateChecksum(header, stop_ptr))
        return false;

    // Encapsulated data is now one checksum back.
      stop_ptr -= MAP::ChecksumLength;

      if(remove_checksums){
        *header = MAP::set_checksumPresent(*header, false);
        DEBUGprint_MAP("MPPval: rem crc\n");
      }
    }

  // Next header
    header = get_next_header(header);
  }

// If requested, cut off all checksums.
  if(remove_checksums){
    // Set the packet size such that the checksums (at the end) are all removed.
    set_size(stop_ptr - front());
    // Try to eliminate excess capacity
    //set_capacity(stop_ptr - front());
  }

  // Checksums (if any) were all valid.
  return true;
}
Example #24
int pt_getgeo(struct block_device * block_device, struct hd_geometry * hg)
{
	hg->heads = 255;
	hg->sectors = 63;

	hg->cylinders = get_capacity(block_device->bd_disk);
	sector_div(hg->cylinders, hg->heads * hg->sectors);

	return 0;
}
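The 255-head, 63-sector values here are the conventional LBA-translation geometry, so one "cylinder" spans 255 * 63 = 16065 sectors; an 8 GiB disk of 16777216 sectors therefore reports 16777216 / 16065 ≈ 1044 cylinders. sector_div() performs that division in place on the 64-bit sector count and returns the remainder, which pt_getgeo discards.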
Example #25
wbfs_t *wbfs_try_open_partition(char *fn,int reset)
{
	u32 sector_size, n_sector;
	if(!get_capacity(fn,&sector_size,&n_sector))
		return NULL;
	FILE *f = fopen(fn,"r+");
	if (!f)
		return NULL;
	return wbfs_open_partition(wbfs_fread_sector, wbfs_fwrite_sector, f,
				   sector_size, n_sector, 0, reset);
}
Example #26
// write function, should use at/near construction
void input_buffer::assign(const byte* t, uint s)
{
    if (t && error_ == 0 && check(current_, get_capacity()) == 0) {
        add_size(s);
        if (error_ == 0) {
            memcpy(&buffer_[current_], t, s);
            return;  // success
        }
    }

    error_ = -1;
}
Example #27
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
{
	zvol_state_t *zv = q->queuedata;
	fstrans_cookie_t cookie = spl_fstrans_mark();
	uint64_t offset = BIO_BI_SECTOR(bio);
	unsigned int sectors = bio_sectors(bio);
	int rw = bio_data_dir(bio);
#ifdef HAVE_GENERIC_IO_ACCT
	unsigned long start = jiffies;
#endif
	int error = 0;

	if (bio_has_data(bio) && offset + sectors >
	    get_capacity(zv->zv_disk)) {
		printk(KERN_INFO
		    "%s: bad access: block=%llu, count=%lu\n",
		    zv->zv_disk->disk_name,
		    (long long unsigned)offset,
		    (long unsigned)sectors);
		error = SET_ERROR(EIO);
		goto out1;
	}

	generic_start_io_acct(rw, sectors, &zv->zv_disk->part0);

	if (rw == WRITE) {
		if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
			error = SET_ERROR(EROFS);
			goto out2;
		}

		if (bio->bi_rw & VDEV_REQ_DISCARD) {
			error = zvol_discard(bio);
			goto out2;
		}

		error = zvol_write(bio);
	} else
		error = zvol_read(bio);

out2:
	generic_end_io_acct(rw, &zv->zv_disk->part0, start);
out1:
	BIO_END_IO(bio, -error);
	spl_fstrans_unmark(cookie);
#ifdef HAVE_MAKE_REQUEST_FN_RET_INT
	return (0);
#elif defined(HAVE_MAKE_REQUEST_FN_RET_QC)
	return (BLK_QC_T_NONE);
#endif
}
Example #28
    void pokemon_box_gcnimpl::set_pokemon(
        int index,
        const pkmn::pokemon::sptr& new_pokemon
    )
    {
        int max_index = get_capacity();
        pkmn::enforce_bounds("Box index", index, 0, max_index);

        if(_pokemon_list.at(index)->get_native_pc_data() == new_pokemon->get_native_pc_data())
        {
            throw std::invalid_argument("Cannot set a Pokémon to itself.");
        }

        boost::lock_guard<pokemon_box_gcnimpl> lock(*this);

        // If the given Pokémon isn't from this box's game, convert it if we can.
        pkmn::pokemon::sptr actual_new_pokemon;
        if(_game_id == new_pokemon->get_database_entry().get_game_id())
        {
            actual_new_pokemon = new_pokemon;
        }
        else
        {
            actual_new_pokemon = new_pokemon->to_game(get_game());
        }

        // Make sure no one else is using the new Pokémon variable.
        pokemon_gcnimpl* p_new_pokemon = dynamic_cast<pokemon_gcnimpl*>(
                                             actual_new_pokemon.get()
                                         );
        BOOST_ASSERT(p_new_pokemon != nullptr);
        boost::lock_guard<pokemon_gcnimpl> new_pokemon_lock(*p_new_pokemon);

        // Copy the underlying memory to the party. At the end of this process,
        // all existing variables will correspond to the same Pokémon, even if
        // their underlying memory has changed.
        //
        // Note: as we control the implementation, we know the PC data points
        // to the whole Pokémon data structure.
        delete _libpkmgc_box_uptr->pkm[index];

        _libpkmgc_box_uptr->pkm[index] =
            static_cast<LibPkmGC::GC::Pokemon*>(
                actual_new_pokemon->get_native_pc_data()
            )->clone();
        _pokemon_list[index] = std::make_shared<pokemon_gcnimpl>(
                                   dynamic_cast<LibPkmGC::GC::Pokemon*>(
                                       _libpkmgc_box_uptr->pkm[index]
                                   ),
                                   _game_id
                               );
    }
Example #29
static int tbio_transfer(struct request *req, struct tbio_device *dev)
{
	unsigned int i = 0, offset = 0;
	char *buf;
	unsigned long flags;
	size_t size;

	struct bio_vec *bv;
	struct req_iterator iter;

	size = blk_rq_cur_bytes(req);
	prk_info("bio req of size %zu:", size);
	offset = blk_rq_pos(req) * 512;

	rq_for_each_segment(bv, req, iter) {
		size = bv->bv_len;
		prk_info("%s bio(%u), segs(%u) sect(%u) pos(%lu) off(%u)",
			(bio_data_dir(iter.bio) == READ) ? "READ" : "WRITE",
			i, bio_segments(iter.bio), bio_sectors(iter.bio),
			iter.bio->bi_sector, offset);

		if (get_capacity(req->rq_disk) * 512 < offset) {
			prk_info("Error, small capacity %zu, offset %u",
				get_capacity(req->rq_disk) * 512,
				offset);
			continue;
		}

		buf = bvec_kmap_irq(bv, &flags);
		if (bio_data_dir(iter.bio) == WRITE)
			memcpy(dev->data + offset, buf, size);
		else
			memcpy(buf, dev->data + offset, size);
		offset += size;
		flush_kernel_dcache_page(bv->bv_page);
		bvec_kunmap_irq(buf, &flags);
		++i;
	}

	return 0;
}
Example #30
    int pokemon_box_gcnimpl::get_num_pokemon()
    {
        boost::lock_guard<pokemon_box_gcnimpl> lock(*this);

        int num_pokemon = 0;
        for(int i = 0; i < get_capacity(); ++i)
        {
            if(_libpkmgc_box_uptr->pkm[i]->species > LibPkmGC::NoSpecies)
            {
                ++num_pokemon;
            }
        }

        return num_pokemon;
    }