Example #1
static int tbio_io(struct block_device *bdev,
		   struct tbio_interface *uptr)
{
	tbio_interface_t inter;
	struct bio *bio = NULL;
	int reading = 0, writing = 0;
	void *buffer = NULL;
	request_queue_t *q;

	/* "Device" is a module-global in this driver, not the bdev argument. */
	q = bdev_get_queue(Device.bdev);

	if (copy_from_user(&inter, uptr, sizeof(tbio_interface_t))) {
		printk("tbio: copy_from_user\n");
		return -EFAULT;
	}

	if (inter.data_len > (q->max_sectors << 9)) {
		printk("tbio: inter.in_len > q->max_sectors << 9\n");
		return -EIO;
	}

	if (inter.data_len) {
		switch (inter.direction) {
		default:
			return -EINVAL;
		case TBIO_TO_DEV:
			writing = 1;
			break;
		case TBIO_FROM_DEV:
			reading = 1;
			break;
		}

		bio = bio_map_user(bdev, (unsigned long)inter.data,
				   inter.data_len, reading);

		if (!bio) {
			printk("tbio: bio_map_user failed\n");
			buffer = kmalloc(inter.data_len, q->bounce_gfp | GFP_USER);
			if (!buffer) {
				printk("tbio: buffer no memory\n");
				return -ENOMEM;
			}
			if (copy_from_user(buffer, inter.data, inter.data_len)) {
				printk("tbio: copy_from_user of data failed\n");
				kfree(buffer);
				return -EFAULT;
			}
			printk("tbio: buffer %s\n", (char *)buffer);
		}

	}

	send_request(q, bio, bdev, &inter, writing);

	if (bio)
		bio_unmap_user(bio, reading);
	return 0;
}
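
This example (and Example #6 below) uses the early bio_map_user()/bio_unmap_user() pair, where a NULL return means the user pages could not be pinned, which is why the code falls back to a kmalloc() bounce buffer. A minimal, version-dependent sketch of that calling pattern, with hypothetical "ubuf" and "len" values rather than the tbio_interface_t fields, could be:

static int map_and_unmap_sketch(struct block_device *bdev,
				void __user *ubuf, unsigned int len,
				int reading)
{
	struct bio *bio;

	/* Sketch only: older 4-argument bio_map_user() as used above. */
	bio = bio_map_user(bdev, (unsigned long)ubuf, len, reading);
	if (!bio)
		return -ENOMEM;		/* or fall back to a bounce buffer */

	/* ... build and submit a request around "bio" ... */

	bio_unmap_user(bio, reading);	/* same direction flag as the map call */
	return 0;
}
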
Example #2
static int tbio_io(struct block_device *bdev, struct tbio_interface *uptr)
{
	int ret;
	tbio_interface_t inter;
	struct bio *bio = NULL;
	int reading = 0, writing = 0;
	void *buf = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (copy_from_user(&inter, uptr, sizeof(tbio_interface_t))) {
		prk_err("copy_from_user");
		return -EFAULT;
	}

	if (inter.data_len > (q->limits.max_sectors << 9)) {
		prk_err("inter.in_len > q->max_sectors << 9");
		return -EIO;
	}

	if (inter.data_len) {

		switch (inter.direction) {
		default:
			return -EINVAL;
		case TBIO_TO_DEV:
			writing = 1;
			break;
		case TBIO_FROM_DEV:
			reading = 1;
			break;
		}

		bio = bio_map_user(q, bdev, (unsigned long)inter.data,
			inter.data_len, reading, GFP_KERNEL);

		if (!bio) {
			prk_err("bio_map_user failed");
			buf = kmalloc(inter.data_len, q->bounce_gfp | GFP_USER);
			if (!buf) {
				prk_err("buffer no memory");
				return -ENOMEM;
			}
			ret = copy_from_user(buf, inter.data, inter.data_len);
			if (ret) {
				prk_err("copy_from_user() failed");
				kfree(buf);
				return -EFAULT;
			}

			prk_info("buffer %s, copy_from_user returned '%d'",
				 (char *)buf, ret);
		}

	}

	send_request(q, bio, bdev, &inter, writing);

	if (bio)
		bio_unmap_user(bio);
	return 0;
}
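
Example #2 targets a later kernel in which bio_map_user() also takes the request queue and a gfp mask. In that era the function appears to report failure through ERR_PTR() rather than NULL (Example #3 below checks it with IS_ERR()), so a more defensive form of the check above might look like this sketch, reusing the variables of Example #2:

		/* Sketch only: ERR_PTR-style error handling for the newer API. */
		bio = bio_map_user(q, bdev, (unsigned long)inter.data,
				   inter.data_len, reading, GFP_KERNEL);
		if (IS_ERR(bio)) {
			prk_err("bio_map_user failed");
			return PTR_ERR(bio);	/* or fall back to the bounce buffer */
		}
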
Example #3
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, uaddr, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}
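
__blk_rq_map_user() above is the internal helper behind blk_rq_map_user(); a driver building a passthrough request would normally go through the public helper rather than call bio_map_user() or bio_copy_user() itself. A rough sketch of such a caller, with hypothetical "ubuf", "len" and "do_read" parameters and the command setup and execution elided, might be:

static int passthrough_map_sketch(struct request_queue *q,
				  void __user *ubuf, unsigned int len,
				  int do_read)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, do_read ? READ : WRITE, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	/* Map the user buffer into the request; bounce/copy is handled inside. */
	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	/* ... fill in the command and timeout, then execute the request ... */

	blk_rq_unmap_user(rq->bio);
	blk_put_request(rq);
	return 0;
}
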
Example #4
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     void __user *ubuf, unsigned int len)
{
	unsigned long uaddr;
	unsigned int alignment;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	if (!(uaddr & alignment) && !(len & alignment))
		bio = bio_map_user(q, NULL, uaddr, len, reading);
	else
		bio = bio_copy_user(q, uaddr, len, reading);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}
Example #5
static int ltpdev_ioctl(struct inode *pinode, struct file *pfile, unsigned int cmd, unsigned long arg)
{

    struct bio *my_bio = NULL;
    struct bio *my_bio_copy = NULL;
    request_queue_t *q = NULL;
    struct block_device *bdev = NULL;
    unsigned long uaddr;

    unsigned int bytes_done = 100;

    int          error = 0;
    int          rc = 0;

    printk(KERN_ALERT "ltpdev_ioctl fs tests\n");

    switch (cmd) {
    
    case LTPAIODEV_CMD:
        printk(KERN_ALERT "Running AIO FS tests \n");
        printk(KERN_ALERT "AIO FS tests complete\n");
        break;

    case LTPBIODEV_CMD:

        printk(KERN_ALERT "Running BIO FS tests \n");

        my_bio = bio_alloc(GFP_KERNEL, 0);
        if (!my_bio) {
            printk(KERN_ALERT "Error getting kernel slab memory !!\n");
        }
        else {
            printk(KERN_ALERT "kernel slab memory alloc OK\n");
        }

        bio_endio(my_bio, bytes_done, error);

        printk(KERN_ALERT "Return from bio_endio = %d \n", error);

        my_bio_copy = bio_clone(my_bio,GFP_ATOMIC);

        if (!my_bio_copy) {
            printk(KERN_ALERT "Error getting kernel bio clone !!\n");
        }
        else {
            printk(KERN_ALERT "kernel bio clone OK\n");
        }

        my_bio_copy = bio_clone(my_bio,GFP_NOIO);

        if (!my_bio_copy) {
            printk(KERN_ALERT "Error getting kernel bio clone !!\n");
        }
        else {
            printk(KERN_ALERT "kernel bio clone OK\n");
        }

//        q = bdev_get_queue(my_bio->bi_bdev);

//        rc = bio_phys_segments(q, my_bio);

//        rc = bio_hw_segments(q, my_bio);

        bdev = lookup_bdev(LTP_FS_DEVICE_NAME);
        if (IS_ERR(bdev)) {
            printk(KERN_ALERT "lookup_bdev failed\n");
            return PTR_ERR(bdev);
        }

        printk(KERN_ALERT "return from bdev size %d\n", bdev->bd_block_size);

        printk(KERN_ALERT "Return from phys_segments = %d \n", rc);

//        Don't use this API, causes system to hang and corrupts FS
//        bio_put(my_bio);

        uaddr = (unsigned long)kmalloc(TEST_MEM_SIZE, GFP_KERNEL);
        if (!uaddr) {
            printk(KERN_ALERT "kmalloc of test buffer failed\n");
            return -ENOMEM;
        }

        my_bio_copy = bio_map_user(bdev, uaddr, TEST_MEM_SIZE, FALSE);

        printk(KERN_ALERT "Return from bio_map_user %p\n", my_bio_copy);

        do_buffer_c_tests();

        printk(KERN_ALERT "BIO FS tests complete\n");

    break;
    }


    return 0;
}
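
The ioctl above exercises bio_alloc(), bio_clone() and bio_endio() largely for their side effects. Outside of such tests, a bio obtained from bio_alloc() is normally released with bio_put() once the reference is no longer needed; a minimal, hypothetical allocate/release pairing (details are kernel-version dependent) looks like:

static int bio_alloc_put_sketch(void)
{
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);	/* room for one bio_vec */
	if (!bio)
		return -ENOMEM;

	/* ... set bi_bdev/bi_sector and add data pages with bio_add_page() ... */

	bio_put(bio);			/* drops the reference taken by bio_alloc() */
	return 0;
}
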
Example #6
static int sg_io(request_queue_t *q, struct block_device *bdev,
		 struct sg_io_hdr *hdr)
{
	unsigned long start_time;
	int reading, writing;
	struct request *rq;
	struct bio *bio;
	char sense[SCSI_SENSE_BUFFERSIZE];
	void *buffer;

	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > sizeof(rq->cmd))
		return -EINVAL;

	/*
	 * we'll do that later
	 */
	if (hdr->iovec_count)
		return -EOPNOTSUPP;

	if (hdr->dxfer_len > (q->max_sectors << 9))
		return -EIO;

	reading = writing = 0;
	buffer = NULL;
	bio = NULL;
	if (hdr->dxfer_len) {
		unsigned int bytes = (hdr->dxfer_len + 511) & ~511;

		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_FROM_DEV:
			reading = 1;
			/* fall through */
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_FROM_DEV:
			reading = 1;
			break;
		}

		/*
		 * first try to map it into a bio. reading from device will
		 * be a write to vm.
		 */
		bio = bio_map_user(bdev, (unsigned long) hdr->dxferp,
				   hdr->dxfer_len, reading);

		/*
		 * if bio setup failed, fall back to slow approach
		 */
		if (!bio) {
			buffer = kmalloc(bytes, q->bounce_gfp | GFP_USER);
			if (!buffer)
				return -ENOMEM;

			if (writing) {
				if (copy_from_user(buffer, hdr->dxferp,
						   hdr->dxfer_len))
					goto out_buffer;
			} else
				memset(buffer, 0, hdr->dxfer_len);
		}
	}

	rq = blk_get_request(q, writing ? WRITE : READ, __GFP_WAIT);

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->cmd_len;
	memcpy(rq->cmd, hdr->cmdp, hdr->cmd_len);
	if (sizeof(rq->cmd) != hdr->cmd_len)
		memset(rq->cmd + hdr->cmd_len, 0, sizeof(rq->cmd) - hdr->cmd_len);

	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;

	rq->flags |= REQ_BLOCK_PC;

	rq->bio = rq->biotail = NULL;

	if (bio)
		blk_rq_bio_prep(q, rq, bio);

	rq->data = buffer;
	rq->data_len = hdr->dxfer_len;

	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_TIMEOUT;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_do_rq(q, bdev, rq);

	if (bio)
		bio_unmap_user(bio, reading);

	/* write to all output members */
	hdr->status = rq->errors;
	hdr->masked_status = (hdr->status >> 1) & 0x1f;
	hdr->msg_status = 0;
	hdr->host_status = 0;
	hdr->driver_status = 0;
	hdr->info = 0;
	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->resid = rq->data_len;
	hdr->duration = ((jiffies - start_time) * 1000) / HZ;
	hdr->sb_len_wr = 0;

	if (rq->sense_len && hdr->sbp) {
		int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);

		if (!copy_to_user(hdr->sbp, rq->sense, len))
			hdr->sb_len_wr = len;
	}

	blk_put_request(rq);

	if (buffer) {
		if (reading)
			if (copy_to_user(hdr->dxferp, buffer, hdr->dxfer_len))
				goto out_buffer;

		kfree(buffer);
	}

	/* may not have succeeded, but output values written to control
	 * structure (struct sg_io_hdr).  */
	return 0;
out_buffer:
	kfree(buffer);
	return -EFAULT;
}
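
sg_io() above is the kernel-side handler for the SG_IO ioctl. From user space the same path is reached by filling a struct sg_io_hdr and issuing the ioctl on an sg or block device node; a minimal, illustrative TEST UNIT READY example (no data transfer, file descriptor opened by the caller) might look like:

#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

static int test_unit_ready(int fd)
{
	unsigned char cdb[6] = { 0 };		/* TEST UNIT READY is all zeroes */
	unsigned char sense[32];
	struct sg_io_hdr hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';			/* checked by sg_io() above */
	hdr.dxfer_direction = SG_DXFER_NONE;	/* no data transfer */
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 5000;			/* milliseconds */

	return ioctl(fd, SG_IO, &hdr);		/* SCSI status comes back in hdr */
}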