static int tbio_io(struct block_device *bdev, struct tbio_interface *uptr) { tbio_interface_t inter; struct bio *bio = NULL; int reading = 0 , writing = 0 ; void * buffer = NULL; //struct request *rq; request_queue_t *q; q = bdev_get_queue(Device.bdev); if (copy_from_user(&inter , uptr , sizeof(tbio_interface_t))) { printk("tbio: copy_from_user\n"); return -EFAULT; } if (inter.data_len > (q->max_sectors << 9)) { printk("tbio: inter.in_len > q->max_sectors << 9\n"); return -EIO; } if (inter.data_len) { switch (inter.direction) { default: return -EINVAL; case TBIO_TO_DEV: writing = 1; break; case TBIO_FROM_DEV: reading = 1; break; } bio = bio_map_user(bdev , (unsigned long )inter.data , inter.data_len , reading); if(!bio) { printk("tbio : bio_map_user failed\n"); buffer = kmalloc (inter.data_len , q->bounce_gfp | GFP_USER); if(!buffer){ printk("tbio: buffer no memory\n"); return -1; } copy_from_user(buffer , inter.data , inter.data_len); printk("tbio: buffer %s\n",(char *)buffer); } } send_request(q, bio ,bdev,&inter , writing); if (bio) bio_unmap_user(bio, reading); return 0; }
static int tbio_io(struct block_device *bdev, struct tbio_interface *uptr) { int ret; tbio_interface_t inter; struct bio *bio = NULL; int reading = 0, writing = 0; void *buf = NULL; struct request_queue *q = bdev_get_queue(bdev); if (copy_from_user(&inter, uptr, sizeof(tbio_interface_t))) { prk_err("copy_from_user"); return -EFAULT; } if (inter.data_len > (q->limits.max_sectors << 9)) { prk_err("inter.in_len > q->max_sectors << 9"); return -EIO; } if (inter.data_len) { switch (inter.direction) { default: return -EINVAL; case TBIO_TO_DEV: writing = 1; break; case TBIO_FROM_DEV: reading = 1; break; } bio = bio_map_user(q, bdev, (unsigned long)inter.data, inter.data_len, reading, GFP_KERNEL); if (!bio) { prk_err("bio_map_user failed"); buf = kmalloc(inter.data_len, q->bounce_gfp | GFP_USER); if (!buf) { prk_err("buffer no memory"); return -1; } ret = copy_from_user(buf, inter.data, inter.data_len); if (ret) prk_err("copy_from_user() failed"); prk_info("buffer %s\n, copy_from_user returns '%d'", (char *)buf, ret); } } send_request(q, bio, bdev, &inter, writing); if (bio) bio_unmap_user(bio); return 0; }
/*
 * Undo whatever mapping was set up for @bio. A user-mapped bio is simply
 * unpinned; a bounce-copied bio must be copied back to user space, which
 * can fail — that result is propagated. NULL is tolerated.
 */
static int __blk_rq_unmap_user(struct bio *bio)
{
	if (!bio)
		return 0;

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		return bio_uncopy_user(bio);

	bio_unmap_user(bio);
	return 0;
}
/** * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage * @q: request queue where request should be inserted * @rq: request to map data to * @iov: pointer to the iovec * @iov_count: number of elements in the iovec * @len: I/O byte count * * Description: * Data will be mapped directly for zero copy io, if possible. Otherwise * a kernel bounce buffer is used. * * A matching blk_rq_unmap_user() must be issued at the end of io, while * still in process context. * * Note: The mapped bio may need to be bounced through blk_queue_bounce() * before being submitted to the device, as pages mapped may be out of * reach. It's the callers responsibility to make sure this happens. The * original bio must be passed back in to blk_rq_unmap_user() for proper * unmapping. */ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, struct sg_iovec *iov, int iov_count, unsigned int len) { struct bio *bio; int i, read = rq_data_dir(rq) == READ; int unaligned = 0; if (!iov || iov_count <= 0) return -EINVAL; for (i = 0; i < iov_count; i++) { unsigned long uaddr = (unsigned long)iov[i].iov_base; if (uaddr & queue_dma_alignment(q)) { unaligned = 1; break; } if (!iov[i].iov_len) return -EINVAL; } if (unaligned || (q->dma_pad_mask & len)) bio = bio_copy_user_iov(q, iov, iov_count, read); else bio = bio_map_user_iov(q, NULL, iov, iov_count, read); if (IS_ERR(bio)) return PTR_ERR(bio); if (bio->bi_size != len) { bio_endio(bio, 0); bio_unmap_user(bio); return -EINVAL; } if (!bio_flagged(bio, BIO_USER_MAPPED)) rq->cmd_flags |= REQ_COPY_USER; blk_queue_bounce(q, &bio); bio_get(bio); blk_rq_bio_prep(q, rq, bio); rq->buffer = rq->data = NULL; return 0; }
/*
 * sg_io - build and synchronously execute one user-submitted SCSI command
 * (SG_IO-style request) against @bdev's queue.
 *
 * The user's data buffer is first mapped directly into a bio; if that
 * fails, a kernel bounce buffer is allocated and copied through instead.
 * On completion all status fields of @hdr are filled in for the caller.
 *
 * Returns 0 once the request has been executed (a non-zero SCSI status in
 * @hdr is NOT an error), -EFAULT on user-copy failure, or another negative
 * errno for invalid/oversized requests.
 */
static int sg_io(request_queue_t *q, struct block_device *bdev,
		 struct sg_io_hdr *hdr)
{
	unsigned long start_time;
	int reading, writing;
	struct request *rq;
	struct bio *bio;
	char sense[SCSI_SENSE_BUFFERSIZE];
	void *buffer;

	/* 'S' is the only supported sg interface id */
	if (hdr->interface_id != 'S')
		return -EINVAL;
	/* the CDB must fit into the request's fixed command array */
	if (hdr->cmd_len > sizeof(rq->cmd))
		return -EINVAL;

	/*
	 * we'll do that later
	 */
	if (hdr->iovec_count)
		return -EOPNOTSUPP;

	/* transfer must fit in a single request on this queue */
	if (hdr->dxfer_len > (q->max_sectors << 9))
		return -EIO;

	reading = writing = 0;
	buffer = NULL;
	bio = NULL;
	if (hdr->dxfer_len) {
		/* bounce-buffer size rounded up to a 512-byte sector */
		unsigned int bytes = (hdr->dxfer_len + 511) & ~511;

		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_FROM_DEV:
			reading = 1;
			/* fall through */
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_FROM_DEV:
			reading = 1;
			break;
		}

		/*
		 * first try to map it into a bio. reading from device will
		 * be a write to vm.
		 */
		bio = bio_map_user(bdev, (unsigned long) hdr->dxferp,
				   hdr->dxfer_len, reading);

		/*
		 * if bio setup failed, fall back to slow approach
		 */
		if (!bio) {
			buffer = kmalloc(bytes, q->bounce_gfp | GFP_USER);
			if (!buffer)
				return -ENOMEM;
			if (writing) {
				if (copy_from_user(buffer, hdr->dxferp,
						   hdr->dxfer_len))
					goto out_buffer;
			} else
				/* read: pre-clear what the caller will see */
				memset(buffer, 0, hdr->dxfer_len);
		}
	}

	/* __GFP_WAIT: this blocks until a request is available */
	rq = blk_get_request(q, writing ? WRITE : READ, __GFP_WAIT);

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->cmd_len;
	memcpy(rq->cmd, hdr->cmdp, hdr->cmd_len);
	/* zero-pad the rest of the command array past the CDB */
	if (sizeof(rq->cmd) != hdr->cmd_len)
		memset(rq->cmd + hdr->cmd_len, 0,
		       sizeof(rq->cmd) - hdr->cmd_len);

	/*
	 * NOTE(review): rq->sense points at an on-stack buffer; this is
	 * only valid because blk_do_rq() below runs synchronously and the
	 * request does not outlive this frame.
	 */
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;

	rq->flags |= REQ_BLOCK_PC;
	rq->bio = rq->biotail = NULL;

	if (bio)
		blk_rq_bio_prep(q, rq, bio);

	rq->data = buffer;
	rq->data_len = hdr->dxfer_len;

	/* hdr->timeout is in milliseconds; convert to jiffies */
	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_TIMEOUT;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_do_rq(q, bdev, rq);

	if (bio)
		bio_unmap_user(bio, reading);

	/* write to all output members */
	hdr->status = rq->errors;
	hdr->masked_status = (hdr->status >> 1) & 0x1f;
	hdr->msg_status = 0;
	hdr->host_status = 0;
	hdr->driver_status = 0;
	hdr->info = 0;
	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	/* residual count: bytes the device did not transfer */
	hdr->resid = rq->data_len;
	hdr->duration = ((jiffies - start_time) * 1000) / HZ;
	hdr->sb_len_wr = 0;

	/* copy back as much sense data as the caller asked for */
	if (rq->sense_len && hdr->sbp) {
		int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);

		if (!copy_to_user(hdr->sbp, rq->sense, len))
			hdr->sb_len_wr = len;
	}

	blk_put_request(rq);

	/* bounce-buffer path: copy read data back out, then release it */
	if (buffer) {
		if (reading)
			if (copy_to_user(hdr->dxferp, buffer, hdr->dxfer_len))
				goto out_buffer;

		kfree(buffer);
	}

	/* may not have succeeded, but output values written to control
	 * structure (struct sg_io_hdr).
	 */
	return 0;
out_buffer:
	kfree(buffer);
	return -EFAULT;
}