/*
 * Transfer an iovec
 */
static int sbullr_rw_iovec(Sbull_Dev *dev, struct kiobuf *iobuf, int rw,
        int sector, int nsectors)
{
    struct request fakereq;
    struct page *page;
    int offset = iobuf->offset, ndone = 0, pageno, result;

    /* Perform I/O on each sector */
    fakereq.sector = sector;
    fakereq.current_nr_sectors = 1;
    fakereq.cmd = rw;

    for (pageno = 0; pageno < iobuf->nr_pages; pageno++) {
        page = iobuf->maplist[pageno];
        while (ndone < nsectors) {
            /* Fake up a request structure for the operation */
            fakereq.buffer = kmap(page) + offset;
            result = sbull_transfer(dev, &fakereq);
            kunmap(page);
            if (result == 0)
                return ndone;
            /* Move on to the next one */
            ndone++;
            fakereq.sector++;
            offset += SBULLR_SECTOR;
            if (offset >= PAGE_SIZE) {
                offset = 0;
                break;
            }
        }
    }
    return ndone;
}
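/*
 * Hedged sketch (not part of the listing above): one plausible caller of
 * sbullr_rw_iovec from the raw char-device read/write path, using the 2.4
 * kiobuf API. The name sbullr_transfer and the SBULLR_SECTOR_MASK /
 * SBULLR_SECTOR_SHIFT alignment macros are assumptions for illustration.
 */
static int sbullr_transfer(Sbull_Dev *dev, int rw, char *buf,
        size_t count, loff_t *offset)
{
    struct kiobuf *iobuf;
    int result;

    /* Only sector-aligned, whole-sector transfers are supported */
    if ((*offset & SBULLR_SECTOR_MASK) || (count & SBULLR_SECTOR_MASK))
        return -EINVAL;

    /* Allocate a kiobuf and pin the user buffer into it */
    result = alloc_kiovec(1, &iobuf);
    if (result)
        return result;
    result = map_user_kiobuf(rw, iobuf, (unsigned long) buf, count);
    if (result) {
        free_kiovec(1, &iobuf);
        return result;
    }

    /* Do the sector-by-sector I/O, then release the mapping */
    result = sbullr_rw_iovec(dev, iobuf, rw,
            (int) (*offset >> SBULLR_SECTOR_SHIFT),
            (int) (count >> SBULLR_SECTOR_SHIFT));
    unmap_kiobuf(iobuf);
    free_kiovec(1, &iobuf);

    if (result > 0)
        *offset += (loff_t) result << SBULLR_SECTOR_SHIFT;
    return result << SBULLR_SECTOR_SHIFT;  /* sectors done -> bytes */
}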
void sbull_request(request_queue_t *q)
{
    Sbull_Dev *device;
    struct request *req;
    int status;

    /* Find our device */
    device = sbull_locate_device(blkdev_entry_next_request(&q->queue_head));
    if (device->busy) /* no race here - io_request_lock held */
        return;
    device->busy = 1;

    /* Process requests in the queue */
    while (!list_empty(&q->queue_head)) {
        /* Pull the next request off the list. */
        req = blkdev_entry_next_request(&q->queue_head);
        blkdev_dequeue_request(req);
        spin_unlock_irq(&io_request_lock);
        spin_lock(&device->lock);

        /* Process all of the buffers in this (possibly clustered) request. */
        do {
            status = sbull_transfer(device, req);
        } while (end_that_request_first(req, status, DEVICE_NAME));

        spin_unlock(&device->lock);
        spin_lock_irq(&io_request_lock);
        end_that_request_last(req);
    }
    device->busy = 0;
}
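/*
 * Hedged sketch (assumption, not quoted from the driver): a minimal
 * sbull_locate_device for the 2.4 code above, mapping the request's device
 * number back into the driver's device array. The names sbull_devices and
 * sbull_devs are illustrative.
 */
static Sbull_Dev *sbull_locate_device(const struct request *req)
{
    int devno = DEVICE_NR(req->rq_dev);

    /* Reject minor numbers we never registered */
    if (devno >= sbull_devs) {
        printk(KERN_WARNING "sbull: request for unknown device\n");
        return NULL;
    }
    return sbull_devices + devno;
}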
/*
 * The simple form of the request function.
 */
static void sbull_request(struct request_queue *q)
{
    struct request *req;
    int ret;

    req = blk_fetch_request(q);
    while (req) {
        struct sbull_dev *dev = req->rq_disk->private_data;

        if (req->cmd_type != REQ_TYPE_FS) {
            printk(KERN_NOTICE "Skip non-fs request\n");
            ret = -EIO;
            goto done;
        }
        printk(KERN_NOTICE "Req dev %u dir %d sec %ld, nr %d\n",
                (unsigned) (dev - Devices), rq_data_dir(req),
                blk_rq_pos(req), blk_rq_cur_sectors(req));
        sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
                req->buffer, rq_data_dir(req));
        ret = 0;
    done:
        /* __blk_end_request_cur() returns false once the request is done,
         * at which point we fetch the next one */
        if (!__blk_end_request_cur(req, ret))
            req = blk_fetch_request(q);
    }
}
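/*
 * Hedged sketch of the sbull_transfer helper that the request functions
 * above depend on, assuming the device is a simple RAM disk backed by
 * dev->data of dev->size bytes and that KERNEL_SECTOR_SIZE is 512.
 * Illustrative, not a quote of the original helper.
 */
static void sbull_transfer(struct sbull_dev *dev, unsigned long sector,
        unsigned long nsect, char *buffer, int write)
{
    unsigned long offset = sector * KERNEL_SECTOR_SIZE;
    unsigned long nbytes = nsect * KERNEL_SECTOR_SIZE;

    /* Refuse transfers that would run off the end of the device */
    if ((offset + nbytes) > dev->size) {
        printk(KERN_NOTICE "Beyond-end write (%ld %ld)\n", offset, nbytes);
        return;
    }
    if (write)
        memcpy(dev->data + offset, buffer, nbytes);
    else
        memcpy(buffer, dev->data + offset, nbytes);
}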
/*
 * Transfer a single BIO.
 */
static int sbull_xfer_bio(struct sbull_dev *dev, struct bio *bio)
{
    int i;
    struct bio_vec *bvec;
    sector_t sector = bio->bi_sector;

    /* Do each segment independently. */
    bio_for_each_segment(bvec, bio, i) {
        char *buffer = __bio_kmap_atomic(bio, i, KM_USER0);
        sbull_transfer(dev, sector, bio_cur_sectors(bio),
                buffer, bio_data_dir(bio) == WRITE);
        sector += bio_cur_sectors(bio);
        __bio_kunmap_atomic(bio, KM_USER0);
    }
    return 0; /* Always "succeed" */
}
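/*
 * Hedged sketch: how sbull_xfer_bio might be reached when the driver runs
 * without a request queue on ~2.6.10-era kernels, where make_request
 * functions returned int and bio_endio still took a byte count. Assumes
 * q->queuedata was pointed at the sbull_dev when the queue was allocated.
 */
static int sbull_make_request(request_queue_t *q, struct bio *bio)
{
    struct sbull_dev *dev = q->queuedata;
    int status;

    status = sbull_xfer_bio(dev, bio);
    bio_endio(bio, bio->bi_size, status);
    return 0;
}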
/*
 * Transfer a single BIO.
 */
static int sbull_xfer_bio(struct sbull_dev *dev, struct bio *bio)
{
    struct bio_vec bvec;
    struct bvec_iter iter;
    sector_t sector = bio->bi_iter.bi_sector;

    /* Do each segment independently. */
    bio_for_each_segment(bvec, bio, iter) {
        char *buffer = __bio_kmap_atomic(bio, iter);
        sbull_transfer(dev, sector,
                bytes_to_sectors_checked(bio_cur_bytes(bio)),
                buffer, bio_data_dir(bio) == WRITE);
        sector += bytes_to_sectors_checked(bio_cur_bytes(bio));
        __bio_kunmap_atomic(bio);
    }
    return 0; /* Always "succeed" */
}
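/*
 * Hedged sketch of a matching make_request function for the bvec_iter-era
 * code above (roughly kernels 3.14 through 4.2, where make_request_fn
 * returns void and bio_endio takes an errno-style error). The error
 * handling is an assumption for illustration.
 */
static void sbull_make_request(struct request_queue *q, struct bio *bio)
{
    struct sbull_dev *dev = q->queuedata;
    int status;

    status = sbull_xfer_bio(dev, bio);
    bio_endio(bio, status);  /* 0 reports success */
}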
/*
 * The simple form of the request function.
 */
static void sbull_request(struct request_queue *q)
{
    struct request *req;

    while ((req = blk_fetch_request(q)) != NULL) {
        /* Look up the device only once a request is actually in hand */
        struct sbull_dev *dev = req->rq_disk->private_data;

        if (req->cmd_type != REQ_TYPE_FS) {
            printk(KERN_NOTICE "Skip non-fs request\n");
            __blk_end_request_all(req, -EIO);
            continue;
        }
        sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
                req->buffer, rq_data_dir(req));
        /* 0 means success here; the queue lock is already held in the
         * request function, so the __-prefixed completion helper is used */
        __blk_end_request_all(req, 0);
    }
}
static void sbull_request(struct request_queue *q)
{
    struct request *req;

    while ((req = blk_fetch_request(q)) != NULL) {
        struct sbull_dev *dev = req->rq_disk->private_data;

        if (!blk_fs_request(req)) {
            printk(KERN_NOTICE "Skip non-fs request\n");
            __blk_end_request_cur(req, -EIO);
            continue;
        }
        sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
                req->buffer, rq_data_dir(req));
        __blk_end_request_cur(req, 0);
    }
}
/*
 * The simple form of the request function.
 */
static void sbull_request(request_queue_t *q)
{
    struct request *req;

    while ((req = elv_next_request(q)) != NULL) {
        struct sbull_dev *dev = req->rq_disk->private_data;

        if (!blk_fs_request(req)) {
            printk(KERN_NOTICE "Skip non-fs request\n");
            end_request(req, 0);
            continue;
        }
        // printk(KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx\n",
        //         dev - Devices, rq_data_dir(req),
        //         req->sector, req->current_nr_sectors,
        //         req->flags);
        sbull_transfer(dev, req->sector, req->current_nr_sectors,
                req->buffer, rq_data_dir(req));
        end_request(req, 1);
    }
}
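/*
 * Hedged sketch: the queue setup that pairs with the simple request
 * function above on 2.6-era kernels. blk_init_queue() creates the queue
 * and registers sbull_request, with dev->lock guarding it; the helper
 * name sbull_setup_queue is an assumption.
 */
static int sbull_setup_queue(struct sbull_dev *dev)
{
    spin_lock_init(&dev->lock);
    dev->queue = blk_init_queue(sbull_request, &dev->lock);
    if (dev->queue == NULL)
        return -ENOMEM;
    /* Let the request function find the device from the queue */
    dev->queue->queuedata = dev;
    return 0;
}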
/*
 * The simple form of the request function.
 */
static void sbull_request(struct request_queue *q)
{
    struct request *req;

    while ((req = blk_fetch_request(q)) != NULL) {
        struct sbull_dev *dev = req->rq_disk->private_data;

        if (req->cmd_type != REQ_TYPE_FS) {
            printk(KERN_NOTICE "Skip non-fs request\n");
            __blk_end_request_cur(req, -EIO);
            continue;
        }
        // printk(KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx\n",
        //         dev - Devices, rq_data_dir(req),
        //         req->sector, req->current_nr_sectors,
        //         req->flags);
        sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
                req->buffer, rq_data_dir(req));
        __blk_end_request_cur(req, 0);
    }
}
void sbull_request(void)
{
    Sbull_Dev *device;
    int status;

    while (1) {
        INIT_REQUEST;  /* returns when queue is empty */

        /* Which "device" are we using? */
        device = sbull_locate_device(CURRENT);
        if (device == NULL) {
            end_request(0);
            continue;
        }

        /* Perform the transfer and clean up. */
        spin_lock(&device->lock);
        status = sbull_transfer(device, CURRENT);
        spin_unlock(&device->lock);
        end_request(status);
    }
}
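/*
 * Hedged sketch: how a request function in this old global-queue style was
 * wired up at init time, before per-device queues. BLK_DEFAULT_QUEUE and
 * the blk_size/blksize_size arrays belong to the 2.4 block API; the names
 * sbull_major, sbull_bdops, sbull_sizes, and sbull_blksizes are
 * assumptions for illustration.
 */
static int sbull_init(void)
{
    int result = register_blkdev(sbull_major, "sbull", &sbull_bdops);
    if (result < 0)
        return result;

    blk_init_queue(BLK_DEFAULT_QUEUE(sbull_major), sbull_request);
    blk_size[sbull_major] = sbull_sizes;         /* per-minor size, in KB */
    blksize_size[sbull_major] = sbull_blksizes;  /* per-minor block size */
    return 0;
}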