Example #1
int blk_do_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);

	if (!q->ordseq) {
		if (!is_barrier)
			return 1;

		if (q->next_ordered != QUEUE_ORDERED_NONE) {
			*rqp = start_ordered(q, rq);
			return 1;
		} else {
			/*
			 * This can happen when the queue switches to
			 * ORDERED_NONE while this request is on it.
			 */
			blkdev_dequeue_request(rq);
			if (__blk_end_request(rq, -EOPNOTSUPP,
					      blk_rq_bytes(rq)))
				BUG();
			*rqp = NULL;
			return 0;
		}
	}

	/*
	 * Ordered sequence in progress
	 */

	/* Special requests are not subject to ordering rules. */
	if (!blk_fs_request(rq) &&
	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
		return 1;

	if (q->ordered & QUEUE_ORDERED_TAG) {
		/* Ordered by tag.  Blocking the next barrier is enough. */
		if (is_barrier && rq != &q->bar_rq)
			*rqp = NULL;
	} else {
		/* Ordered by draining.  Wait for turn. */
		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
			*rqp = NULL;
	}

	return 1;
}
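The contract above is easy to misread: the return value says whether the caller may keep dispatching, while *rqp may be swapped (start_ordered) or cleared along the way. As a minimal sketch of a caller — hypothetical code, not from the kernel source, using the era's elv_next_request() helper seen in later examples — it would be used like this:

/* Hypothetical dispatch helper illustrating the blk_do_ordered()
 * contract: a zero return means the request was already terminated
 * (e.g. with -EOPNOTSUPP above); a nonzero return with *rqp cleared
 * means the request must wait for its turn in the ordered sequence. */
static struct request *example_fetch(struct request_queue *q)
{
	struct request *rq = elv_next_request(q);

	if (rq && !blk_do_ordered(q, &rq))
		return NULL;		/* request ended by ordering code */

	return rq;			/* NULL here means: stall for now */
}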
Example #2
int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
{
	unsigned int nr_bytes = nr_sectors << 9;
	struct request *rq = HWGROUP(drive)->rq;
	int rc, error = 0;

	if (!nr_bytes) {
		if (blk_pc_request(rq))
			nr_bytes = rq->data_len;
		else
			nr_bytes = rq->hard_cur_sectors << 9;
	}

	/*
	 * if failfast is set on a request, override number of sectors
	 * and complete the whole request right now
	 */
	if (blk_noretry_request(rq) && uptodate <= 0)
		nr_bytes = rq->hard_nr_sectors << 9;

	if (blk_fs_request(rq) == 0 && uptodate <= 0 && rq->errors == 0)
		rq->errors = -EIO;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	rc = ide_end_rq(drive, rq, error, nr_bytes);
	if (rc == 0)
		drive->hwif->hwgroup->rq = NULL;

	return rc;
}
Example #3
/*
 * osprd_process_request(d, req)
 *   Called when the user reads or writes a sector.
 *   Should perform the read or write, as appropriate.
 */
static void osprd_process_request(osprd_info_t *d, struct request *req)
{
	if (!blk_fs_request(req)) {
		end_request(req, 0);
		return;
	}

	// EXERCISE: Perform the read or write request by copying data between
	// our data array and the request's buffer.
	// Hint: The 'struct request' argument tells you what kind of request
	// this is, and which sectors are being read or written.
	// Read about 'struct request' in <linux/blkdev.h>.
	// Consider the 'req->sector', 'req->current_nr_sectors', and
	// 'req->buffer' members, and the rq_data_dir() function.

	// Your code here.

	// current_nr_sectors is the number of sectors requested in this
	// segment; convert sectors to a byte offset and a byte count.
	int offset = req->sector * SECTOR_SIZE;
	int bytes = req->current_nr_sectors * SECTOR_SIZE;

	// rq_data_dir() says whether this is a read or a write; it only
	// ever returns READ or WRITE, so two branches cover all cases.
	if (rq_data_dir(req) == WRITE)
		memcpy(d->data + offset, req->buffer, bytes);
	else
		memcpy(req->buffer, d->data + offset, bytes);

	end_request(req, 1);
}
Example #4
/*
 * osprd_process_request(d, req)
 *   Called when the user reads or writes a sector.
 *   Should perform the read or write, as appropriate.
 */
static void osprd_process_request(osprd_info_t *d, struct request *req)
{
	// Byte offset of the first requested sector within this ramdisk.
	sector_t offset = req->sector * SECTOR_SIZE;
	if (!blk_fs_request(req)) {
		end_request(req, 0);
		return;
	}

	// EXERCISE: Perform the read or write request by copying data between
	// our data array and the request's buffer.
	// Hint: The 'struct request' argument tells you what kind of request
	// this is, and which sectors are being read or written.
	// Read about 'struct request' in <linux/blkdev.h>.
	// Consider the 'req->sector', 'req->current_nr_sectors', and
	// 'req->buffer' members, and the rq_data_dir() function.

	// Your code here.
	if(rq_data_dir(req)==WRITE){
		// Write - copy contents of req to d
		memcpy(&(d->data[offset]), req->buffer,
				req->current_nr_sectors * SECTOR_SIZE);
	} else {
		// Read - copy contents of d to req
		memcpy(req->buffer, &(d->data[offset]), 
				req->current_nr_sectors * SECTOR_SIZE);
	}
	end_request(req, 1);
}
Example #5
ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
{
	struct request *rq;
	u8 err;

	err = ide_dump_status(drive, msg, stat);

	if ((rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, stat, err);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->error(drive, rq, stat, err);
	} else
		return __ide_error(drive, rq, stat, err);
}
Example #6
unsigned blk_ordered_req_seq(struct request *rq)
{
	struct request_queue *q = rq->q;

	BUG_ON(q->ordseq == 0);

	if (rq == &q->pre_flush_rq)
		return QUEUE_ORDSEQ_PREFLUSH;
	if (rq == &q->bar_rq)
		return QUEUE_ORDSEQ_BAR;
	if (rq == &q->post_flush_rq)
		return QUEUE_ORDSEQ_POSTFLUSH;

	/*
	 * !fs requests don't need to follow barrier ordering.  Always
	 * put them at the front.  This fixes the following deadlock.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/537473
	 */
	if (!blk_fs_request(rq))
		return QUEUE_ORDSEQ_DRAIN;

	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
		return QUEUE_ORDSEQ_DRAIN;
	else
		return QUEUE_ORDSEQ_DONE;
}
Example #7
static void
deadline_insert_request(request_queue_t *q, struct request *rq, int where)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/* barriers must flush the reorder queue */
	if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
			&& where == ELEVATOR_INSERT_SORT))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
		case ELEVATOR_INSERT_BACK:
			while (deadline_dispatch_requests(dd))
				;
			list_add_tail(&rq->queuelist, dd->dispatch);
			break;
		case ELEVATOR_INSERT_FRONT:
			list_add(&rq->queuelist, dd->dispatch);
			break;
		case ELEVATOR_INSERT_SORT:
			BUG_ON(!blk_fs_request(rq));
			deadline_add_request(q, rq);
			break;
		default:
			printk("%s: bad insert point %d\n", __FUNCTION__,where);
			return;
	}
}
Example #8
/*
 * osprd_process_request(d, req)
 *   Called when the user reads or writes a sector.
 *   Should perform the read or write, as appropriate.
 */
static void osprd_process_request(osprd_info_t *d, struct request *req)
{
	if (!blk_fs_request(req)) {
		end_request(req, 0);
		return;
	}

	// EXERCISE: Perform the read or write request by copying data between
	// our data array and the request's buffer.
	// Hint: The 'struct request' argument tells you what kind of request
	// this is, and which sectors are being read or written.
	// Read about 'struct request' in <linux/blkdev.h>.
	// Consider the 'req->sector', 'req->current_nr_sectors', and
	// 'req->buffer' members, and the rq_data_dir() function.

	// Your code here.
	unsigned request_type;
	uint8_t *data_ptr;
	request_type = rq_data_dir(req);
	data_ptr = d->data + req->sector * SECTOR_SIZE;
	//eprintk("passwd_hash: %d\n", d->passwd_hash);
	if (request_type == READ) {
		memcpy((void*)req->buffer, (void*)data_ptr,
			req->current_nr_sectors * SECTOR_SIZE);
	}
	else if (request_type == WRITE) {
		memcpy((void*)data_ptr, (void*)req->buffer,
			req->current_nr_sectors * SECTOR_SIZE);
	}
	//eprintk("Should process request...\n");

	end_request(req, 1);
}
Example #9
/*
 * osprd_process_request(d, req)
 *   Called when the user reads or writes a sector.
 *   Should perform the read or write, as appropriate.
 */
static void osprd_process_request(osprd_info_t *d, struct request *req)
{
	if (!blk_fs_request(req)) {
		end_request(req, 0);
		return;
	}

	// DONE EXERCISE: Perform the read or write request by copying data between
	// our data array and the request's buffer.
	// Hint: The 'struct request' argument tells you what kind of request
	// this is, and which sectors are being read or written.
	// Read about 'struct request' in <linux/blkdev.h>.
	// Consider the 'req->sector', 'req->current_nr_sectors', and
	// 'req->buffer' members, and the rq_data_dir() function.

	// Your code here.

	// compute the offset, set pointer to correct region
	uint8_t *dataPtr = d->data + (req->sector) * SECTOR_SIZE;
	
	// check if it's read or write and copy data
	unsigned int requestType = rq_data_dir(req);

	if(requestType == READ)
		memcpy((void*) req->buffer, (void*)dataPtr, req->current_nr_sectors * SECTOR_SIZE);
	else if (requestType == WRITE)
		memcpy((void*)dataPtr, (void*) req->buffer, req->current_nr_sectors * SECTOR_SIZE);

	//eprintk("Should process request...\n");
	end_request(req, 1);
}
Example #10
/*
 * osprd_process_request(d, req)
 *   Called when the user reads or writes a sector.
 *   Should perform the read or write, as appropriate.
 */
static void osprd_process_request(osprd_info_t *d, struct request *req)
{
    if (!blk_fs_request(req)) {
        end_request(req, 0);
        return;
    }

    // EXERCISE: Perform the read or write request by copying data between
    // our data array and the request's buffer.
    // Hint: The 'struct request' argument tells you what kind of request
    // this is, and which sectors are being read or written.
    // Read about 'struct request' in <linux/blkdev.h>.
    // Consider the 'req->sector', 'req->current_nr_sectors', and
    // 'req->buffer' members, and the rq_data_dir() function.

    // Your code here.

    long int sector_offset = req->sector * SECTOR_SIZE;
    long int request_size = req->current_nr_sectors * SECTOR_SIZE;

    if (rq_data_dir(req) == READ) {
        memcpy(req->buffer, d->data + sector_offset, request_size);
    } else {
        memcpy(d->data + sector_offset, req->buffer, request_size);
    }

    // eprintk("Should process request...\n");

    end_request(req, 1);
}
Example #11
void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
	if (blk_fs_request(rq) || blk_discard_rq(rq)) {
		rq->hard_sector += nsect;
		rq->hard_nr_sectors -= nsect;

		/*
		 * Move the I/O submission pointers ahead if required.
		 */
		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
		    (rq->sector <= rq->hard_sector)) {
			rq->sector = rq->hard_sector;
			rq->nr_sectors = rq->hard_nr_sectors;
			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
			rq->current_nr_sectors = rq->hard_cur_sectors;
			rq->buffer = bio_data(rq->bio);
		}

		/*
		 * if total number of sectors is less than the first segment
		 * size, something has gone terribly wrong
		 */
		if (rq->nr_sectors < rq->current_nr_sectors) {
			printk(KERN_ERR "blk: request botched\n");
			rq->nr_sectors = rq->current_nr_sectors;
		}
	}
}
Example #12
/*
 * osprd_process_request(d, req)
 *	 Called when the user reads or writes a sector.
 *	 Should perform the read or write, as appropriate.
 */
static void osprd_process_request(osprd_info_t *d, struct request *req)
{
	void *data_offset;
	unsigned int data_length;

	if (!blk_fs_request(req)) {
		end_request(req, 0);
		return;
	}

	// EXERCISE: Perform the read or write request by copying data between
	// our data array and the request's buffer.
	// Hint: The 'struct request' argument tells you what kind of request
	// this is, and which sectors are being read or written.
	// Read about 'struct request' in <linux/blkdev.h>.
	// Consider the 'req->sector', 'req->current_nr_sectors', and
	// 'req->buffer' members, and the rq_data_dir() function.

	data_offset = d->data + (SECTOR_SIZE * req->sector);
	data_length = req->current_nr_sectors * SECTOR_SIZE;

	// TODO: include a test for out-of-range read/writes
	if (rq_data_dir(req) == WRITE)
		memcpy(data_offset, req->buffer, data_length);
	else if (rq_data_dir(req) == READ)
		memcpy(req->buffer, data_offset, data_length);
	else {
		eprintk("Unrecognized command.\n");
		end_request(req, 0);
		return;
	}

	end_request(req, 1);
}
Example #13
/*
 * Verifies if a request should be dispatched or not.
 *
 * Returns:
 *  <0 in case of error.
 *  0  if request passes the checks
 */
static int sd_check_request(struct sd_host *host, struct request *req)
{
	unsigned long nr_sectors;

	if (!blk_fs_request(req))
		return -EIO;

	if (test_bit(__SD_MEDIA_CHANGED, &host->flags)) {
		sd_printk(KERN_ERR, "media changed, aborting\n");
		return -ENOMEDIUM;
	}

	/* unit is kernel sectors */
	nr_sectors =
	    host->card.csd.capacity << (host->card.csd.read_blkbits -
					KERNEL_SECTOR_SHIFT);

	/* keep our reads within limits */
	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > nr_sectors) {
		sd_printk(KERN_ERR, "reading past end, aborting\n");
		return -EINVAL;
	}

	return 0;
}
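sd_check_request() uses the blk_rq_pos()/blk_rq_cur_sectors() accessors, so it belongs to the 2.6.31+ request API. A hypothetical caller — not taken from the original driver; the queuedata association and function name are assumed — might wrap it like this:

/* Sketch of a request function built around sd_check_request():
 * requests failing the checks are completed with the error code,
 * everything else proceeds to the actual transfer. */
static void sd_request(struct request_queue *q)
{
	struct sd_host *host = q->queuedata;	/* assumed association */
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		int err = sd_check_request(host, req);

		if (err) {
			__blk_end_request_all(req, err);
			continue;
		}
		/* ... hand the request to the transfer machinery ... */
	}
}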
Example #14
static void request_exemple(request_queue_t * rqueue)
{
	unsigned long secteur_debut;
	unsigned long nb_secteurs;
	struct request * rq;

	while ((rq = elv_next_request(rqueue)) != NULL) {
		if (! blk_fs_request(rq)) {
			end_request(rq, 0);
			continue;
		}
		
		/*
		 * The sector numbers supplied for the transfer refer to
		 * 512-byte sectors... -> convert them.
		 */
		secteur_debut = rq->sector * 512 / lg_sect_exemple;
		nb_secteurs   = rq->current_nr_sectors * 512 / lg_sect_exemple;
		if (secteur_debut + nb_secteurs > nb_sect_exemple) {
			/* out of range: fail the request rather than
			 * reporting success */
			end_request(rq, 0);
			continue;
		}
		if (rq_data_dir(rq)) /* write */
			memmove(& data_exemple[secteur_debut * lg_sect_exemple],
			        rq->buffer,
			        nb_secteurs * lg_sect_exemple);
		else /* read */
			memmove(rq->buffer,
			        & data_exemple[secteur_debut * lg_sect_exemple],
			        nb_secteurs * lg_sect_exemple);
		end_request(rq, 1);
	}
}
Example #15
/*
 * Prepare a MMC request.  Essentially, this means passing the
 * preparation off to the media driver.  The media driver will
 * create a mmc_io_request in req->special.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
    struct mmc_queue *mq = q->queuedata;
    int ret = BLKPREP_KILL;

    if (blk_special_request(req)) {
        /*
         * Special commands already have their command
         * blocks set up in req->special.
         */
        BUG_ON(!req->special);

        ret = BLKPREP_OK;
    } else if (blk_fs_request(req) || blk_pc_request(req)) {
        /*
         * Block I/O requests need translating according
         * to the protocol.
         */
        ret = mq->prep_fn(mq, req);
    } else {
        /*
         * Everything else is invalid.
         */
        blk_dump_rq_flags(req, "MMC bad request");
    }

    if (ret == BLKPREP_OK)
        req->cmd_flags |= REQ_DONTPREP;

    return ret;
}
Example #16
bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);

	if (!q->ordseq) {
		if (!is_barrier)
			return true;

		if (q->next_ordered != QUEUE_ORDERED_NONE)
			return start_ordered(q, rqp);
		else {
			/*
			 * Queue ordering not supported.  Terminate
			 * with prejudice.
			 */
			elv_dequeue_request(q, rq);
			if (__blk_end_request(rq, -EOPNOTSUPP,
					      blk_rq_bytes(rq)))
				BUG();
			*rqp = NULL;
			return false;
		}
	}

	/*
	 * Ordered sequence in progress
	 */

	/* Special requests are not subject to ordering rules. */
	if (!blk_fs_request(rq) &&
	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
		return true;

	if (q->ordered & QUEUE_ORDERED_BY_TAG) {
		/* Ordered by tag.  Blocking the next barrier is enough. */
		if (is_barrier && rq != &q->bar_rq)
			*rqp = NULL;
	} else {
		/* Ordered by draining.  Wait for turn. */
		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
			*rqp = NULL;
	}

	return true;
}
Example #17
/*
 * osprd_process_request(d, req)
 *   Called when the user reads or writes a sector.
 *   Should perform the read or write, as appropriate.
 */
static void osprd_process_request(osprd_info_t *d, struct request *req)
{
  size_t offset;
  size_t num_bytes;

	if (!blk_fs_request(req)) {
		end_request(req, 0);
		return;
	}

	// EXERCISE: Perform the read or write request by copying data between
	// our data array and the request's buffer.
	// Hint: The 'struct request' argument tells you what kind of request
	// this is, and which sectors are being read or written.
	// Read about 'struct request' in <linux/blkdev.h>.
	// Consider the 'req->sector', 'req->current_nr_sectors', and
	// 'req->buffer' members, and the rq_data_dir() function.

  // sector_t is defined as an unsigned long in <linux/types.h>,
  // so req->sector can never be negative; only check the upper bound.
  if (req->sector >= nsectors)
  {
    eprintk("Invalid sector requested: [%lu]. max sectors: [%i]\n", (unsigned long)req->sector, nsectors);
    end_request(req, 0);
    return;
  }

  offset = req->sector * SECTOR_SIZE;

  // If the number of requested sectors would run past the end of the disk,
  // use as many sectors as possible until the end is reached
  if(req->sector + req->current_nr_sectors > nsectors)
  {
    num_bytes = (nsectors - req->sector) * SECTOR_SIZE;
    eprintk("Requested sector [%lu] with [%u] additional sectors.\n", (unsigned long)req->sector, req->current_nr_sectors);
    eprintk("Using [%u] additional sectors instead.\n", (unsigned)(num_bytes / SECTOR_SIZE));
  }
  else
  {
    num_bytes = req->current_nr_sectors * SECTOR_SIZE;
  }

  // According to http://www.makelinux.net/ldd3/chp-16-sect-3
  // it is safe to dereference req->buffer and write to it.

  // Note from @ipetkov: I'm not sure if req->buffer needs to
  // be resized at all, I'm assuming linux will allocate the
  // memory before the request is sent. No issues are apparent
  // from the first 8 default test cases.
  spin_lock(&d->mutex);

  if(rq_data_dir(req) == READ)
    memcpy(req->buffer, d->data + offset, num_bytes);
  else // WRITE
    memcpy(d->data + offset, req->buffer, num_bytes);

  spin_unlock(&d->mutex);

	end_request(req, 1);
}
Example #18
static int bml_transfer(u32 volume, u32 partno, const struct request *req)
{
	unsigned long sector, nsect;
	char *buf;
	FSRVolSpec *vs;
	FSRPartI *ps;
	u32 nPgsPerUnit = 0, n1stVpn = 0, spp_shift, spp_mask;
	int ret;
	
	DEBUG(DL3,"TINY[I]: volume(%d), partno(%d)\n", volume, partno);

	if (!blk_fs_request(req))
	{
		return 0;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
	sector = blk_rq_pos(req);
	nsect = blk_rq_bytes(req) >> 9;	/* rq->data_len became blk_rq_bytes() in 2.6.31 */
#else
	sector = req->sector;
	nsect = req->current_nr_sectors;
#endif
	buf = req->buffer;
	
	vs = fsr_get_vol_spec(volume);
	ps = fsr_get_part_spec(volume);
	spp_shift = ffs(vs->nSctsPerPg) - 1;
	spp_mask = vs->nSctsPerPg - 1;
	
	if(!fsr_is_whole_dev(partno))
	{
		if (FSR_BML_GetVirUnitInfo(volume, 
			fsr_part_start(ps, partno), &n1stVpn, &nPgsPerUnit) 
				!= FSR_BML_SUCCESS)
		{
			ERRPRINTK("FSR_BML_GetVirUnitInfo FAIL\n");
			return -EIO;
		}
	}

	switch (rq_data_dir(req)) 
	{
		case READ:
			/*
			 * If sector and nsect are aligned with vs->nSctsPerPg,
			 * you have to use a FSR_BML_Read() function using page unit,
			 * If not, use a FSR_BML_ReadScts() function using sector unit.
			 */
			if ((!(sector & spp_mask) && !(nsect & spp_mask))) 
			{
				ret = FSR_BML_Read(volume, n1stVpn + (sector >> spp_shift),
						nsect >> spp_shift, buf, NULL, FSR_BML_FLAG_ECC_ON);
			} 
			else 
			{
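The excerpt breaks off inside the READ case. Going by the comment above it, the else branch presumably falls back to FSR_BML_ReadScts() for sector-granularity access; the continuation below is only a sketch — the function name comes from the source comment, but the argument list is assumed:

				/* Assumed continuation (sketch): the comment
				 * above names FSR_BML_ReadScts(), but this
				 * argument list is a guess, not original code. */
				ret = FSR_BML_ReadScts(volume,
						n1stVpn + (sector >> spp_shift),
						sector & spp_mask, nsect,
						buf, NULL, FSR_BML_FLAG_ECC_ON);
			}
			break;
		/* (the WRITE case and the remainder of the function are
		 *  truncated in this excerpt) */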
Example #19
static int stheno_request_thread( void *arg )
{
    struct request *req;
    int ret;

    while( 1 ){
        ret = wait_event_interruptible( stheno_wait_q, (kthread_should_stop() || stheno_wakeup == 1) );
        if( ret != 0 ) break;

        stheno_wakeup = 0;

        if( kthread_should_stop() ) break;

        while( 1 ){
            spin_lock_irq( stheno_queue->queue_lock );
            req = blk_fetch_request( stheno_queue );
            spin_unlock_irq( stheno_queue->queue_lock );
        next_segment:
            if( req == NULL ) break;

            if( !blk_fs_request( req ) ){
                /*blk_end_request_cur( req, -EIO );*/
                spin_lock_irq( stheno_queue->queue_lock );
                ret = __blk_end_request_cur( req, -EIO );
                spin_unlock_irq( stheno_queue->queue_lock );
                if( ret == true ) goto next_segment;
                continue;
            }
            if( stheno_read_sector0() != 0 ){
                spin_lock_irq( stheno_queue->queue_lock );
                ret = __blk_end_request_cur( req, -EIO );
                spin_unlock_irq( stheno_queue->queue_lock );
                if( ret == true ) goto next_segment;
                continue;
            }
            if( blk_rq_sectors( req ) == 0 || blk_rq_cur_sectors( req ) == 0 ){
                spin_lock_irq( stheno_queue->queue_lock );
                ret = __blk_end_request_cur( req, -EIO );
                spin_unlock_irq( stheno_queue->queue_lock );
                if( ret == true ) goto next_segment;
                continue;
            }
            if( rq_data_dir( req ) == READ ){
                ret = euryale_read_process( stheno_lbaoffset + blk_rq_pos( req ), blk_rq_cur_sectors( req ), req->buffer );
            }else{
                ret = euryale_write_process( stheno_lbaoffset + blk_rq_pos( req ), blk_rq_cur_sectors( req ), req->buffer );
            }
            /*blk_end_request_cur( req, ret == 0 ? 0 : -EIO );*/
            spin_lock_irq( stheno_queue->queue_lock );
            ret = __blk_end_request_cur( req, ret == 0 ? 0 : -EIO );
            spin_unlock_irq( stheno_queue->queue_lock );
            if( ret == true ) goto next_segment;
        }
    }
    print_debug("stheno_request_thread was terminated.\n");
    return 0;
}
Example #20
static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted,
						  struct scsi_cmnd *cmd,
						  int write_flag)
{
	if (blk_fs_request(cmd->request))
		return wanted;
	else
		return 0;
}
Example #21
/*
 * Common request path.  Rather than registering a custom make_request()
 * function we use the generic Linux version.  This is done because it allows
 * us to easily merge read requests which would otherwise be performed
 * synchronously by the DMU.  This is less critical in the write case where the
 * DMU will perform the correct merging within a transaction group.  Using
 * the generic make_request() also lets us leverage the fact that the
 * elevator will ensure correct ordering in regards to barrier IOs.  On
 * the downside it means that in the write case we end up doing request
 * merging twice: once in the elevator and once in the DMU.
 *
 * The request handler is called under a spin lock so all the real work
 * is handed off to be done in the context of the zvol taskq.  This function
 * simply performs basic request sanity checking and hands off the request.
 */
static void
zvol_request(struct request_queue *q)
{
	zvol_state_t *zv = q->queuedata;
	struct request *req;
	unsigned int size;

	while ((req = blk_fetch_request(q)) != NULL) {
		size = blk_rq_bytes(req);

		if (size != 0 && blk_rq_pos(req) + blk_rq_sectors(req) >
		    get_capacity(zv->zv_disk)) {
			printk(KERN_INFO
			       "%s: bad access: block=%llu, count=%lu\n",
			       req->rq_disk->disk_name,
			       (long long unsigned)blk_rq_pos(req),
			       (long unsigned)blk_rq_sectors(req));
			__blk_end_request(req, -EIO, size);
			continue;
		}

		if (!blk_fs_request(req)) {
			printk(KERN_INFO "%s: non-fs cmd\n",
			       req->rq_disk->disk_name);
			__blk_end_request(req, -EIO, size);
			continue;
		}

		switch (rq_data_dir(req)) {
		case READ:
			zvol_dispatch(zvol_read, req);
			break;
		case WRITE:
			if (unlikely(get_disk_ro(zv->zv_disk)) ||
			    unlikely(zv->zv_flags & ZVOL_RDONLY)) {
				__blk_end_request(req, -EROFS, size);
				break;
			}

#ifdef HAVE_BLK_QUEUE_DISCARD
			if (req->cmd_flags & VDEV_REQ_DISCARD) {
				zvol_dispatch(zvol_discard, req);
				break;
			}
#endif /* HAVE_BLK_QUEUE_DISCARD */

			zvol_dispatch(zvol_write, req);
			break;
		default:
			printk(KERN_INFO "%s: unknown cmd: %d\n",
			       req->rq_disk->disk_name, (int)rq_data_dir(req));
			__blk_end_request(req, -EIO, size);
			break;
		}
	}
}
Example #22
/* Get the next read/write request; ending requests that we don't handle */
struct request *ace_get_next_request(struct request_queue * q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		if (blk_fs_request(req))
			break;
		end_request(req, 0);
	}
	return req;
}
Example #23
/*
 * osprd_process_request(d, req)
 *   Called when the user reads or writes a sector.
 *   Should perform the read or write, as appropriate.
 */
static void osprd_process_request(osprd_info_t *d, struct request *req)
{
    //BEGIN TUAN
    unsigned int requestType;
    uint8_t* dataPtr;
    //END TUAN


    /*
    TUAN: a nonzero return value from the blk_fs_request() macro says this is
    a normal filesystem request. Other types of requests (e.g. packet-mode
    or device-specific diagnostic operations) are not something that sbd
    supports, so it simply fails any such request.
    <linux/blkdev.h>
    */
    if (!blk_fs_request(req)) {
        end_request(req, 0);
        return;
    }

    // EXERCISE: Perform the read or write request by copying data between
    // our data array and the request's buffer.
    // Hint: The 'struct request' argument tells you what kind of request
    // this is, and which sectors are being read or written.
    // Read about 'struct request' in <linux/blkdev.h>.
    // Consider the 'req->sector', 'req->current_nr_sectors', and
    // 'req->buffer' members, and the rq_data_dir() function.

    //BEGIN TUAN
    /*
    We first need to determine if this is a read or write request
    The macro rq_data_dir(rq) will tell us whether this is a read or write
    request
    */
    requestType = rq_data_dir(req);

    //get pointer to data on disk requested by the user
    //TUAN: req->sector => next sector to read from or write to
    //      (req->sector)*SECTOR_SIZE => This computes the offset
    dataPtr = d->data + (req->sector)*SECTOR_SIZE;

    if (requestType == READ) {
        //copy contents of data buffer into request's buffer
        memcpy ((void*)req->buffer, (void*) dataPtr, req->current_nr_sectors * SECTOR_SIZE);
    }

    else if (requestType == WRITE) {
        //copy contents of request buffer into data buffer
        memcpy((void*) dataPtr, (void*)req->buffer, req->current_nr_sectors * SECTOR_SIZE);
    }
    //END TUAN

    end_request(req, 1);
}
Example #24
/* Get the next read/write request; ending requests that we don't handle */
struct request *ace_get_next_request(struct request_queue * q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		if (blk_fs_request(req))
			break;
		blk_start_request(req);
		__blk_end_request_all(req, -EIO);
	}
	return req;
}
Example #25
/*
 * osprd_process_request(d, req)
 *   Called when the user reads or writes a sector.
 *   Should perform the read or write, as appropriate.
 */
static void osprd_process_request(osprd_info_t *d, struct request *req)
{
        // Declare variables at the beginning
        // Calculate where to start and the amount of data needed to copy
        int data_size = req->current_nr_sectors * SECTOR_SIZE;
        int data_offset = req->sector * SECTOR_SIZE;

        // Check for a bad request
        if (!blk_fs_request(req)) {
                end_request(req, 0);
                return;
        }

        // EXERCISE: Perform the read or write request by copying data between
        // our data array and the request's buffer.
        // Hint: The 'struct request' argument tells you what kind of request
        // this is, and which sectors are being read or written.
        // Read about 'struct request' in <linux/blkdev.h>.
        // Consider the 'req->sector', 'req->current_nr_sectors', and
        // 'req->buffer' members, and the rq_data_dir() function.

        // Your code here:
        
        // Check to see if we are trying to access nonexistent sectors
        if(req->sector + req->current_nr_sectors > nsectors)
        {
                eprintk("Trying to access nonexistent sectors\n");
                end_request(req, 0);
                return;
        }
        
        // Read from the RAMDISK
        // Copy the data in the requested sectors into the buffer
        if(rq_data_dir(req) == READ)
        {
                memcpy(req->buffer, d->data + data_offset, data_size);
        }
        
        // Write to the RAMDISK
        // Copy the data in the buffer into the requested sectors
        else if(rq_data_dir(req) == WRITE)
        {
                memcpy(d->data + data_offset, req->buffer, data_size);
        }
        
        // Trying to perform an invalid action
        else
        {
                eprintk("Neither a read nor a write\n");
                end_request(req, 0);
                return;
        }
        
        end_request(req, 1);
}
Example #26
static void cf_request(struct request_queue *q)
{
	struct cf_device *cf;
	struct request *req;
	unsigned block, count;
	int rw, err;

	DPRINTK(DEBUG_CF_REQUEST, "%s: q %p", __FUNCTION__, q);

	req = blk_fetch_request(q);
	while (req) {
		err = -EIO;
		DPRINTK(DEBUG_CF_REQUEST, "%s:%d req %p", __FUNCTION__, __LINE__, req);

		if (!blk_fs_request(req))
			goto done;

		block = blk_rq_pos(req);
		count = blk_rq_cur_sectors(req);
		rw = rq_data_dir(req);
		cf = req->rq_disk->private_data;

		DPRINTK(DEBUG_CF_REQUEST, "req %p block %d count %d rw %c\n", req, block, count, (rw == READ)?'R':'W');

		if (block+count > get_capacity(req->rq_disk)) {
			printk("%s: %u+%u is larger than %llu\n", __FUNCTION__, block, count, get_capacity(req->rq_disk));
			goto done;
		}

		/* Grab the R/W semaphore to prevent more than
		 * one request from trying to R/W at the same time */
		err = down_interruptible(&cf->rw_sem);
		if (err)
			break;

		if (rw == READ)
			err = cf_read_sectors(cf, req->buffer, block, count);
		else
			err = cf_write_sectors(cf, req->buffer, block, count);
		up(&cf->rw_sem);

	done:
		DPRINTK(DEBUG_CF_REQUEST, "%s: blk_end_request_cur(%p, %d)\n", __FUNCTION__, req, err);
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
	DPRINTK(DEBUG_CF_REQUEST, "end\n");
	cf_in_request--;
}
Example #27
/*
 * osprd_process_request(d, req)
 *   Called when the user reads or writes a sector.
 *   Should perform the read or write, as appropriate.
 */
static void osprd_process_request(osprd_info_t *d, struct request *req)
{
	if (!blk_fs_request(req)) {
		end_request(req, 0);
		return;
	}

	// EXERCISE: Perform the read or write request by copying data between
	// our data array and the request's buffer.
	// Hint: The 'struct request' argument tells you what kind of request
	// this is, and which sectors are being read or written.
	// Read about 'struct request' in <linux/blkdev.h>.
	// Consider the 'req->sector', 'req->current_nr_sectors', and
	// 'req->buffer' members, and the rq_data_dir() function.
    //
    // req->sector == Target location
    // req->current_nr_sectors == Number of sectors in first segment of request
    // req->buffer == Map of first segment
    // rq_data_dir() == macro to get data direction(READ or WRITE)

	// Your code here.
    int sector = (int)(req->sector);
    uint8_t *ptr = d->data + (sector * SECTOR_SIZE);
    int size = (int)(req->current_nr_sectors * SECTOR_SIZE);
    // DEBUG: just prints that request was received

    //check that the ptr didn't go "off the end"
    if (ptr + size > d->data + SECTOR_SIZE*nsectors)
    {
        printk(KERN_WARNING "request past the end of the device!\n");
        end_request(req, 0);
        return;
    }

    //just copy the memory from the osprd_info struct to the request (it's a read)
    switch (rq_data_dir(req))
    {
        case READ:
            osp_spin_lock(&d->mutex);
            memcpy(req->buffer, ptr, size);
            osp_spin_unlock(&d->mutex);
            break;
        case WRITE:
            osp_spin_lock(&d->mutex);
            memcpy(ptr, req->buffer, size);
            osp_spin_unlock(&d->mutex);
            break;
        default: //error
            break;
    }
	end_request(req, 1);
}
Example #28
/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
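A prep_rq_fn like this one only takes effect once it is registered on the queue. As a sketch of the hookup during queue setup — 'mq', 'lock', and 'mmc_request' are assumed names that do not appear in this excerpt; blk_init_queue() and blk_queue_prep_rq() are the standard old-kernel APIs for this:

	/* Sketch: register the prepare function on the MMC queue.
	 * 'mq', 'lock' and 'mmc_request' are assumed from context. */
	mq->queue = blk_init_queue(mmc_request, lock);
	if (mq->queue)
		blk_queue_prep_rq(mq->queue, mmc_prep_request);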
Example #29
/*
 * osprd_process_request(d, req)
 *   Called when the user reads or writes a sector.
 *   Should perform the read or write, as appropriate.
 */
static void osprd_process_request(osprd_info_t *d, struct request *req)
{
  int i;
  char* data_loc;
  unsigned long rwsize;
  data_loc = d->data + req->sector*SECTOR_SIZE;
  rwsize = req->current_nr_sectors * SECTOR_SIZE;

  if (!blk_fs_request(req)) {
    end_request(req, 0);
    return;
  }
  
  // EXERCISE: Perform the read or write request by copying data between
  // our data array and the request's buffer.
  // Hint: The 'struct request' argument tells you what kind of request
  // this is, and which sectors are being read or written.
  // Read about 'struct request' in <linux/blkdev.h>.
  // Consider the 'req->sector', 'req->current_nr_sectors', and
  // 'req->buffer' members, and the rq_data_dir() function.
  
  // Your code here.
  
  
  
  if(!req->buffer || !data_loc)
    {
      end_request(req, 0);
      return;
    }
  
  if(rq_data_dir(req) == READ)
    {
      for(i=0; i < rwsize; i++)
	{
	  req->buffer[i] = data_loc[i];
	}
    }
  else if(rq_data_dir(req) == WRITE)
    {
      for(i=0; i < rwsize; i++)
	{
	  data_loc[i] = req->buffer[i];
	}
    }
  else
    {
      end_request(req, 0);
      return;
    }
  end_request(req, 1);
}
Example #30
/*
 * The simple form of the request function.
 */
static void ubiblk_request(request_queue_t *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		struct ubiblk_dev *dev = req->rq_disk->private_data;
		if(! blk_fs_request(req)) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			end_request(req, 0);
			continue;
		}
		ubiblk_transfer(dev, req->sector, req->current_nr_sectors,
				req->buffer, rq_data_dir(req));
		end_request(req, 1);
	}
}