Example #1
/*
 * Common read path running under the zvol taskq context.  This function
 * is responsible for copying the requested data out of the DMU and into
 * a Linux request structure.  It then must signal the request queue with
 * an error code describing the result of the copy.
 */
static void
zvol_read(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error;
	rl_t *rl;

	if (size == 0) {
		blk_end_request(req, 0, size);
		return;
	}

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	error = dmu_read_req(zv->zv_objset, ZVOL_OBJ, req);

	zfs_range_unlock(rl);

	/* convert checksum errors into IO errors */
	if (error == ECKSUM)
		error = EIO;

	blk_end_request(req, -error, size);
}
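
A note on the completion convention used throughout these examples: blk_end_request() takes 0 or a negative errno as its error argument and returns true while uncompleted bytes remain, which is why the ZFS paths negate their positive error codes and the IDE/mailbox paths wrap the call in BUG(). A minimal sketch of that pattern, using a hypothetical helper name that does not appear in any of the quoted sources:

static void
zvol_end_request(struct request *req, int error, uint64_t size)
{
	/*
	 * 'error' is a positive errno as used by the ZFS code above;
	 * blk_end_request() expects 0 or a negative errno.  Completing
	 * blk_rq_bytes(req) worth of data finishes the request, so a
	 * true return here would indicate a bug.
	 */
	if (blk_end_request(req, -error, size))
		BUG();
}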
Example #2
static void
zvol_discard(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    uint64_t offset = blk_rq_pos(req) << 9;
    uint64_t size = blk_rq_bytes(req);
    int error;
    rl_t *rl;

    if (offset + size > zv->zv_volsize) {
        blk_end_request(req, -EIO, size);
        return;
    }

    if (size == 0) {
        blk_end_request(req, 0, size);
        return;
    }

    rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

    error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, size);

    /*
     * TODO: maybe we should add the operation to the log.
     */

    zfs_range_unlock(rl);

    blk_end_request(req, -error, size);
}
Example #3
static void
zvol_discard(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t start = blk_rq_pos(req) << 9;
	uint64_t end = start + blk_rq_bytes(req);
	int error;
	rl_t *rl;

	/*
	 * Annotate this call path with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock.  KM_PUSHPAGE should be used instead.
	 */
	ASSERT(!(current->flags & PF_NOFS));
	current->flags |= PF_NOFS;

	if (end > zv->zv_volsize) {
		blk_end_request(req, -EIO, blk_rq_bytes(req));
		goto out;
	}

	/*
	 * Align the request to volume block boundaries. If we don't,
	 * then this will force dnode_free_range() to zero out the
	 * unaligned parts, which is slow (read-modify-write) and
	 * useless since we are not freeing any space by doing so.
	 */
	start = P2ROUNDUP(start, zv->zv_volblocksize);
	end = P2ALIGN(end, zv->zv_volblocksize);

	if (start >= end) {
		blk_end_request(req, 0, blk_rq_bytes(req));
		goto out;
	}

	rl = zfs_range_lock(&zv->zv_znode, start, end - start, RL_WRITER);

	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, end - start);

	/*
	 * TODO: maybe we should add the operation to the log.
	 */

	zfs_range_unlock(rl);

	blk_end_request(req, -error, blk_rq_bytes(req));
out:
	current->flags &= ~PF_NOFS;
}
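
The PF_NOFS annotation above applies to every allocation made further down the discard path. A minimal sketch of what the comment asks for, with a hypothetical helper name (not part of the original zvol.c) and assuming the SPL kmem_alloc() interface:

static void *
zvol_discard_alloc(size_t size)
{
	/*
	 * With PF_NOFS set on the current task, a KM_SLEEP allocation
	 * could re-enter the filesystem while reclaiming memory and
	 * deadlock; KM_PUSHPAGE asks the SPL allocator to avoid that.
	 */
	ASSERT(current->flags & PF_NOFS);
	return (kmem_alloc(size, KM_PUSHPAGE));
}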
Example #4
/*
 * Common write path running under the zvol taskq context.  This function
 * is responsible for copying the request structure data into the DMU and
 * signaling the request queue with the result of the copy.
 */
static void
zvol_write(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    uint64_t offset = blk_rq_pos(req) << 9;
    uint64_t size = blk_rq_bytes(req);
    int error = 0;
    dmu_tx_t *tx;
    rl_t *rl;

    if (req->cmd_flags & VDEV_REQ_FLUSH)
        zil_commit(zv->zv_zilog, ZVOL_OBJ);

    /*
     * Some requests are just for flush and nothing else.
     */
    if (size == 0) {
        blk_end_request(req, 0, size);
        return;
    }

    rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

    tx = dmu_tx_create(zv->zv_objset);
    dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);

    /* This will only fail for ENOSPC */
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        zfs_range_unlock(rl);
        blk_end_request(req, -error, size);
        return;
    }

    error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
    if (error == 0)
        zvol_log_write(zv, tx, offset, size,
                       req->cmd_flags & VDEV_REQ_FUA);

    dmu_tx_commit(tx);
    zfs_range_unlock(rl);

    if ((req->cmd_flags & VDEV_REQ_FUA) ||
            zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
        zil_commit(zv->zv_zilog, ZVOL_OBJ);

    blk_end_request(req, -error, size);
}
Example #5
static ssize_t
omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf)
{
    unsigned long flags;
    struct request *rq;
    mbox_msg_t *p = (mbox_msg_t *) buf;
    struct omap_mbox *mbox = dev_get_drvdata(dev);
    struct request_queue *q = mbox->rxq->queue;

    while (1) {
        spin_lock_irqsave(q->queue_lock, flags);
        rq = elv_next_request(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        if (!rq)
            break;

        *p = (mbox_msg_t) rq->data;

        if (blk_end_request(rq, 0, 0))
            BUG();

        if (unlikely(mbox_seq_test(mbox, *p))) {
            pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p);
            continue;
        }
        p++;
    }

    pr_debug("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);

    return (size_t) ((char *)p - buf);
}
Example #6
/*
 * Message receiver(workqueue)
 */
static void mbox_rx_work(struct work_struct *work)
{
    struct omap_mbox_queue *mq =
            container_of(work, struct omap_mbox_queue, work);
    struct omap_mbox *mbox = mq->queue->queuedata;
    struct request_queue *q = mbox->rxq->queue;
    struct request *rq;
    mbox_msg_t msg;
    unsigned long flags;

    if (mbox->rxq->callback == NULL) {
        sysfs_notify(&mbox->dev.kobj, NULL, "mbox");
        return;
    }

    while (1) {
        spin_lock_irqsave(q->queue_lock, flags);
        rq = elv_next_request(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
        if (!rq)
            break;

        msg = (mbox_msg_t) rq->data;

        if (blk_end_request(rq, 0, 0))
            BUG();

        mbox->rxq->callback((void *)msg);
    }
}
Example #7
File: zvol.c Project: avg-I/zfs
/*
 * Common read path running under the zvol taskq context.  This function
 * is responsible for copying the requested data out of the DMU and into
 * a Linux request structure.  It then must signal the request queue with
 * an error code describing the result of the copy.
 */
static void
zvol_read(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    fstrans_cookie_t cookie = spl_fstrans_mark();
    uint64_t offset = blk_rq_pos(req) << 9;
    uint64_t size = blk_rq_bytes(req);
    int error;
    rl_t *rl;

    if (size == 0) {
        error = 0;
        goto out;
    }

    rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

    error = dmu_read_req(zv->zv_objset, ZVOL_OBJ, req);

    zfs_range_unlock(rl);

    /* convert checksum errors into IO errors */
    if (error == ECKSUM)
        error = SET_ERROR(EIO);

out:
    blk_end_request(req, -error, size);
    spl_fstrans_unmark(cookie);
}
Example #8
/**
 *	ide_complete_pm_rq - end the current Power Management request
 *	@drive: target drive
 *	@rq: request
 *
 *	This function cleans up the current PM request and stops the queue
 *	if necessary.
 */
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	struct request_pm_state *pm = rq->special;
	unsigned long flags;

	ide_complete_power_step(drive, rq);
	if (pm->pm_step != IDE_PM_COMPLETED)
		return;

#ifdef DEBUG_PM
#ifdef CONFIG_DEBUG_PRINTK
	printk("%s: completing PM request, %s\n", drive->name,
	       (rq->cmd_type == REQ_TYPE_PM_SUSPEND) ? "suspend" : "resume");
#else
	;
#endif
#endif
	spin_lock_irqsave(q->queue_lock, flags);
	if (rq->cmd_type == REQ_TYPE_PM_SUSPEND)
		blk_stop_queue(q);
	else
		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
	spin_unlock_irqrestore(q->queue_lock, flags);

	drive->hwif->rq = NULL;

	if (blk_end_request(rq, 0, 0))
		BUG();
}
Example #9
File: zvol.c Project: torn5/zfs
static void
zvol_discard(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error;
	rl_t *rl;

	/*
	 * Annotate this call path with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock.  KM_PUSHPAGE should be used instead.
	 */
	ASSERT(!(current->flags & PF_NOFS));
	current->flags |= PF_NOFS;

	if (offset + size > zv->zv_volsize) {
		blk_end_request(req, -EIO, size);
		goto out;
	}

	if (size == 0) {
		blk_end_request(req, 0, size);
		goto out;
	}

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, size);

	/*
	 * TODO: maybe we should add the operation to the log.
	 */

	zfs_range_unlock(rl);

	blk_end_request(req, -error, size);
out:
	current->flags &= ~PF_NOFS;
}
Example #10
/*
 * The simple form of the request function.
 */
static void sbull_request(struct request_queue *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		struct sbull_dev *dev = req->rq_disk->private_data;
		if (! blk_fs_request(req)) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			blk_end_request(req, -EIO, req->current_nr_sectors << 9);
			continue;
		}
    //    	printk (KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx\n",
    //    			dev - Devices, rq_data_dir(req),
    //    			req->sector, req->current_nr_sectors,
    //    			req->flags);
		sbull_transfer(dev, req->sector, req->current_nr_sectors,
				req->buffer, rq_data_dir(req));
		blk_end_request(req, 0, req->current_nr_sectors << 9);	/* 0 == success; the old uptodate-style 1 would be treated as an error */
	}
}
Example #11
/*
 * Common write path running under the zvol taskq context.  This function
 * is responsible for copying the request structure data into the DMU and
 * signaling the request queue with the result of the copy.
 */
static void
zvol_write(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error = 0;
	dmu_tx_t *tx;
	rl_t *rl;

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);

	/* This will only fail for ENOSPC */
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		blk_end_request(req, -error, size);
		return;
	}

	error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
	if (error == 0)
		zvol_log_write(zv, tx, offset, size, rq_is_sync(req));

	dmu_tx_commit(tx);
	zfs_range_unlock(rl);

	if (rq_is_sync(req))
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	blk_end_request(req, -error, size);
}
Example #12
int ide_end_rq(ide_drive_t *drive, struct request *rq, int error,
	       unsigned int nr_bytes)
{
	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		HWGROUP(drive)->hwif->ide_dma_on(drive);
	}

	return blk_end_request(rq, error, nr_bytes);
}
Example #13
int ide_end_rq(ide_drive_t *drive, struct request *rq, int error,
	       unsigned int nr_bytes)
{
	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	return blk_end_request(rq, error, nr_bytes);
}
Example #14
File: zvol.c Project: avg-I/zfs
static void
zvol_discard(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    fstrans_cookie_t cookie = spl_fstrans_mark();
    uint64_t start = blk_rq_pos(req) << 9;
    uint64_t end = start + blk_rq_bytes(req);
    int error;
    rl_t *rl;

    if (end > zv->zv_volsize) {
        error = EIO;
        goto out;
    }

    /*
     * Align the request to volume block boundaries. If we don't,
     * then this will force dnode_free_range() to zero out the
     * unaligned parts, which is slow (read-modify-write) and
     * useless since we are not freeing any space by doing so.
     */
    start = P2ROUNDUP(start, zv->zv_volblocksize);
    end = P2ALIGN(end, zv->zv_volblocksize);

    if (start >= end) {
        error = 0;
        goto out;
    }

    rl = zfs_range_lock(&zv->zv_znode, start, end - start, RL_WRITER);

    error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, end-start);

    /*
     * TODO: maybe we should add the operation to the log.
     */

    zfs_range_unlock(rl);
out:
    blk_end_request(req, -error, blk_rq_bytes(req));
    spl_fstrans_unmark(cookie);
}
Example #15
/**
 *	ide_complete_pm_request - end the current Power Management request
 *	@drive: target drive
 *	@rq: request
 *
 *	This function cleans up the current PM request and stops the queue
 *	if necessary.
 */
static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(&ide_lock, flags);
	if (blk_pm_suspend_request(rq)) {
		blk_stop_queue(drive->queue);
	} else {
		drive->blocked = 0;
		blk_start_queue(drive->queue);
	}
	spin_unlock_irqrestore(&ide_lock, flags);

	HWGROUP(drive)->rq = NULL;
	if (blk_end_request(rq, 0, 0))
		BUG();
}
Example #16
/**
 *	ide_complete_pm_request - end the current Power Management request
 *	@drive: target drive
 *	@rq: request
 *
 *	This function cleans up the current PM request and stops the queue
 *	if necessary.
 */
void ide_complete_pm_request(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	unsigned long flags;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_pm_suspend_request(rq))
		blk_stop_queue(q);
	else
		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
	spin_unlock_irqrestore(q->queue_lock, flags);

	drive->hwif->rq = NULL;

	if (blk_end_request(rq, 0, 0))
		BUG();
}
Example #17
static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, unsigned int nr_bytes, int dequeue)
{
	int ret = 1;
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error)
		nr_bytes = rq->hard_nr_sectors << 9;

	if (!blk_fs_request(rq) && error && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	if (!blk_end_request(rq, error, nr_bytes))
		ret = 0;

	if (ret == 0 && dequeue)
		drive->hwif->rq = NULL;

	return ret;
}
Example #18
void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *task = (ide_task_t *)rq->special;

		if (task) {
			struct ide_taskfile *tf = &task->tf;

			tf->error = err;
			tf->status = stat;

			drive->hwif->tp_ops->tf_read(drive, task);

			if (task->tf_flags & IDE_TFLAG_DYN)
				kfree(task);
		}
	} else if (blk_pm_request(rq)) {
		struct request_pm_state *pm = rq->data;

		ide_complete_power_step(drive, rq);
		if (pm->pm_step == IDE_PM_COMPLETED)
			ide_complete_pm_request(drive, rq);
		return;
	}

	hwif->rq = NULL;

	rq->errors = err;

	if (unlikely(blk_end_request(rq, (rq->errors ? -EIO : 0),
				     blk_rq_bytes(rq))))
		BUG();
}
Example #19
static void lkl_disk_request(struct request_queue *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		struct lkl_disk_dev *dev = req->rq_disk->private_data;
		struct lkl_disk_cs cs;

		if (! blk_fs_request(req)) {
			printk (KERN_NOTICE "lkl_disk_request: skip non-fs request\n");
			__blk_end_request(req, -EIO, req->hard_cur_sectors << 9);
			continue;
		}

		cs.linux_cookie=req;
		lkl_disk_do_rw(dev->data, req->sector, req->current_nr_sectors,
			       req->buffer, rq_data_dir(req), &cs);
		/*
		 * Async is broken.
		 */
		BUG_ON (cs.sync == 0);
		blk_end_request(req, cs.error ? -EIO : 0, blk_rq_bytes(req));
	}
}
Example #20
/* issue astoria blkdev request (issue_fn) */
static int cyasblkdev_blk_issue_rq(
					struct cyasblkdev_queue *bq,
					struct request *req
					)
{
	struct cyasblkdev_blk_data *bd = bq->data;
	int index = 0;
	int ret = CY_AS_ERROR_SUCCESS;
	uint32_t req_sector = 0;
	uint32_t req_nr_sectors = 0;
	int bus_num = 0;
	int lcl_unit_no = 0;

	DBGPRN_FUNC_NAME;

	/*
	 * will construct a scatterlist for the given request;
	 * the return value is the number of actually used
	 * entries in the resulting list. Then, this scatterlist
	 * can be used for the actual DMA prep operation.
	 */
	spin_lock_irq(&bd->lock);
	index = blk_rq_map_sg(bq->queue, req, bd->sg);

	if (req->rq_disk == bd->user_disk_0) {
		bus_num = bd->user_disk_0_bus_num;
		req_sector = blk_rq_pos(req) + gl_bd->user_disk_0_first_sector;
		req_nr_sectors = blk_rq_sectors(req);
		lcl_unit_no = gl_bd->user_disk_0_unit_no;

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: request made to disk 0 "
			"for sector=%d, num_sectors=%d, unit_no=%d\n",
			__func__, req_sector, (int) blk_rq_sectors(req),
			lcl_unit_no);
		#endif
	} else if (req->rq_disk == bd->user_disk_1) {
		bus_num = bd->user_disk_1_bus_num;
		req_sector = blk_rq_pos(req) + gl_bd->user_disk_1_first_sector;
		/*SECT_NUM_TRANSLATE(blk_rq_sectors(req));*/
		req_nr_sectors = blk_rq_sectors(req);
		lcl_unit_no = gl_bd->user_disk_1_unit_no;

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: request made to disk 1 for "
			"sector=%d, num_sectors=%d, unit_no=%d\n", __func__,
			req_sector, (int) blk_rq_sectors(req), lcl_unit_no);
		#endif
	} else if (req->rq_disk == bd->system_disk) {
		bus_num = bd->system_disk_bus_num;
		req_sector = blk_rq_pos(req) + gl_bd->system_disk_first_sector;
		req_nr_sectors = blk_rq_sectors(req);
		lcl_unit_no = gl_bd->system_disk_unit_no;

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: request made to system disk "
			"for sector=%d, num_sectors=%d, unit_no=%d\n", __func__,
			req_sector, (int) blk_rq_sectors(req), lcl_unit_no);
		#endif
	}
	#ifndef WESTBRIDGE_NDEBUG
	else {
		cy_as_hal_print_message(
			"%s: invalid disk used for request\n", __func__);
	}
	#endif

	spin_unlock_irq(&bd->lock);

	if (rq_data_dir(req) == READ) {
		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: calling readasync() "
			"req_sector=0x%x, req_nr_sectors=0x%x, bd->sg:%x\n\n",
			__func__, req_sector, req_nr_sectors, (uint32_t)bd->sg);
		#endif

		ret = cy_as_storage_read_async(bd->dev_handle, bus_num, 0,
			lcl_unit_no, req_sector, bd->sg, req_nr_sectors,
			(cy_as_storage_callback)cyasblkdev_issuecallback);

		if (ret != CY_AS_ERROR_SUCCESS) {
			#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("%s:readasync() error %d at "
				"address %ld, unit no %d\n", __func__, ret,
				blk_rq_pos(req), lcl_unit_no);
			cy_as_hal_print_message("%s:ending i/o request "
				"on reg:%x\n", __func__, (uint32_t)req);
			#endif

			while (blk_end_request(req,
				(ret == CY_AS_ERROR_SUCCESS),
				req_nr_sectors*512))
				;

			bq->req = NULL;
		}
	} else {
		ret = cy_as_storage_write_async(bd->dev_handle, bus_num, 0,
			lcl_unit_no, req_sector, bd->sg, req_nr_sectors,
			(cy_as_storage_callback)cyasblkdev_issuecallback);

		if (ret != CY_AS_ERROR_SUCCESS) {
			#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("%s: write failed with "
			"error %d at address %ld, unit no %d\n",
			__func__, ret, blk_rq_pos(req), lcl_unit_no);
			#endif

			/*end IO op on this request(does both
			 * end_that_request_... _first & _last) */
			while (blk_end_request(req,
				(ret == CY_AS_ERROR_SUCCESS),
				req_nr_sectors*512))
				;

			bq->req = NULL;
		}
	}

	return ret;
}
Example #21
/*west bridge storage async api on_completed callback */
static void cyasblkdev_issuecallback(
	/* Handle to the device completing the storage operation */
	cy_as_device_handle handle,
	/* The media type completing the operation */
	cy_as_media_type type,
	/* The device completing the operation */
	uint32_t device,
	/* The unit completing the operation */
	uint32_t unit,
	/* The block number of the completed operation */
	uint32_t block_number,
	/* The type of operation */
	cy_as_oper_type op,
	/* The error status */
	cy_as_return_status_t status
	)
{
	int retry_cnt = 0;
	DBGPRN_FUNC_NAME;

	if (status != CY_AS_ERROR_SUCCESS) {
		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
		  "%s: async r/w: op:%d failed with error %d at address %d\n",
			__func__, op, status, block_number);
		#endif
	}

	#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message(
		"%s calling blk_end_request from issue_callback "
		"req=0x%x, status=0x%x, nr_sectors=0x%x\n",
		__func__, (unsigned int) gl_bd->queue.req, status,
		(unsigned int) blk_rq_sectors(gl_bd->queue.req));
	#endif

	/* note: blk_end_request w/o __ prefix should
	 * not require spinlocks on the queue*/
	while (blk_end_request(gl_bd->queue.req,
	status, blk_rq_sectors(gl_bd->queue.req)*512)) {
		retry_cnt++;
	}

	#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message(
		"%s blkdev_callback: ended rq on %d sectors, "
		"with err:%d, n:%d times\n", __func__,
		(int)blk_rq_sectors(gl_bd->queue.req), status,
		retry_cnt
	);
	#endif

	spin_lock_irq(&gl_bd->lock);

	/*elevate next request, if there is one*/
	if (!blk_queue_plugged(gl_bd->queue.queue)) {
		/* queue is not plugged */
		gl_bd->queue.req = blk_fetch_request(gl_bd->queue.queue);
		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s blkdev_callback: "
		"blk_fetch_request():%p\n",
			__func__, gl_bd->queue.req);
		#endif
	}

	if (gl_bd->queue.req) {
		spin_unlock_irq(&gl_bd->lock);

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s blkdev_callback: about to "
		"call issue_fn:%p\n", __func__, gl_bd->queue.req);
		#endif

		gl_bd->queue.issue_fn(&gl_bd->queue, gl_bd->queue.req);
	} else {
		spin_unlock_irq(&gl_bd->lock);
	}
}
Example #22
/*
 * Common write path running under the zvol taskq context.  This function
 * is responsible for copying the request structure data into the DMU and
 * signaling the request queue with the result of the copy.
 */
static void
zvol_write(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error = 0;
	dmu_tx_t *tx;
	rl_t *rl;

	/*
	 * Annotate this call path with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock.  KM_PUSHPAGE should be used instead.
	 */
	ASSERT(!(current->flags & PF_NOFS));
	current->flags |= PF_NOFS;

	if (req->cmd_flags & VDEV_REQ_FLUSH)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	/*
	 * Some requests are just for flush and nothing else.
	 */
	if (size == 0) {
		blk_end_request(req, 0, size);
		goto out;
	}

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);

	/* This will only fail for ENOSPC */
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		blk_end_request(req, -error, size);
		goto out;
	}

	error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
	if (error == 0)
		zvol_log_write(zv, tx, offset, size,
		    req->cmd_flags & VDEV_REQ_FUA);

	dmu_tx_commit(tx);
	zfs_range_unlock(rl);

	if ((req->cmd_flags & VDEV_REQ_FUA) ||
	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	blk_end_request(req, -error, size);
out:
	current->flags &= ~PF_NOFS;
}
Example #23
static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &hwif->cmd;
	struct request *rq = hwif->rq;
	ide_expiry_t *expiry = NULL;
	int dma_error = 0, dma, stat, thislen, uptodate = 0;
	int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc, nsectors;
	int sense = blk_sense_request(rq);
	unsigned int timeout;
	u16 len;
	u8 ireason;

	ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x",
				  rq->cmd[0], write);

	/* check for errors */
	dma = drive->dma;
	if (dma) {
		drive->dma = 0;
		drive->waiting_for_dma = 0;
		dma_error = hwif->dma_ops->dma_end(drive);
		ide_dma_unmap_sg(drive, cmd);
		if (dma_error) {
			printk(KERN_ERR PFX "%s: DMA %s error\n", drive->name,
					write ? "write" : "read");
			ide_dma_off(drive);
		}
	}

	rc = cdrom_decode_status(drive, 0, &stat);
	if (rc) {
		if (rc == 2)
			goto out_end;
		return ide_stopped;
	}

	/* using dma, transfer is complete now */
	if (dma) {
		if (dma_error)
			return ide_error(drive, "dma error", stat);
		uptodate = 1;
		goto out_end;
	}

	ide_read_bcount_and_ireason(drive, &len, &ireason);

	thislen = blk_fs_request(rq) ? len : cmd->nleft;
	if (thislen > len)
		thislen = len;

	ide_debug_log(IDE_DBG_PC, "DRQ: stat: 0x%x, thislen: %d",
				  stat, thislen);

	/* If DRQ is clear, the command has completed. */
	if ((stat & ATA_DRQ) == 0) {
		if (blk_fs_request(rq)) {
			/*
			 * If we're not done reading/writing, complain.
			 * Otherwise, complete the command normally.
			 */
			uptodate = 1;
			if (cmd->nleft > 0) {
				printk(KERN_ERR PFX "%s: %s: data underrun "
					"(%u bytes)\n", drive->name, __func__,
					cmd->nleft);
				if (!write)
					rq->cmd_flags |= REQ_FAILED;
				uptodate = 0;
			}
		} else if (!blk_pc_request(rq)) {
			ide_cd_request_sense_fixup(drive, cmd);
			/* complain if we still have data left to transfer */
			uptodate = cmd->nleft ? 0 : 1;
			if (uptodate == 0)
				rq->cmd_flags |= REQ_FAILED;
		}
		goto out_end;
	}

	/* check which way to transfer data */
	rc = ide_cd_check_ireason(drive, rq, len, ireason, write);
	if (rc)
		goto out_end;

	cmd->last_xfer_len = 0;

	ide_debug_log(IDE_DBG_PC, "data transfer, rq->cmd_type: 0x%x, "
				  "ireason: 0x%x",
				  rq->cmd_type, ireason);

	/* transfer data */
	while (thislen > 0) {
		int blen = min_t(int, thislen, cmd->nleft);

		if (cmd->nleft == 0)
			break;

		ide_pio_bytes(drive, cmd, write, blen);
		cmd->last_xfer_len += blen;

		thislen -= blen;
		len -= blen;

		if (sense && write == 0)
			rq->sense_len += blen;
	}

	/* pad, if necessary */
	if (len > 0) {
		if (blk_fs_request(rq) == 0 || write == 0)
			ide_pad_transfer(drive, write, len);
		else {
			printk(KERN_ERR PFX "%s: confused, missing data\n",
				drive->name);
			blk_dump_rq_flags(rq, "cdrom_newpc_intr");
		}
	}

	if (blk_pc_request(rq)) {
		timeout = rq->timeout;
	} else {
		timeout = ATAPI_WAIT_PC;
		if (!blk_fs_request(rq))
			expiry = ide_cd_expiry;
	}

	hwif->expiry = expiry;
	ide_set_handler(drive, cdrom_newpc_intr, timeout);
	return ide_started;

out_end:
	if (blk_pc_request(rq) && rc == 0) {
		unsigned int dlen = rq->data_len;

		rq->data_len = 0;

		if (blk_end_request(rq, 0, dlen))
			BUG();

		hwif->rq = NULL;
	} else {
		if (sense && uptodate)
			ide_cd_complete_failed_rq(drive, rq);

		if (blk_fs_request(rq)) {
			if (cmd->nleft == 0)
				uptodate = 1;
		} else {
			if (uptodate <= 0 && rq->errors == 0)
				rq->errors = -EIO;
		}

		if (uptodate == 0)
			ide_cd_error_cmd(drive, cmd);

		/* make sure it's fully ended */
		if (blk_pc_request(rq))
			nsectors = (rq->data_len + 511) >> 9;
		else
			nsectors = rq->hard_nr_sectors;

		if (nsectors == 0)
			nsectors = 1;

		if (blk_fs_request(rq) == 0) {
			rq->data_len -= (cmd->nbytes - cmd->nleft);
			if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
				rq->data_len += cmd->last_xfer_len;
		}

		ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9);

		if (sense && rc == 2)
			ide_error(drive, "request sense failure", stat);
	}