Example #1
0
void card_cleanup_queue(struct card_queue *cq)
{
	struct request_queue *q = cq->queue;
	unsigned long flags;
	
	/* Make sure the queue isn't suspended, as that will deadlock */
	card_queue_resume(cq);

	/* Unregister the reboot notifier before stopping the worker thread */
	unregister_reboot_notifier(&cq->nb);

	/* Then terminate our worker thread */
	kthread_stop(cq->thread);

	/* Empty the queue */   
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
	
	kfree(cq->bounce_sg);
	cq->bounce_sg = NULL;

	kfree(cq->sg);
	cq->sg = NULL;

	kfree(cq->bounce_buf);
	cq->bounce_buf = NULL;

	cq->card = NULL;
}
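A note on why these cleanup paths restart a queue they are about to abandon: once q->queuedata has been cleared under the queue lock, the driver's request_fn (re-invoked by blk_start_queue()) is expected to fail every straggling request, as Example #20's mmc_request() does. Below is a minimal sketch of that drain path, assuming the legacy (pre-blk-mq) block API; example_request_fn is a hypothetical name, not code from any of the drivers above.

static void example_request_fn(struct request_queue *q)
{
	struct request *req;

	if (!q->queuedata) {
		/* Queue is being torn down: fail all remaining requests. */
		while ((req = blk_fetch_request(q)) != NULL)
			__blk_end_request_all(req, -ENODEV);
		return;
	}

	/* ... normal dispatch would run here ... */
}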
Example #2
0
static irqreturn_t htifblk_isr(struct htif_device *dev, sbi_device_message *msg)
{
	struct htifblk_device *htifblk_dev;
	irqreturn_t ret;
	int err;

	htifblk_dev = dev_get_drvdata(&dev->dev);
	ret = IRQ_NONE;

	spin_lock(&htifblk_dev->lock);
	if (unlikely(htifblk_dev->req == NULL)) {
		dev_err(&dev->dev, "null request\n");
		goto out;
	}

	err = 0;
	if (unlikely(msg->data != htifblk_dev->tag)) {
		dev_err(&dev->dev, "tag mismatch: expected=%u actual=%lu\n",
			htifblk_dev->tag, msg->data);
		err = -EIO;
	}

	wmb();
	WARN_ON(__blk_end_request_cur(htifblk_dev->req, err));
	htifblk_dev->req = NULL;
	blk_start_queue(htifblk_dev->disk->queue);
	ret = IRQ_HANDLED;
out:
	spin_unlock(&htifblk_dev->lock);
	return ret;
}
Example #3
0
static irqreturn_t htifblk_isr(struct htif_device *dev, unsigned long data)
{
	struct htifblk_device *htifblk_dev;
	unsigned int tag;
	irqreturn_t ret;
	int err;

	htifblk_dev = dev_get_drvdata(&dev->dev);
	ret = IRQ_NONE;

	spin_lock(&htifblk_dev->lock);
	if (unlikely(htifblk_dev->req == NULL)) {
		dev_err(&dev->dev, "null request\n");
		goto out;
	}

	err = 0;
	/* The double shift clears the upper HTIF_DEV_SHIFT bits of the data. */
	tag = (data << HTIF_DEV_SHIFT) >> HTIF_DEV_SHIFT;
	if (unlikely(tag != htifblk_dev->tag)) {
		dev_err(&dev->dev, "tag mismatch: expected=%u actual=%u\n",
			htifblk_dev->tag, tag);
		err = -EIO;
	}

	wmb();
	WARN_ON(__blk_end_request_cur(htifblk_dev->req, err));
	htifblk_dev->req = NULL;
	blk_start_queue(htifblk_dev->disk->queue);
	ret = IRQ_HANDLED;
out:
	spin_unlock(&htifblk_dev->lock);
	return ret;
}
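The shift pair in Example #3 clears the upper HTIF_DEV_SHIFT bits of the message data (the macro name suggests they hold the device ID), leaving only the tag. For unsigned arithmetic the same value can be computed with an explicit mask; a sketch, where htif_msg_tag is a hypothetical helper and 0 < HTIF_DEV_SHIFT < BITS_PER_LONG is assumed:

static inline unsigned int htif_msg_tag(unsigned long data)
{
	/* Equivalent to (data << HTIF_DEV_SHIFT) >> HTIF_DEV_SHIFT. */
	return data & ((1UL << (BITS_PER_LONG - HTIF_DEV_SHIFT)) - 1);
}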
Example #4
0
static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio);
		goto free_cmd;
	}

	if (cmd->rq)
		q = cmd->rq->q;

	/* Restart queue if needed, as we are freeing a tag */
	if (q && !q->mq_ops && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		if (blk_queue_stopped(q))
			blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
free_cmd:
	free_cmd(cmd);
}
Example #5
0
/**
 * @brief	SD module cleanup function.
 * @param	sd[in]: Card information.
 * @return	None.
 */
static void gp_sdcard_cleanup(gpSDInfo_t* sd)
{
	/* ----- Stop new requests from getting into the queue ----- */
	if (sd->gd)
		del_gendisk(sd->gd);

	/* ----- Then terminate our worker thread ----- */
	if (sd->thread) {
		kthread_stop(sd->thread);
		sd->thread = NULL;
	}

	/* ----- Empty the queue ----- */
	if (sd->queue) {
		unsigned long flags;

		spin_lock_irqsave(&sd->lock, flags);
		sd->queue->queuedata = NULL;
		blk_start_queue(sd->queue);
		spin_unlock_irqrestore(&sd->lock, flags);
	}

	kfree(sd->sg);
	sd->sg = NULL;

	/* ----- Free the DMA channel ----- */
	if (sd->handle_dma)
		gp_apbdma0_release(sd->handle_dma);
	sd->handle_dma = 0;
}
Example #6
0
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct completion_queue *cq;
	struct llist_node *entry;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	while ((entry = llist_del_all(&cq->list)) != NULL) {
		entry = llist_reverse_order(entry);
		do {
			struct request_queue *q = NULL;

			cmd = container_of(entry, struct nullb_cmd, ll_list);
			entry = entry->next;
			if (cmd->rq)
				q = cmd->rq->q;
			end_cmd(cmd);

			if (q && !q->mq_ops && blk_queue_stopped(q)) {
				spin_lock(q->queue_lock);
				if (blk_queue_stopped(q))
					blk_start_queue(q);
				spin_unlock(q->queue_lock);
			}
		} while (entry);
	}

	return HRTIMER_NORESTART;
}
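For context, the completions drained above are pushed onto a per-CPU lock-free list: llist_del_all() detaches the whole list in one atomic operation and llist_reverse_order() restores FIFO order, after which the handler can walk it without a lock. A sketch of what the matching producer side plausibly looks like; null_cmd_defer_completion is a hypothetical name, and the real null_blk code also arms the hrtimer at this point.

static void null_cmd_defer_completion(struct nullb_cmd *cmd)
{
	struct completion_queue *cq;

	cq = &per_cpu(completion_queues, get_cpu());
	llist_add(&cmd->ll_list, &cq->list);	/* lock-free LIFO push */
	put_cpu();
}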
Example #7
0
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	mq->card = NULL;
}
Example #8
0
static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}
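blk_start_queue() must be called with the queue lock held; kick_pending_request_queues() takes no lock itself because blkfront's callers already hold it. A sketch of such a caller follows; example_kick and the io_lock field name are assumptions here, not quotes from blkfront.

static void example_kick(struct blkfront_info *info)
{
	unsigned long flags;

	/* blk_start_queue()/do_blkif_request() expect the queue lock held. */
	spin_lock_irqsave(&info->io_lock, flags);
	kick_pending_request_queues(info);
	spin_unlock_irqrestore(&info->io_lock, flags);
}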
Example #9
0
static void dm_old_start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_queue_stopped(q))
		blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
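dm's symmetric stop helper follows the same check-under-lock pattern in the other direction. A sketch of it, reconstructed from the same era of the legacy request path rather than quoted:

static void dm_old_stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!blk_queue_stopped(q))
		blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}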
Example #10
0
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->special;

	if (rq->cmd_type == REQ_TYPE_PM_SUSPEND &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (rq->cmd_type == REQ_TYPE_PM_RESUME &&
		 pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
		struct request_queue *q = drive->queue;
		unsigned long flags;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		tp_ops->dev_select(drive);
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
Example #11
0
/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->suspended) {
		mq->suspended = false;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
Example #12
0
void card_queue_resume(struct card_queue *cq)
{
	struct request_queue *q = cq->queue;
	unsigned long flags;

	if (cq->flags & CARD_QUEUE_SUSPENDED) {
		cq->flags &= ~CARD_QUEUE_SUSPENDED;

		up(&cq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
Example #13
0
/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
Example #14
0
/**
 * cyasblkdev_queue_resume - resume a previously suspended
 * CyAsBlkDev request queue
 * @bq: CyAsBlkDev queue to resume
 */
void cyasblkdev_queue_resume(struct cyasblkdev_queue *bq)
{
    struct request_queue *q = bq->queue;
    unsigned long flags;

    DBGPRN_FUNC_NAME;

    if (bq->flags & CYASBLKDEV_QUEUE_SUSPENDED)  {
        bq->flags &= ~CYASBLKDEV_QUEUE_SUSPENDED;

        up(&bq->thread_sem);

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
    }
}
Example #15
0
static void start_queue(int index)
{
	unsigned long flags;
	struct request_queue *q;

	if (bdev[index]) {
		q = bdev_get_queue(bdev[index]);
		if (!q) {
			pr_err("queue not found bdev[index]=%d\n",
				index);
			return;
		}
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
		blkdev_put(bdev[index], FMODE_READ);
		bdev[index] = NULL;
	}
}
Example #16
0
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	if (!mmc_card_sd(mq->card))
		kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	if (!mmc_card_sd(mq->card))
		kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
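The two slots freed in Example #16 exist because this generation of the MMC queue double-buffers requests, so one request can be prepared while another is in flight. A sketch of the per-slot bookkeeping assumed above (field subset only, reconstructed rather than quoted):

struct mmc_queue_req {
	struct request		*req;		/* request this slot is servicing */
	struct scatterlist	*sg;		/* mapped scatterlist */
	char			*bounce_buf;	/* contiguous bounce buffer, if used */
	struct scatterlist	*bounce_sg;	/* sg covering the original pages */
	unsigned int		bounce_sg_len;
};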
Example #17
0
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

#ifndef CONFIG_ARCH_EMXX
	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);
#endif
	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

#ifdef CONFIG_ARCH_EMXX
	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
#endif

	kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

#ifndef CONFIG_ARCH_EMXX
	blk_cleanup_queue(mq->queue);
#endif

	mq->card = NULL;
}
Example #18
0
/**
 *	ide_complete_pm_request - end the current Power Management request
 *	@drive: target drive
 *	@rq: request
 *
 *	This function cleans up the current PM request and stops or
 *	restarts the queue as appropriate.
 */
static void ide_complete_pm_request(ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(&ide_lock, flags);
	if (blk_pm_suspend_request(rq)) {
		blk_stop_queue(drive->queue);
	} else {
		drive->blocked = 0;
		blk_start_queue(drive->queue);
	}
	HWGROUP(drive)->rq = NULL;
	if (__blk_end_request(rq, 0, 0))
		BUG();
	spin_unlock_irqrestore(&ide_lock, flags);
}
Example #19
0
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;

	if (blk_pm_suspend_request(rq) &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (blk_pm_resume_request(rq) &&
		 pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		struct request_queue *q = drive->queue;
		unsigned long flags;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		SELECT_DRIVE(drive);
		hwif->tp_ops->set_irq(hwif, 1);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
Example #20
0
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;
#if 0
	if (!mq) {
#else
	/*
	 * With a USB cable plugged in (charging), pulling and reinserting
	 * the card occasionally hung the system because mq->thread was NULL;
	 * check for that here.  Modified by xbw.
	 */
	if (!mq || !mq->thread) {
#endif
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = __blk_end_request(req, -EIO,
							blk_rq_cur_bytes(req));
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_ANY;	/* was BLK_BOUNCE_HIGH */
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;

		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to allocate "
				"bounce buffer\n", mmc_card_name(card));
		} else {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	kfree(mq->sg);
	mq->sg = NULL;
	kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
	struct scatterlist *src, unsigned int src_len)
{
	unsigned int chunk;
	char *dst_buf, *src_buf;
	unsigned int dst_size, src_size;

	dst_buf = NULL;
	src_buf = NULL;
	dst_size = 0;
	src_size = 0;

	while (src_len) {
		BUG_ON(dst_len == 0);

		if (dst_size == 0) {
			dst_buf = sg_virt(dst);
			dst_size = dst->length;
		}

		if (src_size == 0) {
			src_buf = sg_virt(src);
			src_size = src->length;
		}

		chunk = min(dst_size, src_size);

		memcpy(dst_buf, src_buf, chunk);

		dst_buf += chunk;
		src_buf += chunk;
		dst_size -= chunk;
		src_size -= chunk;

		if (dst_size == 0) {
			dst++;
			dst_len--;
		}

		if (src_size == 0) {
			src++;
			src_len--;
		}
	}
}
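copy_sg() above is the workhorse for the bounce buffer path set up in mmc_init_queue(): before a write is issued, data is gathered from the request's pages into the single-entry sg that covers the bounce buffer, and after a read it is scattered back. A sketch of the pre-issue half under those assumptions; the function name and the bounce_sg_len field mirror the era's mmc_queue code but are reconstructed, not quoted.

void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;		/* no bounce buffer: sg maps the request directly */

	if (rq_data_dir(mq->req) != WRITE)
		return;		/* reads are copied back after completion instead */

	/* Gather the request's scattered pages into the bounce buffer. */
	copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
}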
Example #21
0
static void skd_timer_tick_not_online(struct skd_device *skdev)
{
	switch (skdev->state) {
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		break;
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
			 skdev->name, __func__, __LINE__,
			 skdev->drive_state, skdev->state);
		/* If we've been in sanitize for 3 seconds, we figure we're not
		 * going to get any more completions, so recover requests now
		 */
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_recover_requests(skdev, 0);
		break;

	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
			 skdev->name, __func__, __LINE__,
			 skdev->state, skdev->timer_countdown);
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
			 skdev->name, __func__, __LINE__,
			 skdev->state, skdev->timer_countdown);
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_WAIT_BOOT:
	case SKD_DRVR_STATE_STARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive.  Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;

		pr_err("(%s): DriveFault Connect Timeout (%x)\n",
		       skd_name(skdev), skdev->drive_state);

		/* Start the queue so we can respond to requests with errors,
		 * and wake up anyone waiting for startup to complete. */
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_ONLINE:
		/* shouldn't get here. */
		break;

	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
		break;

	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		pr_debug("%s:%s:%d "
			 "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
			 skdev->name, __func__, __LINE__,
			 skdev->timo_slot,
			 skdev->timer_countdown,
			 skdev->in_flight,
			 skdev->timeout_slot[skdev->timo_slot]);
		/* if the slot has cleared we can let the I/O continue */
		if (skdev->timeout_slot[skdev->timo_slot] == 0) {
			pr_debug("%s:%s:%d Slot drained, starting queue.\n",
				 skdev->name, __func__, __LINE__);
			skdev->state = SKD_DRVR_STATE_ONLINE;
			blk_start_queue(skdev->queue);
			return;
		}
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_RESTARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;
		pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
		       skd_name(skdev), skdev->drive_state);

		/*
		 * Recovering does two things:
		 * 1. completes IO with error
		 * 2. reclaims dma resources
		 * When is it safe to recover requests?
		 * - if the drive state is faulted
		 * - if the state is still soft reset after our timeout
		 * - if the drive registers are dead (state = FF)
		 * If it is "unsafe", we still need to recover, so we will
		 * disable pci bus mastering and disable our interrupts.
		 */

		if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
		    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
		    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
			/* It never came out of soft reset. Try to
			 * recover the requests and then let them
			 * fail. This is to mitigate hung processes. */
			skd_recover_requests(skdev, 0);
		else {
			pr_err("(%s): Disable BusMaster (%x)\n",
			       skd_name(skdev), skdev->drive_state);
			pci_disable_device(skdev->pdev);
			skd_disable_interrupts(skdev);
			skd_recover_requests(skdev, 0);
		}

		/* Start the queue so we can respond to requests with errors,
		 * and wake up anyone waiting for startup to complete. */
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_RESUMING:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		break;
	}
}
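The bare blk_start_queue() calls in this handler are safe only because skd's timer tick runs with the device lock, which also serves as the request queue's lock, already held. A sketch of that caller, reconstructed rather than quoted; the skd_timer_tick_online() split is a hypothetical simplification.

static void skd_timer_tick(ulong arg)
{
	struct skd_device *skdev = (struct skd_device *)arg;
	unsigned long reqflags;

	/* skdev->lock is the queue lock, so blk_start_queue() is legal below. */
	spin_lock_irqsave(&skdev->lock, reqflags);
	if (skdev->state == SKD_DRVR_STATE_ONLINE)
		skd_timer_tick_online(skdev);	/* hypothetical online path */
	else
		skd_timer_tick_not_online(skdev);
	spin_unlock_irqrestore(&skdev->lock, reqflags);

	/* The real driver also re-arms the timer here. */
}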