Example #1
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}
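
For context on how such a handler gets attached to a queue: with the legacy (pre-blk-mq) block API, the driver registers the function via blk_init_queue() and stores its private state in q->queuedata, which is exactly what mmc_request() reads back. Below is a minimal sketch under those assumptions; my_dev, my_request_fn, my_thread_fn and "my-queued" are hypothetical names, not identifiers from the MMC code above.

#include <linux/blkdev.h>
#include <linux/kthread.h>

struct my_dev {
	struct request_queue *queue;
	struct task_struct *thread;
	spinlock_t lock;
};

/* request_fn: called with dev->lock held; it only wakes the worker,
 * just like mmc_request() above */
static void my_request_fn(struct request_queue *q)
{
	struct my_dev *dev = q->queuedata;

	if (dev)
		wake_up_process(dev->thread);
}

static int my_thread_fn(void *data);	/* a queue thread like those below */

static int my_setup_queue(struct my_dev *dev)
{
	spin_lock_init(&dev->lock);

	dev->queue = blk_init_queue(my_request_fn, &dev->lock);
	if (!dev->queue)
		return -ENOMEM;

	dev->queue->queuedata = dev;	/* what the request_fn reads back */

	dev->thread = kthread_run(my_thread_fn, dev, "my-queued");
	if (IS_ERR(dev->thread)) {
		blk_cleanup_queue(dev->queue);
		return PTR_ERR(dev->thread);
	}
	return 0;
}
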
Example #2
/**
* @brief 	Request thread function.
* @param 	d[in]: Private data.
* @return 	SUCCESS/ERROR_ID.
*/
static int gp_sdcard_queue_thread(void *d)
{
	gpSDInfo_t *sd = d;
	struct request_queue *q = sd->queue;

	current->flags |= PF_MEMALLOC;

	down(&sd->thread_sem);
	do
	{
		struct request *req = NULL;
		if(sd->queue==NULL)
			continue;
		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		sd->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req)
		{
			if (kthread_should_stop())
			{
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&sd->thread_sem);
			schedule();
			down(&sd->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);
		if((gp_sdcard_ckinsert(sd)==0))
		{
			spin_lock_irq(&sd->lock);
			__blk_end_request_all(req, -ENXIO);
			spin_unlock_irq(&sd->lock);
			sd->fremove = 1;
			continue;
		}
		if(gp_sdcard_xfer_request(sd, req)<0 && (gp_sdcard_inner_card(sd)!=1))
			sd->fremove = 1;
	} while (1);
	up(&sd->thread_sem);
	return 0;
}
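
Two idioms in this thread are worth noting. PF_MEMALLOC lets the thread dip into emergency memory reserves, so writeback destined for the very device it services cannot deadlock on allocation. And the up()/schedule()/down() sequence releases thread_sem for exactly the duration of the sleep, presumably so that suspend or teardown code can take the semaphore while the thread is idle, as the analogous mmc_queue_thread() in Example #21 does.
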
Example #3
/*
 * This function selects a request from a given queue (q) and passes
 * it to sbd_transfer() for processing.
 * A request may be made up of several "chunks".  In this function,
 * each "chunk" of the request is processed in turn until the whole
 * request has been handled.
 */
static void sbd_request(struct request_queue *q) {
	struct request *req; /* the request being serviced */

	req = blk_fetch_request(q); /* pick a request off the queue */
	while (req != NULL) { /* as long as a request was fetched, i.e. the queue is not empty */
		if (req->cmd_type != REQ_TYPE_FS) { /* not a filesystem request */
			printk (KERN_NOTICE "Skip non-CMD request\n"); /* record in syslog that the request was skipped */
			__blk_end_request_all(req, -EIO); /* complete the whole request with an error ... */
			req = blk_fetch_request(q); /* ... and fetch the next one, so the loop cannot spin on the finished request */
			continue; /* skip the rest and start a new iteration */
		}
		sbd_transfer(&Device, blk_rq_pos(req), blk_rq_cur_sectors(req),
				req->buffer, rq_data_dir(req)); /* process the current chunk */
		if ( ! __blk_end_request_cur(req, 0) ) { /* request fully completed? */
			req = blk_fetch_request(q); /* then fetch the next request from the queue */
		}
	}
}
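
The loop contract used by this and most of the following examples: __blk_end_request_cur() completes only the current chunk and returns true while the request still has data left, so a new request is fetched only once the previous one is fully finished. Completing a request with __blk_end_request_all() likewise means a fresh blk_fetch_request() is needed before the next iteration; otherwise the loop would spin on an already-finished request. A minimal skeleton of the pattern, with my_transfer() as a hypothetical placeholder for the driver's data mover:

static void my_transfer(struct request *req);	/* hypothetical data mover */

static void my_request(struct request_queue *q)
{
	struct request *req = blk_fetch_request(q);

	while (req != NULL) {
		if (req->cmd_type != REQ_TYPE_FS) {
			/* complete the whole request with an error ... */
			__blk_end_request_all(req, -EIO);
			/* ... and fetch a fresh one; req must not be reused */
			req = blk_fetch_request(q);
			continue;
		}
		my_transfer(req);	/* move blk_rq_cur_sectors(req) sectors */
		/* returns false once every chunk of req is completed */
		if (!__blk_end_request_cur(req, 0))
			req = blk_fetch_request(q);
	}
}
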
Example #4
/**
* @brief 	Request queue handler.
* @param 	q[in]: Request queue.
* @return 	None.
*/
static void gp_sdcard_request(struct request_queue *q)
{
	gpSDInfo_t *sd = (gpSDInfo_t*)q->queuedata;
	struct request *req;

	if (!sd)
	{
		while ((req = blk_fetch_request(q)) != NULL)
		{
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}
	if (!sd->req)
		wake_up_process(sd->thread);
}
Example #5
File: vbd.c Project: truncs/vbd
/*
 * Service each request in the queue.  If a request is not of
 * type REQ_TYPE_FS, skip it and log that it was skipped.
 */
static void vbd_request(struct request_queue * q) {
  struct request *req;
  req = blk_fetch_request(q);

  while(req != NULL) {

	/* This should not happen normally but just in case */
	if(req->cmd_type != REQ_TYPE_FS) {
	  printk(KERN_NOTICE "Skip non fs type request\n");
	  __blk_end_request_all(req, -EIO);
	  req = blk_fetch_request(q);
	  continue;
	}

	vbd_tx(&device,blk_rq_pos(req), blk_rq_cur_sectors(req),
		   req->buffer, rq_data_dir(req));
	if(!__blk_end_request_cur(req, 0))
	  req = blk_fetch_request(q);
  }
}
Example #6
/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (RING_FULL(&info->ring))
			goto wait;

		blk_start_request(req);

		if (!blk_fs_request(req)) {
			__blk_end_request_all(req, -EIO);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 req->buffer, rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}
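
Unlike the fetch-based loops elsewhere on this page, do_blkif_request() uses blk_peek_request() so that a request can be left on the queue when the ring is full; blk_start_request() then dequeues it once the driver commits to it, and blk_requeue_request() puts it back if issuing fails after all. A stripped-down sketch of that idiom, with my_hw_busy() and my_issue() as hypothetical placeholders:

static int my_hw_busy(void);			/* hypothetical: ring/FIFO full? */
static int my_issue(struct request *req);	/* hypothetical: 0 on success */

static void my_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		if (my_hw_busy())
			break;			/* leave req sitting on the queue */

		blk_start_request(req);		/* dequeue it; the driver owns it now */

		if (my_issue(req) != 0) {
			/* could not issue after all: put the request back and
			 * stop the queue until the hardware drains */
			blk_requeue_request(q, req);
			blk_stop_queue(q);
			break;
		}
	}
}
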
Example #7
static void looper_request(struct request_queue *q) {
 
  struct request *req;
 
  printk(KERN_INFO "looper: executing request\n");
   
  req = blk_fetch_request(q);
  while (req != NULL) {
    if (req->cmd_type != REQ_TYPE_FS) {
      printk (KERN_NOTICE "Skip non-CMD request\n");
      __blk_end_request_all(req, -EIO);
      req = blk_fetch_request(q);
      continue;
    }
    looper_transfer(&Device, blk_rq_pos(req), blk_rq_cur_sectors(req),
		 req->buffer, rq_data_dir(req));
    if ( ! __blk_end_request_cur(req, 0) ) {
      req = blk_fetch_request(q);
    }
  }
}
Example #8
static void srb_rq_fn(struct request_queue *q)
{
	struct srb_device_s *dev = q->queuedata;	
	struct request *req;
	unsigned long flags;

	while ((req = blk_fetch_request(q)) != NULL) {
		if (req->cmd_type != REQ_TYPE_FS) {
			SRBDEV_LOG_DEBUG(dev, "Skip non-CMD request");

			__blk_end_request_all(req, -EIO);
			continue;
		}

		spin_lock_irqsave(&dev->waiting_lock, flags);
		list_add_tail(&req->queuelist, &dev->waiting_queue);
		spin_unlock_irqrestore(&dev->waiting_lock, flags);
		wake_up_nr(&dev->waiting_wq, 1);
	}
}
Example #9
static void sb_request(struct request_queue *q) {
	struct request *req;
	int error;

	req = blk_fetch_request(q);
	while (req != NULL) {
		/* Check request type */
		if (req->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(req, -EIO);
			req = blk_fetch_request(q);
			continue;
		}
		/* Do transfer */
		error = sb_transfer(sbd, blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer, rq_data_dir(req));
		if (!__blk_end_request_cur(req, error ? -EIO : 0) ) {
			req = blk_fetch_request(q);
		}
	}

	return;
}
Example #10
/*
 * Simply used for requesting a transfer (read or write) of
 * data from the RAM disk.
 */
static void osurd_request(struct request_queue *q)
{
    struct request *req;
    req = blk_fetch_request(q);

    while(req != NULL) {
        struct osurd_dev *dev = req->rq_disk->private_data;
        if(req->cmd_type != REQ_TYPE_FS) {
            printk(KERN_NOTICE "Skip non-fs request\n");
            __blk_end_request_all(req, -EIO);
            req = blk_fetch_request(q);
            continue;
        }
        osurd_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
                       req->buffer, rq_data_dir(req));

        if(!__blk_end_request_cur(req, 0)) {
            req = blk_fetch_request(q);
        }
    }
}
Example #11
File: sbd.c Project: OSLL/ioperf
static void sbd_request(struct request_queue *q) {
    struct request *req;

    req = blk_fetch_request(q);
    while (req != NULL) {
        // blk_fs_request() was removed in 2.6.36 - many thanks to
        // Christian Paro for the heads up and fix...
        //if (!blk_fs_request(req)) {
        if (req->cmd_type != REQ_TYPE_FS) {
            printk (KERN_NOTICE "Skip non-CMD request\n");
            __blk_end_request_all(req, -EIO);
            req = blk_fetch_request(q);
            continue;
        }
        sbd_transfer(&Device, blk_rq_pos(req), blk_rq_cur_sectors(req),
                req->buffer, rq_data_dir(req));
        if ( ! __blk_end_request_cur(req, 0) ) {
            req = blk_fetch_request(q);
        }
    }
}
Example #12
static void sbd_request(struct request_queue *q) {
        struct request *req;
        unsigned long offset;
        unsigned long nbytes;

        req = blk_fetch_request(q);
        while (req != NULL) {
                if (req->cmd_type != REQ_TYPE_FS) {
                        printk (KERN_NOTICE "Skip non-CMD request\n");
                        __blk_end_request_all(req, -EIO);
                        req = blk_fetch_request(q);
                        continue;
                }
		offset = blk_rq_pos(req) * logical_block_size;
		nbytes = blk_rq_cur_sectors(req) * logical_block_size;

		operar_sector(&Device, offset, nbytes, req->buffer, rq_data_dir(req));

                if ( ! __blk_end_request_cur(req, 0) ) {
                        req = blk_fetch_request(q);
                }
        }
}
Example #13
static void htifbd_request(struct request_queue *q)
{
	struct request *req;

	req = blk_fetch_request(q);
	while (req != NULL) {
		struct htifbd_dev *dev;

		dev = req->rq_disk->private_data;
		if (req->cmd_type != REQ_TYPE_FS) {
			pr_notice(DRIVER_NAME ": ignoring non-fs request for %s\n",
				req->rq_disk->disk_name);
			__blk_end_request_all(req, -EIO);
			req = blk_fetch_request(q);
			continue;
		}

		htifbd_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
			req->buffer, rq_data_dir(req));
		if (!__blk_end_request_cur(req, 0)) {
			req = blk_fetch_request(q);
		}
	}
}
Example #14
/*
* The simple form of the request function.
*/
static void sbull_request(struct request_queue *q)
{
	struct request *req;

	req = blk_fetch_request(q);
	while (req != NULL) {
		struct sbull_dev *dev = req->rq_disk->private_data;
		if (! blk_fs_request(req)) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			__blk_end_request_all(req, -EIO);
			req = blk_fetch_request(q);
			continue;
		}
	    //    printk (KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx\n",
	    //        dev - Devices, rq_data_dir(req),
	    //        req->sector, req->current_nr_sectors,
	    //        req->flags);
		sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer, rq_data_dir(req));
		/* end_request(req, 1); */
		if(!__blk_end_request_cur(req, 0)) {
			req = blk_fetch_request(q);
		}
	}
}
Example #15
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->rq_flags |= RQF_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		wake_up_process(mq->thread);
}
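
This is the same generic MMC handler as Example #1 after two interface changes in later kernels: the quiet flag moved from req->cmd_flags (REQ_QUIET) to req->rq_flags (RQF_QUIET), and the bare mq->req test was replaced by the mq->asleep flag plus the context_info machinery used for asynchronous request handling.
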
Example #16
static int ide_pm_execute_rq(struct request *rq)
{
	struct request_queue *q = rq->q;
	DECLARE_COMPLETION_ONSTACK(wait);

	rq->end_io_data = &wait;
	rq->end_io = ide_end_sync_rq;

	spin_lock_irq(q->queue_lock);
	if (unlikely(blk_queue_dying(q))) {
		rq->rq_flags |= RQF_QUIET;
		scsi_req(rq)->result = -ENXIO;
		__blk_end_request_all(rq, BLK_STS_OK);
		spin_unlock_irq(q->queue_lock);
		return -ENXIO;
	}
	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
	__blk_run_queue_uncond(q);
	spin_unlock_irq(q->queue_lock);

	wait_for_completion_io(&wait);

	return scsi_req(rq)->result ? -EIO : 0;
}
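
The synchronous-execution pattern here is worth spelling out: a completion is declared on the stack and wired to rq->end_io, the request is pushed to the head of the queue with __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT), and the caller then blocks in wait_for_completion_io() until the end_io callback (ide_end_sync_rq, not shown) completes it. The blk_queue_dying() check is needed because a dying queue will never run the request, so it is failed immediately with -ENXIO instead.
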
Example #17
static void ace_fsm_dostate(struct ace_device *ace)
{
	struct request *req;
	u32 status;
	u16 val;
	int count;

#if defined(DEBUG)
	dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
		ace->fsm_state, ace->id_req_count);
#endif

	/* Verify that there is actually a CF in the slot. If not, then
	 * bail out back to the idle state and wake up all the waiters */
	status = ace_in32(ace, ACE_STATUS);
	if ((status & ACE_STATUS_CFDETECT) == 0) {
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->media_change = 1;
		set_capacity(ace->gd, 0);
		dev_info(ace->dev, "No CF in slot\n");

		/* Drop all in-flight and pending requests */
		if (ace->req) {
			__blk_end_request_all(ace->req, -EIO);
			ace->req = NULL;
		}
		while ((req = blk_fetch_request(ace->queue)) != NULL)
			__blk_end_request_all(req, -EIO);

		/* Drop back to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = -EIO;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
	}

	switch (ace->fsm_state) {
	case ACE_FSM_STATE_IDLE:
		/* See if there is anything to do */
		if (ace->id_req_count || ace_get_next_request(ace->queue)) {
			ace->fsm_iter_num++;
			ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
			mod_timer(&ace->stall_timer, jiffies + HZ);
			if (!timer_pending(&ace->stall_timer))
				add_timer(&ace->stall_timer);
			break;
		}
		del_timer(&ace->stall_timer);
		ace->fsm_continue_flag = 0;
		break;

	case ACE_FSM_STATE_REQ_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* Already have the lock, jump to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* Request the lock */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
		ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
		break;

	case ACE_FSM_STATE_WAIT_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* got the lock; move to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* wait a bit for the lock */
		ace_fsm_yield(ace);
		break;

	case ACE_FSM_STATE_WAIT_CFREADY:
		status = ace_in32(ace, ACE_STATUS);
		if (!(status & ACE_STATUS_RDYFORCFCMD) ||
		    (status & ACE_STATUS_CFBSY)) {
			/* CF card isn't ready; it needs to be polled */
			ace_fsm_yield(ace);
			break;
		}

		/* Device is ready for command; determine what to do next */
		if (ace->id_req_count)
			ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
		else
			ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
		break;

	case ACE_FSM_STATE_IDENTIFY_PREPARE:
		/* Send identify command */
		ace->fsm_task = ACE_TASK_IDENTIFY;
		ace->data_ptr = ace->cf_id;
		ace->data_count = ACE_BUF_PER_SECTOR;
		ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* irq handler takes over from this point; wait for the
		 * transfer to complete */
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
		ace_fsm_yieldirq(ace);
		break;

	case ACE_FSM_STATE_IDENTIFY_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				ace->data_count);
			ace_fsm_yield(ace);
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			ace_fsm_yield(ace);
			break;
		}

		/* Transfer the next buffer */
		ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transferred, jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* transfer finished; kick state machine */
		dev_dbg(ace->dev, "identify finished\n");
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
		break;

	case ACE_FSM_STATE_IDENTIFY_COMPLETE:
		ace_fix_driveid(ace->cf_id);
		ace_dump_mem(ace->cf_id, 512);	/* Debug: Dump out disk ID */

		if (ace->data_result) {
			/* Error occurred, disable the disk */
			ace->media_change = 1;
			set_capacity(ace->gd, 0);
			dev_err(ace->dev, "error fetching CF id (%i)\n",
				ace->data_result);
		} else {
			ace->media_change = 0;

			/* Record disk parameters */
			set_capacity(ace->gd,
				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
			dev_info(ace->dev, "capacity: %i sectors\n",
				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
		}

		/* We're done, drop to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = ace->data_result;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
		break;

	case ACE_FSM_STATE_REQ_PREPARE:
		req = ace_get_next_request(ace->queue);
		if (!req) {
			ace->fsm_state = ACE_FSM_STATE_IDLE;
			break;
		}
		blk_start_request(req);

		/* Okay, it's a data request, set it up for transfer */
		dev_dbg(ace->dev,
			"request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
			(unsigned long long)blk_rq_pos(req),
			blk_rq_sectors(req), blk_rq_cur_sectors(req),
			rq_data_dir(req));

		ace->req = req;
		ace->data_ptr = req->buffer;
		ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
		ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);

		count = blk_rq_sectors(req);
		if (rq_data_dir(req)) {
			/* Kick off write request */
			dev_dbg(ace->dev, "write data\n");
			ace->fsm_task = ACE_TASK_WRITE;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_WRITE_DATA);
		} else {
			/* Kick off read request */
			dev_dbg(ace->dev, "read data\n");
			ace->fsm_task = ACE_TASK_READ;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_READ_DATA);
		}

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* Move to the transfer state.  The systemace will raise
		 * an interrupt once there is something to do
		 */
		ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
		if (ace->fsm_task == ACE_TASK_READ)
			ace_fsm_yieldirq(ace);	/* wait for data ready */
		break;

	case ACE_FSM_STATE_REQ_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev,
				"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				blk_rq_cur_sectors(ace->req) * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yield(ace);	/* need to poll CFBSY bit */
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			dev_dbg(ace->dev,
				"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				blk_rq_cur_sectors(ace->req) * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yieldirq(ace);
			break;
		}

		/* Transfer the next buffer */
		if (ace->fsm_task == ACE_TASK_WRITE)
			ace->reg_ops->dataout(ace);
		else
			ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transferred, jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* bio finished; is there another one? */
		if (__blk_end_request_cur(ace->req, 0)) {
			/* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
			 *      blk_rq_sectors(ace->req),
			 *      blk_rq_cur_sectors(ace->req));
			 */
			ace->data_ptr = ace->req->buffer;
			ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
			ace_fsm_yieldirq(ace);
			break;
		}

		ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
		break;

	case ACE_FSM_STATE_REQ_COMPLETE:
		ace->req = NULL;

		/* Finished request; go to idle state */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;

	default:
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;
	}
}
Example #18
/**
* @brief 	Request service function.
* @param 	sd[in]: Card information.
* @param 	req[in]: Request to service.
* @return 	SUCCESS/ERROR_ID.
*/
static int gp_sdcard_xfer_request(gpSDInfo_t *sd, struct request *req)
{
	int ret = 1;

	while (ret)
	{
		unsigned int ln;
		unsigned int retry = 0;

		ln = blk_rq_map_sg(sd->queue, req, sd->sg);

#if 0	/* This is used for usb disk check */
		{
			bool do_sync = (rq_is_sync(req) && rq_data_dir(req) == WRITE);
			if (do_sync)
			{
				DEBUG("[Jerry] detect do write sync\n");
			}
		}
#endif
		while(1)
		{
			ret = gp_sdcard_transfer_scatter(sd, blk_rq_pos(req), sd->sg, ln, rq_data_dir(req));
			/* ----- Re-try procedure ----- */
			if(ret<0)
			{
				unsigned int cid[4];
				unsigned int capacity;
				if((retry>=SD_RETRY)||(gp_sdcard_ckinsert(sd)==0)||sd->fremove)
					goto out_error;
				/* ----- Re-initialize sd card ----- */
				memcpy(cid, sd->CID, sizeof(cid));
				capacity = sd->capacity;
				if(gp_sdcard_cardinit(sd)!=0)
				{
					DERROR("[%d]: Re-initialize fail\n",sd->device_id);
					goto out_error;
				}
				else if((cid[0]!=sd->CID[0])||(cid[1]!=sd->CID[1])||(cid[2]!=sd->CID[2])||(cid[3]!=sd->CID[3])||(capacity!=sd->capacity))
				{
					DERROR("[%d]: Different card insert\n",sd->device_id);
					goto out_error;
				}
				retry ++;
			}
			else
				break;
		}
		/* ----- End of request ----- */
		spin_lock_irq(&sd->lock);
		ret = __blk_end_request(req, 0, ret<<9);
		spin_unlock_irq(&sd->lock);
	}
	return 1;
out_error:
	spin_lock_irq(&sd->lock);
	DEBUG("[%d]: txrx fail %d\n", sd->device_id, ret);
	__blk_end_request_all(req, ret);
	spin_unlock_irq(&sd->lock);
	return -ENXIO;
}
Example #19
static int stheno_request_thread( void *arg )
{
#if defined( USE_KMALLOC )
    struct euryale_block *euryale_block;
#else
    /* kernel stack size has 1024 bytes(?) */
    /*struct euryale_block euryale_block[MAX_SECTORS+1];*/
#endif
    unsigned long sector;
    unsigned long nr;
    struct request *req;
    struct bio_vec *bvec;
    struct req_iterator iter;
    int ret;
    int i;

#if defined( USE_KMALLOC )
    euryale_block = (struct euryale_block*)kmalloc( sizeof( struct euryale_block ) * (MAX_SECTORS + 1), GFP_KERNEL );
    if( euryale_block == NULL ){
        print_error( "stheno kmalloc failed.\n" );
    }
#endif
    while( 1 ){
        ret = wait_event_interruptible( stheno_wait_q, (kthread_should_stop() || stheno_wakeup == 1) );
        if( ret != 0 ) break;

        stheno_wakeup = 0;

        if( kthread_should_stop() ) break;

        wake_lock( &stheno_wakelock );

        while( 1 ){
            spin_lock_irq( stheno_queue->queue_lock );
            req = blk_fetch_request( stheno_queue );
            spin_unlock_irq( stheno_queue->queue_lock );
            if( req == NULL ) break;

#if 0 /* S Mod 2011.12.27 For ICS */
            if( !blk_fs_request( req ) ){
#else
            if( !(req->cmd_type == REQ_TYPE_FS) ){
#endif /* S Mod 2011.12.27 For ICS */
                ret = -EIO;
                goto skip;
            }
            if( stheno_read_sector0() != 0 ){
                ret = -EIO;
                goto skip;
            }
            sector = blk_rq_pos( req );
            nr = blk_rq_sectors( req );

/* S Add 2012.02.09 For ICS */
            if( nr == 0 ){
                ret = -EIO;
                goto skip;
            }
/* E Add 2012.02.09 For ICS */

#if defined( USE_KMALLOC )
            if( euryale_block == NULL ){
                ret = -EIO;
                goto skip;
            }
#endif
            i = 0;
            rq_for_each_segment( bvec, req, iter ){
                if( i >= MAX_SECTORS ){
                    print_error( "stheno euryale_block overrun error.\n" );
                    ret = -EIO;
                    goto skip;
                }
                euryale_block[i].buffer = page_address(bvec->bv_page) + bvec->bv_offset;
                euryale_block[i].length = bvec->bv_len;
                ++i;
            }
            euryale_block[i].buffer = 0; /* end of buffer */

            if( rq_data_dir( req ) == 0 ){
                ret = euryale_api_blockread( stheno_lbaoffset + sector, nr, euryale_block );
                print_debug( "stheno euryale_api_blockread sec=%ld nr=%ld ret=%d.\n", stheno_lbaoffset + sector, nr, ret );
            }else{
                ret = euryale_api_blockwrite( stheno_lbaoffset + sector, nr, euryale_block );
                print_debug( "stheno euryale_api_blockwrite sec=%ld nr=%ld ret=%d.\n", stheno_lbaoffset + sector, nr, ret );
            }
        skip:
            spin_lock_irq( stheno_queue->queue_lock );
            __blk_end_request_all( req, ret == 0 ? 0 : -EIO );
            /*__blk_end_request( req, ret == 0 ? 0 : -EIO, blk_rq_bytes( req ) );*/
            spin_unlock_irq( stheno_queue->queue_lock );
            /*print_debug( "stheno blk_end_request called.(ret=%d)\n", ret );*/
        }
        wake_unlock( &stheno_wakelock );
        /*print_debug( "stheno end of request.\n" );*/
    }
    print_debug("stheno_request_thread was terminated.\n");
#if defined( USE_KMALLOC )
    if( euryale_block != NULL ) kfree( euryale_block );
#endif
    return 0;
}
#endif

static void stheno_request( struct request_queue *q )
{
    /* caution : should be atomic procedure */
    stheno_wakeup = 1;
    wake_up_interruptible( &stheno_wait_q );
}
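
The split in this driver follows from the locking rules: the legacy request_fn is invoked with q->queue_lock held, in atomic context, so stheno_request() does nothing but set a flag and wake the wait queue (hence the "should be atomic" comment), while stheno_request_thread() does the actual fetching and transfer in process context, retaking queue_lock only around blk_fetch_request() and the completion calls.
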
Example #21
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		struct mmc_queue_req *tmp;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

#ifdef MTK_MMC_USE_ASYNC_REQUEST
		if (req || mq->mqrq_prev->req) {
#else
		if (req) {
#endif
			set_current_state(TASK_RUNNING);
			mq->issue_fn(mq, req);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}

		/* Current request becomes previous request and vice versa. */
		mq->mqrq_prev->brq.mrq.data = NULL;
		mq->mqrq_prev->req = NULL;
		tmp = mq->mqrq_prev;
#ifdef MTK_MMC_USE_ASYNC_REQUEST
		mq->mqrq_prev = mq->mqrq_cur;
#endif
		mq->mqrq_cur = tmp;
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
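
The tmp swap at the bottom of the loop is what implements the asynchronous request handling: when MTK_MMC_USE_ASYNC_REQUEST is set, mqrq_cur and mqrq_prev are exchanged each iteration, so issue_fn can prepare the current request while the previous one is still in flight, which is also why the thread only sleeps once both slots are empty.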

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}