static inline ide_drive_t *choose_drive(ide_hwgroup_t *hwgroup)
{
	ide_drive_t *drive, *best;

repeat:
	best = NULL;
	drive = hwgroup->drive;

	/*
	 * drive is doing pre-flush, ordered write, post-flush sequence. even
	 * though that is 3 requests, it must be seen as a single transaction.
	 * we must not preempt this drive until that is complete
	 */
	if (blk_queue_flushing(drive->queue)) {
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(drive->queue);
		return drive;
	}

	do {
		u8 dev_s = !!(drive->dev_flags & IDE_DFLAG_SLEEPING);
		u8 best_s = (best && !!(best->dev_flags & IDE_DFLAG_SLEEPING));

		if ((dev_s == 0 || time_after_eq(jiffies, drive->sleep)) &&
		    !elv_queue_empty(drive->queue)) {
			if (best == NULL ||
			    (dev_s &&
			     (best_s == 0 ||
			      time_before(drive->sleep, best->sleep))) ||
			    (best_s == 0 &&
			     time_before(WAKEUP(drive), WAKEUP(best)))) {
				if (!blk_queue_plugged(drive->queue))
					best = drive;
			}
		}
	} while ((drive = drive->next) != hwgroup->drive);

	if (best && (best->dev_flags & IDE_DFLAG_NICE1) &&
	    (best->dev_flags & IDE_DFLAG_SLEEPING) == 0 &&
	    best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
		long t = (signed long)(WAKEUP(best) - jiffies);

		if (t >= WAIT_MIN_SLEEP) {
			/*
			 * We *may* have some time to spare, but first let's see if
			 * someone can potentially benefit from our nice mood today..
			 */
			drive = best->next;
			do {
				if ((drive->dev_flags & IDE_DFLAG_SLEEPING) == 0 &&
				    time_before(jiffies - best->service_time,
						WAKEUP(drive)) &&
				    time_before(WAKEUP(drive), jiffies + t)) {
					ide_stall_queue(best,
						min_t(long, t, 10 * WAIT_MIN_SLEEP));
					goto repeat;
				}
			} while ((drive = drive->next) != best);
		}
	}
	return best;
}
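For context, WAKEUP() above is the legacy IDE fairness heuristic: a drive "deserves" service again once twice its last service time has elapsed since service started. A sketch of the supporting definitions as they appeared in old <linux/ide.h> (quoted from memory, so treat them as an assumption rather than the exact source):

	/* jiffy at which a drive deserves to be serviced again */
	#define WAKEUP(drive)	((drive)->service_start + 2 * (drive)->service_time)
	/* minimum stall/sleep time, in jiffies (about 20 ms) */
	#define WAIT_MIN_SLEEP	(2 * HZ / 100)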
/*
 * Input/Output thread.
 */
static int sd_io_thread(void *param)
{
	struct sd_host *host = param;
	struct request *req;
	unsigned long flags;
	int nr_sectors;
	int error;

#if 0
	/*
	 * We are going to perform badly due to the read problem explained
	 * above. At least, be nice with other processes trying to use the
	 * cpu.
	 */
	set_user_nice(current, 0);
#endif
	current->flags |= PF_NOFREEZE|PF_MEMALLOC;

	mutex_lock(&host->io_mutex);
	for (;;) {
		req = NULL;
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&host->queue_lock, flags);
		if (!blk_queue_plugged(host->queue))
			req = blk_fetch_request(host->queue);
		spin_unlock_irqrestore(&host->queue_lock, flags);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			mutex_unlock(&host->io_mutex);
			schedule();
			mutex_lock(&host->io_mutex);
			continue;
		}

		set_current_state(TASK_INTERRUPTIBLE);
		nr_sectors = sd_do_request(host, req);
		error = (nr_sectors < 0) ? nr_sectors : 0;

		spin_lock_irqsave(&host->queue_lock, flags);
		__blk_end_request(req, error, nr_sectors << 9);
		spin_unlock_irqrestore(&host->queue_lock, flags);
	}
	mutex_unlock(&host->io_mutex);

	return 0;
}
static int card_queue_thread(void *d)
{
	struct card_queue *cq = d;
	struct request_queue *q = cq->queue;
	/* unsigned char rewait; */

	/*
	 * Set iothread to ensure that we aren't put to sleep by
	 * the process freezing. We handle suspension ourselves.
	 */
	current->flags |= PF_MEMALLOC;

	down(&cq->thread_sem);
	do {
		struct request *req = NULL;

		/* wait for sdio to handle the irq & transfer the data */
		/*
		for (rewait = 3; (!sdio_irq_handled) && (rewait--); )
			schedule();
		*/

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		q = cq->queue;
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		cq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&cq->thread_sem);
			schedule();
			down(&cq->thread_sem);
			continue;
		}

		set_current_state(TASK_RUNNING);
		cq->issue_fn(cq, req);
		cond_resched();
	} while (1);

	/* stop the queue */
	spin_lock_irq(q->queue_lock);
	queue_flag_set_unlocked(QUEUE_FLAG_STOPPED, cq->queue);
	spin_unlock_irq(q->queue_lock);
	up(&cq->thread_sem);
	cq->thread = NULL;

	return 0;
}
/**
 * @brief Request thread function.
 * @param d[in]: Private data.
 * @return SUCCESS/ERROR_ID.
 */
static int gp_sdcard_queue_thread(void *d)
{
	gpSDInfo_t *sd = d;
	struct request_queue *q = sd->queue;

	current->flags |= PF_MEMALLOC;

	down(&sd->thread_sem);
	do {
		struct request *req = NULL;

		if (sd->queue == NULL)
			continue;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		sd->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&sd->thread_sem);
			schedule();
			down(&sd->thread_sem);
			continue;
		}

		set_current_state(TASK_RUNNING);
		if (gp_sdcard_ckinsert(sd) == 0) {
			spin_lock_irq(&sd->lock);
			__blk_end_request_all(req, -ENXIO);
			spin_unlock_irq(&sd->lock);
			sd->fremove = 1;
			continue;
		}
		if (gp_sdcard_xfer_request(sd, req) < 0 &&
		    gp_sdcard_inner_card(sd) != 1)
			sd->fremove = 1;
	} while (1);
	up(&sd->thread_sem);

	return 0;
}
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * Set iothread to ensure that we aren't put to sleep by
	 * the process freezing.  We handle suspension ourselves.
	 */
	current->flags |= PF_MEMALLOC|PF_NOFREEZE;

	daemonize("mmcqd");

	complete(&mq->thread_complete);

	down(&mq->thread_sem);
	add_wait_queue(&mq->thread_wq, &wait);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			mq->req = req = elv_next_request(q);
		/* must pair the _irq lock/unlock, or irqs stay disabled */
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (mq->flags & MMC_QUEUE_EXIT)
				break;
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}

		set_current_state(TASK_RUNNING);
		mq->issue_fn(mq, req);
	} while (1);
	remove_wait_queue(&mq->thread_wq, &wait);
	up(&mq->thread_sem);
	complete_and_exit(&mq->thread_complete, 0);

	return 0;
}
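This variant predates the kthread API: it daemonize()s itself and exits when MMC_QUEUE_EXIT is set. A minimal sketch of the matching shutdown side, assuming a cleanup helper of this era (the surrounding code is illustrative, not part of the snippet):

	/* e.g. in mmc_cleanup_queue(): ask the thread to exit, kick it
	 * out of schedule(), and wait until complete_and_exit() fires */
	mq->flags |= MMC_QUEUE_EXIT;
	wake_up(&mq->thread_wq);
	wait_for_completion(&mq->thread_complete);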
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	/*
	 * Set iothread to ensure that we aren't put to sleep by
	 * the process freezing.  We handle suspension ourselves.
	 */
	current->flags |= PF_MEMALLOC|PF_NOFREEZE;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}

		set_current_state(TASK_RUNNING);
		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}

		set_current_state(TASK_RUNNING);
		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
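The kthread-based variants above are parked and resumed from outside the loop. A minimal sketch of that lifecycle, assuming the usual mmc_init_queue()/mmc_cleanup_queue() pairing of this era (names outside the snippets are illustrative, not taken from the code above):

	/* creation, e.g. in mmc_init_queue() */
	sema_init(&mq->thread_sem, 1);
	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread))
		return PTR_ERR(mq->thread);

	/* teardown, e.g. in mmc_cleanup_queue(): kthread_stop() wakes the
	 * thread out of schedule(); kthread_should_stop() then returns true,
	 * so the loop breaks on the next empty fetch */
	kthread_stop(mq->thread);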
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct request *req;
#ifdef CONFIG_MMC_PERF_PROFILING
	ktime_t start, diff;
	struct mmc_host *host = mq->card->host;
	unsigned long bytes_xfer;
#endif

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		req = NULL;	/* Must be set to NULL at each iteration */

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}

		set_current_state(TASK_RUNNING);
#ifdef CONFIG_IDLECTRL_EMMC_CUST_SH
		mmc_idle_wake_lock();
#endif /* CONFIG_IDLECTRL_EMMC_CUST_SH */
#ifdef CONFIG_MMC_PERF_PROFILING
		bytes_xfer = blk_rq_bytes(req);
		if (rq_data_dir(req) == READ) {
			start = ktime_get();
			mq->issue_fn(mq, req);
			diff = ktime_sub(ktime_get(), start);
			host->perf.rbytes_mmcq += bytes_xfer;
			host->perf.rtime_mmcq =
				ktime_add(host->perf.rtime_mmcq, diff);
		} else {
			start = ktime_get();
			mq->issue_fn(mq, req);
			diff = ktime_sub(ktime_get(), start);
			host->perf.wbytes_mmcq += bytes_xfer;
			host->perf.wtime_mmcq =
				ktime_add(host->perf.wtime_mmcq, diff);
		}
#else
		mq->issue_fn(mq, req);
#endif
#ifdef CONFIG_IDLECTRL_EMMC_CUST_SH
		mmc_idle_wake_unlock();
#endif /* CONFIG_IDLECTRL_EMMC_CUST_SH */
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}

		set_current_state(TASK_RUNNING);
#ifdef CONFIG_MMC_BLOCK_PARANOID_RESUME
		if (mq->check_status) {
			struct mmc_command cmd;

			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = mq->card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

				mmc_claim_host(mq->card->host);
				err = mmc_wait_for_cmd(mq->card->host, &cmd, 5);
				mmc_release_host(mq->card->host);

				if (err) {
					printk(KERN_ERR "%s: failed to get status (%d)\n",
					       __func__, err);
					msleep(5);
					continue;
				}
				printk(KERN_DEBUG "%s: status 0x%.8x\n",
				       __func__, cmd.resp[0]);
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
				 (R1_CURRENT_STATE(cmd.resp[0]) == 7));
			mq->check_status = 0;
		}
#endif
		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
/* west bridge storage async api on_completed callback */
static void cyasblkdev_issuecallback(
	/* Handle to the device completing the storage operation */
	cy_as_device_handle handle,
	/* The media type completing the operation */
	cy_as_media_type type,
	/* The device completing the operation */
	uint32_t device,
	/* The unit completing the operation */
	uint32_t unit,
	/* The block number of the completed operation */
	uint32_t block_number,
	/* The type of operation */
	cy_as_oper_type op,
	/* The error status */
	cy_as_return_status_t status
	)
{
	int retry_cnt = 0;
	DBGPRN_FUNC_NAME;

	if (status != CY_AS_ERROR_SUCCESS) {
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: async r/w: op:%d failed with error %d at address %d\n",
			__func__, op, status, block_number);
#endif
	}

#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message(
		"%s calling blk_end_request from issue_callback "
		"req=0x%x, status=0x%x, nr_sectors=0x%x\n",
		__func__, (unsigned int)gl_bd->queue.req, status,
		(unsigned int)blk_rq_sectors(gl_bd->queue.req));
#endif

	/* note: blk_end_request w/o __ prefix should
	 * not require spinlocks on the queue */
	while (blk_end_request(gl_bd->queue.req, status,
			blk_rq_sectors(gl_bd->queue.req)*512)) {
		retry_cnt++;
	}

#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message(
		"%s blkdev_callback: ended rq on %d sectors, "
		"with err:%d, n:%d times\n",
		__func__, (int)blk_rq_sectors(gl_bd->queue.req),
		status, retry_cnt);
#endif

	spin_lock_irq(&gl_bd->lock);

	/* elevate next request, if there is one */
	if (!blk_queue_plugged(gl_bd->queue.queue)) {
		/* queue is not plugged */
		gl_bd->queue.req = blk_fetch_request(gl_bd->queue.queue);
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s blkdev_callback: "
			"blk_fetch_request():%p\n",
			__func__, gl_bd->queue.req);
#endif
	}

	if (gl_bd->queue.req) {
		spin_unlock_irq(&gl_bd->lock);
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s blkdev_callback: about to "
			"call issue_fn:%p\n",
			__func__, gl_bd->queue.req);
#endif
		gl_bd->queue.issue_fn(&gl_bd->queue, gl_bd->queue.req);
	} else {
		spin_unlock_irq(&gl_bd->lock);
	}
}
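The completion loop above relies on blk_end_request() returning true while the request still has unfinished sectors, and false once it is fully ended. A distilled sketch of that contract (the prototype is assumed from the block-layer API of this period, not quoted from the snippet):

	/* completes up to nr_bytes of rq; returns false once rq is done */
	bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes);

	/* hence the idiom used above: */
	while (blk_end_request(rq, error, blk_rq_sectors(rq) * 512))
		;	/* keep completing until the whole request is ended */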
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct request *req;
	/* ruanmeisi_20100603 */
	int issue_ret = 0;
#ifdef CONFIG_MMC_PERF_PROFILING
	ktime_t start, diff;
	struct mmc_host *host = mq->card->host;
	unsigned long bytes_xfer;
#endif

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		req = NULL;

		/* ruanmeisi_20100603 */
		if (kthread_should_stop()) {
			remove_all_req(mq);
			break;
		}
		/* end */

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}

		set_current_state(TASK_RUNNING);
#ifdef CONFIG_MMC_AUTO_SUSPEND
		mmc_auto_suspend(mq->card->host, 0);
#endif
#ifdef CONFIG_MMC_BLOCK_PARANOID_RESUME
		if (mq->check_status) {
			struct mmc_command cmd;
			int retries = 3;

			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = mq->card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

				mmc_claim_host(mq->card->host);
				err = mmc_wait_for_cmd(mq->card->host, &cmd, 5);
				mmc_release_host(mq->card->host);

				if (err) {
					printk(KERN_ERR "%s: failed to get status (%d)\n",
					       __func__, err);
					msleep(5);
					retries--;
					continue;
				}
				printk(KERN_DEBUG "%s: status 0x%.8x\n",
				       __func__, cmd.resp[0]);
			} while (retries &&
				 (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
				  (R1_CURRENT_STATE(cmd.resp[0]) == 7)));
			mq->check_status = 0;
		}
#endif
		/* ruanmeisi_20100529 */
#ifdef CONFIG_MMC_PERF_PROFILING
		bytes_xfer = blk_rq_bytes(req);
		if (rq_data_dir(req) == READ) {
			start = ktime_get();
			issue_ret = mq->issue_fn(mq, req);
			diff = ktime_sub(ktime_get(), start);
			host->perf.rbytes_mmcq += bytes_xfer;
			host->perf.rtime_mmcq =
				ktime_add(host->perf.rtime_mmcq, diff);
		} else {
			start = ktime_get();
			issue_ret = mq->issue_fn(mq, req);
			diff = ktime_sub(ktime_get(), start);
			host->perf.wbytes_mmcq += bytes_xfer;
			host->perf.wtime_mmcq =
				ktime_add(host->perf.wtime_mmcq, diff);
		}
#else
		issue_ret = mq->issue_fn(mq, req);
#endif
		/* ruanmeisi */
		if (0 == issue_ret) {
			int err;

			mmc_claim_host(mq->card->host);
			err = mmc_send_status(mq->card, NULL);
			mmc_release_host(mq->card->host);
			if (err) {
				printk(KERN_ERR
				       "rms:%s: failed to get status (%d) maybe the card is removed\n",
				       __func__, err);
				/* sdcard is removed? */
				mmc_detect_change(mq->card->host, 0);
				msleep(500);
				/* set_current_state(TASK_INTERRUPTIBLE); */
				/* schedule_timeout(HZ / 2); */
				continue;
			}
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
static void do_read_write(void *data)
{
	XStatus stat;
	struct request *req;
	request_queue_t *q;

	q = xsysace_queue;

	spin_lock_irq(&xsysace_lock);
	if (blk_queue_plugged(q)) {
		printk(KERN_ERR "XSysAce: Queue is plugged\n");
		spin_unlock_irq(&xsysace_lock);
		return;
	}

	while ((req = elv_next_request(q)) != NULL) {
		if (!blk_fs_request(req)) {
			printk(KERN_NOTICE "Skip non-fs request\n");
			xsysace_end_request(req, 0);
			continue;
		}
		if (rq_data_dir(req) == WRITE) {
			req_str = "writing";
			req_fnc = XSysAce_SectorWrite;
		} else {
			req_str = "reading";
			req_fnc = XSysAce_SectorRead;
		}
		xsysace_req = req;
		break;
	}
	spin_unlock_irq(&xsysace_lock);

	if (!req)
		return;

	/* We have a request. */
	while ((stat = XSysAce_Lock(&SysAce, 0)) == XST_DEVICE_BUSY)
		msleep_interruptible(1);
	if (stat != XST_SUCCESS) {
		printk(KERN_ERR "%s: Error %d when locking.\n",
		       DEVICE_NAME, stat);
		xsa_complete_request(0);	/* Request failed. */
	}

	while ((stat = req_fnc(&SysAce, xsysace_req->sector,
			       xsysace_req->current_nr_sectors,
			       xsysace_req->buffer)) == XST_DEVICE_BUSY)
		msleep_interruptible(1);

	/*
	 * If the stat is XST_SUCCESS, we have successfully gotten the
	 * request started on the hardware. The completion (or error)
	 * interrupt will unlock the CompactFlash and complete the request,
	 * so we don't need to do anything except just loop around and wait
	 * for the next request. If the status is not XST_SUCCESS, we need
	 * to finish the request with an error before waiting for the next
	 * request.
	 */
	if (stat != XST_SUCCESS) {
		printk(KERN_ERR "%s: Error %d when %s sector %lu.\n",
		       DEVICE_NAME, stat, req_str, xsysace_req->sector);
		xsa_complete_request(0);	/* Request failed. */
	}
}
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
#ifdef CONFIG_MMC_DISCARD_MERGE
	int ret;
	int state = DCS_NO_DISCARD_REQ;
	int flag;
#endif

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
#ifdef CONFIG_MMC_DISCARD_MERGE
			if (mmc_card_mmc(mq->card)) {
				flag = mmc_read_idle(mq->card);
				if (flag == DCS_IDLE_OPS_TURNED_ON) {
					mmc_claim_host(mq->card->host);
					ret = mmc_do_idle_ops(mq->card);
					mmc_release_host(mq->card->host);
					if (ret) {
						if (mq->flags & MMC_QUEUE_SUSPENDED)
							goto sched;
					} else {
						state = DCS_NO_DISCARD_REQ;
						mmc_clear_idle(mq->card);
					}
					continue;
				} else if (flag == DCS_MMC_DEVICE_REMOVED) {
					/* do nothing */
				} else if (state == DCS_DISCARD_REQ) {
					state = DCS_IDLE_TIMER_TRIGGERED;
					mmc_trigger_idle_timer(mq->card);
				}
			}
sched:
#endif
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
#ifdef CONFIG_MMC_DISCARD_MERGE
		else if (mmc_card_mmc(mq->card)) {
			if (state == DCS_NO_DISCARD_REQ &&
			    req->cmd_flags & REQ_DISCARD)
				state = DCS_DISCARD_REQ;
		}
#endif

		set_current_state(TASK_RUNNING);
		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
/* queue worker thread */
static int cyasblkdev_queue_thread(void *d)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cyasblkdev_queue *bq = d;
	struct request_queue *q = bq->queue;
	u32 qth_pid;

	DBGPRN_FUNC_NAME;

	/*
	 * set iothread to ensure that we aren't put to sleep by
	 * the process freezing. we handle suspension ourselves.
	 */
	daemonize("cyasblkdev_queue_thread");

	/* signal to queue_init() so it could continue */
	complete(&bq->thread_complete);

	down(&bq->thread_sem);
	add_wait_queue(&bq->thread_wq, &wait);

	qth_pid = current->pid;

#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message(
		"%s:%x started, bq:%p, q:%p\n", __func__, qth_pid, bq, q);
#endif

	do {
		struct request *req = NULL;

		/* the thread wants to be woken up by signals as well */
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(q->queue_lock);

#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: for bq->queue is null\n", __func__);
#endif

		if (!bq->req) {
			/* chk if queue is plugged */
			if (!blk_queue_plugged(q)) {
				bq->req = req = blk_fetch_request(q);
#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message(
					"%s: blk_fetch_request:%x\n",
					__func__, (uint32_t)req);
#endif
			} else {
#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message(
					"%s: queue plugged, "
					"skip blk_fetch()\n", __func__);
#endif
			}
		}
		spin_unlock_irq(q->queue_lock);

#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: checking if request queue is null\n", __func__);
#endif

		if (!req) {
			if (bq->flags & CYASBLKDEV_QUEUE_EXIT) {
#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message(
					"%s:got QUEUE_EXIT flag\n", __func__);
#endif
				break;
			}

#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message(
				"%s: request queue is null, goto sleep, "
				"thread_sem->count=%d\n",
				__func__, bq->thread_sem.count);
			if (spin_is_locked(q->queue_lock)) {
				cy_as_hal_print_message("%s: queue_lock "
					"is locked, need to release\n",
					__func__);
				spin_unlock(q->queue_lock);

				if (spin_is_locked(q->queue_lock))
					cy_as_hal_print_message(
						"%s: unlock did not work\n",
						__func__);
			} else {
				cy_as_hal_print_message(
					"%s: checked lock, is not locked\n",
					__func__);
			}
#endif

			up(&bq->thread_sem);
			/* yields to the next rdytorun proc,
			 * then goes back to sleep */
			schedule();
			down(&bq->thread_sem);

#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message(
				"%s: wake_up, continue\n", __func__);
#endif
			continue;
		}

		/* new req received, issue it to the driver */
		set_current_state(TASK_RUNNING);

#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: issued a RQ:%x\n", __func__, (uint32_t)req);
#endif

		bq->issue_fn(bq, req);

#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: bq->issue_fn() returned\n", __func__);
#endif
	} while (1);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&bq->thread_wq, &wait);
	up(&bq->thread_sem);
	complete_and_exit(&bq->thread_complete, 0);

#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("%s: is finished\n", __func__);
#endif

	return 0;
}
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct request *req;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		req = NULL;	/* Must be set to NULL at each iteration */

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}

		set_current_state(TASK_RUNNING);
#ifdef CONFIG_MMC_AUTO_SUSPEND
		mmc_auto_suspend(mq->card->host, 0);
#endif
#ifdef CONFIG_MMC_BLOCK_PARANOID_RESUME
		if (mq->check_status) {
			struct mmc_command cmd;
			int retries = 3;
			unsigned long delay = jiffies + HZ;

			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = mq->card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

				mmc_claim_host(mq->card->host);
				err = mmc_wait_for_cmd(mq->card->host, &cmd, 5);
				mmc_release_host(mq->card->host);

				if (err) {
					printk(KERN_ERR "%s: failed to get status (%d)\n",
					       __func__, err);
					msleep(5);
					retries--;
					continue;
				}
				if (time_after(jiffies, delay)) {
					printk(KERN_ERR
					       "failed to get card ready\n");
					break;
				}
				printk(KERN_DEBUG "%s: status 0x%.8x\n",
				       __func__, cmd.resp[0]);
			} while (retries &&
				 (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
				  (R1_CURRENT_STATE(cmd.resp[0]) == 7)));
			mq->check_status = 0;
		}
#endif
		if (!(mq->issue_fn(mq, req)))
			printk(KERN_ERR "mmc_blk_issue_rq failed!!\n");
	} while (1);
	up(&mq->thread_sem);

	return 0;
}