Example #1
/**
 * blk_execute_rq - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 */
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
		   struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	char sense[SCSI_SENSE_BUFFERSIZE];
	int err = 0;
	unsigned long hang_check;

	/*
	 * we need an extra reference to the request, so we can look at
	 * it after io completion
	 */
	rq->ref_count++;

	if (!rq->sense) {
		memset(sense, 0, sizeof(sense));
		rq->sense = sense;
		rq->sense_len = 0;
	}

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion(&wait);

	if (rq->errors)
		err = -EIO;

	return err;
}
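
For context, a caller of blk_execute_rq() in this kernel era typically allocates the request itself and releases it afterwards. Below is a minimal, hypothetical sketch of such a caller; the function name, command type, and timeout are assumptions, not taken from the example above.

/* Hedged sketch: issue a prepared request synchronously and reap the result. */
static int example_issue_rq(struct request_queue *q, struct gendisk *disk)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;	/* assumed: a packet command */
	rq->timeout = 60 * HZ;

	/* Queues at the tail and sleeps until blk_end_sync_rq() completes it. */
	err = blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	return err;
}
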
Example #2
int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
		void *data, int rw)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, 1);
	bio->bi_sector = sector;
	bio->bi_bdev = bdev;
	bio->bi_end_io = hfsplus_end_io_sync;
	bio->bi_private = &wait;

	/*
	 * We always submit one sector at a time, so bio_add_page must not fail.
	 */
	if (bio_add_page(bio, virt_to_page(data), HFSPLUS_SECTOR_SIZE,
			 offset_in_page(data)) != HFSPLUS_SECTOR_SIZE)
		BUG();

	submit_bio(rw, bio);
	wait_for_completion(&wait);

	if (!bio_flagged(bio, BIO_UPTODATE))
		return -EIO;
	return 0;
}
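
The bi_end_io callback used above is not shown in this snippet. A plausible implementation, following the bio completion convention of this kernel version, simply completes the on-stack completion stored in bi_private (sketch, not verified against the hfsplus source):

/* Sketch of the completion callback wired up via bio->bi_private above. */
static void hfsplus_end_io_sync(struct bio *bio, int err)
{
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	complete(bio->bi_private);
}
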
Example #3
/**
 *	sysfs_deactivate - deactivate sysfs_dirent
 *	@sd: sysfs_dirent to deactivate
 *
 *	Deny new active references and drain existing ones.
 */
static void sysfs_deactivate(struct sysfs_dirent *sd)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int v;

	BUG_ON(sd->s_sibling || !(sd->s_flags & SYSFS_FLAG_REMOVED));

	if (!(sysfs_type(sd) & SYSFS_ACTIVE_REF))
		return;

	sd->s_sibling = (void *)&wait;

	rwsem_acquire(&sd->dep_map, 0, 0, _RET_IP_);
	/* atomic_add_return() is a mb(), put_active() will always see
	 * the updated sd->s_sibling.
	 */
	v = atomic_add_return(SD_DEACTIVATED_BIAS, &sd->s_active);

	if (v != SD_DEACTIVATED_BIAS) {
		lock_contended(&sd->dep_map, _RET_IP_);
		wait_for_completion(&wait);
	}

	sd->s_sibling = NULL;

	lock_acquired(&sd->dep_map, _RET_IP_);
	rwsem_release(&sd->dep_map, 1, _RET_IP_);
}
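
The waiter above parks the completion pointer in sd->s_sibling; the holder of the last active reference is expected to find it there and wake the waiter. A sketch of that release path, modeled on sysfs_put_active() of the same era (treat the details as assumptions):

/* Sketch: drop an active reference and wake a pending sysfs_deactivate(). */
void sysfs_put_active(struct sysfs_dirent *sd)
{
	struct completion *cmpl;
	int v;

	if (unlikely(!sd))
		return;

	rwsem_release(&sd->dep_map, 1, _RET_IP_);
	v = atomic_dec_return(&sd->s_active);
	if (likely(v != SD_DEACTIVATED_BIAS))
		return;

	/* atomic_dec_return() is a full barrier, so the s_sibling value
	 * written by sysfs_deactivate() is visible here. */
	cmpl = (void *)sd->s_sibling;
	complete(cmpl);
}
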
Example #4
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	if (q->limits.discard_granularity) {
		unsigned int disc_sects = q->limits.discard_granularity >> 9;

		max_discard_sectors &= ~(disc_sects - 1);
	}
Example #5
/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        when -1 don't wait at all, but you get no useful error back when
 *        the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if wait is not set, and runs as a child of keventd.
 * (ie. it runs with full root capabilities).
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info,
			     enum umh_wait wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	helper_lock();
	if (sub_info->path[0] == '\0')
		goto out;

	if (!khelper_wq || usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}

	sub_info->complete = &done;
	sub_info->wait = wait;

	queue_work(khelper_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;
	wait_for_completion(&done);
	retval = sub_info->retval;

out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
Example #6
static int
qup_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	DECLARE_COMPLETION_ONSTACK(complete);
	struct qup_i2c_dev *dev = i2c_get_adapdata(adap);
	int ret;
	int rem = num;
	long timeout;
	int err;
	unsigned long flags;

	del_timer_sync(&dev->pwr_timer);
	mutex_lock(&dev->mlock);

	if (dev->suspended) {
		mutex_unlock(&dev->mlock);
		dev_err(dev->dev, "qup_i2c_xfer: dev suspended, return Error!\n");
		return -EIO;
	}

	if (dev->clk_state == 0) {
		if (dev->clk_ctl == 0) {
			if (dev->pdata->src_clk_rate > 0)
				clk_set_rate(dev->clk,
						dev->pdata->src_clk_rate);
			else
				dev->pdata->src_clk_rate = 19200000;
		}
		qup_i2c_pwr_mgmt(dev, 1);
	}
	/* Initialize QUP registers during first transfer */
	if (dev->clk_ctl == 0) {
		int fs_div;
		int hs_div;
		uint32_t fifo_reg;

		if (dev->gsbi) {
			writel(0x2 << 4, dev->gsbi);
			/* GSBI memory is not in the same 1K region as other
			 * QUP registers. dsb() here ensures that the GSBI
			 * register is updated in correct order and that the
			 * write has gone through before programming QUP core
			 * registers
			 */
			dsb();
		}

		fs_div = ((dev->pdata->src_clk_rate
				/ dev->pdata->clk_freq) / 2) - 3;
		hs_div = 3;
		dev->clk_ctl = ((hs_div & 0x7) << 8) | (fs_div & 0xff);
		fifo_reg = readl(dev->base + QUP_IO_MODE);
		if (fifo_reg & 0x3)
			dev->out_blk_sz = (fifo_reg & 0x3) * 16;
		else
			dev->out_blk_sz = 16;
		if (fifo_reg & 0x60)
			dev->in_blk_sz = ((fifo_reg & 0x60) >> 5) * 16;
		else
Example #7
/**
  * pm8001_phy_control - this function should be registered in the
  * sas_domain_function_template so that libsas can control PHYs. Note that it
  * only controls the HBA PHYs, not expander PHYs; to control an expander PHY,
  * use an SMP command.
  * @sas_phy: which phy in HBA phys.
  * @func: the operation.
  * @funcdata: always NULL.
  */
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
	void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct sas_phy_linkrates *rates;
	DECLARE_COMPLETION_ONSTACK(completion);
Example #8
File: blk-lib.c  Project: DenisLug/mptcp
/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}
Example #9
static int smu_set_fan(int pwm, u8 id, u16 value)
{
	struct smu_cmd cmd;
	u8 buffer[16];
	DECLARE_COMPLETION_ONSTACK(comp);
	int rc;

	/* Fill SMU command structure */
	cmd.cmd = SMU_CMD_FAN_COMMAND;

	/* The SMU has an "old" and a "new" way of setting the fan speed
	 * Unfortunately, I found no reliable way to know which one works
	 * on a given machine model. After some investigations it appears
	 * that MacOS X just tries the new one and, if it fails, falls back
	 * to the old one ... Ugh.
	 */
 retry:
	if (smu_supports_new_fans_ops) {
		buffer[0] = 0x30;
		buffer[1] = id;
		*((u16 *)(&buffer[2])) = value;
		cmd.data_len = 4;
	} else {
		if (id > 7)
			return -EINVAL;
		/* Fill argument buffer */
		memset(buffer, 0, 16);
		buffer[0] = pwm ? 0x10 : 0x00;
		buffer[1] = 0x01 << id;
		*((u16 *)&buffer[2 + id * 2]) = value;
		cmd.data_len = 14;
	}

	cmd.reply_len = 16;
	cmd.data_buf = cmd.reply_buf = buffer;
	cmd.status = 0;
	cmd.done = smu_done_complete;
	cmd.misc = &comp;

	rc = smu_queue_cmd(&cmd);
	if (rc)
		return rc;
	wait_for_completion(&comp);

	/* Handle fallback (see comment above) */
	if (cmd.status != 0 && smu_supports_new_fans_ops) {
#ifdef CONFIG_DEBUG_PRINTK
		printk(KERN_WARNING "windfarm: SMU failed new fan command "
		       "falling back to old method\n");
#else
		;
#endif
		smu_supports_new_fans_ops = 0;
		goto retry;
	}

	return cmd.status;
}
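
The cmd.done hook used above only has to hand the finished command back to the sleeping caller; cmd.misc carries the on-stack completion. A plausible minimal sketch:

/* Sketch: SMU command completion hook; misc is the caller's completion. */
static void smu_done_complete(struct smu_cmd *cmd, void *misc)
{
	struct completion *comp = misc;

	complete(comp);
}
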
Example #10
static void dispc_mgr_disable_digit_out(void)
{
	DECLARE_COMPLETION_ONSTACK(framedone_compl);
	int r, i;
	u32 irq_mask;
	int num_irqs;

	if (dispc_mgr_is_enabled(OMAP_DSS_CHANNEL_DIGIT) == false)
		return;

	/*
	 * When we disable the digit output, we need to wait for FRAMEDONE to
	 * know that DISPC has finished with the output.
	 */

	irq_mask = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_DIGIT);
	num_irqs = 1;

	if (!irq_mask) {
		/*
		 * omap 2/3 don't have framedone irq for TV, so we need to use
		 * vsyncs for this.
		 */

		irq_mask = dispc_mgr_get_vsync_irq(OMAP_DSS_CHANNEL_DIGIT);
		/*
		 * We need to wait for both even and odd vsyncs. Note that this
		 * is not totally reliable, as we could get a vsync interrupt
		 * before we disable the output, which leads to timeout in the
		 * wait_for_completion.
		 */
		num_irqs = 2;
	}

	r = omap_dispc_register_isr(dispc_mgr_disable_isr, &framedone_compl,
			irq_mask);
	if (r)
		DSSERR("failed to register %x isr\n", irq_mask);

	dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, false);

	/* if we couldn't register the irq, just sleep and exit */
	if (r) {
		msleep(100);
		return;
	}

	for (i = 0; i < num_irqs; ++i) {
		if (!wait_for_completion_timeout(&framedone_compl,
					msecs_to_jiffies(100)))
			DSSERR("timeout waiting for digit out to stop\n");
	}

	r = omap_dispc_unregister_isr(dispc_mgr_disable_isr, &framedone_compl,
			irq_mask);
	if (r)
		DSSERR("failed to unregister %x isr\n", irq_mask);
}
Example #11
int wl1271_ps_elp_wakeup(struct wl1271 *wl)
{
    DECLARE_COMPLETION_ONSTACK(compl);
    unsigned long flags;
    int ret;
    u32 start_time = jiffies;
    bool pending = false;

    if (!test_and_clear_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))
        return 0;


    cancel_delayed_work(&wl->elp_work);

    if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
        return 0;

    wl1271_debug(DEBUG_PSM, "waking up chip from elp");

    spin_lock_irqsave(&wl->wl_lock, flags);
    if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
        pending = true;
    else
        wl->elp_compl = &compl;
    spin_unlock_irqrestore(&wl->wl_lock, flags);

    wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);

    if (!pending) {
        ret = wait_for_completion_timeout(
                  &compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
        if (ret == 0) {
            wl1271_error("ELP wakeup timeout!");
            wl12xx_queue_recovery_work(wl);
            ret = -ETIMEDOUT;
            goto err;
        } else if (ret < 0) {
            wl1271_error("ELP wakeup completion error.");
            goto err;
        }
    }

    clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);

    wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
                 jiffies_to_msecs(jiffies - start_time));
    goto out;

err:
    spin_lock_irqsave(&wl->wl_lock, flags);
    wl->elp_compl = NULL;
    spin_unlock_irqrestore(&wl->wl_lock, flags);
    return ret;

out:
    return 0;
}
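
The other half of this handshake lives in the interrupt path: when the firmware signals that the chip is awake, the IRQ code completes wl->elp_compl if a waiter registered one. Roughly, as in the sketch below (the helper name is hypothetical and locking details are simplified):

/* Sketch of the IRQ-side wakeup notification (not the full handler). */
static void wl1271_elp_wakeup_notify(struct wl1271 *wl)
{
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);
	if (wl->elp_compl) {
		complete(wl->elp_compl);
		wl->elp_compl = NULL;
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
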
Example #12
static void vbc_blk_submit_bio(struct bio *bio, int rq)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	bio->bi_end_io	= vbc_blk_endio;
	bio->bi_private = &wait;
	submit_bio(rq, bio);
	wait_for_completion(&wait);
}
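
vbc_blk_endio() is not shown here; with the bio API of this kernel version it would simply signal the waiter stored in bi_private. A minimal sketch (note the error code is not propagated back to the submitter in this pattern):

/* Sketch: end_io callback matching the submit-and-wait helper above. */
static void vbc_blk_endio(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}
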
Example #13
/**
  * pm8001_phy_control - this function should be registered in the
  * sas_domain_function_template so that libsas can control PHYs. Note that it
  * only controls the HBA PHYs, not expander PHYs; to control an expander PHY,
  * use an SMP command.
  * @sas_phy: which phy in HBA phys.
  * @func: the operation.
  * @funcdata: always NULL.
  */
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
	void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct sas_phy_linkrates *rates;
	DECLARE_COMPLETION_ONSTACK(completion);
	pm8001_ha = sas_phy->ha->lldd_ha;
	pm8001_ha->phy[phy_id].enable_completion = &completion;
	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		rates = funcdata;
		if (rates->minimum_linkrate) {
			pm8001_ha->phy[phy_id].minimum_linkrate =
				rates->minimum_linkrate;
		}
		if (rates->maximum_linkrate) {
			pm8001_ha->phy[phy_id].maximum_linkrate =
				rates->maximum_linkrate;
		}
		if (pm8001_ha->phy[phy_id].phy_state == 0) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_HARD_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == 0) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_HARD_RESET);
		break;
	case PHY_FUNC_LINK_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == 0) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_DISABLE:
		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	msleep(300);
	return rc;
}
Example #14
static int msm_rpm_set_exclusive(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	DECLARE_COMPLETION_ONSTACK(ack);
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[SEL_MASK_SIZE];
	int i;

	msm_rpm_request_irq_mode.req = req;
	msm_rpm_request_irq_mode.count = count;
	msm_rpm_request_irq_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_irq_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_irq_mode.done = &ack;

	spin_lock_irqsave(&msm_rpm_lock, flags);
	spin_lock(&msm_rpm_irq_lock);

	BUG_ON(msm_rpm_request);
	msm_rpm_request = &msm_rpm_request_irq_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(target_enum(req[i].id) >= MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ,
				target_enum(req[i].id), req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_SEL_0),
		sel_masks, msm_rpm_sel_mask_size);
	msm_rpm_write(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_CTX_0), ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	spin_unlock(&msm_rpm_irq_lock);
	spin_unlock_irqrestore(&msm_rpm_lock, flags);

	wait_for_completion(&ack);

	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	if (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)) {
		//pr_warn("[K] %s: following request is rejected by rpm\n", __func__);
		for (i = 0; i < count; i++)
				/*pr_warn("[K] %s: id: %d, value: %d\n", __func__, req[i].id, req[i].value)*/;
		return -ENOSPC;
	} else {
		return 0;
	}
}
Example #15
File: rpm.c  Project: lolhi/ef52-kernel
/* Upon return, the <req> array will contain values from the ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_mutex>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	DECLARE_COMPLETION_ONSTACK(ack);
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[SEL_MASK_SIZE];
	int i;

	msm_rpm_request_irq_mode.req = req;
	msm_rpm_request_irq_mode.count = count;
	msm_rpm_request_irq_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_irq_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_irq_mode.done = &ack;

	spin_lock_irqsave(&msm_rpm_lock, flags);
	spin_lock(&msm_rpm_irq_lock);

	BUG_ON(msm_rpm_request);
	msm_rpm_request = &msm_rpm_request_irq_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(target_enum(req[i].id) >= MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ,
				target_enum(req[i].id), req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_SEL_0),
		sel_masks, msm_rpm_sel_mask_size);
	msm_rpm_write(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_CTX_0), ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();	
#if defined(CONFIG_PANTECH_DEBUG)
#if defined(CONFIG_PANTECH_DEBUG_RPM_LOG) //p14291_121102
	pantech_debug_rpm_log(1, req->id, req->value); 
#endif
#endif
	msm_rpm_send_req_interrupt();

	spin_unlock(&msm_rpm_irq_lock);
	spin_unlock_irqrestore(&msm_rpm_lock, flags);

	wait_for_completion(&ack);

	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}
Example #16
int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	unsigned long flags;
	int ret;
	u32 start_time = jiffies;
	bool pending = false;

	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
		return 0;

	wl1271_debug(DEBUG_PSM, "waking up chip from elp");

	/*
	 * The spinlock is required here to synchronize both the work and
	 * the completion variable in one entity.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	if (work_pending(&wl->irq_work) || chip_awake)
		pending = true;
	else
		wl->elp_compl = &compl;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);

	if (!pending) {
		ret = wait_for_completion_timeout(
			&compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
		if (ret == 0) {
			wl1271_error("ELP wakeup timeout!");
			ieee80211_queue_work(wl->hw, &wl->recovery_work);
			ret = -ETIMEDOUT;
			goto err;
		} else if (ret < 0) {
			wl1271_error("ELP wakeup completion error.");
			goto err;
		}
	}

	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);

	wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
		     jiffies_to_msecs(jiffies - start_time));
	goto out;

err:
	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->elp_compl = NULL;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
	return ret;

out:
	return 0;
}
Example #17
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. Does not attempt to parse the
 *	response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	DECLARE_COMPLETION_ONSTACK(complete);

	mrq->done_data = &complete;
	mrq->done = mmc_wait_done;

	mmc_start_request(host, mrq);

	wait_for_completion(&complete);
}
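
mmc_wait_done(), referenced above, is the request-done hook of this MMC core version; it only needs to complete the on-stack completion stored in done_data. Sketch:

/* Sketch: request-done callback paired with mmc_wait_for_req(). */
static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(mrq->done_data);
}
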
Example #18
/*
 * hfsplus_submit_bio - Perform block I/O
 * @sb: super block of volume for I/O
 * @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes
 * @buf: buffer for I/O
 * @data: output pointer for location of requested data
 * @rw: direction of I/O
 *
 * The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than
 * HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads
 * @data will return a pointer to the start of the requested sector,
 * which may not be the same location as @buf.
 *
 * If @sector is not aligned to the bdev logical block size it will
 * be rounded down. For writes this means that @buf should contain data
 * that starts at the rounded-down address. As long as the data was
 * read using hfsplus_submit_bio() and the same buffer is used things
 * will work correctly.
 */
int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
                       void *buf, void **data, int rw)
{
    DECLARE_COMPLETION_ONSTACK(wait);
    struct bio *bio;
    int ret = 0;
    u64 io_size;
    loff_t start;
    int offset;

    /*
     * Align sector to hardware sector size and find offset. We
     * assume that io_size is a power of two, which _should_
     * be true.
     */
    io_size = hfsplus_min_io_size(sb);
    start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
    offset = start & (io_size - 1);
    sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);

    bio = bio_alloc(GFP_NOIO, 1);
    bio->bi_sector = sector;
    bio->bi_bdev = sb->s_bdev;
    bio->bi_end_io = hfsplus_end_io_sync;
    bio->bi_private = &wait;

    if (!(rw & WRITE) && data)
        *data = (u8 *)buf + offset;

    while (io_size > 0) {
        unsigned int page_offset = offset_in_page(buf);
        unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset,
                                 io_size);

        ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);
        if (ret != len) {
            ret = -EIO;
            goto out;
        }
        io_size -= len;
        buf = (u8 *)buf + len;
    }

    submit_bio(rw, bio);
    wait_for_completion(&wait);

    if (!bio_flagged(bio, BIO_UPTODATE))
        ret = -EIO;

out:
    bio_put(bio);
    return ret < 0 ? ret : 0;
}
Example #19
static int dit4192_spi_read_device(struct dit4192 *dit4192, u8 reg, int bytes, u8 *buf)
{
	int ret;
	unsigned char header[2];
	struct spi_transfer spi_transfer_w;
	struct spi_transfer spi_transfer_r;
	struct spi_message spi_message;
	DECLARE_COMPLETION_ONSTACK(context);

	memset(&spi_transfer_w, 0, sizeof(struct spi_transfer));
	memset(&spi_transfer_r, 0, sizeof(struct spi_transfer));
	memset(&spi_message, 0, sizeof(struct spi_message));

	spi_setup(dit4192->spi);
	spi_message_init(&spi_message);

	header[DIT4192_HEADER_0] = DIT4192_CMD_R | DIT4192_IO_STEP_1 | reg; //0x80
	header[DIT4192_HEADER_1] = 0;

	spi_transfer_w.tx_buf = header;
	spi_transfer_w.len =  2;
	spi_message_add_tail(&spi_transfer_w, &spi_message);

	spi_transfer_r.rx_buf = buf;
	spi_transfer_r.len =  bytes;
	spi_message_add_tail(&spi_transfer_r, &spi_message);

	spi_message.complete = dit4192_spi_completion_cb;
	spi_message.context = &context;

	/* must use spi_async in a context that may sleep */
	ret = spi_async(dit4192->spi, &spi_message);
	if (ret == 0) {
		wait_for_completion(&context);

		if (spi_message.status == 0) {
			/* spi_message.actual_length should contain the number
			* of bytes actually read and should update ret to be
			* the actual length, but since our driver doesn't
			* support this, assume all count bytes were read.
			*/
			ret = bytes;
		} else {
			/* transfer finished but the controller reported an error */
			ret = -EFAULT;
		}
	} else {
		pr_err("%s: Error calling spi_async, ret = %d\n", __func__, ret);
	}

	return ret;
}
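
The SPI completion callback used by both DIT4192 helpers can be as small as this; the context pointer passed via spi_message.context is the caller's on-stack completion (sketch):

/* Sketch: spi_message.complete hook; context is the caller's completion. */
static void dit4192_spi_completion_cb(void *context)
{
	complete(context);
}
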
Example #20
static int
qup_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	DECLARE_COMPLETION_ONSTACK(complete);
	struct qup_i2c_dev *dev = i2c_get_adapdata(adap);
	int ret;
	int rem = num;
	long timeout;
	int err;
#ifdef QUP_I2C_SPINLOCK_ENABLE
    unsigned long flags;  //p11171 dongseok added
#endif

	del_timer_sync(&dev->pwr_timer);
	mutex_lock(&dev->mlock);

	if (dev->suspended) {
		mutex_unlock(&dev->mlock);
		return -EIO;
	}

	if (dev->clk_state == 0) {
		if (dev->clk_ctl == 0) {
			if (dev->pdata->src_clk_rate > 0)
				clk_set_rate(dev->clk,
						dev->pdata->src_clk_rate);
			else
				dev->pdata->src_clk_rate = 19200000;
		}
		qup_i2c_pwr_mgmt(dev, 1);
	}
	/* Initialize QUP registers during first transfer */
	if (dev->clk_ctl == 0) {
		int fs_div;
		int hs_div;
		uint32_t fifo_reg;

		if (dev->gsbi)
			writel(0x2 << 4, dev->gsbi);

		fs_div = ((dev->pdata->src_clk_rate
				/ dev->pdata->clk_freq) / 2) - 3;
		hs_div = 3;
		dev->clk_ctl = ((hs_div & 0x7) << 8) | (fs_div & 0xff);
		fifo_reg = readl(dev->base + QUP_IO_MODE);
		if (fifo_reg & 0x3)
			dev->out_blk_sz = (fifo_reg & 0x3) * 16;
		else
			dev->out_blk_sz = 16;
		if (fifo_reg & 0x60)
			dev->in_blk_sz = ((fifo_reg & 0x60) >> 5) * 16;
		else
Example #21
File: rtas.c  Project: 119-org/lamobo-d1
int rtas_ibm_suspend_me(struct rtas_args *args)
{
	long state;
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	struct rtas_suspend_me_data data;
	DECLARE_COMPLETION_ONSTACK(done);

	if (!rtas_service_present("ibm,suspend-me"))
		return -ENOSYS;

	/* Make sure the state is valid */
	rc = plpar_hcall(H_VASI_STATE, retbuf,
			 ((u64)args->args[0] << 32) | args->args[1]);

	state = retbuf[0];

	if (rc) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
		return rc;
	} else if (state == H_VASI_ENABLED) {
		args->args[args->nargs] = RTAS_NOT_SUSPENDABLE;
		return 0;
	} else if (state != H_VASI_SUSPENDING) {
		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
		       state);
		args->args[args->nargs] = -1;
		return 0;
	}

	atomic_set(&data.working, 0);
	atomic_set(&data.done, 0);
	atomic_set(&data.error, 0);
	data.token = rtas_token("ibm,suspend-me");
	data.complete = &done;
	stop_topology_update();

	/* Call function on all CPUs.  One of us will make the
	 * rtas call
	 */
	if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
		atomic_set(&data.error, -EINVAL);

	wait_for_completion(&done);

	if (atomic_read(&data.error) != 0)
		printk(KERN_ERR "Error doing global join\n");

	start_topology_update();

	return atomic_read(&data.error);
}
Example #22
/**********************************************************
 *Message format:
 *	write cmd   |  ADDR_H |ADDR_L  |  data stream  |
 *    1B         |   1B    |  1B    |  length       |
 *
 * read buffer length should be 1 + 1 + 1 + data_length
 ***********************************************************/
int gf_spi_write_bytes(struct gf_dev *gf_dev,
	u16 addr, u32 data_len, u8 *tx_buf)
{
#ifdef SPI_ASYNC
    DECLARE_COMPLETION_ONSTACK(read_done);
#endif

    struct spi_message msg;
    struct spi_transfer *xfer = NULL;
    int ret = 0;

    xfer = kzalloc(sizeof(*xfer), GFP_KERNEL);
    if (xfer == NULL) {
		GF_LOG_ERROR("failed to kzalloc for command\n");
		return -ENOMEM;
    }

    /* send gf command to device */
    spi_message_init(&msg);

    tx_buf[0] = GF_W;
    tx_buf[1] = (u8)((addr >> 8) & 0xFF);
    tx_buf[2] = (u8)(addr & 0xFF);

    xfer[0].tx_buf = tx_buf;
    xfer[0].len = data_len + 3;
    xfer[0].delay_usecs = 5;

    spi_message_add_tail(xfer, &msg);

#ifdef SPI_ASYNC
    msg.complete = gf_spi_complete;
    msg.context = &read_done;

    spin_lock_irq(&gf_dev->spi_lock);
    ret = spi_async(gf_dev->spi, &msg);
    spin_unlock_irq(&gf_dev->spi_lock);

    if (ret == 0)
		wait_for_completion(&read_done);
	else
		GF_LOG_ERROR("failed to spi write = %d\n", ret);
#else
    ret = spi_sync(gf_dev->spi, &msg);
#endif

	if (xfer) {
    	kfree(xfer);
		xfer = NULL;
	}
    return ret;
}
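
gf_spi_complete(), used as the async message callback above, only needs to wake the sleeping writer. A plausible one-line implementation (sketch, not taken from the driver source):

/* Sketch: spi_async completion hook for the gf_spi transfers above. */
static void gf_spi_complete(void *context)
{
	complete(context);
}
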
Example #23
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. Does not attempt to parse the
 *	response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	DECLARE_COMPLETION_ONSTACK(complete);

	mrq->done_data = &complete;
	mrq->done = mmc_wait_done;

	mmc_start_request(host, mrq);
	if(host->index == 0)
		mmc_delay(10);

	wait_for_completion_io(&complete);
}
Example #24
static int
__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
			    const void *fw_data, int len, u32 dst_addr)
{
	u8 *data = sg_virt(&buf->urb->sg[0]);
	DECLARE_COMPLETION_ONSTACK(cmpl);
	__le32 info;
	u32 val;
	int err;

	info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
			   FIELD_PREP(MT_MCU_MSG_LEN, len) |
			   MT_MCU_MSG_TYPE_CMD);

	memcpy(data, &info, sizeof(info));
	memcpy(data + sizeof(info), fw_data, len);
	memset(data + sizeof(info) + len, 0, 4);

	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_ADDR, dst_addr);
	len = roundup(len, 4);
	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_LEN, len << 16);

	buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
	err = mt76u_submit_buf(&dev->mt76, USB_DIR_OUT,
			       MT_EP_OUT_INBAND_CMD,
			       buf, GFP_KERNEL,
			       mt76u_mcu_complete_urb, &cmpl);
	if (err < 0)
		return err;

	if (!wait_for_completion_timeout(&cmpl,
					 msecs_to_jiffies(1000))) {
		dev_err(dev->mt76.dev, "firmware upload timed out\n");
		usb_kill_urb(buf->urb);
		return -ETIMEDOUT;
	}

	if (mt76u_urb_error(buf->urb)) {
		dev_err(dev->mt76.dev, "firmware upload failed: %d\n",
			buf->urb->status);
		return buf->urb->status;
	}

	val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
	val++;
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);

	return 0;
}
Example #25
File: mcu.c  Project: Chong-Li/cse522
static int __mt7601u_dma_fw(struct mt7601u_dev *dev,
			    const struct mt7601u_dma_buf *dma_buf,
			    const void *data, u32 len, u32 dst_addr)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	struct mt7601u_dma_buf buf = *dma_buf; /* we need to fake length */
	__le32 reg;
	u32 val;
	int ret;

	reg = cpu_to_le32(MT76_SET(MT_TXD_INFO_TYPE, DMA_PACKET) |
			  MT76_SET(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
			  MT76_SET(MT_TXD_INFO_LEN, len));
	memcpy(buf.buf, &reg, sizeof(reg));
	memcpy(buf.buf + sizeof(reg), data, len);
	memset(buf.buf + sizeof(reg) + len, 0, 8);

	ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
				       MT_FCE_DMA_ADDR, dst_addr);
	if (ret)
		return ret;
	len = roundup(len, 4);
	ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
				       MT_FCE_DMA_LEN, len << 16);
	if (ret)
		return ret;

	buf.len = MT_DMA_HDR_LEN + len + 4;
	ret = mt7601u_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
				     &buf, GFP_KERNEL,
				     mt7601u_complete_urb, &cmpl);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
		dev_err(dev->dev, "Error: firmware upload timed out\n");
		usb_kill_urb(buf.urb);
		return -ETIMEDOUT;
	}
	if (mt7601u_urb_has_error(buf.urb)) {
		dev_err(dev->dev, "Error: firmware upload urb failed:%d\n",
			buf.urb->status);
		return buf.urb->status;
	}

	val = mt7601u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
	val++;
	mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);

	return 0;
}
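
mt76u_mcu_complete_urb()/mt7601u_complete_urb(), passed to the submit helpers in the last two examples, are the usual URB completion trampolines; the URB context carries the on-stack completion. Sketch:

/* Sketch: URB completion handler that wakes the firmware-upload waiter. */
static void mt7601u_complete_urb(struct urb *urb)
{
	struct completion *cmpl = urb->context;

	complete(cmpl);
}
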
Example #26
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. Does not attempt to parse the
 *	response.
 */
int mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)	//sw2-6-1-RH-Wlan_Reset7-00*
{
	DECLARE_COMPLETION_ONSTACK(complete);

	mrq->done_data = &complete;
	mrq->done = mmc_wait_done;

	mmc_start_request(host, mrq);

//sw2-6-1-RH-Wlan_Reset7-00*[
//	wait_for_completion_io(&complete);
	return wait_for_completion_timeout(&complete, msecs_to_jiffies(2500));
//sw2-6-1-RH-Wlan_Reset7-00*]
}
Example #27
static ssize_t time_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	DECLARE_COMPLETION_ONSTACK(main_start);
	struct benchmark_thread *threads;
	unsigned int cpu;
	unsigned int online_cpus = 0;
	unsigned long long time = 0;

	for_each_online_cpu(cpu) {
		online_cpus++;
	}

	printk("lttng_benchmark: starting benchmark on %u cpus\n",
	       online_cpus);

	threads = (struct benchmark_thread *)
		kmalloc(online_cpus * sizeof(struct benchmark_thread),
			GFP_KERNEL);

	for_each_online_cpu(cpu) {
		init_completion(&threads[cpu].end);
		threads[cpu].time = 0;
	}

	for_each_online_cpu(cpu) {
		threads[cpu].prev = &main_start;
		threads[cpu].thread = kthread_run(benchmark_thread,
						  &threads[cpu],
						  "lttng_benchmark");
	}

	complete_all(&main_start);

	for_each_online_cpu(cpu) {
		wait_for_completion_interruptible(&threads[cpu].end);
	}
	for_each_online_cpu(cpu) {
		unsigned long long cpu_time = threads[cpu].time;
		printk("lttng_benchmark: time for CPU#%u: %llu\n",
		       cpu, cpu_time);
		time += cpu_time;
	}

	kfree(threads);

	printk("lttng_benchmark: total time: %llu\n", time);
	return sprintf(buf, "%llu\n", time);
}
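
For reference, each worker in this benchmark presumably blocks on main_start so that all CPUs start together, times its workload, then signals its own end completion. The sketch below illustrates that handshake; the clock source and workload are assumptions, not taken from the module:

/* Hypothetical sketch of the per-CPU worker started by time_show() above. */
static int benchmark_thread(void *data)
{
	struct benchmark_thread *bt = data;
	unsigned long long start;
	volatile unsigned long i, sink = 0;

	/* Block until time_show() calls complete_all(&main_start). */
	wait_for_completion(bt->prev);

	start = sched_clock();
	for (i = 0; i < 1000000; i++)	/* assumed: some measured workload */
		sink += i;
	bt->time = sched_clock() - start;

	/* Tell time_show() this CPU is done. */
	complete(&bt->end);
	return 0;
}
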
Example #28
static int dit4192_spi_write_device(struct dit4192 *dit4192, u8 header0, u8 *data, u32 bytes)
{
	int ret;
	u8 spi_data[DIT4192_MAX_DATA_SIZE];
	struct spi_transfer spi_transfer;
	struct spi_message spi_message;
	DECLARE_COMPLETION_ONSTACK(context);

	memset(&spi_transfer, 0, sizeof(struct spi_transfer));
	memset(&spi_message, 0, sizeof(struct spi_message));

	spi_data[DIT4192_HEADER_0] = header0;
	spi_data[DIT4192_HEADER_1] = 0;

	if(bytes > 0) {
		if( bytes <= (DIT4192_MAX_DATA_SIZE-2) ) {
			memcpy(&spi_data[2], data, bytes);
		} else {
			/* This should never happen. */
			pr_err("%s: SPI transfer error. Bad data size\n", __func__);
			return -1;
		}
	}

	spi_transfer.tx_buf = spi_data;
	spi_transfer.len = bytes+2;

	spi_setup(dit4192->spi);
	spi_message_init(&spi_message);
	spi_message_add_tail(&spi_transfer, &spi_message);
	spi_message.complete = dit4192_spi_completion_cb;
	spi_message.context = &context;

	/* must use spi_async in a context that may sleep */
	ret = spi_async(dit4192->spi, &spi_message);
	if (ret == 0) {
		wait_for_completion(&context);
		/* update ret to contain the number of bytes actually written */
		if (spi_message.status == 0) {
			ret = spi_transfer.len;
		} else 
			pr_err("%s: SPI transfer error, spi_message.status = %d\n", 
				__func__, spi_message.status);
	} else {
		pr_err("%s: Error calling spi_async, ret = %d\n", __func__, ret);
	}

	return ret;
}
Example #29
File: blk-lib.c  Project: DenisLug/mptcp
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}
Example #30
/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        when -1 don't wait at all, but you get no useful error back when
 *        the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if wait is not set, and runs as a child of keventd.
 * (ie. it runs with full root capabilities).
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	BUG_ON(atomic_read(&sub_info->cred->usage) != 1);
	validate_creds(sub_info->cred);

	if (!sub_info->path) {
		call_usermodehelper_freeinfo(sub_info);
		return -EINVAL;
	}
	helper_lock();
	if (sub_info->path[0] == '\0')
		goto out;

	if (!khelper_wq || usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}

	sub_info->complete = &done;
	sub_info->wait = wait;

	queue_work(khelper_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;

	if (wait & UMH_KILLABLE) {
		retval = wait_for_completion_killable(&done);
		if (!retval)
			goto wait_done;

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
		/* fallthrough, umh_complete() was already called */
	}

	wait_for_completion(&done);
wait_done:
	retval = sub_info->retval;
out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
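
The UMH_KILLABLE handshake above relies on the worker side also using xchg() on sub_info->complete, so that exactly one party frees sub_info. A sketch of that counterpart, modeled on umh_complete() (treat details as assumptions):

/* Sketch: worker-side completion that pairs with the killable wait above. */
static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);

	/*
	 * If the waiter was killed it already took ->complete and will free
	 * sub_info; otherwise we wake it here. When comp is NULL the waiter
	 * is gone, so the worker must free sub_info itself.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}
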