Example #1
static void charlcd_wait_complete_irq(struct charlcd *lcd)
{
	int ret;

	ret = wait_for_completion_interruptible_timeout(&lcd->complete,
							CHARLCD_TIMEOUT);
	/* Disable IRQ after completion */
	writel(0x00, lcd->virtbase + CHAR_MASK);

	if (ret < 0) {
		dev_err(lcd->dev,
			"wait_for_completion_interruptible_timeout() "
			"returned %d waiting for ready\n", ret);
		return;
	}

	if (ret == 0) {
		dev_err(lcd->dev, "charlcd controller timed out "
			"waiting for ready\n");
		return;
	}
}
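Example #1 exercises the full three-way return contract of wait_for_completion_interruptible_timeout(): a negative value (typically -ERESTARTSYS) when a signal interrupts the wait, 0 when the timeout elapses, and the remaining jiffies (> 0) on success. Below is a minimal caller-side sketch of that contract; the names (my_dev, irq_done) are hypothetical and not taken from any example here:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct my_dev {
	struct completion irq_done;	/* completed from the IRQ handler */
};

/* Sketch: canonical three-way handling of the return value. */
static int my_dev_wait_ready(struct my_dev *dev)
{
	long ret;	/* note: the function returns long, not int */

	ret = wait_for_completion_interruptible_timeout(&dev->irq_done,
							msecs_to_jiffies(500));
	if (ret < 0)		/* interrupted by a signal */
		return ret;	/* usually -ERESTARTSYS */
	if (ret == 0)		/* timed out */
		return -ETIMEDOUT;
	return 0;		/* done; ret was the jiffies left over */
}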
Example #2
static void p54_remove_interface(struct ieee80211_hw *dev,
                                 struct ieee80211_vif *vif)
{
    struct p54_common *priv = dev->priv;

    mutex_lock(&priv->conf_mutex);
    priv->vif = NULL;

    /*
     * LMAC API 3.2.2 states that any active beacon template must be
     * canceled by the driver before attempting a mode transition.
     */
    if (le32_to_cpu(priv->beacon_req_id) != 0) {
        p54_tx_cancel(priv, priv->beacon_req_id);
        wait_for_completion_interruptible_timeout(&priv->beacon_comp, HZ);
    }
    priv->mode = NL80211_IFTYPE_MONITOR;
    memset(priv->mac_addr, 0, ETH_ALEN);
    memset(priv->bssid, 0, ETH_ALEN);
    p54_setup_mac(priv);
    mutex_unlock(&priv->conf_mutex);
}
Example #3
ssize_t mdp4_dtv_show_event(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cndx;
	struct vsycn_ctrl *vctrl;
	ssize_t ret = 0;
	unsigned long flags;
	u64 vsync_tick;

	cndx = 0;
	vctrl = &vsync_ctrl_db[0];

	if (atomic_read(&vctrl->suspend) > 0 ||
		!external_common_state->hpd_state ||
		atomic_read(&vctrl->vsync_resume) == 0)
		return 0;

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->wait_vsync_cnt == 0)
		INIT_COMPLETION(vctrl->vsync_comp);
	vctrl->wait_vsync_cnt++;
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	ret = wait_for_completion_interruptible_timeout(&vctrl->vsync_comp,
		msecs_to_jiffies(VSYNC_PERIOD * 4));
	if (ret <= 0) {
		vctrl->wait_vsync_cnt = 0;
		return -EBUSY;
	}

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	vsync_tick = ktime_to_ns(vctrl->vsync_time);
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
	buf[strlen(buf) + 1] = '\0';
	return ret;
}
Example #4
static int atusb_xmit(struct ieee802154_dev *wpan_dev, struct sk_buff *skb)
{
	struct atusb *atusb = wpan_dev->priv;
	struct usb_device *usb_dev = atusb->usb_dev;
	unsigned long flags;
	int ret;

	dev_dbg(&usb_dev->dev, "atusb_xmit (%d)\n", skb->len);
	if (down_trylock(&atusb->tx_sem)) {
		dev_dbg(&usb_dev->dev, "atusb_xmit busy\n");
		return -EBUSY;
	}
	INIT_COMPLETION(atusb->tx_complete);
	ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
			      ATUSB_TX, ATUSB_REQ_TO_DEV, 0, atusb->tx_ack_seq,
			      skb->data, skb->len, 1000);
	if (ret < 0) {
		dev_warn_ratelimited(&usb_dev->dev,
				     "ATUSB_TX failed, error %d\n", ret);
		goto done;
	}

	ret = wait_for_completion_interruptible_timeout(
			&atusb->tx_complete, msecs_to_jiffies(TX_TIMEOUT_MS));
	if (!ret)
		ret = -ETIMEDOUT;
	if (ret > 0)
		ret = 0;

done:
	spin_lock_irqsave(&atusb->lock, flags);
	atusb->tx_ack_seq++;
	spin_unlock_irqrestore(&atusb->lock, flags);

	up(&atusb->tx_sem);
	dev_dbg(&usb_dev->dev, "atusb_xmit done (%d)\n", ret);
	return ret;
}
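Example #4 (and several below) resets its completion with INIT_COMPLETION() before arming the transfer; since Linux 3.13 that macro is spelled reinit_completion(). The ordering matters: the reset must happen before the operation that will eventually signal the completion is kicked off, or a stale complete() from a previous run can satisfy the new wait. A short sketch of that discipline, assuming hypothetical my_dev/my_dev_start_hw helpers:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct my_dev {
	struct completion done;
};

static void my_dev_start_hw(struct my_dev *dev)
{
	/* stub: kick the hardware that will call complete(&dev->done) */
}

static int my_dev_do_transfer(struct my_dev *dev)
{
	long ret;

	reinit_completion(&dev->done);	/* INIT_COMPLETION(dev->done) pre-3.13 */
	my_dev_start_hw(dev);		/* arm only after the reset */

	ret = wait_for_completion_interruptible_timeout(&dev->done,
							msecs_to_jiffies(1000));
	if (!ret)
		return -ETIMEDOUT;
	return ret < 0 ? ret : 0;	/* clamp success to 0, as Example #4 does */
}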
Example #5
long jz4740_adc_read_battery_voltage(struct device *dev,
						enum jz_adc_battery_scale scale)
{
	struct jz4740_adc *adc = dev_get_drvdata(dev);
	unsigned long t;
	long long voltage;
	uint16_t val;

	if (!adc)
		return -ENODEV;

	jz4740_adc_clk_enable(adc);

	if (scale == JZ_ADC_BATTERY_SCALE_2V5)
		jz4740_adc_set_cfg(adc, JZ_ADC_CFG_BAT_MB, JZ_ADC_CFG_BAT_MB);
	else
		jz4740_adc_set_cfg(adc, JZ_ADC_CFG_BAT_MB, 0);

	jz4740_adc_enable_irq(adc, JZ_ADC_IRQ_BATTERY);
	jz4740_adc_enable_adc(adc, JZ_ADC_ENABLE_BATTERY);

	t = wait_for_completion_interruptible_timeout(&adc->bat_completion,
							HZ);

	jz4740_adc_disable_irq(adc, JZ_ADC_IRQ_BATTERY);

	if (t <= 0) {
		jz4740_adc_disable_adc(adc, JZ_ADC_ENABLE_BATTERY);
		return t ? t : -ETIMEDOUT;
	}

	val = readw(adc->base + JZ_REG_ADC_BATTERY);

	jz4740_adc_clk_disable(adc);

	if (scale == JZ_ADC_BATTERY_SCALE_2V5)
		voltage = (((long long)val) * 2500000LL) >> 12LL;
	else
		voltage = ((((long long)val) * 7395000LL) >> 12LL) + 33000LL;

	return voltage;
}
Example #6
ssize_t mdp4_dsi_cmd_show_event(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cndx;
	struct vsycn_ctrl *vctrl;
	ssize_t ret = 0;
	unsigned long flags;
	u64 vsync_tick;

	cndx = 0;
	vctrl = &vsync_ctrl_db[0];

	sec_debug_mdp_set_value(SEC_DEBUG_VSYNC_SYSFS_EVENT, SEC_DEBUG_IN);
	if (atomic_read(&vctrl->suspend) > 0)
		return 0;

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->wait_vsync_cnt == 0)
		INIT_COMPLETION(vctrl->vsync_comp);
	vctrl->wait_vsync_cnt++;
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	ret = wait_for_completion_interruptible_timeout(&vctrl->vsync_comp,
		msecs_to_jiffies(VSYNC_PERIOD * 4));
	if (ret <= 0) {
		vctrl->wait_vsync_cnt = 0;
		vctrl->vsync_time = ktime_get();
	}

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	vsync_tick = ktime_to_ns(vctrl->vsync_time);
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
	buf[strlen(buf) + 1] = '\0';
	sec_debug_mdp_set_value(SEC_DEBUG_VSYNC_SYSFS_EVENT, SEC_DEBUG_OUT);
	return ret;
}
Example #7
int wcn36xx_smd_open(struct wcn36xx *wcn)
{
	int ret, left;

	INIT_WORK(&wcn->smd_work, wcn36xx_smd_work);
	init_completion(&wcn->smd_compl);

	ret = smd_named_open_on_edge("WLAN_CTRL", SMD_APPS_WCNSS,
				     &wcn->smd_ch, wcn, wcn36xx_smd_notify);
	if (ret) {
		wcn36xx_error("smd_named_open_on_edge failed: %d", ret);
		return ret;
	}

	left = wait_for_completion_interruptible_timeout(&wcn->smd_compl,
							 msecs_to_jiffies(SMD_MSG_TIMEOUT));
	if (left <= 0) {
		wcn36xx_error("timeout waiting for smd open: %d", left);
		return left ? left : -ETIMEDOUT;
	}

	return 0;
}
Example #8
static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
	struct device_attribute *dev_attr, char *buf)
{
	struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
	struct completion *completion = &hwmon->read_completion;
	unsigned long t;
	unsigned long val;
	int ret;

	mutex_lock(&hwmon->lock);

	INIT_COMPLETION(*completion);

	enable_irq(hwmon->irq);
	hwmon->cell->enable(to_platform_device(dev));

	t = wait_for_completion_interruptible_timeout(completion, HZ);

	if (t > 0) {
		val = readw(hwmon->base) & 0xfff;
		val = (val * 3300) >> 12;
		ret = sprintf(buf, "%lu\n", val);
	} else {
		ret = t ? t : -ETIMEDOUT;
	}

	hwmon->cell->disable(to_platform_device(dev));
	disable_irq(hwmon->irq);

	mutex_unlock(&hwmon->lock);

	return ret;
}
Example #9
static void mdp4_dsi_video_wait4dmap(int cndx)
{
    struct vsycn_ctrl *vctrl;
    ssize_t ret = 0;

    if (cndx >= MAX_CONTROLLER) {
        pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
        return;
    }

    vctrl = &vsync_ctrl_db[cndx];

    if (atomic_read(&vctrl->suspend) > 0)
        return;

    ret = wait_for_completion_interruptible_timeout(
              &vctrl->dmap_comp,
              msecs_to_jiffies(WAIT_FOR_COMPLETION_TIMEOUT));
    if (ret < 0) {
#ifdef MDP_HANG_DEBUG
        mdp4_dump_regs();
        panic("vctrl->dmap_comp interrupt missing");
#endif
        pr_err("%s wait for completion error %x",
               __func__, ret);
        return;
    } else if (!ret) {
#ifdef MDP_HANG_DEBUG
        mdp4_dump_regs();
        panic("vctrl->dmap_comp interrupt missing");
#endif
        pr_err("%s wait for commit_comp timeout",
               __func__);
        wait_for_completion(&vctrl->dmap_comp);
    }

}
Example #10
/**
 * Wait for the MAC FW to initialize
 *
 * MAC FW sends a 0xfd/0101/00 notification to EP1 when done
 * initializing. Get that notification into i1480->evt_buf; upper layer
 * will verify it.
 *
 * Set i1480->evt_result with the result of getting the event or its
 * size (if successful).
 *
 * Delivers the data directly to i1480->evt_buf
 */
static
int i1480_usb_wait_init_done(struct i1480 *i1480)
{
	int result;
	struct device *dev = i1480->dev;
	struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
	struct usb_endpoint_descriptor *epd;

	init_completion(&i1480->evt_complete);
	i1480->evt_result = -EINPROGRESS;
	epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc;
	usb_fill_int_urb(i1480_usb->neep_urb, i1480_usb->usb_dev,
			 usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress),
			 i1480->evt_buf, i1480->buf_size,
			 i1480_usb_neep_cb, i1480, epd->bInterval);
	result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev, "init done: cannot submit NEEP read: %d\n",
			result);
		goto error_submit;
	}
	/* Wait for the USB callback to get the data */
	result = wait_for_completion_interruptible_timeout(
		&i1480->evt_complete, HZ);
	if (result <= 0) {
		result = result == 0 ? -ETIMEDOUT : result;
		goto error_wait;
	}
	usb_kill_urb(i1480_usb->neep_urb);
	return 0;

error_wait:
	usb_kill_urb(i1480_usb->neep_urb);
error_submit:
	i1480->evt_result = result;
	return result;
}
Example #11
static int rk29_wait_event(struct rk29_i2c_data *i2c,
					enum rk29_event mr_event)
{
	int ret = 0;

	if(unlikely(irqs_disabled()))
	{
		i2c_err(i2c->dev, "irqs are disabled on this system!\n");
		return -EIO;
	}
	i2c->cmd_err = RK29_ERROR_NONE;
	i2c->cmd_event = mr_event;
	rk29_i2c_enable_irqs(i2c);
	if(i2c->mode == I2C_MODE_IRQ)
	{
		ret = wait_for_completion_interruptible_timeout(&i2c->cmd_complete,
								usecs_to_jiffies(i2c->ack_timeout));
	}
	else
	{
		i2c->poll_status = 0;
		ret = wait_for_completion_poll_timeout(i2c);
	}
	if(ret < 0)
	{
		i2c_err(i2c->dev, "i2c wait for event %04x, retrun %d \n", mr_event, ret);
		return ret;
	}
	if(ret == 0)
	{
		i2c_err(i2c->dev, "i2c wait for envent timeout, but not return -ETIMEDOUT\n");
		return 0;
		//return -ETIMEDOUT;
	}
	return 0;
}
Example #12
static int qsc6085_dump_start(struct dpram_link_device *dpld)
{
	int ret;
	struct link_device *ld = &dpld->ld;
	struct modem_ctl *mc = ld->mc;
	struct cp_ramdump_status *dump_stat = &ld->msd->dump_stat;
	mif_err("+++\n");

	init_completion(&dpld->crash_cmpl);
	INIT_DELAYED_WORK(&dpld->crash_dwork, qsc6085_dump_work);

	iowrite32(QSC_UPLOAD_MODE, &dpld->ul_map.magic);

	/* reset modem so that it goes to upload mode */
	/* ap does not need to reset cp during CRASH_EXIT case */
	if (gpio_get_value(mc->gpio_phone_active))
		mc->ops.modem_reset(mc);

	dpld->send_intr(dpld, CMD_CP_RAMDUMP_START_REQ);
	ret = wait_for_completion_interruptible_timeout(&dpld->crash_cmpl,
			RAMDUMP_CMD_TIMEOUT);
	if (!ret) {
		mif_err("ERR! no response to CP_RAMDUMP_START_REQ\n");
		dump_stat->dump_size = 0;
	} else {
		dump_stat->dump_size = QSC6085_RAM_SIZE;
		dump_stat->addr = 0;
		dump_stat->rcvd = 0;
		dump_stat->rest = dump_stat->dump_size;
	}

	queue_delayed_work(system_nrt_wq, &dpld->crash_dwork, 0);

	mif_err("---\n");
	return 0;
}
Example #13
int wcn36xx_msm_smd_open(void *drv_priv, void *rsp_cb)
{
	int ret, left;
	wmsm.drv_priv = drv_priv;
	wmsm.rsp_cb = rsp_cb;
	INIT_WORK(&wmsm.smd_work, wcn36xx_msm_smd_work);
	init_completion(&wmsm.smd_compl);

	wmsm.wq = create_workqueue("wcn36xx_msm_smd_wq");
	if (!wmsm.wq) {
		dev_err(&wmsm.core->dev, "failed to allocate wq");
		ret = -ENOMEM;
		return ret;
	}

	ret = smd_named_open_on_edge("WLAN_CTRL", SMD_APPS_WCNSS,
		&wmsm.smd_ch, &wmsm, wcn36xx_msm_smd_notify);
	if (ret) {
		dev_err(&wmsm.core->dev,
			"smd_named_open_on_edge failed: %d\n", ret);
		return ret;
	}

	left = wait_for_completion_interruptible_timeout(&wmsm.smd_compl,
		msecs_to_jiffies(HAL_MSG_TIMEOUT));
	if (left <= 0) {
		dev_err(&wmsm.core->dev,
			"timeout waiting for smd open: %d\n", left);
		return left ? left : -ETIMEDOUT;
	}

	/* Not to receive INT until the whole buf from SMD is read */
	smd_disable_read_intr(wmsm.smd_ch);

	return 0;
}
Example #14
static int hfa384x_wait(struct net_device *dev, struct sk_buff *skb)
{
	struct hostap_interface *iface = netdev_priv(dev);
	local_info_t *local = iface->local;
	struct hostap_usb_priv *hw_priv = local->hw_priv;
	int res;
	unsigned long flags;

	res = wait_for_completion_interruptible_timeout(&hfa384x_cb(skb)->comp, 5 * HZ);
	if (res > 0)
		return 0;

	if (res == 0) {
		res = -ETIMEDOUT;
	}

	usb_kill_urb(&hw_priv->tx_urb);
	// FIXME: rethink
	spin_lock_irqsave(&hw_priv->tx_queue.lock, flags);
	if (skb->next)
		skb_unlink(skb, &hw_priv->tx_queue);
	spin_unlock_irqrestore(&hw_priv->tx_queue.lock, flags);
	return res;
}
Example #15
static void asrc_output_task_worker(struct work_struct *w)
{
	struct asrc_pair_params *params =
		container_of(w, struct asrc_pair_params, task_output_work);
	enum asrc_pair_index index = params->index;
	unsigned long lock_flags;

	if (!wait_for_completion_interruptible_timeout(&params->output_complete, HZ / 10)) {
		pair_err("output dma task timeout\n");
		return;
	}

	init_completion(&params->output_complete);

	spin_lock_irqsave(&pair_lock, lock_flags);
	if (!params->pair_hold) {
		spin_unlock_irqrestore(&pair_lock, lock_flags);
		return;
	}
	asrc_read_output_FIFO(params);
	spin_unlock_irqrestore(&pair_lock, lock_flags);

	complete(&params->lastperiod_complete);
}
Example #16
static int wacom_i2c_input_open(struct input_dev *dev)
{
	struct wacom_i2c *wac_i2c = input_get_drvdata(dev);
	int ret = 0;
	static bool init_insert = true;

	printk(KERN_DEBUG"epen:%s\n", __func__);
#ifdef WACOM_PEN_DETECT
	if (unlikely(init_insert)) {
		init_pen_insert(wac_i2c);
		init_insert = false;
	}
#endif

#if 0
	ret = wait_for_completion_interruptible_timeout(&wac_i2c->init_done,
		msecs_to_jiffies(1 * MSEC_PER_SEC));

	if (ret < 0) {
		dev_err(&wac_i2c->client->dev,
			"error while waiting for device to init (%d)\n", ret);
		ret = -ENXIO;
		goto err_open;
	}
	if (ret == 0) {
		dev_err(&wac_i2c->client->dev,
			"timedout while waiting for device to init\n");
		ret = -ENXIO;
		goto err_open;
	}
#endif
	wacom_power_on(wac_i2c);

err_open:
	return ret;
}
Example #17
static int pohmelfs_crypto_process(struct ablkcipher_request *req,
		struct scatterlist *sg_dst, struct scatterlist *sg_src,
		void *iv, int enc, unsigned long timeout)
{
	struct pohmelfs_crypto_completion complete;
	int err;

	init_completion(&complete.complete);
	complete.error = -EINPROGRESS;

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					pohmelfs_crypto_complete, &complete);

	ablkcipher_request_set_crypt(req, sg_src, sg_dst, sg_src->length, iv);

	if (enc)
		err = crypto_ablkcipher_encrypt(req);
	else
		err = crypto_ablkcipher_decrypt(req);

	switch (err) {
	case -EINPROGRESS:
	case -EBUSY:
		err = wait_for_completion_interruptible_timeout(&complete.complete,
					timeout);
		if (!err)
			err = -ETIMEDOUT;
		else if (err > 0)
			err = complete.error;
		break;
	default:
		break;
	}

	return err;
}
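Example #17 shows the classic async-crypto idiom: the dispatch call returning -EINPROGRESS or -EBUSY means "in flight, wait for the callback", and the callback stores the real status next to the completion before waking the waiter. A stripped-down sketch of that carrier structure and callback follows, using the crypto API's old callback signature from this era; the names are illustrative, and modern kernels package the same pattern as struct crypto_wait / crypto_wait_req():

#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/errno.h>

/* Illustrative carrier pairing a completion with the async status. */
struct my_crypto_done {
	struct completion complete;
	int error;
};

/* Async callback: record the status and wake the waiter. */
static void my_crypto_complete(struct crypto_async_request *req, int err)
{
	struct my_crypto_done *done = req->data;

	if (err == -EINPROGRESS)
		return;	/* request left the backlog; keep waiting */

	done->error = err;
	complete(&done->complete);
}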
Example #18
static void *msm_rpcrouter_load_modem(void)
{
	void *pil;
	int rc;

	pil = pil_get("modem");
	if (IS_ERR(pil))
		pr_err("[K] %s: modem load failed\n", __func__);
	else {
		rc = wait_for_completion_interruptible_timeout(
						&rpc_remote_router_up,
						MODEM_LOAD_TIMEOUT);
		if (!rc)
			rc = -ETIMEDOUT;
		if (rc < 0) {
			pr_err("[K] %s: wait for remote router failed %d\n",
			       __func__, rc);
			msm_rpcrouter_unload_modem(pil);
			pil = ERR_PTR(rc);
		}
	}

	return pil;
}
Example #19
static int vf610_read_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val,
			int *val2,
			long mask)
{
	struct vf610_adc *info = iio_priv(indio_dev);
	unsigned int hc_cfg;
	long ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
	case IIO_CHAN_INFO_PROCESSED:
		mutex_lock(&indio_dev->mlock);
		if (iio_buffer_enabled(indio_dev)) {
			mutex_unlock(&indio_dev->mlock);
			return -EBUSY;
		}

		reinit_completion(&info->completion);
		hc_cfg = VF610_ADC_ADCHC(chan->channel);
		hc_cfg |= VF610_ADC_AIEN;
		writel(hc_cfg, info->regs + VF610_REG_ADC_HC0);
		ret = wait_for_completion_interruptible_timeout
				(&info->completion, VF610_ADC_TIMEOUT);
		if (ret == 0) {
			mutex_unlock(&indio_dev->mlock);
			return -ETIMEDOUT;
		}
		if (ret < 0) {
			mutex_unlock(&indio_dev->mlock);
			return ret;
		}

		switch (chan->type) {
		case IIO_VOLTAGE:
			*val = info->value;
			break;
		case IIO_TEMP:
			/*
			 * Calculate in degree Celsius times 1000
			 * Using the typical sensor slope of 1.84 mV/°C
			 * and VREFH_ADC at 3.3V, V at 25°C of 699 mV
			 */
			*val = 25000 - ((int)info->value - VF610_VTEMP25_3V3) *
					1000000 / VF610_TEMP_SLOPE_COEFF;

			break;
		default:
			mutex_unlock(&indio_dev->mlock);
			return -EINVAL;
		}

		mutex_unlock(&indio_dev->mlock);
		return IIO_VAL_INT;

	case IIO_CHAN_INFO_SCALE:
		*val = info->vref_uv / 1000;
		*val2 = info->adc_feature.res_mode;
		return IIO_VAL_FRACTIONAL_LOG2;

	case IIO_CHAN_INFO_SAMP_FREQ:
		*val = info->sample_freq_avail[info->adc_feature.sample_rate];
		*val2 = 0;
		return IIO_VAL_INT;

	default:
		break;
	}

	return -EINVAL;
}
Example #20
static int smd_tty_open(struct tty_struct *tty, struct file *f)
{
	int res = 0;
	unsigned int n = tty->index;
	struct smd_tty_info *info;
	const char *peripheral = NULL;


	if (n >= MAX_SMD_TTYS || !smd_tty[n].smd)
		return -ENODEV;

	info = smd_tty + n;

	mutex_lock(&smd_tty_lock);
	tty->driver_data = info;

	if (info->open_count++ == 0) {
		peripheral = smd_edge_to_subsystem(smd_tty[n].smd->edge);
		if (peripheral) {
			info->pil = pil_get(peripheral);
			if (IS_ERR(info->pil)) {
				res = PTR_ERR(info->pil);
				goto out;
			}

			/* Wait for the modem SMSM to be initialized so the
			 * SMD loopback channel can be allocated at the modem.
			 * Since the wait needs to be done at most once, using
			 * msleep doesn't degrade performance.
			 */
			if (n == LOOPBACK_IDX) {
				if (!is_modem_smsm_inited())
					msleep(5000);
				smsm_change_state(SMSM_APPS_STATE,
					0, SMSM_SMD_LOOPBACK);
				msleep(100);
			}


			/*
			 * Wait for a channel to be allocated so we know
			 * the modem is ready enough.
			 */
			if (smd_tty_modem_wait) {
				res = wait_for_completion_interruptible_timeout(
					&info->ch_allocated,
					msecs_to_jiffies(smd_tty_modem_wait *
									1000));

				if (res == 0) {
					pr_err("Timed out waiting for SMD"
								" channel\n");
					res = -ETIMEDOUT;
					goto release_pil;
				} else if (res < 0) {
					pr_err("Error waiting for SMD channel:"
									" %d\n",
						res);
					goto release_pil;
				}

				res = 0;
			}
		}


		info->tty = tty;
		tasklet_init(&info->tty_tsklt, smd_tty_read,
			     (unsigned long)info);
		wake_lock_init(&info->wake_lock, WAKE_LOCK_SUSPEND,
				smd_tty[n].smd->port_name);
		if (!info->ch) {
			res = smd_named_open_on_edge(smd_tty[n].smd->port_name,
							smd_tty[n].smd->edge,
							&info->ch, info,
							smd_tty_notify);
			if (res < 0) {
				pr_err("%s: %s open failed %d\n", __func__,
					smd_tty[n].smd->port_name, res);
				goto release_pil;
			}

			res = wait_event_interruptible_timeout(
				info->ch_opened_wait_queue,
				info->is_open, (2 * HZ));
			if (res == 0)
				res = -ETIMEDOUT;
			if (res < 0) {
				pr_err("%s: wait for %s smd_open failed %d\n",
					__func__, smd_tty[n].smd->port_name,
					res);
				goto release_pil;
			}
			res = 0;
		}
	}

release_pil:
	if (res < 0)
		pil_put(info->pil);
	else
		smd_disable_read_intr(info->ch);
out:
	mutex_unlock(&smd_tty_lock);

	return res;
}
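Example #20 mixes the two related primitives: wait_for_completion_interruptible_timeout() for the one-shot ch_allocated event and wait_event_interruptible_timeout() for the is_open condition. The return conventions match (negative on signal, 0 on timeout, positive on success), but the waitqueue form re-checks an arbitrary condition, so it tolerates spurious wakeups and state that can toggle back and forth. A minimal sketch of the waitqueue variant, with hypothetical names:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct my_chan {
	wait_queue_head_t open_wait;	/* woken from the notify callback */
	bool is_open;			/* the condition being waited on */
};

static int my_chan_wait_open(struct my_chan *ch)
{
	long ret;

	/* The condition is re-evaluated on every wakeup. */
	ret = wait_event_interruptible_timeout(ch->open_wait,
					       ch->is_open, 2 * HZ);
	if (ret == 0)
		return -ETIMEDOUT;
	return ret < 0 ? ret : 0;
}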
Example #21
/*
 * Low level master read/write transaction.
 */
static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
			     struct i2c_msg *msg, int stop)
{
	struct omap_i2c_dev *dev = i2c_get_adapdata(adap);
	int r;
	u16 w;

	dev_dbg(dev->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
		msg->addr, msg->len, msg->flags, stop);

	if (msg->len == 0)
		return -EINVAL;

	omap_i2c_write_reg(dev, OMAP_I2C_SA_REG, msg->addr);

	/* REVISIT: Could the STB bit of I2C_CON be used with probing? */
	dev->buf = msg->buf;
	dev->buf_len = msg->len;

	omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len);

	init_completion(&dev->cmd_complete);
	dev->cmd_err = 0;

	w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT;
	if (msg->flags & I2C_M_TEN)
		w |= OMAP_I2C_CON_XA;
	if (!(msg->flags & I2C_M_RD))
		w |= OMAP_I2C_CON_TRX;
	if (stop)
		w |= OMAP_I2C_CON_STP;
	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);

	r = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
						      OMAP_I2C_TIMEOUT);
	dev->buf_len = 0;
	if (r < 0)
		return r;
	if (r == 0) {
		dev_err(dev->dev, "controller timed out\n");
		omap_i2c_init(dev);
		return -ETIMEDOUT;
	}

	if (likely(!dev->cmd_err))
		return 0;

	/* We have an error */
	if (dev->cmd_err & (OMAP_I2C_STAT_AL | OMAP_I2C_STAT_ROVR |
			    OMAP_I2C_STAT_XUDF)) {
		omap_i2c_init(dev);
		return -EIO;
	}

	if (dev->cmd_err & OMAP_I2C_STAT_NACK) {
		if (msg->flags & I2C_M_IGNORE_NAK)
			return 0;
		if (stop) {
			w = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);
			w |= OMAP_I2C_CON_STP;
			omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
		}
		return -EREMOTEIO;
	}
	return -EIO;
}
Example #22
static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	struct mmc_data *data = host->mrq->data;
	long time;

	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
		case MMC_APP_CMD:
			cmd->error = -ETIMEDOUT;
			break;
		default:
			cmd->error = sh_mmcif_error_manage(host);
			break;
		}
		dev_dbg(&host->pd->dev, "CMD%d error %d\n",
			cmd->opcode, cmd->error);
		host->sd_error = false;
		return false;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return false;
	}

	sh_mmcif_get_response(host, cmd);

	if (!data)
		return false;

	/*
	 * Completion can be signalled from DMA callback and error, so, have to
	 * reset here, before setting .dma_active
	 */
	init_completion(&host->dma_complete);

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			sh_mmcif_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			sh_mmcif_start_dma_tx(host);
	}

	if (!host->dma_active) {
		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
		return !data->error;
	}

	/* Running in the IRQ thread, can sleep */
	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
							 host->timeout);

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     data->sg, data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     data->sg, data->sg_len,
			     DMA_TO_DEVICE);

	if (host->sd_error) {
		dev_err(host->mmc->parent,
			"Error IRQ while waiting for DMA completion!\n");
		/* Woken up by an error IRQ: abort DMA */
		data->error = sh_mmcif_error_manage(host);
	} else if (!time) {
		dev_err(host->mmc->parent, "DMA timeout!\n");
		data->error = -ETIMEDOUT;
	} else if (time < 0) {
		dev_err(host->mmc->parent,
			"wait_for_completion_...() error %ld!\n", time);
		data->error = time;
	}
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	host->dma_active = false;

	if (data->error) {
		data->bytes_xfered = 0;
		/* Abort DMA */
		if (data->flags & MMC_DATA_READ)
			dmaengine_terminate_all(host->chan_rx);
		else
			dmaengine_terminate_all(host->chan_tx);
	}

	return false;
}
Example #23
static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
	int ret;
	bool has_unre_flag = dev->pdata->has_unre_flag;

	dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

	INIT_COMPLETION(dev->cmd_complete);
	dev->transfer_status = 0;

	if (!dev->buf_len) {
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	} else if (dev->msg->flags & I2C_M_RD) {
		unsigned start_flags = AT91_TWI_START;

		if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
			dev_err(dev->dev, "RXRDY still set!");
			at91_twi_read(dev, AT91_TWI_RHR);
		}

		/* if only one byte is to be read, immediately stop transfer */
		if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN))
			start_flags |= AT91_TWI_STOP;
		at91_twi_write(dev, AT91_TWI_CR, start_flags);
		/*
		 * When using dma, the last byte has to be read manually in
		 * order to not send the stop command too late and then
		 * to receive extra data. In practice, there are some issues
		 * if you use the dma to read n-1 bytes because of latency.
		 * Reading n-2 bytes with dma and the two last ones manually
		 * seems to be the best solution.
		 */
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_read_data_dma(dev);
			/*
			 * It is important to enable TXCOMP irq here because
			 * doing it only when transferring the last two bytes
			 * will mask NACK errors since TXCOMP is set when a
			 * NACK occurs.
			 */
			at91_twi_write(dev, AT91_TWI_IER,
			       AT91_TWI_TXCOMP);
		} else
			at91_twi_write(dev, AT91_TWI_IER,
			       AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
	} else {
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write_data_dma(dev);
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
		} else {
			at91_twi_write_next_byte(dev);
			at91_twi_write(dev, AT91_TWI_IER,
				AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
		}
	}

	ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
							dev->adapter.timeout);
	if (ret == 0) {
		dev_err(dev->dev, "controller timed out\n");
		at91_init_twi_bus(dev);
		ret = -ETIMEDOUT;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_NACK) {
		dev_dbg(dev->dev, "received nack\n");
		ret = -EREMOTEIO;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_OVRE) {
		dev_err(dev->dev, "overrun while reading\n");
		ret = -EIO;
		goto error;
	}
	if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
		dev_err(dev->dev, "underrun while writing\n");
		ret = -EIO;
		goto error;
	}
	dev_dbg(dev->dev, "transfer complete\n");

	return 0;

error:
	at91_twi_dma_cleanup(dev);
	return ret;
}
Example #24
/* Called in soft-irq context */
static void smd_net_data_handler(unsigned long arg)
{
    struct net_device *dev = (struct net_device *) arg;
    struct rmnet_private *p = netdev_priv(dev);
    struct sk_buff *skb;
    void *ptr = 0;
    int sz;
    u32 opmode = p->operation_mode;
//	unsigned long flags;
//   int max_package_size;
    for (;;) {
        sz = smd_cur_packet_size(p->ch);
        if (sz == 0) break;
        if (smd_read_avail(p->ch) < sz) break;
//ZTE_RIL_WANGCHENG_20110425 start
#ifdef CONFIG_ZTE_PLATFORM

        if (RMNET_IS_MODE_IP(opmode) ? (sz > ((dev->mtu > RMNET_DEFAULT_MTU_LEN)? dev->mtu:RMNET_DEFAULT_MTU_LEN)) :
                (sz > (((dev->mtu > RMNET_DEFAULT_MTU_LEN)? dev->mtu:RMNET_DEFAULT_MTU_LEN) + ETH_HLEN))) {
#else
        if (RMNET_IS_MODE_IP(opmode) ? (sz > dev->mtu) :
                (sz > (dev->mtu + ETH_HLEN))) {

#endif

            pr_err("rmnet_recv() discarding %d len (%d mtu)\n",
                   sz, RMNET_IS_MODE_IP(opmode) ?
                   dev->mtu : (dev->mtu + ETH_HLEN));
            ptr = 0;
        } else {
            skb = dev_alloc_skb(sz + NET_IP_ALIGN);
            if (skb == NULL) {
                pr_err("rmnet_recv() cannot allocate skb\n");
            } else {
                skb->dev = dev;
                skb_reserve(skb, NET_IP_ALIGN);
                ptr = skb_put(skb, sz);
                wake_lock_timeout(&p->wake_lock, HZ / 2);
                if (smd_read(p->ch, ptr, sz) != sz) {
                    pr_err("rmnet_recv() smd lied about avail?!");
                    ptr = 0;
                    dev_kfree_skb_irq(skb);
                } else {
                    /* Handle Rx frame format */
                    //spin_lock_irqsave(&p->lock, flags);
                    //opmode = p->operation_mode;
                    //spin_unlock_irqrestore(&p->lock, flags);

                    if (RMNET_IS_MODE_IP(opmode)) {
                        /* Driver in IP mode */
                        skb->protocol =
                            rmnet_ip_type_trans(skb, dev);
                    } else {
                        /* Driver in Ethernet mode */
                        skb->protocol =
                            eth_type_trans(skb, dev);
                    }
                    if (RMNET_IS_MODE_IP(opmode) ||
                            count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
                        p->wakeups_rcv +=
                            rmnet_cause_wakeup(p);
#endif
                        p->stats.rx_packets++;
                        p->stats.rx_bytes += skb->len;
                    }
                    netif_rx(skb);
                }
                continue;
            }
        }
        if (smd_read(p->ch, ptr, sz) != sz)
            pr_err("rmnet_recv() smd lied about avail?!");
    }
}

//ZTE_RIL_RJG_20101103 end

static DECLARE_TASKLET(smd_net_data_tasklet, smd_net_data_handler, 0);

static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rmnet_private *p = netdev_priv(dev);
    smd_channel_t *ch = p->ch;
    int smd_ret;
    struct QMI_QOS_HDR_S *qmih;
    u32 opmode;
    unsigned long flags;

    /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
    spin_lock_irqsave(&p->lock, flags);
    opmode = p->operation_mode;
    spin_unlock_irqrestore(&p->lock, flags);

    if (RMNET_IS_MODE_QOS(opmode)) {
        qmih = (struct QMI_QOS_HDR_S *)
               skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
        qmih->version = 1;
        qmih->flags = 0;
        qmih->flow_id = skb->mark;
    }

    dev->trans_start = jiffies;
    smd_ret = smd_write(ch, skb->data, skb->len);
    if (smd_ret != skb->len) {
        pr_err("%s: smd_write returned error %d", __func__, smd_ret);
        goto xmit_out;
    }

    if (RMNET_IS_MODE_IP(opmode) ||
            count_this_packet(skb->data, skb->len)) {
        p->stats.tx_packets++;
        p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
        p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
    }

xmit_out:
    /* data xmited, safe to release skb */
    dev_kfree_skb_irq(skb);
    return 0;
}

static void _rmnet_resume_flow(unsigned long param)
{
    struct net_device *dev = (struct net_device *)param;
    struct rmnet_private *p = netdev_priv(dev);
    struct sk_buff *skb = NULL;
    unsigned long flags;

    /* xmit and enable the flow only once even if
       multiple tasklets were scheduled by smd_net_notify */
    spin_lock_irqsave(&p->lock, flags);
    if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
        skb = p->skb;
        p->skb = NULL;
        spin_unlock_irqrestore(&p->lock, flags);
        _rmnet_xmit(skb, dev);
        netif_wake_queue(dev);
    } else
        spin_unlock_irqrestore(&p->lock, flags);
}

static void msm_rmnet_unload_modem(void *pil)
{
    if (pil)
        pil_put(pil);
}

static void *msm_rmnet_load_modem(struct net_device *dev)
{
    void *pil;
    int rc;
    struct rmnet_private *p = netdev_priv(dev);

    pil = pil_get("modem");
    if (IS_ERR(pil))
        pr_err("%s: modem load failed\n", __func__);
    else if (msm_rmnet_modem_wait) {
        rc = wait_for_completion_interruptible_timeout(
                 &p->complete,
                 msecs_to_jiffies(msm_rmnet_modem_wait * 1000));
        if (!rc)
            rc = -ETIMEDOUT;
        if (rc < 0) {
            pr_err("%s: wait for rmnet port failed %d\n",
                   __func__, rc);
            msm_rmnet_unload_modem(pil);
            pil = ERR_PTR(rc);
        }
    }

    return pil;
}
Example #25
        addr = sg_dma_address(sg);
        len = sg_dma_len(sg);
        rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
    }

    spin_lock_irqsave(&pcr->lock, flags);

    pcr->done = &trans_done;
    pcr->trans_result = TRANS_NOT_READY;
    init_completion(&trans_done);
    rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
    rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);

    spin_unlock_irqrestore(&pcr->lock, flags);

    timeleft = wait_for_completion_interruptible_timeout(
                   &trans_done, msecs_to_jiffies(timeout));
    if (timeleft <= 0) {
        dev_dbg(&(pcr->pci->dev), "Timeout (%s %d)\n",
                __func__, __LINE__);
        err = -ETIMEDOUT;
        goto out;
    }

    spin_lock_irqsave(&pcr->lock, flags);

    if (pcr->trans_result == TRANS_RESULT_FAIL)
        err = -EINVAL;
    else if (pcr->trans_result == TRANS_NO_DEVICE)
        err = -ENODEV;

    spin_unlock_irqrestore(&pcr->lock, flags);
Example #26
/*
 * Low level master read/write transaction. This function is called
 * from i2c_davinci_xfer.
 */
static int
i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
{
	struct i2c_davinci_device *dev = i2c_get_adapdata(adap);
	u8 zero_byte = 0;
	u32 flag = 0, stat = 0;
	unsigned long flags;
	int r;
	int cnt = 2000;

	/* Introduce a 20 usec delay.  Required for Davinci EVM */
	while (cnt--);

	DEB1("addr: 0x%04x, len: %d, flags: 0x%x, stop: %d",
	     msg->addr, msg->len, msg->flags, stop);

	spin_lock_irqsave(&i2c_spinlock, flags);

	/* set the slave address */
	dev->regs->icsar = msg->addr;

	/* Sigh, seems we can't do zero length transactions. Thus, we
	 * can't probe for devices w/o actually sending/receiving at least
	 * a single byte. So we'll set count to 1 for the zero length
	 * transaction case and hope we don't cause grief for some
	 * arbitrary device due to random byte write/read during
	 * probes.
	 */
	if (msg->len == 0) {
		dev->buf = &zero_byte;
		dev->buf_len = 1;
	} else {
		dev->buf = msg->buf;
		dev->buf_len = msg->len;
	}

	dev->regs->iccnt = dev->buf_len;
	dev->cmd_err = 0;
	init_completion(&dev->cmd_completion);

	/* Clear any pending interrupts by reading the IVR */
	stat = dev->regs->icivr;

	/* Take I2C out of reset, configure it as master and set the start bit */
	flag =
	    DAVINCI_I2C_ICMDR_IRS_MASK | DAVINCI_I2C_ICMDR_MST_MASK |
	    DAVINCI_I2C_ICMDR_STT_MASK;

	/* if the slave address is ten bit address, enable XA bit */
	if (msg->flags & I2C_M_TEN)
		flag |= DAVINCI_I2C_ICMDR_XA_MASK;
	if (!(msg->flags & I2C_M_RD))
		flag |= DAVINCI_I2C_ICMDR_TRX_MASK;
	if (stop)
		flag |= DAVINCI_I2C_ICMDR_STP_MASK;

	/* write the data into mode register */
	dev->regs->icmdr = flag;

	/* Enable receive and transmit interrupts */
	if (msg->flags & I2C_M_RD)
		dev->regs->icimr |= DAVINCI_I2C_ICIMR_ICRRDY_MASK;
	else {
		dev->regs->icimr |= DAVINCI_I2C_ICIMR_ICXRDY_MASK;

		/* Prime the pump */
		if (dev->regs->icstr & DAVINCI_I2C_ICSTR_ICXRDY_MASK) {
			dev->regs->icdxr = *dev->buf++;
			dev->buf_len--;
		}
	}

	spin_unlock_irqrestore(&i2c_spinlock, flags);

	/* wait for the transaction to complete */
	r = wait_for_completion_interruptible_timeout(&dev->cmd_completion,
						      DAVINCI_I2C_TIMEOUT);

	dev->buf_len = 0;

	if (r < 0)
		return r;
	if (r == 0) {
		printk("I2C command timeout, icivr=0x%04x, status=0x%04x\n",
		       dev->regs->icivr, dev->regs->icstr);
		i2c_davinci_reset(dev);
		return -ETIMEDOUT;
	}

	/* no error */
	if (!dev->cmd_err)
		return msg->len;

	/* We have an error */
	if (dev->cmd_err & DAVINCI_I2C_ICSTR_NACK_MASK) {
		if (msg->flags & I2C_M_IGNORE_NAK)
			return msg->len;
		if (stop)
			dev->regs->icmdr |= DAVINCI_I2C_ICMDR_STP_MASK;
		return -EREMOTEIO;
	}
	if (dev->cmd_err & DAVINCI_I2C_ICSTR_AL_MASK ||
	    dev->cmd_err & DAVINCI_I2C_ICSTR_RSFULL_MASK) {
		i2c_davinci_reset(dev);
		return -EIO;
	}
	return msg->len;
}
Example #27
static int mhl_send_msc_command(struct msc_command_struct *req)
{
	int timeout;
	u8 start_bit = 0x00;
	u8 *burst_data;
	int i;

	if (mhl_msm_state->cur_state != POWER_STATE_D0_MHL) {
		pr_debug("%s: power_state:%02x CBUS(0x0A):%02x\n",
		__func__,
		mhl_msm_state->cur_state, mhl_i2c_reg_read(TX_PAGE_CBUS, 0x0A));
		return -EFAULT;
	}

	if (!req)
		return -EFAULT;

	pr_debug("%s: command=0x%02x offset=0x%02x %02x %02x",
		__func__,
		req->command,
		req->offset,
		req->payload.data[0],
		req->payload.data[1]);

	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x13, req->offset);
	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x14, req->payload.data[0]);

	switch (req->command) {
	case MHL_SET_INT:
	case MHL_WRITE_STAT:
		start_bit = MSC_START_BIT_WRITE_REG;
		break;
	case MHL_READ_DEVCAP:
		start_bit = MSC_START_BIT_READ_REG;
		break;
	case MHL_GET_STATE:
	case MHL_GET_VENDOR_ID:
	case MHL_SET_HPD:
	case MHL_CLR_HPD:
	case MHL_GET_SC1_ERRORCODE:
	case MHL_GET_DDC_ERRORCODE:
	case MHL_GET_MSC_ERRORCODE:
	case MHL_GET_SC3_ERRORCODE:
		start_bit = MSC_START_BIT_MSC_CMD;
		mhl_i2c_reg_write(TX_PAGE_CBUS, 0x13, req->command);
		break;
	case MHL_MSC_MSG:
		start_bit = MSC_START_BIT_VS_CMD;
		mhl_i2c_reg_write(TX_PAGE_CBUS, 0x15, req->payload.data[1]);
		mhl_i2c_reg_write(TX_PAGE_CBUS, 0x13, req->command);
		break;
	case MHL_WRITE_BURST:
		start_bit = MSC_START_BIT_WRITE_BURST;
		mhl_i2c_reg_write(TX_PAGE_CBUS, 0x20, req->length - 1);
		if (!(req->payload.burst_data)) {
			pr_err("%s: burst data is null!\n", __func__);
			goto cbus_send_fail;
		}
		burst_data = req->payload.burst_data;
		for (i = 0; i < req->length; i++, burst_data++)
			mhl_i2c_reg_write(TX_PAGE_CBUS, 0xC0 + i, *burst_data);
		break;
	default:
		pr_err("%s: unknown command! (%02x)\n",
			__func__, req->command);
		goto cbus_send_fail;
	}

	INIT_COMPLETION(mhl_msm_state->msc_cmd_done);
	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x12, start_bit);
	timeout = wait_for_completion_interruptible_timeout
		(&mhl_msm_state->msc_cmd_done, HZ);
	if (!timeout) {
		pr_err("%s: cbus_command_send timed out!\n", __func__);
		goto cbus_send_fail;
	}

	switch (req->command) {
	case MHL_READ_DEVCAP:
		/* devcap */
		req->retval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x16);
		pr_debug("Read CBUS[0x16]=[%02x]\n", req->retval);
		break;
	case MHL_MSC_MSG:
		/* check if MSC_MSG NACKed */
		if (mhl_i2c_reg_read(TX_PAGE_CBUS, 0x20) & BIT6)
			return -EAGAIN;
	default:
		req->retval = 0;
		break;
	}
	mhl_msc_command_done(req);
	pr_debug("%s: msc cmd done\n", __func__);
	return 0;

cbus_send_fail:
	return -EFAULT;
}
Example #28
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
			struct rpcrdma_ia *ia, struct sockaddr *addr)
{
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);

	id = rdma_create_id(&init_net, rpcrdma_conn_upcall, xprt, RDMA_PS_TCP,
			    IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC:       %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);

	/* FIXME:
	 * Until xprtrdma supports DEVICE_REMOVAL, the provider must
	 * be pinned while there are active NFS/RDMA mounts to prevent
	 * hangs and crashes at umount time.
	 */
	if (!ia->ri_async_rc && !try_module_get(id->device->owner)) {
		dprintk("RPC:       %s: Failed to get device module\n",
			__func__);
		ia->ri_async_rc = -ENODEV;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto put;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto put;

	return id;
put:
	module_put(id->device->owner);
out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}
Example #29
ssize_t mdp4_dsi_video_show_event(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
    int cndx;
    struct vsycn_ctrl *vctrl;
    ssize_t ret = 0;
    unsigned long flags;
    u64 vsync_tick;
    ktime_t ctime;
    u32 ctick, ptick;
    int diff;


    cndx = 0;
    vctrl = &vsync_ctrl_db[0];

    if (atomic_read(&vctrl->suspend) > 0 ||
            atomic_read(&vctrl->vsync_resume) == 0)
        return 0;

    /*
     * The show_event thread keeps spinning on vctrl->vsync_comp;
     * there is a race condition on x.done if multiple threads are
     * blocked at wait_for_completion(&vctrl->vsync_comp).
     *
     * If the show_event thread wakes up first, it will come back
     * and call INIT_COMPLETION(vctrl->vsync_comp), which sets x.done = 0;
     * then the second thread wakes up and sets x.done = 0x7ffffffd,
     * after which wait_for_completion will never wait.
     * To avoid this, force the show_event thread to sleep 5 ms here,
     * since it has a full vsync period (16.6 ms) to wait.
     */
    ctime = ktime_get();
    ctick = (u32)ktime_to_us(ctime);
    ptick = (u32)ktime_to_us(vctrl->vsync_time);
    ptick += 5000;	/* 5ms */
    diff = ptick - ctick;
    if (diff > 0) {
        if (diff > 1000) /* 1 ms */
            diff = 1000;
        usleep(diff);
    }

    spin_lock_irqsave(&vctrl->spin_lock, flags);
    if (vctrl->wait_vsync_cnt == 0)
        INIT_COMPLETION(vctrl->vsync_comp);
    vctrl->wait_vsync_cnt++;
    spin_unlock_irqrestore(&vctrl->spin_lock, flags);
    ret = wait_for_completion_interruptible_timeout(&vctrl->vsync_comp,
            msecs_to_jiffies(VSYNC_PERIOD * 4));
    if (ret <= 0) {
        complete_all(&vctrl->vsync_comp);
        vctrl->wait_vsync_cnt = 0;
        vctrl->vsync_time = ktime_get();
    }

    spin_lock_irqsave(&vctrl->spin_lock, flags);
    vsync_tick = ktime_to_ns(vctrl->vsync_time);
    spin_unlock_irqrestore(&vctrl->spin_lock, flags);

    ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
    buf[strlen(buf) + 1] = '\0';
    return ret;
}
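The comment in Example #29 is really about completion internals: complete() increments x.done, complete_all() adds roughly UINT_MAX/2 (hence the 0x7ffffffd in the comment), and INIT_COMPLETION()/reinit_completion() resets it to 0, so mixing reinitialization with complete_all() can leave the counter "pre-paid" and make later waits fall straight through. The usual cure is to guard both the reinit and the waiter count with one lock and to reinitialize only when nobody is waiting. A condensed sketch of that discipline, with hypothetical names:

#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

struct my_vsync {
	spinlock_t lock;
	int waiters;
	struct completion comp;
};

static long my_vsync_wait(struct my_vsync *v)
{
	unsigned long flags;
	long ret;

	spin_lock_irqsave(&v->lock, flags);
	if (v->waiters == 0)		/* reinit only when nobody waits */
		reinit_completion(&v->comp);
	v->waiters++;
	spin_unlock_irqrestore(&v->lock, flags);

	ret = wait_for_completion_interruptible_timeout(&v->comp,
							msecs_to_jiffies(64));
	if (ret <= 0) {
		spin_lock_irqsave(&v->lock, flags);
		v->waiters = 0;		/* give up on behalf of everyone */
		complete_all(&v->comp);	/* and release any stragglers */
		spin_unlock_irqrestore(&v->lock, flags);
	}
	return ret;
}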
Example #30
/**
 * i2400m_msg_to_dev - Send a control message to the device and get a response
 *
 * @i2400m: device descriptor
 *
 * @buf: pointer to the buffer containing the message to be sent; it
 *           has to start with a &struct i2400M_l3l4_hdr and then
 *           followed by the payload. Once this function returns, the
 *           buffer can be reused.
 *
 * @buf_len: buffer size
 *
 * Returns:
 *
 * Pointer to skb containing the ack message. You need to check the
 * pointer with IS_ERR(), as it might be an error code. Error codes
 * could happen because:
 *
 *  - the message wasn't formatted correctly
 *  - couldn't send the message
 *  - failed waiting for a response
 *  - the ack message wasn't formatted correctly
 *
 * The returned skb has been allocated with wimax_msg_to_user_alloc(),
 * it contains the response in a netlink attribute and is ready to be
 * passed up to user space with wimax_msg_to_user_send(). To access
 * the payload and its length, use wimax_msg_{data,len}() on the skb.
 *
 * The skb has to be freed with kfree_skb() once done.
 *
 * Description:
 *
 * This function delivers a message/command to the device and waits
 * for an ack to be received. The format is described in
 * linux/wimax/i2400m.h. In summary, a command/get/set is followed by an
 * ack.
 *
 * This function will not check the ack status, that's left up to the
 * caller.  Once done with the ack skb, it has to be kfree_skb()ed.
 *
 * The i2400m handles only one message at the same time, thus we need
 * the mutex to exclude other players.
 *
 * We write the message and then wait for an answer to come back. The
 * RX path intercepts control messages and handles them in
 * i2400m_rx_ctl(). Reports (notifications) are (maybe) processed
 * locally and then forwarded (as needed) to user space on the WiMAX
 * stack message pipe. Acks are saved and passed back to us through an
 * skb in i2400m->ack_skb which is ready to be given to generic
 * netlink if need be.
 */
struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m,
				  const void *buf, size_t buf_len)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_l3l4_hdr *msg_l3l4_hdr;
	struct sk_buff *ack_skb;
	const struct i2400m_l3l4_hdr *ack_l3l4_hdr;
	size_t ack_len;
	int ack_timeout;
	unsigned msg_type;
	unsigned long flags;

	d_fnstart(3, dev, "(i2400m %p buf %p len %zu)\n",
		  i2400m, buf, buf_len);

	rmb();		/* Make sure we see what i2400m_dev_reset_handle() wrote */
	if (i2400m->boot_mode)
		return ERR_PTR(-EL3RST);

	msg_l3l4_hdr = buf;
	/* Check msg & payload consistency */
	result = i2400m_msg_size_check(i2400m, msg_l3l4_hdr, buf_len);
	if (result < 0)
		goto error_bad_msg;
	msg_type = le16_to_cpu(msg_l3l4_hdr->type);
	d_printf(1, dev, "CMD/GET/SET 0x%04x %zu bytes\n",
		 msg_type, buf_len);
	d_dump(2, dev, buf, buf_len);

	/* Setup the completion, ack_skb ("we are waiting") and send
	 * the message to the device */
	mutex_lock(&i2400m->msg_mutex);
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	i2400m->ack_skb = ERR_PTR(-EINPROGRESS);
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	init_completion(&i2400m->msg_completion);
	result = i2400m_tx(i2400m, buf, buf_len, I2400M_PT_CTRL);
	if (result < 0) {
		dev_err(dev, "can't send message 0x%04x: %d\n",
			le16_to_cpu(msg_l3l4_hdr->type), result);
		goto error_tx;
	}

	/* Some commands take longer to execute because of crypto ops,
	 * so we give them some more leeway on timeout */
	switch (msg_type) {
	case I2400M_MT_GET_TLS_OPERATION_RESULT:
	case I2400M_MT_CMD_SEND_EAP_RESPONSE:
		ack_timeout = 5 * HZ;
		break;
	default:
		ack_timeout = HZ;
	}

	if (unlikely(i2400m->trace_msg_from_user))
		wimax_msg(&i2400m->wimax_dev, "echo", buf, buf_len, GFP_KERNEL);
	/* The RX path in rx.c will put any response for this message
	 * in i2400m->ack_skb and wake us up. If we cancel the wait,
	 * we need to change the value of i2400m->ack_skb to something
	 * not -EINPROGRESS so RX knows there is no one waiting. */
	result = wait_for_completion_interruptible_timeout(
		&i2400m->msg_completion, ack_timeout);
	if (result == 0) {
		dev_err(dev, "timeout waiting for reply to message 0x%04x\n",
			msg_type);
		result = -ETIMEDOUT;
		i2400m_msg_to_dev_cancel_wait(i2400m, result);
		goto error_wait_for_completion;
	} else if (result < 0) {
		dev_err(dev, "error waiting for reply to message 0x%04x: %d\n",
			msg_type, result);
		i2400m_msg_to_dev_cancel_wait(i2400m, result);
		goto error_wait_for_completion;
	}

	/* Pull out the ack data from i2400m->ack_skb -- see if it is
	 * an error and act accordingly */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	ack_skb = i2400m->ack_skb;
	if (IS_ERR(ack_skb))
		result = PTR_ERR(ack_skb);
	else
		result = 0;
	i2400m->ack_skb = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	if (result < 0)
		goto error_ack_status;
	ack_l3l4_hdr = wimax_msg_data_len(ack_skb, &ack_len);

	/* Check the ack and deliver it if it is ok */
	if (unlikely(i2400m->trace_msg_from_user))
		wimax_msg(&i2400m->wimax_dev, "echo",
			  ack_l3l4_hdr, ack_len, GFP_KERNEL);
	result = i2400m_msg_size_check(i2400m, ack_l3l4_hdr, ack_len);
	if (result < 0) {
		dev_err(dev, "HW BUG? reply to message 0x%04x: %d\n",
			msg_type, result);
		goto error_bad_ack_len;
	}
	if (msg_type != le16_to_cpu(ack_l3l4_hdr->type)) {
		dev_err(dev, "HW BUG? bad reply 0x%04x to message 0x%04x\n",
			le16_to_cpu(ack_l3l4_hdr->type), msg_type);
		result = -EIO;
		goto error_bad_ack_type;
	}
	i2400m_msg_ack_hook(i2400m, ack_l3l4_hdr, ack_len);
	mutex_unlock(&i2400m->msg_mutex);
	d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %p\n",
		i2400m, buf, buf_len, ack_skb);
	return ack_skb;

error_bad_ack_type:
error_bad_ack_len:
	kfree_skb(ack_skb);
error_ack_status:
error_wait_for_completion:
error_tx:
	mutex_unlock(&i2400m->msg_mutex);
error_bad_msg:
	d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %d\n",
		i2400m, buf, buf_len, result);
	return ERR_PTR(result);
}
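Example #30's cancel step (i2400m_msg_to_dev_cancel_wait()) is the piece that is easy to forget in this request/ack pattern: when the waiter gives up on timeout or signal, the shared ack slot must be moved off -EINPROGRESS so the RX path knows nobody is waiting, and an answer that raced in must be freed. A bare-bones sketch of that handshake under the same lock the RX side uses; all names here are hypothetical:

#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/err.h>

struct my_ctl {
	spinlock_t lock;
	struct sk_buff *ack_skb;	/* ERR_PTR(-EINPROGRESS) while waiting */
};

/* Waiter gave up: publish the error and drop a racing answer, if any. */
static void my_ctl_cancel_wait(struct my_ctl *c, int code)
{
	struct sk_buff *ack;
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	ack = c->ack_skb;
	if (!IS_ERR_OR_NULL(ack))	/* a reply already landed; free it */
		kfree_skb(ack);
	c->ack_skb = ERR_PTR(code);	/* e.g. -ETIMEDOUT or -ERESTARTSYS */
	spin_unlock_irqrestore(&c->lock, flags);
}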