static void link_pm_runtime_work(struct work_struct *work)
{
	int ret;
	struct link_pm_data *pm_data =
		container_of(work, struct link_pm_data, link_pm_work.work);
	struct device *dev = &pm_data->usb_ld->usbdev->dev;

	if (!pm_data->usb_ld->if_usb_connected || pm_data->dpm_suspending)
		return;

	if (pm_data->usb_ld->ld.com_state == COM_NONE)
		return;

	mif_debug("for dev 0x%p : current %d\n", dev,
					dev->power.runtime_status);

	switch (dev->power.runtime_status) {
	case RPM_ACTIVE:
		pm_data->resume_retry_cnt = 0;
		pm_data->resume_requested = false;
		complete(&pm_data->active_done);

		return;
	case RPM_SUSPENDED:
		if (pm_data->resume_requested)
			break;
		pm_data->resume_requested = true;
		wake_lock(&pm_data->rpm_wake);
		ret = link_pm_slave_wake(pm_data);
		if (ret < 0) {
			mif_err("slave wake fail\n");
			wake_unlock(&pm_data->rpm_wake);
			break;
		}

		if (!pm_data->usb_ld->if_usb_connected) {
			wake_unlock(&pm_data->rpm_wake);
			return;
		}

		ret = pm_runtime_resume(dev);
		if (ret < 0) {
			mif_err("resume error(%d)\n", ret);
			if (!pm_data->usb_ld->if_usb_connected) {
				wake_unlock(&pm_data->rpm_wake);
				return;
			}
			/* force to go runtime idle before retry resume */
			if (dev->power.timer_expires == 0 &&
						!dev->power.request_pending) {
				mif_debug("run time idle\n");
				pm_runtime_idle(dev);
			}
		}
		wake_unlock(&pm_data->rpm_wake);
		break;
	default:
		break;
	}
	pm_data->resume_requested = false;

	/*
	 * Check until runtime_status goes to active: if pm_runtime_resume()
	 * ran properly, the RPM status must be ACTIVE. Attempt up to 10
	 * times, otherwise re-establish the modem link.
	 */
	if (dev->power.runtime_status == RPM_ACTIVE) {
		pm_data->resume_retry_cnt = 0;
		complete(&pm_data->active_done);
	} else if (pm_data->resume_retry_cnt++ > 10) {
		mif_err("runtime_status(%d), retry_cnt(%d)\n",
			dev->power.runtime_status, pm_data->resume_retry_cnt);
		link_pm_change_modem_state(pm_data, STATE_CRASH_RESET);
	} else
		queue_delayed_work(pm_data->wq, &pm_data->link_pm_work,
							msecs_to_jiffies(20));
}
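/*
 * Hedged sketch (not from the original driver): complete(&pm_data->active_done)
 * above is typically paired with a waiter like the one below, which blocks with
 * a timeout until the runtime-PM work reports RPM_ACTIVE. The function name and
 * the 500 ms timeout are illustrative assumptions; only the active_done field
 * comes from the code above.
 */
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int link_pm_wait_active(struct link_pm_data *pm_data)
{
	unsigned long left;

	/* released by complete(&pm_data->active_done) in link_pm_runtime_work() */
	left = wait_for_completion_timeout(&pm_data->active_done,
					   msecs_to_jiffies(500));
	return left ? 0 : -ETIMEDOUT;
}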
Example #2
/* This is the completion handler which will wake us up when an URB
 * completes.
 */
static void usb_stor_blocking_completion(struct urb *urb)
{
	struct completion *urb_done_ptr = (struct completion *)urb->context;

	complete(urb_done_ptr);
}
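/*
 * Hedged sketch (not part of usb-storage itself) of how a caller typically
 * pairs with the completion handler above: it points urb->context at an
 * on-stack completion, submits the URB, and sleeps until
 * usb_stor_blocking_completion() wakes it. The wrapper name and parameters
 * are illustrative assumptions.
 */
#include <linux/usb.h>
#include <linux/completion.h>

static int submit_urb_and_wait(struct usb_device *udev, struct urb *urb,
			       unsigned int pipe, void *buf, int len)
{
	DECLARE_COMPLETION_ONSTACK(urb_done);
	int ret;

	usb_fill_bulk_urb(urb, udev, pipe, buf, len,
			  usb_stor_blocking_completion, &urb_done);

	ret = usb_submit_urb(urb, GFP_NOIO);
	if (ret)
		return ret;

	/* released by complete() in usb_stor_blocking_completion() */
	wait_for_completion(&urb_done);
	return urb->status;
}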
Example #3
static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(mrq->done_data);
}
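/*
 * Hedged sketch of the synchronous-request pattern mmc_wait_done() serves in
 * the older MMC core: the caller wires mrq->done/done_data to an on-stack
 * completion, hands the request to the host driver, and sleeps until the
 * controller signals completion. The done_data field matches the callback
 * above; the host-op call and wrapper name are assumptions about that era's
 * core.
 */
#include <linux/completion.h>

static void mmc_wait_for_req_sketch(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	DECLARE_COMPLETION_ONSTACK(complete);

	mrq->done_data = &complete;
	mrq->done = mmc_wait_done;

	host->ops->request(host, mrq);	/* start the request on the controller */
	wait_for_completion(&complete);	/* released by mmc_wait_done() */
}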
Example #4
/**
 * i2c_pnx_master_rcv - receive data from slave
 * @adap:		pointer to I2C adapter structure
 *
 * Reads one byte data from the slave
 */
static int i2c_pnx_master_rcv(struct i2c_adapter *adap)
{
	struct i2c_pnx_algo_data *alg_data = adap->algo_data;
	unsigned int val = 0;
	u32 ctl = 0;

	dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
		__func__, ioread32(I2C_REG_STS(alg_data)));

	/* Check whether there is already data,
	 * or whether we didn't 'ask' for it yet.
	 */
	if (ioread32(I2C_REG_STS(alg_data)) & mstatus_rfe) {
		dev_dbg(&adap->dev, "%s(): Write dummy data to fill "
			"Rx-fifo...\n", __func__);

		if (alg_data->mif.len == 1) {
			/* Last byte, do not acknowledge next rcv. */
			val |= stop_bit;
			if (!alg_data->last)
				val |= start_bit;

			/*
			 * Enable interrupt RFDAIE (data in Rx fifo),
			 * and disable DRMIE (need data for Tx)
			 */
			ctl = ioread32(I2C_REG_CTL(alg_data));
			ctl |= mcntrl_rffie | mcntrl_daie;
			ctl &= ~mcntrl_drmie;
			iowrite32(ctl, I2C_REG_CTL(alg_data));
		}

		/*
		 * Now we'll 'ask' for data:
		 * For each byte we want to receive, we must
		 * write a (dummy) byte to the Tx-FIFO.
		 */
		iowrite32(val, I2C_REG_TX(alg_data));

		return 0;
	}

	/* Handle data. */
	if (alg_data->mif.len > 0) {
		val = ioread32(I2C_REG_RX(alg_data));
		*alg_data->mif.buf++ = (u8) (val & 0xff);
		dev_dbg(&adap->dev, "%s(): rcv 0x%x [%d]\n", __func__, val,
			alg_data->mif.len);

		alg_data->mif.len--;
		if (alg_data->mif.len == 0) {
			if (alg_data->last)
				/* Wait until the STOP is seen. */
				if (wait_timeout(I2C_PNX_TIMEOUT, alg_data))
					dev_err(&adap->dev, "The bus is still "
						"active after timeout\n");

			/* Disable master interrupts */
			ctl = ioread32(I2C_REG_CTL(alg_data));
			ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie |
				 mcntrl_drmie | mcntrl_daie);
			iowrite32(ctl, I2C_REG_CTL(alg_data));

			/* Kill timer. */
			del_timer_sync(&alg_data->mif.timer);
			complete(&alg_data->mif.complete);
		}
	}

	dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
		__func__, ioread32(I2C_REG_STS(alg_data)));

	return 0;
}
static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out,
	void *data)
{
	struct ispif_device *ispif = (struct ispif_device *)data;

	BUG_ON(!ispif);
	BUG_ON(!out);

	out[VFE0].ispifIrqStatus0 = msm_camera_io_r(ispif->base +
		ISPIF_VFE_m_IRQ_STATUS_0(VFE0));
	msm_camera_io_w(out[VFE0].ispifIrqStatus0,
		ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE0));

	out[VFE0].ispifIrqStatus1 = msm_camera_io_r(ispif->base +
		ISPIF_VFE_m_IRQ_STATUS_1(VFE0));
	msm_camera_io_w(out[VFE0].ispifIrqStatus1,
		ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE0));

	out[VFE0].ispifIrqStatus2 = msm_camera_io_r(ispif->base +
		ISPIF_VFE_m_IRQ_STATUS_2(VFE0));
	msm_camera_io_w_mb(out[VFE0].ispifIrqStatus2,
		ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(VFE0));

	if (ispif->vfe_info.num_vfe > 1) {
		out[VFE1].ispifIrqStatus0 = msm_camera_io_r(ispif->base +
			ISPIF_VFE_m_IRQ_STATUS_0(VFE1));
		msm_camera_io_w(out[VFE1].ispifIrqStatus0,
			ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE1));

		out[VFE1].ispifIrqStatus1 = msm_camera_io_r(ispif->base +
			ISPIF_VFE_m_IRQ_STATUS_1(VFE1));
		msm_camera_io_w(out[VFE1].ispifIrqStatus1,
				ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE1));

		out[VFE1].ispifIrqStatus2 = msm_camera_io_r(ispif->base +
			ISPIF_VFE_m_IRQ_STATUS_2(VFE1));
		msm_camera_io_w_mb(out[VFE1].ispifIrqStatus2,
			ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(VFE1));
	}
	msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
		ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);

	if (out[VFE0].ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK) {
		if (out[VFE0].ispifIrqStatus0 & RESET_DONE_IRQ)
			complete(&ispif->reset_complete[VFE0]);

		if (out[VFE0].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
			pr_err("%s: VFE0 pix0 overflow.\n", __func__);

		if (out[VFE0].ispifIrqStatus0 & RAW_INTF_0_OVERFLOW_IRQ)
			pr_err("%s: VFE0 rdi0 overflow.\n", __func__);

		if (out[VFE0].ispifIrqStatus1 & RAW_INTF_1_OVERFLOW_IRQ)
			pr_err("%s: VFE0 rdi1 overflow.\n", __func__);

		if (out[VFE0].ispifIrqStatus2 & RAW_INTF_2_OVERFLOW_IRQ)
			pr_err("%s: VFE0 rdi2 overflow.\n", __func__);

		ispif_process_irq(ispif, out, VFE0);
	}
	if (ispif->hw_num_isps > 1) {
		if (out[VFE1].ispifIrqStatus0 & RESET_DONE_IRQ)
			complete(&ispif->reset_complete[VFE1]);

		if (out[VFE1].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ)
			pr_err("%s: VFE1 pix0 overflow.\n", __func__);

		if (out[VFE1].ispifIrqStatus0 & RAW_INTF_0_OVERFLOW_IRQ)
			pr_err("%s: VFE1 rdi0 overflow.\n", __func__);

		if (out[VFE1].ispifIrqStatus1 & RAW_INTF_1_OVERFLOW_IRQ)
			pr_err("%s: VFE1 rdi1 overflow.\n", __func__);

		if (out[VFE1].ispifIrqStatus2 & RAW_INTF_2_OVERFLOW_IRQ)
			pr_err("%s: VFE1 rdi2 overflow.\n", __func__);

		ispif_process_irq(ispif, out, VFE1);
	}
}
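/*
 * Hedged sketch of the reset path that complete(&ispif->reset_complete[VFE0])
 * above unblocks: the driver re-initializes the completion, kicks the reset
 * command, and waits (with a timeout) for the RESET_DONE_IRQ path in
 * msm_ispif_read_irq_status(). ISPIF_RST_CMD_ADDR and ISPIF_RST_CMD_1_MASK
 * are assumed register/command names, not taken from the code above.
 */
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int msm_ispif_reset_vfe0_sketch(struct ispif_device *ispif)
{
	unsigned long left;

	init_completion(&ispif->reset_complete[VFE0]);

	/* kick the hardware reset (register name is an assumption) */
	msm_camera_io_w_mb(ISPIF_RST_CMD_1_MASK,
		ispif->base + ISPIF_RST_CMD_ADDR);

	left = wait_for_completion_timeout(&ispif->reset_complete[VFE0],
					   msecs_to_jiffies(500));
	return left ? 0 : -ETIMEDOUT;
}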
Example #6
long mdm_modem_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	int status, ret = 0;

	if (_IOC_TYPE(cmd) != CHARM_CODE) {
		pr_err("%s: invalid ioctl code\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: Entering ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
	switch (cmd) {
	case WAKE_CHARM:
		pr_info("%s: Powering on mdm\n", __func__);
		mdm_drv->ops->power_on_mdm_cb(mdm_drv);
		break;
	case CHECK_FOR_BOOT:
		if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
			put_user(1, (unsigned long __user *) arg);
		else
			put_user(0, (unsigned long __user *) arg);
		break;
	case NORMAL_BOOT_DONE:
		pr_debug("%s: check if mdm is booted up\n", __func__);
		get_user(status, (unsigned long __user *) arg);
		if (status) {
			pr_debug("%s: normal boot failed\n", __func__);
			mdm_drv->mdm_boot_status = -EIO;
		} else {
			pr_info("%s: normal boot done\n", __func__);
			mdm_drv->mdm_boot_status = 0;
		}
		mdm_drv->mdm_ready = 1;

		if (mdm_drv->ops->normal_boot_done_cb != NULL)
			mdm_drv->ops->normal_boot_done_cb(mdm_drv);

		if (!first_boot)
			complete(&mdm_boot);
		else
			first_boot = 0;

		/* If successful, start a timer to check that the mdm2ap_status
		 * gpio goes high.
		 */
		if (!status && gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
			schedule_delayed_work(&mdm2ap_status_check_work,
				msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS));
		break;
	case RAM_DUMP_DONE:
		pr_debug("%s: mdm done collecting RAM dumps\n", __func__);
		get_user(status, (unsigned long __user *) arg);
		if (status)
			mdm_drv->mdm_ram_dump_status = -EIO;
		else {
			pr_info("%s: ramdump collection completed\n", __func__);
			mdm_drv->mdm_ram_dump_status = 0;
		}
		complete(&mdm_ram_dumps);
		break;
	case WAIT_FOR_RESTART:
		pr_debug("%s: wait for mdm to need images reloaded\n",
				__func__);
		ret = wait_for_completion_interruptible(&mdm_needs_reload);
		if (!ret)
			put_user(mdm_drv->boot_type,
					 (unsigned long __user *) arg);
		INIT_COMPLETION(mdm_needs_reload);
		break;
	case GET_DLOAD_STATUS:
		pr_debug("getting status of mdm2ap_errfatal_gpio\n");
		if (gpio_get_value(mdm_drv->mdm2ap_errfatal_gpio) == 1 &&
			!mdm_drv->mdm_ready)
			put_user(1, (unsigned long __user *) arg);
		else
			put_user(0, (unsigned long __user *) arg);
		break;
	case IMAGE_UPGRADE:
		pr_debug("%s Image upgrade ioctl recieved\n", __func__);
		if (mdm_drv->pdata->image_upgrade_supported &&
				mdm_drv->ops->image_upgrade_cb) {
			get_user(status, (unsigned long __user *) arg);
			mdm_drv->ops->image_upgrade_cb(mdm_drv, status);
		} else
			pr_debug("%s Image upgrade not supported\n", __func__);
		break;
	case SHUTDOWN_CHARM:
		if (!mdm_drv->pdata->send_shdn)
			break;
		mdm_drv->mdm_ready = 0;
		if (mdm_debug_mask & MDM_DEBUG_MASK_SHDN_LOG)
			pr_info("Sending shutdown request to mdm\n");
		ret = sysmon_send_shutdown(SYSMON_SS_EXT_MODEM);
		if (ret)
			pr_err("%s: Graceful shutdown of the external modem failed, ret = %d\n",
				   __func__, ret);
		break;
	default:
		pr_err("%s: invalid ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
		ret = -EINVAL;
		break;
	}

	return ret;
}
/*
 * This thread processes interrupts reported by the Primary Interrupt Handler.
 */
static int twl6030_irq_thread(void *data)
{
	long irq = (long)data;
	static unsigned i2c_errors;
	static const unsigned max_i2c_errors = 100;
	int ret;
	int charger_vbus = 0;

	current->flags |= PF_NOFREEZE;

	while (!kthread_should_stop()) {
		int i;
		int start_time = 0;
		union {
			u8 bytes[4];
			u32 int_sts;
		} sts;

		/* Wait for IRQ, then read PIH irq status (also blocking) */
		wait_for_completion_interruptible(&irq_event);

		/* read INT_STS_A, B and C in one shot using a burst read */
		ret = twl_i2c_read(TWL_MODULE_PIH, sts.bytes,
				REG_INT_STS_A, 3);
		if (ret) {
			pr_warning("twl6030: I2C error %d reading PIH ISR\n",
					ret);
			if (++i2c_errors >= max_i2c_errors) {
				printk(KERN_ERR "Maximum I2C error count"
						" exceeded.  Terminating %s.\n",
						__func__);
				break;
			}
			complete(&irq_event);
			continue;
		}

		sts.bytes[3] = 0; /* Only 24 bits are valid */

		/*
		 * Since VBUS status bit is not reliable for VBUS disconnect
		 * use CHARGER VBUS detection status bit instead.
		 */
		if (sts.bytes[2] & 0x10) {
			charger_vbus = 1;
			sts.bytes[2] |= 0x08;
		}

		for (i = 0; sts.int_sts; sts.int_sts >>= 1, i++) {
			local_irq_disable();
			if (sts.int_sts & 0x1) {
				int module_irq = twl6030_irq_base +
					twl6030_interrupt_mapping[i];
				struct irq_desc *d = irq_to_desc(module_irq);

				if (!d) {
					pr_err("twl6030: Invalid SIH IRQ: %d\n",
					       module_irq);
					return -EINVAL;
				}

				/* This may be a wakeup event:
				 * d->status flags are masked while we are
				 * waking up, so give the IRQ some time to
				 * be enabled.
				 */
				start_time = jiffies;
				while ((d->status & IRQ_DISABLED) &&
				       (jiffies_to_msecs(jiffies-start_time) < 100)) {
					yield();
				}

				/* These can't be masked ... always warn
				 * if we get any surprises.
				 */
				if ((d->status & IRQ_DISABLED) && !charger_vbus) {
					pr_warning("twl handler not called, irq is disabled!\n");
					note_interrupt(module_irq, d,
							IRQ_NONE);
				} else {
					d->handle_irq(module_irq, d);
				}

			}
			local_irq_enable();
		}
		ret = twl_i2c_write_u8(TWL_MODULE_PIH, 0x0,
				REG_INT_STS_A); /* clear INT_STS_A */
		if (ret)
			pr_warning("twl6030: I2C error in clearing PIH ISR\n");

		enable_irq(irq);
	}

	return 0;
}
Example #8
static void ps3avd(struct work_struct *work)
{
	ps3av_set_videomode_cont(ps3av->ps3av_mode, ps3av->ps3av_mode_old);
	complete(&ps3av->done);
}
Example #9
static void aliasguid_query_handler(int status,
				    struct ib_sa_guidinfo_rec *guid_rec,
				    void *context)
{
	struct mlx4_ib_dev *dev;
	struct mlx4_alias_guid_work_context *cb_ctx = context;
	u8 port_index;
	int i;
	struct mlx4_sriov_alias_guid_info_rec_det *rec;
	unsigned long flags, flags1;

	if (!context)
		return;

	dev = cb_ctx->dev;
	port_index = cb_ctx->port - 1;
	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[cb_ctx->block_num];

	if (status) {
		rec->status = MLX4_GUID_INFO_STATUS_IDLE;
		pr_debug("(port: %d) failed: status = %d\n",
			 cb_ctx->port, status);
		goto out;
	}

	if (guid_rec->block_num != cb_ctx->block_num) {
		pr_err("block num mismatch: %d != %d\n",
		       cb_ctx->block_num, guid_rec->block_num);
		goto out;
	}

	pr_debug("lid/port: %d/%d, block_num: %d\n",
		 be16_to_cpu(guid_rec->lid), cb_ctx->port,
		 guid_rec->block_num);

	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[guid_rec->block_num];

	rec->status = MLX4_GUID_INFO_STATUS_SET;
	rec->method = MLX4_GUID_INFO_RECORD_SET;

	for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
		__be64 tmp_cur_ag;
		tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
		/* Check if the SM didn't assign one of the records.
		 * If it didn't, and the record was not a sysadmin request,
		 * ask the SM for a new GUID (instead of the driver's request).
		 */
		if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
			mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
				     "block_num: %d was declined by SM, "
				     "ownership by %d (0 = driver, 1=sysAdmin,"
				     " 2=None)\n", __func__, i,
				     guid_rec->block_num, rec->ownership);
			if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
				/* if it is driver assign, asks for new GUID from SM*/
				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
					MLX4_NOT_SET_GUID;

				/* Mark the record as not assigned, and let it
				 * be sent again in the next work sched.*/
				rec->status = MLX4_GUID_INFO_STATUS_IDLE;
				rec->guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
			}
		} else {
			/* Properly assigned record: we save the GUID we just
			 * got from the SM in admin_guid so that it is
			 * persistent; the next request to the SM will ask for
			 * this same GUID. */
			if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
			    tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) {
				/* the sysadmin assignment failed.*/
				mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
					     " admin guid after SysAdmin "
					     "configuration. "
					     "Record num %d in block_num:%d "
					     "was declined by SM, "
					     "new val(0x%llx) was kept\n",
					      __func__, i,
					     guid_rec->block_num,
					     (long long)be64_to_cpu(*(__be64 *) &
							 rec->all_recs[i * GUID_REC_SIZE]));
			} else {
				memcpy(&rec->all_recs[i * GUID_REC_SIZE],
				       &guid_rec->guid_info_list[i * GUID_REC_SIZE],
				       GUID_REC_SIZE);
			}
		}
	}
	/*
	 * The function is called here to cover the cases where the SM
	 * doesn't send an SMP, so the driver notifies the slave from
	 * the SA response instead.
	 */
	mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num,
					     cb_ctx->port,
					     guid_rec->guid_info_list);
out:
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down)
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
				   &dev->sriov.alias_guid.ports_guid[port_index].
				   alias_guid_work, 0);
	if (cb_ctx->sa_query) {
		list_del(&cb_ctx->list);
		kfree(cb_ctx);
	} else
		complete(&cb_ctx->done);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
static void lpc32xx_dma_complete_func(void *completion)
{
	complete(completion);
}
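/*
 * Hedged sketch of how a dmaengine client typically wires the callback above:
 * the on-stack completion becomes callback_param, the descriptor is submitted,
 * and the caller sleeps until the DMA engine runs lpc32xx_dma_complete_func().
 * The channel and descriptor are assumed to have been prepared elsewhere
 * (e.g. via dmaengine_prep_slave_sg()); the wrapper name is illustrative.
 */
#include <linux/dmaengine.h>
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int lpc32xx_dma_submit_and_wait_sketch(struct dma_chan *chan,
					      struct dma_async_tx_descriptor *desc)
{
	DECLARE_COMPLETION_ONSTACK(dma_done);

	desc->callback = lpc32xx_dma_complete_func;
	desc->callback_param = &dma_done;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* released by complete() in lpc32xx_dma_complete_func() */
	if (!wait_for_completion_timeout(&dma_done, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	return 0;
}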
Example #11
static void adb_sync_req_done(struct adb_request *req)
{
	struct completion *comp = req->arg;

	complete(comp);
}
long mdm_modem_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	int status, ret = 0;

	if (_IOC_TYPE(cmd) != CHARM_CODE) {
		pr_err("%s: invalid ioctl code\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: Entering ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
	switch (cmd) {
	case WAKE_CHARM:
		pr_info("%s: Powering on mdm\n", __func__);
#ifdef CONFIG_MDM_HSIC_PM
		request_boot_lock_set(rmnet_pm_dev);
#endif
		mdm_drv->ops->power_on_mdm_cb(mdm_drv);
		break;
	case CHECK_FOR_BOOT:
		if (gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
			put_user(1, (unsigned long __user *) arg);
		else
			put_user(0, (unsigned long __user *) arg);
		break;
	case NORMAL_BOOT_DONE:
		pr_info("%s: check if mdm is booted up\n", __func__);
		get_user(status, (unsigned long __user *) arg);
		if (status) {
			pr_debug("%s: normal boot failed\n", __func__);
			mdm_drv->mdm_boot_status = -EIO;
		} else {
			pr_info("%s: normal boot done\n", __func__);
			mdm_drv->mdm_boot_status = 0;
		}
		mdm_drv->mdm_ready = 1;

		if (mdm_drv->ops->normal_boot_done_cb != NULL)
			mdm_drv->ops->normal_boot_done_cb(mdm_drv);

		if (!first_boot)
			complete(&mdm_boot);
		else
			first_boot = 0;

		/* If bootup succeeded, start a timer to check that the
		 * mdm2ap_status gpio goes high.
		 */
		if (!status && gpio_get_value(mdm_drv->mdm2ap_status_gpio) == 0)
			schedule_delayed_work(&mdm2ap_status_check_work,
				msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS));
		break;
	case RAM_DUMP_DONE:
		pr_info("%s: mdm done collecting RAM dumps\n", __func__);
		get_user(status, (unsigned long __user *) arg);
		if (status)
			mdm_drv->mdm_ram_dump_status = -EIO;
		else {
			pr_info("%s: ramdump collection completed\n", __func__);
			mdm_drv->mdm_ram_dump_status = 0;
			panic("CP Crash %s", mdm_read_err_report());
		}
		complete(&mdm_ram_dumps);
		break;

	case WAIT_FOR_ERROR:
		pr_debug("%s: wait for mdm error\n", __func__);
		#if 0
		ret = wait_for_completion_interruptible(&mdm_error);
		INIT_COMPLETION(mdm_error);
		#endif
		break;

	case WAIT_FOR_RESTART:
		pr_info("%s: wait for mdm to need images reloaded\n",
				__func__);
		ret = wait_for_completion_interruptible(&mdm_needs_reload);
		if (!ret)
			put_user(mdm_drv->boot_type,
					 (unsigned long __user *) arg);
		INIT_COMPLETION(mdm_needs_reload);
		break;

	case SILENT_RESET_CONTROL:
		pr_info("%s: mdm doing silent reset\n", __func__);
		mdm_drv->mdm_ram_dump_status = 0;
		complete(&mdm_ram_dumps);
		break;

	case AUTOPM_LOCK:
		get_user(status, (unsigned long __user *) arg);
		pr_info("%s: mdm autopm request[%s]\n", __func__,
						status ? "lock" : "release");
		request_autopm_lock(status);
		break;

	case GET_BOOT_PROTOCOL:
		pr_info("%s: mdm get boot protocol %d\n", __func__,
						mdm_drv->proto_is_dload);
		return mdm_drv->proto_is_dload;

	case GET_FORCE_RAMDUMP:
		get_user(status, (unsigned long __user *) arg);
		pr_info("%s: mdm get dump mode = %d\n", __func__, force_dump);
		if (status)
			mdm_force_fatal();
		else
			mdm_silent_reset();
		break;

#ifdef CONFIG_SIM_DETECT
	case GET_SIM_DETECT:
		pr_info("%s: mdm get sim detect = %d\n", __func__,
						mdm_drv->sim_state);
		return mdm_drv->sim_state;
#endif
	default:
		pr_err("%s: invalid ioctl cmd = %d\n", __func__, _IOC_NR(cmd));
		ret = -EINVAL;
		break;
	}

	return ret;
}
Example #13
static void spi_imx_dma_tx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

	complete(&spi_imx->dma_tx_completion);
}
Example #14
static void netvsc_send_completion(struct netvsc_device *net_device,
				   struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *nvsc_packet;
	struct net_device *ndev;
	u32 send_index;

	ndev = net_device->ndev;

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
			(packet->offset8 << 3));

	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG5_TYPE_SUBCHANNEL)) {
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
	} else if (nvsp_packet->hdr.msg_type ==
		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
		int num_outstanding_sends;
		u16 q_idx = 0;
		struct vmbus_channel *channel = device->channel;
		int queue_sends;

		/* Get the send context */
		nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
			packet->trans_id;

		/* Notify the layer above us */
		if (nvsc_packet) {
			send_index = nvsc_packet->send_buf_index;
			if (send_index != NETVSC_INVALID_INDEX)
				netvsc_free_send_slot(net_device, send_index);
			q_idx = nvsc_packet->q_idx;
			channel = nvsc_packet->channel;
			nvsc_packet->send_completion(nvsc_packet->
						     send_completion_ctx);
		}

		num_outstanding_sends =
			atomic_dec_return(&net_device->num_outstanding_sends);
		queue_sends = atomic_dec_return(&net_device->
						queue_sends[q_idx]);

		if (net_device->destroy && num_outstanding_sends == 0)
			wake_up(&net_device->wait_drain);

		if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
		    !net_device->start_remove &&
		    (hv_ringbuf_avail_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
				netif_tx_wake_queue(netdev_get_tx_queue(
						    ndev, q_idx));
	} else {
		netdev_err(ndev, "Unknown send completion packet type- "
			   "%d received!!\n", nvsp_packet->hdr.msg_type);
	}

}
static irqreturn_t audiodsp_mailbox_irq(int irq, void *data)
{
	struct audiodsp_priv *priv=(struct audiodsp_priv *)data;
	unsigned long status;
	struct mail_msg msg;
	int i = 0;
#if MESON_CPU_TYPE < MESON_CPU_TYPE_MESON8	
	unsigned long fiq_mask;
#endif
	status=READ_VREG(MB1_REG);
#if MESON_CPU_TYPE < MESON_CPU_TYPE_MESON8	
	fiq_mask=READ_VREG(MB1_SEL);
	status=status&fiq_mask;
#endif
	if(status&(1<<M1B_IRQ0_PRINT))
		{
		get_mailbox_data(priv,M1B_IRQ0_PRINT,&msg);
		SYS_CLEAR_IRQ(M1B_IRQ0_PRINT);
	//	inv_dcache_range((unsigned  long )msg.data,(unsigned long)msg.data+msg.len);
	
		DSP_PRNT("%s", msg.data);
	    //audiodsp_work.buf = msg.data;
	    //schedule_work(&audiodsp_work.audiodsp_workqueue);		
		}
	if(status&(1<<M1B_IRQ1_BUF_OVERFLOW))
		{
		SYS_CLEAR_IRQ(M1B_IRQ1_BUF_OVERFLOW);
		DSP_PRNT("DSP BUF over flow\n");
		}
	if(status&(1<<M1B_IRQ2_BUF_UNDERFLOW))
		{
		SYS_CLEAR_IRQ(M1B_IRQ2_BUF_UNDERFLOW);
		DSP_PRNT("DSP BUF over flow\n");
		}
	if(status&(1<<M1B_IRQ3_DECODE_ERROR))
		{
		SYS_CLEAR_IRQ(M1B_IRQ3_DECODE_ERROR);
		priv->decode_error_count++;
		}
	if(status&(1<<M1B_IRQ4_DECODE_FINISH_FRAME))
		{
		struct frame_info *info;
		SYS_CLEAR_IRQ(M1B_IRQ4_DECODE_FINISH_FRAME);
		get_mailbox_data(priv,M1B_IRQ4_DECODE_FINISH_FRAME,&msg);
		info=(struct frame_info *)msg.data;
		if(info!=NULL)
			{
			priv->cur_frame_info.offset=info->offset;
			priv->cur_frame_info.buffered_len=info->buffered_len;
			}
		priv->decoded_nb_frames ++;		
		complete(&priv->decode_completion);
		}
	if(status& (1<<M1B_IRQ5_STREAM_FMT_CHANGED))
		{
		struct frame_fmt *fmt;
		SYS_CLEAR_IRQ(M1B_IRQ5_STREAM_FMT_CHANGED);
		get_mailbox_data(priv,M1B_IRQ5_STREAM_FMT_CHANGED,&msg);
		fmt=(void *)msg.data;
		//DSP_PRNT("frame format changed");
		if(fmt==NULL || (sizeof(struct frame_fmt )<msg.len))
			{
			DSP_PRNT("frame format message error\n");
			}
		else
			{
			DSP_PRNT("frame format changed,fmt->valid 0x%x\n",fmt->valid);
			if(fmt->valid&SUB_FMT_VALID)
				{
				priv->frame_format.sub_fmt=fmt->sub_fmt;
				priv->frame_format.valid|=SUB_FMT_VALID;
				}
			if(fmt->valid&CHANNEL_VALID)
				{
				priv->frame_format.channel_num=((fmt->channel_num > 2) ? 2 : (fmt->channel_num));
				priv->frame_format.valid|=CHANNEL_VALID;
				}
			if(fmt->valid&SAMPLE_RATE_VALID)
				{
				priv->frame_format.sample_rate=fmt->sample_rate;
				priv->frame_format.valid|=SAMPLE_RATE_VALID;
				}
			if(fmt->valid&DATA_WIDTH_VALID)
				{
				priv->frame_format.data_width=fmt->data_width;
				priv->frame_format.valid|=DATA_WIDTH_VALID;
				}
			}
		/*
			if(fmt->data.pcm_encoded_info){
				set_pcminfo_data(fmt->data.pcm_encoded_info);
			}
		*/	
			DSP_PRNT("audio info from dsp:sample_rate=%d channel_num=%d\n",priv->frame_format.sample_rate,priv->frame_format.channel_num);
		}
        if(status & (1<<M1B_IRQ8_IEC958_INFO)){
            struct digit_raw_output_info* info;
            SYS_CLEAR_IRQ(M1B_IRQ8_IEC958_INFO);
            get_mailbox_data(priv, M1B_IRQ8_IEC958_INFO, &msg);
            info = (void*)msg.data;
#if 1
            IEC958_bpf = info->bpf;
            IEC958_brst = info->brst;
            IEC958_length = info->length;
            IEC958_padsize = info->padsize;
            IEC958_mode = info->mode;
            IEC958_syncword1 = info->syncword1;
            IEC958_syncword2 = info->syncword2;
            IEC958_syncword3 = info->syncword3;
            IEC958_syncword1_mask = info->syncword1_mask;
            IEC958_syncword2_mask = info->syncword2_mask;
            IEC958_syncword3_mask = info->syncword3_mask;
            IEC958_chstat0_l = info->chstat0_l;
            IEC958_chstat0_r = info->chstat0_r;
            IEC958_chstat1_l = info->chstat1_l;
            IEC958_chstat1_r = info->chstat1_r;
#endif			
  //          IEC958_mode_codec = info->can_bypass;
            
            DSP_PRNT( "MAILBOX: got IEC958 info\n");
            //schedule_work(&audiodsp_work.audiodsp_workqueue);		
        }

    	if(status& (1<<M1B_IRQ5_STREAM_RD_WD_TEST)){
            DSP_WD((0x84100000-4096+20*20),0);
    		SYS_CLEAR_IRQ(M1B_IRQ5_STREAM_RD_WD_TEST);
    		get_mailbox_data(priv,M1B_IRQ5_STREAM_RD_WD_TEST,&msg);
            
            for(i = 0;i<12;i++){
                if((DSP_RD((0x84100000-512*1024+i*20)))!= (0xff00|i)){
                    DSP_PRNT("a9 read dsp reg error ,now 0x%lx, should be 0x%x \n",(DSP_RD((0x84100000-512*1024+i*20))),12-i);
                }
               // DSP_PRNT("A9 audio dsp reg%d value 0x%x\n",i,DSP_RD((0x84100000-4096+i*20)));
            }
            for(i = 0;i<12;i++){
                DSP_WD((0x84100000-512*1024+i*20),(i%2)?i:(0xf0|i));
               
            }
            DSP_WD((0x84100000-512*1024+20*20),DSP_STATUS_HALT);
        //    DSP_PRNT("A9 mail box handle finished\n");
           // dsp_mailbox_send(priv, 1, M1B_IRQ5_STREAM_RD_WD_TEST, 0, NULL,0);

        }

	if(status & (1<<M1B_IRQ7_DECODE_FATAL_ERR)){
		int err_code;
		
		SYS_CLEAR_IRQ(M1B_IRQ7_DECODE_FATAL_ERR);
		get_mailbox_data(priv,M1B_IRQ7_DECODE_FATAL_ERR,&msg);

		err_code = msg.cmd;
		priv->decode_fatal_err = err_code;

		if(err_code & 0x01){
			timestamp_pcrscr_set(timestamp_vpts_get());
			timestamp_pcrscr_enable(1);
		}
		else if (err_code & 0x02) {
			printk("Set decode_fatal_err flag, Reset audiodsp!\n");
		}
	}

	return IRQ_HANDLED;
}
Example #16
/* Main MC kobject release() function */
static void edac_memctrl_master_release(struct kobject *kobj)
{
	debugf1("%s()\n", __func__);
	complete(&edac_memctrl_kobj_complete);
}
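/*
 * Hedged sketch of the teardown pattern behind this release() callback: the
 * unregister path re-initializes the completion, drops the last kobject
 * reference, and then blocks until the kobject core has actually run
 * edac_memctrl_master_release(). The kobject parameter and wrapper name are
 * assumptions; only edac_memctrl_kobj_complete comes from the code above.
 */
#include <linux/kobject.h>
#include <linux/completion.h>

static void edac_memctrl_unregister_sketch(struct kobject *edac_memctrl_kobj)
{
	init_completion(&edac_memctrl_kobj_complete);
	kobject_put(edac_memctrl_kobj);		/* may drop the last reference */
	wait_for_completion(&edac_memctrl_kobj_complete);
}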
static int jffs2_garbage_collect_thread(void *_c)
{
	struct jffs2_sb_info *c = _c;

	daemonize("jffs2_gcd_mtd%d", c->mtd->index);
	allow_signal(SIGKILL);
	allow_signal(SIGSTOP);
	allow_signal(SIGCONT);

	c->gc_task = current;
	complete(&c->gc_thread_start);

	set_user_nice(current, 10);

	set_freezable();
	for (;;) {
		allow_signal(SIGHUP);
	again:
		spin_lock(&c->erase_completion_lock);
		if (!jffs2_thread_should_wake(c)) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&c->erase_completion_lock);
			D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
			schedule();
		} else {
			spin_unlock(&c->erase_completion_lock);
		}

		/* Problem - immediately after bootup, the GCD spends a lot
		 * of time in places like jffs2_kill_fragtree(); so much so
		 * that userspace processes (like gdm and X) are starved
		 * despite plenty of cond_resched()s and renicing.  Yield()
		 * doesn't help, either (presumably because userspace and GCD
		 * are generally competing for a higher latency resource -
		 * disk).
		 * This forces the GCD to slow the hell down.   Pulling an
		 * inode in with read_inode() is much preferable to having
		 * the GC thread get there first. */
		schedule_timeout_interruptible(msecs_to_jiffies(50));

		/* Put_super will send a SIGKILL and then wait on the sem.
		 */
		while (signal_pending(current) || freezing(current)) {
			siginfo_t info;
			unsigned long signr;

			if (try_to_freeze())
				goto again;

			signr = dequeue_signal_lock(current, &current->blocked, &info);

			switch(signr) {
			case SIGSTOP:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n"));
				set_current_state(TASK_STOPPED);
				schedule();
				break;

			case SIGKILL:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n"));
				goto die;

			case SIGHUP:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n"));
				break;
			default:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr));
			}
		}
		/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
		disallow_signal(SIGHUP);

		D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n"));
		if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
			printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n");
			goto die;
		}
	}
 die:
	spin_lock(&c->erase_completion_lock);
	c->gc_task = NULL;
	spin_unlock(&c->erase_completion_lock);
	complete_and_exit(&c->gc_thread_exit, 0);
}
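/*
 * Hedged sketch of the start/stop handshake around the GC thread above, in the
 * same pre-kthread style as the thread body: mount spawns the thread and waits
 * on gc_thread_start; unmount sends SIGKILL and waits on gc_thread_exit, which
 * the thread signals via complete_and_exit(). Error handling is trimmed and
 * the helper names are illustrative.
 */
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/signal.h>

static int jffs2_start_gc_thread_sketch(struct jffs2_sb_info *c)
{
	pid_t pid;

	init_completion(&c->gc_thread_start);
	init_completion(&c->gc_thread_exit);

	pid = kernel_thread(jffs2_garbage_collect_thread, c,
			    CLONE_FS | CLONE_FILES);
	if (pid < 0)
		return pid;

	/* wait until the thread has set c->gc_task and signalled us */
	wait_for_completion(&c->gc_thread_start);
	return 0;
}

static void jffs2_stop_gc_thread_sketch(struct jffs2_sb_info *c)
{
	spin_lock(&c->erase_completion_lock);
	if (c->gc_task)
		send_sig(SIGKILL, c->gc_task, 1);
	spin_unlock(&c->erase_completion_lock);

	/* woken by complete_and_exit() at the end of the GC thread */
	wait_for_completion(&c->gc_thread_exit);
}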
Example #18
/* No memory to release */
static void edac_pci_release(struct kobject *kobj)
{
	debugf1("%s()\n", __func__);
	complete(&edac_pci_kobj_complete);
}
/*
 * handle_twl6030_int() is the desc->handle method for the twl6030 interrupt.
 * This is a chained interrupt, so there is no desc->action method for it.
 * Now we need to query the interrupt controller in the twl6030 to determine
 * which module is generating the interrupt request.  However, we can't do i2c
 * transactions in interrupt context, so we must defer that work to a kernel
 * thread.  All we do here is acknowledge and mask the interrupt and wakeup
 * the kernel thread.
 */
static irqreturn_t handle_twl6030_pih(int irq, void *devid)
{
	disable_irq_nosync(irq);
	complete(devid);
	return IRQ_HANDLED;
}
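/*
 * Hedged sketch of how the two halves above are wired together at init time:
 * handle_twl6030_pih() completes irq_event, which twl6030_irq_thread() waits
 * on before doing its (sleeping) I2C reads. Cleanup on failure is omitted and
 * the probe-time context is an assumption; only irq_event and the two handler
 * names come from the code above.
 */
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>

static int twl6030_wire_pih_sketch(int irq_num)
{
	struct task_struct *task;

	init_completion(&irq_event);

	task = kthread_run(twl6030_irq_thread, (void *)(long)irq_num,
			   "twl6030-irq");
	if (IS_ERR(task))
		return PTR_ERR(task);

	/* the completion is the devid, so the top half can complete() it */
	return request_irq(irq_num, handle_twl6030_pih, 0,
			   "TWL6030-PIH", &irq_event);
}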
Example #20
/*
 * This task waits until at least one touchscreen is touched.  It then loops
 * digitizing and generating events until no touchscreens are being touched.
 */
static int
xts_thread(void *arg)
{
	int any_pens_down;
	struct xts_dev *dev;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	xts_task = tsk;

	daemonize();
	reparent_to_init();
	strcpy(xts_task->comm, XTS_NAME);
	xts_task->tty = NULL;

	/* only want to receive SIGKILL */
	spin_lock_irq(&xts_task->sigmask_lock);
	siginitsetinv(&xts_task->blocked, sigmask(SIGKILL));
	recalc_sigpending(xts_task);
	spin_unlock_irq(&xts_task->sigmask_lock);

	complete(&task_sync);

	add_wait_queue(&irq_wait, &wait);
	any_pens_down = 0;
	for (;;) {
		/*
		 * Block waiting for interrupt or if any pens are down, either
		 * an interrupt or timeout to sample again.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (any_pens_down)
			schedule_timeout(HZ / 100);
		while (signal_pending(tsk)) {
			siginfo_t info;

			/* Only honor the signal if we're cleaning up */
			if (task_shutdown)
				goto exit;
			/*
			 * Someone else sent us a kill (probably the
			 * shutdown scripts "Sending all processes the
			 * KILL signal").  Just dequeue it and ignore
			 * it.
			 */
			spin_lock_irq(&current->sigmask_lock);
			(void)dequeue_signal(&current->blocked, &info);
			spin_unlock_irq(&current->sigmask_lock);
		}
		schedule();

		any_pens_down = 0;
		for (dev = dev_list; dev; dev = dev->next_dev) {
			if (dev->pen_is_down) {
				u32 x, y;
				XTouchscreen_GetPosition_2D(&dev->Touchscreen,
							    &x, &y);
				event_add(dev, 255, (u16) x, (u16) y);
				dev->pen_was_down = 1;
				any_pens_down = 1;
			} else if (dev->pen_was_down) {
				event_add(dev, 0, 0, 0);
				dev->pen_was_down = 0;
			}
		}
	}

exit:
	remove_wait_queue(&irq_wait, &wait);

	xts_task = NULL;
	complete_and_exit(&task_sync, 0);
}
Example #21
/**
 * i2c_pnx_master_xmit - transmit data to slave
 * @adap:		pointer to I2C adapter structure
 *
 * Sends one byte of data to the slave
 */
static int i2c_pnx_master_xmit(struct i2c_adapter *adap)
{
	struct i2c_pnx_algo_data *alg_data = adap->algo_data;
	u32 val;

	dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
		__func__, ioread32(I2C_REG_STS(alg_data)));

	if (alg_data->mif.len > 0) {
		/* We still have something to talk about... */
		val = *alg_data->mif.buf++;

		if (alg_data->mif.len == 1) {
			val |= stop_bit;
			if (!alg_data->last)
				val |= start_bit;
		}

		alg_data->mif.len--;
		iowrite32(val, I2C_REG_TX(alg_data));

		dev_dbg(&adap->dev, "%s(): xmit %#x [%d]\n", __func__,
			val, alg_data->mif.len + 1);

		if (alg_data->mif.len == 0) {
			if (alg_data->last) {
				/* Wait until the STOP is seen. */
				if (wait_timeout(I2C_PNX_TIMEOUT, alg_data))
					dev_err(&adap->dev, "The bus is still "
						"active after timeout\n");
			}
			/* Disable master interrupts */
			iowrite32(ioread32(I2C_REG_CTL(alg_data)) &
				~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
				  I2C_REG_CTL(alg_data));

			del_timer_sync(&alg_data->mif.timer);

			dev_dbg(&adap->dev, "%s(): Waking up xfer routine.\n",
				__func__);

			complete(&alg_data->mif.complete);
		}
	} else if (alg_data->mif.len == 0) {
		/* zero-sized transfer */
		i2c_pnx_stop(adap);

		/* Disable master interrupts. */
		iowrite32(ioread32(I2C_REG_CTL(alg_data)) &
			~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
			  I2C_REG_CTL(alg_data));

		/* Stop timer. */
		del_timer_sync(&alg_data->mif.timer);
		dev_dbg(&adap->dev, "%s(): Waking up xfer routine after "
			"zero-xfer.\n", __func__);

		complete(&alg_data->mif.complete);
	}

	dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
		__func__, ioread32(I2C_REG_STS(alg_data)));

	return 0;
}
Example #22
int rtl_usb_probe(struct usb_interface *intf,
		  const struct usb_device_id *id,
		  struct rtl_hal_cfg *rtl_hal_cfg)
{
	int err;
	struct ieee80211_hw *hw = NULL;
	struct rtl_priv *rtlpriv = NULL;
	struct usb_device	*udev;
	struct rtl_usb_priv *usb_priv;

	hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
				sizeof(struct rtl_usb_priv), &rtl_ops);
	if (!hw) {
		RT_ASSERT(false, "ieee80211 alloc failed\n");
		return -ENOMEM;
	}
	rtlpriv = hw->priv;
	rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32),
				    GFP_KERNEL);
	if (!rtlpriv->usb_data)
		return -ENOMEM;

	/* this spin lock must be initialized early */
	spin_lock_init(&rtlpriv->locks.usb_lock);
	INIT_WORK(&rtlpriv->works.fill_h2c_cmd,
		  rtl_fill_h2c_cmd_work_callback);
	INIT_WORK(&rtlpriv->works.lps_change_work,
		  rtl_lps_change_work_callback);

	rtlpriv->usb_data_index = 0;
	init_completion(&rtlpriv->firmware_loading_complete);
	SET_IEEE80211_DEV(hw, &intf->dev);
	udev = interface_to_usbdev(intf);
	usb_get_dev(udev);
	usb_priv = rtl_usbpriv(hw);
	memset(usb_priv, 0, sizeof(*usb_priv));
	usb_priv->dev.intf = intf;
	usb_priv->dev.udev = udev;
	usb_set_intfdata(intf, hw);
	/* init cfg & intf_ops */
	rtlpriv->rtlhal.interface = INTF_USB;
	rtlpriv->cfg = rtl_hal_cfg;
	rtlpriv->intf_ops = &rtl_usb_ops;
	rtl_dbgp_flag_init(hw);
	/* Init IO handler */
	_rtl_usb_io_handler_init(&udev->dev, hw);
	rtlpriv->cfg->ops->read_chip_version(hw);
	/*like read eeprom and so on */
	rtlpriv->cfg->ops->read_eeprom_info(hw);
	err = _rtl_usb_init(hw);
	if (err)
		goto error_out;
	rtl_usb_init_sw(hw);
	/* Init mac80211 sw */
	err = rtl_init_core(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "Can't allocate sw for mac80211\n");
		goto error_out;
	}
	if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
		goto error_out;
	}
	rtlpriv->cfg->ops->init_sw_leds(hw);

	err = ieee80211_register_hw(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "Can't register mac80211 hw.\n");
		err = -ENODEV;
		goto error_out;
	}
	rtlpriv->mac80211.mac80211_registered = 1;

	set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
	return 0;

error_out:
	rtl_deinit_core(hw);
	_rtl_usb_io_handler_release(hw);
	usb_put_dev(udev);
	complete(&rtlpriv->firmware_loading_complete);
	return -ENODEV;
}
Example #23
static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
{
	u32 stat, ctl;
	struct i2c_adapter *adap = dev_id;
	struct i2c_pnx_algo_data *alg_data = adap->algo_data;

	dev_dbg(&adap->dev, "%s(): mstat = %x mctrl = %x, mode = %d\n",
		__func__,
		ioread32(I2C_REG_STS(alg_data)),
		ioread32(I2C_REG_CTL(alg_data)),
		alg_data->mif.mode);
	stat = ioread32(I2C_REG_STS(alg_data));

	/* let's see what kind of event this is */
	if (stat & mstatus_afi) {
		/* We lost arbitration in the midst of a transfer */
		alg_data->mif.ret = -EIO;

		/* Disable master interrupts. */
		ctl = ioread32(I2C_REG_CTL(alg_data));
		ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie |
			 mcntrl_drmie);
		iowrite32(ctl, I2C_REG_CTL(alg_data));

		/* Stop timer, to prevent timeout. */
		del_timer_sync(&alg_data->mif.timer);
		complete(&alg_data->mif.complete);
	} else if (stat & mstatus_nai) {
		/* Slave did not acknowledge, generate a STOP */
		dev_dbg(&adap->dev, "%s(): "
			"Slave did not acknowledge, generating a STOP.\n",
			__func__);
		i2c_pnx_stop(adap);

		/* Disable master interrupts. */
		ctl = ioread32(I2C_REG_CTL(alg_data));
		ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie |
			 mcntrl_drmie);
		iowrite32(ctl, I2C_REG_CTL(alg_data));

		/* Our return value. */
		alg_data->mif.ret = -EIO;

		/* Stop timer, to prevent timeout. */
		del_timer_sync(&alg_data->mif.timer);
		complete(&alg_data->mif.complete);
	} else {
		/*
		 * Two options:
		 * - Master Tx needs data.
		 * - There is data in the Rx-fifo
		 * The latter is only the case if we have requested for data,
		 * via a dummy write. (See 'i2c_pnx_master_rcv'.)
		 * We therefore check, as a sanity check, whether that interrupt
		 * has been enabled.
		 */
		if ((stat & mstatus_drmi) || !(stat & mstatus_rfe)) {
			if (alg_data->mif.mode == I2C_SMBUS_WRITE) {
				i2c_pnx_master_xmit(adap);
			} else if (alg_data->mif.mode == I2C_SMBUS_READ) {
				i2c_pnx_master_rcv(adap);
			}
		}
	}

	/* Clear TDI and AFI bits */
	stat = ioread32(I2C_REG_STS(alg_data));
	iowrite32(stat | mstatus_tdi | mstatus_afi, I2C_REG_STS(alg_data));

	dev_dbg(&adap->dev, "%s(): exiting, stat = %x ctrl = %x.\n",
		 __func__, ioread32(I2C_REG_STS(alg_data)),
		 ioread32(I2C_REG_CTL(alg_data)));

	return IRQ_HANDLED;
}
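/*
 * Hedged sketch of the transfer-side counterpart to the i2c-pnx handlers in
 * this collection: for each message the driver re-initializes mif.complete,
 * arms the watchdog timer, starts the transfer, and sleeps until the ISR or
 * one of the master_xmit/rcv paths calls complete(). i2c_pnx_start() (the
 * mirror of i2c_pnx_stop() used above) and the timer units are assumptions.
 */
#include <linux/completion.h>
#include <linux/i2c.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static int i2c_pnx_xfer_one_sketch(struct i2c_adapter *adap, struct i2c_msg *msg)
{
	struct i2c_pnx_algo_data *alg_data = adap->algo_data;

	alg_data->mif.buf = msg->buf;
	alg_data->mif.len = msg->len;
	alg_data->mif.mode = (msg->flags & I2C_M_RD) ?
			     I2C_SMBUS_READ : I2C_SMBUS_WRITE;
	alg_data->mif.ret = 0;

	init_completion(&alg_data->mif.complete);

	/* arm the watchdog timer; I2C_PNX_TIMEOUT assumed to be in ms */
	mod_timer(&alg_data->mif.timer,
		  jiffies + msecs_to_jiffies(I2C_PNX_TIMEOUT));
	i2c_pnx_start(msg->addr, adap);

	/* released from i2c_pnx_interrupt(), master_xmit() or master_rcv() */
	wait_for_completion(&alg_data->mif.complete);

	return alg_data->mif.ret;
}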
/*
 * mdp4_overlay1_done_atv: called from isr
 */
void mdp4_overlay1_done_atv(void)
{
	complete(&atv_pipe->comp);
}
Example #25
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
	struct omap2_onenand *c = data;

	complete(&c->dma_done);
}
Example #26
static irqreturn_t qup_i2c_interrupt(int irq, void *dev)
{
	struct qup_i2c_dev *qup = dev;
	struct qup_i2c_block *blk = &qup->blk;
	u32 bus_err;
	u32 qup_err;
	u32 opflags;

	bus_err = readl(qup->base + QUP_I2C_STATUS);
	qup_err = readl(qup->base + QUP_ERROR_FLAGS);
	opflags = readl(qup->base + QUP_OPERATIONAL);

	if (!qup->msg) {
		/* Clear Error interrupt */
		writel(QUP_RESET_STATE, qup->base + QUP_STATE);
		return IRQ_HANDLED;
	}

	bus_err &= I2C_STATUS_ERROR_MASK;
	qup_err &= QUP_STATUS_ERROR_FLAGS;

	/* Clear the error bits in QUP_ERROR_FLAGS */
	if (qup_err)
		writel(qup_err, qup->base + QUP_ERROR_FLAGS);

	/* Clear the error bits in QUP_I2C_STATUS */
	if (bus_err)
		writel(bus_err, qup->base + QUP_I2C_STATUS);

	/*
	 * In BAM mode, return if an error has already been recorded for the
	 * current transfer: in the error case QUP sometimes generates more
	 * than one interrupt.
	 */
	if (qup->use_dma && (qup->qup_err || qup->bus_err))
		return IRQ_HANDLED;

	/* Reset the QUP State in case of error */
	if (qup_err || bus_err) {
		/*
		 * Don't reset the QUP state in BAM mode. The BAM flush
		 * operation needs to be scheduled from the transfer function;
		 * it clears the remaining scheduled descriptors in the BAM
		 * HW FIFO and generates the BAM interrupt.
		 */
		if (!qup->use_dma)
			writel(QUP_RESET_STATE, qup->base + QUP_STATE);
		goto done;
	}

	if (opflags & QUP_OUT_SVC_FLAG) {
		writel(QUP_OUT_SVC_FLAG, qup->base + QUP_OPERATIONAL);

		if (opflags & OUT_BLOCK_WRITE_REQ) {
			blk->tx_fifo_free += qup->out_blk_sz;
			if (qup->msg->flags & I2C_M_RD)
				qup->write_rx_tags(qup);
			else
				qup->write_tx_fifo(qup);
		}
	}

	if (opflags & QUP_IN_SVC_FLAG) {
		writel(QUP_IN_SVC_FLAG, qup->base + QUP_OPERATIONAL);

		if (!blk->is_rx_blk_mode) {
			blk->fifo_available += qup->in_fifo_sz;
			qup->read_rx_fifo(qup);
		} else if (opflags & IN_BLOCK_READ_REQ) {
			blk->fifo_available += qup->in_blk_sz;
			qup->read_rx_fifo(qup);
		}
	}

	if (qup->msg->flags & I2C_M_RD) {
		if (!blk->rx_bytes_read)
			return IRQ_HANDLED;
	} else {
		/*
		 * Ideally, QUP_MAX_OUTPUT_DONE_FLAG should be checked for
		 * FIFO mode as well, but QUP_MAX_OUTPUT_DONE_FLAG sometimes
		 * lags behind QUP_OUTPUT_SERVICE_FLAG. The only reason for an
		 * interrupt on a write message in FIFO mode is the
		 * QUP_MAX_OUTPUT_DONE_FLAG condition.
		 */
		if (blk->is_tx_blk_mode && !(opflags & QUP_MX_OUTPUT_DONE))
			return IRQ_HANDLED;
	}

done:
	qup->qup_err = qup_err;
	qup->bus_err = bus_err;
	complete(&qup->xfer);
	return IRQ_HANDLED;
}
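/*
 * Hedged sketch of the wait that pairs with complete(&qup->xfer) above: after
 * queueing a message the transfer path blocks with a timeout and then checks
 * the error state recorded by the interrupt handler. The function name and
 * the 100 ms timeout are assumptions; xfer, qup_err and bus_err come from the
 * handler above.
 */
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int qup_i2c_wait_for_complete_sketch(struct qup_i2c_dev *qup)
{
	unsigned long left;

	left = wait_for_completion_timeout(&qup->xfer, msecs_to_jiffies(100));
	if (!left)
		return -ETIMEDOUT;

	if (qup->bus_err || qup->qup_err)
		return -EIO;

	return 0;
}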
Example #27
static void hil_dev_process_err(struct hil_dev *dev)
{
	dev->idx4 = 0;
	complete(&dev->cmd_done); /* just in case somebody is waiting */
}
Example #28
static void TimerStop(unsigned long dat)
{
	complete(&comp);
}
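/*
 * Hedged sketch of the "sleep via timer + completion" pattern TimerStop()
 * serves, using the pre-4.15 setup_timer() interface that matches its
 * (unsigned long) argument: arm the timer, block on the same global
 * completion, and let the callback release it. The global "comp" is assumed
 * to be declared elsewhere, as in the snippet above.
 */
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/jiffies.h>

static void timed_wait_sketch(unsigned int ms)
{
	struct timer_list timer;

	init_completion(&comp);
	setup_timer(&timer, TimerStop, 0);
	mod_timer(&timer, jiffies + msecs_to_jiffies(ms));

	wait_for_completion(&comp);	/* released by TimerStop() */
	del_timer_sync(&timer);
}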
Example #29
static void smsi2c_worker_thread(void *args) 
{
	struct smscore_buffer_t *cb;
	struct SmsMsgHdr_S *phdr;
	u16 len;
	int ret;

	sms_debug("Worker thread is running.\n");
	cb = smscore_getbuffer(g_smsi2c_device->coredev);
	if (!cb) {
		sms_err("Unable to allocate data buffer!\n");
		goto exit;
	}
	
	phdr = (struct SmsMsgHdr_S *)cb->p;
	sms_debug("Recieve the message header.....\n");
	memset(cb->p, 0, (int)sizeof(struct SmsMsgHdr_S));
	sms_debug("buf before: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x",
		((u8*)phdr)[0], ((u8*)phdr)[1], ((u8*)phdr)[2], ((u8*)phdr)[3], 
		((u8*)phdr)[4], ((u8*)phdr)[5], ((u8*)phdr)[6], ((u8*)phdr)[7]);
	ret = i2c_master_recv(g_smsi2c_device->client, 
							cb->p, 
							(int)sizeof(struct SmsMsgHdr_S));
	if (ret < 0) {
		sms_err("Unable to read sms header! ret=%d\n", ret);
		goto exit;
	}

	sms_debug("hdr: type=%d, src=%d, dst=%d, len=%d, flag=0x%x\n", 
		phdr->msgType, phdr->msgSrcId, phdr->msgDstId, phdr->msgLength, phdr->msgFlags);
	sms_debug("buf: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x",
		((u8*)phdr)[0], ((u8*)phdr)[1], ((u8*)phdr)[2], ((u8*)phdr)[3], 
		((u8*)phdr)[4], ((u8*)phdr)[5], ((u8*)phdr)[6], ((u8*)phdr)[7]);
	sms_debug("Recieve the rest of the message.....\n");
	len = phdr->msgLength;
	
	if (len > sizeof(struct SmsMsgHdr_S))
	{
		ret = i2c_master_recv(g_smsi2c_device->client, 
								(u8*)(phdr+1), 
								len - (int)sizeof(struct SmsMsgHdr_S));
		sms_debug("recv of data returned %d", ret);
		if (ret < 0) {
			sms_err("Unable to read sms payload!\n");
			goto exit;
		}
	}
	
	switch (phdr->msgType)
	{
		case MSG_SMS_GET_VERSION_EX_RES: 
		{
			struct SmsVersionRes_S *ver =
					(struct SmsVersionRes_S *) phdr;
			sms_debug("MSG_SMS_GET_VERSION_EX_RES "
					"id %d prots 0x%x ver %d.%d",
					ver->xVersion.FirmwareId,
					ver->xVersion.SupportedProtocols,
					ver->xVersion.RomVer.Major,
					ver->xVersion.RomVer.Minor);
			
			smscore_set_device_mode(g_smsi2c_device->coredev, 
					ver->xVersion.FirmwareId == 255 ?
					SMSHOSTLIB_DEVMD_NONE : ver->xVersion.FirmwareId);
			complete(&g_smsi2c_device->version_ex_done);
			break;
		}
	}
	sms_debug("Message recieved. Sending to callback.....\n");	
	cb->offset = 0;
	cb->size = len;
	smscore_onresponse(g_smsi2c_device->coredev, cb);
	
exit:
	return;
}
Example #30
int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}