Example 1
0
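/*
 * Delayed-work handler that drives USB runtime-PM resume for the modem link.
 * It inspects dev->power.runtime_status, wakes the slave and calls
 * pm_runtime_resume() when the device is suspended, and re-queues itself
 * every 20 ms until the status becomes RPM_ACTIVE.  After more than 10
 * failed retries it forces the modem into STATE_CRASH_RESET.
 */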
static void link_pm_runtime_work(struct work_struct *work)
{
	int ret;
	struct link_pm_data *pm_data =
		container_of(work, struct link_pm_data, link_pm_work.work);
	struct device *dev = &pm_data->usb_ld->usbdev->dev;

	if (!pm_data->usb_ld->if_usb_connected || pm_data->dpm_suspending)
		return;

	if (pm_data->usb_ld->ld.com_state == COM_NONE)
		return;

	mif_debug("for dev 0x%p : current %d\n", dev,
					dev->power.runtime_status);

	switch (dev->power.runtime_status) {
	case RPM_ACTIVE:
		pm_data->resume_retry_cnt = 0;
		pm_data->resume_requested = false;
		complete(&pm_data->active_done);

		return;
	case RPM_SUSPENDED:
		if (pm_data->resume_requested)
			break;
		pm_data->resume_requested = true;
		wake_lock(&pm_data->rpm_wake);
		ret = link_pm_slave_wake(pm_data);
		if (ret < 0) {
			mif_err("slave wake fail\n");
			wake_unlock(&pm_data->rpm_wake);
			break;
		}

		if (!pm_data->usb_ld->if_usb_connected) {
			wake_unlock(&pm_data->rpm_wake);
			return;
		}

		ret = pm_runtime_resume(dev);
		if (ret < 0) {
			mif_err("resume error(%d)\n", ret);
			if (!pm_data->usb_ld->if_usb_connected) {
				wake_unlock(&pm_data->rpm_wake);
				return;
			}
			/* force to go runtime idle before retry resume */
			if (dev->power.timer_expires == 0 &&
						!dev->power.request_pending) {
				mif_debug("run time idle\n");
				pm_runtime_idle(dev);
			}
		}
		wake_unlock(&pm_data->rpm_wake);
		break;
	case RPM_SUSPENDING:
		/* usb_runtime_suspend is still running; wait briefly. */
		mif_info("rpm_states=%d", dev->power.runtime_status);
		msleep(20);
		break;
	default:
		break;
	}
	pm_data->resume_requested = false;

	/*
	 * Check until runtime_status goes to RPM_ACTIVE.
	 * Attempt up to 10 times, or re-establish the modem link.
	 * If pm_runtime_resume() ran properly, the RPM status must be ACTIVE.
	 */
	if (dev->power.runtime_status == RPM_ACTIVE) {
		pm_data->resume_retry_cnt = 0;
		complete(&pm_data->active_done);
	} else if (pm_data->resume_retry_cnt++ > 10) {
		mif_err("runtime_status(%d), retry_cnt(%d)\n",
			dev->power.runtime_status, pm_data->resume_retry_cnt);
		link_pm_change_modem_state(pm_data, STATE_CRASH_RESET);
	} else
		queue_delayed_work(pm_data->wq, &pm_data->link_pm_work,
							msecs_to_jiffies(20));
}
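/*
 * Requests and configures the AP/CP control GPIOs for the UMTS modem
 * (reset request, CP on, CP reset, PDA/PHONE active, dump interrupts,
 * FLM UART select, SIM detect), sets the PHONE_ACTIVE and SIM_DETECT IRQ
 * types, and drives the unused SUSPEND_REQUEST GPIO low.
 */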
static void umts_modem_cfg_gpio(void)
{
	int ret = 0;

	unsigned gpio_reset_req_n = umts_modem_data.gpio_reset_req_n;
	unsigned gpio_cp_on = umts_modem_data.gpio_cp_on;
	unsigned gpio_cp_rst = umts_modem_data.gpio_cp_reset;
	unsigned gpio_pda_active = umts_modem_data.gpio_pda_active;
	unsigned gpio_phone_active = umts_modem_data.gpio_phone_active;
	unsigned gpio_cp_dump_int = umts_modem_data.gpio_cp_dump_int;
	unsigned gpio_ap_dump_int = umts_modem_data.gpio_ap_dump_int;
	unsigned gpio_flm_uart_sel = umts_modem_data.gpio_flm_uart_sel;
	unsigned gpio_sim_detect = umts_modem_data.gpio_sim_detect;

	if (gpio_reset_req_n) {
		ret = gpio_request(gpio_reset_req_n, "RESET_REQ_N");
		if (ret)
			mif_err("fail to request gpio %s:%d\n", "RESET_REQ_N",
				ret);
		gpio_direction_output(gpio_reset_req_n, 0);
	}

	if (gpio_cp_on) {
		ret = gpio_request(gpio_cp_on, "CP_ON");
		if (ret)
			mif_err("fail to request gpio %s:%d\n", "CP_ON", ret);
		gpio_direction_output(gpio_cp_on, 0);
	}

	if (gpio_cp_rst) {
		ret = gpio_request(gpio_cp_rst, "CP_RST");
		if (ret)
			mif_err("fail to request gpio %s:%d\n", "CP_RST", ret);
		gpio_direction_output(gpio_cp_rst, 0);
		s3c_gpio_setpull(gpio_cp_rst, S3C_GPIO_PULL_NONE);
	}

	if (gpio_pda_active) {
		ret = gpio_request(gpio_pda_active, "PDA_ACTIVE");
		if (ret)
			mif_err("fail to request gpio %s:%d\n", "PDA_ACTIVE",
				ret);
		gpio_direction_output(gpio_pda_active, 0);
	}

	if (gpio_phone_active) {
		ret = gpio_request(gpio_phone_active, "PHONE_ACTIVE");
		if (ret)
			mif_err("fail to request gpio %s:%d\n", "PHONE_ACTIVE",
				ret);
		gpio_direction_input(gpio_phone_active);
	}

	if (gpio_sim_detect) {
		ret = gpio_request(gpio_sim_detect, "SIM_DETECT");
		if (ret)
			mif_err("fail to request gpio %s:%d\n", "SIM_DETECT",
				ret);

		/* gpio_direction_input(gpio_sim_detect); */
		irq_set_irq_type(gpio_to_irq(gpio_sim_detect),
							IRQ_TYPE_EDGE_BOTH);
	}

	if (gpio_cp_dump_int) {
		ret = gpio_request(gpio_cp_dump_int, "CP_DUMP_INT");
		if (ret)
			mif_err("fail to request gpio %s:%d\n", "CP_DUMP_INT",
				ret);
		gpio_direction_input(gpio_cp_dump_int);
	}

	if (gpio_ap_dump_int) {
		ret = gpio_request(gpio_ap_dump_int, "AP_DUMP_INT");
		if (ret)
			mif_err("fail to request gpio %s:%d\n", "AP_DUMP_INT",
				ret);
		gpio_direction_output(gpio_ap_dump_int, 0);
	}

	if (gpio_flm_uart_sel) {
		ret = gpio_request(gpio_flm_uart_sel, "GPS_UART_SEL");
		if (ret)
			mif_err("fail to request gpio %s:%d\n", "FLM_SEL",
				ret);
		gpio_direction_output(gpio_flm_uart_sel, 0);
	}

	if (gpio_phone_active)
		irq_set_irq_type(gpio_to_irq(gpio_phone_active),
							IRQ_TYPE_LEVEL_HIGH);
	/* drive unused GPIOs between AP and CP low */
	ret = gpio_request(GPIO_SUSPEND_REQUEST, "SUS_REQ");
	if (ret) {
		mif_err("fail to request gpio %s : %d\n", "SUS_REQ", ret);
	} else {
		gpio_direction_output(GPIO_SUSPEND_REQUEST, 0);
		s3c_gpio_setpull(GPIO_SUSPEND_REQUEST, S3C_GPIO_PULL_NONE);
	}
	mif_info("umts_modem_cfg_gpio done\n");
}
Example 3
0
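/*
 * Allocates and initializes a DPRAM link device from the platform data:
 * maps the DPRAM region, builds the logical DPRAM map, sets up the TX/RX
 * queues, wake lock, completions and RX tasklet, and registers the DPRAM
 * interrupt handler.  Returns the embedded struct link_device, or NULL on
 * failure.
 */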
struct link_device *dpram_create_link_device(struct platform_device *pdev)
{
	struct modem_data *mdm_data = NULL;
	struct dpram_link_device *dpld = NULL;
	struct link_device *ld = NULL;
	struct resource *res = NULL;
	resource_size_t res_size;
	struct modemlink_dpram_control *dpctl = NULL;
	unsigned long task_data;
	int ret = 0;
	int i = 0;
	int bsize;
	int qsize;

	/* Get the platform data */
	mdm_data = (struct modem_data *)pdev->dev.platform_data;
	if (!mdm_data) {
		mif_info("ERR! mdm_data == NULL\n");
		goto err;
	}
	mif_info("modem = %s\n", mdm_data->name);
	mif_info("link device = %s\n", mdm_data->link_name);

	if (!mdm_data->dpram_ctl) {
		mif_info("ERR! mdm_data->dpram_ctl == NULL\n");
		goto err;
	}
	dpctl = mdm_data->dpram_ctl;

	/* Alloc DPRAM link device structure */
	dpld = kzalloc(sizeof(struct dpram_link_device), GFP_KERNEL);
	if (!dpld) {
		mif_info("ERR! kzalloc dpld fail\n");
		goto err;
	}
	ld = &dpld->ld;

	/* Retrieve modem data and DPRAM control data from the modem data */
	ld->mdm_data = mdm_data;
	ld->name = mdm_data->link_name;
	ld->ipc_version = mdm_data->ipc_version;

	/* Retrieve the most basic data for IPC from the modem data */
	dpld->dpctl = dpctl;
	dpld->dp_type = dpctl->dp_type;

	if (mdm_data->ipc_version < SIPC_VER_50) {
		if (!dpctl->max_ipc_dev) {
			mif_info("ERR! no max_ipc_dev\n");
			goto err;
		}

		ld->aligned = dpctl->aligned;
		dpld->max_ipc_dev = dpctl->max_ipc_dev;
	} else {
		ld->aligned = 1;
		dpld->max_ipc_dev = MAX_SIPC5_DEV;
	}

	/* Set attributes as a link device */
	ld->init_comm = dpram_link_init;
	ld->terminate_comm = dpram_link_terminate;
	ld->send = dpram_send;
	ld->force_dump = dpram_force_dump;
	ld->dump_start = dpram_dump_start;
	ld->dump_update = dpram_dump_update;
	ld->ioctl = dpram_ioctl;

	INIT_LIST_HEAD(&ld->list);

	skb_queue_head_init(&ld->sk_fmt_tx_q);
	skb_queue_head_init(&ld->sk_raw_tx_q);
	skb_queue_head_init(&ld->sk_rfs_tx_q);
	ld->skb_txq[IPC_FMT] = &ld->sk_fmt_tx_q;
	ld->skb_txq[IPC_RAW] = &ld->sk_raw_tx_q;
	ld->skb_txq[IPC_RFS] = &ld->sk_rfs_tx_q;

	/* Set up function pointers */
	dpram_setup_common_op(dpld);
	dpld->dpram_dump = dpram_dump_memory;
	dpld->ext_op = dpram_get_ext_op(mdm_data->modem_type);
	if (dpld->ext_op && dpld->ext_op->ioctl)
		dpld->ext_ioctl = dpld->ext_op->ioctl;

	/* Retrieve DPRAM resource */
	if (!dpctl->dp_base) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res) {
			mif_info("%s: ERR! platform_get_resource fail\n",
				ld->name);
			goto err;
		}
		res_size = resource_size(res);

		dpctl->dp_base = ioremap_nocache(res->start, res_size);
		dpctl->dp_size = res_size;
	}
	dpld->dp_base = dpctl->dp_base;
	dpld->dp_size = dpctl->dp_size;

	mif_info("%s: dp_type %d, aligned %d, dp_base 0x%08X, dp_size %d\n",
		ld->name, dpld->dp_type, ld->aligned, (int)dpld->dp_base,
		dpld->dp_size);

	/* Initialize DPRAM map (physical map -> logical map) */
	ret = dpram_table_init(dpld);
	if (ret < 0) {
		mif_info("%s: ERR! dpram_table_init fail (err %d)\n",
			ld->name, ret);
		goto err;
	}

	/* Disable IPC */
	set_magic(dpld, 0);
	set_access(dpld, 0);
	dpld->dpram_init_status = DPRAM_INIT_STATE_NONE;

	/* Initialize locks, completions, and bottom halves */
	snprintf(dpld->wlock_name, DP_MAX_NAME_LEN, "%s_wlock", ld->name);
	wake_lock_init(&dpld->wlock, WAKE_LOCK_SUSPEND, dpld->wlock_name);

	init_completion(&dpld->dpram_init_cmd);
	init_completion(&dpld->modem_pif_init_done);
	init_completion(&dpld->udl_start_complete);
	init_completion(&dpld->udl_cmd_complete);
	init_completion(&dpld->dump_start_complete);
	init_completion(&dpld->dump_recv_done);

	task_data = (unsigned long)dpld;
	tasklet_init(&dpld->rx_tsk, dpram_ipc_rx_task, task_data);

	/* Prepare SKB queue head for RX processing */
	for (i = 0; i < dpld->max_ipc_dev; i++)
		skb_queue_head_init(&dpld->skb_rxq[i]);

	/* Prepare RXB queue */
	qsize = DPRAM_MAX_RXBQ_SIZE;
	for (i = 0; i < dpld->max_ipc_dev; i++) {
		bsize = rxbq_get_page_size(get_rx_buff_size(dpld, i));
		dpld->rxbq[i].size = qsize;
		dpld->rxbq[i].in = 0;
		dpld->rxbq[i].out = 0;
		dpld->rxbq[i].rxb = rxbq_create_pool(bsize, qsize);
		if (!dpld->rxbq[i].rxb) {
			mif_info("%s: ERR! %s rxbq_create_pool fail\n",
				ld->name, get_dev_name(i));
			goto err;
		}
		mif_info("%s: %s rxbq_pool created (bsize:%d, qsize:%d)\n",
			ld->name, get_dev_name(i), bsize, qsize);
	}

	/* Prepare a multi-purpose miscellaneous buffer */
	dpld->buff = kzalloc(dpld->dp_size, GFP_KERNEL);
	if (!dpld->buff) {
		mif_info("%s: ERR! kzalloc dpld->buff fail\n", ld->name);
		goto err;
	}

	/* Retrieve DPRAM IRQ GPIO# */
	dpld->gpio_dpram_int = mdm_data->gpio_dpram_int;

	/* Retrieve DPRAM IRQ# */
	if (!dpctl->dpram_irq) {
		dpctl->dpram_irq = platform_get_irq_byname(pdev, "dpram_irq");
		if (dpctl->dpram_irq < 0) {
			mif_info("%s: ERR! platform_get_irq_byname fail\n",
				ld->name);
			goto err;
		}
	}
	dpld->irq = dpctl->dpram_irq;

	/* Retrieve DPRAM IRQ flags */
	if (!dpctl->dpram_irq_flags)
		dpctl->dpram_irq_flags = (IRQF_NO_SUSPEND | IRQF_TRIGGER_LOW);
	dpld->irq_flags = dpctl->dpram_irq_flags;

	/* Register DPRAM interrupt handler */
	snprintf(dpld->irq_name, DP_MAX_NAME_LEN, "%s_irq", ld->name);
	ret = dpram_register_isr(dpld->irq, dpram_irq_handler, dpld->irq_flags,
				dpld->irq_name, dpld);
	if (ret)
		goto err;

	return ld;

err:
	if (dpld) {
		if (dpld->buff)
			kfree(dpld->buff);
		kfree(dpld);
	}

	return NULL;
}
Example 4
0
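/*
 * Delayed-work handler that drains the FMT and RAW TX queues over USB.
 * It first makes sure the link is runtime-PM active, holds a runtime-PM
 * reference around each transfer, and re-queues itself after 20 ms when the
 * link is not yet usable or a transfer fails recoverably.
 */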
static void usb_tx_work(struct work_struct *work)
{
	int ret = 0;
	struct link_device *ld =
		container_of(work, struct link_device, tx_delayed_work.work);
	struct usb_link_device *usb_ld = to_usb_link_device(ld);
	struct sk_buff *skb;
	struct link_pm_data *pm_data = usb_ld->link_pm_data;

	if (!usb_ld->usbdev) {
		mif_info("usbdev is invalid\n");
		return;
	}

	pm_data->tx_cnt++;

	while (ld->sk_fmt_tx_q.qlen || ld->sk_raw_tx_q.qlen) {
		/* request and check usb runtime pm first */
		ret = link_pm_runtime_get_active(pm_data);
		if (ret < 0) {
			if (ret == -ENODEV) {
				mif_err("link not avail, retry reconnect.\n");
				goto exit;
			}
			goto retry_tx_work;
		}

		/* If the AP tries to tx during an interface disconnect ->
		 * reconnect probe, the usbdev has been created but one of the
		 * interface channel devices is still probing; _usb_tx_work()
		 * returns -ENOENT, so the runtime usage count stays positive
		 * and the link never enters L2.
		 */
		if (!usb_ld->if_usb_connected) {
			mif_info("link is available, but if  was not readey\n");
			goto retry_tx_work;
		}
		pm_runtime_get_sync(&usb_ld->usbdev->dev);

		ret = 0;
		/* send skb from fmt_txq and raw_txq, one by one
		 * for fair flow control
		 */
		skb = skb_dequeue(&ld->sk_fmt_tx_q);
		if (skb)
			ret = _usb_tx_work(skb);

		if (ret) {
			mif_err("usb_tx_urb_with_skb for fmt_q %d\n", ret);
			skb_queue_head(&ld->sk_fmt_tx_q, skb);

			if (ret == -ENODEV || ret == -ENOENT)
				goto exit;

			/* tx failed but the usbdev is alive, so retry the tx work */
			pm_runtime_put(&usb_ld->usbdev->dev);
			goto retry_tx_work;
		}

		skb = skb_dequeue(&ld->sk_raw_tx_q);
		if (skb)
			ret = _usb_tx_work(skb);

		if (ret) {
			mif_err("usb_tx_urb_with_skb for raw_q %d\n", ret);
			skb_queue_head(&ld->sk_raw_tx_q, skb);

			if (ret == -ENODEV || ret == -ENOENT)
				goto exit;

			pm_runtime_put(&usb_ld->usbdev->dev);
			goto retry_tx_work;
		}

		pm_runtime_put(&usb_ld->usbdev->dev);
	}
	wake_unlock(&pm_data->tx_async_wake);
exit:
	return;

retry_tx_work:
	queue_delayed_work(ld->tx_wq, &ld->tx_delayed_work,
		msecs_to_jiffies(20));
	return;
}
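/*
 * Binds the XMM6260 modem control GPIOs from the platform data, installs the
 * PHONE_ACTIVE and (optional) SIM_DETECT interrupt handlers, marks both IRQs
 * as wakeup sources, and initializes the SIM online state from the detect
 * GPIO.
 */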
int xmm6260_init_modemctl_device(struct modem_ctl *mc,
			struct modem_data *pdata)
{
	int ret;

	mc->gpio_cp_on = pdata->gpio_cp_on;
	mc->gpio_reset_req_n = pdata->gpio_reset_req_n;
	mc->gpio_cp_reset = pdata->gpio_cp_reset;
	mc->gpio_pda_active = pdata->gpio_pda_active;
	mc->gpio_phone_active = pdata->gpio_phone_active;
	mc->gpio_cp_dump_int = pdata->gpio_cp_dump_int;
	mc->gpio_sim_detect = pdata->gpio_sim_detect;

	mc->irq_phone_active = gpio_to_irq(mc->gpio_phone_active);

	if (mc->gpio_sim_detect)
		mc->irq_sim_detect = gpio_to_irq(mc->gpio_sim_detect);

	xmm6260_get_ops(mc);

	ret = request_irq(mc->irq_phone_active, phone_active_irq_handler,
				IRQF_NO_SUSPEND | IRQF_TRIGGER_HIGH,
				"phone_active", mc);
	if (ret) {
		mif_err("failed to request_irq:%d\n", ret);
		return ret;
	}

	ret = enable_irq_wake(mc->irq_phone_active);
	if (ret) {
		mif_err("failed to enable_irq_wake:%d\n", ret);
		goto err_exit;
	}

	/* initialize sim_state if gpio_sim_detect exists */
	mc->sim_state.online = false;
	mc->sim_state.changed = false;
	if (mc->gpio_sim_detect) {
		ret = request_irq(mc->irq_sim_detect, sim_detect_irq_handler,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				"sim_detect", mc);
		if (ret) {
			mif_err("failed to SD request_irq:%d\n", ret);
			goto err_exit;
		}

		ret = enable_irq_wake(mc->irq_sim_detect);
		if (ret) {
			mif_err("failed to SD enable_irq:%d\n", ret);
			free_irq(mc->irq_sim_detect, mc);
			goto err_exit;
		}

		/* initialize sim_state => insert: gpio=0, remove: gpio=1 */
		mc->sim_state.online = !gpio_get_value(mc->gpio_sim_detect);
		mif_info("SIM detected online=%d\n", mc->sim_state.online);
	}

	return ret;

err_exit:
	free_irq(mc->irq_phone_active, mc);
	return ret;
}
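/*
 * Delayed-work handler that powers the USB hub back on and waits for it to
 * resume.  It re-arms itself (via start_hub_work()) while the kernel is
 * suspending or the hub is still resuming, and gives up after roughly 50
 * retries.
 */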
static void link_pm_hub_work(struct work_struct *work)
{
	int err;
	struct link_pm_data *pm_data =
		container_of(work, struct link_pm_data, link_pm_hub.work);

	if (pm_data->hub_status == HUB_STATE_ACTIVE) {
		end_hub_work(pm_data);
		return;
	}

	if (!pm_data->port_enable) {
		mif_err("mif: hub power func not assinged\n");
		end_hub_work(pm_data);
		return;
	}

	/* If the kernel is suspending, wait for the EHCI to resume */
	if (pm_data->dpm_suspending) {
		mif_info("dpm_suspending\n");
		start_hub_work(pm_data, 500);
		return;
	}

	switch (pm_data->hub_status) {
	case HUB_STATE_OFF:
		pm_data->hub_status = HUB_STATE_RESUMMING;
		mif_trace("hub off->on\n");

		/* skip 1st time before first probe */
		if (pm_data->root_hub)
			pm_runtime_get_sync(pm_data->root_hub);
		err = pm_data->port_enable(2, 1);
		if (err < 0) {
			mif_err("hub on fail err=%d\n", err);
			err = pm_data->port_enable(2, 0);
			if (err < 0)
				mif_err("hub off fail err=%d\n", err);
			pm_data->hub_status = HUB_STATE_OFF;
			if (pm_data->root_hub)
				pm_runtime_put_sync(pm_data->root_hub);
			end_hub_work(pm_data);
		} else {
			/* resume root hub */
			start_hub_work(pm_data, 100);
		}
		break;
	case HUB_STATE_RESUMMING:
		if (pm_data->hub_on_retry_cnt++ > 50) {
			pm_data->hub_on_retry_cnt = 0;
			pm_data->hub_status = HUB_STATE_OFF;
			if (pm_data->root_hub)
				pm_runtime_put_sync(pm_data->root_hub);
			end_hub_work(pm_data);
		} else {
			mif_info("hub resumming: %d\n",
					pm_data->hub_on_retry_cnt);
			start_hub_work(pm_data, 200);
		}
		break;
	}
}
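/*
 * Handles one step of a CBP7.2 EDPRAM crash-dump upload: sends the request
 * interrupt, waits for the response, then copies up to DP_DEFAULT_DUMP_LEN
 * bytes of dump data (and its length) from DPRAM to the user buffers.
 */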
static int _cbp72_edpram_upload(struct dpram_link_device *dpld,
		struct dpram_dump_arg *dump, unsigned char __user *target)
{
	struct link_device *ld = &dpld->ld;
	struct ul_header header;
	u8 *dest = NULL;
	u8 *buff = NULL;
	u16 plen = 0;
	int err = 0;
	int ret = 0;
	int buff_size = 0;

	mif_debug("\n");

	init_completion(&dpld->udl_cmd_complete);

	mif_debug("%s: req %x, resp %x", ld->name, dump->req, dump->resp);

	if (dump->req)
		dpld->send_intr(dpld, (u16)dump->req);

	if (dump->resp) {
		err = _cbp72_edpram_wait_resp(dpld, dump->resp);
		if (err < 0) {
			mif_info("%s: ERR! wait_response fail (%d)\n",
				ld->name, err);
			goto exit;
		}
	}

	if (dump->cmd)
		return err;

	dest = (u8 *)dpld->ul_map.buff;

	header.bop = *(u8 *)(dest);
	header.total_frame = *(u16 *)(dest + 1);
	header.curr_frame = *(u16 *)(dest + 3);
	header.len = *(u16 *)(dest + 5);

	mif_debug("%s: total frame:%d, current frame:%d, data len:%d\n",
		ld->name, header.total_frame, header.curr_frame, header.len);

	plen = min_t(u16, header.len, DP_DEFAULT_DUMP_LEN);

	buff = vmalloc(DP_DEFAULT_DUMP_LEN);
	if (!buff) {
		err = -ENOMEM;
		goto exit;
	}

	memcpy(buff, dest + sizeof(struct ul_header), plen);
	ret = copy_to_user(dump->buff, buff, plen);
	if (ret) {
		mif_info("%s: ERR! dump copy_to_user fail\n", ld->name);
		err = -EIO;
		goto exit;
	}
	buff_size = plen;

	ret = copy_to_user(target + 4, &buff_size, sizeof(int));
	if (ret) {
		mif_info("%s: ERR! size copy_to_user fail\n", ld->name);
		err = -EIO;
		goto exit;
	}

	vfree(buff);
	return err;

exit:
	if (buff)
		vfree(buff);
	iowrite32(0, dpld->ul_map.magic);
	wake_unlock(&dpld->wlock);
	return err;
}
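/*
 * Link-PM finite state machine.  Under pm->lock it maps the current state
 * and the incoming event to the next state, performing the side effects
 * (mount preparation, AP2CP wakeup/status handling, watchdog start/stop)
 * required by each transition, then records and reports the transition.
 */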
static void run_pm_fsm(struct modem_link_pm *pm, enum pm_event event)
{
	struct link_device *ld = container_of(pm, struct link_device, pm);
	struct modem_ctl *mc = ld->mc;
	struct pm_fsm *fsm = &pm->fsm;
	enum pm_state c_state;
	enum pm_state n_state;
	unsigned long flags;

	spin_lock_irqsave(&pm->lock, flags);

	c_state = fsm->state;
	n_state = fsm->state;

	if (!pm->active) {
		release_ap2cp_wakeup(pm);
		if (event == PM_EVENT_LINK_ERROR)
			n_state = PM_STATE_CP_FAIL;
		goto exit;
	}

	if (fsm_locked(c_state))
		goto exit;

	if (event == PM_EVENT_CP_HOLD_REQUEST) {
		if (!cp_online(mc))
			goto exit;

		pm->hold_requested = true;

		if (!hold_possible(c_state))
			goto exit;
	}

#ifdef DEBUG_MODEM_IF
	print_pm_event(pm, event);
#endif
	switch (c_state) {
	case PM_STATE_UNMOUNTED:
		if (event == PM_EVENT_LINK_SUSPENDED) {
			n_state = PM_STATE_SUSPENDED;
		} else if (event == PM_EVENT_CP2AP_WAKEUP_HIGH
			   || event == PM_EVENT_CP_HOLD_REQUEST) {
			if (link_suspended(pm)) {
				n_state = PM_STATE_SUSPENDED;
				prepare_mount(pm);
			} else {
				n_state = next_state_from_resume(pm);
				if (n_state == PM_STATE_RESETTING) {
					prepare_mount(pm);
					unmounted_to_resetting(pm);
				} else if (n_state == PM_STATE_HOLDING) {
					prepare_mount(pm);
					unmounted_to_holding(pm);
				}
			}
		}
		break;

	case PM_STATE_SUSPENDED:
		if (event == PM_EVENT_LINK_RESUMED) {
			n_state = next_state_from_resume(pm);
			if (n_state == PM_STATE_RESETTING) {
				prepare_mount(pm);
				unmounted_to_resetting(pm);
			} else if (n_state == PM_STATE_HOLDING) {
				prepare_mount(pm);
				unmounted_to_holding(pm);
			}
		} else if (event == PM_EVENT_CP2AP_WAKEUP_HIGH
			   || event == PM_EVENT_CP_HOLD_REQUEST) {
			n_state = PM_STATE_SUSPENDED;
			prepare_mount(pm);
		}
		break;

	case PM_STATE_HOLDING:
		if (event == PM_EVENT_CP2AP_WAKEUP_HIGH) {
			n_state = PM_STATE_RESETTING;
			holding_to_resetting(pm);
		} else if (event == PM_EVENT_WDOG_TIMEOUT) {
			/*
			 * It is not guaranteed that the FSM will receive the
			 * GPIO interrupt events or that stop_pm_wdog() will
			 * always succeed in deleting the WDOG timer.
			 * So, gpio_cp2ap_wakeup and gpio_cp2ap_status must
			 * always be checked before a state transition.
			 */
			if (gpio_get_value(pm->gpio_cp2ap_wakeup)) {
				n_state = PM_STATE_RESETTING;
				holding_to_resetting(pm);
			} else {
				n_state = PM_STATE_WDOG_TIMEOUT;
			}
		}
		break;

	case PM_STATE_RESETTING:
		if (event == PM_EVENT_LINK_RESET) {
			n_state = PM_STATE_MOUNTING;
			stop_pm_wdog(pm, c_state, event);
			assert_ap2cp_status(pm);

			mif_info("%s: state: ap2cp_status_pin_done\n", __func__);
			start_pm_wdog(pm, n_state, PM_STATE_ACTIVE,
				      PM_EVENT_LINK_MOUNTED, LINKPM_WATCHDOG_TIMEOUT);
		} else if (event == PM_EVENT_WDOG_TIMEOUT) {
			n_state = PM_STATE_AP_FAIL;
		} else if (event == PM_EVENT_CP2AP_WAKEUP_LOW) {
			n_state = PM_STATE_CP_FAIL;
		}
		break;

	case PM_STATE_MOUNTING:
		if (event == PM_EVENT_LINK_MOUNTED
			|| event == PM_EVENT_CP2AP_STATUS_HIGH) {
			n_state = PM_STATE_ACTIVE;
			stop_pm_wdog(pm, c_state, event);
		} else if (event == PM_EVENT_WDOG_TIMEOUT) {
			n_state = PM_STATE_WDOG_TIMEOUT;
		} else if (event == PM_EVENT_CP2AP_WAKEUP_LOW) {
#if 0
			n_state = PM_STATE_CP_FAIL;
#else
			n_state = PM_STATE_AP_FAIL;
#endif
		}
		break;

	case PM_STATE_ACTIVE:
		if (event == PM_EVENT_CP2AP_WAKEUP_LOW) {
			n_state = PM_STATE_AP_FREE;
			schedule_cp_free(pm);
#if 0
			if (mipi_lli_get_link_status() == LLI_MOUNTED) {
				n_state = PM_STATE_AP_FREE;
				schedule_cp_free(pm);
			}
#endif
		} else if (event == PM_EVENT_CP2AP_STATUS_LOW) {
#ifdef REPORT_CRASHDMP
			n_state = PM_STATE_CP_FAIL;
#else
			n_state = PM_STATE_AP_FREE;
			schedule_cp_free(pm);
#endif
		}
		break;

	case PM_STATE_AP_FREE:
		if (event == PM_EVENT_CP2AP_WAKEUP_HIGH) {
			n_state = PM_STATE_ACTIVE;
			cancel_cp_free(pm);
			assert_ap2cp_wakeup(pm);
		} else if (event == PM_EVENT_CP_HOLD_REQUEST) {
			n_state = PM_STATE_AP_FREE;
			cancel_cp_free(pm);
			assert_ap2cp_wakeup(pm);
			schedule_cp_free(pm);
		} else if (event == PM_EVENT_CP_HOLD_TIMEOUT) {
			/*
			 * It is not guaranteed that cancel_cp_free() will
			 * always succeed in canceling cp_free_dwork.
			 * So, cp2ap_wakeup must always be checked before a
			 * state transition.
			 */
			if (!gpio_get_value(pm->gpio_cp2ap_wakeup)) {
				n_state = PM_STATE_CP_FREE;
				pm->hold_requested = false;
				release_ap2cp_wakeup(pm);
			} else {
				n_state = PM_STATE_ACTIVE;
				cancel_cp_free(pm);
				assert_ap2cp_wakeup(pm);
			}
		} else if (event == PM_EVENT_CP2AP_STATUS_LOW) {
			n_state = PM_STATE_CP_FAIL;
		}
		break;

	case PM_STATE_CP_FREE:
		if (event == PM_EVENT_CP2AP_STATUS_LOW) {
			n_state = PM_STATE_UNMOUNTING;
			start_pm_wdog(pm, n_state, PM_STATE_UNMOUNTED,
				      PM_EVENT_LINK_UNMOUNTED, LINKPM_WATCHDOG_TIMEOUT);
		} else if (event == PM_EVENT_CP2AP_WAKEUP_HIGH) {
			n_state = PM_STATE_ACTIVE;
			assert_ap2cp_wakeup(pm);
		} else if (event == PM_EVENT_CP_HOLD_REQUEST) {
			n_state = PM_STATE_ACTIVATING;
			assert_ap2cp_wakeup(pm);
			start_pm_wdog(pm, n_state, PM_STATE_ACTIVE,
				      PM_EVENT_CP2AP_WAKEUP_HIGH, LINKPM_WATCHDOG_TIMEOUT);
		}
		break;

	case PM_STATE_ACTIVATING:
		if (event == PM_EVENT_CP2AP_WAKEUP_HIGH) {
			n_state = PM_STATE_ACTIVE;
			stop_pm_wdog(pm, c_state, event);
			assert_ap2cp_wakeup(pm);
		} else if (event == PM_EVENT_CP2AP_STATUS_LOW) {
			n_state = PM_STATE_UNMOUNTING;
			stop_pm_wdog(pm, c_state, event);
			release_ap2cp_wakeup(pm);
		} else if (event == PM_EVENT_WDOG_TIMEOUT) {
			/*
			 * It is not guaranteed that the FSM will receive the
			 * GPIO interrupt events or that stop_pm_wdog() will
			 * always succeed in deleting the WDOG timer.
			 * So, gpio_cp2ap_wakeup and gpio_cp2ap_status must
			 * always be checked before a state transition.
			 */
			if (gpio_get_value(pm->gpio_cp2ap_wakeup))
				n_state = PM_STATE_ACTIVE;
			else if (!gpio_get_value(pm->gpio_cp2ap_status))
				n_state = PM_STATE_UNMOUNTING;
			else
				n_state = PM_STATE_WDOG_TIMEOUT;
		}
		break;

	case PM_STATE_UNMOUNTING:
		if (event == PM_EVENT_LINK_UNMOUNTED) {
			if (pm->hold_requested) {
				if (cp_online(mc))
					n_state = PM_STATE_HOLDING;
				else
					n_state = PM_STATE_UNMOUNTED;
				pm->hold_requested = false;
			} else {
				n_state = PM_STATE_UNMOUNTED;
			}
			stop_pm_wdog(pm, c_state, event);
			release_ap2cp_status(pm);
			if (n_state == PM_STATE_HOLDING) {
				prepare_mount(pm);
				unmounted_to_holding(pm);
			}
		} else if (event == PM_EVENT_WDOG_TIMEOUT) {
			n_state = PM_STATE_WDOG_TIMEOUT;
		}
		break;

	case PM_STATE_CP_BOOTING:
		if (event == PM_EVENT_CP2AP_WAKEUP_HIGH) {
			n_state = PM_STATE_ACTIVE;
			assert_ap2cp_wakeup(pm);
		} else if (event == PM_EVENT_LINK_ERROR) {
			n_state = PM_STATE_CP_FAIL;
		}
		break;

	default:
		break;
	}

	set_pm_fsm(pm, c_state, n_state, event);

#ifdef DEBUG_MODEM_IF
	print_pm_fsm(pm);
#endif

	decide_pm_wake(pm, c_state, n_state);

exit:
	spin_unlock_irqrestore(&pm->lock, flags);

	check_pm_fail(pm, c_state, n_state);
}
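/*
 * Another version of link_pm_hub_work() that manages the hub wake lock
 * directly and completes &pm_data->hub_active once the hub reaches
 * HUB_STATE_PREACTIVE.
 */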
static void link_pm_hub_work(struct work_struct *work)
{
	int err;
	struct link_pm_data *pm_data =
		container_of(work, struct link_pm_data, link_pm_hub.work);

	if (pm_data->hub_status == HUB_STATE_ACTIVE)
		return;

	if (!pm_data->port_enable) {
		mif_err("mif: hub power func not assinged\n");
		return;
	}
	wake_lock(&pm_data->hub_lock);

	/* If the kernel is suspending, wait for the EHCI to resume */
	if (pm_data->dpm_suspending) {
		mif_info("dpm_suspending\n");
		schedule_delayed_work(&pm_data->link_pm_hub,
						msecs_to_jiffies(500));
		goto exit;
	}

	switch (pm_data->hub_status) {
	case HUB_STATE_OFF:
		pm_data->hub_status = HUB_STATE_RESUMMING;
		mif_trace("hub off->on\n");

		/* skip 1st time before first probe */
		if (pm_data->root_hub)
			pm_runtime_get_sync(pm_data->root_hub);
		err = pm_data->port_enable(2, 1);
		if (err < 0) {
			mif_err("hub on fail err=%d\n", err);
			err = pm_data->port_enable(2, 0);
			if (err < 0)
				mif_err("hub off fail err=%d\n", err);
			pm_data->hub_status = HUB_STATE_OFF;
			if (pm_data->root_hub)
				pm_runtime_put_sync(pm_data->root_hub);
			goto exit;
		}
		/* resume root hub */
		schedule_delayed_work(&pm_data->link_pm_hub,
						msecs_to_jiffies(100));
		break;
	case HUB_STATE_RESUMMING:
		if (pm_data->hub_on_retry_cnt++ > 50) {
			pm_data->hub_on_retry_cnt = 0;
			pm_data->hub_status = HUB_STATE_OFF;
			if (pm_data->root_hub)
				pm_runtime_put_sync(pm_data->root_hub);
		}
		mif_trace("hub resumming\n");
		schedule_delayed_work(&pm_data->link_pm_hub,
						msecs_to_jiffies(200));
		break;
	case HUB_STATE_PREACTIVE:
		pm_data->hub_status = HUB_STATE_ACTIVE;
		mif_trace("hub active\n");
		pm_data->hub_on_retry_cnt = 0;
		wake_unlock(&pm_data->hub_lock);
		complete(&pm_data->hub_active);
		if (pm_data->root_hub)
			pm_runtime_put_sync(pm_data->root_hub);
		break;
	}
exit:
	return;
}
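/*
 * DPRAM ioctl handler for Qualcomm (QC) modems: dispatches boot preparation,
 * binary/NV download, boot-start, and crash-dump upload commands to the
 * qc_* helpers.
 */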
static int qc_ioctl(struct dpram_link_device *dpld, struct io_device *iod,
		unsigned int cmd, unsigned long arg)
{
	struct link_device *ld = &dpld->ld;
	int err = 0;

	switch (cmd) {
	case IOCTL_DPRAM_PHONE_POWON:
		err = qc_prepare_download(dpld);
		if (err < 0)
			mif_info("%s: ERR! prepare_download fail\n", ld->name);
		break;

	case IOCTL_DPRAM_PHONEIMG_LOAD:
		err = qc_download_binary(dpld, (void *)arg);
		if (err < 0)
			mif_info("%s: ERR! download_binary fail\n", ld->name);
		break;

	case IOCTL_DPRAM_NVDATA_LOAD:
		err = qc_download_nv(dpld, (void *)arg);
		if (err < 0)
			mif_info("%s: ERR! download_nv fail\n", ld->name);
		break;

	case IOCTL_DPRAM_PHONE_BOOTSTART:
		err = qc_boot_start(dpld);
		if (err < 0) {
			mif_info("%s: ERR! boot_start fail\n", ld->name);
			break;
		}

		err = qc_boot_post_process(dpld);
		if (err < 0)
			mif_info("%s: ERR! boot_post_process fail\n", ld->name);

		break;

	case IOCTL_DPRAM_PHONE_UPLOAD_STEP1:
		disable_irq_nosync(dpld->irq);
		err = qc_uload_step1(dpld);
		if (err < 0) {
			enable_irq(dpld->irq);
			mif_info("%s: ERR! upload_step1 fail\n", ld->name);
		}
		break;

	case IOCTL_DPRAM_PHONE_UPLOAD_STEP2:
		err = qc_uload_step2(dpld, (void *)arg);
		if (err < 0) {
			enable_irq(dpld->irq);
			mif_info("%s: ERR! upload_step2 fail\n", ld->name);
		}
		break;

	default:
		mif_err("%s: ERR! invalid cmd 0x%08X\n", ld->name, cmd);
		err = -EINVAL;
		break;
	}

	return err;
}
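/*
 * ioctl handler for the link-PM character device: controls the link-active
 * GPIO, reports host-wakeup and connection state, and powers the USB hub
 * port on/off for hub-based links.
 */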
static long link_pm_ioctl(struct file *file, unsigned int cmd,
						unsigned long arg)
{
	int value, err = 0;
	struct link_pm_data *pm_data = file->private_data;

	mif_info("cmd: 0x%08x\n", cmd);

	switch (cmd) {
	case IOCTL_LINK_CONTROL_ACTIVE:
		if (copy_from_user(&value, (const void __user *)arg,
							sizeof(int)))
			return -EFAULT;
		gpio_set_value(pm_data->gpio_link_active, value);
		break;
	case IOCTL_LINK_GET_HOSTWAKE:
		return !gpio_get_value(pm_data->gpio_link_hostwake);
	case IOCTL_LINK_CONNECTED:
		return pm_data->usb_ld->if_usb_connected;
	case IOCTL_LINK_PORT_ON: /* hub only */
		/* Ignore the CP host-wakeup IRQ: set hub_init_lock when the AP
		 * tries to turn the CP off, and release it when CP boot is
		 * done.
		 */
		pm_data->hub_init_lock = 0;
		if (pm_data->root_hub) {
			pm_runtime_resume(pm_data->root_hub);
			pm_runtime_forbid(pm_data->root_hub->parent);
		}
		if (pm_data->port_enable) {
			err = pm_data->port_enable(2, 1);
			if (err < 0) {
				mif_err("hub on fail err=%d\n", err);
				goto exit;
			}
			pm_data->hub_status = HUB_STATE_RESUMMING;
		}
		break;
	case IOCTL_LINK_PORT_OFF: /* hub only */
		if (pm_data->usb_ld->if_usb_connected) {
			struct usb_device *udev =
					pm_data->usb_ld->usbdev->parent;
			pm_runtime_get_sync(&udev->dev);
			if (udev->state != USB_STATE_NOTATTACHED) {
				usb_force_disconnect(udev);
				pr_info("force disconnect maybe cp-reset!!\n");
			}
			pm_runtime_put_autosuspend(&udev->dev);
		}
		err = link_pm_hub_standby(pm_data);
		if (err < 0) {
			mif_err("usb3503 active fail\n");
			goto exit;
		}
		pm_data->hub_init_lock = 1;
		pm_data->hub_handshake_done = 0;

		break;
	default:
		break;
	}
exit:
	return err;
}
Example 12
0
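/*
 * Powers the SS333 modem off: marks the iod STATE_OFFLINE, stops the crash
 * ACK timer and link TX, waits up to 3 seconds for PHONE_ACTIVE, then drops
 * the CP reset GPIO.
 */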
static int ss333_off(struct modem_ctl *mc)
{
	struct io_device *iod = mc->iod;
	struct link_device *ld = get_current_link(iod);
	unsigned long flags;
	int i;

	mif_disable_irq(&mc->irq_cp_active);

	mif_info("%s: %s: +++\n", mc->name, FUNC);

	print_mc_state(mc);

	spin_lock_irqsave(&mc->lock, flags);

	if (cp_offline(mc)) {
		spin_unlock_irqrestore(&mc->lock, flags);
		mif_err("%s: %s: OFFLINE already!!!\n", mc->name, FUNC);
		goto exit;
	}

	iod->modem_state_changed(iod, STATE_OFFLINE);

	spin_unlock_irqrestore(&mc->lock, flags);

	if (timer_pending(&mc->crash_ack_timer))
		del_timer(&mc->crash_ack_timer);

	if (ld->close_tx)
		ld->close_tx(ld);

#if 0
	wait_for_link_unmount(mc, ld);
#endif

	if (gpio_get_value(mc->gpio_cp_on) == 0) {
		mif_err("%s: cp_on == 0\n", mc->name);
		goto exit;
	}

	/* wait for cp_active for 3 seconds */
	for (i = 0; i < 150; i++) {
		if (gpio_get_value(mc->gpio_phone_active))
			break;
		msleep(20);
	}

	print_mc_state(mc);

	if (ld->off)
		ld->off(ld);

	if (gpio_get_value(mc->gpio_cp_reset)) {
		mif_err("%s: %s: cp_reset -> 0\n", mc->name, FUNC);
		gpio_set_value(mc->gpio_cp_reset, 0);
		print_mc_state(mc);
	}

exit:
	mif_info("%s: %s: ---\n", mc->name, FUNC);
	return 0;
}
Example 13
0
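/*
 * Requests and configures the UMTS (CMC) modem GPIOs: control outputs
 * (CP on/reset, PDA active, active state, slave wakeup, DPRAM wakeup) and
 * wake-capable inputs (phone active, host wakeup, DPRAM int/status, dynamic
 * switching).
 */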
static void config_umts_modem_gpio(void)
{
	int err;
	unsigned gpio_cp_on = umts_modem_data.gpio_cp_on;
	unsigned gpio_cp_rst = umts_modem_data.gpio_cp_reset;
	unsigned gpio_pda_active = umts_modem_data.gpio_pda_active;
	unsigned gpio_phone_active = umts_modem_data.gpio_phone_active;
	unsigned gpio_active_state = umts_modem_data.gpio_host_active;
	unsigned gpio_host_wakeup = umts_modem_data.gpio_host_wakeup;
	unsigned gpio_slave_wakeup = umts_modem_data.gpio_slave_wakeup;
	unsigned gpio_dpram_int = umts_modem_data.gpio_dpram_int;
	unsigned gpio_dpram_status = umts_modem_data.gpio_dpram_status;
	unsigned gpio_dpram_wakeup = umts_modem_data.gpio_dpram_wakeup;
	unsigned gpio_dynamic_switching =
			umts_modem_data.gpio_dynamic_switching;

	if (gpio_cp_on) {
		err = gpio_request(gpio_cp_on, "CMC_ON");
		if (err) {
			mif_err("ERR: fail to request gpio %s\n", "CMC_ON");
		} else {
			gpio_direction_output(gpio_cp_on, 0);
			s3c_gpio_setpull(gpio_cp_on, S3C_GPIO_PULL_NONE);
		}
	}

	if (gpio_cp_rst) {
		err = gpio_request(gpio_cp_rst, "CMC_RST");
		if (err) {
			mif_err("ERR: fail to request gpio %s\n", "CMC_RST");
		} else {
			gpio_direction_output(gpio_cp_rst, 0);
			s3c_gpio_setpull(gpio_cp_rst, S3C_GPIO_PULL_NONE);
		}
	}

	if (gpio_pda_active) {
		err = gpio_request(gpio_pda_active, "PDA_ACTIVE");
		if (err) {
			mif_err("ERR: fail to request gpio %s\n", "PDA_ACTIVE");
		} else {
			gpio_direction_output(gpio_pda_active, 0);
			s3c_gpio_setpull(gpio_pda_active, S3C_GPIO_PULL_NONE);
		}
	}

	if (gpio_phone_active) {
		err = gpio_request(gpio_phone_active, "CMC_ACTIVE");
		if (err) {
			mif_err("ERR: fail to request gpio %s\n", "CMC_ACTIVE");
		} else {
			/* Configure as a wake-up source */
			gpio_direction_input(gpio_phone_active);
			s3c_gpio_setpull(gpio_phone_active, S3C_GPIO_PULL_DOWN);
			s3c_gpio_cfgpin(gpio_phone_active, S3C_GPIO_SFN(0xF));
		}
	}

	if (gpio_active_state) {
		err = gpio_request(gpio_active_state, "CMC_ACTIVE_STATE");
		if (err) {
			mif_err("ERR: fail to request gpio %s\n",
				"CMC_ACTIVE_STATE");
		} else {
			gpio_direction_output(gpio_active_state, 0);
			s3c_gpio_setpull(gpio_active_state, S3C_GPIO_PULL_NONE);
		}
	}

	if (gpio_slave_wakeup) {
		err = gpio_request(gpio_slave_wakeup, "CMC_SLAVE_WAKEUP");
		if (err) {
			mif_err("ERR: fail to request gpio %s\n",
				"CMC_SLAVE_WAKEUP");
		} else {
			gpio_direction_output(gpio_slave_wakeup, 0);
			s3c_gpio_setpull(gpio_slave_wakeup, S3C_GPIO_PULL_NONE);
		}
	}

	if (gpio_host_wakeup) {
		err = gpio_request(gpio_host_wakeup, "CMC_HOST_WAKEUP");
		if (err) {
			mif_err("ERR: fail to request gpio %s\n",
				"CMC_HOST_WAKEUP");
		} else {
			/* Configure as a wake-up source */
			gpio_direction_input(gpio_host_wakeup);
			s3c_gpio_setpull(gpio_host_wakeup, S3C_GPIO_PULL_DOWN);
			s3c_gpio_cfgpin(gpio_host_wakeup, S3C_GPIO_SFN(0xF));
		}
	}

	if (gpio_dpram_int) {
		err = gpio_request(gpio_dpram_int, "CMC_DPRAM_INT");
		if (err) {
			mif_err("ERR: fail to request gpio %s\n",
				"CMC_DPRAM_INT");
		} else {
			/* Configure as a wake-up source */
			gpio_direction_input(gpio_dpram_int);
			s3c_gpio_setpull(gpio_dpram_int, S3C_GPIO_PULL_NONE);
			s3c_gpio_cfgpin(gpio_dpram_int, S3C_GPIO_SFN(0xF));
		}
	}

	if (gpio_dpram_status) {
		err = gpio_request(gpio_dpram_status, "CMC_DPRAM_STATUS");
		if (err) {
			mif_err("ERR: fail to request gpio %s\n",
				"CMC_DPRAM_STATUS");
		} else {
			gpio_direction_input(gpio_dpram_status);
			s3c_gpio_setpull(gpio_dpram_status, S3C_GPIO_PULL_NONE);
		}
	}

	if (gpio_dpram_wakeup) {
		err = gpio_request(gpio_dpram_wakeup, "CMC_DPRAM_WAKEUP");
		if (err) {
			mif_err("ERR: fail to request gpio %s\n",
				"CMC_DPRAM_WAKEUP");
		} else {
			gpio_direction_output(gpio_dpram_wakeup, 1);
			s3c_gpio_setpull(gpio_dpram_wakeup, S3C_GPIO_PULL_NONE);
		}
	}

	if (gpio_dynamic_switching) {
		err = gpio_request(gpio_dynamic_switching, "DYNAMIC_SWITCHING");
		if (err) {
			mif_err("ERR: fail to request gpio %s\n",
					"DYNAMIC_SWITCHING\n");
		} else {
			gpio_direction_input(gpio_dynamic_switching);
			s3c_gpio_setpull(gpio_dynamic_switching,
					S3C_GPIO_PULL_DOWN);
		}
	}

	mif_info("done\n");
}
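/*
 * USB interface probe for the modem link: claims the data interfaces, maps
 * the bulk IN/OUT endpoints per channel, pre-allocates RX URBs with coherent
 * buffers, and, once PHONE_ACTIVE is high, enables runtime PM (autosuspend)
 * and brings the modem state to ONLINE.
 */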
static int __devinit if_usb_probe(struct usb_interface *intf,
					const struct usb_device_id *id)
{
	struct usb_host_interface *data_desc;
	struct usb_link_device *usb_ld =
			(struct usb_link_device *)id->driver_info;
	struct link_device *ld = &usb_ld->ld;
	struct usb_interface *data_intf;
	struct usb_device *usbdev = interface_to_usbdev(intf);
	struct device *dev, *ehci_dev, *root_hub;
	struct if_usb_devdata *pipe;
	struct urb *urb;
	int i;
	int j;
	int dev_id;
	int err;

	/* To detect usb device order probed */
	dev_id = intf->cur_altsetting->desc.bInterfaceNumber;

	if (dev_id >= IF_USB_DEVNUM_MAX) {
		dev_err(&intf->dev, "Device id %d cannot support\n",
								dev_id);
		return -EINVAL;
	}

	if (!usb_ld) {
		dev_err(&intf->dev,
		"if_usb device doesn't be allocated\n");
		err = ENOMEM;
		goto out;
	}

	mif_info("probe dev_id=%d usb_device_id(0x%p), usb_ld (0x%p)\n",
				dev_id, id, usb_ld);

	usb_ld->usbdev = usbdev;
	usb_get_dev(usbdev);

	for (i = 0; i < IF_USB_DEVNUM_MAX; i++) {
		data_intf = usb_ifnum_to_if(usbdev, i);

		/* remap endpoint of RAW to no.1 for LTE modem */
		if (i == 0)
			pipe = &usb_ld->devdata[1];
		else if (i == 1)
			pipe = &usb_ld->devdata[0];
		else
			pipe = &usb_ld->devdata[i];

		pipe->disconnected = 0;
		pipe->data_intf = data_intf;
		data_desc = data_intf->cur_altsetting;

		/* Endpoints */
		if (usb_pipein(data_desc->endpoint[0].desc.bEndpointAddress)) {
			pipe->rx_pipe = usb_rcvbulkpipe(usbdev,
				data_desc->endpoint[0].desc.bEndpointAddress);
			pipe->tx_pipe = usb_sndbulkpipe(usbdev,
				data_desc->endpoint[1].desc.bEndpointAddress);
			pipe->rx_buf_size = 1024*4;
		} else {
			pipe->rx_pipe = usb_rcvbulkpipe(usbdev,
				data_desc->endpoint[1].desc.bEndpointAddress);
			pipe->tx_pipe = usb_sndbulkpipe(usbdev,
				data_desc->endpoint[0].desc.bEndpointAddress);
			pipe->rx_buf_size = 1024*4;
		}

		if (i == 0) {
			dev_info(&usbdev->dev, "USB IF USB device found\n");
		} else {
			err = usb_driver_claim_interface(&if_usb_driver,
					data_intf, usb_ld);
			if (err < 0) {
				mif_err("failed to cliam usb interface\n");
				goto out;
			}
		}

		usb_set_intfdata(data_intf, usb_ld);
		usb_ld->dev_count++;
		pm_suspend_ignore_children(&data_intf->dev, true);

		for (j = 0; j < URB_COUNT; j++) {
			urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!urb) {
				mif_err("alloc urb fail\n");
				err = -ENOMEM;
				goto out2;
			}

			urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
			urb->transfer_buffer = usb_alloc_coherent(usbdev,
				pipe->rx_buf_size, GFP_KERNEL,
				&urb->transfer_dma);
			if (!urb->transfer_buffer) {
				mif_err("Failed to allocate transfer buffer\n");
				usb_free_urb(urb);
				err = -ENOMEM;
				goto out2;
			}

			usb_fill_bulk_urb(urb, usbdev, pipe->rx_pipe,
				urb->transfer_buffer, pipe->rx_buf_size,
				usb_rx_complete, pipe);
			usb_anchor_urb(urb, &pipe->urbs);
		}
	}

	/* temporarily call reset_resume */
	atomic_set(&usb_ld->suspend_count, 1);
	if_usb_reset_resume(data_intf);
	atomic_set(&usb_ld->suspend_count, 0);

	SET_HOST_ACTIVE(usb_ld->pdata, 1);
	usb_ld->host_wake_timeout_flag = 0;

	if (gpio_get_value(usb_ld->pdata->gpio_phone_active)) {
		struct link_pm_data *pm_data = usb_ld->link_pm_data;
		int delay = pm_data->autosuspend_delay_ms ?:
				DEFAULT_AUTOSUSPEND_DELAY_MS;
		pm_runtime_set_autosuspend_delay(&usbdev->dev, delay);
		dev = &usbdev->dev;
		if (dev->parent) {
			dev_dbg(&usbdev->dev, "if_usb Runtime PM Start!!\n");
			usb_enable_autosuspend(usb_ld->usbdev);
			/* s5p-ehci runtime pm allow - usb phy suspend mode */
			root_hub = &usbdev->bus->root_hub->dev;
			ehci_dev = root_hub->parent;
			mif_debug("ehci device = %s, %s\n",
					dev_driver_string(ehci_dev),
					dev_name(ehci_dev));
			pm_runtime_allow(ehci_dev);

			if (!pm_data->autosuspend)
				pm_runtime_forbid(dev);

			if (has_hub(usb_ld))
				link_pm_preactive(pm_data);

			pm_data->root_hub = root_hub;
		}

		usb_ld->flow_suspend = 0;
		/* Queue work if skbs were pending before a disconnect/probe */
		if (ld->sk_fmt_tx_q.qlen || ld->sk_raw_tx_q.qlen)
			queue_delayed_work(ld->tx_wq, &ld->tx_delayed_work, 0);

		usb_ld->if_usb_connected = 1;
		/*USB3503*/
		mif_debug("hub active complete\n");

		usb_change_modem_state(usb_ld, STATE_ONLINE);
	} else {
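/*
 * Downloads the CP boot image into CMC221 internal DPRAM: tests the boot
 * region, copies and verifies the image from user space, resets the DPRAM,
 * and (in normal boot mode) waits up to ~2 seconds for the CP's response.
 */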
static int _cmc221_idpram_send_boot(struct dpram_link_device *dpld, void *arg)
{
	struct link_device *ld = &dpld->ld;
	u8 __iomem *bt_buff = dpld->bt_map.buff;
	struct dpram_boot_img cp_img;
	u8 *img_buff = NULL;
	int err = 0;
	int cnt = 0;

	ld->mode = LINK_MODE_BOOT;

	dpld->dpctl->setup_speed(DPRAM_SPEED_LOW);

	/* Test memory... After testing, memory is cleared. */
	if (mif_test_dpram(ld->name, bt_buff, dpld->bt_map.size) < 0) {
		mif_info("%s: ERR! mif_test_dpram fail!\n", ld->name);
		ld->mode = LINK_MODE_OFFLINE;
		return -EIO;
	}

	memset(&cp_img, 0, sizeof(struct dpram_boot_img));

	/* Get information about the boot image */
	err = copy_from_user(&cp_img, arg, sizeof(cp_img));
	mif_info("%s: CP image addr = 0x%08X, size = %d\n",
		ld->name, (int)cp_img.addr, cp_img.size);

	/* Alloc a buffer for the boot image */
	img_buff = kzalloc(dpld->bt_map.size, GFP_KERNEL);
	if (!img_buff) {
		mif_info("%s: ERR! kzalloc fail\n", ld->name);
		ld->mode = LINK_MODE_OFFLINE;
		return -ENOMEM;
	}

	/* Copy boot image from the user space to the image buffer */
	err = copy_from_user(img_buff, cp_img.addr, cp_img.size);

	/* Copy boot image to DPRAM and verify it */
	memcpy(bt_buff, img_buff, cp_img.size);
	if (memcmp16_to_io(bt_buff, img_buff, cp_img.size)) {
		mif_info("%s: ERR! Boot may be broken!!!\n", ld->name);
		goto err;
	}

	cmc221_idpram_reset(dpld);
	usleep_range(1000, 2000);

	if (cp_img.mode == HOST_BOOT_MODE_NORMAL) {
		mif_info("%s: HOST_BOOT_MODE_NORMAL\n", ld->name);
		mif_info("%s: Send req 0x%08X\n", ld->name, cp_img.req);
		iowrite32(cp_img.req, dpld->bt_map.req);

		/* Wait for cp_img.resp for up to 2 seconds */
		mif_info("%s: Wait resp 0x%08X\n", ld->name, cp_img.resp);
		while (ioread32(dpld->bt_map.resp) != cp_img.resp) {
			cnt++;
			usleep_range(1000, 2000);

			if (cnt > 1000) {
				mif_info("%s: ERR! Invalid resp 0x%08X\n",
					ld->name, ioread32(dpld->bt_map.resp));
				goto err;
			}
		}
	} else {
		mif_info("%s: HOST_BOOT_MODE_DUMP\n", ld->name);
	}

	kfree(img_buff);

	mif_info("%s: Send BOOT done\n", ld->name);

	dpld->dpctl->setup_speed(DPRAM_SPEED_HIGH);

	return 0;

err:
	ld->mode = LINK_MODE_OFFLINE;
	kfree(img_buff);

	mif_info("%s: ERR! Boot send fail!!!\n", ld->name);
	return -EIO;
}
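/*
 * Another version of usb_tx_work() that uses usb_mark_last_busy() for
 * autosuspend accounting.  On a TX failure it returns immediately, putting
 * the runtime-PM reference back or re-queuing the skb depending on whether
 * the device has disappeared.
 */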
static void usb_tx_work(struct work_struct *work)
{
	int ret = 0;
	struct link_device *ld =
		container_of(work, struct link_device, tx_delayed_work.work);
	struct usb_link_device *usb_ld = to_usb_link_device(ld);
	struct sk_buff *skb;
	struct link_pm_data *pm_data = usb_ld->link_pm_data;

	if (!usb_ld->usbdev) {
		mif_info("usbdev is invalid\n");
		return;
	}

	while (ld->sk_fmt_tx_q.qlen || ld->sk_raw_tx_q.qlen) {
		/* request and check usb runtime pm first */
		ret = link_pm_runtime_get_active(pm_data);
		if (ret < 0) {
			if (ret == -ENODEV)
				mif_err("link not avail, retry reconnect.\n");
			else
				queue_delayed_work(ld->tx_wq,
				&ld->tx_delayed_work, msecs_to_jiffies(20));
			return;
		}

		usb_mark_last_busy(usb_ld->usbdev);
		pm_runtime_get_sync(&usb_ld->usbdev->dev);

		ret = 0;
		/* send skb from fmt_txq and raw_txq, one by one
		 * for fair flow control
		 */
		skb = skb_dequeue(&ld->sk_fmt_tx_q);
		if (skb)
			ret = _usb_tx_work(skb);

		if (ret) {
			/* Do not call pm_runtime_put() when ret is -ENODEV or
			 * -ENOENT, since that would trigger bugs.
			 */
			if (ret != -ENODEV && ret != -ENOENT)
				pm_runtime_put(&usb_ld->usbdev->dev);
			else
				skb_queue_head(&ld->sk_fmt_tx_q, skb);
			return;
		}

		skb = skb_dequeue(&ld->sk_raw_tx_q);
		if (skb)
			ret = _usb_tx_work(skb);

		if (ret) {
			if (ret != -ENODEV && ret != -ENOENT)
				pm_runtime_put(&usb_ld->usbdev->dev);
			else
				skb_queue_head(&ld->sk_raw_tx_q, skb);
			return;
		}

		pm_runtime_put(&usb_ld->usbdev->dev);
		usb_mark_last_busy(usb_ld->usbdev);
	}
	wake_unlock(&pm_data->tx_async_wake);
}
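/*
 * Extended link-PM ioctl handler that additionally supports
 * IOCTL_LINK_BLOCK_AUTOSUSPEND / IOCTL_LINK_ENABLE_AUTOSUSPEND to forbid or
 * allow USB runtime autosuspend on behalf of the calling task.
 */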
static long link_pm_ioctl(struct file *file, unsigned int cmd,
						unsigned long arg)
{
	int value, err = 0;
	struct task_struct *task = get_current();
	struct link_pm_data *pm_data = file->private_data;
	struct usb_link_device *usb_ld = pm_data->usb_ld;
	char taskname[TASK_COMM_LEN];

	pr_info("mif: %s: 0x%08x\n", __func__, cmd);

	switch (cmd) {
	case IOCTL_LINK_CONTROL_ACTIVE:
		if (copy_from_user(&value, (const void __user *)arg,
							sizeof(int)))
			return -EFAULT;
		gpio_set_value(pm_data->gpio_link_active, value);
		break;
	case IOCTL_LINK_GET_HOSTWAKE:
		return !gpio_get_value(pm_data->gpio_link_hostwake);
	case IOCTL_LINK_CONNECTED:
		return pm_data->usb_ld->if_usb_connected;
	case IOCTL_LINK_PORT_ON: /* hub only */
		/* Ignore the CP host-wakeup IRQ: set hub_init_lock when the AP
		 * tries to turn the CP off, and release it when CP boot is
		 * done.
		 */
		pm_data->hub_init_lock = 0;
		if (pm_data->root_hub) {
			pm_runtime_resume(pm_data->root_hub);
			pm_runtime_forbid(pm_data->root_hub->parent);
		}
		if (pm_data->port_enable) {
			err = pm_data->port_enable(2, 1);
			if (err < 0) {
				pr_err("mif: %s: hub on fail err=%d\n",
						__func__, err);
				goto exit;
			}
			pm_data->hub_status = HUB_STATE_RESUMMING;
		}
		break;
	case IOCTL_LINK_PORT_OFF: /* hub only */
		if (pm_data->usb_ld->if_usb_connected) {
			struct usb_device *udev =
					pm_data->usb_ld->usbdev->parent;
			pm_runtime_get_sync(&udev->dev);
			if (udev->state != USB_STATE_NOTATTACHED) {
				usb_force_disconnect(udev);
				pr_info("force disconnect maybe cp-reset!!\n");
			}
			pm_runtime_put_autosuspend(&udev->dev);
		}
		err = link_pm_hub_standby(pm_data);
		if (err < 0) {
			pr_err("mif: %s: usb3503 active fail\n", __func__);
			goto exit;
		}
		pm_data->hub_init_lock = 1;
		pm_data->hub_handshake_done = 0;

		break;
	case IOCTL_LINK_BLOCK_AUTOSUSPEND: /* block autosuspend forever */
		mif_info("blocked autosuspend by `%s(%d)'\n",
				get_task_comm(taskname, task), task->pid);
		pm_data->block_autosuspend = true;
		if (usb_ld->usbdev)
			pm_runtime_forbid(&usb_ld->usbdev->dev);
		else {
			mif_err("Block autosuspend failed\n");
			err = -ENODEV;
		}
		break;
	case IOCTL_LINK_ENABLE_AUTOSUSPEND: /* Enable autosuspend */
		mif_info("autosuspend enabled by `%s(%d)'\n",
		get_task_comm(taskname, task), task->pid);
		pm_data->block_autosuspend = false;
		if (usb_ld->usbdev)
			pm_runtime_allow(&usb_ld->usbdev->dev);
		else {
			mif_err("Enable autosuspend failed\n");
			err = -ENODEV;
		}
		break;
	default:
		break;
	}
exit:
	return err;
}