Example #1
static int ft1000_probe(struct usb_interface *interface,
                        const struct usb_device_id *id)
{
    struct usb_host_interface *iface_desc;
    struct usb_endpoint_descriptor *endpoint;
    struct usb_device *dev;
    unsigned numaltsetting;
    int i, ret = 0, size;

    struct ft1000_usb *ft1000dev;
    struct ft1000_info *pft1000info = NULL;
    const struct firmware *dsp_fw;

    ft1000dev = kzalloc(sizeof(struct ft1000_usb), GFP_KERNEL);
    if (!ft1000dev)
        return -ENOMEM;

    dev = interface_to_usbdev(interface);
    DEBUG("ft1000_probe: usb device descriptor info:\n");
    DEBUG("ft1000_probe: number of configuration is %d\n",
          dev->descriptor.bNumConfigurations);

    ft1000dev->dev = dev;
    ft1000dev->status = 0;
    ft1000dev->net = NULL;
    ft1000dev->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
    ft1000dev->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
    if (!ft1000dev->tx_urb || !ft1000dev->rx_urb) {
        ret = -ENOMEM;
        goto err_fw;
    }

    DEBUG("ft1000_probe is called\n");
    numaltsetting = interface->num_altsetting;
    DEBUG("ft1000_probe: number of alt settings is :%d\n", numaltsetting);
    iface_desc = interface->cur_altsetting;
    DEBUG("ft1000_probe: number of endpoints is %d\n",
          iface_desc->desc.bNumEndpoints);
    DEBUG("ft1000_probe: descriptor type is %d\n",
          iface_desc->desc.bDescriptorType);
    DEBUG("ft1000_probe: interface number is %d\n",
          iface_desc->desc.bInterfaceNumber);
    DEBUG("ft1000_probe: alternatesetting is %d\n",
          iface_desc->desc.bAlternateSetting);
    DEBUG("ft1000_probe: interface class is %d\n",
          iface_desc->desc.bInterfaceClass);
    DEBUG("ft1000_probe: control endpoint info:\n");
    DEBUG("ft1000_probe: descriptor0 type -- %d\n",
          iface_desc->endpoint[0].desc.bmAttributes);
    DEBUG("ft1000_probe: descriptor1 type -- %d\n",
          iface_desc->endpoint[1].desc.bmAttributes);
    DEBUG("ft1000_probe: descriptor2 type -- %d\n",
          iface_desc->endpoint[2].desc.bmAttributes);

    for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
        endpoint =
            (struct usb_endpoint_descriptor *)&iface_desc->
            endpoint[i].desc;
        DEBUG("endpoint %d\n", i);
        DEBUG("bEndpointAddress=%x, bmAttributes=%x\n",
              endpoint->bEndpointAddress, endpoint->bmAttributes);
        if ((endpoint->bEndpointAddress & USB_DIR_IN)
                && ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
                    USB_ENDPOINT_XFER_BULK)) {
            ft1000dev->bulk_in_endpointAddr =
                endpoint->bEndpointAddress;
            DEBUG("ft1000_probe: in: %d\n",
                  endpoint->bEndpointAddress);
        }

        if (!(endpoint->bEndpointAddress & USB_DIR_IN)
                && ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
                    USB_ENDPOINT_XFER_BULK)) {
            ft1000dev->bulk_out_endpointAddr =
                endpoint->bEndpointAddress;
            DEBUG("ft1000_probe: out: %d\n",
                  endpoint->bEndpointAddress);
        }
    }

    DEBUG("bulk_in=%d, bulk_out=%d\n", ft1000dev->bulk_in_endpointAddr,
          ft1000dev->bulk_out_endpointAddr);

    ret = request_firmware(&dsp_fw, "ft3000.img", &dev->dev);
    if (ret < 0) {
        pr_err("Error request_firmware().\n");
        goto err_fw;
    }

    size = max_t(uint, dsp_fw->size, 4096);
    pFileStart = kmalloc(size, GFP_KERNEL);

    if (!pFileStart) {
        release_firmware(dsp_fw);
        ret = -ENOMEM;
        goto err_fw;
    }

    memcpy(pFileStart, dsp_fw->data, dsp_fw->size);
    FileLength = dsp_fw->size;
    release_firmware(dsp_fw);

    DEBUG("ft1000_probe: start downloading dsp image...\n");

    ret = init_ft1000_netdev(ft1000dev);
    if (ret)
        goto err_load;

    pft1000info = netdev_priv(ft1000dev->net);

    DEBUG("In probe: pft1000info=%p\n", pft1000info);
    ret = dsp_reload(ft1000dev);
    if (ret) {
        pr_err("Problem with DSP image loading\n");
        goto err_load;
    }

    gPollingfailed = false;
    ft1000dev->pPollThread =
        kthread_run(ft1000_poll_thread, ft1000dev, "ft1000_poll");

    if (IS_ERR(ft1000dev->pPollThread)) {
        ret = PTR_ERR(ft1000dev->pPollThread);
        goto err_load;
    }

    msleep(500);

    while (!pft1000info->CardReady) {
        if (gPollingfailed) {
            ret = -EIO;
            goto err_thread;
        }
        msleep(100);
        DEBUG("ft1000_probe::Waiting for Card Ready\n");
    }

    DEBUG("ft1000_probe::Card Ready!!!! Registering network device\n");

    ret = reg_ft1000_netdev(ft1000dev, interface);
    if (ret)
        goto err_thread;

    ret = ft1000_init_proc(ft1000dev->net);
    if (ret)
        goto err_proc;

    ft1000dev->NetDevRegDone = 1;

    return 0;

err_proc:
    unregister_netdev(ft1000dev->net);
    free_netdev(ft1000dev->net);
err_thread:
    kthread_stop(ft1000dev->pPollThread);
err_load:
    kfree(pFileStart);
err_fw:
    usb_free_urb(ft1000dev->rx_urb);
    usb_free_urb(ft1000dev->tx_urb);
    kfree(ft1000dev);
    return ret;
}
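The probe above starts ft1000_poll_thread() with kthread_run() and stops it with kthread_stop() on the error path, but the thread body itself is not part of this example. A minimal sketch of such a poll loop, assuming a hypothetical ft1000_poll() helper and a 10 ms interval (neither taken from the code above):

static int ft1000_poll_thread(void *arg)
{
	int ret;

	while (!kthread_should_stop()) {
		msleep(10);			/* assumed poll interval */
		if (!gPollingfailed) {
			ret = ft1000_poll(arg);	/* hypothetical poll helper */
			if (ret != 0)
				gPollingfailed = true;	/* seen by the probe's wait loop */
		}
	}
	return 0;
}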
Example #2
static int __init init_thread(void)
{
	printk(KERN_INFO "Creating Thread\n");
	thread_st = kthread_run(thread_fn, NULL, "mythread");
	return 0;
}
static void
__adb_probe_task(struct work_struct *bullshit)
{
	kthread_run(adb_probe_task, NULL, "kadbprobe");
}
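Neither init_thread() nor __adb_probe_task() above checks the kthread_run() result. kthread_run() returns an ERR_PTR()-encoded error on failure, never NULL, so a more defensive sketch of init_thread() (thread_fn and thread_st as above) would be:

static int __init init_thread(void)
{
	printk(KERN_INFO "Creating Thread\n");
	thread_st = kthread_run(thread_fn, NULL, "mythread");
	if (IS_ERR(thread_st)) {
		int err = PTR_ERR(thread_st);

		thread_st = NULL;
		printk(KERN_ERR "Thread creation failed: %d\n", err);
		return err;
	}
	return 0;
}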
Example #4
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;

		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to allocate "
				"bounce buffer\n", mmc_card_name(card));
		} else {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
 	if (mq->bounce_sg)
 		kfree(mq->bounce_sg);
 	mq->bounce_sg = NULL;
 cleanup_queue:
 	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
Example #5
/**
 * lbs_add_card - adds the card. It will probe the
 * card, allocate the lbs_priv and initialize the device.
 *
 * @card:	A pointer to card
 * @dmdev:	A pointer to &struct device
 * returns:	A pointer to &struct lbs_private structure
 */
struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
{
	struct net_device *dev;
	struct wireless_dev *wdev;
	struct lbs_private *priv = NULL;

	lbs_deb_enter(LBS_DEB_MAIN);

	/* Allocate an Ethernet device and register it */
	wdev = lbs_cfg_alloc(dmdev);
	if (IS_ERR(wdev)) {
		pr_err("cfg80211 init failed\n");
		goto done;
	}

	wdev->iftype = NL80211_IFTYPE_STATION;
	priv = wdev_priv(wdev);
	priv->wdev = wdev;

	if (lbs_init_adapter(priv)) {
		pr_err("failed to initialize adapter structure\n");
		goto err_wdev;
	}

	dev = alloc_netdev(0, "wlan%d", ether_setup);
	if (!dev) {
		dev_err(dmdev, "no memory for network device instance\n");
		goto err_adapter;
	}

	dev->ieee80211_ptr = wdev;
	dev->ml_priv = priv;
	SET_NETDEV_DEV(dev, dmdev);
	wdev->netdev = dev;
	priv->dev = dev;

	netdev_attach_ops(dev, &lbs_netdev_ops);
	dev->watchdog_timeo = 5 * HZ;
	dev->ethtool_ops = &lbs_ethtool_ops;
	dev->flags |= IFF_BROADCAST | IFF_MULTICAST;

	priv->card = card;

	strcpy(dev->name, "wlan%d");

	lbs_deb_thread("Starting main thread...\n");
	init_waitqueue_head(&priv->waitq);
	priv->main_thread = kthread_run(lbs_thread, dev, "lbs_main");
	if (IS_ERR(priv->main_thread)) {
		lbs_deb_thread("Error creating main thread.\n");
		goto err_ndev;
	}

	priv->work_thread = create_singlethread_workqueue("lbs_worker");
	INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);

	priv->wol_criteria = EHS_REMOVE_WAKEUP;
	priv->wol_gpio = 0xff;
	priv->wol_gap = 20;
	priv->ehs_remove_supported = true;

	goto done;

 err_ndev:
	free_netdev(dev);

 err_adapter:
	lbs_free_adapter(priv);

 err_wdev:
	lbs_cfg_free(priv);

	priv = NULL;

done:
	lbs_deb_leave_args(LBS_DEB_MAIN, "priv %p", priv);
	return priv;
}
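lbs_add_card() checks the kthread_run() result but not the create_singlethread_workqueue() one, which returns NULL on failure. A defensive sketch of that one step, reusing the function's own err_ndev unwind path (not part of the original source):

	priv->work_thread = create_singlethread_workqueue("lbs_worker");
	if (!priv->work_thread) {
		kthread_stop(priv->main_thread);
		goto err_ndev;
	}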
static int __devinit bq2419x_probe(struct i2c_client *client,
				const struct i2c_device_id *id)
{
	struct bq2419x_chip *bq2419x;
	struct bq2419x_platform_data *pdata;
	int ret = 0;

	pdata = client->dev.platform_data;
	if (!pdata) {
		dev_err(&client->dev, "No Platform data");
		return -EINVAL;
	}

	bq2419x = devm_kzalloc(&client->dev, sizeof(*bq2419x), GFP_KERNEL);
	if (!bq2419x) {
		dev_err(&client->dev, "Memory allocation failed\n");
		return -ENOMEM;
	}

	bq2419x->regmap = devm_regmap_init_i2c(client, &bq2419x_regmap_config);
	if (IS_ERR(bq2419x->regmap)) {
		ret = PTR_ERR(bq2419x->regmap);
		dev_err(&client->dev, "regmap init failed with err %d\n", ret);
		return ret;
	}

	bq2419x->dev = &client->dev;
	bq2419x->use_usb = pdata->bcharger_pdata->use_usb;
	bq2419x->use_mains =  pdata->bcharger_pdata->use_mains;
	bq2419x->update_status =  pdata->bcharger_pdata->update_status;
	bq2419x->rtc_alarm_time =  pdata->bcharger_pdata->rtc_alarm_time;
	bq2419x->wdt_time_sec = pdata->bcharger_pdata->wdt_timeout;
	bq2419x->chg_restart_time = pdata->bcharger_pdata->chg_restart_time;
	bq2419x->wdt_refresh_timeout = 25;
	i2c_set_clientdata(client, bq2419x);
	bq2419x->irq = client->irq;
	bq2419x->rtc = alarmtimer_get_rtcdev();
	mutex_init(&bq2419x->mutex);
	mutex_init(&bq2419x->otg_mutex);
	bq2419x->suspended = 0;
	bq2419x->chg_restart_timeout = 0;
	bq2419x->is_otg_connected = 0;

	ret = bq2419x_show_chip_version(bq2419x);
	if (ret < 0) {
		dev_err(&client->dev, "version read failed %d\n", ret);
		return ret;
	}

	ret = bq2419x_charger_init(bq2419x);
	if (ret < 0) {
		dev_err(bq2419x->dev, "Charger init failed: %d\n", ret);
		return ret;
	}

	ret = bq2419x_init_charger_regulator(bq2419x, pdata);
	if (ret < 0) {
		dev_err(&client->dev,
			"Charger regualtor init failed %d\n", ret);
		return ret;
	}

	ret = bq2419x_psy_init(bq2419x);
	if (ret < 0) {
		dev_err(&client->dev,
			"Charger power supply init failed %d\n", ret);
		goto scrub_chg_reg;
	}

	ret = bq2419x_init_vbus_regulator(bq2419x, pdata);
	if (ret < 0) {
		dev_err(&client->dev,
			"VBUS regualtor init failed %d\n", ret);
		goto scrub_psy;
	}

	init_kthread_worker(&bq2419x->bq_kworker);
	bq2419x->bq_kworker_task = kthread_run(kthread_worker_fn,
				&bq2419x->bq_kworker,
				dev_name(bq2419x->dev));
	if (IS_ERR(bq2419x->bq_kworker_task)) {
		ret = PTR_ERR(bq2419x->bq_kworker_task);
		dev_err(&client->dev, "Kworker task creation failed %d\n", ret);
		goto scrub_vbus_reg;
	}

	init_kthread_work(&bq2419x->bq_wdt_work, bq2419x_work_thread);
	sched_setscheduler(bq2419x->bq_kworker_task,
			SCHED_FIFO, &bq2419x_param);
	queue_kthread_work(&bq2419x->bq_kworker, &bq2419x->bq_wdt_work);

	ret = bq2419x_watchdog_init(bq2419x, bq2419x->wdt_time_sec, "PROBE");
	if (ret < 0) {
		dev_err(bq2419x->dev, "BQWDT init failed %d\n", ret);
		goto scrub_kthread;
	}

	INIT_DELAYED_WORK(&bq2419x->otg_reset_work,
			bq2419x_otg_reset_work_handler);

	ret = bq2419x_fault_clear_sts(bq2419x);
	if (ret < 0) {
		dev_err(bq2419x->dev, "fault clear status failed %d\n", ret);
		goto scrub_kthread;
	}

	ret = request_threaded_irq(bq2419x->irq, NULL,
		bq2419x_irq, IRQF_TRIGGER_FALLING,
			dev_name(bq2419x->dev), bq2419x);
	if (ret < 0) {
		dev_err(bq2419x->dev, "request IRQ %d fail, err = %d\n",
				bq2419x->irq, ret);
		goto scrub_kthread;
	}

	/* enable charging */
	ret = bq2419x_charger_enable(bq2419x);
	if (ret < 0)
		goto scrub_irq;

	return 0;
scrub_irq:
	free_irq(bq2419x->irq, bq2419x);
scrub_kthread:
	bq2419x->stop_thread = true;
	flush_kthread_worker(&bq2419x->bq_kworker);
	kthread_stop(bq2419x->bq_kworker_task);
scrub_vbus_reg:
	regulator_unregister(bq2419x->vbus_rdev);
scrub_psy:
	if (bq2419x->use_usb)
		power_supply_unregister(&bq2419x->usb);
	if (bq2419x->use_mains)
		power_supply_unregister(&bq2419x->ac);
scrub_chg_reg:
	regulator_unregister(bq2419x->chg_rdev);
	mutex_destroy(&bq2419x->mutex);
	mutex_destroy(&bq2419x->otg_mutex);
	return ret;
}
static int sdio_irq_thread(void *_host)
{
    struct mmc_host *host = _host;
    struct sched_param param = { .sched_priority = 1 };
    unsigned long period, idle_period;
    int ret;

    sched_setscheduler(current, SCHED_FIFO, &param);

    idle_period = msecs_to_jiffies(10);
    period = (host->caps & MMC_CAP_SDIO_IRQ) ?
             MAX_SCHEDULE_TIMEOUT : idle_period;

    pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
             mmc_hostname(host), period);

    do {
        ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
        if (ret)
            break;
        ret = process_sdio_pending_irqs(host);
        host->sdio_irq_pending = false;
        mmc_release_host(host);

        if (ret < 0) {
            set_current_state(TASK_INTERRUPTIBLE);
            if (!kthread_should_stop())
                schedule_timeout(HZ);
            set_current_state(TASK_RUNNING);
        }

        if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
            if (ret > 0)
                period /= 2;
            else {
                period++;
                if (period > idle_period)
                    period = idle_period;
            }
        }

        set_current_state(TASK_INTERRUPTIBLE);
        if (host->caps & MMC_CAP_SDIO_IRQ) {
            mmc_host_clk_hold(host);
            host->ops->enable_sdio_irq(host, 1);
            mmc_host_clk_release(host);
        }
        if (!kthread_should_stop())
            schedule_timeout(period);
        set_current_state(TASK_RUNNING);
    } while (!kthread_should_stop());

    if (host->caps & MMC_CAP_SDIO_IRQ) {
        mmc_host_clk_hold(host);
        host->ops->enable_sdio_irq(host, 0);
        mmc_host_clk_release(host);
    }

    pr_debug("%s: IRQ thread exiting with code %d\n",
             mmc_hostname(host), ret);

    return ret;
}

static int sdio_card_irq_get(struct mmc_card *card)
{
    struct mmc_host *host = card->host;

    WARN_ON(!host->claimed);

    if (!host->sdio_irqs++) {
        atomic_set(&host->sdio_irq_thread_abort, 0);
        host->sdio_irq_thread =
            kthread_run(sdio_irq_thread, host, "ksdioirqd/%s",
                        mmc_hostname(host));
        if (IS_ERR(host->sdio_irq_thread)) {
            int err = PTR_ERR(host->sdio_irq_thread);
            host->sdio_irqs--;
            return err;
        }
        if (host->caps & MMC_CAP_SDIO_IRQ) {
            mmc_host_clk_hold(host);
            host->ops->enable_sdio_irq(host, 1);
            mmc_host_clk_release(host);
        }
    }

    return 0;
}

static int sdio_card_irq_put(struct mmc_card *card)
{
    struct mmc_host *host = card->host;

    WARN_ON(!host->claimed);
    BUG_ON(host->sdio_irqs < 1);

    if (host->sdio_irqs == 1) {
        if (host->caps & MMC_CAP_SDIO_IRQ) {
            mmc_host_clk_hold(host);
            host->ops->enable_sdio_irq(host, 0);
            mmc_host_clk_release(host);
        }
    }

    if (!--host->sdio_irqs) {
        atomic_set(&host->sdio_irq_thread_abort, 1);
        kthread_stop(host->sdio_irq_thread);
    }

    return 0;
}

static void sdio_single_irq_set(struct mmc_card *card)
{
    struct sdio_func *func;
    int i;

    card->sdio_single_irq = NULL;
    if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
            card->host->sdio_irqs == 1)
        for (i = 0; i < card->sdio_funcs; i++) {
            func = card->sdio_func[i];
            if (func && func->irq_handler) {
                card->sdio_single_irq = func;
                break;
            }
        }
}

int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
{
    int ret;
    unsigned char reg;

    BUG_ON(!func);
    BUG_ON(!func->card);

    pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));

    if (func->irq_handler) {
        pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func));
        return -EBUSY;
    }

    ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
    if (ret)
        return ret;

    reg |= 1 << func->num;

    reg |= 1;

    ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
    if (ret)
        return ret;

    func->irq_handler = handler;
    ret = sdio_card_irq_get(func->card);
    if (ret)
        func->irq_handler = NULL;
    sdio_single_irq_set(func->card);

    return ret;
}
Example #8
int __init stmhdmi_create(int id,
                          dev_t firstdevice,
                          struct stmcore_display_pipeline_data *platform_data)
{
  struct stm_hdmi *hdmi;
  struct i2c_adapter *i2c;
  char *paramstring;

  if(!platform_data)
  {
    printk(KERN_ERR "platform data pointer is NULL\n");
    BUG();
    return -EINVAL;
  }

  i2c = i2c_get_adapter(platform_data->hdmi_i2c_adapter_id);

  if(!i2c)
  {
    printk(KERN_ERR "HDMI i2c bus (%d) not available, check your kernel configuration and board setup\n",platform_data->hdmi_i2c_adapter_id);
    return -EINVAL;
  }

  platform_data->hdmi_data = NULL;

  if((hdmi = stmhdmi_create_hdmi_dev_struct()) == NULL)
    return -ENOMEM;

  DPRINTK("new hdmi structure = %p\n",hdmi);

#ifdef __TDT__
  //Dagobert
  HACK_dev = hdmi;
#endif

  /*
   * Note that we reuse the device handle from the platform data.
   */
  hdmi->device      = platform_data->device;
  hdmi->irq         = -1;
  hdmi->i2c_adapter = i2c;
  hdmi->video_type  = STM_VIDEO_OUT_RGB;

  switch(id)
  {
    case 0:
      paramstring = hdmi0;
      break;
    default:
      paramstring = NULL;
      break;
  }

  if(paramstring)
  {
    if(paramstring[0] == 'd' || paramstring[0] == 'D')
    {
      printk(KERN_WARNING "hdmi%d.0 is initially disabled, use 'stfbset -e hdmi' to enable it\n",id);
      hdmi->disable = 1;
    }
  }

  /*
   * Set the default CEA selection behaviour to use the aspect ratio in the EDID
   */
  hdmi->cea_mode_from_edid = 1;
#ifdef __TDT__
  hdmi->non_strict_edid_semantics = STMHDMIIO_EDID_NON_STRICT_MODE_HANDLING;
#endif

  /*
   * Copy the display runtime pointer for the vsync callback handling.
   */
  hdmi->display_runtime = platform_data->display_runtime;

  /*
   * Note that we are trusting the output identifiers are valid
   * and pointing to correct output types.
   */
  hdmi->main_output = stm_display_get_output(hdmi->device, platform_data->main_output_id);
  hdmi->hdmi_output = stm_display_get_output(hdmi->device, platform_data->hdmi_output_id);

  if(hdmi->main_output == NULL || hdmi->hdmi_output == NULL)
  {
    DPRINTK("Cannot get display outputs main = %d, hdmi = %d\n",platform_data->main_output_id,platform_data->hdmi_output_id);
    stmhdmi_destroy(hdmi);
    return -ENODEV;
  }

  if(stm_display_output_get_capabilities(hdmi->hdmi_output, &hdmi->capabilities)<0)
  {
    DPRINTK("Cannot get hdmi output capabilities\n");
    stmhdmi_destroy(hdmi);
    return -ENODEV;
  }

  if(!(hdmi->capabilities & STM_OUTPUT_CAPS_TMDS))
  {
    printk(KERN_ERR "Provided HDMI output identifier doesn't support TMDS??\n");
    stmhdmi_destroy(hdmi);
    return -ENODEV;
  }

  if(request_irq(platform_data->hdmi_irq, stmhdmi_interrupt, IRQF_DISABLED, "hdmi", hdmi->hdmi_output))
  {
    printk(KERN_ERR "Cannot get HDMI irq = %d\n",platform_data->hdmi_irq);
    stmhdmi_destroy(hdmi);
    return -ENODEV;
  }

  hdmi->irq = platform_data->hdmi_irq;

  if(stmhdmi_create_spd_metadata(hdmi))
  {
    stmhdmi_destroy(hdmi);
    return -ENOMEM;
  }

  /*
   * If we split the HDMI management into another module then we should change
   * the owner field in the callback info to THIS_MODULE. However this is
   * linked into the coredisplay module at the moment we do not want to have
   * another reference to ourselves.
   */
  INIT_LIST_HEAD(&(hdmi->vsync_cb_info.node));
  hdmi->vsync_cb_info.owner   = NULL;
  hdmi->vsync_cb_info.context = hdmi;
  hdmi->vsync_cb_info.cb      = stmhdmi_vsync_cb;
  if(stmcore_register_vsync_callback(hdmi->display_runtime, &hdmi->vsync_cb_info)<0)
  {
    printk(KERN_ERR "Cannot register hdmi vsync callback\n");
    return -ENODEV;
  }

  hdmi->thread = kthread_run(stmhdmi_manager,hdmi,"hdmid/%d",id);
  if (IS_ERR(hdmi->thread))
  {
    /* kthread_run() returns ERR_PTR() on failure, never NULL */
    printk(KERN_ERR "Cannot start hdmi thread id = %d\n",id);
    hdmi->thread = NULL;
    stmhdmi_destroy(hdmi);
    return -ENOMEM;
  }

  platform_data->hdmi_data = hdmi;

  if(stmhdmi_register_device(hdmi, id, firstdevice, platform_data))
  {
    stmhdmi_destroy(hdmi);
    return -ENODEV;
  }

  return 0;
}
Example #9
static int sdio_irq_thread(void *_host)
{
	struct mmc_host *host = _host;
	struct sched_param param = { .sched_priority = 1 };
	unsigned long period, idle_period;
	int ret;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/*
	 * We want to allow for SDIO cards to work even on non SDIO
	 * aware hosts.  One thing that non SDIO host cannot do is
	 * asynchronous notification of pending SDIO card interrupts
	 * hence we poll for them in that case.
	 */
	idle_period = msecs_to_jiffies(10);
	period = (host->caps & MMC_CAP_SDIO_IRQ) ?
		MAX_SCHEDULE_TIMEOUT : idle_period;

	pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
		 mmc_hostname(host), period);

	do {
		/*
		 * We claim the host here on drivers behalf for a couple
		 * reasons:
		 *
		 * 1) it is already needed to retrieve the CCCR_INTx;
		 * 2) we want the driver(s) to clear the IRQ condition ASAP;
		 * 3) we need to control the abort condition locally.
		 *
		 * Just like traditional hard IRQ handlers, we expect SDIO
		 * IRQ handlers to be quick and to the point, so that the
		 * holding of the host lock does not cover too much work
		 * that doesn't require that lock to be held.
		 */
		ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
		if (ret)
			break;
		ret = process_sdio_pending_irqs(host->card);
		mmc_release_host(host);

		/*
		 * Give other threads a chance to run in the presence of
		 * errors.
		 */
		if (ret < 0) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule_timeout(HZ);
			set_current_state(TASK_RUNNING);
		}

		/*
		 * Adaptive polling frequency based on the assumption
		 * that an interrupt will be closely followed by more.
		 * This has a substantial benefit for network devices.
		 */
		if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
			if (ret > 0)
				period /= 2;
			else {
				period++;
				if (period > idle_period)
					period = idle_period;
			}
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (host->caps & MMC_CAP_SDIO_IRQ)
			host->ops->enable_sdio_irq(host, 1);
		if (!kthread_should_stop())
			schedule_timeout(period);
		set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	if (host->caps & MMC_CAP_SDIO_IRQ)
		host->ops->enable_sdio_irq(host, 0);

	pr_debug("%s: IRQ thread exiting with code %d\n",
		 mmc_hostname(host), ret);

	return ret;
}

static int sdio_card_irq_get(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);

	if (!host->sdio_irqs++) {
		atomic_set(&host->sdio_irq_thread_abort, 0);
		host->sdio_irq_thread =
			kthread_run(sdio_irq_thread, host, "ksdioirqd/%s",
				mmc_hostname(host));
		if (IS_ERR(host->sdio_irq_thread)) {
			int err = PTR_ERR(host->sdio_irq_thread);
			host->sdio_irqs--;
			return err;
		}
	}

	return 0;
}

static int sdio_card_irq_put(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);
	BUG_ON(host->sdio_irqs < 1);

	if (!--host->sdio_irqs) {
		atomic_set(&host->sdio_irq_thread_abort, 1);
		kthread_stop(host->sdio_irq_thread);
	}

	return 0;
}

/* If there is only 1 function registered set sdio_single_irq */
static void sdio_single_irq_set(struct mmc_card *card)
{
	struct sdio_func *func;
	int i;

	card->sdio_single_irq = NULL;
	if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
	    card->host->sdio_irqs == 1)
		for (i = 0; i < card->sdio_funcs; i++) {
		       func = card->sdio_func[i];
		       if (func && func->irq_handler) {
			       card->sdio_single_irq = func;
			       break;
		       }
	       }
}

/**
 *	sdio_claim_irq - claim the IRQ for a SDIO function
 *	@func: SDIO function
 *	@handler: IRQ handler callback
 *
 *	Claim and activate the IRQ for the given SDIO function. The provided
 *	handler will be called when that IRQ is asserted.  The host is always
 *	claimed already when the handler is called so the handler must not
 *	call sdio_claim_host() nor sdio_release_host().
 */
int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
{
	int ret;
	unsigned char reg;

	BUG_ON(!func);
	BUG_ON(!func->card);

	pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));

	if (func->irq_handler) {
		pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func));
		return -EBUSY;
	}

	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
	if (ret)
		return ret;

	reg |= 1 << func->num;

	reg |= 1; /* Master interrupt enable */

	ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
	if (ret)
		return ret;

	func->irq_handler = handler;
	ret = sdio_card_irq_get(func->card);
	if (ret)
		func->irq_handler = NULL;
	sdio_single_irq_set(func->card);

	return ret;
}
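As the kernel-doc above notes, the handler runs with the host already claimed. A hypothetical driver would therefore claim its IRQ from probe roughly as follows (my_sdio_irq and my_sdio_probe are illustrative names, not part of the code above):

static void my_sdio_irq(struct sdio_func *func)
{
	/* runs with the host claimed: no sdio_claim_host()/sdio_release_host() here */
}

static int my_sdio_probe(struct sdio_func *func,
			 const struct sdio_device_id *id)
{
	int ret;

	sdio_claim_host(func);
	ret = sdio_claim_irq(func, my_sdio_irq);
	sdio_release_host(func);

	return ret;
}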
void startup_aodv() {
    //aodv_pid = kernel_thread((void *) &aodv, NULL, 0);
    aodv_task = kthread_run((void *) &aodv, NULL, "fbaodv_protocol");
    initialized = 1;
}
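The (void *) cast above papers over a prototype mismatch: kthread_run() expects a thread function of type int (*)(void *). A cast-free sketch, with aodv_thread() as a hypothetical wrapper and aodv() assumed to loop until kthread_should_stop():

static int aodv_thread(void *data)
{
	aodv();		/* assumed to poll kthread_should_stop() internally */
	return 0;
}

void startup_aodv(void)
{
	aodv_task = kthread_run(aodv_thread, NULL, "fbaodv_protocol");
	initialized = 1;
}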
Example #11
File: queue.c  Project: Tigrouzen/k1099
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;
#if 0
	if (!mq) {
#else
    /* With the USB cable plugged in (charging), hot-plugging the card
     * occasionally hung the system and mq->thread was seen to be NULL;
     * modified by xbw */
    if (!mq || !mq->thread) {
#endif	
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = __blk_end_request(req, -EIO,
							blk_rq_cur_bytes(req));
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_ANY; /* was BLK_BOUNCE_HIGH */
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;

		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to allocate "
				"bounce buffer\n", mmc_card_name(card));
		} else {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
 	if (mq->bounce_sg)
 		kfree(mq->bounce_sg);
 	mq->bounce_sg = NULL;
 cleanup_queue:
 	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

 	if (mq->bounce_sg)
 		kfree(mq->bounce_sg);
 	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
	struct scatterlist *src, unsigned int src_len)
{
	unsigned int chunk;
	char *dst_buf, *src_buf;
	unsigned int dst_size, src_size;

	dst_buf = NULL;
	src_buf = NULL;
	dst_size = 0;
	src_size = 0;

	while (src_len) {
		BUG_ON(dst_len == 0);

		if (dst_size == 0) {
			dst_buf = sg_virt(dst);
			dst_size = dst->length;
		}

		if (src_size == 0) {
			src_buf = sg_virt(src);
			src_size = src->length;
		}

		chunk = min(dst_size, src_size);

		memcpy(dst_buf, src_buf, chunk);

		dst_buf += chunk;
		src_buf += chunk;
		dst_size -= chunk;
		src_size -= chunk;

		if (dst_size == 0) {
			dst++;
			dst_len--;
		}

		if (src_size == 0) {
			src++;
			src_len--;
		}
	}
}
Example #12
static void kgdbts_run_tests(void)
{
	char *ptr;
	int fork_test = 0;
	int do_sys_open_test = 0;
	int sstep_test = 1000;
	int nmi_sleep = 0;
	int i;

	ptr = strchr(config, 'F');
	if (ptr)
		fork_test = simple_strtol(ptr + 1, NULL, 10);
	ptr = strchr(config, 'S');
	if (ptr)
		do_sys_open_test = simple_strtol(ptr + 1, NULL, 10);
	ptr = strchr(config, 'N');
	if (ptr)
		nmi_sleep = simple_strtol(ptr+1, NULL, 10);
	ptr = strchr(config, 'I');
	if (ptr)
		sstep_test = simple_strtol(ptr+1, NULL, 10);

	/* All HW break point tests */
	if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) {
		hwbreaks_ok = 1;
		v1printk("kgdbts:RUN hw breakpoint test\n");
		run_breakpoint_test(1);
		v1printk("kgdbts:RUN hw write breakpoint test\n");
		run_hw_break_test(1);
		v1printk("kgdbts:RUN access write breakpoint test\n");
		run_hw_break_test(0);
	}

	/* required internal KGDB tests */
	v1printk("kgdbts:RUN plant and detach test\n");
	run_plant_and_detach_test(0);
	v1printk("kgdbts:RUN sw breakpoint test\n");
	run_breakpoint_test(0);
	v1printk("kgdbts:RUN bad memory access test\n");
	run_bad_read_test();
	v1printk("kgdbts:RUN singlestep test %i iterations\n", sstep_test);
	for (i = 0; i < sstep_test; i++) {
		run_singlestep_break_test();
		if (i % 100 == 0)
			v1printk("kgdbts:RUN singlestep [%i/%i]\n",
				 i, sstep_test);
	}

	/* ===Optional tests=== */

	if (nmi_sleep) {
		v1printk("kgdbts:RUN NMI sleep %i seconds test\n", nmi_sleep);
		run_nmi_sleep_test(nmi_sleep);
	}

	/* If the do_fork test is run it will be the last test that is
	 * executed because a kernel thread will be spawned at the very
	 * end to unregister the debug hooks.
	 */
	if (fork_test) {
		repeat_test = fork_test;
		printk(KERN_INFO "kgdbts:RUN do_fork for %i breakpoints\n",
			repeat_test);
		kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg");
		run_do_fork_test();
		return;
	}

	/* If the sys_open test is run it will be the last test that is
	 * executed because a kernel thread will be spawned at the very
	 * end to unregister the debug hooks.
	 */
	if (do_sys_open_test) {
		repeat_test = do_sys_open_test;
		printk(KERN_INFO "kgdbts:RUN sys_open for %i breakpoints\n",
			repeat_test);
		kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg");
		run_sys_open_test();
		return;
	}
	/* Shutdown and unregister */
	kgdb_unregister_io_module(&kgdbts_io_ops);
	configured = 0;
}
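The comments above explain that kgdbts_unreg_thread is spawned last so it can unregister the debug hooks once the final test completes. A sketch reconstructed from that description (the final_ack flag and the 1500 ms wait are assumptions; configured and kgdbts_io_ops are from the code above):

static int kgdbts_unreg_thread(void *ptr)
{
	/* Wait until the tests are complete before unregistering the I/O driver. */
	while (!final_ack)
		msleep_interruptible(1500);

	if (configured)
		kgdb_unregister_io_module(&kgdbts_io_ops);
	configured = 0;

	return 0;
}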
Example #13
static int __devinit envctrl_probe(struct platform_device *op)
{
	struct device_node *dp;
	int index, err;

	if (i2c)
		return -EINVAL;

	i2c = of_ioremap(&op->resource[0], 0, 0x2, DRIVER_NAME);
	if (!i2c)
		return -ENOMEM;

	index = 0;
	dp = op->dev.of_node->child;
	while (dp) {
		if (!strcmp(dp->name, "gpio")) {
			i2c_childlist[index].i2ctype = I2C_GPIO;
			envctrl_init_i2c_child(dp, &(i2c_childlist[index++]));
		} else if (!strcmp(dp->name, "adc")) {
			i2c_childlist[index].i2ctype = I2C_ADC;
			envctrl_init_i2c_child(dp, &(i2c_childlist[index++]));
		}

		dp = dp->sibling;
	}

	/* Set device address. */
	writeb(CONTROL_PIN, i2c + PCF8584_CSR);
	writeb(PCF8584_ADDRESS, i2c + PCF8584_DATA);

	/* Set system clock and SCL frequencies. */ 
	writeb(CONTROL_PIN | CONTROL_ES1, i2c + PCF8584_CSR);
	writeb(CLK_4_43 | BUS_CLK_90, i2c + PCF8584_DATA);

	/* Enable serial interface. */
	writeb(CONTROL_PIN | CONTROL_ES0 | CONTROL_ACK, i2c + PCF8584_CSR);
	udelay(200);

	/* Register the device as a minor miscellaneous device. */
	err = misc_register(&envctrl_dev);
	if (err) {
		printk(KERN_ERR PFX "Unable to get misc minor %d\n",
		       envctrl_dev.minor);
		goto out_iounmap;
	}

	/* Note above traversal routine post-incremented 'i' to accommodate 
	 * a next child device, so we decrement before reverse-traversal of
	 * child devices.
	 */
	printk(KERN_INFO PFX "Initialized ");
	for (--index; index >= 0; --index) {
		printk("[%s 0x%lx]%s", 
			(I2C_ADC == i2c_childlist[index].i2ctype) ? "adc" : 
			((I2C_GPIO == i2c_childlist[index].i2ctype) ? "gpio" : "unknown"), 
			i2c_childlist[index].addr, (0 == index) ? "\n" : " ");
	}

	kenvctrld_task = kthread_run(kenvctrld, NULL, "kenvctrld");
	if (IS_ERR(kenvctrld_task)) {
		err = PTR_ERR(kenvctrld_task);
		goto out_deregister;
	}

	return 0;

out_deregister:
	misc_deregister(&envctrl_dev);
out_iounmap:
	of_iounmap(&op->resource[0], i2c, 0x2);
	for (index = 0; index < ENVCTRL_MAX_CPU * 2; index++)
		kfree(i2c_childlist[index].tables);

	return err;
}
Example #14
static ssize_t stage1_store(struct kobject *kobj, struct kobj_attribute *attr,
                        const char *buf, size_t count)
{
    char cur_name[DEVNAME_SIZE]="";
    int test_len, cur_len;
    int i, j;
    int id;
    int select;
    struct msdc_host *host;
    //char *p_log;
    //id = 3;
    select = -1;
    sscanf(kobj->name, "%d", &id);

    if (id >= HOST_MAX_NUM) {
		pr_err("[%s] id<%d> is bigger than HOST_MAX_NUM<%d>\n", __func__, id, HOST_MAX_NUM);
        return count;
    }

    host = mtk_msdc_host[id];
    sscanf(attr->attr.name, "%s", cur_name);
    for(i=0; i<TOTAL_STAGE1_NODE_COUNT; i++){
        test_len = strlen(stage1_nodes[i]);
        cur_len = strlen(cur_name);
        if((test_len==cur_len) && (strncmp(stage1_nodes[i], cur_name, cur_len)==0)){
            select = i;
            break;   
        }
    }
    
    switch(select){
        case VOLTAGE:
            sscanf(buf, "%u", &cur_voltage[id]);
            break;
        case PARAMS:
            memset(cur_name, 0, DEVNAME_SIZE);
            cur_name[0] = 1;
            cur_name[1] = E_AUTOK_PARM_MAX;
            memcpy(&cur_name[2], &cur_voltage[id], sizeof(unsigned int));
            store_autok(&p_single_autok[id], cur_name, count);

		pr_debug("[AUTOKD] Enter Store Autok");
		pr_debug("[AUTOKD] p_single_autok[%d].vol_count=%d", id, p_single_autok[id].vol_count);
		pr_debug("[AUTOKD] p_single_autok[%d].param_count=%d", id, p_single_autok[id].param_count);
            for(i=0; i<p_single_autok[id].vol_count; i++){
			pr_debug("[AUTOKD] p_single_autok[%d].vol_list[%d]=%d",
				id, i, p_single_autok[id].vol_list[i]);
            }
            for(i=0; i<p_single_autok[id].vol_count; i++){
                for(j=0; j<p_single_autok[id].param_count; j++)
			pr_debug("[AUTOKD] p_single_autok[%d].ai_data[%d][%d]=%d",
				id, i, j, p_single_autok[id].ai_data[i][j].data.sel);
            }
            //[FIXDONE] Start to do autok algorithm; data is in p_single_autok
#ifdef UT_TEST
            if(is_first_stage1 == 1) {
                // claim host   
                #ifdef CONFIG_SDIOAUTOK_SUPPORT
                //mt_cpufreq_disable(0, true);
                mt_vcore_dvfs_disable_by_sdio(0, true);
                #endif
    #ifdef MTK_SDIO30_ONLINE_TUNING_SUPPORT    
                atomic_set(&host->ot_work.ot_disable, 1);
    #endif  // MTK_SDIO30_ONLINE_TUNING_SUPPORT
                autok_claim_host(host);
                
                is_first_stage1 = 0;
            }
#endif               
#ifdef AUTOK_THREAD
            p_autok_thread_data->host = host;
            p_autok_thread_data->stage = 1;
            p_autok_thread_data->p_autok_predata = &p_single_autok[id];
            p_autok_thread_data->log = autok_log_info;
            task = kthread_run(&autok_thread_func,(void *)(p_autok_thread_data),"autokp");
#endif            
            break;
        case DONE:
            sscanf(buf, "%d", &i);
            p_autok_thread_data->is_autok_done[id] = (u8)i;
            break;
        case LOG:  
          sscanf(buf, "%d", &i);
          if(is_full_log != i){
              is_full_log = i;
              if(i==0){       
                  debugfs_remove(autok_log_entry);
                  //kfree(autok_log_info);
              }else{
                  autok_log_entry = debugfs_create_file("autok_log", 0660, NULL, NULL, &autok_log_fops);
                  i_gid_write(autok_log_entry->d_inode, 1000);
                  autok_log_info = (char*)kzalloc(LOG_SIZE, GFP_KERNEL);
              }
          }
          break;
        default:
            break;        
    }
    return count;
}
Example #15
static int __devinit serial_m3110_probe(struct spi_device *spi)
{
	struct uart_max3110 *max;
	void *buffer;
	u16 res;
	int ret = 0;

	max = kzalloc(sizeof(*max), GFP_KERNEL);
	if (!max)
		return -ENOMEM;

	/* Set spi info */
	spi->bits_per_word = 16;
	max->clock = MAX3110_HIGH_CLK;

	spi_setup(spi);

	max->port.type = PORT_MAX3100;
	max->port.fifosize = 2;		/* Only have 16b buffer */
	max->port.ops = &serial_m3110_ops;
	max->port.line = 0;
	max->port.dev = &spi->dev;
	max->port.uartclk = 115200;

	max->spi = spi;
	strcpy(max->name, spi->modalias);
	max->irq = (u16)spi->irq;

	mutex_init(&max->thread_mutex);

	max->word_7bits = 0;
	max->parity = 0;
	max->baud = 0;

	max->cur_conf = 0;
	max->uart_flags = 0;

	/* Check if reading configuration register returns something sane */

	res = RC_TAG;
	ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0);
	if (ret < 0 || res == 0 || res == 0xffff) {
		printk(KERN_ERR "MAX3111 deemed not present (conf reg %04x)",
									res);
		ret = -ENODEV;
		goto err_get_page;
	}

	buffer = (void *)__get_free_page(GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto err_get_page;
	}
	max->con_xmit.buf = buffer;
	max->con_xmit.head = 0;
	max->con_xmit.tail = 0;

	max->main_thread = kthread_run(max3110_main_thread,
					max, "max3110_main");
	if (IS_ERR(max->main_thread)) {
		ret = PTR_ERR(max->main_thread);
		goto err_kthread;
	}

	spi_set_drvdata(spi, max);
	pmax = max;

	/* Give membase a pseudo value to pass serial_core's check */
	max->port.membase = (void *)0xff110000;
	uart_add_one_port(&serial_m3110_reg, &max->port);

	return 0;

err_kthread:
	free_page((unsigned long)buffer);
err_get_page:
	kfree(max);
	return ret;
}
int card_init_queue(struct card_queue *cq, struct memory_card *card,
		    spinlock_t * lock)
{
	struct card_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret=0, card_quene_num;
	struct card_queue_list *cq_node_current;
	struct card_queue_list *cq_node_prev = NULL;

	if (host->parent->dma_mask && *host->parent->dma_mask)
		limit = *host->parent->dma_mask;

	cq->card = card;
	cq->queue = blk_init_queue(card_request, lock);
	if (!cq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(cq->queue, card_prep_request);
	card_init_bounce_buf(cq, card);
	
	if(!cq->bounce_buf){
		blk_queue_bounce_limit(cq->queue, limit);
		blk_queue_max_hw_sectors(cq->queue, host->max_sectors);
		//blk_queue_max_hw_phys_segments(cq->queue, host->max_phys_segs);
		blk_queue_max_segments(cq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(cq->queue, host->max_seg_size);

		cq->queue->queuedata = cq;
		cq->req = NULL;

		cq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs, GFP_KERNEL);
		if (!cq->sg) {
			ret = -ENOMEM;
			blk_cleanup_queue(cq->queue);
			return ret;
		}
	}

	if (card_queue_head == NULL)
	{
		card_queue_head = kmalloc(sizeof(struct card_queue_list), GFP_KERNEL);
		if (card_queue_head == NULL) 
		{
			ret = -ENOMEM;
			kfree(card_queue_head);
			card_queue_head = NULL;
			return ret;
		}
		card_queue_head->cq = cq;
		card_queue_head->cq_num = 0;
		card_queue_head->cq_flag = 0;
		card_queue_head->cq_next = NULL;

		init_completion(&card_thread_complete);
		init_waitqueue_head(&card_thread_wq);
		init_MUTEX(&card_thread_sem);
		/* kthread_run() returns ERR_PTR() on failure, never NULL */
		host->queue_task = kthread_run(card_queue_thread, cq, "card_queue");
		if (IS_ERR(host->queue_task))
			return PTR_ERR(host->queue_task);

		wait_for_completion(&card_thread_complete);
		init_completion(&card_thread_complete);
		ret = 0;
		return ret;
	} 
	else
	{
		card_quene_num = 0;
		cq_node_current = card_queue_head;
		do
		{
			card_quene_num = cq_node_current->cq_num;
			cq_node_prev = cq_node_current;
			cq_node_current = cq_node_current->cq_next;
		} while (cq_node_current != NULL);

		cq_node_current = kmalloc(sizeof(struct card_queue_list), GFP_KERNEL);
		if (cq_node_current == NULL)
		{
			ret = -ENOMEM;
			kfree(cq_node_current);
			cq_node_current = NULL;
			return ret;
		}
		cq_node_prev->cq_next = cq_node_current;
		cq_node_current->cq = cq;
		cq_node_current->cq_next = NULL;
		cq_node_current->cq_num = (++card_quene_num);
		cq_node_current->cq_flag = 0;

		ret = 0;
		return ret;
	}

	return ret;
}
/**
 * @brief Start drivers event monitoring thread.
 *
 *****************************************************************************/
void StartEventThread(void)
{
	gDriverContext.pTaskStruct = kthread_run(EventThread,
											 &gDriverContext,
											 MHL_DRIVER_NAME);
}
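StartEventThread() above neither checks the kthread_run() result nor shows the thread body. A sketch of the corresponding monitoring loop, with the event-dispatch step left abstract (EventThread's internals are assumptions based on the call above):

static int EventThread(void *data)
{
	/* data points at gDriverContext; its layout is driver-specific */
	while (!kthread_should_stop()) {
		/* wait for and dispatch driver events here */
		msleep(20);		/* assumed poll interval */
	}
	return 0;
}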
int ssp_sensorhub_initialize(struct ssp_data *ssp_data)
{
	struct ssp_sensorhub_data *hub_data;
	int ret;

	/* allocate memory for sensorhub data */
	hub_data = kzalloc(sizeof(*hub_data), GFP_KERNEL);
	if (!hub_data) {
		sensorhub_err("allocate memory for sensorhub data err");
		ret = -ENOMEM;
		goto exit;
	}
	hub_data->ssp_data = ssp_data;
	ssp_data->hub_data = hub_data;

	/* init wakelock, list, waitqueue, completion and spinlock */
	wake_lock_init(&hub_data->sensorhub_wake_lock, WAKE_LOCK_SUSPEND,
			"ssp_sensorhub_wake_lock");
	init_waitqueue_head(&hub_data->sensorhub_wq);
	init_completion(&hub_data->read_done);
	init_completion(&hub_data->big_read_done);
	init_completion(&hub_data->big_write_done);
	spin_lock_init(&hub_data->sensorhub_lock);

	/* allocate sensorhub input device */
	hub_data->sensorhub_input_dev = input_allocate_device();
	if (!hub_data->sensorhub_input_dev) {
		sensorhub_err("allocate sensorhub input device err");
		ret = -ENOMEM;
		goto err_input_allocate_device_sensorhub;
	}

	/* set sensorhub input device */
	input_set_drvdata(hub_data->sensorhub_input_dev, hub_data);
	hub_data->sensorhub_input_dev->name = "ssp_context";
	input_set_capability(hub_data->sensorhub_input_dev, EV_REL, DATA);
	input_set_capability(hub_data->sensorhub_input_dev, EV_REL, BIG_DATA);
	input_set_capability(hub_data->sensorhub_input_dev, EV_REL, NOTICE);

	/* register sensorhub input device */
	ret = input_register_device(hub_data->sensorhub_input_dev);
	if (ret < 0) {
		sensorhub_err("register sensorhub input device err(%d)", ret);
		input_free_device(hub_data->sensorhub_input_dev);
		goto err_input_register_device_sensorhub;
	}

	/* register sensorhub misc device */
	hub_data->sensorhub_device.minor = MISC_DYNAMIC_MINOR;
	hub_data->sensorhub_device.name = "ssp_sensorhub";
	hub_data->sensorhub_device.fops = &ssp_sensorhub_fops;

	ret = misc_register(&hub_data->sensorhub_device);
	if (ret < 0) {
		sensorhub_err("register sensorhub misc device err(%d)", ret);
		goto err_misc_register;
	}

	/* allocate fifo */
	ret = kfifo_alloc(&hub_data->fifo,
		sizeof(void *) * LIST_SIZE, GFP_KERNEL);
	if (ret) {
		sensorhub_err("kfifo allocate err(%d)", ret);
		goto err_kfifo_alloc;
	}

	/* create and run sensorhub thread */
	hub_data->sensorhub_task = kthread_run(ssp_sensorhub_thread,
				(void *)hub_data, "ssp_sensorhub_thread");
	if (IS_ERR(hub_data->sensorhub_task)) {
		ret = PTR_ERR(hub_data->sensorhub_task);
		goto err_kthread_run;
	}

	return 0;

err_kthread_run:
	kfifo_free(&hub_data->fifo);
err_kfifo_alloc:
	misc_deregister(&hub_data->sensorhub_device);
err_misc_register:
	input_unregister_device(hub_data->sensorhub_input_dev);
err_input_register_device_sensorhub:
err_input_allocate_device_sensorhub:
	complete_all(&hub_data->big_write_done);
	complete_all(&hub_data->big_read_done);
	complete_all(&hub_data->read_done);
	wake_lock_destroy(&hub_data->sensorhub_wake_lock);
	kfree(hub_data);
exit:
	return ret;
}
Example #19
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
	memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		if(mmc_card_sd(card)) {
			bouncesz = SD_QUEUE_BOUNCESZ;

			if (bouncesz > host->max_req_size)
				bouncesz = host->max_req_size;
			if (bouncesz > host->max_seg_size)
				bouncesz = host->max_seg_size;
			if (bouncesz > (host->max_blk_count * 512))
				bouncesz = host->max_blk_count * 512;

			sd_buffer_pre_alloc();
			mqrq_cur->bounce_buf = (char *)sd_bounce_buffer_cur;
			mqrq_prev->bounce_buf = (char *)sd_bounce_buffer_prev;
		} else {
			bouncesz = MMC_QUEUE_BOUNCESZ;

			if (bouncesz > host->max_req_size)
				bouncesz = host->max_req_size;
			if (bouncesz > host->max_seg_size)
				bouncesz = host->max_seg_size;
			if (bouncesz > (host->max_blk_count * 512))
				bouncesz = host->max_blk_count * 512;

			if (bouncesz > 512) {
				mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
				if (!mqrq_cur->bounce_buf) {
					pr_warning("%s: unable to "
						"allocate bounce cur buffer\n",
						mmc_card_name(card));
				}
				mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
				if (!mqrq_prev->bounce_buf) {
					pr_warning("%s: unable to "
						"allocate bounce prev buffer\n",
						mmc_card_name(card));
					kfree(mqrq_cur->bounce_buf);
					mqrq_cur->bounce_buf = NULL;
				}
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;


		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	if (mmc_card_sd(card))
		mq->thread = kthread_run(sd_queue_thread, mq, "sd-qd");
	else
		mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
					 host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	if(!mmc_card_sd(card)) {
		kfree(mqrq_cur->bounce_buf);
	}
	mqrq_cur->bounce_buf = NULL;
	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	if(!mmc_card_sd(card)) {
		kfree(mqrq_prev->bounce_buf);
	}
	mqrq_prev->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
Example #20
File: QCUSBNet.c  Project: Tatsh/cr48
/*===========================================================================
METHOD:
   QCUSBNetOpen (Public Method)

DESCRIPTION:
   Wrapper to usbnet_open, correctly handling autosuspend
   Start AutoPM thread

PARAMETERS
   pNet     [ I ] - Pointer to net device

RETURN VALUE:
   int - 0 for success
         Negative errno for error
===========================================================================*/
int QCUSBNetOpen( struct net_device * pNet )
{
   int status = 0;
   struct sQCUSBNet * pQCDev;
   struct usbnet * pDev = netdev_priv( pNet );
   
   if (pDev == NULL)
   {
      DBG( "failed to get usbnet device\n" );
      return -ENXIO;
   }
   
   pQCDev = (sQCUSBNet *)pDev->data[0];
   if (pQCDev == NULL)
   {
      DBG( "failed to get QMIDevice\n" );
      return -ENXIO;
   }

   DBG( "\n" );

   // Start the AutoPM thread
   pQCDev->mAutoPM.mpIntf = pQCDev->mpIntf;
   pQCDev->mAutoPM.mbExit = false;
   pQCDev->mAutoPM.mpURBList = NULL;
   pQCDev->mAutoPM.mpActiveURB = NULL;
   spin_lock_init( &pQCDev->mAutoPM.mURBListLock );
   spin_lock_init( &pQCDev->mAutoPM.mActiveURBLock );
   init_completion( &pQCDev->mAutoPM.mThreadDoWork );
   
   pQCDev->mAutoPM.mpThread = kthread_run( QCUSBNetAutoPMThread, 
                              &pQCDev->mAutoPM, 
                              "QCUSBNetAutoPMThread" );
   if (IS_ERR( pQCDev->mAutoPM.mpThread ))
   {
      DBG( "AutoPM thread creation error\n" );
      return PTR_ERR( pQCDev->mAutoPM.mpThread );
   }

   // Allow traffic
   QClearDownReason( pQCDev, NET_IFACE_STOPPED );

   // Pass to usbnet_open if defined
   if (pQCDev->mpUSBNetOpen != NULL)
   {
      status = pQCDev->mpUSBNetOpen( pNet );
   
      // If usbnet_open was successful enable Auto PM
      if (status == 0)
      {
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,33 ))
         usb_autopm_enable( pQCDev->mpIntf );
#else
         usb_autopm_put_interface( pQCDev->mpIntf );
#endif
      }
   }
   else
   {
      DBG( "no USBNetOpen defined\n" );
   }
   
   return status;
}
Example #21
int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end,
			unsigned long features)
{

	int	status = 0;
	int	i;
	int ret;
	u8 mask[4];

	static struct irq_chip	twl6030_irq_chip;

	if (features & TWL6032_SUBCLASS)
		twl6030_interrupt_mapping = twl6032_interrupt_mapping_table;

	mask[1] = 0xFF;
	mask[2] = 0xFF;
	mask[3] = 0xFF;
	ret = twl_i2c_write(TWL_MODULE_PIH, &mask[0],
			REG_INT_MSK_LINE_A, 3); /* MASK ALL INT LINES */
	ret = twl_i2c_write(TWL_MODULE_PIH, &mask[0],
			REG_INT_MSK_STS_A, 3); /* MASK ALL INT STS */
	ret = twl_i2c_write(TWL_MODULE_PIH, &mask[0],
			REG_INT_STS_A, 3); /* clear INT_STS_A,B,C */

	twl6030_irq_base = irq_base;
	twl6030_irq_end = irq_end;

	/* install an irq handler for each of the modules;
	 * clone dummy irq_chip since PIH can't *do* anything
	 */
	twl6030_irq_chip = dummy_irq_chip;
	twl6030_irq_chip.name = "twl6030";
	twl6030_irq_chip.irq_set_type = NULL;
	twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;

	for (i = irq_base; i < irq_end; i++) {
		irq_set_chip_and_handler(i, &twl6030_irq_chip,
					 handle_simple_irq);
		irq_set_chip_data(i, (void *)irq_num);
		activate_irq(i);
	}

	twl6030_irq_next = i;
	pr_info("twl6030: %s (irq %d) chaining IRQs %d..%d\n", "PIH",
			irq_num, irq_base, twl6030_irq_next - 1);

	/* install an irq handler to demultiplex the TWL6030 interrupt */
	init_completion(&irq_event);
	task = kthread_run(twl6030_irq_thread, (void *)irq_num, "twl6030-irq");
	if (IS_ERR(task)) {
		pr_err("twl6030: could not create irq %d thread!\n", irq_num);
		status = PTR_ERR(task);
		goto fail_kthread;
	}

	status = request_irq(irq_num, handle_twl6030_pih, IRQF_DISABLED,
				"TWL6030-PIH", &irq_event);
	if (status < 0) {
		pr_err("twl6030: could not claim irq%d: %d\n", irq_num, status);
		goto fail_irq;
	}

	twl_irq = irq_num;
	register_pm_notifier(&twl6030_irq_pm_notifier_block);

	status = twl6030_vlow_init(twl6030_irq_base + TWL_VLOW_INTR_OFFSET);
	if (status < 0)
		goto fail_vlow;
	
	twl_i2c_write_u8(TWL_MODULE_PIH, 0x04, REG_INT_MSK_STS_A);	/* mask the VLOW interrupt */

	return status;

fail_vlow:
	free_irq(irq_num, &irq_event);

fail_irq:
	kthread_stop(task);

fail_kthread:
	for (i = irq_base; i < irq_end; i++)
		irq_set_chip_and_handler(i, NULL, NULL);
	return status;
}
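Worth noting in the example above: the failure labels undo the setup steps in reverse order of acquisition (free the IRQ, then stop the thread, then tear down the per-IRQ handlers). A stripped-down sketch of the same two-step ladder, with illustrative names (worker_fn, handler_fn):

#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *worker_task;

static int worker_fn(void *data)
{
	while (!kthread_should_stop())
		msleep(100);
	return 0;
}

static irqreturn_t handler_fn(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_init(int irq)
{
	int status;

	worker_task = kthread_run(worker_fn, NULL, "example-worker"); /* step 1 */
	if (IS_ERR(worker_task))
		return PTR_ERR(worker_task);

	status = request_irq(irq, handler_fn, 0, "example", NULL); /* step 2 */
	if (status < 0)
		goto fail_irq;

	return 0;

fail_irq:
	kthread_stop(worker_task);	/* undo step 1 on step 2 failure */
	return status;
}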
static int tpd_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) {
    int err = 0;
    char buffer[2];
    int status=0;
    i2c_client = client;
    TPD_DMESG("[mtk-tpd], cy8ctma300 tpd_i2c_probe ++++\n");
#ifdef TPD_NO_GPIO
    u32 temp;

    temp = *(volatile u32 *) TPD_GPIO_GPO_ADDR;
    //temp = temp | 0x40;
    temp = temp |(1<<16) ;
    temp = temp |(1<<17);
    //temp = (temp | 1<<16);
    //mt65xx_reg_sync_write(TPD_GPIO_GPO_ADDR, temp);
    //*(volatile u32 *) TPD_GPIO_GPO_ADDR = temp;
    printk("TPD_GPIO_GPO_ADDR:0x%x\n", *(volatile u32 *) TPD_GPIO_GPO_ADDR);

    temp = *(volatile u32 *) TPD_GPIO_OE_ADDR;
    //temp = temp | 0x40;
    temp = temp |(1<<16) ;
    temp = temp |(1<<17);
    //temp = (temp | 1<<16) ;
    //mt65xx_reg_sync_write(TPD_GPIO_OE_ADDR, temp);
    // *(volatile u32 *) TPD_GPIO_OE_ADDR = temp;
    printk("TPD_GPIO_OE_ADDR:0x%x\n", *(volatile u32 *) TPD_GPIO_OE_ADDR);
#endif

#ifndef TPD_NO_GPIO
    mt_set_gpio_mode(GPIO_CTP_RST_PIN, GPIO_CTP_RST_PIN_M_GPIO);
    mt_set_gpio_dir(GPIO_CTP_RST_PIN, GPIO_DIR_OUT);
    mt_set_gpio_out(GPIO_CTP_RST_PIN, GPIO_OUT_ZERO);
    msleep(10);
    mt_set_gpio_mode(GPIO_CTP_EN_PIN, GPIO_CTP_EN_PIN_M_GPIO);
    mt_set_gpio_dir(GPIO_CTP_EN_PIN, GPIO_DIR_OUT);
    mt_set_gpio_out(GPIO_CTP_EN_PIN, GPIO_OUT_ONE);

//   mt_set_gpio_mode(GPIO_CTP_RST_PIN, GPIO_CTP_RST_PIN_M_GPIO);
//   mt_set_gpio_dir(GPIO_CTP_RST_PIN, GPIO_DIR_OUT);
//   mt_set_gpio_out(GPIO_CTP_RST_PIN, GPIO_OUT_ONE);
//   msleep(10);
    mt_set_gpio_out(GPIO_CTP_RST_PIN, GPIO_OUT_ZERO);
    msleep(1);
    mt_set_gpio_out(GPIO_CTP_RST_PIN, GPIO_OUT_ONE);

    mt_set_gpio_mode(GPIO_CTP_EINT_PIN, GPIO_CTP_EINT_PIN_M_EINT);
    mt_set_gpio_dir(GPIO_CTP_EINT_PIN, GPIO_DIR_IN);
    mt_set_gpio_pull_enable(GPIO_CTP_EINT_PIN, GPIO_PULL_ENABLE);
    mt_set_gpio_pull_select(GPIO_CTP_EINT_PIN, GPIO_PULL_UP);
#endif

    msleep(50);
    /* probe the controller by reading one byte from register 0x00 */
    status = i2c_smbus_read_i2c_block_data(i2c_client, 0x00, 1, &buffer[0]);
    if (status < 0)
    {
        TPD_DMESG("fwq read error\n");
        TPD_DMESG("[mtk-tpd], cy8ctma300 tpd_i2c_probe failed!!\n");
        /* retry once before giving up */
        status = i2c_smbus_read_i2c_block_data(i2c_client, 0x00, 1, &buffer[0]);
        if (status < 0)
        {
            TPD_DMESG("[mtk-tpd], cy8ctma300 tpd_i2c_probe retry failed!!\n");
            return status;
        }
    }
    TPD_DMESG("fwq buffer=%x \n",buffer[0]);

    TPD_DMESG("[mtk-tpd], cy8ctma300 tpd_i2c_probe success!!\n");
    tpd_load_status = 1;

    if ((buffer[0] & 0x70) != 0x00)
    {
        buffer[0] = 0x00; // switch to operation mode

        i2c_smbus_write_i2c_block_data(i2c_client, 0x00, 1, &(buffer[0]));
        if(status < 0)
        {
            TPD_DMESG("fwq write error\n");
        }
        msleep(50);
    }

    thread = kthread_run(touch_event_handler, NULL, TPD_DEVICE);
    if (IS_ERR(thread)) {
        err = PTR_ERR(thread);
        TPD_DMESG(TPD_DEVICE " failed to create kernel thread: %d\n", err);
    }
#ifndef POLL_MODE /* mt6575t FPGA debug: define POLL_MODE to poll instead of using the EINT */
    mt65xx_eint_set_sens(CUST_EINT_TOUCH_PANEL_NUM, CUST_EINT_TOUCH_PANEL_SENSITIVE);
    mt65xx_eint_set_hw_debounce(CUST_EINT_TOUCH_PANEL_NUM, CUST_EINT_TOUCH_PANEL_DEBOUNCE_CN);
    mt65xx_eint_registration(CUST_EINT_TOUCH_PANEL_NUM, CUST_EINT_TOUCH_PANEL_DEBOUNCE_EN, CUST_EINT_TOUCH_PANEL_POLARITY, tpd_eint_interrupt_handler, 1);
    mt65xx_eint_unmask(CUST_EINT_TOUCH_PANEL_NUM);
    TPD_DMESG("EINT num=%d\n",CUST_EINT_TOUCH_PANEL_NUM);
#endif
    TPD_DMESG("[mtk-tpd], cy8ctma300 tpd_i2c_probe ----\n");

    return 0;
}
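One thing to watch in this probe: when kthread_run() fails, the error is only logged and the function still registers the EINT handler and returns 0, leaving an interrupt wired to a thread that does not exist. A stricter variant (a sketch, not the vendor's code) would propagate the error:

    thread = kthread_run(touch_event_handler, NULL, TPD_DEVICE);
    if (IS_ERR(thread)) {
        err = PTR_ERR(thread);
        TPD_DMESG(TPD_DEVICE " failed to create kernel thread: %d\n", err);
        return err; /* don't register the EINT without a handler thread */
    }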
static int trace_wakeup_test_thread(void *data)
{
	/* make this an RT thread; the priority need not be high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* let the creator know we are running at RT priority */
	complete(x);

	/* now go to sleep and wait for the test to wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake; stay around until asked to stop */
	while (!kthread_should_stop()) {
		/*
		 * An uninterruptible sleep is fine here; the thread
		 * only needs to stay alive until kthread_stop().
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high-priority RT test thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* wait until the thread is really running at RT priority */
	wait_for_completion(&isrt);

	/* start the tracer */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* give the RT thread time to go back to sleep */
	msleep(100);

	/*
	 * This is slightly racy: if, for some strange reason, the RT
	 * thread did not schedule away right after completing, we would
	 * be waking a task that is already awake and no wakeup latency
	 * would be recorded. That is extremely unlikely, and the worst
	 * outcome is that the test fails with "no entries found".
	 */

	wake_up_process(p);

	/* give the wakeup time to be traced */
	msleep(100);

	/* stop the tracing */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);


	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the test thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
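The completion handshake above is the standard way to make kthread_run() synchronous with the new thread's setup: the creator blocks in wait_for_completion() until the thread has finished configuring itself. A generic sketch of the same handshake, with illustrative names:

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *ready_task;

static int ready_thread(void *data)
{
	struct completion *ready = data;

	/* ... perform whatever setup the creator must wait for ... */
	complete(ready);	/* setup done; the creator may proceed */

	while (!kthread_should_stop())
		msleep(100);
	return 0;
}

static int start_ready_thread(void)
{
	struct completion ready;

	init_completion(&ready);
	ready_task = kthread_run(ready_thread, &ready, "ready-thread");
	if (IS_ERR(ready_task))
		return PTR_ERR(ready_task);

	/* the on-stack completion is safe: the thread only touches
	 * `ready` before completing it, and we wait here until then.
	 * ready_task must eventually be passed to kthread_stop(). */
	wait_for_completion(&ready);
	return 0;
}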
Example #24
static int tpd_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) {
    int err = 0, ret = -1;
    u8 cmdbuf[MAX_I2C_LEN] = {0x03, 0x03, 0x0A, 0x01, 0x41, 0, 0, 0, 0, 0};

    #ifdef TPD_NO_GPIO
    u16 temp;
    temp = *(volatile u16 *) TPD_RESET_PIN_ADDR;
    temp = temp | 0x40;
    *(volatile u16 *) TPD_RESET_PIN_ADDR = temp;
    #endif
    i2c_client = client;
    
    printk("MediaTek touch panel i2c probe\n");
    
    #ifndef TPD_NO_GPIO 

    mt_set_gpio_mode(GPIO_CTP_RST_PIN, GPIO_CTP_RST_PIN_M_GPIO);
    mt_set_gpio_dir(GPIO_CTP_RST_PIN, GPIO_DIR_OUT);
    mt_set_gpio_out(GPIO_CTP_RST_PIN, GPIO_OUT_ZERO);  
    msleep(10);  
    mt_set_gpio_out(GPIO_CTP_RST_PIN, GPIO_OUT_ONE);
   
    mt_set_gpio_mode(GPIO_CTP_EINT_PIN, GPIO_CTP_EINT_PIN_M_EINT);
    mt_set_gpio_dir(GPIO_CTP_EINT_PIN, GPIO_DIR_IN);
    mt_set_gpio_pull_enable(GPIO_CTP_EINT_PIN, GPIO_PULL_ENABLE);
    mt_set_gpio_pull_select(GPIO_CTP_EINT_PIN, GPIO_PULL_UP);
    #endif 

    msleep(50);
    
    I2CDMABuf_va = (u8 *)dma_alloc_coherent(NULL, 4096, &I2CDMABuf_pa, GFP_KERNEL);
    if (!I2CDMABuf_va)
    {
        printk("Allocate Touch DMA I2C Buffer failed!\n");
        return -1;
    }

    i2c_client->addr = i2c_client->addr & I2C_MASK_FLAG | I2C_DMA_FLAG | I2C_ENEXT_FLAG;
    ret = tpd_i2c_write(i2c_client, cmdbuf, 10);
    i2c_client->addr = i2c_client->addr & I2C_MASK_FLAG;
    if (ret != sizeof(cmdbuf))
    {
        TPD_DEBUG("[mtk-tpd] i2c write communicate error: 0x%x\n", ret);
        dma_free_coherent(NULL, 4096, I2CDMABuf_va, I2CDMABuf_pa);
        return -1;
    }
	
    thread = kthread_run(touch_event_handler, NULL, TPD_DEVICE);
    if (IS_ERR(thread)) { 
        err = PTR_ERR(thread);
        TPD_DMESG(TPD_DEVICE " failed to create kernel thread: %d\n", err);
    }
    
    mt65xx_eint_set_sens(CUST_EINT_TOUCH_PANEL_NUM, CUST_EINT_TOUCH_PANEL_SENSITIVE);
    mt65xx_eint_set_hw_debounce(CUST_EINT_TOUCH_PANEL_NUM, CUST_EINT_TOUCH_PANEL_DEBOUNCE_CN);
    mt65xx_eint_registration(CUST_EINT_TOUCH_PANEL_NUM, CUST_EINT_TOUCH_PANEL_DEBOUNCE_EN, CUST_EINT_TOUCH_PANEL_POLARITY, tpd_eint_interrupt_handler, 1);
    mt65xx_eint_unmask(CUST_EINT_TOUCH_PANEL_NUM);
    
    printk("MediaTek touch panel i2c probe success\n");
    
    return 0;
}
Example #25
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
    struct mmc_host *host = card->host;
    u64 limit = BLK_BOUNCE_HIGH;
    int ret;

    if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
        limit = *mmc_dev(host)->dma_mask;

    mq->card = card;
    mq->queue = blk_init_queue(mmc_request, lock);
    if (!mq->queue)
        return -ENOMEM;

    mq->queue->queuedata = mq;
    mq->req = NULL;

    blk_queue_prep_rq(mq->queue, mmc_prep_request);
    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
    if (mmc_can_erase(card)) {
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
        mq->queue->limits.max_discard_sectors = UINT_MAX;
        if (card->erased_byte == 0)
            mq->queue->limits.discard_zeroes_data = 1;
        mq->queue->limits.discard_granularity = card->pref_erase << 9;
        if (mmc_can_secure_erase_trim(card))
            queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
                                    mq->queue);
    }

#ifdef CONFIG_MMC_BLOCK_BOUNCE
    if (host->max_segs == 1) {
        unsigned int bouncesz;

        bouncesz = MMC_QUEUE_BOUNCESZ;

        if (bouncesz > host->max_req_size)
            bouncesz = host->max_req_size;
        if (bouncesz > host->max_seg_size)
            bouncesz = host->max_seg_size;
        if (bouncesz > (host->max_blk_count * 512))
            bouncesz = host->max_blk_count * 512;

        if (bouncesz > 512) {
            mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
            if (!mq->bounce_buf) {
                printk(KERN_WARNING "%s: unable to "
                       "allocate bounce buffer\n",
                       mmc_card_name(card));
            }
        }

        if (mq->bounce_buf) {
            blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
            blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
            blk_queue_max_segments(mq->queue, bouncesz / 512);
            blk_queue_max_segment_size(mq->queue, bouncesz);

            mq->sg = kmalloc(sizeof(struct scatterlist),
                             GFP_KERNEL);
            if (!mq->sg) {
                ret = -ENOMEM;
                goto cleanup_queue;
            }
            sg_init_table(mq->sg, 1);

            mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
                                    bouncesz / 512, GFP_KERNEL);
            if (!mq->bounce_sg) {
                ret = -ENOMEM;
                goto cleanup_queue;
            }
            sg_init_table(mq->bounce_sg, bouncesz / 512);
        }
    }
#endif

    if (!mq->bounce_buf) {
        blk_queue_bounce_limit(mq->queue, limit);
        blk_queue_max_hw_sectors(mq->queue,
                                 min(host->max_blk_count, host->max_req_size / 512));
        blk_queue_max_segments(mq->queue, host->max_segs);
        blk_queue_max_segment_size(mq->queue, host->max_seg_size);

        mq->sg = kmalloc(sizeof(struct scatterlist) *
                         host->max_segs, GFP_KERNEL);
        if (!mq->sg) {
            ret = -ENOMEM;
            goto cleanup_queue;
        }
        sg_init_table(mq->sg, host->max_segs);
    }

    sema_init(&mq->thread_sem, 1);

    mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                             host->index, subname ? subname : "");

    if (IS_ERR(mq->thread)) {
        ret = PTR_ERR(mq->thread);
        goto free_bounce_sg;
    }

    return 0;
free_bounce_sg:
    if (mq->bounce_sg)
        kfree(mq->bounce_sg);
    mq->bounce_sg = NULL;
cleanup_queue:
    if (mq->sg)
        kfree(mq->sg);
    mq->sg = NULL;
    if (mq->bounce_buf)
        kfree(mq->bounce_buf);
    mq->bounce_buf = NULL;
    blk_cleanup_queue(mq->queue);
    return ret;
}
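mmc_init_queue() also shows that the third argument to kthread_run() is a printk-style format ("mmcqd/%d%s" expands to, e.g., "mmcqd/0"); names longer than TASK_COMM_LEN (16 bytes including the NUL) are silently truncated. A minimal sketch of the same per-device naming, with a hypothetical mydev structure:

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

struct mydev {
	int id;
	struct task_struct *task;
};

static int mydev_worker(void *data)
{
	/* a real worker would service the device passed in `data` */
	while (!kthread_should_stop())
		msleep(100);
	return 0;
}

static int mydev_start(struct mydev *dev)
{
	/* formatted thread name: "mydev/0", "mydev/1", ... */
	dev->task = kthread_run(mydev_worker, dev, "mydev/%d", dev->id);
	if (IS_ERR(dev->task))
		return PTR_ERR(dev->task);
	return 0;
}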
static int ucb1400_ts_thread(void *_ucb)
{
	struct ucb1400 *ucb = _ucb;
	struct task_struct *tsk = current;
	int valid = 0;
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler(tsk, SCHED_FIFO, &param);

	set_freezable();
	while (!kthread_should_stop()) {
		unsigned int x, y, p;
		long timeout;

		ucb->ts_restart = 0;

		if (ucb->irq_pending) {
			ucb->irq_pending = 0;
			ucb1400_handle_pending_irq(ucb);
		}

		ucb1400_adc_enable(ucb);
		x = ucb1400_ts_read_xpos(ucb);
		y = ucb1400_ts_read_ypos(ucb);
		p = ucb1400_ts_read_pressure(ucb);
		ucb1400_adc_disable(ucb);

		/* Switch back to interrupt mode. */
		ucb1400_ts_mode_int(ucb);

		msleep(10);

		if (ucb1400_ts_pen_down(ucb)) {
			ucb1400_ts_irq_enable(ucb);

			/*
			 * If we spat out a valid sample set last time,
			 * spit out a "pen off" sample here.
			 */
			if (valid) {
				ucb1400_ts_event_release(ucb->ts_idev);
				valid = 0;
			}

			timeout = MAX_SCHEDULE_TIMEOUT;
		} else {
			valid = 1;
			ucb1400_ts_evt_add(ucb->ts_idev, p, x, y);
			timeout = msecs_to_jiffies(10);
		}

		wait_event_freezable_timeout(ucb->ts_wait,
			ucb->irq_pending || ucb->ts_restart || kthread_should_stop(),
			timeout);
	}

	/* Send the "pen off" if we are stopping with the pen still active */
	if (valid)
		ucb1400_ts_event_release(ucb->ts_idev);

	ucb->ts_task = NULL;
	return 0;
}

/*
 * A restriction with interrupts exists when using the ucb1400, as
 * the codec read/write routines may sleep while waiting for codec
 * access completion and uses semaphores for access control to the
 * AC97 bus.  A complete codec read cycle could take anywhere from
 * 60 to 100uSec so we *definitely* don't want to spin inside the
 * interrupt handler waiting for codec access.  So, we handle the
 * interrupt by scheduling a RT kernel thread to run in process
 * context instead of interrupt context.
 */
static irqreturn_t ucb1400_hard_irq(int irqnr, void *devid)
{
	struct ucb1400 *ucb = devid;

	if (irqnr == ucb->irq) {
		disable_irq(ucb->irq);
		ucb->irq_pending = 1;
		wake_up(&ucb->ts_wait);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

static int ucb1400_ts_open(struct input_dev *idev)
{
	struct ucb1400 *ucb = input_get_drvdata(idev);
	int ret = 0;

	BUG_ON(ucb->ts_task);

	ucb->ts_task = kthread_run(ucb1400_ts_thread, ucb, "UCB1400_ts");
	if (IS_ERR(ucb->ts_task)) {
		ret = PTR_ERR(ucb->ts_task);
		ucb->ts_task = NULL;
	}

	return ret;
}

static void ucb1400_ts_close(struct input_dev *idev)
{
	struct ucb1400 *ucb = input_get_drvdata(idev);

	if (ucb->ts_task)
		kthread_stop(ucb->ts_task);

	ucb1400_ts_irq_disable(ucb);
	ucb1400_reg_write(ucb, UCB_TS_CR, 0);
}

#ifdef CONFIG_PM
static int ucb1400_ts_resume(struct device *dev)
{
	struct ucb1400 *ucb = dev_get_drvdata(dev);

	if (ucb->ts_task) {
		/*
		 * Restart the TS thread to ensure the
		 * TS interrupt mode is set up again
		 * after sleep.
		 */
		ucb->ts_restart = 1;
		wake_up(&ucb->ts_wait);
	}
	return 0;
}
#else
#define ucb1400_ts_resume NULL
#endif

#ifndef NO_IRQ
#define NO_IRQ	0
#endif

/*
 * Try to probe our interrupt, rather than relying on lots of
 * hard-coded machine dependencies.
 */
static int ucb1400_detect_irq(struct ucb1400 *ucb)
{
	unsigned long mask, timeout;

	mask = probe_irq_on();

	/* Enable the ADC interrupt. */
	ucb1400_reg_write(ucb, UCB_IE_RIS, UCB_IE_ADC);
	ucb1400_reg_write(ucb, UCB_IE_FAL, UCB_IE_ADC);
	ucb1400_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
	ucb1400_reg_write(ucb, UCB_IE_CLEAR, 0);

	/* Cause an ADC interrupt. */
	ucb1400_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA);
	ucb1400_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START);

	/* Wait for the conversion to complete. */
	timeout = jiffies + HZ/2;
	while (!(ucb1400_reg_read(ucb, UCB_ADC_DATA) & UCB_ADC_DAT_VALID)) {
		cpu_relax();
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "ucb1400: timed out in IRQ probe\n");
			probe_irq_off(mask);
			return -ENODEV;
		}
	}
	ucb1400_reg_write(ucb, UCB_ADC_CR, 0);

	/* Disable and clear interrupt. */
	ucb1400_reg_write(ucb, UCB_IE_RIS, 0);
	ucb1400_reg_write(ucb, UCB_IE_FAL, 0);
	ucb1400_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
	ucb1400_reg_write(ucb, UCB_IE_CLEAR, 0);

	/* Read triggered interrupt. */
	ucb->irq = probe_irq_off(mask);
	if (ucb->irq < 0 || ucb->irq == NO_IRQ)
		return -ENODEV;

	return 0;
}
Example #27
/* Write data to a channel */
int write_hsi(u32 ch, u32 *data, int length)
{
	int ret;
	struct if_hsi_channel *channel;
	struct task_struct *read_thread;

	channel = &hsi_protocol_iface.channels[ch];
	channel->tx_buf = data;
	channel->tx_count = 0;

	/* on first use, start the read thread */
	if (initialization == 0) {
		read_thread = kthread_run(hsi_read_thrd, NULL,
					  "hsi_read_thread");
		if (IS_ERR(read_thread))
			return PTR_ERR(read_thread);
		initialization++;
	}

	/* don't block if the previous transfer is still in flight */
	if (channel->tx_state != HSI_LL_TX_STATE_IDLE)
		return -EAGAIN;

	/* ACWAKE -> HIGH */
	ret = hsi_ioctl(hsi_protocol_iface.channels[0].dev,
			HSI_IOCTL_ACWAKE_UP, NULL);
	if (ret == 0)
		printk(KERN_INFO "ACWAKE pulled high in %s()\n", __func__);
	else
		printk(KERN_INFO "ACWAKE pulled high in %s() ERROR : %d\n",
		       __func__, ret);

	channel->tx_state = HSI_LL_TX_STATE_WAIT_FOR_ACK;

	hsi_protocol_send_command(HSI_LL_MSG_OPEN_CONN_OCTET, ch, length);

	/* wait until the transfer completes and tx_count is updated */
	wait_event_interruptible(ipc_write_wait, channel->tx_count != 0);

	return channel->tx_count;
}
Example #28
File: audiostub.c  Project: qkdang/m462
static void start_handshake(void)
{
	if (handshake_taskref == NULL) {
		handshake_taskref =
		    kthread_run(handshake_task, NULL, "audio_handshake");
		/* don't cache an ERR_PTR; leave NULL so a retry is possible */
		if (IS_ERR(handshake_taskref))
			handshake_taskref = NULL;
	}
}
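The NULL check makes the thread a lazily created singleton, but the test and the kthread_run() call are not atomic: two concurrent callers could both observe NULL and start two threads. If start_handshake() can race with itself, it needs a lock. A sketch of a locked variant (the mutex is an addition, not in the original driver; handshake_taskref and handshake_task come from the example above):

#include <linux/mutex.h>

static DEFINE_MUTEX(handshake_lock);

static void start_handshake_locked(void)
{
	mutex_lock(&handshake_lock);
	if (handshake_taskref == NULL) {
		handshake_taskref =
		    kthread_run(handshake_task, NULL, "audio_handshake");
		if (IS_ERR(handshake_taskref))
			handshake_taskref = NULL;	/* allow a retry */
	}
	mutex_unlock(&handshake_lock);
}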
Example #29
/*
 * Description : fiber driver initialisation
 * Return      : 0 on success, a negative error code on failure
 * Parameters  : none
 * Author      : 张彦升 (Zhang Yansheng)
 * Date        : 2014-03-03 10:23:31
 */
static int __init fiber_init(void)
{
    int ret;
    int i = 0;
    int minor_a = 0;
    int minor_b = 0;
    dev_t devno = 0;

    if (fiber_major)
    {
        devno = MKDEV(fiber_major, 0);
        ret = register_chrdev_region(devno, FIBER_DEVS_NR, "fiber");
    }
    else
    {
        ret = alloc_chrdev_region(&devno, 0, FIBER_DEVS_NR, "fiber");
        fiber_major = MAJOR(devno);
    }
    if (ret < 0)
    {
        printk(KERN_WARNING "fiber: can't get major %d\n", fiber_major);

        return ret;
    }
    fiber_recv_status_a = 0;
    fiber_recv_status_b = 0;

    send_addr = ioremap(FIBER_SEND_START_ADDR, FIBER_SEND_TOTAL_MEMORY_SIZE);
    if (NULL == send_addr)
    {
        ret = -ENOMEM;
        goto fail;
    }

    sema_init(&fiber_dev_sem, 1);
    sema_init(&fiber_data_sem, 1);

    for (i = 0;i < FIBER_VIRTUAL_SECTIONS;i++)
    {
        minor_a = i * 2;
        minor_b = minor_a + 1;

        fiber_dev_a[i].b_use = FIBER_FALSE;
        fiber_dev_a[i].frag_start_id = -1;
        fiber_dev_a[i].frag_count = -1;
        init_waitqueue_head(&(fiber_dev_a[i].data_queue));
        fiber_dev_a[i].peer_fiber_dev = &fiber_dev_b[i];

        ret = fiber_setup_cdev(&fiber_dev_a[i].cdev,minor_a,&fiber_fops_a);
        if (0 != ret)
        {
            goto fail;
        }

        fiber_dev_b[i].b_use = FIBER_FALSE;
        fiber_dev_b[i].frag_start_id = -1;
        fiber_dev_b[i].frag_count = -1;
        init_waitqueue_head(&(fiber_dev_b[i].data_queue));
        fiber_dev_b[i].peer_fiber_dev = &fiber_dev_a[i];

        ret = fiber_setup_cdev(&fiber_dev_b[i].cdev,minor_b,&fiber_fops_b);
        if (0 != ret)
        {
            goto fail;
        }
    }

    /*init irq*/
    fiber_irq = FIBER_IRQ;
    ret = request_irq(fiber_irq, fiber_interrupt, 0, "fiber", NULL);
    if (ret)
    {
        printk(KERN_ERR "fiber_init: can't get assigned irq %i\n", fiber_irq);

        goto fail;
    }

    ret = fiber_init_recver();
    if (0 != ret)
    {
        goto fail;
    }
    fiber_frag_addr_init();

    b_stop_thread = FIBER_FALSE;
    fiber_recv_thread = kthread_run(fiber_recv_store, NULL, "fiber_recver");
    if (IS_ERR(fiber_recv_thread))
    {
        ret = PTR_ERR(fiber_recv_thread);
        goto fail;
    }
#ifdef FIBER_DEBUG
    /* create the proc interface */
    fiber_create_proc();
#endif /*FIBER_DEBUG*/

    printk(KERN_INFO "fiber init.\n");

    return 0;

fail:
    unregister_chrdev_region(MKDEV(fiber_major, 0), FIBER_DEVS_NR); /* release the device numbers */
    printk(KERN_ERR "fiber_init:failed\n");

    return ret;
}
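Note that fiber_init() funnels every failure through a single fail: label, which only releases the device numbers; a failure in fiber_init_recver() or in the recv-thread creation therefore leaks the IRQ and any cdevs already set up. The usual fix is one label per acquired resource, unwinding in reverse order. A sketch of how the tail could look (fiber_cleanup_recver is hypothetical):

fail_recver:
    fiber_cleanup_recver();     /* hypothetical: undo fiber_init_recver() */
fail_irq:
    free_irq(fiber_irq, NULL);
fail:
    unregister_chrdev_region(MKDEV(fiber_major, 0), FIBER_DEVS_NR);
    printk(KERN_ERR "fiber_init: failed\n");
    return ret;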
static long q6_evrc_in_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	struct evrc *evrc = file->private_data;
	int rc = 0;
	int i = 0;
	struct evrc_fc *fc;
	int size = 0;

	mutex_lock(&evrc->lock);
	switch (cmd) {
	case AUDIO_SET_VOLUME:
		pr_debug("[%s:%s] SET_VOLUME\n", __MM_FILE__, __func__);
		break;
	case AUDIO_GET_STATS:
	{
		struct msm_audio_stats stats;
		pr_debug("[%s:%s] GET_STATS\n", __MM_FILE__, __func__);
		memset(&stats, 0, sizeof(stats));
		/* don't return directly here: evrc->lock is still held */
		if (copy_to_user((void *) arg, &stats, sizeof(stats)))
			rc = -EFAULT;
		break;
	}
	case AUDIO_START:
	{
		uint32_t acdb_id;
		pr_debug("[%s:%s] AUDIO_START\n", __MM_FILE__, __func__);
		if (arg == 0) {
			acdb_id = 0;
		} else {
			if (copy_from_user(&acdb_id, (void *) arg,
				sizeof(acdb_id))) {
				rc = -EFAULT;
				break;
			}
		}
		if (evrc->audio_client) {
			rc = -EBUSY;
			pr_err("[%s:%s] active session already existing\n",
				__MM_FILE__, __func__);
			break;
		} else {
			evrc->audio_client = q6audio_open_qcp(
					evrc->str_cfg.buffer_size,
					evrc->cfg.min_bit_rate,
					evrc->cfg.max_bit_rate,
					evrc->voicerec_mode.rec_mode,
					ADSP_AUDIO_FORMAT_EVRC_FS,
					acdb_id);

			if (!evrc->audio_client) {
				pr_err("[%s:%s] evrc open session failed\n",
					__MM_FILE__, __func__);
				kfree(evrc);
				rc = -ENOMEM;
				break;
			}
		}

		/*allocate flow control buffers*/
		fc = evrc->evrc_fc;
		size = evrc->str_cfg.buffer_size;
		for (i = 0; i < EVRC_FC_BUFF_CNT; ++i) {
			mutex_init(&(fc->fc_buff[i].lock));
			fc->fc_buff[i].empty = 1;
			fc->fc_buff[i].data = kmalloc(size, GFP_KERNEL);
			if (fc->fc_buff[i].data == NULL) {
				pr_err("[%s:%s] No memory for FC buffers\n",
						__MM_FILE__, __func__);
				rc = -ENOMEM;
				goto fc_fail;
			}
			fc->fc_buff[i].size = size;
			fc->fc_buff[i].actual_size = 0;
		}

		/*create flow control thread*/
		fc->task = kthread_run(q6_evrc_flowcontrol,
				evrc, "evrc_flowcontrol");
		if (IS_ERR(fc->task)) {
			rc = PTR_ERR(fc->task);
			pr_err("[%s:%s] error creating flow control thread\n",
					__MM_FILE__, __func__);
			goto fc_fail;
		}
		break;
fc_fail:
		/* free only the flow-control buffers allocated so far */
		while (--i >= 0) {
			kfree(fc->fc_buff[i].data);
			fc->fc_buff[i].data = NULL;
		}
		break;
	}
	case AUDIO_STOP:
		pr_debug("[%s:%s] AUDIO_STOP\n", __MM_FILE__, __func__);
		break;
	case AUDIO_FLUSH:
		break;
	case AUDIO_SET_INCALL: {
		pr_debug("[%s:%s] SET_INCALL\n", __MM_FILE__, __func__);
		if (copy_from_user(&evrc->voicerec_mode,
			(void *)arg, sizeof(struct msm_voicerec_mode)))
			rc = -EFAULT;

		if (evrc->voicerec_mode.rec_mode != AUDIO_FLAG_READ
				&& evrc->voicerec_mode.rec_mode !=
				AUDIO_FLAG_INCALL_MIXED) {
			evrc->voicerec_mode.rec_mode = AUDIO_FLAG_READ;
			pr_err("[%s:%s] Invalid rec_mode\n", __MM_FILE__,
					__func__);
			rc = -EINVAL;
		}
		break;
	}
	case AUDIO_GET_STREAM_CONFIG:
		if (copy_to_user((void *)arg, &evrc->str_cfg,
				sizeof(struct msm_audio_stream_config)))
			rc = -EFAULT;

		pr_debug("[%s:%s] GET_STREAM_CONFIG: buffsz=%d, buffcnt=%d\n",
			 __MM_FILE__, __func__, evrc->str_cfg.buffer_size,
			evrc->str_cfg.buffer_count);
		break;
	case AUDIO_SET_STREAM_CONFIG:
		if (copy_from_user(&evrc->str_cfg, (void *)arg,
			sizeof(struct msm_audio_stream_config))) {
			rc = -EFAULT;
			break;
		}

		pr_debug("[%s:%s] SET_STREAM_CONFIG: buffsz=%d, buffcnt=%d\n",
			 __MM_FILE__, __func__, evrc->str_cfg.buffer_size,
			evrc->str_cfg.buffer_count);

		if (evrc->str_cfg.buffer_size < 23) {
			pr_err("[%s:%s] Buffer size too small\n", __MM_FILE__,
					__func__);
			rc = -EINVAL;
			break;
		}

		if (evrc->str_cfg.buffer_count != 2)
			pr_info("[%s:%s] Buffer count set to 2\n", __MM_FILE__,
					__func__);
		break;
	case AUDIO_SET_EVRC_ENC_CONFIG:
		if (copy_from_user(&evrc->cfg, (void *) arg,
				 sizeof(struct msm_audio_evrc_enc_config)))
			rc = -EFAULT;
		pr_debug("[%s:%s] SET_EVRC_ENC_CONFIG\n", __MM_FILE__,
				__func__);

		if (evrc->cfg.min_bit_rate > 4 || evrc->cfg.min_bit_rate < 1) {
			pr_err("[%s:%s] invalid min bitrate\n", __MM_FILE__,
					__func__);
			rc = -EINVAL;
		}
		if (evrc->cfg.max_bit_rate > 4 || evrc->cfg.max_bit_rate < 1) {
			pr_err("[%s:%s] invalid max bitrate\n", __MM_FILE__,
					__func__);
			rc = -EINVAL;
		}
		break;
	case AUDIO_GET_EVRC_ENC_CONFIG:
		if (copy_to_user((void *) arg, &evrc->cfg,
				 sizeof(struct msm_audio_evrc_enc_config)))
			rc = -EFAULT;
		pr_debug("[%s:%s] GET_EVRC_ENC_CONFIG\n", __MM_FILE__,
			__func__);
		break;

	default:
		rc = -EINVAL;
	}

	mutex_unlock(&evrc->lock);
	pr_debug("[%s:%s] rc = %d\n", __MM_FILE__, __func__, rc);
	return rc;
}
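The fc_fail path above is the classic partial-unwind loop: on a failure at index i, only elements [0, i) were successfully allocated, so the cleanup walks backwards from i - 1. In isolation the idiom looks like this (a generic sketch; NBUF and buf are illustrative):

#include <linux/slab.h>

#define NBUF 4

static void *buf[NBUF];

static int alloc_all(size_t size)
{
	int i;

	for (i = 0; i < NBUF; i++) {
		buf[i] = kmalloc(size, GFP_KERNEL);
		if (!buf[i])
			goto unwind;
	}
	return 0;

unwind:
	/* free only what was successfully allocated: indices [0, i) */
	while (--i >= 0) {
		kfree(buf[i]);
		buf[i] = NULL;
	}
	return -ENOMEM;
}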