Example #1
int	rtl8188eu_init_recv_priv(_adapter *padapter)
{
    struct recv_priv	*precvpriv = &padapter->recvpriv;
    int	i, res = _SUCCESS;
    struct recv_buf *precvbuf;

#ifdef CONFIG_RECV_THREAD_MODE
    _rtw_init_sema(&precvpriv->recv_sema, 0);//will be removed
    _rtw_init_sema(&precvpriv->terminate_recvthread_sema, 0);//will be removed
#endif

#ifdef PLATFORM_LINUX
    tasklet_init(&precvpriv->recv_tasklet,
                 (void(*)(unsigned long))rtl8188eu_recv_tasklet,
                 (unsigned long)padapter);
#endif

#ifdef CONFIG_USB_INTERRUPT_IN_PIPE
#ifdef PLATFORM_LINUX
    precvpriv->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
    if(precvpriv->int_in_urb == NULL) {
        DBG_8192C("alloc_urb for interrupt in endpoint fail !!!!\n");
    }
#endif
    precvpriv->int_in_buf = rtw_zmalloc(sizeof(INTERRUPT_MSG_FORMAT_EX));
    if(precvpriv->int_in_buf == NULL) {
        DBG_8192C("alloc_mem for interrupt in endpoint fail !!!!\n");
    }
#endif

    //init recv_buf
    _rtw_init_queue(&precvpriv->free_recv_buf_queue);

#ifdef CONFIG_USE_USB_BUFFER_ALLOC_RX
    _rtw_init_queue(&precvpriv->recv_buf_pending_queue);
#endif	// CONFIG_USE_USB_BUFFER_ALLOC_RX

    precvpriv->pallocated_recv_buf = rtw_zmalloc(NR_RECVBUFF *sizeof(struct recv_buf) + 4);
    if(precvpriv->pallocated_recv_buf==NULL) {
        res= _FAIL;
        RT_TRACE(_module_rtl871x_recv_c_,_drv_err_,("alloc recv_buf fail!\n"));
        goto exit;
    }
    _rtw_memset(precvpriv->pallocated_recv_buf, 0, NR_RECVBUFF *sizeof(struct recv_buf) + 4);

    precvpriv->precv_buf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(precvpriv->pallocated_recv_buf), 4);
    //precvpriv->precv_buf = precvpriv->pallocated_recv_buf + 4 -
    //						((uint) (precvpriv->pallocated_recv_buf) &(4-1));


    precvbuf = (struct recv_buf*)precvpriv->precv_buf;

    for(i=0; i < NR_RECVBUFF ; i++)
    {
        _rtw_init_listhead(&precvbuf->list);

        _rtw_spinlock_init(&precvbuf->recvbuf_lock);

        precvbuf->alloc_sz = MAX_RECVBUF_SZ;

        res = rtw_os_recvbuf_resource_alloc(padapter, precvbuf);
        if(res==_FAIL)
            break;

        precvbuf->ref_cnt = 0;
        precvbuf->adapter =padapter;


        //rtw_list_insert_tail(&precvbuf->list, &(precvpriv->free_recv_buf_queue.queue));

        precvbuf++;

    }

    precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;

#ifdef PLATFORM_LINUX

    skb_queue_head_init(&precvpriv->rx_skb_queue);

#ifdef CONFIG_PREALLOC_RECV_SKB
    {
        int i;
        SIZE_PTR tmpaddr=0;
        SIZE_PTR alignment=0;
        struct sk_buff *pskb=NULL;

        skb_queue_head_init(&precvpriv->free_recv_skb_queue);

        for(i=0; i<NR_PREALLOC_RECV_SKB; i++)
        {

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) // http://www.mail-archive.com/[email protected]/msg17214.html
            pskb = dev_alloc_skb(MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
#else
            pskb = netdev_alloc_skb(padapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
#endif

            if(pskb)
            {
                pskb->dev = padapter->pnetdev;

                tmpaddr = (SIZE_PTR)pskb->data;
                alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
                skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));

                skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
            }

            pskb=NULL;

        }
    }
#endif

#endif

exit:

    return res;

}
Example #2
static int __init rmnet_init(void)
{
    int ret;
    struct device *d;
    struct net_device *dev;
    struct rmnet_private *p;
    unsigned n;

    printk("%s\n", __func__);

#ifdef CONFIG_MSM_RMNET_DEBUG
    timeout_us = 0;
#ifdef CONFIG_HAS_EARLYSUSPEND
    timeout_suspend_us = 0;
#endif
#endif

    for (n = 0; n < 8; n++) {
        dev = alloc_netdev(sizeof(struct rmnet_private),
                           "rmnet%d", rmnet_setup);

        if (!dev)
            return -ENOMEM;

        d = &(dev->dev);
        p = netdev_priv(dev);
        p->chname = ch_name[n];
        /* Initial config uses Ethernet */
        p->operation_mode = RMNET_MODE_LLP_ETH;
        p->skb = NULL;
        spin_lock_init(&p->lock);
        tasklet_init(&p->tsklt, _rmnet_resume_flow,
                     (unsigned long)dev);
        wake_lock_init(&p->wake_lock, WAKE_LOCK_SUSPEND, ch_name[n]);
#ifdef CONFIG_MSM_RMNET_DEBUG
        p->timeout_us = timeout_us;
        p->wakeups_xmit = p->wakeups_rcv = 0;
#endif

        init_completion(&p->complete);
        port_complete[n] = &p->complete;
        mutex_init(&p->pil_lock);
        p->pdrv.probe = msm_rmnet_smd_probe;
        p->pdrv.driver.name = ch_name[n];
        p->pdrv.driver.owner = THIS_MODULE;
        ret = platform_driver_register(&p->pdrv);
        if (ret) {
            free_netdev(dev);
            return ret;
        }

        ret = register_netdev(dev);
        if (ret) {
            platform_driver_unregister(&p->pdrv);
            free_netdev(dev);
            return ret;
        }


#ifdef CONFIG_MSM_RMNET_DEBUG
        if (device_create_file(d, &dev_attr_timeout))
            continue;
        if (device_create_file(d, &dev_attr_wakeups_xmit))
            continue;
        if (device_create_file(d, &dev_attr_wakeups_rcv))
            continue;
#ifdef CONFIG_HAS_EARLYSUSPEND
        if (device_create_file(d, &dev_attr_timeout_suspend))
            continue;

        /* Only care about rmnet0 for suspend/resume timeout hooks. */
        if (n == 0)
            rmnet0 = d;
#endif
#endif
    }
    return 0;
}
Example #3
int rtl8723au_init_recv_priv(struct rtw_adapter *padapter)
{
	struct recv_priv *precvpriv = &padapter->recvpriv;
	int i, size, res = _SUCCESS;
	struct recv_buf *precvbuf;
	unsigned long tmpaddr;
	unsigned long alignment;
	struct sk_buff *pskb;

	tasklet_init(&precvpriv->recv_tasklet,
		     (void(*)(unsigned long))rtl8723au_recv_tasklet,
		     (unsigned long)padapter);

	precvpriv->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!precvpriv->int_in_urb)
		DBG_8723A("alloc_urb for interrupt in endpoint fail !!!!\n");
	precvpriv->int_in_buf = kzalloc(USB_INTR_CONTENT_LENGTH, GFP_KERNEL);
	if (!precvpriv->int_in_buf)
		DBG_8723A("alloc_mem for interrupt in endpoint fail !!!!\n");

	size = NR_RECVBUFF * sizeof(struct recv_buf);
	precvpriv->precv_buf = kzalloc(size, GFP_KERNEL);
	if (!precvpriv->precv_buf) {
		res = _FAIL;
		RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
			 ("alloc recv_buf fail!\n"));
		goto exit;
	}

	precvbuf = (struct recv_buf *)precvpriv->precv_buf;

	for (i = 0; i < NR_RECVBUFF; i++) {
		INIT_LIST_HEAD(&precvbuf->list);

		precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
		if (!precvbuf->purb)
			break;

		precvbuf->adapter = padapter;

		precvbuf++;
	}

	skb_queue_head_init(&precvpriv->rx_skb_queue);
	skb_queue_head_init(&precvpriv->free_recv_skb_queue);

	for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
		size = MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ;
		pskb = __netdev_alloc_skb(padapter->pnetdev, size, GFP_KERNEL);

		if (pskb) {
			pskb->dev = padapter->pnetdev;

			tmpaddr = (unsigned long)pskb->data;
			alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
			skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));

			skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
		}

		pskb = NULL;
	}

exit:
	return res;
}
Example #4
int __devinit mantis_dvb_init(struct mantis_pci *mantis)
{
	struct mantis_hwconfig *config = mantis->hwconfig;
	int result = -1;

	dprintk(MANTIS_DEBUG, 1, "dvb_register_adapter");

	result = dvb_register_adapter(&mantis->dvb_adapter,
				      "Mantis DVB adapter",
				      THIS_MODULE,
				      &mantis->pdev->dev,
				      adapter_nr);

	if (result < 0) {

		dprintk(MANTIS_ERROR, 1, "Error registering adapter");
		return -ENODEV;
	}

	mantis->dvb_adapter.priv	= mantis;
	mantis->demux.dmx.capabilities	= DMX_TS_FILTERING	|
					 DMX_SECTION_FILTERING	|
					 DMX_MEMORY_BASED_FILTERING;

	mantis->demux.priv		= mantis;
	mantis->demux.filternum		= 256;
	mantis->demux.feednum		= 256;
	mantis->demux.start_feed	= mantis_dvb_start_feed;
	mantis->demux.stop_feed		= mantis_dvb_stop_feed;
	mantis->demux.write_to_decoder	= NULL;

	dprintk(MANTIS_DEBUG, 1, "dvb_dmx_init");
	result = dvb_dmx_init(&mantis->demux);
	if (result < 0) {
		dprintk(MANTIS_ERROR, 1, "dvb_dmx_init failed, ERROR=%d", result);

		goto err0;
	}

	mantis->dmxdev.filternum	= 256;
	mantis->dmxdev.demux		= &mantis->demux.dmx;
	mantis->dmxdev.capabilities	= 0;
	dprintk(MANTIS_DEBUG, 1, "dvb_dmxdev_init");

	result = dvb_dmxdev_init(&mantis->dmxdev, &mantis->dvb_adapter);
	if (result < 0) {

		dprintk(MANTIS_ERROR, 1, "dvb_dmxdev_init failed, ERROR=%d", result);
		goto err1;
	}

	mantis->fe_hw.source		= DMX_FRONTEND_0;
	result = mantis->demux.dmx.add_frontend(&mantis->demux.dmx, &mantis->fe_hw);
	if (result < 0) {

		dprintk(MANTIS_ERROR, 1, "dvb_dmx_init failed, ERROR=%d", result);
		goto err2;
	}

	mantis->fe_mem.source		= DMX_MEMORY_FE;
	result = mantis->demux.dmx.add_frontend(&mantis->demux.dmx, &mantis->fe_mem);
	if (result < 0) {
		dprintk(MANTIS_ERROR, 1, "dvb_dmx_init failed, ERROR=%d", result);
		goto err3;
	}

	result = mantis->demux.dmx.connect_frontend(&mantis->demux.dmx, &mantis->fe_hw);
	if (result < 0) {
		dprintk(MANTIS_ERROR, 1, "dvb_dmx_init failed, ERROR=%d", result);
		goto err4;
	}

	dvb_net_init(&mantis->dvb_adapter, &mantis->dvbnet, &mantis->demux.dmx);
	tasklet_init(&mantis->tasklet, mantis_dma_xfer, (unsigned long) mantis);
	tasklet_disable(&mantis->tasklet);
	if (mantis->hwconfig) {
		result = config->frontend_init(mantis, mantis->fe);
		if (result < 0) {
			dprintk(MANTIS_ERROR, 1, "!!! NO Frontends found !!!");
			goto err5;
		} else {
			if (mantis->fe == NULL) {
				dprintk(MANTIS_ERROR, 1, "FE <NULL>");
				goto err5;
			}

			if (dvb_register_frontend(&mantis->dvb_adapter, mantis->fe)) {
				dprintk(MANTIS_ERROR, 1, "ERROR: Frontend registration failed");

				if (mantis->fe->ops.release)
					mantis->fe->ops.release(mantis->fe);

				mantis->fe = NULL;
				goto err5;
			}
		}
	}

	return 0;

	/* Error conditions ..	*/
err5:
	tasklet_kill(&mantis->tasklet);
	dvb_net_release(&mantis->dvbnet);
	dvb_unregister_frontend(mantis->fe);
	dvb_frontend_detach(mantis->fe);
err4:
	mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_mem);

err3:
	mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_hw);

err2:
	dvb_dmxdev_release(&mantis->dmxdev);

err1:
	dvb_dmx_release(&mantis->demux);

err0:
	dvb_unregister_adapter(&mantis->dvb_adapter);

	return result;
}
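The mantis example above relies on dprintk() being a verbosity-gated debug macro, so the calls cost almost nothing when debugging is disabled. A rough sketch of such a macro is given below for orientation only: the `verbose' variable, the message prefix and the exact gating are assumptions, and the real definition in mantis_common.h may differ.

#include <linux/printk.h>

/* Hypothetical sketch of a verbosity-gated debug macro in the style the
 * mantis example assumes; `verbose' stands in for the driver's module
 * parameter and the real macro may differ in detail.
 */
static unsigned int verbose;

#define dprintk(level, print, fmt, ...)					\
do {									\
	if ((print) && verbose >= (level))				\
		printk(KERN_DEBUG "mantis: " fmt "\n", ##__VA_ARGS__);	\
} while (0)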
Example #5
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
					const struct ieee80211_ops *ops)
{
	struct ieee80211_local *local;
	int priv_size, i;
	struct wiphy *wiphy;

	/* Ensure 32-byte alignment of our private data and hw private data.
	 * We use the wiphy priv data for both our ieee80211_local and for
	 * the driver's private data
	 *
	 * In memory it'll be like this:
	 *
	 * +-------------------------+
	 * | struct wiphy	    |
	 * +-------------------------+
	 * | struct ieee80211_local  |
	 * +-------------------------+
	 * | driver's private data   |
	 * +-------------------------+
	 *
	 */
	priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len;

	wiphy = wiphy_new(&mac80211_config_ops, priv_size);

	if (!wiphy)
		return NULL;

	wiphy->mgmt_stypes = ieee80211_default_mgmt_stypes;

	wiphy->privid = mac80211_wiphy_privid;

	wiphy->flags |= WIPHY_FLAG_NETNS_OK |
			WIPHY_FLAG_4ADDR_AP |
			WIPHY_FLAG_4ADDR_STATION |
			WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS;

	if (!ops->set_key)
		wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	wiphy->bss_priv_size = sizeof(struct ieee80211_bss);

	local = wiphy_priv(wiphy);

	local->hw.wiphy = wiphy;

	local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);

	BUG_ON(!ops->tx);
	BUG_ON(!ops->start);
	BUG_ON(!ops->stop);
	BUG_ON(!ops->config);
	BUG_ON(!ops->add_interface);
	BUG_ON(!ops->remove_interface);
	BUG_ON(!ops->configure_filter);
	local->ops = ops;

	/* set up some defaults */
	local->hw.queues = 1;
	local->hw.max_rates = 1;
	local->hw.max_report_rates = 0;
	local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
	local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
	local->user_power_level = -1;
	local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
	local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;

	INIT_LIST_HEAD(&local->interfaces);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

	__hw_addr_init(&local->mc_list);

#endif
	mutex_init(&local->iflist_mtx);
	mutex_init(&local->mtx);

	mutex_init(&local->key_mtx);
	spin_lock_init(&local->filter_lock);
	spin_lock_init(&local->queue_stop_reason_lock);

	/*
	 * The rx_skb_queue is only accessed from tasklets,
	 * but other SKB queues are used from within IRQ
	 * context. Therefore, this one needs a different
	 * locking class so our direct, non-irq-safe use of
	 * the queue's lock doesn't throw lockdep warnings.
	 */
	skb_queue_head_init_class(&local->rx_skb_queue,
				  &ieee80211_rx_skb_queue_class);

	INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);

	ieee80211_work_init(local);

	INIT_WORK(&local->restart_work, ieee80211_restart_work);

	INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
	INIT_WORK(&local->recalc_smps, ieee80211_recalc_smps_work);
	local->smps_mode = IEEE80211_SMPS_OFF;

	INIT_WORK(&local->dynamic_ps_enable_work,
		  ieee80211_dynamic_ps_enable_work);
	INIT_WORK(&local->dynamic_ps_disable_work,
		  ieee80211_dynamic_ps_disable_work);
	setup_timer(&local->dynamic_ps_timer,
		    ieee80211_dynamic_ps_timer, (unsigned long) local);

	sta_info_init(local);

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
		skb_queue_head_init(&local->pending[i]);
		atomic_set(&local->agg_queue_stop[i], 0);
	}
	tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
		     (unsigned long)local);

	tasklet_init(&local->tasklet,
		     ieee80211_tasklet_handler,
		     (unsigned long) local);

	skb_queue_head_init(&local->skb_queue);
	skb_queue_head_init(&local->skb_queue_unreliable);

	/* init dummy netdev for use w/ NAPI */
	init_dummy_netdev(&local->napi_dev);

	ieee80211_led_names(local);

	ieee80211_hw_roc_setup(local);

	return local_to_hw(local);
}
Example #6
static int
msmsdcc_probe(struct platform_device *pdev)
{
	struct mmc_platform_data *plat = pdev->dev.platform_data;
	struct msmsdcc_host *host;
	struct mmc_host *mmc;
	struct resource *irqres = NULL;
	struct resource *memres = NULL;
	struct resource *dmares = NULL;
	int ret;
	int i;

	/* must have platform data */
	if (!plat) {
		printk(KERN_ERR "%s: Platform data not available\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	if (pdev->id < 1 || pdev->id > 4)
		return -EINVAL;

	if (pdev->resource == NULL || pdev->num_resources < 3) {
		printk(KERN_ERR "%s: Invalid resource\n", __func__);
		return -ENXIO;
	}

	for (i = 0; i < pdev->num_resources; i++) {
		if (pdev->resource[i].flags & IORESOURCE_MEM)
			memres = &pdev->resource[i];
		if (pdev->resource[i].flags & IORESOURCE_IRQ)
			irqres = &pdev->resource[i];
		if (pdev->resource[i].flags & IORESOURCE_DMA)
			dmares = &pdev->resource[i];
	}
	if (!irqres || !memres) {
		printk(KERN_ERR "%s: Invalid resource\n", __func__);
		return -ENXIO;
	}

	/*
	 * Setup our host structure
	 */

	mmc = mmc_alloc_host(sizeof(struct msmsdcc_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	host = mmc_priv(mmc);
	host->pdev_id = pdev->id;
	host->plat = plat;
	host->mmc = mmc;

	host->base = ioremap(memres->start, PAGE_SIZE);
	if (!host->base) {
		ret = -ENOMEM;
		goto host_free;
	}

	host->irqres = irqres;
	host->memres = memres;
	host->dmares = dmares;
	spin_lock_init(&host->lock);

#ifdef CONFIG_MMC_EMBEDDED_SDIO
	if (plat->embedded_sdio)
		mmc_set_embedded_sdio_data(mmc,
					   &plat->embedded_sdio->cis,
					   &plat->embedded_sdio->cccr,
					   plat->embedded_sdio->funcs,
					   plat->embedded_sdio->num_funcs);
#endif

#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
	INIT_WORK(&host->resume_task, do_resume_work);
#endif
	tasklet_init(&host->dma_tlet, msmsdcc_dma_complete_tlet,
			(unsigned long)host);

	/*
	 * Setup DMA
	 */
	ret = msmsdcc_init_dma(host);
	if (ret)
		goto ioremap_free;

	/*
	 * Setup main peripheral bus clock
	 */
	host->pclk = clk_get(&pdev->dev, "sdc_pclk");
	if (IS_ERR(host->pclk)) {
		ret = PTR_ERR(host->pclk);
		goto dma_free;
	}

	ret = clk_enable(host->pclk);
	if (ret)
		goto pclk_put;

	host->pclk_rate = clk_get_rate(host->pclk);

	/*
	 * Setup SDC MMC clock
	 */
	host->clk = clk_get(&pdev->dev, "sdc_clk");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto pclk_disable;
	}

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_put;

	ret = clk_set_rate(host->clk, msmsdcc_fmin);
	if (ret) {
		printk(KERN_ERR "%s: Clock rate set failed (%d)\n",
		       __func__, ret);
		goto clk_disable;
	}

	host->clk_rate = clk_get_rate(host->clk);

	host->clks_on = 1;

	/*
	 * Setup MMC host structure
	 */
	mmc->ops = &msmsdcc_ops;
	mmc->f_min = msmsdcc_fmin;
	mmc->f_max = msmsdcc_fmax;
	mmc->ocr_avail = plat->ocr_mask;
	mmc->caps |= plat->mmc_bus_width;

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
#ifdef CONFIG_MMC_MSM_SDIO_SUPPORT
	mmc->caps |= MMC_CAP_SDIO_IRQ;
#endif

	mmc->max_phys_segs = NR_SG;
	mmc->max_hw_segs = NR_SG;
	mmc->max_blk_size = 4096;	/* MCI_DATA_CTL BLOCKSIZE up to 4096 */
	mmc->max_blk_count = 65536;

	mmc->max_req_size = 33554432;	/* MCI_DATA_LENGTH is 25 bits */
	mmc->max_seg_size = mmc->max_req_size;

	writel(0, host->base + MMCIMASK0);
	writel(MCI_CLEAR_STATIC_MASK, host->base + MMCICLEAR);

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	/*
	 * Setup card detect change
	 */

	if (plat->status_irq) {
		ret = request_irq(plat->status_irq,
				  msmsdcc_platform_status_irq,
				  IRQF_SHARED | plat->irq_flags,
				  DRIVER_NAME " (slot)",
				  host);
		if (ret) {
			printk(KERN_ERR "Unable to get slot IRQ %d (%d)\n",
			       plat->status_irq, ret);
			goto clk_disable;
		}
	} else if (plat->register_status_notify) {
		plat->register_status_notify(msmsdcc_status_notify_cb, host);
	} else if (!plat->status)
		printk(KERN_ERR "%s: No card detect facilities available\n",
		       mmc_hostname(mmc));

	if (plat->status) {
		host->oldstat = host->plat->status(mmc_dev(host->mmc));
		host->eject = !host->oldstat;
	}

	/*
	 * Setup a command timer. We currently need this due to
	 * some 'strange' timeout / error handling situations.
	 */
	init_timer(&host->command_timer);
	host->command_timer.data = (unsigned long) host;
	host->command_timer.function = msmsdcc_command_expired;

	ret = request_irq(irqres->start, msmsdcc_irq, IRQF_SHARED,
			  DRIVER_NAME " (cmd)", host);
	if (ret)
		goto platform_irq_free;

	ret = request_irq(irqres->end, msmsdcc_pio_irq, IRQF_SHARED,
			  DRIVER_NAME " (pio)", host);
	if (ret)
		goto irq_free;

	mmc_set_drvdata(pdev, mmc);
	mmc_add_host(mmc);

#ifdef CONFIG_HAS_EARLYSUSPEND
	host->early_suspend.suspend = msmsdcc_early_suspend;
	host->early_suspend.resume  = msmsdcc_late_resume;
	host->early_suspend.level   = EARLY_SUSPEND_LEVEL_DISABLE_FB;
	register_early_suspend(&host->early_suspend);
#endif

	printk(KERN_INFO
	       "%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n",
	       mmc_hostname(mmc), (unsigned long long)memres->start,
	       (unsigned int) irqres->start,
	       (unsigned int) plat->status_irq, host->dma.channel);

	printk(KERN_INFO "%s: 8 bit data mode %s\n", mmc_hostname(mmc),
		(mmc->caps & MMC_CAP_8_BIT_DATA ? "enabled" : "disabled"));
	printk(KERN_INFO "%s: 4 bit data mode %s\n", mmc_hostname(mmc),
	       (mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled"));
	printk(KERN_INFO "%s: polling status mode %s\n", mmc_hostname(mmc),
	       (mmc->caps & MMC_CAP_NEEDS_POLL ? "enabled" : "disabled"));
	printk(KERN_INFO "%s: MMC clock %u -> %u Hz, PCLK %u Hz\n",
	       mmc_hostname(mmc), msmsdcc_fmin, msmsdcc_fmax, host->pclk_rate);
	printk(KERN_INFO "%s: Slot eject status = %d\n", mmc_hostname(mmc),
	       host->eject);
	printk(KERN_INFO "%s: Power save feature enable = %d\n",
	       mmc_hostname(mmc), msmsdcc_pwrsave);

	if (host->dma.channel != -1) {
		printk(KERN_INFO
		       "%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n",
		       mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr);
		printk(KERN_INFO
		       "%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n",
		       mmc_hostname(mmc), host->dma.cmd_busaddr,
		       host->dma.cmdptr_busaddr);
	} else
		printk(KERN_INFO
		       "%s: PIO transfer enabled\n", mmc_hostname(mmc));

#if defined(CONFIG_DEBUG_FS)
	msmsdcc_dbg_createhost(host);
#endif
	if (!plat->status_irq) {
		ret = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
		if (ret)
			goto irq_free;
	}
	return 0;
 irq_free:
	free_irq(irqres->start, host);
 platform_irq_free:
	if (plat->status_irq)
		free_irq(plat->status_irq, host);
 clk_disable:
	clk_disable(host->clk);
 clk_put:
	clk_put(host->clk);
 pclk_disable:
	clk_disable(host->pclk);
 pclk_put:
	clk_put(host->pclk);
 dma_free:
	dma_free_coherent(NULL, sizeof(struct msmsdcc_nc_dmadata),
			host->dma.nc, host->dma.nc_busaddr);
 ioremap_free:
	iounmap(host->base);
 host_free:
	mmc_free_host(mmc);
 out:
	return ret;
}
Example #7
static int smd_tty_open(struct tty_struct *tty, struct file *f)
{
	int res = 0;
	unsigned int n = tty->index;
	struct smd_tty_info *info;
	char *peripheral = NULL;


	if (n >= MAX_SMD_TTYS || !smd_tty[n].smd)
		return -ENODEV;

	info = smd_tty + n;

	mutex_lock(&smd_tty_lock);
	tty->driver_data = info;

	if (info->open_count++ == 0) {
		if (smd_tty[n].smd->edge == SMD_APPS_MODEM)
			peripheral = "modem";

		if (peripheral) {
			info->pil = pil_get(peripheral);
			if (IS_ERR(info->pil)) {
				res = PTR_ERR(info->pil);
				goto out;
			}

			/* Wait for the modem SMSM to be initialized so the SMD
			 * loopback channel can be allocated at the modem. Since
			 * the wait needs to be done at most once, using msleep
			 * doesn't degrade performance.
			 */
			if (n == LOOPBACK_IDX) {
				if (!is_modem_smsm_inited())
					msleep(5000);
				smsm_change_state(SMSM_APPS_STATE,
					0, SMSM_SMD_LOOPBACK);
				msleep(100);
			}


			/*
			 * Wait for a channel to be allocated so we know
			 * the modem is ready enough.
			 */
			if (smd_tty_modem_wait) {
				res = wait_for_completion_interruptible_timeout(
					&info->ch_allocated,
					msecs_to_jiffies(smd_tty_modem_wait *
									1000));

				if (res == 0) {
					pr_err("Timed out waiting for SMD"
								" channel\n");
					res = -ETIMEDOUT;
					goto release_pil;
				} else if (res < 0) {
					pr_err("Error waiting for SMD channel:"
									" %d\n",
						res);
					goto release_pil;
				}

				res = 0;
			}
		}


		info->tty = tty;
		tasklet_init(&info->tty_tsklt, smd_tty_read,
			     (unsigned long)info);
		wake_lock_init(&info->wake_lock, WAKE_LOCK_SUSPEND,
				smd_tty[n].smd->port_name);
		if (!info->ch) {
			res = smd_named_open_on_edge(smd_tty[n].smd->port_name,
							smd_tty[n].smd->edge,
							&info->ch, info,
							smd_tty_notify);
			if (res < 0) {
				pr_err("%s: %s open failed %d\n", __func__,
					smd_tty[n].smd->port_name, res);
				goto release_pil;
			}

			res = wait_event_interruptible_timeout(
				info->ch_opened_wait_queue,
				info->is_open, (2 * HZ));
			if (res == 0)
				res = -ETIMEDOUT;
			if (res < 0) {
				pr_err("%s: wait for %s smd_open failed %d\n",
					__func__, smd_tty[n].smd->port_name,
					res);
				goto release_pil;
			}
			res = 0;
		}
	}

release_pil:
	if (res < 0)
		pil_put(info->pil);
	else
		smd_disable_read_intr(info->ch);
out:
	mutex_unlock(&smd_tty_lock);

	return res;
}
Example #8
void softirq_tasklet_init(
    struct tasklet *t, void (*func)(unsigned long), unsigned long data)
{
    tasklet_init(t, func, data);
    t->is_softirq = 1;
}
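Example #8 wraps the Xen flavour of tasklets; the remaining examples in this collection use the Linux API. For reference, here is a minimal sketch of the classic (pre-5.9) Linux tasklet lifecycle those examples follow. The names my_dev, my_irq_handler, my_tasklet_fn, my_probe and my_remove are hypothetical and only illustrate the init -> schedule -> kill pattern.

#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct tl;
	/* ... device state ... */
};

/* Runs later in softirq context with the data value passed to tasklet_init(). */
static void my_tasklet_fn(unsigned long data)
{
	struct my_dev *d = (struct my_dev *)data;

	/* deferred work goes here */
	(void)d;
}

/* Hard-IRQ handler: acknowledge the hardware, then defer the heavy lifting. */
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_dev *d = dev_id;

	tasklet_schedule(&d->tl);
	return IRQ_HANDLED;
}

static int my_probe(struct my_dev *d)
{
	tasklet_init(&d->tl, my_tasklet_fn, (unsigned long)d);
	return 0;
}

static void my_remove(struct my_dev *d)
{
	/* Make sure a scheduled/running tasklet has finished before teardown. */
	tasklet_kill(&d->tl);
}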
Example #9
/**
 * ipath_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @context: unused by the InfiniPath driver
 * @udata: unused by the InfiniPath driver
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cq *cq;
	struct ipath_cq_wc *wc;
	struct ib_cq *ret;

	if (entries < 1 || entries > ib_ipath_max_cqes) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	/* Allocate the completion queue structure. */
	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * entries);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;
		u32 s = sizeof *wc + sizeof(struct ib_wc) * entries;

		cq->ip = ipath_create_mmap_info(dev, s, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		cq->ip = NULL;

	spin_lock(&dev->n_cqs_lock);
	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
		spin_unlock(&dev->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_cqs_allocated++;
	spin_unlock(&dev->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
	tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
	wc->head = 0;
	wc->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}
Example #10
static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
{
	struct mmc_omap_slot *slot = NULL;
	struct mmc_host *mmc;
	int r;

	mmc = mmc_alloc_host(sizeof(struct mmc_omap_slot), host->dev);
	if (mmc == NULL)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->host = host;
	slot->mmc = mmc;
	slot->id = id;
	slot->pdata = &host->pdata->slots[id];

	host->slots[id] = slot;

	mmc->caps = 0;
	if (host->pdata->slots[id].wires >= 4)
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	mmc->ops = &mmc_omap_ops;
	mmc->f_min = 400000;

	if (cpu_class_is_omap2())
		mmc->f_max = 48000000;
	else
		mmc->f_max = 24000000;
	if (host->pdata->max_freq)
		mmc->f_max = min((unsigned int)host->pdata->max_freq,
							mmc->f_max);
	mmc->ocr_avail = slot->pdata->ocr_mask;

	/* Use scatterlist DMA to reduce per-transfer costs.
	 * NOTE max_seg_size assumption that small blocks aren't
	 * normally used (except e.g. for reading SD registers).
	 */
	mmc->max_segs = 32;
	mmc->max_blk_size = 2048;	/* BLEN is 11 bits (+1) */
	mmc->max_blk_count = 2048;	/* NBLK is 11 bits (+1) */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	r = mmc_add_host(mmc);
	if (r < 0)
		goto err_remove_host;

	if (slot->pdata->name != NULL) {
		r = device_create_file(&mmc->class_dev,
					&dev_attr_slot_name);
		if (r < 0)
			goto err_remove_host;
	}

	if (slot->pdata->get_cover_state != NULL) {
		r = device_create_file(&mmc->class_dev,
					&dev_attr_cover_switch);
		if (r < 0)
			goto err_remove_slot_name;

		setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
			    (unsigned long)slot);
		tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
			     (unsigned long)slot);
		tasklet_schedule(&slot->cover_tasklet);
	}

	return 0;

err_remove_slot_name:
	if (slot->pdata->name != NULL)
		device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
err_remove_host:
	mmc_remove_host(mmc);
	mmc_free_host(mmc);
	return r;
}
Example #11
static int vfe_probe(struct platform_device *pdev)
{
	struct vfe_device *vfe_dev;
	/*struct msm_cam_subdev_info sd_info;*/
	const struct of_device_id *match_dev;
	int rc = 0;

	struct msm_iova_partition vfe_partition = {
		.start = SZ_128K,
		.size = SZ_2G - SZ_128K,
	};
	struct msm_iova_layout vfe_layout = {
		.partitions = &vfe_partition,
		.npartitions = 1,
		.client_name = "vfe",
		.domain_flags = 0,
	};

	vfe_dev = kzalloc(sizeof(struct vfe_device), GFP_KERNEL);
	if (!vfe_dev) {
		pr_err("%s: no enough memory\n", __func__);
		rc = -ENOMEM;
		goto end;
	}

	if (pdev->dev.of_node) {
		of_property_read_u32((&pdev->dev)->of_node,
			"cell-index", &pdev->id);
		match_dev = of_match_device(msm_vfe_dt_match, &pdev->dev);
		if (!match_dev) {
			pr_err("%s: No vfe hardware info\n", __func__);
			rc = -EINVAL;
			goto probe_fail;
		}
		vfe_dev->hw_info =
			(struct msm_vfe_hardware_info *) match_dev->data;
	} else {
		vfe_dev->hw_info = (struct msm_vfe_hardware_info *)
			platform_get_device_id(pdev)->driver_data;
	}

	if (!vfe_dev->hw_info) {
		pr_err("%s: No vfe hardware info\n", __func__);
		rc = -EINVAL;
		goto probe_fail;
	}
	ISP_DBG("%s: device id = %d\n", __func__, pdev->id);

	vfe_dev->pdev = pdev;
	rc = vfe_dev->hw_info->vfe_ops.core_ops.get_platform_data(vfe_dev);
	if (rc < 0) {
		pr_err("%s: failed to get platform resources\n", __func__);
		rc = -ENOMEM;
		goto probe_fail;
	}

	INIT_LIST_HEAD(&vfe_dev->tasklet_q);
	tasklet_init(&vfe_dev->vfe_tasklet,
		msm_isp_do_tasklet, (unsigned long)vfe_dev);

	v4l2_subdev_init(&vfe_dev->subdev.sd, vfe_dev->hw_info->subdev_ops);
	vfe_dev->subdev.sd.internal_ops =
		vfe_dev->hw_info->subdev_internal_ops;
	snprintf(vfe_dev->subdev.sd.name,
		ARRAY_SIZE(vfe_dev->subdev.sd.name),
		"vfe");
	vfe_dev->subdev.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	vfe_dev->subdev.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
	v4l2_set_subdevdata(&vfe_dev->subdev.sd, vfe_dev);
	platform_set_drvdata(pdev, &vfe_dev->subdev.sd);
	mutex_init(&vfe_dev->realtime_mutex);
	mutex_init(&vfe_dev->core_mutex);
	spin_lock_init(&vfe_dev->tasklet_lock);
	spin_lock_init(&vfe_dev->shared_data_lock);
	media_entity_init(&vfe_dev->subdev.sd.entity, 0, NULL, 0);
	vfe_dev->subdev.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
	vfe_dev->subdev.sd.entity.group_id = MSM_CAMERA_SUBDEV_VFE;
	vfe_dev->subdev.sd.entity.name = pdev->name;
	vfe_dev->subdev.close_seq = MSM_SD_CLOSE_1ST_CATEGORY | 0x2;
	rc = msm_sd_register(&vfe_dev->subdev);
	if (rc != 0) {
		pr_err("%s: msm_sd_register error = %d\n", __func__, rc);
		goto probe_fail;
	}

	vfe_dev->buf_mgr = &vfe_buf_mgr;
	v4l2_subdev_notify(&vfe_dev->subdev.sd,
		MSM_SD_NOTIFY_REQ_CB, &vfe_vb2_ops);
	rc = msm_isp_create_isp_buf_mgr(vfe_dev->buf_mgr,
		&vfe_vb2_ops, &vfe_layout);
	if (rc < 0) {
		pr_err("%s: Unable to create buffer manager\n", __func__);
		rc = -EINVAL;
		goto probe_fail;
	}
	/* create secure context banks*/
	if (vfe_dev->hw_info->num_iommu_secure_ctx) {
		/*secure vfe layout*/
		struct msm_iova_layout vfe_secure_layout = {
			.partitions = &vfe_partition,
			.npartitions = 1,
			.client_name = "vfe_secure",
			.domain_flags = 0,
			.is_secure = MSM_IOMMU_DOMAIN_SECURE,
		};
		rc = msm_isp_create_secure_domain(vfe_dev->buf_mgr,
			&vfe_secure_layout);
		if (rc < 0) {
			pr_err("%s: fail to create secure domain\n", __func__);
			msm_sd_unregister(&vfe_dev->subdev);
			rc = -EINVAL;
			goto probe_fail;
		}
	}

	vfe_dev->buf_mgr->ops->register_ctx(vfe_dev->buf_mgr,
		&vfe_dev->iommu_ctx[0], &vfe_dev->iommu_secure_ctx[0],
		vfe_dev->hw_info->num_iommu_ctx,
		vfe_dev->hw_info->num_iommu_secure_ctx);

	vfe_dev->buf_mgr->init_done = 1;
	vfe_dev->vfe_open_cnt = 0;
	return rc;

probe_fail:
	kfree(vfe_dev);
end:
	return rc;
}

static struct platform_driver vfe_driver = {
	.probe = vfe_probe,
	.driver = {
		.name = "msm_vfe",
		.owner = THIS_MODULE,
		.of_match_table = msm_vfe_dt_match,
	},
	.id_table = msm_vfe_dev_id,
};

static int __init msm_vfe_init_module(void)
{
	return platform_driver_register(&vfe_driver);
}

static void __exit msm_vfe_exit_module(void)
{
	platform_driver_unregister(&vfe_driver);
}

module_init(msm_vfe_init_module);
module_exit(msm_vfe_exit_module);
MODULE_DESCRIPTION("MSM VFE driver");
MODULE_LICENSE("GPL v2");
Example #12
struct net_device *__init
c4_add_dev (hdw_info_t * hi, int brdno, unsigned long f0, unsigned long f1,
            int irq0, int irq1)
{
    struct net_device *ndev;
    ci_t       *ci;

    ndev = alloc_netdev(sizeof(ci_t), SBE_IFACETMPL, c4_setup);
    if (!ndev)
    {
        pr_warning("%s: no memory for struct net_device !\n", hi->devname);
        error_flag = ENOMEM;
        return 0;
    }
    ci = (ci_t *)(netdev_priv(ndev));
    ndev->irq = irq0;

    ci->hdw_info = hi;
    ci->state = C_INIT;         /* mark as hardware not available */
    ci->next = c4_list;
    c4_list = ci;
    ci->brdno = ci->next ? ci->next->brdno + 1 : 0;

    if (CI == 0)
        CI = ci;                    /* DEBUG, only board 0 usage */

    strcpy (ci->devname, hi->devname);
    ci->release = &pmcc4_OSSI_release[0];

    /* tasklet */
#if defined(SBE_ISR_TASKLET)
    tasklet_init (&ci->ci_musycc_isr_tasklet,
                  (void (*) (unsigned long)) musycc_intr_bh_tasklet,
                  (unsigned long) ci);

    if (atomic_read (&ci->ci_musycc_isr_tasklet.count) == 0)
        tasklet_disable_nosync (&ci->ci_musycc_isr_tasklet);
#elif defined(SBE_ISR_IMMEDIATE)
    ci->ci_musycc_isr_tq.routine = (void *) (unsigned long) musycc_intr_bh_tasklet;
    ci->ci_musycc_isr_tq.data = ci;
#endif


    if (register_netdev (ndev) ||
        (c4_init (ci, (u_char *) f0, (u_char *) f1) != SBE_DRVR_SUCCESS))
    {
        OS_kfree (netdev_priv(ndev));
        OS_kfree (ndev);
        error_flag = ENODEV;
        return 0;
    }
    /*************************************************************
     *  int request_irq(unsigned int irq,
     *                  void (*handler)(int, void *, struct pt_regs *),
     *                  unsigned long flags, const char *dev_name, void *dev_id);
     *  wherein:
     *  irq      -> The interrupt number that is being requested.
     *  handler  -> Pointer to handling function being installed.
     *  flags    -> A bit mask of options related to interrupt management.
     *  dev_name -> String used in /proc/interrupts to show owner of interrupt.
     *  dev_id   -> Pointer (for shared interrupt lines) to point to its own
     *              private data area (to identify which device is interrupting).
     *
     *  extern void free_irq(unsigned int irq, void *dev_id);
     **************************************************************/

    if (request_irq (irq0, &c4_linux_interrupt,
#if defined(SBE_ISR_TASKLET)
                     IRQF_DISABLED | IRQF_SHARED,
#elif defined(SBE_ISR_IMMEDIATE)
                     IRQF_DISABLED | IRQF_SHARED,
#elif defined(SBE_ISR_INLINE)
                     IRQF_SHARED,
#endif
                     ndev->name, ndev))
    {
        pr_warning("%s: MUSYCC could not get irq: %d\n", ndev->name, irq0);
        unregister_netdev (ndev);
        OS_kfree (netdev_priv(ndev));
        OS_kfree (ndev);
        error_flag = EIO;
        return 0;
    }
#ifdef CONFIG_SBE_PMCC4_NCOMM
    if (request_irq (irq1, &c4_ebus_interrupt, IRQF_SHARED, ndev->name, ndev))
    {
        pr_warning("%s: EBUS could not get irq: %d\n", hi->devname, irq1);
        unregister_netdev (ndev);
        free_irq (irq0, ndev);
        OS_kfree (netdev_priv(ndev));
        OS_kfree (ndev);
        error_flag = EIO;
        return 0;
    }
#endif

    /* setup board identification information */

    {
        u_int32_t   tmp;

        hdw_sn_get (hi, brdno);     /* also sets PROM format type (promfmt)
                                     * for later usage */

        switch (hi->promfmt)
        {
        case PROM_FORMAT_TYPE1:
            memcpy (ndev->dev_addr, (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6);
            memcpy (&tmp, (FLD_TYPE1 *) (hi->mfg_info.pft1.Id), 4);     /* unaligned data
                                                                         * acquisition */
            ci->brd_id = cpu_to_be32 (tmp);
            break;
        case PROM_FORMAT_TYPE2:
            memcpy (ndev->dev_addr, (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6);
            memcpy (&tmp, (FLD_TYPE2 *) (hi->mfg_info.pft2.Id), 4);     /* unaligned data
                                                                         * acquisition */
            ci->brd_id = cpu_to_be32 (tmp);
            break;
        default:
            ci->brd_id = 0;
            memset (ndev->dev_addr, 0, 6);
            break;
        }

#if 1
        sbeid_set_hdwbid (ci);      /* requires bid to be preset */
#else
        sbeid_set_bdtype (ci);      /* requires hdw_bid to be preset */
#endif

    }

#ifdef CONFIG_PROC_FS
    sbecom_proc_brd_init (ci);
#endif
#if defined(SBE_ISR_TASKLET)
    tasklet_enable (&ci->ci_musycc_isr_tasklet);
#endif


    if ((error_flag = c4_init2 (ci)) != SBE_DRVR_SUCCESS)
    {
#ifdef CONFIG_PROC_FS
        sbecom_proc_brd_cleanup (ci);
#endif
        unregister_netdev (ndev);
        free_irq (irq1, ndev);
        free_irq (irq0, ndev);
        OS_kfree (netdev_priv(ndev));
        OS_kfree (ndev);
        return 0;                   /* failure, error_flag is set */
    }
    return ndev;
}
Example #13
static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw_version.devid = devid;
	ah->hw_version.subsysid = subsysid;
	sc->sc_ah = ah;

	common = ath9k_hw_common(ah);
	common->ops = &ath9k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;
	spin_lock_init(&common->cc_lock);

	spin_lock_init(&sc->wiphy_lock);
	spin_lock_init(&sc->sc_resetlock);
	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	ret = ath9k_init_debug(ah);
	if (ret) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to create debugfs files\n");
		goto err_debug;
	}

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret =  ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ret = ath9k_init_channels_rates(sc);
	if (ret)
		goto err_btcoex;

	ath9k_init_crypto(sc);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_exit_debug(ah);
err_debug:
	ath9k_hw_deinit(ah);
err_hw:
	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}
Example #14
/**
 * initialize_crq_queue: - Initializes and registers CRQ with hypervisor
 * @queue:	crq_queue to initialize and register
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Allocates a page for messages, maps it for dma, and registers
 * the crq with the hypervisor.
 * Returns zero on success.
 */
int ibmvscsi_init_crq_queue(struct crq_queue *queue,
			    struct ibmvscsi_host_data *hostdata,
			    int max_requests)
{
	int rc;
	int retrc;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);

	if (!queue->msgs)
		goto malloc_failed;
	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(queue->msg_token))
		goto map_failed;

	gather_partition_info();
	set_adapter_info(hostdata);

	retrc = rc = plpar_hcall_norets(H_REG_CRQ,
				vdev->unit_address,
				queue->msg_token, PAGE_SIZE);
	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvscsi_reset_crq_queue(queue,
					      hostdata);

	if (rc == 2) {
		/* Adapter is good, but other end is not ready */
		printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n");
		retrc = 0;
	} else if (rc != 0) {
		printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	if (request_irq(vdev->irq,
			ibmvscsi_handle_event,
			0, "ibmvscsi", (void *)hostdata) != 0) {
		printk(KERN_ERR "ibmvscsi: couldn't register irq 0x%x\n",
		       vdev->irq);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc != 0) {
		printk(KERN_ERR "ibmvscsi:  Error %d enabling interrupts!!!\n",
		       rc);
		goto req_irq_failed;
	}

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
		     (unsigned long)hostdata);

	return retrc;

      req_irq_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
      reg_crq_failed:
	dma_unmap_single(hostdata->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
      map_failed:
	free_page((unsigned long)queue->msgs);
      malloc_failed:
	return -1;
}
Example #15
/*******************************************************************************
**
**  onuEponIrqTaskletInit
**  ____________________________________________________________________________
**
**  DESCRIPTION: EPON Interrupt tasklet init
**               
**  PARAMETERS:  S_onuPonIrq irqId
** 
**  OUTPUTS:     none
**                               
**  RETURNS:     MV_OK
**
*******************************************************************************/
MV_STATUS onuEponIrqTaskletInit(S_onuPonIrq *irqId)
{
  tasklet_init(&(irqId->onuPonTasklet), onuEponTaskletFunc, (unsigned long)0);

  return(MV_OK);
}
Example #16
static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath9k_platform_data *pdata = sc->dev->platform_data;
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw = sc->hw;
	ah->hw_version.devid = devid;
	ah->reg_ops.read = ath9k_ioread32;
	ah->reg_ops.write = ath9k_iowrite32;
	ah->reg_ops.rmw = ath9k_reg_rmw;
	atomic_set(&ah->intr_ref_cnt, -1);
	sc->sc_ah = ah;

	if (!pdata) {
		ah->ah_flags |= AH_USE_EEPROM;
		sc->sc_ah->led_pin = -1;
	} else {
		sc->sc_ah->gpio_mask = pdata->gpio_mask;
		sc->sc_ah->gpio_val = pdata->gpio_val;
		sc->sc_ah->led_pin = pdata->led_pin;
		ah->is_clk_25mhz = pdata->is_clk_25mhz;
		ah->get_mac_revision = pdata->get_mac_revision;
		ah->external_reset = pdata->external_reset;
	}

	common = ath9k_hw_common(ah);
	common->ops = &ah->reg_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;
	common->btcoex_enabled = ath9k_btcoex_enable == 1;
	common->disable_ani = false;
	spin_lock_init(&common->cc_lock);

	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
#ifdef CONFIG_ATH9K_DEBUGFS
	spin_lock_init(&sc->nodes_lock);
	spin_lock_init(&sc->debug.samp_lock);
	INIT_LIST_HEAD(&sc->nodes);
#endif
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	if (pdata && pdata->macaddr)
		memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret =  ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ret = ath9k_init_channels_rates(sc);
	if (ret)
		goto err_btcoex;

	ath9k_cmn_init_crypto(sc->sc_ah);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_hw_deinit(ah);
err_hw:

	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}
Example #17
static int mtk_wcn_stp_uart_init(void)
{
	static struct tty_ldisc_ops stp_uart_ldisc;
	INT32 err;
	INT32 fifo_init_done = 0;

	UART_INFO_FUNC("mtk_wcn_stp_uart_init(): MTK STP UART driver\n");

#if  (LDISC_RX == LDISC_RX_TASKLET)
	err = stp_uart_fifo_init();
	if (err != 0) {
		goto init_err;
	}
	fifo_init_done = 1;
	/*init rx tasklet */
	tasklet_init(&g_stp_uart_rx_fifo_tasklet, stp_uart_rx_handling, (unsigned long)0);

#elif (LDISC_RX == LDISC_RX_WORK)
	err = stp_uart_fifo_init();
	if (err != 0) {
		UART_ERR_FUNC("stp_uart_fifo_init(WORK) error(%d)\n", err);
		err = -EFAULT;
		goto init_err;
	}
	fifo_init_done = 1;

	g_stp_uart_rx_work = vmalloc(sizeof(struct work_struct));
	if (!g_stp_uart_rx_work) {
		UART_ERR_FUNC("vmalloc work_struct(%d) fail\n", sizeof(struct work_struct));
		err = -ENOMEM;
		goto init_err;
	}

	g_stp_uart_rx_wq = create_singlethread_workqueue("mtk_urxd");
	if (!g_stp_uart_rx_wq) {
		UART_ERR_FUNC("create_singlethread_workqueue fail\n");
		err = -ENOMEM;
		goto init_err;
	}

	/* init rx work */
	INIT_WORK(g_stp_uart_rx_work, stp_uart_rx_worker);

#endif

	/* Register the tty discipline */
	memset(&stp_uart_ldisc, 0, sizeof(stp_uart_ldisc));
	stp_uart_ldisc.magic = TTY_LDISC_MAGIC;
	stp_uart_ldisc.name = "n_mtkstp";
	stp_uart_ldisc.open = stp_uart_tty_open;
	stp_uart_ldisc.close = stp_uart_tty_close;
	stp_uart_ldisc.read = stp_uart_tty_read;
	stp_uart_ldisc.write = stp_uart_tty_write;
	stp_uart_ldisc.ioctl = stp_uart_tty_ioctl;
	stp_uart_ldisc.poll = stp_uart_tty_poll;
	stp_uart_ldisc.receive_buf = stp_uart_tty_receive;
	stp_uart_ldisc.write_wakeup = stp_uart_tty_wakeup;
	stp_uart_ldisc.owner = THIS_MODULE;

	if ((err = tty_register_ldisc(N_MTKSTP, &stp_uart_ldisc))) {
		UART_ERR_FUNC("MTK STP line discipline registration failed. (%d)\n", err);
		goto init_err;
	}

	/*
	   mtk_wcn_stp_register_if_tx( mtk_wcn_uart_tx);
	 */

	return 0;

 init_err:

#if (LDISC_RX == LDISC_RX_TASKLET)
	/* nothing */
	if (fifo_init_done) {
		stp_uart_fifo_deinit();
	}
#elif (LDISC_RX == LDISC_RX_WORK)
	if (g_stp_uart_rx_wq) {
		destroy_workqueue(g_stp_uart_rx_wq);
		g_stp_uart_rx_wq = NULL;
	}
	if (g_stp_uart_rx_work) {
		vfree(g_stp_uart_rx_work);
	}
	if (fifo_init_done) {
		stp_uart_fifo_deinit();
	}
#endif
	UART_ERR_FUNC("init fail, return(%d)\n", err);

	return err;

}
Example #18
static int octeon_mgmt_probe(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct octeon_mgmt *p;
	const __be32 *data;
	const u8 *mac;
	struct resource *res_mix;
	struct resource *res_agl;
	struct resource *res_agl_prt_ctl;
	int len;
	int result;

	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
	if (netdev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	platform_set_drvdata(pdev, netdev);
	p = netdev_priv(netdev);
	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
		       OCTEON_MGMT_NAPI_WEIGHT);

	p->netdev = netdev;
	p->dev = &pdev->dev;
	p->has_rx_tstamp = false;

	data = of_get_property(pdev->dev.of_node, "cell-index", &len);
	if (data && len == sizeof(*data)) {
		p->port = be32_to_cpup(data);
	} else {
		dev_err(&pdev->dev, "no 'cell-index' property\n");
		result = -ENXIO;
		goto err;
	}

	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

	result = platform_get_irq(pdev, 0);
	if (result < 0)
		goto err;

	p->irq = result;

	res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_mix == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res_agl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
	if (res_agl_prt_ctl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	p->mix_phys = res_mix->start;
	p->mix_size = resource_size(res_mix);
	p->agl_phys = res_agl->start;
	p->agl_size = resource_size(res_agl);
	p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
	p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);


	if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
				     res_mix->name)) {
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_mix->name);
		result = -ENXIO;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
				     res_agl->name)) {
		result = -ENXIO;
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl->name);
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
				     p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
		result = -ENXIO;
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl_prt_ctl->name);
		goto err;
	}

	p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
	p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
	p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
					   p->agl_prt_ctl_size);
	if (!p->mix || !p->agl || !p->agl_prt_ctl) {
		dev_err(&pdev->dev, "failed to map I/O memory\n");
		result = -ENOMEM;
		goto err;
	}

	spin_lock_init(&p->lock);

	skb_queue_head_init(&p->tx_list);
	skb_queue_head_init(&p->rx_list);
	tasklet_init(&p->tx_clean_tasklet,
		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->netdev_ops = &octeon_mgmt_ops;
	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

	netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
	netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM;

	mac = of_get_mac_address(pdev->dev.of_node);

	if (mac)
		memcpy(netdev->dev_addr, mac, ETH_ALEN);
	else
		eth_hw_addr_random(netdev);

	p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);

	result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (result)
		goto err;

	netif_carrier_off(netdev);
	result = register_netdev(netdev);
	if (result)
		goto err;

	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
	return 0;

err:
	of_node_put(p->phy_np);
	free_netdev(netdev);
	return result;
}
Example #19
static int pegasus_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_device *dev = interface_to_usbdev(intf);
	struct net_device *net;
	pegasus_t *pegasus;
	int dev_index = id - pegasus_ids;
	int res = -ENOMEM;

	usb_get_dev(dev);
	if (!(pegasus = kmalloc(sizeof (struct pegasus), GFP_KERNEL))) {
		err("out of memory allocating device structure");
		goto out;
	}

	memset(pegasus, 0, sizeof (struct pegasus));
	pegasus->dev_index = dev_index;
	init_waitqueue_head(&pegasus->ctrl_wait);

	if (!alloc_urbs(pegasus))
		goto out1;

	net = alloc_etherdev(0);
	if (!net)
		goto out2;

	tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus);

	pegasus->usb = dev;
	pegasus->net = net;
	SET_MODULE_OWNER(net);
	net->priv = pegasus;
	net->open = pegasus_open;
	net->stop = pegasus_close;
	net->watchdog_timeo = PEGASUS_TX_TIMEOUT;
	net->tx_timeout = pegasus_tx_timeout;
	net->do_ioctl = pegasus_ioctl;
	net->hard_start_xmit = pegasus_start_xmit;
	net->set_multicast_list = pegasus_set_multicast;
	net->get_stats = pegasus_netdev_stats;
	net->mtu = PEGASUS_MTU;
	pegasus->mii.dev = net;
	pegasus->mii.mdio_read = mdio_read;
	pegasus->mii.mdio_write = mdio_write;
	pegasus->mii.phy_id_mask = 0x1f;
	pegasus->mii.reg_num_mask = 0x1f;
	spin_lock_init(&pegasus->rx_pool_lock);

	pegasus->features = usb_dev_id[dev_index].private;
	get_interrupt_interval(pegasus);
	if (reset_mac(pegasus)) {
		err("can't reset MAC");
		res = -EIO;
		goto out3;
	}
	set_ethernet_addr(pegasus);
	fill_skb_pool(pegasus);
	if (pegasus->features & PEGASUS_II) {
		info("setup Pegasus II specific registers");
		setup_pegasus_II(pegasus);
	}
	pegasus->phy = mii_phy_probe(pegasus);
	if (pegasus->phy == 0xff) {
		warn("can't locate MII phy, using default");
		pegasus->phy = 1;
	}
	usb_set_intfdata(intf, pegasus);
	SET_NETDEV_DEV(net, &intf->dev);
	res = register_netdev(net);
	if (res)
		goto out4;
	printk("%s: %s\n", net->name, usb_dev_id[dev_index].name);
	return 0;

out4:
	usb_set_intfdata(intf, NULL);
	free_skb_pool(pegasus);
out3:
	free_netdev(net);
out2:
	free_all_urbs(pegasus);
out1:
	kfree(pegasus);
out:
	usb_put_dev(dev);
	return res;
}
Example #20
/**
 * attach to the WL device.
 *
 * Attach to the WL device identified by vendor and device parameters.
 * regs is a host accessible memory address pointing to WL device registers.
 *
 * brcms_attach is not defined as static because in the case where no bus
 * is defined, wl_attach will never be called, and thus, gcc will issue
 * a warning that this function is defined but not used if we declare
 * it as static.
 *
 *
 * is called in brcms_bcma_probe() context, therefore no locking required.
 */
static struct brcms_info *brcms_attach(struct bcma_device *pdev)
{
	struct brcms_info *wl = NULL;
	int unit, err;
	struct ieee80211_hw *hw;
	u8 perm[ETH_ALEN];

	unit = n_adapters_found;
	err = 0;

	if (unit < 0)
		return NULL;

	/* allocate private info */
	hw = bcma_get_drvdata(pdev);
	if (hw != NULL)
		wl = hw->priv;
	if (WARN_ON(hw == NULL) || WARN_ON(wl == NULL))
		return NULL;
	wl->wiphy = hw->wiphy;

	atomic_set(&wl->callbacks, 0);

	/* setup the bottom half handler */
	tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);

	spin_lock_init(&wl->lock);
	spin_lock_init(&wl->isr_lock);

	/* prepare ucode */
	if (brcms_request_fw(wl, pdev) < 0) {
		wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
			  "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
		brcms_release_fw(wl);
		brcms_remove(pdev);
		return NULL;
	}

	/* common load-time initialization */
	wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err);
	brcms_release_fw(wl);
	if (!wl->wlc) {
		wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",
			  KBUILD_MODNAME, err);
		goto fail;
	}
	wl->pub = brcms_c_pub(wl->wlc);

	wl->pub->ieee_hw = hw;

	/* register our interrupt handler */
	if (request_irq(pdev->irq, brcms_isr,
			IRQF_SHARED, KBUILD_MODNAME, wl)) {
		wiphy_err(wl->wiphy, "wl%d: request_irq() failed\n", unit);
		goto fail;
	}
	wl->irq = pdev->irq;

	/* register module */
	brcms_c_module_register(wl->pub, "linux", wl, NULL);

	if (ieee_hw_init(hw)) {
		wiphy_err(wl->wiphy, "wl%d: %s: ieee_hw_init failed!\n", unit,
			  __func__);
		goto fail;
	}

	memcpy(perm, &wl->pub->cur_etheraddr, ETH_ALEN);
	if (WARN_ON(!is_valid_ether_addr(perm)))
		goto fail;
	SET_IEEE80211_PERM_ADDR(hw, perm);

	err = ieee80211_register_hw(hw);
	if (err)
		wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status"
			  "%d\n", __func__, err);

	if (wl->pub->srom_ccode[0] && brcms_set_hint(wl, wl->pub->srom_ccode))
		wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n",
			  __func__, err);

	n_adapters_found++;
	return wl;

fail:
	brcms_free(wl);
	return NULL;
}
Example #21
int __init ar2313_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct ar2313_private *sp;
	struct resource *res;
	unsigned long ar_eth_base;
	char buf[64];

	dev = alloc_etherdev(sizeof(struct ar2313_private));

	if (dev == NULL) {
		printk(KERN_ERR
			   "ar2313: Unable to allocate net_device structure!\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, dev);

	sp = netdev_priv(dev);
	sp->dev = dev;
	sp->cfg = pdev->dev.platform_data;

	sprintf(buf, "eth%d_membase", pdev->id);
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, buf);
	if (!res)
		return -ENODEV;

	sp->link = 0;
	ar_eth_base = res->start;
	sp->phy = sp->cfg->phy;

	sprintf(buf, "eth%d_irq", pdev->id);
	dev->irq = platform_get_irq_byname(pdev, buf);

	spin_lock_init(&sp->lock);

	/* initialize func pointers */
	dev->open = &ar2313_open;
	dev->stop = &ar2313_close;
	dev->hard_start_xmit = &ar2313_start_xmit;

	dev->set_multicast_list = &ar2313_multicast_list;
#ifdef TX_TIMEOUT
	dev->tx_timeout = ar2313_tx_timeout;
	dev->watchdog_timeo = AR2313_TX_TIMEOUT;
#endif
	dev->do_ioctl = &ar2313_ioctl;

	// SAMEER: do we need this?
	dev->features |= NETIF_F_HIGHDMA | NETIF_F_HW_CSUM;

	tasklet_init(&sp->rx_tasklet, rx_tasklet_func, (unsigned long) dev);
	tasklet_disable(&sp->rx_tasklet);

	sp->eth_regs =
		ioremap_nocache(virt_to_phys(ar_eth_base), sizeof(*sp->eth_regs));
	if (!sp->eth_regs) {
		printk("Can't remap eth registers\n");
		return (-ENXIO);
	}

	/*
	 * When there's only one MAC, PHY regs are typically on ENET0,
	 * even though the MAC might be on ENET1.
	 * Need to remap PHY regs separately in this case
	 */
	if (virt_to_phys(ar_eth_base) == virt_to_phys(sp->phy_regs))
		sp->phy_regs = sp->eth_regs;
	else {
		sp->phy_regs =
			ioremap_nocache(virt_to_phys(sp->cfg->phy_base),
							sizeof(*sp->phy_regs));
		if (!sp->phy_regs) {
			printk("Can't remap phy registers\n");
			return (-ENXIO);
		}
	}

	sp->dma_regs =
		ioremap_nocache(virt_to_phys(ar_eth_base + 0x1000),
						sizeof(*sp->dma_regs));
	dev->base_addr = (unsigned int) sp->dma_regs;
	if (!sp->dma_regs) {
		printk("Can't remap DMA registers\n");
		return (-ENXIO);
	}

	sp->int_regs = ioremap_nocache(virt_to_phys(sp->cfg->reset_base), 4);
	if (!sp->int_regs) {
		printk("Can't remap INTERRUPT registers\n");
		return (-ENXIO);
	}

	strncpy(sp->name, "Atheros AR231x", sizeof(sp->name) - 1);
	sp->name[sizeof(sp->name) - 1] = '\0';
	memcpy(dev->dev_addr, sp->cfg->macaddr, 6);
	sp->board_idx = BOARD_IDX_STATIC;

	if (ar2313_init(dev)) {
		/*
		 * ar2313_init() calls ar2313_init_cleanup() on error.
		 */
		free_netdev(dev);
		return -ENODEV;
	}

	if (register_netdev(dev)) {
		printk("%s: register_netdev failed\n", __func__);
		return -1;
	}

	printk("%s: %s: %02x:%02x:%02x:%02x:%02x:%02x, irq %d\n",
		   dev->name, sp->name,
		   dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		   dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], dev->irq);

	sp->mii_bus.priv = dev;
	sp->mii_bus.read = mdiobus_read;
	sp->mii_bus.write = mdiobus_write;
	sp->mii_bus.reset = mdiobus_reset;
	sp->mii_bus.name = "ar2313_eth_mii";
	sp->mii_bus.id = 0;
	sp->mii_bus.irq = kmalloc(sizeof(int), GFP_KERNEL);
	*sp->mii_bus.irq = PHY_POLL;

	mdiobus_register(&sp->mii_bus);

	if (mdiobus_probe(dev) != 0) {
		printk(KERN_ERR "ar2313: mdiobus_probe failed");
		rx_tasklet_cleanup(dev);
		ar2313_init_cleanup(dev);
		unregister_netdev(dev);
		free_netdev(dev);
	} else {
		/* start link poll timer */
		ar2313_setup_timer(dev);
	}

	return 0;
}
Example #22
static int __devinit p54p_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	struct p54p_priv *priv;
	struct ieee80211_hw *dev;
	unsigned long mem_addr, mem_len;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
		return err;
	}

	mem_addr = pci_resource_start(pdev, 0);
	mem_len = pci_resource_len(pdev, 0);
	if (mem_len < sizeof(struct p54p_csr)) {
		dev_err(&pdev->dev, "Too short PCI resources\n");
		err = -ENODEV;
		goto err_disable_dev;
	}

	err = pci_request_regions(pdev, "p54pci");
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_dev;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		err = -ENODEV;
		goto err_free_reg;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	pci_write_config_byte(pdev, 0x40, 0);
	pci_write_config_byte(pdev, 0x41, 0);

	dev = p54_init_common(sizeof(*priv));
	if (!dev) {
		dev_err(&pdev->dev, "ieee80211 alloc failed\n");
		err = -ENOMEM;
		goto err_free_reg;
	}

	priv = dev->priv;
	priv->pdev = pdev;

	SET_IEEE80211_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);

	priv->map = ioremap(mem_addr, mem_len);
	if (!priv->map) {
		dev_err(&pdev->dev, "Cannot map device memory\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control),
						  &priv->ring_control_dma);
	if (!priv->ring_control) {
		dev_err(&pdev->dev, "Cannot allocate rings\n");
		err = -ENOMEM;
		goto err_iounmap;
	}
	priv->common.open = p54p_open;
	priv->common.stop = p54p_stop;
	priv->common.tx = p54p_tx;

	spin_lock_init(&priv->lock);
	tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);

	err = request_firmware(&priv->firmware, "isl3886pci",
			       &priv->pdev->dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
		err = request_firmware(&priv->firmware, "isl3886",
				       &priv->pdev->dev);
		if (err)
			goto err_free_common;
	}

	err = p54p_open(dev);
	if (err)
		goto err_free_common;
	err = p54_read_eeprom(dev);
	p54p_stop(dev);
	if (err)
		goto err_free_common;

	err = p54_register_common(dev, &pdev->dev);
	if (err)
		goto err_free_common;

	return 0;

 err_free_common:
	release_firmware(priv->firmware);
	pci_free_consistent(pdev, sizeof(*priv->ring_control),
			    priv->ring_control, priv->ring_control_dma);

 err_iounmap:
	iounmap(priv->map);

 err_free_dev:
	pci_set_drvdata(pdev, NULL);
	p54_free_common(dev);

 err_free_reg:
	pci_release_regions(pdev);
 err_disable_dev:
	pci_disable_device(pdev);
	return err;
}
Example #23
int mt76x2_init_hardware(struct mt76x2_dev *dev)
{
	static const u16 beacon_offsets[16] = {
		/* 1024 bytes per beacon */
		0xc000,
		0xc400,
		0xc800,
		0xcc00,
		0xd000,
		0xd400,
		0xd800,
		0xdc00,

		/* BSS idx 8-15 not used for beacons */
		0xc000,
		0xc000,
		0xc000,
		0xc000,
		0xc000,
		0xc000,
		0xc000,
		0xc000,
	};
	u32 val;
	int ret;

	dev->beacon_offsets = beacon_offsets;
	tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
		     (unsigned long) dev);

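	/* Keep only the DMA burst size, endianness and header segment length
	 * settings, clear everything else, and enable TX write-back of the
	 * "done" status.
	 */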
	val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
	       MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
	val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);

	mt76x2_reset_wlan(dev, true);
	mt76x2_power_on(dev);

	ret = mt76x2_eeprom_init(dev);
	if (ret)
		return ret;

	ret = mt76x2_mac_reset(dev, true);
	if (ret)
		return ret;

	dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);

	ret = mt76x2_dma_init(dev);
	if (ret)
		return ret;

	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
	ret = mt76x2_mac_start(dev);
	if (ret)
		return ret;

	ret = mt76x2_mcu_init(dev);
	if (ret)
		return ret;

	mt76x2_mac_stop(dev, false);

	return 0;
}
Example #24
static int __devinit sunximmc_probe(struct platform_device *pdev)
{
    struct sunxi_mmc_host *smc_host = NULL;
    struct mmc_host	*mmc = NULL;
    int ret = 0;
    char mmc_para[16] = {0};
    int card_detmode = 0;

    SMC_MSG("%s: pdev->name: %s, pdev->id: %d\n", dev_name(&pdev->dev), pdev->name, pdev->id);
    mmc = mmc_alloc_host(sizeof(struct sunxi_mmc_host), &pdev->dev);
    if (!mmc)
    {
        SMC_ERR("mmc alloc host failed\n");
    	ret = -ENOMEM;
    	goto probe_out;
    }

    smc_host = mmc_priv(mmc);
    memset((void *)smc_host, 0, sizeof(*smc_host));
    smc_host->mmc = mmc;
    smc_host->pdev = pdev;

    spin_lock_init(&smc_host->lock);
    tasklet_init(&smc_host->tasklet, sunximmc_tasklet, (unsigned long) smc_host);

    smc_host->cclk  = 400000;
    smc_host->mod_clk = SMC_MAX_MOD_CLOCK(pdev->id);
    smc_host->clk_source = SMC_MOD_CLK_SRC(pdev->id);

    mmc->ops        = &sunximmc_ops;
    mmc->ocr_avail	= MMC_VDD_32_33 | MMC_VDD_33_34;
    mmc->caps	    = MMC_CAP_4_BIT_DATA|MMC_CAP_MMC_HIGHSPEED|MMC_CAP_SD_HIGHSPEED|MMC_CAP_SDIO_IRQ;
    mmc->f_min 	    = 400000;
    mmc->f_max      = SMC_MAX_IO_CLOCK(pdev->id);
    if (pdev->id==3 && !mmc_pm_io_shd_suspend_host())
        mmc->pm_flags = MMC_PM_IGNORE_PM_NOTIFY;

    mmc->max_blk_count	= 4095;
    mmc->max_blk_size	= 4095;
    mmc->max_req_size	= 4095 * 512;              // max_blk_count * 512-byte blocks
    mmc->max_seg_size	= mmc->max_req_size;
    mmc->max_segs	    = 256;
    
    if (sunximmc_resource_request(smc_host))
    {
        SMC_ERR("%s: Failed to get resource.\n", dev_name(&pdev->dev));
        ret = -ENODEV;
        goto probe_free_host;
    }

    if (sunximmc_set_src_clk(smc_host))
    {
        ret = -ENODEV;
        goto probe_free_host;
    }
    sunximmc_init_controller(smc_host);
    smc_host->power_on = 1;
    sunximmc_procfs_attach(smc_host);

    /* irq */
    smc_host->irq = platform_get_irq(pdev, 0);
    if (smc_host->irq == 0)
    {
    	dev_err(&pdev->dev, "Failed to get interrupt resource.\n");
    	ret = -EINVAL;
    	goto probe_free_resource;
    }

    if (request_irq(smc_host->irq, sunximmc_irq, 0, DRIVER_NAME, smc_host))
    {
    	dev_err(&pdev->dev, "Failed to request smc card interrupt.\n");
    	ret = -ENOENT;
    	goto probe_free_irq;
    }
    disable_irq(smc_host->irq);
    
    /* add host */
    ret = mmc_add_host(mmc);
    if (ret)
    {
    	dev_err(&pdev->dev, "Failed to add mmc host.\n");
    	goto probe_free_irq;
    }
    platform_set_drvdata(pdev, mmc);
    
    // fetch card detect mode
    sprintf(mmc_para, "mmc%d_para", pdev->id);
    ret = script_parser_fetch(mmc_para, "sdc_detmode", &card_detmode, sizeof(int));
    if (ret)
    {
    	SMC_ERR("sdc fetch card detect mode failed\n");
    }

    smc_host->cd_mode = card_detmode;
    if (smc_host->cd_mode == CARD_DETECT_BY_GPIO)
    {
        // initialize the card detect timer
        init_timer(&smc_host->cd_timer);
        smc_host->cd_timer.expires = jiffies + 1*HZ;
        smc_host->cd_timer.function = &sunximmc_cd_timer;
        smc_host->cd_timer.data = (unsigned long)smc_host;
        add_timer(&smc_host->cd_timer);
        smc_host->present = 0;
    }
    
    enable_irq(smc_host->irq);
    if (smc_host->cd_mode == CARD_ALWAYS_PRESENT)
    {
        mmc_detect_change(smc_host->mmc, msecs_to_jiffies(300));
    }

    sw_host[pdev->id] = smc_host;

    SMC_MSG("mmc%d Probe: base:0x%p irq:%u dma:%u pdes:0x%p, ret %d.\n", 
            pdev->id, smc_host->smc_base, smc_host->irq, smc_host->dma_no, smc_host->pdes, ret);
    
    goto probe_out;

probe_free_irq:
    if (smc_host->irq)
    {
        free_irq(smc_host->irq, smc_host);
    }

probe_free_resource:
    sunximmc_resource_release(smc_host);

probe_free_host:
    mmc_free_host(mmc);

probe_out:
    return ret;
}
Example #25
int IO_irq_open(struct inode *inode, struct file *filp)
{
	struct IO_irq_dev *dev; /* device information */
	int result1,result2,result3,result4;

	spin_lock(&IO_irq_lock);

	while (! atomic_dec_and_test (&IO_irq_available)) {
		atomic_inc(&IO_irq_available);
		spin_unlock(&IO_irq_lock);
		if (filp->f_flags & O_NONBLOCK) return -EAGAIN;
		if (wait_event_interruptible (IO_irq_wait, atomic_read (&IO_irq_available)))
			return -ERESTARTSYS; /* tell the fs layer to handle it */
		
		spin_lock(&IO_irq_lock);
	}

	spin_unlock(&IO_irq_lock);

	dev = container_of(inode->i_cdev, struct IO_irq_dev, cdev);

	if ((dev->IO_irq1 >= IRQ_EINT0)&&(dev->IO_irq2 >= IRQ_EINT0)&&(dev->IO_irq3 >= IRQ_EINT0)&&(dev->IO_irq4 >= IRQ_EINT0))
	{
		result1 = request_irq(dev->IO_irq1, IO_irq_interrupt1, 0 , "IO_irq1", (void *)NULL);
		result2 = request_irq(dev->IO_irq2, IO_irq_interrupt2, 0 , "IO_irq2", (void *)NULL);
		result3 = request_irq(dev->IO_irq3, IO_irq_interrupt3, 0 , "IO_irq3", (void *)NULL);
		result4 = request_irq(dev->IO_irq4, IO_irq_interrupt4, 0 , "IO_irq4", (void *)NULL);
		if (result1 || result2 || result3 || result4 ) {
			printk( "IO_irq: can't get assigned one of irq \n");
			if (!result1) free_irq(IO_irq_devices->IO_irq1, NULL);
			if (!result2) free_irq(IO_irq_devices->IO_irq2, NULL);
			if (!result3) free_irq(IO_irq_devices->IO_irq3, NULL);
			if (!result4) free_irq(IO_irq_devices->IO_irq4, NULL);
			return -EAGAIN;
		} else { 
			s3c2410_gpio_cfgpin(S3C2410_GPG11, S3C2410_GPG11_EINT19);
			s3c2410_gpio_pullup(S3C2410_GPG11, 0);
			s3c2410_gpio_cfgpin(S3C2410_GPG3, S3C2410_GPG3_EINT11);
			s3c2410_gpio_pullup(S3C2410_GPG3, 0);
			s3c2410_gpio_cfgpin(S3C2410_GPF2, S3C2410_GPF2_EINT2);
			s3c2410_gpio_pullup(S3C2410_GPF2, 0);
			s3c2410_gpio_cfgpin(S3C2410_GPF0, S3C2410_GPF0_EINT0);
			s3c2410_gpio_pullup(S3C2410_GPF0, 0);
			dev->IO_status = 0x1f ;
		}
	}
	else   {
		printk("IO_irq: get IRQ failed !\n");
		return -EAGAIN;
		}

	filp->private_data = dev; /* for other methods */

	tasklet_init(&keytask, key1_tasklet, (unsigned long)&key1);

	tekkamanwork = create_workqueue("tekkamanwork");
	INIT_DELAYED_WORK(&irq_work_delay, irq_work_delay_fn); 

	INIT_WORK(&irq_work, irq_work_fn);

	tekkamantmp = kmalloc(512, GFP_KERNEL);
	tekkamanbuf = kmalloc (512 , GFP_KERNEL);
	tekkamanfifo = kfifo_alloc(512, GFP_KERNEL, &tekkamanlock);
	
	printk( "IO_irq: opened !  \n");
	return nonseekable_open(inode, filp);          /* success */
}
Example #26
/*
 * our driver startup and shutdown routines
 */
static int
mv_cesa_ocf_init(struct platform_device *pdev)
{
#if defined(CONFIG_MV78200) || defined(CONFIG_MV632X)
	if (MV_FALSE == mvSocUnitIsMappedToThisCpu(CESA))
	{
		dprintk("CESA is not mapped to this CPU\n");
		return -ENODEV;
	}
#endif

	dprintk("%s\n", __FUNCTION__);
	memset(&mv_cesa_dev, 0, sizeof(mv_cesa_dev));
	softc_device_init(&mv_cesa_dev, "MV CESA", 0, mv_cesa_methods);
	cesa_ocf_id = crypto_get_driverid(softc_get_device(&mv_cesa_dev),CRYPTOCAP_F_HARDWARE);

	if (cesa_ocf_id < 0)
		panic("MV CESA crypto device cannot initialize!");

	dprintk("%s,%d: cesa ocf device id is %d \n", __FILE__, __LINE__, cesa_ocf_id);

	/* CESA unit is auto power on off */
#if 0
	if (MV_FALSE == mvCtrlPwrClckGet(CESA_UNIT_ID,0))
	{
		printk("\nWarning CESA %d is Powered Off\n",0);
		return EINVAL;
	}
#endif

	memset(&cesa_device, 0, sizeof(struct cesa_dev));
	/* Get the IRQ, and crypto memory regions */
	{
		struct resource *res;
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");

		if (!res)
			return -ENXIO;

		cesa_device.sram = ioremap(res->start, res->end - res->start + 1);
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");

		if (!res) {
		        iounmap(cesa_device.sram);
			return -ENXIO;
                }
                cesa_device.reg = ioremap(res->start, res->end - res->start + 1);
		cesa_device.irq = platform_get_irq(pdev, 0);
		cesa_device.plat_data = pdev->dev.platform_data;
	        setup_tdma_mbus_windows(&cesa_device);

	}


	if( MV_OK != mvCesaInit(CESA_OCF_MAX_SES*5, CESA_Q_SIZE, cesa_device.reg,
				NULL) ) {
		printk("%s,%d: mvCesaInit Failed. \n", __FILE__, __LINE__);
		return EINVAL;
	}

	/* clear and unmask Int */
	MV_REG_WRITE( MV_CESA_ISR_CAUSE_REG, 0);
#ifndef CESA_OCF_POLLING
	MV_REG_WRITE(MV_CESA_ISR_MASK_REG, MV_CESA_CAUSE_ACC_DMA_MASK);
#endif
#ifdef CESA_OCF_TASKLET
	tasklet_init(&cesa_ocf_tasklet, cesa_callback, (unsigned long) 0);
#endif
	/* register interrupt */
	if (request_irq(cesa_device.irq, cesa_interrupt_handler,
			IRQF_DISABLED, "cesa", &cesa_ocf_id) < 0) {
		printk("%s,%d: cannot assign irq %d\n", __FILE__, __LINE__, cesa_device.irq);
		return EINVAL;
	}


	memset(cesa_ocf_sessions, 0, sizeof(struct cesa_ocf_data *) * CESA_OCF_MAX_SES);

#define	REGISTER(alg) \
	crypto_register(cesa_ocf_id, alg, 0,0)
	REGISTER(CRYPTO_AES_CBC);
	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_SHA1_HMAC);
#undef REGISTER

	return 0;
}
Example #27
void snd_hdjmidi_output_initialize_tasklet(struct snd_hdjmidi_out_endpoint* ep)
{
	if (ep) {
		tasklet_init(&ep->tasklet, snd_hdjmidi_out_tasklet, (unsigned long)ep);
	}
}
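
For orientation, here is a minimal, self-contained sketch (not taken from any of the drivers above) of the lifecycle that such a tasklet_init() call plugs into: a hard-IRQ top half that defers work with tasklet_schedule(), the bottom-half handler itself, and tasklet_kill() on teardown. The names my_dev, my_irq_handler and my_tasklet_fn are hypothetical.

#include <linux/interrupt.h>

/* Hypothetical per-device context, not part of any example above. */
struct my_dev {
	struct tasklet_struct tasklet;
	unsigned long pending_events;
};

/* Bottom half: runs in softirq context some time after tasklet_schedule(). */
static void my_tasklet_fn(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;

	/* Process whatever the hard-IRQ handler deferred. */
	dev->pending_events = 0;
}

/* Top half: keep it short and defer the real work to the tasklet. */
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	dev->pending_events++;
	tasklet_schedule(&dev->tasklet);
	return IRQ_HANDLED;
}

static void my_dev_init(struct my_dev *dev)
{
	/* Same pattern as the examples: handler plus a context pointer cast to unsigned long. */
	tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
}

static void my_dev_teardown(struct my_dev *dev)
{
	/* Wait for a running handler to finish and prevent rescheduling. */
	tasklet_kill(&dev->tasklet);
}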
Example #28
static int s5p_aes_probe(struct platform_device *pdev)
{
    int                 i, j, err = -ENODEV;
    struct s5p_aes_dev *pdata;
    struct device      *dev = &pdev->dev;
    struct resource    *res;

    if (s5p_dev)
        return -EEXIST;

    pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
    if (!pdata)
        return -ENOMEM;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(pdata->ioaddr))
        return PTR_ERR(pdata->ioaddr);

    pdata->clk = devm_clk_get(dev, "secss");
    if (IS_ERR(pdata->clk)) {
        dev_err(dev, "failed to find secss clock source\n");
        return -ENOENT;
    }

    clk_enable(pdata->clk);

    spin_lock_init(&pdata->lock);

    pdata->irq_hash = platform_get_irq_byname(pdev, "hash");
    if (pdata->irq_hash < 0) {
        err = pdata->irq_hash;
        dev_warn(dev, "hash interrupt is not available.\n");
        goto err_irq;
    }
    err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
                           IRQF_SHARED, pdev->name, pdev);
    if (err < 0) {
        dev_warn(dev, "hash interrupt is not available.\n");
        goto err_irq;
    }

    pdata->irq_fc = platform_get_irq_byname(pdev, "feed control");
    if (pdata->irq_fc < 0) {
        err = pdata->irq_fc;
        dev_warn(dev, "feed control interrupt is not available.\n");
        goto err_irq;
    }
    err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
                           IRQF_SHARED, pdev->name, pdev);
    if (err < 0) {
        dev_warn(dev, "feed control interrupt is not available.\n");
        goto err_irq;
    }

    pdata->dev = dev;
    platform_set_drvdata(pdev, pdata);
    s5p_dev = pdata;

    tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
    crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);

    for (i = 0; i < ARRAY_SIZE(algs); i++) {
        err = crypto_register_alg(&algs[i]);
        if (err)
            goto err_algs;
    }

    pr_info("s5p-sss driver registered\n");

    return 0;

err_algs:
    dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);

    for (j = 0; j < i; j++)
        crypto_unregister_alg(&algs[j]);

    tasklet_kill(&pdata->tasklet);

err_irq:
    clk_disable(pdata->clk);

    s5p_dev = NULL;

    return err;
}
Example #29
/*----------------------------------------------------------------
* wlan_setup
*
* Roughly matches the functionality of ether_setup.  Here
* we set up any members of the wlandevice structure that are common
* to all devices.  Additionally, we allocate a linux 'struct device'
* and perform the same setup as ether_setup.
*
* Note: It's important that the caller have setup the wlandev->name
*	ptr prior to calling this function.
*
* Arguments:
*	wlandev		ptr to the wlandev structure for the
*			interface.
* Returns:
*	zero on success, non-zero otherwise.
* Call Context:
*	Should be process thread.  We'll assume it might be
*	interrupt though.  When we add support for statically
*	compiled drivers, this function will be called in the
*	context of the kernel startup code.
----------------------------------------------------------------*/
int wlan_setup(wlandevice_t *wlandev)
{
	int		result = 0;
	netdevice_t	*dev;

	DBFENTER;

	/* Set up the wlandev */
	wlandev->state = WLAN_DEVICE_CLOSED;
	wlandev->ethconv = WLAN_ETHCONV_8021h;
	wlandev->macmode = WLAN_MACMODE_NONE;

	/* Set up the rx queue */
	skb_queue_head_init(&wlandev->nsd_rxq);
	tasklet_init(&wlandev->rx_bh,
		     p80211netdev_rx_bh,
		     (unsigned long)wlandev);

	/* Allocate and initialize the struct device */
	dev = kmalloc(sizeof(netdevice_t), GFP_ATOMIC);
	if ( dev == NULL ) {
		WLAN_LOG_ERROR("Failed to alloc netdev.\n");
		result = 1;
	} else {
		memset( dev, 0, sizeof(netdevice_t));
		ether_setup(dev);
		wlandev->netdev = dev;
		dev->ml_priv = wlandev;
		dev->hard_start_xmit =	p80211knetdev_hard_start_xmit;
		dev->get_stats =	p80211knetdev_get_stats;
#ifdef HAVE_PRIVATE_IOCTL
		dev->do_ioctl = 	p80211knetdev_do_ioctl;
#endif
#ifdef HAVE_MULTICAST
		dev->set_multicast_list = p80211knetdev_set_multicast_list;
#endif
		dev->init =		p80211knetdev_init;
		dev->open =		p80211knetdev_open;
		dev->stop =		p80211knetdev_stop;

#ifdef CONFIG_NET_WIRELESS
#if ((WIRELESS_EXT < 17) && (WIRELESS_EXT < 21))
		dev->get_wireless_stats = p80211wext_get_wireless_stats;
#endif
#if WIRELESS_EXT > 12
		dev->wireless_handlers = &p80211wext_handler_def;
#endif
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,38) )
		dev->tbusy = 1;
		dev->start = 0;
#else
		netif_stop_queue(dev);
#endif
#ifdef HAVE_CHANGE_MTU
		dev->change_mtu = wlan_change_mtu;
#endif
#ifdef HAVE_SET_MAC_ADDR
		dev->set_mac_address =	p80211knetdev_set_mac_address;
#endif
#ifdef HAVE_TX_TIMEOUT
		dev->tx_timeout      =  &p80211knetdev_tx_timeout;
		dev->watchdog_timeo  =  (wlan_watchdog * HZ) / 1000;
#endif
		netif_carrier_off(dev);
	}

	DBFEXIT;
	return result;
}
Example #30
static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath9k_platform_data *pdata = sc->dev->platform_data;
	struct ath_hw *ah = NULL;
	struct ath9k_hw_capabilities *pCap;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = devm_kzalloc(sc->dev, sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->dev = sc->dev;
	ah->hw = sc->hw;
	ah->hw_version.devid = devid;
	ah->reg_ops.read = ath9k_ioread32;
	ah->reg_ops.multi_read = ath9k_multi_ioread32;
	ah->reg_ops.write = ath9k_iowrite32;
	ah->reg_ops.rmw = ath9k_reg_rmw;
	pCap = &ah->caps;

	common = ath9k_hw_common(ah);

	/* Will be cleared in ath9k_start() */
	set_bit(ATH_OP_INVALID, &common->op_flags);

	sc->sc_ah = ah;
	sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
	sc->tx99_power = MAX_RATE_POWER + 1;
	init_waitqueue_head(&sc->tx_wait);
	sc->cur_chan = &sc->chanctx[0];
	if (!ath9k_is_chanctx_enabled())
		sc->cur_chan->hw_queue_base = 0;

	if (!pdata || pdata->use_eeprom) {
		ah->ah_flags |= AH_USE_EEPROM;
		sc->sc_ah->led_pin = -1;
	} else {
		sc->sc_ah->gpio_mask = pdata->gpio_mask;
		sc->sc_ah->gpio_val = pdata->gpio_val;
		sc->sc_ah->led_pin = pdata->led_pin;
		ah->is_clk_25mhz = pdata->is_clk_25mhz;
		ah->get_mac_revision = pdata->get_mac_revision;
		ah->external_reset = pdata->external_reset;
		ah->disable_2ghz = pdata->disable_2ghz;
		ah->disable_5ghz = pdata->disable_5ghz;
		if (!pdata->endian_check)
			ah->ah_flags |= AH_NO_EEP_SWAP;
	}

	common->ops = &ah->reg_ops;
	common->bus_ops = bus_ops;
	common->ps_ops = &ath9k_ps_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;
	common->btcoex_enabled = ath9k_btcoex_enable == 1;
	common->disable_ani = false;

	/*
	 * Platform quirks.
	 */
	ath9k_init_pcoem_platform(sc);

	ret = ath9k_init_soc_platform(sc);
	if (ret)
		return ret;

	if (ath9k_led_active_high != -1)
		ah->config.led_active_high = ath9k_led_active_high == 1;

	/*
	 * Enable WLAN/BT RX Antenna diversity only when:
	 *
	 * - BTCOEX is disabled.
	 * - the user manually requests the feature.
	 * - the HW cap is set using the platform data.
	 */
	if (!common->btcoex_enabled && ath9k_bt_ant_diversity &&
	    (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV))
		common->bt_ant_diversity = 1;

	spin_lock_init(&common->cc_lock);
	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	spin_lock_init(&sc->chan_lock);
	mutex_init(&sc->mutex);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
		     (unsigned long)sc);

	setup_timer(&sc->sleep_timer, ath_ps_full_sleep, (unsigned long)sc);
	INIT_WORK(&sc->hw_reset_work, ath_reset_work);
	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
	INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);

	ath9k_init_channel_context(sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	if (pdata && pdata->macaddr)
		memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret =  ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ret = ath9k_cmn_init_channels_rates(common);
	if (ret)
		goto err_btcoex;

	ret = ath9k_init_p2p(sc);
	if (ret)
		goto err_btcoex;

	ath9k_cmn_init_crypto(sc->sc_ah);
	ath9k_init_misc(sc);
	ath_chanctx_init(sc);
	ath9k_offchannel_init(sc);

	if (common->bus_ops->aspm_init)
		common->bus_ops->aspm_init(common);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_hw_deinit(ah);
err_hw:
	ath9k_eeprom_release(sc);
	dev_kfree_skb_any(sc->tx99_skb);
	return ret;
}