Example #1
static int __devinit ad1889_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
{
	int err;
	ad1889_dev_t *dev;
	unsigned long bar;
	struct proc_dir_entry *proc_root = NULL;

	if ((err = pci_enable_device(pcidev)) != 0) {
		printk(KERN_ERR DEVNAME ": pci_enable_device failed\n");
		return err;
	}

	pci_set_master(pcidev);
	if ((dev = ad1889_alloc_dev(pcidev)) == NULL) {
		printk(KERN_ERR DEVNAME ": cannot allocate memory for device\n");
		return -ENOMEM;
	}
	pci_set_drvdata(pcidev, dev);
	bar = pci_resource_start(pcidev, 0);
	
	if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR DEVNAME ": memory region not assigned\n");
		goto out1;
	}

	if (pci_request_region(pcidev, 0, DEVNAME)) {
		printk(KERN_ERR DEVNAME ": unable to request memory region\n");
		goto out1;
	}

	dev->regbase = ioremap_nocache(bar, AD_DS_IOMEMSIZE);
	if (!dev->regbase) {
		printk(KERN_ERR DEVNAME ": unable to remap iomem\n");
		goto out2;
	}

	if (request_irq(pcidev->irq, ad1889_interrupt, IRQF_SHARED, DEVNAME, dev) != 0) {
		printk(KERN_ERR DEVNAME ": unable to request interrupt\n");
		goto out3;
	}

	printk(KERN_INFO DEVNAME ": %s at %p IRQ %d\n",
		(char *)ent->driver_data, dev->regbase, pcidev->irq);

	if (ad1889_aclink_reset(pcidev) != 0)
		goto out4;

	/* register /dev/dsp */
	if ((dev->dev_audio = register_sound_dsp(&ad1889_fops, -1)) < 0) {
		printk(KERN_ERR DEVNAME ": cannot register /dev/dsp\n");
		goto out4;
	}

	if ((err = ad1889_ac97_init(dev, 0)) != 0)
		goto out5;

	/* XXX: cleanups */
	if (((proc_root = proc_mkdir("driver/ad1889", NULL)) == NULL) ||
	    create_proc_read_entry("ac97", S_IFREG|S_IRUGO, proc_root, ac97_read_proc, dev->ac97_codec) == NULL ||
	    create_proc_read_entry("info", S_IFREG|S_IRUGO, proc_root, ad1889_read_proc, dev) == NULL) 
		goto out5;
	
	ad1889_initcfg(dev);

	//DBG(DEVNAME ": Driver initialization done!\n");

	ad1889_dev = dev;

	return 0;

out5:
	unregister_sound_dsp(dev->dev_audio);
out4:
	free_irq(pcidev->irq, dev);
out3:
	iounmap(dev->regbase);
out2:
	pci_release_region(pcidev, 0);
out1:
	ad1889_free_dev(dev);
	pci_set_drvdata(pcidev, NULL);

	return -ENODEV;
}
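
The unwind labels above imply a remove path that releases the same resources in reverse. A minimal sketch of such a counterpart, assuming the helpers shown in the probe (ad1889_free_dev and friends) and omitting the proc entries for brevity; this is illustrative, not the driver's actual remove function:

static void __devexit ad1889_remove(struct pci_dev *pcidev)
{
	ad1889_dev_t *dev = pci_get_drvdata(pcidev);

	if (!dev)
		return;

	/* tear down in reverse order of ad1889_probe() */
	unregister_sound_dsp(dev->dev_audio);
	free_irq(pcidev->irq, dev);
	iounmap(dev->regbase);
	pci_release_region(pcidev, 0);
	ad1889_free_dev(dev);
	pci_set_drvdata(pcidev, NULL);
}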
Example #2
struct link_device *dpram_create_link_device(struct platform_device *pdev)
{
	struct modem_data *mdm_data = NULL;
	struct dpram_link_device *dpld = NULL;
	struct link_device *ld = NULL;
	struct resource *res = NULL;
	resource_size_t res_size;
	struct modemlink_dpram_control *dpctl = NULL;
	unsigned long task_data;
	int ret = 0;
	int i = 0;
	int bsize;
	int qsize;

	/* Get the platform data */
	mdm_data = (struct modem_data *)pdev->dev.platform_data;
	if (!mdm_data) {
		mif_info("ERR! mdm_data == NULL\n");
		goto err;
	}
	mif_info("modem = %s\n", mdm_data->name);
	mif_info("link device = %s\n", mdm_data->link_name);

	if (!mdm_data->dpram_ctl) {
		mif_info("ERR! mdm_data->dpram_ctl == NULL\n");
		goto err;
	}
	dpctl = mdm_data->dpram_ctl;

	/* Alloc DPRAM link device structure */
	dpld = kzalloc(sizeof(struct dpram_link_device), GFP_KERNEL);
	if (!dpld) {
		mif_info("ERR! kzalloc dpld fail\n");
		goto err;
	}
	ld = &dpld->ld;
	task_data = (unsigned long)dpld;

	/* Retrieve modem data and DPRAM control data from the modem data */
	ld->mdm_data = mdm_data;
	ld->name = mdm_data->link_name;
	ld->ipc_version = mdm_data->ipc_version;

	/* Retrieve the most basic data for IPC from the modem data */
	dpld->dpctl = dpctl;
	dpld->dp_type = dpctl->dp_type;

	if (mdm_data->ipc_version < SIPC_VER_50) {
		if (!dpctl->max_ipc_dev) {
			mif_info("ERR! no max_ipc_dev\n");
			goto err;
		}

		ld->aligned = dpctl->aligned;
		dpld->max_ipc_dev = dpctl->max_ipc_dev;
	} else {
		ld->aligned = 1;
		dpld->max_ipc_dev = MAX_SIPC5_DEV;
	}

	/* Set attributes as a link device */
	ld->init_comm = dpram_link_init;
	ld->terminate_comm = dpram_link_terminate;
	ld->send = dpram_send;
	ld->force_dump = dpram_force_dump;
	ld->dump_start = dpram_dump_start;
	ld->dump_update = dpram_dump_update;
	ld->ioctl = dpram_ioctl;

	INIT_LIST_HEAD(&ld->list);

	skb_queue_head_init(&ld->sk_fmt_tx_q);
	skb_queue_head_init(&ld->sk_raw_tx_q);
	skb_queue_head_init(&ld->sk_rfs_tx_q);
	ld->skb_txq[IPC_FMT] = &ld->sk_fmt_tx_q;
	ld->skb_txq[IPC_RAW] = &ld->sk_raw_tx_q;
	ld->skb_txq[IPC_RFS] = &ld->sk_rfs_tx_q;

	/* Set up function pointers */
	dpram_setup_common_op(dpld);
	dpld->dpram_dump = dpram_dump_memory;
	dpld->ext_op = dpram_get_ext_op(mdm_data->modem_type);

	/* Retrieve DPRAM resource */
	if (!dpctl->dp_base) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res) {
			mif_info("%s: ERR! platform_get_resource fail\n",
				ld->name);
			goto err;
		}
		res_size = resource_size(res);

		dpctl->dp_base = ioremap_nocache(res->start, res_size);
		dpctl->dp_size = res_size;
	}
	dpld->dp_base = dpctl->dp_base;
	dpld->dp_size = dpctl->dp_size;

	mif_info("%s: dp_type %d, aligned %d, dp_base 0x%08X, dp_size %d\n",
		ld->name, dpld->dp_type, ld->aligned, (int)dpld->dp_base,
		dpld->dp_size);

	/* Initialize DPRAM map (physical map -> logical map) */
	ret = dpram_table_init(dpld);
	if (ret < 0) {
		mif_info("%s: ERR! dpram_table_init fail (err %d)\n",
			ld->name, ret);
		goto err;
	}

	/* Disable IPC */
	set_magic(dpld, 0);
	set_access(dpld, 0);
	dpld->dpram_init_status = DPRAM_INIT_STATE_NONE;

	/* Initialize locks, completions, and bottom halves */
	snprintf(dpld->wlock_name, DP_MAX_NAME_LEN, "%s_wlock", ld->name);
	wake_lock_init(&dpld->wlock, WAKE_LOCK_SUSPEND, dpld->wlock_name);

	init_completion(&dpld->dpram_init_cmd);
	init_completion(&dpld->modem_pif_init_done);
	init_completion(&dpld->udl_start_complete);
	init_completion(&dpld->udl_cmd_complete);
	init_completion(&dpld->crash_start_complete);
	init_completion(&dpld->dump_start_complete);
	init_completion(&dpld->dump_recv_done);

	tasklet_init(&dpld->rx_tsk, dpram_ipc_rx_task, task_data);
	if (dpld->ext_op && dpld->ext_op->dl_task)
		tasklet_init(&dpld->dl_tsk, dpld->ext_op->dl_task, task_data);

	/* Prepare RXB queue */
	qsize = DPRAM_MAX_RXBQ_SIZE;
	for (i = 0; i < dpld->max_ipc_dev; i++) {
		bsize = rxbq_get_page_size(get_rx_buff_size(dpld, i));
		dpld->rxbq[i].size = qsize;
		dpld->rxbq[i].in = 0;
		dpld->rxbq[i].out = 0;
		dpld->rxbq[i].rxb = rxbq_create_pool(bsize, qsize);
		if (!dpld->rxbq[i].rxb) {
			mif_info("%s: ERR! %s rxbq_create_pool fail\n",
				ld->name, get_dev_name(i));
			goto err;
		}
		mif_info("%s: %s rxbq_pool created (bsize:%d, qsize:%d)\n",
			ld->name, get_dev_name(i), bsize, qsize);
	}

	/* Prepare a multi-purpose miscellaneous buffer */
	dpld->buff = kzalloc(dpld->dp_size, GFP_KERNEL);
	if (!dpld->buff) {
		mif_info("%s: ERR! kzalloc dpld->buff fail\n", ld->name);
		goto err;
	}

	/* Retrieve DPRAM IRQ GPIO# */
	dpld->gpio_dpram_int = mdm_data->gpio_dpram_int;

	/* Retrieve DPRAM IRQ# */
	if (!dpctl->dpram_irq) {
		dpctl->dpram_irq = platform_get_irq_byname(pdev, "dpram_irq");
		if (dpctl->dpram_irq < 0) {
			mif_info("%s: ERR! platform_get_irq_byname fail\n",
				ld->name);
			goto err;
		}
	}
	dpld->irq = dpctl->dpram_irq;

	/* Retrieve DPRAM IRQ flags */
	if (!dpctl->dpram_irq_flags)
		dpctl->dpram_irq_flags = (IRQF_NO_SUSPEND | IRQF_TRIGGER_LOW);
	dpld->irq_flags = dpctl->dpram_irq_flags;

	/* Register DPRAM interrupt handler */
	snprintf(dpld->irq_name, DP_MAX_NAME_LEN, "%s_irq", ld->name);
	ret = dpram_register_isr(dpld->irq, dpram_irq_handler, dpld->irq_flags,
				dpld->irq_name, dpld);
	if (ret)
		goto err;

	return ld;

err:
	if (dpld) {
		kfree(dpld->buff);
		kfree(dpld);
	}

	return NULL;
}
Example #3
/* PCI probe function */
static int __devinit ps_hdmi_hpd_probe(struct pci_dev *pdev,
				       const struct pci_device_id *id)
{
	int result = 0;
	hdmi_context_t *ctx = g_context;

	if (pdev == NULL || ctx == NULL) {
		pr_err("%s: called with NULL device or context\n", __func__);
		result = -EINVAL;
		return result;
	}

	/* Verify probe is called for the intended device */
	if (pdev->device != PS_MSIC_PCI_DEVICE_ID) {
		pr_err("%s: called for wrong device id = 0x%x\n", __func__,
		       pdev->device);
		result = -EINVAL;
		goto exit;
	}

	pr_debug("pci_enable_device for 0x%x\n",
					PS_MSIC_PCI_DEVICE_ID);
	result = pci_enable_device(pdev);
	if (result) {
		pr_err("%s: Failed to enable MSIC PCI device = 0x%x\n",
		       __func__, PS_MSIC_PCI_DEVICE_ID);
		goto exit;
	}

	/* Map IO region for IRQ registers */
	ctx->dev.irq_io_address = ioremap_nocache(PS_MSIC_VRINT_ADDR,
						  PS_MSIC_VRINT_IOADDR_LEN);
	if (!ctx->dev.irq_io_address) {
		pr_err("%s: Failed to map IO region for MSIC IRQ\n", __func__);
		result = -ENOMEM;
		goto exit2;
	}

	ctx->irq_number = pdev->irq;
	pr_debug("%s: IRQ number assigned = %d\n", __func__, pdev->irq);

	result = request_threaded_irq(ctx->irq_number, ps_hdmi_irq_handler,
				      ctx->hpd_callback, IRQF_SHARED,
				      PS_HDMI_HPD_PCI_DRIVER_NAME,
				      ctx->hpd_data);
	if (result) {
		pr_debug("%s: Register irq interrupt %d failed\n",
			 __func__, ctx->irq_number);
		goto exit3;
	}

	/* Create Freezable workqueue for post resume HPD operations */
	ctx->post_resume_wq = create_freezable_workqueue("MFLD Post-Resume WQ");
	if (!ctx->post_resume_wq) {
		pr_debug("%s: Failed to create post-resume workqueue\n",
			 __func__);
		result = -ENOMEM;
		free_irq(ctx->irq_number, ctx->hpd_data);
		goto exit3;
	}

	INIT_WORK(&ctx->post_resume_work, ps_post_resume_wq);
	return result;

exit3:
	iounmap(ctx->dev.irq_io_address);
exit2:
	pci_disable_device(pdev);
exit:
	pci_dev_put(pdev);
	return result;
}
Example #4
static int cx18_probe(struct pci_dev *pci_dev,
		      const struct pci_device_id *pci_id)
{
	int retval = 0;
	int i;
	u32 devtype;
	struct cx18 *cx;

	/* FIXME - module parameter arrays constrain max instances */
	i = atomic_inc_return(&cx18_instance) - 1;
	if (i >= CX18_MAX_CARDS) {
		printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
		       "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
		return -ENOMEM;
	}

	cx = kzalloc(sizeof(struct cx18), GFP_ATOMIC);
	if (cx == NULL) {
		printk(KERN_ERR "cx18: cannot manage card %d, out of memory\n",
		       i);
		return -ENOMEM;
	}
	cx->pci_dev = pci_dev;
	cx->instance = i;

	retval = v4l2_device_register(&pci_dev->dev, &cx->v4l2_dev);
	if (retval) {
		printk(KERN_ERR "cx18: v4l2_device_register of card %d failed"
		       "\n", cx->instance);
		kfree(cx);
		return retval;
	}
	snprintf(cx->v4l2_dev.name, sizeof(cx->v4l2_dev.name), "cx18-%d",
		 cx->instance);
	CX18_INFO("Initializing card %d\n", cx->instance);

	cx18_process_options(cx);
	if (cx->options.cardtype == -1) {
		retval = -ENODEV;
		goto err;
	}

	retval = cx18_init_struct1(cx);
	if (retval)
		goto err;

	CX18_DEBUG_INFO("base addr: 0x%llx\n", (u64)cx->base_addr);

	/* PCI Device Setup */
	retval = cx18_setup_pci(cx, pci_dev, pci_id);
	if (retval != 0)
		goto free_workqueues;

	/* map io memory */
	CX18_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
		   (u64)cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
	cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
				       CX18_MEM_SIZE);
	if (!cx->enc_mem) {
		CX18_ERR("ioremap failed. Can't get a window into CX23418 "
			 "memory and register space\n");
		CX18_ERR("Each capture card with a CX23418 needs 64 MB of "
			 "vmalloc address space for the window\n");
		CX18_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n");
		CX18_ERR("Use the vmalloc= kernel command line option to set "
			 "VmallocTotal to a larger value\n");
		retval = -ENOMEM;
		goto free_mem;
	}
	cx->reg_mem = cx->enc_mem + CX18_REG_OFFSET;
	devtype = cx18_read_reg(cx, 0xC72028);
	switch (devtype & 0xff000000) {
	case 0xff000000:
		CX18_INFO("cx23418 revision %08x (A)\n", devtype);
		break;
	case 0x01000000:
		CX18_INFO("cx23418 revision %08x (B)\n", devtype);
		break;
	default:
		CX18_INFO("cx23418 revision %08x (Unknown)\n", devtype);
		break;
	}

	cx18_init_power(cx, 1);
	cx18_init_memory(cx);

	cx->scb = (struct cx18_scb __iomem *)(cx->enc_mem + SCB_OFFSET);
	cx18_init_scb(cx);

	cx18_gpio_init(cx);

	/* Initialize integrated A/V decoder early to set PLLs, just in case */
	retval = cx18_av_probe(cx);
	if (retval) {
		CX18_ERR("Could not register A/V decoder subdevice\n");
		goto free_map;
	}

	/* Initialize GPIO Reset Controller to do chip resets during i2c init */
	if (cx->card->hw_all & CX18_HW_GPIO_RESET_CTRL) {
		if (cx18_gpio_register(cx, CX18_HW_GPIO_RESET_CTRL) != 0)
			CX18_WARN("Could not register GPIO reset controller"
				  "subdevice; proceeding anyway.\n");
		else
			cx->hw_flags |= CX18_HW_GPIO_RESET_CTRL;
	}

	/* active i2c  */
	CX18_DEBUG_INFO("activating i2c...\n");
	retval = init_cx18_i2c(cx);
	if (retval) {
		CX18_ERR("Could not initialize i2c\n");
		goto free_map;
	}

	if (cx->card->hw_all & CX18_HW_TVEEPROM) {
		/* Based on the model number the cardtype may be changed.
		   The PCI IDs are not always reliable. */
		const struct cx18_card *orig_card = cx->card;
		cx18_process_eeprom(cx);

		if (cx->card != orig_card) {
			/* Changed the cardtype; re-reset the I2C chips */
			cx18_gpio_init(cx);
			cx18_call_hw(cx, CX18_HW_GPIO_RESET_CTRL,
					core, reset, (u32) CX18_GPIO_RESET_I2C);
		}
	}
	if (cx->card->comment)
		CX18_INFO("%s", cx->card->comment);
	if (cx->card->v4l2_capabilities == 0) {
		retval = -ENODEV;
		goto free_i2c;
	}
	cx18_init_memory(cx);
	cx18_init_scb(cx);

	/* Register IRQ */
	retval = request_irq(cx->pci_dev->irq, cx18_irq_handler,
			     IRQF_SHARED, cx->v4l2_dev.name, (void *)cx);
	if (retval) {
		CX18_ERR("Failed to register irq %d\n", retval);
		goto free_i2c;
	}

	if (cx->std == 0)
		cx->std = V4L2_STD_NTSC_M;

	if (cx->options.tuner == -1) {
		for (i = 0; i < CX18_CARD_MAX_TUNERS; i++) {
			if ((cx->std & cx->card->tuners[i].std) == 0)
				continue;
			cx->options.tuner = cx->card->tuners[i].tuner;
			break;
		}
	}
	/* if no tuner was found, then pick the first tuner in the card list */
	if (cx->options.tuner == -1 && cx->card->tuners[0].std) {
		cx->std = cx->card->tuners[0].std;
		if (cx->std & V4L2_STD_PAL)
			cx->std = V4L2_STD_PAL_BG | V4L2_STD_PAL_H;
		else if (cx->std & V4L2_STD_NTSC)
			cx->std = V4L2_STD_NTSC_M;
		else if (cx->std & V4L2_STD_SECAM)
			cx->std = V4L2_STD_SECAM_L;
		cx->options.tuner = cx->card->tuners[0].tuner;
	}
	if (cx->options.radio == -1)
		cx->options.radio = (cx->card->radio_input.audio_type != 0);

	/* The card is now fully identified, continue with card-specific
	   initialization. */
	cx18_init_struct2(cx);

	cx18_init_subdevs(cx);

	if (cx->std & V4L2_STD_525_60)
		cx->is_60hz = 1;
	else
		cx->is_50hz = 1;

	cx2341x_handler_set_50hz(&cx->cxhdl, !cx->is_60hz);

	if (cx->options.radio > 0)
		cx->v4l2_cap |= V4L2_CAP_RADIO;

	if (cx->options.tuner > -1) {
		struct tuner_setup setup;

		setup.addr = ADDR_UNSET;
		setup.type = cx->options.tuner;
		setup.mode_mask = T_ANALOG_TV;  /* matches TV tuners */
		if (cx->options.radio > 0)
			setup.mode_mask |= T_RADIO;
		setup.tuner_callback = (setup.type == TUNER_XC2028) ?
			cx18_reset_tuner_gpio : NULL;
		cx18_call_all(cx, tuner, s_type_addr, &setup);
		if (setup.type == TUNER_XC2028) {
			static struct xc2028_ctrl ctrl = {
				.fname = XC2028_DEFAULT_FIRMWARE,
				.max_len = 64,
			};
			struct v4l2_priv_tun_config cfg = {
				.tuner = cx->options.tuner,
				.priv = &ctrl,
			};
			cx18_call_all(cx, tuner, s_config, &cfg);
		}
	}

	/* The tuner is fixed to the standard. The other inputs (e.g. S-Video)
	   are not. */
	cx->tuner_std = cx->std;
	if (cx->std == V4L2_STD_ALL)
		cx->std = V4L2_STD_NTSC_M;

	retval = cx18_streams_setup(cx);
	if (retval) {
		CX18_ERR("Error %d setting up streams\n", retval);
		goto free_irq;
	}
	retval = cx18_streams_register(cx);
	if (retval) {
		CX18_ERR("Error %d registering devices\n", retval);
		goto free_streams;
	}

	CX18_INFO("Initialized card: %s\n", cx->card_name);

	/* Load cx18 submodules (cx18-alsa) */
	request_modules(cx);
	return 0;

free_streams:
	cx18_streams_cleanup(cx, 1);
free_irq:
	free_irq(cx->pci_dev->irq, (void *)cx);
free_i2c:
	exit_cx18_i2c(cx);
free_map:
	cx18_iounmap(cx);
free_mem:
	release_mem_region(cx->base_addr, CX18_MEM_SIZE);
free_workqueues:
	destroy_workqueue(cx->in_work_queue);
err:
	if (retval == 0)
		retval = -ENODEV;
	CX18_ERR("Error %d on initialization\n", retval);

	v4l2_device_unregister(&cx->v4l2_dev);
	kfree(cx);
	return retval;
}

int cx18_init_on_first_open(struct cx18 *cx)
{
	int video_input;
	int fw_retry_count = 3;
	struct v4l2_frequency vf;
	struct cx18_open_id fh;
	v4l2_std_id std;

	fh.cx = cx;

	if (test_bit(CX18_F_I_FAILED, &cx->i_flags))
		return -ENXIO;

	if (test_and_set_bit(CX18_F_I_INITED, &cx->i_flags))
		return 0;

	while (--fw_retry_count > 0) {
		/* load firmware */
		if (cx18_firmware_init(cx) == 0)
			break;
		if (fw_retry_count > 1)
			CX18_WARN("Retry loading firmware\n");
	}

	if (fw_retry_count == 0) {
		set_bit(CX18_F_I_FAILED, &cx->i_flags);
		return -ENXIO;
	}
	set_bit(CX18_F_I_LOADED_FW, &cx->i_flags);

	/*
	 * Init the firmware twice to work around a silicon bug
	 * with the digital TS.
	 *
	 * The second firmware load requires us to normalize the APU state,
	 * or the audio for the first analog capture will be badly incorrect.
	 *
	 * I can't seem to call APU_RESETAI and have it succeed without the
	 * APU capturing audio, so we start and stop it here to do the reset
	 */

	/* MPEG Encoding, 224 kbps, MPEG Layer II, 48 ksps */
	cx18_vapi(cx, CX18_APU_START, 2, CX18_APU_ENCODING_METHOD_MPEG|0xb9, 0);
	cx18_vapi(cx, CX18_APU_RESETAI, 0);
	cx18_vapi(cx, CX18_APU_STOP, 1, CX18_APU_ENCODING_METHOD_MPEG);

	fw_retry_count = 3;
	while (--fw_retry_count > 0) {
		/* load firmware */
		if (cx18_firmware_init(cx) == 0)
			break;
		if (fw_retry_count > 1)
			CX18_WARN("Retry loading firmware\n");
	}

	if (fw_retry_count == 0) {
		set_bit(CX18_F_I_FAILED, &cx->i_flags);
		return -ENXIO;
	}

	/*
	 * The second firmware load requires us to normalize the APU state,
	 * or the audio for the first analog capture will be badly incorrect.
	 *
	 * I can't seem to call APU_RESETAI and have it succeed without the
	 * APU capturing audio, so we start and stop it here to do the reset
	 */

	/* MPEG Encoding, 224 kbps, MPEG Layer II, 48 ksps */
	cx18_vapi(cx, CX18_APU_START, 2, CX18_APU_ENCODING_METHOD_MPEG|0xb9, 0);
	cx18_vapi(cx, CX18_APU_RESETAI, 0);
	cx18_vapi(cx, CX18_APU_STOP, 1, CX18_APU_ENCODING_METHOD_MPEG);

	/* Init the A/V decoder, if it hasn't been already */
	v4l2_subdev_call(cx->sd_av, core, load_fw);

	vf.tuner = 0;
	vf.type = V4L2_TUNER_ANALOG_TV;
	vf.frequency = 6400; /* the tuner 'baseline' frequency */

	/* Set initial frequency. For PAL/SECAM broadcasts no
	   'default' channel exists AFAIK. */
	if (cx->std == V4L2_STD_NTSC_M_JP)
		vf.frequency = 1460;	/* ch. 1 91250*16/1000 */
	else if (cx->std & V4L2_STD_NTSC_M)
		vf.frequency = 1076;	/* ch. 4 67250*16/1000 */

	video_input = cx->active_input;
	cx->active_input++;	/* Force update of input */
	cx18_s_input(NULL, &fh, video_input);

	/* Let the VIDIOC_S_STD ioctl do all the work, keeps the code
	   in one place. */
	cx->std++;		/* Force full standard initialization */
	std = (cx->tuner_std == V4L2_STD_ALL) ? V4L2_STD_NTSC_M : cx->tuner_std;
	cx18_s_std(NULL, &fh, std);
	cx18_s_frequency(NULL, &fh, &vf);
	return 0;
}

static void cx18_cancel_in_work_orders(struct cx18 *cx)
{
	int i;
	for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++)
		cancel_work_sync(&cx->in_work_order[i].work);
}
Example #5
int __init icu_of_init(struct device_node *node, struct device_node *parent)
{
	struct device_node *eiu_node;
	struct resource res;
	int i, ret;

	for (i = 0; i < MAX_IM; i++) {
		if (of_address_to_resource(node, i, &res))
			panic("Failed to get icu memory range");

		if (!request_mem_region(res.start, resource_size(&res),
					res.name))
			pr_err("Failed to request icu memory");

		ltq_icu_membase[i] = ioremap_nocache(res.start,
					resource_size(&res));
		if (!ltq_icu_membase[i])
			panic("Failed to remap icu memory");
	}

	/* the external interrupts are optional and xway only */
	eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
	if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
		/* find out how many external irq sources we have */
		exin_avail = of_irq_count(eiu_node);

		if (exin_avail > MAX_EIU)
			exin_avail = MAX_EIU;

		ret = of_irq_to_resource_table(eiu_node,
						ltq_eiu_irq, exin_avail);
		if (ret != exin_avail)
			panic("failed to load external irq resources");

		if (!request_mem_region(res.start, resource_size(&res),
							res.name))
			pr_err("Failed to request eiu memory");

		ltq_eiu_membase = ioremap_nocache(res.start,
							resource_size(&res));
		if (!ltq_eiu_membase)
			panic("Failed to remap eiu memory");
	}

	/* turn off all irqs by default */
	for (i = 0; i < MAX_IM; i++) {
		/* make sure all irqs are turned off by default */
		ltq_icu_w32(i, 0, LTQ_ICU_IM0_IER);
		/* clear all possibly pending interrupts */
		ltq_icu_w32(i, ~0, LTQ_ICU_IM0_ISR);
	}

	mips_cpu_irq_init();

	for (i = 0; i < MAX_IM; i++)
		setup_irq(i + 2, &cascade);

	if (cpu_has_vint) {
		pr_info("Setting up vectored interrupts\n");
		set_vi_handler(2, ltq_hw0_irqdispatch);
		set_vi_handler(3, ltq_hw1_irqdispatch);
		set_vi_handler(4, ltq_hw2_irqdispatch);
		set_vi_handler(5, ltq_hw3_irqdispatch);
		set_vi_handler(6, ltq_hw4_irqdispatch);
		set_vi_handler(7, ltq_hw5_irqdispatch);
	}

	ltq_domain = irq_domain_add_linear(node,
		(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
		&irq_domain_ops, 0);

#if defined(CONFIG_MIPS_MT_SMP)
	if (cpu_has_vint) {
		pr_info("Setting up IPI vectored interrupts\n");
		set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ltq_sw0_irqdispatch);
		set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ltq_sw1_irqdispatch);
	}
	arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ,
		&irq_resched);
	arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call);
#endif

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
		IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#else
	set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
		IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#endif

	/* tell oprofile which irq to use */
	cp0_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);

	/*
	 * if the timer irq is not one of the mips irqs we need to
	 * create a mapping
	 */
	if (MIPS_CPU_TIMER_IRQ != 7)
		irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ);

	return 0;
}
Example #6
static int __devinit
scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	u8 reg;

	/* enable decoding of the flash region in the south bridge */
	pci_read_config_byte(dev, CSB5_FCR, &reg);
	pci_write_config_byte(dev, CSB5_FCR, reg | CSB5_FCR_DECODE_ALL);

	if (!request_mem_region(SCB2_ADDR, SCB2_WINDOW, scb2_map.name)) {
		/*
		 * The BIOS seems to mark the flash region as 'reserved'
		 * in the e820 map.  Warn and go about our business.
		 */
		printk(KERN_WARNING MODNAME
		    ": warning - can't reserve rom window, continuing\n");
		region_fail = 1;
	}

	/* remap the IO window (w/o caching) */
	scb2_ioaddr = ioremap_nocache(SCB2_ADDR, SCB2_WINDOW);
	if (!scb2_ioaddr) {
		printk(KERN_ERR MODNAME ": Failed to ioremap window!\n");
		if (!region_fail)
			release_mem_region(SCB2_ADDR, SCB2_WINDOW);
		return -ENOMEM;
	}

	scb2_map.phys = SCB2_ADDR;
	scb2_map.virt = scb2_ioaddr;
	scb2_map.size = SCB2_WINDOW;

	simple_map_init(&scb2_map);

	/* try to find a chip */
	scb2_mtd = do_map_probe("cfi_probe", &scb2_map);

	if (!scb2_mtd) {
		printk(KERN_ERR MODNAME ": flash probe failed!\n");
		iounmap(scb2_ioaddr);
		if (!region_fail)
			release_mem_region(SCB2_ADDR, SCB2_WINDOW);
		return -ENODEV;
	}

	scb2_mtd->owner = THIS_MODULE;
	if (scb2_fixup_mtd(scb2_mtd) < 0) {
		del_mtd_device(scb2_mtd);
		map_destroy(scb2_mtd);
		iounmap(scb2_ioaddr);
		if (!region_fail)
			release_mem_region(SCB2_ADDR, SCB2_WINDOW);
		return -ENODEV;
	}

	printk(KERN_NOTICE MODNAME ": chip size 0x%x at offset 0x%x\n",
	       scb2_mtd->size, SCB2_WINDOW - scb2_mtd->size);

	add_mtd_device(scb2_mtd);

	return 0;
}
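
The matching removal path mirrors this probe. A hedged sketch using the same module-scope state (scb2_mtd, scb2_ioaddr, region_fail); the driver's real remove function may differ in detail:

static void __devexit
scb2_flash_remove(struct pci_dev *dev)
{
	if (!scb2_mtd)
		return;

	/* undo the probe in reverse: MTD device, map, ioremap, mem region */
	del_mtd_device(scb2_mtd);
	map_destroy(scb2_mtd);
	scb2_mtd = NULL;

	iounmap(scb2_ioaddr);
	scb2_ioaddr = NULL;

	if (!region_fail)
		release_mem_region(SCB2_ADDR, SCB2_WINDOW);
}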
Example #7
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap_nocache(mem->bus.base + mem->bus.offset,
						mem->bus.size);

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
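
On Alpha, bus.addr holds the ioremap() cookie, so the paired io_mem_free callback has to unmap it. A sketch of what that counterpart looks like; the exact upstream implementation is not shown in this example:

static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev,
				   struct ttm_mem_reg *mem)
{
#ifdef __alpha__
	/* release the mapping taken in radeon_ttm_io_mem_reserve() */
	if (mem->bus.addr) {
		iounmap(mem->bus.addr);
		mem->bus.addr = NULL;
	}
#endif
}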
Example #8
static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int new_timeout;
	unsigned int value;
	static const struct watchdog_info ident = {
		.options =		WDIOF_SETTIMEOUT |
					WDIOF_KEEPALIVEPING |
					WDIOF_MAGICCLOSE,
		.identity =		"RC32434_WDT Watchdog",
	};
	switch (cmd) {
	case WDIOC_GETSUPPORT:
		if (copy_to_user(argp, &ident, sizeof(ident)))
			return -EFAULT;
		break;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		value = 0;
		if (copy_to_user(argp, &value, sizeof(int)))
			return -EFAULT;
		break;
	case WDIOC_SETOPTIONS:
		if (copy_from_user(&value, argp, sizeof(int)))
			return -EFAULT;
		switch (value) {
		case WDIOS_ENABLECARD:
			rc32434_wdt_start();
			break;
		case WDIOS_DISABLECARD:
			rc32434_wdt_stop();
			break;
		default:
			return -EINVAL;
		}
		break;
	case WDIOC_KEEPALIVE:
		rc32434_wdt_ping();
		break;
	case WDIOC_SETTIMEOUT:
		if (copy_from_user(&new_timeout, argp, sizeof(int)))
			return -EFAULT;
		if (rc32434_wdt_set(new_timeout))
			return -EINVAL;
		/* Fall through */
	case WDIOC_GETTIMEOUT:
		return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
	default:
		return -ENOTTY;
	}

	return 0;
}
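
The ioctl cases above implement the standard Linux watchdog ABI, so a user-space client drives them through /dev/watchdog. A minimal, self-contained usage sketch (not part of the driver source):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int timeout = 30;

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* WDIOC_SETTIMEOUT case above */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* WDIOC_KEEPALIVE case above */
	write(fd, "V", 1);	/* magic close, matching WDIOF_MAGICCLOSE */
	close(fd);
	return 0;
}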

static const struct file_operations rc32434_wdt_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.write		= rc32434_wdt_write,
	.unlocked_ioctl	= rc32434_wdt_ioctl,
	.open		= rc32434_wdt_open,
	.release	= rc32434_wdt_release,
};

static struct miscdevice rc32434_wdt_miscdev = {
	.minor	= WATCHDOG_MINOR,
	.name	= "watchdog",
	.fops	= &rc32434_wdt_fops,
};

static char banner[] __devinitdata = KERN_INFO PFX
		"Watchdog Timer version " VERSION ", timer margin: %d sec\n";

static int __devinit rc32434_wdt_probe(struct platform_device *pdev)
{
	int ret;
	struct resource *r;

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rb532_wdt_res");
	if (!r) {
		printk(KERN_ERR PFX "failed to retrieve resources\n");
		return -ENODEV;
	}

	wdt_reg = ioremap_nocache(r->start, resource_size(r));
	if (!wdt_reg) {
		printk(KERN_ERR PFX "failed to remap I/O resources\n");
		return -ENXIO;
	}

	spin_lock_init(&rc32434_wdt_device.io_lock);

	/* Make sure the watchdog is not running */
	rc32434_wdt_stop();

	/* Check that the heartbeat value is within its range;
	 * if not, reset it to the default */
	if (rc32434_wdt_set(timeout)) {
		rc32434_wdt_set(WATCHDOG_TIMEOUT);
		printk(KERN_INFO PFX
			"timeout value must be between 0 and %d\n",
			WTCOMP2SEC((u32)-1));
	}

	ret = misc_register(&rc32434_wdt_miscdev);
	if (ret < 0) {
		printk(KERN_ERR PFX "failed to register watchdog device\n");
		goto unmap;
	}

	printk(banner, timeout);

	return 0;

unmap:
	iounmap(wdt_reg);
	return ret;
}

static int __devexit rc32434_wdt_remove(struct platform_device *pdev)
{
	misc_deregister(&rc32434_wdt_miscdev);
	iounmap(wdt_reg);
	return 0;
}
Example #9
static int jz4740_adc_probe(struct platform_device *pdev)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	struct jz4740_adc *adc;
	struct resource *mem_base;
	int ret;
	int irq_base;

	adc = devm_kzalloc(&pdev->dev, sizeof(*adc), GFP_KERNEL);
	if (!adc) {
		dev_err(&pdev->dev, "Failed to allocate driver structure\n");
		return -ENOMEM;
	}

	adc->irq = platform_get_irq(pdev, 0);
	if (adc->irq < 0) {
		ret = adc->irq;
		dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
		return ret;
	}

	irq_base = platform_get_irq(pdev, 1);
	if (irq_base < 0) {
		dev_err(&pdev->dev, "Failed to get irq base: %d\n", irq_base);
		return irq_base;
	}

	mem_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem_base) {
		dev_err(&pdev->dev, "Failed to get platform mmio resource\n");
		return -ENOENT;
	}

	/* Only request the shared registers for the MFD driver */
	adc->mem = request_mem_region(mem_base->start, JZ_REG_ADC_STATUS,
					pdev->name);
	if (!adc->mem) {
		dev_err(&pdev->dev, "Failed to request mmio memory region\n");
		return -EBUSY;
	}

	adc->base = ioremap_nocache(adc->mem->start, resource_size(adc->mem));
	if (!adc->base) {
		ret = -EBUSY;
		dev_err(&pdev->dev, "Failed to ioremap mmio memory\n");
		goto err_release_mem_region;
	}

	adc->clk = clk_get(&pdev->dev, "adc");
	if (IS_ERR(adc->clk)) {
		ret = PTR_ERR(adc->clk);
		dev_err(&pdev->dev, "Failed to get clock: %d\n", ret);
		goto err_iounmap;
	}

	spin_lock_init(&adc->lock);
	atomic_set(&adc->clk_ref, 0);

	platform_set_drvdata(pdev, adc);

	gc = irq_alloc_generic_chip("INTC", 1, irq_base, adc->base,
		handle_level_irq);
	if (!gc) {
		ret = -ENOMEM;
		goto err_clk_put;
	}

	ct = gc->chip_types;
	ct->regs.mask = JZ_REG_ADC_CTRL;
	ct->regs.ack = JZ_REG_ADC_STATUS;
	ct->chip.irq_mask = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->chip.irq_ack = irq_gc_ack_set_bit;

	irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);

	adc->gc = gc;

	irq_set_handler_data(adc->irq, gc);
	irq_set_chained_handler(adc->irq, jz4740_adc_irq_demux);

	writeb(0x00, adc->base + JZ_REG_ADC_ENABLE);
	writeb(0xff, adc->base + JZ_REG_ADC_CTRL);

	ret = mfd_add_devices(&pdev->dev, 0, jz4740_adc_cells,
			      ARRAY_SIZE(jz4740_adc_cells), mem_base,
			      irq_base, NULL);
	if (ret < 0)
		goto err_clk_put;

	return 0;

err_clk_put:
	clk_put(adc->clk);
err_iounmap:
	iounmap(adc->base);
err_release_mem_region:
	release_mem_region(adc->mem->start, resource_size(adc->mem));
	return ret;
}
Example #10
int ehci_xls_probe_internal(const struct hc_driver *driver,
	struct platform_device *pdev)
{
	struct usb_hcd  *hcd;
	struct resource *res;
	int retval, irq;

	
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Found HC with no IRQ. Check %s setup!\n",
				dev_name(&pdev->dev));
		return -ENODEV;
	}

	
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Error: MMIO Handle %s setup!\n",
				dev_name(&pdev->dev));
		return -ENODEV;
	}
	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		retval = -ENOMEM;
		goto err1;
	}

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
				driver->description)) {
		dev_dbg(&pdev->dev, "controller already in use\n");
		retval = -EBUSY;
		goto err2;
	}
	hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);

	if (hcd->regs == NULL) {
		dev_dbg(&pdev->dev, "error mapping memory\n");
		retval = -EFAULT;
		goto err3;
	}

	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (retval != 0)
		goto err4;
	return retval;

err4:
	iounmap(hcd->regs);
err3:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err2:
	usb_put_hcd(hcd);
err1:
	dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev),
			retval);
	return retval;
}
Example #11
static int __devinit esp_sun3x_probe(struct platform_device *dev)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	struct resource *res;
	int err = -ENOMEM;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));
	if (!host)
		goto fail;

	host->max_id = 8;
	esp = shost_priv(host);

	esp->host = host;
	esp->dev = dev;
	esp->ops = &sun3x_esp_ops;

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res || !res->start)
		goto fail_unlink;

	esp->regs = ioremap_nocache(res->start, 0x20);
	if (!esp->regs)
		goto fail_unlink;

	res = platform_get_resource(dev, IORESOURCE_MEM, 1);
	if (!res || !res->start)
		goto fail_unmap_regs;

	esp->dma_regs = ioremap_nocache(res->start, 0x10);
	if (!esp->dma_regs)
		goto fail_unmap_regs;

	esp->command_block = dma_alloc_coherent(esp->dev, 16,
						&esp->command_block_dma,
						GFP_KERNEL);
	if (!esp->command_block)
		goto fail_unmap_regs_dma;

	host->irq = platform_get_irq(dev, 0);
	err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
			  "SUN3X ESP", esp);
	if (err < 0)
		goto fail_unmap_command_block;

	esp->scsi_id = 7;
	esp->host->this_id = esp->scsi_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);
	esp->cfreq = 20000000;

	dev_set_drvdata(&dev->dev, esp);

	err = scsi_esp_register(esp, &dev->dev);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);
fail_unmap_command_block:
	dma_free_coherent(esp->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
fail_unmap_regs_dma:
	iounmap(esp->dma_regs);
fail_unmap_regs:
	iounmap(esp->regs);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}
Example #12
static int __devinit pxa930_rotary_probe(struct platform_device *pdev)
{
	struct pxa930_rotary_platform_data *pdata = pdev->dev.platform_data;
	struct pxa930_rotary *r;
	struct input_dev *input_dev;
	struct resource *res;
	int irq;
	int err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no irq for rotary controller\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no I/O memory defined\n");
		return -ENXIO;
	}

	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -EINVAL;
	}

	r = kzalloc(sizeof(struct pxa930_rotary), GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	r->mmio_base = ioremap_nocache(res->start, resource_size(res));
	if (r->mmio_base == NULL) {
		dev_err(&pdev->dev, "failed to remap IO memory\n");
		err = -ENXIO;
		goto failed_free;
	}

	r->pdata = pdata;
	platform_set_drvdata(pdev, r);

	/* allocate and register the input device */
	input_dev = input_allocate_device();
	if (!input_dev) {
		dev_err(&pdev->dev, "failed to allocate input device\n");
		err = -ENOMEM;
		goto failed_free_io;
	}

	input_dev->name = pdev->name;
	input_dev->id.bustype = BUS_HOST;
	input_dev->open = pxa930_rotary_open;
	input_dev->close = pxa930_rotary_close;
	input_dev->dev.parent = &pdev->dev;

	if (pdata->up_key && pdata->down_key) {
		__set_bit(pdata->up_key, input_dev->keybit);
		__set_bit(pdata->down_key, input_dev->keybit);
		__set_bit(EV_KEY, input_dev->evbit);
	} else {
		__set_bit(pdata->rel_code, input_dev->relbit);
		__set_bit(EV_REL, input_dev->evbit);
	}

	r->input_dev = input_dev;
	input_set_drvdata(input_dev, r);

	err = request_irq(irq, rotary_irq, 0,
			"enhanced rotary", r);
	if (err) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto failed_free_input;
	}

	err = input_register_device(input_dev);
	if (err) {
		dev_err(&pdev->dev, "failed to register input device\n");
		goto failed_free_irq;
	}

	return 0;

failed_free_irq:
	free_irq(irq, r);
failed_free_input:
	input_free_device(input_dev);
failed_free_io:
	iounmap(r->mmio_base);
failed_free:
	kfree(r);
	return err;
}
Example #13
void *
osl_reg_map(uint32 pa, uint size)
{
	return (ioremap_nocache((unsigned long)pa, (unsigned long)size));
}
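
A wrapper like this normally has an unmap counterpart. A minimal sketch assuming the usual pairing with iounmap(); the name osl_reg_unmap is an assumption, not taken from the source:

void
osl_reg_unmap(void *va)
{
	/* assumed counterpart to osl_reg_map(): release the mapped window */
	iounmap((void __iomem *)va);
}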
Example #14
static int __devinit isp1761_pci_probe(struct pci_dev *dev,
		const struct pci_device_id *id)
{
	u8 latency, limit;
	__u32 reg_data;
	int retry_count;
	struct usb_hcd *hcd;
	unsigned int devflags = 0;
	int ret_status = 0;

	resource_size_t pci_mem_phy0;
	resource_size_t memlength;

	u8 __iomem *chip_addr;
	u8 __iomem *iobase;
	resource_size_t nxp_pci_io_base;
	resource_size_t iolength;

	if (usb_disabled())
		return -ENODEV;

	if (pci_enable_device(dev) < 0)
		return -ENODEV;

	if (!dev->irq)
		return -ENODEV;

	/* Grab the PLX PCI mem maped port start address we need  */
	nxp_pci_io_base = pci_resource_start(dev, 0);
	iolength = pci_resource_len(dev, 0);

	if (!request_mem_region(nxp_pci_io_base, iolength, "ISP1761 IO MEM")) {
		printk(KERN_ERR "request region #1\n");
		return -EBUSY;
	}

	iobase = ioremap_nocache(nxp_pci_io_base, iolength);
	if (!iobase) {
		printk(KERN_ERR "ioremap #1\n");
		ret_status = -ENOMEM;
		goto cleanup1;
	}
	/* Grab the PLX PCI shared memory of the ISP 1761 we need  */
	pci_mem_phy0 = pci_resource_start(dev, 3);
	memlength = pci_resource_len(dev, 3);
	if (memlength < 0xffff) {
		printk(KERN_ERR "memory length for this resource is wrong\n");
		ret_status = -ENOMEM;
		goto cleanup2;
	}

	if (!request_mem_region(pci_mem_phy0, memlength, "ISP-PCI")) {
		printk(KERN_ERR "host controller already in use\n");
		ret_status = -EBUSY;
		goto cleanup2;
	}

	/* map available memory */
	chip_addr = ioremap_nocache(pci_mem_phy0, memlength);
	if (!chip_addr) {
		printk(KERN_ERR "Error ioremap failed\n");
		ret_status = -ENOMEM;
		goto cleanup3;
	}

	/* bad pci latencies can contribute to overruns */
	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &latency);
	if (latency) {
		pci_read_config_byte(dev, PCI_MAX_LAT, &limit);
		if (limit && limit < latency)
			pci_write_config_byte(dev, PCI_LATENCY_TIMER, limit);
	}

	/* Try to check whether we can access Scratch Register of
	 * Host Controller or not. The initial PCI access is retried until
	 * local init for the PCI bridge is completed
	 */
	retry_count = 20;
	reg_data = 0;
	while ((reg_data != 0xFACE) && retry_count) {
		/*by default host is in 16bit mode, so
		 * io operations at this stage must be 16 bit
		 * */
		writel(0xface, chip_addr + HC_SCRATCH_REG);
		udelay(100);
		reg_data = readl(chip_addr + HC_SCRATCH_REG) & 0x0000ffff;
		retry_count--;
	}

	iounmap(chip_addr);

	/* Host Controller presence is detected by writing to scratch register
	 * and reading back and checking the contents are same or not
	 */
	if (reg_data != 0xFACE) {
		dev_err(&dev->dev, "scratch register mismatch %x\n", reg_data);
		ret_status = -ENOMEM;
		goto cleanup3;
	}

	pci_set_master(dev);

	/* configure PLX PCI chip to pass interrupts */
#define PLX_INT_CSR_REG 0x68
	reg_data = readl(iobase + PLX_INT_CSR_REG);
	reg_data |= 0x900;
	writel(reg_data, iobase + PLX_INT_CSR_REG);

	dev->dev.dma_mask = NULL;
	hcd = isp1760_register(pci_mem_phy0, memlength, dev->irq,
		IRQF_SHARED | IRQF_DISABLED, &dev->dev, dev_name(&dev->dev),
		devflags);
	if (IS_ERR(hcd)) {
		ret_status = -ENODEV;
		goto cleanup3;
	}

	/* done with PLX IO access */
	iounmap(iobase);
	release_mem_region(nxp_pci_io_base, iolength);

	pci_set_drvdata(dev, hcd);
	return 0;

cleanup3:
	release_mem_region(pci_mem_phy0, memlength);
cleanup2:
	iounmap(iobase);
cleanup1:
	release_mem_region(nxp_pci_io_base, iolength);
	return ret_status;
}
Example #15
struct tpm_chip* init_tpm_tis(unsigned long baseaddr, int localities, unsigned int irq)
{
   int i;
   unsigned long addr;
   struct tpm_chip* tpm = NULL;
   uint32_t didvid;
   uint32_t intfcaps;
   uint32_t intmask;

   printk("============= Init TPM TIS Driver ==============\n");

   /*Sanity check the localities input */
   if(localities & ~TPM_TIS_EN_LOCLALL) {
      printk("init_tpm_tis() Invalid locality specification! %X\n", localities);
      goto abort_egress;
   }

   printk("IOMEM Machine Base Address: %lX\n", baseaddr);

   /* Create the tpm data structure */
   tpm = malloc(sizeof(struct tpm_chip));
   if(tpm == NULL) {
      printk("init_tpm_tis() Unable to allocate tpm_chip structure!\n");
      goto abort_egress;
   }
   __init_tpm_chip(tpm);

   /* Set the enabled localities - if 0 we leave default as all enabled */
   if(localities != 0) {
      tpm->enabled_localities = localities;
   }
   printk("Enabled Localities: ");
   for(i = 0; i < 5; ++i) {
      if(locality_enabled(tpm, i)) {
	 printk("%d ", i);
      }
   }
   printk("\n");

   /* Set the base machine address */
   tpm->baseaddr = baseaddr;

   /* Set default timeouts */
   tpm->timeout_a = MILLISECS(TIS_SHORT_TIMEOUT);
   tpm->timeout_b = MILLISECS(TIS_LONG_TIMEOUT);
   tpm->timeout_c = MILLISECS(TIS_SHORT_TIMEOUT);
   tpm->timeout_d = MILLISECS(TIS_SHORT_TIMEOUT);

   /*Map the mmio pages */
   addr = tpm->baseaddr;
   for(i = 0; i < 5; ++i) {
      if(locality_enabled(tpm, i)) {
	 /* Map the page in now */
	 if((tpm->pages[i] = ioremap_nocache(addr, PAGE_SIZE)) == NULL) {
	    printk("Unable to map iomem page a address %p\n", addr);
	    goto abort_egress;
	 }

	 /* Set default locality to the first enabled one */
	 if (tpm->locality < 0) {
	    if(tpm_tis_request_locality(tpm, i) < 0) {
	       printk("Unable to request locality %d??\n", i);
	       goto abort_egress;
	    }
	 }
      }
      addr += PAGE_SIZE;
   }


   /* Get the vendor and device ids */
   didvid = ioread32(TPM_DID_VID(tpm, tpm->locality));
   tpm->did = didvid >> 16;
   tpm->vid = didvid & 0xFFFF;


   /* Get the revision id */
   tpm->rid = ioread8(TPM_RID(tpm, tpm->locality));

   printk("1.2 TPM (device-id=0x%X vendor-id = %X rev-id = %X)\n", tpm->did, tpm->vid, tpm->rid);

   intfcaps = ioread32(TPM_INTF_CAPS(tpm, tpm->locality));
   printk("TPM interface capabilities (0x%x):\n", intfcaps);
   if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
      printk("\tBurst Count Static\n");
   if (intfcaps & TPM_INTF_CMD_READY_INT)
      printk("\tCommand Ready Int Support\n");
   if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
      printk("\tInterrupt Edge Falling\n");
   if (intfcaps & TPM_INTF_INT_EDGE_RISING)
      printk("\tInterrupt Edge Rising\n");
   if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
      printk("\tInterrupt Level Low\n");
   if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
      printk("\tInterrupt Level High\n");
   if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
      printk("\tLocality Change Int Support\n");
   if (intfcaps & TPM_INTF_STS_VALID_INT)
      printk("\tSts Valid Int Support\n");
   if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
      printk("\tData Avail Int Support\n");

   /* Interrupt setup */
   intmask = ioread32(TPM_INT_ENABLE(tpm, tpm->locality));

   intmask |= TPM_INTF_CMD_READY_INT
      | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
      | TPM_INTF_STS_VALID_INT;

   iowrite32(TPM_INT_ENABLE(tpm, tpm->locality), intmask);

   /* If interrupts are enabled, handle it */
   if(irq) {
      if(irq != TPM_PROBE_IRQ) {
	 tpm->irq = irq;
      } else {
	 /*FIXME add irq probing feature later */
	 printk("IRQ probing not implemented\n");
      }
   }

   if(tpm->irq) {
      iowrite8(TPM_INT_VECTOR(tpm, tpm->locality), tpm->irq);

      if(bind_pirq(tpm->irq, 1, tpm_tis_irq_handler, tpm) != 0) {
	 printk("Unabled to request irq: %u for use\n", tpm->irq);
	 printk("Will use polling mode\n");
	 tpm->irq = 0;
      } else {
	 /* Clear all existing */
	 iowrite32(TPM_INT_STATUS(tpm, tpm->locality), ioread32(TPM_INT_STATUS(tpm, tpm->locality)));

	 /* Turn on interrupts */
	 iowrite32(TPM_INT_ENABLE(tpm, tpm->locality), intmask | TPM_GLOBAL_INT_ENABLE);
      }
   }

   if(tpm_get_timeouts(tpm)) {
      printk("Could not get TPM timeouts and durations\n");
      goto abort_egress;
   }
   tpm_continue_selftest(tpm);


   return tpm;
abort_egress:
   if(tpm != NULL) {
      shutdown_tpm_tis(tpm);
   }
   return NULL;
}
Example #16
static int matrox_w1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct matrox_device *dev;
	int err;

	if (pdev->vendor != PCI_VENDOR_ID_MATROX || pdev->device != PCI_DEVICE_ID_MATROX_G400)
		return -ENODEV;

	dev = kzalloc(sizeof(struct matrox_device) +
		       sizeof(struct w1_bus_master), GFP_KERNEL);
	if (!dev) {
		dev_err(&pdev->dev,
			"%s: Failed to create new matrox_device object.\n",
			__func__);
		return -ENOMEM;
	}


	dev->bus_master = (struct w1_bus_master *)(dev + 1);

	/*
	 * True for the G400; some other models need resource 0 instead,
	 * see drivers/video/matrox/matroxfb_base.c
	 */

	dev->phys_addr = pci_resource_start(pdev, 1);

	dev->virt_addr = ioremap_nocache(dev->phys_addr, 16384);
	if (!dev->virt_addr) {
		dev_err(&pdev->dev, "%s: failed to ioremap(0x%lx, %d).\n",
			__func__, dev->phys_addr, 16384);
		err = -EIO;
		goto err_out_free_device;
	}

	dev->base_addr = dev->virt_addr + MATROX_BASE;
	dev->port_index = dev->base_addr + MATROX_PORT_INDEX_OFFSET;
	dev->port_data = dev->base_addr + MATROX_PORT_DATA_OFFSET;
	dev->data_mask = (MATROX_G400_DDC_DATA);

	matrox_w1_hw_init(dev);

	dev->bus_master->data = dev;
	dev->bus_master->read_bit = &matrox_w1_read_ddc_bit;
	dev->bus_master->write_bit = &matrox_w1_write_ddc_bit;

	err = w1_add_master_device(dev->bus_master);
	if (err)
		goto err_out_free_device;

	pci_set_drvdata(pdev, dev);

	dev->found = 1;

	dev_info(&pdev->dev, "Matrox G400 GPIO transport layer for 1-wire.\n");

	return 0;

err_out_free_device:
	if (dev->virt_addr)
		iounmap(dev->virt_addr);
	kfree(dev);

	return err;
}
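
A hedged sketch of the corresponding remove path, unwinding what the probe set up (w1 master device, ioremap, allocation); names beyond those used above follow the w1 API, and the real function may differ:

static void matrox_w1_remove(struct pci_dev *pdev)
{
	struct matrox_device *dev = pci_get_drvdata(pdev);

	/* unwind matrox_w1_probe() */
	if (dev->found)
		w1_remove_master_device(dev->bus_master);
	if (dev->virt_addr)
		iounmap(dev->virt_addr);
	kfree(dev);
}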
Example #17
static int __init parse_options(struct early_serial8250_device *device,
								char *options)
{
	struct uart_port *port = &device->port;
	int mmio, mmio32, length;

	if (!options)
		return -ENODEV;

	port->uartclk = BASE_BAUD * 16;

	mmio = !strncmp(options, "mmio,", 5);
	mmio32 = !strncmp(options, "mmio32,", 7);
	if (mmio || mmio32) {
		port->iotype = (mmio ? UPIO_MEM : UPIO_MEM32);
		port->mapbase = simple_strtoul(options + (mmio ? 5 : 7),
					       &options, 0);
		if (mmio32)
			port->regshift = 2;
#ifdef CONFIG_FIX_EARLYCON_MEM
		set_fixmap_nocache(FIX_EARLYCON_MEM_BASE,
					port->mapbase & PAGE_MASK);
		port->membase =
			(void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);
		port->membase += port->mapbase & ~PAGE_MASK;
#else
		port->membase = ioremap_nocache(port->mapbase, 64);
		if (!port->membase) {
			printk(KERN_ERR "%s: Couldn't ioremap 0x%llx\n",
				__func__,
			       (unsigned long long) port->mapbase);
			return -ENOMEM;
		}
#endif
	} else if (!strncmp(options, "io,", 3)) {
		port->iotype = UPIO_PORT;
		port->iobase = simple_strtoul(options + 3, &options, 0);
		mmio = 0;
	} else
		return -EINVAL;

	options = strchr(options, ',');
	if (options) {
		options++;
		device->baud = simple_strtoul(options, NULL, 0);
		length = min(strcspn(options, " ") + 1,
			     sizeof(device->options));
		strlcpy(device->options, options, length);
	} else {
		device->baud = probe_baud(port);
		snprintf(device->options, sizeof(device->options), "%u",
			device->baud);
	}

	if (mmio || mmio32)
		printk(KERN_INFO
		       "Early serial console at MMIO%s 0x%llx (options '%s')\n",
			mmio32 ? "32" : "",
			(unsigned long long)port->mapbase,
			device->options);
	else
		printk(KERN_INFO
		      "Early serial console at I/O port 0x%lx (options '%s')\n",
			port->iobase,
			device->options);

	return 0;
}
Example #18
static int __devinit jz4740_rtc_probe(struct platform_device *pdev)
{
	int ret;
	struct jz4740_rtc *rtc;
	uint32_t scratchpad;

	rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
	if (!rtc)
		return -ENOMEM;

	rtc->irq = platform_get_irq(pdev, 0);
	if (rtc->irq < 0) {
		ret = -ENOENT;
		dev_err(&pdev->dev, "Failed to get platform irq\n");
		goto err_free;
	}

	rtc->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!rtc->mem) {
		ret = -ENOENT;
		dev_err(&pdev->dev, "Failed to get platform mmio memory\n");
		goto err_free;
	}

	rtc->mem = request_mem_region(rtc->mem->start, resource_size(rtc->mem),
					pdev->name);
	if (!rtc->mem) {
		ret = -EBUSY;
		dev_err(&pdev->dev, "Failed to request mmio memory region\n");
		goto err_free;
	}

	rtc->base = ioremap_nocache(rtc->mem->start, resource_size(rtc->mem));
	if (!rtc->base) {
		ret = -EBUSY;
		dev_err(&pdev->dev, "Failed to ioremap mmio memory\n");
		goto err_release_mem_region;
	}

	spin_lock_init(&rtc->lock);

	platform_set_drvdata(pdev, rtc);

	rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, &jz4740_rtc_ops,
					THIS_MODULE);
	if (IS_ERR(rtc->rtc)) {
		ret = PTR_ERR(rtc->rtc);
		dev_err(&pdev->dev, "Failed to register rtc device: %d\n", ret);
		goto err_iounmap;
	}

	ret = request_irq(rtc->irq, jz4740_rtc_irq, 0,
				pdev->name, rtc);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request rtc irq: %d\n", ret);
		goto err_unregister_rtc;
	}

	scratchpad = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SCRATCHPAD);
	if (scratchpad != 0x12345678) {
		ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SCRATCHPAD, 0x12345678);
		if (!ret)
			ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, 0);
		if (ret) {
			dev_err(&pdev->dev, "Could not write to RTC registers\n");
			goto err_free_irq;
		}
	}

	return 0;

err_free_irq:
	free_irq(rtc->irq, rtc);
err_unregister_rtc:
	rtc_device_unregister(rtc->rtc);
err_iounmap:
	platform_set_drvdata(pdev, NULL);
	iounmap(rtc->base);
err_release_mem_region:
	release_mem_region(rtc->mem->start, resource_size(rtc->mem));
err_free:
	kfree(rtc);

	return ret;
}
Example #19
static int mtk_btcvsd_rx_probe(struct platform_device *pdev)
{
	int ret = 0;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	if (pdev->dev.of_node)
		dev_set_name(&pdev->dev, "%s", MT_SOC_BTCVSD_RX_PCM);

	pr_warn("%s: dev name %s\n", __func__, dev_name(&pdev->dev));

	mDev_btcvsd_rx = &pdev->dev;

#ifdef CONFIG_OF
	Auddrv_BTCVSD_Irq_Map();
#endif

	ret = Register_BTCVSD_Irq(pdev, btcvsd_irq_number);

	if (ret < 0)
		pr_warn("%s request_irq btcvsd_irq_number(%d) Fail\n", __func__, btcvsd_irq_number);

	/* ioremap to INFRA sys */
#ifdef CONFIG_OF
	Auddrv_BTCVSD_Address_Map();
	INFRA_MISC_ADDRESS = (volatile kal_uint32 *)(infra_base + infra_misc_offset);
#else
	AUDIO_INFRA_BASE_VIRTUAL = ioremap_nocache(AUDIO_INFRA_BASE_PHYSICAL, 0x1000);
	INFRA_MISC_ADDRESS = (volatile kal_uint32 *)(AUDIO_INFRA_BASE_VIRTUAL + INFRA_MISC_OFFSET);
#endif
	pr_warn("[BTCVSD probe] INFRA_MISC_ADDRESS = %p\n", INFRA_MISC_ADDRESS);

	pr_warn("%s disable BT IRQ disableBTirq = %d,irq=%d\n", __func__, disableBTirq, btcvsd_irq_number);
	if (disableBTirq == 0) {
		disable_irq(btcvsd_irq_number);
		Disable_CVSD_Wakeup();
		disableBTirq = 1;
	}

	if (!isProbeDone) {
		memset((void *)&BT_CVSD_Mem, 0, sizeof(CVSD_MEMBLOCK_T));
		isProbeDone = 1;
	}

	memset((void *)&btsco, 0, sizeof(btsco));
	btsco.uTXState = BT_SCO_TXSTATE_IDLE;
	btsco.uRXState = BT_SCO_RXSTATE_IDLE;

	/* ioremap to BT HW register base address */
#ifdef CONFIG_OF
	BTSYS_PKV_BASE_ADDRESS = (void *)btsys_pkv_physical_base;
	BTSYS_SRAM_BANK2_BASE_ADDRESS = (void *)btsys_sram_bank2_physical_base;
	bt_hw_REG_PACKET_R = BTSYS_PKV_BASE_ADDRESS + cvsd_mcu_read_offset;
	bt_hw_REG_PACKET_W = BTSYS_PKV_BASE_ADDRESS + cvsd_mcu_write_offset;
	bt_hw_REG_CONTROL = BTSYS_PKV_BASE_ADDRESS + cvsd_packet_indicator;
#else
	BTSYS_PKV_BASE_ADDRESS = ioremap_nocache(AUDIO_BTSYS_PKV_PHYSICAL_BASE, 0x10000);
	BTSYS_SRAM_BANK2_BASE_ADDRESS = ioremap_nocache(AUDIO_BTSYS_SRAM_BANK2_PHYSICAL_BASE, 0x10000);
	bt_hw_REG_PACKET_R = (volatile kal_uint32 *)(BTSYS_PKV_BASE_ADDRESS + CVSD_MCU_READ_OFFSET);
	bt_hw_REG_PACKET_W = (volatile kal_uint32 *)(BTSYS_PKV_BASE_ADDRESS + CVSD_MCU_WRITE_OFFSET);
	bt_hw_REG_CONTROL = (volatile kal_uint32 *)(BTSYS_PKV_BASE_ADDRESS + CVSD_PACKET_INDICATOR);
#endif
	pr_debug("[BTCVSD probe] BTSYS_PKV_BASE_ADDRESS = %p BTSYS_SRAM_BANK2_BASE_ADDRESS = %p\n",
				BTSYS_PKV_BASE_ADDRESS, BTSYS_SRAM_BANK2_BASE_ADDRESS);

	/* allocate dram */
	AudDrv_Allocate_mem_Buffer(mDev_btcvsd_rx, Soc_Aud_Digital_Block_MEM_BTCVSD_RX, sizeof(BT_SCO_RX_T));
	BT_CVSD_Mem.RX_btcvsd_dma_buf =  Get_Mem_Buffer(Soc_Aud_Digital_Block_MEM_BTCVSD_RX);


	return snd_soc_register_platform(&pdev->dev, &mtk_btcvsd_rx_soc_platform);
}
Example #20
static int __devinit
scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	u8 reg;

	/* enable decoding of the flash region in the south bridge */
	pci_read_config_byte(dev, CSB5_FCR, &reg);
	pci_write_config_byte(dev, CSB5_FCR, reg | CSB5_FCR_DECODE_ALL);

	if (!request_mem_region(SCB2_ADDR, SCB2_WINDOW, scb2_map.name)) {
		/*
		 * The BIOS seems to mark the flash region as 'reserved'
		 * in the e820 map.  Warn and go about our business.
		 */
		printk(KERN_WARNING MODNAME
		    ": warning - can't reserve rom window, continuing\n");
		region_fail = 1;
	}

	/* remap the IO window (w/o caching) */
	scb2_ioaddr = ioremap_nocache(SCB2_ADDR, SCB2_WINDOW);
	if (!scb2_ioaddr) {
		printk(KERN_ERR MODNAME ": Failed to ioremap window!\n");
		if (!region_fail)
			release_mem_region(SCB2_ADDR, SCB2_WINDOW);
		return -ENOMEM;
	}

	scb2_map.phys = SCB2_ADDR;
	scb2_map.virt = scb2_ioaddr;
	scb2_map.size = SCB2_WINDOW;

	simple_map_init(&scb2_map);

	/* try to find a chip */
	scb2_mtd = do_map_probe("cfi_probe", &scb2_map);

	if (!scb2_mtd) {
		printk(KERN_ERR MODNAME ": flash probe failed!\n");
		iounmap(scb2_ioaddr);
		if (!region_fail)
			release_mem_region(SCB2_ADDR, SCB2_WINDOW);
		return -ENODEV;
	}

	scb2_mtd->owner = THIS_MODULE;
	if (scb2_fixup_mtd(scb2_mtd) < 0) {
		mtd_device_unregister(scb2_mtd);
		map_destroy(scb2_mtd);
		iounmap(scb2_ioaddr);
		if (!region_fail)
			release_mem_region(SCB2_ADDR, SCB2_WINDOW);
		return -ENODEV;
	}

	printk(KERN_NOTICE MODNAME ": chip size 0x%llx at offset 0x%llx\n",
	       (unsigned long long)scb2_mtd->size,
	       (unsigned long long)(SCB2_WINDOW - scb2_mtd->size));

	mtd_device_register(scb2_mtd, NULL, 0);

	return 0;
}
Example #21
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
				   struct bnxt_qplib_rcfw *rcfw,
				   int msix_vector,
				   int cp_bar_reg_off, int virt_fn,
				   int (*aeq_handler)(struct bnxt_qplib_rcfw *,
						      struct creq_func_event *))
{
	resource_size_t res_base;
	struct cmdq_init init;
	u16 bmap_size;
	int rc;

	/* General */
	rcfw->seq_num = 0;
	rcfw->flags = FIRMWARE_FIRST_FLAG;
	bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
				  sizeof(unsigned long));
	rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
	if (!rcfw->cmdq_bitmap)
		return -ENOMEM;
	rcfw->bmap_size = bmap_size;

	/* CMDQ */
	rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
	if (!res_base)
		return -ENOMEM;

	rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
					      RCFW_COMM_BASE_OFFSET,
					      RCFW_COMM_SIZE);
	if (!rcfw->cmdq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CMDQ BAR region %d mapping failed",
			rcfw->cmdq_bar_reg);
		return -ENOMEM;
	}

	rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
					RCFW_PF_COMM_PROD_OFFSET;

	rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;

	/* CREQ */
	rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
	if (!res_base)
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CREQ BAR region %d resc start is 0!",
			rcfw->creq_bar_reg);
	rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
						   4);
	if (!rcfw->creq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CREQ BAR region %d mapping failed",
			rcfw->creq_bar_reg);
		return -ENOMEM;
	}
	rcfw->creq_qp_event_processed = 0;
	rcfw->creq_func_event_processed = 0;

	rcfw->vector = msix_vector;
	if (aeq_handler)
		rcfw->aeq_handler = aeq_handler;

	tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
		     (unsigned long)rcfw);

	rcfw->requested = false;
	rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
			 "bnxt_qplib_creq", rcfw);
	if (rc) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
		bnxt_qplib_disable_rcfw_channel(rcfw);
		return rc;
	}
	rcfw->requested = true;

	init_waitqueue_head(&rcfw->waitq);

	CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);

	init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
	init.cmdq_size_cmdq_lvl = cpu_to_le16(
		((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) &
		 CMDQ_INIT_CMDQ_SIZE_MASK) |
		((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
		 CMDQ_INIT_CMDQ_LVL_MASK));
	init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);

	/* Write to the Bono mailbox register */
	__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
	return 0;
}
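One idiom above deserves a note: a bitmap covering N command slots occupies BITS_TO_LONGS(N) longs, so the byte count to allocate is BITS_TO_LONGS(N) * sizeof(unsigned long). A minimal sketch of that allocation, with a hypothetical slot count (DEMO_MAX_CMDS):

#include <linux/bitops.h>
#include <linux/slab.h>

#define DEMO_MAX_CMDS	208	/* hypothetical outstanding-command limit */

static unsigned long *demo_alloc_cmd_bitmap(void)
{
	/* BITS_TO_LONGS() rounds the bit count up to whole longs */
	size_t bmap_size = BITS_TO_LONGS(DEMO_MAX_CMDS) * sizeof(unsigned long);

	return kzalloc(bmap_size, GFP_KERNEL);	/* all slots start free */
}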
Example #22
0
/* Initiate DMA Reader */
static int _mhal_alsa_dma_reader_init(void)
{
	struct MStar_DMA_Reader_Struct *dma_reader = &g_dma_reader;
	unsigned int audio_pcm_dmaRdr_bufSz = 0;  /* must be multiple of DMA_RDR_PCM_BUF_UNIT*2 (= 0x2000) */
	unsigned int audio_pcm_dmaRdr_base_pa = 0; /* DMA Reader Input buffer (DM_Prefetch) */
	unsigned int audio_pcm_dmaRdr_base_ba = 0;
	unsigned int audio_pcm_dmaRdr_base_va = 0;
	MAD_PRINT(KERN_INFO "Initiate MStar PCM playback engine\n");

	if ((dma_reader->initialized_status != MAD_TRUE) || (dma_reader->str_mode_info.status != E_RESUME)) {
		audio_pcm_dmaRdr_bufSz = dma_reader->buffer.size;
		audio_pcm_dmaRdr_base_pa = (((unsigned int)_mhal_alsa_read_reg(0x2A90) * _MAD_ADDR_CONVERTOR) + _MAD_DMA_READER_BASE_OFFSET);
		audio_pcm_dmaRdr_base_pa += (((_mhal_alsa_read_reg(0x2AC0) & 0x0F) << 16) * _MAD_ADDR_CONVERTOR);
		audio_pcm_dmaRdr_base_ba = audio_pcm_dmaRdr_base_pa + _MAD_PHYSICAL_ADDR_TO_BUS_ADDR;

		if (audio_pcm_dmaRdr_base_pa >= 0xA0000000) {
			MAD_PRINT(KERN_ERR "Error! Invalid MStar PCM reader address!\n");
			return -EFAULT;
		}
		else if ((audio_pcm_dmaRdr_base_ba % 0x1000)) {
			MAD_PRINT(KERN_ERR "Error! Invalid MStar PCM reader bus address, it should be aligned by 4 KB!\n");
			return -EFAULT;
		}

		/* Convert Bus Address to Virtual Address */
		audio_pcm_dmaRdr_base_va = (unsigned int)ioremap_nocache(audio_pcm_dmaRdr_base_ba, audio_pcm_dmaRdr_bufSz);

		dma_reader->str_mode_info.physical_addr = audio_pcm_dmaRdr_base_pa;
		dma_reader->str_mode_info.bus_addr = audio_pcm_dmaRdr_base_ba;
		dma_reader->str_mode_info.virtual_addr = audio_pcm_dmaRdr_base_va;

		dma_reader->initialized_status = MAD_TRUE;
	}
	else {
		audio_pcm_dmaRdr_bufSz = dma_reader->buffer.size;
		audio_pcm_dmaRdr_base_pa = dma_reader->str_mode_info.physical_addr;
		audio_pcm_dmaRdr_base_ba = dma_reader->str_mode_info.bus_addr;
		audio_pcm_dmaRdr_base_va = dma_reader->str_mode_info.virtual_addr;
	}

	/* init DMA reader address */
	dma_reader->buffer.addr = (unsigned char *)audio_pcm_dmaRdr_base_va;
	dma_reader->buffer.w_ptr = dma_reader->buffer.addr;
	//MAD_PRINT(KERN_INFO "PCM buffer start address = 0x%08X\n", dma_reader->buffer.addr);
	//MAD_PRINT(KERN_INFO "PCM buffer end address = 0x%08X\n", (dma_reader->buffer.addr + dma_reader->buffer.size));

	/* Initial DMA Reader path & clk select (DMA reader -> CH8) */
	_mhal_alsa_write_mask_byte(0x2CB0, 0x07, 0x04); /* SEL_CLK_DMA_READER */
	_mhal_alsa_write_mask_byte(0x2C6B, 0xFF, 0x8F); /* CH8 sel to DMA Rdr */

	_mhal_alsa_write_mask_reg(0x2B82, 0xFFFF, 0x0000); /* reset dma reader */
	_mhal_alsa_write_mask_byte(0x2B8E, 0xFF, ((audio_pcm_dmaRdr_base_pa / _MAD_BYTES_IN_LINE) & 0xFF)); /* RD_BASE_ADDR[7:0] */
	_mhal_alsa_write_mask_reg(0x2B84, 0xFFFF, (((audio_pcm_dmaRdr_base_pa / _MAD_BYTES_IN_LINE) >> 8) & 0xFFFF)); /* RD_BASE_ADDR[23:8] */
	_mhal_alsa_write_mask_byte(0x2B8F, 0x0F, (((audio_pcm_dmaRdr_base_pa / _MAD_BYTES_IN_LINE) >> 24) & 0x0F)); /* RD_BASE_ADDR[27:24] */
	_mhal_alsa_write_mask_reg(0x2B86, 0xFFFF, audio_pcm_dmaRdr_bufSz / _MAD_BYTES_IN_LINE); /* setting : DMA Reader Size */
	_mhal_alsa_write_mask_reg(0x2B88, 0xFFFF, audio_pcm_dmaRdr_bufSz - 12); /* setting : DMA Reader Overrun Thr */
	_mhal_alsa_write_mask_reg(0x2B8A, 0xFFFF, 0x0012); /* setting : DMA Reader Underrun Thr */

	/* Reset and Start DMA Reader */
	_mhal_alsa_write_mask_reg(0x2B80, 0xFFFF, 0x6402);
	_mhal_alsa_write_mask_reg(0x2B80, 0xFFFF, 0x2402);

	/* Clear DMA reader buffer (size is a multiple of 4 bytes, see above) */
	memset(dma_reader->buffer.addr, 0x00, dma_reader->buffer.size);

	/* Flush MIU */
	Chip_Flush_Memory();

	/* Reset Write Pointer */
	dma_reader->buffer.w_ptr = dma_reader->buffer.addr;

	return 0;
}
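The base-address programming above spreads one 28-bit line address (the byte address divided by the bytes-per-line unit) over three registers: bits [7:0], [23:8] and [27:24]. A sketch of just that field split, with hypothetical names; the real code then writes each field through its masked register helper:

/* Pure arithmetic: split (base_pa / bytes_per_line) into the three
 * fields programmed above; no MMIO here, names are hypothetical. */
static void demo_split_base(unsigned int base_pa, unsigned int bytes_per_line,
			    unsigned char *lo8, unsigned short *mid16,
			    unsigned char *hi4)
{
	unsigned int line = base_pa / bytes_per_line;

	*lo8   = line & 0xFF;		/* RD_BASE_ADDR[7:0]   */
	*mid16 = (line >> 8) & 0xFFFF;	/* RD_BASE_ADDR[23:8]  */
	*hi4   = (line >> 24) & 0x0F;	/* RD_BASE_ADDR[27:24] */
}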
Example #23
0
static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
{
	struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data;
	struct device *dev = &pdev->dev;
	struct sdhci_host *host;
	struct sdhci_s3c *sc;
	struct resource *res;
	int ret, irq, ptr, clks;

	if (!pdata) {
		dev_err(dev, "no device data specified\n");
		return -ENOENT;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "no irq specified\n");
		return irq;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "no memory specified\n");
		return -ENOENT;
	}

	host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c));
	if (IS_ERR(host)) {
		dev_err(dev, "sdhci_alloc_host() failed\n");
		return PTR_ERR(host);
	}

	sc = sdhci_priv(host);

	sc->host = host;
	sc->pdev = pdev;
	sc->pdata = pdata;
	sc->ext_cd_gpio = -1; /* invalid gpio number */

	platform_set_drvdata(pdev, host);

	sc->clk_io = clk_get(dev, "hsmmc");
	if (IS_ERR(sc->clk_io)) {
		dev_err(dev, "failed to get io clock\n");
		ret = PTR_ERR(sc->clk_io);
		goto err_io_clk;
	}

	/* enable the local io clock and keep it running for the moment. */
	clk_enable(sc->clk_io);

	for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
		struct clk *clk;
		char *name = pdata->clocks[ptr];

		if (name == NULL)
			continue;

		clk = clk_get(dev, name);
		if (IS_ERR(clk)) {
			dev_err(dev, "failed to get clock %s\n", name);
			continue;
		}

		clks++;
		sc->clk_bus[ptr] = clk;

		/*
		 * save current clock index to know which clock bus
		 * is used later in overriding functions.
		 */
		sc->cur_clk = ptr;

		clk_enable(clk);

		dev_info(dev, "clock source %d: %s (%ld Hz)\n",
			 ptr, name, clk_get_rate(clk));
	}

	if (clks == 0) {
		dev_err(dev, "failed to find any bus clocks\n");
		ret = -ENOENT;
		goto err_no_busclks;
	}

	sc->ioarea = request_mem_region(res->start, resource_size(res),
					mmc_hostname(host->mmc));
	if (!sc->ioarea) {
		dev_err(dev, "failed to reserve register area\n");
		ret = -ENXIO;
		goto err_req_regs;
	}

	host->ioaddr = ioremap_nocache(res->start, resource_size(res));
	if (!host->ioaddr) {
		dev_err(dev, "failed to map registers\n");
		ret = -ENXIO;
		release_resource(sc->ioarea);
		kfree(sc->ioarea);
		goto err_req_regs;
	}

	/* Ensure we have minimal gpio selected CMD/CLK/Detect */
	if (pdata->cfg_gpio)
		pdata->cfg_gpio(pdev, pdata->max_width);

	host->hw_name = "samsung-hsmmc";
	host->ops = &sdhci_s3c_ops;
	host->quirks = 0;
	host->irq = irq;

	/* Setup quirks for the controller */
	host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
	host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;

#ifndef CONFIG_MMC_SDHCI_S3C_DMA

	/* we currently see overruns on errors, so disable the SDMA
	 * support as well. */
	host->quirks |= SDHCI_QUIRK_BROKEN_DMA;

#endif /* CONFIG_MMC_SDHCI_S3C_DMA */

	/* It seems we do not get an DATA transfer complete on non-busy
	 * transfers, not sure if this is a problem with this specific
	 * SDHCI block, or a missing configuration that needs to be set. */
	host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;

	/* This host supports the Auto CMD12 */
	host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;

	/* Samsung SoCs need BROKEN_ADMA_ZEROLEN_DESC */
	host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC;

	if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
	    pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
		host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;

	if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
		host->mmc->caps = MMC_CAP_NONREMOVABLE;

	if (pdata->host_caps)
		host->mmc->caps |= pdata->host_caps;

	host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
			 SDHCI_QUIRK_32BIT_DMA_SIZE);

	/* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
	host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;

	/*
	 * If controller does not have internal clock divider,
	 * we can use overriding functions instead of default.
	 */
	if (pdata->clk_type) {
		sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
		sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
		sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
	}

	ret = sdhci_add_host(host);
	if (ret) {
		dev_err(dev, "sdhci_add_host() failed\n");
		goto err_add_host;
	}

	/* The following two methods of card detection might call
	   sdhci_s3c_notify_change() immediately, so they can be called
	   only after sdhci_add_host(). Setup errors are ignored. */
	if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_init)
		pdata->ext_cd_init(&sdhci_s3c_notify_change);
	if (pdata->cd_type == S3C_SDHCI_CD_GPIO &&
	    gpio_is_valid(pdata->ext_cd_gpio))
		sdhci_s3c_setup_card_detect_gpio(sc);

	return 0;

 err_add_host:
	iounmap(host->ioaddr);
	release_resource(sc->ioarea);
	kfree(sc->ioarea);

 err_req_regs:
	for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
		if (sc->clk_bus[ptr]) {
			clk_disable(sc->clk_bus[ptr]);
			clk_put(sc->clk_bus[ptr]);
		}
	}

 err_no_busclks:
	clk_disable(sc->clk_io);
	clk_put(sc->clk_io);

 err_io_clk:
	sdhci_free_host(host);

	return ret;
}
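The error handling above is the usual inverted goto ladder: each acquired resource gets a label, and a failure jumps to the label that unwinds everything obtained so far, in reverse order of acquisition. A stripped-down sketch of the shape, with hypothetical acquire/release helpers:

/* Hypothetical resource helpers; each acquire returns 0 on success. */
int acquire_a(void), acquire_b(void), acquire_c(void);
void release_a(void), release_b(void);

static int demo_probe(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = acquire_b();
	if (ret)
		goto err_a;

	ret = acquire_c();
	if (ret)
		goto err_b;

	return 0;

err_b:					/* unwind in reverse order */
	release_b();
err_a:
	release_a();
	return ret;
}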
Example #24
0
/* Initiate PCM Capture1 */
static int _mhal_alsa_pcm_capture1_init(void)
{
	struct MStar_PCM_Capture_Struct *pcm_capture = &g_pcm_capture1;
	unsigned int audio_pcm_capture_base_pa = 0;
	unsigned int audio_pcm_capture_base_ba = 0;
	unsigned int audio_pcm_capture_base_va = 0;
	unsigned int w_ptr_offset = 0;
	MAD_PRINT(KERN_INFO "Initiate MStar PCM Capture1 engine\n");

	if ((pcm_capture->initialized_status != MAD_TRUE) || (pcm_capture->str_mode_info.status != E_RESUME)) {
		audio_pcm_capture_base_pa = ((unsigned int)_mhal_alsa_read_reg(0x2A90) * _MAD_ADDR_CONVERTOR) + _MAD_PCM_CAPTURE1_BASE_OFFSET;
		audio_pcm_capture_base_pa += (((_mhal_alsa_read_reg(0x2AC0) & 0x0F) << 16) * _MAD_ADDR_CONVERTOR);
		audio_pcm_capture_base_ba = audio_pcm_capture_base_pa + _MAD_PHYSICAL_ADDR_TO_BUS_ADDR;

		if (audio_pcm_capture_base_pa >= 0xA0000000) {
			MAD_PRINT(KERN_ERR "Error! Invalid MStar PCM capture address!\n");
			return -EFAULT;
		}
		else if ((audio_pcm_capture_base_ba % 0x1000)) {
			MAD_PRINT(KERN_ERR "Error! Invalid MStar PCM capture bus address, it should be aligned by 4 KB!\n");
			return -EFAULT;
		}

		/* Convert Bus Address to Virtual Address */
		pcm_capture->buffer.size = _MAD_PCM_CAPTURE_BUF_SIZE;
		audio_pcm_capture_base_va = (unsigned int)ioremap_nocache(audio_pcm_capture_base_ba, pcm_capture->buffer.size);

		pcm_capture->str_mode_info.physical_addr = audio_pcm_capture_base_pa;
		pcm_capture->str_mode_info.bus_addr = audio_pcm_capture_base_ba;
		pcm_capture->str_mode_info.virtual_addr = audio_pcm_capture_base_va;

		pcm_capture->initialized_status = MAD_TRUE;
	}
	else {
		audio_pcm_capture_base_pa = pcm_capture->str_mode_info.physical_addr;
		audio_pcm_capture_base_ba = pcm_capture->str_mode_info.bus_addr;
		audio_pcm_capture_base_va = pcm_capture->str_mode_info.virtual_addr;
	}

	/* init PCM capture buffer address */
	pcm_capture->buffer.addr = (unsigned char *)audio_pcm_capture_base_va;
	//MAD_PRINT(KERN_INFO "PCM Capture1 buffer start address = 0x%08X\n", pcm_capture->buffer.addr);
	//MAD_PRINT(KERN_INFO "PCM Capture1 buffer end address = 0x%08X\n", (pcm_capture->buffer.addr + _MAD_PCM_CAPTURE_BUF_SIZE));

	/* clear all PCM capture buffer */
	memset((void *)audio_pcm_capture_base_va, 0x00, _MAD_PCM_CAPTURE_BUF_SIZE);
	Chip_Flush_Memory();

	/* reset PCM capture write pointer */
	w_ptr_offset = _mhal_alsa_read_reg(0x2DF0) * _MAD_BYTES_IN_LINE;
	pcm_capture->buffer.w_ptr = pcm_capture->buffer.addr + w_ptr_offset;

	/* reset PCM capture read pointer */
	pcm_capture->buffer.r_ptr = pcm_capture->buffer.w_ptr;
	_mhal_alsa_write_reg(0x2DD4, (unsigned short)(w_ptr_offset / _MAD_BYTES_IN_LINE));

	/* reset PCM capture buffer size */
	_mhal_alsa_write_reg(0x2DD6, (unsigned short)(pcm_capture->buffer.size / _MAD_BYTES_IN_LINE));

	return 0;
}
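Worth noting how capture start-up synchronizes software with hardware: the write pointer is recomputed from the engine's line counter, and the read pointer is set equal to it, so the ring starts out logically empty. A sketch of that computation with hypothetical names (demo_ring, demo_ring_sync):

struct demo_ring {
	unsigned char *addr;	/* buffer base (ioremapped)      */
	unsigned char *w_ptr;	/* mirrors the hardware position */
	unsigned char *r_ptr;	/* software consume position     */
};

/* hw_lines: line count read back from the capture engine register */
static void demo_ring_sync(struct demo_ring *ring, unsigned int hw_lines,
			   unsigned int bytes_per_line)
{
	ring->w_ptr = ring->addr + hw_lines * bytes_per_line;
	ring->r_ptr = ring->w_ptr;	/* empty: nothing to consume yet */
}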
Example #25
0
static int __init nettel_init(void)
{
	volatile unsigned long *amdpar;
	unsigned long amdaddr, maxsize;
	int num_amd_partitions=0;
#ifdef CONFIG_MTD_CFI_INTELEXT
	volatile unsigned long *intel0par, *intel1par;
	unsigned long orig_bootcspar, orig_romcs1par;
	unsigned long intel0addr, intel0size;
	unsigned long intel1addr, intel1size;
	int intelboot, intel0cs, intel1cs;
	int num_intel_partitions;
#endif
	int rc = 0;

	nettel_mmcrp = (void *) ioremap_nocache(0xfffef000, 4096);
	if (nettel_mmcrp == NULL) {
		printk("SNAPGEAR: failed to disable MMCR cache??\n");
		return -EIO;
	}

	/* Set CPU clock to be 33.000MHz */
	*((unsigned char *) (nettel_mmcrp + 0xc64)) = 0x01;

	amdpar = (volatile unsigned long *) (nettel_mmcrp + 0xc4);

#ifdef CONFIG_MTD_CFI_INTELEXT
	intelboot = 0;
	intel0cs = SC520_PAR_ROMCS1;
	intel0par = (volatile unsigned long *) (nettel_mmcrp + 0xc0);
	intel1cs = SC520_PAR_ROMCS2;
	intel1par = (volatile unsigned long *) (nettel_mmcrp + 0xbc);

	/*
	 *	Save the CS settings then ensure ROMCS1 and ROMCS2 are off,
	 *	otherwise they might clash with where we try to map BOOTCS.
	 */
	orig_bootcspar = *amdpar;
	orig_romcs1par = *intel0par;
	*intel0par = 0;
	*intel1par = 0;
#endif

	/*
	 *	The first thing to do is determine if we have a separate
	 *	boot FLASH device. Typically this is a small (1 to 2MB)
	 *	AMD FLASH part. It seems that device size is about the
	 *	only way to tell if this is the case...
	 */
	amdaddr = 0x20000000;
	maxsize = AMD_WINDOW_MAXSIZE;

	*amdpar = SC520_PAR(SC520_PAR_BOOTCS, amdaddr, maxsize);
	__asm__ ("wbinvd");

	nettel_amd_map.phys = amdaddr;
	nettel_amd_map.virt = ioremap_nocache(amdaddr, maxsize);
	if (!nettel_amd_map.virt) {
		printk("SNAPGEAR: failed to ioremap() BOOTCS\n");
		iounmap(nettel_mmcrp);
		return -EIO;
	}
	simple_map_init(&nettel_amd_map);

	if ((amd_mtd = do_map_probe("jedec_probe", &nettel_amd_map))) {
		printk(KERN_NOTICE "SNAPGEAR: AMD flash device size = %dK\n",
			(int)(amd_mtd->size>>10));

		amd_mtd->owner = THIS_MODULE;

		/* The high BIOS partition is only present for 2MB units */
		num_amd_partitions = NUM_AMD_PARTITIONS;
		if (amd_mtd->size < AMD_WINDOW_MAXSIZE)
			num_amd_partitions--;
		/* Don't add the partition until after the primary INTEL's */

#ifdef CONFIG_MTD_CFI_INTELEXT
		/*
		 *	Map the Intel flash into memory after the AMD
		 *	It has to start on a multiple of maxsize.
		 */
		maxsize = SC520_PAR_TO_SIZE(orig_romcs1par);
		if (maxsize < (32 * 1024 * 1024))
			maxsize = (32 * 1024 * 1024);
		intel0addr = amdaddr + maxsize;
#endif
	} else {
Example #26
0
static int __devinit
snd_nm256_create(struct snd_card *card, struct pci_dev *pci,
		 struct nm256 **chip_ret)
{
	struct nm256 *chip;
	int err, pval;
	static struct snd_device_ops ops = {
		.dev_free =	snd_nm256_dev_free,
	};
	u32 addr;

	*chip_ret = NULL;

	if ((err = pci_enable_device(pci)) < 0)
		return err;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	chip->card = card;
	chip->pci = pci;
	chip->use_cache = use_cache;
	spin_lock_init(&chip->reg_lock);
	chip->irq = -1;
	mutex_init(&chip->irq_mutex);

	/* store buffer sizes in bytes */
	chip->streams[SNDRV_PCM_STREAM_PLAYBACK].bufsize = playback_bufsize * 1024;
	chip->streams[SNDRV_PCM_STREAM_CAPTURE].bufsize = capture_bufsize * 1024;

	/* 
	 * The NM256 has two memory ports.  The first port is nothing
	 * more than a chunk of video RAM, which is used as the I/O ring
	 * buffer.  The second port has the actual juicy stuff (like the
	 * mixer and the playback engine control registers).
	 */

	chip->buffer_addr = pci_resource_start(pci, 0);
	chip->cport_addr = pci_resource_start(pci, 1);

	/* Init the memory port info.  */
	/* remap control port (#2) */
	chip->res_cport = request_mem_region(chip->cport_addr, NM_PORT2_SIZE,
					     card->driver);
	if (chip->res_cport == NULL) {
		snd_printk(KERN_ERR "memory region 0x%lx (size 0x%x) busy\n",
			   chip->cport_addr, NM_PORT2_SIZE);
		err = -EBUSY;
		goto __error;
	}
	chip->cport = ioremap_nocache(chip->cport_addr, NM_PORT2_SIZE);
	if (chip->cport == NULL) {
		snd_printk(KERN_ERR "unable to map control port %lx\n", chip->cport_addr);
		err = -ENOMEM;
		goto __error;
	}

	if (!strcmp(card->driver, "NM256AV")) {
		/* Ok, try to see if this is a non-AC97 version of the hardware. */
		pval = snd_nm256_readw(chip, NM_MIXER_PRESENCE);
		if ((pval & NM_PRESENCE_MASK) != NM_PRESENCE_VALUE) {
			if (! force_ac97) {
				printk(KERN_ERR "nm256: no ac97 is found!\n");
				printk(KERN_ERR "  force the driver to load by "
				       "passing in the module parameter\n");
				printk(KERN_ERR "    force_ac97=1\n");
				printk(KERN_ERR "  or try sb16 or cs423x drivers instead.\n");
				err = -ENXIO;
				goto __error;
			}
		}
		chip->buffer_end = 2560 * 1024;
		chip->interrupt = snd_nm256_interrupt;
		chip->mixer_status_offset = NM_MIXER_STATUS_OFFSET;
		chip->mixer_status_mask = NM_MIXER_READY_MASK;
	} else {
		/* Not sure if there is any relevant detect for the ZX or not.  */
		if (snd_nm256_readb(chip, 0xa0b) != 0)
			chip->buffer_end = 6144 * 1024;
		else
			chip->buffer_end = 4096 * 1024;

		chip->interrupt = snd_nm256_interrupt_zx;
		chip->mixer_status_offset = NM2_MIXER_STATUS_OFFSET;
		chip->mixer_status_mask = NM2_MIXER_READY_MASK;
	}
	
	chip->buffer_size = chip->streams[SNDRV_PCM_STREAM_PLAYBACK].bufsize +
		chip->streams[SNDRV_PCM_STREAM_CAPTURE].bufsize;
	if (chip->use_cache)
		chip->buffer_size += NM_TOTAL_COEFF_COUNT * 4;
	else
		chip->buffer_size += NM_MAX_PLAYBACK_COEF_SIZE + NM_MAX_RECORD_COEF_SIZE;

	if (buffer_top >= chip->buffer_size && buffer_top < chip->buffer_end)
		chip->buffer_end = buffer_top;
	else {
		/* get buffer end pointer from signature */
		if ((err = snd_nm256_peek_for_sig(chip)) < 0)
			goto __error;
	}

	chip->buffer_start = chip->buffer_end - chip->buffer_size;
	chip->buffer_addr += chip->buffer_start;

	printk(KERN_INFO "nm256: Mapping port 1 from 0x%x - 0x%x\n",
	       chip->buffer_start, chip->buffer_end);

	chip->res_buffer = request_mem_region(chip->buffer_addr,
					      chip->buffer_size,
					      card->driver);
	if (chip->res_buffer == NULL) {
		snd_printk(KERN_ERR "nm256: buffer 0x%lx (size 0x%x) busy\n",
			   chip->buffer_addr, chip->buffer_size);
		err = -EBUSY;
		goto __error;
	}
	chip->buffer = ioremap_nocache(chip->buffer_addr, chip->buffer_size);
	if (chip->buffer == NULL) {
		err = -ENOMEM;
		snd_printk(KERN_ERR "unable to map ring buffer at %lx\n", chip->buffer_addr);
		goto __error;
	}

	/* set offsets */
	addr = chip->buffer_start;
	chip->streams[SNDRV_PCM_STREAM_PLAYBACK].buf = addr;
	addr += chip->streams[SNDRV_PCM_STREAM_PLAYBACK].bufsize;
	chip->streams[SNDRV_PCM_STREAM_CAPTURE].buf = addr;
	addr += chip->streams[SNDRV_PCM_STREAM_CAPTURE].bufsize;
	if (chip->use_cache) {
		chip->all_coeff_buf = addr;
	} else {
		chip->coeff_buf[SNDRV_PCM_STREAM_PLAYBACK] = addr;
		addr += NM_MAX_PLAYBACK_COEF_SIZE;
		chip->coeff_buf[SNDRV_PCM_STREAM_CAPTURE] = addr;
	}

	/* Fixed setting. */
	chip->mixer_base = NM_MIXER_OFFSET;

	chip->coeffs_current = 0;

	snd_nm256_init_chip(chip);

	// pci_set_master(pci); /* needed? */
	
	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0)
		goto __error;

	snd_card_set_dev(card, &pci->dev);

	*chip_ret = chip;
	return 0;

__error:
	snd_nm256_free(chip);
	return err;
}
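The offset bookkeeping above carves consecutive sub-buffers (playback, capture, coefficients) out of a single region placed at buffer_end - buffer_size, by walking one address cursor. A sketch of the carving pattern with hypothetical names:

struct demo_offsets {
	unsigned int play, capture, coeff;
};

/* Walk a cursor upward from 'start' (== buffer_end - total size),
 * assigning each area its offset; sizes are hypothetical inputs. */
static void demo_carve(struct demo_offsets *o, unsigned int start,
		       unsigned int play_sz, unsigned int capt_sz)
{
	unsigned int addr = start;

	o->play = addr;
	addr += play_sz;
	o->capture = addr;
	addr += capt_sz;
	o->coeff = addr;	/* coefficient cache takes the remainder */
}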
Example #27
0
static int __init init_dnpc(void)
{
	int is_dnp;

	/*
	** determine hardware (DNP/ADNP/invalid)
	*/
	if((is_dnp = dnp_adnp_probe()) < 0)
		return -ENXIO;

	/*
	** Things are set up for ADNP by default
	** -> modify all that needs to be different for DNP
	*/
	if(is_dnp)
	{	/*
		** Adjust window size, select correct set_vpp function.
		** The partitioning scheme is identical on both DNP
		** and ADNP except for the size of the third partition.
		*/
		int i;
		dnpc_map.size          = DNP_WINDOW_SIZE;
		dnpc_map.set_vpp       = dnp_set_vpp;
		partition_info[2].size = 0xf0000;

		/*
		** increment all string pointers so the leading 'A' gets skipped,
		** thus turning all occurrences of "ADNP ..." into "DNP ..."
		*/
		++dnpc_map.name;
		for(i = 0; i < NUM_PARTITIONS; i++)
			++partition_info[i].name;
		higlvl_partition_info[1].size = DNP_WINDOW_SIZE -
			CONFIG_MTD_DILNETPC_BOOTSIZE - 0x20000;
		for(i = 0; i < NUM_HIGHLVL_PARTITIONS; i++)
			++higlvl_partition_info[i].name;
	}

	printk(KERN_NOTICE "DIL/Net %s flash: 0x%lx at 0x%llx\n",
		is_dnp ? "DNPC" : "ADNP", dnpc_map.size, (unsigned long long)dnpc_map.phys);

	dnpc_map.virt = ioremap_nocache(dnpc_map.phys, dnpc_map.size);

	dnpc_map_flash(dnpc_map.phys, dnpc_map.size);

	if (!dnpc_map.virt) {
		printk("Failed to ioremap_nocache\n");
		return -EIO;
	}
	simple_map_init(&dnpc_map);

	printk("FLASH virtual address: 0x%p\n", dnpc_map.virt);

	mymtd = do_map_probe("jedec_probe", &dnpc_map);

	if (!mymtd)
		mymtd = do_map_probe("cfi_probe", &dnpc_map);

	/*
	** If flash probes fail, try to make flashes accessible
	** at least as ROM. Adjust erasesize in this case since
	** the default one (128M) will break our partitioning
	*/
	if (!mymtd)
		if((mymtd = do_map_probe("map_rom", &dnpc_map)))
			mymtd->erasesize = 0x10000;

	if (!mymtd) {
		iounmap(dnpc_map.virt);
		return -ENXIO;
	}

	mymtd->owner = THIS_MODULE;

	/*
	** Supply pointers to lowlvl_parts[] array to add_mtd_partitions()
	** -> add_mtd_partitions() will _not_ register MTD devices for
	** the partitions, but will instead store pointers to the MTD
	** objects it creates into our lowlvl_parts[] array.
	** NOTE: we arrange the pointers such that the sequence of the
	**       partitions gets re-arranged: partition #2 follows
	**       partition #0.
	*/
	partition_info[0].mtdp = &lowlvl_parts[0];
	partition_info[1].mtdp = &lowlvl_parts[2];
	partition_info[2].mtdp = &lowlvl_parts[1];
	partition_info[3].mtdp = &lowlvl_parts[3];

	add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);

	/*
	** now create a virtual MTD device by concatenating the four partitions
	** (in the sequence given by the lowlvl_parts[] array).
	*/
	merged_mtd = mtd_concat_create(lowlvl_parts, NUM_PARTITIONS, "(A)DNP Flash Concatenated");
	if(merged_mtd)
	{	/*
		** now partition the new device the way we want it. This time,
		** we do not supply mtd pointers in higlvl_partition_info, so
		** add_mtd_partitions() will register the devices.
		*/
		add_mtd_partitions(merged_mtd, higlvl_partition_info, NUM_HIGHLVL_PARTITIONS);
	}

	return 0;
}
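The ++name trick above drops the leading 'A' from every "ADNP ..." string in place; the literals were written so that skipping one character yields the "DNP ..." spelling. A tiny stand-alone illustration (plain C, hypothetical string):

#include <stdio.h>

int main(void)
{
	const char *name = "ADNP Flash";	/* hypothetical label */

	printf("%s\n", name);	/* "ADNP Flash" */
	++name;			/* skip the 'A' in place, no copy made */
	printf("%s\n", name);	/* "DNP Flash"  */
	return 0;
}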
Example #28
0
/*
 *  stmfb_createfb
 *  Create framebuffer related state and register with upper layers
 */
static int stmfb_createfb(struct stmfb_info *i, int display, const char *name)
{
  int ret;
  unsigned long nPages;
  int fb_registered = 0;
  const struct stmcore_display_pipeline_data * const pd =
    *((struct stmcore_display_pipeline_data **) i->platformDevice->dev.platform_data);

  DPRINTK("\n");

  /*
   * Copy all available display modes into the modelist, before we parse
   * the module parameters to get the default mode.
   */
  stmfb_enumerate_modes(i);

  /*
   * Need to set the framebuffer operation table and get hold of the display
   * plane before parsing the module parameters so fb_find_mode can work.
   */
  i->info.fbops = &stmfb_ops;

  if((ret = stmfb_parse_module_parameters(i, display)) < 0)
    goto failed0;

  /* can create blitter only after module parameters have been parsed */
  if((ret = stmfb_probe_get_blitter(i,pd))<0)
    goto failed0;

  nPages = (i->ulFBSize + PAGE_SIZE - 1) / PAGE_SIZE;

  i->FBPart = bpa2_find_part ("bigphysarea");
  i->ulPFBBase = i->FBPart ? bpa2_alloc_pages (i->FBPart, nPages, 0, GFP_KERNEL)
                           : 0;
  if (!i->ulPFBBase)
  {
    printk(KERN_WARNING "Failed to allocate fb%d memory, requested size = %lu\n",display, i->ulFBSize);
    ret = -ENOMEM;
    goto failed0;
  }

  /* try to allocate memory from BPA2 as additional memory for graphics
     operations. Basically, this is not vital, but driver loading will still
     fail if an auxsize has been specified in the module parameters which can
     not be satisfied, either because no BPA2 partition 'gfx-memory' exists,
     or if it's not large enough for the given auxsize. */
  /* Please note that due to hardware limitations, this can actually be a bit
     complex (2 & 3):
     1) we look for partitions labelled 'gfx-memory-[0...x]', each of which
        should be 64MB in size and be aligned to a 64MB bank. This makes the
        process quite easy for us.
     2) Failing that, we try to use a partition labelled 'gfx-memory'. Now we
        have to make sure ourselves that each allocation does not cross a 64MB
        bank boundary.
     3) Failing that, too, or in case the 'gfx-memory' partition is not
        large enough to accommodate the amount of gfx memory requested in
        the module options, we will do the same but this time use the
        'bigphysarea' partition. */
  /* So, either one can configure several 'gfx-memory-%d' partitions
     (preferred, to have maximum control over placement of gfx memory), just
     use one large 'gfx-memory' partition, or configure nothing at all and be
     limited to 'bigphysarea' memory.
     The combined memory of all the allocations will be made available to
     DirectFB. */
  /* FIXME: this code is way too complex! */
#define MEMORY64MB_SIZE      (1<<26)
  if (i->AuxSize[0])
  {
    /* aux memory was requested */
    unsigned int       idx;        /* index into i->AuxPart[] */
    /* order of partnames is important here! */
    static const char *partnames[] = { "gfx-memory-%d", "gfx-memory", "bigphysarea" };
    unsigned int       partnameidx /* index into partnames[] */,
                       partidx     /* index in case of 'gfx-memory-%d' */;
    char               partname[14]; /* real name in case of 'gfx-memory-%d' */
    struct bpa2_part  *part;
    unsigned long      still_needed = i->AuxSize[0];

    unsigned long      this_alloc;
    int                alignment;

    partidx = 0;
    /* find a partition suitable for us, in the preferred order, as outlined
       above */
    for (partnameidx = 0; partnameidx < ARRAY_SIZE (partnames); ++partnameidx)
    {
      sprintf (partname, partnames[partnameidx], 0);
      part = bpa2_find_part (partname);
      if (part)
        break;
    }

    if (!part)
    {
      printk (KERN_ERR "no BPA2 partitions found for auxmem!\n");
      ret = -ENOMEM;
      goto failed_bigphys;
    }

    idx = 0;

restart:
    if (still_needed >= MEMORY64MB_SIZE)
    {
      /* we first try to satisfy from a 64MB aligned region */
      this_alloc = MEMORY64MB_SIZE;
      /* don't request a specific alignment if individual 'gfx-memory-%d'
         partitions are configured in the kernel */
      alignment  = (partnameidx == 0) ? 0 : MEMORY64MB_SIZE / PAGE_SIZE;
    }
    else
    {
      /* if requested size is < 64MB, we are optimistic and hope it will
         fit into one 64MB bank without alignment restrictions. This
         doesn't mean we will be happy with a region crossing that
         boundary, i.e. we will still make sure all restrictions are met.
         We are really being just optimistic here. */
      this_alloc = still_needed;
      alignment  = 0;
    }

    while (part && idx < ARRAY_SIZE (i->AuxPart) && still_needed)
    {
      int           this_pages;
      unsigned long base;

      printk (KERN_INFO "trying to alloc %lu bytes (align: %.4x, still needed:"
                        " %lu) from '%s' for GFX%d auxmem\n",
              this_alloc, alignment, still_needed, partname, display);

      this_pages = (this_alloc + PAGE_SIZE - 1) / PAGE_SIZE;
      base = bpa2_alloc_pages (part, this_pages, alignment, GFP_KERNEL);
      if (base)
      {
        /* make sure it doesn't cross a 64MB boundary. At some point we will
           be using the new BPA2 allocator API... */
        if ((base & ~(MEMORY64MB_SIZE - 1)) != ((base + this_alloc - 1) & ~(MEMORY64MB_SIZE - 1)))
        {
          unsigned long topAddress;

          printk ("%s: %8.lx + %lu (%d pages) crosses a 64MB boundary, retrying!\n",
                  __FUNCTION__, base, this_alloc, this_pages);

          /* free and try a new reservation with different attributes,
             hoping nobody requests bpa2 memory in between... */
          bpa2_free_pages (part, base);

          if (partnameidx == 0)
          {
            /* if we have 'gfx-memory-%d' partitions, try the next one */
            /* Getting here doesn't necessarily mean there is an error in
               the BPA2 partition definition. This can happen if e.g. two
               framebuffers are configured and the auxmem size requested is
               larger but not a multiple of 64MB (since we are optimistic and
               try to re-use partially used partitions), so don't be tempted
               to put a WARN_ON() here! */
            sprintf (partname, partnames[partnameidx], ++partidx);
            part = bpa2_find_part (partname);
            continue;
          }

          if (still_needed == this_alloc && alignment == 0)
          {
            /* this can only happen on the last chunk of memory needed. So
               we first try to fit it into its own region.
               I.e. we first try to put the last chunk into a partly used
               bank. If we succeed, but the chunk now spans a boundary
               (which gets us here), we try to put it into its own
               bank. If that fails, too, we will come back again (this
               time with alignment == 1) and split the chunk into two. */
            alignment = MEMORY64MB_SIZE / PAGE_SIZE;
            continue;
          }

          /* standard case, allocate up to the end of current 64MB bank,
             effectively splitting the current chunk into two. */
#define _ALIGN_UP(addr,size)    (((addr)+((size)-1))&(~((size)-1)))
          topAddress = _ALIGN_UP (base, MEMORY64MB_SIZE);
          this_alloc = topAddress - base;
          this_pages = (this_alloc + PAGE_SIZE - 1) / PAGE_SIZE;

          /* alignment should be 0 or 1 here */
          WARN_ON (alignment != 0
                   && alignment != 1);
          base = bpa2_alloc_pages (part, this_pages, alignment, GFP_KERNEL);
          if (!base)
          {
            /* shouldn't happen here, since we just moments ago succeeded in
               allocating even more memory than now, so we don't know what
               to do and kind of panic ... */
            break;
          }
        }

        /* we now have found a more or less nice place to chill. */
        i->AuxPart[idx] = part;
        i->AuxBase[idx] = base;
        i->AuxSize[idx] = this_alloc;
        ++idx;

        still_needed -= this_alloc;

        printk (KERN_INFO "success: base: %.8lx, still needed: %lu\n",
                base, still_needed);

        if (still_needed >= MEMORY64MB_SIZE)
        {
          /* we first try to satisfy from a 64MB aligned region */
          this_alloc = MEMORY64MB_SIZE;
          alignment  = (partnameidx == 0) ? 0 : MEMORY64MB_SIZE / PAGE_SIZE;
        }
        else
        {
          /* if requested size is < 64MB we are on the last chunk of memory
             blocks. We are optimistic and hope it will fit into a possibly
             already partly used 64MB bank without alignment restrictions,
             i.e. we hope it will not span a 64MB boundary. If that's not
             the case, we will again use an alignment of 64MB, so as to
             reduce scattering of the auxmem chunks. */
          this_alloc = still_needed;
          alignment  = 0;

          /* if using 'gfx-memory-%d' partitions, try to share the last
             chunk with another chunk (in a possibly partly used
             partition). */
          if (partnameidx == 0)
            partidx = -1;
        }

        if (partnameidx == 0)
        {
          /* in case we are using 'gfx-memory-%d' as partition name, make
             sure we advance to the next partition */
          sprintf (partname, partnames[partnameidx], ++partidx);
          part = bpa2_find_part (partname);
        }
      }
      else
      {
        if (alignment > 1)
        {
          if (still_needed == this_alloc)
            /* make sure not to get into an endless loop which would be
               switching around the alignment all the time. */
            alignment = 1;
          else
            /* try again with different alignment rules */
            alignment = 0;
        }
        else
        {
          /* failed: couldn't allocate any memory at all, even without any
             alignment restrictions.
             1) In case there are 'gfx-memory-%d' partitions, we advance
                to the next one.
             2) In case we had been using 'gfx-memory' as partition name,
                we now try 'bigphysarea'*/
          if (partnameidx == 0)
          {
            /* use next 'gfx-memory-%d' partition */
            BUG_ON (strcmp (partnames[partnameidx], "gfx-memory-%d"));
            sprintf (partname, partnames[partnameidx], ++partidx);
            part = bpa2_find_part (partname);
          }
          else if (partnameidx == 1)
          {
            /* advance from 'gfx-memory' to 'bigphysarea' partition name */
            BUG_ON (strcmp (partnames[partnameidx], "gfx-memory"));
            sprintf (partname, partnames[++partnameidx], 0);
            BUG_ON (strcmp (partnames[partnameidx], "bigphysarea"));
            part = bpa2_find_part (partname);
            goto restart;
          }
          else
            /* that's it, there are no partitions left to check for auxmem */
            break;
        }
      }
    }

    if (still_needed)
    {
      /* not enough chill space configured for BPA2 */
      printk (KERN_WARNING "Failed to allocate enough fb%d auxmem "
                           "(still need %lu bytes)\n",
              display, still_needed);
      ret = -ENOMEM;
      goto failed_auxmem;
    }
  }

  if(stm_display_plane_connect_to_output(i->pFBPlane, i->pFBMainOutput) < 0)
  {
    printk(KERN_ERR "fb%d display cannot be connected to display output pipeline, this is very bad!\n",display);
    ret = -EIO;
    goto failed_auxmem;
  }

  if(stm_display_plane_lock(i->pFBPlane) < 0)
  {
    printk(KERN_WARNING "fb%d display plane may already be in use by another driver\n",display);
    ret = -EBUSY;
    goto failed_auxmem;
  }

  i->main_config.activate = STMFBIO_ACTIVATE_IMMEDIATE;
  if(stmfb_set_output_configuration(&i->main_config,i)<0)
  {
    printk(KERN_WARNING "fb%d main output configuration is unsupported\n",display);
    ret = -EINVAL;
    goto failed_auxmem;
  }

  /*
   * Get the framebuffer layer default extended var state and capabilities
   */
  i->current_var_ex.layerid = 0;

  if((ret = stmfb_encode_var_ex(&i->current_var_ex, i)) < 0)
  {
    printk(KERN_WARNING "fb%d failed to get display plane's extended capabilities\n",display);
    goto failed_auxmem;
  }

  /* Setup the framebuffer info for registration */
  i->info.screen_base = ioremap_nocache(i->ulPFBBase,i->ulFBSize);
  i->info.flags       = FBINFO_DEFAULT |
                        FBINFO_PARTIAL_PAN_OK |
                        FBINFO_HWACCEL_YPAN;

  strcpy(i->info.fix.id, name);  /* identification string */

  i->info.fix.smem_start  = i->ulPFBBase;                /* Start of frame buffer mem (physical address)               */
  i->info.fix.smem_len    = i->ulFBSize;                 /* Length of frame buffer mem as the device sees it           */
  i->info.fix.mmio_start  = pd->mmio;                    /* memory mapped register access     */
  i->info.fix.mmio_len    = pd->mmio_len;                /* Length of Memory Mapped I/O       */
  i->info.fix.type        = FB_TYPE_PACKED_PIXELS;       /* see FB_TYPE_*                     */
  i->info.fix.type_aux    = 0;                           /* Interleave for interleaved Planes */
  i->info.fix.xpanstep    = 0;                           /* zero if no hardware panning       */
  i->info.fix.ypanstep    = 1;                           /* zero if no hardware panning       */
  i->info.fix.ywrapstep   = 0;                           /* zero if no hardware ywrap         */

  i->info.pseudo_palette = &i->pseudo_palette;

  if(fb_alloc_cmap(&i->info.cmap, 256, 1)<0)
  {
    printk(KERN_ERR "fb%d unable to allocate colour map\n",display);
    ret = -ENOMEM;
    goto failed_ioremap;
  }

  if(i->info.cmap.len != 256)
  {
    printk(KERN_ERR "fb%d WTF colour map length is wrong????\n",display);
  }

  if (register_framebuffer(&i->info) < 0)
  {
    printk(KERN_ERR "fb%d register =_framebuffer failed!\n",display);
    ret = -ENODEV;
    goto failed_cmap;
  }

  /*
   * If there was no console activated on the registered framebuffer, we have
   * to force the display mode be updated. This sequence is cribbed from the
   * matroxfb driver.
   */
  if(!i->current_videomode_valid)
  {
    i->info.var.activate |= FB_ACTIVATE_FORCE;
    if((ret = fb_set_var(&i->info, &i->info.var))<0)
    {
      printk(KERN_WARNING "fb%d failed to set default display mode, display pipeline may already be in use\n",display);
      goto failed_register;
    }
  }

//#if (defined(UFS912) || defined(HS7110) || defined(HS7119) || defined(HS7420) || defined(HS7429) || defined(HS7810A) || defined(HS7819) || defined(ATEMIO520) || defined(ATEMIO530) || defined(SPARK) || defined(AT7500)) && defined(__TDT__)
  // WORKAROUND: Clear the framebuffer 
  memset(i->info.screen_base, 0x00, i->ulFBSize);
//#endif

  stmfb_init_class_device(i);

  DPRINTK("out\n");

  return 0;

failed_register:
  fb_registered = 1;
  unregister_framebuffer (&i->info);

failed_cmap:
  fb_dealloc_cmap (&i->info.cmap);

failed_ioremap:
  iounmap (i->info.screen_base);
  i->info.screen_base = NULL;

failed_auxmem:
  stmfb_destroy_auxmem (i);

failed_bigphys:
  bpa2_free_pages (i->FBPart, i->ulPFBBase);
  i->ulPFBBase = 0;
  i->FBPart = NULL;

failed0:
  if (!fb_registered)
    /* unregister_framebuffer() does fb_destroy_modelist() for us, so don't
       destroy it twice! */
    fb_destroy_modelist (&i->info.modelist);

  return ret;
}
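The bank test above masks both ends of a candidate allocation down to their 64MB bank: if base and base + size - 1 reduce to different bank bases, the chunk straddles a boundary, and _ALIGN_UP() gives the split point at the next bank edge. A self-contained sketch of both pieces, with hypothetical names:

#define DEMO_BANK_SIZE	(1UL << 26)	/* 64MB */
#define DEMO_ALIGN_UP(addr, size) (((addr) + ((size) - 1)) & (~((size) - 1)))

/* Non-zero if [base, base + len) straddles a 64MB bank boundary. */
static int demo_crosses_bank(unsigned long base, unsigned long len)
{
	return (base & ~(DEMO_BANK_SIZE - 1)) !=
	       ((base + len - 1) & ~(DEMO_BANK_SIZE - 1));
}

/* Usable bytes from 'base' up to the next bank edge; only meaningful
 * when 'base' is not already bank-aligned (the splitting case above). */
static unsigned long demo_first_chunk_len(unsigned long base)
{
	return DEMO_ALIGN_UP(base, DEMO_BANK_SIZE) - base;
}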
Example #29
0
static int bcm4710_pcmcia_init(struct pcmcia_init *init)
{
	struct pci_dev *pdev;
	uint16 *attrsp;
	uint32 outen, tmp;
#ifdef DO_BCM47XX_PCMCIA_INTERRUPTS
	uint32 intp, intm;
	int rc = 0, i;
	extern unsigned long bcm4710_cpu_cycle;
#endif


	if (!(pdev = pci_find_device(VENDOR_BROADCOM, SB_EXTIF, NULL))) {
		printk(KERN_ERR "bcm4710_pcmcia: extif not found\n");
		return -ENODEV;
	}

	eir = (extifregs_t *) ioremap_nocache(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
	if (!eir) {
		printk(KERN_ERR "bcm4710_pcmcia: ioremap of extif regs failed\n");
		return -ENOMEM;
	}

	/* Initialize the pcmcia i/f: 16bit no swap */
	writel(CF_EM_PCMCIA | CF_DS | CF_EN, &eir->pcmcia_config);

#ifdef	notYet

	/* Set the timing for memory accesses */
	tmp = (19 / bcm4710_cpu_cycle) << 24;		/* W3 = 10nS */
	tmp = tmp | ((29 / bcm4710_cpu_cycle) << 16);	/* W2 = 20nS */
	tmp = tmp | ((109 / bcm4710_cpu_cycle) << 8);	/* W1 = 100nS */
	tmp = tmp | (129 / bcm4710_cpu_cycle);		/* W0 = 120nS */
	writel(tmp, &eir->pcmcia_memwait);		/* 0x01020a0c for a 100Mhz clock */

	/* Set the timing for I/O accesses */
	tmp = (19 / bcm4710_cpu_cycle) << 24;		/* W3 = 10nS */
	tmp = tmp | ((29 / bcm4710_cpu_cycle) << 16);	/* W2 = 20nS */
	tmp = tmp | ((109 / bcm4710_cpu_cycle) << 8);	/* W1 = 100nS */
	tmp = tmp | (129 / bcm4710_cpu_cycle);		/* W0 = 120nS */
	writel(tmp, &eir->pcmcia_iowait);		/* 0x01020a0c for a 100Mhz clock */

	/* Set the timing for attribute accesses */
	tmp = (19 / bcm4710_cpu_cycle) << 24;		/* W3 = 10nS */
	tmp = tmp | ((29 / bcm4710_cpu_cycle) << 16);	/* W2 = 20nS */
	tmp = tmp | ((109 / bcm4710_cpu_cycle) << 8);	/* W1 = 100nS */
	tmp = tmp | (129 / bcm4710_cpu_cycle);		/* W0 = 120nS */
	writel(tmp, &eir->pcmcia_attrwait);		/* 0x01020a0c for a 100Mhz clock */

#endif
	/* Make sure gpio0 and gpio5 are inputs */
	outen = readl(&eir->gpio[0].outen);
	outen &= ~(BCM47XX_PCMCIA_WP | BCM47XX_PCMCIA_STSCHG | BCM47XX_PCMCIA_RESET);
	writel(outen, &eir->gpio[0].outen);

	/* Issue a reset to the pcmcia socket */
	bcm4710_pcmcia_reset();

#ifdef	DO_BCM47XX_PCMCIA_INTERRUPTS
	/* Setup gpio5 to be the STSCHG interrupt */
	intp = readl(&eir->gpiointpolarity);
	writel(intp | BCM47XX_PCMCIA_STSCHG, &eir->gpiointpolarity);	/* Active low */
	intm = readl(&eir->gpiointmask);
	writel(intm | BCM47XX_PCMCIA_STSCHG, &eir->gpiointmask);	/* Enable it */
#endif

	DEBUG(2, "bcm4710_pcmcia after reset:\n");
	DEBUG(2, "\textstatus\t= 0x%08x:\n", readl(&eir->extstatus));
	DEBUG(2, "\tpcmcia_config\t= 0x%08x:\n", readl(&eir->pcmcia_config));
	DEBUG(2, "\tpcmcia_memwait\t= 0x%08x:\n", readl(&eir->pcmcia_memwait));
	DEBUG(2, "\tpcmcia_attrwait\t= 0x%08x:\n", readl(&eir->pcmcia_attrwait));
	DEBUG(2, "\tpcmcia_iowait\t= 0x%08x:\n", readl(&eir->pcmcia_iowait));
	DEBUG(2, "\tgpioin\t\t= 0x%08x:\n", readl(&eir->gpioin));
	DEBUG(2, "\tgpio_outen0\t= 0x%08x:\n", readl(&eir->gpio[0].outen));
	DEBUG(2, "\tgpio_out0\t= 0x%08x:\n", readl(&eir->gpio[0].out));
	DEBUG(2, "\tgpiointpolarity\t= 0x%08x:\n", readl(&eir->gpiointpolarity));
	DEBUG(2, "\tgpiointmask\t= 0x%08x:\n", readl(&eir->gpiointmask));

#ifdef	DO_BCM47XX_PCMCIA_INTERRUPTS
	/* Request pcmcia interrupt */
	rc = request_irq(BCM47XX_PCMCIA_IRQ, init->handler, SA_INTERRUPT,
			 "PCMCIA Interrupt", &bcm47xx_pcmcia_dev_id);
	if (rc)
		printk(KERN_ERR "bcm4710_pcmcia: request_irq failed (%d)\n", rc);
#endif

	attrsp = (uint16 *)ioremap_nocache(EXTIF_PCMCIA_CFGBASE(SB_EXTIF_BASE), 0x1000);
	tmp = readw(&attrsp[0]);
	DEBUG(2, "\tattr[0] = 0x%04x\n", tmp);
	if ((tmp == 0x7fff) || (tmp == 0x7f00)) {
		bcm47xx_pcmcia_present = 0;
	} else {
		bcm47xx_pcmcia_present = 1;
	}

	/* There's only one socket */
	return 1;
}
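Card presence above is inferred from the first word of attribute memory: an empty socket leaves the bus floating or pulled up, which reads back as 0x7fff (or 0x7f00 with the low byte driven), so any other value is taken as a real CIS. A sketch of the heuristic, with a hypothetical name:

/* Floating (pulled-up) attribute bus => empty socket; name hypothetical. */
static int demo_card_present(unsigned short first_attr_word)
{
	return first_attr_word != 0x7fff && first_attr_word != 0x7f00;
}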
Example #30
0
/* driver entry point */
static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret = -ENODEV;
	resource_size_t csr_base, mem_base;
	unsigned long csr_len, mem_len;
	struct denali_nand_info *denali;

	denali = kzalloc(sizeof(*denali), GFP_KERNEL);
	if (!denali)
		return -ENOMEM;

	ret = pci_enable_device(dev);
	if (ret) {
		printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
		goto failed_alloc_memory;
	}

	if (id->driver_data == INTEL_CE4100) {
		/* Due to a silicon limitation, we can only support
		 * ONFI timing mode 1 and below.
		 */
		if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
			printk(KERN_ERR "Intel CE4100 only supports"
					" ONFI timing mode 1 or below\n");
			ret = -EINVAL;
			goto failed_enable_dev;
		}
		denali->platform = INTEL_CE4100;
		mem_base = pci_resource_start(dev, 0);
		mem_len = pci_resource_len(dev, 1);
		csr_base = pci_resource_start(dev, 1);
		csr_len = pci_resource_len(dev, 1);
	} else {
		denali->platform = INTEL_MRST;
		csr_base = pci_resource_start(dev, 0);
		csr_len = pci_resource_len(dev, 0);
		mem_base = pci_resource_start(dev, 1);
		mem_len = pci_resource_len(dev, 1);
		if (!mem_len) {
			mem_base = csr_base + csr_len;
			mem_len = csr_len;
		}
	}

	/* Is 32-bit DMA supported? */
	ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
	if (ret) {
		printk(KERN_ERR "Spectra: no usable DMA configuration\n");
		goto failed_enable_dev;
	}
	denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf,
					     DENALI_BUF_SIZE,
					     DMA_BIDIRECTIONAL);

	if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) {
		dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
		goto failed_enable_dev;
	}

	pci_set_master(dev);
	denali->dev = &dev->dev;
	denali->mtd.dev.parent = &dev->dev;

	ret = pci_request_regions(dev, DENALI_NAND_NAME);
	if (ret) {
		printk(KERN_ERR "Spectra: Unable to request memory regions\n");
		goto failed_dma_map;
	}

	denali->flash_reg = ioremap_nocache(csr_base, csr_len);
	if (!denali->flash_reg) {
		printk(KERN_ERR "Spectra: Unable to remap memory region\n");
		ret = -ENOMEM;
		goto failed_req_regions;
	}

	denali->flash_mem = ioremap_nocache(mem_base, mem_len);
	if (!denali->flash_mem) {
		printk(KERN_ERR "Spectra: ioremap_nocache failed!");
		ret = -ENOMEM;
		goto failed_remap_reg;
	}

	denali_hw_init(denali);
	denali_drv_init(denali);

	/* denali_isr registration is done after all the hardware
	 * initialization is finished */
	if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
			DENALI_NAND_NAME, denali)) {
		printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
		ret = -ENODEV;
		goto failed_remap_mem;
	}

	/* now that our ISR is registered, we can enable interrupts */
	denali_set_intr_modes(denali, true);

	pci_set_drvdata(dev, denali);

	denali->mtd.name = "denali-nand";
	denali->mtd.owner = THIS_MODULE;
	denali->mtd.priv = &denali->nand;

	/* register the driver with the NAND core subsystem */
	denali->nand.select_chip = denali_select_chip;
	denali->nand.cmdfunc = denali_cmdfunc;
	denali->nand.read_byte = denali_read_byte;
	denali->nand.waitfunc = denali_waitfunc;

	/* scan for NAND devices attached to the controller
	 * this is the first stage in a two step process to register
	 * with the nand subsystem */
	if (nand_scan_ident(&denali->mtd, denali->max_banks, NULL)) {
		ret = -ENXIO;
		goto failed_req_irq;
	}

	/* MTD supported page sizes vary by kernel. We validate our
	 * kernel supports the device here.
	 */
	if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
		ret = -ENODEV;
		printk(KERN_ERR "Spectra: device size not supported by this "
			"version of MTD.");
		goto failed_req_irq;
	}

	/* support for multi nand
	 * MTD knows nothing about multi nand,
	 * so we should tell it the real pagesize
	 * and anything else necessary
	 */
	denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
	denali->nand.chipsize <<= (denali->devnum - 1);
	denali->nand.page_shift += (denali->devnum - 1);
	denali->nand.pagemask = (denali->nand.chipsize >>
						denali->nand.page_shift) - 1;
	denali->nand.bbt_erase_shift += (denali->devnum - 1);
	denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
	denali->nand.chip_shift += (denali->devnum - 1);
	denali->mtd.writesize <<= (denali->devnum - 1);
	denali->mtd.oobsize <<= (denali->devnum - 1);
	denali->mtd.erasesize <<= (denali->devnum - 1);
	denali->mtd.size = denali->nand.numchips * denali->nand.chipsize;
	denali->bbtskipbytes *= denali->devnum;

	/* second stage of the NAND scan
	 * this stage requires information regarding ECC and
	 * bad block management. */

	/* Bad block management */
	denali->nand.bbt_td = &bbt_main_descr;
	denali->nand.bbt_md = &bbt_mirror_descr;

	/* skip the scan for now until we have OOB read and write support */
	denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
	denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;

	/* The Denali controller only supports 15bit and 8bit ECC in MRST,
	 * so just let the controller do 15bit ECC for MLC and 8bit ECC for
	 * SLC if possible.
	 */
	if (denali->nand.cellinfo & 0xc &&
			(denali->mtd.oobsize > (denali->bbtskipbytes +
			ECC_15BITS * (denali->mtd.writesize /
			ECC_SECTOR_SIZE)))) {
		/* if MLC OOB size is large enough, use 15bit ECC*/
		denali->nand.ecc.layout = &nand_15bit_oob;
		denali->nand.ecc.bytes = ECC_15BITS;
		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
	} else if (denali->mtd.oobsize < (denali->bbtskipbytes +
			ECC_8BITS * (denali->mtd.writesize /
			ECC_SECTOR_SIZE))) {
		printk(KERN_ERR "Your NAND chip OOB is not large enough to"
				" contain 8bit ECC correction codes");
		goto failed_req_irq;
	} else {
		denali->nand.ecc.layout = &nand_8bit_oob;
		denali->nand.ecc.bytes = ECC_8BITS;
		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
	}

	denali->nand.ecc.bytes *= denali->devnum;
	denali->nand.ecc.layout->eccbytes *=
		denali->mtd.writesize / ECC_SECTOR_SIZE;
	denali->nand.ecc.layout->oobfree[0].offset =
		denali->bbtskipbytes + denali->nand.ecc.layout->eccbytes;
	denali->nand.ecc.layout->oobfree[0].length =
		denali->mtd.oobsize - denali->nand.ecc.layout->eccbytes -
		denali->bbtskipbytes;

	/* Let the driver know the total number of blocks and
	 * how many blocks are contained in each nand chip.
	 * blksperchip helps the driver know how many
	 * blocks are taken by FW.
	 */
	denali->totalblks = denali->mtd.size >>
				denali->nand.phys_erase_shift;
	denali->blksperchip = denali->totalblks / denali->nand.numchips;

	/* These functions are required by the NAND core framework, otherwise,
	 * the NAND core will assert. However, we don't need them, so we'll stub
	 * them out. */
	denali->nand.ecc.calculate = denali_ecc_calculate;
	denali->nand.ecc.correct = denali_ecc_correct;
	denali->nand.ecc.hwctl = denali_ecc_hwctl;

	/* override the default read operations */
	denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
	denali->nand.ecc.read_page = denali_read_page;
	denali->nand.ecc.read_page_raw = denali_read_page_raw;
	denali->nand.ecc.write_page = denali_write_page;
	denali->nand.ecc.write_page_raw = denali_write_page_raw;
	denali->nand.ecc.read_oob = denali_read_oob;
	denali->nand.ecc.write_oob = denali_write_oob;
	denali->nand.erase_cmd = denali_erase;

	if (nand_scan_tail(&denali->mtd)) {
		ret = -ENXIO;
		goto failed_req_irq;
	}

	ret = mtd_device_register(&denali->mtd, NULL, 0);
	if (ret) {
		dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
				ret);
		goto failed_req_irq;
	}
	return 0;

failed_req_irq:
	denali_irq_cleanup(dev->irq, denali);
failed_remap_mem:
	iounmap(denali->flash_mem);
failed_remap_reg:
	iounmap(denali->flash_reg);
failed_req_regions:
	pci_release_regions(dev);
failed_dma_map:
	dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
failed_enable_dev:
	pci_disable_device(dev);
failed_alloc_memory:
	kfree(denali);
	return ret;
}
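The multi-device fix-ups near the end scale the reported geometry by shifts: with devnum identical chips ganged into one wide device, each per-chip size is shifted left by devnum - 1, which matches only power-of-two device counts, the case the shift-based code above assumes. A sketch of the same arithmetic with hypothetical field names:

struct demo_geometry {
	unsigned int writesize;	/* page size  */
	unsigned int oobsize;	/* OOB size   */
	unsigned int erasesize;	/* block size */
};

/* devnum identical chips in parallel: each size doubles per extra
 * device, i.e. shifts left by (devnum - 1), as in the probe above. */
static void demo_scale_geometry(struct demo_geometry *g, int devnum)
{
	g->writesize <<= (devnum - 1);
	g->oobsize   <<= (devnum - 1);
	g->erasesize <<= (devnum - 1);
}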