Example #1
int mini_isp_debug_load_cfg(char *cfg_file,char *key_name,u8 *key_var)
{
	struct kstat stat;
	mm_segment_t fs;
	struct file *fp = NULL;
	int file_flag = O_RDONLY;
	ssize_t ret = 0;
	char temp_array[64] = {0};
	char temp;
	int  cnt=0;
	bool bRegStart = false;
	bool bKeyFound = false;
	bool bKeyCfg   = false;


	print_debug("enter %s", __func__);

	if (NULL == cfg_file) {
		print_error("%s cfg_file null ptr.", __func__);
		return -EINVAL;
	}

	if (NULL == key_name) {
		print_error("%s key_name null ptr.", __func__);
		return -EINVAL;
	}

	if (NULL == key_var) {
		print_error("%s key_var null ptr.", __func__);
		return -EINVAL;
	}

	/* switch to KERNEL_DS so vfs_read() accepts kernel-space buffers; restored before returning */
	fs = get_fs();
	set_fs(KERNEL_DS);

	fp = filp_open(cfg_file, file_flag, 0666);
	if (IS_ERR_OR_NULL(fp)) {
		print_debug("no debug configuration file(%s) - do nothing, just skip it!\n",cfg_file);
		return -1;
	}

	if (0 != vfs_stat(cfg_file, &stat)) {
		print_error("failed to get file %s state!",cfg_file);
		goto ERROR;
	}

	print_debug("%s size : %d",cfg_file, (u32)stat.size);

	while (0 < vfs_read(fp, &temp, 1, &fp->f_pos)) {
		switch (temp) {
		case '{':
			bRegStart = true;
			cnt       = 0;
			bKeyFound = false;
			memset(temp_array, 0, sizeof(temp_array));
			break;
		case '}':
			bRegStart = false;
			if (bKeyFound) {
				*key_var = mini_atoi16(temp_array);
				bKeyCfg = true;
				print_debug("%s:0x%x", key_name, *key_var);
			}
			break;
		case '=':
			if (bRegStart) {
				bKeyFound = false;
				if (0 == strncmp(key_name, temp_array, strlen(key_name)))
					bKeyFound = true;
				cnt = 0;
			}
			break;
		default:
			if (bRegStart) {
				if (cnt >= 64)
					bRegStart = false;
				else
					temp_array[cnt++] = temp;
			}
			break;
		}

		if (bKeyCfg)
			break;
	}

ERROR:
	/* restore the original address limit saved by get_fs() above */
	set_fs(fs);
	if (NULL != fp)
		filp_close(fp, 0);
	return ret;
}
Example #2
static int __devinit therm_est_probe(struct platform_device *pdev)
{
	int i;
	struct therm_estimator *est;
	struct therm_est_data *data;

	est = kzalloc(sizeof(struct therm_estimator), GFP_KERNEL);
	if (IS_ERR_OR_NULL(est))
		return -ENOMEM;

	platform_set_drvdata(pdev, est);

	data = therm_est_get_pdata(&pdev->dev);

	est->devs = data->devs;
	est->ndevs = data->ndevs;
	est->toffset = data->toffset;
	est->polling_period = data->polling_period;
	est->tc1 = data->tc1;
	est->tc2 = data->tc2;
	est->cur_temp = DEFAULT_TEMP;
	est->ntemp = HIST_UNINIT;

	/* initialize timer trips */
	est->num_timer_trips = data->num_timer_trips;
	est->timer_trips = data->timer_trips;
	therm_est_init_timer_trips(est);
	mutex_init(&est->timer_trip_lock);
	INIT_DELAYED_WORK(&est->timer_trip_work,
			  therm_est_timer_trip_work_func);

	/* initialize before the first "goto err", whose cleanup cancels this work */
	INIT_DELAYED_WORK(&est->therm_est_work, therm_est_work_func);

	est->workqueue = alloc_workqueue(dev_name(&pdev->dev),
				    WQ_HIGHPRI | WQ_UNBOUND | WQ_RESCUER, 1);
	if (!est->workqueue)
		goto err;

	queue_delayed_work(est->workqueue,
				&est->therm_est_work,
				msecs_to_jiffies(est->polling_period));

	est->num_trips = data->num_trips;
	est->trips = data->trips;
	est->tzp = data->tzp;

	est->thz = thermal_zone_device_register(dev_name(&pdev->dev),
						est->num_trips,
						(1 << est->num_trips) - 1,
						est,
						&therm_est_ops,
						est->tzp,
						data->passive_delay,
						0);
	if (IS_ERR_OR_NULL(est->thz))
		goto err;

	for (i = 0; i < ARRAY_SIZE(therm_est_nodes); i++)
		device_create_file(&pdev->dev, &therm_est_nodes[i].dev_attr);

#ifdef CONFIG_PM
	est->pm_nb.notifier_call = therm_est_pm_notify;
	register_pm_notifier(&est->pm_nb);
#endif

	return 0;
err:
	cancel_delayed_work_sync(&est->therm_est_work);
	if (est->workqueue)
		destroy_workqueue(est->workqueue);
	kfree(est);
	return -EINVAL;
}
Example #3
static int res_trk_pmem_alloc
	(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size;
	struct ddl_context *ddl_context;
	unsigned long fw_addr;
	int rc = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		rc = -EINVAL;
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!res_trk_is_cp_enabled() ||
			 !res_trk_check_for_sec_session()) {
			if (!ddl_context->video_ion_client)
				ddl_context->video_ion_client =
					res_trk_get_ion_client();
			if (!ddl_context->video_ion_client) {
				DDL_MSG_ERROR(
				"%s() :DDL ION Client Invalid handle\n",
						__func__);
				rc = -ENOMEM;
				goto bail_out;
			}
			alloc_size = (alloc_size+4095) & ~4095;
			addr->alloc_handle = ion_alloc(
					ddl_context->video_ion_client,
					 alloc_size, SZ_4K,
					res_trk_get_mem_type(),
					res_trk_get_ion_flags());
			if (IS_ERR_OR_NULL(addr->alloc_handle)) {
				DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
						__func__);
				rc = -ENOMEM;
				goto bail_out;
			}
		} else {
			fw_addr = resource_context.vidc_platform_data->fw_addr;
			addr->alloc_handle = NULL;
			addr->alloced_phys_addr = fw_addr;
			addr->buffer_size = sz;
		}
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
			allocate_contiguous_memory_nomap(alloc_size,
					res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
					__func__, alloc_size);
			rc = -ENOMEM;
			goto bail_out;
		}
		addr->buffer_size = sz;
		return rc;
	}
bail_out:
	return rc;
}
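Example #4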
static int msm_pmem_table_add(struct hlist_head *ptype,
	struct msm_pmem_info *info, struct ion_client *client, int domain_num)
{
	unsigned long paddr;
#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
	unsigned long kvstart;
	struct file *file;
#endif
	int rc = -ENOMEM;

	unsigned long len;
	struct msm_pmem_region *region;

	region = kmalloc(sizeof(struct msm_pmem_region), GFP_KERNEL);
	if (!region)
		goto out;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	region->handle = ion_import_dma_buf(client, info->fd);
	if (IS_ERR_OR_NULL(region->handle))
		goto out1;
	if (ion_map_iommu(client, region->handle, domain_num, 0,
				  SZ_4K, 0, &paddr, &len, UNCACHED, 0) < 0)
		goto out2;
#elif defined(CONFIG_ANDROID_PMEM)
	rc = get_pmem_file(info->fd, &paddr, &kvstart, &len, &file);
	if (rc < 0) {
		pr_err("%s: get_pmem_file fd %d error %d\n",
				__func__, info->fd, rc);
		goto out1;
	}
	region->file = file;
#else
	paddr = 0;
	file = NULL;
	kvstart = 0;
#endif
	if (!info->len)
		info->len = len;
	rc = check_pmem_info(info, len);
	if (rc < 0)
		goto out3;
	paddr += info->offset;
	len = info->len;

	if (check_overlap(ptype, paddr, len) < 0) {
		rc = -EINVAL;
		goto out3;
	}

	CDBG("%s: type %d, active flag %d, paddr 0x%lx, vaddr 0x%lx\n",
		__func__, info->type, info->active, paddr,
		(unsigned long)info->vaddr);

	INIT_HLIST_NODE(&region->list);
	region->paddr = paddr;
	region->len = len;
	memcpy(&region->info, info, sizeof(region->info));
	D("%s Adding region to list with type %d\n", __func__,
						region->info.type);
	D("%s pmem_stats address is 0x%p\n", __func__, ptype);
	hlist_add_head(&(region->list), ptype);

	return 0;
out3:
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	ion_unmap_iommu(client, region->handle, domain_num, 0);
#endif
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
out2:
	ion_free(client, region->handle);
#elif defined(CONFIG_ANDROID_PMEM)
	put_pmem_file(region->file);
#endif
out1:
	kfree(region);
out:
	return rc;
}
Example #5
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	struct ion_cp_heap *cp_heap =
		container_of(buffer->heap, struct ion_cp_heap, heap);
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	if (cp_heap->iommu_iova[domain_num]) {
		/* Already mapped. */
		unsigned long offset = buffer->priv_phys - cp_heap->base;
		data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
		return 0;
	} else if (cp_heap->iommu_map_all) {
		ret = iommu_map_all(domain_num, cp_heap, partition_num,
				    align, prot);
		if (!ret) {
			unsigned long offset =
					buffer->priv_phys - cp_heap->base;
			data->iova_addr =
				cp_heap->iommu_iova[domain_num] + offset;
			cp_heap->iommu_partition[domain_num] = partition_num;
			/*
			clear delayed map flag so that we don't interfere
			with this feature (we are already delaying).
			*/
			data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
			return 0;
		} else {
			cp_heap->iommu_iova[domain_num] = 0;
			cp_heap->iommu_partition[domain_num] = 0;
			return ret;
		}
	}

	extra = iova_length - buffer->size;
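	/* iova_length may exceed the buffer size; the surplus computed here is
	 * mapped separately after the buffer further below. */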

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	sglist = ion_cp_heap_create_sglist(buffer);
	if (IS_ERR_OR_NULL(sglist)) {
		ret = -ENOMEM;
		goto out1;
	}
	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
					  SZ_4K, prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	if (!IS_ERR_OR_NULL(sglist))
		vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
Example #6
/**
 * sr_classp5_disable() - disable for a voltage domain
 * @sr: SmartReflex module, which need to be disabled
 * @is_volt_reset: reset the voltage?
 *
 * This function has the necessity to either disable SR alone OR disable SR
 * and reset voltage to appropriate level depending on is_volt_reset parameter.
 *
 * NOTE: Appropriate locks must be held by calling path to ensure mutual
 * exclusivity
 */
static int sr_classp5_disable(struct omap_sr *sr, int is_volt_reset)
{
	struct voltagedomain *voltdm = NULL;
	struct omap_volt_data *volt_data = NULL;
	struct sr_classp5_calib_data *work_data = NULL;

	if (IS_ERR_OR_NULL(sr) || IS_ERR_OR_NULL(sr->voltdm)) {
		pr_err("%s: bad parameters!\n", __func__);
		return -EINVAL;
	}

	work_data = (struct sr_classp5_calib_data *)sr->voltdm_cdata;
	if (IS_ERR_OR_NULL(work_data)) {
		pr_err("%s: bad work data %s\n", __func__, sr->name);
		return -EINVAL;
	}

	if (is_idle_task(current)) {
		/*
		 * We should never reach this path while calibration is still
		 * incomplete; the pm_qos constraint is released once the
		 * voltage calibration work has finished.
		 */
		WARN_ON(work_data->work_active);

		return 0;
	}

	/* Rest is regular DVFS path */

	voltdm = sr->voltdm;
	volt_data = omap_voltage_get_curr_vdata(voltdm);
	if (IS_ERR_OR_NULL(volt_data)) {
		pr_warning("%s: Voltage data is NULL. Cannot disable %s\n",
			   __func__, sr->name);
		return -ENODATA;
	}

	/* need to do rest of code ONLY if required */
	if (volt_data->volt_calibrated && !work_data->work_active) {
		/*
		 * We are going OFF - disable clocks manually to allow OFF-mode.
		 */
		if (sr->suspended)
			sr->ops->put(sr);
		return 0;
	}

	if (work_data->work_active) {
		/* flag work is dead and remove the old work */
		work_data->work_active = false;
		cancel_delayed_work_sync(&work_data->work);
		sr_notifier_control(sr, false);
	}

	sr_classp5_stop_hw_loop(sr);

	if (is_volt_reset)
		voltdm_reset(sr->voltdm);

	/* Canceled SR, so no more need to keep request */
	pm_qos_update_request(&work_data->qos, PM_QOS_DEFAULT_VALUE);

	/*
	 * We are going OFF - disable clocks manually to allow OFF-mode.
	 */
	if (sr->suspended) {
		/* !!! Should never ever be here - no guarantee to recover !!!*/
		WARN(true, "Trying to go OFF with invalid AVS state\n");
		sr->ops->put(sr);
	}

	return 0;
}
Example #7
struct q6v5_data *pil_q6v5_init(struct platform_device *pdev)
{
	struct q6v5_data *drv;
	struct resource *res;
	struct pil_desc *desc;
	int ret;

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return ERR_PTR(-ENOMEM);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6_base");
	drv->reg_base = devm_request_and_ioremap(&pdev->dev, res);
	if (!drv->reg_base)
		return ERR_PTR(-ENOMEM);

	desc = &drv->desc;
	ret = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
				      &desc->name);
	if (ret)
		return ERR_PTR(ret);

	desc->dev = &pdev->dev;

	drv->qdsp6v5_2_0 = of_device_is_compatible(pdev->dev.of_node,
						   "qcom,pil-femto-modem");

	if (drv->qdsp6v5_2_0)
		return drv;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "halt_base");
	if (!res)
		return ERR_PTR(-EINVAL);
	drv->axi_halt_base = devm_ioremap(&pdev->dev, res->start,
					  resource_size(res));
	if (!drv->axi_halt_base)
		return ERR_PTR(-ENOMEM);

	drv->qdsp6v55 = of_device_is_compatible(pdev->dev.of_node,
						"qcom,pil-q6v55-mss");
	drv->qdsp6v55 |= of_device_is_compatible(pdev->dev.of_node,
						"qcom,pil-q6v55-lpass");
	drv->qdsp6v56 = of_device_is_compatible(pdev->dev.of_node,
						"qcom,pil-q6v56-mss");

	drv->non_elf_image = of_property_read_bool(pdev->dev.of_node,
						"qcom,mba-image-is-not-elf");

	drv->xo = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(drv->xo))
		return ERR_CAST(drv->xo);

	drv->vreg_cx = devm_regulator_get(&pdev->dev, "vdd_cx");
	if (IS_ERR(drv->vreg_cx))
		return ERR_CAST(drv->vreg_cx);

	drv->vreg_pll = devm_regulator_get(&pdev->dev, "vdd_pll");
	if (!IS_ERR_OR_NULL(drv->vreg_pll)) {
		int voltage;
		ret = of_property_read_u32(pdev->dev.of_node, "qcom,vdd_pll",
					   &voltage);
		if (ret) {
			dev_err(&pdev->dev, "Failed to find vdd_pll voltage.\n");
			return ERR_PTR(ret);
		}

		ret = regulator_set_voltage(drv->vreg_pll, voltage, voltage);
		if (ret) {
			dev_err(&pdev->dev, "Failed to request vdd_pll voltage.\n");
			return ERR_PTR(ret);
		}

		ret = regulator_set_optimum_mode(drv->vreg_pll, 10000);
		if (ret < 0) {
			dev_err(&pdev->dev, "Failed to set vdd_pll mode.\n");
			return ERR_PTR(ret);
		}
	} else {
		 drv->vreg_pll = NULL;
	}

	return drv;
}
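Example #8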
int tdmb_fc8080_spi_write_read(uint8* tx_data, int tx_length, uint8 *rx_data, int rx_length)
{
    int rc;

    struct spi_transfer    t = {
            .tx_buf        = tx_data,
            .rx_buf        = rx_data,
            .len        = tx_length+rx_length,
        };

    struct spi_message    m;

    if (fc8080_ctrl_info.spi_ptr == NULL)
    {
        printk("tdmb_fc8080_spi_write_read error txdata=0x%x, length=%d\n", (unsigned int)tx_data, tx_length+rx_length);
        return FALSE;
    }

    mutex_lock(&fc8080_ctrl_info.mutex);

    spi_message_init(&m);
    spi_message_add_tail(&t, &m);
    rc = spi_sync(fc8080_ctrl_info.spi_ptr, &m);

    if ( rc < 0 )
    {
        printk("tdmb_fc8080_spi_read_burst result(%d), actual_len=%d\n",rc, m.actual_length);
    }

    mutex_unlock(&fc8080_ctrl_info.mutex);

    return TRUE;
}

#ifdef FEATURE_DMB_USE_WORKQUEUE
static irqreturn_t broadcast_tdmb_spi_isr(int irq, void *handle)
{
    struct tdmb_fc8080_ctrl_blk* fc8080_info_p;

    fc8080_info_p = (struct tdmb_fc8080_ctrl_blk *)handle;
    if ( fc8080_info_p && fc8080_info_p->TdmbPowerOnState )
    {
        unsigned long flag;
        if (fc8080_info_p->spi_irq_status)
        {
            printk("######### spi read function is so late skip #########\n");
            return IRQ_HANDLED;
        }
//        printk("***** broadcast_tdmb_spi_isr coming *******\n");
        spin_lock_irqsave(&fc8080_info_p->spin_lock, flag);
        queue_work(fc8080_info_p->spi_wq, &fc8080_info_p->spi_work);
        spin_unlock_irqrestore(&fc8080_info_p->spin_lock, flag);
    }
    else
    {
        printk("broadcast_tdmb_spi_isr is called, but device is off state\n");
    }

    return IRQ_HANDLED;
}

static void broacast_tdmb_spi_work(struct work_struct *tdmb_work)
{
    struct tdmb_fc8080_ctrl_blk *pTdmbWorkData;

    pTdmbWorkData = container_of(tdmb_work, struct tdmb_fc8080_ctrl_blk, spi_work);
    if ( pTdmbWorkData )
    {
        tunerbb_drv_fc8080_isr_control(0);
        pTdmbWorkData->spi_irq_status = TRUE;
        broadcast_fc8080_drv_if_isr();
        pTdmbWorkData->spi_irq_status = FALSE;
        tunerbb_drv_fc8080_isr_control(1);
    }
    else
    {
        printk("~~~~~~~broadcast_tdmb_spi_work call but pTdmbworkData is NULL ~~~~~~~\n");
    }
}
#else
static irqreturn_t broadcast_tdmb_spi_event_handler(int irq, void *handle)
{
    struct tdmb_fc8080_ctrl_blk* fc8080_info_p;

    fc8080_info_p = (struct tdmb_fc8080_ctrl_blk *)handle;
    if ( fc8080_info_p && fc8080_info_p->TdmbPowerOnState )
    {
        if (fc8080_info_p->spi_irq_status)
        {
            printk("######### spi read function is so late skip ignore #########\n");
            return IRQ_HANDLED;
        }

        tunerbb_drv_fc8080_isr_control(0);
        fc8080_info_p->spi_irq_status = TRUE;
        broadcast_fc8080_drv_if_isr();
        fc8080_info_p->spi_irq_status = FALSE;
        tunerbb_drv_fc8080_isr_control(1);
    }
    else
    {
        printk("broadcast_tdmb_spi_isr is called, but device is off state\n");
    }

    return IRQ_HANDLED;
}
#endif

#ifdef FEATURE_DMB_USE_PINCTRL
static int tdmb_pinctrl_init(void)
{
    struct pinctrl *tdmb_pinctrl;
    struct pinctrl_state *gpio_state_suspend;

    tdmb_pinctrl = devm_pinctrl_get(&(fc8080_ctrl_info.pdev->dev));


    if(IS_ERR_OR_NULL(tdmb_pinctrl)) {
        pr_err("%s: Getting pinctrl handle failed\n", __func__);
        return -EINVAL;
    }
    gpio_state_suspend =
        pinctrl_lookup_state(tdmb_pinctrl, "gpio_tdmb_suspend");

    if (IS_ERR_OR_NULL(gpio_state_suspend)) {
        pr_err("%s: Failed to get the suspend state pinctrl handle\n", __func__);
        return -EINVAL;
    }

    if(pinctrl_select_state(tdmb_pinctrl, gpio_state_suspend)) {
        pr_err("%s: error on pinctrl_select_state for tdmb enable and irq pin\n", __func__);
        return -EINVAL;
    }
    else {
        printk("%s: success to set pinctrl_select_state for tdmb enable and irq pin\n", __func__);
    }

    return 0;
}
#endif
static int tdmb_configure_gpios(void)
{
    int rc = OK;
    int err_count = 0;

    fc8080_ctrl_info.dmb_en = of_get_named_gpio(fc8080_ctrl_info.pdev->dev.of_node,"tdmb-fc8080,en-gpio",0);

    rc = gpio_request(fc8080_ctrl_info.dmb_en, "DMB_EN");
    if (rc < 0) {
        err_count++;
        printk("%s:Failed GPIO DMB_EN request!!!\n",__func__);
    }

    fc8080_ctrl_info.dmb_irq = of_get_named_gpio(fc8080_ctrl_info.pdev->dev.of_node,"tdmb-fc8080,irq-gpio",0);

    rc = gpio_request(fc8080_ctrl_info.dmb_irq, "DMB_INT_N");
    if (rc < 0) {
        err_count++;
        printk("%s:Failed GPIO DMB_INT_N request!!!\n",__func__);
    }

#if defined (CONFIG_MACH_MSM8926_VFP_KR )||defined(CONFIG_MACH_MSM8916_YG_SKT_KR)
    fc8080_ctrl_info.dmb_ant = of_get_named_gpio(fc8080_ctrl_info.pdev->dev.of_node,"tdmb-fc8080,ant-gpio",0);

    rc = gpio_request(fc8080_ctrl_info.dmb_ant, "DMB_ANT");
    if (rc < 0) {
        err_count++;
        printk("%s:Failed GPIO DMB_ANT request!!!\n",__func__);
    }
    gpio_direction_output(fc8080_ctrl_info.dmb_ant,0);
#endif

    gpio_direction_output(fc8080_ctrl_info.dmb_en, 0);
    gpio_direction_input(fc8080_ctrl_info.dmb_irq);

    if(err_count > 0) rc = -EINVAL;

    return rc;
}
Example #9
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
        struct dma_buf *dma_buf)
{
    struct dma_buf_attachment *attach;
    struct sg_table *sgt;
    struct scatterlist *sgl;
    struct exynos_drm_gem_obj *exynos_gem_obj;
    struct exynos_drm_gem_buf *buffer;
    int ret;

    DRM_DEBUG_PRIME("%s\n", __FILE__);

    /* is this one of own objects? */
    if (dma_buf->ops == &exynos_dmabuf_ops) {
        struct drm_gem_object *obj;

        exynos_gem_obj = dma_buf->priv;
        obj = &exynos_gem_obj->base;

        /* is it from our device? */
        if (obj->dev == drm_dev) {
            /*
             * Importing dmabuf exported from our own gem increases
             * refcount on gem itself instead of f_count of dmabuf.
             */
            drm_gem_object_reference(obj);
            return obj;
        }
    }

    attach = dma_buf_attach(dma_buf, drm_dev->dev);
    if (IS_ERR(attach))
        return ERR_PTR(-EINVAL);

    get_dma_buf(dma_buf);

    sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
    if (IS_ERR_OR_NULL(sgt)) {
        /* PTR_ERR(NULL) would be 0; make sure a real error is returned */
        ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
        goto err_buf_detach;
    }

    buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
    if (!buffer) {
        DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
        ret = -ENOMEM;
        goto err_unmap_attach;
    }

    exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
    if (!exynos_gem_obj) {
        ret = -ENOMEM;
        goto err_free_buffer;
    }

    sgl = sgt->sgl;

    buffer->size = dma_buf->size;
    buffer->dma_addr = sg_dma_address(sgl);

    if (sgt->nents == 1) {
        /* always physically continuous memory if sgt->nents is 1. */
        exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
    } else {
        /*
         * this case could be CONTIG or NONCONTIG type but for now
         * sets NONCONTIG.
         * TODO. we have to find a way that exporter can notify
         * the type of its own buffer to importer.
         */
        exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
    }

    exynos_gem_obj->buffer = buffer;
    buffer->sgt = sgt;
    exynos_gem_obj->base.import_attach = attach;

    DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
                    buffer->size);

    return &exynos_gem_obj->base;

err_free_buffer:
    kfree(buffer);
    buffer = NULL;
err_unmap_attach:
    dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
    dma_buf_detach(dma_buf, attach);
    dma_buf_put(dma_buf);

    return ERR_PTR(ret);
}
Example #10
static int
userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
				  const struct mmu_notifier_range *range)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it;
	struct mutex *unlock = NULL;
	unsigned long end;
	int ret = 0;

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return 0;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end = range->end - 1;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, range->start, end);
	while (it) {
		struct drm_i915_gem_object *obj;

		if (!mmu_notifier_range_blockable(range)) {
			ret = -EAGAIN;
			break;
		}

		/*
		 * The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!kref_get_unless_zero(&obj->base.refcount)) {
			it = interval_tree_iter_next(it, range->start, end);
			continue;
		}
		spin_unlock(&mn->lock);

		if (!unlock) {
			unlock = &mn->mm->i915->drm.struct_mutex;

			switch (mutex_trylock_recursive(unlock)) {
			default:
			case MUTEX_TRYLOCK_FAILED:
				if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
					i915_gem_object_put(obj);
					return -EINTR;
				}
				/* fall through */
			case MUTEX_TRYLOCK_SUCCESS:
				break;

			case MUTEX_TRYLOCK_RECURSIVE:
				unlock = ERR_PTR(-EEXIST);
				break;
			}
		}

		ret = i915_gem_object_unbind(obj);
		if (ret == 0)
			ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
		i915_gem_object_put(obj);
		if (ret)
			goto unlock;

		spin_lock(&mn->lock);

		/*
		 * As we do not (yet) protect the mmu from concurrent insertion
		 * over this range, there is no guarantee that this search will
		 * terminate given a pathologic workload.
		 */
		it = interval_tree_iter_first(&mn->objects, range->start, end);
	}
	spin_unlock(&mn->lock);

unlock:
	if (!IS_ERR_OR_NULL(unlock))
		mutex_unlock(unlock);

	return ret;
}
Example #11
/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct mei_device *dev;
    struct mei_txe_hw *hw;
    int err;
    int i;

    /* enable pci dev */
    err = pci_enable_device(pdev);
    if (err) {
        dev_err(&pdev->dev, "failed to enable pci device.\n");
        goto end;
    }
    /* set PCI host mastering  */
    pci_set_master(pdev);
    /* pci request regions for mei driver */
    err = pci_request_regions(pdev, KBUILD_MODNAME);
    if (err) {
        dev_err(&pdev->dev, "failed to get pci regions.\n");
        goto disable_device;
    }

    err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
    if (err) {
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err) {
            dev_err(&pdev->dev, "No suitable DMA available.\n");
            goto release_regions;
        }
    }

    /* allocates and initializes the mei dev structure */
    dev = mei_txe_dev_init(pdev);
    if (!dev) {
        err = -ENOMEM;
        goto release_regions;
    }
    hw = to_txe_hw(dev);


    err = mei_reserver_dma_acpi(dev);
    if (err)
        err = mei_alloc_dma(dev);
    if (err)
        goto free_device;

    /* mapping  IO device memory */
    for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
        hw->mem_addr[i] = pci_iomap(pdev, i, 0);
        if (!hw->mem_addr[i]) {
            dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
            err = -ENOMEM;
            goto free_device;
        }
    }


    pci_enable_msi(pdev);

    /* clear spurious interrupts */
    mei_clear_interrupts(dev);

    /* request and enable interrupt  */
    if (pci_dev_msi_enabled(pdev))
        err = request_threaded_irq(pdev->irq,
                                   NULL,
                                   mei_txe_irq_thread_handler,
                                   IRQF_ONESHOT, KBUILD_MODNAME, dev);
    else
        err = request_threaded_irq(pdev->irq,
                                   mei_txe_irq_quick_handler,
                                   mei_txe_irq_thread_handler,
                                   IRQF_SHARED, KBUILD_MODNAME, dev);
    if (err) {
        dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
                pdev->irq);
        goto free_device;
    }

    if (mei_start(dev)) {
        dev_err(&pdev->dev, "init hw failure.\n");
        err = -ENODEV;
        goto release_irq;
    }

    err = mei_txe_setup_satt2(dev,
                              dma_to_phys(&dev->pdev->dev, hw->pool_paddr), hw->pool_size);
    if (err)
        goto release_irq;


    err = mei_register(dev);
    if (err)
        goto release_irq;

    pci_set_drvdata(pdev, dev);

    hw->mdev = mei_mm_init(&dev->pdev->dev,
                           hw->pool_vaddr, hw->pool_paddr, hw->pool_size);

    if (IS_ERR_OR_NULL(hw->mdev)) {
        /* make sure the failure path does not return 0 */
        err = -ENOMEM;
        goto deregister_mei;
    }

    pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
    pm_runtime_use_autosuspend(&pdev->dev);

    pm_runtime_mark_last_busy(&pdev->dev);

    /*
    * For not wake-able HW runtime pm framework
    * can't be used on pci device level.
    * Use domain runtime pm callbacks instead.
    */
    if (!pci_dev_run_wake(pdev))
        mei_txe_set_pm_domain(dev);

    pm_runtime_put_noidle(&pdev->dev);

    if (!nopg)
        pm_runtime_allow(&pdev->dev);

    return 0;

deregister_mei:
    mei_deregister(dev);
release_irq:

    mei_cancel_work(dev);

    /* disable interrupts */
    mei_disable_interrupts(dev);

    free_irq(pdev->irq, dev);
    pci_disable_msi(pdev);

free_device:
    if (hw->pool_release)
        hw->pool_release(hw);

    mei_txe_pci_iounmap(pdev, hw);

    kfree(dev);
release_regions:
    pci_release_regions(pdev);
disable_device:
    pci_disable_device(pdev);
end:
    dev_err(&pdev->dev, "initialization failed.\n");
    return err;
}
Example #12
void sptlrpc_lproc_fini(void)
{
	if (!IS_ERR_OR_NULL(sptlrpc_debugfs_dir))
		ldebugfs_remove(&sptlrpc_debugfs_dir);
}
Example #13
int mdp4_dsi_video_on(struct platform_device *pdev)
{
	int dsi_width;
	int dsi_height;
	int dsi_bpp;
	int dsi_border_clr;
	int dsi_underflow_clr;
	int dsi_hsync_skew;

	int hsync_period;
	int hsync_ctrl;
	int vsync_period;
	int display_hctl;
	int display_v_start;
	int display_v_end;
	int active_hctl;
	int active_h_start;
	int active_h_end;
	int active_v_start;
	int active_v_end;
	int ctrl_polarity;
	int h_back_porch;
	int h_front_porch;
	int v_back_porch;
	int v_front_porch;
	int hsync_pulse_width;
	int vsync_pulse_width;
	int hsync_polarity;
	int vsync_polarity;
	int data_en_polarity;
	int hsync_start_x;
	int hsync_end_x;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp;
	struct fb_info *fbi;
	struct fb_var_screeninfo *var;
	struct msm_fb_data_type *mfd;
	struct mdp4_overlay_pipe *pipe;
	int ret = 0;
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct msm_panel_info *pinfo;

	vctrl = &vsync_ctrl_db[cndx];
	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	pinfo = &mfd->panel_info;

	mutex_lock(&mfd->dma->ov_mutex);

	vctrl->mfd = mfd;
	vctrl->dev = mfd->fbi->dev;
	vctrl->blt_ctrl = pinfo->lcd.blt_ctrl;
	vctrl->vsync_irq_enabled = 0;
	vsync_irq_cnt = 0;

	/* mdp clock on */
	mdp_clk_ctrl(1);

	fbi = mfd->fbi;
	var = &fbi->var;

	pipe = mdp4_dsi_video_alloc_base_pipe();
	if (IS_ERR_OR_NULL(pipe)) {
		mutex_unlock(&mfd->dma->ov_mutex);
		return -EPERM;
	}

	if (mfd->panel_info.pdest == DISPLAY_4)
		mdp4_overlay_panel_mode(MDP4_PANEL_DSI_VIDEO_DMA_S,
					pipe->mixer_num);
	else
		mdp4_overlay_panel_mode(MDP4_PANEL_DSI_VIDEO, pipe->mixer_num);

	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	buf_offset = calc_fb_offset(mfd, fbi, bpp);

	atomic_set(&vctrl->suspend, 0);

	pipe->src_height = fbi->var.yres;
	pipe->src_width = fbi->var.xres;
	pipe->src_h = fbi->var.yres;
	pipe->src_w = fbi->var.xres;
	pipe->src_y = 0;
	pipe->src_x = 0;
	pipe->dst_h = fbi->var.yres;
	pipe->dst_w = fbi->var.xres;
	pipe->srcp0_ystride = fbi->fix.line_length;
	pipe->bpp = bpp;

	if (mfd->display_iova)
		pipe->srcp0_addr = mfd->display_iova + buf_offset;
	else
		pipe->srcp0_addr = (uint32)(buf + buf_offset);

	pipe->dst_h = fbi->var.yres;
	pipe->dst_w = fbi->var.xres;

	mdp4_overlay_solidfill_init(pipe);

	mdp4_overlay_mdp_pipe_req(pipe, mfd);
	mdp4_calc_blt_mdp_bw(mfd, pipe);

	mdp4_overlay_dmap_xy(pipe);	/* dma_p */
	mdp4_overlay_dmap_cfg(mfd, 1);
	if (pipe->pipe_type == OVERLAY_TYPE_VIDEO)
		mdp4_overlay_vg_setup(pipe);	/* video/graphic pipe */
	else
		mdp4_overlay_rgb_setup(pipe);	/* rgb pipe */
	mdp4_overlayproc_cfg(pipe);

	mdp4_overlay_reg_flush(pipe, 1);

	mdp4_mixer_stage_up(pipe, 0);
	mdp4_mixer_stage_commit(pipe->mixer_num);

	/*
	 * DSI timing setting
	 */
	h_back_porch = var->left_margin;
	h_front_porch = var->right_margin;
	v_back_porch = var->upper_margin;
	v_front_porch = var->lower_margin;
	hsync_pulse_width = var->hsync_len;
	vsync_pulse_width = var->vsync_len;
	dsi_border_clr = mfd->panel_info.lcdc.border_clr;
	dsi_underflow_clr = mfd->panel_info.lcdc.underflow_clr;
	dsi_hsync_skew = mfd->panel_info.lcdc.hsync_skew;
	dsi_width = mfd->panel_info.xres +
		mfd->panel_info.lcdc.xres_pad;
	dsi_height = mfd->panel_info.yres +
		mfd->panel_info.lcdc.yres_pad;
	dsi_bpp = mfd->panel_info.bpp;

	hsync_period = hsync_pulse_width + h_back_porch + dsi_width
				+ h_front_porch;
	hsync_ctrl = (hsync_period << 16) | hsync_pulse_width;
	hsync_start_x = h_back_porch + hsync_pulse_width;
	hsync_end_x = hsync_period - h_front_porch - 1;
	display_hctl = (hsync_end_x << 16) | hsync_start_x;

	vsync_period =
	    (vsync_pulse_width + v_back_porch + dsi_height + v_front_porch);
	display_v_start = ((vsync_pulse_width + v_back_porch) * hsync_period)
				+ dsi_hsync_skew;
	display_v_end =
	  ((vsync_period - v_front_porch) * hsync_period) + dsi_hsync_skew - 1;

	if (dsi_width != var->xres) {
		active_h_start = hsync_start_x + first_pixel_start_x;
		active_h_end = active_h_start + var->xres - 1;
		active_hctl =
		    ACTIVE_START_X_EN | (active_h_end << 16) | active_h_start;
	} else {
		active_hctl = 0;
	}

	if (dsi_height != var->yres) {
		active_v_start =
		    display_v_start + first_pixel_start_y * hsync_period;
		active_v_end = active_v_start + (var->yres) * hsync_period - 1;
		active_v_start |= ACTIVE_START_Y_EN;
	} else {
		active_v_start = 0;
		active_v_end = 0;
	}

	dsi_underflow_clr |= 0x80000000;	/* enable recovery */
	hsync_polarity = 0;
	vsync_polarity = 0;
	data_en_polarity = 0;

	ctrl_polarity =
	    (data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x4, hsync_ctrl);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x8, vsync_period * hsync_period);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0xc,
				vsync_pulse_width * hsync_period);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x10, display_hctl);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x14, display_v_start);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x18, display_v_end);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x1c, active_hctl);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x20, active_v_start);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x24, active_v_end);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x28, dsi_border_clr);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x2c, dsi_underflow_clr);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x30, dsi_hsync_skew);
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x38, ctrl_polarity);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	mdp_histogram_ctrl_all(TRUE);
	mdp4_overlay_dsi_video_start();
	mutex_unlock(&mfd->dma->ov_mutex);

	return ret;
}
Example #14
int mini_isp_debug_load_reg(char *reg_file,u32 *reg_key,u32 reg_max,u32 *reg_cnt)
{
	struct kstat stat;
	mm_segment_t fs;
	struct file *fp = NULL;
	int file_flag = O_RDONLY;
	ssize_t ret = 0;
	u32 addr = 0;
	char addr_array[8] = {0};
	char temp;
	bool bRegStart = false;

	if (NULL == reg_file || NULL == reg_key || NULL == reg_cnt) {
		print_error("%s param error", __func__);
		return -EINVAL;
	}

	print_debug("enter %s", __func__);

	/* switch to KERNEL_DS so vfs_read() accepts kernel-space buffers; restored before returning */
	fs = get_fs();
	set_fs(KERNEL_DS);

	fp = filp_open(reg_file, file_flag, 0666);
	if (IS_ERR_OR_NULL(fp)) {
		print_debug("no debug configuration file(%s) - do nothing, just skip it!\n",reg_file);
		return -1;
	}

	if (0 != vfs_stat(reg_file, &stat)) {
		print_error("failed to get file state!");
		goto ERROR;
	}

	*reg_cnt = 0;
	print_debug("file size : %d", (u32) stat.size);


	while (0 < vfs_read(fp, &temp, 1, &fp->f_pos)) {
		switch (temp) {
		case '{':
			bRegStart = true;
			if (0 == vfs_read(fp, addr_array, 7, &fp->f_pos))
				goto ERROR;
			addr = mini_atoi16(addr_array);

			if (*reg_cnt < reg_max) {
				reg_key[*reg_cnt] = addr;
				*reg_cnt = *reg_cnt + 1;
			}
			break;

		case '}':
			bRegStart = false;
			break;

		default:
			break;
		}
	}

ERROR:
	/* restore the original address limit saved by get_fs() above */
	set_fs(fs);
	if (NULL != fp)
		filp_close(fp, 0);
	return ret;
}
Example #15
static int tegra_camera_probe(struct platform_device *pdev)
{
	int err;
	struct tegra_camera_dev *dev;

	dev_info(&pdev->dev, "%s\n", __func__);
	dev = devm_kzalloc(&pdev->dev, sizeof(struct tegra_camera_dev),
			GFP_KERNEL);
	if (!dev) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "%s: unable to allocate memory\n",
			__func__);
		goto alloc_err;
	}

	mutex_init(&dev->tegra_camera_lock);

	/* Powergate VE when boot */
	mutex_lock(&dev->tegra_camera_lock);
	dev->power_refcnt = 0;
//#ifndef CONFIG_ARCH_TEGRA_2x_SOC
#if 0
	err = tegra_powergate_partition(TEGRA_POWERGATE_VENC);
	if (err)
		dev_err(&pdev->dev, "%s: powergate failed.\n", __func__);
#endif
	mutex_unlock(&dev->tegra_camera_lock);

	dev->dev = &pdev->dev;

	/* Get regulator pointer */
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
	dev->reg = regulator_get(&pdev->dev, "vcsi");
#else
	dev->reg = regulator_get(&pdev->dev, "avdd_dsi_csi");
#endif
	if (IS_ERR_OR_NULL(dev->reg)) {
		dev_err(&pdev->dev, "%s: couldn't get regulator\n", __func__);
		/* PTR_ERR(NULL) would be 0; return a real error instead */
		return dev->reg ? PTR_ERR(dev->reg) : -ENODEV;
	}

	dev->misc_dev.minor = MISC_DYNAMIC_MINOR;
	dev->misc_dev.name = TEGRA_CAMERA_NAME;
	dev->misc_dev.fops = &tegra_camera_fops;
	dev->misc_dev.parent = &pdev->dev;

	err = misc_register(&dev->misc_dev);
	if (err) {
		dev_err(&pdev->dev, "%s: Unable to register misc device!\n",
		       TEGRA_CAMERA_NAME);
		goto misc_register_err;
	}

	err = tegra_camera_clk_get(pdev, "isp", &dev->isp_clk);
	if (err)
		goto misc_register_err;
	err = tegra_camera_clk_get(pdev, "vi", &dev->vi_clk);
	if (err)
		goto vi_clk_get_err;
	err = tegra_camera_clk_get(pdev, "vi_sensor", &dev->vi_sensor_clk);
	if (err)
		goto vi_sensor_clk_get_err;
	err = tegra_camera_clk_get(pdev, "csus", &dev->csus_clk);
	if (err)
		goto csus_clk_get_err;
	err = tegra_camera_clk_get(pdev, "csi", &dev->csi_clk);
	if (err)
		goto csi_clk_get_err;

#ifndef CONFIG_ARCH_TEGRA_2x_SOC
	err = tegra_camera_clk_get(pdev, "emc", &dev->emc_clk);
	if (err)
		goto emc_clk_get_err;
	err = tegra_camera_clk_get(pdev, "sclk", &dev->avp_clk);
	if (err)
		goto avp_clk_get_err;
#endif
	/* dev is set in order to restore in _remove */
	platform_set_drvdata(pdev, dev);

	return 0;

/* each label below releases the clock acquired just before the one that failed */
avp_clk_get_err:
	clk_put(dev->emc_clk);
emc_clk_get_err:
	clk_put(dev->csi_clk);
csi_clk_get_err:
	clk_put(dev->csus_clk);
csus_clk_get_err:
	clk_put(dev->vi_sensor_clk);
vi_sensor_clk_get_err:
	clk_put(dev->vi_clk);
vi_clk_get_err:
	clk_put(dev->isp_clk);
misc_register_err:
	regulator_put(dev->reg);
alloc_err:
	return err;
}
Example #16
PVRSRV_ERROR SysDvfsInitialize(SYS_SPECIFIC_DATA *psSysSpecificData)
{
	IMG_INT32 opp_count;
	IMG_UINT32 i, *freq_list;
	struct opp *opp;
	unsigned long freq;

	/**
	 * We query and store the list of SGX frequencies just this once under the
	 * assumption that they are unchanging, e.g. no disabling of high frequency
	 * option for thermal management. This is currently valid for 4430 and 4460.
	 */
	rcu_read_lock();
	opp_count = opp_get_opp_count(&gpsPVRLDMDev->dev);
	if (opp_count < 1)
	{
		rcu_read_unlock();
		PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not retrieve opp count"));
		return PVRSRV_ERROR_NOT_SUPPORTED;
	}

	/**
	 * Allocate the frequency list with a slot for each available frequency plus
	 * one additional slot to hold a designated frequency value to assume when in
	 * an unknown frequency state.
	 */
	freq_list = kmalloc((opp_count + 1) * sizeof(IMG_UINT32), GFP_ATOMIC);
	if (!freq_list)
	{
		rcu_read_unlock();
		PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not allocate frequency list"));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	/**
	 * Fill in frequency list from lowest to highest then finally the "unknown"
	 * frequency value. We use the highest available frequency as our assumed value
	 * when in an unknown state, because it is safer for APM and hardware recovery
	 * timers to be longer than intended rather than shorter.
	 */
	freq = 0;
	for (i = 0; i < opp_count; i++)
	{
		opp = opp_find_freq_ceil(&gpsPVRLDMDev->dev, &freq);
		if (IS_ERR_OR_NULL(opp))
		{
			rcu_read_unlock();
			PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not retrieve opp level %d", i));
			kfree(freq_list);
			return PVRSRV_ERROR_NOT_SUPPORTED;
		}
		freq_list[i] = (IMG_UINT32)freq;
		freq++;
	}
	rcu_read_unlock();
	freq_list[opp_count] = freq_list[opp_count - 1];

	psSysSpecificData->ui32SGXFreqListSize = opp_count + 1;
	psSysSpecificData->pui32SGXFreqList = freq_list;
	/* Start in unknown state - no frequency request to DVFS yet made */
	psSysSpecificData->ui32SGXFreqListIndex = opp_count;

	return PVRSRV_OK;
}
Example #17
static int register_memory(void)
{
    int			result;
    unsigned long		paddr;
    void                    *kvptr;
    unsigned long		kvaddr;
    unsigned long		mem_len;

    mutex_lock(&acdb_data.acdb_mutex);
    acdb_data.ion_client =
        msm_ion_client_create(UINT_MAX, "audio_acdb_client");
    if (IS_ERR_OR_NULL(acdb_data.ion_client)) {
        pr_err("%s: Could not register ION client!!!\n", __func__);
        result = PTR_ERR(acdb_data.ion_client);
        goto err;
    }

    acdb_data.ion_handle = ion_import_dma_buf(acdb_data.ion_client,
                           atomic_read(&acdb_data.map_handle));
    if (IS_ERR_OR_NULL(acdb_data.ion_handle)) {
        pr_err("%s: Could not import map handle!!!\n", __func__);
        result = PTR_ERR(acdb_data.ion_handle);
        goto err_ion_client;
    }

    result = ion_phys(acdb_data.ion_client, acdb_data.ion_handle,
                      &paddr, (size_t *)&mem_len);
    if (result != 0) {
        pr_err("%s: Could not get phys addr!!!\n", __func__);
        goto err_ion_handle;
    }

    kvptr = ion_map_kernel(acdb_data.ion_client,
                           acdb_data.ion_handle, 0);
    if (IS_ERR_OR_NULL(kvptr)) {
        pr_err("%s: Could not get kernel virt addr!!!\n", __func__);
        result = PTR_ERR(kvptr);
        goto err_ion_handle;
    }
    kvaddr = (unsigned long)kvptr;
    atomic64_set(&acdb_data.paddr, paddr);
    atomic64_set(&acdb_data.kvaddr, kvaddr);
    atomic64_set(&acdb_data.mem_len, mem_len);
    mutex_unlock(&acdb_data.acdb_mutex);

    pr_debug("%s done! paddr = 0x%lx, "
             "kvaddr = 0x%lx, len = x%lx\n",
             __func__,
             (long)atomic64_read(&acdb_data.paddr),
             (long)atomic64_read(&acdb_data.kvaddr),
             (long)atomic64_read(&acdb_data.mem_len));

    return result;
err_ion_handle:
    ion_free(acdb_data.ion_client, acdb_data.ion_handle);
err_ion_client:
    ion_client_destroy(acdb_data.ion_client);
err:
    atomic64_set(&acdb_data.mem_len, 0);
    mutex_unlock(&acdb_data.acdb_mutex);
    return result;
}
Example #18
static int davinci_musb_init(struct musb *musb)
{
	void __iomem	*tibase = musb->ctrl_base;
	u32		revision;
	int 		ret = -ENODEV;

	usb_nop_xceiv_register();
	musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
	if (IS_ERR_OR_NULL(musb->xceiv)) {
		ret = -EPROBE_DEFER;
		goto unregister;
	}

	musb->mregs += DAVINCI_BASE_OFFSET;

	/* returns zero if e.g. not clocked */
	revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
	if (revision == 0)
		goto fail;

	setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);

	davinci_musb_source_power(musb, 0, 1);

	/* dm355 EVM swaps D+/D- for signal integrity, and
	 * is clocked from the main 24 MHz crystal.
	 */
	if (machine_is_davinci_dm355_evm()) {
		u32	phy_ctrl = __raw_readl(USB_PHY_CTRL);

		phy_ctrl &= ~(3 << 9);
		phy_ctrl |= USBPHY_DATAPOL;
		__raw_writel(phy_ctrl, USB_PHY_CTRL);
	}

	/* On dm355, the default-A state machine needs DRVVBUS control.
	 * If we won't be a host, there's no need to turn it on.
	 */
	if (cpu_is_davinci_dm355()) {
		u32	deepsleep = __raw_readl(DM355_DEEPSLEEP);

		deepsleep &= ~DRVVBUS_FORCE;
		__raw_writel(deepsleep, DM355_DEEPSLEEP);
	}

	/* reset the controller */
	musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1);

	/* start the on-chip PHY and its PLL */
	phy_on();

	msleep(5);

	/* NOTE:  irqs are in mixed mode, not bypass to pure-musb */
	pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n",
		revision, __raw_readl(USB_PHY_CTRL),
		musb_readb(tibase, DAVINCI_USB_CTRL_REG));

	musb->isr = davinci_musb_interrupt;
	return 0;

fail:
	usb_put_phy(musb->xceiv);
unregister:
	usb_nop_xceiv_unregister();
	return ret;
}
Example #19
/**
 * sr_classp5_suspend_noirq() - class suspend_noirq handler
 * @sr:	SmartReflex module which is moving to suspend
 *
 * The purpose of suspend_noirq handler is to make sure that Calibration
 * works are canceled before moving to OFF mode.
 * Otherwise these works may be executed at any moment, trigger
 * SmartReflex and race with CPU Idle notifiers. As result - system
 * will crash
 */
static int sr_classp5_suspend_noirq(struct omap_sr *sr)
{
	struct sr_classp5_calib_data *work_data;
	struct omap_volt_data *volt_data;
	struct voltagedomain *voltdm;
	int ret = 0;

	if (IS_ERR_OR_NULL(sr)) {
		pr_err("%s: bad parameters!\n", __func__);
		return -EINVAL;
	}

	work_data = (struct sr_classp5_calib_data *)sr->voltdm_cdata;
	if (IS_ERR_OR_NULL(work_data)) {
		pr_err("%s: bad work data %s\n", __func__, sr->name);
		return -EINVAL;
	}

	/*
	 * At suspend_noirq this code does not strictly need to be protected by
	 * omap_dvfs_lock, but let's be paranoid (something may run on another CPU).
	 */
	mutex_lock(&omap_dvfs_lock);

	voltdm = sr->voltdm;
	volt_data = omap_voltage_get_curr_vdata(voltdm);
	if (IS_ERR_OR_NULL(volt_data)) {
		pr_warning("%s: Voltage data is NULL. Cannot disable %s\n",
			   __func__, sr->name);
		ret = -ENODATA;
		goto finish_suspend;
	}


	/*
	 * Check if calibration is active at this moment if yes -
	 * abort suspend.
	 */
	if (work_data->work_active) {
		pr_warn("%s: %s Calibration is active, abort suspend (Vnom=%u)\n",
			__func__, sr->name, volt_data->volt_nominal);
		ret = -EBUSY;
		goto finish_suspend;
	}

	/*
	 * Check if current voltage is calibrated if no -
	 * abort suspend.
	 */
	if (!volt_data->volt_calibrated) {
		pr_warn("%s: %s Calibration hasn't been done, abort suspend  (Vnom=%u)\n",
			__func__, sr->name, volt_data->volt_nominal);
		ret = -EBUSY;
		goto finish_suspend;
	}

	/* Let's be paranoid - cancel Calibration work manually */
	cancel_delayed_work_sync(&work_data->work);
	work_data->work_active = false;

finish_suspend:
	mutex_unlock(&omap_dvfs_lock);
	return ret;
}
Example #20
static int alloc_ion_mem(struct smem_client *client, size_t size, u32 align,
	u32 flags, enum hal_buffer buffer_type, struct msm_smem *mem,
	int map_kernel)
{
	struct ion_handle *hndl;
	dma_addr_t iova = 0;
	unsigned long buffer_size = 0;
	unsigned long heap_mask = 0;
	int rc = 0;

	align = ALIGN(align, SZ_4K);
	size = ALIGN(size, SZ_4K);

	if (flags & SMEM_SECURE) {
		size = ALIGN(size, SZ_1M);
		align = ALIGN(align, SZ_1M);
	}

	if (is_iommu_present(client->res)) {
		heap_mask = ION_HEAP(ION_IOMMU_HEAP_ID);
	} else {
		dprintk(VIDC_DBG,
			"allocate shared memory from adsp heap size %d align %d\n",
			size, align);
		heap_mask = ION_HEAP(ION_ADSP_HEAP_ID);
	}

	if (flags & SMEM_SECURE)
		heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID);

	hndl = ion_alloc(client->clnt, size, align, heap_mask, flags);
	if (IS_ERR_OR_NULL(hndl)) {
		dprintk(VIDC_ERR,
		"Failed to allocate shared memory = %pK, %d, %d, 0x%x\n",
		client, size, align, flags);
		rc = -ENOMEM;
		goto fail_shared_mem_alloc;
	}
	mem->mem_type = client->mem_type;
	mem->smem_priv = hndl;
	mem->flags = flags;
	mem->buffer_type = buffer_type;
	if (map_kernel) {
		mem->kvaddr = ion_map_kernel(client->clnt, hndl);
		if (!mem->kvaddr) {
			dprintk(VIDC_ERR,
				"Failed to map shared mem in kernel\n");
			rc = -EIO;
			goto fail_map;
		}
	} else
		mem->kvaddr = NULL;

	rc = get_device_address(client, hndl, align, &iova, &buffer_size,
				flags, buffer_type);
	if (rc) {
		dprintk(VIDC_ERR, "Failed to get device address: %d\n",
			rc);
		goto fail_device_address;
	}
	mem->device_addr = iova;
	mem->size = size;
	dprintk(VIDC_DBG,
		"%s: ion_handle = 0x%pK, device_addr = 0x%x, size = %d, kvaddr = 0x%pK, buffer_type = %d\n",
		__func__, mem->smem_priv, (u32)mem->device_addr,
		mem->size, mem->kvaddr, mem->buffer_type);
	return rc;
fail_device_address:
	ion_unmap_kernel(client->clnt, hndl);
fail_map:
	ion_free(client->clnt, hndl);
fail_shared_mem_alloc:
	return rc;
}
Example #21
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0 ;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long *kernel_vaddr = NULL;
	unsigned long ionflag = 0;
	unsigned long flags = 0;
	int ret = 0;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int rc = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
						 __func__);
			goto bail_out;
		}
		alloc_size = (alloc_size+4095) & ~4095;
		addr->alloc_handle = ion_alloc(
		ddl_context->video_ion_client, alloc_size, SZ_4K,
			res_trk_get_mem_type());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
						 __func__);
			goto bail_out;
		}
		if (res_trk_check_for_sec_session() ||
			addr->mem_type == DDL_FW_MEM)
			ionflag = UNCACHED;
		else
			ionflag = CACHED;
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					addr->alloc_handle, ionflag);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
				DDL_MSG_ERROR("%s() :DDL ION map failed\n",
							 __func__);
				goto free_ion_alloc;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		if (res_trk_check_for_sec_session()) {
			rc = ion_phys(ddl_context->video_ion_client,
				addr->alloc_handle, &phyaddr,
					&len);
			if (rc || !phyaddr) {
				DDL_MSG_ERROR(
				"%s():DDL ION client physical failed\n",
				__func__);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = phyaddr;
		} else {
			ret = ion_map_iommu(ddl_context->video_ion_client,
					addr->alloc_handle,
					VIDEO_DOMAIN,
					VIDEO_MAIN_POOL,
					SZ_4K,
					0,
					&iova,
					&buffer_size,
					UNCACHED, 0);
			if (ret || !iova) {
				DDL_MSG_ERROR(
				"%s():DDL ION ion map iommu failed, ret = %d iova = 0x%lx\n",
					__func__, ret, iova);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = (phys_addr_t) iova;
		}
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
						__func__);
			goto unmap_ion_alloc;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *) addr->alloced_phys_addr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = alloc_size;
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
		allocate_contiguous_memory_nomap(alloc_size,
			res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
					 __func__, alloc_size);
			goto bail_out;
		}
		flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
		if (alignment == DDL_KILO_BYTE(128))
				index = 1;
		else if (alignment > SZ_4K)
			flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;

		addr->mapped_buffer =
		msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
			alloc_size, flags, &vidc_mmu_subsystem[index],
			sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
		if (IS_ERR(addr->mapped_buffer)) {
			pr_err(" %s() buffer map failed", __func__);
			goto free_acm_alloc;
		}
		mapped_buffer = addr->mapped_buffer;
		if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
			pr_err("%s() map buffers failed\n", __func__);
			goto free_map_buffers;
		}
		addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
		addr->virtual_base_addr = mapped_buffer->vaddr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = sz;
	}
	return addr->virtual_base_addr;
free_map_buffers:
	msm_subsystem_unmap_buffer(addr->mapped_buffer);
	addr->mapped_buffer = NULL;
free_acm_alloc:
	free_contiguous_memory_by_paddr(
		(unsigned long)addr->alloced_phys_addr);
	addr->alloced_phys_addr = (phys_addr_t)NULL;
	return NULL;
unmap_ion_alloc:
	ion_unmap_kernel(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->virtual_base_addr = NULL;
	addr->alloced_phys_addr = (phys_addr_t)NULL;
free_ion_alloc:
	ion_free(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->alloc_handle = NULL;
bail_out:
	return NULL;
}
Example #22
static int lis3dh_acc_config_regulator(struct lis3dh_acc_data *acc, bool on)
{
	int rc = 0, i;
	int num_reg = sizeof(lis3dh_acc_vreg) / sizeof(struct sensor_regulator);

	if (on) {
		for (i = 0; i < num_reg; i++) {
			lis3dh_acc_vreg[i].vreg =
				regulator_get(&acc->client->dev,
				lis3dh_acc_vreg[i].name);
			if (IS_ERR(lis3dh_acc_vreg[i].vreg)) {
				rc = PTR_ERR(lis3dh_acc_vreg[i].vreg);
				pr_err("%s:regulator get failed rc=%d\n",
								__func__, rc);
				lis3dh_acc_vreg[i].vreg = NULL;
				goto error_vdd;
			}

			if (regulator_count_voltages(
				lis3dh_acc_vreg[i].vreg) > 0) {
				rc = regulator_set_voltage(
					lis3dh_acc_vreg[i].vreg,
					lis3dh_acc_vreg[i].min_uV,
					lis3dh_acc_vreg[i].max_uV);
				if (rc) {
					pr_err("%s: set voltage failed rc=%d\n",
					__func__, rc);
					regulator_put(lis3dh_acc_vreg[i].vreg);
					lis3dh_acc_vreg[i].vreg = NULL;
					goto error_vdd;
				}
			}

			rc = regulator_enable(lis3dh_acc_vreg[i].vreg);
			if (rc) {
				pr_err("%s: regulator_enable failed rc =%d\n",
					__func__, rc);
				if (regulator_count_voltages(
					lis3dh_acc_vreg[i].vreg) > 0) {
					regulator_set_voltage(
						lis3dh_acc_vreg[i].vreg, 0,
						lis3dh_acc_vreg[i].max_uV);
				}
				regulator_put(lis3dh_acc_vreg[i].vreg);
				lis3dh_acc_vreg[i].vreg = NULL;
				goto error_vdd;
			}
		}
		return rc;
	} else {
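		/* Power-off path: fall into the unwind loop below to release every regulator. */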
		i = num_reg;
	}

error_vdd:
	while (--i >= 0) {
		if (!IS_ERR_OR_NULL(lis3dh_acc_vreg[i].vreg)) {
			if (regulator_count_voltages(
			lis3dh_acc_vreg[i].vreg) > 0) {
				regulator_set_voltage(lis3dh_acc_vreg[i].vreg,
						0, lis3dh_acc_vreg[i].max_uV);
			}
			regulator_disable(lis3dh_acc_vreg[i].vreg);
			regulator_put(lis3dh_acc_vreg[i].vreg);
			lis3dh_acc_vreg[i].vreg = NULL;
		}
	}
	return rc;
}
Example #23
int omap_tiler_alloc(struct ion_heap *heap,
		     struct ion_client *client,
		     struct omap_ion_tiler_alloc_data *data)
{
	struct ion_handle *handle;
	struct ion_buffer *buffer;
	struct omap_tiler_info *info = NULL;
	u32 n_phys_pages;
	u32 n_tiler_pages;
	u32 tiler_start = 0;
	u32 v_size;
	tiler_blk_handle tiler_handle;
	ion_phys_addr_t addr = 0;
	int i = 0, ret;

	if (data->fmt == TILER_PIXEL_FMT_PAGE && data->h != 1) {
		pr_err("%s: Page mode (1D) allocations must have a height "
		       "of one\n", __func__);
		return -EINVAL;
	}

	ret = tiler_memsize(data->fmt, data->w, data->h,
			    &n_phys_pages,
			    &n_tiler_pages);

	if (ret) {
		pr_err("%s: invalid tiler request w %u h %u fmt %u\n", __func__,
		       data->w, data->h, data->fmt);
		return ret;
	}

	BUG_ON(!n_phys_pages || !n_tiler_pages);

	if( (TILER_ENABLE_NON_PAGE_ALIGNED_ALLOCATIONS)
			&& (data->token != 0) ) {
		tiler_handle = tiler_alloc_block_area_aligned(data->fmt, data->w, data->h,
									    &tiler_start,
									    NULL,
									    data->out_align,
									    data->offset,
									    data->token);
	} else {
		tiler_handle = tiler_alloc_block_area(data->fmt, data->w, data->h,
							    &tiler_start,
							    NULL);
	}

	if (IS_ERR_OR_NULL(tiler_handle)) {
		ret = PTR_ERR(tiler_handle);
		pr_err("%s: failure to allocate address space from tiler\n",
		       __func__);
		goto err_nomem;
	}

	v_size = tiler_block_vsize(tiler_handle);

	if (!v_size) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	n_tiler_pages = (PAGE_ALIGN(v_size) / PAGE_SIZE);

	info = kzalloc(sizeof(struct omap_tiler_info) +
		       sizeof(u32) * n_phys_pages +
		       sizeof(u32) * n_tiler_pages, GFP_KERNEL);
	if (!info) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	info->tiler_handle = tiler_handle;
	info->tiler_start = tiler_start;
	info->n_phys_pages = n_phys_pages;
	info->n_tiler_pages = n_tiler_pages;
	info->phys_addrs = (u32 *)(info + 1);
	info->tiler_addrs = info->phys_addrs + n_phys_pages;
	info->fmt = data->fmt;

	addr = ion_carveout_allocate(heap, n_phys_pages*PAGE_SIZE, 0);
	if (addr == ION_CARVEOUT_ALLOCATE_FAIL) {
		for (i = 0; i < n_phys_pages; i++) {
			addr = ion_carveout_allocate(heap, PAGE_SIZE, 0);

			if (addr == ION_CARVEOUT_ALLOCATE_FAIL) {
				ret = -ENOMEM;
				pr_err("%s: failed to allocate pages to back "
					"tiler address space\n", __func__);
				goto err_alloc;
			}
			info->phys_addrs[i] = addr;
		}
	} else {
		info->lump = true;
		for (i = 0; i < n_phys_pages; i++)
			info->phys_addrs[i] = addr + i*PAGE_SIZE;
	}

	ret = tiler_pin_block(info->tiler_handle, info->phys_addrs,
			      info->n_phys_pages);
	if (ret) {
		pr_err("%s: failure to pin pages to tiler\n", __func__);
		goto err_alloc;
	}

	data->stride = tiler_block_vstride(info->tiler_handle);

	/* create an ion handle  for the allocation */
	handle = ion_alloc(client, 0, 0, 1 << heap->id);
	if (IS_ERR_OR_NULL(handle)) {
		ret = PTR_ERR(handle);
		pr_err("%s: failure to allocate handle to manage tiler"
		       " allocation\n", __func__);
		goto err;
	}

	buffer = ion_handle_buffer(handle);
	buffer->size = v_size;
	buffer->priv_virt = info;
	data->handle = handle;
	data->offset = (size_t)(info->tiler_start & ~PAGE_MASK);

	if (tiler_fill_virt_array(tiler_handle, info->tiler_addrs,
			&n_tiler_pages) < 0) {
		pr_err("%s: failure filling tiler's virtual array %d\n",
				__func__, n_tiler_pages);
	}

	return 0;

err:
	tiler_unpin_block(info->tiler_handle);
err_alloc:
	/* use the local tiler_handle: info may still be NULL on this path */
	tiler_free_block_area(tiler_handle);
	if (info) {
		if (info->lump)
			ion_carveout_free(heap, addr, n_phys_pages * PAGE_SIZE);
		else
			for (i -= 1; i >= 0; i--)
				ion_carveout_free(heap, info->phys_addrs[i],
						  PAGE_SIZE);
	}
err_nomem:
	kfree(info);
	return ret;
}
Exemplo n.º 24
0
static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	long err;

	/* do nothing if we already have a clock device */
	if (!IS_ERR_OR_NULL(adapter->ptp_clock))
		return 0;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X540:
		snprintf(adapter->ptp_caps.name,
			 sizeof(adapter->ptp_caps.name),
			 "%s", netdev->name);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 250000000;
		adapter->ptp_caps.n_alarm = 0;
		adapter->ptp_caps.n_ext_ts = 0;
		adapter->ptp_caps.n_per_out = 0;
		adapter->ptp_caps.pps = 1;
		adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
		adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime_82599;
		adapter->ptp_caps.gettime = ixgbe_ptp_gettime_82599;
		adapter->ptp_caps.settime = ixgbe_ptp_settime_82599;
		adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
		adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_X540;
		break;
	case ixgbe_mac_82599EB:
		snprintf(adapter->ptp_caps.name,
			 sizeof(adapter->ptp_caps.name),
			 "%s", netdev->name);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 250000000;
		adapter->ptp_caps.n_alarm = 0;
		adapter->ptp_caps.n_ext_ts = 0;
		adapter->ptp_caps.n_per_out = 0;
		adapter->ptp_caps.pps = 0;
		adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
		adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime_82599;
		adapter->ptp_caps.gettime = ixgbe_ptp_gettime_82599;
		adapter->ptp_caps.settime = ixgbe_ptp_settime_82599;
		adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
		break;
	default:
		adapter->ptp_clock = NULL;
		adapter->ptp_setup_sdp = NULL;
		return -EOPNOTSUPP;
	}

	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
						pci_dev_to_dev(adapter->pdev));
	if (IS_ERR(adapter->ptp_clock)) {
		err = PTR_ERR(adapter->ptp_clock);
		adapter->ptp_clock = NULL;
		e_dev_err("ptp_clock_register failed\n");
		return err;
	} else
		e_dev_info("registered PHC device on %s\n", netdev->name);

	/* Set the default timestamp mode to disabled here. We do this in
	 * create_clock instead of initialization, because we don't want to
	 * override the previous settings during a suspend/resume cycle.
	 */
	adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
	adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;

	return 0;
}
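The create path above checks for an existing clock so it can safely be called again after a resume. A matching teardown only needs to unregister the PHC and clear the pointer; the sketch below is illustrative (the function name is assumed, not taken from this excerpt) and uses the standard ptp_clock_unregister() API:

static void ixgbe_ptp_destroy_clock(struct ixgbe_adapter *adapter)
{
	if (IS_ERR_OR_NULL(adapter->ptp_clock))
		return;

	/* unregister the PHC device and forget it, so that a later call to
	 * ixgbe_ptp_create_clock() registers a fresh one */
	ptp_clock_unregister(adapter->ptp_clock);
	adapter->ptp_clock = NULL;
	e_dev_info("removed PHC on %s\n", adapter->netdev->name);
}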
Exemplo n.º 25
0
static
int omap_bandgap_probe(struct platform_device *pdev)
{
	struct omap_bandgap *bg_ptr;
	int clk_rate, ret = 0, i;

	bg_ptr = omap_bandgap_build(pdev);
	if (IS_ERR_OR_NULL(bg_ptr)) {
		dev_err(&pdev->dev, "failed to fetch platform data\n");
		return bg_ptr ? PTR_ERR(bg_ptr) : -EINVAL;
	}
	bg_ptr->dev = &pdev->dev;

	if (OMAP_BANDGAP_HAS(bg_ptr, TSHUT)) {
		ret = omap_bandgap_tshut_init(bg_ptr, pdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed to initialize system tshut IRQ\n");
			return ret;
		}
	}

	bg_ptr->fclock = clk_get(NULL, bg_ptr->conf->fclock_name);
	if (IS_ERR_OR_NULL(bg_ptr->fclock)) {
		dev_err(&pdev->dev, "failed to request fclock reference\n");
		ret = -ENODEV;
		goto free_irqs;
	}

	bg_ptr->div_clk = clk_get(NULL, bg_ptr->conf->div_ck_name);
	if (IS_ERR_OR_NULL(bg_ptr->div_clk)) {
		dev_err(&pdev->dev,
			"failed to request div_ts_ck clock ref\n");
		ret = -ENODEV;
		goto free_irqs;
	}

	bg_ptr->conv_table = bg_ptr->conf->conv_table;
	for (i = 0; i < bg_ptr->conf->sensor_count; i++) {
		struct temp_sensor_registers *tsr;
		u32 val;

		tsr = bg_ptr->conf->sensors[i].registers;
		/*
		 * check if the efuse has a non-zero value if not
		 * it is an untrimmed sample and the temperatures
		 * may not be accurate
		 */
		val = omap_bandgap_readl(bg_ptr, tsr->bgap_efuse);
		if (!val)
			dev_info(&pdev->dev,
				 "Non-trimmed BGAP, Temp not accurate\n");
	}

	clk_rate = clk_round_rate(bg_ptr->div_clk,
				  bg_ptr->conf->sensors[0].ts_data->max_freq);
	if (clk_rate < bg_ptr->conf->sensors[0].ts_data->min_freq ||
	    clk_rate == 0xffffffff) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "wrong clock rate (%d)\n", clk_rate);
		goto put_clks;
	}

	ret = clk_set_rate(bg_ptr->div_clk, clk_rate);
	if (ret)
		dev_err(&pdev->dev, "Cannot re-set clock rate. Continuing\n");

	bg_ptr->clk_rate = clk_rate;
	clk_enable(bg_ptr->fclock);

	mutex_init(&bg_ptr->bg_mutex);
	bg_ptr->dev = &pdev->dev;
	platform_set_drvdata(pdev, bg_ptr);

	omap_bandgap_power(bg_ptr, true);

	/* Set default counter to 1 for now */
	if (OMAP_BANDGAP_HAS(bg_ptr, COUNTER))
		for (i = 0; i < bg_ptr->conf->sensor_count; i++)
			configure_temp_sensor_counter(bg_ptr, i, 1);

	for (i = 0; i < bg_ptr->conf->sensor_count; i++) {
		struct temp_sensor_data *ts_data;

		ts_data = bg_ptr->conf->sensors[i].ts_data;

		if (OMAP_BANDGAP_HAS(bg_ptr, TALERT))
			temp_sensor_init_talert_thresholds(bg_ptr, i,
							   ts_data->t_hot,
							   ts_data->t_cold);
		if (OMAP_BANDGAP_HAS(bg_ptr, TSHUT_CONFIG)) {
			temp_sensor_configure_tshut_hot(bg_ptr, i,
							ts_data->tshut_hot);
			temp_sensor_configure_tshut_cold(bg_ptr, i,
							 ts_data->tshut_cold);
		}
	}

	if (OMAP_BANDGAP_HAS(bg_ptr, MODE_CONFIG))
		enable_continuous_mode(bg_ptr);

	/* Set .250 seconds time as default counter */
	if (OMAP_BANDGAP_HAS(bg_ptr, COUNTER))
		for (i = 0; i < bg_ptr->conf->sensor_count; i++)
			configure_temp_sensor_counter(bg_ptr, i,
						      bg_ptr->clk_rate / 4);

	/* Everything is good? Then expose the sensors */
	for (i = 0; i < bg_ptr->conf->sensor_count; i++) {
		char *domain;

		if (bg_ptr->conf->sensors[i].register_cooling)
			bg_ptr->conf->sensors[i].register_cooling(bg_ptr, i);

		domain = bg_ptr->conf->sensors[i].domain;
		if (bg_ptr->conf->expose_sensor)
			bg_ptr->conf->expose_sensor(bg_ptr, i, domain);
	}

	/*
	 * Enable the interrupts once everything is set. Otherwise the IRQ
	 * handler might be called as soon as it is enabled, whereas the rest
	 * of the framework is still getting initialised.
	 */
	if (OMAP_BANDGAP_HAS(bg_ptr, TALERT)) {
		ret = omap_bandgap_talert_init(bg_ptr, pdev);
		if (ret) {
			dev_err(&pdev->dev, "failed to initialize Talert IRQ\n");
			i = bg_ptr->conf->sensor_count;
			goto disable_clk;
		}
	}

	return 0;

disable_clk:
	clk_disable(bg_ptr->fclock);
put_clks:
	clk_put(bg_ptr->fclock);
	clk_put(bg_ptr->div_clk);
free_irqs:
	if (OMAP_BANDGAP_HAS(bg_ptr, TSHUT)) {
		free_irq(gpio_to_irq(bg_ptr->tshut_gpio), NULL);
		gpio_free(bg_ptr->tshut_gpio);
	}

	return ret;
}
Exemplo n.º 26
0
static int mpu3050_config_regulator(struct i2c_client *client, bool on)
{
	int rc = 0, i;
	int num_reg = sizeof(mpu_vreg) / sizeof(struct sensor_regulator);

	if (on) {
		for (i = 0; i < num_reg; i++) {
			mpu_vreg[i].vreg = regulator_get(&client->dev,
						mpu_vreg[i].name);
			if (IS_ERR(mpu_vreg[i].vreg)) {
				rc = PTR_ERR(mpu_vreg[i].vreg);
				pr_err("%s:regulator get failed rc=%d\n",
						__func__, rc);
				mpu_vreg[i].vreg = NULL;
				goto error_vdd;
			}

			if (regulator_count_voltages(mpu_vreg[i].vreg) > 0) {
				rc = regulator_set_voltage(mpu_vreg[i].vreg,
					mpu_vreg[i].min_uV, mpu_vreg[i].max_uV);
				if (rc) {
					pr_err("%s:set_voltage failed rc=%d\n",
						__func__, rc);
					regulator_put(mpu_vreg[i].vreg);
					mpu_vreg[i].vreg = NULL;
					goto error_vdd;
				}
			}

			rc = regulator_enable(mpu_vreg[i].vreg);
			if (rc) {
				pr_err("%s: regulator_enable failed rc =%d\n",
						__func__,
						rc);

				if (regulator_count_voltages(
					mpu_vreg[i].vreg) > 0) {
					regulator_set_voltage(mpu_vreg[i].vreg,
						0, mpu_vreg[i].max_uV);
				}
				regulator_put(mpu_vreg[i].vreg);
				mpu_vreg[i].vreg = NULL;
				goto error_vdd;
			}
		}
		return rc;
	} else {
		i = num_reg;
	}
error_vdd:
	while (--i >= 0) {
		if (!IS_ERR_OR_NULL(mpu_vreg[i].vreg)) {
			if (regulator_count_voltages(
				mpu_vreg[i].vreg) > 0) {
				regulator_set_voltage(mpu_vreg[i].vreg, 0,
						mpu_vreg[i].max_uV);
			}
			regulator_disable(mpu_vreg[i].vreg);
			regulator_put(mpu_vreg[i].vreg);
			mpu_vreg[i].vreg = NULL;
		}
	}
	return rc;
}
Exemplo n.º 27
0
int max77888_irq_init(struct max77888_dev *max77888)
{
	int i;
	int cur_irq;
	int ret;
	u8 i2c_data;

	pr_info("func: %s, irq_gpio: %d, irq_base: %d\n", __func__,
		max77888->irq_gpio, max77888->irq_base);
	
	if (!max77888->irq_gpio) {
		dev_warn(max77888->dev, "No interrupt specified.\n");
		max77888->irq_base = 0;
		return 0;
	}

	if (!max77888->irq_base) {
		dev_err(max77888->dev, "No interrupt base specified.\n");
		return -EINVAL;
	}

	mutex_init(&max77888->irqlock);

	max77888->irq = gpio_to_irq(max77888->irq_gpio);
	ret = gpio_request(max77888->irq_gpio, "if_pmic_irq");
	if (ret) {
		dev_err(max77888->dev, "%s: failed requesting gpio %d\n",
			__func__, max77888->irq_gpio);
		return ret;
	}
	gpio_direction_input(max77888->irq_gpio);
	gpio_free(max77888->irq_gpio);

	/* Mask individual interrupt sources */
	for (i = 0; i < MAX77888_IRQ_GROUP_NR; i++) {
		struct i2c_client *i2c;
		/* MUIC IRQ  0:MASK 1:NOT MASK */
		/* Other IRQ 1:MASK 0:NOT MASK */
		if (i >= MUIC_INT1 && i <= MUIC_INT3) {
			max77888->irq_masks_cur[i] = 0x00;
			max77888->irq_masks_cache[i] = 0x00;
		} else {
			max77888->irq_masks_cur[i] = 0xff;
			max77888->irq_masks_cache[i] = 0xff;
		}
		i2c = get_i2c(max77888, i);

		if (IS_ERR_OR_NULL(i2c))
			continue;
		if (max77888_mask_reg[i] == MAX77888_REG_INVALID)
			continue;
		if (i >= MUIC_INT1 && i <= MUIC_INT3)
			max77888_write_reg(i2c, max77888_mask_reg[i], 0x00);
		else
			max77888_write_reg(i2c, max77888_mask_reg[i], 0xff);
	}

	/* Register with genirq */
	for (i = 0; i < MAX77888_IRQ_NR; i++) {
		cur_irq = i + max77888->irq_base;
		irq_set_chip_data(cur_irq, max77888);
		irq_set_chip_and_handler(cur_irq, &max77888_irq_chip,
					 handle_edge_irq);
		irq_set_nested_thread(cur_irq, 1);
#ifdef CONFIG_ARM
		set_irq_flags(cur_irq, IRQF_VALID);
#else
		irq_set_noprobe(cur_irq);
#endif
	}

	/* Unmask max77888 interrupt */
	ret = max77888_read_reg(max77888->i2c, MAX77888_PMIC_REG_INTSRC_MASK,
			  &i2c_data);
	if (ret) {
		dev_err(max77888->dev, "%s: fail to read muic reg\n", __func__);
		return ret;
	}

	i2c_data &= ~(MAX77888_IRQSRC_CHG);	/* Unmask charger interrupt */
	i2c_data &= ~(MAX77888_IRQSRC_MUIC);	/* Unmask muic interrupt */
	max77888_write_reg(max77888->i2c, MAX77888_PMIC_REG_INTSRC_MASK,
			   i2c_data);

	ret = request_threaded_irq(max77888->irq, NULL, max77888_irq_thread,
			IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
			"max77888-irq", max77888);

	if (ret) {
		dev_err(max77888->dev, "Failed to request IRQ %d: %d\n",
			max77888->irq, ret);
		return ret;
	}

	return 0;
}
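For completeness, the teardown matching the init above mainly has to release the threaded IRQ that was requested last. A minimal sketch; the exit-function name is assumed here and does not appear in this excerpt:

void max77888_irq_exit(struct max77888_dev *max77888)
{
	/* release the threaded handler requested in max77888_irq_init() */
	if (max77888->irq)
		free_irq(max77888->irq, max77888);
}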
Exemplo n.º 28
0
static int __devinit rotator_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rot_context *rot;
	struct resource *res;
	struct exynos_drm_ippdrv *ippdrv;
	int ret;

	rot = kzalloc(sizeof(*rot), GFP_KERNEL);
	if (!rot) {
		dev_err(dev, "failed to allocate rot\n");
		return -ENOMEM;
	}

	rot->limit_tbl = (struct rot_limit_table *)
				platform_get_device_id(pdev)->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "failed to find registers\n");
		ret = -ENOENT;
		goto err_get_resource;
	}

	rot->regs_res = request_mem_region(res->start, resource_size(res),
								dev_name(dev));
	if (!rot->regs_res) {
		dev_err(dev, "failed to claim register region\n");
		ret = -ENOENT;
		goto err_get_resource;
	}

	rot->regs = ioremap(res->start, resource_size(res));
	if (!rot->regs) {
		dev_err(dev, "failed to map register\n");
		ret = -ENXIO;
		goto err_ioremap;
	}

	rot->irq = platform_get_irq(pdev, 0);
	if (rot->irq < 0) {
		dev_err(dev, "faild to get irq\n");
		ret = rot->irq;
		goto err_get_irq;
	}

	ret = request_irq(rot->irq, rotator_irq_handler, 0, "drm_rotator", rot);
	if (ret < 0) {
		dev_err(dev, "failed to request irq\n");
		goto err_get_irq;
	}

	rot->clock = clk_get(dev, "rotator");
	if (IS_ERR_OR_NULL(rot->clock)) {
		dev_err(dev, "failed to get clock\n");
		/* PTR_ERR(NULL) would be 0; map a NULL clock to -ENOENT */
		ret = rot->clock ? PTR_ERR(rot->clock) : -ENOENT;
		goto err_clk_get;
	}

	pm_runtime_enable(dev);

	ippdrv = &rot->ippdrv;
	ippdrv->dev = dev;
	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
	ippdrv->open = rotator_ippdrv_open;
	ippdrv->close = rotator_ippdrv_close;
	ippdrv->start = rotator_ippdrv_start;

	platform_set_drvdata(pdev, rot);

	ret = exynos_drm_ippdrv_register(ippdrv);
	if (ret < 0) {
		dev_err(dev, "failed to register drm rotator device\n");
		goto err_ippdrv_register;
	}

	dev_info(dev, "The exynos rotator is probed successfully\n");

	return 0;

err_ippdrv_register:
	pm_runtime_disable(dev);
	clk_put(rot->clock);
err_clk_get:
	free_irq(rot->irq, rot);
err_get_irq:
	iounmap(rot->regs);
err_ioremap:
	release_resource(rot->regs_res);
	kfree(rot->regs_res);
err_get_resource:
	kfree(rot);
	return ret;
}
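The error labels in the probe above already spell out the unwind order; a remove callback would run the same steps after detaching from the IPP core. A sketch under the assumption that exynos_drm_ippdrv_unregister() is the counterpart of the register call used above:

static int __devexit rotator_remove(struct platform_device *pdev)
{
	struct rot_context *rot = platform_get_drvdata(pdev);

	exynos_drm_ippdrv_unregister(&rot->ippdrv);

	/* undo the probe steps in reverse order */
	pm_runtime_disable(&pdev->dev);
	clk_put(rot->clock);
	free_irq(rot->irq, rot);
	iounmap(rot->regs);
	release_resource(rot->regs_res);
	kfree(rot->regs_res);
	kfree(rot);

	return 0;
}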
Exemplo n.º 29
0
static void *res_trk_pmem_map
	(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 offset = 0, flags = 0;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	int ret = 0;
	unsigned long iova = 0;
	unsigned long buffer_size  = 0;
	unsigned long *kernel_vaddr = NULL;

	ddl_context = ddl_get_context();
	if (res_trk_get_enable_ion() && addr->alloc_handle) {
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					addr->alloc_handle);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			DDL_MSG_ERROR("%s():DDL ION client map failed\n",
						 __func__);
			goto ion_bail_out;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		ret = ion_map_iommu(ddl_context->video_ion_client,
				addr->alloc_handle,
				VIDEO_DOMAIN,
				VIDEO_FIRMWARE_POOL,
				SZ_4K,
				0,
				&iova,
				&buffer_size,
				0, 0);
		if (ret || !iova) {
			DDL_MSG_ERROR(
			"%s():DDL ION client iommu map failed, ret = %d iova = 0x%lx\n",
			__func__, ret, iova);
			goto ion_unmap_bail_out;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *)iova;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
		addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = buffer_size;
	} else {
		if (!res_trk_check_for_sec_session()) {
			if (!addr->alloced_phys_addr) {
				pr_err(" %s() alloced addres NULL", __func__);
				goto bail_out;
			}
			flags = MSM_SUBSYSTEM_MAP_IOVA |
				MSM_SUBSYSTEM_MAP_KADDR;
			if (alignment == DDL_KILO_BYTE(128))
				index = 1;
			else if (alignment > SZ_4K)
				flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
			addr->mapped_buffer =
			msm_subsystem_map_buffer(
			(unsigned long)addr->alloced_phys_addr,
			sz, flags, &restrk_mmu_subsystem[index],
			sizeof(restrk_mmu_subsystem[index])/
				sizeof(unsigned int));
			if (IS_ERR(addr->mapped_buffer)) {
				pr_err(" %s() buffer map failed", __func__);
				goto bail_out;
			}
			mapped_buffer = addr->mapped_buffer;
			if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
				pr_err("%s() map buffers failed\n", __func__);
				goto bail_out;
			}
			addr->physical_base_addr =
				 (u8 *)mapped_buffer->iova[0];
			addr->virtual_base_addr =
					mapped_buffer->vaddr;
		} else {
			addr->physical_base_addr =
				(u8 *) addr->alloced_phys_addr;
			addr->virtual_base_addr =
				(u8 *)addr->alloced_phys_addr;
		}
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
		addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = sz;
	}
	return addr->virtual_base_addr;
bail_out:
	if (IS_ERR(addr->mapped_buffer))
		msm_subsystem_unmap_buffer(addr->mapped_buffer);
	return NULL;
ion_unmap_bail_out:
	if (!IS_ERR_OR_NULL(addr->alloc_handle)) {
		ion_unmap_kernel(resource_context.
			res_ion_client,	addr->alloc_handle);
	}
ion_bail_out:
	return NULL;
}
Exemplo n.º 30
0
static int mdnie_probe(struct platform_device *pdev)
{
#if defined(CONFIG_FB_MDNIE_PWM)
 	struct platform_mdnie_data *pdata = pdev->dev.platform_data;
#endif
 	struct mdnie_info *mdnie;
 	int ret = 0;
 
 	mdnie_class = class_create(THIS_MODULE, dev_name(&pdev->dev));
 	if (IS_ERR_OR_NULL(mdnie_class)) {
 		pr_err("failed to create mdnie class\n");
 		ret = -EINVAL;
 		goto error0;
 	}
 
 	mdnie_class->dev_attrs = mdnie_attributes;
 
 	mdnie = kzalloc(sizeof(struct mdnie_info), GFP_KERNEL);
 	if (!mdnie) {
 		pr_err("failed to allocate mdnie\n");
 		ret = -ENOMEM;
 		goto error1;
 	}
 
	mdnie->dev = device_create(mdnie_class, &pdev->dev, 0, mdnie, "mdnie");
 	if (IS_ERR_OR_NULL(mdnie->dev)) {
 		pr_err("failed to create mdnie device\n");
 		ret = -EINVAL;
 		goto error2;
 	}
 
#if defined(CONFIG_FB_MDNIE_PWM)
 	if (!pdata) {
 		pr_err("no platform data specified\n");
 		ret = -EINVAL;
 		goto error2;
 	}
 
	mdnie->bd = backlight_device_register("panel", mdnie->dev,
		mdnie, &mdnie_backlight_ops, NULL);
	if (IS_ERR_OR_NULL(mdnie->bd)) {
		ret = -EINVAL;
		goto error2;
	}
	mdnie->bd->props.max_brightness = MAX_BRIGHTNESS_LEVEL;
	mdnie->bd->props.brightness = DEFAULT_BRIGHTNESS;
 	mdnie->bd_enable = TRUE;
 	mdnie->lcd_pd = pdata->lcd_pd;
 
 	ret = device_create_file(&mdnie->bd->dev, &dev_attr_auto_brightness);
 	if (ret < 0)
 		dev_err(&mdnie->bd->dev, "failed to add sysfs entries, %d\n", __LINE__);
#endif
 
 	mdnie->scenario = UI_MODE;
 	mdnie->mode = STANDARD;
 	mdnie->tone = TONE_NORMAL;
 	mdnie->outdoor = OUTDOOR_OFF;
#if defined(CONFIG_FB_MDNIE_PWM)
 	mdnie->cabc = CABC_ON;
 	mdnie->power_lut_idx = LUT_LEVEL_MANUAL_AND_INDOOR;
 	mdnie->auto_brightness = 0;
#else
 	mdnie->cabc = CABC_OFF;
#endif
 
#if defined(CONFIG_FB_S5P_S6C1372)
 	mdnie->cabc = CABC_OFF;
#endif
 	mdnie->enable = TRUE;
 	mdnie->tunning = FALSE;
 	mdnie->negative = NEGATIVE_OFF;
 
 	mutex_init(&mdnie->lock);
 	mutex_init(&mdnie->dev_lock);
 
 	platform_set_drvdata(pdev, mdnie);
 	dev_set_drvdata(mdnie->dev, mdnie);
 
#ifdef CONFIG_HAS_WAKELOCK
#ifdef CONFIG_HAS_EARLYSUSPEND
#if 1 /* defined(CONFIG_FB_MDNIE_PWM) */
 	mdnie->early_suspend.suspend = mdnie_early_suspend;
 	mdnie->early_suspend.resume = mdnie_late_resume;
 	mdnie->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN; //EARLY_SUSPEND_LEVEL_DISABLE_FB - 1;
 	register_early_suspend(&mdnie->early_suspend);
#endif
#endif
#endif
 
#if defined(CONFIG_FB_S5P_S6C1372)
 	check_lcd_type();
 	dev_info(mdnie->dev, "lcdtype = %d\n", pdata->display_type);
 	if (pdata->display_type == 1) {
 		b_value.max = 1441;
 		b_value.mid = 784;
 		b_value.low = 16;
 		b_value.dim = 16;
 	} else {
 		b_value.max = 1216;	/* 76% */
 		b_value.mid = 679;	/* 39% */
 		b_value.low = 16;	/* 1% */
 		b_value.dim = 16;	/* 1% */
 	}
#endif
 
#if defined(CONFIG_FB_S5P_S6F1202A)
 	if (pdata->display_type == 0) {
 		memcpy(tunning_table, tunning_table_hydis, sizeof(tunning_table));
 		memcpy(etc_table, etc_table_hydis, sizeof(etc_table));
 		memcpy(camera_table, camera_table_hydis, sizeof(camera_table));
 	} else if (pdata->display_type == 1) {
 		memcpy(tunning_table, tunning_table_sec, sizeof(tunning_table));
 		memcpy(etc_table, etc_table_sec, sizeof(etc_table));
 		memcpy(camera_table, camera_table_sec, sizeof(camera_table));
 	} else if (pdata->display_type == 2) {
 		memcpy(tunning_table, tunning_table_boe, sizeof(tunning_table));
 		memcpy(etc_table, etc_table_boe, sizeof(etc_table));
 		memcpy(camera_table, camera_table_boe, sizeof(camera_table));
 	}
#endif
 
 	g_mdnie = mdnie;
 
 	set_mdnie_value(mdnie, 0);
 
 	dev_info(mdnie->dev, "registered successfully\n");
 
 	return 0;
 
 error2:
 	kfree(mdnie);
 error1:
 	class_destroy(mdnie_class);
 error0:
 	return ret; 
}