int32_t    scale_continue(void)
{
	enum scale_drv_rtn      rtn = SCALE_RTN_SUCCESS;
	uint32_t                slice_h = g_path->slice_height;

	SCALE_TRACE("SCALE DRV: continue %d, %d, %d \n",
		g_path->slice_height, g_path->slice_in_height, g_path->scale_mode);

	if (SCALE_MODE_SLICE == g_path->scale_mode) {
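		/* slice mode: if the next slice would run past the input height,
		 * clamp it to the remaining lines and set bit 12 of SCALE_SLICE_VER,
		 * which looks like the hardware's "last slice" flag */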
		if (g_path->slice_in_height +  g_path->slice_height >= g_path->input_rect.h) {
			slice_h = g_path->input_rect.h - g_path->slice_in_height;
			g_path->is_last_slice = 1;
			REG_MWR(SCALE_SLICE_VER, 0x3FF, slice_h);
			REG_OWR(SCALE_SLICE_VER, (1 << 12));
			SCALE_TRACE("SCALE DRV: continue, last slice, 0x%x \n", REG_RD(SCALE_SLICE_VER));
		} else {
			g_path->is_last_slice = 0;
			REG_MWR(SCALE_SLICE_VER, (1 << 12), (0 << 12));
		}
		g_path->slice_in_height += g_path->slice_height;
	}

	REG_WR(SCALE_FRM_SWAP_Y, g_path->temp_buf_addr.yaddr);
	REG_WR(SCALE_FRM_SWAP_U, g_path->temp_buf_addr.uaddr);
	REG_WR(SCALE_FRM_LINE,   g_path->temp_buf_addr.vaddr);
	
	_scale_reg_trace();
	REG_OWR(SCALE_CFG, 1);
	atomic_inc(&g_path->start_flag);
	SCALE_TRACE("SCALE DRV: continue %x.\n", REG_RD(SCALE_CFG));

	return rtn;
}
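
/* Check the input-rect to output-size ratio against the scaler range
 * (SCALE_SC_COEFF_MAX, plus up to SCALE_DECI_FAC_MAX decimation steps).
 * When plain scaling cannot reach the target, pick a decimation factor and
 * shrink/realign the effective input size accordingly. */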
static int32_t _scale_calc_sc_size(void)
{
	uint32_t                reg_val = 0;
	enum scale_drv_rtn      rtn = SCALE_RTN_SUCCESS;
	uint32_t                div_factor = 1;
	uint32_t                i;
		
	if (g_path->input_rect.w > (g_path->output_size.w * SCALE_SC_COEFF_MAX * (1 << SCALE_DECI_FAC_MAX)) ||
	    g_path->input_rect.h > (g_path->output_size.h * SCALE_SC_COEFF_MAX * (1 << SCALE_DECI_FAC_MAX)) ||
	    g_path->input_rect.w * SCALE_SC_COEFF_MAX < g_path->output_size.w ||
	    g_path->input_rect.h * SCALE_SC_COEFF_MAX < g_path->output_size.h) {
		SCALE_TRACE("SCALE DRV: Target too small or large \n");
		rtn = SCALE_RTN_SC_ERR;
	} else {
		g_path->sc_input_size.w = g_path->input_rect.w;
		g_path->sc_input_size.h = g_path->input_rect.h;
		if (g_path->input_rect.w > g_path->output_size.w * SCALE_SC_COEFF_MAX ||
			g_path->input_rect.h > g_path->output_size.h * SCALE_SC_COEFF_MAX) {
			for (i = 0; i < SCALE_DECI_FAC_MAX; i++) {
				div_factor = (uint32_t)(SCALE_SC_COEFF_MAX * (1 << (1 + i)));
				if (g_path->input_rect.w < (g_path->output_size.w * div_factor) &&
					g_path->input_rect.h < (g_path->output_size.h * div_factor)) {
					break;
				}
			}
			REG_OWR(SCALE_CFG, 1 << 2);
			REG_MWR(SCALE_CFG, (3 << 9), i << 9);
			g_path->sc_input_size.w = g_path->input_rect.w >> (1 + i);
			g_path->sc_input_size.h = g_path->input_rect.h >> (1 + i);
			if ((g_path->sc_input_size.w & (SCALE_PIXEL_ALIGNED - 1)) ||
				(g_path->sc_input_size.h & (SCALE_PIXEL_ALIGNED - 1))) {
				SCALE_TRACE("SCALE DRV: Unsupported sc aligned w ,h %d %d \n",
					g_path->sc_input_size.w,
					g_path->sc_input_size.h);
				g_path->sc_input_size.w = g_path->sc_input_size.w & ~(SCALE_PIXEL_ALIGNED - 1);
				g_path->sc_input_size.h = g_path->sc_input_size.h & ~(SCALE_PIXEL_ALIGNED - 1);
				g_path->input_rect.w = g_path->sc_input_size.w << (1 + i);
				g_path->input_rect.h = g_path->sc_input_size.h << (1 + i);
				SCALE_TRACE("SCALE DRV: after rearranged w ,h %d %d, sc w h %d %d \n",
					g_path->input_rect.w,
					g_path->input_rect.h,
					g_path->sc_input_size.w,
					g_path->sc_input_size.h);
				reg_val = g_path->input_rect.w | (g_path->input_rect.h << 16);
				REG_OWR(SCALE_CFG, 1 << 1);
				REG_WR(SCALE_TRIM_SIZE, reg_val);
			}
		} 

	}

	return rtn;
}
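
/* Per-process open: claim a scale_user slot for the caller's pid; the first
 * opener also powers up the scale module and registers the TX-done callback. */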
static int img_scale_open(struct inode *node, struct file *pf)
{
	int ret = 0;
	struct scale_user *p_user = NULL;

	mutex_lock(&scale_dev_open_mutex);

	SCALE_TRACE("img_scale_open \n");

	p_user = scale_get_user(current->pid);
	if (NULL == p_user) {
		printk("img_scale_open user cnt full, pid:%d \n", current->pid);
		mutex_unlock(&scale_dev_open_mutex);
		return -1;
	}
	pf->private_data = p_user;

	if (1 == atomic_inc_return(&scale_users)) {
		ret = scale_module_en();
		if (unlikely(ret)) {
			printk("Failed to enable scale module \n");
			ret = -EIO;
			goto faile;
		}

		ret = scale_reg_isr(SCALE_TX_DONE, scale_done, NULL);
		if (unlikely(ret)) {
			printk("Failed to register ISR \n");
			ret = -EACCES;
			goto reg_faile;
		} else {
			goto exit;
		}
	} else {
		goto exit;
	}
reg_faile:
	scale_module_dis();
faile:
	atomic_dec(&scale_users);
	p_user->pid = INVALID_USER_ID;
	pf->private_data = NULL;
exit:
	mutex_unlock(&scale_dev_open_mutex);

	SCALE_TRACE("img_scale_open %d \n", ret);

	return ret;

}
static int scale_k_open(struct inode *node, struct file *file)
{
	int ret = 0;
	struct scale_k_private *scale_private = s_scale_private; //platform_get_drvdata(scale_k_get_platform_device())
	struct scale_k_file *fd = NULL;
	struct miscdevice *md = file->private_data;

	if (!scale_private) {
		ret = -EFAULT;
		printk("scale_k_open error: scale_private is null \n");
		goto exit;
	}

	fd = vzalloc(sizeof(*fd));
	if (!fd) {
		ret = -ENOMEM;
		printk("scale_k_open error: alloc \n");
		goto exit;
	}
	fd->dn = md->this_device->of_node;
	scale_k_file_init(fd, scale_private);

	file->private_data = fd;

	SCALE_TRACE("scale_k_open fd=0x%x ret=%d\n", (int)fd, ret);

exit:
	return ret;
}
static int scale_k_release(struct inode *node, struct file *file)
{
	struct scale_k_file *fd = NULL;
	struct scale_k_private *scale_private = NULL;

	fd = file->private_data;
	if (!fd) {
		goto exit;
	}

	scale_private = fd->scale_private;
	if (!scale_private) {
		goto fd_free;
	}

	down(&scale_private->start_sem);
	up(&scale_private->start_sem);

fd_free:
	vfree(fd);
	fd = NULL;
	file->private_data = NULL;

exit:
	SCALE_TRACE("scale_k_release\n");

	return 0;
}
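
/* Scaler done interrupt: build a scale_frame describing the finished output
 * (for slice mode, only the lines produced since the previous slice) and hand
 * it to the registered user callback. */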
static irqreturn_t _scale_isr_root(int irq, void *dev_id)
{
	uint32_t                status;
	struct scale_frame      frame;
	unsigned long           flag;
	
	(void)irq; (void)dev_id;
	status = REG_RD(SCALE_INT_STS);

	if (unlikely(0 == (status & SCALE_IRQ_BIT))) {
		/* shared IRQ line and the done bit is not ours */
		return IRQ_NONE;
	}

	SCALE_TRACE("SCALE DRV: _scale_isr_root \n");
	spin_lock_irqsave(&scale_lock, flag);
	if (g_path->user_func) {
		frame.yaddr = g_path->output_addr.yaddr;
		frame.uaddr = g_path->output_addr.uaddr;
		frame.vaddr = g_path->output_addr.vaddr;
		frame.width = g_path->output_size.w;
		if (SCALE_MODE_SLICE == g_path->scale_mode) {
			frame.height = g_path->slice_out_height;
			g_path->slice_out_height = REG_RD(SCALE_SLICE_VER);
			g_path->slice_out_height = (g_path->slice_out_height >> 16) & 0xFFF;
			frame.height = g_path->slice_out_height - frame.height;
		} else {
			frame.height = g_path->output_size.h;
		}
		g_path->user_func(&frame, g_path->user_data);
	}
	/* acknowledge the done interrupt and wake a pending scale_stop() */
	REG_OWR(SCALE_INT_CLR, SCALE_IRQ_BIT);
	atomic_dec(&g_path->start_flag);
	if (s_wait_flag) {
		up(&scale_done_sema);
		s_wait_flag = 0;
	}
	spin_unlock_irqrestore(&scale_lock, flag);

	return IRQ_HANDLED;
}

int img_scale_probe(struct platform_device *pdev)
{
	int                      ret = 0;
	int                      i = 0;
	struct scale_user *p_user = NULL;
	
	SCALE_TRACE("scale_probe called \n");

	ret = misc_register(&img_scale_dev);
	if (ret) {
		SCALE_TRACE("cannot register miscdev (%d)\n", ret);
		goto exit;
	}

	img_scale_proc_file = create_proc_read_entry("driver/scale",
						0444,
						NULL,
						img_scale_proc_read,
						NULL);
	if (unlikely(NULL == img_scale_proc_file)) {
		printk("Can't create an entry for scale in /proc \n");
		ret = -ENOMEM;
		goto exit;
	}
	
	/* initialize locks */
	mutex_init(&scale_param_cfg_mutex);
	mutex_init(&scale_dev_open_mutex);

	g_scale_user = kzalloc(SCALE_USER_MAX * sizeof(struct scale_user), GFP_KERNEL);
	if (NULL == g_scale_user) {
		printk("scale_user, no mem \n");
		ret = -ENOMEM;
		goto exit;
	}
	p_user = g_scale_user;
	for (i =  0; i < SCALE_USER_MAX; i++) {
		p_user->pid = INVALID_USER_ID;
		sema_init(&p_user->sem_done, 0);
		p_user++;
	}
	cur_task_pid = INVALID_USER_ID;
	
exit:	
	return ret;
}
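
/* Kick off a scale operation: check the line-buffer limit for normal mode,
 * allocate the temporary swap buffer, program trim and scaler settings,
 * install the ISR, and finally write bit 0 of SCALE_CFG to start the
 * hardware. */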
int32_t    scale_start(void)
{
	enum scale_drv_rtn      rtn = SCALE_RTN_SUCCESS;
	int                     ret = 0;

	SCALE_TRACE("SCALE DRV: scale_start: %d \n", g_path->scale_mode);

	dcam_resize_start();
	if (g_path->output_size.w > SCALE_LINE_BUF_LENGTH &&
		SCALE_MODE_NORMAL == g_path->scale_mode) {
		rtn = SCALE_RTN_SC_ERR;
		SCALE_RTN_IF_ERR;
	}
	rtn = _scale_alloc_tmp_buf();
	if(rtn) {
		printk("SCALE DRV: No mem to alloc tmp buf \n");
		goto exit;
	}

	g_path->slice_in_height  = 0;
	g_path->slice_out_height = 0;
	g_path->is_last_slice    = 0;
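	/* clear any stale done status and enable (unmask) the done interrupt */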
	REG_OWR(SCALE_INT_CLR,  SCALE_IRQ_BIT);
	REG_OWR(SCALE_INT_MASK, SCALE_IRQ_BIT);

	rtn = _scale_trim();
	if(rtn) goto exit;
	rtn = _scale_cfg_scaler();
	if(rtn) goto exit;

	ret = request_irq(SCALE_IRQ, 
			_scale_isr_root, 
			IRQF_SHARED, 
			"SCALE", 
			&g_scale_irq);
	if (ret) {
		printk("SCALE DRV: scale_start, error %d \n", ret);
		rtn = SCALE_RTN_MAX;
		goto exit;
	}

	if (SCALE_MODE_SLICE == g_path->scale_mode) {
		g_path->slice_in_height += g_path->slice_height;
	}
	REG_OWR(SCALE_BASE,  1 << 2);
	_scale_reg_trace();	

	REG_OWR(SCALE_CFG, 1);
	atomic_inc(&g_path->start_flag);
	return SCALE_RTN_SUCCESS;

exit:
	dcam_resize_end();
	_scale_free_tmp_buf();
	printk("SCALE DRV: ret %d \n", rtn);
	return rtn;
}
static int img_scale_release(struct inode *node, struct file *file)
{
	((struct scale_user *)(file->private_data))->pid = INVALID_USER_ID;
	if (0 == atomic_dec_return(&scale_users)) {
		scale_reg_isr(SCALE_TX_DONE, NULL, NULL);
		scale_module_dis();
	}

	SCALE_TRACE("img_scale_release \n");
	return 0;
}
static int img_scale_remove(struct platform_device *dev)
{
	SCALE_TRACE( "scale_remove called !\n");

	if (g_scale_user) {
		kfree(g_scale_user);
	}
	
	if (img_scale_proc_file) {
		remove_proc_entry("driver/scale", NULL);
	}

	misc_deregister(&img_scale_dev);
	return 0;
}
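
/* read() reports two words to user space: the line buffer length and the
 * maximum scale coefficient supported by the hardware. */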
ssize_t img_scale_read(struct file *file, char __user *u_data, size_t cnt, loff_t *cnt_ret)
{
	uint32_t                 rt_word[2];

	if (cnt < 2 * sizeof(uint32_t)) {
		printk("img_scale_read, wrong size of u_data %zu \n", cnt);
		return -EINVAL;
	}

	rt_word[0] = SCALE_LINE_BUF_LENGTH;
	rt_word[1] = SCALE_SC_COEFF_MAX;
	SCALE_TRACE("img_scale_read line threshold %d, sc factor %d \n", rt_word[0], rt_word[1]);
	(void)file; (void)cnt_ret;
	if (copy_to_user(u_data, (void *)rt_word, 2 * sizeof(uint32_t)))
		return -EFAULT;

	return 2 * sizeof(uint32_t);
}
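/* Stop the scaler: wait for an in-flight operation to signal completion,
 * clear the start bit, disable the done interrupt, release the IRQ and the
 * temporary buffer. */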
int32_t    scale_stop(void)
{
	enum scale_drv_rtn      rtn = SCALE_RTN_SUCCESS;
	unsigned long           flag;
	uint32_t                do_wait = 0;

	spin_lock_irqsave(&scale_lock, flag);
	if (atomic_read(&g_path->start_flag)) {
		s_wait_flag = 1;
		do_wait = 1;
	}
	spin_unlock_irqrestore(&scale_lock, flag);

	/* wait for the done ISR outside the spinlock; sleeping with the lock
	 * held (and IRQs off) would prevent the ISR from ever waking us */
	if (do_wait) {
		if (down_interruptible(&scale_done_sema)) {
			printk("SCALE DRV: stop, wait for done interrupted \n");
		}
	}

	REG_MWR(SCALE_CFG, 1, 0);
	REG_MWR(SCALE_INT_MASK, SCALE_IRQ_BIT, 0);
	REG_MWR(SCALE_INT_CLR,  SCALE_IRQ_BIT, 0);
	free_irq(SCALE_IRQ, &g_scale_irq);

	_scale_free_tmp_buf();

	SCALE_TRACE("SCALE DRV: stop is OK.\n");
	return rtn;
}
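/* Generate the horizontal and vertical scaling coefficient tables with
 * GenScaleCoeff() and load them into the coefficient RAM behind SCALE_BASE,
 * holding what looks like a request/ack handshake (bit 4 / bit 6 of
 * SCALE_BASE) around the writes. */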
static int32_t _scale_set_sc_coeff(void)
{
	uint32_t                i = 0;
	uint32_t                h_coeff_addr = SCALE_BASE;
	uint32_t                v_coeff_addr  = SCALE_BASE;
	uint32_t                *tmp_buf = NULL;
	uint32_t                *h_coeff = NULL;
	uint32_t                *v_coeff = NULL;

	h_coeff_addr += SC_COEFF_H_TAB_OFFSET;
	v_coeff_addr += SC_COEFF_V_TAB_OFFSET;

	tmp_buf = (uint32_t *)kmalloc(SC_COEFF_BUF_SIZE, GFP_KERNEL);
	if (NULL == tmp_buf) {
		printk("SCALE DRV: No mem to alloc coeff buffer! \n");
		return SCALE_RTN_NO_MEM;
	}

	h_coeff = tmp_buf;
	v_coeff = tmp_buf + (SC_COEFF_COEF_SIZE/4);

	if (!(GenScaleCoeff((int16_t)g_path->sc_input_size.w, 
	                    (int16_t)g_path->sc_input_size.h,
	                    (int16_t)g_path->output_size.w,  
	                    (int16_t)g_path->output_size.h, 
	                    h_coeff, 
	                    v_coeff, 
	                    tmp_buf + (SC_COEFF_COEF_SIZE/2), 
	                    SC_COEFF_TMP_SIZE))) {
		kfree(tmp_buf);
		printk("SCALE DRV: _scale_set_sc_coeff error! \n");    
		return SCALE_RTN_GEN_COEFF_ERR;
	}	

	do {
		REG_OWR(SCALE_BASE, 1 << 4);
	} while ((1 << 6) != ((1 << 6) & REG_RD(SCALE_BASE)));
        
	for (i = 0; i < SC_COEFF_H_NUM; i++) {
		REG_WR(h_coeff_addr, *h_coeff);
		h_coeff_addr += 4;
		h_coeff++;
	}    
    
	for (i = 0; i < SC_COEFF_V_NUM; i++) {
		REG_WR(v_coeff_addr, *v_coeff);
		v_coeff_addr += 4;
		v_coeff++;
	}

	REG_MWR(SCALE_CFG, (0xF << 16), ((*v_coeff) & 0x0F) << 16);
	SCALE_TRACE("SCALE DRV: _scale_set_sc_coeff V[%d] = 0x%x \n", i,  (*v_coeff) & 0x0F);

	do {
		REG_MWR(SCALE_BASE, 1 << 4, 0 << 4);
	} while (0 != ((1 << 6) & REG_RD(SCALE_BASE))); 
	
	kfree(tmp_buf);

	return SCALE_RTN_SUCCESS;	
}
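/* Single configuration entry point: each scale_cfg_id programs one piece of
 * the scaler state (geometry, formats, addresses, endianness, mode) into both
 * g_path and the hardware registers; SCALE_START/CONTINUE/STOP dispatch to the
 * corresponding control routines. Returns the negated scale_drv_rtn code. */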
int32_t    scale_cfg(enum scale_cfg_id id, void *param)
{
	enum scale_drv_rtn      rtn = SCALE_RTN_SUCCESS;

	switch (id) {

	case SCALE_INPUT_SIZE:
	{
		struct scale_size *size = (struct scale_size*)param;
		uint32_t          reg_val = 0;

		SCALE_CHECK_PARAM_ZERO_POINTER(param);

		SCALE_TRACE("SCALE DRV: SCALE_INPUT_SIZE {%d %d} \n", size->w, size->h);  
		if (size->w > SCALE_FRAME_WIDTH_MAX ||
		    size->h > SCALE_FRAME_HEIGHT_MAX) {
			rtn = SCALE_RTN_SRC_SIZE_ERR;    
		} else {
			reg_val = size->w | (size->h << 16);
			REG_WR(SCALE_SRC_SIZE, reg_val);
			g_path->input_size.w = size->w;
			g_path->input_size.h = size->h;
		}
		break;
	}

	case SCALE_INPUT_RECT:
	{
		struct scale_rect *rect = (struct scale_rect*)param;
		uint32_t          reg_val = 0;

		SCALE_CHECK_PARAM_ZERO_POINTER(param);            

		SCALE_TRACE("SCALE DRV: SCALE_PATH_INPUT_RECT {%d %d %d %d} \n", 
		         rect->x, 
		         rect->y, 
		         rect->w, 
		         rect->h);  

		if (rect->x > SCALE_FRAME_WIDTH_MAX ||
		    rect->y > SCALE_FRAME_HEIGHT_MAX ||
		    rect->w > SCALE_FRAME_WIDTH_MAX ||
		    rect->h > SCALE_FRAME_HEIGHT_MAX) {
			rtn = SCALE_RTN_TRIM_SIZE_ERR;    
		} else {
			reg_val = rect->x | (rect->y << 16);
			REG_WR(SCALE_TRIM_START, reg_val);
			reg_val = rect->w | (rect->h << 16);
			REG_WR(SCALE_TRIM_SIZE, reg_val);
			memcpy((void*)&g_path->input_rect,
			       (void*)rect,
			       sizeof(struct scale_rect));
		}
		break;
	}

	case SCALE_INPUT_FORMAT:
	{
		enum scale_fmt format = *(enum scale_fmt*)param;

		SCALE_CHECK_PARAM_ZERO_POINTER(param);

		g_path->input_format = format;
		if (SCALE_YUV422 == format ||
			SCALE_YUV420 == format ||
			SCALE_YUV420_3FRAME == format ||
			SCALE_YUV400 == format) {
			REG_MWR(SCALE_CFG, (3 << 11), g_path->input_format << 11);
			REG_MWR(SCALE_CFG, (1 << 5), 0 << 5);
		} else if (SCALE_RGB565 == format) {
			REG_OWR(SCALE_CFG, (1 << 13));
			REG_OWR(SCALE_CFG, (1 << 5));
		} else if (SCALE_RGB888 == format) {
			REG_MWR(SCALE_CFG, (1 << 13), (0 << 13));
			REG_OWR(SCALE_CFG, (1 << 5));
		} else {
			rtn = SCALE_RTN_IN_FMT_ERR;
			g_path->input_format = SCALE_FTM_MAX;
		}
		break;
		
	}
	
	case SCALE_INPUT_ADDR:
	{
		struct scale_addr *p_addr = (struct scale_addr*)param;

		SCALE_CHECK_PARAM_ZERO_POINTER(param);

		if (SCALE_YUV_ADDR_INVALIDE(p_addr->yaddr, p_addr->uaddr, p_addr->vaddr)) {
			rtn = SCALE_RTN_ADDR_ERR;   
		} else {
			g_path->input_addr.yaddr = p_addr->yaddr;
			g_path->input_addr.uaddr = p_addr->uaddr;
			g_path->input_addr.vaddr = p_addr->vaddr;
			REG_WR(SCALE_FRM_IN_Y, p_addr->yaddr);
			REG_WR(SCALE_FRM_IN_U, p_addr->uaddr);
			REG_WR(SCALE_FRM_IN_V, p_addr->vaddr);
		}
		break;
	}
	
	case SCALE_INPUT_ENDIAN:
	{
		struct scale_endian_sel *endian = (struct scale_endian_sel*)param;

		SCALE_CHECK_PARAM_ZERO_POINTER(param);

		if (endian->y_endian >= SCALE_ENDIAN_MAX ||
			endian->uv_endian >= SCALE_ENDIAN_MAX) {
			rtn = SCALE_RTN_ENDIAN_ERR;
		} else {
			REG_MWR(SCALE_ENDIAN_SEL, 3, endian->y_endian);
			REG_MWR(SCALE_ENDIAN_SEL, 3 << 2, endian->uv_endian << 2);
		}
		break;
	}

	case SCALE_OUTPUT_SIZE:
	{
		struct scale_size *size = (struct scale_size*)param;
		uint32_t          reg_val = 0;

		SCALE_CHECK_PARAM_ZERO_POINTER(param);

		SCALE_TRACE("SCALE DRV: SCALE_OUTPUT_SIZE {%d %d} \n", size->w, size->h);
		if (size->w > SCALE_FRAME_WIDTH_MAX ||
		    size->h > SCALE_FRAME_HEIGHT_MAX) {
			rtn = SCALE_RTN_SRC_SIZE_ERR;
		} else {
			reg_val = size->w | (size->h << 16);
			REG_WR(SCALE_DST_SIZE, reg_val);
			g_path->output_size.w = size->w;
			g_path->output_size.h = size->h;
		}		
		break;
	}

	case SCALE_OUTPUT_FORMAT:
	{
		enum scale_fmt format = *(enum scale_fmt*)param;

		SCALE_CHECK_PARAM_ZERO_POINTER(param);

		g_path->output_format = format;
		if (SCALE_YUV422 == format) {
			REG_MWR(SCALE_CFG, (3 << 6), 0 << 6);
		} else if (SCALE_YUV420 == format) {
			REG_MWR(SCALE_CFG, (3 << 6), 1 << 6);
		} else if (SCALE_RGB565 == format) {
			REG_MWR(SCALE_CFG, (3 << 6), 2 << 6);
		} else {
			rtn = SCALE_RTN_OUT_FMT_ERR;
			g_path->output_format = SCALE_FTM_MAX;
		}
		break;
	}
	
	case SCALE_OUTPUT_ADDR:
	{
		struct scale_addr *p_addr = (struct scale_addr*)param;

		SCALE_CHECK_PARAM_ZERO_POINTER(param);

		if (SCALE_YUV_ADDR_INVALIDE(p_addr->yaddr, p_addr->uaddr, p_addr->vaddr)) {
			rtn = SCALE_RTN_ADDR_ERR;   
		} else {
			g_path->output_addr.yaddr = p_addr->yaddr;
			g_path->output_addr.uaddr = p_addr->uaddr;
			REG_WR(SCALE_FRM_OUT_Y, p_addr->yaddr);
			REG_WR(SCALE_FRM_OUT_U, p_addr->uaddr);
		}
		break;
	}

	case SCALE_OUTPUT_ENDIAN:
	{
		struct scale_endian_sel *endian = (struct scale_endian_sel*)param;

		SCALE_CHECK_PARAM_ZERO_POINTER(param);

		if (endian->y_endian >= SCALE_ENDIAN_MAX ||
			endian->uv_endian >= SCALE_ENDIAN_MAX) {
			rtn = SCALE_RTN_ENDIAN_ERR;
		} else {
			REG_MWR(SCALE_ENDIAN_SEL, (3 << 4), endian->y_endian << 4);
			REG_MWR(SCALE_ENDIAN_SEL, (3 << 6), endian->uv_endian << 6);
		}
		break;
	}

	case SCALE_TEMP_BUFF:
	{
		struct scale_addr *p_addr = (struct scale_addr*)param;

		SCALE_CHECK_PARAM_ZERO_POINTER(param);

		if (SCALE_YUV_ADDR_INVALIDE(p_addr->yaddr, p_addr->uaddr, p_addr->vaddr)) {
			rtn = SCALE_RTN_ADDR_ERR;   
		} else {
			g_path->temp_buf_src = 1;
			g_path->temp_buf_addr.yaddr = p_addr->yaddr;
			g_path->temp_buf_addr.uaddr = p_addr->uaddr;
			g_path->temp_buf_addr.vaddr = p_addr->vaddr;
			REG_WR(SCALE_FRM_SWAP_Y, p_addr->yaddr);
			REG_WR(SCALE_FRM_SWAP_U, p_addr->uaddr);
			REG_WR(SCALE_FRM_LINE,   p_addr->vaddr);
		}
		break;
	}

	case SCALE_SCALE_MODE:
	{
		enum scle_mode mode = *(enum scle_mode*)param;

		if (mode >= SCALE_MODE_MAX) {
			rtn = SCALE_RTN_MODE_ERR;
		} else {
			g_path->scale_mode = mode;
			if (SCALE_MODE_NORMAL == mode) {
				REG_MWR(SCALE_CFG, (1 << 4), (0 << 4));
			} else {
				REG_OWR(SCALE_CFG, (1 << 4));
			}
		}
		
		break;
	}
	
	case SCALE_SLICE_SCALE_HEIGHT:
	{
		uint32_t height = *(uint32_t*)param;

		SCALE_CHECK_PARAM_ZERO_POINTER(param);

		if (height > SCALE_FRAME_HEIGHT_MAX || (height % SCALE_SLICE_HEIGHT_ALIGNED)) {
			rtn = SCALE_RTN_PARA_ERR;
		} else {
			g_path->slice_height = height;
			REG_MWR(SCALE_SLICE_VER, 0x3FF, height);
		}
		break;
	}

	case SCALE_START:
	{
		rtn = scale_start();
		break;

	}

	case SCALE_CONTINUE:
	{
		rtn = scale_continue();
		break;
	}
	
	case SCALE_STOP:
	{
		rtn = scale_stop();
		break;
	}

	default:
		rtn = SCALE_RTN_IO_ID_ERR;
		break;
	}

	return -rtn;
}
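
/*
 * A minimal (hypothetical) caller sketch for scale_cfg(): the sizes and the
 * exact set of ids needed here are assumptions, but a single-frame scale
 * would roughly configure geometry, formats and addresses before SCALE_START:
 *
 *	struct scale_size in   = { .w = 640, .h = 480 };
 *	struct scale_size out  = { .w = 320, .h = 240 };
 *	struct scale_rect rect = { .x = 0, .y = 0, .w = 640, .h = 480 };
 *	enum scale_fmt    fmt  = SCALE_YUV420;
 *	struct scale_addr src  = { .yaddr = ..., .uaddr = ..., .vaddr = ... };
 *	struct scale_addr dst  = { .yaddr = ..., .uaddr = ..., .vaddr = ... };
 *
 *	scale_cfg(SCALE_INPUT_SIZE,    &in);
 *	scale_cfg(SCALE_INPUT_RECT,    &rect);
 *	scale_cfg(SCALE_INPUT_FORMAT,  &fmt);
 *	scale_cfg(SCALE_INPUT_ADDR,    &src);
 *	scale_cfg(SCALE_OUTPUT_SIZE,   &out);
 *	scale_cfg(SCALE_OUTPUT_FORMAT, &fmt);
 *	scale_cfg(SCALE_OUTPUT_ADDR,   &dst);
 *	scale_cfg(SCALE_START,         NULL);
 */

/* ioctl entry: copy any parameter block from user space, then either wait on
 * the per-user done semaphore (SCALE_IO_IS_DONE) and return the finished
 * frame, or serialize configuration across tasks until SCALE_IO_STOP and
 * forward the command number to scale_cfg(). */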
static long img_scale_ioctl(struct file *file,
				unsigned int cmd,
				unsigned long arg)
{
	int                      ret = 0;
	uint8_t                  param[PARAM_SIZE];
	uint32_t                 param_size;
	void                     *data = param;

	param_size = _IOC_SIZE(cmd);
	printk("img_scale_ioctl, io number 0x%x, param_size %d \n",
		_IOC_NR(cmd),
		param_size);

	if (param_size) {
		if (param_size > PARAM_SIZE) {
			printk("img_scale_ioctl, param size %d too large \n", param_size);
			ret = -EINVAL;
			goto exit;
		}
		if (copy_from_user(data, (void *)arg, param_size)) {
			printk("img_scale_ioctl, failed to copy_from_user \n");
			ret = -EFAULT;
			goto exit;
		}
	}

	if (SCALE_IO_IS_DONE == cmd) {
		ret = down_timeout(&(((struct scale_user *)(file->private_data))->sem_done), msecs_to_jiffies(100));
		if (ret) {
			printk("img_scale_ioctl, failed to down, 0x%x \n", ret);
			ret = -ERESTARTSYS;
			goto exit;
		} else {
			if (frm_rtn.type) {
				SCALE_TRACE("abnormal scale done \n");
				ret = -1;
				goto exit;
			}
			if (copy_to_user((void*)arg, &frm_rtn, sizeof(struct scale_frame))) {
				printk("img_scale_ioctl, failed to copy_to_user \n");
				ret = -EFAULT;
				goto exit;
			}
		}
	} else {
		if (cur_task_pid == INVALID_USER_ID)
		{
			mutex_lock(&scale_param_cfg_mutex);
			cur_task_pid = ((struct scale_user *)(file->private_data))->pid;
		}else if (cur_task_pid != ((struct scale_user *)(file->private_data))->pid){
			mutex_lock(&scale_param_cfg_mutex);
		}

		ret = scale_cfg(_IOC_NR(cmd), data);

		if (SCALE_IO_STOP == cmd) {
			cur_task_pid = INVALID_USER_ID;
			mutex_unlock(&scale_param_cfg_mutex);
		}

	}

exit:
	if (ret) {
		SCALE_TRACE("img_scale_ioctl, error code 0x%x \n", ret);
	}
	return ret;

}