/* msm_sensor_setting */
int32_t yacd5c1sbdbc_sensor_setting(struct msm_sensor_ctrl_t *s_ctrl,
			int update_type, int res)
{
	int32_t rc = 0;

	v4l2_subdev_notify(&s_ctrl->sensor_v4l2_subdev,
		NOTIFY_ISPIF_STREAM, (void *)ISPIF_STREAM(
		PIX_0, ISPIF_OFF_IMMEDIATELY));

SKYCDBG("%s:[F_PANTECH_CAMERA] %d, %d res=%d\n", __func__, __LINE__,update_type,res);
	
	if (s_ctrl->func_tbl->sensor_stop_stream)
		s_ctrl->func_tbl->sensor_stop_stream(s_ctrl);

	msleep(30);
	if (update_type == MSM_SENSOR_REG_INIT) {
		s_ctrl->curr_csi_params = NULL;
		g_preview_fps = 0;
		msm_sensor_enable_debugfs(s_ctrl);
#ifdef CONFIG_PANTECH_CAMERA_TUNER
		SKYCDBG("[CONFIG_PANTECH_CAMERA_TUNER]%s PASS init_setting\n", __func__);
		tuner_init_check = 1;
#else
		yacd5c1sbdbc_sensor_write_init_settings(s_ctrl);
#endif

	} else if (update_type == MSM_SENSOR_UPDATE_PERIODIC) {

#ifdef CONFIG_PANTECH_CAMERA_TUNER
		if (tuner_init_check == 1) {
			SKYCDBG("[CONFIG_PANTECH_CAMERA_TUNER]%s init_setting\n", __func__);
			yacd5c1sbdbc_sensor_write_init_settings(s_ctrl);
			tuner_init_check = 0;
		}

		SKYCDBG("[CONFIG_PANTECH_CAMERA_TUNER]%s res=%d res_setting\n", __func__, res);
		yacd5c1sbdbc_sensor_write_res_settings(s_ctrl, res);
#else
		yacd5c1sbdbc_sensor_write_res_settings(s_ctrl, res);
#endif

#ifdef CONFIG_PANTECH_CAMERA_YACD5C1SBDBC /* for VTS */
		//if (strcmp(s_ctrl->sensordata->sensor_name, "yacd5c1sbdbc"))
		if ((s_ctrl->sensor_id_info->sensor_id == YACD5C1SBDBC_ID) &&
			(preview_24fps_for_motion_detect_check == 1)) {

			//printk("[CONFIG_PANTECH_CAMERA for VTS] msleep(133)==>\n");
			//msleep(133);

			SKYCDBG("[CONFIG_PANTECH_CAMERA for VTS]preview_24fps_for_motion_detect_cfg_settings\n");

			rc = msm_camera_i2c_write_tbl(
				s_ctrl->sensor_i2c_client,
				s_ctrl->msm_sensor_reg->preview_24fps_for_motion_detect_cfg_settings[0],
				s_ctrl->msm_sensor_reg->preview_24fps_for_motion_detect_cfg_settings_size,
				s_ctrl->msm_sensor_reg->default_data_type);

			preview_24fps_for_motion_detect_check = 0;
			if (rc < 0) {
				SKYCERR("ERR:%s 24fps FAIL!!! rc=%d\n", __func__, rc);
				return rc;
			}
		}
#if 0 /* for camif err */
	else {
		if (res == 1)
			yacd5c1sbdbc_sensor_set_preview_fps(s_ctrl, g_preview_fps);
	}
#endif
#endif
		if (s_ctrl->curr_csi_params != s_ctrl->csi_params[res]) {
SKYCDBG("%s:[F_PANTECH_CAMERA] ==> MIPI setting  E %d\n", __func__, update_type);
			s_ctrl->curr_csi_params = s_ctrl->csi_params[res];
			v4l2_subdev_notify(&s_ctrl->sensor_v4l2_subdev,
				NOTIFY_CSID_CFG,
				&s_ctrl->curr_csi_params->csid_params);
			v4l2_subdev_notify(&s_ctrl->sensor_v4l2_subdev,
						NOTIFY_CID_CHANGE, NULL);
			mb();
			v4l2_subdev_notify(&s_ctrl->sensor_v4l2_subdev,
				NOTIFY_CSIPHY_CFG,
				&s_ctrl->curr_csi_params->csiphy_params);
			mb();
			msleep(20);
SKYCDBG("%s:[F_PANTECH_CAMERA] ==> MIPI setting  X %d\n", __func__, update_type);			
		}

		v4l2_subdev_notify(&s_ctrl->sensor_v4l2_subdev,
			NOTIFY_PCLK_CHANGE, &s_ctrl->msm_sensor_reg->
			output_settings[res].op_pixel_clk);
		v4l2_subdev_notify(&s_ctrl->sensor_v4l2_subdev,
			NOTIFY_ISPIF_STREAM, (void *)ISPIF_STREAM(
			PIX_0, ISPIF_ON_FRAME_BOUNDARY));

		if (s_ctrl->func_tbl->sensor_start_stream)
			s_ctrl->func_tbl->sensor_start_stream(s_ctrl);

#ifdef T_OSCAR
		msleep(300);	//msleep(150); //msleep(30);
#else
		msleep(200);//msleep(150); //msleep(30);
#endif

	}
	SKYCDBG("%s: %d x\n", __func__, __LINE__);
	return rc;
}
Example #2
static irqreturn_t
cuda_interrupt(int irq, void *arg)
{
    int status;
    struct adb_request *req = NULL;
    unsigned char ibuf[16];
    int ibuf_len = 0;
    int complete = 0;
    
    spin_lock(&cuda_lock);

    /* On powermacs, this handler is registered for the VIA IRQ. But they use
     * just the shift register IRQ -- other VIA interrupt sources are disabled.
     * On m68k macs, the VIA IRQ sources are dispatched individually. Unless
     * we are polling, the shift register IRQ flag has already been cleared.
     */

#ifdef CONFIG_MAC
    if (!arg)
#endif
    {
        if ((in_8(&via[IFR]) & SR_INT) == 0) {
            spin_unlock(&cuda_lock);
            return IRQ_NONE;
        } else {
            out_8(&via[IFR], SR_INT);
        }
    }
    
    status = (~in_8(&via[B]) & (TIP|TREQ)) | (in_8(&via[ACR]) & SR_OUT);
    /* printk("cuda_interrupt: state=%d status=%x\n", cuda_state, status); */
    switch (cuda_state) {
    case idle:
	/* CUDA has sent us the first byte of data - unsolicited */
	if (status != TREQ)
	    printk("cuda: state=idle, status=%x\n", status);
	(void)in_8(&via[SR]);
	out_8(&via[B], in_8(&via[B]) & ~TIP);
	cuda_state = reading;
	reply_ptr = cuda_rbuf;
	reading_reply = 0;
	break;

    case awaiting_reply:
	/* CUDA has sent us the first byte of data of a reply */
	if (status != TREQ)
	    printk("cuda: state=awaiting_reply, status=%x\n", status);
	(void)in_8(&via[SR]);
	out_8(&via[B], in_8(&via[B]) & ~TIP);
	cuda_state = reading;
	reply_ptr = current_req->reply;
	reading_reply = 1;
	break;

    case sent_first_byte:
	if (status == TREQ + TIP + SR_OUT) {
	    /* collision */
	    out_8(&via[ACR], in_8(&via[ACR]) & ~SR_OUT);
	    (void)in_8(&via[SR]);
	    out_8(&via[B], in_8(&via[B]) | TIP | TACK);
	    cuda_state = idle;
	} else {
	    /* assert status == TIP + SR_OUT */
	    if (status != TIP + SR_OUT)
		printk("cuda: state=sent_first_byte status=%x\n", status);
	    out_8(&via[SR], current_req->data[1]);
	    out_8(&via[B], in_8(&via[B]) ^ TACK);
	    data_index = 2;
	    cuda_state = sending;
	}
	break;

    case sending:
	req = current_req;
	if (data_index >= req->nbytes) {
	    out_8(&via[ACR], in_8(&via[ACR]) & ~SR_OUT);
	    (void)in_8(&via[SR]);
	    out_8(&via[B], in_8(&via[B]) | TACK | TIP);
	    req->sent = 1;
	    if (req->reply_expected) {
		cuda_state = awaiting_reply;
	    } else {
		current_req = req->next;
		complete = 1;
		/* not sure about this */
		cuda_state = idle;
		cuda_start();
	    }
	} else {
	    out_8(&via[SR], req->data[data_index++]);
	    out_8(&via[B], in_8(&via[B]) ^ TACK);
	}
	break;

    case reading:
	*reply_ptr++ = in_8(&via[SR]);
	if (status == TIP) {
	    /* that's all folks */
	    out_8(&via[B], in_8(&via[B]) | TACK | TIP);
	    cuda_state = read_done;
	} else {
	    /* assert status == TIP | TREQ */
	    if (status != TIP + TREQ)
		printk("cuda: state=reading status=%x\n", status);
	    out_8(&via[B], in_8(&via[B]) ^ TACK);
	}
	break;

    case read_done:
	(void)in_8(&via[SR]);
	if (reading_reply) {
	    req = current_req;
	    req->reply_len = reply_ptr - req->reply;
	    if (req->data[0] == ADB_PACKET) {
		/* Have to adjust the reply from ADB commands */
		if (req->reply_len <= 2 || (req->reply[1] & 2) != 0) {
		    /* the 0x2 bit indicates no response */
		    req->reply_len = 0;
		} else {
		    /* leave just the command and result bytes in the reply */
		    req->reply_len -= 2;
		    memmove(req->reply, req->reply + 2, req->reply_len);
		}
	    }
	    current_req = req->next;
	    complete = 1;
	} else {
	    /* This is tricky. We must break the spinlock to call
	     * cuda_input. However, doing so means we might get
	     * re-entered from another CPU getting an interrupt
	     * or calling cuda_poll(). I ended up using the stack
	     * (it's only for 16 bytes) and moving the actual
	     * call to cuda_input to outside of the lock.
	     */
	    ibuf_len = reply_ptr - cuda_rbuf;
	    memcpy(ibuf, cuda_rbuf, ibuf_len);
	}
	if (status == TREQ) {
	    out_8(&via[B], in_8(&via[B]) & ~TIP);
	    cuda_state = reading;
	    reply_ptr = cuda_rbuf;
	    reading_reply = 0;
	} else {
	    cuda_state = idle;
	    cuda_start();
	}
	break;

    default:
	printk("cuda_interrupt: unknown cuda_state %d?\n", cuda_state);
    }
    spin_unlock(&cuda_lock);
    if (complete && req) {
    	void (*done)(struct adb_request *) = req->done;
    	mb();
    	req->complete = 1;
    	/* Here, we assume that if the request has a done member, the
    	 * struct request will survive to setting req->complete to 1
    	 */
    	if (done)
		(*done)(req);
    }
    if (ibuf_len)
	cuda_input(ibuf, ibuf_len);
    return IRQ_HANDLED;
}
Example #3
static int hdmi_pll_enable(struct clk_hw *hw)
{
	struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
	struct hdmi *hdmi = phy_8960->hdmi;
	int timeout_count, pll_lock_retry = 10;
	unsigned int val;

	DBG("");

	/* Assert PLL S/W reset */
	hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
	hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0, 0x10);
	hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1, 0x1a);

	/* Wait for a short time before de-asserting
	 * to allow the hardware to complete its job.
	 * This much of delay should be fine for hardware
	 * to assert and de-assert.
	 */
	udelay(10);

	/* De-assert PLL S/W reset */
	hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);

	val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
	val |= HDMI_8960_PHY_REG12_SW_RESET;
	/* Assert PHY S/W reset */
	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
	val &= ~HDMI_8960_PHY_REG12_SW_RESET;
	/* Wait for a short time before de-asserting
	   to allow the hardware to complete its job.
	   This much of delay should be fine for hardware
	   to assert and de-assert. */
	udelay(10);
	/* De-assert PHY S/W reset */
	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2,  0x3f);

	val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
	val |= HDMI_8960_PHY_REG12_PWRDN_B;
	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
	/* Wait 10 us for enabling global power for PHY */
	mb();
	udelay(10);

	val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B);
	val |= HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B;
	val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL;
	hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
	hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x80);

	timeout_count = 1000;
	while (--pll_lock_retry > 0) {

		/* are we there yet? */
		val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_STATUS0);
		if (val & HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK)
			break;

		udelay(1);

		if (--timeout_count > 0)
			continue;

		/*
		 * PLL has still not locked.
		 * Do a software reset and try again
		 * Assert PLL S/W reset first
		 */
		hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
		udelay(10);
		hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);

		/*
		 * Wait for a short duration for the PLL calibration
		 * before checking if the PLL gets locked
		 */
		udelay(350);

		timeout_count = 1000;
	}

	if (pll_lock_retry <= 0) {
		/* PLL never locked; surface the failure instead of
		 * silently returning success */
		DBG("HDMI PLL failed to lock");
		return -ETIMEDOUT;
	}

	return 0;
}
/* 3D side by side */
void mdp4_dsi_video_3d_sbys(struct msm_fb_data_type *mfd,
				struct msmfb_overlay_3d *r3d)
{
	struct fb_info *fbi;
	unsigned int buf_offset;
	int bpp;
	uint8 *buf = NULL;
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	if (pipe == NULL)
		return;
	pipe->is_3d = r3d->is_3d;
	pipe->src_height_3d = r3d->height;
	pipe->src_width_3d = r3d->width;

	if (pipe->is_3d)
		mdp4_overlay_panel_3d(pipe->mixer_num, MDP4_3D_SIDE_BY_SIDE);
	else
		mdp4_overlay_panel_3d(pipe->mixer_num, MDP4_3D_NONE);

	fbi = mfd->fbi;

	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	buf_offset = calc_fb_offset(mfd, fbi, bpp);

	if (pipe->is_3d) {
		pipe->src_height = pipe->src_height_3d;
		pipe->src_width = pipe->src_width_3d;
		pipe->src_h = pipe->src_height_3d;
		pipe->src_w = pipe->src_width_3d;
		pipe->dst_h = pipe->src_height_3d;
		pipe->dst_w = pipe->src_width_3d;
		pipe->srcp0_ystride = msm_fb_line_length(0,
					pipe->src_width, bpp);
	} else {
		 /* 2D */
		pipe->src_height = fbi->var.yres;
		pipe->src_width = fbi->var.xres;
		pipe->src_h = fbi->var.yres;
		pipe->src_w = fbi->var.xres;
		pipe->dst_h = fbi->var.yres;
		pipe->dst_w = fbi->var.xres;
		pipe->srcp0_ystride = fbi->fix.line_length;
	}

	pipe->src_y = 0;
	pipe->src_x = 0;
	pipe->dst_y = 0;
	pipe->dst_x = 0;

	if (mfd->map_buffer) {
		pipe->srcp0_addr = (unsigned int)mfd->map_buffer->iova[0] +
			buf_offset;
		pr_debug("start 0x%lx srcp0_addr 0x%x\n",
			mfd->map_buffer->iova[0], pipe->srcp0_addr);
	} else {
		pipe->srcp0_addr = (uint32)(buf + buf_offset);
	}

	mdp4_overlay_rgb_setup(pipe);

	mdp4_overlayproc_cfg(pipe);

	mdp4_overlay_dmap_xy(pipe);

	mdp4_overlay_dmap_cfg(mfd, 1);

	mdp4_overlay_reg_flush(pipe, 1);

	mdp4_mixer_stage_up(pipe, 0);

	mdp4_mixer_stage_commit(pipe->mixer_num);

	mb();
}
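A hypothetical caller sketch for the 3D-mode switch above: the msmfb_overlay_3d fields come from the snippet itself, but the frame dimensions and the wrapper name are illustrative assumptions, not values from the driver.

/* Hypothetical usage sketch; dimensions are illustrative only. */
static void example_enable_sbs_3d(struct msm_fb_data_type *mfd)
{
	struct msmfb_overlay_3d r3d = {
		.is_3d = 1,	/* side-by-side on; 0 falls back to 2D */
		.width = 1920,	/* assumed source width */
		.height = 1080,	/* assumed source height */
	};

	mdp4_dsi_video_3d_sbys(mfd, &r3d);
}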
int mdp4_dtv_pipe_commit(int cndx, int wait)
{

	int  i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[cndx];
	mutex_lock(&vctrl->update_lock);
	undx =  vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}
	mixer = pipe->mixer_num;
	mdp4_overlay_iommu_unmap_freelist(mixer);

	if (vp->update_cnt == 0) {
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;	/* reset */
	mutex_unlock(&vctrl->update_lock);

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}
	mdp4_mixer_stage_commit(mixer);

	 /* start timing generator & mmu if they are not started yet */
	mdp4_overlay_dtv_start();

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_dtv_blt_ov_update(pipe);
		pipe->blt_ov_done++;
		vsync_irq_enable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM);
		mb();
		pipe->blt_ov_koff++;
		/* kickoff overlay1 engine */
		mdp4_stat.kickoff_ov1++;
		outpdw(MDP_BASE + 0x0008, 0);
	} else {
		/* schedule second phase update  at dmap */
		INIT_COMPLETION(vctrl->dmae_comp);
		vsync_irq_enable(INTR_DMA_E_DONE, MDP_DMA_E_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait)
		mdp4_dtv_wait4dmae(0);

	return cnt;
}
Example #6
/*
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer.
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
		    struct kvec *kv_list, u32 kv_count, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);


	/*
	 * If there is only just enough room for the packet, treat the ring
	 * as full; otherwise, on the next pass the read index would equal
	 * the write index and the ring buffer would look empty.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     kv_list[i].iov_base,
						     kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);


	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}
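A minimal caller sketch for the writer above. The descriptor type and the wrapper are assumptions made for illustration; only hv_ringbuffer_write itself comes from the snippet.

/* Hypothetical usage sketch; struct my_desc is assumed, not part of the
 * ring-buffer code above. */
struct my_desc {
	u32 type;
	u32 len;
};

static int example_send(struct hv_ring_buffer_info *outring,
			struct my_desc *desc, void *payload, u32 len)
{
	struct kvec kv[2];
	bool signal = false;
	int ret;

	kv[0].iov_base = desc;		/* fixed-size header first */
	kv[0].iov_len = sizeof(*desc);
	kv[1].iov_base = payload;	/* variable-length body */
	kv[1].iov_len = len;

	ret = hv_ringbuffer_write(outring, kv, 2, &signal);
	if (ret == 0 && signal) {
		/* a real caller would signal the host here */
	}
	return ret;	/* -EAGAIN means the ring was full; retry later */
}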
Example #7
static inline void
writehscx(void __iomem *adr, int hscx, u_char off, u_char data)
{
	writeb(data, adr + (hscx ? 0x1c0 : 0x180) +
	       ((off & 1) ? 0x1ff : 0) + off);
	mb();
}
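A read accessor would presumably mirror the same even/odd address fold; this is a sketch under that assumption, not code taken from the driver.

/* Assumed read-side mirror of writehscx(); sketch only. */
static inline u_char
readhscx(void __iomem *adr, int hscx, u_char off)
{
	return readb(adr + (hscx ? 0x1c0 : 0x180) +
		     ((off & 1) ? 0x1ff : 0) + off);
}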
Example #8
void __cpuinit synchronise_count_master(int cpu)
{
	int i;
	unsigned long flags;
	unsigned int initcount;

	printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);

	local_irq_save(flags);

	/*
	 * Notify the slaves that it's time to start
	 */
	atomic_set(&count_reference, read_c0_count());
	atomic_set(&count_start_flag, cpu);
	smp_wmb();

	/* Count will be initialised to the current timer value for all CPUs */
	initcount = read_c0_count();

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		/* slaves loop on '!= 2' */
		while (atomic_read(&count_count_start) != 1)
			mb();
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* this lets the slaves write their count register */
		atomic_inc(&count_count_start);

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		/*
		 * Wait for all slaves to leave the synchronization point:
		 */
		while (atomic_read(&count_count_stop) != 1)
			mb();
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);
	atomic_set(&count_start_flag, 0);

	local_irq_restore(flags);

	/*
	 * i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync
	 * so no point in alarming people
	 */
	printk("done.\n");
}
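For reference, the slave side of this barrier dance is the mirror image (the comments above note that slaves loop on '!= 2'); a sketch of that counterpart, assuming it follows the conventional form rather than being quoted from the tree:

/* Sketch of the slave-side counterpart; assumed conventional form. */
void example_synchronise_count_slave(int cpu)
{
	int i;
	unsigned int initcount;

	/* wait until the master flags this CPU to start */
	while (atomic_read(&count_start_flag) != cpu)
		mb();

	initcount = atomic_read(&count_reference);

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != 2)
			mb();

		/* everyone writes the counter in the last loop */
		if (i == NR_LOOPS - 1)
			write_c0_count(initcount);

		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != 2)
			mb();
	}
}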
Example #9
void udbg_scc_init(int force_scc)
{
	const u32 *reg;
	unsigned long addr;
	struct device_node *stdout = NULL, *escc = NULL, *macio = NULL;
	struct device_node *ch, *ch_def = NULL, *ch_a = NULL;
	const char *path;
	int i, x;

	escc = of_find_node_by_name(NULL, "escc");
	if (escc == NULL)
		goto bail;
	macio = of_get_parent(escc);
	if (macio == NULL)
		goto bail;
	path = of_get_property(of_chosen, "linux,stdout-path", NULL);
	if (path != NULL)
		stdout = of_find_node_by_path(path);
	for (ch = NULL; (ch = of_get_next_child(escc, ch)) != NULL;) {
		if (ch == stdout)
			ch_def = of_node_get(ch);
		if (strcmp(ch->name, "ch-a") == 0)
			ch_a = of_node_get(ch);
	}
	if (ch_def == NULL && !force_scc)
		goto bail;

	ch = ch_def ? ch_def : ch_a;

	/* Get address within mac-io ASIC */
	reg = of_get_property(escc, "reg", NULL);
	if (reg == NULL)
		goto bail;
	addr = reg[0];

	/* Get address of mac-io PCI itself */
	reg = of_get_property(macio, "assigned-addresses", NULL);
	if (reg == NULL)
		goto bail;
	addr += reg[2];

	/* Lock the serial port */
	pmac_call_feature(PMAC_FTR_SCC_ENABLE, ch,
			  PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);

	if (ch == ch_a)
		addr += 0x20;
	sccc = ioremap(addr & PAGE_MASK, PAGE_SIZE);
	sccc += addr & ~PAGE_MASK;
	sccd = sccc + 0x10;

	mb();

	for (i = 20000; i != 0; --i)
		x = in_8(sccc);
	out_8(sccc, 0x09);		/* reset A or B side */
	out_8(sccc, 0xc0);

	/* If the SCC was the OF output port, read the BRG value;
	 * otherwise set up for 57600 8N1.
	 */
	if (ch_def != NULL) {
		out_8(sccc, 13);
		scc_inittab[1] = in_8(sccc);
		out_8(sccc, 12);
		scc_inittab[3] = in_8(sccc);
	}

	for (i = 0; i < sizeof(scc_inittab); ++i)
		out_8(sccc, scc_inittab[i]);


	udbg_putc = udbg_scc_putc;
	udbg_getc = udbg_scc_getc;
	udbg_getc_poll = udbg_scc_getc_poll;

	udbg_puts("Hello World !\n");

 bail:
	of_node_put(macio);
	of_node_put(escc);
	of_node_put(stdout);
	of_node_put(ch_def);
	of_node_put(ch_a);
}
Example #10
File: platsmp.c  Project: saav/linux
static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	struct device_node *np;

	np = of_find_matching_node(NULL, clk_ids);
	if (!np)
		return -ENODEV;

	clk_base = of_iomap(np, 0);
	if (!clk_base)
		return -ENOMEM;

	/*
	 * write the address of secondary startup into the clkc register
	 * at offset 0x2bC, then write the magic number 0x3CAF5D62 to the
	 * clkc register at offset 0x2b8, which is what boot rom code is
	 * waiting for. This would wake up the secondary core from WFE
	 */
#define SIRFSOC_CPU1_JUMPADDR_OFFSET 0x2bc
	__raw_writel(virt_to_phys(sirfsoc_secondary_startup),
		clk_base + SIRFSOC_CPU1_JUMPADDR_OFFSET);

#define SIRFSOC_CPU1_WAKEMAGIC_OFFSET 0x2b8
	__raw_writel(0x3CAF5D62,
		clk_base + SIRFSOC_CPU1_WAKEMAGIC_OFFSET);

	/* make sure write buffer is drained */
	mb();

	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	pen_release = cpu_logical_map(cpu);
	sync_cache_w(&pen_release);

	/*
	 * Send the secondary CPU SEV, thereby causing the boot monitor to read
	 * the JUMPADDR and WAKEMAGIC, and branch to the address found there.
	 */
	dsb_sev();

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
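The secondary CPU completes this handshake by clearing pen_release once it is running; a minimal sketch of that side, assuming the conventional pen_release protocol (not taken from this file):

/* Sketch of the secondary-CPU side of the handshake; assumed form. */
static void example_secondary_init(unsigned int cpu)
{
	/*
	 * Let the boot CPU know we are out of the holding pen; it polls
	 * pen_release until it reads back -1.
	 */
	pen_release = -1;
	smp_wmb();

	/* the real code also pushes the write out to memory, e.g. with
	 * sync_cache_w(&pen_release), so the polling CPU observes it */
}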
Example #11
static int software_resume(void)
{
	int error;

	down(&pm_sem);
	if (!swsusp_resume_device) {
		if (!strlen(resume_file)) {
			up(&pm_sem);
			return -ENOENT;
		}
		swsusp_resume_device = name_to_dev_t(resume_file);
		pr_debug("swsusp: Resume From Partition %s\n", resume_file);
	} else {
		pr_debug("swsusp: Resume From Partition %d:%d\n",
			 MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
	}

	if (noresume) {
		/**
		 * FIXME: If noresume is specified, we need to find the partition
		 * and reset it back to normal swap space.
		 */
		up(&pm_sem);
		return 0;
	}

	pr_debug("PM: Checking swsusp image.\n");

	if ((error = swsusp_check()))
		goto Done;

	pr_debug("PM: Preparing processes for restore.\n");

	if ((error = prepare_processes())) {
		swsusp_close();
		goto Done;
	}

	pr_debug("PM: Reading swsusp image.\n");

	if ((error = swsusp_read())) {
		swsusp_free();
		goto Thaw;
	}

	pr_debug("PM: Preparing devices for restore.\n");

	suspend_console();
	if ((error = device_suspend(PMSG_PRETHAW))) {
		resume_console();
		printk("Some devices failed to suspend\n");
		swsusp_free();
		goto Thaw;
	}

	mb();

	pr_debug("PM: Restoring saved image.\n");
	swsusp_resume();
	pr_debug("PM: Restore failed, recovering.n");
	device_resume();
	resume_console();
 Thaw:
	unprepare_processes();
 Done:
	/* For success case, the suspend path will release the lock */
	up(&pm_sem);
	pr_debug("PM: Resume from disk failed.\n");
	return 0;
}
Example #12
static int c4_detect(avmcard *card)
{
	unsigned long stop, dummy;

	c4outmeml(card->mbase+PCI_OUT_INT_MASK, 0x0c);
	if (c4inmeml(card->mbase+PCI_OUT_INT_MASK) != 0x0c)
		return 1;

	c4outmeml(card->mbase+DOORBELL, DBELL_RESET_ARM);

	stop = jiffies + HZ*10;
	while (c4inmeml(card->mbase+DOORBELL) != 0xffffffff) {
		if (!time_before(jiffies, stop))
			return 2;
		c4outmeml(card->mbase+DOORBELL, DBELL_ADDR);
		mb();
	}

	c4_poke(card, DC21285_ARMCSR_BASE + CHAN_1_CONTROL, 0);
	c4_poke(card, DC21285_ARMCSR_BASE + CHAN_2_CONTROL, 0);

	c4outmeml(card->mbase+MAILBOX_0, 0x55aa55aa);
	if (c4inmeml(card->mbase+MAILBOX_0) != 0x55aa55aa) return 3;

	c4outmeml(card->mbase+MAILBOX_0, 0xaa55aa55);
	if (c4inmeml(card->mbase+MAILBOX_0) != 0xaa55aa55) return 4;

	if (c4_poke(card, DC21285_ARMCSR_BASE+DBELL_SA_MASK, 0)) return 5;
	if (c4_poke(card, DC21285_ARMCSR_BASE+DBELL_PCI_MASK, 0)) return 6;
	if (c4_poke(card, DC21285_ARMCSR_BASE+SA_CONTROL, SA_CTL_ALLRIGHT))
		return 7;
	if (c4_poke(card, DC21285_ARMCSR_BASE+XBUS_CYCLE, INIT_XBUS_CYCLE))
		return 8;
	if (c4_poke(card, DC21285_ARMCSR_BASE+XBUS_STROBE, INIT_XBUS_STROBE))
		return 8;
	if (c4_poke(card, DC21285_ARMCSR_BASE+DRAM_TIMING, 0)) return 9;

	mdelay(1);

	if (c4_peek(card, DC21285_DRAM_A0MR, &dummy)) return 10;
	if (c4_peek(card, DC21285_DRAM_A1MR, &dummy)) return 11;
	if (c4_peek(card, DC21285_DRAM_A2MR, &dummy)) return 12;
	if (c4_peek(card, DC21285_DRAM_A3MR, &dummy)) return 13;

	if (c4_poke(card, DC21285_DRAM_A0MR+CAS_OFFSET, 0)) return 14;
	if (c4_poke(card, DC21285_DRAM_A1MR+CAS_OFFSET, 0)) return 15;
	if (c4_poke(card, DC21285_DRAM_A2MR+CAS_OFFSET, 0)) return 16;
	if (c4_poke(card, DC21285_DRAM_A3MR+CAS_OFFSET, 0)) return 17;

	mdelay(1);

	if (c4_poke(card, DC21285_ARMCSR_BASE+DRAM_TIMING, DRAM_TIMING_DEF))
		return 18;

	if (c4_poke(card, DC21285_ARMCSR_BASE+DRAM_ADDR_SIZE_0,DRAM_AD_SZ_DEF0))
		return 19;
	if (c4_poke(card, DC21285_ARMCSR_BASE+DRAM_ADDR_SIZE_1,DRAM_AD_SZ_NULL))
		return 20;
	if (c4_poke(card, DC21285_ARMCSR_BASE+DRAM_ADDR_SIZE_2,DRAM_AD_SZ_NULL))
		return 21;
	if (c4_poke(card, DC21285_ARMCSR_BASE+DRAM_ADDR_SIZE_3,DRAM_AD_SZ_NULL))
		return 22;

	/* Transputer test */
	
	if (   c4_poke(card, 0x000000, 0x11111111)
	    || c4_poke(card, 0x400000, 0x22222222)
	    || c4_poke(card, 0x800000, 0x33333333)
	    || c4_poke(card, 0xC00000, 0x44444444))
		return 23;

	if (   c4_peek(card, 0x000000, &dummy) || dummy != 0x11111111
	    || c4_peek(card, 0x400000, &dummy) || dummy != 0x22222222
	    || c4_peek(card, 0x800000, &dummy) || dummy != 0x33333333
	    || c4_peek(card, 0xC00000, &dummy) || dummy != 0x44444444)
		return 24;

	if (   c4_poke(card, 0x000000, 0x55555555)
	    || c4_poke(card, 0x400000, 0x66666666)
	    || c4_poke(card, 0x800000, 0x77777777)
	    || c4_poke(card, 0xC00000, 0x88888888))
		return 25;

	if (   c4_peek(card, 0x000000, &dummy) || dummy != 0x55555555
	    || c4_peek(card, 0x400000, &dummy) || dummy != 0x66666666
	    || c4_peek(card, 0x800000, &dummy) || dummy != 0x77777777
	    || c4_peek(card, 0xC00000, &dummy) || dummy != 0x88888888)
		return 26;

	return 0;
}
Example #13
/* Upon return, the <req> array will contain values from the ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_lock>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive_noirq(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	unsigned int irq = msm_rpm_data.irq_ack;
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[SEL_MASK_SIZE];
	struct irq_chip *irq_chip, *err_chip;
	int i;

	msm_rpm_request_poll_mode.req = req;
	msm_rpm_request_poll_mode.count = count;
	msm_rpm_request_poll_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_poll_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_poll_mode.done = NULL;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	irq_chip = irq_get_chip(irq);
	if (!irq_chip) {
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	irq_chip->irq_mask(irq_get_irq_data(irq));
	err_chip = irq_get_chip(msm_rpm_data.irq_err);
	if (!err_chip) {
		irq_chip->irq_unmask(irq_get_irq_data(irq));
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	err_chip->irq_mask(irq_get_irq_data(msm_rpm_data.irq_err));

	if (msm_rpm_request) {
		msm_rpm_busy_wait_for_request_completion(true);
		BUG_ON(msm_rpm_request);
	}

	msm_rpm_request = &msm_rpm_request_poll_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(target_enum(req[i].id) >= MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ,
				target_enum(req[i].id), req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_SEL_0),
		sel_masks, msm_rpm_sel_mask_size);
	msm_rpm_write(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_CTX_0), ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	msm_rpm_busy_wait_for_request_completion(false);
	BUG_ON(msm_rpm_request);

	err_chip->irq_unmask(irq_get_irq_data(msm_rpm_data.irq_err));
	irq_chip->irq_unmask(irq_get_irq_data(irq));
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}
Example #14
/*
 * Note: assumes caller has acquired <msm_rpm_irq_lock>.
 *
 * Return value:
 *   0: request acknowledgement
 *   1: notification
 *   2: spurious interrupt
 */
static int msm_rpm_process_ack_interrupt(void)
{
	uint32_t ctx_mask_ack;
	uint32_t sel_masks_ack[SEL_MASK_SIZE] = {0};

	ctx_mask_ack = msm_rpm_read(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_CTX_0));
	msm_rpm_read_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_ACK_SEL_0),
		sel_masks_ack, msm_rpm_sel_mask_size);

	if (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_NOTIFICATION)) {
		struct msm_rpm_notification *n;
		int i;

		list_for_each_entry(n, &msm_rpm_notifications, list)
			for (i = 0; i < msm_rpm_sel_mask_size; i++)
				if (sel_masks_ack[i] & n->sel_masks[i]) {
					up(&n->sem);
					break;
				}

		msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_SEL_0),
			msm_rpm_sel_mask_size);
		msm_rpm_write(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_CTX_0), 0);
		/* Ensure the write is complete before return */
		mb();

		return 1;
	}

	if (msm_rpm_request) {
		int i;

		*(msm_rpm_request->ctx_mask_ack) = ctx_mask_ack;
		memcpy(msm_rpm_request->sel_masks_ack, sel_masks_ack,
			sizeof(sel_masks_ack));

		for (i = 0; i < msm_rpm_request->count; i++)
			msm_rpm_request->req[i].value =
				msm_rpm_read(MSM_RPM_PAGE_ACK,
				target_enum(msm_rpm_request->req[i].id));

		msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_SEL_0),
			msm_rpm_sel_mask_size);
		msm_rpm_write(MSM_RPM_PAGE_CTRL,
			target_ctrl(MSM_RPM_CTRL_ACK_CTX_0), 0);
		/* Ensure the write is complete before return */
		mb();

		if (msm_rpm_request->done)
			complete_all(msm_rpm_request->done);

		msm_rpm_request = NULL;
		return 0;
	}

	return 2;
}
Example #15
void
platform_mp_start_ap(void)
{
	uint32_t physaddr;
	vm_offset_t vaddr;
	uint32_t val;
	uint32_t start_mask;
	u_long cpu_resume_base;
	u_long nb_base;
	u_long cpu_resume_size;
	u_long nb_size;
	bus_addr_t cpu_resume_baddr;
	bus_addr_t nb_baddr;
	int a;

	if (alpine_get_cpu_resume_base(&cpu_resume_base, &cpu_resume_size))
		panic("Couldn't resolve cpu_resume_base address\n");

	if (alpine_get_nb_base(&nb_base, &nb_size))
		panic("Couldn't resolve_nb_base address\n");

	/* Proceed with start addresses for additional CPUs */
	if (bus_space_map(fdtbus_bs_tag, al_devmap_pa + cpu_resume_base,
	    cpu_resume_size, 0, &cpu_resume_baddr))
		panic("Couldn't map CPU-resume area");
	if (bus_space_map(fdtbus_bs_tag, al_devmap_pa + nb_base,
	    nb_size, 0, &nb_baddr))
		panic("Couldn't map NB-service area");

	/* Verify the CPU-resume unit is present and compatible */
	val = bus_space_read_4(fdtbus_bs_tag, cpu_resume_baddr,
	    AL_CPU_RESUME_WATERMARK_REG);
	if (((val & AL_CPU_RESUME_MAGIC_NUM_MASK) != AL_CPU_RESUME_MAGIC_NUM) ||
	    ((val & AL_CPU_RESUME_MIN_VER_MASK) < AL_CPU_RESUME_MIN_VER)) {
		panic("CPU-resume device is not compatible");
	}

	vaddr = (vm_offset_t)mpentry;
	physaddr = pmap_kextract(vaddr);

	for (a = 1; a < platform_mp_get_core_cnt(); a++) {
		/* Power up the core */
		bus_space_write_4(fdtbus_bs_tag, nb_baddr,
		    AL_NB_CONFIG_STATUS_PWR_CTRL(a), 0);
		mb();

		/* Enable resume */
		val = bus_space_read_4(fdtbus_bs_tag, cpu_resume_baddr,
		    AL_CPU_RESUME_PCPU_FLAGS(a));
		val &= ~AL_CPU_RESUME_FLG_PERCPU_DONT_RESUME;
		bus_space_write_4(fdtbus_bs_tag, cpu_resume_baddr,
		    AL_CPU_RESUME_PCPU_FLAGS(a), val);
		mb();

		/* Set resume physical address */
		bus_space_write_4(fdtbus_bs_tag, cpu_resume_baddr,
		    AL_CPU_RESUME_PCPU_RADDR_REG(a), physaddr);
		mb();
	}

	/* Release cores from reset */
	if (bus_space_map(fdtbus_bs_tag, al_devmap_pa + nb_base,
	    nb_size, 0, &nb_baddr))
		panic("Couldn't map NB-service area");

	start_mask = (1 << platform_mp_get_core_cnt()) - 1;

	/* Release cores from reset */
	val = bus_space_read_4(fdtbus_bs_tag, nb_baddr, AL_NB_INIT_CONTROL);
	val |= start_mask;
	bus_space_write_4(fdtbus_bs_tag, nb_baddr, AL_NB_INIT_CONTROL, val);
	dsb();

	bus_space_unmap(fdtbus_bs_tag, nb_baddr, nb_size);
	bus_space_unmap(fdtbus_bs_tag, cpu_resume_baddr, cpu_resume_size);
}
int mdp4_dtv_pipe_commit(int cndx, int wait)
{

	int  i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[cndx];
	mutex_lock(&vctrl->update_lock);
	undx =  vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}
	mixer = pipe->mixer_num;

	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);

	/*
	 * allow stage_commit without pipes queued
	 * (vp->update_cnt == 0) to unstage pipes after
	 * overlay_unset
	 */

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;	/* reset */
	mutex_unlock(&vctrl->update_lock);

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
		}
	}
	mdp4_mixer_stage_commit(mixer);

	 /* start timing generator & mmu if they are not started yet */
	mdp4_overlay_dtv_start();

	/*
	 * There is a possibility that a pipe commit comes very close to the
	 * next vsync. This may cause two consecutive pipe commits to happen
	 * within the same vsync period, which causes an iommu page fault
	 * when the previous iommu buffer is freed. Set the
	 * ION_IOMMU_UNMAP_DELAYED flag at ion_map_iommu() to delay
	 * unmapping the iommu buffer and fix this problem.
	 * Also, ion_unmap_iommu() may take as long as 9 ms to free an ion
	 * buffer, so mdp4_overlay_iommu_unmap_freelist(mixer) should be
	 * called after stage_commit() to ensure pipe_commit (up to
	 * stage_commit) completes within the vsync period.
	 */

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_dtv_blt_ov_update(pipe);
		pipe->blt_ov_done++;
		vsync_irq_enable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM);
		mb();
		pipe->blt_ov_koff++;
		/* kickoff overlay1 engine */
		mdp4_stat.kickoff_ov1++;
		outpdw(MDP_BASE + 0x0008, 0);
	} else {
		/* schedule second phase update  at dmap */
		INIT_COMPLETION(vctrl->dmae_comp);
		vsync_irq_enable(INTR_DMA_E_DONE, MDP_DMA_E_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait)
		mdp4_dtv_wait4dmae(0);

	return cnt;
}
Example #17
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	mb();
}
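The matching end-of-read path re-enables interrupts and must re-check for data that raced in while the mask was set; a sketch of that counterpart, with the re-check left as a comment since the index helpers are not shown above:

/* Sketch of the matching end-of-read path; assumed form. */
void example_hv_end_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 0;
	/* make the unmask visible before re-checking for pending data */
	mb();
	/* a real implementation re-reads the ring indices here to catch
	 * packets that arrived while the mask was set */
}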
Example #18
static void simple_map_write16(struct map_info *map, u16 datum, unsigned long ofs)
{
	__raw_writew(datum, map->virt + ofs);
	mb();
}
Example #19
static inline void
writeisac(void __iomem *adr, u_char off, u_char data)
{
	writeb(data, adr + ((off & 1) ? 0x2ff : 0x100) + off);
	mb();
}
Example #20
static void simple_map_write32(struct map_info *map, u32 datum, unsigned long ofs)
{
	__raw_writel(datum, map->virt + ofs);
	mb();
}
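The read side of these map accessors would mirror the writes with the __raw_read* helpers; sketches under that assumption (not taken from the file):

/* Assumed read-side mirrors of the accessors above; sketches only. */
static u16 example_map_read16(struct map_info *map, unsigned long ofs)
{
	return __raw_readw(map->virt + ofs);
}

static u32 example_map_read32(struct map_info *map, unsigned long ofs)
{
	return __raw_readl(map->virt + ofs);
}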
int mdp4_dsi_video_pipe_commit(int cndx, int wait)
{

	int  i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[cndx];

	mutex_lock(&vctrl->update_lock);
	undx =  vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	mixer = pipe->mixer_num;

	/*
	 * allow stage_commit without pipes queued
	 * (vp->update_cnt == 0) to unstage pipes after
	 * overlay_unset
	 */

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;     /* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->ov_koff != vctrl->ov_done) {
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		pr_err("%s: Error, frame dropped %d %d\n", __func__,
				vctrl->ov_koff, vctrl->ov_done);
		return 0;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_overlay_mdp_perf_upd(vctrl->mfd, 1);

	if (vctrl->blt_change) {
		pipe = vctrl->base_pipe;
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		INIT_COMPLETION(vctrl->dmap_comp);
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mdp4_dsi_video_wait4dmap(0);
		if (pipe->ov_blt_addr)
			mdp4_dsi_video_wait4ov(0);
	}

	pipe = vp->plist;

	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	mdp4_mixer_stage_commit(mixer);

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_dsi_video_start();

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_dsi_video_blt_ov_update(pipe);
		pipe->ov_cnt++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
		mb();
		vctrl->ov_koff++;
		/* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		outpdw(MDP_BASE + 0x0004, 0);
	} else {
		/* schedule second phase update  at dmap */
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait) {
		if (pipe->ov_blt_addr)
			mdp4_dsi_video_wait4ov(cndx);
		else
			mdp4_dsi_video_wait4dmap(cndx);
	}

	return cnt;
}
Example #22
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t dummy;

	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	/*
	 * Allow mapping the first page read-only to user-space.
	 */

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	mutex_lock(&dev_priv->hw_mutex);
	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	iowrite32(min,  fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min,  fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	mutex_unlock(&dev_priv->hw_mutex);

	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);
	return vmw_fifo_send_fence(dev_priv, &dummy);
}
Example #23
void _mali_osk_mem_barrier( void )
{
    mb();
}
Example #24
int mdp4_dsi_video_pipe_commit(int cndx, int wait)
{

	int  i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;
#ifdef MDP_ODD_RESOLUTION_CTRL
	int current_pipe_ndx = 0;
#endif

	vctrl = &vsync_ctrl_db[cndx];

	mutex_lock(&vctrl->update_lock);
	undx =  vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}

	mixer = pipe->mixer_num;

	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);

	/*
	 * allow stage_commit without pipes queued
	 * (vp->update_cnt == 0) to unstage pipes after
	 * overlay_unset
	 */

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;     /* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);


	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->ov_koff != vctrl->ov_done) {
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		pr_err("%s: Error, frame dropped %d %d\n", __func__,
				vctrl->ov_koff, vctrl->ov_done);
		return 0;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	pipe = vp->plist;

	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/*
				 * commit pipes which are in pending queue
				 * and not be unset yet
				 */
				mdp4_overlay_vsync_commit(pipe);
			}
 		}
	}

	mdp4_mixer_stage_commit(mixer);

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_dsi_video_start();

	/*
	 * There is a possibility that a pipe commit comes very close to the
	 * next vsync. This may cause two consecutive pipe commits to happen
	 * within the same vsync period, which causes an iommu page fault
	 * when the previous iommu buffer is freed. Set the
	 * ION_IOMMU_UNMAP_DELAYED flag at ion_map_iommu() to delay
	 * unmapping the iommu buffer and fix this problem.
	 * Also, ion_unmap_iommu() may take as long as 9 ms to free an ion
	 * buffer, so mdp4_overlay_iommu_unmap_freelist(mixer) should be
	 * called after stage_commit() to ensure pipe_commit (up to
	 * stage_commit) completes within the vsync period.
	 */

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_dsi_video_blt_ov_update(pipe);
		pipe->ov_cnt++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
		mb();
		vctrl->ov_koff++;
		/* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		outpdw(MDP_BASE + 0x0004, 0);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait) {
		if (pipe->ov_blt_addr)
			mdp4_dsi_video_wait4ov(0);
		else
			mdp4_dsi_video_wait4vsync(0);
	}
#ifdef MDP_ODD_RESOLUTION_CTRL
	current_pipe_ndx = pipe->pipe_ndx;
	for (i = current_pipe_ndx; i >= 0; i--, pipe--) {
		pipe->check_odd_res = 0;
	}
#endif

	return cnt;
}
/*
 * This wrapper function around hv_flush_remote() does several things:
 *
 *  - Provides a return value error-checking panic path, since
 *    there's never any good reason for hv_flush_remote() to fail.
 *  - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
 *    is the type that Linux wants to pass around anyway.
 *  - Centralizes the mark_caches_evicted() handling.
 *  - Canonicalizes that lengths of zero make cpumasks NULL.
 *  - Handles deferring TLB flushes for dataplane tiles.
 *  - Tracks remote interrupts in the per-cpu irq_cpustat_t.
 *
 * Note that we have to wait until the cache flush completes before
 * updating the per-cpu last_cache_flush word, since otherwise another
 * concurrent flush can race, conclude the flush has already
 * completed, and start to use the page while it's still dirty
 * remotely (running concurrently with the actual evict, presumably).
 */
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
		  const struct cpumask *cache_cpumask_orig,
		  HV_VirtAddr tlb_va, unsigned long tlb_length,
		  unsigned long tlb_pgsize,
		  const struct cpumask *tlb_cpumask_orig,
		  HV_Remote_ASID *asids, int asidcount)
{
	int rc;
	int timestamp = 0;  /* happy compiler */
	struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
	struct cpumask *cache_cpumask, *tlb_cpumask;
	HV_PhysAddr cache_pa;
	char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5];

	mb();   /* provided just to simplify "magic hypervisor" mode */

	/*
	 * Canonicalize and copy the cpumasks.
	 */
	if (cache_cpumask_orig && cache_control) {
		cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
		cache_cpumask = &cache_cpumask_copy;
	} else {
		cpumask_clear(&cache_cpumask_copy);
		cache_cpumask = NULL;
	}
	if (cache_cpumask == NULL)
		cache_control = 0;
	if (tlb_cpumask_orig && tlb_length) {
		cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
		tlb_cpumask = &tlb_cpumask_copy;
	} else {
		cpumask_clear(&tlb_cpumask_copy);
		tlb_cpumask = NULL;
	}

	hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
			asids, asidcount);
	cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
	if (cache_control & HV_FLUSH_EVICT_L2)
		timestamp = mark_caches_evicted_start();
	rc = hv_flush_remote(cache_pa, cache_control,
			     cpumask_bits(cache_cpumask),
			     tlb_va, tlb_length, tlb_pgsize,
			     cpumask_bits(tlb_cpumask),
			     asids, asidcount);
	if (cache_control & HV_FLUSH_EVICT_L2)
		mark_caches_evicted_finish(cache_cpumask, timestamp);
	if (rc == 0)
		return;
	cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
	cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);

	pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
	       " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
	       cache_pa, cache_control, cache_cpumask, cache_buf,
	       (unsigned long)tlb_va, tlb_length, tlb_pgsize,
	       tlb_cpumask, tlb_buf,
	       asids, asidcount, rc);
	panic("Unsafe to continue.");
}
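Given the canonicalization rules in the comment above (zero lengths make the corresponding cpumasks NULL), a TLB-only call looks like the sketch below; the wrapper name and argument values are illustrative assumptions.

/* Hypothetical TLB-only usage of flush_remote(); values illustrative. */
static void example_flush_one_page(const struct cpumask *cpus,
				   HV_VirtAddr va, unsigned long pgsize)
{
	flush_remote(0, 0, NULL,		/* no cache work */
		     va, pgsize, pgsize,	/* one page of TLB */
		     cpus, NULL, 0);		/* no extra ASIDs */
}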
Example #26
static void
WriteISAR(struct IsdnCardState *cs, int mode, u_char offset, u_char value)
{
	writeb(value, cs->hw.isurf.isar + offset);
	mb();
}
Example #27
static __cpuinit void probe_spram(char *type,
	    unsigned int base,
	    unsigned int (*read)(unsigned int),
	    void (*write)(unsigned int, unsigned int))
{
	unsigned int firstsize = 0, lastsize = 0;
	unsigned int firstpa = 0, lastpa = 0, pa = 0;
	unsigned int offset = 0;
	unsigned int size, tag0, tag1;
	unsigned int enabled;
	int i;

	/*
	 * The limit is arbitrary but avoids the loop running away if
	 * the SPRAM tags are implemented differently
	 */

	for (i = 0; i < 8; i++) {
		tag0 = read(offset);
		tag1 = read(offset+SPRAM_TAG_STRIDE);
		pr_debug("DBG %s%d: tag0=%08x tag1=%08x\n",
			 type, i, tag0, tag1);

		size = tag1 & SPRAM_TAG1_SIZE_MASK;

		if (size == 0)
			break;

		if (i != 0) {
			/* tags may repeat... */
			if ((pa == firstpa && size == firstsize) ||
			    (pa == lastpa && size == lastsize))
				break;
		}

		/* Align base with size */
		base = (base + size - 1) & ~(size-1);

		/* reprogram the base address and enable */
		tag0 = (base & SPRAM_TAG0_PA_MASK) | SPRAM_TAG0_ENABLE;
		write(offset, tag0);

		base += size;

		/* reread the tag */
		tag0 = read(offset);
		pa = tag0 & SPRAM_TAG0_PA_MASK;
		enabled = tag0 & SPRAM_TAG0_ENABLE;

		if (i == 0) {
			firstpa = pa;
			firstsize = size;
		}

		lastpa = pa;
		lastsize = size;

		if (strcmp(type, "DSPRAM") == 0) {
			unsigned int *vp = (unsigned int *)(CKSEG1 | pa);
			unsigned int v;
#ifdef CONFIG_MIPS_TC3262
			if (!isMT751020) {
#endif
#define TDAT	0x5a5aa5a5
			vp[0] = TDAT;
			vp[1] = ~TDAT;

			mb();

			v = vp[0];
			if (v != TDAT)
				printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
				       vp, TDAT, v);
			v = vp[1];
			if (v != ~TDAT)
				printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
				       vp+1, ~TDAT, v);
#ifdef CONFIG_MIPS_TC3262
			}
			if (enabled) {
				if (isMT751020) {
					dspram_max_size = size;
				} else {
					sram_allocp = (char *) vp;
					sram_size = sram_free = size;
				}
			}
#endif
		}

		pr_info("%s%d: PA=%08x,Size=%08x%s\n",
			type, i, pa, size, enabled ? ",enabled" : "");
		offset += 2 * SPRAM_TAG_STRIDE;
	}
}
Example #28
static void
pfm_set_irq_mask(unsigned int irq_nr)
{
	int i = irq_nr >> 3, is_slot;
	volatile unsigned char *ctrler;
	unsigned char bit, *ena;
	unsigned char flag, mask;
	unsigned long irq_flags;

	if (irq_nr >= max_irqs)
		return;

	bit = 1U << (irq_nr & 0x7);
	flag = test_bit(irq_nr, ppc_cached_irq_mask) ? bit : 0;

//printk(KERN_ERR "irq %d, i %d, bit %02X, flag %02X\n",
//(int)irq_nr, i, bit, flag);

	mask = bit;
	is_slot = 0;
	switch (i) {
	default:
		return;
	case 0:
		ctrler = pfm_irq.via1_ena;
		ena = &via1_ena;
		break;
	case 1:
		ctrler = pfm_irq.via2_ena;
		ena = &via2_ena;
		break;
	case 2:
		ena = &slt1_ena;
		ctrler = pfm_irq.via1_ena;
		mask = VIA1_SLOT;
		is_slot = 1;
		break;
	case 3:
		ena = &slt2_ena;
		ctrler = pfm_irq.via2_ena;
		mask = VIA2_SLOT;
		is_slot = 1;
		break;
	case 4:
		ena = &f108_ena;
		ctrler = pfm_irq.via2_ena;
		mask = VIA2_SLOT;
		is_slot = 1;
		break;
	}

	spin_lock_irqsave(&pfm_lock, irq_flags);
	if (flag) {
		*ena |= bit;
		out_8(ctrler, AMIC_SET | mask);
	} else {
		*ena &= ~bit;
		if (!is_slot)
			out_8(ctrler, AMIC_CLR | mask);
	}
	mb();
	spin_unlock_irqrestore(&pfm_lock, irq_flags);
}
Example #29
void boot_write8(struct map_info *map, __u8 d, unsigned long adr)
{
	__raw_writeb(d, map->map_priv_1 + adr);
	mb();
}
static int msm_csiphy_lane_config(struct csiphy_device *csiphy_dev,
	struct msm_camera_csiphy_params *csiphy_params)
{
	int rc = 0;
	int j = 0, curr_lane = 0;
	uint32_t val = 0, clk_rate = 0, round_rate = 0;
	uint8_t lane_cnt = 0;
	uint16_t lane_mask = 0;
	void __iomem *csiphybase;
	uint8_t csiphy_id = csiphy_dev->pdev->id;
	int32_t lane_val = 0, lane_right = 0, num_lanes = 0;
	struct clk **csid_phy_clk_ptr;
	int ratio = 1;

	csiphybase = csiphy_dev->base;
	if (!csiphybase) {
		pr_err("%s: csiphybase NULL\n", __func__);
		return -EINVAL;
	}

	csiphy_dev->lane_mask[csiphy_id] |= csiphy_params->lane_mask;
	lane_mask = csiphy_dev->lane_mask[csiphy_id];
	lane_cnt = csiphy_params->lane_cnt;
	if (csiphy_params->lane_cnt < 1 || csiphy_params->lane_cnt > 4) {
		pr_err("%s: unsupported lane cnt %d\n",
			__func__, csiphy_params->lane_cnt);
		return rc;
	}

	csid_phy_clk_ptr = csiphy_dev->csiphy_clk;
	if (!csid_phy_clk_ptr) {
		pr_err("csiphy_timer_src_clk get failed\n");
		return -EINVAL;
	}

	clk_rate = (csiphy_params->csiphy_clk > 0)
			? csiphy_params->csiphy_clk :
			csiphy_dev->csiphy_max_clk;
	round_rate = clk_round_rate(
			csid_phy_clk_ptr[csiphy_dev->csiphy_clk_index],
			clk_rate);
	if (round_rate >= csiphy_dev->csiphy_max_clk)
		round_rate = csiphy_dev->csiphy_max_clk;
	else {
		ratio = csiphy_dev->csiphy_max_clk / round_rate;
		csiphy_params->settle_cnt = csiphy_params->settle_cnt / ratio;
	}

	CDBG("set from usr csiphy_clk clk_rate = %u round_rate = %u\n",
			clk_rate, round_rate);
	rc = clk_set_rate(
		csid_phy_clk_ptr[csiphy_dev->csiphy_clk_index],
		round_rate);
	if (rc < 0) {
		pr_err("csiphy_timer_src_clk set failed\n");
		return rc;
	}

	CDBG("%s csiphy_params, mask = 0x%x cnt = %d\n",
		__func__,
		csiphy_params->lane_mask,
		csiphy_params->lane_cnt);
	CDBG("%s csiphy_params, settle cnt = 0x%x csid %d\n",
		__func__, csiphy_params->settle_cnt,
		csiphy_params->csid_core);

	if (csiphy_dev->hw_version >= CSIPHY_VERSION_V30) {
		val = msm_camera_io_r(csiphy_dev->clk_mux_base);
		if (csiphy_params->combo_mode &&
			(csiphy_params->lane_mask & 0x18) == 0x18) {
			val &= ~0xf0;
			val |= csiphy_params->csid_core << 4;
		} else {
			val &= ~0xf;
			val |= csiphy_params->csid_core;
		}
		msm_camera_io_w(val, csiphy_dev->clk_mux_base);
		CDBG("%s clk mux addr %p val 0x%x\n", __func__,
			csiphy_dev->clk_mux_base, val);
		mb();
	}
	msm_camera_io_w(0x1, csiphybase + csiphy_dev->ctrl_reg->
		csiphy_reg.mipi_csiphy_glbl_t_init_cfg0_addr);
	msm_camera_io_w(0x1, csiphybase + csiphy_dev->ctrl_reg->
		csiphy_reg.mipi_csiphy_t_wakeup_cfg0_addr);

	if (csiphy_dev->hw_version < CSIPHY_VERSION_V30) {
		val = 0x3;
		msm_camera_io_w((lane_mask << 2) | val,
				csiphybase +
				csiphy_dev->ctrl_reg->
				csiphy_reg.mipi_csiphy_glbl_pwr_cfg_addr);
		msm_camera_io_w(0x10, csiphybase +
			csiphy_dev->ctrl_reg->csiphy_reg.
			mipi_csiphy_lnck_cfg2_addr);
		msm_camera_io_w(csiphy_params->settle_cnt,
			csiphybase +
			csiphy_dev->ctrl_reg->csiphy_reg.
			mipi_csiphy_lnck_cfg3_addr);
		msm_camera_io_w(0x24,
			csiphybase + csiphy_dev->ctrl_reg->
			csiphy_reg.mipi_csiphy_interrupt_mask0_addr);
		msm_camera_io_w(0x24,
			csiphybase + csiphy_dev->ctrl_reg->
			csiphy_reg.mipi_csiphy_interrupt_clear0_addr);
	} else {
		if ((csiphy_dev->hw_version == CSIPHY_VERSION_V31) &&
			csiphy_dev->is_3_1_rev3) {
			msm_camera_io_w(0x01, csiphybase +
				MIPI_CSIPHY_GLBL_PWG_CFG0_OFFSET);
		}
		val = 0x1;
		msm_camera_io_w((lane_mask << 1) | val,
				csiphybase +
				csiphy_dev->ctrl_reg->
				csiphy_reg.mipi_csiphy_glbl_pwr_cfg_addr);
		msm_camera_io_w(csiphy_params->combo_mode <<
			csiphy_dev->ctrl_reg->csiphy_reg.
			mipi_csiphy_mode_config_shift,
			csiphybase +
			csiphy_dev->ctrl_reg->csiphy_reg.
			mipi_csiphy_glbl_reset_addr);
	}

	lane_mask &= 0x1f;
	while (lane_mask & 0x1f) {
		if (!(lane_mask & 0x1)) {
			j++;
			lane_mask >>= 1;
			continue;
		}
		msm_camera_io_w(0x10,
			csiphybase + csiphy_dev->ctrl_reg->csiphy_reg.
			mipi_csiphy_lnn_cfg2_addr + 0x40*j);
		msm_camera_io_w(csiphy_params->settle_cnt,
			csiphybase + csiphy_dev->ctrl_reg->csiphy_reg.
			mipi_csiphy_lnn_cfg3_addr + 0x40*j);
		msm_camera_io_w(csiphy_params->settle_cnt,
			csiphybase + csiphy_dev->ctrl_reg->csiphy_reg.
			mipi_csiphy_lnn_cfg4_addr + 0x40*j);
		msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_reg.
			mipi_csiphy_interrupt_mask_val, csiphybase +
			csiphy_dev->ctrl_reg->csiphy_reg.
			mipi_csiphy_interrupt_mask_addr + 0x4*j);
		msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_reg.
			mipi_csiphy_interrupt_mask_val, csiphybase +
			csiphy_dev->ctrl_reg->csiphy_reg.
			mipi_csiphy_interrupt_clear_addr + 0x4*j);
		if (csiphy_dev->is_3_1_20nm_hw == 1) {
			if (j > CLK_LANE_OFFSET) {
				lane_right = 0x8;
				num_lanes = (lane_cnt - curr_lane)
					<< NUM_LANES_OFFSET;
				if (lane_cnt < curr_lane) {
					pr_err("%s: Lane_cnt is less than curr_lane number\n",
						__func__);
					return -EINVAL;
				}
				lane_val = lane_right | num_lanes;
			} else if (j == 1) {
				lane_val = 0x4;
			}
			if (csiphy_params->combo_mode == 1) {
				/*
				 * In combo mode, the clock is always the
				 * 4th lane for the second sensor, so check
				 * whether this is a one-lane sensor with
				 * curr_lane still 0.
				 */
				if (curr_lane == 0 &&
					((csiphy_params->lane_mask &
						0x18) == 0x18))
					lane_val = 0x4;
			}
			msm_camera_io_w(lane_val, csiphybase +
				csiphy_dev->ctrl_reg->csiphy_reg.
				mipi_csiphy_lnn_misc1_addr + 0x40*j);
			msm_camera_io_w(0x17, csiphybase +
				csiphy_dev->ctrl_reg->csiphy_reg.
				mipi_csiphy_lnn_test_imp + 0x40*j);
			curr_lane++;
		}
		j++;
		lane_mask >>= 1;
	}