int nvhost_flcn_init(struct platform_device *dev)
{
	int err = 0;
	struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
	struct flcn *v = get_flcn(dev);

	nvhost_dbg_fn("in dev:%p v:%p", dev, v);

	if (!v) {
		v = kzalloc(sizeof(*v), GFP_KERNEL);
		if (!v) {
			dev_err(&dev->dev, "couldn't alloc flcn support");
			err = -ENOMEM;
			goto clean_up;
		}
		set_flcn(dev, v);
	}
	nvhost_dbg_fn("primed dev:%p v:%p", dev, v);

	err = flcn_read_ucode(dev, pdata->firmware_name);
	if (err || !v->valid) {
		if (!err)
			err = -ENOENT;
		goto clean_up;
	}

	nvhost_module_busy(dev);
	err = nvhost_flcn_boot(dev);
	nvhost_module_idle(dev);
	if (err)
		goto clean_up;

	return 0;

 clean_up:
	nvhost_err(&dev->dev, "failed");
	return err;
}
Example #2
static int msenc_dma_wait_idle(struct platform_device *dev, u32 *timeout)
{
    nvhost_dbg_fn("");

    if (!*timeout)
        *timeout = MSENC_IDLE_TIMEOUT_DEFAULT;

    do {
        u32 check = min_t(u32, MSENC_IDLE_CHECK_PERIOD, *timeout);
        u32 dmatrfcmd = host1x_readl(dev, msenc_dmatrfcmd_r());
        u32 idle_v = msenc_dmatrfcmd_idle_v(dmatrfcmd);

        if (msenc_dmatrfcmd_idle_true_v() == idle_v) {
            nvhost_dbg_fn("done");
            return 0;
        }

        udelay(MSENC_IDLE_CHECK_PERIOD);
        *timeout -= check;
    } while (*timeout);

    dev_err(&dev->dev, "dma idle timeout");

    return -1;
}
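msenc_dma_wait_idle() above and the other wait-idle routines in these examples share one polling idiom: clamp the per-iteration check against the remaining budget, spin with udelay(), and decrement until the budget is exhausted. A generic sketch of that idiom; the helper name and callback are hypothetical, not part of the driver:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

/*
 * Hypothetical helper capturing the wait-idle pattern above: poll_fn()
 * returns true once the unit is idle; *timeout_us is the remaining
 * budget in microseconds and is consumed as we spin.
 */
static int poll_until_idle(bool (*poll_fn)(void *data), void *data,
			   u32 *timeout_us, u32 check_period_us)
{
	do {
		u32 check = min_t(u32, check_period_us, *timeout_us);

		if (poll_fn(data))
			return 0;

		udelay(check_period_us);
		*timeout_us -= check;
	} while (*timeout_us);

	return -ETIMEDOUT;
}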
Example #3
static int flcn_read_ucode(struct platform_device *dev, const char *fw_name)
{
	struct flcn *v = get_flcn(dev);
	const struct firmware *ucode_fw;
	int err;
	DEFINE_DMA_ATTRS(attrs);

	nvhost_dbg_fn("");

	v->dma_addr = 0;
	v->mapped = NULL;

	ucode_fw = nvhost_client_request_firmware(dev, fw_name);
	if (!ucode_fw) {
		nvhost_dbg_fn("request firmware failed");
		dev_err(&dev->dev, "failed to get firmware\n");
		return -ENOENT;
	}

	v->size = ucode_fw->size;
	dma_set_attr(DMA_ATTR_READ_ONLY, &attrs);

	v->mapped = dma_alloc_attrs(&dev->dev,
				v->size, &v->dma_addr,
				GFP_KERNEL, &attrs);
	if (!v->mapped) {
		dev_err(&dev->dev, "dma memory allocation failed");
		err = -ENOMEM;
		goto clean_up;
	}

	err = flcn_setup_ucode_image(dev, v->mapped, ucode_fw);
	if (err) {
		dev_err(&dev->dev, "failed to parse firmware image\n");
		goto clean_up;
	}

	v->valid = true;

	release_firmware(ucode_fw);

	return 0;

 clean_up:
	if (v->mapped) {
		dma_free_attrs(&dev->dev,
			v->size, v->mapped,
			v->dma_addr, &attrs);
		v->mapped = NULL;
		v->dma_addr = 0;
	}
	release_firmware(ucode_fw);
	return err;
}
Example #4
int nvhost_vic03_init(struct platform_device *dev)
{
	int err = 0;
	struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
	struct vic03 *v = get_vic03(dev);
	char *fw_name;

	nvhost_dbg_fn("in dev:%p v:%p", dev, v);

	fw_name = vic_get_fw_name(dev);
	if (!fw_name) {
		dev_err(&dev->dev, "couldn't determine firmware name");
		return -EINVAL;
	}

	if (!v) {
		nvhost_dbg_fn("allocating vic03 support");
		v = kzalloc(sizeof(*v), GFP_KERNEL);
		if (!v) {
			dev_err(&dev->dev, "couldn't alloc vic03 support");
			err = -ENOMEM;
			goto clean_up;
		}
		set_vic03(dev, v);
		v->is_booted = false;
	}
	nvhost_dbg_fn("primed dev:%p v:%p", dev, v);

	v->host = nvhost_get_host(dev);

	if (!v->ucode.valid)
		err = vic03_read_ucode(dev, fw_name);
	if (err)
		goto clean_up;

	kfree(fw_name);
	fw_name = NULL;

	nvhost_module_busy(dev);
	err = vic03_boot(dev);
	nvhost_module_idle(dev);
	if (err)
		goto clean_up;

	if (pdata->scaling_init)
		nvhost_scale_hw_init(dev);

	return 0;

 clean_up:
	kfree(fw_name);
	nvhost_err(&dev->dev, "failed");

	return err;
}
Example #5
static int flcn_probe(struct platform_device *dev)
{
	int err;
	struct nvhost_device_data *pdata = NULL;

	if (dev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_device(tegra_flcn_of_match, &dev->dev);
		if (match)
			pdata = (struct nvhost_device_data *)match->data;
	} else {
		pdata = (struct nvhost_device_data *)dev->dev.platform_data;
	}

	if (!pdata) {
		dev_err(&dev->dev, "no platform data\n");
		return -ENODATA;
	}

	nvhost_dbg_fn("dev:%p pdata:%p", dev, pdata);

	pdata->pdev = dev;
	mutex_init(&pdata->lock);
	platform_set_drvdata(dev, pdata);

	err = nvhost_client_device_get_resources(dev);
	if (err)
		return err;

	dev->dev.platform_data = NULL;

	nvhost_module_init(dev);

#ifdef CONFIG_PM_GENERIC_DOMAINS
	pdata->pd.name = kstrdup(dev->name, GFP_KERNEL);
	if (!pdata->pd.name)
		return -ENOMEM;

	err = nvhost_module_add_domain(&pdata->pd, dev);
	if (err)
		return err;
#endif

	err = nvhost_client_device_init(dev);
	if (err) {
		nvhost_dbg_fn("failed to init client device for %s",
			      dev->name);
		pm_runtime_put(&dev->dev);
		return err;
	}

	return 0;
}
Example #6
int nvhost_vic_finalize_poweron(struct platform_device *pdev)
{
	struct nvhost_device_data *pdata = nvhost_get_devdata(pdev);
	int err;

	nvhost_dbg_fn("");

	host1x_writel(pdev, flcn_slcg_override_high_a_r(), 0);
	host1x_writel(pdev, flcn_cg_r(),
		     flcn_cg_idle_cg_dly_cnt_f(4) |
		     flcn_cg_idle_cg_en_f(1) |
		     flcn_cg_wakeup_dly_cnt_f(4));

	err = nvhost_flcn_boot(pdev);
	if (err)
		return err;

	if (pdata->scaling_init) {
		err = nvhost_scale_hw_init(pdev);
		if (err)
			dev_warn(&pdev->dev, "failed to initialize scaling (%d)",
				 err);
	}

	return 0;
}
Example #7
int nvhost_msenc_init(struct platform_device *dev)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);
	int err = 0;
	struct msenc *m;
	char *fw_name;

	nvhost_dbg_fn("in dev:%p", dev);

	fw_name = msenc_get_fw_name(dev);
	if (!fw_name) {
		dev_err(&dev->dev, "couldn't determine firmware name");
		return -EINVAL;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		dev_err(&dev->dev, "couldn't alloc ucode");
		kfree(fw_name);
		return -ENOMEM;
	}
	set_msenc(dev, m);
	nvhost_dbg_fn("primed dev:%p", dev);

	err = msenc_read_ucode(dev, fw_name);
	kfree(fw_name);
	fw_name = NULL;

	if (err || !m->valid) {
		if (!err)
			err = -ENOENT;
		dev_err(&dev->dev, "ucode not valid");
		goto clean_up;
	}

	nvhost_module_busy(dev);
	err = msenc_boot(dev);
	nvhost_module_idle(dev);
	if (err)
		goto clean_up;

	if (pdata->scaling_init)
		nvhost_scale_hw_init(dev);

	return 0;

clean_up:
	dev_err(&dev->dev, "failed");
	return err;
}
Example #8
int gk20a_ctrl_dev_release(struct inode *inode, struct file *filp)
{
	struct platform_device *dev = filp->private_data;

	nvhost_dbg_fn("");

	gk20a_platform_putchannel(dev);
	return 0;
}
Example #9
static int vic03_wait_mem_scrubbing(struct platform_device *dev)
{
	int retries = VIC_IDLE_TIMEOUT_DEFAULT / VIC_IDLE_CHECK_PERIOD;

	nvhost_dbg_fn("");

	do {
		u32 w = host1x_readl(dev, flcn_dmactl_r()) &
			(flcn_dmactl_dmem_scrubbing_m() |
			 flcn_dmactl_imem_scrubbing_m());

		if (!w) {
			nvhost_dbg_fn("done");
			return 0;
		}
		udelay(VIC_IDLE_CHECK_PERIOD);
	} while (--retries || !tegra_platform_is_silicon());

	nvhost_err(&dev->dev, "Falcon mem scrubbing timeout");
	return -ETIMEDOUT;
}
Example #10
int gk20a_ctrl_dev_open(struct inode *inode, struct file *filp)
{
	int err;
	struct gk20a *g;

	nvhost_dbg_fn("");

	g = container_of(inode->i_cdev,
			 struct gk20a, ctrl.cdev);

	filp->private_data = g->dev;

	err = gk20a_platform_getchannel(g->dev);
	if (err) {
		nvhost_dbg_fn("fail to get channel!");
		return err;
	}

	return 0;
}
Example #11
static int msenc_wait_idle(struct platform_device *dev, u32 *timeout)
{
	nvhost_dbg_fn("");

	if (!*timeout)
		*timeout = MSENC_IDLE_TIMEOUT_DEFAULT;

	do {
		u32 check = min_t(u32, MSENC_IDLE_CHECK_PERIOD, *timeout);
		u32 w = host1x_readl(dev, msenc_idlestate_r());

		if (!w) {
			nvhost_dbg_fn("done");
			return 0;
		}
		udelay(MSENC_IDLE_CHECK_PERIOD);
		*timeout -= check;
	} while (*timeout);

	dev_err(&dev->dev, "msenc idle timeout");

	return -ETIMEDOUT;
}
Example #12
	/* note size here is a little over...*/
	fw_name = kzalloc(32, GFP_KERNEL);
	if (!fw_name)
		return NULL;

	decode_vic_ver(pdata->version, &maj, &min);
	sprintf(fw_name, "vic%02d_ucode.bin", maj);
	dev_info(&dev->dev, "fw name:%s\n", fw_name);

	return fw_name;
}

#define VIC_IDLE_TIMEOUT_DEFAULT	10000	/* 10 milliseconds */
#define VIC_IDLE_CHECK_PERIOD	10		/* 10 usec */
static int vic03_flcn_wait_idle(struct platform_device *pdev,
				u32 *timeout)
{
	nvhost_dbg_fn("");

	if (!*timeout)
		*timeout = VIC_IDLE_TIMEOUT_DEFAULT;

	do {
		u32 check = min_t(u32, VIC_IDLE_CHECK_PERIOD, *timeout);
		u32 w = host1x_readl(pdev, flcn_idlestate_r());

		if (!w) {
			nvhost_dbg_fn("done");
			return 0;
		}
		udelay(VIC_IDLE_CHECK_PERIOD);
		*timeout -= check;
	} while (*timeout);

	dev_err(&pdev->dev, "vic03 flcn idle timeout");

	return -ETIMEDOUT;
}
Example #13
static int vic03_flcn_dma_wait_idle(struct platform_device *pdev, u32 *timeout)
{
	nvhost_dbg_fn("");

	if (!*timeout)
		*timeout = VIC_IDLE_TIMEOUT_DEFAULT;

	do {
		u32 check = min_t(u32, VIC_IDLE_CHECK_PERIOD, *timeout);
		u32 dmatrfcmd = host1x_readl(pdev, flcn_dmatrfcmd_r());
		u32 idle_v = flcn_dmatrfcmd_idle_v(dmatrfcmd);

		if (flcn_dmatrfcmd_idle_true_v() == idle_v) {
			nvhost_dbg_fn("done");
			return 0;
		}
		udelay(VIC_IDLE_CHECK_PERIOD);
		*timeout -= check;
	} while (*timeout);

	dev_err(&pdev->dev, "vic03 dma idle timeout");

	return -ETIMEDOUT;
}
Example #14
int gk20a_init_clk_support(struct gk20a *g)
{
	struct clk_gk20a *clk = &g->clk;
	int err;

	nvhost_dbg_fn("");

	clk->g = g;

	err = gk20a_init_clk_reset_enable_hw(g);
	if (err)
		return err;

	err = gk20a_init_clk_setup_sw(g);
	if (err)
		return err;

	mutex_lock(&clk->clk_mutex);
	clk->clk_hw_on = true;

	err = gk20a_init_clk_setup_hw(g);
	mutex_unlock(&clk->clk_mutex);
	if (err)
		return err;

	err = gk20a_clk_register_export_ops(g);
	if (err)
		return err;

	/* FIXME: this effectively prevents host level clock gating */
	err = clk_enable(g->clk.tegra_clk);
	if (err)
		return err;

	/* The prev call may not enable PLL if gbus is unbalanced - force it */
	mutex_lock(&clk->clk_mutex);
	err = set_pll_freq(g, clk->gpc_pll.freq, clk->gpc_pll.freq);
	mutex_unlock(&clk->clk_mutex);
	if (err)
		return err;

	gk20a_writel(g,
		timer_pri_timeout_r(),
		timer_pri_timeout_period_f(0x200) |
		timer_pri_timeout_en_m());

	return err;
}
Example #15
static int gk20a_init_clk_setup_hw(struct gk20a *g)
{
	u32 data;

	nvhost_dbg_fn("");

	data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
	data = set_field(data,
			trim_sys_gpc2clk_out_sdiv14_m() |
			trim_sys_gpc2clk_out_vcodiv_m() |
			trim_sys_gpc2clk_out_bypdiv_m(),
			trim_sys_gpc2clk_out_sdiv14_indiv4_mode_f() |
			trim_sys_gpc2clk_out_vcodiv_by1_f() |
			trim_sys_gpc2clk_out_bypdiv_f(0));
	gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);

	return 0;
}
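Both gk20a_init_clk_setup_hw() above and clk_program_gpc_pll() further down lean on a set_field() helper that is not shown in these examples. A minimal sketch with the usual (value, mask, field) read-modify-write semantics, assuming that is what the gk20a headers define:

#include <linux/types.h>

/* Clear the bits selected by mask, then merge in the new field value.
 * A sketch of the helper assumed by the callers above; the real gk20a
 * header may differ in minor details. */
static inline u32 set_field(u32 val, u32 mask, u32 field)
{
	return (val & ~mask) | field;
}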
Example #16
int nvhost_vic_prepare_poweroff(struct platform_device *dev)
{
	struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
	struct nvhost_channel *ch = pdata->channels[0];

	nvhost_dbg_fn("");

	if (pdata->scaling_deinit)
		nvhost_scale_hw_deinit(dev);

	if (ch && ch->dev) {
		mutex_lock(&ch->submitlock);
		ch->cur_ctx = NULL;
		mutex_unlock(&ch->submitlock);
	}

	return 0;
}
Example #17
static int set_pll_freq(struct gk20a *g, u32 freq, u32 old_freq)
{
	struct clk_gk20a *clk = &g->clk;
	int err = 0;

	nvhost_dbg_fn("curr freq: %dMHz, target freq %dMHz", old_freq, freq);

	if ((freq == old_freq) && clk->gpc_pll.enabled)
		return 0;

	/* change frequency only if power is on */
	if (g->clk.clk_hw_on) {
		err = clk_program_gpc_pll(g, clk, 1);
		if (err)
			err = clk_program_gpc_pll(g, clk, 0);
	}

	/* Just report the error; don't restore the PLL, since DVFS may
	 * already have changed the voltage even when this returns an error. */
	if (err)
		nvhost_err(dev_from_gk20a(g),
			"failed to set pll to %d", freq);
	return err;
}
Example #18
/* Calculate and update M/N/PL as well as pll->freq
 * ref_clk_f = clk_in_f / src_div = clk_in_f; (src_div = 1 on gk20a)
 * u_f = ref_clk_f / M;
 * PLL output = vco_f = u_f * N = ref_clk_f * N / M;
 * gpc2clk = target clock frequency = vco_f / PL;
 * gpcclk = gpc2clk / 2; */
static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
	struct pll_parms *pll_params, u32 *target_freq, bool best_fit)
{
	u32 min_vco_f, max_vco_f;
	u32 best_M, best_N;
	u32 low_PL, high_PL, best_PL;
	u32 m, n, n2;
	u32 target_vco_f, vco_f;
	u32 ref_clk_f, target_clk_f, u_f;
	u32 delta, lwv, best_delta = ~0;
	int pl;

	BUG_ON(target_freq == NULL);

	nvhost_dbg_fn("request target freq %d MHz", *target_freq);

	ref_clk_f = pll->clk_in;
	target_clk_f = *target_freq;
	max_vco_f = pll_params->max_vco;
	min_vco_f = pll_params->min_vco;
	best_M = pll_params->max_M;
	best_N = pll_params->min_N;
	best_PL = pll_params->min_PL;

	target_vco_f = target_clk_f + target_clk_f / 50;
	if (max_vco_f < target_vco_f)
		max_vco_f = target_vco_f;

	high_PL = (max_vco_f + target_vco_f - 1) / target_vco_f;
	high_PL = min(high_PL, pll_params->max_PL);
	high_PL = max(high_PL, pll_params->min_PL);

	low_PL = min_vco_f / target_vco_f;
	low_PL = min(low_PL, pll_params->max_PL);
	low_PL = max(low_PL, pll_params->min_PL);

	/* Find Indices of high_PL and low_PL */
	for (pl = 0; pl < 14; pl++) {
		if (pl_to_div[pl] >= low_PL) {
			low_PL = pl;
			break;
		}
	}
	for (pl = 0; pl < 14; pl++) {
		if (pl_to_div[pl] >= high_PL) {
			high_PL = pl;
			break;
		}
	}
	nvhost_dbg_info("low_PL %d(div%d), high_PL %d(div%d)",
			low_PL, pl_to_div[low_PL], high_PL, pl_to_div[high_PL]);

	for (pl = low_PL; pl <= high_PL; pl++) {
		target_vco_f = target_clk_f * pl_to_div[pl];

		for (m = pll_params->min_M; m <= pll_params->max_M; m++) {
			u_f = ref_clk_f / m;

			if (u_f < pll_params->min_u)
				break;
			if (u_f > pll_params->max_u)
				continue;

			n = (target_vco_f * m) / ref_clk_f;
			n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;

			if (n > pll_params->max_N)
				break;

			for (; n <= n2; n++) {
				if (n < pll_params->min_N)
					continue;
				if (n > pll_params->max_N)
					break;

				vco_f = ref_clk_f * n / m;

				if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
					lwv = (vco_f + (pl_to_div[pl] / 2))
						/ pl_to_div[pl];
					delta = abs(lwv - target_clk_f);

					if (delta < best_delta) {
						best_delta = delta;
						best_M = m;
						best_N = n;
						best_PL = pl;

						if (best_delta == 0 ||
						    /* 0.45% for non best fit */
						    (!best_fit && (vco_f / best_delta > 218))) {
							goto found_match;
						}

						nvhost_dbg_info("delta %d @ M %d, N %d, PL %d",
							delta, m, n, pl);
					}
				}
			}
		}
	}

found_match:
	BUG_ON(best_delta == ~0U);

	if (best_fit && best_delta != 0)
		nvhost_dbg_clk("no best match for target @ %dMHz on gpc_pll",
			target_clk_f);

	pll->M = best_M;
	pll->N = best_N;
	pll->PL = best_PL;

	/* save current frequency */
	pll->freq = ref_clk_f * pll->N / (pll->M * pl_to_div[pll->PL]);

	*target_freq = pll->freq;

	nvhost_dbg_clk("actual target freq %d MHz, M %d, N %d, PL %d(div%d)",
		*target_freq, pll->M, pll->N, pll->PL, pl_to_div[pll->PL]);

	nvhost_dbg_fn("done");

	return 0;
}
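As a quick sanity check of the PLL arithmetic described in the comment above clk_config_pll(), here is a standalone computation with illustrative numbers (12 MHz reference, M = 1, N = 100, and an assumed post divider pl_to_div[PL] = 2; none of these values come from the driver):

#include <stdio.h>

int main(void)
{
	/* Illustrative numbers only -- not taken from the driver. */
	unsigned ref_clk_f = 12;           /* MHz, assumes src_div = 1 */
	unsigned M = 1, N = 100, pl_div = 2;

	unsigned vco_f = ref_clk_f * N / M;  /* 12 * 100 / 1 = 1200 MHz */
	unsigned gpc2clk = vco_f / pl_div;   /* 1200 / 2 = 600 MHz */
	unsigned gpcclk = gpc2clk / 2;       /* 600 / 2 = 300 MHz */

	printf("vco %u MHz, gpc2clk %u MHz, gpcclk %u MHz\n",
	       vco_f, gpc2clk, gpcclk);
	return 0;
}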
Example #19
static void vic03_save_push_hwctx(struct nvhost_hwctx *ctx,
				  struct nvhost_cdma *cdma)
{
	nvhost_dbg_fn("");
}
Example #20
static void vic03_put_hwctx(struct nvhost_hwctx *ctx)
{
	nvhost_dbg_fn("");
	kref_put(&ctx->ref, vic03_free_hwctx);
}
Example #21
static void vic03_get_hwctx(struct nvhost_hwctx *ctx)
{
	nvhost_dbg_fn("");
	kref_get(&ctx->ref);
}
Example #22
static struct nvhost_hwctx *vic03_alloc_hwctx(struct nvhost_hwctx_handler *h,
		struct nvhost_channel *ch)
{
	struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
	struct vic03 *v = get_vic03(ch->dev);
	struct host1x_hwctx *ctx;
	u32 *ptr;
	u32 syncpt = nvhost_get_devdata(ch->dev)->syncpts[0];
	u32 nvhost_vic03_restore_size = 10; /* number of words written below */

	nvhost_dbg_fn("");

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->restore_size = nvhost_vic03_restore_size;

	ctx->cpuva = dma_alloc_writecombine(&ch->dev->dev,
						ctx->restore_size * 4,
						&ctx->iova,
						GFP_KERNEL);
	if (!ctx->cpuva) {
		dev_err(&ch->dev->dev, "memory allocation failed\n");
		goto fail;
	}

	ptr = ctx->cpuva;

	/* set app id, fce ucode size, offset */
	ptr[0] = nvhost_opcode_incr(VIC_UCLASS_METHOD_OFFSET, 2);
	ptr[1] = NVA0B6_VIDEO_COMPOSITOR_SET_APPLICATION_ID  >> 2;
	ptr[2] = 1;

	ptr[3] = nvhost_opcode_incr(VIC_UCLASS_METHOD_OFFSET, 2);
	ptr[4] = NVA0B6_VIDEO_COMPOSITOR_SET_FCE_UCODE_SIZE >> 2;
	ptr[5] = v->ucode.fce.size;

	ptr[6] = nvhost_opcode_incr(VIC_UCLASS_METHOD_OFFSET, 2);
	ptr[7] = NVA0B6_VIDEO_COMPOSITOR_SET_FCE_UCODE_OFFSET >> 2;
	ptr[8] = (v->ucode.dma_addr + v->ucode.fce.data_offset) >> 8;

	/* syncpt increment to track restore gather. */
	ptr[9] = nvhost_opcode_imm_incr_syncpt(
			host1x_uclass_incr_syncpt_cond_op_done_v(),
			syncpt);

	kref_init(&ctx->hwctx.ref);
	ctx->hwctx.h = &p->h;
	ctx->hwctx.channel = ch;
	ctx->hwctx.valid = true; /* this is a preconditioning sequence... */
	ctx->hwctx.save_incrs = 0;
	ctx->hwctx.save_slots = 0;

	ctx->hwctx.restore_incrs = 1;

	return &ctx->hwctx;

 fail:
	kfree(ctx);
	return NULL;
}
Example #23
static int clk_program_gpc_pll(struct gk20a *g, struct clk_gk20a *clk,
			int allow_slide)
{
	u32 data, cfg, coeff, timeout;
	u32 m, n, pl;
	u32 nlo;

	nvhost_dbg_fn("");

	/* get old coefficients */
	coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
	m = trim_sys_gpcpll_coeff_mdiv_v(coeff);
	n = trim_sys_gpcpll_coeff_ndiv_v(coeff);
	pl = trim_sys_gpcpll_coeff_pldiv_v(coeff);

	/* do NDIV slide if there is no change in M and PL */
	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	if (allow_slide && clk->gpc_pll.M == m && clk->gpc_pll.PL == pl
		&& trim_sys_gpcpll_cfg_enable_v(cfg)) {
		return clk_slide_gpc_pll(g, clk->gpc_pll.N);
	}

	/* slide down to NDIV_LO */
	nlo = DIV_ROUND_UP(m * gpc_pll_params.min_vco, clk->gpc_pll.clk_in);
	if (allow_slide && trim_sys_gpcpll_cfg_enable_v(cfg)) {
		int ret = clk_slide_gpc_pll(g, nlo);
		if (ret)
			return ret;
	}

	/* split FO-to-bypass jump in halves by setting out divider 1:2 */
	data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
	data = set_field(data, trim_sys_gpc2clk_out_vcodiv_m(),
		trim_sys_gpc2clk_out_vcodiv_f(2));
	gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);

	/* put PLL in bypass before programming it */
	data = gk20a_readl(g, trim_sys_sel_vco_r());
	data = set_field(data, trim_sys_sel_vco_gpc2clk_out_m(),
		trim_sys_sel_vco_gpc2clk_out_bypass_f());
	udelay(2);
	gk20a_writel(g, trim_sys_sel_vco_r(), data);

	/* get out from IDDQ */
	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	if (trim_sys_gpcpll_cfg_iddq_v(cfg)) {
		cfg = set_field(cfg, trim_sys_gpcpll_cfg_iddq_m(),
				trim_sys_gpcpll_cfg_iddq_power_on_v());
		gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
		gk20a_readl(g, trim_sys_gpcpll_cfg_r());
		udelay(2);
	}

	/* disable PLL before changing coefficients */
	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(),
			trim_sys_gpcpll_cfg_enable_no_f());
	gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
	gk20a_readl(g, trim_sys_gpcpll_cfg_r());

	/* change coefficients */
	nlo = DIV_ROUND_UP(clk->gpc_pll.M * gpc_pll_params.min_vco,
			clk->gpc_pll.clk_in);
	coeff = trim_sys_gpcpll_coeff_mdiv_f(clk->gpc_pll.M) |
		trim_sys_gpcpll_coeff_ndiv_f(allow_slide ?
					     nlo : clk->gpc_pll.N) |
		trim_sys_gpcpll_coeff_pldiv_f(clk->gpc_pll.PL);
	gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);

	/* enable PLL after changing coefficients */
	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(),
			trim_sys_gpcpll_cfg_enable_yes_f());
	gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);

	/* lock pll */
	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	if (cfg & trim_sys_gpcpll_cfg_enb_lckdet_power_off_f()) {
		cfg = set_field(cfg, trim_sys_gpcpll_cfg_enb_lckdet_m(),
			trim_sys_gpcpll_cfg_enb_lckdet_power_on_f());
		gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
	}

	/* wait pll lock */
	timeout = clk->pll_delay / 2 + 1;
	do {
		cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
		if (cfg & trim_sys_gpcpll_cfg_pll_lock_true_f())
			goto pll_locked;
		udelay(2);
	} while (--timeout > 0);

	/* PLL is messed up. What can we do here? */
	BUG();
	return -EBUSY;

pll_locked:
	/* put PLL back on vco */
	data = gk20a_readl(g, trim_sys_sel_vco_r());
	data = set_field(data, trim_sys_sel_vco_gpc2clk_out_m(),
		trim_sys_sel_vco_gpc2clk_out_vco_f());
	gk20a_writel(g, trim_sys_sel_vco_r(), data);
	clk->gpc_pll.enabled = true;

	/* restore out divider 1:1 */
	data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
	data = set_field(data, trim_sys_gpc2clk_out_vcodiv_m(),
		trim_sys_gpc2clk_out_vcodiv_by1_f());
	udelay(2);
	gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);

	/* slide up to target NDIV */
	return clk_slide_gpc_pll(g, clk->gpc_pll.N);
}
Example #24
static int gk20a_init_clk_reset_enable_hw(struct gk20a *g)
{
	nvhost_dbg_fn("");
	return 0;
}
Example #25
static int gk20a_init_clk_setup_sw(struct gk20a *g)
{
	struct clk_gk20a *clk = &g->clk;
	static int initialized;
	unsigned long *freqs;
	int err, num_freqs;
	struct clk *ref;
	unsigned long ref_rate;

	nvhost_dbg_fn("");

	if (clk->sw_ready) {
		nvhost_dbg_fn("skip init");
		return 0;
	}

	if (!gk20a_clk_get(g))
		return -EINVAL;

	ref = clk_get_parent(clk_get_parent(clk->tegra_clk));
	if (IS_ERR(ref)) {
		nvhost_err(dev_from_gk20a(g),
			"failed to get GPCPLL reference clock");
		return -EINVAL;
	}
	ref_rate = clk_get_rate(ref);

	clk->pll_delay = 300; /* usec */

	clk->gpc_pll.id = GK20A_GPC_PLL;
	clk->gpc_pll.clk_in = ref_rate / 1000000; /* MHz */

	/* Decide initial frequency */
	if (!initialized) {
		initialized = 1;
		clk->gpc_pll.M = 1;
		clk->gpc_pll.N = DIV_ROUND_UP(gpc_pll_params.min_vco,
					clk->gpc_pll.clk_in);
		clk->gpc_pll.PL = 1;
		clk->gpc_pll.freq = clk->gpc_pll.clk_in * clk->gpc_pll.N;
		clk->gpc_pll.freq /= pl_to_div[clk->gpc_pll.PL];
	}

	err = tegra_dvfs_get_freqs(clk_get_parent(clk->tegra_clk),
				   &freqs, &num_freqs);
	if (!err) {
		int i, j;

		/* init j for inverse traversal of frequencies */
		j = num_freqs - 1;

		gpu_cooling_freq = kzalloc(
				(1 + num_freqs) * sizeof(*gpu_cooling_freq),
				GFP_KERNEL);
		if (!gpu_cooling_freq)
			return -ENOMEM;

		/* store frequencies in inverse order */
		for (i = 0; i < num_freqs; ++i, --j) {
			gpu_cooling_freq[i].index = i;
			gpu_cooling_freq[i].frequency = freqs[j];
		}

		/* add 'end of table' marker */
		gpu_cooling_freq[i].index = i;
		gpu_cooling_freq[i].frequency = GPUFREQ_TABLE_END;

		/* store number of frequencies */
		num_gpu_cooling_freq = num_freqs + 1;
	}

	mutex_init(&clk->clk_mutex);

	clk->sw_ready = true;

	nvhost_dbg_fn("done");
	return 0;
}
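The cooling-table construction in gk20a_init_clk_setup_sw() mirrors the DVFS frequency list into inverse order and terminates it with an end-of-table marker. A standalone sketch of the same transform, with hypothetical names and made-up frequencies:

#include <stdio.h>

#define TABLE_END (~0u)		/* stand-in for GPUFREQ_TABLE_END */

struct freq_entry {
	unsigned int index;
	unsigned int frequency;
};

int main(void)
{
	/* Pretend DVFS handed us these frequencies (values are made up). */
	unsigned long freqs[] = { 852000, 700000, 500000, 300000 };
	int num_freqs = 4, i, j = num_freqs - 1;
	struct freq_entry table[5];

	/* store frequencies in inverse order */
	for (i = 0; i < num_freqs; ++i, --j) {
		table[i].index = i;
		table[i].frequency = (unsigned int)freqs[j];
	}

	/* add 'end of table' marker */
	table[i].index = i;
	table[i].frequency = TABLE_END;

	for (i = 0; i <= num_freqs; ++i)
		printf("%u: %u\n", table[i].index, table[i].frequency);
	return 0;
}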
Example #26
static int flcn_setup_ucode_image(struct platform_device *dev,
				   u32 *ucode_ptr,
				   const struct firmware *ucode_fw)
{
	struct flcn *v = get_flcn(dev);
	/* image data is little endian. */
	struct ucode_v1_flcn ucode;
	int w;
	u32 reserved_offset;
	u32 tsec_key_offset;

	nvhost_dbg_fn("");

	/* copy the whole thing taking into account endianness */
	for (w = 0; w < ucode_fw->size/sizeof(u32); w++)
		ucode_ptr[w] = le32_to_cpu(((u32 *)ucode_fw->data)[w]);

	ucode.bin_header = (struct ucode_bin_header_v1_flcn *)ucode_ptr;
	/* endian problems would show up right here */
	if (ucode.bin_header->bin_magic != 0x10de) {
		dev_err(&dev->dev,
			   "failed to get firmware magic");
		return -EINVAL;
	}
	if (ucode.bin_header->bin_ver != 1) {
		dev_err(&dev->dev,
			   "unsupported firmware version");
		return -ENOENT;
	}
	/* shouldn't be bigger than what firmware thinks */
	if (ucode.bin_header->bin_size > ucode_fw->size) {
		dev_err(&dev->dev,
			   "ucode image size inconsistency");
		return -EINVAL;
	}

	nvhost_dbg_info("ucode bin header: magic:0x%x ver:%d size:%d",
			ucode.bin_header->bin_magic,
			ucode.bin_header->bin_ver,
			ucode.bin_header->bin_size);
	nvhost_dbg_info("ucode bin header: os bin (header,data) offset size: 0x%x, 0x%x %d",
			ucode.bin_header->os_bin_header_offset,
			ucode.bin_header->os_bin_data_offset,
			ucode.bin_header->os_bin_size);
	nvhost_dbg_info("ucode bin header: fce bin (header,data) offset size: 0x%x, 0x%x %d",
			ucode.bin_header->fce_bin_header_offset,
			ucode.bin_header->fce_bin_data_offset,
			ucode.bin_header->fce_bin_size);

	ucode.os_header = (struct ucode_os_header_v1_flcn *)
		(((void *)ucode_ptr) + ucode.bin_header->os_bin_header_offset);

	nvhost_dbg_info("os ucode header: os code (offset,size): 0x%x, 0x%x",
			ucode.os_header->os_code_offset,
			ucode.os_header->os_code_size);
	nvhost_dbg_info("os ucode header: os data (offset,size): 0x%x, 0x%x",
			ucode.os_header->os_data_offset,
			ucode.os_header->os_data_size);
	nvhost_dbg_info("os ucode header: num apps: %d", ucode.os_header->num_apps);

	if (ucode.bin_header->fce_bin_header_offset != 0xa5a5a5a5) {
		ucode.fce_header = (struct ucode_fce_header_v1_flcn *)
			(((void *)ucode_ptr) +
			 ucode.bin_header->fce_bin_header_offset);
		nvhost_dbg_info("fce ucode header: offset, buffer_size, size: 0x%x 0x%x 0x%x",
				ucode.fce_header->fce_ucode_offset,
				ucode.fce_header->fce_ucode_buffer_size,
				ucode.fce_header->fce_ucode_size);
		v->fce.size        = ucode.fce_header->fce_ucode_size;
		v->fce.data_offset =
			ucode.bin_header->fce_bin_data_offset;
	}

	/* make space for reserved area - we need 20 bytes, but we move 256
	 * bytes because firmware needs to be 256 byte aligned */
	reserved_offset = ucode.bin_header->os_bin_data_offset;
	memmove(((void *)ucode_ptr) + reserved_offset + FALCON_RESERVE,
			((void *)ucode_ptr) + reserved_offset,
			ucode.bin_header->os_bin_size);
	ucode.bin_header->os_bin_data_offset += FALCON_RESERVE;

	/*  clear 256 bytes before ucode os code */
	memset(((void *)ucode_ptr) + reserved_offset, 0, FALCON_RESERVE);

	/* Copy key to be the 16 bytes before the firmware */
	tsec_key_offset = reserved_offset + TSEC_KEY_OFFSET;
	memcpy(((void *)ucode_ptr) + tsec_key_offset, otf_key, TSEC_KEY_LENGTH);
	v->os.size = ucode.bin_header->os_bin_size;
	v->os.bin_data_offset = ucode.bin_header->os_bin_data_offset;
	v->os.code_offset = ucode.os_header->os_code_offset;
	v->os.data_offset = ucode.os_header->os_data_offset;
	v->os.data_size   = ucode.os_header->os_data_size;

	return 0;
}
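For reference, the header layout implied by the field accesses in flcn_setup_ucode_image() is sketched below. The field set is taken from the code above, but the ordering and exact types are assumptions; the driver's ucode_v1_flcn headers are authoritative.

#include <linux/types.h>

/* Reconstructed from the accesses above -- assumes every field is a
 * little-endian u32 and that fields appear in this order. */
struct ucode_bin_header_v1_flcn {
	u32 bin_magic;			/* 0x10de */
	u32 bin_ver;			/* bin format version (1) */
	u32 bin_size;			/* entire image size incl. header */
	u32 os_bin_header_offset;
	u32 os_bin_data_offset;
	u32 os_bin_size;
	u32 fce_bin_header_offset;	/* 0xa5a5a5a5 if no FCE ucode */
	u32 fce_bin_data_offset;
	u32 fce_bin_size;
};

struct ucode_os_header_v1_flcn {
	u32 os_code_offset;
	u32 os_code_size;
	u32 os_data_offset;
	u32 os_data_size;
	u32 num_apps;
};

struct ucode_fce_header_v1_flcn {
	u32 fce_ucode_offset;
	u32 fce_ucode_buffer_size;
	u32 fce_ucode_size;
};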
Example #27
long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct platform_device *dev = filp->private_data;
	struct gk20a *g = get_gk20a(dev);
	struct nvhost_gpu_zcull_get_ctx_size_args *get_ctx_size_args;
	struct nvhost_gpu_zcull_get_info_args *get_info_args;
	struct nvhost_gpu_zbc_set_table_args *set_table_args;
	struct nvhost_gpu_zbc_query_table_args *query_table_args;
	u8 buf[NVHOST_GPU_IOCTL_MAX_ARG_SIZE];
	struct gr_zcull_info *zcull_info;
	struct zbc_entry *zbc_val;
	struct zbc_query_params *zbc_tbl;
	int i, err = 0;

	nvhost_dbg_fn("");

	if ((_IOC_TYPE(cmd) != NVHOST_GPU_IOCTL_MAGIC) ||
		(_IOC_NR(cmd) == 0) ||
		(_IOC_NR(cmd) > NVHOST_GPU_IOCTL_LAST))
		return -EFAULT;

	BUG_ON(_IOC_SIZE(cmd) > NVHOST_GPU_IOCTL_MAX_ARG_SIZE);

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	if (!g->gr.sw_ready) {
		gk20a_busy(g->dev);
		gk20a_idle(g->dev);
	}

	switch (cmd) {
	case NVHOST_GPU_IOCTL_ZCULL_GET_CTX_SIZE:
		get_ctx_size_args = (struct nvhost_gpu_zcull_get_ctx_size_args *)buf;

		get_ctx_size_args->size = gr_gk20a_get_ctxsw_zcull_size(g, &g->gr);

		break;
	case NVHOST_GPU_IOCTL_ZCULL_GET_INFO:
		get_info_args = (struct nvhost_gpu_zcull_get_info_args *)buf;

		memset(get_info_args, 0, sizeof(struct nvhost_gpu_zcull_get_info_args));

		zcull_info = kzalloc(sizeof(struct gr_zcull_info), GFP_KERNEL);
		if (zcull_info == NULL)
			return -ENOMEM;

		err = gr_gk20a_get_zcull_info(g, &g->gr, zcull_info);
		if (err) {
			kfree(zcull_info);
			break;
		}

		get_info_args->width_align_pixels = zcull_info->width_align_pixels;
		get_info_args->height_align_pixels = zcull_info->height_align_pixels;
		get_info_args->pixel_squares_by_aliquots = zcull_info->pixel_squares_by_aliquots;
		get_info_args->aliquot_total = zcull_info->aliquot_total;
		get_info_args->region_byte_multiplier = zcull_info->region_byte_multiplier;
		get_info_args->region_header_size = zcull_info->region_header_size;
		get_info_args->subregion_header_size = zcull_info->subregion_header_size;
		get_info_args->subregion_width_align_pixels = zcull_info->subregion_width_align_pixels;
		get_info_args->subregion_height_align_pixels = zcull_info->subregion_height_align_pixels;
		get_info_args->subregion_count = zcull_info->subregion_count;

		kfree(zcull_info);
		break;
	case NVHOST_GPU_IOCTL_ZBC_SET_TABLE:
		set_table_args = (struct nvhost_gpu_zbc_set_table_args *)buf;

		zbc_val = kzalloc(sizeof(struct zbc_entry), GFP_KERNEL);
		if (zbc_val == NULL)
			return -ENOMEM;

		zbc_val->format = set_table_args->format;
		zbc_val->type = set_table_args->type;

		switch (zbc_val->type) {
		case GK20A_ZBC_TYPE_COLOR:
			for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) {
				zbc_val->color_ds[i] = set_table_args->color_ds[i];
				zbc_val->color_l2[i] = set_table_args->color_l2[i];
			}
			break;
		case GK20A_ZBC_TYPE_DEPTH:
			zbc_val->depth = set_table_args->depth;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			gk20a_busy(dev);
			err = gk20a_gr_zbc_set_table(g, &g->gr, zbc_val);
			gk20a_idle(dev);
		}

		kfree(zbc_val);
		break;
	case NVHOST_GPU_IOCTL_ZBC_QUERY_TABLE:
		query_table_args = (struct nvhost_gpu_zbc_query_table_args *)buf;

		zbc_tbl = kzalloc(sizeof(struct zbc_query_params), GFP_KERNEL);
		if (zbc_tbl == NULL)
			return -ENOMEM;

		zbc_tbl->type = query_table_args->type;
		zbc_tbl->index_size = query_table_args->index_size;

		err = gr_gk20a_query_zbc(g, &g->gr, zbc_tbl);

		if (!err) {
			switch (zbc_tbl->type) {
			case GK20A_ZBC_TYPE_COLOR:
				for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) {
					query_table_args->color_ds[i] = zbc_tbl->color_ds[i];
					query_table_args->color_l2[i] = zbc_tbl->color_l2[i];
				}
				break;
			case GK20A_ZBC_TYPE_DEPTH:
				query_table_args->depth = zbc_tbl->depth;
				break;
			case GK20A_ZBC_TYPE_INVALID:
				query_table_args->index_size = zbc_tbl->index_size;
				break;
			default:
				err = -EINVAL;
			}
			if (!err) {
				query_table_args->format = zbc_tbl->format;
				query_table_args->ref_cnt = zbc_tbl->ref_cnt;
			}
		}

		kfree(zbc_tbl);
		break;

	case NVHOST_GPU_IOCTL_GET_CHARACTERISTICS:
		err = gk20a_ctrl_ioctl_gpu_characteristics(
			g, (struct nvhost_gpu_get_characteristics *)buf);
		break;

	default:
		nvhost_err(dev_from_gk20a(g), "unrecognized gpu ioctl cmd: 0x%x", cmd);
		err = -ENOTTY;
		break;
	}

	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ) &&
	    copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
		err = -EFAULT;

	return err;
}
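From user space these ioctls follow the usual open()/ioctl() pattern. A hypothetical sketch of querying the ZCULL context size; the device node path and the uapi header name are assumptions, not taken from the code above:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
/* Assumed uapi header exposing NVHOST_GPU_IOCTL_* and the arg structs. */
#include <linux/nvhost_ioctl.h>

int main(void)
{
	struct nvhost_gpu_zcull_get_ctx_size_args args;
	/* Device node name is an assumption; it varies by platform. */
	int fd = open("/dev/nvhost-ctrl-gpu", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&args, 0, sizeof(args));
	if (ioctl(fd, NVHOST_GPU_IOCTL_ZCULL_GET_CTX_SIZE, &args) == 0)
		printf("zcull ctx size: %u\n", args.size);

	close(fd);
	return 0;
}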
Example #28
static int vhost_client_probe(struct platform_device *dev)
{
	int err;
	struct nvhost_device_data *pdata = NULL;

	if (dev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_device(tegra_client_of_match, &dev->dev);

		if (!match)
			return -ENODEV;

		pdata = (struct nvhost_device_data *)match->data;

#ifdef CONFIG_TEGRA_GRHOST_ISP
		/* If ISP, need to differentiate ISP.0 from ISP.1 */
		if (!IS_ENABLED(CONFIG_ARCH_TEGRA_18x_SOC)) {
			int dev_id = 0;
			if (sscanf(dev->name, "%x.isp", &dev_id) == 1) {
				switch (tegra_get_chipid()) {
				case TEGRA_CHIPID_TEGRA12:
					if (dev_id == 0x54600000)
						pdata = &t124_isp_info;
					if (dev_id == 0x54680000)
						pdata = &t124_ispb_info;
					break;
				default:
					/* Only T124 is virtualized, for now */
					return -EINVAL;
				}
			}
		}
#endif
	}

	if (!pdata) {
		dev_err(&dev->dev, "no platform data\n");
		return -ENODATA;
	}

	pdata->virtual_dev = true;

	nvhost_dbg_fn("dev:%p pdata:%p", dev, pdata);

	pdata->pdev = dev;
	mutex_init(&pdata->lock);
	platform_set_drvdata(dev, pdata);

	/* Disable power management when virtual */
	pdata->can_powergate = false;
	pdata->busy = NULL;
	pdata->idle = NULL;
	pdata->scaling_init = NULL;
	pdata->finalize_poweron = nvhost_vhost_client_finalize_poweron;
	pdata->poweron_reset = false;
	pdata->engine_cg_regs = NULL;
	pdata->keepalive = false;

	pdata->hw_init = NULL;

	dev->dev.platform_data = NULL;

	nvhost_module_init(dev);

#ifdef CONFIG_PM_GENERIC_DOMAINS
	pdata->pd.name = kstrdup(dev->name, GFP_KERNEL);
	if (!pdata->pd.name)
		return -ENOMEM;

	err = nvhost_module_add_domain(&pdata->pd, dev);
	if (err)
		return err;
#endif

	err = nvhost_virt_init(dev, pdata->moduleid);
	if (err) {
		dev_err(&dev->dev, "nvhost_virt_init failed for %s",
			      dev->name);
		pm_runtime_put(&dev->dev);
		return err;
	}

	err = nvhost_client_device_init(dev);
	if (err) {
		dev_err(&dev->dev, "failed to init client device for %s",
			      dev->name);
		pm_runtime_put(&dev->dev);
		return err;
	}

	return 0;
}