Example #1
0
/*
 * nvhost_scale3d_init - set up the 3d frequency-scaling state machine
 *
 * On the first call this initialises the lock and workers, looks up the
 * 3d and emc clocks from @d, derives the linear + quadratic-dip emc
 * scaling coefficients (derivation below) and creates the sysfs control
 * file.  Every call (re)builds the table of reachable 3d clock rates in
 * gpu_loading[] and resets the scaling state.
 *
 * Fixes vs. previous revision: declarations moved ahead of statements
 * (C90 / -Wdeclaration-after-statement), space-indented lines re-tabbed,
 * missing space after '=' restored.
 */
void nvhost_scale3d_init(struct nvhost_device *d)
{
	int i = 0;
	struct clk *c;
	long rate = 0;

	if (!scale3d.init) {
		int error;
		unsigned long max_emc, min_emc;
		long correction;

		mutex_init(&scale3d.lock);

		INIT_WORK(&scale3d.work, scale3d_clocks_handler);
		INIT_DELAYED_WORK(&scale3d.idle_timer, scale3d_idle_handler);

		scale3d.clk_3d = d->clk[0];
		/* Tegra3 has a second 3d clock; the emc clock index shifts */
		if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) {
			scale3d.clk_3d2 = d->clk[1];
			scale3d.clk_3d_emc = d->clk[2];
		} else
			scale3d.clk_3d_emc = d->clk[1];

		scale3d.max_rate_3d = clk_round_rate(scale3d.clk_3d, UINT_MAX);
		scale3d.min_rate_3d = clk_round_rate(scale3d.clk_3d, 0);

		if (scale3d.max_rate_3d == scale3d.min_rate_3d) {
			pr_warn("scale3d: 3d max rate = min rate (%lu), "
				"disabling\n", scale3d.max_rate_3d);
			scale3d.enable = 0;
			return;
		}

		/* emc scaling:
		 *
		 * Remc = S * R3d + O - (Sd * (R3d - Rm)^2 + Od)
		 *
		 * Remc - 3d.emc rate
		 * R3d  - 3d.cbus rate
		 * Rm   - 3d.cbus 'middle' rate = (max + min)/2
		 * S    - emc_slope
		 * O    - emc_offset
		 * Sd   - emc_dip_slope
		 * Od   - emc_dip_offset
		 *
		 * this superposes a quadratic dip centered around the middle 3d
		 * frequency over a linear correlation of 3d.emc to 3d clock
		 * rates.
		 *
		 * S, O are chosen so that the maximum 3d rate produces the
		 * maximum 3d.emc rate exactly, and the minimum 3d rate produces
		 * at least the minimum 3d.emc rate.
		 *
		 * Sd and Od are chosen to produce the largest dip that will
		 * keep 3d.emc frequencies monotonously decreasing with 3d
		 * frequencies. To achieve this, the first derivative of Remc
		 * with respect to R3d should be zero for the minimal 3d rate:
		 *
		 *   R'emc = S - 2 * Sd * (R3d - Rm)
		 *   R'emc(R3d-min) = 0
		 *   S = 2 * Sd * (R3d-min - Rm)
		 *     = 2 * Sd * (R3d-min - R3d-max) / 2
		 *   Sd = S / (R3d-min - R3d-max)
		 *
		 *   +---------------------------------------------------+
		 *   | Sd = -(emc-max - emc-min) / (R3d-min - R3d-max)^2 |
		 *   +---------------------------------------------------+
		 *
		 *   dip = Sd * (R3d - Rm)^2 + Od
		 *
		 * requiring dip(R3d-min) = 0 and dip(R3d-max) = 0 gives
		 *
		 *   Sd * (R3d-min - Rm)^2 + Od = 0
		 *   Od = -Sd * ((R3d-min - R3d-max) / 2)^2
		 *      = -Sd * ((R3d-min - R3d-max)^2) / 4
		 *
		 *   +------------------------------+
		 *   | Od = (emc-max - emc-min) / 4 |
		 *   +------------------------------+
		 */

		max_emc = clk_round_rate(scale3d.clk_3d_emc, UINT_MAX);
		min_emc = clk_round_rate(scale3d.clk_3d_emc, 0);

		scale3d.emc_slope = (max_emc - min_emc) /
			 (scale3d.max_rate_3d - scale3d.min_rate_3d);
		scale3d.emc_offset = max_emc -
			scale3d.emc_slope * scale3d.max_rate_3d;
		/* guarantee max 3d rate maps to max emc rate */
		scale3d.emc_offset += max_emc -
			(scale3d.emc_slope * scale3d.max_rate_3d +
			scale3d.emc_offset);

		scale3d.emc_dip_offset = (max_emc - min_emc) / 4;
		scale3d.emc_dip_slope =
			-4 * (scale3d.emc_dip_offset /
			(POW2(scale3d.max_rate_3d - scale3d.min_rate_3d)));
		scale3d.emc_xmid =
			(scale3d.max_rate_3d + scale3d.min_rate_3d) / 2;
		correction =
			scale3d.emc_dip_offset +
				scale3d.emc_dip_slope *
				POW2(scale3d.max_rate_3d - scale3d.emc_xmid);
		scale3d.emc_dip_offset -= correction;

		/* set scaling parameter defaults */
		scale3d.enable = 1;
		scale3d.period = scale3d.p_period = 100000;
		scale3d.idle_min = scale3d.p_idle_min = 10;
		scale3d.idle_max = scale3d.p_idle_max = 15;
		scale3d.fast_response = scale3d.p_fast_response = 7000;
		scale3d.p_scale_emc = 1;
		scale3d.p_emc_dip = 1;
		scale3d.p_verbosity = 0;
		scale3d.p_adjust = 1;
		scale3d.p_use_throughput_hint = 1;
		scale3d.p_throughput_lo_limit = 95;
		scale3d.p_throughput_hi_limit = 100;
		scale3d.p_scale_step = 60000000;

		error = device_create_file(&d->dev,
				&dev_attr_enable_3d_scaling);
		if (error)
			dev_err(&d->dev, "failed to create sysfs attributes");

		scale3d.init = 1;
	}

	/*
	 * Build the table of reachable 3d rates.  A shared bus clock must
	 * round up (unless the top of the range is reached), so stepping
	 * 2kHz past each rounded rate enumerates the distinct levels.
	 * Read clk_3d only after the init block above may have set it.
	 */
	c = scale3d.clk_3d;

	while (rate <= scale3d.max_rate_3d) {
		long rounded_rate = clk_round_rate(c, rate);

		if (IS_ERR_VALUE(rounded_rate) || (rounded_rate <= rate))
			break;

		rate = rounded_rate + 2000;	/* 2kHz resolution */
		/*
		 * NOTE(review): no bounds check against the size of
		 * gpu_loading[] — confirm the table is large enough for
		 * every distinct rate the clock can produce.
		 */
		gpu_loading[i].freq = rounded_rate;
		i++;
	}
	FREQ_LEVEL = i;
	curr_idx = 0;
	nvhost_scale3d_reset();
}
Example #2
0
/*
 * tegra_reserve - carve fixed-purpose regions out of the memory map at boot
 *
 * Removes (from the top of DRAM downward) the GPU carveout, the second
 * framebuffer and the primary framebuffer from the memblock allocator,
 * then reserves the Tegra3-A01 SMMU I/O window (when configured), the
 * LP0 suspend vector and the bootloader's framebuffer.  Each removal or
 * reservation failure zeroes that region's start/size globals and boot
 * continues without it.
 *
 * @carveout_size: bytes for the GPU/NVMAP carveout (0 = none)
 * @fb_size:       bytes for the primary framebuffer (0 = none)
 * @fb2_size:      bytes for the secondary framebuffer (0 = none)
 */
void __init tegra_reserve(unsigned long carveout_size, unsigned long fb_size,
	unsigned long fb2_size)
{
#ifdef SUPPORT_SMMU_BASE_FOR_TEGRA3_A01
	int smmu_reserved = 0;
	struct tegra_smmu_window *smmu_window = tegra_smmu_window(0);
#endif

	if (carveout_size) {
		tegra_carveout_start = memblock_end_of_DRAM() - carveout_size;
		if (memblock_remove(tegra_carveout_start, carveout_size)) {
			pr_err("Failed to remove carveout %08lx@%08lx "
				"from memory map\n",
				carveout_size, tegra_carveout_start);
			tegra_carveout_start = 0;
			tegra_carveout_size = 0;
		} else
			tegra_carveout_size = carveout_size;
	}

	if (fb2_size) {
		tegra_fb2_start = memblock_end_of_DRAM() - fb2_size;
		if (memblock_remove(tegra_fb2_start, fb2_size)) {
			pr_err("Failed to remove second framebuffer "
				"%08lx@%08lx from memory map\n",
				fb2_size, tegra_fb2_start);
			tegra_fb2_start = 0;
			tegra_fb2_size = 0;
		} else
			tegra_fb2_size = fb2_size;
	}

	if (fb_size) {
		tegra_fb_start = memblock_end_of_DRAM() - fb_size;
		if (memblock_remove(tegra_fb_start, fb_size)) {
			pr_err("Failed to remove framebuffer %08lx@%08lx "
				"from memory map\n",
				fb_size, tegra_fb_start);
			tegra_fb_start = 0;
			tegra_fb_size = 0;
		} else
			tegra_fb_size = fb_size;
	}

	/* The grhost aperture starts at the lowest successfully-removed
	 * region of the three above. */
	if (tegra_fb_size)
		tegra_grhost_aperture = tegra_fb_start;

	if (tegra_fb2_size && tegra_fb2_start < tegra_grhost_aperture)
		tegra_grhost_aperture = tegra_fb2_start;

	if (tegra_carveout_size && tegra_carveout_start < tegra_grhost_aperture)
		tegra_grhost_aperture = tegra_carveout_start;

#ifdef SUPPORT_SMMU_BASE_FOR_TEGRA3_A01
	if (!smmu_window) {
		pr_err("No SMMU resource\n");
	} else {
		size_t smmu_window_size;

		/* Tegra3 A01 needs a fixed SMMU base; the override may also
		 * be forced at build time. */
		if (FORCE_SMMU_BASE_FOR_TEGRA3_A01 ||
			(tegra_get_chipid() == TEGRA_CHIPID_TEGRA3 &&
			tegra_get_revision() == TEGRA_REVISION_A01)) {
			smmu_window->start = TEGRA_SMMU_BASE_TEGRA3_A01;
			smmu_window->end   = TEGRA_SMMU_BASE_TEGRA3_A01 +
						TEGRA_SMMU_SIZE_TEGRA3_A01 - 1;
		}
		smmu_window_size = smmu_window->end + 1 - smmu_window->start;
		/* only reserve when the window lies in the upper 2GB */
		if (smmu_window->start >= 0x80000000) {
			if (memblock_reserve(smmu_window->start,
						smmu_window_size))
				pr_err(
			"Failed to reserve SMMU I/O VA window %08lx@%08lx\n",
				(unsigned long)smmu_window_size,
				(unsigned long)smmu_window->start);
			else
				smmu_reserved = 1;
		}
	}
#endif

	/* The LP0 vector can be reserved in place only when it lies inside
	 * DRAM; otherwise mark it for relocation. */
	if (tegra_lp0_vec_size &&
	   (tegra_lp0_vec_start < memblock_end_of_DRAM())) {
		if (memblock_reserve(tegra_lp0_vec_start, tegra_lp0_vec_size)) {
			pr_err("Failed to reserve lp0_vec %08lx@%08lx\n",
				tegra_lp0_vec_size, tegra_lp0_vec_start);
			tegra_lp0_vec_start = 0;
			tegra_lp0_vec_size = 0;
		}
		tegra_lp0_vec_relocate = false;
	} else
		tegra_lp0_vec_relocate = true;

	/*
	 * Reserve the bootloader's framebuffer so its contents survive
	 * until they are copied into the framebuffer allocated above; the
	 * region is released after the copy (done elsewhere).
	 */
	if (tegra_bootloader_fb_size) {
		tegra_bootloader_fb_size = PAGE_ALIGN(tegra_bootloader_fb_size);
		if (memblock_reserve(tegra_bootloader_fb_start,
				tegra_bootloader_fb_size)) {
			pr_err("Failed to reserve bootloader frame buffer "
				"%08lx@%08lx\n", tegra_bootloader_fb_size,
				tegra_bootloader_fb_start);
			tegra_bootloader_fb_start = 0;
			tegra_bootloader_fb_size = 0;
		}
	}

	/* Summary of everything reserved above (end = 0 means "absent"). */
	pr_info("Tegra reserved memory:\n"
		"LP0:                    %08lx - %08lx\n"
		"Bootloader framebuffer: %08lx - %08lx\n"
		"Framebuffer:            %08lx - %08lx\n"
		"2nd Framebuffer:        %08lx - %08lx\n"
		"Carveout:               %08lx - %08lx\n"
		"Vpr:                    %08lx - %08lx\n",
		tegra_lp0_vec_start,
		tegra_lp0_vec_size ?
			tegra_lp0_vec_start + tegra_lp0_vec_size - 1 : 0,
		tegra_bootloader_fb_start,
		tegra_bootloader_fb_size ?
			tegra_bootloader_fb_start + tegra_bootloader_fb_size - 1 : 0,
		tegra_fb_start,
		tegra_fb_size ?
			tegra_fb_start + tegra_fb_size - 1 : 0,
		tegra_fb2_start,
		tegra_fb2_size ?
			tegra_fb2_start + tegra_fb2_size - 1 : 0,
		tegra_carveout_start,
		tegra_carveout_size ?
			tegra_carveout_start + tegra_carveout_size - 1 : 0,
		tegra_vpr_start,
		tegra_vpr_size ?
			tegra_vpr_start + tegra_vpr_size - 1 : 0);

#ifdef SUPPORT_SMMU_BASE_FOR_TEGRA3_A01
	if (smmu_reserved)
		pr_info("SMMU:                   %08lx - %08lx\n",
			smmu_window->start, smmu_window->end);
#endif
}
static long tegra_crypto_dev_ioctl(struct file *filp,
	unsigned int ioctl_num, unsigned long arg)
{
	struct tegra_crypto_ctx *ctx = filp->private_data;
	struct tegra_crypt_req crypt_req;
	struct tegra_rng_req rng_req;
	struct tegra_sha_req sha_req;
	struct tegra_rsa_req rsa_req;
	char *rng;
	int ret = 0;

	switch (ioctl_num) {

	case TEGRA_CRYPTO_IOCTL_NEED_SSK:
		ctx->use_ssk = (int)arg;
		break;

	case TEGRA_CRYPTO_IOCTL_PROCESS_REQ:
		ret = copy_from_user(&crypt_req, (void __user *)arg,
			sizeof(crypt_req));
		if (ret) {
			ret = -EFAULT;
			pr_err("%s: copy_from_user fail(%d)\n", __func__, ret);
			break;
		}

		ret = process_crypt_req(ctx, &crypt_req);
		break;

	case TEGRA_CRYPTO_IOCTL_SET_SEED:
		if (copy_from_user(&rng_req, (void __user *)arg,
			sizeof(rng_req))) {
			ret = -EFAULT;
			pr_err("%s: copy_from_user fail(%d)\n", __func__, ret);
			return ret;
		}

		memcpy(ctx->seed, rng_req.seed, TEGRA_CRYPTO_RNG_SEED_SIZE);

		if (rng_req.type == RNG_DRBG)
			ret = crypto_rng_reset(ctx->rng_drbg, ctx->seed,
				crypto_rng_seedsize(ctx->rng_drbg));
		else
			ret = crypto_rng_reset(ctx->rng, ctx->seed,
				crypto_rng_seedsize(ctx->rng));
		break;

	case TEGRA_CRYPTO_IOCTL_GET_RANDOM:
		if (copy_from_user(&rng_req, (void __user *)arg,
			sizeof(rng_req))) {
			ret = -EFAULT;
			pr_err("%s: copy_from_user fail(%d)\n", __func__, ret);
			return ret;
		}

		rng = kzalloc(rng_req.nbytes, GFP_KERNEL);
		if (!rng) {
			if (rng_req.type == RNG_DRBG)
				pr_err("mem alloc for rng_drbg fail");
			else
				pr_err("mem alloc for rng fail");

			ret = -ENODATA;
			goto rng_out;
		}

		if (rng_req.type == RNG_DRBG)
			ret = crypto_rng_get_bytes(ctx->rng_drbg, rng,
				rng_req.nbytes);
		else
			ret = crypto_rng_get_bytes(ctx->rng, rng,
				rng_req.nbytes);

		if (ret != rng_req.nbytes) {
			if (rng_req.type == RNG_DRBG)
				pr_err("rng_drbg failed");
			else
				pr_err("rng failed");
			ret = -ENODATA;
			goto rng_out;
		}

		ret = copy_to_user((void __user *)rng_req.rdata,
			(const void *)rng, rng_req.nbytes);
		if (ret) {
			ret = -EFAULT;
			pr_err("%s: copy_to_user fail(%d)\n", __func__, ret);
			return ret;
		}

rng_out:
		if (rng)
			kfree(rng);
		break;

	case TEGRA_CRYPTO_IOCTL_GET_SHA:
		if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2) {
			if (copy_from_user(&sha_req, (void __user *)arg,
				sizeof(sha_req))) {
				ret = -EFAULT;
				pr_err("%s: copy_from_user fail(%d)\n",
						__func__, ret);
				return ret;
			}

			ret = tegra_crypto_sha(&sha_req);
		} else {
			ret = -EINVAL;
		}
		break;

	case TEGRA_CRYPTO_IOCTL_RSA_REQ:
		if (copy_from_user(&rsa_req, (void __user *)arg,
			sizeof(rsa_req))) {
			ret = -EFAULT;
			pr_err("%s: copy_from_user fail(%d)\n", __func__, ret);
			return ret;
		}

		ret = tegra_crypt_rsa(ctx, &rsa_req);
		break;

	default:
		pr_debug("invalid ioctl code(%d)", ioctl_num);
		ret = -EINVAL;
	}

	return ret;
}
/*
 * tegra_crypto_dev_open - allocate the per-open crypto context
 *
 * Allocates a tegra_crypto_ctx and the cipher/RNG/RSA transforms it
 * needs (chip-dependent: OFB/CTR only on non-Tegra2, DRBG only on
 * chips newer than Tegra3).  On success stores the context in
 * filp->private_data and returns 0; on failure unwinds all transforms
 * allocated so far and returns a negative errno.
 *
 * Fix: the four RSA ahash failure paths jumped to their cleanup labels
 * without setting ret, so the function returned 0 (success) with a
 * freed context.  Each now sets ret = PTR_ERR(...) first.
 */
static int tegra_crypto_dev_open(struct inode *inode, struct file *filp)
{
	struct tegra_crypto_ctx *ctx;
	int ret = 0;

	ctx = kzalloc(sizeof(struct tegra_crypto_ctx), GFP_KERNEL);
	if (!ctx) {
		pr_err("no memory for context\n");
		return -ENOMEM;
	}

	ctx->ecb_tfm = crypto_alloc_ablkcipher("ecb-aes-tegra",
		CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 0);
	if (IS_ERR(ctx->ecb_tfm)) {
		pr_err("Failed to load transform for ecb-aes-tegra: %ld\n",
			PTR_ERR(ctx->ecb_tfm));
		ret = PTR_ERR(ctx->ecb_tfm);
		goto fail_ecb;
	}

	ctx->cbc_tfm = crypto_alloc_ablkcipher("cbc-aes-tegra",
		CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 0);
	if (IS_ERR(ctx->cbc_tfm)) {
		pr_err("Failed to load transform for cbc-aes-tegra: %ld\n",
			PTR_ERR(ctx->cbc_tfm));
		ret = PTR_ERR(ctx->cbc_tfm);
		goto fail_cbc;
	}

	/* OFB/CTR modes exist only on chips newer than Tegra2 */
	if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2) {
		ctx->ofb_tfm = crypto_alloc_ablkcipher("ofb-aes-tegra",
			CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 0);
		if (IS_ERR(ctx->ofb_tfm)) {
			pr_err("Failed to load transform for ofb-aes-tegra: %ld\n",
				PTR_ERR(ctx->ofb_tfm));
			ret = PTR_ERR(ctx->ofb_tfm);
			goto fail_ofb;
		}

		ctx->ctr_tfm = crypto_alloc_ablkcipher("ctr-aes-tegra",
			CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 0);
		if (IS_ERR(ctx->ctr_tfm)) {
			pr_err("Failed to load transform for ctr-aes-tegra: %ld\n",
				PTR_ERR(ctx->ctr_tfm));
			ret = PTR_ERR(ctx->ctr_tfm);
			goto fail_ctr;
		}
	}

	/* DRBG on chips newer than Tegra3, legacy AES RNG otherwise */
	if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2 &&
			tegra_get_chipid() != TEGRA_CHIPID_TEGRA3) {
		ctx->rng_drbg = crypto_alloc_rng("rng_drbg-aes-tegra",
			CRYPTO_ALG_TYPE_RNG, 0);
		if (IS_ERR(ctx->rng_drbg)) {
			pr_err("Failed to load transform for rng_drbg tegra: %ld\n",
				PTR_ERR(ctx->rng_drbg));
			ret = PTR_ERR(ctx->rng_drbg);
			goto fail_rng;
		}
	} else {
		ctx->rng = crypto_alloc_rng("rng-aes-tegra",
			CRYPTO_ALG_TYPE_RNG, 0);
		if (IS_ERR(ctx->rng)) {
			pr_err("Failed to load transform for tegra rng: %ld\n",
				PTR_ERR(ctx->rng));
			ret = PTR_ERR(ctx->rng);
			goto fail_rng;
		}
	}

	ctx->rsa512_tfm = crypto_alloc_ahash("tegra-se-rsa512",
					CRYPTO_ALG_TYPE_AHASH, 0);
	if (IS_ERR(ctx->rsa512_tfm)) {
		pr_err("Failed to load transform for rsa512: %ld\n",
			PTR_ERR(ctx->rsa512_tfm));
		ret = PTR_ERR(ctx->rsa512_tfm);
		goto fail_rsa512;
	}

	ctx->rsa1024_tfm = crypto_alloc_ahash("tegra-se-rsa1024",
					CRYPTO_ALG_TYPE_AHASH, 0);
	if (IS_ERR(ctx->rsa1024_tfm)) {
		pr_err("Failed to load transform for rsa1024: %ld\n",
			PTR_ERR(ctx->rsa1024_tfm));
		ret = PTR_ERR(ctx->rsa1024_tfm);
		goto fail_rsa1024;
	}

	ctx->rsa1536_tfm = crypto_alloc_ahash("tegra-se-rsa1536",
					CRYPTO_ALG_TYPE_AHASH, 0);
	if (IS_ERR(ctx->rsa1536_tfm)) {
		pr_err("Failed to load transform for rsa1536: %ld\n",
			PTR_ERR(ctx->rsa1536_tfm));
		ret = PTR_ERR(ctx->rsa1536_tfm);
		goto fail_rsa1536;
	}

	ctx->rsa2048_tfm = crypto_alloc_ahash("tegra-se-rsa2048",
					CRYPTO_ALG_TYPE_AHASH, 0);
	if (IS_ERR(ctx->rsa2048_tfm)) {
		pr_err("Failed to load transform for rsa2048: %ld\n",
			PTR_ERR(ctx->rsa2048_tfm));
		ret = PTR_ERR(ctx->rsa2048_tfm);
		goto fail_rsa2048;
	}

	filp->private_data = ctx;
	return ret;

	/* unwind in reverse allocation order */
fail_rsa2048:
	crypto_free_ahash(ctx->rsa1536_tfm);
fail_rsa1536:
	crypto_free_ahash(ctx->rsa1024_tfm);
fail_rsa1024:
	crypto_free_ahash(ctx->rsa512_tfm);
fail_rsa512:
	if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2 &&
			tegra_get_chipid() != TEGRA_CHIPID_TEGRA3)
		crypto_free_rng(ctx->rng_drbg);
	else
		crypto_free_rng(ctx->rng);
fail_rng:
	if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2)
		crypto_free_ablkcipher(ctx->ctr_tfm);
fail_ctr:
	if (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2)
		crypto_free_ablkcipher(ctx->ofb_tfm);
fail_ofb:
	crypto_free_ablkcipher(ctx->cbc_tfm);

fail_cbc:
	crypto_free_ablkcipher(ctx->ecb_tfm);

fail_ecb:
	kfree(ctx);
	return ret;
}
Example #5
0
static int nvhost_pod_init(struct devfreq *df)
{
	struct podgov_info_rec *podgov;
	struct platform_device *d = to_platform_device(df->dev.parent);
	ktime_t now = ktime_get();
	enum tegra_chipid cid = tegra_get_chipid();
	int error = 0;

	struct nvhost_devfreq_ext_stat *ext_stat;
	struct devfreq_dev_status dev_stat;
	int stat = 0;

	podgov = kzalloc(sizeof(struct podgov_info_rec), GFP_KERNEL);
	if (!podgov)
		goto err_alloc_podgov;
	df->data = (void *)podgov;

	/* Initialise workers */
	INIT_DELAYED_WORK(&podgov->idle_timer, podgov_idle_handler);

	/* Set scaling parameter defaults */
	podgov->enable = 1;
	podgov->block = 0;
	podgov->p_use_throughput_hint = 1;

	if (!strcmp(d->name, "vic03")) {
		podgov->p_load_max = 990;
		podgov->p_load_target = 800;
		podgov->p_bias = 80;
		podgov->p_hint_lo_limit = 500;
		podgov->p_hint_hi_limit = 997;
		podgov->p_scaleup_limit = 1100;
		podgov->p_scaledown_limit = 1300;
		podgov->p_smooth = 10;
		podgov->p_damp = 7;
	} else {
		switch (cid) {
		case TEGRA_CHIPID_TEGRA14:
		case TEGRA_CHIPID_TEGRA11:
		case TEGRA_CHIPID_TEGRA12:
			podgov->p_load_max = 900;
			podgov->p_load_target = 700;
			podgov->p_bias = 80;
			podgov->p_hint_lo_limit = 500;
			podgov->p_hint_hi_limit = 997;
			podgov->p_scaleup_limit = 1100;
			podgov->p_scaledown_limit = 1300;
			podgov->p_smooth = 10;
			podgov->p_damp = 7;
			break;
		default:
			pr_err("%s: un-supported chip id\n", __func__);
			goto err_unsupported_chip_id;
			break;
		}
	}

	podgov->p_slowdown_delay = 10;
	podgov->p_block_window = 50000;
	podgov->adjustment_type = ADJUSTMENT_DEVICE_REQ;
	podgov->p_user = 0;

	/* Reset clock counters */
	podgov->last_throughput_hint = now;
	podgov->last_scale = now;

	podgov->power_manager = df;

	/* Get the current status of the device */
	stat = df->profile->get_dev_status(df->dev.parent, &dev_stat);
	if (!dev_stat.private_data) {
		pr_err("podgov: device does not support ext_stat.\n");
		goto err_get_current_status;
	}
	ext_stat = dev_stat.private_data;

	/* store the limits */
	df->min_freq = ext_stat->min_freq;
	df->max_freq = ext_stat->max_freq;

	podgov->p_freq_request = ext_stat->max_freq;

	/* Create sysfs entries for controlling this governor */
	error = device_create_file(&d->dev,
			&dev_attr_enable_3d_scaling);
	if (error)
		goto err_create_sysfs_entry;

	error = device_create_file(&d->dev,
			&dev_attr_user);
	if (error)
		goto err_create_sysfs_entry;

	error = device_create_file(&d->dev,
			&dev_attr_freq_request);
	if (error)
		goto err_create_sysfs_entry;

	podgov->freq_count = df->profile->max_state;
	podgov->freqlist = df->profile->freq_table;
	if (!podgov->freq_count || !podgov->freqlist)
		goto err_get_freqs;

	podgov->idle_avg = 0;
	podgov->freq_avg = 0;
	podgov->hint_avg = 0;

	nvhost_scale3d_debug_init(df);

	/* register the governor to throughput hint notifier chain */
	podgov->throughput_hint_notifier.notifier_call =
		&nvhost_scale3d_set_throughput_hint;
	blocking_notifier_chain_register(&throughput_notifier_list,
					 &podgov->throughput_hint_notifier);

	return 0;

err_get_freqs:
	device_remove_file(&d->dev, &dev_attr_enable_3d_scaling);
	device_remove_file(&d->dev, &dev_attr_user);
	device_remove_file(&d->dev, &dev_attr_freq_request);
err_create_sysfs_entry:
	dev_err(&d->dev, "failed to create sysfs attributes");
err_get_current_status:
err_unsupported_chip_id:
	kfree(podgov);
err_alloc_podgov:
	return -ENOMEM;
}
/*
 * process_crypt_req - run one AES en/decrypt request from user space
 *
 * Selects the transform matching crypt_req->op (ECB/CBC always; OFB and
 * CTR only on non-Tegra2 chips), sets the key unless skip_key is set,
 * then streams the plaintext through the cipher one page at a time,
 * copying each processed page back to user space.  Handles both
 * synchronous and asynchronous (-EINPROGRESS/-EBUSY) completion of the
 * underlying driver via a completion object.
 *
 * Returns 0 on success or a negative errno.  crypt_req->result and
 * crypt_req->plaintext are advanced past the bytes processed.
 */
static int process_crypt_req(struct tegra_crypto_ctx *ctx, struct tegra_crypt_req *crypt_req)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req = NULL;
	struct scatterlist in_sg;
	struct scatterlist out_sg;
	unsigned long *xbuf[NBUFS];
	int ret = 0, size = 0;
	unsigned long total = 0;
	const u8 *key = NULL;
	struct tegra_crypto_completion tcrypt_complete;

	if (crypt_req->op & TEGRA_CRYPTO_ECB) {
		req = ablkcipher_request_alloc(ctx->ecb_tfm, GFP_KERNEL);
		tfm = ctx->ecb_tfm;
	} else if (crypt_req->op & TEGRA_CRYPTO_CBC) {
		req = ablkcipher_request_alloc(ctx->cbc_tfm, GFP_KERNEL);
		tfm = ctx->cbc_tfm;
	} else if ((crypt_req->op & TEGRA_CRYPTO_OFB) &&
			(tegra_get_chipid() != TEGRA_CHIPID_TEGRA2)) {

		req = ablkcipher_request_alloc(ctx->ofb_tfm, GFP_KERNEL);
		tfm = ctx->ofb_tfm;
	} else if ((crypt_req->op & TEGRA_CRYPTO_CTR) &&
			(tegra_get_chipid() != TEGRA_CHIPID_TEGRA2)) {

		req = ablkcipher_request_alloc(ctx->ctr_tfm, GFP_KERNEL);
		tfm = ctx->ctr_tfm;
	}

	/* req is NULL here either because allocation failed or because op
	 * matched no supported mode.  NOTE(review): an unsupported op
	 * arguably deserves -EINVAL rather than -ENOMEM — confirm intent.
	 * tfm is always set on any branch where req is non-NULL. */
	if (!req) {
		pr_err("%s: Failed to allocate request\n", __func__);
		return -ENOMEM;
	}

	/* NOTE(review): if tegra_crypt_req.keylen is an unsigned type the
	 * '< 0' test is always false — confirm it is declared signed. */
	if ((crypt_req->keylen < 0) || (crypt_req->keylen > AES_MAX_KEY_SIZE)) {
		ret = -EINVAL;
		pr_err("crypt_req keylen invalid");
		goto process_req_out;
	}

	crypto_ablkcipher_clear_flags(tfm, ~0);

	/* key == NULL selects the device's secure storage key (SSK) */
	if (!ctx->use_ssk)
		key = crypt_req->key;

	if (!crypt_req->skip_key) {
		ret = crypto_ablkcipher_setkey(tfm, key, crypt_req->keylen);
		if (ret < 0) {
			pr_err("setkey failed");
			goto process_req_out;
		}
	}

	ret = alloc_bufs(xbuf);
	if (ret < 0) {
		pr_err("alloc_bufs failed");
		goto process_req_out;
	}

	init_completion(&tcrypt_complete.restart);

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
		tegra_crypt_complete, &tcrypt_complete);

	/* process the plaintext one page at a time through the bounce
	 * buffers: xbuf[0] holds input, xbuf[1] receives output */
	total = crypt_req->plaintext_sz;
	while (total > 0) {
		size = min(total, PAGE_SIZE);
		ret = copy_from_user((void *)xbuf[0],
			(void __user *)crypt_req->plaintext, size);
		if (ret) {
			ret = -EFAULT;
			pr_debug("%s: copy_from_user failed (%d)\n", __func__, ret);
			goto process_req_buf_out;
		}
		sg_init_one(&in_sg, xbuf[0], size);
		sg_init_one(&out_sg, xbuf[1], size);

		if (!crypt_req->skip_iv)
			ablkcipher_request_set_crypt(req, &in_sg,
				&out_sg, size, crypt_req->iv);
		else
			ablkcipher_request_set_crypt(req, &in_sg,
				&out_sg, size, NULL);

		/* re-arm the completion for this chunk (legacy macro) */
		INIT_COMPLETION(tcrypt_complete.restart);

		tcrypt_complete.req_err = 0;

		ret = crypt_req->encrypt ?
			crypto_ablkcipher_encrypt(req) :
			crypto_ablkcipher_decrypt(req);
		if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
			/* crypto driver is asynchronous */
			ret = wait_for_completion_interruptible(&tcrypt_complete.restart);

			if (ret < 0)
				goto process_req_buf_out;

			if (tcrypt_complete.req_err < 0) {
				ret = tcrypt_complete.req_err;
				goto process_req_buf_out;
			}
		} else if (ret < 0) {
			pr_debug("%scrypt failed (%d)\n",
				crypt_req->encrypt ? "en" : "de", ret);
			goto process_req_buf_out;
		}

		ret = copy_to_user((void __user *)crypt_req->result,
			(const void *)xbuf[1], size);
		if (ret) {
			ret = -EFAULT;
			pr_debug("%s: copy_to_user failed (%d)\n", __func__,
					ret);
			goto process_req_buf_out;
		}

		total -= size;
		crypt_req->result += size;
		crypt_req->plaintext += size;
	}

process_req_buf_out:
	free_bufs(xbuf);
process_req_out:
	ablkcipher_request_free(req);

	return ret;
}
/*
 * nvhost_scale3d_actmon_init - set up activity-monitor based 3d scaling
 *
 * Resolves the 3d/emc clocks from the platform data, determines the 3d
 * frequency range, allocates the devfreq status structures, calibrates
 * the emc scaling curve and registers the device with devfreq.  A
 * degenerate frequency range (max == min) disables scaling.  Runs at
 * most once; subsequent calls return immediately.
 *
 * Fix: dev_stat was allocated with sizeof(struct power_profile_gr3d),
 * which does not match the object it points to; the sizeof *ptr idiom
 * now guarantees the correct size.
 */
void nvhost_scale3d_actmon_init(struct platform_device *dev)
{
	struct nvhost_devfreq_ext_stat *ext_stat;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	if (power_profile.init)
		return;

	/* Get clocks */
	power_profile.dev = dev;
	power_profile.clk_3d = pdata->clk[0];
	/* Tegra3 has a second 3d clock; the emc clock index shifts */
	if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) {
		power_profile.clk_3d2 = pdata->clk[1];
		power_profile.clk_3d_emc = pdata->clk[2];
	} else
		power_profile.clk_3d_emc = pdata->clk[1];

	/* Get frequency settings */
	power_profile.max_rate_3d =
		clk_round_rate(power_profile.clk_3d, UINT_MAX);
	power_profile.min_rate_3d =
		clk_round_rate(power_profile.clk_3d, 0);

	nvhost_scale3d_devfreq_profile.initial_freq = power_profile.max_rate_3d;

	if (power_profile.max_rate_3d == power_profile.min_rate_3d) {
		pr_warn("scale3d: 3d max rate = min rate (%lu), disabling\n",
			power_profile.max_rate_3d);
		goto err_bad_power_profile;
	}

	/* Reserve space for devfreq structures (dev_stat and ext_dev_stat);
	 * size from the pointed-to types, not an unrelated struct */
	power_profile.dev_stat =
		kzalloc(sizeof(*power_profile.dev_stat), GFP_KERNEL);
	if (!power_profile.dev_stat)
		goto err_devfreq_alloc;
	ext_stat = kzalloc(sizeof(*ext_stat), GFP_KERNEL);
	if (!ext_stat)
		goto err_devfreq_ext_stat_alloc;

	/* Initialise the dev_stat and ext_stat structures */
	power_profile.dev_stat->private_data = ext_stat;
	power_profile.last_event_type = DEVICE_UNKNOWN;
	ext_stat->min_freq = power_profile.min_rate_3d;
	ext_stat->max_freq = power_profile.max_rate_3d;

	power_profile.last_request_time = ktime_get();

	nvhost_scale3d_calibrate_emc();

	/* Start using devfreq.
	 * NOTE(review): devfreq_add_device() may return ERR_PTR on failure;
	 * the result is stored unchecked — confirm callers tolerate that. */
	pdata->power_manager = devfreq_add_device(&dev->dev,
				&nvhost_scale3d_devfreq_profile,
				&nvhost_podgov,
				NULL);

	power_profile.init = 1;
	return;

err_devfreq_ext_stat_alloc:
	kfree(power_profile.dev_stat);
	power_profile.dev_stat = NULL;
err_devfreq_alloc:
err_bad_power_profile:

	return;

}
/*
 * enterprise_panel_init - board init for the Enterprise display panel
 *
 * Selects the backlight table by board fab, configures HDMI hotplug and
 * 2d/3d-mode GPIOs, registers the graphics host / display / backlight
 * devices and copies the bootloader framebuffer into the kernel one.
 *
 * Returns 0 on success or the first registration error.
 *
 * Fix: the TE line was requested with enterprise_lcd_swp_pl (copy-paste
 * from the swap-polarity block above) but configured as
 * enterprise_lcd_te; the request now uses enterprise_lcd_te.
 */
int __init enterprise_panel_init(void)
{
	int err;
	struct resource __maybe_unused *res;
	struct board_info board_info;

	tegra_get_board_info(&board_info);

	BUILD_BUG_ON(ARRAY_SIZE(enterprise_bl_output_measured_a03) != 256);
	BUILD_BUG_ON(ARRAY_SIZE(enterprise_bl_output_measured_a02) != 256);

	/* A03+ boards use a different backlight curve and clock divider */
	if (board_info.fab >= BOARD_FAB_A03) {
		enterprise_disp1_backlight_data.clk_div = 0x1D;
		bl_output = enterprise_bl_output_measured_a03;
	} else
		bl_output = enterprise_bl_output_measured_a02;

	enterprise_dsi.chip_id = tegra_get_chipid();
	enterprise_dsi.chip_rev = tegra_get_revision();

#if defined(CONFIG_TEGRA_NVMAP)
	enterprise_carveouts[1].base = tegra_carveout_start;
	enterprise_carveouts[1].size = tegra_carveout_size;
#endif

	/* NOTE(review): gpio_request()/gpio_direction_*() return values are
	 * ignored throughout this function — confirm that is acceptable. */
	tegra_gpio_enable(enterprise_hdmi_hpd);
	gpio_request(enterprise_hdmi_hpd, "hdmi_hpd");
	gpio_direction_input(enterprise_hdmi_hpd);

	tegra_gpio_enable(enterprise_lcd_2d_3d);
	gpio_request(enterprise_lcd_2d_3d, "lcd_2d_3d");
	gpio_direction_output(enterprise_lcd_2d_3d, 0);
	enterprise_stereo_set_mode(enterprise_stereo.mode_2d_3d);

	tegra_gpio_enable(enterprise_lcd_swp_pl);
	gpio_request(enterprise_lcd_swp_pl, "lcd_swp_pl");
	gpio_direction_output(enterprise_lcd_swp_pl, 0);
	enterprise_stereo_set_orientation(enterprise_stereo.orientation);

#if !(DC_CTRL_MODE & TEGRA_DC_OUT_ONE_SHOT_MODE)
	tegra_gpio_enable(enterprise_lcd_te);
	gpio_request(enterprise_lcd_te, "lcd_te");
	gpio_direction_input(enterprise_lcd_te);
#endif

#ifdef CONFIG_HAS_EARLYSUSPEND
	enterprise_panel_early_suspender.suspend = enterprise_panel_early_suspend;
	enterprise_panel_early_suspender.resume = enterprise_panel_late_resume;
	enterprise_panel_early_suspender.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
	register_early_suspend(&enterprise_panel_early_suspender);
#endif

#ifdef CONFIG_TEGRA_GRHOST
	err = nvhost_device_register(&tegra_grhost_device);
	if (err)
		return err;
#endif

	err = platform_add_devices(enterprise_gfx_devices,
				ARRAY_SIZE(enterprise_gfx_devices));

#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_DC)
	res = nvhost_get_resource_byname(&enterprise_disp1_device,
					 IORESOURCE_MEM, "fbmem");
	res->start = tegra_fb_start;
	res->end = tegra_fb_start + tegra_fb_size - 1;
#endif

	/* Copy the bootloader fb to the fb. */
	tegra_move_framebuffer(tegra_fb_start, tegra_bootloader_fb_start,
		min(tegra_fb_size, tegra_bootloader_fb_size));

#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_DC)
	if (!err)
		err = nvhost_device_register(&enterprise_disp1_device);

	res = nvhost_get_resource_byname(&enterprise_disp2_device,
					 IORESOURCE_MEM, "fbmem");
	res->start = tegra_fb2_start;
	res->end = tegra_fb2_start + tegra_fb2_size - 1;
	if (!err)
		err = nvhost_device_register(&enterprise_disp2_device);
#endif

#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_NVAVP)
	if (!err)
		err = nvhost_device_register(&nvavp_device);
#endif

	if (!err)
		err = platform_add_devices(enterprise_bl_devices,
				ARRAY_SIZE(enterprise_bl_devices));
	return err;
}
/*
 * Read the chip's pre_si_platform values from the chip ID value
 */
/* Extract the pre-silicon platform field from the chip ID value. */
static uint32_t tegra_get_chipid_pre_si_platform(void)
{
	uint32_t chipid = tegra_get_chipid();

	return (chipid >> PRE_SI_PLATFORM_SHIFT) & PRE_SI_PLATFORM_MASK;
}
/*
 * Read the chip's minor version from the chip ID value
 */
/* Extract the chip's minor version field from the chip ID value. */
uint32_t tegra_get_chipid_minor(void)
{
	uint32_t chipid = tegra_get_chipid();

	return (chipid >> MINOR_VERSION_SHIFT) & MINOR_VERSION_MASK;
}
/*
 * Read the chip's major version from chip ID value
 */
/* Extract the chip's major version field from the chip ID value. */
uint32_t tegra_get_chipid_major(void)
{
	uint32_t chipid = tegra_get_chipid();

	return (chipid >> MAJOR_VERSION_SHIFT) & MAJOR_VERSION_MASK;
}
/*
 * enterprise_panel_init - board init for the Enterprise/TAI display panel
 *
 * Selects the backlight table by board id/fab, configures the HDMI
 * hotplug, 2d/3d-mode, swap-polarity, backlight-PWM and TE GPIOs (with
 * error checking), registers host1x / display / backlight devices and
 * copies the bootloader framebuffer into the kernel one.
 *
 * Returns 0 on success or a negative errno from the first failure.
 *
 * Fix: the TE line was requested with enterprise_lcd_swp_pl (copy-paste
 * from the swap-polarity block) but configured and freed as
 * enterprise_lcd_te; the request now uses enterprise_lcd_te.
 */
int __init enterprise_panel_init(void)
{
	int err;
	struct resource __maybe_unused *res;
	struct board_info board_info;
	struct platform_device *phost1x;

	tegra_get_board_info(&board_info);

	BUILD_BUG_ON(ARRAY_SIZE(enterprise_bl_output_measured_a03) != 256);
	BUILD_BUG_ON(ARRAY_SIZE(enterprise_bl_output_measured_a02) != 256);

	/* TAI (E1239) uses an external PWM backlight; others pick a curve
	 * by board fab */
	if (board_info.board_id != BOARD_E1239) {
		if (board_info.fab >= BOARD_FAB_A03) {
#if !(IS_EXTERNAL_PWM)
			enterprise_disp1_backlight_data.clk_div = 0x1D;
#endif
			bl_output = enterprise_bl_output_measured_a03;
		} else
			bl_output = enterprise_bl_output_measured_a02;
	} else {
		enterprise_bl_devices[0] = &external_pwm_disp1_backlight_device;
		bl_output = tai_bl_output_measured;
	}
	enterprise_dsi.chip_id = tegra_get_chipid();
	enterprise_dsi.chip_rev = tegra_revision;

#if defined(CONFIG_TEGRA_NVMAP)
	enterprise_carveouts[1].base = tegra_carveout_start;
	enterprise_carveouts[1].size = tegra_carveout_size;
#endif

	err = gpio_request(enterprise_hdmi_hpd, "hdmi_hpd");
	if (err < 0) {
		pr_err("%s: gpio_request failed %d\n", __func__, err);
		return err;
	}
	err = gpio_direction_input(enterprise_hdmi_hpd);
	if (err < 0) {
		pr_err("%s: gpio_direction_input failed %d\n",
			__func__, err);
		gpio_free(enterprise_hdmi_hpd);
		return err;
	}

	if (board_info.board_id != BOARD_E1239) {
		err = gpio_request(enterprise_lcd_2d_3d, "lcd_2d_3d");
		if (err < 0) {
			pr_err("%s: gpio_request failed %d\n", __func__, err);
			return err;
		}
		err = gpio_direction_output(enterprise_lcd_2d_3d, 0);
		if (err < 0) {
			pr_err("%s: gpio_direction_ouput failed %d\n",
				__func__, err);
			gpio_free(enterprise_lcd_2d_3d);
			return err;
		}
		enterprise_stereo_set_mode(enterprise_stereo.mode_2d_3d);

		err = gpio_request(enterprise_lcd_swp_pl, "lcd_swp_pl");
		if (err < 0) {
			pr_err("%s: gpio_request failed %d\n", __func__, err);
			return err;
		}
		err = gpio_direction_output(enterprise_lcd_swp_pl, 0);
		if (err < 0) {
			pr_err("%s: gpio_direction_ouput failed %d\n",
				__func__, err);
			gpio_free(enterprise_lcd_swp_pl);
			return err;
		}
		enterprise_stereo_set_orientation(
						enterprise_stereo.orientation);
#if IS_EXTERNAL_PWM
		err = gpio_request(enterprise_bl_pwm, "bl_pwm");
		if (err < 0) {
			pr_err("%s: gpio_request failed %d\n", __func__, err);
			return err;
		}
		gpio_free(enterprise_bl_pwm);
#endif
	} else {
		/* External pwm is used but do not use IS_EXTERNAL_PWM
		compiler switch for TAI */
		err = gpio_request(enterprise_bl_pwm, "bl_pwm");
		if (err < 0) {
			pr_err("%s: gpio_request failed %d\n", __func__, err);
			return err;
		}
		gpio_free(enterprise_bl_pwm);
	}

#if !(DC_CTRL_MODE & TEGRA_DC_OUT_ONE_SHOT_MODE)
	err = gpio_request(enterprise_lcd_te, "lcd_te");
	if (err < 0) {
		pr_err("%s: gpio_request failed %d\n", __func__, err);
		return err;
	}
	err = gpio_direction_input(enterprise_lcd_te);
	if (err < 0) {
		pr_err("%s: gpio_direction_input failed %d\n",
			__func__, err);
		gpio_free(enterprise_lcd_te);
		return err;
	}
#endif

	if (board_info.board_id != BOARD_E1239)
		err = platform_add_devices(enterprise_gfx_devices,
			ARRAY_SIZE(enterprise_gfx_devices));
	else
		err = platform_add_devices(external_pwm_gfx_devices,
			ARRAY_SIZE(external_pwm_gfx_devices));

#ifdef CONFIG_TEGRA_GRHOST
	phost1x = tegra3_register_host1x_devices();
	if (!phost1x)
		return -EINVAL;
#endif

#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_DC)
	res = platform_get_resource_byname(&enterprise_disp1_device,
					 IORESOURCE_MEM, "fbmem");
	res->start = tegra_fb_start;
	res->end = tegra_fb_start + tegra_fb_size - 1;
#endif

	/* Copy the bootloader fb to the fb. */
	__tegra_move_framebuffer(&enterprise_nvmap_device,
		tegra_fb_start, tegra_bootloader_fb_start,
		min(tegra_fb_size, tegra_bootloader_fb_size));

#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_DC)
	if (!err) {
		enterprise_disp1_device.dev.parent = &phost1x->dev;
		err = platform_device_register(&enterprise_disp1_device);
	}

	res = platform_get_resource_byname(&enterprise_disp2_device,
					 IORESOURCE_MEM, "fbmem");
	res->start = tegra_fb2_start;
	res->end = tegra_fb2_start + tegra_fb2_size - 1;
	if (!err) {
		enterprise_disp2_device.dev.parent = &phost1x->dev;
		err = platform_device_register(&enterprise_disp2_device);
	}
#endif

#if defined(CONFIG_TEGRA_GRHOST) && defined(CONFIG_TEGRA_NVAVP)
	if (!err) {
		nvavp_device.dev.parent = &phost1x->dev;
		err = platform_device_register(&nvavp_device);
	}
#endif

	if (!err)
		err = platform_add_devices(enterprise_bl_devices,
				ARRAY_SIZE(enterprise_bl_devices));

	return err;
}
static int vhost_client_probe(struct platform_device *dev)
{
	int err;
	struct nvhost_device_data *pdata = NULL;

	if (dev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_device(tegra_client_of_match, &dev->dev);

		if (!match)
			return -ENODEV;

		pdata = (struct nvhost_device_data *)match->data;

#ifdef CONFIG_TEGRA_GRHOST_ISP
		/* If ISP, need to differentiate ISP.0 from ISP.1 */
		if (!IS_ENABLED(CONFIG_ARCH_TEGRA_18x_SOC)) {
			int dev_id = 0;
			if (sscanf(dev->name, "%x.isp", &dev_id) == 1) {
				switch (tegra_get_chipid()) {
				case TEGRA_CHIPID_TEGRA12:
					if (dev_id == 0x54600000)
						pdata = &t124_isp_info;
					if (dev_id == 0x54680000)
						pdata = &t124_ispb_info;
					break;
				default:
					/* Only T124 is virtualized, for now */
					return -EINVAL;
				}
			}
		}
#endif
	}

	if (!pdata) {
		dev_err(&dev->dev, "no platform data\n");
		return -ENODATA;
	}

	pdata->virtual_dev = true;

	nvhost_dbg_fn("dev:%p pdata:%p", dev, pdata);

	pdata->pdev = dev;
	mutex_init(&pdata->lock);
	platform_set_drvdata(dev, pdata);

	/* Disable power management when virtual */
	pdata->can_powergate = false;
	pdata->busy = NULL;
	pdata->idle = NULL;
	pdata->scaling_init = NULL;
	pdata->finalize_poweron = nvhost_vhost_client_finalize_poweron;
	pdata->poweron_reset = false;
	pdata->engine_cg_regs = NULL;
	pdata->keepalive = false;

	pdata->hw_init = NULL;

	dev->dev.platform_data = NULL;

	nvhost_module_init(dev);

#ifdef CONFIG_PM_GENERIC_DOMAINS
	pdata->pd.name = kstrdup(dev->name, GFP_KERNEL);
	if (!pdata->pd.name)
		return -ENOMEM;

	err = nvhost_module_add_domain(&pdata->pd, dev);
#endif

	err = nvhost_virt_init(dev, pdata->moduleid);
	if (err) {
		dev_err(&dev->dev, "nvhost_virt_init failed for %s",
			      dev->name);
		pm_runtime_put(&dev->dev);
		return err;
	}

	err = nvhost_client_device_init(dev);
	if (err) {
		dev_err(&dev->dev, "failed to init client device for %s",
			      dev->name);
		pm_runtime_put(&dev->dev);
		return err;
	}

	return 0;
}