/*
 * get_qos_params() - Parse per-node QoS properties from the device tree
 * into @node_info->qos_params.
 *
 * @dev_node:  DT node carrying the "qcom,*" QoS properties
 * @pdev:      owning platform device (passed through to get_qos_mode())
 * @node_info: destination; qos_params fields are written in place
 *
 * Optional properties leave their (pre-zeroed) fields untouched when
 * absent: every of_property_read_u32() return value except bw_buffer's
 * is deliberately ignored.  "qcom,qos-mode" absence maps mode to -1;
 * "qcom,bw_buffer" is given in KB and converted to bytes.
 */
static void get_qos_params(
		struct device_node * const dev_node,
		struct platform_device * const pdev,
		struct msm_bus_node_info_type *node_info)
{
	const char *qos_mode = NULL;
	/* of_property_read_*() returns 0 or a negative errno: must be int,
	 * not unsigned, so error codes keep their sign. */
	int ret;
	unsigned int temp;

	ret = of_property_read_string(dev_node, "qcom,qos-mode", &qos_mode);

	if (ret)
		node_info->qos_params.mode = -1;
	else
		node_info->qos_params.mode = get_qos_mode(pdev, dev_node,
								qos_mode);

	of_property_read_u32(dev_node, "qcom,prio-lvl",
					&node_info->qos_params.prio_lvl);

	of_property_read_u32(dev_node, "qcom,prio1",
						&node_info->qos_params.prio1);

	of_property_read_u32(dev_node, "qcom,prio0",
						&node_info->qos_params.prio0);

	of_property_read_u32(dev_node, "qcom,reg-prio1",
					&node_info->qos_params.reg_prio1);

	of_property_read_u32(dev_node, "qcom,reg-prio0",
					&node_info->qos_params.reg_prio0);

	of_property_read_u32(dev_node, "qcom,prio-rd",
					&node_info->qos_params.prio_rd);

	of_property_read_u32(dev_node, "qcom,prio-wr",
						&node_info->qos_params.prio_wr);

	of_property_read_u32(dev_node, "qcom,gp",
						&node_info->qos_params.gp);

	of_property_read_u32(dev_node, "qcom,thmp",
						&node_info->qos_params.thmp);

	of_property_read_u32(dev_node, "qcom,ws",
						&node_info->qos_params.ws);

	ret = of_property_read_u32(dev_node, "qcom,bw_buffer", &temp);

	if (ret)
		node_info->qos_params.bw_buffer = 0;
	else
		node_info->qos_params.bw_buffer = KBTOB(temp);

}
/*
 * get_th_params() - Read a DT property of u32 threshold values (in KB)
 * and return a devm-allocated u64 array converted to bytes.
 *
 * @pdev:   owning platform device (devm allocation lifetime)
 * @node:   DT node holding the property
 * @prop:   property name to read
 * @nports: out: number of entries in the returned array; 0 when the
 *          property is absent or on any failure
 *
 * Returns the converted array (devm-managed; the caller must NOT kfree
 * it), or NULL when the property is missing or any step fails.
 */
static u64 *get_th_params(struct platform_device *pdev,
		const struct device_node *node, const char *prop,
		int *nports)
{
	int size = 0, ret;
	u64 *ret_arr = NULL;
	int *arr = NULL;
	int i;

	/* A missing property is not an error; report zero entries. */
	if (of_get_property(node, prop, &size)) {
		*nports = size / sizeof(int);
	} else {
		pr_debug("Property %s not available\n", prop);
		*nports = 0;
		return NULL;
	}

	ret_arr = devm_kzalloc(&pdev->dev, (*nports * sizeof(u64)),
							GFP_KERNEL);
	arr = kzalloc(size, GFP_KERNEL);
	if ((size > 0) && (ZERO_OR_NULL_PTR(arr)
			|| ZERO_OR_NULL_PTR(ret_arr))) {
		pr_err("Error: Failed to alloc mem for %s\n", prop);
		goto err;
	}

	ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
	if (ret) {
		pr_err("Error in reading property: %s\n", prop);
		goto err;
	}

	/* DT values are in KB; convert each entry to bytes. */
	for (i = 0; i < *nports; i++)
		ret_arr[i] = (uint64_t)KBTOB(arr[i]);

	MSM_BUS_DBG("%s: num entries %d prop %s", __func__, *nports, prop);

	for (i = 0; i < *nports; i++)
		MSM_BUS_DBG("Th %d val %llu", i, ret_arr[i]);

	kfree(arr);
	return ret_arr;
err:
	/*
	 * Unified cleanup: the previous allocation-failure branch freed at
	 * most one of the two buffers.  kfree(NULL) is a harmless no-op;
	 * keep ZERO_SIZE_PTR away from devm_kfree().  Also reset *nports
	 * so callers never see a nonzero count with a NULL array.
	 */
	kfree(arr);
	if (!ZERO_OR_NULL_PTR(ret_arr))
		devm_kfree(&pdev->dev, ret_arr);
	*nports = 0;
	return NULL;
}
/*
 * get_pdata() - Build a client's msm_bus_scale_pdata from its
 * "qcom,msm-bus,*" device tree properties.
 *
 * @pdev:    platform device used for devm allocations
 * @of_node: node carrying the client's bus-scaling properties
 *
 * The "qcom,msm-bus,vectors-KBps" property must contain exactly
 * num-cases * num-paths entries of 4 cells each (src, dst, ab-KB, ib-KB);
 * bandwidths are converted from KB to bytes.  Returns a devm-allocated
 * pdata with pdata->usecase populated, or NULL on any error.
 */
static struct msm_bus_scale_pdata *get_pdata(struct platform_device *pdev,
	struct device_node *of_node)
{
	struct msm_bus_scale_pdata *pdata = NULL;
	struct msm_bus_paths *usecase = NULL;
	int i = 0, j, ret, num_usecases = 0, num_paths, len;
	const uint32_t *vec_arr = NULL;

	if (!pdev) {
		pr_err("Error: Null Platform device\n");
		return NULL;
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_scale_pdata),
		GFP_KERNEL);
	if (!pdata) {
		pr_err("Error: Memory allocation for pdata failed\n");
		goto err;
	}

	ret = of_property_read_string(of_node, "qcom,msm-bus,name",
		&pdata->name);
	if (ret) {
		pr_err("Error: Client name not found\n");
		goto err;
	}

	ret = of_property_read_u32(of_node, "qcom,msm-bus,num-cases",
		&num_usecases);
	if (ret) {
		pr_err("Error: num-usecases not found\n");
		goto err;
	}

	pdata->num_usecases = num_usecases;

	if (of_property_read_bool(of_node, "qcom,msm-bus,active-only"))
		pdata->active_only = 1;
	else {
		pr_debug("active_only flag absent.\n");
		pr_debug("Using dual context by default\n");
	}

	usecase = devm_kzalloc(&pdev->dev, (sizeof(struct msm_bus_paths) *
		pdata->num_usecases), GFP_KERNEL);
	if (!usecase) {
		pr_err("Error: Memory allocation for paths failed\n");
		goto err;
	}

	ret = of_property_read_u32(of_node, "qcom,msm-bus,num-paths",
		&num_paths);
	if (ret) {
		pr_err("Error: num_paths not found\n");
		goto err;
	}

	vec_arr = of_get_property(of_node, "qcom,msm-bus,vectors-KBps", &len);
	if (vec_arr == NULL) {
		pr_err("Error: Vector array not found\n");
		goto err;
	}

	/* Each (usecase, path) pair occupies 4 u32 cells. */
	if (len != num_usecases * num_paths * sizeof(uint32_t) * 4) {
		pr_err("Error: Length-error on getting vectors\n");
		goto err;
	}

	for (i = 0; i < num_usecases; i++) {
		usecase[i].num_paths = num_paths;
		usecase[i].vectors = devm_kzalloc(&pdev->dev, num_paths *
			sizeof(struct msm_bus_vectors), GFP_KERNEL);
		if (!usecase[i].vectors) {
			pr_err("Error: Mem alloc failure in vectors\n");
			goto err;
		}

		for (j = 0; j < num_paths; j++) {
			int index = ((i * num_paths) + j) * 4;
			/* DT cells are big-endian; ab/ib are KB -> bytes. */
			usecase[i].vectors[j].src = be32_to_cpu(vec_arr[index]);
			usecase[i].vectors[j].dst =
				be32_to_cpu(vec_arr[index + 1]);
			usecase[i].vectors[j].ab = (uint64_t)
				KBTOB(be32_to_cpu(vec_arr[index + 2]));
			usecase[i].vectors[j].ib = (uint64_t)
				KBTOB(be32_to_cpu(vec_arr[index + 3]));
		}
	}

	pdata->usecase = usecase;
	return pdata;
err:
	/*
	 * Every allocation above is device-managed.  This path previously
	 * released them with plain kfree(), which corrupts the devres list
	 * and double-frees on device detach.  Use devm_kfree() instead so
	 * failed probes do not pin memory until the device goes away.
	 * (i holds the index of the first usecase without vectors.)
	 */
	if (usecase) {
		for (; i > 0; i--)
			devm_kfree(&pdev->dev, usecase[i - 1].vectors);
		devm_kfree(&pdev->dev, usecase);
	}
	if (pdata)
		devm_kfree(&pdev->dev, pdata);
	return NULL;
}
/*
 * get_nodes() - Parse all child nodes of a fabric DT node into a
 * devm-allocated msm_bus_node_info array, accumulating master/slave port
 * totals into @pdata (nmasters, nslaves, len).
 *
 * @of_node: fabric node whose children each describe one bus node
 * @pdev:    owning platform device (devm allocation lifetime)
 * @pdata:   fabric registration data updated in place
 *
 * Mandatory per-child properties: "cell-id" and, when present, valid
 * "qcom,hw-sel" / "qcom,mode" / "qcom,mode-thresh" / "qcom,perm-mode"
 * strings; anything else is optional.  Returns the node array, or NULL
 * on error.
 */
static struct msm_bus_node_info *get_nodes(struct device_node *of_node,
	struct platform_device *pdev,
	struct msm_bus_fabric_registration *pdata)
{
	struct msm_bus_node_info *info;
	struct device_node *child_node = NULL;
	int i = 0, ret;
	int j, max_masterp = 0, max_slavep = 0;
	u32 temp;

	/* First pass: count the children to size the info array. */
	for_each_child_of_node(of_node, child_node) {
		i++;
	}

	pdata->len = i;
	info = (struct msm_bus_node_info *)
		devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_node_info) *
			pdata->len, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(info)) {
		pr_err("Failed to alloc memory for nodes: %d\n", pdata->len);
		goto err;
	}

	/* Second pass: fill one info entry per child. */
	i = 0;
	child_node = NULL;
	for_each_child_of_node(of_node, child_node) {
		const char *sel_str;

		/* A missing label is tolerated; a missing id is fatal. */
		ret = of_property_read_string(child_node, "label",
			&info[i].name);
		if (ret)
			pr_err("Error reading node label\n");

		ret = of_property_read_u32(child_node, "cell-id", &info[i].id);
		if (ret) {
			pr_err("Error reading node id\n");
			goto err;
		}

		if (of_property_read_bool(child_node, "qcom,gateway"))
			info[i].gateway = 1;

		of_property_read_u32(child_node, "qcom,mas-hw-id",
			&info[i].mas_hw_id);

		of_property_read_u32(child_node, "qcom,slv-hw-id",
			&info[i].slv_hw_id);
		info[i].masterp = get_arr(pdev, child_node,
					"qcom,masterp", &info[i].num_mports);
		/* No need to store number of qports */
		info[i].qport = get_arr(pdev, child_node,
					"qcom,qport", &ret);
		pdata->nmasters += info[i].num_mports;
		/* assumes get_arr() returns a non-NULL array whenever it
		 * reports a nonzero count -- TODO confirm */
		for (j = 0; j < info[i].num_mports; j++)
			max_masterp = max(info[i].masterp[j], max_masterp);


		info[i].slavep = get_arr(pdev, child_node,
					"qcom,slavep", &info[i].num_sports);
		pdata->nslaves += info[i].num_sports;
		for (j = 0; j < info[i].num_sports; j++)
			max_slavep = max(info[i].slavep[j], max_slavep);


		info[i].tier = get_arr(pdev, child_node,
					"qcom,tier", &info[i].num_tiers);

		if (of_property_read_bool(child_node, "qcom,ahb"))
			info[i].ahb = 1;

		ret = of_property_read_string(child_node, "qcom,hw-sel",
			&sel_str);
		if (ret)
			info[i].hw_sel = 0;
		else {
			ret =  get_num(hw_sel_name, sel_str);
			if (ret < 0) {
				pr_err("Invalid hw-sel\n");
				goto err;
			}

			info[i].hw_sel = ret;
		}

		of_property_read_u32(child_node, "qcom,buswidth",
			&info[i].buswidth);
		of_property_read_u32(child_node, "qcom,ws", &info[i].ws);
		/* Thresholds and BIMC bandwidth are given in KB. */
		ret = of_property_read_u32(child_node, "qcom,thresh",
			&temp);
		if (!ret)
			info[i].th = (uint64_t)KBTOB(temp);

		ret = of_property_read_u32(child_node, "qcom,bimc,bw",
			&temp);
		if (!ret)
			info[i].bimc_bw = (uint64_t)KBTOB(temp);

		of_property_read_u32(child_node, "qcom,bimc,gp",
			&info[i].bimc_gp);
		of_property_read_u32(child_node, "qcom,bimc,thmp",
			&info[i].bimc_thmp);
		ret = of_property_read_string(child_node, "qcom,mode",
			&sel_str);
		if (ret)
			info[i].mode = 0;
		else {
			ret = get_num(mode_sel_name, sel_str);
			if (ret < 0) {
				pr_err("Unknown mode :%s\n", sel_str);
				goto err;
			}

			info[i].mode = ret;
		}

		info[i].dual_conf =
			of_property_read_bool(child_node, "qcom,dual-conf");

		ret = of_property_read_string(child_node, "qcom,mode-thresh",
			&sel_str);
		if (ret)
			info[i].mode_thresh = 0;
		else {
			ret = get_num(mode_sel_name, sel_str);
			if (ret < 0) {
				pr_err("Unknown mode :%s\n", sel_str);
				goto err;
			}

			info[i].mode_thresh = ret;
			MSM_BUS_DBG("AXI: THreshold mode set: %d\n",
				info[i].mode_thresh);
		}

		ret = of_property_read_string(child_node, "qcom,perm-mode",
			&sel_str);
		if (ret)
			info[i].perm_mode = 0;
		else {
			ret = get_num(mode_sel_name, sel_str);
			if (ret < 0)
				goto err;

			/* perm_mode is a bitmask of permitted modes. */
			info[i].perm_mode = 1 << ret;
		}

		of_property_read_u32(child_node, "qcom,prio-lvl",
			&info[i].prio_lvl);
		of_property_read_u32(child_node, "qcom,prio-rd",
			&info[i].prio_rd);
		of_property_read_u32(child_node, "qcom,prio-wr",
			&info[i].prio_wr);
		of_property_read_u32(child_node, "qcom,prio0", &info[i].prio0);
		of_property_read_u32(child_node, "qcom,prio1", &info[i].prio1);
		ret = of_property_read_string(child_node, "qcom,slaveclk-dual",
			&info[i].slaveclk[DUAL_CTX]);
		if (!ret)
			pr_debug("Got slaveclk_dual: %s\n",
				info[i].slaveclk[DUAL_CTX]);
		else
			info[i].slaveclk[DUAL_CTX] = NULL;

		ret = of_property_read_string(child_node,
			"qcom,slaveclk-active", &info[i].slaveclk[ACTIVE_CTX]);
		if (!ret)
			pr_debug("Got slaveclk_active\n");
		else
			info[i].slaveclk[ACTIVE_CTX] = NULL;

		ret = of_property_read_string(child_node, "qcom,memclk-dual",
			&info[i].memclk[DUAL_CTX]);
		if (!ret)
			pr_debug("Got memclk_dual\n");
		else
			info[i].memclk[DUAL_CTX] = NULL;

		ret = of_property_read_string(child_node, "qcom,memclk-active",
			&info[i].memclk[ACTIVE_CTX]);
		if (!ret)
			pr_debug("Got memclk_active\n");
		else
			info[i].memclk[ACTIVE_CTX] = NULL;

		ret = of_property_read_string(child_node, "qcom,iface-clk-node",
			&info[i].iface_clk_node);
		if (!ret)
			pr_debug("Got iface_clk_node\n");
		else
			info[i].iface_clk_node = NULL;

		pr_debug("Node name: %s\n", info[i].name);
		/*
		 * No of_node_put() here: for_each_child_of_node() manages
		 * the child's reference across iterations, and the explicit
		 * put this loop used to do underflowed the refcount.
		 */
		i++;
	}

	/* Port numbers may exceed the raw counts; widen to cover them. */
	if (pdata->nmasters)
		pdata->nmasters = max(pdata->nmasters,
				(unsigned int)(max_masterp + 1));
	if (pdata->nslaves)
		pdata->nslaves = max(pdata->nslaves,
				(unsigned int)(max_slavep + 1));
	pr_debug("Bus %d added: %d masters\n", pdata->id, pdata->nmasters);
	pr_debug("Bus %d added: %d slaves\n", pdata->id, pdata->nslaves);
	return info;
err:
	/*
	 * Bailing out mid-iteration leaves the iterator's reference on the
	 * current child; drop it (of_node_put(NULL) is a no-op for the
	 * pre-loop failure path).
	 */
	of_node_put(child_node);
	return NULL;
}
/*
 * ---- extraction artifact ("Beispiel #5" / "0") removed: the function
 * below comes from a different source (BSD amd64 machine-dependent
 * initialization), unrelated to the MSM bus code above. ----
 */
/*
 * init_x86_64() - Early amd64 machine-dependent initialization.
 *
 * Called once at boot with @first_avail pointing just past the kernel's
 * statically allocated memory.  From the code visible here it:
 * initializes per-CPU MSRs and bus-space, parses the /boot-supplied
 * bootinfo page, bootstraps the pmap, loads the physical memory clusters
 * into UVM (splitting segments around the kernel image and steering the
 * low 16MB onto a separate free list for ISA DMA), steals the message
 * buffer from the top of RAM, builds the GDT/LDT/IDT descriptor tables,
 * and finally sets up and enables interrupts.
 */
void
init_x86_64(paddr_t first_avail)
{
	extern void consinit(void);
	extern struct extent *iomem_ex;
	struct region_descriptor region;
	struct mem_segment_descriptor *ldt_segp;
	int x, first16q, ist;
	u_int64_t seg_start, seg_end;
	u_int64_t seg_start1, seg_end1;

	cpu_init_msrs(&cpu_info_primary);

	proc0.p_addr = proc0paddr;
	cpu_info_primary.ci_curpcb = &proc0.p_addr->u_pcb;

	x86_bus_space_init();

	consinit();	/* XXX SHOULD NOT BE DONE HERE */

	/*
	 * Initailize PAGE_SIZE-dependent variables.
	 */
	uvm_setpagesize();

#if 0
	uvmexp.ncolors = 2;
#endif

	/*
	 * Boot arguments are in a single page specified by /boot.
	 *
	 * We require the "new" vector form, as well as memory ranges
	 * to be given in bytes rather than KB.
	 *
	 * locore copies the data into bootinfo[] for us.
	 */
	if ((bootapiver & (BAPIV_VECTOR | BAPIV_BMEMMAP)) ==
	    (BAPIV_VECTOR | BAPIV_BMEMMAP)) {
		if (bootinfo_size >= sizeof(bootinfo))
			panic("boot args too big");

		getbootinfo(bootinfo, bootinfo_size);
	} else
		panic("invalid /boot");

	avail_start = PAGE_SIZE; /* BIOS leaves data in low memory */
				 /* and VM system doesn't work with phys 0 */
#ifdef MULTIPROCESSOR
	/* Keep the MP boot trampoline page out of the allocatable range. */
	if (avail_start < MP_TRAMPOLINE + PAGE_SIZE)
		avail_start = MP_TRAMPOLINE + PAGE_SIZE;
#endif

	/*
	 * Call pmap initialization to make new kernel address space.
	 * We must do this before loading pages into the VM system.
	 */
	pmap_bootstrap(VM_MIN_KERNEL_ADDRESS,
	    IOM_END + trunc_page(KBTOB(biosextmem)));

	if (avail_start != PAGE_SIZE)
		pmap_prealloc_lowmem_ptps();

	/* No BIOS memory map: synthesize two clusters (base + extended). */
	if (mem_cluster_cnt == 0) {
		/*
		 * Allocate the physical addresses used by RAM from the iomem
		 * extent map.  This is done before the addresses are
		 * page rounded just to make sure we get them all.
		 */
		if (extent_alloc_region(iomem_ex, 0, KBTOB(biosbasemem),
		    EX_NOWAIT)) {
			/* XXX What should we do? */
			printf("WARNING: CAN'T ALLOCATE BASE MEMORY FROM "
			    "IOMEM EXTENT MAP!\n");
		}
		mem_clusters[0].start = 0;
		mem_clusters[0].size = trunc_page(KBTOB(biosbasemem));
		physmem += atop(mem_clusters[0].size);
		if (extent_alloc_region(iomem_ex, IOM_END, KBTOB(biosextmem),
		    EX_NOWAIT)) {
			/* XXX What should we do? */
			printf("WARNING: CAN'T ALLOCATE EXTENDED MEMORY FROM "
			    "IOMEM EXTENT MAP!\n");
		}
#if 0
#if NISADMA > 0
		/*
		 * Some motherboards/BIOSes remap the 384K of RAM that would
		 * normally be covered by the ISA hole to the end of memory
		 * so that it can be used.  However, on a 16M system, this
		 * would cause bounce buffers to be allocated and used.
		 * This is not desirable behaviour, as more than 384K of
		 * bounce buffers might be allocated.  As a work-around,
		 * we round memory down to the nearest 1M boundary if
		 * we're using any isadma devices and the remapped memory
		 * is what puts us over 16M.
		 */
		if (biosextmem > (15*1024) && biosextmem < (16*1024)) {
			char pbuf[9];

			format_bytes(pbuf, sizeof(pbuf),
			    biosextmem - (15*1024));
			printf("Warning: ignoring %s of remapped memory\n",
			    pbuf);
			biosextmem = (15*1024);
		}
#endif
#endif
		mem_clusters[1].start = IOM_END;
		mem_clusters[1].size = trunc_page(KBTOB(biosextmem));
		physmem += atop(mem_clusters[1].size);

		mem_cluster_cnt = 2;

		avail_end = IOM_END + trunc_page(KBTOB(biosextmem));
	}

	/*
	 * If we have 16M of RAM or less, just put it all on
	 * the default free list.  Otherwise, put the first
	 * 16M of RAM on a lower priority free list (so that
	 * all of the ISA DMA'able memory won't be eaten up
	 * first-off).
	 */
	if (avail_end <= (16 * 1024 * 1024))
		first16q = VM_FREELIST_DEFAULT;
	else
		first16q = VM_FREELIST_FIRST16;

	/* Make sure the end of the space used by the kernel is rounded. */
	first_avail = round_page(first_avail);
	kern_end = KERNBASE + first_avail;

	/*
	 * Now, load the memory clusters (which have already been
	 * rounded and truncated) into the VM system.
	 *
	 * NOTE: WE ASSUME THAT MEMORY STARTS AT 0 AND THAT THE KERNEL
	 * IS LOADED AT IOM_END (1M).
	 */
	for (x = 0; x < mem_cluster_cnt; x++) {
		seg_start = mem_clusters[x].start;
		seg_end = mem_clusters[x].start + mem_clusters[x].size;
		seg_start1 = 0;
		seg_end1 = 0;

		/*
		 * NOTE(review): "%lld" with u_int64_t operands assumes
		 * long long is the 64-bit type on this platform; on LP64
		 * u_int64_t is often unsigned long -- verify the format.
		 */
		if (seg_start > 0xffffffffULL) {
			printf("skipping %lld bytes of memory above 4GB\n",
			    seg_end - seg_start);
			continue;
		}
		if (seg_end > 0x100000000ULL) {
			printf("skipping %lld bytes of memory above 4GB\n",
			    seg_end - 0x100000000ULL);
			seg_end = 0x100000000ULL;
		}

		/*
		 * Skip memory before our available starting point.
		 */
		if (seg_end <= avail_start)
			continue;

		if (avail_start >= seg_start && avail_start < seg_end) {
			if (seg_start != 0)
				panic("init_x86_64: memory doesn't start at 0");
			seg_start = avail_start;
			if (seg_start == seg_end)
				continue;
		}

		/*
		 * If this segment contains the kernel, split it
		 * in two, around the kernel.
		 *   [seg_start, IOM_END) and [first_avail, seg_end)
		 */
		if (seg_start <= IOM_END && first_avail <= seg_end) {
			seg_start1 = first_avail;
			seg_end1 = seg_end;
			seg_end = IOM_END;
		}

		/* First hunk */
		if (seg_start != seg_end) {
			/* Portion below 16MB goes on the ISA-DMA list. */
			if (seg_start <= (16 * 1024 * 1024) &&
			    first16q != VM_FREELIST_DEFAULT) {
				u_int64_t tmp;

				if (seg_end > (16 * 1024 * 1024))
					tmp = (16 * 1024 * 1024);
				else
					tmp = seg_end;
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    (unsigned long long)seg_start,
				    (unsigned long long)tmp,
				    atop(seg_start), atop(tmp));
#endif
				uvm_page_physload(atop(seg_start),
				    atop(tmp), atop(seg_start),
				    atop(tmp), first16q);
				seg_start = tmp;
			}

			if (seg_start != seg_end) {
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    (unsigned long long)seg_start,
				    (unsigned long long)seg_end,
				    atop(seg_start), atop(seg_end));
#endif
				uvm_page_physload(atop(seg_start),
				    atop(seg_end), atop(seg_start),
				    atop(seg_end), VM_FREELIST_DEFAULT);
			}
		}

		/* Second hunk (above the kernel), same 16MB split. */
		if (seg_start1 != seg_end1) {
			if (seg_start1 <= (16 * 1024 * 1024) &&
			    first16q != VM_FREELIST_DEFAULT) {
				u_int64_t tmp;

				if (seg_end1 > (16 * 1024 * 1024))
					tmp = (16 * 1024 * 1024);
				else
					tmp = seg_end1;
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    (unsigned long long)seg_start1,
				    (unsigned long long)tmp,
				    atop(seg_start1), atop(tmp));
#endif
				uvm_page_physload(atop(seg_start1),
				    atop(tmp), atop(seg_start1),
				    atop(tmp), first16q);
				seg_start1 = tmp;
			}

			if (seg_start1 != seg_end1) {
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    (unsigned long long)seg_start1,
				    (unsigned long long)seg_end1,
				    atop(seg_start1), atop(seg_end1));
#endif
				uvm_page_physload(atop(seg_start1),
				    atop(seg_end1), atop(seg_start1),
				    atop(seg_end1), VM_FREELIST_DEFAULT);
			}
		}
	}

	/*
	 * Steal memory for the message buffer (at end of core).
	 */
	{
		struct vm_physseg *vps = NULL;
		psize_t sz = round_page(MSGBUFSIZE);
		psize_t reqsz = sz;

		/* Find the physical segment that ends at avail_end. */
		for (x = 0; x < vm_nphysseg; x++) {
			vps = &vm_physmem[x];
			if (ptoa(vps->avail_end) == avail_end)
				break;
		}
		if (x == vm_nphysseg)
			panic("init_x86_64: can't find end of memory");

		/* Shrink so it'll fit in the last segment. */
		if ((vps->avail_end - vps->avail_start) < atop(sz))
			sz = ptoa(vps->avail_end - vps->avail_start);

		vps->avail_end -= atop(sz);
		vps->end -= atop(sz);
		msgbuf_paddr = ptoa(vps->avail_end);

		/* Remove the last segment if it now has no pages. */
		if (vps->start == vps->end) {
			for (vm_nphysseg--; x < vm_nphysseg; x++)
				vm_physmem[x] = vm_physmem[x + 1];
		}

		/* Now find where the new avail_end is. */
		for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
			if (vm_physmem[x].avail_end > avail_end)
				avail_end = vm_physmem[x].avail_end;
		avail_end = ptoa(avail_end);

		/*
		 * Warn if the message buffer had to be shrunk.
		 * NOTE(review): "%ld" assumes psize_t is long -- verify.
		 */
		if (sz != reqsz)
			printf("WARNING: %ld bytes not available for msgbuf "
			    "in last cluster (%ld used)\n", reqsz, sz);
	}

	/*
	 * XXXfvdl todo: acpi wakeup code.
	 */

	pmap_growkernel(VM_MIN_KERNEL_ADDRESS + 32 * 1024 * 1024);

	/* Map the pre-reserved pages for the IDT/GDT/LDT and low 32-bit page. */
	pmap_kenter_pa(idt_vaddr, idt_paddr, VM_PROT_READ|VM_PROT_WRITE);
	pmap_kenter_pa(idt_vaddr + PAGE_SIZE, idt_paddr + PAGE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE);

	pmap_kenter_pa(lo32_vaddr, lo32_paddr, VM_PROT_READ|VM_PROT_WRITE);

	/* Descriptor tables live back-to-back: IDT, then GDT, then LDT. */
	idt = (struct gate_descriptor *)idt_vaddr;
	gdtstore = (char *)(idt + NIDT);
	ldtstore = gdtstore + DYNSEL_START;

	/* make gdt gates and memory segments */
	set_mem_segment(GDT_ADDR_MEM(gdtstore, GCODE_SEL), 0, 0xfffff, SDT_MEMERA,
	    SEL_KPL, 1, 0, 1);

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GDATA_SEL), 0, 0xfffff, SDT_MEMRWA,
	    SEL_KPL, 1, 0, 1);

	set_sys_segment(GDT_ADDR_SYS(gdtstore, GLDT_SEL), ldtstore, LDT_SIZE - 1,
	    SDT_SYSLDT, SEL_KPL, 0);

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GUCODE_SEL), 0,
	    atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMERA, SEL_UPL, 1, 0, 1);

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GUDATA_SEL), 0,
	    atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMRWA, SEL_UPL, 1, 0, 1);

	/* make ldt gates and memory segments */
	setgate((struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL),
	    &IDTVEC(oosyscall), 0, SDT_SYS386CGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));

	*(struct mem_segment_descriptor *)(ldtstore + LUCODE_SEL) =
	    *GDT_ADDR_MEM(gdtstore, GUCODE_SEL);
	*(struct mem_segment_descriptor *)(ldtstore + LUDATA_SEL) =
	    *GDT_ADDR_MEM(gdtstore, GUDATA_SEL);

	/*
	 * 32 bit GDT entries.
	 */

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GUCODE32_SEL), 0,
	    atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMERA, SEL_UPL, 1, 1, 0);

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GUDATA32_SEL), 0,
	    atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMRWA, SEL_UPL, 1, 1, 0);

	/*
	 * 32 bit LDT entries.
	 */
	ldt_segp = (struct mem_segment_descriptor *)(ldtstore + LUCODE32_SEL);
	set_mem_segment(ldt_segp, 0, atop(VM_MAXUSER_ADDRESS32) - 1,
	    SDT_MEMERA, SEL_UPL, 1, 1, 0);
	ldt_segp = (struct mem_segment_descriptor *)(ldtstore + LUDATA32_SEL);
	set_mem_segment(ldt_segp, 0, atop(VM_MAXUSER_ADDRESS32) - 1,
	    SDT_MEMRWA, SEL_UPL, 1, 1, 0);

	/*
	 * Other entries.
	 * Solaris/iBCS2 call gates are copies of the SysV gate.
	 */
	memcpy((struct gate_descriptor *)(ldtstore + LSOL26CALLS_SEL),
	    (struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL),
	    sizeof (struct gate_descriptor));
	memcpy((struct gate_descriptor *)(ldtstore + LBSDICALLS_SEL),
	    (struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL),
	    sizeof (struct gate_descriptor));

	/* exceptions */
	for (x = 0; x < 32; x++) {
		/* vector 8 (double fault) runs on IST stack 1 */
		ist = (x == 8) ? 1 : 0;
		/* vectors 3 (#BP) and 4 (#OF) are reachable from user mode */
		setgate(&idt[x], IDTVEC(exceptions)[x], ist, SDT_SYS386IGT,
		    (x == 3 || x == 4) ? SEL_UPL : SEL_KPL,
		    GSEL(GCODE_SEL, SEL_KPL));
		idt_allocmap[x] = 1;
	}

	/* new-style interrupt gate for syscalls */
	setgate(&idt[128], &IDTVEC(osyscall), 0, SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	idt_allocmap[128] = 1;

	setregion(&region, gdtstore, DYNSEL_START - 1);
	lgdt(&region);

	cpu_init_idt();

#ifdef DDB
	db_machine_init();
	ddb_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif
#ifdef KGDB
	kgdb_port_init();
	if (boothowto & RB_KDB) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif

	intr_default_setup();

	softintr_init();
	splraise(IPL_IPI);
	enable_intr();

        /* Make sure maxproc is sane */ 
        if (maxproc > cpu_maxproc())
                maxproc = cpu_maxproc();
}
/*
 * msm_bus_of_get_fab_data() - Parse a fabric's registration data from the
 * platform device's DT node.
 *
 * @pdev: platform device whose of_node carries the fabric properties
 *
 * Mandatory properties: "label", "cell-id", "qcom,ntieredslaves" and a
 * valid "qcom,hw-sel".  Everything else is optional and falls back to
 * zero/NULL.  Returns a devm-allocated msm_bus_fabric_registration with
 * pdata->info populated by get_nodes(), or NULL on error (devm frees any
 * partial allocations on detach).
 */
struct msm_bus_fabric_registration
	*msm_bus_of_get_fab_data(struct platform_device *pdev)
{
	struct device_node *of_node;
	struct msm_bus_fabric_registration *pdata;
	int ret = 0;
	const char *sel_str;
	u32 temp;

	if (!pdev) {
		pr_err("Error: Null platform device\n");
		return NULL;
	}

	of_node = pdev->dev.of_node;
	pdata = devm_kzalloc(&pdev->dev,
			sizeof(struct msm_bus_fabric_registration), GFP_KERNEL);
	if (!pdata) {
		pr_err("Error: Memory allocation for pdata failed\n");
		goto err;
	}

	ret = of_property_read_string(of_node, "label", &pdata->name);
	if (ret) {
		pr_err("Error: label not found\n");
		goto err;
	}
	pr_debug("Fab_of: Read name: %s\n", pdata->name);

	ret = of_property_read_u32(of_node, "cell-id",
		&pdata->id);
	if (ret) {
		/* was "num-usecases not found" -- copy-paste error */
		pr_err("Error: cell-id not found\n");
		goto err;
	}
	pr_debug("Fab_of: Read id: %u\n", pdata->id);

	if (of_property_read_bool(of_node, "qcom,ahb"))
		pdata->ahb = 1;

	/* Fabric clocks are optional in either context. */
	ret = of_property_read_string(of_node, "qcom,fabclk-dual",
		&pdata->fabclk[DUAL_CTX]);
	if (ret) {
		pr_debug("fabclk_dual not available\n");
		pdata->fabclk[DUAL_CTX] = NULL;
	} else
		pr_debug("Fab_of: Read clk dual ctx: %s\n",
			pdata->fabclk[DUAL_CTX]);
	ret = of_property_read_string(of_node, "qcom,fabclk-active",
		&pdata->fabclk[ACTIVE_CTX]);
	if (ret) {
		pr_debug("Error: fabclk_active not available\n");
		pdata->fabclk[ACTIVE_CTX] = NULL;
	} else
		pr_debug("Fab_of: Read clk act ctx: %s\n",
			pdata->fabclk[ACTIVE_CTX]);

	ret = of_property_read_u32(of_node, "qcom,ntieredslaves",
		&pdata->ntieredslaves);
	if (ret) {
		pr_err("Error: ntieredslaves not found\n");
		goto err;
	}

	ret = of_property_read_u32(of_node, "qcom,qos-freq", &pdata->qos_freq);
	if (ret)
		pr_debug("qos_freq not available\n");

	ret = of_property_read_string(of_node, "qcom,hw-sel", &sel_str);
	if (ret) {
		pr_err("Error: hw_sel not found\n");
		goto err;
	} else {
		ret = get_num(hw_sel_name, sel_str);
		if (ret < 0)
			goto err;

		pdata->hw_sel = ret;
	}

	if (of_property_read_bool(of_node, "qcom,virt"))
		pdata->virt = true;

	ret = of_property_read_u32(of_node, "qcom,qos-baseoffset",
						&pdata->qos_baseoffset);
	if (ret)
		pr_debug("%s:qos_baseoffset not available\n", __func__);

	if (of_property_read_bool(of_node, "qcom,rpm-en"))
		pdata->rpm_enabled = 1;

	/* nr-lim threshold is given in KB; convert to bytes. */
	ret = of_property_read_u32(of_node, "qcom,nr-lim-thresh",
						&temp);

	if (ret) {
		pr_err("nr-lim threshold not specified\n");
		pdata->nr_lim_thresh = 0;
	} else
		pdata->nr_lim_thresh = KBTOB(temp);

	ret = of_property_read_u32(of_node, "qcom,eff-fact",
						&pdata->eff_fact);
	if (ret) {
		pr_err("Fab eff-factor not present\n");
		pdata->eff_fact = 0;
	}



	pdata->info = get_nodes(of_node, pdev, pdata);
	return pdata;
err:
	return NULL;
}