Example #1
/*
 * Initialize lwp's kernel stack.
 * Note that now that the floating point register save area (kfpu_t)
 * has been broken out from machpcb and aligned on a 64 byte boundary so that
 * we can do block load/stores to/from it, there are a couple of potential
 * optimizations to save stack space. 1. The floating point register save
 * area could be aligned on a 16 byte boundary, and the floating point code
 * changed to (a) check the alignment and (b) use different save/restore
 * macros depending upon the alignment. 2. The lwp_stk_init code below
 * could be changed to calculate if less space would be wasted if machpcb
 * was first instead of second. However there is a REGOFF macro used in
 * locore, syscall_trap, machdep and mlsetup that assumes that the saved
 * register area is a fixed distance from the %sp, and would have to be
 * changed to a pointer or something...JJ said later.
 */
caddr_t
lwp_stk_init(klwp_t *lwp, caddr_t stk)
{
	struct machpcb *mpcb;
	kfpu_t *fp;
	uintptr_t aln;

	stk -= SA(sizeof (kfpu_t) + GSR_SIZE);
	aln = (uintptr_t)stk & 0x3F;
	stk -= aln;
	fp = (kfpu_t *)stk;
	stk -= SA(sizeof (struct machpcb));
	mpcb = (struct machpcb *)stk;
	bzero(mpcb, sizeof (struct machpcb));
	bzero(fp, sizeof (kfpu_t) + GSR_SIZE);
	lwp->lwp_regs = (void *)&mpcb->mpcb_regs;
	lwp->lwp_fpu = (void *)fp;
	mpcb->mpcb_fpu = fp;
	mpcb->mpcb_fpu->fpu_q = mpcb->mpcb_fpu_q;
	mpcb->mpcb_thread = lwp->lwp_thread;
	mpcb->mpcb_wbcnt = 0;
	if (lwp->lwp_procp->p_model == DATAMODEL_ILP32) {
		mpcb->mpcb_wstate = WSTATE_USER32;
		mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf32_cache, KM_SLEEP);
	} else {
		mpcb->mpcb_wstate = WSTATE_USER64;
		mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf64_cache, KM_SLEEP);
	}
	ASSERT(((uintptr_t)mpcb->mpcb_wbuf & 7) == 0);
	mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf);
	mpcb->mpcb_pa = va_to_pa(mpcb);
	return (stk);
}
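The header comment above describes how the kfpu_t save area is carved off the top of the kernel stack and pushed down to a 64-byte boundary, with the machpcb placed immediately below it. A minimal, self-contained sketch of that carve-and-align arithmetic; generic sizes stand in for SA(sizeof (kfpu_t) + GSR_SIZE) and SA(sizeof (struct machpcb)), and this is an illustration, not the kernel's code:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static char *
carve_fpu_and_pcb(char *stk, size_t fpu_sz, size_t pcb_sz)
{
	char *fp;

	stk -= fpu_sz;			/* reserve the FPU save area */
	stk -= (uintptr_t)stk & 0x3F;	/* push down to a 64-byte boundary */
	fp = stk;
	stk -= pcb_sz;			/* machpcb sits just below the FPU area */

	assert(((uintptr_t)fp & 0x3F) == 0);	/* block load/store alignment */
	(void)fp;
	return (stk);			/* new top of the kernel stack */
}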
Example #2
/* set up page tables for kernel */
void init_page(void) {
	CR0 cr0;
	CR3 cr3;
	PDE *pdir = (PDE *)va_to_pa(kpdir);
	PTE *ptable = (PTE *)va_to_pa(kptable);
	uint32_t pdir_idx;

	/* make all PDEs invalid */
	memset(pdir, 0, NR_PDE * sizeof(PDE));

	/* fill PDEs */
	for (pdir_idx = 0; pdir_idx < PHY_MEM / PT_SIZE; pdir_idx ++) {
		pdir[pdir_idx].val = make_pde(ptable);
		pdir[pdir_idx + KOFFSET / PT_SIZE].val = make_pde(ptable);

		ptable += NR_PTE;
	}

	/* fill PTEs */

	/* We use inline assembly here to fill PTEs for efficiency.
	 * If you do not understand it, refer to the C code below.
	 */

	asm volatile ("std;\
	 1: stosl;\
		subl %0, %%eax;\
		jge 1b;\
		cld" : :
		"i"(PAGE_SIZE), "a"((PHY_MEM - PAGE_SIZE) | 0x7), "D"(ptable - 1));


	/*
		===== reference C code for the inline assembly above =====

		Note: the counter must be signed for the ">= 0" test to
		terminate; the assembly gets the same effect from the sign
		flag that "subl" leaves behind.

		int32_t pframe_addr = PHY_MEM - PAGE_SIZE;
		ptable --;

		// fill PTEs in reverse, from the last entry down to entry 0
		for (; pframe_addr >= 0; pframe_addr -= PAGE_SIZE) {
			ptable->val = make_pte(pframe_addr);
			ptable --;
		}
	*/


	/* point CR3 at the page directory */
	cr3.val = 0;
	cr3.page_directory_base = ((uint32_t)pdir) >> 12;
	write_cr3(cr3.val);

	/* set PG bit in CR0 to enable paging */
	cr0.val = read_cr0();
	cr0.paging = 1;
	write_cr0(cr0.val);
}
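For readers who want the page-table fill without the inline assembly: a hedged, self-contained C version of the same top-down loop. It assumes each PTE is a 32-bit word and that 0x7 is the present|writable|user bit pattern the assembly ORs into every entry.

#include <stdint.h>

static void
fill_ptes_c(uint32_t *ptable, uint32_t nr_ptes, uint32_t page_size)
{
	/* walk from the last entry down to entry 0, as the assembly does */
	int64_t pframe_addr = (int64_t)nr_ptes * page_size - page_size;
	uint32_t *p = ptable + nr_ptes - 1;

	for (; pframe_addr >= 0; pframe_addr -= page_size, p--)
		*p = (uint32_t)pframe_addr | 0x7;
}

Calling it as fill_ptes_c((uint32_t *)va_to_pa(kptable), PHY_MEM / PAGE_SIZE, PAGE_SIZE) would populate the same entries the assembly does.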
Example #3
void
sfmmu_set_tsbs()
{
	uint64_t rv;
	struct hv_tsb_block *hvbp = &ksfmmup->sfmmu_hvblock;

#ifdef DEBUG
	if (hv_use_0_tsb == 0)
		return;
#endif /* DEBUG */

	rv = hv_set_ctx0(hvbp->hv_tsb_info_cnt,
	    hvbp->hv_tsb_info_pa);
	if (rv != H_EOK)
		prom_printf("cpu%d: hv_set_ctx0() returned %lx\n",
		    getprocessorid(), rv);

#ifdef SET_MMU_STATS
	ASSERT(getprocessorid() < NCPU);
	rv = hv_mmu_set_stat_area(va_to_pa(&mmu_stat_area[getprocessorid()]),
	    sizeof (mmu_stat_area[0]));
	if (rv != H_EOK)
		prom_printf("cpu%d: hv_mmu_set_stat_area() returned %lx\n",
		    getprocessorid(), rv);
#endif /* SET_MMU_STATS */
}
Example #4
static void
sfmmu_set_fault_status_area(void)
{
	caddr_t mmfsa_va;
	extern	caddr_t mmu_fault_status_area;

	mmfsa_va =
	    mmu_fault_status_area + (MMFSA_SIZE  * getprocessorid());
	set_mmfsa_scratchpad(mmfsa_va);
	prom_set_mmfsa_traptable(&trap_table, va_to_pa(mmfsa_va));
}
Example #5
static void cortexa_mem_write(target *t, target_addr dest, const void *src, size_t len)
{
	/* Clean and invalidate cache before writing */
	for (uint32_t cl = dest & ~(CACHE_LINE_LENGTH-1);
	     cl < dest + len; cl += CACHE_LINE_LENGTH) {
		write_gpreg(t, 0, cl);
		apb_write(t, DBGITR, MCR | DCCIMVAC);
	}
	ADIv5_AP_t *ahb = ((struct cortexa_priv*)t->priv)->ahb;
	adiv5_mem_write(ahb, va_to_pa(t, dest), src, len);
}
Example #6
static void cortexa_mem_read(target *t, void *dest, target_addr src, size_t len)
{
	/* Clean cache before reading */
	for (uint32_t cl = src & ~(CACHE_LINE_LENGTH-1);
	     cl < src + len; cl += CACHE_LINE_LENGTH) {
		write_gpreg(t, 0, cl);
		apb_write(t, DBGITR, MCR | DCCMVAC);
	}

	ADIv5_AP_t *ahb = ((struct cortexa_priv*)t->priv)->ahb;
	adiv5_mem_read(ahb, dest, va_to_pa(t, src), len);
}
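Both routines above visit every cache line that overlaps [addr, addr + len), starting from the line-aligned base. A small illustration of that loop-bound arithmetic, assuming len > 0 and a power-of-two line length; sketch only:

#include <stdint.h>

/* How many maintenance operations the loops above issue for a given range. */
static uint32_t
cache_lines_touched(uint32_t addr, uint32_t len, uint32_t line_len)
{
	uint32_t first = addr & ~(line_len - 1);		/* line of first byte */
	uint32_t last = (addr + len - 1) & ~(line_len - 1);	/* line of last byte */

	return ((last - first) / line_len + 1);
}

For example, cache_lines_touched(0x1006, 10, 8) is 2, matching the two iterations the loop would make for a 10-byte access that straddles two 8-byte lines.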
Example #7
void init_mm() {
	PDE *kpdir = get_kpdir();

	/* make all PDEs invalid */
	memset(updir, 0, NR_PDE * sizeof(PDE));

	/* create the same mapping above 0xc0000000 as the kernel mapping does */
	memcpy(&updir[KOFFSET / PT_SIZE], &kpdir[KOFFSET / PT_SIZE], 
			(PHY_MEM / PT_SIZE) * sizeof(PDE));

	/* CR3 holds the physical address of the page directory; the low
	 * 12 bits are flag/reserved bits, so mask them off. */
	ucr3.val = (uint32_t)va_to_pa((uint32_t)updir) & ~0xfff;
}
Example #8
File: kvm.c Project: dpingg/os-lab1
/* Build a page table for the kernel */
void
init_page(void) {
	CR0 cr0;
	CR3 cr3;
	PDE *pdir = (PDE *)va_to_pa(kpdir);
	PTE *ptable = (PTE *)va_to_pa(kptable);
	uint32_t pdir_idx, ptable_idx, pframe_idx;


	for (pdir_idx = 0; pdir_idx < NR_PDE; pdir_idx ++) {
		make_invalid_pde(&pdir[pdir_idx]);
	}

	pframe_idx = 0;
	for (pdir_idx = 0; pdir_idx < PHY_MEM / PD_SIZE; pdir_idx ++) {
		make_pde(&pdir[pdir_idx], ptable);
		make_pde(&pdir[pdir_idx + KOFFSET / PD_SIZE], ptable);
		for (ptable_idx = 0; ptable_idx < NR_PTE; ptable_idx ++) {
			make_pte(ptable, (void*)(pframe_idx << 12));
			pframe_idx ++;
			ptable ++;
		}
	}

	/* point CR3 at the page directory */
	cr3.val = 0;
	cr3.page_directory_base = ((uint32_t)pdir) >> 12;
	write_cr3(&cr3);

	/* set PG bit in CR0 to enable paging */
	cr0.val = read_cr0();
	cr0.paging = 1;
	write_cr0(&cr0);

	/* Now we can access global variables! 
	 * Store CR3 in the global variable for future use. */
	kcr3.val = cr3.val;
}
Example #9
/*
 * gfc_vtop
 *
 * vtop ( vaddr -- paddr.lo paddr.hi)
 */
static int
gfc_vtop(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
{
	int vaddr;
	uint64_t paddr;
	struct fc_resource *ip;

	if (fc_cell2int(cp->nargs) != 1)
		return (fc_syntax_error(cp, "nargs must be 1"));

	if (fc_cell2int(cp->nresults) >= 3)
		return (fc_syntax_error(cp, "nresults must be less than 3"));

	vaddr = fc_cell2int(fc_arg(cp, 0));

	/*
	 * Find if this request matches a mapping resource we set up.
	 */
	fc_lock_resource_list(rp);
	for (ip = rp->head; ip != NULL; ip = ip->next) {
		if (ip->type != RT_CONTIGIOUS)
			continue;
		if (ip->fc_contig_virt == (void *)(uintptr_t)vaddr)
				break;
	}
	fc_unlock_resource_list(rp);

	if (ip == NULL)
		return (fc_priv_error(cp, "request doesn't match a "
		    "known mapping"));


	paddr = va_to_pa((void *)(uintptr_t)vaddr);

	FC_DEBUG2(1, CE_CONT, "gfc_vtop: vaddr=0x%x paddr=0x%x\n",
	    vaddr, paddr);

	cp->nresults = fc_int2cell(2);

	fc_result(cp, 0) = paddr;
	fc_result(cp, 1) = 0;

	return (fc_success_op(ap, rp, cp));
}
Example #10
paddr_t cos_access_page(unsigned long cap_no)
{
	paddr_t addr;

	if (cap_no > COS_MAX_MEMORY) return 0;

	addr = cos_pages[cap_no].addr;
	if (0 == addr) {
		void *r = cos_alloc_page();

		if (NULL == r) {
			printk("cos: could not allocate page for cos memory\n");
			return 0;
		}
		addr = cos_pages[cap_no].addr = (paddr_t)va_to_pa(r);
	}

	return addr;
}
Example #11
/*
 * Set machine specific TSB information
 */
void
sfmmu_setup_tsbinfo(sfmmu_t *sfmmup)
{
	struct tsb_info *tsbinfop;
	hv_tsb_info_t *tdp;

	tsbinfop = sfmmup->sfmmu_tsb;
	if (tsbinfop == NULL) {
		sfmmup->sfmmu_hvblock.hv_tsb_info_pa = (uint64_t)-1;
		sfmmup->sfmmu_hvblock.hv_tsb_info_cnt = 0;
		return;
	}
	tdp = &sfmmup->sfmmu_hvblock.hv_tsb_info[0];
	sfmmup->sfmmu_hvblock.hv_tsb_info_pa = va_to_pa(tdp);
	sfmmup->sfmmu_hvblock.hv_tsb_info_cnt = 1;
	tdp->hvtsb_idxpgsz = TTE8K;
	tdp->hvtsb_assoc = 1;
	tdp->hvtsb_ntte = TSB_ENTRIES(tsbinfop->tsb_szc);
	tdp->hvtsb_ctx_index = 0;
	tdp->hvtsb_pgszs = tsbinfop->tsb_ttesz_mask;
	tdp->hvtsb_rsvd = 0;
	tdp->hvtsb_pa = tsbinfop->tsb_pa;
	if ((tsbinfop = tsbinfop->tsb_next) == NULL)
		return;
	sfmmup->sfmmu_hvblock.hv_tsb_info_cnt++;
	tdp++;
	tdp->hvtsb_idxpgsz = TTE4M;
	tdp->hvtsb_assoc = 1;
	tdp->hvtsb_ntte = TSB_ENTRIES(tsbinfop->tsb_szc);
	tdp->hvtsb_ctx_index = 0;
	tdp->hvtsb_pgszs = tsbinfop->tsb_ttesz_mask;
	tdp->hvtsb_rsvd = 0;
	tdp->hvtsb_pa = tsbinfop->tsb_pa;
	/* Only allow for 2 TSBs */
	ASSERT(tsbinfop->tsb_next == NULL);
}
Example #12
/*
 * This function takes care of pages which are not in kas or need to be
 * taken care of in a special way.  For example, panicbuf pages are not
 * in kas and their pages are allocated via prom_retain().
 */
pgcnt_t
i_cpr_count_special_kpages(int mapflag, bitfunc_t bitfunc)
{
	struct cpr_map_info *pri, *tail;
	pgcnt_t pages, total = 0;
	pfn_t pfn;

	/*
	 * Save information about prom retained panicbuf pages
	 */
	if (bitfunc == cpr_setbit) {
		pri = &cpr_prom_retain[CPR_PANICBUF];
		pri->virt = (cpr_ptr)panicbuf;
		pri->phys = va_to_pa(panicbuf);
		pri->size = sizeof (panicbuf);
	}

	/*
	 * Go through the prom_retain array to tag those pages.
	 */
	tail = &cpr_prom_retain[CPR_PROM_RETAIN_CNT];
	for (pri = cpr_prom_retain; pri < tail; pri++) {
		pages = mmu_btopr(pri->size);
		for (pfn = ADDR_TO_PN(pri->phys); pages--; pfn++) {
			if (pf_is_memory(pfn)) {
				if (bitfunc == cpr_setbit) {
					if ((*bitfunc)(pfn, mapflag) == 0)
						total++;
				} else
					total++;
			}
		}
	}

	return (total);
}
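The per-entry page count above comes from mmu_btopr(), i.e. bytes rounded up to whole pages. Under the usual definition that is just the following; sketch only, assuming PAGESIZE is a power of two:

/* bytes-to-pages, rounding up */
#define BTOPR(bytes)	(((bytes) + PAGESIZE - 1) / PAGESIZE)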
Example #13
File: vmem.c Project: ShijianXu/ICS
void create_video_mapping() {
	/* Create an identity mapping from virtual memory area
	 * [0xa0000, 0xa0000 + SCR_SIZE) to the same physical memory area
	 * for the user program, using the dedicated page table vdtable.
	 */

	PDE *pdir = (PDE *)get_updir();
	PTE *ptable = (PTE *)va_to_pa(vdtable);
	uint32_t pframe_addr = VMEM_ADDR;
	int i;

	/* PDE 0 covers virtual addresses below 4 MB, including the video
	 * memory; it must hold the physical address of the page table. */
	pdir->val = make_pde(ptable);

	/* identity-map the 16 pages at 0xa0000 (PTE indexes 0xa0 - 0xaf) */
	for (i = 0; i <= 0xf; i++) {
		ptable[i + 0xa0].val = make_pte(pframe_addr);
		pframe_addr += PAGE_SIZE;
	}
}
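The loop above fills exactly 16 PTEs (indexes 0xa0 through 0xaf). Expressed in terms of the screen size rather than a hard-coded bound, the count would be the following; a sketch, assuming SCR_SIZE is the frame-buffer size in bytes:

/* number of pages needed to cover [VMEM_ADDR, VMEM_ADDR + SCR_SIZE) */
#define VIDEO_NR_PAGES	((SCR_SIZE + PAGE_SIZE - 1) / PAGE_SIZE)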
Example #14
/*
 * Routine to set up a CPU to prepare for starting it up.
 */
int
setup_cpu_common(int cpuid)
{
	struct cpu *cp = NULL;
	kthread_id_t tp;
#ifdef TRAPTRACE
	int tt_index;
	TRAP_TRACE_CTL	*ctlp;
	caddr_t	newbuf;
#endif /* TRAPTRACE */

	extern void idle();
	int	rval;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu[cpuid] == NULL);

	ASSERT(ncpus <= max_ncpus);

#ifdef TRAPTRACE
	/*
	 * allocate a traptrace buffer for this CPU.
	 */
	ctlp = &trap_trace_ctl[cpuid];
	if (!trap_tr0_inuse) {
		trap_tr0_inuse = 1;
		newbuf = trap_tr0;
		tt_index = -1;
	} else {
		for (tt_index = 0; tt_index < (max_ncpus-1); tt_index++)
			if (!trap_trace_inuse[tt_index])
				break;
		ASSERT(tt_index < max_ncpus - 1);
		trap_trace_inuse[tt_index] = 1;
		newbuf = (caddr_t)(ttrace_buf + (tt_index * TRAP_TSIZE));
	}
	ctlp->d.vaddr_base = newbuf;
	ctlp->d.offset = ctlp->d.last_offset = 0;
	ctlp->d.limit = trap_trace_bufsize;
	ctlp->d.paddr_base = va_to_pa(newbuf);
	ASSERT(ctlp->d.paddr_base != (uint64_t)-1);
#endif /* TRAPTRACE */
	/*
	 * initialize hv traptrace buffer for this CPU
	 */
	mach_htraptrace_setup(cpuid);

	/*
	 * Obtain pointer to the appropriate cpu structure.
	 */
	if (cpu0.cpu_flags == 0) {
		cp = &cpu0;
	} else {
		/*
		 *  When dynamically allocating cpu structs,
		 *  cpus is used as a pointer to a list of freed
		 *  cpu structs.
		 */
		if (cpus) {
			/* grab the first cpu struct on the free list */
			cp = cpus;
			if (cp->cpu_next_free)
				cpus = cp->cpu_next_free;
			else
				cpus = NULL;
		}
	}

	if (cp == NULL)
		cp = vmem_xalloc(static_alloc_arena, CPU_ALLOC_SIZE,
		    CPU_ALLOC_SIZE, 0, 0, NULL, NULL, VM_SLEEP);

	bzero(cp, sizeof (*cp));

	cp->cpu_id = cpuid;
	cp->cpu_self = cp;

	/*
	 * Initialize ptl1_panic stack
	 */
	ptl1_init_cpu(cp);

	/*
	 * Initialize the dispatcher for this CPU.
	 */
	disp_cpu_init(cp);

	/*
	 * Bootstrap the CPU's PG data
	 */
	pg_cpu_bootstrap(cp);

	cpu_vm_data_init(cp);

	/*
	 * Now, initialize per-CPU idle thread for this CPU.
	 */
	tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_ONPROC, -1);

	cp->cpu_idle_thread = tp;

	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	/*
	 * Registering a thread in the callback table is usually
	 * done in the initialization code of the thread. In this
	 * case, we do it right after thread creation to avoid
	 * blocking idle thread while registering itself. It also
	 * avoids the possibility of reregistration in case a CPU
	 * restarts its idle thread.
	 */
	CALLB_CPR_INIT_SAFE(tp, "idle");

	init_cpu_info(cp);

	/*
	 * Initialize the interrupt threads for this CPU
	 */
	cpu_intr_alloc(cp, NINTR_THREADS);

	/*
	 * Add CPU to list of available CPUs.
	 * It'll be on the active list after it is started.
	 */
	cpu_add_unit(cp);

	/*
	 * Allocate and init cpu module private data structures,
	 * including scrubber.
	 */
	cpu_init_private(cp);
	populate_idstr(cp);

	/*
	 * Initialize the CPU's physical ID cache, and processor groups
	 */
	pghw_physid_create(cp);
	(void) pg_cpu_init(cp, B_FALSE);

	if ((rval = cpu_intrq_setup(cp)) != 0) {
		return (rval);
	}

	/*
	 * Initialize MMU context domain information.
	 */
	sfmmu_cpu_init(cp);

	return (0);
}
Example #15
/*
 * Call into the Hypervisor to retrieve the most recent copy of the
 * machine description. If references to the current MD are still active,
 * stow it in the obsolete MD list and update the current MD reference
 * with the new one.
 * The obsolete list contains one MD per generation. If the firmware
 * doesn't support MD generation counts, fail the call.
 */
int
mach_descrip_update(void)
{
	uint64_t	md_size0, md_size;
	uint64_t	md_space = 0;
	uint64_t	hvret;
	caddr_t		tbuf = NULL;
	uint64_t	tbuf_pa;
	uint64_t	tgen;
	int		ret = 0;

	MDP(("MD: Requesting buffer size\n"));

	ASSERT((curr_mach_descrip != NULL));

	mutex_enter(&curr_mach_descrip_lock);

	/*
	 * If the required MD size changes between our first call
	 * to hv_mach_desc (to find the required buf size) and the
	 * second call (to get the actual MD) and our allocated
	 * memory is insufficient, loop until we have allocated
	 * sufficient space.
	 */
	do {
		if (tbuf != NULL)
			(*curr_mach_descrip_memops->buf_freep)(tbuf, md_space);

		md_size0 = 0LL;
		(void) hv_mach_desc((uint64_t)0, &md_size0);
		MDP(("MD: buffer size is %ld\n", md_size0));

		/*
		 * Align allocated space to nearest page.
		 * contig_mem_alloc_align() requires a power of 2 alignment.
		 */
		md_space = P2ROUNDUP(md_size0, PAGESIZE);
		MDP(("MD: allocated space is %ld\n", md_space));

		tbuf = (caddr_t)(*curr_mach_descrip_memops->buf_allocp)
		    (md_space, PAGESIZE);
		if (tbuf == NULL) {
			ret = -1;
			goto done;
		}

		tbuf_pa =  va_to_pa(tbuf);
		md_size = md_space;
		hvret = hv_mach_desc(tbuf_pa, &md_size);
		MDP(("MD: HV return code = %ld\n", hvret));

		/*
		 * We get H_EINVAL if our buffer size is too small. In
		 * that case stay in the loop, reallocate the buffer
		 * and try again.
		 */
		if (hvret != H_EOK && hvret != H_EINVAL) {
			MDP(("MD: Failed with code %ld from HV\n", hvret));
			ret = -1;
			goto done;
		}

	} while (md_space < md_size);

	tgen = mach_descrip_find_md_gen(tbuf);

#ifdef DEBUG
	if (!HAS_GEN(tgen)) {
		MDP(("MD: generation number not found\n"));
	} else
		MDP(("MD: generation number %ld\n", tgen));
#endif /* DEBUG */

	if (curr_mach_descrip->va != NULL) {

		/* check for the same generation number */
		if (HAS_GEN(tgen) && ((curr_mach_descrip->gen == tgen) &&
		    (curr_mach_descrip->size == md_size))) {
#ifdef DEBUG
			/*
			 * Pedantic Check for generation number. If the
			 * generation number is the same, make sure the
			 * MDs are really identical.
			 */
			if (bcmp(curr_mach_descrip->va, tbuf, md_size) != 0) {
				cmn_err(CE_WARN, "machine_descrip_update: MDs "
				    "with the same generation (%ld) are not "
				    "identical", tgen);
				ret = -1;
				goto done;
			}
#endif
			ret = 0;
			goto done;
		}

		/* check for generations moving backwards */
		if (HAS_GEN(tgen) && HAS_GEN(curr_mach_descrip->gen) &&
		    (curr_mach_descrip->gen > tgen)) {
			cmn_err(CE_WARN, "machine_descrip_update: new MD"
			    " older generation (%ld) than current MD (%ld)",
			    tgen, curr_mach_descrip->gen);
			ret = -1;
			goto done;
		}

		if (curr_mach_descrip->refcnt == 0) {

			MDP(("MD: freeing old md buffer gen %ld\n",
			    curr_mach_descrip->gen));

			/* Free old space */
			ASSERT(curr_mach_descrip->space > 0);

			(*curr_mach_descrip_memops->buf_freep)
			    (curr_mach_descrip->va, curr_mach_descrip->space);
		} else {
			if (!HAS_GEN(tgen)) {
				/*
				 * No update support if FW
				 * doesn't have MD generation id
				 * feature.
				 */
				prom_printf("WARNING: F/W does not support MD "
				    "generation count, MD update failed\n");
				ret = -1;
				goto done;
			}

			MDP(("MD: adding to obs list %ld\n",
			    curr_mach_descrip->gen));

			md_obs_list_add(curr_mach_descrip);

			curr_mach_descrip = new_mach_descrip();

			if (curr_mach_descrip == NULL) {
				panic("Allocation for machine description"
				    " failed\n");
			}
		}
	}

	curr_mach_descrip->va = tbuf;
	curr_mach_descrip->gen = tgen;
	curr_mach_descrip->size = md_size;
	curr_mach_descrip->space = md_space;

#ifdef MACH_DESC_DEBUG
	dump_buf((uint8_t *)curr_mach_descrip->va, md_size);
#endif /* MACH_DESC_DEBUG */

	mutex_exit(&curr_mach_descrip_lock);
	return (ret);

done:
	if (tbuf != NULL)
		(*curr_mach_descrip_memops->buf_freep)(tbuf, md_space);
	mutex_exit(&curr_mach_descrip_lock);
	return (ret);
}
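Stripped of the hypervisor and MD bookkeeping, the allocation loop above follows a common query/allocate/retry pattern. A hedged, generic sketch of just that pattern; query_needed_size(), fill_buffer() and round_up() are hypothetical stand-ins for the two hv_mach_desc() calls and P2ROUNDUP(), passed in so the sketch stays self-contained:

#include <stdlib.h>

/*
 * fill_buffer(buf, &need) is assumed to update 'need' with the size the
 * producer currently wants and to return 0 both on success and when the
 * buffer was merely too small; the do/while retries in the too-small case.
 */
static void *
grab_with_retry(size_t (*query_needed_size)(void),
    int (*fill_buffer)(void *, size_t *), size_t (*round_up)(size_t),
    size_t *sizep)
{
	void *buf = NULL;
	size_t space = 0, need;

	do {
		free(buf);
		need = query_needed_size();	/* first call: size only */
		space = round_up(need);
		if ((buf = malloc(space)) == NULL)
			return (NULL);
		need = space;
		if (fill_buffer(buf, &need) != 0) {	/* hard failure */
			free(buf);
			return (NULL);
		}
	} while (space < need);		/* producer grew meanwhile: retry */

	*sizep = need;
	return (buf);
}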
Example #16
/*
 * Copy regs from parent to child.
 */
void
lwp_forkregs(klwp_t *lwp, klwp_t *clwp)
{
	kthread_t *t, *pt = lwptot(lwp);
	struct machpcb *mpcb = lwptompcb(clwp);
	struct machpcb *pmpcb = lwptompcb(lwp);
	kfpu_t *fp, *pfp = lwptofpu(lwp);
	caddr_t wbuf;
	uint_t wstate;

	t = mpcb->mpcb_thread;
	/*
	 * remember child's fp and wbuf since they will get erased during
	 * the bcopy.
	 */
	fp = mpcb->mpcb_fpu;
	wbuf = mpcb->mpcb_wbuf;
	wstate = mpcb->mpcb_wstate;
	/*
	 * Don't copy mpcb_frame since we hand-crafted it
	 * in thread_load().
	 */
	bcopy(lwp->lwp_regs, clwp->lwp_regs, sizeof (struct machpcb) - REGOFF);
	mpcb->mpcb_thread = t;
	mpcb->mpcb_fpu = fp;
	fp->fpu_q = mpcb->mpcb_fpu_q;

	/*
	 * It is theoretically possible for the lwp's wstate to
	 * be different from its value assigned in lwp_stk_init,
	 * since lwp_stk_init assumed the data model of the process.
	 * Here, we took on the data model of the cloned lwp.
	 */
	if (mpcb->mpcb_wstate != wstate) {
		if (wstate == WSTATE_USER32) {
			kmem_cache_free(wbuf32_cache, wbuf);
			wbuf = kmem_cache_alloc(wbuf64_cache, KM_SLEEP);
			wstate = WSTATE_USER64;
		} else {
			kmem_cache_free(wbuf64_cache, wbuf);
			wbuf = kmem_cache_alloc(wbuf32_cache, KM_SLEEP);
			wstate = WSTATE_USER32;
		}
	}

	mpcb->mpcb_pa = va_to_pa(mpcb);
	mpcb->mpcb_wbuf = wbuf;
	mpcb->mpcb_wbuf_pa = va_to_pa(wbuf);

	ASSERT(mpcb->mpcb_wstate == wstate);

	if (mpcb->mpcb_wbcnt != 0) {
		bcopy(pmpcb->mpcb_wbuf, mpcb->mpcb_wbuf,
		    mpcb->mpcb_wbcnt * ((mpcb->mpcb_wstate == WSTATE_USER32) ?
		    sizeof (struct rwindow32) : sizeof (struct rwindow64)));
	}

	if (pt == curthread)
		pfp->fpu_fprs = _fp_read_fprs();
	if ((pfp->fpu_en) || (pfp->fpu_fprs & FPRS_FEF)) {
		if (pt == curthread && fpu_exists) {
			save_gsr(clwp->lwp_fpu);
		} else {
			uint64_t gsr;
			gsr = get_gsr(lwp->lwp_fpu);
			set_gsr(gsr, clwp->lwp_fpu);
		}
		fp_fork(lwp, clwp);
	}
}
Example #17
void
sc_create(pci_t *pci_p)
{
	dev_info_t *dip = pci_p->pci_dip;
	sc_t *sc_p;
	uint64_t paddr;

#ifdef lint
	dip = dip;
#endif

	if (!pci_stream_buf_exists)
		return;

	/*
	 * Allocate streaming cache state structure and link it to
	 * the pci state structure.
	 */
	sc_p = (sc_t *)kmem_zalloc(sizeof (sc_t), KM_SLEEP);
	pci_p->pci_sc_p = sc_p;
	sc_p->sc_pci_p = pci_p;

	pci_sc_setup(sc_p);
	sc_p->sc_sync_reg_pa = va_to_pa((char *)sc_p->sc_sync_reg);

	DEBUG3(DBG_ATTACH, dip, "sc_create: ctrl=%x, invl=%x, sync=%x\n",
		sc_p->sc_ctrl_reg, sc_p->sc_invl_reg,
		sc_p->sc_sync_reg);
	DEBUG2(DBG_ATTACH, dip, "sc_create: ctx_invl=%x ctx_match=%x\n",
		sc_p->sc_ctx_invl_reg, sc_p->sc_ctx_match_reg);
	DEBUG3(DBG_ATTACH, dip,
		"sc_create: data_diag=%x, tag_diag=%x, ltag_diag=%x\n",
		sc_p->sc_data_diag_acc, sc_p->sc_tag_diag_acc,
		sc_p->sc_ltag_diag_acc);

	/*
	 * Allocate the flush/sync buffer.  Make sure it's properly
	 * aligned.
	 */
	sc_p->sc_sync_flag_base =
	    vmem_xalloc(static_alloc_arena, PCI_SYNC_FLAG_SIZE,
		PCI_SYNC_FLAG_SIZE, 0, 0, NULL, NULL, VM_SLEEP);
	sc_p->sc_sync_flag_vaddr = (uint64_t *)sc_p->sc_sync_flag_base;
	paddr = (uint64_t)hat_getpfnum(kas.a_hat,
	    (caddr_t)sc_p->sc_sync_flag_vaddr);
	paddr <<= MMU_PAGESHIFT;
	paddr += (uint64_t)
	    ((uintptr_t)sc_p->sc_sync_flag_vaddr & ~MMU_PAGEMASK);
	sc_p->sc_sync_flag_pa = paddr;
	DEBUG2(DBG_ATTACH, dip, "sc_create: sync buffer - vaddr=%x paddr=%x\n",
	    sc_p->sc_sync_flag_vaddr, sc_p->sc_sync_flag_pa);

	/*
	 * Create a mutex to go along with it.  While the mutex is held,
	 * all interrupts should be blocked.  This will prevent driver
	 * interrupt routines from attempting to acquire the mutex while
	 * held by a lower priority interrupt routine.  Note also that
	 * we now block cross calls as well, to prevent issues with
	 * relocation.
	 */
	mutex_init(&sc_p->sc_sync_mutex, NULL, MUTEX_DRIVER,
	    (void *)ipltospl(XCALL_PIL));

	sc_configure(sc_p);
}
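Note that the physical address of the sync flag is derived by hand from hat_getpfnum() rather than via va_to_pa(): page frame number shifted up, plus the offset within the page. A small sketch of that arithmetic, assuming the 8 KB base page (MMU_PAGESHIFT == 13) used on these machines; illustration only:

#include <stdint.h>

#define PAGESHIFT	13				/* 8 KB base page */
#define PAGEOFFSET	((1ULL << PAGESHIFT) - 1)

static uint64_t
pfn_plus_offset(uint64_t pfn, uintptr_t va)
{
	return ((pfn << PAGESHIFT) | ((uint64_t)va & PAGEOFFSET));
}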
Example #18
static int
pxtool_phys_access(px_t *px_p, uintptr_t dev_addr,
    uint64_t *data_p, boolean_t is_big_endian, boolean_t is_write)
{
	uint64_t rfunc, pfunc;
	uint64_t rdata_addr, pdata_addr;
	uint64_t to_addr, from_addr;
	uint64_t local_data;
	int rval;
	dev_info_t *dip = px_p->px_dip;

	DBG(DBG_TOOLS, dip,
	    "pxtool_phys_access: dev_addr:0x%" PRIx64 "\n", dev_addr);
	DBG(DBG_TOOLS, dip, "    data_addr:0x%" PRIx64 ", is_write:%s\n",
	    data_p, (is_write ? "yes" : "no"));

	if (pxtool_hyp_version != PXTOOL_HYP_VER_OK) {
		pxtool_validate_diag_hyp_svc(dip, &pxtool_hyp_version);
		if (pxtool_hyp_version != PXTOOL_HYP_VER_OK) {
			DBG(DBG_TOOLS, dip, "Couldn't validate diag hyp svc\n");
			return (EPERM);
		}
	}

	if ((rfunc = va_to_pa((void *)px_phys_acc_4v))  == (uint64_t)-1) {
		DBG(DBG_TOOLS, dip, "Error getting real addr for function\n");
		return (EIO);
	}

	if ((pfunc = hv_ra2pa(rfunc)) == -1) {
		DBG(DBG_TOOLS, dip, "Error getting phys addr for function\n");
		return (EIO);
	}

	if ((rdata_addr = va_to_pa((void *)&local_data))  == (uint64_t)-1) {
		DBG(DBG_TOOLS, dip, "Error getting real addr for data_p\n");
		return (EIO);
	}

	if ((pdata_addr = hv_ra2pa(rdata_addr)) == -1) {
		DBG(DBG_TOOLS, dip, "Error getting phys addr for data ptr\n");
		return (EIO);
	}

	if (is_write) {
		to_addr = dev_addr;
		from_addr = pdata_addr;

		if (is_big_endian)
			local_data = *data_p;
		else
			local_data =
			    pxtool_swap_endian(*data_p, sizeof (uint64_t));
	} else {
		to_addr = pdata_addr;
		from_addr = dev_addr;
	}

	rval = hv_hpriv((void *)pfunc, from_addr, to_addr, NULL);
	switch (rval) {
	case H_ENOACCESS:	/* Returned by non-debug hypervisor. */
		rval = ENOTSUP;
		break;
	case H_EOK:
		rval = SUCCESS;
		break;
	default:
		rval = EIO;
		break;
	}

	if ((rval == SUCCESS) && (!is_write)) {
		if (is_big_endian)
			*data_p = local_data;
		else
			*data_p =
			    pxtool_swap_endian(local_data, sizeof (uint64_t));
	}

	return (rval);
}
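When the caller asks for little-endian data, the value is byte-swapped on either side of the hypervisor call. What pxtool_swap_endian(x, sizeof (uint64_t)) amounts to is a plain 64-bit byte reversal; a sketch under that assumption, since the kernel has its own helper:

#include <stdint.h>

static uint64_t
swap64(uint64_t x)
{
	x = ((x & 0x00FF00FF00FF00FFULL) << 8) | ((x >> 8) & 0x00FF00FF00FF00FFULL);
	x = ((x & 0x0000FFFF0000FFFFULL) << 16) | ((x >> 16) & 0x0000FFFF0000FFFFULL);
	return ((x << 32) | (x >> 32));
}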
Example #19
int
cpu_intrq_setup(struct cpu *cpu)
{
	struct machcpu *mcpup = &cpu->cpu_m;
	size_t size;

	/*
	 * This routine will return with an error return if any
	 * contig_mem_alloc() fails.  It is expected that the caller will
	 * call cpu_intrq_cleanup() (or cleanup_cpu_common() which will).
	 * That will cleanly free only those blocks that were alloc'd.
	 */

	/*
	 * Allocate mondo data for xcalls.
	 */
	mcpup->mondo_data = contig_mem_alloc(INTR_REPORT_SIZE);

	if (mcpup->mondo_data == NULL) {
		cmn_err(CE_NOTE, "cpu%d: cpu mondo_data allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	/*
	 * va_to_pa() is too expensive to call for every crosscall
	 * so we do it here at init time and save it in machcpu.
	 */
	mcpup->mondo_data_ra = va_to_pa(mcpup->mondo_data);

	/*
	 *  Allocate a per-cpu list of ncpu_guest_max CPU IDs for xcalls
	 */
	size = ncpu_guest_max * sizeof (uint16_t);
	if (size < INTR_REPORT_SIZE)
		size = INTR_REPORT_SIZE;

	/*
	 * contig_mem_alloc() requires size to be a power of 2.
	 * Increase size to a power of 2 if necessary.
	 */
	if ((size & (size - 1)) != 0) {
		size = 1 << highbit(size);
	}

	mcpup->cpu_list = contig_mem_alloc(size);

	if (mcpup->cpu_list == NULL) {
		cmn_err(CE_NOTE, "cpu%d: cpu cpu_list allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_list_ra = va_to_pa(mcpup->cpu_list);

	/*
	 * Allocate sun4v interrupt and error queues.
	 */
	size = cpu_q_entries * INTR_REPORT_SIZE;

	mcpup->cpu_q_va = contig_mem_alloc(size);

	if (mcpup->cpu_q_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: cpu intrq allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_q_base_pa = va_to_pa(mcpup->cpu_q_va);
	mcpup->cpu_q_size = size;

	/*
	 * Allocate device queues
	 */
	size = dev_q_entries * INTR_REPORT_SIZE;

	mcpup->dev_q_va = contig_mem_alloc(size);

	if (mcpup->dev_q_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: dev intrq allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->dev_q_base_pa = va_to_pa(mcpup->dev_q_va);
	mcpup->dev_q_size = size;

	/*
	 * Allocate resumable queue and its kernel buffer
	 */
	size = cpu_rq_entries * Q_ENTRY_SIZE;

	mcpup->cpu_rq_va = contig_mem_alloc(2 * size);

	if (mcpup->cpu_rq_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: resumable queue allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_rq_base_pa = va_to_pa(mcpup->cpu_rq_va);
	mcpup->cpu_rq_size = size;
	/* zero out the memory */
	bzero(mcpup->cpu_rq_va, 2 * size);

	/*
	 * Allocate non-resumable queues
	 */
	size = cpu_nrq_entries * Q_ENTRY_SIZE;

	mcpup->cpu_nrq_va = contig_mem_alloc(2 * size);

	if (mcpup->cpu_nrq_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: nonresumable queue allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_nrq_base_pa = va_to_pa(mcpup->cpu_nrq_va);
	mcpup->cpu_nrq_size = size;
	/* zero out the memory */
	bzero(mcpup->cpu_nrq_va, 2 * size);

	return (0);
}
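contig_mem_alloc() needs a power-of-two size, and the code above rounds up with 1 << highbit(size). An equivalent, hedged sketch that does not rely on highbit():

#include <stdint.h>

/* round a nonzero 32-bit size up to the next power of two; sizes that are
 * already powers of two are returned unchanged, as in the code above */
static uint32_t
roundup_pow2(uint32_t size)
{
	uint32_t p = 1;

	while (p < size)
		p <<= 1;
	return (p);
}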
Example #20
uint64_t *
lpad_setup(int cpuid, uint64_t pc, uint64_t arg)
{
	lpad_t		*lpp;
	uint64_t	textsz;
	uint64_t	datasz;
	lpad_data_t	*lpd;
	lpad_map_t	*lpm;

	/* external parameters */
	extern caddr_t	textva;
	extern caddr_t	datava;
	extern tte_t	ktext_tte;
	extern tte_t	kdata_tte;
	extern caddr_t	mmu_fault_status_area;

	LPAD_DBG("lpad_setup...\n");

	if ((cpuid < 0) || (cpuid >= NCPU)) {
		cmn_err(CE_PANIC, "lpad_setup: invalid cpuid");
	}

	/* allocate our landing pad */
	if ((lpp = lpad_alloc()) == NULL) {
		cmn_err(CE_PANIC, "lpad_setup: unable to allocate lpad");
	}

	/* calculate the size of our text */
	textsz = (uint64_t)mach_cpu_startup_end - (uint64_t)mach_cpu_startup;

	LPAD_DBG("lpad textsz=%ld\n", textsz);

	ASSERT(textsz <= LPAD_TEXT_SIZE);

	/* copy over text section */
	bcopy((void *)mach_cpu_startup, lpp->buf, textsz);

	lpd = (lpad_data_t *)(((caddr_t)lpp->buf) + LPAD_TEXT_SIZE);
	lpm = (lpad_map_t *)lpd->map;

	ASSERT(mmu_fault_status_area);

	bzero(lpd, LPAD_DATA_SIZE);	/* clear the whole data area */
	lpd->magic = LPAD_MAGIC_VAL;
	lpd->inuse = &(lpp->inuse);
	lpd->mmfsa_ra = va_to_pa(mmu_fault_status_area) + (MMFSA_SIZE * cpuid);
	lpd->pc = pc;
	lpd->arg = arg;

	/*
	 * List of mappings:
	 *
	 *    - permanent inst/data mapping for kernel text
	 *    - permanent data mapping for kernel data
	 *    - non-permanent inst mapping for kernel data,
	 *	required for landing pad text
	 */
	lpd->nmap = 3;

	/* verify the lpad has enough room for the data */
	datasz = sizeof (lpad_data_t);
	datasz += (lpd->nmap - 1) * sizeof (lpad_map_t);

	ASSERT(datasz <= LPAD_DATA_SIZE);

	/*
	 * Kernel Text Mapping
	 */
	lpm->va = (uint64_t)textva;
	lpm->tte = ktext_tte;
	lpm->flag_mmuflags = (MAP_ITLB | MAP_DTLB);
	lpm->flag_perm = 1;
	lpm++;

	/*
	 * Kernel Data Mapping
	 */
	lpm->va = (uint64_t)datava;
	lpm->tte = kdata_tte;
	lpm->flag_mmuflags = MAP_DTLB;
	lpm->flag_perm = 1;
	lpm++;

	/*
	 * Landing Pad Text Mapping
	 *
	 * Because this mapping should not be permanent,
	 * the permanent mapping above cannot be used.
	 */
	lpm->va = (uint64_t)datava;
	lpm->tte = kdata_tte;
	lpm->flag_mmuflags = MAP_ITLB;
	lpm->flag_perm = 0;
	lpm++;

	ASSERT(((uint64_t)lpm - (uint64_t)lpd) == datasz);

	LPAD_DBG("copied %ld bytes of data into lpad\n", datasz);

	LPAD_DUMP_DATA((uint64_t *)lpd, (uint64_t *)lpm);

	return (lpp->buf);
}
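The datasz arithmetic above (sizeof (lpad_data_t) plus nmap - 1 extra lpad_map_t entries) only adds up if lpad_data_t already ends with the first map slot. A hedged sketch of that assumed layout; these are not the kernel's actual definitions:

#include <stdint.h>

typedef struct {
	uint64_t	va;
	uint64_t	tte;
	uint64_t	flags;
} map_slot_t;

typedef struct {
	uint64_t	magic;
	uint64_t	*inuse;
	uint64_t	mmfsa_ra;
	uint64_t	pc;
	uint64_t	arg;
	uint64_t	nmap;
	map_slot_t	map[1];		/* first mapping lives in the header */
} data_hdr_t;

/* N mappings therefore need
 *	sizeof (data_hdr_t) + (N - 1) * sizeof (map_slot_t)
 * bytes, which is exactly how datasz is computed above. */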
Example #21
static void
sunxi_set_bg(const char *path)
{
  char errbuf[128];
  image_meta_t im = {0};
  unsigned long args[4] = {0};
  pixmap_t *pm;
  int width = 1280, height = 720;
  int r;

  return; /* early return: the background-image setup below is currently disabled */

  // hum
  im.im_req_width  = width;
  im.im_req_height = height;

  rstr_t *rpath = rstr_alloc(path);

  pm = backend_imageloader(rpath, &im, NULL, errbuf, sizeof(errbuf),
			   NULL, NULL, NULL);
  rstr_release(rpath);

  if(pm == NULL) {
    TRACE(TRACE_ERROR, "BG", "Unable to load %s -- %s", path, errbuf);
    return;
  }

  int bpp;

  switch(pm->pm_type) {
  case PIXMAP_RGB24:
    bpp = 3;
    break;
  case PIXMAP_BGR32:
    bpp = 4;
    break;
  default:
    abort();
  }


  size_t tsize = pm->pm_height * pm->pm_linesize;

  hts_mutex_lock(&sunxi.gfxmem_mutex);
  uint8_t *dst = tlsf_memalign(sunxi.gfxmem, 1024, tsize);
  hts_mutex_unlock(&sunxi.gfxmem_mutex);
  memcpy(dst, pm->pm_pixels, tsize);

  pixmap_release(pm);

  __disp_video_fb_t   frmbuf;
  memset(&frmbuf, 0, sizeof(__disp_video_fb_t));
  frmbuf.addr[0] = va_to_pa(dst);
  frmbuf.addr[1] = va_to_pa(dst);
  frmbuf.addr[2] = va_to_pa(dst);

  args[1] = DISP_LAYER_WORK_MODE_NORMAL;
  int hlay = ioctl(sunxi.dispfd, DISP_CMD_LAYER_REQUEST, args);
  if(hlay == -1)
    exit(3);

  __disp_layer_info_t l;
  memset(&l, 0, sizeof(l));
    
  l.mode = DISP_LAYER_WORK_MODE_NORMAL;
  l.pipe = 1;

  l.fb.size.width  = pm->pm_linesize / bpp;
  l.fb.size.height = pm->pm_height;
  l.fb.addr[0] = frmbuf.addr[0];
  l.fb.addr[1] = frmbuf.addr[1];
  l.fb.addr[2] = frmbuf.addr[2];

  switch(pm->pm_type) {
  case PIXMAP_RGB24:
    l.fb.format = DISP_FORMAT_RGB888;
    l.fb.br_swap       = 1;
    l.fb.mode  = DISP_MOD_INTERLEAVED;
    break;
  case PIXMAP_BGR32:
    l.fb.format = DISP_FORMAT_ARGB8888;
    l.fb.br_swap       = 1;
    l.fb.mode  = DISP_MOD_INTERLEAVED;
    break;
  default:
    abort();
  }

  ///  l.fb.seq   = 0;
  //  l.fb.mode   = DISP_MOD_NON_MB_PLANAR;
  //  l.fb.format = DISP_FORMAT_YUV420;

  l.ck_enable        = 0;
  l.alpha_en         = 1;
  l.alpha_val        = 0;
  l.src_win.x        = 0;
  l.src_win.y        = 0;
  l.src_win.width    = width;
  l.src_win.height   = height;
  l.scn_win.x        = 0;
  l.scn_win.y        = 0;
  l.scn_win.width    = width;
  l.scn_win.height   = height;
    
  args[1] = hlay;
  args[2] = (__u32)&l;
  args[3] = 0;
  r = ioctl(sunxi.dispfd,DISP_CMD_LAYER_SET_PARA,(void*)args);
  if(r)
    perror("ioctl(disphd,DISP_CMD_LAYER_SET_PARA)");
 
  args[1] = hlay;
  args[2] = 0;
  r = ioctl(sunxi.dispfd,DISP_CMD_LAYER_OPEN,(void*)args);
  if(r)
    perror("ioctl(disphd,DISP_CMD_LAYER_OPEN)");

  bg_open = 1;

  args[1] = hlay;
  if(ioctl(sunxi.dispfd, DISP_CMD_LAYER_BOTTOM, args))
    perror("ioctl(disphd,DISP_CMD_LAYER_BOTTOM)");

  bg_layer = hlay;
}