/*
 * path_normalize - return a freshly allocated, normalized copy of an
 * absolute path: empty and "." elements are dropped and each ".."
 * element peels off the preceding one.  Returns 0 if ".." would
 * ascend past the root.
 */
char *
path_normalize( char *path )
{
    pem_t *pemp = pem_alloc( path );    /* path element iterator */
    pa_t *pap = pa_alloc( );            /* normalized-path accumulator */
    char *pep;
    char *npath;

    ASSERT( path[ 0 ] == '/' );     /* only absolute paths are handled */

    while ( ( pep = pem_next( pemp )) != 0 ) {
        if ( ! strcmp( pep, "" )) {
            /* consecutive slashes yield empty elements; drop them */
            free( ( void * )pep );
            continue;
        }
        if ( ! strcmp( pep, "." )) {
            free( ( void * )pep );
            continue;
        }
        if ( ! strcmp( pep, ".." )) {
            int ok;

            free( ( void * )pep );
            ok = pa_peel( pap );    /* back up one element */
            if ( ! ok ) {
                /* ".." at the root: the path is invalid */
                pa_free( pap );
                pem_free( pemp );
                return 0;
            }
            continue;
        }
        pa_append( pap, pep );
    }

    npath = pa_gen( pap );  /* build the normalized string */
    pa_free( pap );
    pem_free( pemp );
    return npath;
}
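/*
 * Illustrative sketch, not part of the original source: the same
 * normalization algorithm re-expressed with only the standard C
 * library, since the pem_* (path element iterator) and pa_* (path
 * accumulator) helpers are defined elsewhere.  demo_normalize() and
 * its fixed component limit are assumptions made for the example.
 * Empty and "." elements are dropped, ".." pops an element, and
 * popping past the root fails with 0, matching path_normalize().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_MAXCOMP 64     /* arbitrary limit for the sketch */

static char *
demo_normalize( const char *path )
{
    char *copy;
    char *stack[ DEMO_MAXCOMP ];
    size_t depth = 0;
    size_t len = 2;     /* leading '/' plus NUL */
    size_t ix;
    char *pep;
    char *npath;

    if ( path[ 0 ] != '/' ) {
        return 0;   /* only absolute paths, as in the original */
    }
    copy = malloc( strlen( path ) + 1 );
    if ( copy == 0 ) {
        return 0;
    }
    strcpy( copy, path );
    for ( pep = strtok( copy, "/" ) ;
          pep != 0 ;
          pep = strtok( 0, "/" )) {
        if ( ! strcmp( pep, "." )) {
            continue;   /* strtok already skips empty elements */
        }
        if ( ! strcmp( pep, ".." )) {
            if ( depth == 0 ) {
                free( copy );
                return 0;   /* ".." at the root */
            }
            depth--;
            continue;
        }
        if ( depth == DEMO_MAXCOMP ) {
            free( copy );
            return 0;   /* sketch limit exceeded */
        }
        stack[ depth++ ] = pep;
    }
    for ( ix = 0 ; ix < depth ; ix++ ) {
        len += strlen( stack[ ix ] ) + 1;
    }
    npath = malloc( len );
    if ( npath ) {
        strcpy( npath, depth ? "" : "/" );
        for ( ix = 0 ; ix < depth ; ix++ ) {
            strcat( npath, "/" );
            strcat( npath, stack[ ix ] );
        }
    }
    free( copy );
    return npath;
}

int
main( void )
{
    char *npath = demo_normalize( "/a/./b//../c" );

    printf( "%s\n", npath ? npath : "(invalid)" );  /* prints /a/c */
    free( npath );
    return 0;
}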
int
cpu_pte_manipulate(struct mm_pte_manipulate *data) {
    pte_t               **pdep;
    pte_t               *pdep_l2base;
    pte_t               *ptep;
    pte_t               pte;
    pte_t               orig_pte;
    uintptr_t           l2_vaddr;
    unsigned            bits;
    unsigned            l1bits;
    pte_t               **l1pagetable;
    struct pa_quantum   *pq;
    unsigned            pa_status;
    int                 flushed;
    ADDRESS             *adp;
    int                 r;
    part_id_t           mpid;
    PROCESS             *prp;

    // Assume alignment has been checked by the caller
    adp = data->adp;
    if(adp == NULL) crash();
    l1pagetable = adp->cpu.pgdir;

    // Work out the PTE protection/attribute bits and the L1 entry bits
    // once, before walking the range.
    l1bits = 0x1; // validity bit
    if(data->op & PTE_OP_BAD) {
        bits = PPC800_RPN_CI;
    } else if(data->prot & (PROT_READ|PROT_WRITE|PROT_EXEC)) {
        bits = 0xf1;
        //RUSH3: if PTE_OP_TEMP, mark PTE as accessible from procnto only if possible
        if(data->prot & PROT_WRITE) {
            bits |= (0x2<<PPC800_RPN_PP1_SHIFT) | (0x1<<PPC800_RPN_PP2_SHIFT);
        } else if(data->prot & (PROT_READ|PROT_EXEC)) {
            bits |= 0x3<<PPC800_RPN_PP1_SHIFT;
        }
        if(data->shmem_flags & SHMCTL_HAS_SPECIAL) {
            if(data->special & ~PPC_SPECIAL_MASK) {
                return EINVAL;
            }
            //RUSH1: If PPC_SPECIAL_E/W/M/G is on, should I report an error?
            if((data->special & PPC_SPECIAL_I)) {
                bits |= PPC800_RPN_CI;  // cache inhibited
            }
        }
        if(data->prot & PROT_NOCACHE) {
            bits |= PPC800_RPN_CI;
            l1bits |= PPC800_TWC_G; // guarded
        }
    } else {
        bits = 0;
    }

    r = EOK;
    flushed = 0;
    prp = adp ? object_from_data(adp, address_cookie) : NULL;
    mpid = mempart_getid(prp, sys_memclass_id);
    for( ;; ) {
        if(data->start >= data->end) break;
        pdep = &l1pagetable[L1PAGEIDX(data->start)];
        l2_vaddr = (uintptr_t)*pdep;
        if(l2_vaddr == 0) {
            // No L2 page table for this directory entry yet
            memsize_t   resv = 0;

            if(!(data->op & (PTE_OP_MAP|PTE_OP_PREALLOC|PTE_OP_BAD))) {
                // Move vaddr to next page directory
                data->start = (data->start + PDE_SIZE) & ~(PDE_SIZE - 1);
                if(data->start == 0) data->start = ~0;
                continue;
            }
            if(MEMPART_CHK_and_INCR(mpid, __PAGESIZE, &resv) != EOK) {
                return ENOMEM;
            }
            pq = pa_alloc(__PAGESIZE, __PAGESIZE, 0, 0, &pa_status, restrict_proc, resv);
            if(pq == NULL) {
                MEMPART_UNDO_INCR(mpid, __PAGESIZE, resv);
                return ENOMEM;
            }
            MEMCLASS_PID_USE(prp, mempart_get_classid(mpid), __PAGESIZE);
            pq->flags |= PAQ_FLAG_SYSTEM;
            // Chain the new table on the address space's L2 list
            pq->u.inuse.next = adp->cpu.l2_list;
            adp->cpu.l2_list = pq;
            l2_vaddr = pa_quantum_to_paddr(pq);
            if(pa_status & PAA_STATUS_NOT_ZEROED) {
                zero_page((uint32_t *)l2_vaddr, __PAGESIZE, NULL);
            }
        }
        *pdep = (pte_t *)(l2_vaddr | l1bits);
        if(data->op & PTE_OP_PREALLOC) {
            // Move vaddr to next page directory
            data->start = (data->start + PDE_SIZE) & ~(PDE_SIZE - 1);
            if(data->start == 0) data->start = ~0;
            continue;
        }
        pdep_l2base = PDE_ADDR(*pdep);
        ptep = &(pdep_l2base[L2PAGEIDX(data->start)]);
        orig_pte = *ptep;
        if(data->op & (PTE_OP_MAP|PTE_OP_BAD)) {
            pte = data->paddr | bits;
        } else if(data->op & PTE_OP_UNMAP) {
            pte = 0;
        } else if(orig_pte & (0xfff & ~PPC800_RPN_CI)) {
            // PTE_OP_PROT
            pte = (orig_pte & ~0xfff) | bits;
        } else {
            // We don't change PTE permissions if we haven't mapped the
            // page yet...
            pte = orig_pte;
        }
        *ptep = pte;
        if((orig_pte != 0) && (pte != orig_pte)) {
            // Invalidate the stale TLB entry for this page
            flushed = 1;
            ppc_tlbie(data->start);
        }

        data->start += __PAGESIZE;
        data->paddr += __PAGESIZE;

        if((data->op & PTE_OP_PREEMPT) && KerextNeedPreempt()) {
            r = EINTR;
            break;
        }
    }
    if(flushed) {
        ppc_isync();
    }
    return r;
}
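/*
 * Illustrative sketch, not part of the original source: how a 32-bit
 * virtual address decomposes for the two-level walk performed above.
 * The DEMO_* names and the 4K-page / 4M-per-directory-entry geometry
 * are assumptions standing in for the target's real L1PAGEIDX(),
 * L2PAGEIDX(), PDE_SIZE and __PAGESIZE definitions.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGESIZE   0x1000u     /* assumed 4K pages */
#define DEMO_PDE_SIZE   0x400000u   /* assumed 4M per L1 entry */
#define DEMO_L1PAGEIDX(v)   ((uint32_t)(v) >> 22)           /* top 10 bits */
#define DEMO_L2PAGEIDX(v)   (((uint32_t)(v) >> 12) & 0x3ffu)    /* next 10 bits */

int
main(void) {
    uint32_t vaddr = 0x30021000u;

    printf("vaddr 0x%x -> L1 index %u, L2 index %u, page offset 0x%x\n",
            (unsigned)vaddr,
            (unsigned)DEMO_L1PAGEIDX(vaddr),
            (unsigned)DEMO_L2PAGEIDX(vaddr),
            (unsigned)(vaddr & (DEMO_PAGESIZE - 1)));
    // The loop's "move vaddr to next page directory" step is the same
    // advance-to-the-next-boundary arithmetic used above:
    printf("next page directory starts at 0x%x\n",
            (unsigned)((vaddr + DEMO_PDE_SIZE) & ~(DEMO_PDE_SIZE - 1)));
    return 0;
}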
int
cpu_vmm_mcreate(PROCESS *prp) {
    ADDRESS             *adp = prp->memory;
    struct pa_quantum   *pq;
    int                 asid;
    unsigned            status;
    paddr_t             pa;
    memsize_t           resv = 0;
    part_id_t           mpid = mempart_getid(prp, sys_memclass_id);

    /*
     * WARNING: we are currently called with kernel locked so no other
     * locks are required to manipulate asid_map[]
     */
    for (asid = 0; asid < MAX_ASID; asid++) {
        if (asid_map[asid] == 0) {
            break;
        }
    }
    if (asid == MAX_ASID) {
        return EAGAIN;
    }
    asid_map[asid] = adp;
    adp->cpu.asid = asid;

    /*
     * Allocate 8K L1 table to map 00000000-7fffffff
     *
     * The pte entries are marked non-global so we can map the L1 table
     * at ARM_V6_USER_L1 without requiring a TLB flush on context switches
     */
    if (MEMPART_CHK_and_INCR(mpid, 2*__PAGESIZE, &resv) != EOK) {
        asid_map[adp->cpu.asid] = 0;    /* release the ASID claimed above */
        return ENOMEM;
    }
    pq = pa_alloc(2*__PAGESIZE, 2*__PAGESIZE, PAQ_COLOUR_NONE, PAA_FLAG_CONTIG, &status, restrict_proc, resv);
    if (pq == NULL) {
        MEMPART_UNDO_INCR(mpid, 2*__PAGESIZE, resv);
        goto fail1;
    }
    MEMCLASS_PID_USE(prp, mempart_get_classid(mpid), 2*__PAGESIZE);
    pa = pa_quantum_to_paddr(pq);
    adp->cpu.l1_pq = pq;
    adp->cpu.l1_pte = pa | l2_prot | ARM_PTE_V6_nG;
    if (status & PAA_STATUS_NOT_ZEROED) {
        ptzero(pq);
        ptzero(pq + 1);
    }

    /*
     * FIXME_v6: some of the TTBR bits might be cpu specific?
     */
#ifdef VARIANT_smp
    adp->cpu.ttbr0 = pa | ARM_MMU_TTBR_S;
#else
    adp->cpu.ttbr0 = pa;
#endif

    /*
     * Allocate 4K page directory
     * Note that we only really use 2K (to map 00000000-7fffffff)
     *
     * The pte entries are marked non-global so we can map the page
     * directory at ARM_UPTE_BASE without requiring a TLB flush on
     * context switches
     */
    if (MEMPART_CHK_and_INCR(mpid, __PAGESIZE, &resv) != EOK) {
        /* undo the L1 allocation and release the ASID */
        pa_free(adp->cpu.l1_pq, 2, MEMPART_DECR(mpid, 2*__PAGESIZE));
        MEMCLASS_PID_FREE(prp, mempart_get_classid(mpid), 2*__PAGESIZE);
        asid_map[adp->cpu.asid] = 0;
        return ENOMEM;
    }
    pq = pa_alloc(__PAGESIZE, __PAGESIZE, PAQ_COLOUR_NONE, PAA_FLAG_CONTIG, &status, restrict_proc, resv);
    if (pq == NULL) {
        MEMPART_UNDO_INCR(mpid, __PAGESIZE, resv);
        goto fail2;
    }
    MEMCLASS_PID_USE(prp, mempart_get_classid(mpid), __PAGESIZE);
    pa = pa_quantum_to_paddr(pq);
    adp->cpu.l2_pq = pq;
    adp->cpu.l2_pte = pa | l2_prot | ARM_PTE_V6_nG;
    adp->cpu.l2_ptp = pa | ARM_PTP_V6_L2;
    if (status & PAA_STATUS_NOT_ZEROED) {
        ptzero(pq);
    }

#ifdef VARIANT_smp
    /*
     * Indicate that vmm_aspace() needs to flush TLBs for this ASID
     */
    adp->cpu.asid_flush = LEGAL_CPU_BITMASK;
#else
    /*
     * Invalidate all (unified) TLB entries for our ASID
     *
     * FIXME_v6: current ARM11 and MPCore have unified TLBs.
     * Need to check for other processors whether the unified op
     * will correctly invalidate both I and D TLBs...
     */
    arm_v6_tlb_asid(adp->cpu.asid);
#endif

    adp->cpu.l2_list = 0;

    /*
     * FIXME: need to figure out the correct thing here...
     */
    adp->cpu.pgdir = L1_table;

    return EOK;

fail2:
    pa_free(adp->cpu.l1_pq, 2, MEMPART_DECR(mpid, 2*__PAGESIZE));
    MEMCLASS_PID_FREE(prp, mempart_get_classid(mpid), 2*__PAGESIZE);
fail1:
    asid_map[adp->cpu.asid] = 0;
    return EAGAIN;
}
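/*
 * Illustrative sketch, not part of the original source: the ASID
 * bookkeeping pattern at the top of cpu_vmm_mcreate(), with assumed
 * demo_* names.  Each address space claims the first free slot in a
 * fixed table (ARMv6 ASIDs are 8 bits, hence 256 slots); a full table
 * yields EAGAIN, and every later failure path must release the slot
 * again, as the fail1 label above does.
 */
#include <stdio.h>
#include <errno.h>

#define DEMO_MAX_ASID   256

static void *demo_asid_map[DEMO_MAX_ASID];

/* caller is assumed to serialize access, as the kernel lock does above */
static int
demo_asid_alloc(void *adp, int *asidp) {
    int asid;

    for (asid = 0; asid < DEMO_MAX_ASID; asid++) {
        if (demo_asid_map[asid] == 0) {
            demo_asid_map[asid] = adp;
            *asidp = asid;
            return 0;
        }
    }
    return EAGAIN;
}

static void
demo_asid_free(int asid) {
    demo_asid_map[asid] = 0;
}

int
main(void) {
    int aspace; /* stand-in for an ADDRESS structure */
    int asid;

    if (demo_asid_alloc(&aspace, &asid) == 0) {
        printf("claimed asid %d\n", asid);  /* prints 0 on first call */
        demo_asid_free(asid);
    }
    return 0;
}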