/*
 * cpu_vmm_mdestroy
 *
 * Release all CPU-specific address-space state for a dying process:
 * every allocated L2 page table, the 8K L1 table, the 4K "page
 * directory", and finally the process's ASID.
 */
void
cpu_vmm_mdestroy(PROCESS *prp)
{
	ADDRESS				*adp = prp->memory;
	struct pa_quantum	*cur;
	struct pa_quantum	*next;
	part_id_t			mpid = mempart_getid(prp, sys_memclass_id);
	memsize_t			freed = 0;

	/*
	 * Walk the L2 page table list, returning one quantum apiece to the
	 * memory partition and accumulating the total length released.
	 */
	cur = adp->cpu.l2_list;
	while (cur != 0) {
		next = cur->u.inuse.next;
		pa_free(cur, 1, MEMPART_DECR(mpid, NQUANTUM_TO_LEN(1)));
		freed += NQUANTUM_TO_LEN(1);
		cur = next;
	}

	/*
	 * Free the L1 table (8K => 2 quanta) and the "page directory"
	 * (4K => 1 quantum), if they were ever allocated.
	 */
	if (adp->cpu.l1_pq != 0) {
		pa_free(adp->cpu.l1_pq, 2, MEMPART_DECR(mpid, NQUANTUM_TO_LEN(2)));
		freed += NQUANTUM_TO_LEN(2);
	}
	if (adp->cpu.l2_pq != 0) {
		pa_free(adp->cpu.l2_pq, 1, MEMPART_DECR(mpid, NQUANTUM_TO_LEN(1)));
		freed += NQUANTUM_TO_LEN(1);
	}
	MEMCLASS_PID_FREE(prp, mempart_get_classid(mpid), freed);

	/*
	 * Release our ASID.
	 *
	 * WARNING: we are currently called with kernel locked so no other
	 * locks are required to manipulate asid_map[]
	 *
	 * NOTE(review): ASID 0 is never cleared here — presumably ASID 0 is
	 * claimed once at boot and its owner is never destroyed; confirm.
	 */
	if (adp->cpu.asid) {
		asid_map[adp->cpu.asid] = 0;
	}
}
/*
 * path_normalize
 *
 * Normalize an absolute path by dropping empty and "." components and
 * resolving ".." against the accumulated prefix.  Returns a freshly
 * generated path string (ownership passes to the caller), or 0 if a
 * ".." component tries to climb above the root.
 */
char *
path_normalize( char *path )
{
	pem_t	*pemp = pem_alloc( path );
	pa_t	*pap = pa_alloc( );
	char	*comp;
	char	*result;

	ASSERT( path[ 0 ] == '/' );

	while ( ( comp = pem_next( pemp )) != 0 ) {
		/* Empty components ("//") and "." contribute nothing. */
		if ( comp[ 0 ] == '\0' || ! strcmp( comp, "." )) {
			free( ( void * )comp );
			continue;
		}
		if ( ! strcmp( comp, ".." )) {
			free( ( void * )comp );
			/* Peel the last accumulated component; failing means we
			 * attempted to go above the root — report an error. */
			if ( ! pa_peel( pap )) {
				pa_free( pap );
				pem_free( pemp );
				return 0;
			}
			continue;
		}
		/* Ordinary component: ownership of comp passes to pap. */
		pa_append( pap, comp );
	}

	result = pa_gen( pap );
	pa_free( pap );
	pem_free( pemp );
	return result;
}
/*
 * cpu_vmm_mcreate
 *
 * Set up the CPU-specific address-space state for a new process:
 * allocate an ASID, an 8K L1 translation table and a 4K "page
 * directory", and initialise the cached pte/ptp/ttbr0 values.
 *
 * Returns EOK on success, EAGAIN if no ASID is free or a physical
 * allocation fails, ENOMEM if the memory-partition quota check fails.
 *
 * BUGFIX: the two MEMPART_CHK_and_INCR() failure paths previously did
 * a bare "return ENOMEM", permanently leaking the just-claimed ASID
 * (and, for the second check, the L1 table and its partition charge as
 * well).  Both now unwind through the common fail2/fail1 labels; all
 * previously-reachable paths keep their original return codes.
 */
int
cpu_vmm_mcreate(PROCESS *prp)
{
	ADDRESS				*adp = prp->memory;
	struct pa_quantum	*pq;
	int					asid;
	int					err;
	unsigned			status;
	paddr_t				pa;
	memsize_t			resv = 0;
	part_id_t			mpid = mempart_getid(prp, sys_memclass_id);

	/*
	 * Claim the first free ASID.
	 *
	 * WARNING: we are currently called with kernel locked so no other
	 * locks are required to manipulate asid_map[]
	 */
	for (asid = 0; asid < MAX_ASID; asid++) {
		if (asid_map[asid] == 0) {
			break;
		}
	}
	if (asid == MAX_ASID) {
		return EAGAIN;
	}
	asid_map[asid] = adp;
	adp->cpu.asid = asid;

	/*
	 * Allocate 8K L1 table to map 00000000-7fffffff
	 *
	 * The pte entries are marked non-global so we can map the L1 table
	 * at ARM_V6_USER_L1 without requiring a TLB flush on context switches
	 */
	if (MEMPART_CHK_and_INCR(mpid, 2*__PAGESIZE, &resv) != EOK) {
		err = ENOMEM;
		goto fail1;		/* was: return ENOMEM (leaked the ASID) */
	}
	pq = pa_alloc(2*__PAGESIZE, 2*__PAGESIZE, PAQ_COLOUR_NONE, PAA_FLAG_CONTIG, &status, restrict_proc, resv);
	if (pq == NULL) {
		MEMPART_UNDO_INCR(mpid, 2*__PAGESIZE, resv);
		err = EAGAIN;
		goto fail1;
	}
	MEMCLASS_PID_USE(prp, mempart_get_classid(mpid), 2*__PAGESIZE);
	pa = pa_quantum_to_paddr(pq);
	adp->cpu.l1_pq = pq;
	adp->cpu.l1_pte = pa | l2_prot | ARM_PTE_V6_nG;
	if (status & PAA_STATUS_NOT_ZEROED) {
		ptzero(pq);
		ptzero(pq + 1);		/* 8K table spans two quanta */
	}

	/*
	 * FIXME_v6: some of the TTBR bits might be cpu specific?
	 */
#ifdef VARIANT_smp
	adp->cpu.ttbr0 = pa | ARM_MMU_TTBR_S;
#else
	adp->cpu.ttbr0 = pa;
#endif

	/*
	 * Allocate 4K page directory
	 * Note that we only really use 2K (to map 00000000-7fffffff)
	 *
	 * The pte entries are marked non-global so we can map the L1 table
	 * at ARM_UPTE_BASE without requiring a TLB flush on context switches
	 */
	if (MEMPART_CHK_and_INCR(mpid, __PAGESIZE, &resv) != EOK) {
		err = ENOMEM;
		goto fail2;		/* was: return ENOMEM (leaked ASID + L1 table) */
	}
	pq = pa_alloc(__PAGESIZE, __PAGESIZE, PAQ_COLOUR_NONE, PAA_FLAG_CONTIG, &status, restrict_proc, resv);
	if (pq == NULL) {
		MEMPART_UNDO_INCR(mpid, __PAGESIZE, resv);
		err = EAGAIN;
		goto fail2;
	}
	MEMCLASS_PID_USE(prp, mempart_get_classid(mpid), __PAGESIZE);
	pa = pa_quantum_to_paddr(pq);
	adp->cpu.l2_pq = pq;
	adp->cpu.l2_pte = pa | l2_prot | ARM_PTE_V6_nG;
	adp->cpu.l2_ptp = pa | ARM_PTP_V6_L2;
	if (status & PAA_STATUS_NOT_ZEROED) {
		ptzero(pq);
	}

#ifdef VARIANT_smp
	/*
	 * Indicate that vmm_aspace() needs to flush TLBs for this ASID
	 */
	adp->cpu.asid_flush = LEGAL_CPU_BITMASK;
#else
	/*
	 * Invalidate all (unified) TLB entries for our ASID
	 *
	 * FIXME_v6: current ARM11 and MPcore have unified TLBs
	 * Need to check for other processors whether the unified op
	 * will correctly invalidate both I and D TLBs...
	 */
	arm_v6_tlb_asid(adp->cpu.asid);
#endif

	adp->cpu.l2_list = 0;

	/*
	 * FIXME: need to figure out the correct thing here...
	 */
	adp->cpu.pgdir = L1_table;
	return EOK;

	/*
	 * Error unwinding: fail2 releases the L1 table and its partition
	 * charge, then falls into fail1 which releases the ASID.
	 */
fail2:
	pa_free(adp->cpu.l1_pq, 2, MEMPART_DECR(mpid, 2*__PAGESIZE));
	MEMCLASS_PID_FREE(prp, mempart_get_classid(mpid), 2*__PAGESIZE);
fail1:
	asid_map[adp->cpu.asid] = 0;
	return err;
}