/*
 * pte_manipulate - validate a page-table manipulation request and execute it,
 * either directly (when already executing in the kernel) or via a ring-0
 * kernel call.
 *
 * Returns the result of the manipulation; an EINTR result from the kernel
 * call is retried internally until the operation completes.
 */
static int pte_manipulate(struct pte_manip *manip) {
    int r;

    /* The request must cover whole pages: start and end+1 must be
     * page-aligned, and a mapping operation must supply a page-aligned
     * physical address (ADDR_OFFSET() yields the in-page offset here). */
    if(ADDR_OFFSET(manip->data.start) != 0) crash();
    if(ADDR_OFFSET(manip->data.end+1) != 0) crash();
    if((manip->data.op & PTE_OP_MAP) && (ADDR_OFFSET(manip->data.paddr) != 0)) crash();

    /* Prime the manipulation state machine for the first split pass. */
    manip->state = MSTATE_SPLIT_START;
    manip->mapped = manip->data.first = manip->data.start;
    manip->data.split_end = manip->data.end;

    /* Already in the kernel: run the manipulation directly. */
    if(KerextAmInKernel()) {
        return do_manipulation(manip);
    }
    //FUTURE: Do we actually have to go into the kernel all of the time
    //FUTURE: for PTE manipulation? We need to for the system space since
    //FUTURE: the X86 has to walk the mem_dir_list, but maybe not for
    //FUTURE: user space. The address space is usually locked (aside
    //FUTURE: from vmm_resize()) when manipulating the pgtbl, so we
    //FUTURE: might not even need a mutex.
    //FUTURE: Have to worry about SMP, where one CPU might be in a locked
    //FUTURE: kernel while we remove perms to access the memory being
    //FUTURE: referenced.

    /* User mode: mark the op preemptible and enter the kernel, retrying
     * if the call is interrupted. */
    manip->data.op |= PTE_OP_PREEMPT;
    do {
        r = __Ring0(ker_manipulate, manip);
    } while(r == EINTR);
    return r;
}
/* Drive pin p low (GND) via the port's fast-GPIO clear register. */
void clr_pin(uint32_t p)
{
    uint32_t port_offset = 0x20 * GET_PORT(p);
    uint32_t bit = 1 << GET_PIN(p);
    volatile uint32_t *clr = ADDR_OFFSET(&FIO0CLR, port_offset);
    volatile uint32_t *mask = ADDR_OFFSET(&FIO0MASK, port_offset);

    /* Only pins whose mask bit is 0 are affected by FIO writes. */
    *mask = ~bit;
    /* Writing 1 to the clear register pulls the pin to GND. */
    *clr = bit;
}
/* Drive pin p high via the port's fast-GPIO set register. */
void set_pin(uint32_t p)
{
    uint32_t port_offset = 0x20 * GET_PORT(p);
    uint32_t bit = 1 << GET_PIN(p);
    volatile uint32_t *set = ADDR_OFFSET(&FIO0SET, port_offset);
    volatile uint32_t *mask = ADDR_OFFSET(&FIO0MASK, port_offset);

    /* Only pins whose mask bit is 0 are affected by FIO writes. */
    *mask = ~bit;
    /* Writing 1 to the set register drives the pin high. */
    *set = bit;
}
/*
 * ps_scan - MPS format scanner for a contiguous block of PostScript OBJECTs.
 *
 * Walks every OBJECT in [base, limit), retains names directly and marks the
 * storage referenced by composite objects (strings, arrays) so the collector
 * keeps it alive. Non-composite objects are skipped. Object types that
 * should never appear in scanned memory trip NOTREACHED.
 *
 * Returns MPS_RES_OK.
 */
mps_res_t MPS_CALL ps_scan(mps_ss_t scan_state, mps_addr_t base, mps_addr_t limit)
{
  register OBJECT *obj;
  OBJECT *obj_limit;
  register mps_addr_t ref;
  size_t len = 0;

  obj_limit = limit;
  MPS_SCAN_BEGIN( scan_state )
    for ( obj = base; obj < obj_limit; obj++ ) {
      /* Candidate reference: the "other" word of the object. */
      ref = (mps_addr_t)oOther( *obj );
      switch ( oType( *obj )) {
      case ONAME:
        /* Names are retained through their own slot, not marked as blocks. */
        MPS_RETAIN( (mps_addr_t *)&oName( *obj ), TRUE );
        continue;
      case OSAVE:
        continue;
      case ODICTIONARY:
        /* NOTE(review): dictionaries are apparently never expected here. */
        NOTREACHED;
        break;
      case OSTRING: {
          mps_addr_t ref_limit;

          ref_limit = ADDR_ADD( ref, theLen(*obj));
          /* ref could point into the middle of a string, so align it. */
          ref = PTR_ALIGN_DOWN( mps_addr_t, ref, MM_PS_ALIGNMENT );
          len = ADDR_OFFSET( ref, ref_limit );
        }
        break;
      case OFILE:
        NOTREACHED;
        break;
      case OARRAY:
      case OPACKEDARRAY:
        /* Arrays reference theLen contiguous OBJECTs. */
        len = theLen(*obj) * sizeof( OBJECT );
        break;
      case OGSTATE:
      case OLONGSTRING:
        NOTREACHED;
        break;
      default:
        continue; /* not a composite object */
      }
      PS_MARK_BLOCK( scan_state, ref, len );
    }
  MPS_SCAN_END(scan_state);
  return MPS_RES_OK;
}
/*
 * seg_translate - translate a virtual address inside segment 'seg' to a
 * physical address, demand-loading or swapping the page in as needed.
 *
 * seg   : segment containing vaddr (asserted non-NULL, inited, in range)
 * vaddr : virtual address to translate
 * ret   : out parameter receiving the physical address
 *
 * Returns 0 on success, or the error from demand load / frame allocation /
 * swap-in on failure.
 */
int seg_translate(struct segment *seg, vaddr_t vaddr, paddr_t *ret){
    KASSERT(seg != NULL);
    KASSERT(seg_in_range(seg, vaddr));
    KASSERT(seg_is_inited(seg));
    DEBUG(DB_VM,"seg_translate %p\n", (void*)vaddr);
    int vpn = ADDR_MAPPING_NUM(vaddr);      /* virtual page number */
    int offset = ADDR_OFFSET(vaddr);        /* offset within the page */
    int rv;
    /* Index into this segment's page table, relative to its base page. */
    int idx = vpn - ADDR_MAPPING_NUM(seg->vbase);
    DEBUG(DB_VM,"\tindex values %x, vpn %x, vbase %x\n", idx, vpn,ADDR_MAPPING_NUM(seg->vbase));
    /* Page never allocated yet: demand-load it from the backing object. */
    if(!seg->pagetable[idx].alloc){
        rv = seg_ondemand_load(seg, idx);
        if (rv){
            return rv;
        }
        DEBUG(DB_VM,"\tFrame %d allocated for vpn %x (index:%d)\n", seg->pagetable[idx].pfn, vpn, idx);
    }
    /* Page allocated but currently swapped out: bring it back in. */
    if (seg->pagetable[idx].swapped){
        DEBUG(DB_VM,"Swapping in %d\n", idx);
        int frame;
        // try to allocate memory before kicking out a frame
        int result = uframe_alloc1(&frame, curproc->pid, vpn);
        if(result) {
            result = core_kickvictim(&frame);
            if (result) return result;
        }
        /* NOTE(review): pfn is updated before swap_to_mem(); if the swap-in
         * below fails, the entry points at a frame whose contents were never
         * filled — verify callers treat that as fatal. */
        seg->pagetable[idx].pfn = frame;
        rv = swap_to_mem(vpn, &seg->pagetable[idx], frame); // swap back
        /* NOTE(review): the 'swapped' flag is presumably cleared inside
         * swap_to_mem() — confirm, otherwise this path repeats every fault. */
        if (rv) {
            return rv;
        }
    }
    /* Combine the frame's physical base with the in-page offset. */
    *ret = frame_to_paddr(seg->pagetable[idx].pfn) | offset;
    DEBUG(DB_VM,"\tfinish v->p addr translation: %p\n", (void*) *ret);
    return 0;
}
/* Configure pin p as a plain GPIO output: GPIO function, pull-up enabled,
 * normal (not open-drain) mode, direction set to output. */
void select_gpio_out_mode(uint32_t p)
{
    uint32_t idx = 2 * GET_PORT(p);
    uint32_t bitpos = GET_PIN(p);
    volatile uint32_t *dir;

    /* PINSEL/PINMODE pack 16 pins per register, two bits per pin;
     * pins 16..31 live in the next register. */
    if (bitpos >= 16) {
        bitpos -= 16;
        idx++;
    }

    /* Function bits 00 -> GPIO (p. 108). */
    PINSEL[idx] &= ~(3 << (bitpos * 2));
    /* Mode bits 00 -> pull-up resistor. */
    PINMODE[idx] &= ~(3 << (bitpos * 2));
    /* Open-drain bit 0 -> normal push-pull mode. */
    PINMODE_OD[GET_PORT(p)] &= ~(1 << GET_PIN(p));

    /* Direction bit 1 -> output. */
    dir = ADDR_OFFSET(&FIO0DIR, 0x20 * GET_PORT(p));
    *dir |= (1 << GET_PIN(p));
}
/*
 * vmm_vaddr_to_memobj - translate a virtual address within process 'prp'
 * to the sync object / offset pair for the physical page backing it.
 *
 * prp       : process owning the address space (must not be NULL)
 * addr      : virtual address to translate
 * offset    : out parameter receiving the sync offset for the page
 * mark_page : when non-zero, flag the backing quantum with
 *             PAQ_FLAG_HAS_SYNC (skipped for read-only PTEs; see below)
 *
 * Returns the sync OBJECT, or (OBJECT *)-1 when the address is not mapped
 * (or is mapped read-only while mark_page is requested).
 *
 * FIX: the original read the PTE twice in a row (pte = PXE_GET(ptep);
 * duplicated). A faulting read restarts the access, so a single read
 * returns the post-fault value; the second read was a copy/paste artifact.
 */
OBJECT *
vmm_vaddr_to_memobj(PROCESS *prp, void *addr, unsigned *offset, int mark_page) {
    pxe_t      *pgdir;
    pxe_t      *ptep;
    uint64_t   pde;
    uint64_t   pte;
    unsigned   pg_offset;
    unsigned   pde_mask = (1 << pd_bits) - 1;
    paddr_t    paddr;
    uintptr_t  vaddr = (uintptr_t)addr;

#ifndef NDEBUG
    if(prp == NULL) crash();
#endif
    /* Pick the page directory: the system one when the process has no
     * address space of its own. */
    if(prp->memory == NULL) {
        pgdir = pgtbl_list->pgdir;
    } else {
        pgdir = prp->memory->cpu.pgdir;
#ifndef NDEBUG
        /* User addresses require the process's tables to be active. */
        if(prp->memory->cpu.ptroot_paddr != rdpgdir() && (vaddr < CPU_USER_VADDR_END)) crash();
#endif
    }
    pde = PXE_GET(GENERIC_VTOPDIRP(pgdir, vaddr));
    if(!(pde & X86_PTE_PRESENT)) goto fail;
    if(pde & X86_PDE_PS) {
        /* Large page: the PDE maps the frame directly. */
        pg_offset = vaddr & pde_mask;
        if(!(pde & (X86_PDE_USER1|X86_PDE_PRESENT))) goto fail;
        paddr = (pde & ~(X86_PTE_NX | pde_mask)) + pg_offset;
        *offset = PADDR_TO_SYNC_OFF(paddr);
        return PADDR_TO_SYNC_OBJ(paddr);
    }
    pg_offset = ADDR_OFFSET(vaddr);

    // We can use the currently active page table
    // to check the PTE - much faster
    ptep = VTOPTEP(vaddr);
    pte = PXE_GET(ptep);
    if(!(pte & (X86_PTE_USER1|X86_PTE_PRESENT))) goto fail;
    paddr = (pte & PTE_PADDR_BITS) | pg_offset;
    if(mark_page) {
        struct pa_quantum *pq;

        // This line is for the 386, which doesn't produce write faults
        // when we want it to.
        if(!(pte & X86_PTE_WRITE)) return (OBJECT *)-1;
        pq = pa_paddr_to_quantum(paddr);
        if(pq != NULL) {
            pq->flags |= PAQ_FLAG_HAS_SYNC;
        }
    }
    *offset = PADDR_TO_SYNC_OFF(paddr);
    return PADDR_TO_SYNC_OBJ(paddr);

fail:
#ifndef NDEBUG
    crash();
    /* NOTREACHED */
#endif
    return (OBJECT *)-1;
}
/*
 * cpu_vmm_vaddrinfo - return the protection flags and physical translation
 * for a virtual address, using the currently active ARMv6 page tables.
 *
 * prp   : owning process (not consulted here — the walk uses the mapped
 *         L1/L2 table windows for the active address space)
 * vaddr : virtual address to look up
 * p     : out parameter receiving the physical address
 * lenp  : optional out parameter receiving the bytes remaining in the
 *         mapping from vaddr to the end of its page/section
 *
 * Returns PROT_NONE when unmapped; otherwise MAP_PHYS plus the PROT_*
 * bits decoded from the descriptor.
 */
unsigned cpu_vmm_vaddrinfo(PROCESS *prp, uintptr_t vaddr, paddr_t *p, size_t *lenp) {
    unsigned pte;
    unsigned pg_size;
    unsigned pg_offset;
    unsigned prot;

    prot = MAP_PHYS; // so PROT_NONE doesn't come back

    /*
     * Check for section mapping
     */
    pte = V6_USER_SPACE(vaddr) ? *UTOL1SC(vaddr) : *KTOL1SC(vaddr);
    if ((pte & 0x3) == (ARM_PTP_SC & 0x3)) {
        /* 1MB section: physical base comes straight from the L1 entry. */
        pg_offset = vaddr & ARM_SCMASK;
        *p = (pte & ~ARM_SCMASK) | pg_offset;
        if(lenp != NULL) {
            *lenp = ARM_SCSIZE - pg_offset;
        }
        /* All valid sections are readable; write/exec/cache are decoded
         * from APX, XN and C bits. */
        prot |= PROT_READ | PROT_EXEC;
        if ((pte & ARM_PTP_V6_APX) == 0) {
            prot |= PROT_WRITE;
        }
        if ((pte & ARM_PTP_V6_XN) != 0) {
            prot &= ~PROT_EXEC;
        }
        if ((pte & ARM_PTP_C) == 0) {
            prot |= PROT_NOCACHE;
        }
    } else {
        /* Page-table mapping: fetch the L2 entry only if the L1 entry
         * says a table is present. */
        pte = 0;
        if (V6_USER_SPACE(vaddr)) {
            if (*UTOPDIR(vaddr) & ARM_PTE_VALID) {
                pte = *UTOPTEP(vaddr);
            }
        } else {
            if (*KTOPDIR(vaddr) & ARM_PTE_VALID) {
                pte = *KTOPTEP(vaddr);
            }
        }
        if (!(pte & ARM_PTE_VALID)) {
            return PROT_NONE;
        }
        /* NOTE(review): comparing (pte & ARM_PTE_VALID) against ARM_PTE_LP
         * presumably works because ARM_PTE_VALID masks the 2-bit type field
         * that distinguishes large from small pages — confirm macro values. */
        if ((pte & ARM_PTE_VALID) == ARM_PTE_LP) {
            pg_size = ARM_LPSIZE;
            pg_offset = vaddr & ARM_LPMASK;
        } else {
            pg_size = __PAGESIZE;
            pg_offset = ADDR_OFFSET(vaddr);
        }
        *p = (pte & ~(pg_size-1)) | pg_offset;
        if(lenp != NULL) {
            *lenp = pg_size - pg_offset;
        }
        // all valid translations are readable and executable
        prot |= PROT_READ | PROT_EXEC;
        if ((pte & ARM_PTE_V6_APX) == 0) {
            prot |= PROT_WRITE;
        }
        /* Large and small pages keep their XN bit in different positions. */
        if ((pte & ARM_PTE_VALID) == ARM_PTE_LP) {
            if ((pte & ARM_PTE_V6_LP_XN) != 0) {
                prot &= ~PROT_EXEC;
            }
        } else {
            if ((pte & ARM_PTE_V6_SP_XN) != 0) {
                prot &= ~PROT_EXEC;
            }
        }
        if ((pte & ARM_PTE_C) == 0) {
            prot |= PROT_NOCACHE;
        }
    }
    return prot;
}
int __prussdrv_memmap_init(void) { int i, fd; char hexstring[PRUSS_UIO_PARAM_VAL_LEN]; if (prussdrv.mmap_fd == 0) { for (i = 0; i < NUM_PRU_HOSTIRQS; i++) { if (prussdrv.fd[i]) break; } if (i == NUM_PRU_HOSTIRQS) return -1; else prussdrv.mmap_fd = prussdrv.fd[i]; } for (i=0; i<UIO_OPEN_TIMEOUT; i++) { fd = open(PRUSS_UIO_DRV_PRUSS_BASE, O_RDONLY); if (fd >= 0) { read(fd, hexstring, PRUSS_UIO_PARAM_VAL_LEN); prussdrv.pruss_phys_base = strtoul(hexstring, NULL, HEXA_DECIMAL_BASE); close(fd); break; } sleep(1); } if (i==UIO_OPEN_TIMEOUT) { DEBUG_PRINTF("open %s: timeout\n", PRUSS_UIO_DRV_PRUSS_BASE); return -2; } fd = open(PRUSS_UIO_DRV_PRUSS_SIZE, O_RDONLY); if (fd >= 0) { read(fd, hexstring, PRUSS_UIO_PARAM_VAL_LEN); prussdrv.pruss_map_size = strtoul(hexstring, NULL, HEXA_DECIMAL_BASE); close(fd); } else return -3; prussdrv.pru0_dataram_base = mmap(0, prussdrv.pruss_map_size, PROT_READ | PROT_WRITE, MAP_SHARED, prussdrv.mmap_fd, PRUSS_UIO_MAP_OFFSET_PRUSS); prussdrv.version = __pruss_detect_hw_version((unsigned int *) prussdrv.pru0_dataram_base); switch (prussdrv.version) { case PRUSS_V1: { DEBUG_PRINTF(PRUSS_V1_STR "\n"); prussdrv.pru0_dataram_phy_base = AM18XX_DATARAM0_PHYS_BASE; prussdrv.pru1_dataram_phy_base = AM18XX_DATARAM1_PHYS_BASE; prussdrv.intc_phy_base = AM18XX_INTC_PHYS_BASE; prussdrv.pru0_control_phy_base = AM18XX_PRU0CONTROL_PHYS_BASE; prussdrv.pru0_debug_phy_base = AM18XX_PRU0DEBUG_PHYS_BASE; prussdrv.pru1_control_phy_base = AM18XX_PRU1CONTROL_PHYS_BASE; prussdrv.pru1_debug_phy_base = AM18XX_PRU1DEBUG_PHYS_BASE; prussdrv.pru0_iram_phy_base = AM18XX_PRU0IRAM_PHYS_BASE; prussdrv.pru1_iram_phy_base = AM18XX_PRU1IRAM_PHYS_BASE; } break; case PRUSS_V2: { DEBUG_PRINTF(PRUSS_V2_STR "\n"); prussdrv.pru0_dataram_phy_base = AM33XX_DATARAM0_PHYS_BASE; prussdrv.pru1_dataram_phy_base = AM33XX_DATARAM1_PHYS_BASE; prussdrv.intc_phy_base = AM33XX_INTC_PHYS_BASE; prussdrv.pru0_control_phy_base = AM33XX_PRU0CONTROL_PHYS_BASE; prussdrv.pru0_debug_phy_base = 
AM33XX_PRU0DEBUG_PHYS_BASE; prussdrv.pru1_control_phy_base = AM33XX_PRU1CONTROL_PHYS_BASE; prussdrv.pru1_debug_phy_base = AM33XX_PRU1DEBUG_PHYS_BASE; prussdrv.pru0_iram_phy_base = AM33XX_PRU0IRAM_PHYS_BASE; prussdrv.pru1_iram_phy_base = AM33XX_PRU1IRAM_PHYS_BASE; prussdrv.pruss_sharedram_phy_base = AM33XX_PRUSS_SHAREDRAM_BASE; prussdrv.pruss_cfg_phy_base = AM33XX_PRUSS_CFG_BASE; prussdrv.pruss_uart_phy_base = AM33XX_PRUSS_UART_BASE; prussdrv.pruss_iep_phy_base = AM33XX_PRUSS_IEP_BASE; prussdrv.pruss_ecap_phy_base = AM33XX_PRUSS_ECAP_BASE; prussdrv.pruss_miirt_phy_base = AM33XX_PRUSS_MIIRT_BASE; prussdrv.pruss_mdio_phy_base = AM33XX_PRUSS_MDIO_BASE; } break; default: DEBUG_PRINTF(PRUSS_UNKNOWN_STR "\n"); } #define ADDR_OFFSET(a,o) ((void*) ((char*) (a) + (o))) prussdrv.pru1_dataram_base = ADDR_OFFSET(prussdrv.pru0_dataram_base, prussdrv.pru1_dataram_phy_base - prussdrv.pru0_dataram_phy_base); prussdrv.intc_base = ADDR_OFFSET(prussdrv.pru0_dataram_base, prussdrv.intc_phy_base - prussdrv.pru0_dataram_phy_base); prussdrv.pru0_control_base = ADDR_OFFSET(prussdrv.pru0_dataram_base, prussdrv.pru0_control_phy_base - prussdrv.pru0_dataram_phy_base); prussdrv.pru0_debug_base = ADDR_OFFSET(prussdrv.pru0_dataram_base, prussdrv.pru0_debug_phy_base - prussdrv.pru0_dataram_phy_base); prussdrv.pru1_control_base = ADDR_OFFSET(prussdrv.pru0_dataram_base, prussdrv.pru1_control_phy_base - prussdrv.pru0_dataram_phy_base); prussdrv.pru1_debug_base = ADDR_OFFSET(prussdrv.pru0_dataram_base, prussdrv.pru1_debug_phy_base - prussdrv.pru0_dataram_phy_base); prussdrv.pru0_iram_base = ADDR_OFFSET(prussdrv.pru0_dataram_base, prussdrv.pru0_iram_phy_base - prussdrv.pru0_dataram_phy_base); prussdrv.pru1_iram_base = ADDR_OFFSET(prussdrv.pru0_dataram_base, prussdrv.pru1_iram_phy_base - prussdrv.pru0_dataram_phy_base); if (prussdrv.version == PRUSS_V2) { prussdrv.pruss_sharedram_base = ADDR_OFFSET(prussdrv.pru0_dataram_base, prussdrv.pruss_sharedram_phy_base - prussdrv.pru0_dataram_phy_base); 
prussdrv.pruss_cfg_base = ADDR_OFFSET(prussdrv.pru0_dataram_base, prussdrv.pruss_cfg_phy_base - prussdrv.pru0_dataram_phy_base); prussdrv.pruss_uart_base = ADDR_OFFSET(prussdrv.pru0_dataram_base, prussdrv.pruss_uart_phy_base - prussdrv.pru0_dataram_phy_base); prussdrv.pruss_iep_base = ADDR_OFFSET(prussdrv.pru0_dataram_base, prussdrv.pruss_iep_phy_base - prussdrv.pru0_dataram_phy_base); prussdrv.pruss_ecap_base = ADDR_OFFSET(prussdrv.pru0_dataram_base, prussdrv.pruss_ecap_phy_base - prussdrv.pru0_dataram_phy_base); prussdrv.pruss_miirt_base = ADDR_OFFSET(prussdrv.pru0_dataram_base, prussdrv.pruss_miirt_phy_base - prussdrv.pru0_dataram_phy_base); prussdrv.pruss_mdio_base = ADDR_OFFSET(prussdrv.pru0_dataram_base, prussdrv.pruss_mdio_phy_base - prussdrv.pru0_dataram_phy_base); } #ifndef DISABLE_L3RAM_SUPPORT fd = open(PRUSS_UIO_DRV_L3RAM_BASE, O_RDONLY); if (fd >= 0) { read(fd, hexstring, PRUSS_UIO_PARAM_VAL_LEN); prussdrv.l3ram_phys_base = strtoul(hexstring, NULL, HEXA_DECIMAL_BASE); close(fd); } else return -4; fd = open(PRUSS_UIO_DRV_L3RAM_SIZE, O_RDONLY); if (fd >= 0) { read(fd, hexstring, PRUSS_UIO_PARAM_VAL_LEN); prussdrv.l3ram_map_size = strtoul(hexstring, NULL, HEXA_DECIMAL_BASE); close(fd); } else return -5; prussdrv.l3ram_base = mmap(0, prussdrv.l3ram_map_size, PROT_READ | PROT_WRITE, MAP_SHARED, prussdrv.mmap_fd, PRUSS_UIO_MAP_OFFSET_L3RAM); #endif fd = open(PRUSS_UIO_DRV_EXTRAM_BASE, O_RDONLY); if (fd >= 0) { read(fd, hexstring, PRUSS_UIO_PARAM_VAL_LEN); prussdrv.extram_phys_base = strtoul(hexstring, NULL, HEXA_DECIMAL_BASE); close(fd); } else return -6; fd = open(PRUSS_UIO_DRV_EXTRAM_SIZE, O_RDONLY); if (fd >= 0) { read(fd, hexstring, PRUSS_UIO_PARAM_VAL_LEN); prussdrv.extram_map_size = strtoul(hexstring, NULL, HEXA_DECIMAL_BASE); close(fd); } else return -7; prussdrv.extram_base = mmap(0, prussdrv.extram_map_size, PROT_READ | PROT_WRITE, MAP_SHARED, prussdrv.mmap_fd, PRUSS_UIO_MAP_OFFSET_EXTRAM); return 0; }
/*
 * vmm_vaddr_to_memobj - SH variant: translate a virtual address within
 * process 'prp' to the sync object / offset pair for the backing page.
 *
 * prp       : process owning the address space (must not be NULL for
 *             ordinary mapped addresses)
 * addr      : virtual address to translate
 * offset    : out parameter receiving the sync offset for the page
 * mark_page : when non-zero, flag the backing quantum with
 *             PAQ_FLAG_HAS_SYNC
 *
 * Returns the sync OBJECT, or (OBJECT *)-1 when no translation exists.
 */
OBJECT *
vmm_vaddr_to_memobj(PROCESS *prp, void *addr, unsigned *offset, int mark_page) {
    uint32_t  *pde;
    uint32_t  pte;
    unsigned  pg_offset;
    paddr_t   paddr;
    uintptr_t vaddr = (uintptr_t)addr;
    ADDRESS   *adp;
    unsigned  mask;

    /* piece of cake for P1, P2 addresses */
    if(SH_IS_P1(vaddr) || SH_IS_P2(vaddr)) {
        // Assuming P1 & P2 are the same size
        /* Identity-mapped regions: strip the region bits to get the
         * physical address directly. */
        paddr = vaddr & (SH_P1SIZE - 1);
        *offset = PADDR_TO_SYNC_OFF(paddr);
        return PADDR_TO_SYNC_OBJ(paddr);
    }
    pg_offset = ADDR_OFFSET(vaddr);
    // Check for system, cpu pages...
    /* The cpupage/syspage live at fixed (colored) virtual slots and are
     * backed by P1 memory; translate them without a table walk. */
    if(ADDR_PAGE(vaddr) == SYSP_ADDCOLOR(VM_CPUPAGE_ADDR, SYSP_GETCOLOR(_cpupage_ptr))) {
        paddr = SH_P1_TO_PHYS((uintptr_t)_cpupage_ptr) | pg_offset;
        *offset = PADDR_TO_SYNC_OFF(paddr);
        return PADDR_TO_SYNC_OBJ(paddr);
    }
    if(ADDR_PAGE(vaddr) == SYSP_ADDCOLOR(VM_SYSPAGE_ADDR, SYSP_GETCOLOR(_syspage_ptr))) {
        paddr = SH_P1_TO_PHYS((uintptr_t)_syspage_ptr) | pg_offset;
        *offset = PADDR_TO_SYNC_OFF(paddr);
        return PADDR_TO_SYNC_OBJ(paddr);
    }
#ifndef NDEBUG
    if(prp == NULL) crash();
#endif
    adp = prp->memory;
#ifndef NDEBUG
    if(adp == NULL) crash();
#endif
    /* Two-level walk through the process's software page tables. */
    pde = adp->cpu.pgdir[L1PAGEIDX(vaddr)];
    if((pde == NULL) || !PRESENT((pte = pde[L2PAGEIDX(vaddr)]))) {
        /* not mapped, check for perm mappings */
        goto fail;
    }
    /* The PTE encodes its own page size; mask selects the frame bits. */
    mask = SH_PTE_PGSIZE_MASK(pte);
    paddr = (pte & mask) | (vaddr & ~mask);
    if(mark_page) {
        struct pa_quantum *pq;

        pq = pa_paddr_to_quantum(paddr);
        if(pq != NULL) {
            pq->flags |= PAQ_FLAG_HAS_SYNC;
        }
    }
    *offset = PADDR_TO_SYNC_OFF(paddr);
    return PADDR_TO_SYNC_OBJ(paddr);

fail:
#ifndef NDEBUG
    crash();
    /* NOTREACHED */
#endif
    return (OBJECT *)-1;
}
/* Return the logic level (0 or 1) of pin p. */
int read_pin(uint32_t p)
{
    uint32_t port_offset = 0x20 * GET_PORT(p);
    uint32_t bit = 1 << GET_PIN(p);
    volatile uint32_t *pin = ADDR_OFFSET(&FIO0PIN, port_offset);
    volatile uint32_t *mask = ADDR_OFFSET(&FIO0MASK, port_offset);

    /* Unmask only this pin (mask bit 0 = pin visible), then sample it. */
    *mask = ~bit;
    return (*pin & bit) != 0;
}