/*
 * Syscall wrapper for port_get_next_port_info().
 *
 * Validates the user pointers, copies the iteration cookie in from
 * userspace, performs the query, then copies BOTH the advanced cookie
 * and the resulting port_info back out.
 *
 * Returns the result of port_get_next_port_info(), or a negative error
 * code if a user pointer is invalid or a user copy fails.
 */
int user_port_get_next_port_info(proc_id uproc, uint32 *ucookie, struct port_info *uinfo)
{
	int res;
	struct port_info info;
	uint32 cookie;
	int rc;

	if (ucookie == NULL)
		return ERR_INVALID_ARGS;
	if (uinfo == NULL)
		return ERR_INVALID_ARGS;
	if (is_kernel_address(ucookie))
		return ERR_VM_BAD_USER_MEMORY;
	if (is_kernel_address(uinfo))
		return ERR_VM_BAD_USER_MEMORY;

	// copy the iteration cookie from userspace
	rc = user_memcpy(&cookie, ucookie, sizeof(uint32));
	if (rc < 0)
		return rc;

	res = port_get_next_port_info(uproc, &cookie, &info);

	// copy results back to userspace
	// BUG FIX: write back the advanced cookie, not the first bytes of
	// the port_info struct — otherwise the caller's iteration state is
	// corrupted and the loop never advances correctly.
	rc = user_memcpy(ucookie, &cookie, sizeof(uint32));
	if (rc < 0)
		return rc;
	rc = user_memcpy(uinfo, &info, sizeof(struct port_info));
	if (rc < 0)
		return rc;

	return res;
}
/*
 * Syscall wrapper for port_read_etc().
 *
 * The message code lands in a kernel-side temporary and is copied out to
 * userspace once the read completes; the data buffer itself is handed
 * straight down with PORT_FLAG_USE_USER_MEMCPY so the port layer copies
 * into user memory safely. The wait is made interruptible.
 */
ssize_t user_port_read_etc(port_id uport, int32 *umsg_code, void *umsg_buffer, size_t ubuffer_size, uint32 uflags, bigtime_t utimeout)
{
	int32 code;
	ssize_t result;
	int copy_err;

	if (umsg_code == NULL || umsg_buffer == NULL)
		return ERR_INVALID_ARGS;
	if (is_kernel_address(umsg_code) || is_kernel_address(umsg_buffer))
		return ERR_VM_BAD_USER_MEMORY;

	result = port_read_etc(uport, &code, umsg_buffer, ubuffer_size,
		uflags | PORT_FLAG_USE_USER_MEMCPY | SEM_FLAG_INTERRUPTABLE,
		utimeout);

	// hand the message code back to the caller
	copy_err = user_memcpy(umsg_code, &code, sizeof(int32));
	if (copy_err < 0)
		return copy_err;

	return result;
}
/*
 * ioctl entry point for the VESA framebuffer device.
 *
 * IOCTL_DEVFS_GET_FRAMEBUFFER_INFO: copies the cached fb_info structure
 * out to the user buffer.
 * IOCTL_DEVFS_MAP_FRAMEBUFFER: maps the physical framebuffer into the
 * calling process' address space, writes the mapped address through the
 * user buffer, and returns the new region id on success. If the pointer
 * copy-out fails, the freshly created region is torn down again.
 */
static int vesa_ioctl(dev_cookie cookie, int op, void *buf, size_t len)
{
	if (!vesa.enabled)
		return ERR_NOT_FOUND;

	switch (op) {
		case IOCTL_DEVFS_GET_FRAMEBUFFER_INFO:
			if (is_kernel_address(buf))
				return ERR_VM_BAD_USER_MEMORY;
			return user_memcpy(buf, &vesa.fb_info, sizeof(vesa.fb_info));

		case IOCTL_DEVFS_MAP_FRAMEBUFFER: {
			aspace_id aid = vm_get_current_user_aspace_id();
			region_id rid;
			void *address;
			int rc;

			if (is_kernel_address(buf))
				return ERR_VM_BAD_USER_MEMORY;

			// map the framebuffer into the user's address space
			rid = vm_map_physical_memory(aid, "vesa_fb", &address,
				REGION_ADDR_ANY_ADDRESS, vesa.phys_memory.size, LOCK_RW,
				vesa.phys_memory.start);
			if (rid < 0)
				return rid;

			// hand the mapped address back to the user; undo the
			// mapping if the copy-out fails
			rc = user_memcpy(buf, &address, sizeof(address));
			if (rc < 0) {
				vm_delete_region(aid, rid);
				return rc;
			}

			// the region id doubles as the success return code
			return rid;
		}

		default:
			return ERR_INVALID_ARGS;
	}
}
/*
 * Syscall wrapper for port_write_etc().
 *
 * Validates the user buffer pointer, then forwards the write with
 * PORT_FLAG_USE_USER_MEMCPY set so the port layer copies from user
 * memory safely, and SEM_FLAG_INTERRUPTABLE so the wait can be broken.
 */
int user_port_write_etc(port_id uport, int32 umsg_code, void *umsg_buffer, size_t ubuffer_size, uint32 uflags, bigtime_t utimeout)
{
	uint32 flags;

	if (umsg_buffer == NULL)
		return ERR_INVALID_ARGS;
	if (is_kernel_address(umsg_buffer))
		return ERR_VM_BAD_USER_MEMORY;

	flags = uflags | PORT_FLAG_USE_USER_MEMCPY | SEM_FLAG_INTERRUPTABLE;
	return port_write_etc(uport, umsg_code, umsg_buffer, ubuffer_size,
		flags, utimeout);
}
/*
 * Syscall wrapper for port_get_info().
 *
 * Fills a kernel-side port_info and copies it out to the validated user
 * buffer. Returns the port_get_info() result, or the user_memcpy error
 * if the copy-out fails.
 */
int user_port_get_info(port_id id, struct port_info *uinfo)
{
	struct port_info kinfo;
	int result;
	int copy_err;

	if (uinfo == NULL)
		return ERR_INVALID_ARGS;
	if (is_kernel_address(uinfo))
		return ERR_VM_BAD_USER_MEMORY;

	result = port_get_info(id, &kinfo);

	// hand the info back to userspace
	copy_err = user_memcpy(uinfo, &kinfo, sizeof(struct port_info));
	if (copy_err < 0)
		return copy_err;

	return result;
}
/*
 * Syscall wrapper for port_find().
 *
 * Copies the port name from userspace into a bounded, NUL-terminated
 * kernel buffer before performing the lookup.
 */
port_id user_port_find(const char *port_name)
{
	char kname[SYS_MAX_OS_NAME_LEN];
	int copy_err;

	if (port_name == NULL)
		return ERR_INVALID_ARGS;
	if (is_kernel_address(port_name))
		return ERR_VM_BAD_USER_MEMORY;

	copy_err = user_strncpy(kname, port_name, SYS_MAX_OS_NAME_LEN - 1);
	if (copy_err < 0)
		return copy_err;
	// user_strncpy may not terminate; force it
	kname[SYS_MAX_OS_NAME_LEN - 1] = 0;

	return port_find(kname);
}
/*
 * Syscall wrapper for port_create().
 *
 * A NULL name is legal and passed straight through; otherwise the name
 * is copied from userspace into a bounded, NUL-terminated kernel buffer.
 */
port_id user_port_create(int32 queue_length, const char *uname)
{
	char kname[SYS_MAX_OS_NAME_LEN];
	int copy_err;

	dprintf("user_port_create: queue_length %d\n", queue_length);

	if (uname == NULL)
		return port_create(queue_length, NULL);

	if (is_kernel_address(uname))
		return ERR_VM_BAD_USER_MEMORY;

	copy_err = user_strncpy(kname, uname, SYS_MAX_OS_NAME_LEN - 1);
	if (copy_err < 0)
		return copy_err;
	// user_strncpy may not terminate; force it
	kname[SYS_MAX_OS_NAME_LEN - 1] = 0;

	return port_create(queue_length, kname);
}
static void dump_port_info(int argc, char **argv) { int i; if(argc < 2) { dprintf("port: not enough arguments\n"); return; } // if the argument looks like a hex number, treat it as such if(strlen(argv[1]) > 2 && argv[1][0] == '0' && argv[1][1] == 'x') { unsigned long num = atoul(argv[1]); if(is_kernel_address(num)) { // XXX semi-hack // one can use either address or a port_id, since KERNEL_BASE > MAX_PORTS assumed _dump_port_info((struct port_entry *)num); return; } else { unsigned slot = num % MAX_PORTS; if(ports[slot].id != (int)num) { dprintf("port 0x%lx doesn't exist!\n", num); return; } _dump_port_info(&ports[slot]); return; } } // walk through the ports list, trying to match name for(i=0; i<MAX_PORTS; i++) { if (ports[i].name != NULL) if(strcmp(argv[1], ports[i].name) == 0) { _dump_port_info(&ports[i]); return; } } }
/*
 * Software page fault handler.
 *
 * Resolves a fault at 'addr' by locating the covering VM_MAPPING_TYPE_OBJECT
 * mapping, finding or allocating the backing physical page via the object's
 * universal-page (upage) table, and installing the translation.
 *
 * Lock order used here: address-space spinlock (irq-save) -> object lock ->
 * translation-map lock. All failure paths currently panic rather than
 * returning an error.
 *
 * NOTE(review): is_write, is_exec and is_user are accepted but never used in
 * this function — protection checks presumably happen elsewhere; confirm.
 */
static status_t vm_soft_page_fault(addr_t addr, bool is_write, bool is_exec, bool is_user)
{
	vm_address_space_t *aspace;
	vm_mapping_t *mapping;
	vm_upage_t *upage;
	vm_page_t *page;
	unsigned long irqstate;
	status_t err;

	/* get faulted address space: kernel aspace for kernel addresses,
	 * otherwise the current user aspace (which must exist) */
	if(is_kernel_address(addr)) {
		aspace = vm_get_kernel_aspace();
	} else {
		aspace = vm_get_current_user_aspace();
		if(!aspace)
			panic("vm_soft_page_fault: no user address space!\n");
	}

	/* increment address space faults counter */
	atomic_inc((atomic_t*)&aspace->faults_count);

	/** TODO: spinlocks are temporary solution here. **/
	/* acquire lock before touching address space */
	irqstate = spin_lock_irqsave(&aspace->lock);

	/* get faulted mapping */
	err = vm_aspace_get_mapping(aspace, addr, &mapping);
	if(err != NO_ERROR)
		panic("vm_soft_page_fault: can't get mapping at address %x, err = %x!\n", addr, err);

	/* this page fault handler deals only with mapped objects */
	if(mapping->type != VM_MAPPING_TYPE_OBJECT)
		panic("vm_soft_page_fault: wrong mapping type!\n");

	/* lock mapped object */
	spin_lock(&mapping->object->lock);

	/* get universal page for mapping its data; the upage offset is the
	 * fault address translated into the object's offset space */
	err = vm_object_get_or_add_upage(mapping->object,
	                                 addr - mapping->start + mapping->offset,
	                                 &upage);
	if(err != NO_ERROR)
		panic("vm_soft_page_fault: can't get upage, err = %x!\n", err);

	/* allocate new physical page or just map existing page */
	if(upage->state == VM_UPAGE_STATE_UNWIRED) {
		/* upage is not wired with physical page.
		 * so... allocate new one.
		 */
		page = vm_page_alloc(VM_PAGE_STATE_CLEAR);
		if(page == NULL)
			panic("vm_soft_page_fault: out of physical memory!\n");

		/* stick physical page into upage */
		upage->state = VM_UPAGE_STATE_RESIDENT;
		upage->ppn = page->ppn;
	} else if(upage->state == VM_UPAGE_STATE_RESIDENT) {
		/* upage has resident physical page.
		 * just get it for further mapping.
		 */
		page = vm_page_lookup(upage->ppn);
		if(page == NULL)
			panic("vm_soft_page_fault: wrong physical page number!\n");
	} else {
		/* all other upage states is not supported for now */
		panic("vm_soft_page_fault: invalid universal page state!\n");
		page = NULL; /* keep compiler happy */
	}

	/* now unlock object */
	spin_unlock(&mapping->object->lock);

	/* ... and lock translation map */
	aspace->tmap.ops->lock(&aspace->tmap);

	/* map page into address space with the mapping's protection bits */
	aspace->tmap.ops->map(&aspace->tmap, addr, PAGE_ADDRESS(page->ppn), mapping->protect);

	/* unlock translation map */
	aspace->tmap.ops->unlock(&aspace->tmap);

	/* .. and finally unlock address space */
	spin_unlock_irqrstor(&aspace->lock, irqstate);

	/* drop the reference taken by vm_get_*_aspace() above */
	vm_put_aspace(aspace);

	/* page fault handled successfully */
	return NO_ERROR;
}