/* Free memory that was allocated from the user heap.  This version is
 * safe to call from any context: if exclusive access to the user heap
 * cannot be obtained immediately, the free is deferred to a worker thread.
 *
 * Input Parameters:
 *   address - Pointer previously returned by the user-mode allocator.
 *             NOTE(review): a NULL address is not checked here; presumably
 *             kumm_free() tolerates NULL -- confirm.
 *
 * Returned Value:  None
 */

void sched_ufree(FAR void *address)
{
#ifdef CONFIG_BUILD_KERNEL
  /* REVISIT:  It is not safe to defer user allocation in the kernel mode
   * build.  Why?  Because the correct user context is in place now but
   * will not be in place when the deferred de-allocation is performed.  In
   * order to make this work, we would need to do something like:  (1) move
   * g_delayed_kufree into the group structure, then traverse the groups to
   * collect garbage on a group-by-group basis.
   */

  ASSERT(!up_interrupt_context());
  kumm_free(address);

#else
  /* Check if this is an attempt to deallocate memory from an exception
   * handler.  If this function is called from the IDLE task, then we
   * must have exclusive access to the memory manager to do this.
   */

  if (up_interrupt_context() || kumm_trysemaphore() != 0)
    {
      irqstate_t flags;

      /* Yes.. Make sure that this is not an attempt to free kernel memory
       * using the user deallocator.
       */

      flags = irqsave();
#if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
     defined(CONFIG_MM_KERNEL_HEAP)
      DEBUGASSERT(!kmm_heapmember(address));
#endif

      /* Delay the deallocation until a more appropriate time:  queue the
       * block on the list of delayed user-heap deallocations.  Interrupts
       * are disabled because the queue may also be accessed from interrupt
       * level and from other tasks.
       */

      sq_addlast((FAR sq_entry_t *)address,
                 (FAR sq_queue_t *)&g_delayed_kufree);

      /* Signal the worker thread that it has some clean up to do */

#ifdef CONFIG_SCHED_WORKQUEUE
      work_signal(LPWORK);
#endif
      irqrestore(flags);
    }
  else
    {
      /* No.. just deallocate the memory now.  We hold the heap semaphore
       * from the successful kumm_trysemaphore() above and must release it.
       */

      kumm_free(address);
      kumm_givesemaphore();
    }
#endif
}
/* Free memory on behalf of a task group, selecting the allocator that
 * matches the group's privilege level.
 *
 * Input Parameters:
 *   group - The task group that owns the memory.  NULL means "the group
 *           of the task that is currently running".
 *   mem   - The memory to be freed.
 *
 * Returned Value:  None
 */

void group_free(FAR struct task_group_s *group, FAR void *mem)
{
  /* A NULL group pointer means the current group */

  if (group == NULL)
    {
      FAR struct tcb_s *rtcb = (FAR struct tcb_s *)g_readytorun.head;

      DEBUGASSERT(rtcb != NULL && rtcb->group != NULL);
      group = rtcb->group;
    }

  /* Privileged groups use the kernel-mode allocator; unprivileged groups
   * use the user-mode allocator.
   */

  if ((group->tg_flags & GROUP_FLAG_PRIVILEGED) != 0)
    {
      kmm_free(mem);   /* Kernel heap */
    }
  else
    {
      kumm_free(mem);  /* User heap */
    }
}
/* Release the address environment (or fallback text allocation) that was
 * created for an ELF image, then reset all of the bookkeeping fields in
 * the load information structure.
 *
 * Input Parameters:
 *   loadinfo - The ELF load state whose address environment is released.
 *
 * Returned Value:  None (destroy failures are only logged)
 */

void elf_addrenv_free(FAR struct elf_loadinfo_s *loadinfo)
{
#ifdef CONFIG_ARCH_ADDRENV
  /* Free the address environment */

  int ret = up_addrenv_destroy(&loadinfo->addrenv);
  if (ret < 0)
    {
      bdbg("ERROR: up_addrenv_destroy failed: %d\n", ret);
    }
#else
  /* No address environments:  free the ELF image allocation, if any */

  if (loadinfo->textalloc != 0)
    {
      kumm_free((FAR void *)loadinfo->textalloc);
    }
#endif

  /* Clear out all indications of the allocated address environment */

  loadinfo->textalloc = 0;
  loadinfo->dataalloc = 0;
  loadinfo->textsize  = 0;
  loadinfo->datasize  = 0;
}
/* Release the stack previously allocated for a TCB, using the allocator
 * that matches the thread type, and clear the TCB stack bookkeeping.
 *
 * Input Parameters:
 *   dtcb  - The TCB whose stack is being released.
 *   ttype - Thread type (TCB_FLAG_TTYPE_* value); selects the kernel
 *           allocator for kernel threads in kernel-heap builds.
 *
 * Returned Value:  None
 */

void up_release_stack(struct tcb_s *dtcb, uint8_t ttype)
{
  /* Nothing to free if no stack was ever allocated */

  if (dtcb->stack_alloc_ptr != NULL)
    {
#if defined(CONFIG_BUILD_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
      /* Kernel threads get their stacks from the kernel heap */

      if (ttype == TCB_FLAG_TTYPE_KERNEL)
        {
          kmm_free(dtcb->stack_alloc_ptr);
        }
      else
#endif
        {
          /* Tasks and pthreads get their stacks from the user heap */

          kumm_free(dtcb->stack_alloc_ptr);
        }
    }

  /* Mark the stack freed */

  dtcb->stack_alloc_ptr = NULL;
  dtcb->adj_stack_size  = 0;
  dtcb->adj_stack_ptr   = NULL;
}
/* Unload a binary module:  run the format-specific unload hook, execute
 * C++ destructors (if configured), unmap any mapped address space, and
 * free all allocated regions recorded in the binary structure.
 *
 * FIX: the destructor-failure log message previously said
 * "exec_ctors() failed" although the failing call is exec_dtors().
 *
 * Input Parameters:
 *   binp - The binary structure to unload.  NULL is tolerated (no-op).
 *
 * Returned Value:
 *   OK on success; ERROR with errno set on failure.
 */

int unload_module(FAR struct binary_s *binp)
{
  int ret;
  int i;

  if (binp)
    {
      /* Perform any format-specific unload operations */

      if (binp->unload)
        {
          ret = binp->unload(binp);
          if (ret < 0)
            {
              bdbg("binp->unload() failed: %d\n", ret);
              set_errno(-ret);
              return ERROR;
            }
        }

#ifdef CONFIG_BINFMT_CONSTRUCTORS
      /* Execute C++ destructors */

      ret = exec_dtors(binp);
      if (ret < 0)
        {
          bdbg("exec_dtors() failed: %d\n", ret);
          set_errno(-ret);
          return ERROR;
        }
#endif

      /* Unmap mapped address spaces */

      if (binp->mapped)
        {
          bvdbg("Unmapping address space: %p\n", binp->mapped);
          munmap(binp->mapped, binp->mapsize);
        }

      /* Free allocated address spaces */

      for (i = 0; i < BINFMT_NALLOC; i++)
        {
          if (binp->alloc[i])
            {
              bvdbg("Freeing alloc[%d]: %p\n", i, binp->alloc[i]);
              kumm_free((FAR void *)binp->alloc[i]);
            }
        }

      /* Notice that the address environment is not destroyed.  This should
       * happen automatically when the task exits.
       */
    }

  return OK;
}
void nxbe_closewindow(struct nxbe_window_s *wnd) { FAR struct nxbe_state_s *be; #ifdef CONFIG_DEBUG if (!wnd) { return; } #endif be = wnd->be; /* The background window should never be closed */ DEBUGASSERT(wnd != &be->bkgd); /* Is there a window above the one being closed? */ if (wnd->above) { /* Yes, now the window below that one is the window below * the one being closed. */ wnd->above->below = wnd->below; } else { /* No, then the top window is the one below this (which * can never be NULL because the background window is * always at the true bottom of the list */ be->topwnd = wnd->below; } /* There is always a window below the one being closed (because * the background is never closed. Now, the window above that * is the window above the one that is being closed. */ wnd->below->above = wnd->above; /* Redraw the windows that were below us (and may now be exposed) */ nxbe_redrawbelow(be, wnd->below, &wnd->bounds); /* Then discard the window structure. Here we assume that the user-space * allocator was used. */ kumm_free(wnd); }
/* Initialize a pre-allocated window structure and insert it at the top of
 * the display's window list, then report its initial size/position and
 * (if configured) initial mouse state to the client.
 *
 * FIX: the !fe/!cb error path set errno with a raw "errno = EINVAL"
 * assignment while the !wnd path used set_errno(); both now use the
 * set_errno() interface consistently.
 *
 * Input Parameters:
 *   handle - The server state handle (front-end state).
 *   hwnd   - The pre-allocated window structure (freed on parameter error).
 *   cb     - Client callback vtable for window events.
 *   arg    - Opaque client argument passed back in callbacks.
 *
 * Returned Value:
 *   OK on success; ERROR with errno set to EINVAL on bad parameters
 *   (parameter checks only in CONFIG_DEBUG builds).
 */

int nx_constructwindow(NXHANDLE handle, NXWINDOW hwnd,
                       FAR const struct nx_callback_s *cb, FAR void *arg)
{
  FAR struct nxfe_state_s  *fe  = (FAR struct nxfe_state_s *)handle;
  FAR struct nxbe_window_s *wnd = (FAR struct nxbe_window_s *)hwnd;
  FAR struct nxbe_state_s  *be  = &fe->be;

#ifdef CONFIG_DEBUG
  if (!wnd)
    {
      set_errno(EINVAL);
      return ERROR;
    }

  if (!fe || !cb)
    {
      /* The caller handed us the window allocation; free it on failure */

      kumm_free(wnd);
      set_errno(EINVAL);
      return ERROR;
    }
#endif

  /* Initialize the window structure */

  wnd->be  = be;
  wnd->cb  = cb;
  wnd->arg = arg;

  /* Insert the new window at the top on the display.  topwnd is
   * never NULL (it may point only at the background window, however)
   */

  wnd->above        = NULL;
  wnd->below        = be->topwnd;
  be->topwnd->above = wnd;
  be->topwnd        = wnd;

  /* Report the initialize size/position of the window */

  nxfe_reportposition((NXWINDOW)wnd);

  /* Provide the initial mouse settings */

#ifdef CONFIG_NX_XYINPUT
  nxsu_mousereport(wnd);
#endif

  return OK;
}
/* Free memory from the user heap.  Safe to call from any context:  if
 * exclusive access to the user heap cannot be obtained immediately (e.g.
 * from an interrupt handler or the IDLE task), the free is queued and
 * performed later by the worker thread.
 *
 * FIX: the sq_addlast() casts were missing the FAR qualifier, unlike the
 * companion implementation of this function elsewhere in the tree; both
 * casts now carry FAR per the NuttX coding convention.
 *
 * Input Parameters:
 *   address - Pointer previously returned by the user-mode allocator.
 *
 * Returned Value:  None
 */

void sched_ufree(FAR void *address)
{
  irqstate_t flags;

  /* Check if this is an attempt to deallocate memory from an exception
   * handler.  If this function is called from the IDLE task, then we
   * must have exclusive access to the memory manager to do this.
   */

  if (up_interrupt_context() || kumm_trysemaphore() != 0)
    {
      /* Yes.. Make sure that this is not an attempt to free kernel memory
       * using the user deallocator.
       */

      flags = irqsave();
#if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
     defined(CONFIG_MM_KERNEL_HEAP)
      DEBUGASSERT(!kmm_heapmember(address));
#endif

      /* Delay the deallocation until a more appropriate time. */

      sq_addlast((FAR sq_entry_t *)address,
                 (FAR sq_queue_t *)&g_delayed_kufree);

      /* Signal the worker thread that it has some clean up to do */

#ifdef CONFIG_SCHED_WORKQUEUE
      work_signal(LPWORK);
#endif
      irqrestore(flags);
    }
  else
    {
      /* No.. just deallocate the memory now; we hold the heap semaphore
       * from the successful kumm_trysemaphore() above and must release it.
       */

      kumm_free(address);
      kumm_givesemaphore();
    }
}
/* Release the D-Space resources held by an NXFLAT load structure:  destroy
 * the address environment (or free the flat D-Space region), then free the
 * D-Space container itself.
 *
 * Input Parameters:
 *   loadinfo - The NXFLAT load state whose D-Space is released.
 *
 * Returned Value:  None (destroy failures are only logged)
 */

void nxflat_addrenv_free(FAR struct nxflat_loadinfo_s *loadinfo)
{
  FAR struct dspace_s *dspace;
#ifdef CONFIG_ARCH_ADDRENV
  int ret;
#endif

  DEBUGASSERT(loadinfo);
  dspace = loadinfo->dspace;

  if (dspace)
    {
#ifdef CONFIG_ARCH_ADDRENV
      /* Destroy the address environment.
       *
       * NOTE(review): the ELF counterpart passes &loadinfo->addrenv while
       * this call passes loadinfo->addrenv directly -- confirm that the
       * NXFLAT addrenv field is already a pointer/handle type (the later
       * "loadinfo->addrenv = 0" suggests it is).
       */

      ret = up_addrenv_destroy(loadinfo->addrenv);
      if (ret < 0)
        {
          bdbg("ERROR: up_addrenv_destroy failed: %d\n", ret);
        }

      loadinfo->addrenv = 0;
#else
      /* Free the allocated D-Space region */

      if (dspace->region)
        {
          kumm_free(dspace->region);
        }
#endif

      /* Now destroy the D-Space container.  The container must not be
       * shared at this point (single remaining reference).
       */

      DEBUGASSERT(dspace->crefs == 1);
      kmm_free(dspace);
      loadinfo->dspace = NULL;
    }
}
/* Simulate mmap() by reading the file contents into an allocated RAM
 * region.  On success the region is linked into the global list of
 * mappings and a pointer to the file data is returned.
 *
 * Input Parameters:
 *   fd     - Open file descriptor of the file to "map".
 *   length - Number of bytes to map.
 *   offset - File offset at which the mapping starts.
 *
 * Returned Value:
 *   The address of the mapped file data on success; MAP_FAILED on failure
 *   with errno set (or already set by the failing call).
 */

FAR void *rammap(int fd, size_t length, off_t offset)
{
  FAR struct fs_rammap_s *map;
  FAR uint8_t *alloc;
  FAR uint8_t *rdbuffer;
  ssize_t nread;
  off_t fpos;
  int errcode;
  int ret;

  /* There is a major design flaw that I have not yet thought of fix for:
   * The goal is to have a single region of memory that represents a single
   * file and can be shared by many threads.  That is, given a filename a
   * thread should be able to open the file, get a file descriptor, and
   * call mmap() to get a memory region.  Different file descriptors opened
   * with the same file path should get the same memory region when mapped.
   *
   * The design flaw is that I don't have sufficient knowledge to know that
   * these different file descriptors map to the same file.  So, for the
   * time being, a new memory region is created each time that rammap() is
   * called.  Not very useful!
   */

  /* Allocate a region of memory of the specified size:  one header struct
   * followed immediately by the file data.
   */

  alloc = (FAR uint8_t *)kumm_malloc(sizeof(struct fs_rammap_s) + length);
  if (!alloc)
    {
      ferr("ERROR: Region allocation failed, length: %d\n", (int)length);
      errcode = ENOMEM;
      goto errout;
    }

  /* Initialize the region:  the data area starts right after the header */

  map         = (FAR struct fs_rammap_s *)alloc;
  memset(map, 0, sizeof(struct fs_rammap_s));
  map->addr   = alloc + sizeof(struct fs_rammap_s);
  map->length = length;
  map->offset = offset;

  /* Seek to the specified file offset */

  fpos = lseek(fd, offset, SEEK_SET);
  if (fpos == (off_t)-1)
    {
      /* Seek failed... errno has already been set, but EINVAL is probably
       * the correct response.
       */

      ferr("ERROR: Seek to position %d failed\n", (int)offset);
      errcode = EINVAL;
      goto errout_with_region;
    }

  /* Read the file data into the memory region.  Loop because read() may
   * return short counts or be interrupted by signals.
   */

  rdbuffer = map->addr;
  while (length > 0)
    {
      nread = read(fd, rdbuffer, length);
      if (nread < 0)
        {
          /* Handle the special case where the read was interrupted by a
           * signal.
           */

          errcode = get_errno();
          if (errcode != EINTR)
            {
              /* All other read errors are bad.  errno is already set.
               * (but maybe should be forced to EINVAL?).  NOTE that if
               * FS DEBUG is enabled, then the following ferr() macro will
               * destroy the errno value, so the region path re-sets errno
               * from the saved errcode.
               */

              ferr("ERROR: Read failed: offset=%d errno=%d\n",
                   (int)offset, errcode);
#ifdef CONFIG_DEBUG_FS
              goto errout_with_region;
#else
              goto errout_with_errno;
#endif
            }
        }

      /* Check for end of file. */

      if (nread == 0)
        {
          break;
        }

      /* Increment number of bytes read */

      rdbuffer += nread;
      length   -= nread;
    }

  /* Zero any memory beyond the amount read from the file */

  memset(rdbuffer, 0, length);

  /* Add the buffer to the list of regions (list access serialized by the
   * exclsem semaphore).
   */

  rammap_initialize();
  ret = sem_wait(&g_rammaps.exclsem);
  if (ret < 0)
    {
      goto errout_with_errno;
    }

  map->flink     = g_rammaps.head;
  g_rammaps.head = map;

  sem_post(&g_rammaps.exclsem);
  return map->addr;

errout_with_region:
  kumm_free(alloc);
errout:
  set_errno(errcode);
  return MAP_FAILED;

errout_with_errno:
  /* errno was already set by the failing call; just free and return */

  kumm_free(alloc);
  return MAP_FAILED;
}
/* Simulate munmap() for RAM-backed "mappings" created by rammap().  Only
 * unmapping through to the end of a region is supported; a partial unmap
 * at the end shrinks the region via realloc.
 *
 * FIX: the shrink path previously called kumm_realloc(curr->addr, ...).
 * curr->addr points INSIDE the allocation (the data area following the
 * fs_rammap_s header); the pointer originally returned by the allocator
 * is curr itself, so realloc must receive curr.  Passing an interior
 * pointer to realloc is undefined behavior.
 *
 * Input Parameters:
 *   start  - Start address within a previously mapped region.
 *   length - Number of bytes to unmap (extended to the region end).
 *
 * Returned Value:
 *   OK on success; ERROR with errno set on failure.
 */

int munmap(FAR void *start, size_t length)
{
  FAR struct fs_rammap_s *prev;
  FAR struct fs_rammap_s *curr;
  FAR void *newaddr;
  unsigned int offset;
  int ret;
  int err;

  /* Find a region containing this start and length in the list of regions */

  rammap_initialize();
  ret = sem_wait(&g_rammaps.exclsem);
  if (ret < 0)
    {
      return ERROR;
    }

  /* Search the list of regions */

  for (prev = NULL, curr = g_rammaps.head; curr;
       prev = curr, curr = curr->flink)
    {
      /* Does this region include any part of the specified range? */

      if ((uintptr_t)start < (uintptr_t)curr->addr + curr->length &&
          (uintptr_t)start + length >= (uintptr_t)curr->addr)
        {
          break;
        }
    }

  /* Did we find the region */

  if (!curr)
    {
      fdbg("Region not found\n");
      err = EINVAL;
      goto errout_with_semaphore;
    }

  /* Get the offset from the beginning of the region and the actual number
   * of bytes to "unmap".  All mappings must extend to the end of the
   * region.  There is no support for freeing a block of memory but leaving
   * a block of memory at the end.  This is a consequence of using
   * kumm_realloc() to simulate the unmapping.
   */

  offset = start - curr->addr;
  if (offset + length < curr->length)
    {
      fdbg("Cannot umap without unmapping to the end\n");
      err = ENOSYS;
      goto errout_with_semaphore;
    }

  /* Okay.. the region is being unmapped to the end.  Make sure the length
   * indicates that.
   */

  length = curr->length - offset;

  /* Are we unmapping the entire region (offset == 0)? */

  if (length >= curr->length)
    {
      /* Yes.. remove the mapping from the list */

      if (prev)
        {
          prev->flink = curr->flink;
        }
      else
        {
          g_rammaps.head = curr->flink;
        }

      /* Then free the region */

      kumm_free(curr);
    }

  /* No.. We have been asked to "unmap' only a portion of the memory
   * (offset > 0).  Shrink the allocation in place; the base pointer of
   * the allocation is curr (header + data), not curr->addr.
   */

  else
    {
      newaddr = kumm_realloc(curr,
                             sizeof(struct fs_rammap_s) + length);
      DEBUGASSERT(newaddr == (FAR void *)curr);
      curr->length = length;
    }

  sem_post(&g_rammaps.exclsem);
  return OK;

errout_with_semaphore:
  sem_post(&g_rammaps.exclsem);
  set_errno(err);
  return ERROR;
}
/* Open a directory for reading.  Handles three cases:  the pseudo file
 * system root, a node inside the pseudo file system, and a mountpoint
 * (delegated to the mounted file system).
 *
 * FIX: the relative-path error path stored ret = -ENOTDIR (negative)
 * although every other error path stores a positive errno value that is
 * later passed to set_errno(ret); the sign is now consistent.
 *
 * Input Parameters:
 *   path - Absolute path of the directory; NULL/"" or "/" means the root.
 *
 * Returned Value:
 *   A DIR stream pointer on success; NULL with errno set on failure.
 */

FAR DIR *opendir(FAR const char *path)
{
  FAR struct inode *inode = NULL;
  FAR struct fs_dirent_s *dir;
  struct inode_search_s desc;
#ifndef CONFIG_DISABLE_MOUNTPOINT
  FAR const char *relpath = NULL;
#endif
  bool isroot = false;
  int ret;

  /* If we are given 'nothing' then we will interpret this as
   * request for the root inode.
   */

  SETUP_SEARCH(&desc, path, false);

  inode_semtake();
  if (path == NULL || *path == '\0' || strcmp(path, "/") == 0)
    {
      inode  = g_root_inode;
      isroot = true;
    }
  else
    {
      /* We don't know what to do with relative paths */

      if (*path != '/')
        {
          ret = ENOTDIR;  /* Positive errno value for set_errno() below */
          goto errout_with_semaphore;
        }

      /* Find the node matching the path. */

      ret = inode_search(&desc);
      if (ret >= 0)
        {
          inode = desc.node;
          DEBUGASSERT(inode != NULL);
#ifndef CONFIG_DISABLE_MOUNTPOINT
          relpath = desc.relpath;
#endif
        }
    }

  /* Did we get an inode? */

  if (inode == NULL)
    {
      /* Inode for 'path' does not exist. */

      ret = ENOTDIR;
      goto errout_with_semaphore;
    }

  /* Allocate a type DIR -- which is little more than an inode
   * container.
   */

  dir = (FAR struct fs_dirent_s *)kumm_zalloc(sizeof(struct fs_dirent_s));
  if (!dir)
    {
      /* Insufficient memory to complete the operation. */

      ret = ENOMEM;
      goto errout_with_semaphore;
    }

  /* Populate the DIR structure and return it to the caller.  The way that
   * we do this depends on whenever this is a "normal" pseudo-file-system
   * inode or a file system mountpoint.
   */

  dir->fd_position = 0;  /* This is the position in the read stream */

  /* First, handle the special case of the root inode.  This must be
   * special-cased here because the root inode might ALSO be a mountpoint.
   */

  if (isroot)
    {
      /* Whatever payload the root inode carries, the root inode is always
       * a directory inode in the pseudo-file system
       */

      open_pseudodir(inode, dir);
    }

  /* Is this a node in the pseudo filesystem?  Or a mountpoint?  If the
   * node is the root (isroot == TRUE), then this is a special case.
   */

#ifndef CONFIG_DISABLE_MOUNTPOINT
  else if (INODE_IS_MOUNTPT(inode))
    {
      /* Yes, the node is a file system mountpoint */

      dir->fd_root = inode;  /* Save the inode where we start */

      /* Open the directory at the relative path.
       *
       * NOTE(review): if open_mountpoint() returns a negated errno, the
       * set_errno(ret) in the error path below would store a negative
       * value -- confirm its sign convention.
       */

      ret = open_mountpoint(inode, relpath, dir);
      if (ret != OK)
        {
          goto errout_with_direntry;
        }
    }
#endif
  else
    {
      /* The node is part of the root pseudo file system.  Does the inode
       * have a child?  If so that the child would be the 'root' of a list
       * of nodes under the directory.
       */

      FAR struct inode *child = inode->i_child;
      if (child != NULL)
        {
          /* It looks we have a valid pseudo-filesystem directory node. */

          open_pseudodir(child, dir);
        }
      else if (!inode->u.i_ops)
        {
          /* This is a dangling node with no children and no operations.
           * Set up to enumerate an empty directory.
           */

          open_emptydir(dir);
        }
      else
        {
          ret = ENOTDIR;
          goto errout_with_direntry;
        }
    }

  RELEASE_SEARCH(&desc);
  inode_semgive();
  return ((FAR DIR *)dir);

  /* Nasty goto's make error handling simpler */

errout_with_direntry:
  kumm_free(dir);

errout_with_semaphore:
  RELEASE_SEARCH(&desc);
  inode_semgive();
  set_errno(ret);
  return NULL;
}
/* Close a client connection handle.  The handle memory was obtained from
 * the user-space allocator, so it is returned the same way.
 *
 * Input Parameters:
 *   handle - The NX handle to be released.
 *
 * Returned Value:  None
 */

void nx_close(NXHANDLE handle)
{
  /* For consistency, we use the user-space allocator (if available) */

  kumm_free(handle);
}