/*
 * vm_allocate - allocate zero-filled memory for specified address.
 *
 * If "anywhere" argument is true, the "addr" argument will be
 * ignored.  In this case, the address of free space will be
 * found automatically.
 *
 * The allocated area has writable, user-access attribute by
 * default.  The "addr" and "size" argument will be adjusted
 * to page boundary.
 *
 * @task:     target task whose map receives the allocation.
 * @addr:     user-space pointer holding the requested address on
 *            entry; receives the actually allocated address on exit.
 * @size:     requested size in bytes (page-rounded by do_allocate).
 * @anywhere: non-zero to let the kernel pick a free region.
 *
 * Returns 0 on success, or ESRCH/EPERM/EFAULT/EACCES on failure.
 */
int
vm_allocate(task_t task, void **addr, size_t size, int anywhere)
{
	int err;
	void *uaddr;

	sched_lock();

	/* The target task must still exist. */
	if (!task_valid(task)) {
		err = ESRCH;
		goto out;
	}
	/* Touching another task's map requires CAP_MEMORY. */
	if (task != cur_task() && !task_capable(CAP_MEMORY)) {
		err = EPERM;
		goto out;
	}
	/* Safely fetch the requested address from user space. */
	if (umem_copyin(addr, &uaddr, sizeof(*addr))) {
		err = EFAULT;
		goto out;
	}
	/*
	 * Fix: validate the copied-in value (uaddr), not *addr.
	 * Dereferencing the raw user pointer in the kernel defeats
	 * the purpose of umem_copyin() and may fault or race with
	 * user-space modification of *addr.
	 */
	if (anywhere == 0 && !user_area(uaddr)) {
		err = EACCES;
		goto out;
	}

	err = do_allocate(task->map, &uaddr, size, anywhere);
	if (err == 0) {
		/* Report the actually allocated address back to the caller. */
		if (umem_copyout(&uaddr, addr, sizeof(uaddr)))
			err = EFAULT;
	}
 out:
	sched_unlock();
	return err;
}
/*
 * Setup task image for boot task.  (NOMMU version)
 * Return 0 on success, -1 on failure.
 *
 * Note: We assume that the task images are already copied to
 * the proper address by a boot loader, so only the region is
 * reserved here; no bytes are moved except zeroing the BSS.
 */
int
vm_load(vm_map_t map, struct module *mod, void **stack)
{
	void *img;
	size_t len;

	DPRINTF(("Loading task:\'%s\'\n", mod->name));

	/* Reserve one region covering text, data and BSS in place. */
	img = (void *)mod->text;
	len = mod->textsz + mod->datasz + mod->bsssz;
	if (do_reserve(map, &img, len) != 0)
		return -1;

	/* The loader does not clear BSS; zero it just after data. */
	if (mod->bsssz != 0)
		memset((void *)(mod->data + mod->datasz), 0, mod->bsssz);

	/* Allocate the user stack anywhere in the map. */
	if (do_allocate(map, stack, USTACK_SIZE, 1) != 0)
		return -1;

	return 0;
}
/*
 * Load task image for boot task.
 * Return 0 on success, -1 on failure.
 *
 * MMU version: copies the boot module out of its physical pages
 * into freshly allocated virtual segments of the target map, then
 * releases the original physical pages.
 *
 * NOTE(review): on a mid-function failure the already-allocated
 * text segment is not rolled back and the source pages are not
 * freed — presumably acceptable because a boot-task load failure
 * is fatal; confirm against the caller's error handling.
 */
int vm_load(vm_map_t map, struct module *mod, void **stack)
{
	char *src;
	void *text, *data;

	DPRINTF(("Loading task: %s\n", mod->name));

	/*
	 * We have to switch VM mapping to touch the virtual
	 * memory space of a target task without page fault.
	 */
	vm_switch(map);

	/* Kernel-visible address of the module's physical image. */
	src = phys_to_virt(mod->phys);
	text = (void *)mod->text;
	data = (void *)mod->data;

	/*
	 * Create text segment: allocate at the linked address,
	 * copy the image in, then drop write permission.
	 */
	if (do_allocate(map, &text, mod->textsz, 0))
		return -1;
	memcpy(text, src, mod->textsz);
	if (do_attribute(map, text, VMA_READ))
		return -1;

	/*
	 * Create data & BSS segment.  do_allocate() returns
	 * zero-filled memory, so only the initialized data bytes
	 * need copying; BSS stays zero.
	 */
	if (mod->datasz + mod->bsssz != 0) {
		if (do_allocate(map, &data, mod->datasz + mod->bsssz, 0))
			return -1;
		/* Data follows text in the image at the same offset. */
		src = src + (mod->data - mod->text);
		memcpy(data, src, mod->datasz);
	}
	/*
	 * Create stack at the fixed user stack base.
	 */
	*stack = (void *)USTACK_BASE;
	if (do_allocate(map, stack, USTACK_SIZE, 0))
		return -1;

	/* Free original pages — the image now lives in the task map. */
	page_free((void *)mod->phys, mod->size);
	return 0;
}
/*
 * Wrap an Android native buffer in a freshly allocated gralloc
 * private handle.
 *
 * The handle copies the buffer's geometry (width/height/stride) and
 * forces gl_format to the translucent magic value rather than using
 * android_buffer->format — presumably required by this HAL; TODO
 * confirm before relying on the buffer's own format.
 *
 * Returns the new handle, or NULL if allocation fails.  The caller
 * owns the returned handle and must free() it.
 */
gralloc_private_handle_t* gralloc_private_handle_from_client_buffer(EGLClientBuffer buffer){
	gralloc_private_handle_t* retVal = (gralloc_private_handle_t*)malloc(sizeof(gralloc_private_handle_t));
	android_native_buffer_t *android_buffer = (android_native_buffer_t *)buffer;

	/* Fix: malloc() result was dereferenced without a NULL check. */
	if (retVal == NULL)
		return NULL;

	retVal->w = android_buffer->width;
	retVal->h = android_buffer->height;
	retVal->stride = android_buffer->stride;
	retVal->gl_format = GRALLOC_MAGICS_HAL_PIXEL_FORMAT_TRANSLUCENT;
	retVal->buffer = android_buffer;
	retVal->res_type = is_dupe(retVal) ? GRALLOC_PRIV_TYPE_GL_RESOURCE
	                                   : GRALLOC_PRIV_TYPE_MM_RESOURCE;
	/*
	 * Only non-duplicate buffers get backing storage; a second
	 * is_dupe() call is kept (rather than caching the result)
	 * to preserve the original call sequence exactly.
	 */
	if(!is_dupe(retVal)){
		retVal->vcHandle = do_allocate((retVal->w * retVal->h) * get_size_pf(android_buffer));
		add_to_dupes(*retVal);
	}
	return retVal;
}
// Allocate `bytes` bytes with the given `alignment`, delegating the
// actual work to the do_allocate() hook (mirrors the interface of
// std::pmr::memory_resource).
void* memory_resource::allocate(size_type bytes, size_type alignment)
{
    void* const p = do_allocate(bytes, alignment);
    return p;
}