/**
 * Repeat a string: return a new unicode object whose bytes are the string
 * of a_id repeated b_id->value times (Python-style "s" * n).
 *
 * @param a_id  unicode object holding the string to repeat
 * @param b_id  int object holding the repeat count
 * @return      freshly created unicode object id
 *
 * TODO check string alloc and leaks (original note preserved)
 */
OBJECT_ID StringMultiply(OBJECT_ID a_id, OBJECT_ID b_id)
{
	int_object *b = (int_object*)mem_lock(b_id);
	unicode_object *a = (unicode_object*)mem_lock(a_id);
	BYTES_ID as_id = a->value;
	NUM bi = b->value;
	mem_unlock(a_id, 0);
	mem_unlock(b_id, 0);

	char *as = (char*)mem_lock(as_id);
	/* fix: strlen(as) was re-evaluated in the loop condition and body on
	 * every iteration -- hoist it once */
	NUM slen = (NUM)strlen(as);
	/* fix: a negative repeat count would make mlen negative/underflow the
	 * allocation size; treat it as "repeat zero times" */
	if (bi < 0)
		bi = 0;
	NUM mlen = (slen * bi) + 1;
#ifdef USE_MEMORY_DEBUGGING
	BYTES_ID tmp_id = mem_malloc_debug(mlen, MEM_POOL_CLASS_DYNAMIC, "str_Multiply() return");
#else
	BYTES_ID tmp_id = mem_malloc(mlen, MEM_POOL_CLASS_DYNAMIC);
#endif
	char *tmp = (char*)mem_lock(tmp_id);
	memset(tmp, 0, mlen);
	for (INDEX i = 0; i < bi; i++)
	{
		memcpy(tmp + (i * slen), as, slen);
	}
	mem_unlock(tmp_id, 1);
	mem_unlock(as_id, 0);
	OBJECT_ID r = obj_CreateUnicode(tmp_id); /* TODO mem_create_string_copy */
	return(r);
}
/**
 * Allocate a new reference-counted memory object
 *
 * @param size Size of memory object
 * @param dh Optional destructor, called when destroyed
 *
 * @return Pointer to allocated object
 */
void *mem_alloc(size_t size, mem_destroy_h *dh)
{
	struct mem *m;

#if MEM_DEBUG
	/* debug builds can force allocation failure once the live block count
	   reaches 'threshold' (-1 disables the limit) -- used for OOM testing */
	mem_lock();
	if (-1 != threshold && (memstat.blocks_cur >= (size_t)threshold)) {
		mem_unlock();
		return NULL;
	}
	mem_unlock();
#endif

	/* header and user payload share a single allocation */
	m = malloc(sizeof(*m) + size);
	if (!m)
		return NULL;

#if MEM_DEBUG
	memset(&m->le, 0, sizeof(struct le));
	mem_lock();
	/* track every live block for leak reporting */
	list_append(&meml, &m->le, m);
	mem_unlock();
#endif

	m->nrefs = 1;
	m->dh = dh;

	STAT_ALLOC(m, size);

	/* user data starts immediately after the header */
	return (void *)(m + 1);
}
/**
 * Return true iff every one of <count> elements of <size> bytes, starting at
 * byte <offset> in the buffer's current storage, is strictly less than
 * <value>. A cached whole-buffer maximum (valid per element size) is used to
 * short-circuit the common case.
 */
bool glxx_buffer_values_are_less_than(GLXX_BUFFER_T *buffer, int offset, int count, int size, int value)
{
	int bsize = mem_get_size(buffer->pool[buffer->current_item].mh_storage);

	vcos_assert(size == 1 || size == 2);
	/* fix: the original asserted "size >= 0", which is already implied by the
	 * assert above -- the clearly intended check is that the element count is
	 * non-negative */
	vcos_assert(offset >= 0 && count >= 0 && offset + size * count <= bsize);
	vcos_assert(!(offset & (size - 1))); /* offset must be element-aligned */

	if (buffer->size_used_for_max != (uint32_t)size)
	{
		/* cached max was computed for a different element size: recalculate
		 * over the whole buffer */
		void *data = mem_lock(buffer->pool[buffer->current_item].mh_storage);
		buffer->max = find_max(bsize / size, size, data);
		buffer->size_used_for_max = size;
		mem_unlock(buffer->pool[buffer->current_item].mh_storage);
	}

	if (buffer->max < value)
		return true;
	else
	{
		/* whole-buffer max is not below value; scan only the requested range */
		void *data = mem_lock(buffer->pool[buffer->current_item].mh_storage);
		int max = find_max(count, size, (uint8_t *)data + offset);
		mem_unlock(buffer->pool[buffer->current_item].mh_storage);
		return max < value;
	}
}
/* Concatenate the strings of two unicode objects and return the result
 * wrapped in a freshly created unicode object. */
OBJECT_ID StringAdd(OBJECT_ID a_id, OBJECT_ID b_id)
{
	unicode_object *left = (unicode_object*)mem_lock(a_id);
	unicode_object *right = (unicode_object*)mem_lock(b_id);

	BYTES_ID joined = str_Cat(left->value, right->value);

	mem_unlock(b_id, 0);
	mem_unlock(a_id, 0);

	UNICODE_ID result = obj_CreateUnicode(joined);
	return(result);
}
/* Prepare iterator <iter_id> to execute block <bo_id> as a generator:
 * tag the iterator with the block, rewind the block to its first
 * instruction and install the generator step function. */
void iter_InitGenerator(ITER_ID iter_id, VM_ID vm_id, BLOCK_ID bo_id)
{
	iter_object *it = (iter_object*)mem_lock(iter_id);

	it->tag = bo_id;
	obj_IncRefCount(bo_id);

	/* rewind: instruction pointer back to the block's start */
	block_object *blk = (block_object*)mem_lock(bo_id);
	blk->ip = blk->start;
	mem_unlock(bo_id, 1);

	it->block_stack = stack_Create();
	it->iter_func = &iter_Generator;

	mem_unlock(iter_id, 1);
}
/* Pop vm block-stack entries down to (and including) the iterator's tag,
 * pushing each popped entry onto the iterator's private stack. The private
 * stack therefore holds them in reversed order; iter_RestoreBlockStack
 * replays them back. The tag itself is consumed, not saved. */
void iter_SaveBlockStack(ITER_ID iter_id, VM_ID vm_id)
{
	struct _vm *vm = (struct _vm*)mem_lock(vm_id);
	iter_object *it = (iter_object*)mem_lock(iter_id);

	OBJECT_ID block_id = 0;
	while ((block_id = stack_Pop(vm->blocks)) != it->tag)
	{
		stack_Push(it->block_stack, block_id); /* reversed order */
	}

	mem_unlock(iter_id, 0);
	mem_unlock(vm_id, 0);
}
/*
 * Allocate u32Size bytes; when CMN_MMGR_SUPPORT is enabled and a module
 * callback is registered, the allocation is also recorded against module
 * u32ModuleID. On any bookkeeping failure the raw allocation is released
 * again and NULL is returned.
 */
HI_VOID* HI_MALLOC(HI_U32 u32ModuleID, HI_U32 u32Size)
{
	HI_VOID* pMemAddr = NULL;

	pMemAddr = malloc(u32Size);

#ifdef CMN_MMGR_SUPPORT
	if (NULL != pMemAddr && g_fnModuleCallback)
	{
		struct head* pHead = NULL;
		HI_S32 s32MallocSize = 0;
		HI_S32 s32Ret = 0;

		mem_lock(&g_MemMutex);

		//lookup the module info.
		s32Ret = g_fnModuleCallback(u32ModuleID, MEM_TYPE_USR, 0);
		if(s32Ret != HI_SUCCESS)
		{
			/* unknown module: undo the allocation and fail */
			mem_unlock(&g_MemMutex);
			free(pMemAddr);
			return NULL;
		}

		pHead = MEM_Add(pMemAddr, u32Size);
		if(NULL != pHead)
		{
			// Add memory info to MODULE MGR
			s32MallocSize = (HI_S32)u32Size;
			g_fnModuleCallback(u32ModuleID, MEM_TYPE_USR, s32MallocSize);
			mem_unlock(&g_MemMutex);
			return pMemAddr;
		}
		else
		{
			/* tracking entry could not be added: undo the allocation */
			mem_unlock(&g_MemMutex);
			free(pMemAddr);
			return NULL;
		}
	}
#endif

	return pMemAddr;
}
/* Inverse of iter_SaveBlockStack: push the iterator's tag back onto the
 * vm block stack, then replay the privately saved (reversed) entries so
 * the vm stack regains its original order. */
void iter_RestoreBlockStack(ITER_ID iter_id, VM_ID vm_id)
{
	struct _vm *vm = (struct _vm*)mem_lock(vm_id);
	iter_object *it = (iter_object*)mem_lock(iter_id);

	stack_Push(vm->blocks, it->tag);
	while (!stack_IsEmpty(it->block_stack))
	{
		OBJECT_ID block_id = stack_Pop(it->block_stack);
		stack_Push(vm->blocks, block_id); /* un-reverses the saved order */
	}

	mem_unlock(iter_id, 0);
	mem_unlock(vm_id, 0);
}
/* Interpreter intrinsic: exit(message, err_no).
 * locals[1] holds the unicode message, locals[2] the integer exit code.
 * Calls vm_Exit and returns a fresh None object. */
OBJECT_ID ic_vm_exit(VM_ID vm, TUPLE_ID locals, TUPLE_ID kw_locals)
{
	OBJECT_ID self = tuple_GetItem(locals, 0);

	OBJECT_ID message = tuple_GetItem(locals, 1);
	unicode_object *msg_obj = (unicode_object*)mem_lock(message);
	BYTES_ID msg = msg_obj->value;
	mem_unlock(message, 0);

	OBJECT_ID err_no = tuple_GetItem(locals, 2);
	int_object *err_obj = (int_object*)mem_lock(err_no);
	NUM exit_code = err_obj->value;
	mem_unlock(err_no, 0);

	vm_Exit(vm, msg, exit_code); /* TODO crashing ? */

	OBJECT_ID none = obj_CreateEmpty(TYPE_NONE);
	return(none);
}
/* Compare the string contents of two unicode objects via mem_compare and
 * return a TYPE_TRUE object on match, TYPE_FALSE otherwise. */
OBJECT_ID StringCompare(OBJECT_ID a_id, OBJECT_ID b_id)
{
	unicode_object *ua = (unicode_object*)mem_lock(a_id);
	unicode_object *ub = (unicode_object*)mem_lock(b_id);

	OBJECT_ID left = ua->value;
	OBJECT_ID right = ub->value;
	OBJECT_ID result = obj_CreateEmpty(mem_compare(left, right) ? TYPE_TRUE : TYPE_FALSE);

	mem_unlock(b_id, 0);
	mem_unlock(a_id, 0);
	return(result);
}
/**
 * Dereference a reference-counted memory object. When the reference count
 * is zero, the destroy handler will be called (if present) and the memory
 * will be freed
 *
 * @param data Memory object
 *
 * @return Always NULL
 */
void *mem_deref(void *data)
{
	struct mem *m;

	if (!data)
		return NULL;

	/* the struct mem header sits immediately before the user pointer
	   handed out by mem_alloc() */
	m = ((struct mem *)data) - 1;

	MAGIC_CHECK(m);

	if (--m->nrefs > 0)
		return NULL;

	if (m->dh)
		m->dh(data);

	/* the destroy handler may have taken a fresh reference to keep the
	   object alive (resurrection) -- re-check before freeing */
	if (m->nrefs > 0)
		return NULL;

#if MEM_DEBUG
	mem_lock();
	list_unlink(&m->le);
	mem_unlock();
#endif

	STAT_DEREF(m);

	free(m);

	return NULL;
}
/*
 * Tear down a display: wait for outstanding operations, blank the window
 * currently showing an image, detach the interlock from every image in the
 * display's ring, release the images and finally free the handle.
 */
void egl_disp_free(EGL_DISP_HANDLE_T disp_handle)
{
#ifdef BRCM_V3D_OPT
	return;
#endif
	DISP_T *disp = disp_from_handle(disp_handle);
	uint32_t i;

	finish(disp);

	/* take the current image off the display */
	if (disp->in_use.last_win != EGL_PLATFORM_WIN_NONE) {
		egl_server_platform_display_nothing_sync(disp->in_use.last_win);
	}

	for (i = 0; i != disp->in_use.n; ++i) {
		/* NOTE(review): the single-image case deliberately skips the
		   interlock teardown below -- presumably handled elsewhere;
		   confirm before changing */
		if (disp->in_use.n != 1) {
			KHRN_IMAGE_T *image = (KHRN_IMAGE_T *)mem_lock(disp->in_use.images[i]);
			/* wait for all outstanding writes to the image to complete. we do
			 * this to make sure there aren't any wait-for-display messages
			 * hanging around in the system (we only post wait-for-display
			 * messages before writes). we don't really want to flush unflushed
			 * writes here, but whatever */
			khrn_interlock_read_immediate(&image->interlock);
			vcos_assert(image->interlock.disp_image_handle == egl_disp_image_handle(disp_handle, i));
			image->interlock.disp_image_handle = EGL_DISP_IMAGE_HANDLE_INVALID;
			mem_unlock(disp->in_use.images[i]);
		}
		mem_release(disp->in_use.images[i]);
	}

	free_disp_handle(disp_handle);
}
/**
 * Serialize <image> into the global tga_memory buffer as an uncompressed
 * 32-bit true-color TGA (18-byte header followed by pixel data), converting
 * from t-format and swapping red/blue as required.
 *
 * Requires tga_mem_size >= 18 + 4 * width * height.
 *
 * fix: removed unused locals width, height, x, y.
 */
static void vc_image_to_tga_memory( VC_IMAGE_T* image)
{
	KHRN_IMAGE_WRAP_T src, dst;
	/* source pixels: locked relocatable storage if present, else the raw
	 * image_data pointer */
	uint8_t* src_ptr = (image->mem_handle != MEM_INVALID_HANDLE) ?
		(uint8_t*)mem_lock(image->mem_handle) : (uint8_t*)image->image_data;
	bool tformat = (image->type == VC_IMAGE_TF_RGBA32 || image->type == VC_IMAGE_TF_RGBX32);
	uint8_t twelve_bytes[12] = {0,0,2,0,0,0,0,0,0,0,0,0};
	uint32_t tga_mempos = 0;

	assert(tga_mem_size >= 18 + 4*image->height*image->width);

	/* 0-length image id, no colour map, uncompressed true-color image, (0,0) origin. */
	memcpy(tga_memory, twelve_bytes, 12);
	tga_mempos += 12;

	/* Width and height, 16-bit little endian */
	*(tga_memory+tga_mempos++) = image->width & 0xFF;
	*(tga_memory+tga_mempos++) = (image->width & 0xFF00) >> 8;
	*(tga_memory+tga_mempos++) = image->height & 0xFF;
	*(tga_memory+tga_mempos++) = (image->height & 0xFF00) >> 8;
	/* 0x20 = 32 bits per pixel; 0x20 descriptor = top-left origin (per the
	 * TGA header layout -- bytes 16 and 17) */
	*(tga_memory+tga_mempos++) = '\x20';
	*(tga_memory+tga_mempos++) = '\x20';

	// Swap red and blue. Do t-format conversion if necessary.
	khrn_image_wrap(&src, tformat ? ABGR_8888_TF : ABGR_8888_RSO,
		image->width, image->height, image->pitch, src_ptr);
	khrn_image_wrap(&dst, ARGB_8888_RSO,
		image->width, image->height, image->pitch, tga_memory + tga_mempos);
	khrn_image_wrap_convert(&dst, &src, IMAGE_CONV_GL);

	if (image->mem_handle != MEM_INVALID_HANDLE)
		mem_unlock(image->mem_handle);
}
/*
 * Queue the next image for display on window <win>. If image_handle is
 * MEM_INVALID_HANDLE the expected next image from the display's ring is
 * used; otherwise it is asserted to match the ring entry.
 */
void egl_disp_next(EGL_DISP_HANDLE_T disp_handle, MEM_HANDLE_T image_handle, uint32_t win, uint32_t swap_interval)
{
#ifdef BRCM_V3D_OPT
	return;
#endif
	DISP_T *disp = disp_from_handle(disp_handle);
	KHRN_IMAGE_T *image;
	{
		/* ring lookup: post counter masked by the power-of-2-padded image count */
		MEM_HANDLE_T handle = disp->in_use.images[disp->in_use.post & (next_power_of_2(disp->in_use.n) - 1)];
		if (image_handle == MEM_INVALID_HANDLE) {
			image_handle = handle;
		} else {
			vcos_assert(image_handle == handle);
		}
	}
	/* if there is an unflushed write to the image, we flush it here.
	 * khrn_delayed_display is responsible for ensuring egl_disp_ready is called
	 * after all flushed writes have completed (it may do this by, for example,
	 * posting a message into the same fifo as the last write) */
	image = (KHRN_IMAGE_T *)mem_lock(image_handle);
	khrn_interlock_read(&image->interlock, KHRN_INTERLOCK_USER_NONE);
	khrn_delayed_display(image, egl_disp_slot_handle(disp_handle, post(disp, win, swap_interval)));
	mem_unlock(image_handle);
}
/*
 * Signal/exit handler: drive all PWM channels to zero, reset the DMA
 * engine, release the mailbox-allocated memory, remove the control FIFOs
 * and exit. The 'dummy' parameter is the (unused) signal number.
 */
static void terminate(int dummy)
{
	int i;

	dprintf("Resetting DMA...\n");
	if (dma_reg && mbox.virt_addr) {
		/* zero every channel output before stopping the DMA engine */
		for (i = 0; i < num_channels; i++)
			channel_pwm[i] = 0;
		update_pwm();
		udelay(CYCLE_TIME_US); /* let one full PWM cycle drain */
		dma_reg[DMA_CS] = DMA_RESET;
		udelay(10);
	}

	dprintf("Freeing mbox memory...\n");
	if (mbox.virt_addr != NULL) {
		unmapmem(mbox.virt_addr, NUM_PAGES * PAGE_SIZE);
		if (mbox.handle <= 2) {
			/* we need to reopen mbox file */
			mbox.handle = mbox_open();
		}
		mem_unlock(mbox.handle, mbox.mem_ref);
		mem_free(mbox.handle, mbox.mem_ref);
		mbox_close(mbox.handle);
	}

	dprintf("Unlink %s...\n", DEVFILE);
	unlink(DEVFILE);

	dprintf("Unlink %s...\n", DEVFILE_MBOX);
	unlink(DEVFILE_MBOX);

	printf("pi-blaster stopped.\n");

	exit(1);
}
/* call from master task. caller should notify llat task if necessary */
/*
 * Claim the next free display slot for window <win>, snapshot the image
 * to be shown into it and advance the post counter. Blocks (via
 * khrn_sync_master_wait) until a slot is available. Returns the slot index.
 */
static uint32_t post(DISP_T *disp, uint32_t win, uint32_t swap_interval)
{
	uint32_t slot;
	MEM_HANDLE_T handle;

	/* wait for a free slot */
	while ((next_pos(disp->in_use.post, disp->in_use.n) - disp->in_use.on) > SLOTS_N) {
		khrn_sync_master_wait();
	}
	khrn_barrier();

	/* fill it in */
	slot = disp->in_use.post & (SLOTS_N - 1);
	/* we take a copy of the KHRN_IMAGE_T here as in the swap interval 0 case the
	 * width/height/stride could change before we get around to putting the image
	 * on the display */
	handle = disp->in_use.images[slot & (next_power_of_2(disp->in_use.n) - 1)];
	disp->in_use.slots[slot].image = *(KHRN_IMAGE_T *)mem_lock(handle);
	mem_unlock(handle);
	disp->in_use.slots[slot].win = win;
	disp->in_use.slots[slot].swap_interval = swap_interval;
	disp->in_use.slots[slot].ready = false;
	disp->in_use.slots[slot].skip = false;
	disp->in_use.slots[slot].wait_posted = false;

	/* advance the post counter */
	advance_pos(&disp->in_use.post, disp->in_use.n);

	return slot;
}
/**
 * Cleanup previously allocated device memory and buffers.
 *
 * @param ws2811 ws2811 instance pointer.
 *
 * @returns None
 */
void ws2811_cleanup(ws2811_t *ws2811)
{
	int chan;

	/* free the per-channel LED buffers */
	for (chan = 0; chan < RPI_PWM_CHANNELS; chan++)
	{
		if (ws2811->channel[chan].leds)
		{
			free(ws2811->channel[chan].leds);
		}
		ws2811->channel[chan].leds = NULL;
	}

	/* release the mailbox-allocated DMA memory, then clear the global
	   mbox record so a later cleanup is a no-op */
	if (mbox.virt_addr != NULL)
	{
		unmapmem(mbox.virt_addr, mbox.size);
		mem_unlock(mbox.handle, mbox.mem_ref);
		mem_free(mbox.handle, mbox.mem_ref);
		if (mbox.handle >= 0)
			mbox_close(mbox.handle);
		memset(&mbox, 0, sizeof(mbox));
	}

	ws2811_device_t *device = ws2811->device;
	if (device) {
		free(device);
	}
	ws2811->device = NULL;
}
/* Advance the iterator one step by invoking its step function and return
 * the produced object. */
OBJECT_ID iter_Next(ITER_ID iter_id, VM_ID vm_id)
{
	iter_object *it = (iter_object*)mem_lock(iter_id);
	OBJECT_ID produced = it->iter_func(iter_id, vm_id);
	mem_unlock(iter_id, 0);
	return(produced);
}
/*
 * Interpreter intrinsic: close a file object. Looks up the object's
 * "__file__" tag attribute and, if it carries a live stream, frees the
 * stream. Always returns a fresh None object.
 */
OBJECT_ID ic_file_close(VM_ID vm, TUPLE_ID locals, TUPLE_ID kw_locals)
{
	OBJECT_ID self = tuple_GetItem(locals, 0);

	UNICODE_ID file_name = obj_CreateUnicode(mem_create_string("__file__"));
	OBJECT_ID file_tag = obj_GetAttribute(self, file_name);
	/* inc+dec pair disposes the temporary attribute-name object */
	obj_IncRefCount(file_name);
	obj_DecRefCount(file_name);
	if (obj_GetType(file_tag) == TYPE_TAG)
	{
		//#ifdef USE_MEMORY_LOCK_DEBUGGING
		//tag_object *ft = (tag_object*)mem_lock_debug(file_tag,"ic_file_close taglock - stream freeing");
		//#else
		tag_object *ft = (tag_object*)mem_lock(file_tag);
		//#endif
		/* only free when a stream is actually attached */
		if (ft->tag != 0)
		{
			stream_Free(ft->tag);
		}
		//#ifdef USE_MEMORY_LOCK_DEBUGGING
		//mem_unlock_debug(file_tag,0,"ic_file_close taglock - stream freeing");
		//#else
		mem_unlock(file_tag, 0);
		//#endif
	}
	OBJECT_ID tmp = obj_CreateEmpty(TYPE_NONE);
	return(tmp);
}
/**
 * Print memory status
 *
 * @param pf Print handler for debug output
 * @param unused Unused parameter
 *
 * @return 0 if success, otherwise errorcode
 */
int mem_status(struct re_printf *pf, void *unused)
{
#if MEM_DEBUG
	struct memstat stat;
	uint32_t c;
	int err = 0;

	(void)unused;

	/* snapshot the counters under the lock so the printout is consistent */
	mem_lock();
	memcpy(&stat, &memstat, sizeof(stat));
	c = list_count(&meml);
	mem_unlock();

	/* NOTE(review): sizeof(...) expressions are size_t but formatted with
	   %u -- presumably re_hprintf handles this; confirm on LP64 targets */
	err |= re_hprintf(pf, "Memory status: (%u bytes overhead pr block)\n",
			  sizeof(struct mem));
	err |= re_hprintf(pf, " Cur: %u blocks, %u bytes (total %u bytes)\n",
			  stat.blocks_cur, stat.bytes_cur,
			  stat.bytes_cur +(stat.blocks_cur*sizeof(struct mem)));
	err |= re_hprintf(pf, " Peak: %u blocks, %u bytes (total %u bytes)\n",
			  stat.blocks_peak, stat.bytes_peak,
			  stat.bytes_peak +(stat.blocks_peak*sizeof(struct mem)));
	err |= re_hprintf(pf, " Block size: min=%u, max=%u\n",
			  stat.size_min, stat.size_max);
	err |= re_hprintf(pf, " Total %u blocks allocated\n", c);

	return err;
#else
	(void)pf;
	(void)unused;
	return 0;
#endif
}
/**
 * Cleanup previously allocated device memory and buffers.
 *
 * @param ws2811 ws2811 instance pointer.
 *
 * @returns None
 */
void ws2811_cleanup(ws2811_t *ws2811)
{
	ws2811_device_t *device = ws2811->device;
	int chan;

	/* free the per-channel LED buffers */
	for (chan = 0; chan < RPI_PWM_CHANNELS; chan++)
	{
		if (ws2811->channel[chan].leds)
		{
			free(ws2811->channel[chan].leds);
		}
		ws2811->channel[chan].leds = NULL;
	}

	/* fix: the original dereferenced device->mbox.handle BEFORE the NULL
	 * check on device below (NULL pointer dereference when the device was
	 * never created); guard the whole teardown instead */
	if (device)
	{
		if (device->mbox.handle != -1)
		{
			videocore_mbox_t *mbox = &device->mbox;

			/* release the mailbox-allocated DMA memory */
			unmapmem(mbox->virt_addr, mbox->size);
			mem_unlock(mbox->handle, mbox->mem_ref);
			mem_free(mbox->handle, mbox->mem_ref);
			mbox_close(mbox->handle);

			mbox->handle = -1; /* mark closed so a second cleanup is safe */
		}

		free(device);
	}
	ws2811->device = NULL;
}
/*
 * Free memory previously obtained from HI_MALLOC. When CMN_MMGR_SUPPORT is
 * enabled and a module callback is registered, the allocation is also
 * removed from the module manager's bookkeeping by charging a negative
 * size to module u32ModuleID.
 */
HI_VOID HI_FREE(HI_U32 u32ModuleID, HI_VOID* pMemAddr)
{
#ifdef CMN_MMGR_SUPPORT
	if (NULL != pMemAddr && g_fnModuleCallback)
	{
		struct head *p = NULL;
		HI_S32 s32MallocSize = 0;

		mem_lock(&g_MemMutex);
		p = MEM_Find(pMemAddr);
		if ( NULL != p )
		{
			// Update memory info for MODULE MGR
			s32MallocSize = (HI_S32)p->size;
			s32MallocSize *= -1; /* negative delta = deallocation */
			g_fnModuleCallback(u32ModuleID, MEM_TYPE_USR, s32MallocSize);
			MEM_Del(p);
		}
		mem_unlock(&g_MemMutex);
	}
#endif

	free(pMemAddr);
	return;
}
/**
 * Close table (warning: the file is not deleted)
 */
void dbt_close(dbt_t t)
{
	// close the file (if exists)
	if (vmt[t].flags & 1) {
		// VMT: use file
		long dblen;

		dblen = lseek(vmt[t].f_handle, 0, SEEK_END);
		close(vmt[t].f_handle);
		close(vmt[t].i_handle);
		vmt[t].used = 0;
		if (dblen > VMT_DEFRAG_SIZE) {
			// too big, defrag it
			dbt_file_pack(t);
		}
	}
	else {
		// VMT: use memory
		int i;

		// free records
		for (i = 0; i < vmt[t].size; i++) {
			if (vmt[t].m_table[i]) {
				mem_free(vmt[t].m_table[i]);
			}
		}

		// free table (unlock the handle first, then release it)
		mem_unlock(vmt[t].m_handle);
		mem_free(vmt[t].m_handle);
		vmt[t].used = 0;
	}
}
/**
 * Allocate a standard size (use it at startup; good speed optimization)
 */
void dbt_prealloc(dbt_t t, int num, int recsize)
{
	if (vmt[t].flags & 1) {
		// use file
		int newsize;

		newsize = num - vmt[t].size;
		if (newsize > 0) {
			dbt_file_append(t, newsize, recsize);
		}
	}
	else {
		// use memory
		int i;

		if (vmt[t].size < num) {
			// grow the record-handle table; unlock before mem_realloc (the
			// block may move) and re-lock the possibly new handle afterwards
			mem_unlock(vmt[t].m_handle);
			vmt[t].m_handle = mem_realloc(vmt[t].m_handle, sizeof(mem_t) * num);
			vmt[t].m_table = (mem_t *)mem_lock(vmt[t].m_handle);
			// pre-allocate the newly added records
			for (i = vmt[t].size; i < num; i++) {
				vmt[t].m_table[i] = mem_alloc(recsize);
			}
			vmt[t].size = num;
		}
	}
}
/* Lock-protected wrapper: forwards to in_find_free() while holding the
 * memory lock. */
void out_find_free(oskit_addr_t *inout_addr, oskit_size_t *out_size, lmm_flags_t *out_flags)
{
	mem_lock();
	in_find_free(inout_addr, out_size, out_flags);
	mem_unlock();
}
/*
 * Cleanup handler: stop the carrier clock on GPIO4, reset the DMA engine,
 * close the MPX source and control pipe, release the mailbox memory and
 * exit with status <num>.
 */
static void terminate(int num)
{
	// Stop outputting and generating the clock.
	if (clk_reg && gpio_reg && mbox.virt_addr) {
		// Set GPIO4 to be an output (instead of ALT FUNC 0, which is the clock).
		gpio_reg[GPFSEL0] = (gpio_reg[GPFSEL0] & ~(7 << 12)) | (1 << 12);

		// Disable the clock generator.
		clk_reg[GPCLK_CNTL] = 0x5A;
	}

	if (dma_reg && mbox.virt_addr) {
		dma_reg[DMA_CS] = BCM2708_DMA_RESET;
		udelay(10);
	}

	fm_mpx_close();
	close_control_pipe();

	/* release the mailbox-allocated DMA memory */
	if (mbox.virt_addr != NULL) {
		unmapmem(mbox.virt_addr, NUM_PAGES * 4096);
		mem_unlock(mbox.handle, mbox.mem_ref);
		mem_free(mbox.handle, mbox.mem_ref);
	}

	printf("Terminating: cleanly deactivated the DMA engine and killed the carrier.\n");

	exit(num);
}
/* Serialized query: amount of memory available for the given flags. */
oskit_size_t out_avail(lmm_flags_t flags)
{
	oskit_size_t avail;

	mem_lock();
	avail = in_avail(flags);
	mem_unlock();

	return avail;
}
/* Serialized allocation of <size> bytes honoring <flags>. */
void *out_alloc(oskit_size_t size, lmm_flags_t flags)
{
	void *block;

	mem_lock();
	block = in_alloc(size, flags);
	mem_unlock();

	return block;
}
/* Serialized single-page allocation honoring <flags>. */
void *out_alloc_page(lmm_flags_t flags)
{
	void *page;

	mem_lock();
	page = in_alloc_page(flags);
	mem_unlock();

	return page;
}
/* Lock-protected wrapper: registers a memory region with the LMM while
 * holding the memory lock. */
void out_add_region(lmm_region_t *lmm_region, void *addr, oskit_size_t size, lmm_flags_t flags, lmm_pri_t pri)
{
	mem_lock();
	in_add_region(lmm_region, addr, size, flags, pri);
	mem_unlock();
}