/* Overwrite the value stored at slot i of the map.
 * Copies value_bytes from `value` into the value area of item i.
 * Returns hbool_true on success, hbool_false when hMap is NULL or
 * i is outside [0, item_length). NOTE(review): `value` is not
 * NULL-checked here, matching the map's other accessors. */
hbool serial_map_value_set(hserial_map_t hMap, hint32 i, hany value, InvokeTickDeclare){
    serial_map_t * m = (serial_map_t *)hMap;
    if(m == NULL){
        return hbool_false;
    }
    if(i < 0 || i >= m->item_length){
        return hbool_false;
    }
    mem_memcpy(SERIAL_MAP_ITEM_VALUE_AT(m, i), value, m->value_bytes);
    return hbool_true;
}
/* Insert or replace the entry for `key`, keeping items sorted.
 * Returns a pointer to the value bytes stored inside the map
 * (valid until the next mutation), or NULL when any argument is NULL.
 *
 * _serial_map_find result convention (as used below):
 *   KEY_NOT_FOUND -> map is empty (asserted), insert at index 0
 *   0             -> exact match at pIndex, overwrite value in place
 *   < 0           -> key sorts before pIndex; nIndex is the insert slot
 *   > 0           -> key sorts after pIndex; insert at pIndex + 1
 *
 * FIX: the three insert branches were byte-for-byte duplicates except
 * for the insert index; they are unified into a single insert path. */
hany serial_map_put(hserial_map_t hMap, hany key, hany value, InvokeTickDeclare){
    serial_map_t * map = (serial_map_t *)hMap;
    hint32 pIndex, nIndex, i, at;
    hint32 rs;
    hbyte * item;
    if(!map || !key || !value){
        return NULL;
    }
    rs = _serial_map_find(map, key, &pIndex, &nIndex, InvokeTickArg);
    if(rs == 0){
        /* Key already present: overwrite the value in place. */
        item = SERIAL_MAP_ITEM_VALUE_AT(map, pIndex);
        mem_memcpy(item, value, map->value_bytes);
        return item;
    }
    /* Pick the insertion slot from the find result. */
    if(rs == KEY_NOT_FOUND){
        assert(map->item_length == 0);
        at = 0;
    }
    else if(rs < 0){
        at = nIndex;
    }
    else{
        at = pIndex + 1;
    }
    /* Grow storage by one item, then shift the tail up one slot.
     * Each copy moves one whole item between adjacent, non-overlapping
     * slots, so mem_memcpy is safe here. */
    _serial_map_data_extend(map, map->item_length * map->item_bytes + map->item_bytes, InvokeTickArg);
    for(i = map->item_length; i > at; i--){
        mem_memcpy(SERIAL_MAP_ITEM_AT(map, i), SERIAL_MAP_ITEM_AT(map, i - 1), map->item_bytes);
    }
    item = SERIAL_MAP_ITEM_AT(map, at);
    mem_memcpy(item, key, map->key_bytes);
    item += map->key_bytes;           /* item now points at the value area */
    mem_memcpy(item, value, map->value_bytes);
    map->item_length ++;
    return item;
}
/* Create a shallow clone of `list`: the item pointer array is copied,
 * the objects the pointers reference are shared with the original.
 * Returns NULL when `list` is NULL (robustness fix: the original
 * dereferenced a NULL input).
 * FIX: use the project's mem_memset wrapper instead of bare libc
 * memset, consistent with mem_malloc/mem_memcpy used throughout. */
hlist_t list_clone(hlist_t list, InvokeTickDeclare){
    list_t * l = (list_t *)list;
    list_t * r;
    if(l == NULL){
        return NULL;
    }
    r = (list_t *)mem_malloc(sizeof(list_t));
    mem_memset(r, 0, sizeof(list_t));
    r->extend_size = l->extend_size;
    /* Capacity is at least extend_size, and large enough for the items. */
    r->max_count = l->count > l->extend_size ? l->count : l->extend_size;
    r->count = l->count;
    r->items = (hany *)mem_malloc(sizeof(hany) * r->max_count);
    mem_memcpy(r->items, l->items, sizeof(hany) * r->count);
    return (hlist_t)r;
}
/* Remove the entry matching `key`. Later items are shifted down one
 * slot to close the gap. Returns hbool_true when a match was found
 * and removed, hbool_false otherwise (including NULL arguments). */
hbool serial_map_remove(hserial_map_t hMap, hany key, InvokeTickDeclare){
    serial_map_t * m = (serial_map_t *)hMap;
    hint32 prev, next, idx;
    if(m == NULL || key == NULL){
        return hbool_false;
    }
    if(_serial_map_find(m, key, &prev, &next, InvokeTickArg) != 0){
        /* No exact match: nothing to remove. */
        return hbool_false;
    }
    /* Shift each following item one slot toward the front; each copy is
     * between adjacent, non-overlapping item slots. */
    for(idx = prev; idx < m->item_length - 1; idx++){
        mem_memcpy(SERIAL_MAP_ITEM_AT(m, idx), SERIAL_MAP_ITEM_AT(m, idx + 1), m->item_bytes);
    }
    m->item_length --;
    return hbool_true;
}
/* XMS "Move Extended Memory Block" (int 2Fh function 0Bh).
 * bpt points at a guest-memory XMS_MemMove parameter block; all fields
 * are fetched with mem_read* since the block lives in emulated memory.
 * A handle of 0 means the offset field holds a real-mode seg:off
 * pointer instead of an offset into an XMS handle's block.
 * Returns 0 on success or an XMS_* error code. */
Bitu XMS_MoveMemory(PhysPt bpt) {
	/* Read the block with mem_read's */
	Bitu length=mem_readd(bpt+offsetof(XMS_MemMove,length));
	Bitu src_handle=mem_readw(bpt+offsetof(XMS_MemMove,src_handle));
	/* Same 32-bit field is either a RealPt (handle==0) or an offset. */
	union {
		RealPt realpt;
		Bit32u offset;
	} src,dest;
	src.offset=mem_readd(bpt+offsetof(XMS_MemMove,src.offset));
	Bitu dest_handle=mem_readw(bpt+offsetof(XMS_MemMove,dest_handle));
	dest.offset=mem_readd(bpt+offsetof(XMS_MemMove,dest.offset));
	PhysPt srcpt,destpt;
	if (src_handle) {
		if (InvalidHandle(src_handle)) {
			return XMS_INVALID_SOURCE_HANDLE;
		}
		/* handle size is in KB; reject offsets/lengths past the block end */
		if (src.offset>=(xms_handles[src_handle].size*1024U)) {
			return XMS_INVALID_SOURCE_OFFSET;
		}
		if (length>xms_handles[src_handle].size*1024U-src.offset) {
			return XMS_INVALID_LENGTH;
		}
		/* handle .mem is in 4K pages -> physical byte address */
		srcpt=(xms_handles[src_handle].mem*4096)+src.offset;
	} else {
		srcpt=Real2Phys(src.realpt);
	}
	if (dest_handle) {
		if (InvalidHandle(dest_handle)) {
			return XMS_INVALID_DEST_HANDLE;
		}
		if (dest.offset>=(xms_handles[dest_handle].size*1024U)) {
			return XMS_INVALID_DEST_OFFSET;
		}
		if (length>xms_handles[dest_handle].size*1024U-dest.offset) {
			return XMS_INVALID_LENGTH;
		}
		destpt=(xms_handles[dest_handle].mem*4096)+dest.offset;
	} else {
		destpt=Real2Phys(dest.realpt);
	}
	/* NOTE(review): the XMS spec permits overlapping moves when the source
	 * is below the destination; mem_memcpy's behavior on overlap is not
	 * visible here — verify it copies forward or is overlap-safe. */
//	LOG_MSG("XMS move src %X dest %X length %X",srcpt,destpt,length);
	mem_memcpy(destpt,srcpt,length);
	return 0;
}
/* Queue a copy of TASK on one of TQ's per-locality queues, without
   taking any lock. LGRP is a locality hint selecting the target queue;
   a negative LGRP picks a queue at random. Returns 0 on success, -1 if
   the entry allocation fails. */
int tq_enqueue_seq (taskQ_t* tq, task_t *task, int lgrp)
{
    tq_entry_t  *node;
    int         target;

    assert (task != NULL);

    node = (tq_entry_t *)phoenix_mem_malloc (sizeof (tq_entry_t));
    if (node == NULL)
        return -1;

    /* The queue owns a private copy of the task descriptor. */
    mem_memcpy (&node->task, task, sizeof (task_t));

    if (lgrp < 0)
        target = rand() % tq->num_queues;
    else
        target = lgrp % tq->num_queues;

    queue_push_back (tq->queues[target], &node->queue_elem);

    return 0;
}
/* Resize a shared-memory chunk previously returned by shm_alloc.
 * Allocates a new chunk of sz bytes, copies min(old, new) bytes over,
 * and frees the old chunk. Returns the new pointer, the original
 * pointer when sz equals the current size, or NULL (errno = ENOMEM)
 * when the new allocation fails — the old chunk is left intact then.
 * Aborts via CHECK_ERROR if ptr was not allocated by shm_alloc. */
void *shm_realloc(void *ptr, size_t sz)
{
    int idx;
    void *fresh;
    size_t copy_len;

    /* Locate ptr's chunk descriptor. */
    for (idx = 0; idx < MAX_CHUNKS; idx++) {
        if (chunklist[idx].start == ptr)
            break;
    }
    /* We didn't allocate this chunk. */
    CHECK_ERROR(idx == MAX_CHUNKS);

    /* Same size: nothing to do. */
    if (sz == chunklist[idx].size)
        return ptr;

    /* Copy whichever of old/new size is smaller. */
    copy_len = (sz < chunklist[idx].size) ? sz : chunklist[idx].size;

    fresh = shm_alloc(sz);
    if (fresh == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    /* Copy over the data and free the old chunk.
     * NOTE: we could temporarily copy the data into local memory, then
     * free, then allocate and copy, to reduce failures here. --awg */
    mem_memcpy(fresh, ptr, copy_len);
    shm_free(ptr);

    return fresh;
}
/* Pop one task into *TASK from TQ, preferring the queue for locality
   group LGRP (random queue when LGRP < 0), stealing from the other
   queues in cyclic order if the preferred queue is empty.
   Returns 1 when a task was dequeued, 0 when every queue is empty.
   TID selects the caller's rand_r seed and lock slot for dequeue_fn. */
static inline int tq_dequeue_normal_internal (
    taskQ_t* tq, task_t* task, int lgrp, int tid, dequeue_fn dequeue_fn)
{
    int             i, ret, index;
    queue_elem_t    *queue_elem;
    tq_entry_t      *entry;

    assert (task != NULL);
    mem_memset (task, 0, sizeof (task_t));

    index = (lgrp < 0) ? rand_r(&tq->seeds[tid]) : lgrp;
    index %= tq->num_queues;

    ret = (*dequeue_fn)(tq, index, tid, &queue_elem);

    /* Do task stealing if nothing on our queue.
       Cycle through all indexes until success or exhaustion */
    for (i = (index + 1) % tq->num_queues;
         (ret == 0) && (i != index);
         i = (i + 1) % tq->num_queues)
    {
        ret = (*dequeue_fn)(tq, i, tid, &queue_elem);
    }

    if (ret == 0) {
        /* There really is no more work. */
        return 0;
    }

    entry = queue_entry (queue_elem, tq_entry_t, queue_elem);
    assert (entry != NULL);
    mem_memcpy (task, &entry->task, sizeof (task_t));

    /* FIX: the entry was heap-allocated by tq_enqueue* and is no longer
       reachable from any queue once dequeued; without this free every
       processed task leaked one tq_entry_t. */
    phoenix_mem_free (entry);

    return 1;
}
/* Queue a copy of TASK on one of TQ's per-locality queues, taking the
   per-thread lock for the target queue. LGRP is a locality hint
   selecting the queue; a negative LGRP picks one at random via the
   caller's rand_r seed. TID is required for MCS locking. Returns 0 on
   success, -1 if the entry allocation fails. */
int tq_enqueue (taskQ_t* tq, task_t *task, int lgrp, int tid)
{
    tq_entry_t  *node;
    int         target;

    assert (tq != NULL);
    assert (task != NULL);

    node = (tq_entry_t *)phoenix_mem_malloc (sizeof (tq_entry_t));
    if (node == NULL)
        return -1;

    /* The queue owns a private copy of the task descriptor. */
    mem_memcpy (&node->task, task, sizeof (task_t));

    if (lgrp < 0)
        target = rand_r(&tq->seeds[tid]) % tq->num_queues;
    else
        target = lgrp % tq->num_queues;

    lock_acquire (tq->locks[target].per_thread[tid]);
    queue_push_back (tq->queues[target], &node->queue_elem);
    lock_release (tq->locks[target].per_thread[tid]);

    return 0;
}
/* Serialize a compiled vmBinary into a single contiguous runtime image.
 *
 * The image layout, back to front, is:
 *   [header | class-name table | unique-key offsets | class metas | operator metas | unique-key strings]
 * All cross-references inside the image are stored as byte offsets from
 * the start of the returned buffer.
 *
 * Offsets are derived from the total length:
 *   uniqueKeyOffset = length - size of the unique-key string pool
 *   operatorOffset  = uniqueKeyOffset - binary->operatorOffset
 *   classOffset     = operatorOffset  - binary->classOffset
 * Compile-time references are rebased with
 *   runtimeOffset = compileOffset + uniqueKeyOffset - UNIQUE_KEY_OFFSET
 * (0 is preserved as "no reference").
 *
 * Caller owns the returned buffer (allocated with mem_malloc).
 * NOTE(review): bytes is allocated with vmBinaryLength() bytes and every
 * section write assumes that length is exact — confirm vmBinaryLength
 * and the binary's operatorOffset/classOffset stay in sync. */
vmRuntimeClassLibraryBytes * vmBinaryBytes(vmBinary * binary,InvokeTickDeclare){
    hint32 i,c,j,length;
    vmClassMetaOffset uniqueKeyOffset = 0;
    vmClassMetaOffset operatorOffset = 0;
    vmClassMetaOffset offset = 0,t;
    vmClassMetaOffset classOffset = 0;
    vmCompileClassMeta * classMeta;
    vmCompileMetaOperator *op;
    vmCompileMeta * meta;
    vmRuntimeClassLibraryBytes * bytes;
    vmRuntimeMetaOperator rOperator;
    vmRuntimeMeta rMeta;
    vmClassMeta rClassMeta;
    vmRuntimeClassMetaBytes bClassMeta;
    hchar *p;
    length = vmBinaryLength(binary,InvokeTickArg);
    bytes = mem_malloc(length);
    vmRuntimeClassLibraryBytesInit(bytes,InvokeTickArg);
    bytes->classCount = vmCompileObjectArrayCount(binary->classMetas);
    bytes->uniqueKeyCount = binary->uniqueKeyCount;
    /* Unique-key string pool goes at the very end of the image. */
    uniqueKeyOffset = length - buffer_length(binary->uniqueKeys);
    mem_memcpy((hbyte *)bytes + uniqueKeyOffset, buffer_data(binary->uniqueKeys), buffer_length(binary->uniqueKeys));
    /* Operator meta section sits immediately before the string pool. */
    operatorOffset = uniqueKeyOffset - binary->operatorOffset;
    c = vmCompileObjectArrayCount(binary->operatorMetas);
    offset = operatorOffset;
    /* Emit each operator header followed by its inline meta records. */
    for(i=0;i<c;i++){
        op = (vmCompileMetaOperator *)vmCompileObjectArrayGet(binary->operatorMetas, i);
        mem_memset(&rOperator, 0, sizeof(vmRuntimeMetaOperator));
        /* Rebase compile-time unique-key reference; keep 0 as "none". */
        rOperator.uniqueKey = op->binary.uniqueKey ? op->binary.uniqueKey + uniqueKeyOffset - UNIQUE_KEY_OFFSET : 0;
        rOperator.type = op->type;
        rOperator.metaCount = vmCompileObjectArrayCount(op->compileMetas);
        mem_memcpy((hbyte *)bytes + offset, &rOperator, sizeof(vmRuntimeMetaOperator));
        offset += sizeof(vmRuntimeMetaOperator);
        for(j=0;j<rOperator.metaCount;j++){
            meta = (vmCompileMeta *)vmCompileObjectArrayGet(op->compileMetas, j);
            mem_memset(&rMeta, 0, sizeof(vmRuntimeMeta));
            rMeta.type = meta->type;
            /* Copy the payload according to the meta's type bit; scalar
             * values are copied verbatim, reference types are rebased
             * into the runtime image. (Bit-mask test, so the first
             * matching bit wins.) */
            if(rMeta.type & vmRuntimeMetaTypeBoolean){
                rMeta.value.booleanValue = meta->value.booleanValue;
            }
            else if(rMeta.type & vmRuntimeMetaTypeInt16){
                rMeta.value.int16Value = meta->value.int16Value;
            }
            else if(rMeta.type & vmRuntimeMetaTypeInt32){
                rMeta.value.int32Value = meta->value.int32Value;
            }
            else if(rMeta.type & vmRuntimeMetaTypeInt64){
                rMeta.value.int64Value = meta->value.int64Value;
            }
            else if(rMeta.type & vmRuntimeMetaTypeDouble){
                rMeta.value.doubleValue = meta->value.doubleValue;
            }
            else if(rMeta.type & vmRuntimeMetaTypeString){
                rMeta.value.stringKey = meta->binary.valueOffset ? uniqueKeyOffset + meta->binary.valueOffset - UNIQUE_KEY_OFFSET : 0;
            }
            else if(rMeta.type & vmRuntimeMetaTypeObject){
                rMeta.value.objectKey = meta->binary.valueOffset ? uniqueKeyOffset + meta->binary.valueOffset - UNIQUE_KEY_OFFSET : 0;
            }
            else if(rMeta.type & vmRuntimeMetaTypeOperator){
                /* Operator references are relative to the operator
                 * section, not the string pool. */
                rMeta.value.operatorOffset = meta->binary.valueOffset ? operatorOffset + meta->binary.valueOffset: 0;
            }
            else if(rMeta.type & vmRuntimeMetaTypeArg){
                rMeta.value.objectKey = meta->binary.valueOffset ? uniqueKeyOffset + meta->binary.valueOffset - UNIQUE_KEY_OFFSET : 0;
            }
            mem_memcpy((hbyte *)bytes + offset, &rMeta, sizeof(vmRuntimeMeta));
            offset += sizeof(vmRuntimeMeta);
        }
    }
    /* Class meta section precedes the operator section. */
    classOffset = operatorOffset - binary->classOffset;
    offset = classOffset;
    c = vmCompileObjectArrayCount(binary->classMetas);
    for(i=0;i<c;i++){
        classMeta = (vmCompileClassMeta *) vmCompileObjectArrayGet(binary->classMetas, i);
        mem_memset(&rClassMeta, 0, sizeof(vmClassMeta));
        rClassMeta.type = vmClassTypeMeta;
        rClassMeta.superClass = classMeta->binary.superClass ? classMeta->binary.superClass + uniqueKeyOffset - UNIQUE_KEY_OFFSET : 0;
        /* Record where this class meta lives inside the image. */
        rClassMeta.offset = offset;
        rClassMeta.propertys = vmCompileObjectArrayCount(classMeta->propertys);
        rClassMeta.functions = vmCompileObjectArrayCount(classMeta->functions);
        mem_memcpy((hbyte *)bytes + offset, &rClassMeta, sizeof(vmClassMeta));
        offset += sizeof(vmClassMeta);
        /* Follow the class header with per-property, then per-function,
         * offsets into the operator section. */
        for(j=0;j<rClassMeta.propertys;j++){
            op = (vmCompileMetaOperator *) vmCompileObjectArrayGet(classMeta->propertys,j);
            t = operatorOffset + op->binary.offset;
            mem_memcpy((hbyte *)bytes + offset, &t, sizeof(vmClassMetaOffset));
            offset += sizeof(vmClassMetaOffset);
        }
        for(j=0;j<rClassMeta.functions;j++){
            op = (vmCompileMetaOperator *) vmCompileObjectArrayGet(classMeta->functions,j);
            t = operatorOffset + op->binary.offset;
            mem_memcpy((hbyte *)bytes + offset, &t, sizeof(vmClassMetaOffset));
            offset += sizeof(vmClassMetaOffset);
        }
    }
    /* Class-name lookup table goes right after the fixed header. */
    offset = sizeof(vmRuntimeClassLibraryBytes);
    for(i=0;i<c;i++){
        classMeta = (vmCompileClassMeta *) vmCompileObjectArrayGet(binary->classMetas, i);
        mem_memset(&bClassMeta, 0, sizeof(vmRuntimeClassMetaBytes));
        bClassMeta.className = classMeta->binary.className ? classMeta->binary.className + uniqueKeyOffset - UNIQUE_KEY_OFFSET : 0;
        bClassMeta.classMeta = classOffset + classMeta->binary.offset;
        mem_memcpy((hbyte *)bytes + offset, &bClassMeta, sizeof(vmRuntimeClassMetaBytes));
        offset += sizeof(vmRuntimeClassMetaBytes);
    }
    /* Finally, a table of absolute image offsets for each NUL-terminated
     * unique-key string, derived by walking the string pool. */
    p = (hchar *)buffer_to_str(binary->uniqueKeys);
    for(i=0;i<binary->uniqueKeyCount;i++){
        t = (vmClassMetaOffset)(uniqueKeyOffset + p - buffer_data(binary->uniqueKeys));
        mem_memcpy((hbyte *)bytes + offset, &t, sizeof(vmClassMetaOffset));
        offset += sizeof(vmClassMetaOffset);
        p += strlen(p) +1; /* advance past the terminating NUL */
    }
    return bytes;
}
/* Copy `size` bytes of guest physical memory from `src` to `dest`.
 * Thin wrapper over mem_memcpy; callers must ensure both ranges are
 * valid guest physical addresses. NOTE(review): overlap handling is
 * whatever mem_memcpy provides — not visible from here. */
void MEM_BlockCopy(PhysPt dest,PhysPt src,Bitu size) {
	mem_memcpy(dest,src,size);
}
int main(void) { /* clear bss segment */ do{*tmpPtr ++ = 0;}while(tmpPtr <= (char *)&__bss_end); #ifdef MMU_OPENED //move other storage to sram: saved_resume_pointer(virtual addr), saved_mmu_state mem_memcpy((void *)&mem_para_info, (void *)(DRAM_BACKUP_BASE_ADDR1), sizeof(mem_para_info)); #else mem_preload_tlb_nommu(); /*switch stack*/ //save_mem_status_nommu(RESUME1_START |0x02); //move other storage to sram: saved_resume_pointer(virtual addr), saved_mmu_state mem_memcpy((void *)&mem_para_info, (void *)(DRAM_BACKUP_BASE_ADDR1_PA), sizeof(mem_para_info)); /*restore mmu configuration*/ restore_mmu_state(&(mem_para_info.saved_mmu_state)); //disable_dcache(); #endif //serial_init(); if(unlikely((mem_para_info.debug_mask)&PM_STANDBY_PRINT_RESUME)){ serial_puts("after restore mmu. \n"); } if (unlikely((mem_para_info.debug_mask)&PM_STANDBY_PRINT_CHECK_CRC)) { standby_dram_crc(1); } //after open mmu mapping #ifdef FLUSH_TLB //busy_waiting(); mem_flush_tlb(); mem_preload_tlb(); #endif #ifdef FLUSH_ICACHE //clean i cache flush_icache(); #endif //twi freq? setup_twi_env(); mem_twi_init(AXP_IICBUS); #ifdef POWER_OFF restore_ccmu(); #endif /*restore pmu config*/ #ifdef POWER_OFF if (likely(mem_para_info.axp_enable)) { mem_power_exit(mem_para_info.axp_event); } /* disable watch-dog: coresponding with boot0 */ mem_tmr_disable_watchdog(); #endif //before jump to late_resume #ifdef FLUSH_TLB mem_flush_tlb(); #endif #ifdef FLUSH_ICACHE //clean i cache flush_icache(); #endif if (unlikely((mem_para_info.debug_mask)&PM_STANDBY_PRINT_CHECK_CRC)) { serial_puts("before jump_to_resume. \n"); } //before jump, invalidate data jump_to_resume((void *)mem_para_info.resume_pointer, mem_para_info.saved_runtime_context_svc); return; }