/**
 * Initialize an output stream.
 *
 * @returns IPRT status code.
 * @param   pStream             The stream to initialize.
 * @param   pRelatedStream      Pointer to a related stream.  NULL is fine.
 */
int ScmStreamInitForWriting(PSCMSTREAM pStream, PCSCMSTREAM pRelatedStream)
{
    scmStreamInitInternal(pStream, true /*fWriteOrRead*/);

    /* allocate stuff */
    size_t cbEstimate = pRelatedStream
                      ? pRelatedStream->cb + pRelatedStream->cb / 10
                      : _64K;
    cbEstimate = RT_ALIGN(cbEstimate, _4K);
    pStream->pch = (char *)RTMemAlloc(cbEstimate);
    if (pStream->pch)
    {
        size_t cLinesEstimate = pRelatedStream && pRelatedStream->fFullyLineated
                              ? pRelatedStream->cLines + pRelatedStream->cLines / 10
                              : cbEstimate / 24;
        cLinesEstimate = RT_ALIGN(cLinesEstimate, 512);
        pStream->paLines = (PSCMSTREAMLINE)RTMemAlloc(cLinesEstimate * sizeof(SCMSTREAMLINE));
        if (pStream->paLines)
        {
            pStream->paLines[0].off     = 0;
            pStream->paLines[0].cch     = 0;
            pStream->paLines[0].enmEol  = SCMEOL_NONE;
            pStream->cbAllocated        = cbEstimate;
            pStream->cLinesAllocated    = cLinesEstimate;
            return VINF_SUCCESS;
        }

        RTMemFree(pStream->pch);
        pStream->pch = NULL;
    }
    return pStream->rc = VERR_NO_MEMORY;
}
/**
 * @ingroup SystemInit
 *
 * This function will initialize the system heap.
 *
 * @param begin_addr the beginning address of system page
 * @param end_addr the end address of system page
 */
void rt_system_heap_init(void *begin_addr, void *end_addr)
{
    rt_uint32_t limsize, npages;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* align begin and end addr to page boundaries */
    heap_start = RT_ALIGN((rt_uint32_t)begin_addr, RT_MM_PAGE_SIZE);
    heap_end   = RT_ALIGN_DOWN((rt_uint32_t)end_addr, RT_MM_PAGE_SIZE);

    if (heap_start >= heap_end)
    {
        rt_kprintf("rt_system_heap_init, wrong address[0x%x - 0x%x]\n",
                   (rt_uint32_t)begin_addr, (rt_uint32_t)end_addr);
        return;
    }

    limsize = heap_end - heap_start;
    npages  = limsize / RT_MM_PAGE_SIZE;

    /* initialize heap semaphore */
    rt_sem_init(&heap_sem, "heap", 1, RT_IPC_FLAG_FIFO);

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("heap[0x%x - 0x%x], size 0x%x, 0x%x pages\n",
                                 heap_start, heap_end, limsize, npages));

    /* init pages */
    rt_page_init((void *)heap_start, npages);

    /* calculate zone size */
    zone_size = ZALLOC_MIN_ZONE_SIZE;
    while (zone_size < ZALLOC_MAX_ZONE_SIZE && (zone_size << 1) < (limsize / 1024))
        zone_size <<= 1;

    zone_limit = zone_size / 4;
    if (zone_limit > ZALLOC_ZONE_LIMIT)
        zone_limit = ZALLOC_ZONE_LIMIT;

    zone_page_cnt = zone_size / RT_MM_PAGE_SIZE;

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("zone size 0x%x, zone page count 0x%x\n",
                                 zone_size, zone_page_cnt));

    /* allocate memusage array */
    limsize  = npages * sizeof(struct memusage);
    limsize  = RT_ALIGN(limsize, RT_MM_PAGE_SIZE);
    memusage = rt_page_alloc(limsize / RT_MM_PAGE_SIZE);

    RT_DEBUG_LOG(RT_DEBUG_SLAB, ("memusage 0x%x, size 0x%x\n",
                                 (rt_uint32_t)memusage, limsize));
}
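The heap initializers in this collection all lean on the RT_ALIGN/RT_ALIGN_DOWN rounding macros. A minimal sketch of the usual power-of-two rounding they perform (the real definitions live in the RT-Thread/IPRT headers; the macro names and numbers below are illustrative only):

/* Illustrative power-of-two rounding, not the library's actual definitions. */
#define ALIGN_UP(size, align)    (((size) + (align) - 1) & ~((align) - 1))
#define ALIGN_DOWN(size, align)  ((size) & ~((align) - 1))

/* With 4 KiB pages:
 *   ALIGN_UP(0x20001001, 0x1000)   -> 0x20002000   (heap_start rounds up)
 *   ALIGN_DOWN(0x20042FFF, 0x1000) -> 0x20042000   (heap_end rounds down)
 * so a partially covered page at either end is never handed to the page
 * allocator. */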
/*
 * NOTE: writing flash is a slow process.
 */
static int write_whole_syscfgdata_tbl(struct syscfgdata_tbl *data)
{
    unsigned char *page_addr;
    unsigned char *data_page;
    u32 all_data_len;

    if (NULL == data)
        return -RT_ERROR;

    FLASH_Unlock();

    /* Clear pending flags (if any) */
    FLASH_ClearFlag(FLASH_FLAG_EOP | FLASH_FLAG_OPTERR | FLASH_FLAG_WRPRTERR
                    | FLASH_FLAG_PGERR | FLASH_FLAG_BSY);

    page_addr    = (unsigned char *)SYSCFGDATA_TBL_BASE_OF_FLASH;
    data_page    = (unsigned char *)data;
    all_data_len = SYSCFGDATA_TBL_SIZE_OF_FLASH;

    while (all_data_len > 0)
    {
        if (all_data_len > SIZE_PER_FLASH_PAGE)
        {
            do_write_flash_page(page_addr, data_page, SIZE_PER_FLASH_PAGE);
            page_addr    += SIZE_PER_FLASH_PAGE;
            data_page    += SIZE_PER_FLASH_PAGE;
            all_data_len -= SIZE_PER_FLASH_PAGE;
        }
        else
        {
            /* Use the advanced data_page cursor here, not the original data
             * pointer, or every page after the first would rewrite the
             * beginning of the table. */
            do_write_flash_page(page_addr, data_page, RT_ALIGN(all_data_len, 4));
            all_data_len = 0;
            break;
        }
    }

    FLASH_Lock();

    return FLASH_COMPLETE;
}
/**
 * Gets the statistics record for a tag.
 *
 * @returns Pointer to a stat record.
 * @returns NULL on failure.
 * @param   pHeap       The heap.
 * @param   enmTag      The tag.
 */
static PMMHYPERSTAT mmHyperStat(PMMHYPERHEAP pHeap, MMTAG enmTag)
{
    /* try to look it up first. */
    PMMHYPERSTAT pStat = (PMMHYPERSTAT)RTAvloGCPhysGet(&pHeap->HyperHeapStatTree, enmTag);
    if (!pStat)
    {
        /* try to allocate a new one */
        PMMHYPERCHUNK pChunk = mmHyperAllocChunk(pHeap, RT_ALIGN(sizeof(*pStat), MMHYPER_HEAP_ALIGN_MIN), MMHYPER_HEAP_ALIGN_MIN);
        if (!pChunk)
            return NULL;
        pStat = (PMMHYPERSTAT)(pChunk + 1);
        pChunk->offStat = (uintptr_t)pStat - (uintptr_t)pChunk;

        ASMMemZero32(pStat, sizeof(*pStat));
        pStat->Core.Key = enmTag;
        RTAvloGCPhysInsert(&pHeap->HyperHeapStatTree, &pStat->Core);
    }
    if (!pStat->fRegistered)
    {
#ifdef IN_RING3
        mmR3HyperStatRegisterOne(pHeap->pVMR3, pStat);
#else
        /** @todo schedule a R3 action. */
#endif
    }
    return pStat;
}
int write_whole_poweroff_info_tbl(struct poweroff_info_st *data)
{
    u32 *ps, *pd;
    int i;
    FLASH_Status status;

    if (NULL == data)
        return -RT_ERROR;

    FLASH_Unlock();

    /* Clear pending flags (if any) */
    FLASH_ClearFlag(FLASH_FLAG_EOP | FLASH_FLAG_OPTERR | FLASH_FLAG_WRPRTERR
                    | FLASH_FLAG_PGERR | FLASH_FLAG_BSY);

    status = FLASH_ErasePage((rt_uint32_t)POWEROFF_INFO_TBL_BASE_OF_FLASH);
    /* wait for complete in FLASH_ErasePage() */
    if (FLASH_COMPLETE != status)
        while (1); /* return status; */

    ps = (u32 *)data;
    pd = (u32 *)POWEROFF_INFO_TBL_BASE_OF_FLASH;
    for (i = 0; i < RT_ALIGN(POWEROFF_INFO_TBL_SIZE_OF_FLASH, 4) / 4; i++)
    {
        status = FLASH_ProgramWord((u32)pd, *ps);
        /* wait for complete in FLASH_ProgramWord() */
        if (FLASH_COMPLETE != status)
            while (1); /* return status; */
        ++pd;
        ++ps;
    }

    FLASH_Lock();

    return FLASH_COMPLETE;
}
/**
 * Grows the buffer of a write stream.
 *
 * @returns IPRT status code.
 * @param   pStream             The stream.  Must be in write mode.
 * @param   cbAppending         The minimum number of bytes to grow the buffer
 *                              with.
 */
static int scmStreamGrowBuffer(PSCMSTREAM pStream, size_t cbAppending)
{
    size_t cbAllocated = pStream->cbAllocated;
    cbAllocated += RT_MAX(0x1000 + cbAppending, cbAllocated);
    cbAllocated  = RT_ALIGN(cbAllocated, 0x1000);
    void *pvNew;
    if (!pStream->fFileMemory)
    {
        pvNew = RTMemRealloc(pStream->pch, cbAllocated);
        if (!pvNew)
            return pStream->rc = VERR_NO_MEMORY;
    }
    else
    {
        pvNew = RTMemDupEx(pStream->pch, pStream->off, cbAllocated - pStream->off);
        if (!pvNew)
            return pStream->rc = VERR_NO_MEMORY;
        RTFileReadAllFree(pStream->pch, pStream->cbAllocated);
        pStream->fFileMemory = false;
    }
    pStream->pch = (char *)pvNew;
    pStream->cbAllocated = cbAllocated;

    return VINF_SUCCESS;
}
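Worked numbers for the growth policy above (illustrative values): a small append at least doubles the buffer, while a huge append dominates instead.

/* cbAllocated = 0x8000, cbAppending = 0x100:
 *   0x8000 + RT_MAX(0x1000 + 0x100, 0x8000) = 0x10000      (doubled)
 * cbAllocated = 0x1000, cbAppending = 0x20000:
 *   0x1000 + RT_MAX(0x21000, 0x1000)        = 0x22000      (append dominates)
 * The final RT_ALIGN(..., 0x1000) keeps the size page-aligned, so a long run
 * of small appends costs O(log n) reallocations rather than O(n). */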
/**
 * Performs a simple unicast test.
 *
 * @param   pThis           The test instance.
 * @param   fHeadGuard      Whether to use a head or tail guard.
 */
static void doUnicastTest(PTSTSTATE pThis, bool fHeadGuard)
{
    static uint16_t const s_au16Frame[7] = { /* dst:*/ 0x8086, 0, 0, /*src:*/ 0x8086, 0, 1, 0x0800 };
    RTTESTI_CHECK_RC_RETV(tstIntNetSendBuf(&pThis->pBuf1->Send, pThis->hIf1,
                                           g_pSession, s_au16Frame, sizeof(s_au16Frame)),
                          VINF_SUCCESS);

    /* No echo, please */
    RTTESTI_CHECK_RC_RETV(IntNetR0IfWait(pThis->hIf1, g_pSession, 1), VERR_TIMEOUT);

    /* The other interface should see it though.  But Wait should only return once, thank you. */
    RTTESTI_CHECK_RC_RETV(IntNetR0IfWait(pThis->hIf0, g_pSession, 1), VINF_SUCCESS);
    RTTESTI_CHECK_RC_RETV(IntNetR0IfWait(pThis->hIf0, g_pSession, 0), VERR_TIMEOUT);

    /* Receive the data. */
    const unsigned cbExpect = RT_ALIGN(sizeof(s_au16Frame) + sizeof(INTNETHDR), sizeof(INTNETHDR));
    RTTESTI_CHECK_MSG(IntNetRingGetReadable(&pThis->pBuf0->Recv) == cbExpect,
                      ("%#x vs. %#x\n", IntNetRingGetReadable(&pThis->pBuf0->Recv), cbExpect));

    void *pvBuf;
    RTTESTI_CHECK_RC_OK_RETV(RTTestGuardedAlloc(g_hTest, sizeof(s_au16Frame), 1, fHeadGuard, &pvBuf));
    uint32_t cb;
    RTTESTI_CHECK_MSG_RETV((cb = IntNetRingReadAndSkipFrame(&pThis->pBuf0->Recv, pvBuf)) == sizeof(s_au16Frame),
                           ("%#x vs. %#x\n", cb, sizeof(s_au16Frame)));

    if (memcmp(pvBuf, &s_au16Frame, sizeof(s_au16Frame)))
        RTTestIFailed("Got invalid data!\n"
                      "received: %.*Rhxs\n"
                      "expected: %.*Rhxs\n",
                      cb, pvBuf, sizeof(s_au16Frame), s_au16Frame);
}
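For reference, the cbExpect computation works out as follows, assuming an 8-byte INTNETHDR (the actual size is fixed by the IntNet headers; 8 is used here only for the arithmetic):

/* sizeof(s_au16Frame) = 7 * 2 = 14 bytes of raw frame; with an assumed
 * 8-byte INTNETHDR:
 *   cbExpect = RT_ALIGN(14 + 8, 8) = 24
 * i.e. header plus frame rounded up so the next INTNETHDR in the ring
 * stays naturally aligned. */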
/**
 * This function will create a mempool object and allocate the memory pool from
 * heap.
 *
 * @param name the name of memory pool
 * @param block_count the count of blocks in memory pool
 * @param block_size the size for each block
 *
 * @return the created mempool object
 */
rt_mp_t rt_mp_create(const char *name,
                     rt_size_t   block_count,
                     rt_size_t   block_size)
{
    rt_uint8_t *block_ptr;
    struct rt_mempool *mp;
    register rt_base_t offset;

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* allocate object */
    mp = (struct rt_mempool *)rt_object_allocate(RT_Object_Class_MemPool, name);
    /* allocate object failed */
    if (mp == RT_NULL)
        return RT_NULL;

    /* initialize memory pool */
    block_size     = RT_ALIGN(block_size, RT_ALIGN_SIZE);
    mp->block_size = block_size;
    mp->size       = (block_size + sizeof(rt_uint8_t *)) * block_count;

    /* allocate memory */
    mp->start_address = rt_malloc((block_size + sizeof(rt_uint8_t *)) * block_count);
    if (mp->start_address == RT_NULL)
    {
        /* no memory, delete memory pool object */
        rt_object_delete(&(mp->parent));
        return RT_NULL;
    }

    mp->block_total_count = block_count;
    mp->block_free_count  = mp->block_total_count;

    /* initialize suspended thread list */
    rt_list_init(&(mp->suspend_thread));
    mp->suspend_thread_count = 0;

    /* initialize free block list */
    block_ptr = (rt_uint8_t *)mp->start_address;
    for (offset = 0; offset < mp->block_total_count; offset++)
    {
        *(rt_uint8_t **)(block_ptr + offset * (block_size + sizeof(rt_uint8_t *)))
            = block_ptr + (offset + 1) * (block_size + sizeof(rt_uint8_t *));
    }
    *(rt_uint8_t **)(block_ptr + (offset - 1) * (block_size + sizeof(rt_uint8_t *)))
        = RT_NULL;

    mp->block_list = block_ptr;

    return mp;
}
static void vqueueInit(PVQUEUE pQueue, uint32_t uPageNumber)
{
    pQueue->VRing.addrDescriptors = (uint64_t)uPageNumber << PAGE_SHIFT;
    pQueue->VRing.addrAvail       = pQueue->VRing.addrDescriptors
                                  + sizeof(VRINGDESC) * pQueue->VRing.uSize;
    pQueue->VRing.addrUsed        = RT_ALIGN(pQueue->VRing.addrAvail
                                             + RT_OFFSETOF(VRINGAVAIL, auRing[pQueue->VRing.uSize]),
                                             PAGE_SIZE); /* The used ring must start from the next page. */
    pQueue->uNextAvailIndex       = 0;
    pQueue->uNextUsedIndex        = 0;
}
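To make the ring layout concrete, here are the numbers for a hypothetical 256-entry queue starting at page 0x100, assuming 4 KiB pages, 16-byte VRINGDESC entries, and the usual two 16-bit fields ahead of the avail ring:

/* Hypothetical 256-entry queue at page 0x100:
 *   addrDescriptors = 0x100 << 12                = 0x100000
 *   addrAvail       = 0x100000 + 16 * 256        = 0x101000
 *   end of avail    = 0x101000 + 4 + 2 * 256     = 0x101204
 *   addrUsed        = RT_ALIGN(0x101204, 0x1000) = 0x102000
 * The used ring therefore starts on the next page boundary, as the legacy
 * virtio layout requires. */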
rt_err_t rt_mq_init(rt_mq_t mq, const char *name, void *msgpool, rt_size_t msg_size,
                    rt_size_t pool_size, rt_uint8_t flag)
{
    size_t index;
    struct rt_mq_message *head;
    struct host_mq *hmq;

    /* parameter check */
    RT_ASSERT(mq != RT_NULL);

    /* set parent flag */
    mq->flag = flag;
    for (index = 0; index < RT_NAME_MAX; index++)
    {
        mq->name[index] = name[index];
    }

    /* append to mq list */
    SDL_mutexP(_mq_list_mutex);
    rt_list_insert_after(&(_mq_list), &(mq->list));
    SDL_mutexV(_mq_list_mutex);

    /* set message pool */
    mq->msg_pool = msgpool;

    /* get correct message size */
    mq->msg_size = RT_ALIGN(msg_size, RT_ALIGN_SIZE);
    mq->max_msgs = pool_size / (mq->msg_size + sizeof(struct rt_mq_message));

    /* init message list */
    mq->msg_queue_head = RT_NULL;
    mq->msg_queue_tail = RT_NULL;

    /* init message empty list */
    mq->msg_queue_free = RT_NULL;
    for (index = 0; index < mq->max_msgs; index++)
    {
        head = (struct rt_mq_message *)((rt_uint8_t *)mq->msg_pool +
                                        index * (mq->msg_size + sizeof(struct rt_mq_message)));
        head->next = mq->msg_queue_free;
        mq->msg_queue_free = head;
    }

    /* the initial entry is zero */
    mq->entry = 0;

    /* init host mutex/semaphores; keep a typed pointer so the members can be
     * initialized (the original assigned to an undeclared 'hmq') */
    hmq = (struct host_mq *)malloc(sizeof(struct host_mq));
    mq->host_mq = (void *)hmq;
    hmq->mutex = SDL_CreateSemaphore(1);
    hmq->msg   = SDL_CreateSemaphore(0);

    return RT_EOK;
}
/**
 * Grows the line array of a stream.
 *
 * @returns IPRT status code.
 * @param   pStream             The stream.
 * @param   iMinLine            Minimum line number.
 */
static int scmStreamGrowLines(PSCMSTREAM pStream, size_t iMinLine)
{
    size_t cLinesAllocated = pStream->cLinesAllocated;
    cLinesAllocated += RT_MAX(512 + iMinLine, cLinesAllocated);
    cLinesAllocated  = RT_ALIGN(cLinesAllocated, 512);
    void *pvNew = RTMemRealloc(pStream->paLines, cLinesAllocated * sizeof(SCMSTREAMLINE));
    if (!pvNew)
        return pStream->rc = VERR_NO_MEMORY;

    pStream->paLines = (PSCMSTREAMLINE)pvNew;
    pStream->cLinesAllocated = cLinesAllocated;
    return VINF_SUCCESS;
}
static rt_err_t rt_dflash_init(rt_device_t dev)
{
    FLASH_Init();
    SectorSize = FLASH_GetSectorSize();
    StartAddr  = RT_ALIGN(((uint32_t)&Image$$ER_IROM1$$RO$$Limit + SectorSize), SectorSize);
    DiskSize   = FLASH_SIZE - StartAddr;
    rt_kprintf("dflash sector size:%d offset:0x%X\r\n", SectorSize, StartAddr);
    mutex = rt_mutex_create("_mu", RT_IPC_FLAG_FIFO);
    return RT_EOK;
}
/**
 * This function will allocate a block from system heap memory.
 * - If the size is zero, or
 * - if there is no memory of the requested size available in the system,
 * RT_NULL is returned.
 *
 * @param size the size of memory to be allocated
 *
 * @return the allocated memory
 */
void *rt_malloc(rt_size_t size)
{
    slab_zone *z;
    rt_int32_t zi;
    slab_chunk *chunk;
    struct memusage *kup;

    /* zero size, return RT_NULL */
    if (size == 0)
        return RT_NULL;

#ifdef RT_USING_MODULE
    if (rt_module_self() != RT_NULL)
        return rt_module_malloc(size);
#endif

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     */
    if (size >= zone_limit)
    {
        size = RT_ALIGN(size, RT_MM_PAGE_SIZE);

        chunk = rt_page_alloc(size >> RT_MM_PAGE_BITS);
        if (chunk == RT_NULL)
            return RT_NULL;

        /* set kup */
        kup = btokup(chunk);
        kup->type = PAGE_TYPE_LARGE;
        kup->size = size >> RT_MM_PAGE_BITS;

        RT_DEBUG_LOG(RT_DEBUG_SLAB,
                     ("malloc a large memory 0x%x, page cnt %d, kup %d\n",
                      size, size >> RT_MM_PAGE_BITS,
                      ((rt_uint32_t)chunk - heap_start) >> RT_MM_PAGE_BITS));

        /* lock heap */
        rt_sem_take(&heap_sem, RT_WAITING_FOREVER);

#ifdef RT_MEM_STATS
        used_mem += size;
        if (used_mem > max_mem)
            max_mem = used_mem;
#endif
        goto done;
    }
    /* ... (the zone-based small-allocation path and the `done:` label follow
     *      in the full source; the excerpt ends here) ... */
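The large-allocation path above reduces to simple page math; worked through with illustrative numbers (4 KiB pages, i.e. RT_MM_PAGE_BITS == 12):

/* size = 100 * 1024 -> RT_ALIGN(102400, 4096) = 102400, i.e. 25 pages.
 * rt_page_alloc(102400 >> 12) allocates those 25 contiguous pages, and
 * kup->size = 25 records the page count so the free path can return
 * exactly that many pages; btokup() maps the chunk address back to its
 * memusage slot, which is also how the free path tells a large
 * allocation apart from a slab chunk. */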
/**
 * Safe and noisy string copy.
 *
 * @param   pwszDst             Destination buffer.
 * @param   cbDst               Size in bytes - not WCHAR count!
 * @param   pSrc                Source string.
 * @param   pszWhat             What this is. For the log.
 */
static void VBoxServiceVMInfoWinSafeCopy(PWCHAR pwszDst, size_t cbDst, LSA_UNICODE_STRING const *pSrc, const char *pszWhat)
{
    Assert(RT_ALIGN(cbDst, sizeof(WCHAR)) == cbDst);

    size_t cbCopy = pSrc->Length;
    if (cbCopy + sizeof(WCHAR) > cbDst)
    {
        VBoxServiceVerbose(0, "%s is too long - %u bytes, buffer %u bytes! It will be truncated.\n",
                           pszWhat, cbCopy, cbDst);
        cbCopy = cbDst - sizeof(WCHAR);
    }
    if (cbCopy)
        memcpy(pwszDst, pSrc->Buffer, cbCopy);
    pwszDst[cbCopy / sizeof(WCHAR)] = '\0';
}
/**
 * @ingroup SystemInit
 *
 * This function will initialize the system heap.
 *
 * @param begin_addr the beginning address of system page
 * @param end_addr the end address of system page
 */
void rt_system_heap_init(void *begin_addr, void *end_addr)
{
    struct heap_mem *mem;
    rt_uint32_t begin_align = RT_ALIGN((rt_uint32_t)begin_addr, RT_ALIGN_SIZE);
    rt_uint32_t end_align   = RT_ALIGN_DOWN((rt_uint32_t)end_addr, RT_ALIGN_SIZE);

    RT_DEBUG_NOT_IN_INTERRUPT;

    /* alignment addr */
    if ((end_align > (2 * SIZEOF_STRUCT_MEM)) &&
        ((end_align - 2 * SIZEOF_STRUCT_MEM) >= begin_align))
    {
        /* calculate the aligned memory size */
        mem_size_aligned = end_align - begin_align - 2 * SIZEOF_STRUCT_MEM;
    }
    else
    {
        rt_kprintf("mem init, error begin address 0x%x, and end address 0x%x\n",
                   (rt_uint32_t)begin_addr, (rt_uint32_t)end_addr);
        return;
    }

    /* point to begin address of heap */
    heap_ptr = (rt_uint8_t *)begin_align;

    RT_DEBUG_LOG(RT_DEBUG_MEM, ("mem init, heap begin address 0x%x, size %d\n",
                                (rt_uint32_t)heap_ptr, mem_size_aligned));

    /* initialize the start of the heap */
    mem        = (struct heap_mem *)heap_ptr;
    mem->magic = HEAP_MAGIC;
    mem->next  = mem_size_aligned + SIZEOF_STRUCT_MEM;
    mem->prev  = 0;
    mem->used  = 0;

    /* initialize the end of the heap */
    heap_end        = (struct heap_mem *)&heap_ptr[mem->next];
    heap_end->magic = HEAP_MAGIC;
    heap_end->used  = 1;
    heap_end->next  = mem_size_aligned + SIZEOF_STRUCT_MEM;
    heap_end->prev  = mem_size_aligned + SIZEOF_STRUCT_MEM;

    rt_sem_init(&heap_sem, "heap", 1, RT_IPC_FLAG_FIFO);

    /* initialize the lowest-free pointer to the start of the heap */
    lfree = (struct heap_mem *)heap_ptr;
}
/**
 * This function will initialize a memory pool object, normally which is used
 * for static object.
 *
 * @param mp the memory pool object
 * @param name the name of memory pool
 * @param start the start address of memory pool
 * @param size the total size of memory pool
 * @param block_size the size for each block
 *
 * @return RT_EOK
 */
rt_err_t rt_mp_init(struct rt_mempool *mp,
                    const char        *name,
                    void              *start,
                    rt_size_t          size,
                    rt_size_t          block_size)
{
    rt_uint8_t *block_ptr;
    register rt_base_t offset;

    /* parameter check */
    RT_ASSERT(mp != RT_NULL);

    /* initialize object */
    rt_object_init(&(mp->parent), RT_Object_Class_MemPool, name);

    /* initialize memory pool */
    mp->start_address = start;
    mp->size = RT_ALIGN_DOWN(size, RT_ALIGN_SIZE);

    /* align the block size */
    block_size = RT_ALIGN(block_size, RT_ALIGN_SIZE);
    mp->block_size = block_size;

    /* align to align size byte */
    mp->block_total_count = mp->size / (mp->block_size + sizeof(rt_uint8_t *));
    mp->block_free_count  = mp->block_total_count;

    /* initialize suspended thread list */
    rt_list_init(&(mp->suspend_thread));
    mp->suspend_thread_count = 0;

    /* initialize free block list */
    block_ptr = (rt_uint8_t *)mp->start_address;
    for (offset = 0; offset < mp->block_total_count; offset++)
    {
        *(rt_uint8_t **)(block_ptr + offset * (block_size + sizeof(rt_uint8_t *))) =
            (rt_uint8_t *)(block_ptr + (offset + 1) * (block_size + sizeof(rt_uint8_t *)));
    }
    *(rt_uint8_t **)(block_ptr + (offset - 1) * (block_size + sizeof(rt_uint8_t *))) =
        RT_NULL;

    mp->block_list = block_ptr;

    return RT_EOK;
}
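A minimal usage sketch for the static-pool path above (buffer and block sizes are arbitrary example values; every block carries a sizeof(rt_uint8_t *) free-list link in addition to its payload, 4 bytes on a 32-bit target):

static rt_uint8_t demo_pool[1024];
static struct rt_mempool demo_mp;

void demo_mempool(void)
{
    /* blocks: 1024 / (RT_ALIGN(30, 4) + sizeof(void *)) = 1024 / 36 = 28 */
    rt_mp_init(&demo_mp, "demo", demo_pool, sizeof(demo_pool), 30);

    void *blk = rt_mp_alloc(&demo_mp, RT_WAITING_FOREVER);
    if (blk != RT_NULL)
    {
        /* ... use the 32-byte block ... */
        rt_mp_free(blk);
    }
}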
rt_mq_t *rt_mq_create(rt_size_t msg_size, rt_size_t max_msgs)
{
    size_t index;
    rt_mq_t *mq;
    struct rt_mq_message *head;

    /* allocate object */
    mq = (rt_mq_t *)rt_malloc(sizeof(rt_mq_t));
    if (mq == RT_NULL)
        return mq;

    /* get correct message size */
    mq->msg_size = RT_ALIGN(msg_size, RT_ALIGN_SIZE);
    mq->max_msgs = max_msgs;

    /* allocate message pool */
    mq->msg_pool = rt_malloc((mq->msg_size + sizeof(struct rt_mq_message)) * mq->max_msgs);
    if (mq->msg_pool == RT_NULL)
    {
        rt_mq_delete(mq);
        return RT_NULL;
    }

    /* init message list */
    mq->head = RT_NULL;
    mq->tail = RT_NULL;

    /* init message empty list */
    mq->free = RT_NULL;
    for (index = 0; index < mq->max_msgs; index++)
    {
        head = (struct rt_mq_message *)((rt_uint8_t *)mq->msg_pool +
                                        index * (mq->msg_size + sizeof(struct rt_mq_message)));
        head->next = mq->free;
        mq->free   = head;
    }

    /* the initial entry is zero */
    mq->entry = 0;

    return mq;
}
void syscfgdata_syn_proc(void)
{
    struct syscfgdata_tbl *p;
    rt_err_t ret_sem;

    if (is_syscfgdata_tbl_dirty(syscfgdata_tbl_cache->systbl_flag_set) ||
        is_syscfgdata_tbl_wthrough(syscfgdata_tbl_cache->systbl_flag_set))
    {
        p = rt_calloc(RT_ALIGN(SYSCFGDATA_TBL_SIZE_OF_FLASH, 4), 1);
        if (NULL == p)
        {
            printf_syn("func:%s(), line:%d:mem alloc fail\n", __FUNCTION__, __LINE__);
            return;
        }

        ret_sem = rt_sem_take(&write_syscfgdata_sem, RT_WAITING_FOREVER);
        if (RT_EOK != ret_sem)
        {
            SYSCFG_DATA_LOG(("func:%s, line:%d, error(%d)", __FUNCTION__, __LINE__, ret_sem));
            goto free_entry;
        }

        if (0 != read_whole_syscfgdata_tbl(p, SYSCFGDATA_TBL_SIZE_OF_FLASH))
            goto release_sem;

        if (0 == rt_memcmp(&syscfgdata_tbl_cache->syscfg_data, p,
                           sizeof(syscfgdata_tbl_cache->syscfg_data)))
            goto release_sem;

        write_whole_syscfgdata_tbl(&syscfgdata_tbl_cache->syscfg_data);
        clr_syscfgdata_tbl_dirty(syscfgdata_tbl_cache->systbl_flag_set);
        clr_syscfgdata_tbl_wthrough(syscfgdata_tbl_cache->systbl_flag_set);

release_sem:
        rt_sem_release(&write_syscfgdata_sem);
free_entry:
        rt_free(p);
    }

    return;
}
/*
 * NOTE: writing flash is a slow process.
 */
static void do_write_flash_page(void *const page_addr, void *const data_page, const u32 write_data_len)
{
    int i, w_cnt;
    u32 *ps, *pd;
    FLASH_Status status;

    w_cnt = RT_ALIGN(write_data_len, 4) / 4;

    ps = data_page;
    pd = page_addr;
    for (i = 0; i < w_cnt; i++)
    {
        if (*pd++ != *ps++)
            break;
    }

    if (i < w_cnt)
    {
        status = FLASH_ErasePage((rt_uint32_t)page_addr);
        /* wait for complete in FLASH_ErasePage() */
        if (FLASH_COMPLETE != status)
        {
            rt_kprintf("%s(), line:%d, erase page fail(%d)\n", __FUNCTION__, __LINE__, status);
            while (1); /* return status; */
        }

        ps = data_page;
        pd = page_addr;
        for (i = 0; i < w_cnt; i++)
        {
            status = FLASH_ProgramWord((u32)pd, *ps);
            /* wait for complete in FLASH_ProgramWord() */
            if (FLASH_COMPLETE != status)
            {
                rt_kprintf("%s(), line:%d, program word fail(%d)\n", __FUNCTION__, __LINE__, status);
                while (1); /* return status; */
            }
            ++pd;
            ++ps;
        }
    }

    return;
}
/**
 * Internal worker for the queue creation apis.
 *
 * @returns VBox status code.
 * @param   pVM                 Pointer to the VM.
 * @param   cbItem              Item size.
 * @param   cItems              Number of items.
 * @param   cMilliesInterval    Number of milliseconds between polling the queue.
 *                              If 0 then the emulation thread will be notified whenever an item arrives.
 * @param   fRZEnabled          Set if the queue will be used from RC/R0 and need to be allocated from the hyper heap.
 * @param   pszName             The queue name. Unique. Not copied.
 * @param   ppQueue             Where to store the queue handle.
 */
static int pdmR3QueueCreate(PVM pVM, size_t cbItem, uint32_t cItems, uint32_t cMilliesInterval, bool fRZEnabled,
                            const char *pszName, PPDMQUEUE *ppQueue)
{
    PUVM pUVM = pVM->pUVM;

    /*
     * Validate input.
     */
    AssertMsgReturn(cbItem >= sizeof(PDMQUEUEITEMCORE) && cbItem < _1M, ("cbItem=%zu\n", cbItem), VERR_OUT_OF_RANGE);
    AssertMsgReturn(cItems >= 1 && cItems <= _64K, ("cItems=%u\n", cItems), VERR_OUT_OF_RANGE);

    /*
     * Align the item size and calculate the structure size.
     */
    cbItem = RT_ALIGN(cbItem, sizeof(RTUINTPTR));
    size_t cb = cbItem * cItems + RT_ALIGN_Z(RT_OFFSETOF(PDMQUEUE, aFreeItems[cItems + PDMQUEUE_FREE_SLACK]), 16);
    PPDMQUEUE pQueue;
    int rc;
    if (fRZEnabled)
        rc = MMHyperAlloc(pVM, cb, 0, MM_TAG_PDM_QUEUE, (void **)&pQueue);
    else
        rc = MMR3HeapAllocZEx(pVM, MM_TAG_PDM_QUEUE, cb, (void **)&pQueue);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Initialize the data fields.
     */
    pQueue->pVMR3 = pVM;
    pQueue->pVMR0 = fRZEnabled ? pVM->pVMR0 : NIL_RTR0PTR;
    pQueue->pVMRC = fRZEnabled ? pVM->pVMRC : NIL_RTRCPTR;
    pQueue->pszName = pszName;
    pQueue->cMilliesInterval = cMilliesInterval;
    //pQueue->pTimer = NULL;
    pQueue->cbItem = (uint32_t)cbItem;
    pQueue->cItems = cItems;
    //pQueue->pPendingR3 = NULL;
    //pQueue->pPendingR0 = NULL;
    //pQueue->pPendingRC = NULL;
    pQueue->iFreeHead = cItems;
    //pQueue->iFreeTail = 0;
    PPDMQUEUEITEMCORE pItem = (PPDMQUEUEITEMCORE)((char *)pQueue + RT_ALIGN_Z(RT_OFFSETOF(PDMQUEUE, aFreeItems[cItems + PDMQUEUE_FREE_SLACK]), 16));
    for (unsigned i = 0; i < cItems; i++, pItem = (PPDMQUEUEITEMCORE)((char *)pItem + cbItem))
    {
        pQueue->aFreeItems[i].pItemR3 = pItem;
        if (fRZEnabled)
        {
            pQueue->aFreeItems[i].pItemR0 = MMHyperR3ToR0(pVM, pItem);
            pQueue->aFreeItems[i].pItemRC = MMHyperR3ToRC(pVM, pItem);
        }
    }

    /*
     * Create timer?
     */
    if (cMilliesInterval)
    {
        rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, pdmR3QueueTimer, pQueue, "Queue timer", &pQueue->pTimer);
        if (RT_SUCCESS(rc))
        {
            rc = TMTimerSetMillies(pQueue->pTimer, cMilliesInterval);
            if (RT_FAILURE(rc))
            {
                AssertMsgFailed(("TMTimerSetMillies failed rc=%Rrc\n", rc));
                int rc2 = TMR3TimerDestroy(pQueue->pTimer); AssertRC(rc2);
            }
        }
        else
            AssertMsgFailed(("TMR3TimerCreateInternal failed rc=%Rrc\n", rc));
        if (RT_FAILURE(rc))
        {
            if (fRZEnabled)
                MMHyperFree(pVM, pQueue);
            else
                MMR3HeapFree(pQueue);
            return rc;
        }

        /*
         * Insert into the queue list for timer driven queues.
         */
        pdmLock(pVM);
        pQueue->pNext = pUVM->pdm.s.pQueuesTimer;
        pUVM->pdm.s.pQueuesTimer = pQueue;
        pdmUnlock(pVM);
    }
    else
    {
        /*
         * Insert into the queue list for forced action driven queues.
         * This is a FIFO, so insert at the end.
         */
        /** @todo we should add a priority to the queues so we don't have to rely on
         * the initialization order to deal with problems like @bugref{1605} (pgm/pcnet
         * deadlock caused by the critsect queue to be last in the chain).
         * - Update, the critical sections are no longer using queues, so this isn't a real
         *   problem any longer. The priority might be a nice feature for later though.
         */
        pdmLock(pVM);
        if (!pUVM->pdm.s.pQueuesForced)
            pUVM->pdm.s.pQueuesForced = pQueue;
        else
        {
            PPDMQUEUE pPrev = pUVM->pdm.s.pQueuesForced;
            while (pPrev->pNext)
                pPrev = pPrev->pNext;
            pPrev->pNext = pQueue;
        }
        pdmUnlock(pVM);
    }

    /*
     * Register the statistics.
     */
    STAMR3RegisterF(pVM, &pQueue->cbItem,               STAMTYPE_U32,     STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,      "Item size.",                   "/PDM/Queue/%s/cbItem",         pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->cItems,               STAMTYPE_U32,     STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,      "Queue size.",                  "/PDM/Queue/%s/cItems",         pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->StatAllocFailures,    STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "PDMQueueAlloc failures.",      "/PDM/Queue/%s/AllocFailures",  pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->StatInsert,           STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,      "Calls to PDMQueueInsert.",     "/PDM/Queue/%s/Insert",         pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->StatFlush,            STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,      "Calls to pdmR3QueueFlush.",    "/PDM/Queue/%s/Flush",          pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->StatFlushLeftovers,   STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Left over items after flush.", "/PDM/Queue/%s/FlushLeftovers", pQueue->pszName);
#ifdef VBOX_WITH_STATISTICS
    STAMR3RegisterF(pVM, &pQueue->StatFlushPrf,         STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,      "Profiling pdmR3QueueFlush.",   "/PDM/Queue/%s/FlushPrf",       pQueue->pszName);
    STAMR3RegisterF(pVM, (void *)&pQueue->cStatPending, STAMTYPE_U32,     STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,      "Pending items.",               "/PDM/Queue/%s/Pending",        pQueue->pszName);
#endif

    *ppQueue = pQueue;
    return VINF_SUCCESS;
}
/**
 * Allocates a chunk of memory from the specified heap.
 * The caller validates the parameters of this request.
 *
 * @returns Pointer to the allocated chunk.
 * @returns NULL on failure.
 * @param   pHeap       The heap.
 * @param   cb          Size of the memory block to allocate.
 * @param   uAlignment  The alignment specifications for the allocated block.
 * @internal
 */
static PMMHYPERCHUNK mmHyperAllocChunk(PMMHYPERHEAP pHeap, uint32_t cb, unsigned uAlignment)
{
    Log3(("mmHyperAllocChunk: Enter cb=%#x uAlignment=%#x\n", cb, uAlignment));
#ifdef MMHYPER_HEAP_STRICT
    mmHyperHeapCheck(pHeap);
#endif
#ifdef MMHYPER_HEAP_STRICT_FENCE
    uint32_t cbFence = RT_MAX(MMHYPER_HEAP_STRICT_FENCE_SIZE, uAlignment);
    cb += cbFence;
#endif

    /*
     * Check if there are any free chunks. (NIL_OFFSET use/not-use forces this check)
     */
    if (pHeap->offFreeHead == NIL_OFFSET)
        return NULL;

    /*
     * Small alignments - from the front of the heap.
     *
     * Must split off free chunks at the end to prevent messing up the
     * last free node which we take the page aligned memory from the top of.
     */
    PMMHYPERCHUNK     pRet = NULL;
    PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeHead);
    while (pFree)
    {
        ASSERT_CHUNK_FREE(pHeap, pFree);
        if (pFree->cb >= cb)
        {
            unsigned offAlign = (uintptr_t)(&pFree->core + 1) & (uAlignment - 1);
            if (offAlign)
                offAlign = uAlignment - offAlign;
            if (!offAlign || pFree->cb - offAlign >= cb)
            {
                Log3(("mmHyperAllocChunk: Using pFree=%p pFree->cb=%d offAlign=%d\n", pFree, pFree->cb, offAlign));

                /*
                 * Adjust the node in front.
                 * Because of multiple alignments we need to special case allocation of the first block.
                 */
                if (offAlign)
                {
                    MMHYPERCHUNKFREE Free = *pFree;
                    if (MMHYPERCHUNK_GET_OFFPREV(&pFree->core))
                    {
                        /* just add a bit of memory to it. */
                        PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + MMHYPERCHUNK_GET_OFFPREV(&Free.core));
                        pPrev->core.offNext += offAlign;
                        AssertMsg(!MMHYPERCHUNK_ISFREE(&pPrev->core), ("Impossible!\n"));
                        Log3(("mmHyperAllocChunk: Added %d bytes to %p\n", offAlign, pPrev));
                    }
                    else
                    {
                        /* make new head node, mark it USED for simplicity. */
                        PMMHYPERCHUNK pPrev = (PMMHYPERCHUNK)pHeap->CTX_SUFF(pbHeap);
                        Assert(pPrev == &pFree->core);
                        pPrev->offPrev = 0;
                        MMHYPERCHUNK_SET_TYPE(pPrev, MMHYPERCHUNK_FLAGS_USED);
                        pPrev->offNext = offAlign;
                        Log3(("mmHyperAllocChunk: Created new first node of %d bytes\n", offAlign));
                    }
                    Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - offAlign, -(int)offAlign));
                    pHeap->cbFree -= offAlign;

                    /* Recreate pFree node and adjusting everything... */
                    pFree = (PMMHYPERCHUNKFREE)((char *)pFree + offAlign);
                    *pFree = Free;
                    pFree->cb -= offAlign;
                    if (pFree->core.offNext)
                    {
                        pFree->core.offNext -= offAlign;
                        PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pFree + pFree->core.offNext);
                        MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pFree->core.offNext);
                        ASSERT_CHUNK(pHeap, pNext);
                    }
                    if (MMHYPERCHUNK_GET_OFFPREV(&pFree->core))
                        MMHYPERCHUNK_SET_OFFPREV(&pFree->core, MMHYPERCHUNK_GET_OFFPREV(&pFree->core) - offAlign);

                    if (pFree->offNext)
                    {
                        pFree->offNext -= offAlign;
                        PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext);
                        pNext->offPrev = -(int32_t)pFree->offNext;
                        ASSERT_CHUNK_FREE(pHeap, pNext);
                    }
                    else
                        pHeap->offFreeTail += offAlign;
                    if (pFree->offPrev)
                    {
                        pFree->offPrev -= offAlign;
                        PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
                        pPrev->offNext = -pFree->offPrev;
                        ASSERT_CHUNK_FREE(pHeap, pPrev);
                    }
                    else
                        pHeap->offFreeHead += offAlign;
                    pFree->core.offHeap = (uintptr_t)pHeap - (uintptr_t)pFree;
                    pFree->core.offStat = 0;
                    ASSERT_CHUNK_FREE(pHeap, pFree);
                    Log3(("mmHyperAllocChunk: Realigned pFree=%p\n", pFree));
                }

                /*
                 * Split off a new FREE chunk?
                 */
                if (pFree->cb >= cb + RT_ALIGN(sizeof(MMHYPERCHUNKFREE), MMHYPER_HEAP_ALIGN_MIN))
                {
                    /*
                     * Move the FREE chunk up to make room for the new USED chunk.
                     */
                    const int off = cb + sizeof(MMHYPERCHUNK);
                    PMMHYPERCHUNKFREE pNew = (PMMHYPERCHUNKFREE)((char *)&pFree->core + off);
                    *pNew = *pFree;
                    pNew->cb -= off;
                    if (pNew->core.offNext)
                    {
                        pNew->core.offNext -= off;
                        PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pNew + pNew->core.offNext);
                        MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pNew->core.offNext);
                        ASSERT_CHUNK(pHeap, pNext);
                    }
                    pNew->core.offPrev = -off;
                    MMHYPERCHUNK_SET_TYPE(pNew, MMHYPERCHUNK_FLAGS_FREE);

                    if (pNew->offNext)
                    {
                        pNew->offNext -= off;
                        PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pNew + pNew->offNext);
                        pNext->offPrev = -(int32_t)pNew->offNext;
                        ASSERT_CHUNK_FREE(pHeap, pNext);
                    }
                    else
                        pHeap->offFreeTail += off;
                    if (pNew->offPrev)
                    {
                        pNew->offPrev -= off;
                        PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pNew + pNew->offPrev);
                        pPrev->offNext = -pNew->offPrev;
                        ASSERT_CHUNK_FREE(pHeap, pPrev);
                    }
                    else
                        pHeap->offFreeHead += off;
                    pNew->core.offHeap = (uintptr_t)pHeap - (uintptr_t)pNew;
                    pNew->core.offStat = 0;
                    ASSERT_CHUNK_FREE(pHeap, pNew);

                    /*
                     * Update the old FREE node making it a USED node.
                     */
                    pFree->core.offNext = off;
                    MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_USED);

                    Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree,
                          pHeap->cbFree - (cb + sizeof(MMHYPERCHUNK)), -(int)(cb + sizeof(MMHYPERCHUNK))));
                    pHeap->cbFree -= (uint32_t)(cb + sizeof(MMHYPERCHUNK));
                    pRet = &pFree->core;
                    ASSERT_CHUNK(pHeap, &pFree->core);
                    Log3(("mmHyperAllocChunk: Created free chunk pNew=%p cb=%d\n", pNew, pNew->cb));
                }
                else
                {
                    /*
                     * Link out of free list.
                     */
                    if (pFree->offNext)
                    {
                        PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext);
                        if (pFree->offPrev)
                        {
                            pNext->offPrev += pFree->offPrev;
                            PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
                            pPrev->offNext += pFree->offNext;
                            ASSERT_CHUNK_FREE(pHeap, pPrev);
                        }
                        else
                        {
                            pHeap->offFreeHead += pFree->offNext;
                            pNext->offPrev = 0;
                        }
                        ASSERT_CHUNK_FREE(pHeap, pNext);
                    }
                    else
                    {
                        if (pFree->offPrev)
                        {
                            pHeap->offFreeTail += pFree->offPrev;
                            PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
                            pPrev->offNext = 0;
                            ASSERT_CHUNK_FREE(pHeap, pPrev);
                        }
                        else
                        {
                            pHeap->offFreeHead = NIL_OFFSET;
                            pHeap->offFreeTail = NIL_OFFSET;
                        }
                    }

                    Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree,
                          pHeap->cbFree - pFree->cb, -(int32_t)pFree->cb));
                    pHeap->cbFree -= pFree->cb;
                    MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_USED);
                    pRet = &pFree->core;
                    ASSERT_CHUNK(pHeap, &pFree->core);
                    Log3(("mmHyperAllocChunk: Converted free chunk %p to used chunk.\n", pFree));
                }
                Log3(("mmHyperAllocChunk: Returning %p\n", pRet));
                break;
            }
        }

        /* next */
        pFree = pFree->offNext ? (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext) : NULL;
    }

#ifdef MMHYPER_HEAP_STRICT_FENCE
    uint32_t *pu32End = (uint32_t *)((uint8_t *)(pRet + 1) + cb);
    uint32_t *pu32EndReal = pRet->offNext
                          ? (uint32_t *)((uint8_t *)pRet + pRet->offNext)
                          : (uint32_t *)(pHeap->CTX_SUFF(pbHeap) + pHeap->cbHeap);
    cbFence += (uintptr_t)pu32EndReal - (uintptr_t)pu32End;
    Assert(!(cbFence & 0x3));
    ASMMemFill32((uint8_t *)pu32EndReal - cbFence, cbFence, MMHYPER_HEAP_STRICT_FENCE_U32);
    pu32EndReal[-1] = cbFence;
#endif
#ifdef MMHYPER_HEAP_STRICT
    mmHyperHeapCheck(pHeap);
#endif
    return pRet;
}
RTR3DECL(int) RTTarFileClose(RTTARFILE hFile)
{
    /* Already closed? */
    if (hFile == NIL_RTTARFILE)
        return VINF_SUCCESS;

    PRTTARFILEINTERNAL pFileInt = hFile;
    RTTARFILE_VALID_RETURN(pFileInt);

    int rc = VINF_SUCCESS;

    /* In write mode: */
    if ((pFileInt->fOpenMode & (RTFILE_O_WRITE | RTFILE_O_READ)) == RTFILE_O_WRITE)
    {
        pFileInt->pTar->fFileOpenForWrite = false;
        do
        {
            /* If the user has called RTTarFileSetSize in the meantime, we have
               to make sure the file has the right size. */
            if (pFileInt->cbSetSize > pFileInt->cbSize)
            {
                rc = rtTarAppendZeros(pFileInt, pFileInt->cbSetSize - pFileInt->cbSize);
                if (RT_FAILURE(rc))
                    break;
            }

            /* If the written size isn't 512 byte aligned, we need to fix this. */
            RTTARRECORD record;
            RT_ZERO(record);
            uint64_t cbSizeAligned = RT_ALIGN(pFileInt->cbSize, sizeof(RTTARRECORD));
            if (cbSizeAligned != pFileInt->cbSize)
            {
                /* Note the RTFile method. We didn't increase the cbSize or cbCurrentPos here. */
                rc = RTFileWriteAt(pFileInt->pTar->hTarFile,
                                   pFileInt->offStart + sizeof(RTTARRECORD) + pFileInt->cbSize,
                                   &record,
                                   cbSizeAligned - pFileInt->cbSize,
                                   NULL);
                if (RT_FAILURE(rc))
                    break;
            }

            /* Create a header record for the file */
            /** @todo mode, gid, uid, mtime should be settable (or detected myself) */
            RTTIMESPEC time;
            RTTimeNow(&time);
            rc = rtTarCreateHeaderRecord(&record, pFileInt->pszFilename, pFileInt->cbSize,
                                         0, 0, 0600, RTTimeSpecGetSeconds(&time));
            if (RT_FAILURE(rc))
                break;

            /* Write this at the start of the file data */
            rc = RTFileWriteAt(pFileInt->pTar->hTarFile, pFileInt->offStart, &record, sizeof(RTTARRECORD), NULL);
            if (RT_FAILURE(rc))
                break;
        } while (0);
    }

    /*
     * Now cleanup and delete the handle.
     */
    if (pFileInt->pszFilename)
        RTStrFree(pFileInt->pszFilename);
    if (pFileInt->hVfsIos != NIL_RTVFSIOSTREAM)
    {
        RTVfsIoStrmRelease(pFileInt->hVfsIos);
        pFileInt->hVfsIos = NIL_RTVFSIOSTREAM;
    }
    pFileInt->u32Magic = RTTARFILE_MAGIC_DEAD;
    RTMemFree(pFileInt);

    return rc;
}
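The 512-byte record padding above works out as follows for, say, a 700-byte file (sizeof(RTTARRECORD) being the 512-byte tar record size):

/* A 700-byte file:
 *   cbSizeAligned = RT_ALIGN(700, 512) = 1024
 * so 1024 - 700 = 324 zero bytes are appended after the data, keeping the
 * archive a whole number of 512-byte records as the tar format requires. */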
/**
 * Implements the SVGA_3D_CMD_SURFACE_DEFINE_V2 and SVGA_3D_CMD_SURFACE_DEFINE
 * commands (fifo).
 *
 * @returns VBox status code (currently ignored).
 * @param   pThis               The VGA device instance data.
 * @param   sid                 The ID of the surface to (re-)define.
 * @param   surfaceFlags        .
 * @param   format              .
 * @param   face                .
 * @param   multisampleCount    .
 * @param   autogenFilter       .
 * @param   cMipLevels          .
 * @param   paMipLevelSizes     .
 */
int vmsvga3dSurfaceDefine(PVGASTATE pThis, uint32_t sid, uint32_t surfaceFlags, SVGA3dSurfaceFormat format,
                          SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES], uint32_t multisampleCount,
                          SVGA3dTextureFilter autogenFilter, uint32_t cMipLevels, SVGA3dSize *paMipLevelSizes)
{
    PVMSVGA3DSURFACE pSurface;
    PVMSVGA3DSTATE   pState = pThis->svga.p3dState;
    AssertReturn(pState, VERR_NO_MEMORY);

    Log(("vmsvga3dSurfaceDefine: sid=%x surfaceFlags=%x format=%s (%x) multiSampleCount=%d autogenFilter=%d, cMipLevels=%d size=(%d,%d,%d)\n",
         sid, surfaceFlags, vmsvgaLookupEnum((int)format, &g_SVGA3dSurfaceFormat2String), format, multisampleCount, autogenFilter,
         cMipLevels, paMipLevelSizes->width, paMipLevelSizes->height, paMipLevelSizes->depth));

    AssertReturn(sid < SVGA3D_MAX_SURFACE_IDS, VERR_INVALID_PARAMETER);
    AssertReturn(cMipLevels >= 1, VERR_INVALID_PARAMETER);
    /* Assuming all faces have the same nr of mipmaps. */
    AssertReturn(!(surfaceFlags & SVGA3D_SURFACE_CUBEMAP) || cMipLevels == face[0].numMipLevels * 6, VERR_INVALID_PARAMETER);
    AssertReturn((surfaceFlags & SVGA3D_SURFACE_CUBEMAP) || cMipLevels == face[0].numMipLevels, VERR_INVALID_PARAMETER);

    if (sid >= pState->cSurfaces)
    {
        /* Grow the array. */
        uint32_t cNew = RT_ALIGN(sid + 15, 16);
        void *pvNew = RTMemRealloc(pState->papSurfaces, sizeof(pState->papSurfaces[0]) * cNew);
        AssertReturn(pvNew, VERR_NO_MEMORY);
        pState->papSurfaces = (PVMSVGA3DSURFACE *)pvNew;
        while (pState->cSurfaces < cNew)
        {
            pSurface = (PVMSVGA3DSURFACE)RTMemAllocZ(sizeof(*pSurface));
            AssertReturn(pSurface, VERR_NO_MEMORY);
            pSurface->id = SVGA3D_INVALID_ID;
            pState->papSurfaces[pState->cSurfaces++] = pSurface;
        }
    }
    pSurface = pState->papSurfaces[sid];

    /* If one already exists with this id, then destroy it now. */
    if (pSurface->id != SVGA3D_INVALID_ID)
        vmsvga3dSurfaceDestroy(pThis, sid);

    RT_ZERO(*pSurface);
    pSurface->id = sid;
#ifdef VMSVGA3D_OPENGL
    pSurface->idWeakContextAssociation = SVGA3D_INVALID_ID;
#else
    pSurface->idAssociatedContext = SVGA3D_INVALID_ID;
#endif
#ifdef VMSVGA3D_DIRECT3D
    pSurface->hSharedObject = NULL;
    pSurface->pSharedObjectTree = NULL;
#else
    pSurface->oglId.buffer = OPENGL_INVALID_ID;
#endif

    /* The surface type is sort of undefined now, even though the hints and format can help to clear that up.
     * In some case we'll have to wait until the surface is used to create the D3D object.
     */
    switch (format)
    {
    case SVGA3D_Z_D32:
    case SVGA3D_Z_D16:
    case SVGA3D_Z_D24S8:
    case SVGA3D_Z_D15S1:
    case SVGA3D_Z_D24X8:
    case SVGA3D_Z_DF16:
    case SVGA3D_Z_DF24:
    case SVGA3D_Z_D24S8_INT:
        surfaceFlags |= SVGA3D_SURFACE_HINT_DEPTHSTENCIL;
        break;

    /* Texture compression formats */
    case SVGA3D_DXT1:
    case SVGA3D_DXT2:
    case SVGA3D_DXT3:
    case SVGA3D_DXT4:
    case SVGA3D_DXT5:
    /* Bump-map formats */
    case SVGA3D_BUMPU8V8:
    case SVGA3D_BUMPL6V5U5:
    case SVGA3D_BUMPX8L8V8U8:
    case SVGA3D_BUMPL8V8U8:
    case SVGA3D_V8U8:
    case SVGA3D_Q8W8V8U8:
    case SVGA3D_CxV8U8:
    case SVGA3D_X8L8V8U8:
    case SVGA3D_A2W10V10U10:
    case SVGA3D_V16U16:
    /* Typical render target formats; we should allow render target buffers to be used as textures. */
    case SVGA3D_X8R8G8B8:
    case SVGA3D_A8R8G8B8:
    case SVGA3D_R5G6B5:
    case SVGA3D_X1R5G5B5:
    case SVGA3D_A1R5G5B5:
    case SVGA3D_A4R4G4B4:
        surfaceFlags |= SVGA3D_SURFACE_HINT_TEXTURE;
        break;

    case SVGA3D_LUMINANCE8:
    case SVGA3D_LUMINANCE4_ALPHA4:
    case SVGA3D_LUMINANCE16:
    case SVGA3D_LUMINANCE8_ALPHA8:
    case SVGA3D_ARGB_S10E5:   /* 16-bit floating-point ARGB */
    case SVGA3D_ARGB_S23E8:   /* 32-bit floating-point ARGB */
    case SVGA3D_A2R10G10B10:
    case SVGA3D_ALPHA8:
    case SVGA3D_R_S10E5:
    case SVGA3D_R_S23E8:
    case SVGA3D_RG_S10E5:
    case SVGA3D_RG_S23E8:
    case SVGA3D_G16R16:
    case SVGA3D_A16B16G16R16:
    case SVGA3D_UYVY:
    case SVGA3D_YUY2:
    case SVGA3D_NV12:
    case SVGA3D_AYUV:
    case SVGA3D_BC4_UNORM:
    case SVGA3D_BC5_UNORM:
        break;

    /*
     * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
     * the most efficient format to use when creating new surfaces
     * expressly for index or vertex data.
     */
    case SVGA3D_BUFFER:
        break;

    default:
        break;
    }

    pSurface->flags             = surfaceFlags;
    pSurface->format            = format;
    memcpy(pSurface->faces, face, sizeof(pSurface->faces));
    pSurface->cFaces            = 1;        /* check for cube maps later */
    pSurface->multiSampleCount  = multisampleCount;
    pSurface->autogenFilter     = autogenFilter;
    Assert(autogenFilter != SVGA3D_TEX_FILTER_FLATCUBIC);
    Assert(autogenFilter != SVGA3D_TEX_FILTER_GAUSSIANCUBIC);
    pSurface->pMipmapLevels     = (PVMSVGA3DMIPMAPLEVEL)RTMemAllocZ(cMipLevels * sizeof(VMSVGA3DMIPMAPLEVEL));
    AssertReturn(pSurface->pMipmapLevels, VERR_NO_MEMORY);

    for (uint32_t i = 0; i < cMipLevels; i++)
        pSurface->pMipmapLevels[i].size = paMipLevelSizes[i];

    pSurface->cbBlock = vmsvga3dSurfaceFormatSize(format);

#ifdef VMSVGA3D_DIRECT3D
    /* Translate the format and usage flags to D3D. */
    pSurface->formatD3D         = vmsvga3dSurfaceFormat2D3D(format);
    pSurface->multiSampleTypeD3D= vmsvga3dMultipeSampleCount2D3D(multisampleCount);
    pSurface->fUsageD3D         = 0;
    if (surfaceFlags & SVGA3D_SURFACE_HINT_DYNAMIC)
        pSurface->fUsageD3D |= D3DUSAGE_DYNAMIC;
    if (surfaceFlags & SVGA3D_SURFACE_HINT_RENDERTARGET)
        pSurface->fUsageD3D |= D3DUSAGE_RENDERTARGET;
    if (surfaceFlags & SVGA3D_SURFACE_HINT_DEPTHSTENCIL)
        pSurface->fUsageD3D |= D3DUSAGE_DEPTHSTENCIL;
    if (surfaceFlags & SVGA3D_SURFACE_HINT_WRITEONLY)
        pSurface->fUsageD3D |= D3DUSAGE_WRITEONLY;
    if (surfaceFlags & SVGA3D_SURFACE_AUTOGENMIPMAPS)
        pSurface->fUsageD3D |= D3DUSAGE_AUTOGENMIPMAP;
    pSurface->fu32ActualUsageFlags = 0;
#else
    vmsvga3dSurfaceFormat2OGL(pSurface, format);
#endif

    switch (surfaceFlags & (SVGA3D_SURFACE_HINT_INDEXBUFFER | SVGA3D_SURFACE_HINT_VERTEXBUFFER |
                            SVGA3D_SURFACE_HINT_TEXTURE | SVGA3D_SURFACE_HINT_RENDERTARGET |
                            SVGA3D_SURFACE_HINT_DEPTHSTENCIL | SVGA3D_SURFACE_CUBEMAP))
    {
    case SVGA3D_SURFACE_CUBEMAP:
        Log(("SVGA3D_SURFACE_CUBEMAP\n"));
        pSurface->cFaces = 6;
        break;

    case SVGA3D_SURFACE_HINT_INDEXBUFFER:
        Log(("SVGA3D_SURFACE_HINT_INDEXBUFFER\n"));
        /* else type unknown at this time; postpone buffer creation */
        break;

    case SVGA3D_SURFACE_HINT_VERTEXBUFFER:
        Log(("SVGA3D_SURFACE_HINT_VERTEXBUFFER\n"));
        /* Type unknown at this time; postpone buffer creation */
        break;

    case SVGA3D_SURFACE_HINT_TEXTURE:
        Log(("SVGA3D_SURFACE_HINT_TEXTURE\n"));
        break;

    case SVGA3D_SURFACE_HINT_RENDERTARGET:
        Log(("SVGA3D_SURFACE_HINT_RENDERTARGET\n"));
        break;

    case SVGA3D_SURFACE_HINT_DEPTHSTENCIL:
        Log(("SVGA3D_SURFACE_HINT_DEPTHSTENCIL\n"));
        break;

    default:
        /* Unknown; decide later. */
        break;
    }

    Assert(!VMSVGA3DSURFACE_HAS_HW_SURFACE(pSurface));

    /* Allocate buffer to hold the surface data until we can move it into a D3D object */
    for (uint32_t iFace = 0; iFace < pSurface->cFaces; iFace++)
    {
        for (uint32_t i = 0; i < pSurface->faces[iFace].numMipLevels; i++)
        {
            uint32_t idx = i + iFace * pSurface->faces[0].numMipLevels;

            Log(("vmsvga3dSurfaceDefine: face %d mip level %d (%d,%d,%d)\n",
                 iFace, i, pSurface->pMipmapLevels[idx].size.width,
                 pSurface->pMipmapLevels[idx].size.height, pSurface->pMipmapLevels[idx].size.depth));
            Log(("vmsvga3dSurfaceDefine: cbPitch=%x cbBlock=%x \n",
                 pSurface->cbBlock * pSurface->pMipmapLevels[idx].size.width, pSurface->cbBlock));

            pSurface->pMipmapLevels[idx].cbSurfacePitch = pSurface->cbBlock * pSurface->pMipmapLevels[idx].size.width;
            pSurface->pMipmapLevels[idx].cbSurface      = pSurface->pMipmapLevels[idx].cbSurfacePitch
                                                        * pSurface->pMipmapLevels[idx].size.height
                                                        * pSurface->pMipmapLevels[idx].size.depth;
            pSurface->pMipmapLevels[idx].pSurfaceData   = RTMemAllocZ(pSurface->pMipmapLevels[idx].cbSurface);
            AssertReturn(pSurface->pMipmapLevels[idx].pSurfaceData, VERR_NO_MEMORY);
        }
    }
    return VINF_SUCCESS;
}
RTDECL(int) RTHandleTableCreateEx(PRTHANDLETABLE phHandleTable, uint32_t fFlags, uint32_t uBase, uint32_t cMax,
                                  PFNRTHANDLETABLERETAIN pfnRetain, void *pvUser)
{
    PRTHANDLETABLEINT pThis;
    uint32_t cLevel1;
    size_t cb;

    /*
     * Validate input.
     */
    AssertPtrReturn(phHandleTable, VERR_INVALID_POINTER);
    *phHandleTable = NIL_RTHANDLETABLE;
    AssertPtrNullReturn(pfnRetain, VERR_INVALID_POINTER);
    AssertReturn(!(fFlags & ~RTHANDLETABLE_FLAGS_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cMax > 0, VERR_INVALID_PARAMETER);
    AssertReturn(UINT32_MAX - cMax >= uBase, VERR_INVALID_PARAMETER);

    /*
     * Adjust the cMax value so it is a multiple of the 2nd level tables.
     */
    if (cMax >= UINT32_MAX - RTHT_LEVEL2_ENTRIES)
        cMax = UINT32_MAX - RTHT_LEVEL2_ENTRIES + 1;
    cMax = ((cMax + RTHT_LEVEL2_ENTRIES - 1) / RTHT_LEVEL2_ENTRIES) * RTHT_LEVEL2_ENTRIES;

    cLevel1 = cMax / RTHT_LEVEL2_ENTRIES;
    Assert(cLevel1 * RTHT_LEVEL2_ENTRIES == cMax);

    /*
     * Allocate the structure, include the 1st level lookup table
     * if it's below the threshold size.
     */
    cb = sizeof(RTHANDLETABLEINT);
    if (cLevel1 < RTHT_LEVEL1_DYN_ALLOC_THRESHOLD)
        cb = RT_ALIGN(cb, sizeof(void *)) + cLevel1 * sizeof(void *);
    pThis = (PRTHANDLETABLEINT)RTMemAllocZ(cb);
    if (!pThis)
        return VERR_NO_MEMORY;

    /*
     * Initialize it.
     */
    pThis->u32Magic = RTHANDLETABLE_MAGIC;
    pThis->fFlags = fFlags;
    pThis->uBase = uBase;
    pThis->cCur = 0;
    pThis->hSpinlock = NIL_RTSPINLOCK;
    if (cLevel1 < RTHT_LEVEL1_DYN_ALLOC_THRESHOLD)
        pThis->papvLevel1 = (void **)((uint8_t *)pThis + RT_ALIGN(sizeof(*pThis), sizeof(void *)));
    else
        pThis->papvLevel1 = NULL;
    pThis->pfnRetain = pfnRetain;
    pThis->pvRetainUser = pvUser;
    pThis->cMax = cMax;
    pThis->cCurAllocated = 0;
    pThis->cLevel1 = cLevel1 < RTHT_LEVEL1_DYN_ALLOC_THRESHOLD ? cLevel1 : 0;
    pThis->iFreeHead = NIL_RTHT_INDEX;
    pThis->iFreeTail = NIL_RTHT_INDEX;
    if (fFlags & RTHANDLETABLE_FLAGS_LOCKED)
    {
        int rc = RTSpinlockCreate(&pThis->hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_UNSAFE, "RTHandleTableCreateEx");
        if (RT_FAILURE(rc))
        {
            RTMemFree(pThis);
            return rc;
        }
    }

    *phHandleTable = pThis;
    return VINF_SUCCESS;
}
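A worked example of the cMax rounding above, assuming a hypothetical RTHT_LEVEL2_ENTRIES of 256 (the real constant lives in the internal header; 256 is only for the arithmetic):

/* cMax = 1000, RTHT_LEVEL2_ENTRIES = 256 (assumed):
 *   cMax    = ((1000 + 255) / 256) * 256 = 1024
 *   cLevel1 = 1024 / 256 = 4 second-level tables
 * When cLevel1 is below RTHT_LEVEL1_DYN_ALLOC_THRESHOLD the level-1 array
 * is co-allocated right behind the RTHANDLETABLEINT structure, which is
 * why cb grows by RT_ALIGN(sizeof(RTHANDLETABLEINT), sizeof(void *))
 * plus cLevel1 * sizeof(void *). */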
RTDECL(int) RTMemCacheCreate(PRTMEMCACHE phMemCache, size_t cbObject, size_t cbAlignment, uint32_t cMaxObjects,
                             PFNMEMCACHECTOR pfnCtor, PFNMEMCACHEDTOR pfnDtor, void *pvUser, uint32_t fFlags)
{
    AssertPtr(phMemCache);
    AssertPtrNull(pfnCtor);
    AssertPtrNull(pfnDtor);
    AssertReturn(!pfnDtor || pfnCtor, VERR_INVALID_PARAMETER);
    AssertReturn(cbObject > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cbObject <= PAGE_SIZE / 8, VERR_INVALID_PARAMETER);
    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);

    if (cbAlignment == 0)
    {
        if (cbObject <= 2)
            cbAlignment = cbObject;
        else if (cbObject <= 4)
            cbAlignment = 4;
        else if (cbObject <= 8)
            cbAlignment = 8;
        else if (cbObject <= 16)
            cbAlignment = 16;
        else if (cbObject <= 32)
            cbAlignment = 32;
        else
            cbAlignment = 64;
    }
    else
    {
        AssertReturn(!((cbAlignment - 1) & cbAlignment), VERR_NOT_POWER_OF_TWO);
        AssertReturn(cbAlignment <= 64, VERR_OUT_OF_RANGE);
    }

    /*
     * Allocate and initialize the instance memory.
     */
    RTMEMCACHEINT *pThis = (RTMEMCACHEINT *)RTMemAlloc(sizeof(*pThis));
    if (!pThis)
        return VERR_NO_MEMORY;
    int rc = RTCritSectInit(&pThis->CritSect);
    if (RT_FAILURE(rc))
    {
        RTMemFree(pThis);
        return rc;
    }

    pThis->u32Magic     = RTMEMCACHE_MAGIC;
    pThis->cbObject     = (uint32_t)RT_ALIGN_Z(cbObject, cbAlignment);
    pThis->cbAlignment  = (uint32_t)cbAlignment;
    pThis->cPerPage     = (uint32_t)((PAGE_SIZE - RT_ALIGN_Z(sizeof(RTMEMCACHEPAGE), cbAlignment)) / pThis->cbObject);
    while (  RT_ALIGN_Z(sizeof(RTMEMCACHEPAGE), 8)
           + pThis->cPerPage * pThis->cbObject
           + RT_ALIGN(pThis->cPerPage, 64) / 8 * 2
           > PAGE_SIZE)
        pThis->cPerPage--;
    pThis->cBits        = RT_ALIGN(pThis->cPerPage, 64);
    pThis->cMax         = cMaxObjects;
    pThis->fUseFreeList = cbObject >= sizeof(RTMEMCACHEFREEOBJ)
                       && !pfnCtor
                       && !pfnDtor;
    pThis->pPageHead    = NULL;
    pThis->ppPageNext   = &pThis->pPageHead;
    pThis->pfnCtor      = pfnCtor;
    pThis->pfnDtor      = pfnDtor;
    pThis->pvUser       = pvUser;
    pThis->cTotal       = 0;
    pThis->cFree        = 0;
    pThis->pPageHint    = NULL;
    pThis->pFreeTop     = NULL;

    *phMemCache = pThis;
    return VINF_SUCCESS;
}
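To see what the cPerPage/cBits computation does, here is the arithmetic with illustrative numbers: a 4 KiB page, 40-byte objects with 8-byte alignment, and an assumed 64-byte RTMEMCACHEPAGE header (the real header size may differ):

/* cbObject = RT_ALIGN_Z(40, 8) = 40
 * first guess: cPerPage = (4096 - 64) / 40 = 100
 * fit check:   64 + 100 * 40 + RT_ALIGN(100, 64) / 8 * 2
 *            = 64 + 4000 + 128 / 8 * 2 = 4096  -> fits, no shrinking needed
 * cBits = RT_ALIGN(100, 64) = 128 bits per page bitmap (two bitmaps per
 * page account for the "* 2" in the fit check). */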
void *rt_memheap_realloc(struct rt_memheap *heap, void *ptr, rt_size_t newsize)
{
    rt_err_t result;
    rt_size_t oldsize;
    struct rt_memheap_item *header_ptr;
    struct rt_memheap_item *new_ptr;

    if (newsize == 0)
    {
        rt_memheap_free(ptr);
        return RT_NULL;
    }

    /* align allocated size */
    newsize = RT_ALIGN(newsize, RT_ALIGN_SIZE);
    if (newsize < RT_MEMHEAP_MINIALLOC)
        newsize = RT_MEMHEAP_MINIALLOC;

    if (ptr == RT_NULL)
    {
        return rt_memheap_alloc(heap, newsize);
    }

    /* get memory block header and get the size of memory block */
    header_ptr = (struct rt_memheap_item *)((rt_uint8_t *)ptr - RT_MEMHEAP_SIZE);
    oldsize = MEMITEM_SIZE(header_ptr);

    /* re-allocate memory */
    if (newsize > oldsize)
    {
        void *new_ptr;
        struct rt_memheap_item *next_ptr;

        /* lock memheap */
        result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
        if (result != RT_EOK)
        {
            rt_set_errno(result);
            return RT_NULL;
        }

        next_ptr = header_ptr->next;

        /* header_ptr should not be the tail */
        RT_ASSERT(next_ptr > header_ptr);

        /* check whether the following free space is enough to expand */
        if (!RT_MEMHEAP_IS_USED(next_ptr))
        {
            rt_int32_t nextsize;

            nextsize = MEMITEM_SIZE(next_ptr);
            RT_ASSERT(next_ptr > 0);

            /* Here is the ASCII art of the situation that we can make use of
             * the next free node without alloc/memcpy, |*| is the control
             * block:
             *
             *      oldsize           free node
             * |*|-----------|*|----------------------|*|
             *
             *      newsize          >= minialloc
             * |*|----------------|*|-----------------|*|
             */
            if (nextsize + oldsize > newsize + RT_MEMHEAP_MINIALLOC)
            {
                /* decrement the entire free size from the available bytes count. */
                heap->available_size = heap->available_size - (newsize - oldsize);
                if (heap->pool_size - heap->available_size > heap->max_used_size)
                    heap->max_used_size = heap->pool_size - heap->available_size;

                /* remove next_ptr from free list */
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("remove block: block[0x%08x], next_free 0x%08x, prev_free 0x%08x",
                              next_ptr, next_ptr->next_free, next_ptr->prev_free));

                next_ptr->next_free->prev_free = next_ptr->prev_free;
                next_ptr->prev_free->next_free = next_ptr->next_free;
                next_ptr->next->prev = next_ptr->prev;
                next_ptr->prev->next = next_ptr->next;

                /* build a new one on the right place */
                next_ptr = (struct rt_memheap_item *)((char *)ptr + newsize);

                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("new free block: block[0x%08x] nextm[0x%08x] prevm[0x%08x]",
                              next_ptr, next_ptr->next, next_ptr->prev));

                /* mark the new block as a memory block and freed. */
                next_ptr->magic = RT_MEMHEAP_MAGIC;

                /* put the pool pointer into the new block. */
                next_ptr->pool_ptr = heap;

                next_ptr->prev = header_ptr;
                next_ptr->next = header_ptr->next;
                header_ptr->next->prev = next_ptr;
                header_ptr->next = next_ptr;

                /* insert next_ptr to free list */
                next_ptr->next_free = heap->free_list->next_free;
                next_ptr->prev_free = heap->free_list;
                heap->free_list->next_free->prev_free = next_ptr;
                heap->free_list->next_free = next_ptr;
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("new ptr: next_free 0x%08x, prev_free 0x%08x",
                                                next_ptr->next_free, next_ptr->prev_free));

                /* release lock */
                rt_sem_release(&(heap->lock));

                return ptr;
            }
        }

        /* release lock */
        rt_sem_release(&(heap->lock));

        /* re-allocate a memory block */
        new_ptr = (void *)rt_memheap_alloc(heap, newsize);
        if (new_ptr != RT_NULL)
        {
            rt_memcpy(new_ptr, ptr, oldsize < newsize ? oldsize : newsize);
            rt_memheap_free(ptr);
        }

        return new_ptr;
    }

    /* don't split when there is less than one node space left */
    if (newsize + RT_MEMHEAP_SIZE + RT_MEMHEAP_MINIALLOC >= oldsize)
        return ptr;

    /* lock memheap */
    result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
    if (result != RT_EOK)
    {
        rt_set_errno(result);
        return RT_NULL;
    }

    /* split the block. */
    new_ptr = (struct rt_memheap_item *)(((rt_uint8_t *)header_ptr) + newsize + RT_MEMHEAP_SIZE);

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                 ("split: block[0x%08x] nextm[0x%08x] prevm[0x%08x] to new[0x%08x]\n",
                  header_ptr, header_ptr->next, header_ptr->prev, new_ptr));

    /* mark the new block as a memory block and freed. */
    new_ptr->magic = RT_MEMHEAP_MAGIC;

    /* put the pool pointer into the new block. */
    new_ptr->pool_ptr = heap;

    /* break down the block list */
    new_ptr->prev = header_ptr;
    new_ptr->next = header_ptr->next;
    header_ptr->next->prev = new_ptr;
    header_ptr->next = new_ptr;

    /* determine if the block can be merged with the next neighbor. */
    if (!RT_MEMHEAP_IS_USED(new_ptr->next))
    {
        struct rt_memheap_item *free_ptr;

        /* merge block with next neighbor. */
        free_ptr = new_ptr->next;
        heap->available_size = heap->available_size - MEMITEM_SIZE(free_ptr);

        RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                     ("merge: right node 0x%08x, next_free 0x%08x, prev_free 0x%08x\n",
                      header_ptr, header_ptr->next_free, header_ptr->prev_free));

        free_ptr->next->prev = new_ptr;
        new_ptr->next = free_ptr->next;

        /* remove free ptr from free list */
        free_ptr->next_free->prev_free = free_ptr->prev_free;
        free_ptr->prev_free->next_free = free_ptr->next_free;
    }

    /* insert the split block to free list */
    new_ptr->next_free = heap->free_list->next_free;
    new_ptr->prev_free = heap->free_list;
    heap->free_list->next_free->prev_free = new_ptr;
    heap->free_list->next_free = new_ptr;
    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("new free ptr: next_free 0x%08x, prev_free 0x%08x\n",
                                    new_ptr->next_free, new_ptr->prev_free));

    /* increment the available byte count. */
    heap->available_size = heap->available_size + MEMITEM_SIZE(new_ptr);

    /* release lock */
    rt_sem_release(&(heap->lock));

    /* return the old memory block */
    return ptr;
}
void *rt_memheap_alloc(struct rt_memheap *heap, rt_uint32_t size)
{
    rt_err_t result;
    rt_uint32_t free_size;
    struct rt_memheap_item *header_ptr;

    RT_ASSERT(heap != RT_NULL);

    /* align allocated size */
    size = RT_ALIGN(size, RT_ALIGN_SIZE);
    if (size < RT_MEMHEAP_MINIALLOC)
        size = RT_MEMHEAP_MINIALLOC;

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("allocate %d on heap:%8.*s", size, RT_NAME_MAX,
                                    heap->parent.name));

    if (size < heap->available_size)
    {
        /* search on free list */
        free_size = 0;

        /* lock memheap */
        result = rt_sem_take(&(heap->lock), RT_WAITING_FOREVER);
        if (result != RT_EOK)
        {
            rt_set_errno(result);
            return RT_NULL;
        }

        /* get the first free memory block */
        header_ptr = heap->free_list->next_free;
        while (header_ptr != heap->free_list && free_size < size)
        {
            /* get current freed memory block size */
            free_size = MEMITEM_SIZE(header_ptr);
            if (free_size < size)
            {
                /* move to next free memory block */
                header_ptr = header_ptr->next_free;
            }
        }

        /* determine if the memory is available. */
        if (free_size >= size)
        {
            /* a block that satisfies the request has been found. */

            /* determine if the block needs to be split. */
            if (free_size >= (size + RT_MEMHEAP_SIZE + RT_MEMHEAP_MINIALLOC))
            {
                struct rt_memheap_item *new_ptr;

                /* split the block. */
                new_ptr = (struct rt_memheap_item *)
                          (((rt_uint8_t *)header_ptr) + size + RT_MEMHEAP_SIZE);

                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("split: block[0x%08x] nextm[0x%08x] prevm[0x%08x] to new[0x%08x]\n",
                              header_ptr, header_ptr->next, header_ptr->prev, new_ptr));

                /* mark the new block as a memory block and freed. */
                new_ptr->magic = RT_MEMHEAP_MAGIC;

                /* put the pool pointer into the new block. */
                new_ptr->pool_ptr = heap;

                /* break down the block list */
                new_ptr->prev = header_ptr;
                new_ptr->next = header_ptr->next;
                header_ptr->next->prev = new_ptr;
                header_ptr->next = new_ptr;

                /* remove header ptr from free list */
                header_ptr->next_free->prev_free = header_ptr->prev_free;
                header_ptr->prev_free->next_free = header_ptr->next_free;
                header_ptr->next_free = RT_NULL;
                header_ptr->prev_free = RT_NULL;

                /* insert new_ptr to free list */
                new_ptr->next_free = heap->free_list->next_free;
                new_ptr->prev_free = heap->free_list;
                heap->free_list->next_free->prev_free = new_ptr;
                heap->free_list->next_free = new_ptr;
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("new ptr: next_free 0x%08x, prev_free 0x%08x\n",
                                                new_ptr->next_free, new_ptr->prev_free));

                /* decrement the available byte count. */
                heap->available_size = heap->available_size - size - RT_MEMHEAP_SIZE;
                if (heap->pool_size - heap->available_size > heap->max_used_size)
                    heap->max_used_size = heap->pool_size - heap->available_size;
            }
            else
            {
                /* decrement the entire free size from the available bytes count. */
                heap->available_size = heap->available_size - free_size;
                if (heap->pool_size - heap->available_size > heap->max_used_size)
                    heap->max_used_size = heap->pool_size - heap->available_size;

                /* remove header_ptr from free list */
                RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                             ("one block: block[0x%08x], next_free 0x%08x, prev_free 0x%08x\n",
                              header_ptr, header_ptr->next_free, header_ptr->prev_free));

                header_ptr->next_free->prev_free = header_ptr->prev_free;
                header_ptr->prev_free->next_free = header_ptr->next_free;
                header_ptr->next_free = RT_NULL;
                header_ptr->prev_free = RT_NULL;
            }

            /* Mark the allocated block as not available. */
            header_ptr->magic |= RT_MEMHEAP_USED;

            /* release lock */
            rt_sem_release(&(heap->lock));

            /* Return a memory address to the caller. */
            RT_DEBUG_LOG(RT_DEBUG_MEMHEAP,
                         ("alloc mem: memory[0x%08x], heap[0x%08x], size: %d\n",
                          (void *)((rt_uint8_t *)header_ptr + RT_MEMHEAP_SIZE),
                          header_ptr, size));

            return (void *)((rt_uint8_t *)header_ptr + RT_MEMHEAP_SIZE);
        }

        /* release lock */
        rt_sem_release(&(heap->lock));
    }

    RT_DEBUG_LOG(RT_DEBUG_MEMHEAP, ("allocate memory: failed\n"));

    /* Return the completion status. */
    return RT_NULL;
}
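A minimal usage sketch for the memheap API above (pool size is an arbitrary example value; signatures as used elsewhere in RT-Thread's memheap code):

static rt_uint8_t demo_heap_pool[4096];
static struct rt_memheap demo_heap;

void demo_memheap(void)
{
    rt_memheap_init(&demo_heap, "demo", demo_heap_pool, sizeof(demo_heap_pool));

    /* the request is RT_ALIGN()ed up and clamped to RT_MEMHEAP_MINIALLOC internally */
    void *buf = rt_memheap_alloc(&demo_heap, 100);
    if (buf != RT_NULL)
        rt_memheap_free(buf);
}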
RTDECL(int) RTMemCacheCreate(PRTMEMCACHE phMemCache, size_t cbObject, size_t cbAlignment, uint32_t cMaxObjects,
                             PFNMEMCACHECTOR pfnCtor, PFNMEMCACHEDTOR pfnDtor, void *pvUser, uint32_t fFlags)
{
    AssertPtr(phMemCache);
    AssertPtrNull(pfnCtor);
    AssertPtrNull(pfnDtor);
    AssertReturn(!pfnDtor || pfnCtor, VERR_INVALID_PARAMETER);
    AssertReturn(cbObject > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cbObject <= PAGE_SIZE / 8, VERR_INVALID_PARAMETER);
    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);

    if (cbAlignment == 0)
    {
        if (cbObject <= 2)
            cbAlignment = cbObject;
        else if (cbObject <= 4)
            cbAlignment = 4;
        else if (cbObject <= 8)
            cbAlignment = 8;
        else if (cbObject <= 16)
            cbAlignment = 16;
        else if (cbObject <= 32)
            cbAlignment = 32;
        else
            cbAlignment = 64;
    }
    else
    {
        AssertReturn(!((cbAlignment - 1) & cbAlignment), VERR_NOT_POWER_OF_TWO);
        AssertReturn(cbAlignment <= 64, VERR_OUT_OF_RANGE);
    }

    /*
     * Allocate and initialize the instance memory.
     */
    RTMEMCACHEINT *pThis = (RTMEMCACHEINT *)RTMemAlloc(sizeof(*pThis));
    if (!pThis)
        return VERR_NO_MEMORY;
    int rc = RTCritSectInit(&pThis->CritSect);
    if (RT_FAILURE(rc))
    {
        RTMemFree(pThis);
        return rc;
    }

    pThis->u32Magic     = RTMEMCACHE_MAGIC;
    pThis->cbObject     = (uint32_t)RT_ALIGN_Z(cbObject, cbAlignment);
    pThis->cbAlignment  = (uint32_t)cbAlignment;
    pThis->cPerPage     = (uint32_t)((PAGE_SIZE - RT_ALIGN_Z(sizeof(RTMEMCACHEPAGE), cbAlignment)) / pThis->cbObject);
    while (  RT_ALIGN_Z(sizeof(RTMEMCACHEPAGE), 8)
           + pThis->cPerPage * pThis->cbObject
           + RT_ALIGN(pThis->cPerPage, 64) / 8 * 2
           > PAGE_SIZE)
        pThis->cPerPage--;
    pThis->cBits        = RT_ALIGN(pThis->cPerPage, 64);
    pThis->cMax         = cMaxObjects;
    pThis->fUseFreeList = cbObject >= sizeof(RTMEMCACHEFREEOBJ)
                       && !pfnCtor
                       && !pfnDtor;
    pThis->pPageHead    = NULL;
    pThis->pfnCtor      = pfnCtor;
    pThis->pfnDtor      = pfnDtor;
    pThis->pvUser       = pvUser;
    pThis->cTotal       = 0;
    pThis->cFree        = 0;
    pThis->pPageHint    = NULL;
    pThis->pFreeTop     = NULL;

    /** @todo
     * Here is a puzzler (or maybe I'm just blind), the free list code breaks
     * badly on my macbook pro (i7) (32-bit).
     *
     * I tried changing the reads from unordered to ordered to no avail.  Then I
     * tried optimizing the code with the ASMAtomicCmpXchgExPtr function to
     * avoid some reads - no change.  Inserting pause instructions did nothing
     * (as expected).  The only thing which seems to make a difference is
     * reading the pFreeTop pointer twice in the free code...  This is weird or
     * I'm overlooking something..
     *
     * No time to figure it out, so I'm disabling the broken code paths for
     * now. */
    pThis->fUseFreeList = false;

    *phMemCache = pThis;
    return VINF_SUCCESS;
}
RTDECL(int) RTAsn1BitString_DecodeAsn1Ex(PRTASN1CURSOR pCursor, uint32_t fFlags, uint32_t cMaxBits, PRTASN1BITSTRING pThis,
                                         const char *pszErrorTag)
{
    pThis->cBits         = 0;
    pThis->cMaxBits      = cMaxBits;
    pThis->uBits.pv      = NULL;
    pThis->pEncapsulated = NULL;
    RTAsn1CursorInitAllocation(pCursor, &pThis->EncapsulatedAllocation);

    int rc = RTAsn1CursorReadHdr(pCursor, &pThis->Asn1Core, pszErrorTag);
    if (RT_SUCCESS(rc))
    {
        rc = RTAsn1CursorMatchTagClassFlagsString(pCursor, &pThis->Asn1Core, ASN1_TAG_BIT_STRING,
                                                  ASN1_TAGCLASS_UNIVERSAL | ASN1_TAGFLAG_PRIMITIVE,
                                                  fFlags, pszErrorTag, "BIT STRING");
        if (RT_SUCCESS(rc))
        {
            if (!(pThis->Asn1Core.fClass & ASN1_TAGFLAG_CONSTRUCTED))
            {
                if (   (   cMaxBits == UINT32_MAX
                        || RT_ALIGN(cMaxBits, 8) / 8 + 1 >= pThis->Asn1Core.cb)
                    && pThis->Asn1Core.cb > 0)
                {
                    uint8_t cUnusedBits = pThis->Asn1Core.cb > 0 ? *pThis->Asn1Core.uData.pu8 : 0;
                    if (pThis->Asn1Core.cb < 2)
                    {
                        /* No bits present. */
                        if (cUnusedBits == 0)
                        {
                            pThis->cBits    = 0;
                            pThis->uBits.pv = NULL;
                            RTAsn1CursorSkip(pCursor, pThis->Asn1Core.cb);
                            pThis->Asn1Core.pOps    = &g_RTAsn1BitString_Vtable;
                            pThis->Asn1Core.fFlags |= RTASN1CORE_F_PRIMITE_TAG_STRUCT;
                            return VINF_SUCCESS;
                        }
                        rc = RTAsn1CursorSetInfo(pCursor, VERR_ASN1_INVALID_BITSTRING_ENCODING,
                                                 "%s: Bad unused bit count: %#x (cb=%#x)",
                                                 pszErrorTag, cUnusedBits, pThis->Asn1Core.cb);
                    }
                    else if (cUnusedBits < 8)
                    {
                        pThis->cBits  = (pThis->Asn1Core.cb - 1) * 8;
                        pThis->cBits -= cUnusedBits;
                        pThis->uBits.pu8 = pThis->Asn1Core.uData.pu8 + 1;
                        if (   !(pCursor->fFlags & (RTASN1CURSOR_FLAGS_DER | RTASN1CURSOR_FLAGS_CER))
                            || cUnusedBits == 0
                            || !(  pThis->uBits.pu8[pThis->Asn1Core.cb - 2]
                                 & (((uint8_t)1 << cUnusedBits) - (uint8_t)1)))
                        {
                            RTAsn1CursorSkip(pCursor, pThis->Asn1Core.cb);
                            pThis->Asn1Core.pOps    = &g_RTAsn1BitString_Vtable;
                            pThis->Asn1Core.fFlags |= RTASN1CORE_F_PRIMITE_TAG_STRUCT;
                            return VINF_SUCCESS;
                        }
                        rc = RTAsn1CursorSetInfo(pCursor, VERR_ASN1_INVALID_BITSTRING_ENCODING,
                                                 "%s: Unused bits shall be zero in DER/CER mode: last byte=%#x cUnused=%#x",
                                                 pszErrorTag, pThis->uBits.pu8[pThis->cBits / 8], cUnusedBits);
                    }
                    else
                        rc = RTAsn1CursorSetInfo(pCursor, VERR_ASN1_INVALID_BITSTRING_ENCODING,
                                                 "%s: Bad unused bit count: %#x (cb=%#x)",
                                                 pszErrorTag, cUnusedBits, pThis->Asn1Core.cb);
                }
                else
                    rc = RTAsn1CursorSetInfo(pCursor, VERR_ASN1_INVALID_BITSTRING_ENCODING,
                                             "%s: Size mismatch: cb=%#x, expected %#x (cMaxBits=%#x)",
                                             pszErrorTag, pThis->Asn1Core.cb, RT_ALIGN(cMaxBits, 8) / 8 + 1, cMaxBits);
            }
            else
                rc = RTAsn1CursorSetInfo(pCursor, VERR_ASN1_CONSTRUCTED_STRING_NOT_IMPL,
                                         "%s: Constructed BIT STRING not implemented.", pszErrorTag);
        }
    }
    RT_ZERO(*pThis);
    return rc;
}
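A worked DER example for the unused-bits logic above: a BIT STRING carrying the three bits 1, 0, 1.

/* DER encoding of a BIT STRING holding the bits 1,0,1:
 *      03 02 05 A0
 *      |  |  |  +-- content: bits 101 followed by 5 padding zero bits
 *      |  |  +----- first content octet: cUnusedBits = 5
 *      |  +-------- length: 2 content octets
 *      +----------- tag: BIT STRING
 * The decoder computes cBits = (2 - 1) * 8 - 5 = 3 and, in DER/CER mode,
 * rejects the encoding if any of the 5 padding bits is set (the
 * (1 << cUnusedBits) - 1 mask check above). */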
void VBoxGuestRAMSlider::init()
{
    ulong fullSize = vboxGlobal().host().GetMemorySize();
    CSystemProperties sys = vboxGlobal().virtualBox().GetSystemProperties();
    mMinRAM = sys.GetMinGuestRAM();
    mMaxRAM = RT_MIN (RT_ALIGN (fullSize, _1G / _1M), sys.GetMaxGuestRAM());

    /* Come up with some nice round percent boundaries relative to
     * the system memory. A max of 75% on a 256GB config is ridiculous,
     * even on an 8GB rig reserving 2GB for the OS is way too conservative.
     * The max numbers can be estimated using the following program:
     *
     *      double calcMaxPct(uint64_t cbRam)
     *      {
     *          double cbRamOverhead = cbRam * 0.0390625; // 160 bytes per page.
     *          double cbRamForTheOS = RT_MAX(RT_MIN(_512M, cbRam * 0.25), _64M);
     *          double OSPct  = (cbRamOverhead + cbRamForTheOS) * 100.0 / cbRam;
     *          double MaxPct = 100 - OSPct;
     *          return MaxPct;
     *      }
     *
     *      int main()
     *      {
     *          uint64_t cbRam = _1G;
     *          for (; !(cbRam >> 33); cbRam += _1G)
     *              printf("%8lluGB %.1f%% %8lluKB\n", cbRam >> 30, calcMaxPct(cbRam),
     *                     (uint64_t)(cbRam * calcMaxPct(cbRam) / 100.0) >> 20);
     *          for (; !(cbRam >> 51); cbRam <<= 1)
     *              printf("%8lluGB %.1f%% %8lluKB\n", cbRam >> 30, calcMaxPct(cbRam),
     *                     (uint64_t)(cbRam * calcMaxPct(cbRam) / 100.0) >> 20);
     *          return 0;
     *      }
     *
     * Note. We might wanna put these calculations somewhere global later. */

    /* System RAM amount test */
    mMaxRAMAlw  = (uint)(0.75 * fullSize);
    mMaxRAMOpt  = (uint)(0.50 * fullSize);
    if (fullSize < 3072)
        /* done */;
    else if (fullSize < 4096)   /* 3GB */
        mMaxRAMAlw = (uint)(0.80 * fullSize);
    else if (fullSize < 6144)   /* 4-5GB */
    {
        mMaxRAMAlw = (uint)(0.84 * fullSize);
        mMaxRAMOpt = (uint)(0.60 * fullSize);
    }
    else if (fullSize < 8192)   /* 6-7GB */
    {
        mMaxRAMAlw = (uint)(0.88 * fullSize);
        mMaxRAMOpt = (uint)(0.65 * fullSize);
    }
    else if (fullSize < 16384)  /* 8-15GB */
    {
        mMaxRAMAlw = (uint)(0.90 * fullSize);
        mMaxRAMOpt = (uint)(0.70 * fullSize);
    }
    else if (fullSize < 32768)  /* 16-31GB */
    {
        mMaxRAMAlw = (uint)(0.93 * fullSize);
        mMaxRAMOpt = (uint)(0.75 * fullSize);
    }
    else if (fullSize < 65536)  /* 32-63GB */
    {
        mMaxRAMAlw = (uint)(0.94 * fullSize);
        mMaxRAMOpt = (uint)(0.80 * fullSize);
    }
    else if (fullSize < 131072) /* 64-127GB */
    {
        mMaxRAMAlw = (uint)(0.95 * fullSize);
        mMaxRAMOpt = (uint)(0.85 * fullSize);
    }
    else                        /* 128GB- */
    {
        mMaxRAMAlw = (uint)(0.96 * fullSize);
        mMaxRAMOpt = (uint)(0.90 * fullSize);
    }

    /* Now check whether the calculated maximums are out of the range for the
     * guest RAM.  If so, change them accordingly. */
    mMaxRAMAlw  = RT_MIN (mMaxRAMAlw, mMaxRAM);
    mMaxRAMOpt  = RT_MIN (mMaxRAMOpt, mMaxRAM);

    setPageStep (calcPageStep (mMaxRAM));
    setSingleStep (pageStep() / 4);
    setTickInterval (pageStep());
    /* Setup the scale so that ticks are at page step boundaries */
    setMinimum ((mMinRAM / pageStep()) * pageStep());
    setMaximum (mMaxRAM);
    setSnappingEnabled (true);
    setOptimalHint (mMinRAM, mMaxRAMOpt);
    setWarningHint (mMaxRAMOpt, mMaxRAMAlw);
    setErrorHint (mMaxRAMAlw, mMaxRAM);
}