/**
 * Formatted-output callback: appends @a cbChars characters to the string.
 *
 * Grows the buffer geometrically (doubling, but adding at most 4MB per
 * step), falling back to exactly-fitting when doubling isn't enough.
 *
 * @returns cbChars (0 on capacity failure when exceptions are disabled).
 * @param   pvArg       The RTCString instance being appended to.
 * @param   pachChars   The characters to append; not zero terminated.
 * @param   cbChars     Number of characters to append.
 */
RTCString::printfOutputCallback(void *pvArg, const char *pachChars, size_t cbChars)
{
    RTCString *pThis = (RTCString *)pvArg;
    if (cbChars)
    {
        size_t cchBoth = pThis->m_cch + cbChars;
        if (cchBoth >= pThis->m_cbAllocated)
        {
            /* Double the buffer size, if it's less that _4M. Align sizes like for append. */
            size_t cbAlloc = RT_ALIGN_Z(pThis->m_cbAllocated, IPRT_MINISTRING_APPEND_ALIGNMENT);
            cbAlloc += RT_MIN(cbAlloc, _4M);
            /* Doubling may still be too small for a huge append; fit exactly then. */
            if (cbAlloc <= cchBoth)
                cbAlloc = RT_ALIGN_Z(cchBoth + 1, IPRT_MINISTRING_APPEND_ALIGNMENT);
            pThis->reserve(cbAlloc);
#ifndef RT_EXCEPTIONS_ENABLED
            /* Without exceptions reserve() cannot signal failure, so check the capacity. */
            AssertReleaseReturn(pThis->capacity() > cchBoth, 0);
#endif
        }

        /* Append the characters and re-terminate. */
        memcpy(&pThis->m_psz[pThis->m_cch], pachChars, cbChars);
        pThis->m_cch = cchBoth;
        pThis->m_psz[cchBoth] = '\0';
    }
    return cbChars;
}
/**
 * Allocates zero initialized memory with a variable size rounding policy.
 *
 * @returns Pointer to the allocated, zeroed memory, NULL on failure.
 * @param   cbUnaligned     The requested size.
 * @param   pszTag          Allocation tag used for statistics and such.
 */
RTDECL(void *) RTMemAllocZVarTag(size_t cbUnaligned, const char *pszTag)
{
    /* Round small requests up to pointer size, larger ones to 16 bytes. */
    size_t const cbAligned = cbUnaligned >= 16
                           ? RT_ALIGN_Z(cbUnaligned, 16)
                           : RT_ALIGN_Z(cbUnaligned, sizeof(void *));
    return RTMemAllocZTag(cbAligned, pszTag);
}
/**
 * Aligns allocation sizes a little.
 *
 * @returns Aligned size.
 * @param   cb      Requested size.
 */
static size_t rtAsn1DefaultAllocator_AlignSize(size_t cb)
{
    /* Pick the alignment bucket matching the request size; requests
       below 16 bytes are passed through unmodified. */
    size_t cbAlignment;
    if (cb >= 64)
        cbAlignment = 64;
    else if (cb >= 32)
        cbAlignment = 32;
    else if (cb >= 16)
        cbAlignment = 16;
    else
        return cb;
    return RT_ALIGN_Z(cb, cbAlignment);
}
/**
 * Reallocates a UTF-16 string buffer, keeping it zero terminated.
 *
 * A zero @a cbNew frees the string; a NULL input string allocates fresh.
 *
 * @returns IPRT status code.
 * @param   ppwsz       Pointer to the string variable; updated on success.
 * @param   cbNew       New size in bytes; rounded up to whole RTUTF16 units.
 * @param   pszTag      Allocation tag.
 */
RTDECL(int) RTUtf16ReallocTag(PRTUTF16 *ppwsz, size_t cbNew, const char *pszTag)
{
    PRTUTF16 const pwszOld = *ppwsz;
    cbNew = RT_ALIGN_Z(cbNew, sizeof(RTUTF16));
    if (cbNew == 0)
    {
        /* Shrinking to nothing means freeing. */
        RTMemFree(pwszOld);
        *ppwsz = NULL;
        return VINF_SUCCESS;
    }

    if (pwszOld)
    {
        /* Resize an existing buffer; force-terminate the (possibly truncated) result. */
        PRTUTF16 const pwszNew = (PRTUTF16)RTMemReallocTag(pwszOld, cbNew, pszTag);
        if (!pwszNew)
            return VERR_NO_STR_MEMORY;
        pwszNew[cbNew / sizeof(RTUTF16) - 1] = '\0';
        *ppwsz = pwszNew;
    }
    else
    {
        /* Fresh allocation; produce an empty, fully terminated string. */
        PRTUTF16 const pwszNew = (PRTUTF16)RTMemAllocTag(cbNew, pszTag);
        if (!pwszNew)
            return VERR_NO_UTF16_MEMORY;
        pwszNew[0] = '\0';
        pwszNew[cbNew / sizeof(RTUTF16) - 1] = '\0';
        *ppwsz = pwszNew;
    }
    return VINF_SUCCESS;
}
/**
 * Frees memory allocated using RTMemContAlloc().
 *
 * @param   pv      Pointer to return from RTMemContAlloc().
 * @param   cb      The cb parameter passed to RTMemContAlloc().
 */
RTR0DECL(void) RTMemContFree(void *pv, size_t cb)
{
    if (pv)
    {
        int             cOrder;
        unsigned        cPages;
        unsigned        iPage;
        struct page    *paPages;
        IPRT_LINUX_SAVE_EFL_AC();

        /* validate */
        AssertMsg(!((uintptr_t)pv & PAGE_OFFSET_MASK), ("pv=%p\n", pv));
        Assert(cb > 0);

        /* calc order and get pages */
        cb = RT_ALIGN_Z(cb, PAGE_SIZE);
        cPages = cb >> PAGE_SHIFT;
        /* The allocation was a power-of-two page run; recover the order for __free_pages. */
        cOrder = CalcPowerOf2Order(cPages);
        paPages = virt_to_page(pv);

        /*
         * Restore page attributes freeing the pages.
         */
        for (iPage = 0; iPage < cPages; iPage++)
        {
            ClearPageReserved(&paPages[iPage]);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 20) /** @todo find the exact kernel where change_page_attr was introduced. */
            MY_SET_PAGES_NOEXEC(&paPages[iPage], 1);
#endif
        }
        __free_pages(paPages, cOrder);
        IPRT_LINUX_RESTORE_EFL_AC();
    }
}
/**
 * OS specific allocation function.
 *
 * @returns IPRT status code.
 * @param   cb      Number of bytes requested by the caller (header excluded).
 * @param   fFlags  RTMEMHDR_FLAG_XXX allocation flags.
 * @param   ppHdr   Where to return the allocation header on success.
 */
DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr)
{
    size_t      cbAllocated = cb;
    PRTMEMHDR   pHdr;

#ifdef RT_ARCH_AMD64
    if (fFlags & RTMEMHDR_FLAG_EXEC)
    {
        /* Executable memory cannot be requested in any-context mode.
           Fix: the function returns an int status, so return a status code
           here instead of NULL (the original AssertReturn'ed NULL). */
        AssertReturn(!(fFlags & RTMEMHDR_FLAG_ANY_CTX), VERR_NOT_SUPPORTED);
        /* Page-align the total so the kernel text arena is happy; the header
           is carved off the front, so cbAllocated excludes it. */
        cbAllocated = RT_ALIGN_Z(cb + sizeof(*pHdr), PAGE_SIZE) - sizeof(*pHdr);
        pHdr = (PRTMEMHDR)segkmem_alloc(heaptext_arena, cbAllocated + sizeof(*pHdr), KM_SLEEP);
    }
    else
#endif
    {
        /* Any-context allocations must not sleep. */
        unsigned fKmFlags = fFlags & RTMEMHDR_FLAG_ANY_CTX_ALLOC ? KM_NOSLEEP : KM_SLEEP;
        if (fFlags & RTMEMHDR_FLAG_ZEROED)
            pHdr = (PRTMEMHDR)kmem_zalloc(cb + sizeof(*pHdr), fKmFlags);
        else
            pHdr = (PRTMEMHDR)kmem_alloc(cb + sizeof(*pHdr), fKmFlags);
    }
    if (RT_UNLIKELY(!pHdr))
    {
        /* Fix: cast the whole sum so the %u vararg is really an unsigned;
           previously only cb was cast and the size_t addend widened it again. */
        LogRel(("rtMemAllocEx(%u, %#x) failed\n", (unsigned)(cb + sizeof(*pHdr)), fFlags));
        return VERR_NO_MEMORY;
    }

    pHdr->u32Magic  = RTMEMHDR_MAGIC;
    pHdr->fFlags    = fFlags;
    pHdr->cb        = cbAllocated;
    pHdr->cbReq     = cb;

    *ppHdr = pHdr;
    return VINF_SUCCESS;
}
/**
 * @interface_method_impl{TXSTRANSPORT,pfnSendPkt}
 */
static DECLCALLBACK(int) txsTcpSendPkt(PCTXSPKTHDR pPktHdr)
{
    Assert(pPktHdr->cb >= sizeof(TXSPKTHDR));

    /*
     * Fail if no client connection.
     */
    RTSOCKET hTcpClient = g_hTcpClient;
    if (hTcpClient == NIL_RTSOCKET)
        return VERR_NET_NOT_CONNECTED;

    /*
     * Write it.
     */
    /* NOTE(review): the write is padded up to TXSPKT_ALIGNMENT, which assumes
       packet buffers are allocated with at least that much size slack —
       confirm against the packet allocator. */
    size_t cbToSend = RT_ALIGN_Z(pPktHdr->cb, TXSPKT_ALIGNMENT);
    int rc = RTTcpWrite(hTcpClient, pPktHdr, cbToSend);
    if (    RT_FAILURE(rc)
        &&  rc != VERR_INTERRUPTED)
    {
        /* assume fatal connection error. */
        Log(("RTTcpWrite -> %Rrc -> txsTcpDisconnectClient(%RTsock)\n", rc, g_hTcpClient));
        txsTcpDisconnectClient();
    }
    return rc;
}
/**
 * Allocates a physically contiguous bounce buffer for a host/guest transfer.
 *
 * Tries a buffer covering the whole transfer (capped at 16KB) first and
 * falls back to a single page on failure.
 *
 * @returns Virtual address of the buffer, NULL on allocation failure.
 * @param   tmp_sizep   Where to return the actual buffer size.
 * @param   physp       Where to return the physical address of the buffer.
 * @param   xfer_size   The size of the transfer the buffer is for.
 * @param   caller      Name of the calling function, for the release log.
 */
static void *alloc_bounce_buffer(size_t *tmp_sizep, PRTCCPHYS physp, size_t xfer_size, const char *caller)
{
    size_t tmp_size;
    void  *tmp;

    /* try for big first. */
    tmp_size = RT_ALIGN_Z(xfer_size, PAGE_SIZE);
    if (tmp_size > 16U*_1K)
        tmp_size = 16U*_1K;
    tmp = kmalloc(tmp_size, GFP_KERNEL);
    if (!tmp)
    {
        /* fall back on a page sized buffer. */
        tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!tmp)
        {
            /* Fix: the format string had a trailing %s with no matching
               argument (undefined behavior in printk-style formatting). */
            LogRel(("%s: could not allocate bounce buffer for xfer_size=%zu\n", caller, xfer_size));
            return NULL;
        }
        tmp_size = PAGE_SIZE;
    }

    *tmp_sizep = tmp_size;
    *physp     = virt_to_phys(tmp);
    return tmp;
}
/**
 * Allocates a zero-initialized block from an offset-based heap.
 *
 * @returns Pointer to the zeroed user data, NULL on failure.
 * @param   hHeap           Handle to the heap.
 * @param   cb              Number of bytes to allocate (rounded up internally).
 * @param   cbAlignment     Requested alignment; 0 means the heap default.
 *                          Must be a power of two when specified.
 */
RTDECL(void *) RTHeapOffsetAllocZ(RTHEAPOFFSET hHeap, size_t cb, size_t cbAlignment)
{
    PRTHEAPOFFSETINTERNAL pHeapInt = hHeap;

    /*
     * Validate and adjust the input.
     */
    AssertPtrReturn(pHeapInt, NULL);
    cb = cb < RTHEAPOFFSET_MIN_BLOCK
       ? RTHEAPOFFSET_MIN_BLOCK
       : RT_ALIGN_Z(cb, RTHEAPOFFSET_ALIGNMENT);

    if (!cbAlignment)
        cbAlignment = RTHEAPOFFSET_ALIGNMENT;
    else
    {
        /* Must be a power of two and no less than the heap minimum. */
        Assert(!(cbAlignment & (cbAlignment - 1)));
        Assert((cbAlignment & ~(cbAlignment - 1)) == cbAlignment);
        if (cbAlignment < RTHEAPOFFSET_ALIGNMENT)
            cbAlignment = RTHEAPOFFSET_ALIGNMENT;
    }

    /*
     * Allocate and zero the block.
     */
    PRTHEAPOFFSETBLOCK pBlock = rtHeapOffsetAllocBlock(pHeapInt, cb, cbAlignment);
    if (RT_UNLIKELY(!pBlock))
        return NULL;
    void *pv = pBlock + 1;      /* user data follows the block header */
    memset(pv, 0, cb);
    return pv;
}
/**
 * Expands the page list so we can index pages directly.
 *
 * The kernel may return fewer (multi-page) entries than pages; this walks
 * the list backwards, splitting each entry into PAGE_SIZE pieces so that
 * entry i ends up describing page i.
 *
 * @param   paPages         The page list array to fix.
 * @param   cPages          The number of pages that's supposed to go into the list.
 * @param   cPagesRet       The actual number of pages in the list.
 */
static void rtR0MemObjFixPageList(KernPageList_t *paPages, ULONG cPages, ULONG cPagesRet)
{
    Assert(cPages >= cPagesRet);
    if (cPages != cPagesRet)
    {
        /* Walk from the end so expansion never overwrites unprocessed input entries. */
        ULONG iIn = cPagesRet;
        ULONG iOut = cPages;
        do
        {
            iIn--;
            iOut--;
            Assert(iIn <= iOut);

            KernPageList_t Page = paPages[iIn];
            Assert(!(Page.Addr & PAGE_OFFSET_MASK));
            Assert(Page.Size == RT_ALIGN_Z(Page.Size, PAGE_SIZE));

            if (Page.Size > PAGE_SIZE)
            {
                /* Emit the tail pages of this entry, highest address first. */
                do
                {
                    Page.Size -= PAGE_SIZE;
                    paPages[iOut].Addr = Page.Addr + Page.Size;
                    paPages[iOut].Size = PAGE_SIZE;
                    iOut--;
                } while (Page.Size > PAGE_SIZE);
            }

            /* Finally, the first page of the entry. */
            paPages[iOut].Addr = Page.Addr;
            paPages[iOut].Size = PAGE_SIZE;
        } while (   iIn != iOut
                 && iIn > 0);
    }
}
/**
 * Allocates memory from the specified heap.
 *
 * @returns Address of the allocated memory.
 * @param   cb          The number of bytes to allocate.
 * @param   pszTag      The tag.
 * @param   fZero       Whether to zero the memory or not.
 * @param   fProtExec   PROT_EXEC or 0.
 */
static void *rtMemPagePosixAlloc(size_t cb, const char *pszTag, bool fZero, int fProtExec)
{
    /*
     * Validate & adjust the input.
     */
    Assert(cb > 0);
    NOREF(pszTag);
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);

    /*
     * Allocate via an anonymous private mapping.
     */
    void *pv = mmap(NULL, cb,
                    PROT_READ | PROT_WRITE | fProtExec,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (pv == MAP_FAILED)
        return NULL;

    AssertPtr(pv);
    if (fZero)
        RT_BZERO(pv, cb);
    return pv;
}
/**
 * Appends a single unicode code point to the string, UTF-8 encoded.
 *
 * @returns Reference to this string.
 * @param   uc      The code point to append; must not exceed 0x7fffffff.
 */
RTCString &RTCString::appendCodePoint(RTUNICP uc)
{
    /* ASCII encodes as a single byte; reuse the plain char append. */
    if (uc < 0x80)
        return RTCString::append((char)uc);

    /*
     * Multibyte encoding.
     * Assume max encoding length when resizing the string, that's simpler.
     */
    AssertReturn(uc <= UINT32_C(0x7fffffff), *this);

    if (m_cch + 6 >= m_cbAllocated)
    {
        /* Worst-case UTF-8 sequence is 6 bytes; grow for that plus terminator. */
        reserve(RT_ALIGN_Z(m_cch + 6 + 1, IPRT_MINISTRING_APPEND_ALIGNMENT));
        // calls realloc(cbBoth) and sets m_cbAllocated; may throw bad_alloc.
#ifndef RT_EXCEPTIONS_ENABLED
        AssertRelease(capacity() > m_cch + 6);
#endif
    }

    /* Encode in place and fix up the length and terminator. */
    char * const pszAfter = RTStrPutCp(&m_psz[m_cch], uc);
    m_cch = pszAfter - m_psz;
    *pszAfter = '\0';

    return *this;
}
/** * Internal worker that creates an environment handle with a specified capacity. * * @returns IPRT status code. * @param ppIntEnv Where to store the result. * @param cAllocated The initial array size. * @param fCaseSensitive Whether the environment block is case sensitive or * not. * @param fPutEnvBlock Indicates whether this is a special environment * block that will be used to record change another * block. We will keep unsets in putenv format, i.e. * just the variable name without any equal sign. */ static int rtEnvCreate(PRTENVINTERNAL *ppIntEnv, size_t cAllocated, bool fCaseSensitive, bool fPutEnvBlock) { /* * Allocate environment handle. */ PRTENVINTERNAL pIntEnv = (PRTENVINTERNAL)RTMemAlloc(sizeof(*pIntEnv)); if (pIntEnv) { /* * Pre-allocate the variable array. */ pIntEnv->u32Magic = RTENV_MAGIC; pIntEnv->fPutEnvBlock = fPutEnvBlock; pIntEnv->pfnCompare = fCaseSensitive ? RTStrNCmp : RTStrNICmp; pIntEnv->papszEnvOtherCP = NULL; pIntEnv->cVars = 0; pIntEnv->cAllocated = RT_ALIGN_Z(RT_MAX(cAllocated, RTENV_GROW_SIZE), RTENV_GROW_SIZE); pIntEnv->papszEnv = (char **)RTMemAllocZ(sizeof(pIntEnv->papszEnv[0]) * pIntEnv->cAllocated); if (pIntEnv->papszEnv) { *ppIntEnv = pIntEnv; return VINF_SUCCESS; } RTMemFree(pIntEnv); } return VERR_NO_MEMORY; }
/**
 * Allocates page-aligned memory reachable with a 16-bit address.
 *
 * @returns IPRT status code.
 * @param   cbAlloc     Number of bytes; rounded up to a whole page.
 * @param   fFlags      Allocation flags passed on to rtMemAllocExInRange.
 * @param   ppv         Where to return the allocation.
 */
DECLHIDDEN(int) rtMemAllocEx16BitReach(size_t cbAlloc, uint32_t fFlags, void **ppv)
{
    size_t const cbAligned = RT_ALIGN_Z(cbAlloc, PAGE_SIZE);
    AssertReturn(cbAligned <= _64K - PAGE_SIZE, VERR_NO_MEMORY);
    /* Seems this doesn't work on W7/64... */
    return rtMemAllocExInRange(cbAligned, fFlags, ppv, PAGE_SIZE, _64K - cbAligned);
}
/**
 * Initializes the tracing.
 *
 * @returns VBox status code
 * @param   pVM         The cross context VM structure.
 * @param   cbEntry     The trace entry size.
 * @param   cEntries    The number of entries.
 */
static int dbgfR3TraceEnable(PVM pVM, uint32_t cbEntry, uint32_t cEntries)
{
    /*
     * Don't enable it twice.
     */
    if (pVM->hTraceBufR3 != NIL_RTTRACEBUF)
        return VERR_ALREADY_EXISTS;

    /*
     * Resolve default parameter values.
     */
    int rc;
    if (!cbEntry)
    {
        rc = CFGMR3QueryU32Def(CFGMR3GetChild(CFGMR3GetRoot(pVM), "DBGF"), "TraceBufEntrySize", &cbEntry, 128);
        AssertRCReturn(rc, rc);
    }
    if (!cEntries)
    {
        rc = CFGMR3QueryU32Def(CFGMR3GetChild(CFGMR3GetRoot(pVM), "DBGF"), "TraceBufEntries", &cEntries, 4096);
        AssertRCReturn(rc, rc);
    }

    /*
     * Figure the required size.
     */
    /* Sizing probe: carving with a NULL block is expected to fail with
       VERR_BUFFER_OVERFLOW and return the required size in cbBlock. */
    RTTRACEBUF  hTraceBuf;
    size_t      cbBlock = 0;
    rc = RTTraceBufCarve(&hTraceBuf, cEntries, cbEntry, 0 /*fFlags*/, NULL, &cbBlock);
    if (rc != VERR_BUFFER_OVERFLOW)
    {
        AssertReturn(!RT_SUCCESS_NP(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
        return rc;
    }

    /*
     * Allocate a hyper heap block and carve a trace buffer out of it.
     *
     * Note! We ASSUME that the returned trace buffer handle has the same value
     *       as the heap block.
     */
    cbBlock = RT_ALIGN_Z(cbBlock, PAGE_SIZE);
    void *pvBlock;
    rc = MMR3HyperAllocOnceNoRel(pVM, cbBlock, PAGE_SIZE, MM_TAG_DBGF, &pvBlock);
    if (RT_FAILURE(rc))
        return rc;

    rc = RTTraceBufCarve(&hTraceBuf, cEntries, cbEntry, 0 /*fFlags*/, pvBlock, &cbBlock);
    AssertRCReturn(rc, rc);
    AssertRelease(hTraceBuf == (RTTRACEBUF)pvBlock);
    AssertRelease((void *)hTraceBuf == pvBlock);

    /* Publish the handle in all three address-space contexts. */
    pVM->hTraceBufR3 = hTraceBuf;
    pVM->hTraceBufR0 = MMHyperCCToR0(pVM, hTraceBuf);
    pVM->hTraceBufRC = MMHyperCCToRC(pVM, hTraceBuf);
    return VINF_SUCCESS;
}
/**
 * Reads the target of a symbolic link into an allocated buffer.
 *
 * @returns IPRT status code; VERR_NOT_SYMLINK if @a pszSymlink is no link.
 * @param   pszSymlink  Path to the symbolic link to read.
 * @param   ppszTarget  Where to return the allocated target string.
 */
RTDECL(int) RTSymlinkReadA(const char *pszSymlink, char **ppszTarget)
{
    AssertPtr(ppszTarget);
    char const *pszNativeSymlink;
    int rc = rtPathToNative(&pszNativeSymlink, pszSymlink, NULL);
    if (RT_SUCCESS(rc))
    {
        /* Guess the initial buffer size from the link size, but never less
           than 64 bytes.  Fix: this must be RT_MAX, not RT_MIN — RT_MIN
           capped the guess at 64 and produced cbBuf == 0 for st_size == 0,
           after which the cbBuf *= 2 growth loop could never terminate. */
        ssize_t cbBuf;
        struct stat s;
        if (!lstat(pszNativeSymlink, &s))
            cbBuf = RT_MAX(RT_ALIGN_Z(s.st_size, 64), 64);
        else
            cbBuf = 1024;

        /* Read loop that grows the buffer. */
        char *pszBuf = NULL;
        for (;;)
        {
            RTMemTmpFree(pszBuf);
            pszBuf = (char *)RTMemTmpAlloc(cbBuf);
            if (pszBuf)
            {
                ssize_t cbReturned = readlink(pszNativeSymlink, pszBuf, cbBuf);
                if (cbReturned >= cbBuf)
                {
                    /* Increase the buffer size and try again */
                    cbBuf *= 2;
                    continue;
                }

                if (cbReturned > 0)
                {
                    pszBuf[cbReturned] = '\0';
                    rc = rtPathFromNativeDup(ppszTarget, pszBuf, pszSymlink);
                }
                else if (errno == EINVAL)
                    rc = VERR_NOT_SYMLINK;
                else
                    rc = RTErrConvertFromErrno(errno);
            }
            else
                rc = VERR_NO_TMP_MEMORY;
            break;
        } /* for loop */

        RTMemTmpFree(pszBuf);
        rtPathFreeNative(pszNativeSymlink, pszSymlink);
    }

    if (RT_SUCCESS(rc))
        LogFlow(("RTSymlinkReadA(%p={%s},%p): returns %Rrc *ppszTarget=%p:{%s}\n", pszSymlink, pszSymlink, ppszTarget, rc, *ppszTarget, *ppszTarget));
    else
        LogFlow(("RTSymlinkReadA(%p={%s},%p): returns %Rrc\n", pszSymlink, pszSymlink, ppszTarget, rc));
    return rc;
}
/**
 * Allocates a new node and initialize the node part of it.
 *
 * The returned node has one reference.
 *
 * @returns VBox status code.
 *
 * @param   cbNode      The size of the node.
 * @param   pszName     The name of the node.
 * @param   enmType     The node type.
 * @param   pDir        The directory (parent).
 * @param   ppNode      Where to return the pointer to the node.
 */
static int vboxfuseNodeAlloc(size_t cbNode, const char *pszName, VBOXFUSETYPE enmType, PVBOXFUSEDIR pDir,
                             PVBOXFUSENODE *ppNode)
{
    Assert(cbNode >= sizeof(VBOXFUSENODE));

    /*
     * Allocate the memory for it and init the critical section.
     */
    /* Single allocation: the node structure (8-byte aligned) immediately
       followed by a copy of the name. */
    size_t cchName = strlen(pszName);
    PVBOXFUSENODE pNode = (PVBOXFUSENODE)RTMemAlloc(cchName + 1 + RT_ALIGN_Z(cbNode, 8));
    if (!pNode)
        return VERR_NO_MEMORY;
    int rc = RTCritSectInit(&pNode->CritSect);
    if (RT_FAILURE(rc))
    {
        RTMemFree(pNode);
        return rc;
    }

    /*
     * Initialize the members.
     */
    /* Copy the name into the co-allocated space right after the node. */
    pNode->pszName = (char *)memcpy((uint8_t *)pNode + RT_ALIGN_Z(cbNode, 8), pszName, cchName + 1);
    pNode->cchName = cchName;
    pNode->enmType = enmType;
    pNode->cRefs   = 1;
    pNode->pDir    = pDir;
#if 0
    pNode->fMode   = enmType == VBOXFUSETYPE_DIRECTORY ? S_IFDIR | 0755 : S_IFREG | 0644;
#else
    pNode->fMode   = enmType == VBOXFUSETYPE_DIRECTORY ? S_IFDIR | 0777 : S_IFREG | 0666;
#endif
    pNode->Uid     = 0;
    pNode->Gid     = 0;
    pNode->cLinks  = 0;
    pNode->Ino     = g_NextIno++;       /** @todo make this safe! */
    pNode->cbPrimary = 0;

    *ppNode = pNode;
    return VINF_SUCCESS;
}
/**
 * Allocates an empty, zero terminated UTF-16 string buffer.
 *
 * @returns The allocated string buffer, NULL on failure.
 * @param   cb      Requested size in bytes; rounded up to whole RTUTF16
 *                  units and never less than one terminator.
 * @param   pszTag  Allocation tag.
 */
RTDECL(PRTUTF16) RTUtf16AllocTag(size_t cb, const char *pszTag)
{
    /* Ensure room for at least the terminator, rounding larger requests
       up to whole RTUTF16 units. */
    cb = cb > sizeof(RTUTF16) ? RT_ALIGN_Z(cb, sizeof(RTUTF16)) : sizeof(RTUTF16);

    PRTUTF16 const pwsz = (PRTUTF16)RTMemAllocTag(cb, pszTag);
    if (pwsz)
        *pwsz = '\0';
    return pwsz;
}
/**
 * Returns the size of the NOTE section given the name and size of the data.
 *
 * @param   pszName     Name of the note section.
 * @param   cbData      Size of the data portion of the note section.
 *
 * @return The size of the NOTE section as rounded to the file alignment.
 */
static uint64_t Elf64NoteSectionSize(const char *pszName, uint64_t cbData)
{
    /* Header + aligned name (including terminator) + aligned data. */
    size_t const cbName = strlen(pszName) + 1;
    return sizeof(Elf64_Nhdr)
         + RT_ALIGN_Z(cbName, g_NoteAlign)
         + RT_ALIGN_64(cbData, g_NoteAlign);
}
DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr) { size_t cbAllocated = cb; PRTMEMHDR pHdr = NULL; #ifdef RT_ARCH_AMD64 /* * Things are a bit more complicated on AMD64 for executable memory * because we need to be in the ~2GB..~0 range for code. */ if (fFlags & RTMEMHDR_FLAG_EXEC) { if (fFlags & RTMEMHDR_FLAG_ANY_CTX) return VERR_NOT_SUPPORTED; # ifdef USE_KMEM_ALLOC_PROT pHdr = (PRTMEMHDR)kmem_alloc_prot(kernel_map, cb + sizeof(*pHdr), VM_PROT_ALL, VM_PROT_ALL, KERNBASE); # else vm_object_t pVmObject = NULL; vm_offset_t Addr = KERNBASE; cbAllocated = RT_ALIGN_Z(cb + sizeof(*pHdr), PAGE_SIZE); pVmObject = vm_object_allocate(OBJT_DEFAULT, cbAllocated >> PAGE_SHIFT); if (!pVmObject) return VERR_NO_EXEC_MEMORY; /* Addr contains a start address vm_map_find will start searching for suitable space at. */ int rc = vm_map_find(kernel_map, pVmObject, 0, &Addr, cbAllocated, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0); if (rc == KERN_SUCCESS) { rc = vm_map_wire(kernel_map, Addr, Addr + cbAllocated, VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); if (rc == KERN_SUCCESS) { pHdr = (PRTMEMHDR)Addr; if (fFlags & RTMEMHDR_FLAG_ZEROED) bzero(pHdr, cbAllocated); } else vm_map_remove(kernel_map, Addr, Addr + cbAllocated); } else vm_object_deallocate(pVmObject); # endif } else #endif {
/**
 * Returns the size of the ring in bytes given the number of elements and
 * alignment requirements.
 *
 * @param   cElements   Number of elements.
 * @param   Align       Alignment (must be a power of two).
 *
 * @return Size of the Virtio ring.
 */
size_t VirtioRingSize(uint64_t cElements, ulong_t Align)
{
    /* First part: descriptor table followed by the available ring
       (flags, index, one entry per element). */
    size_t cbRing = cElements * sizeof(VIRTIORINGDESC)
                  + 2 * sizeof(uint16_t)
                  + cElements * sizeof(uint16_t);

    /* Second part starts at the next aligned boundary: the used ring
       (flags, index, one used element per descriptor). */
    cbRing  = RT_ALIGN_Z(cbRing, Align);
    cbRing += 2 * sizeof(uint16_t);
    cbRing += cElements * sizeof(VIRTIORINGUSEDELEM);
    return cbRing;
}
/**
 * Initializes an ASN.1 array allocation tracker.
 *
 * @returns pAllocation, for call chaining.
 * @param   pAllocation     The allocation tracker to initialize.
 * @param   pAllocator      The allocator virtual table to use.
 * @param   cbEntry         Size of one array entry; must be pointer aligned,
 *                          at least the size of RTASN1CORE and below 1MB.
 */
RTDECL(PRTASN1ARRAYALLOCATION) RTAsn1MemInitArrayAllocation(PRTASN1ARRAYALLOCATION pAllocation,
                                                            PCRTASN1ALLOCATORVTABLE pAllocator,
                                                            size_t cbEntry)
{
    /* Sanity check the entry size before truncating it to 32 bits. */
    Assert(cbEntry >= sizeof(RTASN1CORE));
    Assert(cbEntry < _1M);
    Assert(RT_ALIGN_Z(cbEntry, sizeof(void *)) == cbEntry);

    pAllocation->pAllocator         = pAllocator;
    pAllocation->cbEntry            = (uint32_t)cbEntry;
    pAllocation->cPointersAllocated = 0;
    pAllocation->cEntriesAllocated  = 0;
    pAllocation->cResizeCalls       = 0;
    pAllocation->uReserved0         = 0;
    return pAllocation;
}
/**
 * @interface_method_impl{PDMINETWORKUP,pfnAllocBuf}
 */
static DECLCALLBACK(int) drvNicNetworkUp_AllocBuf(PPDMINETWORKUP pInterface, size_t cbMin,
                                                  PCPDMNETWORKGSO pGso, PPPDMSCATTERGATHER ppSgBuf)
{
    PDRVNIC pThis = PDMINETWORKUP_2_DRVNIC(pInterface);

    /*
     * Allocate a scatter / gather buffer descriptor that is immediately
     * followed by the buffer space of its single segment.  The GSO context
     * comes after that again.
     */
    PPDMSCATTERGATHER pSgBuf = (PPDMSCATTERGATHER)RTMemAlloc(  RT_ALIGN_Z(sizeof(*pSgBuf), 16)
                                                             + RT_ALIGN_Z(cbMin, 16)
                                                             + (pGso ? RT_ALIGN_Z(sizeof(*pGso), 16) : 0));
    if (!pSgBuf)
        return VERR_NO_MEMORY;

    /*
     * Initialize the S/G buffer and return.
     */
    pSgBuf->fFlags      = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_1;
    pSgBuf->cbUsed      = 0;
    pSgBuf->cbAvailable = RT_ALIGN_Z(cbMin, 16);
    pSgBuf->pvAllocator = NULL;
    if (!pGso)
        pSgBuf->pvUser  = NULL;
    else
    {
        /* Copy the GSO context into its slot right after the segment data. */
        pSgBuf->pvUser  = (uint8_t *)(pSgBuf + 1) + pSgBuf->cbAvailable;
        *(PPDMNETWORKGSO)pSgBuf->pvUser = *pGso;
    }
    pSgBuf->cSegs       = 1;
    pSgBuf->aSegs[0].cbSeg = pSgBuf->cbAvailable;
    pSgBuf->aSegs[0].pvSeg = pSgBuf + 1;    /* segment data follows the descriptor */

    *ppSgBuf = pSgBuf;
    return VINF_SUCCESS;
}
/**
 * Frees memory allocated with the contiguous allocator (Darwin).
 *
 * NULL is a no-op.
 *
 * @param   pv      The allocation to free; page aligned.
 * @param   cb      The size of the allocation.
 */
RTR0DECL(void) RTMemContFree(void *pv, size_t cb)
{
    RT_ASSERT_PREEMPTIBLE();
    if (!pv)
        return;

    Assert(cb > 0);
    AssertMsg(!((uintptr_t)pv & PAGE_OFFSET_MASK), ("pv=%p\n", pv));

    IPRT_DARWIN_SAVE_EFL_AC();
    IOFreeContiguous(pv, RT_ALIGN_Z(cb, PAGE_SIZE));
    IPRT_DARWIN_RESTORE_EFL_AC();
}
/**
 * Allocates an error info structure with a message buffer.
 *
 * @returns IPRT status code.
 * @param   cbMsg       Requested message buffer size; 0 means a 4KB default,
 *                      anything else is rounded up to a multiple of 256.
 * @param   ppErrInfo   Where to return the allocated structure (also set on
 *                      failure, to NULL).
 */
RTDECL(int) RTErrInfoAllocEx(size_t cbMsg, PRTERRINFO *ppErrInfo)
{
    /* Apply the default / round up the message buffer size. */
    cbMsg = cbMsg == 0 ? _4K : RT_ALIGN_Z(cbMsg, 256);

    PRTERRINFO const pErrInfo = (PRTERRINFO)RTMemTmpAlloc(sizeof(*pErrInfo) + cbMsg);
    *ppErrInfo = pErrInfo;
    if (RT_UNLIKELY(!pErrInfo))
        return VERR_NO_TMP_MEMORY;

    /* The message buffer follows the structure immediately. */
    RTErrInfoInit(pErrInfo, (char *)(pErrInfo + 1), cbMsg);
    pErrInfo->fFlags = RTERRINFO_FLAGS_T_ALLOC | RTERRINFO_FLAGS_MAGIC;
    return VINF_SUCCESS;
}
/**
 * Unscrambles a "safer" memory block by XORing it word by word.
 *
 * @returns VINF_SUCCESS.
 * @param   pv      The user pointer of the block; the size is stored in the
 *                  padding right before it.
 * @param   cb      The block size; rounded up to RTMEMSAFER_ALIGN.
 */
RTDECL(int) RTMemSaferUnscramble(void *pv, size_t cb)
{
    /* Cross-check the size recorded in the leading pad area. */
    AssertMsg(*(size_t *)((char *)pv - RTMEMSAFER_PAD_BEFORE) == cb,
              ("*pvStart=%#zx cb=%#zx\n", *(size_t *)((char *)pv - RTMEMSAFER_PAD_BEFORE), cb));

    /* Note! This isn't supposed to be safe, just less obvious. */
    uintptr_t *pu = (uintptr_t *)pv;
    for (size_t cbLeft = RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN); cbLeft > 0; cbLeft -= sizeof(*pu))
        *pu++ ^= g_uScramblerXor;

    return VINF_SUCCESS;
}
/**
 * Free memory allocated by rtMemPagePosixAlloc.
 *
 * @param   pv      The address of the memory to free.
 * @param   cb      The size.
 */
static void rtMemPagePosixFree(void *pv, size_t cb)
{
    /*
     * Validate & adjust the input.  NULL is a no-op.
     */
    if (!pv)
        return;
    AssertPtr(pv);
    Assert(cb > 0);
    Assert(!((uintptr_t)pv & PAGE_OFFSET_MASK));
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);

    /*
     * Unmap the whole page-aligned range.
     */
    int rc = munmap(pv, cb);
    AssertMsg(rc == 0, ("rc=%d pv=%p cb=%#zx\n", rc, pv, cb));
    NOREF(rc);
}
size_t rtDirNativeGetStructSize(const char *pszPath) { long cbNameMax = pathconf(pszPath, _PC_NAME_MAX); # ifdef NAME_MAX if (cbNameMax < NAME_MAX) /* This is plain paranoia, but it doesn't hurt. */ cbNameMax = NAME_MAX; # endif # ifdef _XOPEN_NAME_MAX if (cbNameMax < _XOPEN_NAME_MAX) /* Ditto. */ cbNameMax = _XOPEN_NAME_MAX; # endif size_t cbDir = RT_OFFSETOF(RTDIR, Data.d_name[cbNameMax + 1]); if (cbDir < sizeof(RTDIR)) /* Ditto. */ cbDir = sizeof(RTDIR); cbDir = RT_ALIGN_Z(cbDir, 8); return cbDir; }
/**
 * Handles a command submitted from the input widget.
 *
 * Appends the UTF-8 command plus a newline to the input buffer (growing it
 * as needed), echoes it to the output pane, and disables the input widget.
 * Runs on the GUI thread (asserted below).
 *
 * @param   rCommand    The submitted command text.
 */
void VBoxDbgConsole::commandSubmitted(const QString &rCommand)
{
    Assert(isGUIThread());

    lock();
    RTSemEventSignal(m_EventSem);

    /* Convert to UTF-8 before buffering. */
    QByteArray Utf8Array = rCommand.toUtf8();
    const char *psz = Utf8Array.constData();
    size_t cb = strlen(psz);

    /*
     * Make sure we've got space for the input.
     */
    if (cb + m_cbInputBuf >= m_cbInputBufAlloc)
    {
        /* Grow in 128-byte aligned steps; the +1 covers the '\n' appended below. */
        size_t cbNew = RT_ALIGN_Z(cb + m_cbInputBufAlloc + 1, 128);
        void *pv = RTMemRealloc(m_pszInputBuf, cbNew);
        if (!pv)
        {
            /* Out of memory: silently drop the command. */
            unlock();
            return;
        }
        m_pszInputBuf = (char *)pv;
        m_cbInputBufAlloc = cbNew;
    }

    /*
     * Add the input and output it.
     */
    memcpy(m_pszInputBuf + m_cbInputBuf, psz, cb);
    m_cbInputBuf += cb;
    m_pszInputBuf[m_cbInputBuf++] = '\n';

    m_pOutput->appendText(rCommand + "\n", true /*fClearSelection*/);
    m_pOutput->ensureCursorVisible();

    m_fInputRestoreFocus = m_pInput->hasFocus();    /* dirty focus hack */
    m_pInput->setEnabled(false);

    Log(("VBoxDbgConsole::commandSubmitted: %s (input-enabled=%RTbool)\n", psz, m_pInput->isEnabled()));
    unlock();
}
/**
 * Appends debugger backend output to the output buffer and asks the GUI
 * thread to repaint.
 *
 * @returns VBox status code; VERR_GENERAL_FAILURE when the console is
 *          terminating, VERR_NO_MEMORY if the buffer cannot grow.
 * @param   pBack       The backend structure (embedded in VBoxDbgConsole).
 * @param   pvBuf       The bytes to write.
 * @param   cbBuf       Number of bytes in @a pvBuf.
 * @param   pcbWritten  Where to return the number of bytes written.  Optional.
 */
VBoxDbgConsole::backWrite(PDBGCBACK pBack, const void *pvBuf, size_t cbBuf, size_t *pcbWritten)
{
    VBoxDbgConsole *pThis = VBOXDBGCONSOLE_FROM_DBGCBACK(pBack);
    int rc = VINF_SUCCESS;

    pThis->lock();

    /* Grow the buffer when needed, in 1KB aligned steps (+1 for the terminator). */
    if (cbBuf + pThis->m_cbOutputBuf >= pThis->m_cbOutputBufAlloc)
    {
        size_t cbNew = RT_ALIGN_Z(cbBuf + pThis->m_cbOutputBufAlloc + 1, 1024);
        void *pv = RTMemRealloc(pThis->m_pszOutputBuf, cbNew);
        if (!pv)
        {
            pThis->unlock();
            if (pcbWritten)
                *pcbWritten = 0;
            return VERR_NO_MEMORY;
        }
        pThis->m_pszOutputBuf = (char *)pv;
        pThis->m_cbOutputBufAlloc = cbNew;
    }

    /*
     * Add the output.
     */
    memcpy(pThis->m_pszOutputBuf + pThis->m_cbOutputBuf, pvBuf, cbBuf);
    pThis->m_cbOutputBuf += cbBuf;
    pThis->m_pszOutputBuf[pThis->m_cbOutputBuf] = '\0';
    if (pcbWritten)
        *pcbWritten = cbBuf;

    /* Report failure when the console is shutting down. */
    if (ASMAtomicUoReadBool(&pThis->m_fTerminate))
        rc = VERR_GENERAL_FAILURE;

    /*
     * Tell the GUI thread to draw this text.
     * We cannot do it from here without frequent crashes.
     */
    if (!pThis->m_fUpdatePending)
        QApplication::postEvent(pThis, new VBoxDbgConsoleEvent(VBoxDbgConsoleEvent::kUpdate));

    pThis->unlock();
    return rc;
}