/* Create a workspace anchor holding one initial memory block of
 * WRKS_INIT_SIZE bytes. On success stores the anchor in *ppAnchor and
 * returns 0; on allocation failure returns -1 with nothing leaked. */
zOPER_EXPORT zLONG OPERATION WRKS_Init( zPVOID *ppAnchor )
{
   T_WRKSANCHOR *pNewAnchor = SysMalloc( sizeof( T_WRKSANCHOR ) );

   if ( pNewAnchor == NULL )
      return( -1 );

   pNewAnchor->pFirstBlock = SysMalloc( sizeof( T_MEMBLOCK ) );
   if ( pNewAnchor->pFirstBlock == NULL )
   {
      SysFree( pNewAnchor );
      return( -1 );
   }

   pNewAnchor->pFirstBlock->pData = SysMalloc( WRKS_INIT_SIZE );
   if ( pNewAnchor->pFirstBlock->pData == NULL )
   {
      // roll back both earlier allocations
      SysFree( pNewAnchor->pFirstBlock );
      SysFree( pNewAnchor );
      return( -1 );
   }

   pNewAnchor->pFirstBlock->lData = WRKS_INIT_SIZE;
   pNewAnchor->pFirstBlock->lDataUsed = 0;

   *ppAnchor = pNewAnchor;
   return( 0 );
}
FPoly_t *Factorization(const Poly_t *pol) { factor_t *list, *l; FPoly_t *factors; /* Allocate result --------------- */ if ((factors = FpAlloc()) == NULL) { MTX_ERROR("Cannot allocate result"); return NULL; } /* Step 1: Squarefree factorization -------------------------------- */ if ((list = factorsquarefree(pol)) == NULL) { MTX_ERROR("Squarefree factorization failed"); return NULL; } /* Step 2: Decompose the squarefree factors using Berlekamp's algorithm -------------------------------------------------------------------- */ for (l = list; l->p != NULL; ++l) { Matrix_t *kernel; Poly_t **irr, **i; kernel = makekernel(l->p); if ((irr = berlekamp(l->p,kernel)) == NULL) { MTX_ERROR("Berlekamp factorization failed"); return NULL; } MatFree(kernel); for (i = irr; *i != NULL; ++i) { FpMulP(factors,*i,l->n); PolFree(*i); } /* Clean up -------- */ SysFree(irr); PolFree(l->p); } /* Clean up -------- */ SysFree(list); return factors; }
/* Scalar-product test: for every field and a geometric range of row
 * sizes, allocate two rows and run TestScalarProduct1 on them. */
test_F ScalarProduct()
{
    while (NextField() > 0) {
        int noc;
        for (noc = 0; noc < 1000; noc += noc / 10 + 1) {
            PTR row1, row2;
            FfSetNoc(noc);
            row1 = FfAlloc(1);
            row2 = FfAlloc(1);
            TestScalarProduct1(row1, row2, noc);
            SysFree(row1);
            SysFree(row2);
        }
    }
}
/* Destroy a matrix representation and all generator matrices it owns.
 * Returns 0 on success, -1 if <rep> is not a valid MatRep_t. */
int MrFree(MatRep_t *rep)
{
    int k;

    if (!MrIsValid(rep)) {
        MTX_ERROR1("%E",MTX_ERR_BADARG);
        return -1;
    }
    for (k = 0; k < rep->NGen; ++k)
        MatFree(rep->Gen[k]);
    /* wipe before freeing so stale references are easier to spot */
    memset(rep->Gen, 0, sizeof(Matrix_t *) * rep->NGen);
    SysFree(rep->Gen);
    memset(rep, 0, sizeof(MatRep_t));
    SysFree(rep);
    return 0;
}
// RecordSpan records span s in h->allspans, growing the array when it
// is full. Installed as the FixAlloc first-fn, so vh is the MHeap.
static void RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;
	MSpan **all;
	uint32 cap;

	h = vh;
	s = (MSpan*)p;
	if(h->nspan >= h->nspancap) {
		// Grow: start with 64kB worth of pointers, then 1.5x each time.
		cap = 64*1024/sizeof(all[0]);
		if(cap < h->nspancap*3/2)
			cap = h->nspancap*3/2;
		all = (MSpan**)runtime·SysAlloc(cap*sizeof(all[0]), &mstats.other_sys);
		if(all == nil)
			runtime·throw("runtime: cannot allocate memory");
		if(h->allspans) {
			runtime·memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
			// Don't free the old array if it's referenced by sweep.
			// See the comment in mgc0.c.
			if(h->allspans != runtime·mheap.sweepspans)
				runtime·SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats.other_sys);
		}
		h->allspans = all;
		h->nspancap = cap;
	}
	h->allspans[h->nspan++] = s;
}
/* Drain the singly-linked list, releasing every removed link. */
void ListPurgeFree(HSLIST &hList)
{
	for (PLISTLINK lpLink = ListRemove(hList); lpLink != INVALID_SLIST_PTR;
	     lpLink = ListRemove(hList))
		SysFree(lpLink);
}
/* Check whether the user's mailbox may grow by *pllAvailSpace bytes.
 * On input *pllAvailSpace (if not NULL) is the size of the message
 * about to be stored; on output it receives the remaining space
 * (MaxSignedType(SYS_OFF_T) when no limit is configured).
 * Returns 0 when the mailbox fits, ERR_MAILBOX_SIZE when the configured
 * limit would be exceeded, or the current error code on lookup failure.
 * FIX: pszMaxMBSize was used without a visible declaration; declared as
 * a local here. */
int UPopCheckMailboxSize(UserInfo *pUI, SYS_OFF_T *pllAvailSpace)
{
	SYS_OFF_T llMBSize = 0, llProbeSize = (pllAvailSpace != NULL) ? *pllAvailSpace: 0;
	unsigned long ulNumMessages = 0;
	char *pszMaxMBSize;

	if (UPopGetMailboxSize(pUI, llMBSize, ulNumMessages) < 0)
		return ErrGetErrorCode();
	pszMaxMBSize = UsrGetUserInfoVar(pUI, "MaxMBSize");
	if (pszMaxMBSize != NULL) {
		/* "MaxMBSize" is configured in KB; zero means unlimited */
		SYS_OFF_T llMaxMBSize = Sys_atoi64(pszMaxMBSize) * 1024;

		SysFree(pszMaxMBSize);
		if (llMaxMBSize != 0 && (llMBSize + llProbeSize >= llMaxMBSize)) {
			ErrSetErrorCode(ERR_MAILBOX_SIZE);
			return ERR_MAILBOX_SIZE;
		}
		if (pllAvailSpace != NULL)
			*pllAvailSpace = (llMaxMBSize != 0 ? llMaxMBSize - llMBSize: MaxSignedType(SYS_OFF_T));
	} else if (pllAvailSpace != NULL)
		*pllAvailSpace = MaxSignedType(SYS_OFF_T);

	return 0;
}
// runtime·MHeap_SysAlloc allocates n bytes of heap memory, preferring
// the pre-reserved arena; on 32-bit it may fall back to a direct OS
// allocation. Returns nil on failure.
// NOTE(review): generated from malloc.goc — the #line directives map
// back to the .goc source.
runtime·MHeap_SysAlloc ( MHeap *h , uintptr n )
{
byte *p;
#line 353 "malloc.goc"
// Fast path: carve n bytes out of the already-reserved arena.
if ( n <= h->arena_end - h->arena_used ) {
#line 355 "malloc.goc"
p = h->arena_used;
runtime·SysMap ( p , n ) ;
h->arena_used += n;
runtime·MHeap_MapBits ( h ) ;
return p;
}
#line 363 "malloc.goc"
// On 64-bit the arena is all there is; do not fall back to SysAlloc.
if ( sizeof ( void* ) == 8 )
return nil;
#line 369 "malloc.goc"
// 32-bit fallback: take whatever the OS gives us...
p = runtime·SysAlloc ( n ) ;
if ( p == nil )
return nil;
#line 373 "malloc.goc"
// ...but only if it is addressable by the 32-bit arena bookkeeping.
if ( p < h->arena_start || p+n - h->arena_start >= MaxArena32 ) {
runtime·printf ( "runtime: memory allocated by OS not in usable range\n" ) ;
runtime·SysFree ( p , n ) ;
return nil;
}
#line 379 "malloc.goc"
// Extend the tracked arena bounds to cover the new allocation.
if ( p+n > h->arena_used ) {
h->arena_used = p+n;
if ( h->arena_used > h->arena_end )
h->arena_end = h->arena_used;
runtime·MHeap_MapBits ( h ) ;
}
#line 386 "malloc.goc"
return p;
}
/* Attach a buffered I/O layer to socket SockFD, using a read buffer of
 * iBufferSize bytes. Returns the new handle, or INVALID_BSOCK_HANDLE on
 * allocation failure (nothing leaked). */
BSOCK_HANDLE BSckAttach(SYS_SOCKET SockFD, int iBufferSize)
{
	BuffSocketData *pData = (BuffSocketData *) SysAlloc(sizeof(BuffSocketData));

	if (pData == NULL)
		return INVALID_BSOCK_HANDLE;

	char *pszIOBuff = (char *) SysAlloc(iBufferSize);

	if (pszIOBuff == NULL) {
		SysFree(pData);
		return INVALID_BSOCK_HANDLE;
	}
	pData->SockFD = SockFD;
	pData->iBufferSize = iBufferSize;
	pData->pszBuffer = pszIOBuff;
	pData->iBytesInBuffer = 0;
	pData->iReadIndex = 0;
	/* plain-socket I/O operations */
	pData->IOops.pPrivate.handle = SockFD;
	pData->IOops.pName = BSckSock_Name;
	pData->IOops.pFree = BSckSock_Free;
	pData->IOops.pRead = BSckSock_Read;
	pData->IOops.pWrite = BSckSock_Write;
	pData->IOops.pSendFile = BSckSock_SendFile;

	return (BSOCK_HANDLE) pData;
}
// runtime·stackfree returns the stack segment v (ending at top) either
// to the OS (efence / system-stack modes), to the heap (malloced
// segments), or to the per-M fixed-size stack cache.
void runtime·stackfree(G *gp, void *v, Stktop *top)
{
	uint32 pos;
	uintptr n;

	n = (uintptr)(top+1) - (uintptr)v;
	if(StackDebug >= 1)
		runtime·printf("stackfree %p %d\n", v, (int32)n);
	gp->stacksize -= n;
	// Debug / system-stack modes: either poison the pages so any reuse
	// faults, or hand the memory straight back to the OS.
	if(runtime·debug.efence || StackFromSystem) {
		if(runtime·debug.efence || StackFaultOnFree)
			runtime·SysFault(v, n);
		else
			runtime·SysFree(v, n, &mstats.stacks_sys);
		return;
	}
	if(top->malloced) {
		runtime·free(v);
		return;
	}
	if(n != FixedStack)
		runtime·throw("stackfree: bad fixed size");
	// Fixed-size segment: push onto the per-M cache, flushing it first
	// when full.
	if(m->stackcachecnt == StackCacheSize)
		stackcacherelease();
	pos = m->stackcachepos;
	m->stackcache[pos] = v;
	m->stackcachepos = (pos + 1) % StackCacheSize;
	m->stackcachecnt++;
	m->stackinuse--;
}
// RecordSpan records span s in h->allspans, growing the array when it
// is full. Installed as the FixAlloc first-fn, so vh is the MHeap.
// NOTE(review): the old array is freed unconditionally here; if any
// other component (e.g. a concurrent sweeper) can still hold a
// reference to h->allspans, that would be a use-after-free — confirm
// against this tree's GC before relying on it.
static void RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;
	MSpan **all;
	uint32 cap;

	h = vh;
	s = (MSpan*)p;
	if(h->nspan >= h->nspancap) {
		// Grow: start with 64kB worth of pointers, then 1.5x each time.
		cap = 64*1024/sizeof(all[0]);
		if(cap < h->nspancap*3/2)
			cap = h->nspancap*3/2;
		all = (MSpan**)runtime·SysAlloc(cap*sizeof(all[0]));
		if(all == nil)
			runtime·throw("runtime: cannot allocate memory");
		if(h->allspans) {
			runtime·memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
			runtime·SysFree(h->allspans, h->nspancap*sizeof(all[0]));
		}
		h->allspans = all;
		h->nspancap = cap;
	}
	h->allspans[h->nspan++] = s;
}
/* Double the bucket count of <pHash> and redistribute every node into
 * its new bucket. A no-op (returns 0) once the table has reached its
 * maximum size. Returns 0 on success or the error code when the new
 * bucket array cannot be allocated (the table is left untouched). */
static int HashGrow(Hash *pHash)
{
	unsigned long i, ulNewIdx, ulNewMask;
	SysListHead *pNewBkts, *pOldHead, *pPos;
	HashNode *pHNode;

	ulNewMask = pHash->ulHMask + 1;
	if (ulNewMask & HMASK_TOP_BIT)
		return 0;	/* already at maximum size */
	ulNewMask = (ulNewMask << 1) - 1;
	if ((pNewBkts = (SysListHead *) SysAlloc((ulNewMask + 1) *
						 sizeof(SysListHead))) == NULL)
		return ErrGetErrorCode();
	for (i = 0; i <= ulNewMask; i++)
		SYS_INIT_LIST_HEAD(&pNewBkts[i]);
	/* Unlink every node from its old bucket and re-insert it under the
	 * wider mask (hash value recomputed through the table's ops). */
	for (i = 0; i <= pHash->ulHMask; i++) {
		pOldHead = &pHash->pBkts[i];
		while ((pPos = SYS_LIST_FIRST(pOldHead)) != NULL) {
			pHNode = SYS_LIST_ENTRY(pPos, HashNode, Lnk);
			SYS_LIST_DEL(&pHNode->Lnk);
			ulNewIdx = (*pHash->Ops.pGetHashVal)(pHash->Ops.pPrivate,
							     &pHNode->Key) & ulNewMask;
			SYS_LIST_ADDT(&pHNode->Lnk, &pNewBkts[ulNewIdx]);
		}
	}
	SysFree(pHash->pBkts);
	pHash->pBkts = pNewBkts;
	pHash->ulHMask = ulNewMask;

	return 0;
}
/* Format a printf-style string and send it on the buffered socket via
 * BSckSendString. Returns 0 on success or the pushed error code.
 * NOTE(review): StrVSprint is presumably a macro that va_start()s on
 * its second argument and allocates the result into pszBuffer — hence
 * pszFormat appearing twice; confirm against its definition before
 * restructuring this call. */
int BSckVSendString(BSOCK_HANDLE hBSock, int iTimeout, char const *pszFormat, ...)
{
	char *pszBuffer = NULL;

	StrVSprint(pszBuffer, pszFormat, pszFormat);
	if (pszBuffer == NULL)
		return ErrGetErrorCode();
	if (BSckSendString(hBSock, pszBuffer, iTimeout) < 0) {
		/* preserve the error code across SysFree */
		ErrorPush();
		SysFree(pszBuffer);
		return ErrorPop();
	}
	SysFree(pszBuffer);
	return 0;
}
/* Create a matrix representation from <ngen> generator matrices.
 * With MR_COPY_GENERATORS in <flags> the generators are duplicated and
 * owned by the representation; otherwise the caller's matrices are
 * referenced directly. Returns the new representation, or NULL on
 * error (nothing leaked). */
MatRep_t *MrAlloc(int ngen, Matrix_t **gen, int flags)
{
    MatRep_t *rep;
    int k;

    if (!GensAreValid(ngen,gen)) {
        MTX_ERROR1("%E",MTX_ERR_BADARG);
        return NULL;
    }

    /* Allocate and zero a new MatRep_t structure */
    rep = ALLOC(MatRep_t);
    if (rep == NULL) {
        MTX_ERROR("Cannot allocate MatRep_t structure");
        return NULL;
    }
    memset(rep,0,sizeof(MatRep_t));
    rep->Gen = NALLOC(Matrix_t *,ngen);
    if (rep->Gen == NULL) {
        MTX_ERROR("Cannot allocate generator list");
        SysFree(rep);
        return NULL;
    }

    /* Fill in the generators, copying if requested */
    rep->NGen = ngen;
    for (k = 0; k < ngen; ++k) {
        if (!(flags & MR_COPY_GENERATORS)) {
            rep->Gen[k] = gen[k];
            continue;
        }
        rep->Gen[k] = MatDup(gen[k]);
        if (rep->Gen[k] == NULL) {
            /* roll back the copies made so far */
            MTX_ERROR("Cannot copy generator");
            while (--k >= 0)
                MatFree(rep->Gen[k]);
            SysFree(rep->Gen);
            SysFree(rep);
            return NULL;
        }
    }

    rep->Magic = MR_MAGIC;
    return rep;
}
/* Detach the buffered layer from its socket: free the I/O layer, the
 * read buffer and the handle itself. If iCloseSocket is non-zero the
 * underlying socket is closed too and SYS_INVALID_SOCKET is returned;
 * otherwise the (still-open) socket descriptor is returned.
 * NOTE(review): BSOCK_FREE(pBSD) is assumed to release only the attached
 * I/O layer (IOops), not pBSD itself — otherwise the following
 * pBSD->pszBuffer access would be a use-after-free; confirm the macro
 * definition. */
SYS_SOCKET BSckDetach(BSOCK_HANDLE hBSock, int iCloseSocket)
{
	BuffSocketData *pBSD = (BuffSocketData *) hBSock;
	SYS_SOCKET SockFD = SYS_INVALID_SOCKET;

	if (pBSD != NULL) {
		SockFD = pBSD->SockFD;
		BSOCK_FREE(pBSD);
		SysFree(pBSD->pszBuffer);
		SysFree(pBSD);
		if (iCloseSocket) {
			SysCloseSocket(SockFD);
			return SYS_INVALID_SOCKET;
		}
	}
	return SockFD;
}
/* Destroy a hash table: delete the list stored in every entry, then
 * free the table array itself. */
void DeleteHashTable(int NUM_OF_ENTRY, int *hdTable)
{
    int idx;

    for (idx = 0; idx < NUM_OF_ENTRY; idx++)
        DeleteList(ReadHashTable(hdTable, idx));
    SysFree(hdTable);
}
/* Ensure the static buffer <tmpvec> is usable for degree-<deg>
 * polynomials over field <fl>; (re)allocate when the cached field
 * differs or the cached degree is too small. */
static void mktmp(long fl, long deg)
{
    FfSetField(fl);
    if (deg > 0)
        FfSetNoc(deg + 1);
    if (tmpfl == fl && tmpdeg >= deg)
        return;                 /* cached buffer is already big enough */
    if (tmpvec != NULL)
        SysFree(tmpvec);
    tmpvec = FfAlloc(1);
    tmpdeg = deg;
    tmpfl = fl;
}
static void TestGrMapRow1(Matrix_t *m, int gr_level) { Matrix_t *input = RndMat(FfOrder,m->Nor,m->Nor); GreasedMatrix_t *gm = GrMatAlloc(m,gr_level); PTR res_std = FfAlloc(1); PTR res_grease = FfAlloc(1); int i; for (i = 0; i < m->Nor; ++i) { PTR vec = MatGetPtr(input,i); FfSetNoc(m->Noc); FfMapRow(vec,m->Data,m->Nor,res_std); GrMapRow(vec,gm,res_grease); ASSERT_EQ_INT(FfCmpRows(res_grease,res_std), 0); } SysFree(res_std); SysFree(res_grease); MatFree(input); GrMatFree(gm); }
/* Release a matrix set together with every matrix it contains.
 * Returns 0 on success, -1 if <set> is not a valid MatrixSet_t. */
int MsFree(MatrixSet_t *set)
{
    int k;

    if (!MsIsValid(set))
        return -1;
    for (k = 0; k < set->Len; ++k)
        MatFree(set->List[k].Matrix);
    SysFree(set->List);
    /* invalidate the struct so stale handles fail MsIsValid() */
    memset(set,0,sizeof(*set));
    return 0;
}
Matrix_t *QAction(const Matrix_t *subspace, const Matrix_t *gen) { int k; int dim, sdim, qdim; int *piv, *non_piv; /* Check arguments. ---------------- */ if (!MatIsValid(subspace) || !MatIsValid(gen)) { return NULL; } if (subspace->Noc != gen->Nor) { MTX_ERROR1("subspace and gen: %E",MTX_ERR_INCOMPAT); return NULL; } if (gen->Nor != gen->Noc) { MTX_ERROR1("gen: %E",MTX_ERR_NOTSQUARE); return NULL; } /* Initialize ---------- */ dim = subspace->Noc; sdim = subspace->Nor; qdim = dim - sdim; Matrix_t *action = MatAlloc(subspace->Field,qdim,qdim); if (action == NULL) { return NULL; } /* Calculate the action on the quotient ------------------------------------ */ FfSetNoc(dim); PTR tmp = FfAlloc(1); if (tmp == NULL) { MatFree(action); return NULL; } piv = subspace->PivotTable; non_piv = piv + subspace->Nor; for (k = 0; k < qdim; ++k) { int l; PTR qx = MatGetPtr(action,k); FfCopyRow(tmp,MatGetPtr(gen,non_piv[k])); FfCleanRow(tmp,subspace->Data,sdim,piv); for (l = 0; l < qdim; ++l) { FfInsert(qx,l,FfExtract(tmp,non_piv[l])); } } SysFree(tmp); return action; }
/* Release a workspace: free each block's data buffer and block struct,
 * then the anchor itself, and clear the caller's handle. */
zOPER_EXPORT zVOID OPERATION WRKS_Close( zPVOID *ppAnchor )
{
   T_WRKSANCHOR *pWrksAnchor = *(T_WRKSANCHOR **) ppAnchor;
   T_MEMBLOCK   *pCurr;
   T_MEMBLOCK   *pNext;

   // walk the block chain, saving the next pointer before freeing
   for ( pCurr = pWrksAnchor->pFirstBlock; pCurr; pCurr = pNext )
   {
      pNext = pCurr->pNext;
      if ( pCurr->pData )
         SysFree( pCurr->pData );
      SysFree( pCurr );
   }

   // free the anchor and invalidate the caller's handle
   SysFree( pWrksAnchor );
   *ppAnchor = NULL;
}
/* Initialize the resource lockers: the global locking mutex plus
 * STD_WAIT_GATES wait gates, each owning a semaphore and a hash array
 * of resource lists. Returns 0 on success; on any failure everything
 * created so far is rolled back and the pushed error code returned. */
int RLckInitLockers(void)
{
	/* Create resource locking mutex */
	if ((hRLMutex = SysCreateMutex()) == SYS_INVALID_MUTEX)
		return ErrGetErrorCode();
	/* Initialize wait gates */
	for (int i = 0; i < STD_WAIT_GATES; i++) {
		if ((RLGates[i].hSemaphore = SysCreateSemaphore(0, SYS_DEFAULT_MAXCOUNT)) == SYS_INVALID_SEMAPHORE) {
			ErrorPush();
			/* roll back fully-built gates [0, i): each owns a list
			 * and a semaphore */
			for (--i; i >= 0; i--) {
				SysFree(RLGates[i].pResList);
				SysCloseSemaphore(RLGates[i].hSemaphore);
			}
			SysCloseMutex(hRLMutex);
			return ErrorPop();
		}
		RLGates[i].iWaitingProcesses = 0;
		RLGates[i].iHashSize = STD_RES_HASH_SIZE;
		if ((RLGates[i].pResList = (SysListHead *) SysAlloc(RLGates[i].iHashSize * sizeof(SysListHead))) == NULL) {
			ErrorPush();
			/* gate i owns only its semaphore at this point; close it
			 * first, then roll back the fully-built gates [0, i) */
			SysCloseSemaphore(RLGates[i].hSemaphore);
			for (--i; i >= 0; i--) {
				SysFree(RLGates[i].pResList);
				SysCloseSemaphore(RLGates[i].hSemaphore);
			}
			SysCloseMutex(hRLMutex);
			return ErrorPop();
		}
		for (int j = 0; j < RLGates[i].iHashSize; j++)
			SYS_INIT_LIST_HEAD(&RLGates[i].pResList[j]);
	}

	return 0;
}
/* Hand out <lNeeded> bytes from the workspace. Scans the block chain
 * for a block with enough free room; when none fits, a new block of at
 * least twice the requested size is appended to the chain.
 * Returns 0 when an existing block was used, 1 when a new block was
 * allocated, -1 on allocation failure (*ppData set to NULL).
 * FIXES: the new block's pNext was never initialized (SysMalloc is not
 * shown to zero memory, so WRKS_Close could walk past the chain end),
 * and pLastBlock was dereferenced without a NULL check (crash if the
 * chain was ever empty). */
zOPER_EXPORT zLONG OPERATION WRKS_Get( zPVOID pAnchor, zLONG lNeeded, zPVOID *ppData )
{
   T_WRKSANCHOR *pWrksAnchor = (T_WRKSANCHOR *) pAnchor;
   T_MEMBLOCK *pBlock = pWrksAnchor->pFirstBlock;
   T_MEMBLOCK *pLastBlock = NULL;
   zLONG lAlloc;
   zPCHAR pData;

   // go through chain of blocks looking for a big-enough slot
   while ( pBlock )
   {
      zLONG lFree = pBlock->lData - pBlock->lDataUsed;

      if ( lFree >= lNeeded )
      {
         *ppData = (pBlock->pData + pBlock->lDataUsed);
         pBlock->lDataUsed += lNeeded;
         return( 0 );
      }
      pLastBlock = pBlock;
      pBlock = pBlock->pNext;
   }

   // no fitting slot: allocate a new block of at least twice the request
   lAlloc = WRKS_INIT_SIZE;
   if ( lNeeded * 2 > lAlloc )
      lAlloc = lNeeded * 2;

   if ( (pBlock = SysMalloc( sizeof( T_MEMBLOCK ) )) == NULL )
   {
      // Error allocating memory
      *ppData = NULL;
      return( -1 );
   }

   if ( (pData = SysMalloc( lAlloc )) == NULL )
   {
      // Error allocating memory
      SysFree( pBlock );
      *ppData = NULL;
      return( -1 );
   }

   // terminate and link the new block into the chain; fall back to the
   // anchor when the chain was empty
   pBlock->pNext = NULL;
   if ( pLastBlock != NULL )
      pLastBlock->pNext = pBlock;
   else
      pWrksAnchor->pFirstBlock = pBlock;

   pBlock->pData = pData;
   pBlock->lData = lAlloc;
   pBlock->lDataUsed = lNeeded;
   *ppData = pData;
   return( 1 );
}
// Try to add at least npage pages of memory to the heap, // returning whether it worked. static bool MHeap_Grow(MHeap *h, uintptr npage) { uintptr ask; void *v; MSpan *s; // Ask for a big chunk, to reduce the number of mappings // the operating system needs to track; also amortizes // the overhead of an operating system mapping. // For Native Client, allocate a multiple of 64kB (16 pages). npage = (npage+15)&~15; ask = npage<<PageShift; if(ask < HeapAllocChunk) ask = HeapAllocChunk; v = SysAlloc(ask); if(v == nil) { if(ask > (npage<<PageShift)) { ask = npage<<PageShift; v = SysAlloc(ask); } if(v == nil) return false; } mstats.heap_sys += ask; if((byte*)v < h->min || h->min == nil) h->min = v; if((byte*)v+ask > h->max) h->max = (byte*)v+ask; // NOTE(rsc): In tcmalloc, if we've accumulated enough // system allocations, the heap map gets entirely allocated // in 32-bit mode. (In 64-bit mode that's not practical.) if(!MHeapMap_Preallocate(&h->map, ((uintptr)v>>PageShift) - 1, (ask>>PageShift) + 2)) { SysFree(v, ask); return false; } // Create a fake "in use" span and free it, so that the // right coalescing happens. s = FixAlloc_Alloc(&h->spanalloc); mstats.mspan_inuse = h->spanalloc.inuse; mstats.mspan_sys = h->spanalloc.sys; MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift); MHeapMap_Set(&h->map, s->start, s); MHeapMap_Set(&h->map, s->start + s->npages - 1, s); s->state = MSpanInUse; MHeap_FreeLocked(h, s); return true; }
/* Append CR-LF to <pszBuffer> and send the whole line on the buffered
 * socket. Returns the number of bytes sent (including CR-LF), or the
 * error code on failure. */
int BSckSendString(BSOCK_HANDLE hBSock, char const *pszBuffer, int iTimeout)
{
	BuffSocketData *pBSD = (BuffSocketData *) hBSock;
	/* room for "\r\n" plus the NUL terminator */
	char *pszLine = (char *) SysAlloc(strlen(pszBuffer) + 3);

	if (pszLine == NULL)
		return ErrGetErrorCode();
	SysLogMessage(LOG_LEV_DEBUG, "socket write line: [%s]\n", pszBuffer);
	sprintf(pszLine, "%s\r\n", pszBuffer);

	int iLineLength = (int) strlen(pszLine);

	if (BSckWriteLL(pBSD, pszLine, iLineLength, iTimeout) != iLineLength) {
		SysFree(pszLine);
		return ErrGetErrorCode();
	}
	SysFree(pszLine);

	return iLineLength;
}
// runtime·MHeap_SysAlloc allocates n bytes of heap memory. It first
// tries to extend the reserved arena, then carves n bytes out of it;
// on 32-bit it may fall back to a direct OS allocation. Returns nil on
// failure.
// NOTE(review): generated from malloc.goc — the #line directives map
// back to the .goc source.
runtime·MHeap_SysAlloc ( MHeap *h , uintptr n )
{
byte *p;
#line 2418 "C:\Go\src\pkg\runtime\malloc.goc"
// Not enough reserved space: try to reserve more, in 256MB multiples.
if ( n > h->arena_end - h->arena_used ) {
#line 2421 "C:\Go\src\pkg\runtime\malloc.goc"
byte *new_end;
uintptr needed;
#line 2424 "C:\Go\src\pkg\runtime\malloc.goc"
needed = ( uintptr ) h->arena_used + n - ( uintptr ) h->arena_end;
#line 2426 "C:\Go\src\pkg\runtime\malloc.goc"
needed = ( needed + ( 256<<20 ) - 1 ) & ~ ( ( 256<<20 ) -1 ) ;
new_end = h->arena_end + needed;
if ( new_end <= h->arena_start + MaxArena32 ) {
p = runtime·SysReserve ( h->arena_end , new_end - h->arena_end ) ;
// Only extend if the reservation landed contiguously.
if ( p == h->arena_end )
h->arena_end = new_end;
}
}
// Fast path: carve n bytes out of the reserved arena.
if ( n <= h->arena_end - h->arena_used ) {
#line 2436 "C:\Go\src\pkg\runtime\malloc.goc"
p = h->arena_used;
runtime·SysMap ( p , n ) ;
h->arena_used += n;
runtime·MHeap_MapBits ( h ) ;
return p;
}
#line 2444 "C:\Go\src\pkg\runtime\malloc.goc"
// On 64-bit (bitmap above 4GB) there is no 32-bit fallback.
if ( sizeof ( void* ) == 8 && ( uintptr ) h->bitmap >= 0xffffffffU )
return nil;
#line 2450 "C:\Go\src\pkg\runtime\malloc.goc"
// 32-bit fallback: take whatever the OS gives us...
p = runtime·SysAlloc ( n ) ;
if ( p == nil )
return nil;
#line 2454 "C:\Go\src\pkg\runtime\malloc.goc"
// ...but only if it is addressable by the 32-bit arena bookkeeping.
if ( p < h->arena_start || p+n - h->arena_start >= MaxArena32 ) {
runtime·printf ( "runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n" , p , h->arena_start , h->arena_start+MaxArena32 ) ;
runtime·SysFree ( p , n ) ;
return nil;
}
#line 2461 "C:\Go\src\pkg\runtime\malloc.goc"
// Extend the tracked arena bounds to cover the new allocation.
if ( p+n > h->arena_used ) {
h->arena_used = p+n;
if ( h->arena_used > h->arena_end )
h->arena_end = h->arena_used;
runtime·MHeap_MapBits ( h ) ;
}
#line 2468 "C:\Go\src\pkg\runtime\malloc.goc"
return p;
}
/* Destroy a hash table. When <pFree> is supplied it is called (with
 * <pPrivate>) on every node still in the table, after the node has
 * been unlinked from its bucket. A NULL handle is ignored. */
void HashFree(HASH_HANDLE hHash, void (*pFree)(void *, HashNode *), void *pPrivate)
{
	Hash *pHash = (Hash *) hHash;
	unsigned long i;
	SysListHead *pHead, *pPos;
	HashNode *pHNode;

	if (pHash == NULL)
		return;
	if (pFree != NULL) {
		/* unlink and hand every remaining node to the caller */
		for (i = 0; i <= pHash->ulHMask; i++) {
			pHead = &pHash->pBkts[i];
			while ((pPos = SYS_LIST_FIRST(pHead)) != NULL) {
				pHNode = SYS_LIST_ENTRY(pPos, HashNode, Lnk);
				SYS_LIST_DEL(&pHNode->Lnk);
				(*pFree)(pPrivate, pHNode);
			}
		}
	}
	SysFree(pHash->pBkts);
	SysFree(pHash);
}
static Matrix_t *makekernel(const Poly_t *pol) { Matrix_t *materg; PTR rowptr; FEL *xbuf, *pbuf = pol->Data; long pdeg = pol->Degree; int k, xshift; long fl = pol->Field; materg = MatAlloc(fl,pdeg,pdeg); rowptr = materg->Data; xbuf = NALLOC(FEL,pdeg+1); for (k = 0; k <= pdeg; ++k) xbuf[k] = FF_ZERO; xbuf[0] = FF_ONE; for (k = 0; k < pdeg; ++k) { int l; for (l = 0; l < pdeg; ++l) FfInsert(rowptr,l,xbuf[l]); FfInsert(rowptr,k,FfSub(xbuf[k],FF_ONE)); FfStepPtr(&rowptr); for (xshift = (int) fl; xshift > 0; ) { FEL f; int d; /* Find leading pos */ for (l = pdeg-1; xbuf[l] == FF_ZERO && l >= 0; --l); /* Shift left as much as possible */ if ((d = pdeg - l) > xshift) d = xshift; for (; l >= 0; l--) xbuf[l+d] = xbuf[l]; for (l = d-1; l >= 0; --l) xbuf[l] = FF_ZERO; xshift -= d; if (xbuf[pdeg] == FF_ZERO) continue; /* Reduce with pol */ f = FfNeg(FfDiv(xbuf[pdeg],pbuf[pdeg])); for (l = pdeg-1; l >= 0; --l) xbuf[l] = FfAdd(xbuf[l],FfMul(pbuf[l],f)); xbuf[pdeg] = FF_ZERO; } } SysFree(xbuf); return MatNullSpace__(materg); }
/* Tear down an SSL binding context: shut the TLS session down, release
 * the OpenSSL objects, restore blocking mode on the raw socket and
 * free the context. Always returns 0. */
static int BSslCtx__Free(void *pPrivate)
{
	SslBindCtx *pCtx = (SslBindCtx *) pPrivate;

	BSslShutdown(pCtx);
	SSL_free(pCtx->pSSL);
	SSL_CTX_free(pCtx->pSCtx);
	/*
	 * Restore default system blocking mode (-1)
	 */
	SysBlockSocket(pCtx->SockFD, -1);
	SysFree(pCtx);

	return 0;
}
void EasyIocp::FreeThreadPool() { int i; for(i = 0; i < worksNum_; ++i) PostQueuedCompletionStatus(hCompPort_, 0, (ULONG_PTR)0, NULL); for(i = 0; i < worksNum_; ++i) { WaitForThreadpoolWorkCallbacks(ptpWorks_[i], FALSE); CloseThreadpoolWork(ptpWorks_[i]); } SysFree(ptpWorks_); ptpWorks_ = NULL; print("EasyIocp::FreeThreadPool: free thread pool."); }