/*
=============
GetThreadWork

Hand out the next work unit index under the global thread lock.
Emits a coarse (tenths) progress readout via _printf when 'pacifier'
is set.  Returns -1 once every unit has been dispatched.
=============
*/
int GetThreadWork (void)
{
    int work;
    int tenths;

    ThreadLock ();

    if (dispatch == workcount)
    {
        /* nothing left to hand out */
        ThreadUnlock ();
        return -1;
    }

    tenths = 10*dispatch / workcount;
    if (tenths != oldf)
    {
        oldf = tenths;
        if (pacifier)
            _printf ("%i...", tenths);
    }

    work = dispatch++;

    ThreadUnlock ();
    return work;
}
Rlist *RlistAppendAlien(Rlist **start, void *item)
/* Appends 'item' to the list as an RVAL_TYPE_SCALAR node WITHOUT copying it.
   Allocates new memory for the node only - careful, could leak!
   Returns the newly appended node. */
{
    Rlist *rp = xmalloc(sizeof(Rlist));

    /* BUGFIX: fully initialize the node BEFORE it becomes reachable from the
       list.  The original linked the node in first and only set rp->next
       (under the lock) afterwards, leaving a window in which a concurrent
       reader could follow an uninitialized 'next' pointer.  The list
       traversal and splice are now done inside the critical section. */
    rp->item = item;
    rp->type = RVAL_TYPE_SCALAR;
    rp->next = NULL;

    ThreadLock(cft_lock);

    if (*start == NULL)
    {
        *start = rp;
    }
    else
    {
        Rlist *lp = *start;
        while (lp->next != NULL)
        {
            lp = lp->next;
        }
        lp->next = rp;
    }

    ThreadUnlock(cft_lock);

    return rp;
}
// get a new work for thread int GetThreadWork ( void ) { int r; int f; ThreadLock(); if (dispatch >= workcount) { ThreadUnlock(); return -1; } if (pacifier == qtrue) { f = 10 * dispatch / workcount; while ( oldf < f) { oldf++; Sys_Printf ("%i...", oldf); } } r = dispatch; dispatch++; ThreadUnlock (); return r; }
/*
=============
GetThreadWork

Dispense the next work unit index; prints tenths-progress when the
pacifier is enabled (flushing stdout so it appears immediately).
Returns -1 when all work is done.
=============
*/
int GetThreadWork( void ){
    int work, tenths;

    ThreadLock();

    if ( dispatch == workcount ) {
        ThreadUnlock();
        return -1;
    }

    tenths = 10 * dispatch / workcount;
    if ( tenths != oldf ) {
        oldf = tenths;
        if ( pacifier ) {
            Sys_Printf( "%i...", tenths );
            fflush( stdout );   /* ydnar */
        }
    }

    work = dispatch++;

    ThreadUnlock();
    return work;
}
/* Append 'item' of the given RvalType to the list at *start.
 * - RVAL_TYPE_SCALAR delegates to RlistAppendScalar().
 * - RVAL_TYPE_LIST appends a copy of each element of the 'item' list and
 *   returns the last node appended (or the initial *start if 'item' is empty).
 * - RVAL_TYPE_FNCALL falls through to the generic append below.
 * - Anything else is logged and rejected with NULL.
 * The generic path deep-copies 'item' via RvalCopy(), so the list owns its
 * copy.  Note the tail walk makes repeated appends O(n) each. */
Rlist *RlistAppend(Rlist **start, const void *item, RvalType type)
{
    Rlist *rp, *lp = *start;

    switch (type)
    {
    case RVAL_TYPE_SCALAR:
        return RlistAppendScalar(start, item);

    case RVAL_TYPE_FNCALL:
        break;          /* handled by the generic append below */

    case RVAL_TYPE_LIST:
        for (rp = (Rlist *) item; rp != NULL; rp = rp->next)
        {
            lp = RlistAppend(start, rp->item, rp->type);
        }
        return lp;

    default:
        Log(LOG_LEVEL_DEBUG, "Cannot append %c to rval-list '%s'", type, (char *) item);
        return NULL;
    }

    rp = xmalloc(sizeof(Rlist));

    if (*start == NULL)
    {
        *start = rp;
    }
    else
    {
        /* walk to the tail and splice the new node on */
        for (lp = *start; lp->next != NULL; lp = lp->next)
        {
        }
        lp->next = rp;
    }

    rp->item = RvalCopy((Rval) {(void *) item, type}).item;
    rp->type = type;    /* scalar, builtin function */

    ThreadLock(cft_lock);

    /* list nodes keep an iteration cursor into their own copied item */
    if (type == RVAL_TYPE_LIST)
    {
        rp->state_ptr = rp->item;
    }
    else
    {
        rp->state_ptr = NULL;
    }

    rp->next = NULL;

    ThreadUnlock(cft_lock);

    return rp;
}
/* ============== LeafThread ============== */ void * LeafThread(void *arg) { double now; portal_t *p; do { ThreadLock(); /* Save state if sufficient time has elapsed */ now = I_FloatTime(); if (now > statetime + stateinterval) { statetime = now; SaveVisState(); } ThreadUnlock(); p = GetNextPortal(); if (!p) break; PortalFlow(p); PortalCompleted(p); if (verbose > 1) { logprint("portal:%4i mightsee:%4i cansee:%4i\n", (int)(p - portals), p->nummightsee, p->numcansee); } } while (1); return NULL; }
// debugging! -- not accurate! void DumpLuxels( facelight_t *pFaceLight, Vector *luxelColors, int ndxFace ) { static FileHandle_t pFpLuxels = NULL; ThreadLock(); if( !pFpLuxels ) { pFpLuxels = g_pFileSystem->Open( "luxels.txt", "w" ); } dface_t *pFace = &g_pFaces[ndxFace]; bool bDisp = ( pFace->dispinfo != -1 ); for( int ndx = 0; ndx < pFaceLight->numluxels; ndx++ ) { WriteWinding( pFpLuxels, pFaceLight->sample[ndx].w, luxelColors[ndx] ); if( bDumpNormals && bDisp ) { WriteNormal( pFpLuxels, pFaceLight->luxel[ndx], pFaceLight->luxelNormals[ndx], 15.0f, Vector( 255, 255, 0 ) ); } } ThreadUnlock(); }
/* ============= GetNextPortal Returns the next portal for a thread to work on Returns the portals from the least complex, so the later ones can reuse the earlier information. ============= */ portal_t * GetNextPortal(void) { int i; portal_t *p, *ret; unsigned min; ThreadLock(); min = INT_MAX; ret = NULL; for (i = 0, p = portals; i < numportals * 2; i++, p++) { if (p->nummightsee < min && p->status == pstat_none) { min = p->nummightsee; ret = p; } } if (ret) { ret->status = pstat_working; GetThreadWork_Locked__(); } ThreadUnlock(); return ret; }
/* ============= AllocWinding ============= */ winding_t *AllocWinding (int points) { winding_t *w; if (numthreads == 1) { c_winding_allocs++; c_winding_points += points; c_active_windings++; if (c_active_windings > c_peak_windings) c_peak_windings = c_active_windings; } ThreadLock(); if (winding_pool[points]) { w = winding_pool[points]; winding_pool[points] = w->next; } else { w = (winding_t *)malloc(sizeof(*w)); w->p = (Vector *)calloc( points, sizeof(Vector) ); } ThreadUnlock(); w->numpoints = 0; // None are occupied yet even though allocated. w->maxpoints = points; w->next = NULL; return w; }
static void PurgeOldConnections(Item **list, time_t now)
/* Some connections might not terminate properly. These should be cleaned
   every couple of hours. That should be enough to prevent spamming. */
{
    assert(list != NULL);

    Log(LOG_LEVEL_DEBUG, "Purging Old Connections...");

    if (ThreadLock(cft_count))
    {
        Item *next;

        for (Item *ip = *list; ip != NULL; ip = next)
        {
            next = ip->next;        /* DeleteItem may free 'ip' below */

            /* each item's 'classes' field holds its arrival timestamp */
            int then = 0;
            sscanf(ip->classes, "%d", &then);

            if (now > then + 7200)  /* entry older than two hours */
            {
                Log(LOG_LEVEL_VERBOSE,
                    "IP address '%s' has been more than two hours in connection list, purging",
                    ip->name);
                DeleteItem(list, ip);
            }
        }

        ThreadUnlock(cft_count);
    }

    Log(LOG_LEVEL_DEBUG, "Done purging old connections");
}
/*
 * DlgDirListComboBoxW
 *
 * Validate the dialog window handle, then fill the combo box (and the
 * optional static path control) from the given path spec via the shared
 * xxxDlgDirListHelper, holding a thread lock across the callout.
 * Returns FALSE if the window handle does not validate.
 */
int DlgDirListComboBoxW(
    HWND hwndDlg,
    LPWSTR lpszPathSpecClient,
    int idComboBox,
    int idStaticPath,
    UINT attrib)
{
    PWND pwndDlg;
    TL tlpwndDlg;
    BOOL fRet;

    pwndDlg = ValidateHwnd(hwndDlg);
    if (pwndDlg == NULL)
        return FALSE;

    ThreadLock(pwndDlg, &tlpwndDlg);
    fRet = xxxDlgDirListHelper(pwndDlg, lpszPathSpecClient,
                               (LPBYTE)lpszPathSpecClient,
                               idComboBox, idStaticPath, attrib, FALSE);
    ThreadUnlock(&tlpwndDlg);

    return fRet;
}
/* Mark 'pwnd' as fullscreen and bump the desktop's fullscreen count; when
 * the count transitions 0 -> 1, notify the tray window.  Then walks to the
 * owner window and, if the owner is a non-child, non-visible window whose
 * rect left/right are both 0, recurses so the owner is marked too.
 * Returns TRUE if any step yielded (xxx prefix: callouts may yield).
 * NOTE(review): 'pwnd' is re-assigned to its owner mid-function -- the
 * recursion operates on the owner, not the original window. */
BOOL xxxAddFullScreen(PWND pwnd)
{
    BOOL fYielded;
    PDESKTOP pdesk = pwnd->head.rpdesk;

    CheckLock(pwnd);

    fYielded = FALSE;

    if (pdesk == NULL)
        return fYielded;

    if (!TestWF(pwnd, WFFULLSCREEN) && FCallTray(pdesk))
    {
        SetWF(pwnd, WFFULLSCREEN);

        /* first fullscreen window on this desktop: adjust the tray */
        if (!(pdesk->cFullScreen)++)
        {
            xxxSetTrayWindow(pdesk, STW_SAME);
            fYielded = TRUE;
        }

        /* propagate to a qualifying owner window, if any */
        if ((pwnd = pwnd->spwndOwner) && !TestWF(pwnd, WFCHILD) &&
            !pwnd->rcWindow.right && !pwnd->rcWindow.left &&
            !TestWF(pwnd, WFVISIBLE))
        {
            TL tlpwnd;

            ThreadLock(pwnd, &tlpwnd);
            if (xxxAddFullScreen(pwnd))
                fYielded = TRUE;
            ThreadUnlock(&tlpwnd);
        }
    }

    return(fYielded);
}
/* Compare 'filename' against 'prev_file' by message digest.
 * Returns 0 when the contents are identical; 1 when they differ, when
 * prev_file does not exist, or when the lock cannot be acquired.
 * Side effect: prev_file is replaced by a link/copy of filename so the
 * next run compares against the current content. */
static int CompareResult(char *filename, char *prev_file)
{
    int i;
    unsigned char digest1[EVP_MAX_MD_SIZE + 1];
    unsigned char digest2[EVP_MAX_MD_SIZE + 1];
    int md_len1, md_len2;
    FILE *fp;
    int rtn = 0;

    CfOut(cf_verbose, "", "Comparing files %s with %s\n", prev_file, filename);

    if ((fp = fopen(prev_file, "r")) != NULL)
    {
        /* fopen only probes for existence; FileChecksum reopens the file */
        fclose(fp);

        md_len1 = FileChecksum(prev_file, digest1);
        md_len2 = FileChecksum(filename, digest2);

        /* different digest lengths means different hash outputs */
        if (md_len1 != md_len2)
        {
            rtn = 1;
        }
        else
        {
            for (i = 0; i < md_len1; i++)
            {
                if (digest1[i] != digest2[i])
                {
                    rtn = 1;
                    break;
                }
            }
        }
    }
    else
    {
        /* no previous file */
        rtn = 1;
    }

    if (!ThreadLock(cft_count))
    {
        CfOut(cf_error, "", "!! Severe lock error when mailing in exec");
        return 1;
    }

    /* replace old file with new*/
    unlink(prev_file);

    if (!LinkOrCopy(filename, prev_file, true))
    {
        CfOut(cf_inform, "", "Could not symlink or copy %s to %s", filename, prev_file);
        rtn = 1;
    }

    ThreadUnlock(cft_count);

    return (rtn);
}
void IncreaseNodeCounter(void) { ThreadLock(); //if (verbose) printf("\r%6d", numrecurse++); qprintf("\r%6d", numrecurse++); //qprintf("\r%6d %d, %5d ", numrecurse++, GetNumThreads(), nodelistsize); ThreadUnlock(); } //end of the function IncreaseNodeCounter
/* va_list flavor of logprint: serialize output under the thread lock and
 * interrupt any in-progress progress display before writing. */
void
logvprint(const char *fmt, va_list args)
{
    ThreadLock();

    InterruptThreadProgress__();
    logvprint_locked__(fmt, args);

    ThreadUnlock();
}
/* Compare 'filename' with 'prev_file' by message digest.
 * Returns 0 when the two files have identical checksums; 1 when they
 * differ, when prev_file does not exist, or when the lock cannot be
 * acquired.  Side effect: prev_file is replaced by a link/copy of
 * filename so subsequent runs compare against the current output. */
static int CompareResult(const char *filename, const char *prev_file)
{
    Log(LOG_LEVEL_VERBOSE, "Comparing files %s with %s", prev_file, filename);

    int rtn = 0;

    FILE *fp = fopen(prev_file, "r");
    if (fp)
    {
        /* fopen only probes for existence; FileChecksum reopens the file */
        fclose(fp);

        unsigned char digest1[EVP_MAX_MD_SIZE + 1];
        int md_len1 = FileChecksum(prev_file, digest1);

        unsigned char digest2[EVP_MAX_MD_SIZE + 1];
        int md_len2 = FileChecksum(filename, digest2);

        /* different digest lengths means the hashes cannot match */
        if (md_len1 != md_len2)
        {
            rtn = 1;
        }
        else
        {
            for (int i = 0; i < md_len1; i++)
            {
                if (digest1[i] != digest2[i])
                {
                    rtn = 1;
                    break;
                }
            }
        }
    }
    else
    {
        /* no previous file */
        rtn = 1;
    }

    if (!ThreadLock(cft_count))
    {
        Log(LOG_LEVEL_ERR, "Severe lock error when mailing in exec");
        return 1;
    }

    /* replace old file with new*/
    unlink(prev_file);

    if (!LinkOrCopy(filename, prev_file, true))
    {
        Log(LOG_LEVEL_INFO, "Could not symlink or copy %s to %s", filename, prev_file);
        rtn = 1;
    }

    ThreadUnlock(cft_count);

    return rtn;
}
//=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== void AddThread(void (*func)(int)) { thread_t *thread; if (numthreads == 1) { if (currentnumthreads >= numthreads) { return; } currentnumthreads++; func(-1); currentnumthreads--; } //end if else { ThreadLock(); if (currentnumthreads >= numthreads) { ThreadUnlock(); return; } //end if //allocate new thread thread = GetMemory(sizeof(thread_t)); if (!thread) { Error("can't allocate memory for thread\n"); } // thread->threadid = currentthreadid; if (pthread_create(&thread->thread, attrib, (pthread_startroutine_t)func, (pthread_addr_t)thread->threadid) == -1) { Error("pthread_create failed"); } //add the thread to the end of the list thread->next = NULL; if (lastthread) { lastthread->next = thread; } else { firstthread = thread; } lastthread = thread; // #ifdef THREAD_DEBUG qprintf("added thread with id %d\n", thread->threadid); #endif //THREAD_DEBUG // currentnumthreads++; currentthreadid++; // ThreadUnlock(); } //end else } //end of the function AddThread
//=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== void WaitForAllThreadsFinished(void) { pthread_t *thread; void *pthread_return; ThreadLock(); while(firstthread) { thread = &firstthread->thread; ThreadUnlock(); if (pthread_join(*thread, &pthread_return) == -1) Error("pthread_join failed"); ThreadLock(); } //end while ThreadUnlock(); } //end of the function WaitForAllThreadsFinished
int ArchiveToRepository(const char *file, Attributes attr, Promise *pp, const ReportContext *report_context)
/* Returns true if the file was backed up (or backup was unnecessary/already
   done) and false if the copy into the repository failed. */
{
    char destination[CF_BUFSIZE];
    struct stat sb, dsb;

    if (!GetRepositoryPath(file, attr, destination))
    {
        return false;
    }

    if (attr.copy.backup == cfa_nobackup)
    {
        return true;
    }

    /* archive a given file at most once per run: repeating would clobber
       the backup with the already-updated content */
    if (IsItemIn(VREPOSLIST, file))
    {
        CfOut(OUTPUT_LEVEL_INFORM, "",
              "The file %s has already been moved to the repository once. Multiple update will cause loss of backup.",
              file);
        return true;
    }

    ThreadLock(cft_getaddr);
    PrependItemList(&VREPOSLIST, file);
    ThreadUnlock(cft_getaddr);

    CfDebug("Repository(%s)\n", file);

    JoinPath(destination, CanonifyName(file));

    /* NOTE(review): a failure here is deliberately ignored -- the copy
       below will fail and report instead */
    if (!MakeParentDirectory(destination, attr.move_obstructions, report_context))
    {
    }

    if (cfstat(file, &sb) == -1)
    {
        CfDebug("File %s promised to archive to the repository but it disappeared!\n", file);
        return true;
    }

    cfstat(destination, &dsb);

    CheckForFileHoles(&sb, pp);

    if (pp && CopyRegularFileDisk(file, destination, pp->makeholes))
    {
        CfOut(OUTPUT_LEVEL_INFORM, "", "Moved %s to repository location %s\n", file, destination);
        return true;
    }
    else
    {
        CfOut(OUTPUT_LEVEL_INFORM, "", "Failed to move %s to repository location %s\n", file, destination);
        return false;
    }
}
//=========================================================================== // // Parameter: - // Returns: - // Changes Globals: - //=========================================================================== void WaitForAllThreadsFinished(void) { pthread_t *thread; pthread_addr_t status; ThreadLock(); while(firstthread) { thread = &firstthread->thread; ThreadUnlock(); if (pthread_join(*thread, &status) == -1) Error("pthread_join failed"); ThreadLock(); } //end while ThreadUnlock(); } //end of the function WaitForAllThreadsFinished
//returns the size of the node list int NodeListSize( void ) { int size; ThreadLock(); size = nodelistsize; ThreadUnlock(); return size; } //end of the function NodeListSize
/* Establish a connection to one of the servers in fc.servers, trying them
 * in order and skipping servers already marked offline.
 * - background: open a throwaway (uncached) connection, but only while the
 *   cached-connection list is below CFA_MAXTHREADS; otherwise this server
 *   is skipped and the next one tried.
 * - foreground: reuse an idle cached connection when available, else open
 *   and cache a new one; a failing server is marked offline.
 * On success *err is the callee's error code (0 for a reused connection);
 * returns NULL when no server could be reached. */
AgentConnection *NewServerConnection(FileCopy fc, bool background, int *err)
{
    AgentConnection *conn;
    Rlist *rp;

    for (rp = fc.servers; rp != NULL; rp = rp->next)
    {
        const char *servername = RlistScalarValue(rp);

        if (ServerOffline(servername))
        {
            continue;
        }

        if (background)
        {
            /* snapshot the cached-connection list head under the lock */
            ThreadLock(&cft_serverlist);
            Rlist *srvlist_tmp = SERVERLIST;
            ThreadUnlock(&cft_serverlist);

            /* TODO not return NULL if >= CFA_MAXTREADS ? */
            /* TODO RlistLen is O(n) operation. */
            if (RlistLen(srvlist_tmp) < CFA_MAXTHREADS)
            {
                /* If background connection was requested, then don't cache it
                 * in SERVERLIST since it will be closed right afterwards. */
                conn = ServerConnection(servername, fc, err);
                return conn;
            }
        }
        else
        {
            conn = GetIdleConnectionToServer(servername);
            if (conn != NULL)
            {
                *err = 0;
                return conn;
            }

            /* This is first usage, need to open */
            conn = ServerConnection(servername, fc, err);
            if (conn != NULL)
            {
                CacheServerConnection(conn, servername);
                return conn;
            }

            /* This server failed, trying next in list. */
            Log(LOG_LEVEL_INFO, "Unable to establish connection with %s", servername);
            MarkServerOffline(servername);
        }
    }

    Log(LOG_LEVEL_ERR, "Unable to establish any connection with server.");
    return NULL;
}
/* Recursively free an Rval and everything it owns.
 * Scalars free the string (under cft_lock); lists free every node and
 * recurse into each node's item; fncalls delegate to FnCallDestroy.
 * A NULL item or unknown type is a no-op. */
void RvalDestroy(Rval rval)
{
    Rlist *clist, *next = NULL;

    CfDebug("DeleteRvalItem(%c)", rval.type);

    if (DEBUG)
    {
        RvalShow(stdout, rval);
    }

    CfDebug("\n");

    if (rval.item == NULL)
    {
        CfDebug("DeleteRval NULL\n");
        return;
    }

    switch (rval.type)
    {
    case RVAL_TYPE_SCALAR:
        ThreadLock(cft_lock);
        free((char *) rval.item);
        ThreadUnlock(cft_lock);
        break;

    case RVAL_TYPE_LIST:
        /* rval is now a list whose first item is clist->item */
        for (clist = (Rlist *) rval.item; clist != NULL; clist = next)
        {
            next = clist->next;     /* save before the node is freed */

            if (clist->item)
            {
                RvalDestroy((Rval) {clist->item, clist->type});
            }

            free(clist);
        }
        break;

    case RVAL_TYPE_FNCALL:
        FnCallDestroy((FnCall *) rval.item);
        break;

    default:
        CfDebug("Nothing to do\n");
        return;
    }
}
/*
=============
FreeWinding

Return a winding to the per-capacity free pool, flagging it so a second
free of the same winding is detected as a fatal error.
=============
*/
void FreeWinding (winding_t *w)
{
    ThreadLock();
    /* BUGFIX: perform the double-free check under the lock.  Previously the
       check ran before ThreadLock(), so two threads racing to free the same
       winding could both pass it and corrupt the pool.  Error() aborts the
       program, so exiting while holding the lock is acceptable. */
    if (w->numpoints == 0xdeaddead)
        Error ("FreeWinding: freed a freed winding");
    w->numpoints = 0xdeaddead;  /* flag as freed */
    w->next = winding_pool[w->maxpoints];
    winding_pool[w->maxpoints] = w;
    ThreadUnlock();
}
/* Thread-safe wrapper: serialize access to the shared work counter and
 * return the next work unit index (-1 when no work remains). */
int GetThreadWork(void)
{
    int work;

    ThreadLock();
    work = GetThreadWork_Locked__();
    ThreadUnlock();

    return work;
}
/* Prepend a node holding 'rval' to the list at *start; returns the new head.
 * The caller retains/transfers ownership semantics of 'rval' unchanged --
 * this only allocates the node. */
static Rlist *RlistPrependRval(Rlist **start, Rval rval)
{
    Rlist *rp = xmalloc(sizeof(Rlist));

    rp->val = rval;

    ThreadLock(cft_lock);

    /* BUGFIX: read the current head inside the critical section.  The
       original did 'rp->next = *start' before taking the lock, racing with
       concurrent writers and potentially dropping their insertions. */
    rp->next = *start;
    *start = rp;

    ThreadUnlock(cft_lock);

    return rp;
}
/* printf-style logging entry point: serialize output under the thread lock,
 * interrupting any in-progress progress display first. */
void logprint(const char *fmt, ...)
{
    va_list ap;

    ThreadLock();
    InterruptThreadProgress__();

    va_start(ap, fmt);
    logvprint_locked__(fmt, ap);
    va_end(ap);

    ThreadUnlock();
}
int ArchiveToRepository(const char *file, Attributes attr)
/* Returns true if the file was backed up (or backup was unnecessary/already
   done) and false if the copy into the repository failed. */
{
    char destination[CF_BUFSIZE];
    struct stat sb, dsb;

    if (!GetRepositoryPath(file, attr, destination))
    {
        return false;
    }

    if (attr.copy.backup == BACKUP_OPTION_NO_BACKUP)
    {
        return true;
    }

    /* archive a given file at most once per run: repeating would clobber
       the backup with the already-updated content */
    if (IsItemIn(VREPOSLIST, file))
    {
        Log(LOG_LEVEL_INFO,
            "The file '%s' has already been moved to the repository once. Multiple update will cause loss of backup.",
            file);
        return true;
    }

    ThreadLock(cft_getaddr);
    PrependItemList(&VREPOSLIST, file);
    ThreadUnlock(cft_getaddr);

    JoinPath(destination, CanonifyName(file));

    /* NOTE(review): a failure here is deliberately ignored -- the copy
       below will fail and report instead */
    if (!MakeParentDirectory(destination, attr.move_obstructions))
    {
    }

    if (stat(file, &sb) == -1)
    {
        Log(LOG_LEVEL_DEBUG, "File '%s' promised to archive to the repository but it disappeared!", file);
        return true;
    }

    stat(destination, &dsb);

    if (CopyRegularFileDisk(file, destination))
    {
        Log(LOG_LEVEL_INFO, "Moved '%s' to repository location '%s'", file, destination);
        return true;
    }
    else
    {
        Log(LOG_LEVEL_INFO, "Failed to move '%s' to repository location '%s'", file, destination);
        return false;
    }
}
/*
=============
GetFileSpace

Carve 'size' bytes out of the shared output buffer, 4-byte aligned.
Aborts via COM_Error on overrun.  Returns a pointer to the reserved space.
=============
*/
byte *GetFileSpace (int size)
{
    byte *buf;

    ThreadLock();
    /* round the shared cursor up to a 4-byte boundary */
    file_p = (byte *)(((intptr_t)file_p + 3) & ~3);
    buf = file_p;
    file_p += size;
    /* BUGFIX: test for overrun while still holding the lock.  Checking
       file_p after ThreadUnlock() raced with other threads advancing the
       cursor, so the wrong caller could be blamed -- or the overrun missed.
       COM_Error aborts, so exiting while holding the lock is acceptable. */
    if (file_p > file_end)
        COM_Error ("%s: overrun", __thisfunc__);
    ThreadUnlock();

    return buf;
}
/* Close the descriptors tracked in the CHILDREN table, under cft_count.
 * NOTE(review): close(i) closes the table INDEX as a file descriptor; this
 * assumes CHILDREN is indexed by fd (with the entry holding e.g. the child
 * pid).  Confirm against the writers of CHILDREN -- if the entries were the
 * descriptors themselves this would need to be close(CHILDREN[i]). */
static void CloseChildrenFD()
{
    ThreadLock(cft_count);
    int i;
    for (i = 0; i < MAX_FD; i++)
    {
        if (CHILDREN[i] > 0)
        {
            close(i);
        }
    }
    ThreadUnlock(cft_count);
}