/* Increment m->counter on each iteration; then mark thread as done. */
static void inc(void *v /*=m*/) {
  struct test *m = v;
  gpr_int64 i;
  for (i = 0; i != m->iterations; i++) {
    gpr_mu_lock(&m->mu);
    m->counter++;
    gpr_mu_unlock(&m->mu);
  }
  mark_thread_done(m);
}
/* Increment m->refcount m->iterations times, decrement m->thread_refcount
   once, and if it reaches zero, set m->event to (void*)1; then mark thread
   as done. */
static void refinc(void *v /*=m*/) {
  struct test *m = v;
  gpr_int64 i;
  for (i = 0; i != m->iterations; i++) {
    gpr_ref(&m->refcount);
  }
  if (gpr_unref(&m->thread_refcount)) {
    gpr_event_set(&m->event, (void *)1);
  }
  mark_thread_done(m);
}
/* Increment m->stats_counter m->iterations times, transfer the counter value
   to m->counter, then mark thread as done. */
static void statsinc(void *v /*=m*/) {
  struct test *m = v;
  gpr_int64 i;
  for (i = 0; i != m->iterations; i++) {
    gpr_stats_inc(&m->stats_counter, 1);
  }
  gpr_mu_lock(&m->mu);
  m->counter = gpr_stats_read(&m->stats_counter);
  gpr_mu_unlock(&m->mu);
  mark_thread_done(m);
}
/* Increment m->counter under a lock acquired with trylock, m->iterations
   times; then mark thread as done. */
static void inctry(void *v /*=m*/) {
  struct test *m = v;
  gpr_int64 i;
  for (i = 0; i != m->iterations;) {
    if (gpr_mu_trylock(&m->mu)) {
      m->counter++;
      gpr_mu_unlock(&m->mu);
      i++;
    }
  }
  mark_thread_done(m);
}
/* Wait a millisecond and increment the counter on each iteration, using an
   event for timing; then mark thread as done. */
static void inc_with_1ms_delay_event(void *v /*=m*/) {
  struct test *m = v;
  gpr_int64 i;
  for (i = 0; i != m->iterations; i++) {
    gpr_timespec deadline;
    deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                            gpr_time_from_micros(1000, GPR_TIMESPAN));
    GPR_ASSERT(gpr_event_wait(&m->event, deadline) == NULL);
    gpr_mu_lock(&m->mu);
    m->counter++;
    gpr_mu_unlock(&m->mu);
  }
  mark_thread_done(m);
}
/* Wait until m->event is set to (void *)1, then decrement m->refcount
   m->threads*m->iterations times, verifying that only the last decrement
   causes the counter to reach zero; then mark thread as done. */
static void refcheck(void *v /*=m*/) {
  struct test *m = v;
  gpr_int64 n = m->iterations * m->threads;
  gpr_int64 i;
  GPR_ASSERT(gpr_event_wait(&m->event, gpr_inf_future(GPR_CLOCK_REALTIME)) ==
             (void *)1);
  GPR_ASSERT(gpr_event_get(&m->event) == (void *)1);
  for (i = 1; i != n; i++) {
    GPR_ASSERT(!gpr_unref(&m->refcount));
    m->counter++;
  }
  GPR_ASSERT(gpr_unref(&m->refcount));
  m->counter++;
  mark_thread_done(m);
}
/* Wait a millisecond and increment the counter on each iteration; then mark
   thread as done. */
static void inc_with_1ms_delay(void *v /*=m*/) {
  struct test *m = v;
  gpr_int64 i;
  for (i = 0; i != m->iterations; i++) {
    gpr_timespec deadline;
    gpr_mu_lock(&m->mu);
    deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                            gpr_time_from_micros(1000, GPR_TIMESPAN));
    while (!gpr_cv_wait(&m->cv, &m->mu, deadline)) {
    }
    m->counter++;
    gpr_mu_unlock(&m->mu);
  }
  mark_thread_done(m);
}
/* Increment the counter only when (m->counter%m->threads)==m->thread_id; then
   mark thread as done. */
static void inc_by_turns(void *v /*=m*/) {
  struct test *m = v;
  gpr_int64 i;
  int id = thread_id(m);
  for (i = 0; i != m->iterations; i++) {
    gpr_mu_lock(&m->mu);
    while ((m->counter % m->threads) != id) {
      gpr_cv_wait(&m->cv, &m->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
    }
    m->counter++;
    gpr_cv_broadcast(&m->cv);
    gpr_mu_unlock(&m->mu);
  }
  mark_thread_done(m);
}
/* Produce m->iterations elements on queue m->q, then mark thread as done.
   Even threads use queue_append(), and odd threads use queue_try_append()
   until it succeeds. */
static void many_producers(void *v /*=m*/) {
  struct test *m = v;
  gpr_int64 i;
  int x = thread_id(m);
  if ((x & 1) == 0) {
    for (i = 0; i != m->iterations; i++) {
      queue_append(&m->q, 1);
    }
  } else {
    for (i = 0; i != m->iterations; i++) {
      while (!queue_try_append(&m->q, 1)) {
      }
    }
  }
  mark_thread_done(m);
}
/* Consume elements from m->q until m->threads*m->iterations elements have
   been seen, wait an extra second to confirm that no more elements are
   arriving, then mark thread as done. */
static void consumer(void *v /*=m*/) {
  struct test *m = v;
  gpr_int64 n = m->iterations * m->threads;
  gpr_int64 i;
  int value;
  for (i = 0; i != n; i++) {
    queue_remove(&m->q, &value, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_lock(&m->mu);
  m->counter = n;
  gpr_mu_unlock(&m->mu);
  GPR_ASSERT(!queue_remove(&m->q, &value,
                           gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                                        gpr_time_from_micros(1000000,
                                                             GPR_TIMESPAN))));
  mark_thread_done(m);
}
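/* A minimal, hypothetical driver sketch (not part of the original test file)
   showing how a body such as inc() above can be exercised.  It assumes only
   the struct test fields already referenced above (mu, counter, iterations,
   threads).  The real harness drives these bodies through its own test()
   helper and the gpr thread API; raw POSIX threads are used here purely for
   illustration. */
#include <pthread.h>

static void *inc_adapter(void *v) {
  inc(v); /* reuse the thread body defined above */
  return NULL;
}

static void run_inc_example(struct test *m) {
  enum { kMaxExampleThreads = 64 }; /* illustrative bound, not from the test */
  pthread_t thr[kMaxExampleThreads];
  int t;
  GPR_ASSERT(m->threads <= kMaxExampleThreads);
  for (t = 0; t < m->threads; t++) {
    pthread_create(&thr[t], NULL, inc_adapter, m);
  }
  for (t = 0; t < m->threads; t++) {
    pthread_join(thr[t], NULL);
  }
  /* Every increment happened under m->mu, so the final count is exact. */
  GPR_ASSERT(m->counter == m->iterations * m->threads);
}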
void *file_content_gc_thread(void *UnusedArg)
{
  char command[2 * MAXPATHLEN];
  exportlist_t *pexport = NULL;
  int is_hw_reached = FALSE;
  int some_flush_to_do = FALSE;
  unsigned long nb_blocks_to_manage;
  char cache_sub_dir[MAXPATHLEN];
  cache_content_status_t cache_content_status;
  FILE *command_stream = NULL;
  char logfile_arg[MAXPATHLEN];
  char *loglevel_arg;

  SetNameFunction("file_content_gc_thread");

  LogEvent(COMPONENT_MAIN,
           "NFS FILE CONTENT GARBAGE COLLECTION : Starting GC thread");

  if(mark_thread_existing(&gccb) == PAUSE_EXIT) {
    /* Oops, that didn't last long... exit. */
    mark_thread_done(&gccb);
    LogDebug(COMPONENT_DISPATCH,
             "NFS FILE CONTENT GARBAGE COLLECTION Thread exiting before initialization");
    return NULL;
  }

  LogDebug(COMPONENT_MAIN,
           "NFS FILE CONTENT GARBAGE COLLECTION : my pthread id is %p",
           (caddr_t) pthread_self());

  while(1) {
    /* Sleep until some work is to be done */
    sleep(nfs_param.cache_layers_param.dcgcpol.run_interval);

    if(gccb.tcb_state != STATE_AWAKE) {
      while(1) {
        P(gccb.tcb_mutex);
        if(gccb.tcb_state == STATE_AWAKE) {
          V(gccb.tcb_mutex);
          break;
        }
        switch(thread_sm_locked(&gccb)) {
          case THREAD_SM_RECHECK:
            V(gccb.tcb_mutex);
            continue;

          case THREAD_SM_BREAK:
            V(gccb.tcb_mutex);
            break;

          case THREAD_SM_EXIT:
            V(gccb.tcb_mutex);
            return NULL;
        }
      }
    }

    LogDebug(COMPONENT_MAIN,
             "NFS FILE CONTENT GARBAGE COLLECTION : processing...");

    for(pexport = nfs_param.pexportlist; pexport != NULL; pexport = pexport->next) {
      if(pexport->options & EXPORT_OPTION_USE_DATACACHE) {
        snprintf(cache_sub_dir, MAXPATHLEN, "%s/export_id=%d",
                 nfs_param.cache_layers_param.cache_content_client_param.cache_dir, 0);

        if((cache_content_status =
            cache_content_check_threshold(cache_sub_dir,
                                          nfs_param.cache_layers_param.dcgcpol.lwmark_df,
                                          nfs_param.cache_layers_param.dcgcpol.hwmark_df,
                                          &is_hw_reached,
                                          &nb_blocks_to_manage)) == CACHE_CONTENT_SUCCESS) {
          if(is_hw_reached) {
            LogEvent(COMPONENT_MAIN,
                     "NFS FILE CONTENT GARBAGE COLLECTION : High Water Mark is reached, %lu blocks to be removed",
                     nb_blocks_to_manage);
            some_flush_to_do = TRUE;
            break;
          } else {
            LogDebug(COMPONENT_MAIN,
                     "NFS FILE CONTENT GARBAGE COLLECTION : High Water Mark is not reached");
          }

          /* Use signal management */
          if(force_flush_by_signal == TRUE) {
            some_flush_to_do = TRUE;
            break;
          }
        }
      }
    } /* for */

    if(strncmp(fcc_log_path, "/dev/null", 9) == 0) {
      switch(LogComponents[COMPONENT_CACHE_INODE_GC].comp_log_type) {
        case FILELOG:
          strncpy(logfile_arg,
                  LogComponents[COMPONENT_CACHE_INODE_GC].comp_log_file, MAXPATHLEN);
          break;
        case SYSLOG:
          strncpy(logfile_arg, "SYSLOG", MAXPATHLEN);
          break;
        case STDERRLOG:
          strncpy(logfile_arg, "STDERRLOG", MAXPATHLEN);
          break;
        case STDOUTLOG:
          strncpy(logfile_arg, "STDOUTLOG", MAXPATHLEN);
          break;
        default:
          LogCrit(COMPONENT_MAIN,
                  "Could not figure out the proper -L option for emergency cache flush thread.");
      }
    } else {
      strncpy(logfile_arg, fcc_log_path, MAXPATHLEN);   /* config variable */
    }

    if(fcc_debug_level != -1)   /* config variable */
      loglevel_arg = ReturnLevelInt(fcc_debug_level);
    else
      loglevel_arg = ReturnLevelInt(ReturnLevelComponent(COMPONENT_CACHE_INODE_GC));

    snprintf(command, 2 * MAXPATHLEN, "%s -f %s -N %s -L %s",
             ganesha_exec_path, config_path, loglevel_arg, logfile_arg);

    if(some_flush_to_do)
      strncat(command, " -P 3", 2 * MAXPATHLEN);        /* Sync and erase */
    else
      strncat(command, " -S 3", 2 * MAXPATHLEN);        /* Sync only */

    if((command_stream = popen(command, "r")) == NULL) {
      LogCrit(COMPONENT_MAIN,
              "NFS FILE CONTENT GARBAGE COLLECTION : /!\\ Cannot launch command %s",
              command);
    } else {
      LogEvent(COMPONENT_MAIN,
               "NFS FILE CONTENT GARBAGE COLLECTION : I launched command %s",
               command);
      pclose(command_stream);
    }
  }

  tcb_remove(&gccb);
} /* file_content_gc_thread */
/* This thread processes FSAL UP events. */
void *fsal_up_process_thread(void *UnUsedArg)
{
  struct timeval now;
  struct timespec timeout;
  fsal_up_event_t *fupevent;
  int rc;

  SetNameFunction("fsal_up_process_thread");

  if(mark_thread_existing(&fsal_up_process_tcb) == PAUSE_EXIT) {
    /* Oops, that didn't last long... exit. */
    mark_thread_done(&fsal_up_process_tcb);
    LogDebug(COMPONENT_INIT,
             "FSAL_UP Process Thread: Exiting before initialization");
    return NULL;
  }

  LogFullDebug(COMPONENT_INIT,
               "FSAL_UP Process Thread: my pthread id is %p",
               (caddr_t) pthread_self());

  while(1) {
    /* Check without tcb lock */
    if((fsal_up_process_tcb.tcb_state != STATE_AWAKE) ||
       glist_empty(&fsal_up_process_queue)) {
      while(1) {
        P(fsal_up_process_tcb.tcb_mutex);
        if((fsal_up_process_tcb.tcb_state == STATE_AWAKE) &&
           !glist_empty(&fsal_up_process_queue)) {
          V(fsal_up_process_tcb.tcb_mutex);
          break;
        }
        switch(thread_sm_locked(&fsal_up_process_tcb)) {
          case THREAD_SM_RECHECK:
            V(fsal_up_process_tcb.tcb_mutex);
            continue;

          case THREAD_SM_BREAK:
            if(glist_empty(&fsal_up_process_queue)) {
              gettimeofday(&now, NULL);
              timeout.tv_sec = 10 + now.tv_sec;
              timeout.tv_nsec = 0;
              rc = pthread_cond_timedwait(&fsal_up_process_tcb.tcb_condvar,
                                          &fsal_up_process_tcb.tcb_mutex,
                                          &timeout);
            }
            V(fsal_up_process_tcb.tcb_mutex);
            continue;

          case THREAD_SM_EXIT:
            V(fsal_up_process_tcb.tcb_mutex);
            return NULL;
        }
      }
    }

    P(fsal_up_process_tcb.tcb_mutex);
    fupevent = glist_first_entry(&fsal_up_process_queue, fsal_up_event_t,
                                 event_list);
    if(fupevent != NULL) {
      /* Pull the event off of the list */
      glist_del(&fupevent->event_list);

      /* Release the mutex */
      V(fsal_up_process_tcb.tcb_mutex);
      fupevent->event_process_func(&fupevent->event_data);
      gsh_free(fupevent->event_data.event_context.fsal_data.fh_desc.start);
      pool_free(fsal_up_event_pool, fupevent);
      continue;
    }
    V(fsal_up_process_tcb.tcb_mutex);
  }

  tcb_remove(&fsal_up_process_tcb);
}
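/* Hedged sketch (not from the original source): roughly how a producer could
   hand an event to the consumer loop above.  It assumes the caller has already
   allocated and populated the fsal_up_event_t (allocation from
   fsal_up_event_pool and the exact event fields are not shown); only calls
   visible in the thread above plus glist_add_tail() and pthread_cond_signal()
   are used. */
static void fsal_up_enqueue_example(fsal_up_event_t *fupevent)
{
  P(fsal_up_process_tcb.tcb_mutex);
  /* Append to the work queue drained by fsal_up_process_thread(). */
  glist_add_tail(&fsal_up_process_queue, &fupevent->event_list);
  /* Wake the thread in case it is blocked in pthread_cond_timedwait(). */
  pthread_cond_signal(&fsal_up_process_tcb.tcb_condvar);
  V(fsal_up_process_tcb.tcb_mutex);
}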