/*
 * Calls the destruct functions of all the threadflow's flowops,
 * if it is still flagged as "running".
 */
void
flowop_destruct_all_flows(threadflow_t *threadflow)
{
	flowop_t *flowop;

	/* wait a moment to give other threads a chance to stop too */
	(void) sleep(1);

	(void) ipc_mutex_lock(&threadflow->tf_lock);

	/* prepare to call destruct flow routines, if necessary */
	if (threadflow->tf_running == 0) {
		/* already destroyed */
		(void) ipc_mutex_unlock(&threadflow->tf_lock);
		return;
	}

	flowop = threadflow->tf_thrd_fops;
	threadflow->tf_running = 0;
	(void) ipc_mutex_unlock(&threadflow->tf_lock);

	while (flowop) {
		flowop_destructflow(flowop);
		flowop = flowop->fo_exec_next;
	}
}
/*
 * First creates the parent directories of the file using
 * fileset_mkdir(), then optionally sets the O_DSYNC flag
 * and opens the file with open64(). It unlocks the fileset
 * entry lock, sets DIRECTIO_ON or DIRECTIO_OFF as requested,
 * and returns the file descriptor of the opened file.
 */
int
fileset_openfile(fileset_t *fileset, filesetentry_t *entry,
    int flag, int mode, int attrs)
{
	char path[MAXPATHLEN];
	char dir[MAXPATHLEN];
	char *pathtmp;
	struct stat64 sb;
	int fd;
	int open_attrs = 0;

	*path = 0;
	(void) strcpy(path, *fileset->fs_path);
	(void) strcat(path, "/");
	(void) strcat(path, fileset->fs_name);
	pathtmp = fileset_resolvepath(entry);
	(void) strcat(path, pathtmp);
	(void) strcpy(dir, path);
	free(pathtmp);
	(void) trunc_dirname(dir);

	/* If we are going to create a file, create the parent dirs */
	if ((flag & O_CREAT) && (stat64(dir, &sb) != 0)) {
		if (fileset_mkdir(dir, 0755) == -1) {
			(void) ipc_mutex_unlock(&entry->fse_lock);
			return (-1);
		}
	}

	if (flag & O_CREAT)
		entry->fse_flags |= FSE_EXISTS;

	if (attrs & FLOW_ATTR_DSYNC) {
#ifdef sun
		open_attrs |= O_DSYNC;
#else
		open_attrs |= O_FSYNC;
#endif
	}

	if ((fd = open64(path, flag | open_attrs, mode)) < 0) {
		filebench_log(LOG_ERROR,
		    "Failed to open file %s: %s",
		    path, strerror(errno));
		(void) ipc_mutex_unlock(&entry->fse_lock);
		return (-1);
	}

	(void) ipc_mutex_unlock(&entry->fse_lock);

#ifdef sun
	if (attrs & FLOW_ATTR_DIRECTIO)
		(void) directio(fd, DIRECTIO_ON);
	else
		(void) directio(fd, DIRECTIO_OFF);
#endif

	return (fd);
}
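/*
 * A hypothetical caller sketch (not part of the original source): how a
 * flowop might use fileset_openfile() to open an entry for synchronous
 * writes. It assumes the caller already holds entry->fse_lock, which
 * fileset_openfile() releases before returning, and that the usual
 * filebench headers are in scope. The helper name is illustrative.
 */
static int
open_entry_for_write(fileset_t *fileset, filesetentry_t *entry)
{
	int fd;

	fd = fileset_openfile(fileset, entry, O_RDWR, 0666,
	    FLOW_ATTR_DSYNC);
	if (fd < 0)
		return (-1);	/* error already logged, entry lock released */

	/* ... issue writes on fd ... */

	(void) close(fd);
	return (0);
}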
/* * Return 0 on success and a non-zero error code on failure. */ int revalidate_cvar_handles() { cvar_t *t; cvar_library_t *cvar_lib; int ret; if (!filebench_shm->shm_cvar_list) return 0; /* Nothing to do. */ for (t = filebench_shm->shm_cvar_list; t != NULL; t = t->next) { cvar_lib = cvar_libraries[t->cvar_lib_info->index]; if (cvar_lib->cvar_op.cvar_revalidate_handle) { ipc_mutex_lock(&t->cvar_lock); ret = cvar_lib->cvar_op.cvar_revalidate_handle(t->cvar_handle); ipc_mutex_unlock(&t->cvar_lock); if (ret) { filebench_log(LOG_ERROR, "Revalidation failed for cvar_handle " "of type %s with error code %d", t->cvar_lib_info->type, ret); return ret; } } } return 0; }
double get_cvar_value(cvar_t *cvar) { int ret; double value = 0.0; fbint_t round = cvar->round; ipc_mutex_lock(&cvar->cvar_lock); cvar_library_t *cvar_lib = cvar_libraries[cvar->cvar_lib_info->index]; ret = cvar_lib->cvar_op.cvar_next_value(cvar->cvar_handle, &value); ipc_mutex_unlock(&cvar->cvar_lock); if (ret) { filebench_log(LOG_ERROR, "Unable to get next_value from custom variable" " of type %s", cvar->cvar_lib_info->type); filebench_shutdown(1); } if (round) { fbint_t num, lower, upper; num = (fbint_t) value; lower = num - (num % round); upper = lower + round; value = (num - lower) > (upper - num) ? upper : lower; } if (value < cvar->min) value = cvar->min; else if (value > cvar->max) value = cvar->max; return value; }
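/*
 * The rounding step in get_cvar_value() snaps the raw value to the nearest
 * multiple of cvar->round before clamping to [cvar->min, cvar->max]. A
 * standalone restatement of that arithmetic (function name illustrative;
 * "round" must be non-zero, as guaranteed by the if (round) guard above):
 */
static fbint_t
round_to_nearest_multiple(fbint_t num, fbint_t round)
{
	fbint_t lower = num - (num % round);
	fbint_t upper = lower + round;

	/* e.g. num = 1234, round = 100: lower = 1200, upper = 1300 -> 1200 */
	return ((num - lower) > (upper - num) ? upper : lower);
}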
/*
 * Creates threads for the threadflows associated with a procflow.
 * The routine iterates through the list of threadflows in the
 * supplied procflow's pf_threads list. For each threadflow on
 * the list, it defines tf_instances number of cloned
 * threadflows, and then calls threadflow_createthread() for
 * each to create and start the actual operating system thread.
 * Note that each of the newly defined threadflows will be linked
 * into the procflow's threadflow list, but at the head of the
 * list, so they will not become part of the supplied set. After
 * all the threads have been created, threadflow_init enters
 * a join loop for all the threads in the newly defined
 * threadflows. Once all the created threads have exited,
 * threadflow_init will return 0. If errors are encountered, it
 * will return a non-zero value.
 */
int
threadflow_init(procflow_t *procflow)
{
	threadflow_t *threadflow = procflow->pf_threads;
	int ret = 0;

	(void) ipc_mutex_lock(&filebench_shm->shm_threadflow_lock);

	while (threadflow) {
		threadflow_t *newthread;
		int instances;
		int i;

		instances = avd_get_int(threadflow->tf_instances);
		filebench_log(LOG_VERBOSE, "Starting %d %s threads",
		    instances, threadflow->tf_name);

		for (i = 1; i < instances; i++) {
			/* Create threads */
			newthread = threadflow_define_common(procflow,
			    threadflow->tf_name, threadflow, i + 1);
			if (newthread == NULL) {
				(void) ipc_mutex_unlock(
				    &filebench_shm->shm_threadflow_lock);
				return (-1);
			}
			ret |= threadflow_createthread(newthread);
		}

		newthread = threadflow_define_common(procflow,
		    threadflow->tf_name, threadflow, 1);

		if (newthread == NULL) {
			(void) ipc_mutex_unlock(
			    &filebench_shm->shm_threadflow_lock);
			return (-1);
		}

		/* Create each thread */
		ret |= threadflow_createthread(newthread);

		threadflow = threadflow->tf_next;
	}

	threadflow = procflow->pf_threads;

	(void) ipc_mutex_unlock(&filebench_shm->shm_threadflow_lock);

	while (threadflow) {
		/* wait for all threads to finish */
		if (threadflow->tf_tid) {
			void *status;

			if (pthread_join(threadflow->tf_tid, &status) == 0)
				ret += *(int *)status;
		}
		threadflow = threadflow->tf_next;
	}

	procflow->pf_running = 0;

	return (ret);
}
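/*
 * Because threadflow_init() dereferences the joined status pointer with
 * *(int *)status, each thread's start routine must return a pointer to
 * storage that outlives the thread. A minimal illustrative start routine
 * (not the actual filebench thread entry point; a static holds the exit
 * code here, so this sketch only distinguishes success from failure):
 */
static void *
threadflow_main_sketch(void *arg)
{
	static int exit_status;

	(void) arg;

	/* ... run the threadflow's flowops ... */

	exit_status = 0;
	pthread_exit(&exit_status);
	return (NULL);
}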
/*
 * XXX No check is made for out-of-memory condition
 */
char *
ipc_ismmalloc(size_t size)
{
	char *allocstr;

	filebench_log(LOG_DEBUG_SCRIPT, "Mallocing from ISM...");

	(void) ipc_mutex_lock(&filebench_shm->ism_lock);

	/* Map in shared memory */
	if (ipc_ismattach() < 0) {
		(void) ipc_mutex_unlock(&filebench_shm->ism_lock);
		return (NULL);
	}

	allocstr = filebench_shm->shm_ptr;
	filebench_shm->shm_ptr += size;
	filebench_shm->shm_allocated += size;

	(void) ipc_mutex_unlock(&filebench_shm->ism_lock);

	filebench_log(LOG_DEBUG_SCRIPT, "Done allocing from ISM...");

	return (allocstr);
}
/* * Returns a list of flowops named "name" from the master * flowop list. */ flowop_t * flowop_find(char *name) { flowop_t *flowop; flowop_t *result = NULL; flowop_find_barrier(); (void) ipc_mutex_lock(&filebench_shm->shm_flowop_lock); flowop = filebench_shm->shm_flowoplist; while (flowop) { if (strcmp(name, flowop->fo_name) == 0) { /* Add flowop to result list */ if (result == NULL) { result = flowop; flowop->fo_resultnext = NULL; } else { flowop->fo_resultnext = result; result = flowop; } } flowop = flowop->fo_next; } (void) ipc_mutex_unlock(&filebench_shm->shm_flowop_lock); return (result); }
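/*
 * Illustrative only: the matches returned by flowop_find() are chained
 * through fo_resultnext and terminated by NULL, so a caller can walk them
 * like this (the function name is hypothetical):
 */
static void
flowop_print_matches(char *name)
{
	flowop_t *flowop;

	for (flowop = flowop_find(name); flowop != NULL;
	    flowop = flowop->fo_resultnext)
		filebench_log(LOG_INFO, "found flowop %s-%d",
		    flowop->fo_name, flowop->fo_instance);
}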
/*
 * Deletes all the flowops from a flowop list.
 */
void
flowop_delete_all(flowop_t **flowoplist)
{
	flowop_t *flowop = *flowoplist;
	flowop_t *next_flowop;

	filebench_log(LOG_DEBUG_IMPL, "Deleting all flowops...");
	while (flowop) {
		filebench_log(LOG_DEBUG_IMPL, "Deleting all flowops (%s-%d)",
		    flowop->fo_name, flowop->fo_instance);
		flowop = flowop->fo_threadnext;
	}

	flowop = *flowoplist;

	(void) ipc_mutex_lock(&filebench_shm->flowop_lock);

	while (flowop) {
		if (flowop->fo_instance &&
		    (flowop->fo_instance == FLOW_MASTER)) {
			flowop = flowop->fo_threadnext;
			continue;
		}
		/* save the next pointer before this flowop is deleted */
		next_flowop = flowop->fo_threadnext;
		flowop_delete(flowoplist, flowop);
		flowop = next_flowop;
	}

	(void) ipc_mutex_unlock(&filebench_shm->flowop_lock);
}
/*
 * Updates flowop's latency statistics, using saved start
 * time and current high resolution time. Updates flowop's
 * io count and transferred bytes statistics. Also updates
 * threadflow's and flowop's cumulative read or write byte
 * and io count statistics.
 */
void
flowop_endop(threadflow_t *threadflow, flowop_t *flowop, int64_t bytes)
{
	hrtime_t t;

	flowop->fo_stats.fs_mstate[FLOW_MSTATE_LAT] +=
	    (gethrtime() - threadflow->tf_stime);

#ifdef HAVE_PROCFS
	if ((filebench_shm->shm_mmode & FILEBENCH_MODE_NOUSAGE) == 0) {
		if ((pread(threadflow->tf_lwpusagefd, &threadflow->tf_eusage,
		    sizeof (struct prusage), 0)) != sizeof (struct prusage))
			filebench_log(LOG_ERROR, "cannot read /proc");

		t = TIMESPEC_TO_HRTIME(threadflow->tf_susage.pr_utime,
		    threadflow->tf_eusage.pr_utime) +
		    TIMESPEC_TO_HRTIME(threadflow->tf_susage.pr_ttime,
		    threadflow->tf_eusage.pr_ttime) +
		    TIMESPEC_TO_HRTIME(threadflow->tf_susage.pr_stime,
		    threadflow->tf_eusage.pr_stime);
		flowop->fo_stats.fs_mstate[FLOW_MSTATE_CPU] += t;

		flowop->fo_stats.fs_mstate[FLOW_MSTATE_WAIT] +=
		    TIMESPEC_TO_HRTIME(threadflow->tf_susage.pr_tftime,
		    threadflow->tf_eusage.pr_tftime) +
		    TIMESPEC_TO_HRTIME(threadflow->tf_susage.pr_dftime,
		    threadflow->tf_eusage.pr_dftime) +
		    TIMESPEC_TO_HRTIME(threadflow->tf_susage.pr_kftime,
		    threadflow->tf_eusage.pr_kftime) +
		    TIMESPEC_TO_HRTIME(threadflow->tf_susage.pr_slptime,
		    threadflow->tf_eusage.pr_slptime);
	}
#endif

	flowop->fo_stats.fs_count++;
	flowop->fo_stats.fs_bytes += bytes;
	(void) ipc_mutex_lock(&controlstats_lock);
	if ((flowop->fo_type & FLOW_TYPE_IO) ||
	    (flowop->fo_type & FLOW_TYPE_AIO)) {
		controlstats.fs_count++;
		controlstats.fs_bytes += bytes;
	}
	if (flowop->fo_attrs & FLOW_ATTR_READ) {
		threadflow->tf_stats.fs_rbytes += bytes;
		threadflow->tf_stats.fs_rcount++;
		flowop->fo_stats.fs_rcount++;
		controlstats.fs_rbytes += bytes;
		controlstats.fs_rcount++;
	} else if (flowop->fo_attrs & FLOW_ATTR_WRITE) {
		threadflow->tf_stats.fs_wbytes += bytes;
		threadflow->tf_stats.fs_wcount++;
		flowop->fo_stats.fs_wcount++;
		controlstats.fs_wbytes += bytes;
		controlstats.fs_wcount++;
	}
	(void) ipc_mutex_unlock(&controlstats_lock);
}
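/*
 * TIMESPEC_TO_HRTIME() above converts a pair of prusage timestamps into an
 * elapsed time in nanoseconds. A plausible definition, shown only to make
 * the arithmetic concrete; the actual macro is defined elsewhere in the
 * source and may differ in detail, hence the _SKETCH suffix:
 */
#define	TIMESPEC_TO_HRTIME_SKETCH(s, e)					\
	((((e).tv_sec - (s).tv_sec) * 1000000000LL) +			\
	    ((e).tv_nsec - (s).tv_nsec))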
/* * The producer side of the event system. * Once eventgen_hz has been set by eventgen_setrate(), * the routine sends eventgen_hz events per second until * the program terminates. Events are posted by incrementing * filebench_shm->shm_eventgen_q by the number of generated * events then signalling the condition variable * filebench_shm->shm_eventgen_cv to indicate to event consumers * that more events are available. * * Eventgen_thread attempts to sleep for 10 event periods, * then, once awakened, determines how many periods actually * passed since sleeping, and issues a set of events equal * to the number of periods that it slept, thus keeping the * average rate at the requested rate. */ static void eventgen_thread(void) { hrtime_t last; last = gethrtime(); filebench_shm->shm_eventgen_enabled = FALSE; /* CONSTCOND */ while (1) { struct timespec sleeptime; hrtime_t delta; int count, rate; if (filebench_shm->shm_eventgen_hz == NULL) { (void) sleep(1); continue; } else { rate = avd_get_int(filebench_shm->shm_eventgen_hz); if (rate > 0) { filebench_shm->shm_eventgen_enabled = TRUE; } else { continue; } } /* Sleep for 10xperiod */ sleeptime.tv_sec = 0; sleeptime.tv_nsec = FB_SEC2NSEC / rate; sleeptime.tv_nsec *= 10; if (sleeptime.tv_nsec < 1000UL) sleeptime.tv_nsec = 1000UL; sleeptime.tv_sec = sleeptime.tv_nsec / FB_SEC2NSEC; if (sleeptime.tv_sec > 0) sleeptime.tv_nsec -= (sleeptime.tv_sec * FB_SEC2NSEC); (void) nanosleep(&sleeptime, NULL); delta = gethrtime() - last; last = gethrtime(); count = (rate * delta) / FB_SEC2NSEC; filebench_log(LOG_DEBUG_SCRIPT, "delta %llums count %d", (u_longlong_t)(delta / 1000000), count); /* Send 'count' events */ (void) ipc_mutex_lock(&filebench_shm->shm_eventgen_lock); /* Keep the producer with a max of 5 second depth */ if (filebench_shm->shm_eventgen_q < (5 * rate)) filebench_shm->shm_eventgen_q += count; (void) pthread_cond_signal(&filebench_shm->shm_eventgen_cv); (void) ipc_mutex_unlock(&filebench_shm->shm_eventgen_lock); } }
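/*
 * A worked example of the pacing arithmetic in eventgen_thread(), with
 * illustrative values (the function name and numbers are not from the
 * original source): at rate = 100 events/s the period is 10 ms, the thread
 * asks to sleep roughly 10 periods, and the number of events posted is
 * derived from the time that actually elapsed.
 */
static void
eventgen_pacing_example(void)
{
	int rate = 100;					/* events per second */
	hrtime_t period = FB_SEC2NSEC / rate;		/* 10,000,000 ns */
	hrtime_t requested = 10 * period;		/* ~100 ms sleep */
	hrtime_t delta = 123000000LL;			/* suppose 123 ms passed */
	int count = (rate * delta) / FB_SEC2NSEC;	/* 12 events posted */

	(void) requested;
	(void) count;
}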
/* * Calls flowop_define_common() to allocate and initialize a * flowop, and holds the shared flowop_lock during the call. * It releases the created flowop's fo_lock when done. */ flowop_t * flowop_define(threadflow_t *threadflow, char *name, flowop_t *inherit, flowop_t **flowoplist_hdp, int instance, int type) { flowop_t *flowop; (void) ipc_mutex_lock(&filebench_shm->shm_flowop_lock); flowop = flowop_define_common(threadflow, name, inherit, flowoplist_hdp, instance, type); (void) ipc_mutex_unlock(&filebench_shm->shm_flowop_lock); if (flowop == NULL) return (NULL); (void) ipc_mutex_unlock(&flowop->fo_lock); return (flowop); }
/* * Searches through the master fileset list for the named fileset. * If found, returns pointer to same, otherwise returns NULL. */ fileset_t * fileset_find(char *name) { fileset_t *fileset = filebench_shm->filesetlist; (void) ipc_mutex_lock(&filebench_shm->fileset_lock); while (fileset) { if (strcmp(name, fileset->fs_name) == 0) { (void) ipc_mutex_unlock(&filebench_shm->fileset_lock); return (fileset); } fileset = fileset->fs_next; } (void) ipc_mutex_unlock(&filebench_shm->fileset_lock); return (NULL); }
/* * Waits till all threadflows are started, or a timeout occurs. * Checks through the list of threadflows, waiting up to 10 * seconds for each one to set its tf_running flag to 1. If not * set after 10 seconds, continues on to the next threadflow * anyway. */ void threadflow_allstarted(pid_t pid, threadflow_t *threadflow) { (void) ipc_mutex_lock(&filebench_shm->shm_threadflow_lock); while (threadflow) { int waits; if ((threadflow->tf_instance == 0) || (threadflow->tf_instance == FLOW_MASTER)) { threadflow = threadflow->tf_next; continue; } filebench_log(LOG_DEBUG_IMPL, "Checking pid %d thread %s-%d", pid, threadflow->tf_name, threadflow->tf_instance); waits = 10; while (waits && (threadflow->tf_running == 0) && (filebench_shm->shm_f_abort == 0)) { (void) ipc_mutex_unlock( &filebench_shm->shm_threadflow_lock); if (waits < 3) filebench_log(LOG_INFO, "Waiting for pid %d thread %s-%d", pid, threadflow->tf_name, threadflow->tf_instance); (void) sleep(1); (void) ipc_mutex_lock( &filebench_shm->shm_threadflow_lock); waits--; } threadflow = threadflow->tf_next; } (void) ipc_mutex_unlock(&filebench_shm->shm_threadflow_lock); }
/* * Calls flowop_define_common() to allocate and initialize a * composite flowop, and holds the shared flowop_lock during the call. * It releases the created flowop's fo_lock when done. */ flowop_t * flowop_new_composite_define(char *name) { flowop_t *flowop; (void) ipc_mutex_lock(&filebench_shm->shm_flowop_lock); flowop = flowop_define_common(NULL, name, NULL, NULL, 0, FLOW_TYPE_COMPOSITE); (void) ipc_mutex_unlock(&filebench_shm->shm_flowop_lock); if (flowop == NULL) return (NULL); flowop->fo_func = flowop_composite; flowop->fo_init = flowop_composite_init; flowop->fo_destruct = flowop_composite_destruct; (void) ipc_mutex_unlock(&flowop->fo_lock); return (flowop); }
/* * Composite flowop initialization. Creates runtime inner flowops * from prototype inner flowops. */ static int flowop_composite_init(flowop_t *flowop) { int err; err = flowop_create_runtime_flowops(flowop->fo_thread, &flowop->fo_comp_fops); if (err != FILEBENCH_OK) return (err); (void) ipc_mutex_unlock(&flowop->fo_lock); return (0); }
void flowop_destruct_generic(flowop_t *flowop) { char *buf; /* release any local resources held by the flowop */ (void) ipc_mutex_lock(&flowop->fo_lock); buf = flowop->fo_buf; flowop->fo_buf = NULL; (void) ipc_mutex_unlock(&flowop->fo_lock); if (buf) free(buf); }
/* * Searches the provided threadflow list for the named threadflow. * A pointer to the threadflow is returned, or NULL if threadflow * is not found. */ threadflow_t * threadflow_find(threadflow_t *threadlist, char *name) { threadflow_t *threadflow = threadlist; (void) ipc_mutex_lock(&filebench_shm->threadflow_lock); while (threadflow) { if (strcmp(name, threadflow->tf_name) == 0) { (void) ipc_mutex_unlock( &filebench_shm->threadflow_lock); return (threadflow); } threadflow = threadflow->tf_next; } (void) ipc_mutex_unlock(&filebench_shm->threadflow_lock); return (NULL); }
/* * Returns a pointer to flowop named "name" from the supplied tf_thrd_fops * list of flowops. Returns the named flowop if found, or NULL. */ flowop_t * flowop_find_from_list(char *name, flowop_t *list) { flowop_t *found_flowop; flowop_find_barrier(); (void) ipc_mutex_lock(&filebench_shm->shm_flowop_lock); found_flowop = flowop_recurse_search(NULL, name, list); (void) ipc_mutex_unlock(&filebench_shm->shm_flowop_lock); return (found_flowop); }
/* * Deletes shared memory region and resets shared memory region * information in filebench_shm. */ void ipc_ismdelete(void) { if (filebench_shm->shm_id == -1) return; filebench_log(LOG_VERBOSE, "Deleting ISM..."); (void) ipc_mutex_lock(&filebench_shm->ism_lock); (void) shmctl(filebench_shm->shm_id, IPC_RMID, 0); filebench_shm->shm_ptr = (char *)filebench_shm->shm_addr; filebench_shm->shm_id = -1; filebench_shm->shm_allocated = 0; (void) ipc_mutex_unlock(&filebench_shm->ism_lock); }
struct posset * posset_find(char *name) { struct posset *ps; (void)ipc_mutex_lock(&filebench_shm->shm_posset_lock); ps = filebench_shm->shm_possetlist; while (ps) { if (!strcmp(avd_get_str(ps->ps_name), name)) break; ps = ps->ps_next; } (void)ipc_mutex_unlock(&filebench_shm->shm_posset_lock); return ps; }
/* * Iterates over all the file sets in the filesetlist, * executing the supplied command "*cmd()" on them. Also * indicates to the executed command if it is the first * time the command has been executed since the current * call to fileset_iter. */ void fileset_iter(int (*cmd)(fileset_t *fileset, int first)) { fileset_t *fileset = filebench_shm->filesetlist; int count = 0; (void) ipc_mutex_lock(&filebench_shm->fileset_lock); while (fileset) { cmd(fileset, count == 0); fileset = fileset->fs_next; count++; } (void) ipc_mutex_unlock(&filebench_shm->fileset_lock); }
/*
 * Create an in-memory FLOW_MASTER thread object as described
 * by the syntax. Acquire the filebench_shm->threadflow_lock and
 * call threadflow_define_common() to create a threadflow entity.
 * Set the number of instances to create at runtime,
 * tf_instances, to "instances". Return the threadflow pointer
 * returned by the threadflow_define_common call.
 */
threadflow_t *
threadflow_define(procflow_t *procflow, char *name,
    threadflow_t *inherit, var_integer_t instances)
{
	threadflow_t *threadflow;

	(void) ipc_mutex_lock(&filebench_shm->threadflow_lock);

	if ((threadflow = threadflow_define_common(procflow, name,
	    inherit, FLOW_MASTER)) == NULL) {
		(void) ipc_mutex_unlock(&filebench_shm->threadflow_lock);
		return (NULL);
	}

	threadflow->tf_instances = instances;

	(void) ipc_mutex_unlock(&filebench_shm->threadflow_lock);

	return (threadflow);
}
/* * Limited functionality allocator for use by custom variables to allocate * state. */ void *ipc_cvar_heapalloc(size_t size) { void *memory; (void) ipc_mutex_lock(&filebench_shm->shm_malloc_lock); if ((filebench_shm->shm_cvar_heapsize + size) <= FILEBENCH_CVAR_HEAPSIZE) { memory = filebench_shm->shm_cvar_heap + filebench_shm->shm_cvar_heapsize; filebench_shm->shm_cvar_heapsize += size; } else memory = NULL; (void) ipc_mutex_unlock(&filebench_shm->shm_malloc_lock); return memory; }
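/*
 * A sketch of how a custom variable implementation might use
 * ipc_cvar_heapalloc() for its per-handle state; the state struct and
 * function name are hypothetical, not part of the cvar library API.
 * Allocating from the shared heap keeps the handle visible to all
 * filebench processes.
 */
struct example_cvar_state {
	double	last_value;
	fbint_t	calls;
};

static struct example_cvar_state *
example_cvar_alloc_state(void)
{
	struct example_cvar_state *state;

	state = ipc_cvar_heapalloc(sizeof (*state));
	if (state == NULL)
		return (NULL);		/* shared cvar heap exhausted */

	state->last_value = 0.0;
	state->calls = 0;
	return (state);
}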
/* * XXX No check is made for out-of-memory condition */ char * ipc_ismmalloc(size_t size) { char *allocstr; (void) ipc_mutex_lock(&filebench_shm->shm_ism_lock); /* Map in shared memory */ (void) ipc_ismattach(); allocstr = filebench_shm->shm_ptr; filebench_shm->shm_ptr += size; filebench_shm->shm_allocated += size; (void) ipc_mutex_unlock(&filebench_shm->shm_ism_lock); return (allocstr); }
/* * Create a pool of shared memory to fit the per-thread * allocations. Uses shmget() to create a shared memory region * of size "size", attaches to it using shmat(), and stores * the returned address of the region in filebench_shm->shm_addr. * The pool is only created on the first call. The routine * returns 0 if successful or the pool already exists, * -1 otherwise. */ int ipc_ismcreate(size_t size) { #ifdef HAVE_SHM_SHARE_MMU int flag = SHM_SHARE_MMU; #else int flag = 0; #endif /* HAVE_SHM_SHARE_MMU */ /* Already done? */ if (filebench_shm->shm_id != -1) return (0); filebench_log(LOG_VERBOSE, "Creating %zd bytes of ISM Shared Memory...", size); if ((filebench_shm->shm_id = shmget(0, size, IPC_CREAT | 0666)) == -1) { filebench_log(LOG_ERROR, "Failed to create %zd bytes of ISM shared memory (ret = %d)", size, errno); return (-1); } if ((filebench_shm->shm_addr = (caddr_t)shmat(filebench_shm->shm_id, 0, flag)) == (void *)-1) { filebench_log(LOG_ERROR, "Failed to attach %zd bytes of created ISM shared memory", size); return (-1); } filebench_shm->shm_ptr = (char *)filebench_shm->shm_addr; filebench_log(LOG_VERBOSE, "Allocated %zd bytes of ISM Shared Memory... at %zx", size, filebench_shm->shm_addr); /* Locked until allocated to block allocs */ (void) ipc_mutex_unlock(&filebench_shm->shm_ism_lock); return (0); }
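/*
 * A sketch of the intended pairing (sizes and helper name illustrative):
 * reserve the ISM pool once with ipc_ismcreate(), then satisfy individual
 * allocations from it with ipc_ismmalloc().
 */
static char *
ism_alloc_sketch(size_t poolsize, size_t bufsize)
{
	if (ipc_ismcreate(poolsize) != 0)
		return (NULL);		/* pool creation failed */

	/* NOTE: ipc_ismmalloc() does not check for pool exhaustion */
	return (ipc_ismmalloc(bufsize));
}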
int ioctl_mutex_unlock(struct ipc_driver *drv, unsigned long arg) { int error = 0; struct ipc_unlock_t ipc_param; if(copy_from_user(&ipc_param, (void *)arg, sizeof(struct ipc_unlock_t))) { err_msg(err_trace, "%s(): Error in copy_from_user()\n", __FUNCTION__); error = -EFAULT; goto do_exit; } error = ipc_mutex_unlock( drv, &ipc_param ); if(error < 0) { err_msg(err_trace, "%s(): Error in ipc_mutex_unlock()\n", __FUNCTION__); goto do_exit; } do_exit: return error; }
/* * Given a pointer to the thread list of a procflow, cycles * through all the threadflows on the list, deleting each one * except the FLOW_MASTER. */ void threadflow_delete_all(threadflow_t **threadlist) { threadflow_t *threadflow = *threadlist; (void) ipc_mutex_lock(&filebench_shm->threadflow_lock); filebench_log(LOG_DEBUG_IMPL, "Deleting all threads"); while (threadflow) { if (threadflow->tf_instance && (threadflow->tf_instance == FLOW_MASTER)) { threadflow = threadflow->tf_next; continue; } (void) threadflow_delete(threadlist, threadflow); threadflow = threadflow->tf_next; } (void) ipc_mutex_unlock(&filebench_shm->threadflow_lock); }
void *
thread_routine(void *arg)
{
	pthread_t p;
	long tid;
	int ret = 0;

	p = pthread_self();
	tid = ((struct ipc_pthread_private *)p)->tid;

	printf("I am tid:%lx\n", tid);
	printf("Will now get mutex %p\n", mtx);

	ret = ipc_mutex_lock(mtx, NULL, 0);
	if (ret != 0) {
		printf("Ok I returned with %d err:%d - done\n", ret, errno);
		pthread_exit(&ret);
	}

	printf("I:%lx now hold the lock, hit return when you want to release it\n",
	    tid);

	ret = ipc_mutex_unlock(mtx, NULL);
	printf("Released\n");

	pthread_exit(&ret);
	return (NULL);
}
/* * Returns a pointer to the specified instance of flowop * "name" from the global list. */ flowop_t * flowop_find_one(char *name, int instance) { flowop_t *test_flowop; flowop_find_barrier(); (void) ipc_mutex_lock(&filebench_shm->shm_flowop_lock); test_flowop = filebench_shm->shm_flowoplist; while (test_flowop) { if ((strcmp(name, test_flowop->fo_name) == 0) && (instance == test_flowop->fo_instance)) break; test_flowop = test_flowop->fo_next; } (void) ipc_mutex_unlock(&filebench_shm->shm_flowop_lock); return (test_flowop); }
/* * Deletes all the flowops from a flowop list. */ void flowop_delete_all(flowop_t **flowoplist) { flowop_t *flowop = *flowoplist; flowop_t *next_flowop; (void) ipc_mutex_lock(&filebench_shm->shm_flowop_lock); while (flowop) { filebench_log(LOG_DEBUG_IMPL, "Deleting flowop (%s-%d)", flowop->fo_name, flowop->fo_instance); if (flowop->fo_instance && (flowop->fo_instance == FLOW_MASTER)) { flowop = flowop->fo_exec_next; continue; } next_flowop = flowop->fo_exec_next; flowop_delete(flowoplist, flowop); flowop = next_flowop; } (void) ipc_mutex_unlock(&filebench_shm->shm_flowop_lock); }