/*
 * Deletes the specified threadflow from the specified threadflow
 * list after first terminating the threadflow's thread, deleting
 * the threadflow's flowops, and finally freeing the threadflow
 * entity. It also subtracts the threadflow's shared memory
 * requirements from the total amount required, shm_required. If
 * the specified threadflow is found, returns 0, otherwise
 * returns -1.
 */
static int
threadflow_delete(threadflow_t **threadlist, threadflow_t *threadflow,
    int wait_cnt)
{
	threadflow_t *entry = *threadlist;

	filebench_log(LOG_DEBUG_IMPL, "Deleting thread: (%s-%d)",
	    threadflow->tf_name,
	    threadflow->tf_instance);

	if (threadflow->tf_attrs & THREADFLOW_USEISM) {
		/* Give back the ISM memory this thread had reserved. */
		filebench_shm->shm_required -= (*threadflow->tf_memsize);
	}

	if (threadflow == *threadlist) {
		/* First on list */
		filebench_log(LOG_DEBUG_IMPL, "Deleted thread: (%s-%d)",
		    threadflow->tf_name,
		    threadflow->tf_instance);

		threadflow_kill(threadflow, wait_cnt);
		flowop_delete_all(&threadflow->tf_ops);
		*threadlist = threadflow->tf_next;
		ipc_free(FILEBENCH_THREADFLOW, (char *)threadflow);
		return (0);
	}

	while (entry->tf_next) {
		filebench_log(LOG_DEBUG_IMPL,
		    "Delete thread: (%s-%d) == (%s-%d)",
		    entry->tf_next->tf_name,
		    entry->tf_next->tf_instance,
		    threadflow->tf_name,
		    threadflow->tf_instance);

		if (threadflow == entry->tf_next) {
			/* Delete */
			filebench_log(LOG_DEBUG_IMPL,
			    "Deleted thread: (%s-%d)",
			    entry->tf_next->tf_name,
			    entry->tf_next->tf_instance);

			threadflow_kill(entry->tf_next, wait_cnt);
			flowop_delete_all(&entry->tf_next->tf_ops);
			/*
			 * BUG FIX: unlink the node from the list BEFORE
			 * freeing it. The previous code called ipc_free()
			 * first and then dereferenced entry->tf_next
			 * (== threadflow) to read its tf_next field — a
			 * use-after-free.
			 */
			entry->tf_next = threadflow->tf_next;
			ipc_free(FILEBENCH_THREADFLOW, (char *)threadflow);
			return (0);
		}
		entry = entry->tf_next;
	}

	/* Threadflow was not found on the supplied list. */
	return (-1);
}
/* Free a semaphore set. */ static void freeary (int id) { struct sem_array *sma; struct sem_undo *un; struct sem_queue *q; int size; sma = sem_rmid(id); /* Invalidate the existing undo structures for this semaphore set. * (They will be freed without any further action in sem_exit() * or during the next semop.) */ for (un = sma->undo; un; un = un->id_next) un->semid = -1; /* Wake up all pending processes and let them fail with EIDRM. */ for (q = sma->sem_pending; q; q = q->next) { q->status = -EIDRM; q->prev = NULL; wake_up_process(q->sleeper); /* doesn't sleep */ } sem_write_unlock(id); used_sems -= sma->sem_nsems; size = sizeof (*sma) + sma->sem_nsems * sizeof (struct sem); ipc_free(sma, size); }
/* * Return 0 on success and a non zero error code on failure. */ static int alloc_cvar_lib_info(const char *filename) { int ret = -1; cvar_library_info_t *cli = NULL; cvar_library_info_t *t; cli = (cvar_library_info_t *) ipc_malloc(FILEBENCH_CVAR_LIB_INFO); if (!cli) goto out; cli->filename = ipc_stralloc(filename); if (!cli->filename) goto out; cli->type = ipc_stralloc(gettype(filename)); if (!cli->type) goto out; cli->next = NULL; if (filebench_shm->shm_cvar_lib_info_list) { for (t = filebench_shm->shm_cvar_lib_info_list; t->next != NULL; t = t->next); /* Seek to the last entry. */ cli->index = t->index + 1; t->next = cli; } else { cli->index = 0; filebench_shm->shm_cvar_lib_info_list = cli; } ret = 0; out: if (ret && cli) { /* NOTE: There is no mechanism to free cli->filename and cli->type. */ ipc_free(FILEBENCH_CVAR_LIB_INFO, (char *) cli); } return ret; }
/*
 * Allocate and register a new semaphore array of nsems semaphores.
 * Returns the ipc id on success or a negative errno on failure.
 */
static int newary (key_t key, int nsems, int semflg)
{
	struct sem_array *set;
	int bytes;
	int slot;

	if (!nsems)
		return -EINVAL;
	if (used_sems + nsems > sc_semmns)
		return -ENOSPC;

	/* Header plus the per-semaphore entries, allocated as one chunk. */
	bytes = sizeof (*set) + nsems * sizeof (struct sem);
	set = (struct sem_array *) ipc_alloc(bytes);
	if (!set) {
		return -ENOMEM;
	}
	memset (set, 0, bytes);

	slot = ipc_addid(&sem_ids, &set->sem_perm, sc_semmni);
	if (slot == -1) {
		ipc_free(set, bytes);
		return -ENOSPC;
	}
	used_sems += nsems;

	set->sem_perm.mode = (semflg & S_IRWXUGO);
	set->sem_perm.key = key;

	/* The sem entries live immediately after the header. */
	set->sem_base = (struct sem *) &set[1];
	/* set->sem_pending = NULL; */
	set->sem_pending_last = &set->sem_pending;
	/* set->undo = NULL; */
	set->sem_nsems = nsems;
	set->sem_ctime = CURRENT_TIME;
	sem_unlock(slot);

	return sem_buildid(slot, set->sem_perm.seq);
}
/*
 * Handle the semctl() commands that operate on a locked semaphore set:
 * GETALL/SETALL, IPC_STAT, and the single-semaphore commands
 * (GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL).
 *
 * The set is locked on entry via sem_lock(); note that GETALL/SETALL
 * may drop the lock to allocate a user-sized buffer and then must
 * re-validate the set with sem_revalidate() before touching it again.
 */
static int semctl_main(int semid, int semnum, int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	struct sem* curr;
	int err;
	/* Small on-stack buffer avoids an allocation for typical sets. */
	ushort fast_sem_io[SEMMSL_FAST];
	ushort* sem_io = fast_sem_io;
	int nsems;

	sma = sem_lock(semid);
	if(sma==NULL)
		return -EINVAL;

	nsems = sma->sem_nsems;

	err=-EIDRM;
	if (sem_checkid(sma,semid))
		goto out_unlock;

	/* SETVAL/SETALL need write permission, the rest only read. */
	err = -EACCES;
	if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
		goto out_unlock;

	switch (cmd) {
	case GETALL:
	{
		ushort *array = arg.array;
		int i;

		if(nsems > SEMMSL_FAST) {
			/*
			 * Buffer too big for the stack: drop the lock to
			 * allocate, then re-validate the set afterwards.
			 */
			sem_unlock(semid);
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL)
				return -ENOMEM;
			err = sem_revalidate(semid, sma, nsems, S_IRUGO);
			if(err)
				goto out_free;
		}

		/* Snapshot all values under the lock, copy out after unlock. */
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(semid);
		err = 0;
		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		/* Copy-in may fault: never hold the lock across it. */
		sem_unlock(semid);

		if(nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL)
				return -ENOMEM;
		}

		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
			err = -EFAULT;
			goto out_free;
		}

		/* Reject out-of-range values before touching the set. */
		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				err = -ERANGE;
				goto out_free;
			}
		}
		/* Re-lock and confirm the set still exists with write perms. */
		err = sem_revalidate(semid, sma, nsems, S_IWUGO);
		if(err)
			goto out_free;

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];
		/* Setting values explicitly cancels all pending undo adjustments. */
		for (un = sma->undo; un; un = un->id_next)
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		sma->sem_ctime = CURRENT_TIME;
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	case IPC_STAT:
	{
		struct semid64_ds tbuf;
		memset(&tbuf,0,sizeof(tbuf));
		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime = sma->sem_otime;
		tbuf.sem_ctime = sma->sem_ctime;
		tbuf.sem_nsems = sma->sem_nsems;
		sem_unlock(semid);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return 0;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
	}

	/* All remaining commands address a single semaphore: bounds-check it. */
	err = -EINVAL;
	if(semnum < 0 || semnum >= nsems)
		goto out_unlock;

	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid & 0xffff;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma,semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma,semnum);
		goto out_unlock;
	case SETVAL:
	{
		int val = arg.val;
		struct sem_undo *un;
		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out_unlock;

		/* A direct set cancels undo adjustments for this semaphore. */
		for (un = sma->undo; un; un = un->id_next)
			un->semadj[semnum] = 0;
		curr->semval = val;
		curr->sempid = current->tgid;
		sma->sem_ctime = CURRENT_TIME;
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	}
out_unlock:
	sem_unlock(semid);
out_free:
	/* Only free if we outgrew the on-stack buffer. */
	if(sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}
/*
 * Delete the designated flowop from the thread's flowop list.
 *
 * The flowop is first unlinked from the per-thread execution list
 * (fo_exec_next chain), then unlinked from the global flowop list
 * (fo_next chain). The flowop entity itself is only returned to the
 * IPC pool if it was found on the GLOBAL list; if it was not found
 * there, the (possibly already thread-unlinked) flowop is leaked and
 * a debug message is logged.
 */
static void
flowop_delete(flowop_t **flowoplist, flowop_t *flowop)
{
	flowop_t *entry = *flowoplist;
	int found = 0;	/* set once the flowop is unlinked from the global list */

	filebench_log(LOG_DEBUG_IMPL, "Deleting flowop (%s-%d)",
	    flowop->fo_name,
	    flowop->fo_instance);

	/* Delete from thread's flowop list */
	if (flowop == *flowoplist) {
		/* First on list */
		*flowoplist = flowop->fo_exec_next;
		filebench_log(LOG_DEBUG_IMPL,
		    "Delete0 flowop: (%s-%d)",
		    flowop->fo_name,
		    flowop->fo_instance);
	} else {
		/* Scan the exec chain for the node whose successor is flowop. */
		while (entry->fo_exec_next) {
			filebench_log(LOG_DEBUG_IMPL,
			    "Delete0 flowop: (%s-%d) == (%s-%d)",
			    entry->fo_exec_next->fo_name,
			    entry->fo_exec_next->fo_instance,
			    flowop->fo_name,
			    flowop->fo_instance);

			if (flowop == entry->fo_exec_next) {
				/* Delete */
				filebench_log(LOG_DEBUG_IMPL,
				    "Deleted0 flowop: (%s-%d)",
				    entry->fo_exec_next->fo_name,
				    entry->fo_exec_next->fo_instance);
				entry->fo_exec_next =
				    entry->fo_exec_next->fo_exec_next;
				break;
			}
			entry = entry->fo_exec_next;
		}
	}

#ifdef HAVE_PROC_PID_LWP
	/* Close /proc stats */
	if (flowop->fo_thread)
		(void) close(flowop->fo_thread->tf_lwpusagefd);
#endif

	/* Delete from global list */
	entry = filebench_shm->shm_flowoplist;

	if (flowop == filebench_shm->shm_flowoplist) {
		/* First on list */
		filebench_shm->shm_flowoplist = flowop->fo_next;
		found = 1;
	} else {
		/* Same predecessor scan as above, but on the fo_next chain. */
		while (entry->fo_next) {
			filebench_log(LOG_DEBUG_IMPL,
			    "Delete flowop: (%s-%d) == (%s-%d)",
			    entry->fo_next->fo_name,
			    entry->fo_next->fo_instance,
			    flowop->fo_name,
			    flowop->fo_instance);

			if (flowop == entry->fo_next) {
				/* Delete */
				entry->fo_next = entry->fo_next->fo_next;
				found = 1;
				break;
			}

			entry = entry->fo_next;
		}
	}

	if (found) {
		filebench_log(LOG_DEBUG_IMPL,
		    "Deleted flowop: (%s-%d)",
		    flowop->fo_name,
		    flowop->fo_instance);
		ipc_free(FILEBENCH_FLOWOP, (char *)flowop);
	} else {
		/* Not on the global list: nothing is freed. */
		filebench_log(LOG_DEBUG_IMPL, "Flowop %s-%d not found!",
		    flowop->fo_name,
		    flowop->fo_instance);
	}
}
/*
 * Terminates the application with the given exit code, releasing the
 * IPC instance first so its resources are cleaned up before exit.
 * Never returns.
 */
void _gnuc_noreturn
vifm_exit(int exit_code)
{
	ipc_free(curr_stats.ipc);
	exit(exit_code);
}
/*
 * daliserver entry point.
 *
 * Parses options, configures logging, optionally daemonizes, then
 * brings up the dispatch queue, the USB DALI connection (unless in
 * dry-run mode), the network server, and the shutdown-notifier IPC
 * socket — in that order. Tear-down happens in strict reverse order
 * as the nested blocks unwind, so each resource is only released if
 * it was successfully acquired. Returns 0 on success, -1 on any
 * initialization failure.
 */
int main(int argc, char *const argv[]) {
	int error = 0;

	log_debug("Parsing options");
	Options *opts = parse_opt(argc, argv);
	if (!opts) {
		/* Bad/missing options: show usage and bail out. */
		show_banner();
		show_help();
		return -1;
	}
	log_set_level(opts->loglevel);
	if (opts->logfile) {
		log_set_logfile(opts->logfile);
		log_set_logfile_level(LOG_LEVEL_MAX);
	}
#ifdef HAVE_VSYSLOG
	if (opts->syslog) {
		log_set_syslog("daliserver");
	}
#endif
	if (opts->background) {
		daemonize(opts->pidfile);
	} else {
		show_banner();
	}
	log_info("Starting daliserver");

	log_debug("Initializing dispatch queue");
	DispatchPtr dispatch = dispatch_new();
	if (!dispatch) {
		error = -1;
	} else {
		//dispatch_set_timeout(dispatch, 100);
		UsbDaliPtr usb = NULL;
		if (!opts->dryrun) {
			log_debug("Initializing USB connection");
			usb = usbdali_open(NULL, dispatch, opts->usbbus, opts->usbdev);
			if (!usb) {
				error = -1;
			}
		}
		/* In dry-run mode we proceed without a USB device. */
		if (opts->dryrun || usb) {
			log_debug("Initializing server");
			ServerPtr server = server_open(dispatch, opts->address, opts->port, DEFAULT_NET_FRAMESIZE, net_frame_handler, usb);
			if (!server) {
				error = -1;
			} else {
				server_set_connection_destroy_callback(server, net_dequeue_connection, usb);
				if (usb) {
					usbdali_set_outband_callback(usb, dali_outband_handler, server);
					usbdali_set_inband_callback(usb, dali_inband_handler);
				}
				log_debug("Creating shutdown notifier");
				/* killsocket lets the signal handler wake the event loop. */
				killsocket = ipc_new();
				if (!killsocket) {
					error = -1;
				} else {
					ipc_register(killsocket, dispatch);
					log_info("Server ready, waiting for events");
					running = 1;
					signal(SIGTERM, signal_handler);
					signal(SIGINT, signal_handler);
					/* Main event loop: runs until a signal clears 'running'
					 * or the dispatcher reports failure. */
					while (running && dispatch_run(dispatch, usbdali_get_timeout(usb)));
					log_info("Shutting daliserver down");
					ipc_free(killsocket);
				}
				server_close(server);
			}
			if (usb) {
				usbdali_close(usb);
			}
		}
		dispatch_free(dispatch);
	}
	free_opt(opts);

	log_info("Exiting");
	return error;
}
/*
 * Allocate a new posset in the IPC region, populate its positions
 * according to its type, and link it onto the global posset list.
 * Returns the new posset, or NULL on any failure (allocation,
 * unsupported type, too many entries, or fill failure).
 */
struct posset *
posset_alloc(avd_t name, avd_t type, avd_t seed, avd_t max, avd_t entries)
{
	struct posset *ps;
	int ret;

	ps = (struct posset *)ipc_malloc(FILEBENCH_POSSET);
	if (!ps) {
		filebench_log(LOG_ERROR, "posset_alloc: "
		    "can't malloc posset in IPC region");
		return NULL;
	}

	/* we do not support any possets except "rnd" at the moment */
	if (strcmp(avd_get_str(type), "rnd") == 0) {
		ps->ps_type = avd_int_alloc(POSSET_TYPE_RND);
	} else if (strcmp(avd_get_str(type), "collection") == 0) {
		ps->ps_type = avd_int_alloc(POSSET_TYPE_COLLECTION);
	} else {
		filebench_log(LOG_ERROR, "posset_alloc: wrong posset type");
		goto fail;
	}

	ps->ps_name = name;
	ps->ps_rnd_seed = seed;
	ps->ps_rnd_max = max;
	ps->ps_entries = entries;

	if (avd_get_int(ps->ps_entries) > POSSET_MAX_ENTRIES) {
		filebench_log(LOG_ERROR, "posset_alloc: the number of posset "
		    "entries is too high");
		goto fail;
	}

	/* depending on the posset type generate (or load) positions */
	switch (avd_get_int(ps->ps_type)) {
	case (POSSET_TYPE_RND):
		ret = posset_rnd_fill(ps);
		break;
	case (POSSET_TYPE_COLLECTION):
		ret = posset_collection_fill(ps);
		break;
	default:
		/* defensive: ps_type was set to one of the two values above */
		filebench_log(LOG_ERROR, "posset_alloc: wrong posset type");
		goto fail;
	}

	if (ret < 0) {
		filebench_log(LOG_ERROR, "posset_alloc: could not fill posset");
		goto fail;
	}

	/*
	 * Prepend the posset to the global list. (When the list is empty
	 * this still leaves ps_next NULL, matching the empty-list case.)
	 */
	(void)ipc_mutex_lock(&filebench_shm->shm_posset_lock);
	ps->ps_next = filebench_shm->shm_possetlist;
	filebench_shm->shm_possetlist = ps;
	(void)ipc_mutex_unlock(&filebench_shm->shm_posset_lock);

	return ps;

fail:
	/* single cleanup site for every error path after allocation */
	ipc_free(FILEBENCH_POSSET, (char *)ps);
	return NULL;
}