extern jobacctinfo_t *jobacct_gather_stat_task(pid_t pid) { if (!plugin_polling || _jobacct_shutdown_test()) return NULL; else if (pid) { struct jobacctinfo *jobacct = NULL; struct jobacctinfo *ret_jobacct = NULL; ListIterator itr = NULL; _poll_data(0); slurm_mutex_lock(&task_list_lock); if (!task_list) { error("no task list created!"); goto error; } itr = list_iterator_create(task_list); while ((jobacct = list_next(itr))) { if (jobacct->pid == pid) break; } list_iterator_destroy(itr); if (jobacct == NULL) goto error; ret_jobacct = xmalloc(sizeof(struct jobacctinfo)); memcpy(ret_jobacct, jobacct, sizeof(struct jobacctinfo)); error: slurm_mutex_unlock(&task_list_lock); return ret_jobacct; } else { /* In this situation, we are just trying to get a * basis of information since we are not pollng. So * we will give a chance for processes to spawn before we * gather information. This should largely eliminate the * the chance of having /proc open when the tasks are * spawned, which would prevent a valid checkpoint/restart * with some systems */ _task_sleep(1); _poll_data(0); return NULL; } }
/*
 * Begin accounting for a new task.
 *
 * pid        - process id of the task (must be > 0)
 * jobacct_id - task/node identity recorded in the new entry
 * poll       - when 1, take an immediate sample after registration
 *
 * Returns SLURM_SUCCESS, or SLURM_ERROR on bad input / shutdown /
 * initialization failure.  On error the freshly created accounting
 * record is destroyed before returning.
 */
extern int jobacct_gather_add_task(pid_t pid, jobacct_id_t *jobacct_id,
				   int poll)
{
	struct jobacctinfo *jobacct;

	if (jobacct_gather_init() < 0)
		return SLURM_ERROR;

	/* Nothing to track when the plugin does no polling. */
	if (!plugin_polling)
		return SLURM_SUCCESS;

	if (_jobacct_shutdown_test())
		return SLURM_ERROR;

	jobacct = jobacctinfo_create(jobacct_id);

	slurm_mutex_lock(&task_list_lock);
	if ((pid <= 0) || !task_list) {
		if (pid <= 0)
			error("invalid pid given (%d) for task acct", pid);
		else
			error("no task list created!");
		slurm_mutex_unlock(&task_list_lock);
		jobacctinfo_destroy(jobacct);
		return SLURM_ERROR;
	}

	jobacct->pid = pid;
	memcpy(&jobacct->id, jobacct_id, sizeof(jobacct_id_t));
	/* min_cpu starts at zero; it is raised as samples arrive. */
	jobacct->min_cpu = 0;
	debug2("adding task %u pid %d on node %u to jobacct",
	       jobacct_id->taskid, pid, jobacct_id->nodeid);
	list_push(task_list, jobacct);
	slurm_mutex_unlock(&task_list_lock);

	/* Let the plugin register the task as well. */
	(*(ops.add_task))(pid, jobacct_id);

	if (poll == 1)
		_poll_data(1);

	return SLURM_SUCCESS;
}
/*
 * Accounting poll thread: sample task statistics each time the profile
 * timer signals, until shutdown is requested or profiling stops.
 */
static void *_watch_tasks(void *arg)
{
	const int type = PROFILE_TASK;

	/* Give processes a chance to spawn before polling starts.
	 * This largely avoids having /proc open while tasks are
	 * spawned, which would prevent a valid checkpoint/restart
	 * on some systems. */
	_task_sleep(1);

	while (!jobacct_shutdown && acct_gather_profile_running) {
		/* Sample, then sleep until the timer wakes us again. */
		_poll_data();

		slurm_mutex_lock(
			&acct_gather_profile_timer[type].notify_mutex);
		pthread_cond_wait(
			&acct_gather_profile_timer[type].notify,
			&acct_gather_profile_timer[type].notify_mutex);
		slurm_mutex_unlock(
			&acct_gather_profile_timer[type].notify_mutex);
	}

	return NULL;
}
extern jobacctinfo_t *jobacct_gather_remove_task(pid_t pid) { struct jobacctinfo *jobacct = NULL; ListIterator itr = NULL; if (!plugin_polling) return NULL; /* poll data one last time before removing task * mainly for updating energy consumption */ _poll_data(1); if (_jobacct_shutdown_test()) return NULL; slurm_mutex_lock(&task_list_lock); if (!task_list) { error("no task list created!"); goto error; } itr = list_iterator_create(task_list); while((jobacct = list_next(itr))) { if (jobacct->pid == pid) { list_remove(itr); break; } } list_iterator_destroy(itr); if (jobacct) { debug2("removing task %u pid %d from jobacct", jobacct->max_vsize_id.taskid, jobacct->pid); } else { debug2("pid(%d) not being watched in jobacct!", pid); } error: slurm_mutex_unlock(&task_list_lock); return jobacct; }
static void *_watch_tasks(void *arg) { int type = PROFILE_TASK; #if HAVE_SYS_PRCTL_H if (prctl(PR_SET_NAME, "acctg", NULL, NULL, NULL) < 0) { error("%s: cannot set my name to %s %m", __func__, "acctg"); } #endif (void) pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); (void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); /* Give chance for processes to spawn before starting * the polling. This should largely eliminate the * the chance of having /proc open when the tasks are * spawned, which would prevent a valid checkpoint/restart * with some systems */ _task_sleep(1); while (_init_run_test() && !_jobacct_shutdown_test() && acct_gather_profile_test()) { /* Do this until shutdown is requested */ slurm_mutex_lock(&acct_gather_profile_timer[type].notify_mutex); slurm_cond_wait( &acct_gather_profile_timer[type].notify, &acct_gather_profile_timer[type].notify_mutex); slurm_mutex_unlock(&acct_gather_profile_timer[type]. notify_mutex); slurm_mutex_lock(&g_context_lock); /* The initial poll is done after the last task is added */ _poll_data(1); slurm_mutex_unlock(&g_context_lock); } return NULL; }
// return true if a sample is available bool AP_InertialSensor_L3GD20::_sample_available() { _poll_data(); // return (_sum_count >> _sample_shift) > 0; return (_sum_count) > 0; }
// return true if a sample is available bool AP_InertialSensor_LSM9DS0::_sample_available() { _poll_data(); return (_sum_count_g >> _sample_shift || _sum_count_xm >> _sample_shift) > 0; }
// return true if a sample is available bool AP_InertialSensor_MPU6000::sample_available() { _poll_data(); return (_count >> _sample_shift) > 0; }