/* ChapelBase.chpl:831 */
static void _waitEndCount(chpl___EndCount e, int64_t _ln, c_string _fn) {
  memory_order local_memory_order_acquire;
  memory_order local_memory_order_relaxed;
  chpl_task_list_p ret;
  _ref_atomic_int64 call_tmp = NULL;
  chpl_bool T;
  _ref_atomic_int_least64_t call_tmp2 = NULL;
  int64_t call_tmp3;
  chpl_bool call_tmp4;
  _ref_atomic_int_least64_t call_tmp5 = NULL;
  int64_t call_tmp6;
  chpl_bool call_tmp7;
  int64_t ret2;
  locale call_tmp8 = NULL;
  int32_t call_tmp9;
  chpl_localeID_t call_tmp10;
  _ref_chpl_localeID_t ret_to_arg_ref_tmp_ = NULL;
  chpl_localeID_t call_tmp11;
  locale call_tmp12 = NULL;
  _ref_atomic_int64 call_tmp13 = NULL;
  _ref_atomic_int_least64_t call_tmp14 = NULL;
  chpl_task_list_p ret3;
  local_memory_order_acquire = memory_order_acquire;
  local_memory_order_relaxed = memory_order_relaxed;
  // Run any tasks still queued on this end count's task list.
  ret = (e)->taskList;
  chpl_taskListExecute(ret, _ln, _fn);
  // Spin until the end count's atomic counter reaches zero, yielding to
  // other tasks on every iteration.
  call_tmp = &((e)->i);
  call_tmp2 = &((call_tmp)->_v);
  call_tmp3 = atomic_load_explicit_int_least64_t(call_tmp2, local_memory_order_relaxed);
  call_tmp4 = (call_tmp3 != INT64(0));
  T = call_tmp4;
  while (T) {
    chpl_task_yield();
    call_tmp5 = &((call_tmp)->_v);
    call_tmp6 = atomic_load_explicit_int_least64_t(call_tmp5, local_memory_order_relaxed);
    call_tmp7 = (call_tmp6 != INT64(0));
    T = call_tmp7;
  }
  atomic_thread_fence(local_memory_order_acquire);
  // Charge the completed tasks against this locale's running-task counter.
  ret2 = (e)->taskCnt;
  call_tmp9 = chpl_task_getRequestedSubloc();
  ret_to_arg_ref_tmp_ = &call_tmp10;
  chpl_buildLocaleID(chpl_nodeID, call_tmp9, ret_to_arg_ref_tmp_, _ln, _fn);
  call_tmp11 = chpl__initCopy_chpl_rt_localeID_t(call_tmp10);
  call_tmp12 = chpl_localeID_to_locale(&call_tmp11, _ln, _fn);
  call_tmp8 = call_tmp12;
  call_tmp13 = &((call_tmp8)->runningTaskCounter);
  call_tmp14 = &((call_tmp13)->_v);
  atomic_fetch_sub_explicit_int_least64_t(call_tmp14, ret2, local_memory_order_relaxed);
  ret3 = (e)->taskList;
  chpl_taskListFree(ret3, _ln, _fn);
  return;
}
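Stripped of the compiler temporaries, the wait loop above reduces to a spin-until-zero on the end count's atomic counter, yielding on every iteration and finishing with an acquire fence. The sketch below only illustrates that pattern: chpl_end_count_t and its fields are simplified stand-ins, not the generated types; only the atomics and chpl_task_yield() mirror the real code.

// Minimal sketch of the wait loop in _waitEndCount, with the compiler
// temporaries removed. chpl_end_count_t is an illustrative stand-in.
#include <stdatomic.h>
#include <stdint.h>

extern void chpl_task_yield(void);   // Chapel runtime: let another task run

typedef struct {
  atomic_int_least64_t i;            // outstanding-task counter
  int64_t              taskCnt;      // tasks charged to the running-task counter
} chpl_end_count_t;

static void wait_end_count_sketch(chpl_end_count_t* e) {
  // Spin, yielding, until every child task has decremented the counter.
  while (atomic_load_explicit(&e->i, memory_order_relaxed) != 0)
    chpl_task_yield();
  // The acquire fence pairs with the releasing decrements in the child tasks.
  atomic_thread_fence(memory_order_acquire);
}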
// waitpid
qioerr qio_waitpid(int64_t pid, int blocking, int* done, int* exitcode)
{
  int status = 0;
  int flags = 0;
  pid_t got;

  flags |= WNOHANG;

  do {
    got = waitpid((pid_t) pid, &status, flags);
    if( got == -1 && errno == EINTR ) {
      // Try again if a non-blocking wait was interrupted
      got = 0;
    }
    if( ! blocking ) {
      break;
    }
    chpl_task_yield();
  } while (got == 0);

  // Check for error
  if( got == -1 ) {
    return qio_int_to_err(errno);
  }
  // Only update (done, exitcode) if waitpid() returned for the desired pid
  else if( got == pid ) {
    if( WIFEXITED(status) ) {
      *exitcode = WEXITSTATUS(status);
      *done = 1;
    } else if( WIFSIGNALED(status) ) {
      *exitcode = -WTERMSIG(status);
      *done = 1;
    }
  }

  return 0;
}
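For context, a caller might drive qio_waitpid as shown below. This is a hypothetical sketch (report_child_exit is not part of qio); it assumes only the qio_waitpid signature above, the qio headers that declare qioerr, and the convention that a negative exitcode encodes the terminating signal.

// Hypothetical caller: wait (cooperatively) until a child exits, then report
// how it exited. qio_waitpid yields between polls, so other Chapel tasks
// keep running while we wait. Assumes the qio headers are included.
#include <stdint.h>
#include <stdio.h>

static void report_child_exit(int64_t pid) {
  int done = 0;
  int exitcode = 0;
  qioerr err = qio_waitpid(pid, /*blocking=*/1, &done, &exitcode);
  if (err) {
    fprintf(stderr, "waitpid failed\n");
  } else if (done) {
    if (exitcode >= 0)
      printf("child %lld exited with status %d\n", (long long)pid, exitcode);
    else
      printf("child %lld killed by signal %d\n", (long long)pid, -exitcode);
  }
}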
void chpl_task_sleep(double secs) {
  struct timeval deadline;
  struct timeval now;

  //
  // Figure out when this task can proceed again, and until then, keep
  // yielding.
  //
  gettimeofday(&deadline, NULL);
  deadline.tv_usec += (suseconds_t) lround((secs - trunc(secs)) * 1.0e6);
  if (deadline.tv_usec > 1000000) {
    deadline.tv_sec++;
    deadline.tv_usec -= 1000000;
  }
  deadline.tv_sec += (time_t) trunc(secs);

  do {
    chpl_task_yield();
    gettimeofday(&now, NULL);
  } while (now.tv_sec < deadline.tv_sec ||
           (now.tv_sec == deadline.tv_sec && now.tv_usec < deadline.tv_usec));
}
/* ChapelDistribution.chpl:286 */
static int64_t destroyArr(BaseArr this8, int64_t _ln, c_string _fn) {
  memory_order local_memory_order_seq_cst;
  int64_t cnt;
  _ref_atomic_refcnt call_tmp = NULL;
  _ref_atomic_int64 call_tmp2 = NULL;
  memory_order default_argorder;
  _ref_atomic_int_least64_t call_tmp3 = NULL;
  int64_t call_tmp4;
  int64_t call_tmp5;
  chpl_bool call_tmp6;
  chpl_bool call_tmp7;
  BaseArr ret = NULL;
  object call_tmp8 = NULL;
  chpl_bool call_tmp9;
  BaseArr ret2 = NULL;
  int64_t call_tmp10;
  chpl_bool call_tmp11;
  BaseArr ret3 = NULL;
  int32_t _virtual_method_tmp_;
  chpl_opaque call_tmp12;
  int32_t _virtual_method_tmp_2;
  chpl_bool call_tmp13;
  BaseDom dom = NULL;
  BaseDom call_tmp14 = NULL;
  int32_t _virtual_method_tmp_3;
  chpl_bool T;
  _ref_atomicflag call_tmp15 = NULL;
  memory_order default_argorder2;
  _ref_atomic_flag call_tmp16 = NULL;
  chpl_bool call_tmp17;
  _ref_atomicflag call_tmp18 = NULL;
  memory_order default_argorder3;
  _ref_atomic_flag call_tmp19 = NULL;
  chpl_bool call_tmp20;
  _ref_list_BaseArr call_tmp21 = NULL;
  _ref_atomicflag call_tmp22 = NULL;
  memory_order default_argorder4;
  _ref_atomic_flag call_tmp23 = NULL;
  int64_t call_tmp24;
  chpl_bool call_tmp25;
  int32_t _virtual_method_tmp_4;
  chpl_opaque call_tmp26;
  local_memory_order_seq_cst = memory_order_seq_cst;
  compilerAssert();
  compilerAssert();
  // Atomically decrement the array's reference count.
  call_tmp = &((this8)->_arrCnt);
  call_tmp2 = &((call_tmp)->_cnt);
  default_argorder = local_memory_order_seq_cst;
  call_tmp3 = &((call_tmp2)->_v);
  call_tmp4 = atomic_fetch_sub_explicit_int_least64_t(call_tmp3, INT64(1), default_argorder);
  call_tmp5 = (call_tmp4 - INT64(1));
  call_tmp6 = (call_tmp5 < INT64(0));
  if (call_tmp6) {
    halt("array reference count is negative!", _ln, _fn);
  }
  cnt = call_tmp5;
  call_tmp7 = (call_tmp5 == INT64(0));
  if (call_tmp7) {
    // Last reference: destroy the alias (if any) or this array's data.
    ret = (this8)->_arrAlias;
    call_tmp8 = ((object)(ret));
    call_tmp9 = (call_tmp8 != nil);
    if (call_tmp9) {
      ret2 = (this8)->_arrAlias;
      call_tmp10 = destroyArr(ret2, _ln, _fn);
      call_tmp11 = (call_tmp10 == INT64(0));
      if (call_tmp11) {
        ret3 = (this8)->_arrAlias;
        _virtual_method_tmp_ = ((object)(ret3))->chpl__cid;
        ((void(*)(BaseArr, int64_t, c_string))chpl_vmtable[((INT64(8) * _virtual_method_tmp_) + INT64(0))])(ret3, _ln, _fn);
        call_tmp12 = ((void*)(ret3));
        chpl_here_free(call_tmp12, _ln, _fn);
      }
    } else {
      _virtual_method_tmp_2 = ((object)(this8))->chpl__cid;
      ((void(*)(BaseArr, int64_t, c_string))chpl_vmtable[((INT64(8) * _virtual_method_tmp_2) + INT64(5))])(this8, _ln, _fn);
    }
  }
  call_tmp13 = (call_tmp5 == INT64(0));
  if (call_tmp13) {
    _virtual_method_tmp_3 = ((object)(this8))->chpl__cid;
    call_tmp14 = ((BaseDom(*)(BaseArr, int64_t, c_string))chpl_vmtable[((INT64(8) * _virtual_method_tmp_3) + INT64(6))])(this8, _ln, _fn);
    dom = call_tmp14;
    // Acquire the domain's _arrsLock: spin on the test-and-set flag, yielding.
    call_tmp15 = &((dom)->_arrsLock);
    default_argorder2 = local_memory_order_seq_cst;
    call_tmp16 = &((call_tmp15)->_v);
    call_tmp17 = atomic_flag_test_and_set_explicit(call_tmp16, default_argorder2);
    T = call_tmp17;
    while (T) {
      chpl_task_yield();
      call_tmp18 = &((dom)->_arrsLock);
      default_argorder3 = local_memory_order_seq_cst;
      call_tmp19 = &((call_tmp18)->_v);
      call_tmp20 = atomic_flag_test_and_set_explicit(call_tmp19, default_argorder3);
      T = call_tmp20;
    }
    // Remove this array from the domain's list, then release the lock.
    call_tmp21 = &((dom)->_arrs);
    remove4(call_tmp21, this8, _ln, _fn);
    call_tmp22 = &((dom)->_arrsLock);
    default_argorder4 = local_memory_order_seq_cst;
    call_tmp23 = &((call_tmp22)->_v);
    atomic_flag_clear_explicit(call_tmp23, default_argorder4);
    call_tmp24 = destroyDom(dom, _ln, _fn);
    call_tmp25 = (call_tmp24 == INT64(0));
    if (call_tmp25) {
      _virtual_method_tmp_4 = ((object)(dom))->chpl__cid;
      ((void(*)(BaseDom, int64_t, c_string))chpl_vmtable[((INT64(8) * _virtual_method_tmp_4) + INT64(0))])(dom, _ln, _fn);
      call_tmp26 = ((void*)(dom));
      chpl_here_free(call_tmp26, _ln, _fn);
    }
  }
  return cnt;
}
/* ChapelDistribution.chpl:133 */
static int64_t destroyDom(BaseDom this8, int64_t _ln, c_string _fn) {
  memory_order local_memory_order_seq_cst;
  int64_t cnt;
  _ref_atomic_refcnt call_tmp = NULL;
  _ref_atomic_int64 call_tmp2 = NULL;
  memory_order default_argorder;
  _ref_atomic_int_least64_t call_tmp3 = NULL;
  int64_t call_tmp4;
  int64_t call_tmp5;
  chpl_bool call_tmp6;
  chpl_bool call_tmp7;
  chpl_bool T;
  chpl_bool call_tmp8;
  int32_t _virtual_method_tmp_;
  BaseDist dist2 = NULL;
  BaseDist call_tmp9 = NULL;
  chpl_bool T2;
  _ref_atomicflag call_tmp10 = NULL;
  memory_order default_argorder2;
  _ref_atomic_flag call_tmp11 = NULL;
  chpl_bool call_tmp12;
  _ref_atomicflag call_tmp13 = NULL;
  memory_order default_argorder3;
  _ref_atomic_flag call_tmp14 = NULL;
  chpl_bool call_tmp15;
  _ref_list_BaseDom call_tmp16 = NULL;
  _ref_atomicflag call_tmp17 = NULL;
  memory_order default_argorder4;
  _ref_atomic_flag call_tmp18 = NULL;
  int64_t call_tmp19;
  chpl_bool call_tmp20;
  int32_t _virtual_method_tmp_2;
  chpl_opaque call_tmp21;
  local_memory_order_seq_cst = memory_order_seq_cst;
  compilerAssert();
  compilerAssert();
  // Atomically decrement the domain's reference count.
  call_tmp = &((this8)->_domCnt);
  call_tmp2 = &((call_tmp)->_cnt);
  default_argorder = local_memory_order_seq_cst;
  call_tmp3 = &((call_tmp2)->_v);
  call_tmp4 = atomic_fetch_sub_explicit_int_least64_t(call_tmp3, INT64(1), default_argorder);
  call_tmp5 = (call_tmp4 - INT64(1));
  call_tmp6 = (call_tmp5 < INT64(0));
  if (call_tmp6) {
    halt("domain reference count is negative!", _ln, _fn);
  }
  cnt = call_tmp5;
  call_tmp7 = (call_tmp5 == INT64(0));
  if (call_tmp7) {
    _virtual_method_tmp_ = ((object)(this8))->chpl__cid;
    call_tmp8 = ((chpl_bool(*)(BaseDom))chpl_vmtable[((INT64(8) * _virtual_method_tmp_) + INT64(1))])(this8);
    T = call_tmp8;
  } else {
    T = false;
  }
  if (T) {
    call_tmp9 = dsiMyDist(this8, _ln, _fn);
    dist2 = call_tmp9;
    // Acquire the distribution's _domsLock: spin on the flag, yielding.
    call_tmp10 = &((dist2)->_domsLock);
    default_argorder2 = local_memory_order_seq_cst;
    call_tmp11 = &((call_tmp10)->_v);
    call_tmp12 = atomic_flag_test_and_set_explicit(call_tmp11, default_argorder2);
    T2 = call_tmp12;
    while (T2) {
      chpl_task_yield();
      call_tmp13 = &((dist2)->_domsLock);
      default_argorder3 = local_memory_order_seq_cst;
      call_tmp14 = &((call_tmp13)->_v);
      call_tmp15 = atomic_flag_test_and_set_explicit(call_tmp14, default_argorder3);
      T2 = call_tmp15;
    }
    // Remove this domain from the distribution's list, then release the lock.
    call_tmp16 = &((dist2)->_doms);
    remove3(call_tmp16, this8, _ln, _fn);
    call_tmp17 = &((dist2)->_domsLock);
    default_argorder4 = local_memory_order_seq_cst;
    call_tmp18 = &((call_tmp17)->_v);
    atomic_flag_clear_explicit(call_tmp18, default_argorder4);
    call_tmp19 = destroyDist(dist2, _ln, _fn);
    call_tmp20 = (call_tmp19 == INT64(0));
    if (call_tmp20) {
      _virtual_method_tmp_2 = ((object)(dist2))->chpl__cid;
      ((void(*)(BaseDist, int64_t, c_string))chpl_vmtable[((INT64(8) * _virtual_method_tmp_2) + INT64(0))])(dist2, _ln, _fn);
      call_tmp21 = ((void*)(dist2));
      chpl_here_free(call_tmp21, _ln, _fn);
    }
  }
  return cnt;
}
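destroyArr and destroyDom guard the parent's child list with the same idiom: an atomic_flag used as a test-and-set lock, spun on with chpl_task_yield() until acquired. Below is a minimal sketch of that idiom; yield_lock_t and the function names are invented here for illustration, and only the flag operations and chpl_task_yield() correspond to calls in the generated code.

// Sketch of the test-and-set lock pattern used above.
#include <stdatomic.h>

extern void chpl_task_yield(void);   // Chapel runtime: let another task run

typedef struct { atomic_flag _v; } yield_lock_t;

static void acquire_with_yield(yield_lock_t* lock) {
  // test_and_set returns the previous value: true means another task holds it.
  while (atomic_flag_test_and_set_explicit(&lock->_v, memory_order_seq_cst))
    chpl_task_yield();               // cooperative spin: yield instead of busy-waiting
}

static void release_yield_lock(yield_lock_t* lock) {
  atomic_flag_clear_explicit(&lock->_v, memory_order_seq_cst);
}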
// commit input, sending any data to the subprocess.
// once input is sent, close input channel and file.
// While sending that data, read output and error channels,
// buffering up the read data.
// once output/error reads EOF, close output/error file (not channel
// since output channel has the buffered data).
qioerr qio_proc_communicate(
    const int threadsafe,
    qio_channel_t* input,
    qio_channel_t* output,
    qio_channel_t* error) {
  qioerr err = 0;
  int rc = 0;
  bool do_input;
  bool do_output;
  bool do_error;
  bool input_ready;
  bool output_ready;
  bool error_ready;
  fd_set rfds, wfds, efds;
  int nfds = 1;
  int input_fd = -1;
  int output_fd = -1;
  int error_fd = -1;

  if( threadsafe ) {
    // lock all three channels.
    // but unlock them immediately and set them to NULL
    // if they are already closed.
    if( input ) {
      err = qio_lock(&input->lock);
      if( err ) return err;
      if( qio_channel_isclosed(false, input) ) {
        qio_unlock(&input->lock);
        input = NULL;
      }
    }
    if( output ) {
      err = qio_lock(&output->lock);
      if( err ) {
        if( input ) qio_unlock(&input->lock);
        return err;
      }
      if( qio_channel_isclosed(false, output) ) {
        qio_unlock(&output->lock);
        output = NULL;
      }
    }
    if( error ) {
      err = qio_lock(&error->lock);
      if( err ) {
        if( input ) qio_unlock(&input->lock);
        if( output ) qio_unlock(&output->lock);
        return err;
      }
      if( qio_channel_isclosed(false, error) ) {
        qio_unlock(&error->lock);
        error = NULL;
      }
    }
  }

  if( input ) {
    input_fd = input->file->fd;
    if( nfds <= input_fd ) nfds = input_fd + 1;
  }
  if( output ) {
    output_fd = output->file->fd;
    if( nfds <= output_fd ) nfds = output_fd + 1;
  }
  if( error ) {
    error_fd = error->file->fd;
    if( nfds <= error_fd ) nfds = error_fd + 1;
  }

  // Adjust all three pipes to be non-blocking.
  if( input_fd != -1 ) {
    rc = fcntl(input_fd, F_SETFL, O_NONBLOCK);
    if( rc == -1 ) { err = qio_int_to_err(errno); }
  }
  if( output_fd != -1 ) {
    rc = fcntl(output_fd, F_SETFL, O_NONBLOCK);
    if( rc == -1 ) { err = qio_int_to_err(errno); }
  }
  if( error_fd != -1 ) {
    rc = fcntl(error_fd, F_SETFL, O_NONBLOCK);
    if( rc == -1 ) { err = qio_int_to_err(errno); }
  }

  // mark the output and error channels so that we
  // can just keep advancing to read while buffering
  // up all data. Before returning, we'll revert them
  // so that this buffered data can be read again and
  // update end_pos so that the channel knows not to
  // call read again. Then, we can close the file
  // descriptor.
  if( output ) {
    qio_channel_mark(false, output);
  }
  if( error ) {
    qio_channel_mark(false, error);
  }

  do_input = (input != NULL);
  do_output = (output != NULL);
  do_error = (error != NULL);

  while( do_input || do_output || do_error ) {
    // Now call select to wait for one of the descriptors to
    // become ready.
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    FD_ZERO(&efds);
    if( do_input && input_fd != -1 ) {
      FD_SET(input_fd, &wfds);
      FD_SET(input_fd, &efds);
    }
    if( do_output && output_fd != -1 ) {
      FD_SET(output_fd, &rfds);
      FD_SET(output_fd, &efds);
    }
    if( do_error && error_fd != -1 ) {
      FD_SET(error_fd, &rfds);
      FD_SET(error_fd, &efds);
    }

    input_ready = false;
    output_ready = false;
    error_ready = false;

    // Run select to wait for something
    if( do_input || do_output || do_error ) {
      // TODO -- use sys_select so threading can interact
      struct timeval t;
      t.tv_sec = 0;
      t.tv_usec = 10;

      rc = select(nfds, &rfds, &wfds, &efds, &t);

      if (rc > 0) {
        // read ready file descriptors
        input_ready = input_fd != -1 && FD_ISSET(input_fd, &wfds);
        output_ready = output_fd != -1 && FD_ISSET(output_fd, &rfds);
        error_ready = error_fd != -1 && FD_ISSET(error_fd, &rfds);
      }

      // Ignore EAGAIN and EINTR
      if (rc == EAGAIN || rc == EINTR) rc = 0;
    }

    if( rc == -1 ) {
      err = qio_int_to_err(errno);
      break;
    }

    if( do_input && input_ready ) {
      err = _qio_channel_flush_qio_unlocked(input);
      if( !err ) {
        do_input = false;
        // Close input channel.
        err = qio_channel_close(false, input);
      }
      if( qio_err_to_int(err) == EAGAIN ) err = 0;
      if( err ) break;
    }

    if( do_output && output_ready ) {
      // read some into our buffer.
      err = qio_channel_advance(false, output, qbytes_iobuf_size);
      if( qio_err_to_int(err) == EEOF ) {
        qio_file_t* output_file = qio_channel_get_file(output);
        do_output = false;
        // close the output file (not channel), in case closing output
        // causes the program to output on stderr, e.g.
        if( output_file ) err = qio_file_close(output_file);
        // Set the output channel maximum position
        // This prevents a read on output from trying to get
        // more data from the (now closed) file.
        output->end_pos = qio_channel_offset_unlocked(output);
      }
      if( qio_err_to_int(err) == EAGAIN ) err = 0;
      if( err ) break;
    }

    if( do_error && error_ready ) {
      // read some into our buffer.
      err = qio_channel_advance(false, error, qbytes_iobuf_size);
      if( qio_err_to_int(err) == EEOF ) {
        qio_file_t* error_file = qio_channel_get_file(error);
        do_error = false;
        // close the error file (not channel)
        if( error_file ) err = qio_file_close(error_file);
        // Set the error channel maximum position
        error->end_pos = qio_channel_offset_unlocked(error);
      }
      if( qio_err_to_int(err) == EAGAIN ) err = 0;
      if( err ) break;
    }

    chpl_task_yield();
  }

  // we could close the file descriptors at this point,
  // but we don't because we don't want to modify
  // the file descriptor of the file since it's
  // constant (and not protected by file's lock).
  // The pipes will be closed when the channels are destroyed.

  // We marked the output and error channels so that we
  // can just keep advancing to read while buffering
  // up all data. Before returning, we'll revert them
  // so that this buffered data can be read again and
  // update end_pos so that the channel knows not to
  // call read again. Then, we can close the file
  // descriptor.

  // revert the output and error channels
  if( output ) qio_channel_revert_unlocked(output);
  if( error ) qio_channel_revert_unlocked(error);

  if( threadsafe ) {
    // unlock all three channels.
    if( error ) qio_unlock(&error->lock);
    if( output ) qio_unlock(&output->lock);
    if( input ) qio_unlock(&input->lock);
  }

  return err;
}
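The loop above boils down to: make the pipe descriptors non-blocking, call select() with a very short timeout, service whichever descriptors are ready, and yield to the tasking layer before polling again. The sketch below distills that structure for a single read descriptor; drain_fd_cooperatively is hypothetical (not a qio routine), error handling is abbreviated, and only select() plus chpl_task_yield() mirror the code above.

// Distilled sketch of the cooperative polling structure: short select()
// timeout, consume what is ready, yield, repeat until EOF.
#include <errno.h>
#include <stdbool.h>
#include <sys/select.h>
#include <sys/time.h>
#include <unistd.h>

extern void chpl_task_yield(void);   // Chapel runtime: let another task run

static void drain_fd_cooperatively(int fd) {
  char buf[4096];
  for (;;) {
    fd_set rfds;
    struct timeval t = { .tv_sec = 0, .tv_usec = 10 };  // short poll, as above
    FD_ZERO(&rfds);
    FD_SET(fd, &rfds);
    if (select(fd + 1, &rfds, NULL, NULL, &t) > 0 && FD_ISSET(fd, &rfds)) {
      ssize_t n = read(fd, buf, sizeof(buf));
      if (n == 0) break;                                // EOF: producer closed its end
      if (n < 0 && errno != EAGAIN && errno != EINTR) break;  // real error
      // a real caller would buffer the n bytes read here
    }
    chpl_task_yield();  // keep the wait cooperative with other tasks
  }
}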