bool ThreadObj::is_alive() { Thread::Raw t = thread(); if (TraceThreadEvents) { TTY_TRACE_CR(("ThreadObj 0x%x has Thread 0x%x, status %s", obj(), t.obj(), is_terminated() ? "TERMINATED" : "ALIVE")); } return !t().is_null() && !is_stillborn() && !is_terminated(); }
// Check that [dest, dest+count) in the owning process's address space is
// writable. Fails fast with STATUS_THREAD_IS_TERMINATING once this thread
// has been killed.
NTSTATUS thread_impl_t::verify_for_write( void *dest, size_t count )
{
	assert( process->is_valid() );
	return is_terminated()
		? STATUS_THREAD_IS_TERMINATING
		: process->vm->verify_for_write( dest, count );
}
/*
 * Format and send a message
 *  Returns: false on error
 *           true  on success
 */
bool BSOCK::fsend(const char *fmt, ...)
{
   va_list arg_ptr;
   int maxlen;

   /* Never try to format/transmit on a socket that is already in error
    * or whose session has been terminated. */
   if (errors || is_terminated()) {
      return false;
   }
   /* This probably won't work, but we vsnprintf, then if we
    * get a negative length or a length greater than our buffer
    * (depending on which library is used), the printf was truncated, so
    * get a bigger buffer and try again.
    */
   for (;;) {
      maxlen = sizeof_pool_memory(msg) - 1;
      /* va_start/va_end must bracket every bvsnprintf attempt: the arg
       * list cannot be reused after a failed formatting pass. */
      va_start(arg_ptr, fmt);
      msglen = bvsnprintf(msg, maxlen, fmt, arg_ptr);
      va_end(arg_ptr);
      /* Success requires a small safety margin (5 bytes) below the end. */
      if (msglen >= 0 && msglen < (maxlen - 5)) {
         break;
      }
      /* Grow the pool buffer by 50% and retry the formatting. */
      msg = realloc_pool_memory(msg, maxlen + maxlen / 2);
   }
   return send();
}
// Position this tronc (a bounded window into another generic_file) at its
// logical end-of-file.
// Returns true on success; on failure the underlying file is repositioned
// back to the current logical offset so the tronc state stays coherent.
bool tronc::skip_to_eof()
{
    bool ret;

    if(is_terminated())
	throw SRC_BUG;

    if(limited)
    {
	    // bounded tronc: EOF lies at absolute offset start + sz
	ret = ref->skip(start + sz);
	if(ret)
	    current = sz;
	else
	    (void)ref->skip(start + current); // roll back to previous position
    }
    else
    {
	    // unbounded tronc: delegate the EOF seek to the underlying file
	ret = ref->skip_to_eof();
	if(ret)
	    set_back_current_position();
	else
	       // NOTE(review): this calls tronc::skip (whose argument is
	       // relative to "start"), unlike the limited branch which calls
	       // ref->skip; verify the extra "start" offset is intended
	    (void)skip(start + current);
    }

    return ret;
}
bool tronc::skip(const infinint & pos) { if(is_terminated()) throw SRC_BUG; if(current == pos && check_pos) // we skip anyway when check_pos is false return true; if(limited && pos > sz) { if(ref->skip(start + sz)) current = sz; else (void)ref->skip(start + current); return false; } else { bool ret = ref->skip(start + pos); if(ret) current = pos; else (void)ref->skip(start + current); return ret; } }
// Move the read/write position by x bytes (x may be negative).
// Returns true when the full displacement could be applied; when the
// request would move before offset 0 or past the end of the data, the
// position is clamped to the boundary and false is returned.
bool memory_file::skip_relative(S_I x)
{
    bool ret = false;

    if(is_terminated())
	throw SRC_BUG;

    if(x < 0)
    {
	    // bug fix: compute |x| without evaluating -x, which is undefined
	    // behavior when x is the most negative representable S_I value;
	    // unsigned wrap-around yields the correct magnitude for every x
	U_I tx = 0u - static_cast<U_I>(x);
	if(position < tx)
	{
	    position = 0; // clamp at the beginning of the data
	    ret = false;
	}
	else
	{
	    position -= tx;
	    ret = true;
	}
    }
    else
    {
	position += x;
	if(position > data.size())
	{
	    position = data.size(); // clamp at the end of the data
	    ret = false;
	}
	else
	    ret = true;
    }

    return ret;
}
// Worker loop executed by each pool thread: repeatedly take the next task
// from the shared queue, run it, and decrement the outstanding-task count,
// notifying any waiter when the count reaches zero. Exits when the pool
// has been terminated.
void process(int thread_index)
{
    (void)(thread_index); //currently unused, but maybe useful on debugging.
    for( ; ; )
    {
	if(is_terminated())
	{
	    break;
	}
	// NOTE(review): dequeue presumably blocks until a task is available;
	// confirm that termination also wakes up blocked workers, otherwise
	// the is_terminated() check above can be starved
	task_ptr_t task = task_queue_.dequeue();
	bool should_notify = false;
	task->run();
	{
	    // update the counter under its own mutex; remember whether a
	    // waiter must be woken, but perform the notify outside the lock
	    task_count_lock_t lock(task_count_mutex_);
	    --task_count_;
	    if(is_waiting() && task_count_ == 0)
	    {
		should_notify = true;
	    }
	}
	if(should_notify)
	{
	    c_task_.notify_all();
	}
    }
}
// Ask the underlying file whether a skip of "amount" in the given
// direction would succeed. Calling this on a terminated object is a
// programming error.
bool tronc::skippable(skippability direction, const infinint & amount)
{
    if(is_terminated())
	throw SRC_BUG;

    return ref->skippable(direction, amount);
}
// Seek to the physical end of the underlying file descriptor.
// Returns false when lseek(2) reports an error.
bool fichier_local::skip_to_eof()
{
    if(is_terminated())
	throw SRC_BUG;

    return !(lseek(filedesc, 0, SEEK_END) < 0);
}
// Copy "count" bytes from user memory "src" into kernel buffer "dest",
// failing fast when this thread is already terminating.
NTSTATUS thread_impl_t::copy_from_user( void *dest, const void *src, size_t count )
{
	assert( process->is_valid() );
	if (!is_terminated())
		return process->vm->copy_from_user( dest, src, count );
	return STATUS_THREAD_IS_TERMINATING;
}
// Worker loop executed by each pool thread: take tasks off the shared
// queue, run them, and maintain the outstanding-task counter, notifying
// any waiter when the count drops to zero. Exits once the pool has been
// terminated.
void process(int thread_index)
{
    // silence the unused-parameter warning; the parameter is kept for
    // debugging, matching the sibling overload that does the same
    (void)(thread_index);
    for( ; ; )
    {
	if(is_terminated())
	{
	    break;
	}
	task_ptr_t task = task_queue_.dequeue();
	bool should_notify = false;
	task->run();
	{
	    // decrement under the counter's mutex; defer the notification
	    // until after the lock has been released
	    task_count_lock_t lock(task_count_mutex_);
	    --task_count_;
	    if(is_waiting() && task_count_ == 0)
	    {
		should_notify = true;
	    }
	}
	if(should_notify)
	{
	    c_task_.notify_all();
	}
    }
}
// Move the file offset by x bytes relative to the current position.
// Forward moves delegate directly to lseek(SEEK_CUR); backward moves are
// clamped at offset 0. Returns false when the requested displacement could
// not be fully honoured or when lseek(2) fails.
bool fichier_local::skip_relative(S_I x)
{
    if(is_terminated())
	throw SRC_BUG;

    if(x > 0)
    {
	if(lseek(filedesc, x, SEEK_CUR) < 0)
	    return false;
	else
	    return true;
    }

    if(x < 0)
    {
	bool ret = true;
	off_t actu = lseek(filedesc, 0, SEEK_CUR);

	// NOTE(review): if the SEEK_CUR lseek fails it returns -1, which
	// takes the clamp-to-zero branch below instead of reporting the
	// error -- confirm this is the intended behavior
	if(actu < -x)  // would land before the start of the file
	{
	    actu = 0;  // clamp to the beginning
	    ret = false;
	}
	else
	    actu += x; // x is negative
	if(lseek(filedesc, actu, SEEK_SET) < 0)
	    ret = false;
	return ret;
    }

    return true; // x == 0: nothing to do
}
// Wait for every worker thread to finish. The pool must already have been
// asked to terminate, otherwise the joins below would block forever.
void join_threads()
{
    assert(is_terminated());
    for(auto it = threads_.begin(); it != threads_.end(); ++it)
    {
	it->join();
    }
}
// Jump to the end of the in-memory buffer; always succeeds.
bool memory_file::skip_to_eof()
{
    if(is_terminated())
	throw SRC_BUG;

    position = data.size();

    return true;
}
// Register fiber "p" as a joiner of this fiber so it is resumed when this
// fiber finishes. Returns false when this fiber has already terminated
// (there is nothing left to wait on).
bool
fiber_base::join( ptr_t const& p)
{
    // protect against concurrent access to joining_
    unique_lock< spinlock > lk( joining_mtx_);
    if ( ! is_terminated() )
    {
        joining_.push_back( p);
        return true;
    }
    return false;
}
void fichier_local::change_ownership(const std::string & user, const std::string & group) { if(is_terminated()) throw SRC_BUG; // this method cannot be inlined to avoid cyclic dependency in headers files // fichier_global.hpp would then needs tools.hpp, which need limitint.hpp which relies // back on fichier.hpp tools_set_ownership(filedesc, user, group); }
// Change the permission bits of the open file through its file descriptor.
void fichier_local::change_permission(U_I perm)
{
    if(!is_terminated())
	    // deliberately not inlined in the header: inlining would create a
	    // cyclic header dependency (fichier_global.hpp -> tools.hpp ->
	    // limitint.hpp -> back to fichier.hpp)
	tools_set_permission(filedesc, perm);
    else
	throw SRC_BUG;
}
/* Main polling loop for a load-balancer lcore: receives packet bursts for
 * every task mapped on this core and hands them to the task handlers.
 * Every DRAIN_TIMEOUT tsc ticks it flushes statistics and idle TX queues,
 * and every TERM_TIMEOUT it checks for a termination request.
 * Returns 0 once asked to terminate. */
int thread_lb(struct lcore_cfg *lconf)
{
	struct rte_mbuf *rx_mbuf[MAX_RING_BURST] __rte_cache_aligned;
	struct task_base *task[MAX_TASKS_PER_CORE];
	uint64_t cur_tsc = rte_rdtsc();
	uint64_t next_term_tsc = cur_tsc + TERM_TIMEOUT;
	uint64_t drain_tsc = cur_tsc + DRAIN_TIMEOUT;
	const uint8_t nb_tasks = lconf->nb_tasks;

	/* cache the task pointers locally so the hot loop below does not
	 * need to dereference lconf on every iteration */
	for (uint8_t task_id = 0; task_id < nb_tasks; ++task_id) {
		task[task_id] = lconf->task[task_id];
	}

	for (;;) {
		cur_tsc = rte_rdtsc();
		if (cur_tsc > drain_tsc) {
			drain_tsc = cur_tsc + DRAIN_TIMEOUT;
			FLUSH_STATS(lconf);

			/* check for termination request every timeout */
			if (cur_tsc > next_term_tsc) {
				next_term_tsc = cur_tsc + TERM_TIMEOUT;
				if (is_terminated(lconf)) {
					break;
				}
			}

			for (uint8_t task_id = 0; task_id < nb_tasks; ++task_id) {
				if (!(task[task_id]->flags & (FLAG_TX_FLUSH | FLAG_NEVER_FLUSH))) {
					// Do not flush packets if we transmitted packets in last drain_timeout
					// This avoid flushing queue under load every x seconds
					task[task_id]->flags |= FLAG_TX_FLUSH;
					continue;
				}

				/* This part of the code is only run on low load - when we need to flush,
				   i.e. when we did not send a bulk packets within last drain_timeout
				   (16kpps if DRAIN_TIMEOUT=2msec). All queues are flushed in this case */
				task[task_id]->flush_queues(task[task_id]);
			}
		}

		/* hot path: poll every task for a burst of packets and
		 * dispatch any received bulk to its handler */
		for (uint8_t task_id = 0; task_id < nb_tasks; ++task_id) {
			uint16_t nb_rx = task[task_id]->rx_pkt(rx_mbuf, task[task_id]);

			if (likely(nb_rx)) {
				INCR_NBRX(nb_rx);
				INCR_RX_PKT_COUNT(task[task_id]->stats, nb_rx);
				task[task_id]->handle_pkt_bulk(rx_mbuf, task[task_id], nb_rx);
			}
		}
	}
	return 0;
}
void compressor::flush_read() { if(is_terminated()) throw SRC_BUG; if(decompr != NULL) // zlib if(decompr->wrap.decompressReset() != WR_OK) throw SRC_BUG; // keep in the buffer the bytes already read, these are discarded in case of a call to skip lzo_read_reached_eof = false; }
// Forward a read-ahead hint to the object sitting on top of the stack;
// with an empty stack the hint is silently ignored.
void pile::inherited_read_ahead(const infinint & amount)
{
    if(is_terminated())
	throw SRC_BUG;

    if(stack.size() == 0)
	return;

    if(stack.back().ptr == NULL)
	throw SRC_BUG;

    stack.back().ptr->read_ahead(amount);
}
// Flush any pending compressed output: drives the (z/bz)lib engine with
// WR_FINISH until the stream ends and resets it for reuse; for the lzo
// engine, compresses the pending buffer and emits an end-of-file block.
void compressor::flush_write()
{
    S_I ret;

    if(is_terminated())
	throw SRC_BUG;

    if(compr != NULL && compr->wrap.get_total_in() != 0)  // (z/bz)lib
    {
	    // no more input
	compr->wrap.set_avail_in(0);
	do
	{
		// setting the buffer for reception of data
	    compr->wrap.set_next_out(compr->buffer);
	    compr->wrap.set_avail_out(compr->size);

	    ret = compr->wrap.compress(WR_FINISH);
	    switch(ret)
	    {
	    case WR_OK:
	    case WR_STREAM_END:
		    // write out whatever the engine produced in this round
		if(compr->wrap.get_next_out() != compr->buffer)
		    compressed->write(compr->buffer, (char *)compr->wrap.get_next_out() - compr->buffer);
		break;
	    case WR_BUF_ERROR :
		throw SRC_BUG;
	    case WR_STREAM_ERROR :
		throw SRC_BUG;
	    default :
		throw SRC_BUG;
	    }
	}
	while(ret != WR_STREAM_END);

	    // make the engine reusable for a subsequent stream
	if(compr->wrap.compressReset() != WR_OK)
	    throw SRC_BUG;
    }

    if(lzo_write_buffer != NULL && ! lzo_write_flushed) // lzo
    {
	lzo_block_header lzo_bh;

	    // emit the last (partial) data block, then a zero-sized EOF header
	lzo_compress_buffer_and_write();
	lzo_bh.type = BLOCK_HEADER_EOF;
	lzo_bh.size = 0;
	if(compressed == NULL)
	    throw SRC_BUG;
	lzo_bh.dump(*compressed);
	lzo_write_flushed = true;
    }
}
// Map this isolate onto a Task status code: a live task reports its own
// status; without a task the isolate is either already stopped or has not
// started yet.
jint IsolateObj::status( void ) const {
  Task::Raw t = task();
  if (!t.is_null()) {
    return t().status();
  }
  return is_terminated() ? Task::TASK_STOPPED : Task::TASK_NEW;
}
// Return the current read/write offset of the underlying descriptor.
// Throws Erange when lseek(2) fails.
infinint fichier_local::get_position()
{
    if(is_terminated())
	throw SRC_BUG;

    const off_t offset = lseek(filedesc, 0, SEEK_CUR);

    if(offset == -1)
	throw Erange("fichier_local::get_position",
		     string(gettext("Error getting file reading position: "))
		     + tools_strerror_r(errno));

    return offset;
}
// Flush the file's data to disk: uses fdatasync(2) when available,
// falling back to a full fsync(2). Throws Erange on failure.
void fichier_local::fsync() const
{
    if(is_terminated())
	throw SRC_BUG;
#if HAVE_FDATASYNC
    S_I st = ::fdatasync(filedesc);
#else
	// NOTE(review): this fallback calls ::fsync but the error message
	// below still says "fdatasync" -- consider clarifying the message
    S_I st = ::fsync(filedesc);
#endif
    if(st < 0)
	throw Erange("fichier_local::fsync", string("Failed sync the slice (fdatasync): ") + tools_strerror_r(errno));
}
// Print a human-readable description of this thread object to "st".
void ThreadObj::print_value_on(Stream* st) {
  st->print("Thread object for ");
  if (is_unstarted()) {
    // bug fix: these branches used to print to the global "tty" stream
    // instead of the "st" argument, splitting the output across two
    // different streams whenever st != tty
    st->print("unstarted thread");
  } else if (is_stillborn()) {
    st->print("stillborn thread");
  } else if (is_terminated()) {
    st->print("terminated thread");
  } else {
    Thread t = thread();
    t.print_value_on(st);
  }
}
// Dispatch a win32 user-mode callback through KiUserCallbackDispatcher in
// the thread's user context, run it to completion, and collect its return
// values. "index" selects the callback table entry; on success "length"
// and "buffer" receive the callback's output.
NTSTATUS thread_impl_t::do_user_callback( ULONG index, ULONG &length, PVOID &buffer)
{
	struct {
		ULONG x[4];
	} frame;

	if (index == 0)
		die("zero index in win32 callback\n");

	// build the 4-dword dispatch frame the user-mode dispatcher expects:
	// (unused, callback index, previous esp, unused)
	frame.x[0] = 0;
	frame.x[1] = index;
	frame.x[2] = ctx.Esp;
	frame.x[3] = 0;

	//callback_stack.push( &ctx, fn );

	// push the frame onto the user stack; a failure here means the user
	// stack is unusable, so the thread is terminated
	ULONG new_esp = ctx.Esp - sizeof frame;
	NTSTATUS r = copy_to_user( (void*) new_esp, &frame, sizeof frame );
	if (r < STATUS_SUCCESS)
	{
		dprintf("%04lx: invalid stack handling exception at %08lx\n", id, ctx.Eip);
		terminate(r);
		return r;
	}

	// FIXME: limit recursion so we don't blow the stack

	// store the current user context
	callback_frame_t old_frame(this);

	// setup the new execution context
	BYTE *pKiUserCallbackDispatcher = (BYTE*)process->pntdll +
		get_proc_address( ntdll_section, "KiUserCallbackDispatcher" );
	context_changed = 1;
	ctx.Eip = (ULONG) pKiUserCallbackDispatcher;
	ctx.Esp = new_esp;

	// recurse, resume user execution here
	dprintf("continuing execution at %08lx\n", ctx.Eip);
	run();

	// the callback may have killed the thread while it was running
	if (is_terminated())
		return STATUS_THREAD_IS_TERMINATING;

	// fetch return values out of the frame
	old_frame.get_return(r, length, buffer);
	context_changed = 0;

	dprintf("callback returned %08lx\n", r);

	return r;
}
// Report the current position of the object on top of the stack.
// An empty stack has no position to report and raises Erange.
infinint pile::get_position()
{
    if(is_terminated())
	throw SRC_BUG;

    if(stack.size() == 0)
	throw Erange("pile::get_position", "Error: get_position() on empty stack");

    if(stack.back().ptr == NULL)
	throw SRC_BUG;

    return stack.back().ptr->get_position();
}
// Pass an access-pattern advice for the whole file to the kernel.
// Compiles to a no-op on platforms without posix_fadvise(2).
void fichier_local::fadvise(advise adv) const
{
    if(is_terminated())
	throw SRC_BUG;
#if HAVE_POSIX_FADVISE
    int ret = posix_fadvise(filedesc, 0, 0, advise_to_int(adv));
    if(ret == EBADF)
	throw SRC_BUG; // filedesc not a valid file descriptor !?!
    if(ret != 0)
	    // bug fix: per POSIX, posix_fadvise() returns the error number
	    // directly and does NOT set errno, so report "ret" instead of a
	    // stale errno value
	throw Erange("fichier_local::fadvise", string("Set posix advise failed: ") + tools_strerror_r(ret));
#endif
}
// Copy the whole content of the top-of-stack object into "ref".
// Copying from an empty stack raises Erange.
void pile::copy_to(generic_file & ref)
{
    if(is_terminated())
	throw SRC_BUG;

    if(stack.size() == 0)
	throw Erange("pile::copy_to", "Error: copy_to() from empty stack");

    if(stack.back().ptr == NULL)
	throw SRC_BUG;

    stack.back().ptr->copy_to(ref);
}
// Copy the whole content of the top-of-stack object into "ref", computing
// a CRC of width "crc_size" into "value" along the way.
// Copying from an empty stack raises Erange.
void pile::copy_to(generic_file & ref, const infinint & crc_size, crc * & value)
{
    if(is_terminated())
	throw SRC_BUG;

    if(stack.size() == 0)
	throw Erange("pile::copy_to(crc)", "Error: copy_to(crc) from empty stack");

    if(stack.back().ptr == NULL)
	throw SRC_BUG;

    stack.back().ptr->copy_to(ref, crc_size, value);
}