// Create a pair of connected sockets and store their descriptors in pair[0]/pair[1].
// On POSIX this is a single socketpair() call; on Windows it is emulated with a
// loopback TCP listener. Throws IOException on failure.
void IOUtil::createSocketPair(int pair[2]) throw (IOException&) {
#if defined(WIN32)
  // Windows has no socketpair(): connect to ourselves over the loopback interface.
  Socket clientEnd;
  Socket serverEnd;
  ServerSocket acceptor;

  SocketAddress loopbackAddr = SocketAddress::loopback(AF_INET, 0);
  acceptor.listen(loopbackAddr, 5, false);
  clientEnd.connect(acceptor.getLocalAddress(), true);

  if (!acceptor.accept(serverEnd)) {
    THROW2(IOException, "Can't accept for socket pair");
  }

  // Hand the raw descriptors to the caller; the Socket wrappers give up ownership.
  pair[0] = clientEnd.detach();
  pair[1] = serverEnd.detach();
#else
  // POSIX: one syscall produces the already-connected pair.
  if (0 != ::socketpair(AF_LOCAL, SOCK_STREAM, 0, pair)) {
    THROW2(IOException, "Can't create socket pair");
  }
#endif
}
void SocketAddress::set(const char* hostname, int family, uint16_t port) throw (UnknownHostException) { int addrLen = 0; MX_ASSERT(NULL != hostname); if (AF_UNSPEC == family) { if (!getAddrInfo(hostname, AF_INET6, &data_, &addrLen) && !getAddrInfo( hostname, AF_INET, &data_, &addrLen)) { THROW2(UnknownHostException, std::string("Unknown hostname: [") + hostname + "]"); } } else { if (!getAddrInfo(hostname, family, &data_, &addrLen)) { THROW2(UnknownHostException, std::string("Unknown hostname: [") + hostname + "]"); } } setPort(port); }
// Script-visible query: can this monster reach the given level vertex?
// Returns false (and logs) when the underlying object is not a monster.
bool CScriptGameObject::accessible_vertex_id(u32 level_vertex_id)
{
    CCustomMonster* const monster = smart_cast<CCustomMonster*>(&object());
    if (!monster) {
        ai().script_engine().script_log(ScriptStorage::eLuaMessageTypeError,
            "CRestrictedObject : cannot access class member accessible!");
        return (false);
    }

    // An invalid vertex id cannot be classified either way - treat it as a hard error.
    THROW2(ai().level_graph().valid_vertex_id(level_vertex_id),
        "Cannot check if level vertex id is accessible, because it is invalid");

    return (monster->movement().restrictions().accessible(level_vertex_id));
}
// Script-visible setter for the stalker's desired movement position.
// A NULL pointer clears the desired position; a non-null position must be
// accessible under the stalker's movement restrictions.
void CScriptGameObject::set_desired_position(const Fvector *desired_position)
{
    CAI_Stalker *stalker = smart_cast<CAI_Stalker*>(&object());
    if (!stalker)
        ai().script_engine().script_log(ScriptStorage::eLuaMessageTypeError,
            "CAI_Stalker : cannot access class member movement!");
    else {
        // BUGFIX: the original condition was
        //     desired_position || accessible(*desired_position)
        // which (a) skipped the accessibility check whenever a position WAS
        // supplied, and (b) dereferenced a null pointer when it was not.
        // Validate only when a position is actually passed.
        THROW2(!desired_position || stalker->movement().restrictions().accessible(*desired_position), *stalker->cName());
        stalker->movement().set_desired_position(desired_position);
    }
}
// Write the entire page (header and payload) to `file` with stdio.
// Throws FileException on a short write.
void page::fwrite_full_page(FILE *file) {
    const size_t written = ::fwrite(this, 1, page_size(), file);
    if (written == page_size())
        return;

    // Short write: report how much made it out before failing.
    TRACE(TRACE_ALWAYS, "::fwrite() wrote %zd/%zd page bytes %s\n",
          written, page_size(), strerror(errno));
    THROW2(FileException, "::fwrite() failed %s", strerror(errno));
}
// Read one full page from file descriptor `fd` over this object.
// Returns true on success, false on a clean end-of-file; throws
// FileException on an I/O error or a size mismatch.
bool page::read_full_page(int fd) {

    /* create an aligned array of bytes we can read into */
    // 512-byte alignment - presumably required for direct/raw fd I/O; TODO confirm.
    void* aligned_base;
    guard<char> ptr = (char*)aligned_alloc(page_size(), 512, &aligned_base);
    assert(ptr != NULL);

    /* read bytes over 'this' */
    /* read system call may return short counts */
    ssize_t size_read = rio_readn(fd, aligned_base, page_size());

    /* check for error */
    if (size_read == -1)
        THROW2(FileException, "::read failed %s", strerror(errno));

    /* check for end of file */
    if (size_read == 0)
        return false;

    /* rio_readn ensures we read the proper number of bytes */

    /* save page attributes that we'll be overwriting */
    // The memcpy below clobbers every member of this object, including the
    // owning pool pointer - save it and restore it afterwards.
    page_pool* pool = _pool;
    memcpy(this, aligned_base, size_read);
    _pool = pool;

    /* more error checking */
    if ( (page_size() != (size_t)size_read) ) {
        /* The page we read does not have the same size as the page
           object we overwrote. Luckily, we used the object size when
           reading, so we didn't overflow our internal buffer. */
        TRACE(TRACE_ALWAYS,
              "Read %zd byte-page with internal page size of %zd bytes. "
              "Sizes should all match.\n",
              size_read, page_size());
        THROW1(FileException, "::read read wrong size page");
    }

    return true;
}
/**
 * @brief Look up one CPU inside a cpu_set_t instance.
 *
 * @param cpu_set The CPU set; must not be NULL.
 * @param index Zero-based CPU index; must be less than
 * cpu_set_get_num_cpus(cpu_set).
 *
 * @return Pointer to the requested CPU entry. Throws on any invalid
 * argument rather than returning NULL.
 */
cpu_t cpu_set_get_cpu(cpu_set_p cpu_set, int index) {

    // Validate arguments before touching the array.
    if (cpu_set == NULL)
        THROW1(QPipeException, "Called with NULL cpu_set_t");
    if (index < 0)
        THROW2(OutOfRange, "Called with negative index %d\n", index);
    if (index >= cpu_set->cpuset_num_cpus)
        THROW3(OutOfRange,
               "Called with index %d in a cpu_set_t with %d CPUs\n",
               index,
               cpu_set->cpuset_num_cpus);

    return &cpu_set->cpuset_cpus[index];
}
// Script-visible setter for the stalker's level-path destination vertex.
// Logs and returns on a non-stalker object; silently ignores an invalid
// vertex id in release builds (logs it in DEBUG builds).
void CScriptGameObject::set_dest_level_vertex_id(u32 level_vertex_id)
{
    CAI_Stalker* const stalker = smart_cast<CAI_Stalker*>(&object());
    if (!stalker) {
        ai().script_engine().script_log(ScriptStorage::eLuaMessageTypeError,
            "CAI_Stalker : cannot access class member set_dest_level_vertex_id!");
        return;
    }

    if (!ai().level_graph().valid_vertex_id(level_vertex_id)) {
#ifdef DEBUG
        ai().script_engine().script_log(ScriptStorage::eLuaMessageTypeError,
            "CAI_Stalker : invalid vertex id being setup by action %s!",
            stalker->brain().CStalkerPlanner::current_action().m_action_name);
#endif
        return;
    }

    // The destination must be reachable under the stalker's restrictions.
    THROW2(stalker->movement().restrictions().accessible(level_vertex_id),
        *stalker->cName());
    stalker->movement().set_level_dest_vertex(level_vertex_id);
}
// Execute an UPDATE/INSERT/DELETE statement and return the affected-row
// count. Throws SqlException if the query fails or if it unexpectedly
// produces a result set (i.e. it was actually a SELECT).
uint64_t MysqlStatement::executeUpdate(const std::string& sql)
    throw (SqlException&) {
  MYSQL* const conn = connection_->getConnection();

  if (0 != ::mysql_real_query(conn, sql.c_str(), sql.size())) {
    THROW3(SqlException, ::mysql_error(conn), ::mysql_errno(conn));
  }

  // A non-zero field count means the statement returned rows.
  if (::mysql_field_count(conn)) {
    THROW2(SqlException, "Run update but statement return result set");
  }

  lastUpdateCount_ = ::mysql_affected_rows(conn);
  return lastUpdateCount_;
}
// Compile every non-empty pattern in `params` and append the resulting
// pcre handles to `li`. Throws mxcore::Exception on the first pattern
// that fails to compile (previously compiled handles remain in `li`).
void HttpFileDownLoadServlet::addFilters(std::list<pcre*>& li,
    const std::vector<std::string>& params) throw (mxcore::Exception&) {
  typedef std::vector<std::string>::const_iterator Iter;

  for (Iter it = params.begin(); params.end() != it; ++it) {
    const std::string& pattern = *it;
    if (pattern.empty()) {
      continue;  // skip blank entries
    }

    pcre* re = mxhttp::PcreUtil::open(pattern);
    if (NULL == re) {
      THROW2(mxcore::Exception,
          std::string("Can't compile pattern: [") + pattern + "]");
    }
    li.push_back(re);
  }
}
// Wait up to `timeoutMs` milliseconds for a nonblocking connect on `sock`
// to complete. Throws IOException on timeout, select() failure, or when
// the connect itself failed.
void IOUtil::finishConnect(int sock, int timeoutMs) throw (IOException&) {
  int err;
  fd_set w;
  struct timeval tv;

  MX_ASSERT(-1 != sock);

  FD_ZERO(&w);
  FD_SET(sock, &w);
  tv.tv_sec = timeoutMs / 1000;
  tv.tv_usec = (timeoutMs % 1000) * 1000;

  err = ::select(sock + 1, NULL, &w, NULL, &tv);
  if (err <= 0 || !FD_ISSET(sock, &w)) {
    THROW2(IOException, "Can't finish connect");
  }

  // BUGFIX: a socket becoming writable does NOT mean the connect
  // succeeded - a failed nonblocking connect also marks the socket
  // writable on common platforms. Query SO_ERROR for the real outcome.
  int soError = 0;
  socklen_t soLen = sizeof(soError);
  if (0 != ::getsockopt(sock, SOL_SOCKET, SO_ERROR, (char*) &soError, &soLen)
      || 0 != soError) {
    THROW2(IOException, "Can't finish connect");
  }
}
// (Re)open the libevent-backed IO service: allocate a fresh event base
// and attach the wakeup channel to it. Throws mxcore::IOException if
// either step fails; on failure the event base is released again.
void IOServiceLibEvent::open(void) throw (mxcore::IOException&) {
  close();  // drop any previous state before reopening

  evbase_ = ::event_base_new();
  if (NULL == evbase_) {
    THROW2(mxcore::IOException, "Can't open the event base");
  }

  try {
    wakeuper_.open(evbase_);
  } catch (mxcore::IOException&) {
    // Roll back the partially constructed state.
    ::event_base_free(evbase_);
    evbase_ = NULL;
    // BUGFIX: rethrow the original exception object; `throw e` would
    // copy (and slice) any derived exception type back to the base.
    throw;
  }
}
void page::write_full_page(int fd) { /* create an aligned copy of ourselves */ void* aligned_base; guard<char> ptr = (char*)aligned_alloc(page_size(), 512, &aligned_base); assert(ptr != NULL); memcpy(aligned_base, this, page_size()); ssize_t write_count = rio_writen(fd, aligned_base, page_size()); if ((size_t)write_count != page_size()) { TRACE(TRACE_ALWAYS, "::write(%d, %p, %zd) returned %zd: %s\n", fd, aligned_base, page_size(), write_count, strerror(errno)); THROW2(FileException, "::write() failed %s", strerror(errno)); } }
// Read one full page from stdio stream `file` over this object.
// Returns true on success, false on a clean end-of-file; throws
// FileException on an I/O error or a size mismatch.
bool page::fread_full_page(FILE* file) {

    // save page attributes that we'll be overwriting
    // `size` is captured BEFORE the fread clobbers our header; comparing it
    // against page_size() afterwards cross-checks the freshly-read header.
    size_t size = page_size();
    TRACE(0&TRACE_ALWAYS, "Computed page size as %d\n", (int)size);
    // The fread below overwrites every member, including the owning pool
    // pointer - save it and restore it afterwards.
    page_pool* pool = _pool;

    // write over this page
    size_t size_read = ::fread(this, 1, size, file);
    _pool = pool;

    // Check for error
    if ( (size_read == 0) && !feof(file) )
        THROW2(FileException, "::fread failed %s", strerror(errno));

    // We expect to read either a whole page or no data at
    // all. Anything else is an error.
    if ( (size_read == 0) && feof(file) )
        // done with file
        return false;

    // check sizes match
    if ( (size != size_read) || (size != page_size()) ) {
        // The page we read does not have the same size as the
        // page object we overwrote. Luckily, we used the object
        // size when reading, so we didn't overflow our internal
        // buffer.
        TRACE(TRACE_ALWAYS,
              "Read %zd byte-page with internal page size of %zd bytes into a buffer of %zd bytes. "
              "Sizes should all match.\n",
              size_read, page_size(), size);
        THROW1(FileException, "::fread read wrong size page");
    }

    return true;
}
// Apply a script-initiated hit to this object: translate the CScriptHit
// description into an SHit structure, serialize it into a GE_HIT network
// packet, and send the event. The commented-out fragments preserved below
// are the legacy manual packet serialization this code replaced.
void CScriptGameObject::Hit(CScriptHit *tpLuaHit)
{
    CScriptHit &tLuaHit = *tpLuaHit;
    NET_Packet P;
    SHit HS;
    HS.GenHeader(GE_HIT,object().ID()); // object().u_EventGen(P,GE_HIT,object().ID());

    // A hit must always have an initiator; this is a scripting error otherwise.
    THROW2 (tLuaHit.m_tpDraftsman,"Where is hit initiator??!");

    HS.whoID = u16(tLuaHit.m_tpDraftsman->ID()); // P.w_u16 (u16(tLuaHit.m_tpDraftsman->ID()));
    HS.weaponID = 0; // P.w_u16 (0);
    HS.dir = tLuaHit.m_tDirection; // P.w_dir (tLuaHit.m_tDirection);
    HS.power = tLuaHit.m_fPower; // P.w_float (tLuaHit.m_fPower);

    // Resolve the named bone on the object's visual; bone 0 when no name given.
    CKinematics *V = smart_cast<CKinematics*>(object().Visual());
    VERIFY (V);
    if (xr_strlen (tLuaHit.m_caBoneName))
        HS.boneID = (V->LL_BoneID(tLuaHit.m_caBoneName)); // P.w_s16 (V->LL_BoneID(tLuaHit.m_caBoneName));
    else
        HS.boneID = (s16(0)); // P.w_s16 (s16(0));

    HS.p_in_bone_space = Fvector().set(0,0,0); // P.w_vec3 (Fvector().set(0,0,0));
    HS.impulse = tLuaHit.m_fImpulse; // P.w_float (tLuaHit.m_fImpulse);
    HS.hit_type = (ALife::EHitType)(tLuaHit.m_tHitType); // P.w_u16 (u16(tLuaHit.m_tHitType));

    // Serialize the filled structure and fire the event.
    HS.Write_Packet(P);
    object().u_EventSend(P);
}
// Materialize the pending query result as a SqlResultSet.
// Forward-only cursors stream rows from the server (mysql_use_result);
// any other requested type buffers the whole result client-side
// (mysql_store_result) and is reported as scroll-insensitive.
// Throws SqlException when no result is available.
SqlResultSet* MysqlStatement::getResultSet(void) throw (SqlException&) {
  MYSQL_RES* result;
  SqlResultSet::enum_type type;

  if (SqlResultSet::TYPE_FORWARD_ONLY == resultType_) {
    type = SqlResultSet::TYPE_FORWARD_ONLY;
    result = ::mysql_use_result(connection_->getConnection());
  } else {
    type = SqlResultSet::TYPE_SCROLL_INSENSITIVE;
    result = ::mysql_store_result(connection_->getConnection());
  }

  if (NULL == result) {
    THROW2(SqlException, "No result to return");
  }
  return new MysqlResultSet(result, type);
}
// Drive one update step of a scripted movement action on this entity.
// Translates the action's goal type into movement-manager destinations,
// marks the action completed when the path is finished (or the entity is
// dead), and returns true while the action still needs further updates.
bool CScriptEntity::bfAssignMovement(CScriptEntityAction *tpEntityAction)
{
    CScriptMovementAction &l_tMovementAction = tpEntityAction->m_tMovementAction;

    // Nothing to do for an already-finished action.
    if (l_tMovementAction.m_bCompleted)
        return (false);

    // A dead entity cannot move - finish the action immediately.
    CEntityAlive *entity_alive = smart_cast<CEntityAlive*>(this);
    if (entity_alive && !entity_alive->g_Alive()) {
        l_tMovementAction.m_bCompleted = true;
        return (false);
    }

    if (!m_monster) {
        ai().script_engine().script_log(eLuaMessageTypeError,"Cannot assign a movement action not to a monster!");
        return (true);
    }

    switch (l_tMovementAction.m_tGoalType) {
        case CScriptMovementAction::eGoalTypeObject : {
            // Walk towards another game object's current position/vertex.
            CGameObject *l_tpGameObject = smart_cast<CGameObject*>(l_tMovementAction.m_tpObjectToGo);
#ifdef DEBUG
            THROW2 (l_tpGameObject,"eGoalTypeObject specified, but no object passed!");
#else
            R_ASSERT(l_tpGameObject);
#endif
            m_monster->movement().set_path_type(MovementManager::ePathTypeLevelPath);
            // Msg ("%6d Object %s, position [%f][%f][%f]",Device.dwTimeGlobal,*l_tpGameObject->cName(),VPUSH(l_tpGameObject->Position()));
            m_monster->movement().detail().set_dest_position(l_tpGameObject->Position());
            m_monster->movement().set_level_dest_vertex(l_tpGameObject->ai_location().level_vertex_id());
            break;
        }
        case CScriptMovementAction::eGoalTypePatrolPath : {
            // Follow a named patrol path with the action's start/route/random settings.
            m_monster->movement().set_path_type (MovementManager::ePathTypePatrolPath);
            m_monster->movement().patrol().set_path (l_tMovementAction.m_path,l_tMovementAction.m_path_name);
            m_monster->movement().patrol().set_start_type (l_tMovementAction.m_tPatrolPathStart);
            m_monster->movement().patrol().set_route_type (l_tMovementAction.m_tPatrolPathStop);
            m_monster->movement().patrol().set_random (l_tMovementAction.m_bRandom);
            // u32(-1) is the "no previous point" sentinel.
            if (l_tMovementAction.m_previous_patrol_point != u32(-1)) {
                m_monster->movement().patrol().set_previous_point(l_tMovementAction.m_previous_patrol_point);
            }
            break;
        }
        case CScriptMovementAction::eGoalTypePathPosition : {
            // Walk to an arbitrary position: resolve it to a level vertex first,
            // falling back to a directional probe from our current position.
            m_monster->movement().set_path_type(MovementManager::ePathTypeLevelPath);
            m_monster->movement().detail().set_dest_position(l_tMovementAction.m_tDestinationPosition);
            u32 vertex_id;
            vertex_id = ai().level_graph().vertex(object().ai_location().level_vertex_id(),l_tMovementAction.m_tDestinationPosition);
            if (!ai().level_graph().valid_vertex_id(vertex_id))
                vertex_id = ai().level_graph().check_position_in_direction(object().ai_location().level_vertex_id(),object().Position(),l_tMovementAction.m_tDestinationPosition);
#ifdef DEBUG
            if (!ai().level_graph().valid_vertex_id(vertex_id)) {
                string256 S;
                sprintf_s (S,"Cannot find corresponding level vertex for the specified position [%f][%f][%f] for monster %s",VPUSH(l_tMovementAction.m_tDestinationPosition),*m_monster->cName());
                THROW2 (ai().level_graph().valid_vertex_id(vertex_id),S);
            }
#endif
            m_monster->movement().level_path().set_dest_vertex(vertex_id);
            break;
        }
        case CScriptMovementAction::eGoalTypePathNodePosition : {
            // Both the destination position and its vertex were supplied by script.
            VERIFY(ai().level_graph().valid_vertex_id(l_tMovementAction.m_tNodeID));
            m_monster->movement().set_path_type (MovementManager::ePathTypeLevelPath);
            m_monster->movement().detail().set_dest_position (l_tMovementAction.m_tDestinationPosition);
            m_monster->movement().level_path().set_dest_vertex (l_tMovementAction.m_tNodeID);
            break;
        }
        case CScriptMovementAction::eGoalTypeNoPathPosition : {
            // Straight-line movement: build a trivial 2-point detail path
            // (current position -> destination) unless one is already in place.
            m_monster->movement().set_path_type(MovementManager::ePathTypeLevelPath);
            if (m_monster->movement().detail().path().empty() || (m_monster->movement().detail().path()[m_monster->movement().detail().path().size() - 1].position.distance_to(l_tMovementAction.m_tDestinationPosition) > .1f)) {
                m_monster->movement().detail().m_path.resize(2);
                m_monster->movement().detail().m_path[0].position = object().Position();
                m_monster->movement().detail().m_path[1].position = l_tMovementAction.m_tDestinationPosition;
                m_monster->movement().detail().m_current_travel_point = 0;
            }
            // Already (nearly) at the destination - finish the action.
            if (m_monster->movement().detail().m_path[1].position.similar(object().Position(),.2f))
                l_tMovementAction.m_bCompleted = true;
            break;
        }
        default : {
            // Unknown goal type: stop moving and report the action completed.
            m_monster->movement().set_desirable_speed(0.f);
            return (l_tMovementAction.m_bCompleted = true);
        }
    }

    // The action completes once the planned path is fully travelled.
    if (m_monster->movement().actual_all() && m_monster->movement().path_completed())
        l_tMovementAction.m_bCompleted = true;

    return (!l_tMovementAction.m_bCompleted);
}
// Persist this ini file under `new_fname`, delegating to the inherited
// implementation. A null file name is a caller bug and raises.
bool CScriptIniFile::save_as(LPCSTR new_fname)
{
    THROW2(new_fname, "File name is null");
    return inherited::save_as(new_fname);
}
// Intentionally unimplemented copy stub - reaching it is always an error.
void CLight::CopyFrom(CLight* src)
{
    // BUGFIX: the original wrote THROW2("CLight:: Go to AlexMX"), passing
    // the message string where THROW2 expects its condition argument (see
    // the other THROW2(cond, msg) uses in this codebase). A string literal
    // is always non-null/true, so the "not implemented" trap could never
    // fire. Pass an explicitly false condition so this always raises.
    THROW2(false, "CLight:: Go to AlexMX");
}
/**
 * @brief Use a timeout_ms value of 0 to wait until a page appears or
 * the tuple_fifo is closed (normal behavior). Use a negative
 * timeout_ms value to avoid waiting. If the tuple_fifo contains no
 * pages, we return immediately with a value of 0. Use a positive
 * timeout_ms value to wait for a max length of time.
 *
 * @return 1 if we got a page. -1 if the tuple_fifo has been
 * closed. If 'timeout_ms' is negative and this method returns 0, it
 * means the tuple_fifo is empty. If the timeout_ms value is positive
 * and we return 0, it means we timed out.
 */
int tuple_fifo::_get_read_page(int timeout_ms) {

    // * * * BEGIN CRITICAL SECTION * * *
    critical_section_t cs(_lock);
    _termination_check();

    /* Free the page so the writer can use it. */
    if (is_in_memory() && (_read_page != SENTINEL_PAGE)) {
        /* We are still maintaining an in-memory page list from which
           we are pulling pages. We release them to _free_pages as we
           are done with them. */
        _read_page->clear();
        _free_pages.push_back(_read_page.release());
        _set_read_page(SENTINEL_PAGE);
    }

    /* If 'wait_on_empty' and the buffer is currently empty, we must
       wait for space to open up. Once we start waiting we continue
       waiting until either space for '_threshold' pages is available
       OR the writer has invoked send_eof() or terminate(). */
    // Note: t starts at 1 (wait for any page) and jumps to _threshold
    // after the first wait iteration. Negative timeout_ms skips the
    // loop entirely (non-blocking mode).
    for(size_t t=1;
        (timeout_ms >= 0) && !is_done_writing() && (_available_fifo_reads() < t);
        t = _threshold) {
        /* We are to either wait for a page or wait for timeout_ms. */
        if(!wait_for_writer(timeout_ms))
            /* Timed out! */
            break;
        _termination_check();
    }

    TRACE(TRACE_ALWAYS&TRACE_MASK_DISK, "available reads = %d\n",
          (int)_available_fifo_reads());

    if(_available_fifo_reads() == 0) {
        /* If we are here, we exited the loop above because one of the
           other conditions failed. We either noticed that the
           tuple_fifo has been closed or we've timed out. */
        if(is_done_writing()) {
            /* notify caller that the tuple_fifo is closed */
            TRACE(TRACE_ALWAYS&TRACE_MASK_DISK, "Returning -1\n");
            return -1;
        }
        if(timeout_ms != 0)
            /* notify caller that we timed out */
            return 0;
        // With timeout_ms == 0 we only leave the wait loop with data
        // available or the fifo closed - this point must be unreachable.
        unreachable();
    }

    switch(_state.current()) {

    case tuple_fifo_state_t::IN_MEMORY:
    case tuple_fifo_state_t::IN_MEMORY_DONE_WRITING:
    {
        /* pull the page from page_list */
        assert(!_pages.empty());
        _set_read_page(_pages.front());
        _pages.pop_front();
        assert(_pages_in_memory > 0);
        _pages_in_memory--;
        break;
    }

    case tuple_fifo_state_t::ON_DISK:
    case tuple_fifo_state_t::ON_DISK_DONE_WRITING:
    {
        /* We are on disk. We should not be releasing _read_page after
           iterating over its entries. However, we still need to be
           prepared against code which extracts pages from the
           tuple_fifo using get_page(). get_page() sets _read_page to
           the SENTINEL_PAGE. */
        if (_read_page == SENTINEL_PAGE)
            _set_read_page(_alloc_page());
        else {
            /* We are reusing the same read page... do a reset */
            _read_page->clear();
            _set_read_page(_read_page.release());
        }

        /* Make sure that at this point, we are not dealing with the
           SENTINAL_PAGE. */
        assert(_read_page != SENTINEL_PAGE);
        assert(_read_page->page_size() == malloc_page_pool::instance()->page_size());

        /* read page from disk file */
        _read_page->clear();

        TRACE(TRACE_ALWAYS&TRACE_MASK_DISK, "_next_page = %d\n", (int)_next_page);
        TRACE(TRACE_ALWAYS&TRACE_MASK_DISK, "_file_head_page = %d\n", (int)_file_head_page);

        // Pages are stored consecutively on disk starting at
        // _file_head_page; compute this page's byte offset.
        unsigned long seek_pos =
            (_next_page - _file_head_page) * get_default_page_size();
        TRACE(TRACE_ALWAYS&TRACE_MASK_DISK, "fseek to %lu\n", seek_pos);
        int fseek_ret = fseek(_page_file, seek_pos, SEEK_SET);
        assert(!fseek_ret);
        if (fseek_ret)
            THROW2(FileException, "fseek to %lu", seek_pos);

        int fread_ret = _read_page->fread_full_page(_page_file);
        assert(fread_ret);
        _set_read_page(_read_page.release());

        // Debug-only hex dump of the page just read.
        size_t page_size = _read_page->page_size();
        if (TRACE_ALWAYS&TRACE_MASK_DISK) {
            page* pg = _read_page.release();
            unsigned char* pg_bytes = (unsigned char*)pg;
            for (size_t i = 0; i < page_size; i++) {
                printf("%02x", pg_bytes[i]);
                if (i % 2 == 0)
                    printf("\t");
                if (i % 16 == 0)
                    printf("\n");
            }
            _set_read_page(pg);
        }

        TRACE(TRACE_ALWAYS&TRACE_MASK_DISK, "Read %d %d-byte tuples\n",
              (int)_read_page->tuple_count(), (int)_read_page->tuple_size());
        break;
    }

    default:
        unreachable();

    } /* endof switch statement */

    // Bookkeeping: one fewer page in the fifo, advance the read cursor.
    assert(_pages_in_fifo > 0);
    _pages_in_fifo--;
    _next_page++;

    /* wake the writer if necessary */
    if(!FLUSH_TO_DISK_ON_FULL
       && (_available_in_memory_writes() >= _threshold)
       && !is_done_writing())
        ensure_writer_running();

    // * * * END CRITICAL SECTION * * *
    return 1;
}
/**
 * @brief Flush the current write page into the fifo, spilling the page
 * list to an on-disk file when in-memory space is exhausted (and the
 * flush-to-disk policy is enabled). When 'done_writing' is set, also
 * transitions the fifo into its DONE_WRITING state and retires the
 * write page.
 *
 * NOTE(review): the previous comment here ("Get a page from the
 * tuple_fifo... @return NULL if closed") described a different
 * function; this one returns void.
 */
void tuple_fifo::_flush_write_page(bool done_writing) {

    // after the call to send_eof() the write page is NULL
    assert(!is_done_writing());

    // * * * BEGIN CRITICAL SECTION * * *
    critical_section_t cs(_lock);
    _termination_check();

    switch(_state.current()) {

    case tuple_fifo_state_t::IN_MEMORY:
    {
        /* Wait for space to free up if we are using a "no flush"
           policy. */
        if (!FLUSH_TO_DISK_ON_FULL) {
            /* tuple_fifo stays in memory */
            /* If the buffer is currently full, we must wait for space
               to open up. Once we start waiting we continue waiting
               until space for '_threshold' pages is available. */
            for(size_t threshold=1;
                _available_in_memory_writes() < threshold;
                threshold = _threshold) {
                /* wait until something important changes */
                wait_for_reader();
                _termination_check();
            }
        }

        /* At this point, we don't have to wait for space anymore. If
           we still don't have enough space, it must be because we are
           using a disk flush policy. Check whether we can proceed
           without flushing to disk. */
        if (_available_in_memory_writes() >= 1) {

            /* Add _write_page to other tuple_fifo pages unless empty. */
            if(!_write_page->empty()) {
                _pages.push_back(_write_page.release());
                _pages_in_memory++;
                _pages_in_fifo++;
            }

            /* Allocate a new _write_page if necessary. */
            if(done_writing) {
                /* Allocation of a new _write_page is not necessary
                   (because we are done writing). Just do state
                   transition. */
                _state.transition(tuple_fifo_state_t::IN_MEMORY_DONE_WRITING);
                _write_page.done();
            }
            else
                _write_page = _alloc_page();

            /* wake the reader if necessary */
            if(_available_in_memory_reads() >= _threshold || is_done_writing())
                ensure_reader_running();

            break;
        }

        /* If we are here, we need to flush to disk. */

        /* Create on disk file. */
        c_str filepath = tuple_fifo_directory_t::generate_filepath(_fifo_id);
        _page_file = fopen(filepath.data(), "w+");
        assert(_page_file != NULL);
        if (_page_file == NULL)
            THROW2(FileException, "fopen(%s) failed", filepath.data());
        TRACE(TRACE_ALWAYS, "Created tuple_fifo file %s\n", filepath.data());

        /* Append this page to _pages and flush the entire page_list
           to disk. */
        if(!_write_page->empty()) {
            _pages.push_back(_write_page.release());
            _pages_in_memory++;
            _pages_in_fifo++;
        }

        // Drain the in-memory page list: write each page out, then recycle
        // it through _free_pages.
        for (page_list::iterator it = _pages.begin(); it != _pages.end(); ) {
            qpipe::page* p = *it;
            p->fwrite_full_page(_page_file);
            /* done with page */
            p->clear();
            _free_pages.push_back(p);
            it = _pages.erase(it);
            assert(_pages_in_memory > 0);
            _pages_in_memory--;
        }
        fflush(_page_file);

        /* update _file_head_page */
        // First spill ever: record which logical page starts the file.
        assert(_file_head_page == 0);
        _file_head_page = _next_page;

        _state.transition(tuple_fifo_state_t::ON_DISK);
        if (done_writing) {
            /* transition again! */
            _state.transition(tuple_fifo_state_t::ON_DISK_DONE_WRITING);
            _write_page.done();
        }
        else {
            /* allocate from free list */
            assert(!_free_pages.empty());
            _write_page = _alloc_page();

            /* TODO It's clear whether we want to replace the
               SENTINAL_PAGE here. On the one hand, if we can replace
               it, we can free the rest of the pages in the free
               list. On the other, we still need to check for the
               SENTINAL_PAGE in _get_read_page since pages may be
               removed using get_page (instead of tuples removed with
               get_tuple). */
            if (_read_page == SENTINEL_PAGE) {
                _set_read_page(_alloc_page());
                /* After this point, we should not release either of
                   these pages. */
            }
        }

        /* wake the reader if necessary */
        if(_available_fifo_reads() >= _threshold || is_done_writing())
            ensure_reader_running();

        break;
    } /* endof case: IN_MEMORY */

    case tuple_fifo_state_t::ON_DISK:
    {
        // Already spilled: append the write page at the end of the file.
        int fseek_ret = fseek(_page_file, 0, SEEK_END);
        assert(!fseek_ret);
        if (fseek_ret)
            THROW1(FileException, "fseek to EOF");
        _write_page->fwrite_full_page(_page_file);
        fflush(_page_file);
        _pages_in_fifo++;

        if (done_writing) {
            _state.transition(tuple_fifo_state_t::ON_DISK_DONE_WRITING);
            _write_page.done();
        }
        else {
            /* simply reuse write page */
            _write_page->clear();
        }

        /* wake the reader if necessary */
        if(_available_fifo_reads() >= _threshold || is_done_writing())
            ensure_reader_running();

        break;
    }

    default:
        unreachable();

    } /* endof switch statement */

    // * * * END CRITICAL SECTION * * *
}