EPollEngine::EPollEngine()
{
	// Discover the per-process open-file limit; it becomes our fd ceiling.
	int max = ulimit(4, 0);
	if (max > 0)
	{
		MAX_DESCRIPTORS = max;
	}
	else
	{
		ServerInstance->Logs->Log("SOCKET", LOG_DEFAULT, "ERROR: Can't determine maximum number of open sockets!");
		std::cout << "ERROR: Can't determine maximum number of open sockets!" << std::endl;
		ServerInstance->Exit(EXIT_STATUS_SOCKETENGINE);
	}

	// This is not a maximum, just a hint at the eventual number of sockets that may be polled.
	EngineHandle = epoll_create(GetMaxFds() / 4);
	if (EngineHandle == -1)
	{
		// No epoll support in the running kernel: fatal.
		ServerInstance->Logs->Log("SOCKET",LOG_DEFAULT, "ERROR: Could not initialize socket engine: %s", strerror(errno));
		ServerInstance->Logs->Log("SOCKET",LOG_DEFAULT, "ERROR: Your kernel probably does not have the proper features. This is a fatal error, exiting now.");
		std::cout << "ERROR: Could not initialize epoll socket engine: " << strerror(errno) << std::endl;
		std::cout << "ERROR: Your kernel probably does not have the proper features. This is a fatal error, exiting now." << std::endl;
		ServerInstance->Exit(EXIT_STATUS_SOCKETENGINE);
	}

	// Allocate the fd -> handler table and the epoll event buffer.
	ref = new EventHandler* [GetMaxFds()];
	events = new struct epoll_event[GetMaxFds()];
	memset(ref, 0, GetMaxFds() * sizeof(EventHandler*));
}
PortsEngine::PortsEngine()
{
	// Use the process open-file limit as the descriptor ceiling.
	int max = ulimit(4, 0);
	if (max > 0)
	{
		MAX_DESCRIPTORS = max;
	}
	else
	{
		ServerInstance->Logs->Log("SOCKET", LOG_DEFAULT, "ERROR: Can't determine maximum number of open sockets!");
		std::cout << "ERROR: Can't determine maximum number of open sockets!" << std::endl;
		ServerInstance->Exit(EXIT_STATUS_SOCKETENGINE);
	}

	// Create the Solaris event port; failure here is fatal.
	EngineHandle = port_create();
	if (EngineHandle == -1)
	{
		ServerInstance->Logs->Log("SOCKET", LOG_SPARSE, "ERROR: Could not initialize socket engine: %s", strerror(errno));
		ServerInstance->Logs->Log("SOCKET", LOG_SPARSE, "ERROR: This is a fatal error, exiting now.");
		std::cout << "ERROR: Could not initialize socket engine: " << strerror(errno) << std::endl;
		std::cout << "ERROR: This is a fatal error, exiting now." << std::endl;
		ServerInstance->Exit(EXIT_STATUS_SOCKETENGINE);
	}

	CurrentSetSize = 0;

	// fd -> handler table plus the port event retrieval buffer.
	ref = new EventHandler* [GetMaxFds()];
	events = new port_event_t[GetMaxFds()];
	memset(ref, 0, GetMaxFds() * sizeof(EventHandler*));
}
KQueueEngine::KQueueEngine()
{
	MAX_DESCRIPTORS = 0;

	// Ask the kernel, via sysctl, how many files this process may open.
	int mib[2];
	size_t len;
	mib[0] = CTL_KERN;
#ifdef KERN_MAXFILESPERPROC
	mib[1] = KERN_MAXFILESPERPROC;
#else
	mib[1] = KERN_MAXFILES;
#endif
	len = sizeof(MAX_DESCRIPTORS);
	sysctl(mib, 2, &MAX_DESCRIPTORS, &len, NULL, 0);

	if (MAX_DESCRIPTORS <= 0)
	{
		// sysctl gave us nothing usable; we can't size our tables.
		ServerInstance->Logs->Log("SOCKET", LOG_DEFAULT, "ERROR: Can't determine maximum number of open sockets!");
		std::cout << "ERROR: Can't determine maximum number of open sockets!" << std::endl;
		ServerInstance->QuickExit(EXIT_STATUS_SOCKETENGINE);
	}

	// RecoverFromFork() (re)creates the kqueue handle, which is not
	// inherited across fork().
	this->RecoverFromFork();

	ke_list = new struct kevent[GetMaxFds()];
	ref = new EventHandler* [GetMaxFds()];
	memset(ref, 0, GetMaxFds() * sizeof(EventHandler*));
}
PollEngine::PollEngine(InspIRCd* Instance) : SocketEngine(Instance)
{
	// Poll requires no special setup (which is nice).
	CurrentSetSize = 0;
	MAX_DESCRIPTORS = 0;

	// Zero-initialised handler table and pollfd array.
	ref = new EventHandler* [GetMaxFds()];
	events = new struct pollfd[GetMaxFds()];
	memset(events, 0, GetMaxFds() * sizeof(struct pollfd));
	memset(ref, 0, GetMaxFds() * sizeof(EventHandler*));
}
SelectEngine::SelectEngine()
{
	// select() is hard-capped at FD_SETSIZE descriptors.
	MAX_DESCRIPTORS = FD_SETSIZE;
	CurrentSetSize = 0;

	ref = new EventHandler* [GetMaxFds()];
	memset(ref, 0, GetMaxFds() * sizeof(EventHandler*));

	// Start with empty read/write/error sets and no highest fd yet.
	FD_ZERO(&ReadSet);
	FD_ZERO(&WriteSet);
	FD_ZERO(&ErrSet);
	MaxFD = 0;
}
bool PollEngine::AddFd(EventHandler* eh, int event_mask)
{
	const int fd = eh->GetFd();

	// Reject descriptors outside [0, GetMaxFds()-1].
	if ((fd < 0) || (fd > GetMaxFds() - 1))
	{
		ServerInstance->Logs->Log("SOCKET",DEBUG,"AddFd out of range: (fd: %d, max: %d)", fd, GetMaxFds());
		return false;
	}

	// Refuse to register the same fd twice.
	if (fd_mappings.find(fd) != fd_mappings.end())
	{
		ServerInstance->Logs->Log("SOCKET",DEBUG,"Attempt to add duplicate fd: %d", fd);
		return false;
	}

	// New entries always go into the next free slot at the end.
	const unsigned int slot = CurrentSetSize;
	fd_mappings[fd] = slot;
	ref[slot] = eh;
	events[slot].fd = fd;
	events[slot].events = mask_to_poll(event_mask);

	ServerInstance->Logs->Log("SOCKET", DEBUG,"New file descriptor: %d (%d; index %d)", fd, events[slot].events, slot);
	SocketEngine::SetEventMask(eh, event_mask);
	CurrentSetSize++;
	return true;
}
bool EPollEngine::AddFd(EventHandler* eh, int event_mask)
{
	const int fd = eh->GetFd();

	// Bounds check: fd must index into ref[] safely.
	if ((fd < 0) || (fd > GetMaxFds() - 1))
	{
		ServerInstance->Logs->Log("SOCKET",LOG_DEBUG,"AddFd out of range: (fd: %d, max: %d)", fd, GetMaxFds());
		return false;
	}

	// Already registered?
	if (ref[fd])
	{
		ServerInstance->Logs->Log("SOCKET",LOG_DEBUG,"Attempt to add duplicate fd: %d", fd);
		return false;
	}

	// Build the epoll registration from the caller's event mask.
	struct epoll_event ev;
	memset(&ev, 0, sizeof(ev));
	ev.events = mask_to_epoll(event_mask);
	ev.data.fd = fd;

	if (epoll_ctl(EngineHandle, EPOLL_CTL_ADD, fd, &ev) < 0)
	{
		ServerInstance->Logs->Log("SOCKET",LOG_DEBUG,"Error adding fd: %d to socketengine: %s", fd, strerror(errno));
		return false;
	}

	ServerInstance->Logs->Log("SOCKET",LOG_DEBUG,"New file descriptor: %d", fd);

	// Only record the handler once the kernel has accepted the fd.
	ref[fd] = eh;
	SocketEngine::SetEventMask(eh, event_mask);
	CurrentSetSize++;
	return true;
}
bool SocketEngine::AddFd(EventHandler* eh, int event_mask)
{
	int fd = eh->GetFd();

	// fd must be a valid index (ref/poll tables are GetMaxFds() long).
	if ((fd < 0) || (fd > GetMaxFds() - 1))
	{
		ServerInstance->Logs->Log("SOCKET", LOG_DEBUG, "AddFd out of range: (fd: %d, max: %d)", fd, GetMaxFds());
		return false;
	}

	// Reject an fd that already has a live slot in the mapping.
	if (static_cast<unsigned int>(fd) < fd_mappings.size() && fd_mappings[fd] != -1)
	{
		ServerInstance->Logs->Log("SOCKET", LOG_DEBUG, "Attempt to add duplicate fd: %d", fd);
		return false;
	}

	// New entries always take the next free slot at the tail.
	unsigned int index = CurrentSetSize;

	if (!SocketEngine::AddFdRef(eh))
	{
		ServerInstance->Logs->Log("SOCKET", LOG_DEBUG, "Attempt to add duplicate fd: %d", fd);
		return false;
	}

	// Grow the fd -> index map by doubling until it covers this fd.
	// BUG FIX: doubling from the current size loops forever if the vector is
	// ever empty (0 * 2 == 0); grow from at least one element.
	while (static_cast<unsigned int>(fd) >= fd_mappings.size())
		fd_mappings.resize(fd_mappings.empty() ? 1 : fd_mappings.size() * 2, -1);
	fd_mappings[fd] = index;

	// Make sure the pollfd array has room for the new slot, then fill it.
	ResizeDouble(events);
	events[index].fd = fd;
	events[index].events = mask_to_poll(event_mask);

	ServerInstance->Logs->Log("SOCKET", LOG_DEBUG, "New file descriptor: %d (%d; index %d)", fd, events[index].events, index);
	eh->SetEventMask(event_mask);
	return true;
}
bool KQueueEngine::AddFd(EventHandler* eh, int event_mask)
{
	const int fd = eh->GetFd();

	// Invalid or already-registered descriptors are rejected silently.
	if ((fd < 0) || (fd > GetMaxFds() - 1))
		return false;
	if (ref[fd])
		return false;

	// We always want to read from the socket...
	struct kevent kev;
	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);

	const int ret = kevent(EngineHandle, &kev, 1, 0, 0, NULL);
	if (ret == -1)
	{
		ServerInstance->Logs->Log("SOCKET", LOG_DEFAULT, "Failed to add fd: %d %s", fd, strerror(errno));
		return false;
	}

	ref[fd] = eh;
	SocketEngine::SetEventMask(eh, event_mask);
	// Write interest (if any) is registered via the normal mask-change path.
	OnSetEvent(eh, 0, event_mask);
	CurrentSetSize++;

	ServerInstance->Logs->Log("SOCKET", LOG_DEBUG, "New file descriptor: %d", fd);
	return true;
}
void KQueueEngine::DelFd(EventHandler* eh)
{
	const int fd = eh->GetFd();

	if ((fd < 0) || (fd > GetMaxFds() - 1))
	{
		ServerInstance->Logs->Log("SOCKET", LOG_DEFAULT, "DelFd() on invalid fd: %d", fd);
		return;
	}

	struct kevent kev;

	// First remove the write filter ignoring errors, since we can't be
	// sure if there are actually any write filters registered.
	EV_SET(&kev, fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
	kevent(EngineHandle, &kev, 1, 0, 0, NULL);

	// Then remove the read filter.
	EV_SET(&kev, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
	if (kevent(EngineHandle, &kev, 1, 0, 0, NULL) < 0)
	{
		ServerInstance->Logs->Log("SOCKET", LOG_DEFAULT, "Failed to remove fd: %d %s", fd, strerror(errno));
	}

	CurrentSetSize--;
	ref[fd] = NULL;
	ServerInstance->Logs->Log("SOCKET", LOG_DEBUG, "Remove file descriptor: %d", fd);
}
bool SocketEngine::BoundsCheckFd(EventHandler* eh)
{
	// A null handler can never hold a valid descriptor.
	if (!eh)
		return false;

	// BUG FIX: the valid range is [0, GetMaxFds()-1] — every other bounds
	// check in the engines uses "fd > GetMaxFds() - 1". The old ">" test let
	// fd == GetMaxFds() through, which is one past the end of the ref array.
	if ((eh->GetFd() < 0) || (eh->GetFd() >= GetMaxFds()))
		return false;

	return true;
}
int PortsEngine::DispatchEvents()
{
	// Block for at most one second waiting for port events.
	struct timespec poll_time;
	poll_time.tv_sec = 1;
	poll_time.tv_nsec = 0;

	unsigned int nget = 1; // used to denote a retrieve request.
	int ret = port_getn(EngineHandle, this->events, GetMaxFds() - 1, &nget, &poll_time);
	ServerInstance->UpdateTime();

	// first handle an error condition
	if (ret == -1)
		return -1;

	TotalEvents += nget;

	unsigned int i;
	for (i = 0; i < nget; i++)
	{
		switch (this->events[i].portev_source)
		{
			case PORT_SOURCE_FD:
			{
				int fd = this->events[i].portev_object;
				EventHandler* eh = ref[fd];
				if (eh)
				{
					int mask = eh->GetEventMask();
					// Event ports are one-shot: an fired condition clears the
					// corresponding will-block/want-write bits from the mask.
					if (events[i].portev_events & POLLWRNORM)
						mask &= ~(FD_WRITE_WILL_BLOCK | FD_WANT_FAST_WRITE | FD_WANT_SINGLE_WRITE);
					if (events[i].portev_events & POLLRDNORM)
						mask &= ~FD_READ_WILL_BLOCK;
					// reinsert port for next time around, pretending to be one-shot for writes
					SetEventMask(eh, mask);
					port_associate(EngineHandle, PORT_SOURCE_FD, fd, mask_to_events(mask), eh);
					if (events[i].portev_events & POLLRDNORM)
					{
						ReadEvents++;
						eh->HandleEvent(EVENT_READ);
						// The read handler may have removed this fd; don't hand
						// a write event to a stale handler.
						if (eh != ref[fd])
							continue;
					}
					if (events[i].portev_events & POLLWRNORM)
					{
						WriteEvents++;
						eh->HandleEvent(EVENT_WRITE);
					}
				}
			}
			// NOTE(review): the case above falls through to default (no break);
			// harmless here since default only breaks, but worth confirming.
			default:
				break;
		}
	}

	return (int)i;
}
PortsEngine::PortsEngine(InspIRCd* Instance) : SocketEngine(Instance)
{
	MAX_DESCRIPTORS = 0;

	// Create the Solaris event port; without it we cannot run at all.
	EngineHandle = port_create();
	if (EngineHandle == -1)
	{
		ServerInstance->Logs->Log("SOCKET",SPARSE,"ERROR: Could not initialize socket engine: %s", strerror(errno));
		ServerInstance->Logs->Log("SOCKET",SPARSE,"ERROR: This is a fatal error, exiting now.");
		printf("ERROR: Could not initialize socket engine: %s\n", strerror(errno));
		printf("ERROR: This is a fatal error, exiting now.\n");
		ServerInstance->Exit(EXIT_STATUS_SOCKETENGINE);
	}

	CurrentSetSize = 0;

	// Handler table plus the buffer port_getn() fills.
	ref = new EventHandler* [GetMaxFds()];
	events = new port_event_t[GetMaxFds()];
	memset(ref, 0, GetMaxFds() * sizeof(EventHandler*));
}
PollEngine::PollEngine()
{
	CurrentSetSize = 0;

	// Size our tables from the soft RLIMIT_NOFILE limit.
	struct rlimit limits;
	if (!getrlimit(RLIMIT_NOFILE, &limits))
	{
		MAX_DESCRIPTORS = limits.rlim_cur;
	}
	else
	{
		ServerInstance->Logs->Log("SOCKET", DEFAULT, "ERROR: Can't determine maximum number of open sockets: %s", strerror(errno));
		std::cout << "ERROR: Can't determine maximum number of open sockets: " << strerror(errno) << std::endl;
		ServerInstance->QuickExit(EXIT_STATUS_SOCKETENGINE);
	}

	// Zeroed handler table and pollfd array.
	ref = new EventHandler* [GetMaxFds()];
	events = new struct pollfd[GetMaxFds()];
	memset(events, 0, GetMaxFds() * sizeof(struct pollfd));
	memset(ref, 0, GetMaxFds() * sizeof(EventHandler*));
}
void PortsEngine::DelFd(EventHandler* eh)
{
	const int fd = eh->GetFd();

	// Ignore descriptors we could never have registered.
	if ((fd < 0) || (fd > GetMaxFds() - 1))
		return;

	// Detach the fd from the event port and drop our bookkeeping.
	port_dissociate(EngineHandle, PORT_SOURCE_FD, fd);
	CurrentSetSize--;
	ref[fd] = NULL;

	ServerInstance->Logs->Log("SOCKET", LOG_DEBUG, "Remove file descriptor: %d", fd);
}
int KQueueEngine::DispatchEvents()
{
	// Wait at most one second for kernel events.
	ts.tv_nsec = 0;
	ts.tv_sec = 1;

	int i = kevent(EngineHandle, NULL, 0, &ke_list[0], GetMaxFds(), &ts);
	ServerInstance->UpdateTime();

	// BUG FIX: kevent() returns -1 on error; adding that to TotalEvents
	// silently decremented the statistics counter. Only count real events.
	if (i > 0)
		TotalEvents += i;

	for (int j = 0; j < i; j++)
	{
		EventHandler* eh = ref[ke_list[j].ident];
		if (!eh)
			continue;

		// EV_EOF: peer hung up; report as an error event and move on.
		if (ke_list[j].flags & EV_EOF)
		{
			ErrorEvents++;
			eh->HandleEvent(EVENT_ERROR, ke_list[j].fflags);
			continue;
		}
		if (ke_list[j].filter == EVFILT_WRITE)
		{
			WriteEvents++;
			/* When mask is FD_WANT_FAST_WRITE or FD_WANT_SINGLE_WRITE,
			 * we set a one-shot write, so we need to clear that bit
			 * to detect when it set again.
			 */
			const int bits_to_clr = FD_WANT_SINGLE_WRITE | FD_WANT_FAST_WRITE | FD_WRITE_WILL_BLOCK;
			SetEventMask(eh, eh->GetEventMask() & ~bits_to_clr);
			eh->HandleEvent(EVENT_WRITE);
			if (eh != ref[ke_list[j].ident])
				// whoops, deleted out from under us
				continue;
		}
		if (ke_list[j].filter == EVFILT_READ)
		{
			ReadEvents++;
			SetEventMask(eh, eh->GetEventMask() & ~FD_READ_WILL_BLOCK);
			eh->HandleEvent(EVENT_READ);
		}
	}

	return i;
}
bool PortsEngine::AddFd(EventHandler* eh, int event_mask)
{
	const int fd = eh->GetFd();

	// Out-of-range or duplicate descriptors are refused.
	if ((fd < 0) || (fd > GetMaxFds() - 1))
		return false;
	if (ref[fd])
		return false;

	// Record the handler and its interest mask, then hand the fd to the port.
	ref[fd] = eh;
	SocketEngine::SetEventMask(eh, event_mask);
	port_associate(EngineHandle, PORT_SOURCE_FD, fd, mask_to_events(event_mask), eh);

	ServerInstance->Logs->Log("SOCKET", LOG_DEBUG, "New file descriptor: %d", fd);
	CurrentSetSize++;
	return true;
}
void SocketEngine::DelFd(EventHandler* eh)
{
	const int fd = eh->GetFd();

	if ((fd < 0) || (fd > GetMaxFds() - 1))
		return;

	SocketEngine::DelFdRef(eh);

	// Clear the fd from every fd_set so select() stops reporting it.
	FD_CLR(fd, &ReadSet);
	FD_CLR(fd, &WriteSet);
	FD_CLR(fd, &ErrSet);

	// Shrink the high-water mark when the top fd is removed.
	if (fd == MaxFD)
		--MaxFD;

	ServerInstance->Logs->Log("SOCKET", LOG_DEBUG, "Remove file descriptor: %d", fd);
}
bool SocketEngine::AddFd(EventHandler* eh, int event_mask)
{
	const int fd = eh->GetFd();

	if ((fd < 0) || (fd > GetMaxFds() - 1))
		return false;

	// AddFdRef() fails on duplicates.
	if (!SocketEngine::AddFdRef(eh))
		return false;

	eh->SetEventMask(event_mask);
	// Translate the mask into read/write set membership.
	OnSetEvent(eh, 0, event_mask);

	// Errors are always of interest.
	FD_SET(fd, &ErrSet);

	// Track the highest fd for select()'s nfds argument.
	if (fd > MaxFD)
		MaxFD = fd;

	ServerInstance->Logs->Log("SOCKET", LOG_DEBUG, "New file descriptor: %d", fd);
	return true;
}
bool PortsEngine::AddFd(EventHandler* eh)
{
	const int fd = eh->GetFd();

	// Range check, capacity check, duplicate check — in that order.
	if ((fd < 0) || (fd > GetMaxFds() - 1))
		return false;
	if (GetRemainingFds() <= 1)
		return false;
	if (ref[fd])
		return false;

	ref[fd] = eh;

	// Register for read or write readiness depending on the handler's mode.
	port_associate(EngineHandle, PORT_SOURCE_FD, fd, eh->Readable() ? POLLRDNORM : POLLWRNORM, eh);

	ServerInstance->Logs->Log("SOCKET",DEBUG,"New file descriptor: %d", fd);
	CurrentSetSize++;
	return true;
}
int PortsEngine::DispatchEvents()
{
	// Block up to one second waiting for port events.
	struct timespec poll_time;
	poll_time.tv_sec = 1;
	poll_time.tv_nsec = 0;

	unsigned int nget = 1; // used to denote a retrieve request.
	int i = port_getn(EngineHandle, this->events, GetMaxFds() - 1, &nget, &poll_time);

	// first handle an error condition
	if (i == -1)
		return i;

	TotalEvents += nget;

	for (i = 0; i < nget; i++)
	{
		switch (this->events[i].portev_source)
		{
			case PORT_SOURCE_FD:
			{
				int fd = this->events[i].portev_object;
				if (ref[fd])
				{
					// reinsert port for next time around
					// NOTE(review): this re-associates with POLLRDNORM only,
					// even when the event delivered was a write — write
					// interest appears to be dropped here; confirm intended.
					port_associate(EngineHandle, PORT_SOURCE_FD, fd, POLLRDNORM, ref[fd]);
					if ((this->events[i].portev_events & POLLRDNORM))
						ReadEvents++;
					else
						WriteEvents++;
					ref[fd]->HandleEvent((this->events[i].portev_events & POLLRDNORM) ? EVENT_READ : EVENT_WRITE);
				}
			}
			// NOTE(review): falls through to default (no break); harmless as
			// default only breaks.
			default:
				break;
		}
	}

	// Returns the number of events processed (loop counter after the loop).
	return i;
}
bool PollEngine::AddFd(EventHandler* eh)
{
	int fd = eh->GetFd();

	// fd must be a valid table index.
	if ((fd < 0) || (fd > GetMaxFds() - 1))
	{
		ServerInstance->Logs->Log("SOCKET",DEBUG,"AddFd out of range: (fd: %d, max: %d)", fd, GetMaxFds());
		return false;
	}

	// Keep at least one slot free.
	if (GetRemainingFds() <= 1)
	{
		ServerInstance->Logs->Log("SOCKET",DEBUG,"No remaining FDs cannot add fd: %d", fd);
		return false;
	}

	// Refuse duplicates.
	if (fd_mappings.find(fd) != fd_mappings.end())
	{
		ServerInstance->Logs->Log("SOCKET",DEBUG,"Attempt to add duplicate fd: %d", fd);
		return false;
	}

	// New entries occupy the next free slot at the end of the set.
	unsigned int index = CurrentSetSize;
	fd_mappings[fd] = index;
	ref[index] = eh;
	events[index].fd = fd;
	if (eh->Readable())
	{
		events[index].events = POLLIN;
	}
	else
	{
		events[index].events = POLLOUT;
	}

	// BUG FIX: the log line previously read events[fd].events — indexing the
	// pollfd array by fd instead of by slot index, which reports a garbage
	// value (and may read past the populated region). Use the slot index.
	ServerInstance->Logs->Log("SOCKET", DEBUG,"New file descriptor: %d (%d; index %d)", fd, events[index].events, index);
	CurrentSetSize++;
	return true;
}
void EPollEngine::DelFd(EventHandler* eh)
{
	const int fd = eh->GetFd();

	if ((fd < 0) || (fd > GetMaxFds() - 1))
	{
		ServerInstance->Logs->Log("SOCKET",LOG_DEBUG,"DelFd out of range: (fd: %d, max: %d)", fd, GetMaxFds());
		return;
	}

	// Detach from epoll. A non-null event struct is passed for pre-2.6.9
	// kernel compatibility; only data.fd matters.
	struct epoll_event ev;
	memset(&ev, 0, sizeof(ev));
	ev.data.fd = fd;
	if (epoll_ctl(EngineHandle, EPOLL_CTL_DEL, fd, &ev) < 0)
	{
		ServerInstance->Logs->Log("SOCKET",LOG_DEBUG,"epoll_ctl can't remove socket: %s", strerror(errno));
	}

	// Drop our bookkeeping regardless of the kernel's answer.
	ref[fd] = NULL;
	ServerInstance->Logs->Log("SOCKET",LOG_DEBUG,"Remove file descriptor: %d", fd);
	CurrentSetSize--;
}
void SocketEngine::DelFd(EventHandler* eh)
{
	// Remove an fd from the dense poll set, keeping the set gap-free by
	// swapping the last slot into the vacated position.
	int fd = eh->GetFd();
	if ((fd < 0) || (fd > MAX_DESCRIPTORS))
	{
		ServerInstance->Logs->Log("SOCKET", LOG_DEBUG, "DelFd out of range: (fd: %d, max: %d)", fd, GetMaxFds());
		return;
	}

	// -1 in fd_mappings means "not registered".
	if (static_cast<unsigned int>(fd) >= fd_mappings.size() || fd_mappings[fd] == -1)
	{
		ServerInstance->Logs->Log("SOCKET", LOG_DEBUG, "DelFd() on unknown fd: %d", fd);
		return;
	}

	unsigned int index = fd_mappings[fd];
	unsigned int last_index = CurrentSetSize - 1;
	int last_fd = events[last_index].fd;

	if (index != last_index)
	{
		// We need to move the last fd we got into this gap (gaps are evil!)
		// So update the mapping for the last fd to its new position
		fd_mappings[last_fd] = index;
		// move last_fd from last_index into index
		events[index].fd = last_fd;
		events[index].events = events[last_index].events;
	}

	// Now remove all data for the last slot in our list; the swap above
	// ensured this is always the right entry to clear.
	fd_mappings[fd] = -1;
	events[last_index].fd = 0;
	events[last_index].events = 0;
	SocketEngine::DelFdRef(eh);

	ServerInstance->Logs->Log("SOCKET", LOG_DEBUG, "Remove file descriptor: %d (index: %d) " "(Filled gap with: %d (index: %d))", fd, index, last_fd, last_index);
}
int PortsEngine::GetRemainingFds()
{
	// Free capacity = table size minus descriptors currently registered.
	const int in_use = CurrentSetSize;
	return GetMaxFds() - in_use;
}
EventHandler* SocketEngine::GetRef(int fd)
{
	// BUG FIX: ref holds GetMaxFds() entries, so GetMaxFds() itself is out of
	// range. The old "> GetMaxFds()" test allowed fd == GetMaxFds(), reading
	// one element past the end of the array.
	if ((fd < 0) || (fd >= GetMaxFds()))
		return 0;
	return ref[fd];
}
int main(int argc, char **argv) { // On successful completion, print out the output file sizes. std::vector<std::string> output_files; try { std::string progname = argv[0]; // Process commandline options int argn; bool help = false; std::string outdir; int index_version = 0; int sortbuf = kDefaultSortBufferMegabytes; uint32 numcpus = kDefaultNumCPUs; uint32 read_cache_max_blocks = kDefaultReadCacheBlocks; uint32 read_cache_block_size = kDefaultReadCacheBlockKilobyteSize; khGetopt options; options.flagOpt("help", help); options.flagOpt("?", help); options.opt("output", outdir); options.opt("indexversion", index_version); options.opt("sortbuf", sortbuf); options.opt("numcpus", numcpus, &khGetopt::RangeValidator<uint32, 1, kMaxNumJobsLimit_2>); options.opt("read_cache_max_blocks", read_cache_max_blocks, &khGetopt::RangeValidator<uint32, 0, 1024>); options.opt("read_cache_block_size", read_cache_block_size, &khGetopt::RangeValidator<uint32, 1, 1024>); if (!options.processAll(argc, argv, argn)) { usage(progname); } if (help) { usage(progname); } if (argn == argc) { usage(progname, "No input indexes specified"); } numcpus = std::min(numcpus, CommandlineNumCPUsDefault()); // Validate commandline options if (!outdir.size()) { usage(progname, "No output specified"); } if (index_version <= 0) { usage(progname, "Index version not specified or <= 0"); } if (numcpus < 1) { usage(progname, "Number of CPUs should not be less than 1"); } if (sortbuf <= 0) { notify(NFY_FATAL, "--sortbuf must be > 0, is %d", sortbuf); } // Create a merge of the terrain indices JOBSTATS_BEGIN(job_stats, MERGER_CREATED); // validate // We'll need to limit the number of filebundles opened by the filepool // at a single time, to keep from overflowing memory. // Allow 50 files for other operations outside the filepool. int max_open_fds = GetMaxFds(-50); // Read Cache is enabled only if read_cache_max_blocks is > 2. if (read_cache_max_blocks < 2) { notify(NFY_WARN, "Read caching is disabled. 
This will cause %s" "to be much slower. To enable, set the " "read_cache_blocks setting\n" "to a number 2 or greater.\n", argv[0]); } else { // Get the physical memory size to help choose the read_cache_max_blocks. uint64 physical_memory_size = GetPhysicalMemorySize(); if (physical_memory_size == 0) { physical_memory_size = kDefaultMinMemoryAssumed; notify(NFY_WARN, "Physical Memory available not found. " "Assuming min recommended system size: %llu bytes", static_cast<long long unsigned int>(physical_memory_size)); } else { notify(NFY_NOTICE, "Physical Memory available: %llu bytes", static_cast<long long unsigned int>(physical_memory_size)); } // Convert this read cache block size from kilobytes to bytes. read_cache_block_size *= 1024U; // Figure out the worst case size of the read cache // (if all of max_open_fds are open simultaneously) uint64 estimated_read_cache_bytes = max_open_fds * static_cast<uint64>(read_cache_max_blocks * read_cache_block_size); notify(NFY_NOTICE, "Read Cache Settings: %u count %u byte blocks per resource " "(max files open set to %u)\n" "This will use approximately %llu bytes in memory.", read_cache_max_blocks, read_cache_block_size, max_open_fds, static_cast<long long unsigned int>(estimated_read_cache_bytes)); if (estimated_read_cache_bytes > physical_memory_size) { // If our worst case read cache blows out our memory, then // lower the max_open_fds to bring it to within 90% of the memory. // Be careful with overflow here. 
max_open_fds = (physical_memory_size * 90ULL)/ (100ULL * read_cache_max_blocks * read_cache_block_size); notify(NFY_WARN, "The estimated read cache size (%llu bytes) exceeds\n" "the Physical Memory available: %llu bytes.\n" "We are reducing the max files open to %d to eliminate" "memory overruns.\n", static_cast<long long unsigned int>(estimated_read_cache_bytes), static_cast<long long unsigned int>(physical_memory_size), max_open_fds); } } geFilePool file_pool(max_open_fds); geterrain::CountedPacketFileReaderPool packet_reader_pool( "TerrainReaderPool", file_pool); // Note: read cache's will not work without at least 2 blocks. if (read_cache_max_blocks >= 2) { packet_reader_pool.EnableReadCache(read_cache_max_blocks, read_cache_block_size); } khDeleteGuard<TerrainMergeType> merger( TransferOwnership(new TerrainMergeType("Terrain Merger"))); // Print the input file sizes for diagnostic log file info. std::vector<std::string> input_files; fprintf(stderr, "index version: %d\n", index_version); for (int i = argn; i < argc; ++i) { notify(NFY_INFO, "Opening terrain index: %s", argv[i]); merger->AddSource( TransferOwnership( new TranslatingTerrainTraverser(&packet_reader_pool, argv[i]))); input_files.push_back(argv[i]); } khPrintFileSizes("Input File Sizes", input_files); merger->Start(); JOBSTATS_END(job_stats, MERGER_CREATED); // Feed this merge into a QuadsetGather operation JOBSTATS_BEGIN(job_stats, GATHERER_CREATED); // validate qtpacket::QuadsetGather<geterrain::TerrainPacketItem> gather("TerrainQuadsetGather", TransferOwnership(merger)); // Create the output packetfile geterrain::TerrainCombiner combiner(packet_reader_pool, outdir, numcpus); combiner.StartThreads(); notify(NFY_DEBUG, "started combineterrain"); // We need to wrap the combiner with a try/catch because otherwise, the // exception causes a deconstructor failure which masks the real error // which could be a CRC error in one of the terrain packets. 
std::string error_message; try { do { combiner.CombineTerrainPackets(gather.Current()); } while (gather.Advance()); } catch (const khAbortedException &e) { notify(NFY_FATAL, "Unable to proceed: See previous warnings: %s", e.what()); } catch (const std::exception &e) { notify(NFY_FATAL, "%s", e.what()); } catch (...) { notify(NFY_FATAL, "Unknown error"); } notify(NFY_DEBUG, "waiting for compress and write threads to finish"); combiner.WaitForThreadsToFinish(); notify(NFY_DEBUG, "closing the gatherer"); gather.Close(); JOBSTATS_END(job_stats, GATHERER_CREATED); // Finish the packet file JOBSTATS_BEGIN(job_stats, COMBINE); // validate notify(NFY_DEBUG, "writing the packet index"); combiner.Close(static_cast<size_t>(sortbuf) * 1024 * 1024); JOBSTATS_END(job_stats, COMBINE); // On successful completion, print the output file sizes. output_files.push_back(outdir); } catch (const khAbortedException &e) { notify(NFY_FATAL, "Unable to proceed: See previous warnings"); } catch (const std::exception &e) { notify(NFY_FATAL, "%s", e.what()); } catch (...) { notify(NFY_FATAL, "Unknown error"); } // at the end, call dump all JOBSTATS_DUMPALL(); // On successful completion, print the output file sizes. // The print occurs here to allow progress to go out of scope. khPrintFileSizes("Output File Sizes", output_files); return 0; }
bool PollEngine::DelFd(EventHandler* eh, bool force)
{
	// Remove an fd from the dense poll set; the set is kept gap-free by
	// swapping the last slot into the vacated position.
	int fd = eh->GetFd();
	if ((fd < 0) || (fd > MAX_DESCRIPTORS))
	{
		ServerInstance->Logs->Log("SOCKET", DEBUG, "DelFd out of range: (fd: %d, max: %d)", fd, GetMaxFds());
		return false;
	}

	std::map<int, unsigned int>::iterator it = fd_mappings.find(fd);
	if (it == fd_mappings.end())
	{
		ServerInstance->Logs->Log("SOCKET",DEBUG,"DelFd() on unknown fd: %d", fd);
		return false;
	}

	unsigned int index = it->second;
	unsigned int last_index = CurrentSetSize - 1;
	int last_fd = events[last_index].fd;

	if (index != last_index)
	{
		// We need to move the last fd we got into this gap (gaps are evil!)
		// So update the mapping for the last fd to its new position
		fd_mappings[last_fd] = index;
		// move last_fd from last_index into index
		events[index].fd = last_fd;
		events[index].events = events[last_index].events;
		ref[index] = ref[last_index];
	}

	// Now clear out the (now unused) last slot; the swap above ensured
	// this is always the right entry to erase.
	fd_mappings.erase(it);
	events[last_index].fd = 0;
	events[last_index].events = 0;
	ref[last_index] = NULL;
	CurrentSetSize--;

	ServerInstance->Logs->Log("SOCKET", DEBUG, "Remove file descriptor: %d (index: %d) " "(Filled gap with: %d (index: %d))", fd, index, last_fd, last_index);
	return true;
}
int EPollEngine::DispatchEvents() { socklen_t codesize = sizeof(int); int errcode; int i = epoll_wait(EngineHandle, events, GetMaxFds() - 1, 1000); ServerInstance->UpdateTime(); TotalEvents += i; for (int j = 0; j < i; j++) { EventHandler* eh = ref[events[j].data.fd]; if (!eh) { ServerInstance->Logs->Log("SOCKET",LOG_DEBUG,"Got event on unknown fd: %d", events[j].data.fd); epoll_ctl(EngineHandle, EPOLL_CTL_DEL, events[j].data.fd, &events[j]); continue; } if (events[j].events & EPOLLHUP) { ErrorEvents++; eh->HandleEvent(EVENT_ERROR, 0); continue; } if (events[j].events & EPOLLERR) { ErrorEvents++; /* Get error number */ if (getsockopt(events[j].data.fd, SOL_SOCKET, SO_ERROR, &errcode, &codesize) < 0) errcode = errno; eh->HandleEvent(EVENT_ERROR, errcode); continue; } int mask = eh->GetEventMask(); if (events[j].events & EPOLLIN) mask &= ~FD_READ_WILL_BLOCK; if (events[j].events & EPOLLOUT) { mask &= ~FD_WRITE_WILL_BLOCK; if (mask & FD_WANT_SINGLE_WRITE) { int nm = mask & ~FD_WANT_SINGLE_WRITE; OnSetEvent(eh, mask, nm); mask = nm; } } SetEventMask(eh, mask); if (events[j].events & EPOLLIN) { ReadEvents++; eh->HandleEvent(EVENT_READ); if (eh != ref[events[j].data.fd]) // whoa! we got deleted, better not give out the write event continue; } if (events[j].events & EPOLLOUT) { WriteEvents++; eh->HandleEvent(EVENT_WRITE); } } return i; }
bool SocketEngine::HasFd(int fd)
{
	// BUG FIX: ref holds GetMaxFds() entries, so GetMaxFds() itself is out of
	// range. The old "> GetMaxFds()" test allowed fd == GetMaxFds(), reading
	// one element past the end of the array.
	if ((fd < 0) || (fd >= GetMaxFds()))
		return false;
	return (ref[fd] != NULL);
}