Example #1
template <class T>
size_t
BufferedStream::readInternal(T &buffer, size_t length)
{
    if (supportsSeek())
        flush(false);
    size_t remaining = length;

    size_t buffered = std::min(m_readBuffer.readAvailable(), remaining);
    m_readBuffer.copyOut(buffer, buffered);
    m_readBuffer.consume(buffered);
    remaining -= buffered;

    MORDOR_LOG_VERBOSE(g_log) << this << " read(" << length << "): "
        << buffered << " read from buffer";

    if (remaining == 0)
        return length;

    if (buffered == 0 || !m_allowPartialReads) {
        size_t result;
        do {
            // Read enough to satisfy this request, rounded up to a multiple
            // of the buffer size
            size_t todo = ((remaining - 1) / m_bufferSize + 1) * m_bufferSize;
            try {
                MORDOR_LOG_TRACE(g_log) << this << " parent()->read(" << todo
                    << ")";
                result = parent()->read(m_readBuffer, todo);
                MORDOR_LOG_DEBUG(g_log) << this << " parent()->read(" << todo
                    << "): " << result;
            } catch (...) {
                if (remaining == length) {
                    MORDOR_LOG_VERBOSE(g_log) << this << " forwarding exception";
                    throw;
                } else {
                    MORDOR_LOG_VERBOSE(g_log) << this << " swallowing exception";
                    // Swallow the exception
                    return length - remaining;
                }
            }

            buffered = std::min(m_readBuffer.readAvailable(), remaining);
            m_readBuffer.copyOut(buffer, buffered);
            m_readBuffer.consume(buffered);
            advance(buffer, buffered);
            remaining -= buffered;
        } while (remaining > 0 && !m_allowPartialReads && result != 0);
    }

    return length - remaining;
}
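Example #1 (and several of the examples below) follows a recurring logging shape: TRACE right before the delegated call, DEBUG with the observed result right after it, and VERBOSE when deciding whether to forward or swallow an exception. Below is a minimal self-contained sketch of that shape, assuming nothing from Mordor: the LOG_* macros, backendRead and loggedRead are stand-ins invented for illustration; the real code streams into Mordor's Logger instead of stderr.

#include <cstddef>
#include <iostream>

// Stand-ins for MORDOR_LOG_TRACE/DEBUG/VERBOSE: the real macros stream into
// a Mordor Logger; here we just prefix the level and write to stderr.
#define LOG_TRACE(msg)   (std::cerr << "TRACE "   << msg << std::endl)
#define LOG_DEBUG(msg)   (std::cerr << "DEBUG "   << msg << std::endl)
#define LOG_VERBOSE(msg) (std::cerr << "VERBOSE " << msg << std::endl)

// Hypothetical lower-level read standing in for parent()->read() above;
// it just pretends to deliver half of what was asked for.
static size_t backendRead(char *buffer, size_t length)
{
    for (size_t i = 0; i < length / 2; ++i)
        buffer[i] = 'x';
    return length / 2;
}

// Same shape as readInternal(): announce the call at TRACE, record the
// observed result at DEBUG, and at VERBOSE decide whether to forward the
// exception (nothing delivered yet) or swallow it (partial success).
static size_t loggedRead(char *buffer, size_t length, size_t alreadyDelivered)
{
    LOG_TRACE("backendRead(" << length << ")");
    try {
        size_t result = backendRead(buffer, length);
        LOG_DEBUG("backendRead(" << length << "): " << result);
        return result;
    } catch (...) {
        if (alreadyDelivered == 0) {
            LOG_VERBOSE("forwarding exception");
            throw;
        }
        LOG_VERBOSE("swallowing exception");
        return 0;
    }
}

int main()
{
    char buf[16];
    std::cout << loggedRead(buf, sizeof(buf), 0) << " bytes read\n";
    return 0;
}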
Example #2
void
IOManagerEPoll::tickle()
{
    int rc = write(m_tickleFds[1], "T", 1);
    MORDOR_LOG_VERBOSE(g_log) << this << " write(" << m_tickleFds[1] << ", 1): "
        << rc << " (" << errno << ")";
    MORDOR_VERIFY(rc == 1);
}
Example #3
IOManagerEPoll::~IOManagerEPoll()
{
    stop();
    close(m_epfd);
    MORDOR_LOG_TRACE(g_log) << this << " close(" << m_epfd << ")";
    close(m_tickleFds[0]);
    MORDOR_LOG_VERBOSE(g_log) << this << " close(" << m_tickleFds[0] << ")";
    close(m_tickleFds[1]);
}
Example #4
size_t
BufferedStream::flushWrite(size_t length)
{
    while (m_writeBuffer.readAvailable() >= m_bufferSize)
    {
        size_t result;
        try {
            if (m_readBuffer.readAvailable() && supportsSeek()) {
                parent()->seek(-(long long)m_readBuffer.readAvailable(), CURRENT);
                m_readBuffer.clear();
            }
            size_t toWrite = m_writeBuffer.readAvailable();
            if (m_flushMultiplesOfBuffer)
                toWrite = toWrite / m_bufferSize * m_bufferSize;
            MORDOR_LOG_TRACE(g_log) << this << " parent()->write("
                << toWrite << ")";
            result = parent()->write(m_writeBuffer, toWrite);
            MORDOR_LOG_DEBUG(g_log) << this << " parent()->write("
                << toWrite << "): " << result;
            m_writeBuffer.consume(result);
        } catch (...) {
            // If this entire write is still in our buffer,
            // back it out and report the error
            if (m_writeBuffer.readAvailable() >= length) {
                MORDOR_LOG_VERBOSE(g_log) << this << " forwarding exception";
                Buffer tempBuffer;
                tempBuffer.copyIn(m_writeBuffer, m_writeBuffer.readAvailable()
                    - length);
                m_writeBuffer.clear();
                m_writeBuffer.copyIn(tempBuffer);
                throw;
            } else {
                // Otherwise we have to say we succeeded,
                // because we're not allowed to have a partial
                // write, and we can't report an error because
                // the caller will think he needs to repeat
                // the entire write
                MORDOR_LOG_VERBOSE(g_log) << this << " swallowing exception";
                return length;
            }
        }
    }
    return length;
}
Example #5
long long
HandleStream::size()
{
    SchedulerSwitcher switcher(m_scheduler);
    long long size;
    BOOL ret = GetFileSizeEx(m_hFile, (LARGE_INTEGER*)&size);
    DWORD error = GetLastError();
    MORDOR_LOG_VERBOSE(g_log) << this << " GetFileSizeEx(" << m_hFile << ", "
        << size << "): " << ret << " (" << error << ")";
    if (!ret)
        MORDOR_THROW_EXCEPTION_FROM_ERROR_API(error, "GetFileSizeEx");
    return size;
}
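Note how Example #5 reads GetLastError() immediately after the API call and before the log statement, so that logging (or any other call) cannot overwrite the thread's last-error value. Here is a tiny portable sketch of the same idea using errno; the file path and the bare stderr logging are made up for illustration.

#include <cerrno>
#include <cstdio>
#include <cstring>
#include <iostream>

int main()
{
    // Try to open a file that presumably does not exist.
    std::FILE *f = std::fopen("/nonexistent/path", "rb");

    // Capture errno right away: any later library call (including the
    // logging below) may clobber it, which is why HandleStream::size()
    // reads GetLastError() before emitting its log line.
    int error = errno;

    std::cerr << "fopen(\"/nonexistent/path\"): " << (f ? "ok" : "failed")
              << " (" << error << ": " << std::strerror(error) << ")\n";

    if (!f)
        return 1;   // the real code would throw an exception built from the error
    std::fclose(f);
    return 0;
}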
Example #6
void
BufferedStream::close(CloseType type)
{
    MORDOR_LOG_VERBOSE(g_log) << this << " close(" << type << ")";
    if (type & READ)
        m_readBuffer.clear();
    try {
        if ((type & WRITE) && m_writeBuffer.readAvailable())
            flush(false);
    } catch (...) {
        if (ownsParent())
            parent()->close(type);
        throw;
    }
    if (ownsParent())
        parent()->close(type);
}
Example #7
void
Scheduler::start()
{
    MORDOR_LOG_VERBOSE(g_log) << this << " starting " << m_threadCount << " threads";
    boost::mutex::scoped_lock lock(m_mutex);
    if (!m_stopping)
        return;
    // TODO: There may be a race condition here if one thread calls stop(),
    // and another thread calls start() before the worker threads for this
    // scheduler actually exit; they may resurrect themselves, and the stopping
    // thread would block waiting for the thread to exit

    m_stopping = false;
    MORDOR_ASSERT(m_threads.empty());
    m_threads.resize(m_threadCount);
    for (size_t i = 0; i < m_threadCount; ++i) {
        m_threads[i] = boost::shared_ptr<Thread>(new Thread(
            boost::bind(&Scheduler::run, this)));
    }
}
Example #8
void
Scheduler::stop()
{
    // Already stopped
    if (m_rootFiber &&
        m_threadCount == 0 &&
        (m_rootFiber->state() == Fiber::TERM || m_rootFiber->state() == Fiber::INIT)) {
        MORDOR_LOG_VERBOSE(g_log) << this << " stopped";
        m_stopping = true;
        // A derived class may inhibit stopping while it has things to do in
        // its idle loop, so we can't break early
        if (stopping())
            return;
    }

    bool exitOnThisFiber = false;
    if (m_rootThread != emptytid()) {
        // A thread-hijacking scheduler must be stopped
        // from within itself to return control to the
        // original thread
        MORDOR_ASSERT(Scheduler::getThis() == this);
        if (Fiber::getThis() == m_callingFiber) {
            exitOnThisFiber = true;
            // First switch to the correct thread
            MORDOR_LOG_DEBUG(g_log) << this
                << " switching to root thread to stop";
            switchTo(m_rootThread);
        }
        if (!m_callingFiber)
            exitOnThisFiber = true;
    } else {
        // A spawned-threads only scheduler cannot be stopped from within
        // itself... who would get control?
        MORDOR_ASSERT(Scheduler::getThis() != this);
    }
    m_stopping = true;
    for (size_t i = 0; i < m_threadCount; ++i)
        tickle();
    if (m_rootFiber && (m_threadCount != 0u || Scheduler::getThis() != this))
        tickle();
    // Wait for all work to stop on this thread
    if (exitOnThisFiber) {
        while (!stopping()) {
            // Give this thread's run fiber a chance to kill itself off
            MORDOR_LOG_DEBUG(g_log) << this
                << " yielding to this thread to stop";
            yieldTo(true);
        }
    }
    // Wait for other threads to stop
    if (exitOnThisFiber ||
        Scheduler::getThis() != this) {
        MORDOR_LOG_DEBUG(g_log) << this
            << " waiting for other threads to stop";
        std::vector<boost::shared_ptr<Thread> > threads;
        {
            boost::mutex::scoped_lock lock(m_mutex);
            threads.swap(m_threads);
        }
        for (std::vector<boost::shared_ptr<Thread> >::const_iterator it
            (threads.begin());
            it != threads.end();
            ++it) {
            (*it)->join();
        }
    }
    MORDOR_LOG_VERBOSE(g_log) << this << " stopped";
}
Example #9
void
Scheduler::run()
{
    t_scheduler = this;
    if (gettid() != m_rootThread) {
        // Running in own thread
        t_fiber = Fiber::getThis().get();
    } else {
        // Hijacked a thread
        MORDOR_ASSERT(t_fiber.get() == Fiber::getThis().get());
    }
    Fiber::ptr idleFiber(new Fiber(boost::bind(&Scheduler::idle, this)));
    MORDOR_LOG_VERBOSE(g_log) << this << " starting thread with idle fiber " << idleFiber;
    Fiber::ptr dgFiber;
    // use a vector for O(1) .size()
    std::vector<FiberAndThread> batch(m_batchSize);
    bool isActive = false;
    while (true) {
        batch.clear();
        bool dontIdle = false;
        bool tickleMe = false;
        {
            boost::mutex::scoped_lock lock(m_mutex);
            // Kill ourselves off if needed
            if (m_threads.size() > m_threadCount && gettid() != m_rootThread) {
                // Accounting
                if (isActive)
                    --m_activeThreadCount;
                // Kill off the idle fiber
                try {
                    throw boost::enable_current_exception(
                        OperationAbortedException());
                } catch(...) {
                    idleFiber->inject(boost::current_exception());
                }
                // Detach our thread
                for (std::vector<boost::shared_ptr<Thread> >
                    ::iterator it = m_threads.begin();
                    it != m_threads.end();
                    ++it)
                    if ((*it)->tid() == gettid()) {
                        m_threads.erase(it);
                        if (m_threads.size() > m_threadCount)
                            tickle();
                        return;
                    }
                MORDOR_NOTREACHED();
            }

            std::list<FiberAndThread>::iterator it(m_fibers.begin());
            while (it != m_fibers.end()) {
                // If we've met our batch size, and we're not checking to see
                // if we need to tickle another thread, then break
                if ( (tickleMe || m_activeThreadCount == threadCount()) &&
                    batch.size() == m_batchSize)
                    break;

                if (it->thread != emptytid() && it->thread != gettid()) {
                    MORDOR_LOG_DEBUG(g_log) << this
                        << " skipping item scheduled for thread "
                        << it->thread;

                    // Wake up another thread to hopefully service this
                    tickleMe = true;
                    dontIdle = true;
                    ++it;
                    continue;
                }
                MORDOR_ASSERT(it->fiber || it->dg);
                // This fiber is still executing; probably just a race
                // condition where it needs to yield on one thread
                // before running on another thread
                if (it->fiber && it->fiber->state() == Fiber::EXEC) {
                    MORDOR_LOG_DEBUG(g_log) << this
                        << " skipping executing fiber " << it->fiber;
                    ++it;
                    dontIdle = true;
                    continue;
                }
                // We were just checking if there is more work; there is, so
                // set the flag and don't actually take this piece of work
                if (batch.size() == m_batchSize) {
                    tickleMe = true;
                    break;
                }
                batch.push_back(*it);
                it = m_fibers.erase(it);
                if (!isActive) {
                    ++m_activeThreadCount;
                    isActive = true;
                }
            }
            if (batch.empty() && isActive) {
                --m_activeThreadCount;
                isActive = false;
            }
        }
        if (tickleMe)
            tickle();
        MORDOR_LOG_DEBUG(g_log) << this
            << " got " << batch.size() << " fiber/dgs to process (max: "
            << m_batchSize << ", active: " << isActive << ")";
        MORDOR_ASSERT(isActive == !batch.empty());
        if (!batch.empty()) {
            std::vector<FiberAndThread>::iterator it;
            for (it = batch.begin(); it != batch.end(); ++it) {
                Fiber::ptr f = it->fiber;
                boost::function<void ()> dg = it->dg;

                try {
                    if (f && f->state() != Fiber::TERM) {
                        MORDOR_LOG_DEBUG(g_log) << this << " running " << f;
                        f->yieldTo();
                    } else if (dg) {
                        if (!dgFiber)
                            dgFiber.reset(new Fiber(dg));
                        dgFiber->reset(dg);
                        MORDOR_LOG_DEBUG(g_log) << this << " running " << dg;
                        dgFiber->yieldTo();
                        if (dgFiber->state() != Fiber::TERM)
                            dgFiber.reset();
                        else
                            dgFiber->reset(NULL);
                    }
                } catch (...) {
                    MORDOR_LOG_FATAL(Log::root())
                        << boost::current_exception_diagnostic_information();
                    throw;
                }
            }
            continue;
        }
        if (dontIdle)
            continue;

        if (idleFiber->state() == Fiber::TERM) {
            MORDOR_LOG_DEBUG(g_log) << this << " idle fiber terminated";
            if (gettid() == m_rootThread)
                m_callingFiber.reset();
            // Unblock the next thread
            if (threadCount() > 1)
                tickle();
            return;
        }
        MORDOR_LOG_DEBUG(g_log) << this << " idling";
        idleFiber->call();
    }
}
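One more detail in Scheduler::run() worth imitating: an exception escaping a fiber is logged at FATAL with boost::current_exception_diagnostic_information() and then rethrown, so the failure is recorded even if the process dies. Below is a minimal sketch of just that catch block in isolation; runTask and its std::runtime_error are hypothetical, only the Boost call is the one the example uses.

#include <boost/exception/diagnostic_information.hpp>
#include <iostream>
#include <stdexcept>

// Mirrors the catch(...) block in Scheduler::run(): log everything we can
// find out about the exception at the highest severity, then rethrow so the
// caller (or the process) still sees the failure.
void runTask()
{
    try {
        throw std::runtime_error("task blew up");   // hypothetical failure
    } catch (...) {
        std::cerr << "FATAL "
                  << boost::current_exception_diagnostic_information()
                  << std::endl;
        throw;
    }
}

int main()
{
    try {
        runTask();
    } catch (...) {
        return 1;   // the failure still propagates after being logged
    }
    return 0;
}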
Example #10
Result
PreparedStatement::execute()
{
    PGconn *conn = m_conn.lock().get();
    boost::shared_ptr<PGresult> result, next;
    int nParams = (int)m_params.size();
    Oid *paramTypes = NULL;
    int *paramLengths = NULL, *paramFormats = NULL;
    const char **params = NULL;
    if (nParams) {
        if (m_name.empty())
            paramTypes = &m_paramTypes[0];
        params = &m_params[0];
        paramLengths = &m_paramLengths[0];
        paramFormats = &m_paramFormats[0];
    }
    const char *api = NULL;
#ifndef WINDOWS
    SchedulerSwitcher switcher(m_scheduler);
#endif
    if (m_name.empty()) {
#ifndef WINDOWS
        if (m_scheduler) {
            api = "PQsendQueryParams";
            if (!PQsendQueryParams(conn, m_command.c_str(),
                nParams, paramTypes, params, paramLengths, paramFormats, m_resultFormat))
                throwException(conn);
            flush(conn, m_scheduler);
            next.reset(nextResult(conn, m_scheduler), &PQclear);
            while (next) {
                result = next;
                next.reset(nextResult(conn, m_scheduler), &PQclear);
                if (next) {
                    ExecStatusType status = PQresultStatus(next.get());
                    MORDOR_LOG_VERBOSE(g_log) << conn << " PQresultStatus(" <<
                        next.get() << "): " << PQresStatus(status);
                    switch (status) {
                        case PGRES_COMMAND_OK:
                        case PGRES_TUPLES_OK:
                            break;
                        default:
                            throwException(next.get());
                            MORDOR_NOTREACHED();
                    }
                }
            }
        } else
#endif
        {
            api = "PQexecParams";
            result.reset(PQexecParams(conn, m_command.c_str(),
                nParams, paramTypes, params, paramLengths, paramFormats, m_resultFormat),
                &PQclear);
        }
    } else {
#ifndef WINDOWS
        if (m_scheduler) {
            api = "PQsendQueryPrepared";
            if (!PQsendQueryPrepared(conn, m_name.c_str(),
                nParams, params, paramLengths, paramFormats, m_resultFormat))
                throwException(conn);
            flush(conn, m_scheduler);
            next.reset(nextResult(conn, m_scheduler), &PQclear);
            while (next) {
                result = next;
                next.reset(nextResult(conn, m_scheduler), &PQclear);
                if (next) {
                    ExecStatusType status = PQresultStatus(next.get());
                    MORDOR_LOG_VERBOSE(g_log) << conn << " PQresultStatus(" <<
                        next.get() << "): " << PQresStatus(status);
                    switch (status) {
                        case PGRES_COMMAND_OK:
                        case PGRES_TUPLES_OK:
                            break;
                        default:
                            throwException(next.get());
                            MORDOR_NOTREACHED();
                    }
                }
            }
        } else
#endif
        {
            api = "PQexecPrepared";
            result.reset(PQexecPrepared(conn, m_name.c_str(),
                nParams, params, paramLengths, paramFormats, m_resultFormat),
                &PQclear);
        }
    }
    if (!result)
        throwException(conn);
    ExecStatusType status = PQresultStatus(result.get());
    MORDOR_ASSERT(api);
    MORDOR_LOG_VERBOSE(g_log) << conn << " " << api << "(\"" << m_command
        << m_name << "\", " << nParams << "), PQresultStatus(" << result.get()
        << "): " << PQresStatus(status);
    switch (status) {
        case PGRES_COMMAND_OK:
        case PGRES_TUPLES_OK:
            return Result(result);
        default:
            throwException(result.get());
            MORDOR_NOTREACHED();
    }
}
Example #11
void
IOManagerEPoll::idle()
{
    epoll_event events[64];
    while (true) {
        unsigned long long nextTimeout;
        if (stopping(nextTimeout))
            return;
        int rc = -1;
        errno = EINTR;
        while (rc < 0 && errno == EINTR) {
            int timeout = -1;
            if (nextTimeout != ~0ull)
                timeout = (int)(nextTimeout / 1000) + 1;
            rc = epoll_wait(m_epfd, events, 64, timeout);
            if (rc < 0 && errno == EINTR)
                nextTimeout = nextTimer();
        }
        MORDOR_LOG_LEVEL(g_log, rc < 0 ? Log::ERROR : Log::VERBOSE) << this
            << " epoll_wait(" << m_epfd << "): " << rc << " (" << errno << ")";
        if (rc < 0)
            MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("epoll_wait");
        std::vector<boost::function<void ()> > expired = processTimers();
        schedule(expired.begin(), expired.end());

        for(int i = 0; i < rc; ++i) {
            epoll_event &event = events[i];
            if (event.data.fd == m_tickleFds[0]) {
                unsigned char dummy;
                int rc2 = read(m_tickleFds[0], &dummy, 1);
                MORDOR_VERIFY(rc2 == 1);
                MORDOR_LOG_VERBOSE(g_log) << this << " received tickle";
                continue;
            }
            bool err = event.events & (EPOLLERR | EPOLLHUP);
            boost::mutex::scoped_lock lock(m_mutex);
            std::map<int, AsyncEvent>::iterator it =
                m_pendingEvents.find(event.data.fd);
            if (it == m_pendingEvents.end())
                continue;
            AsyncEvent &e = it->second;
            MORDOR_LOG_TRACE(g_log) << this << " epoll_event {"
                << (epoll_events_t)event.events << ", " << event.data.fd
                << "}, registered for " << (epoll_events_t)e.event.events;
            if ((event.events & EPOLLERR) && (e.event.events & EPOLLERR)) {
                if (e.m_dgError)
                    e.m_schedulerError->schedule(e.m_dgError);
                else
                    e.m_schedulerError->schedule(e.m_fiberError);
                // Block other events from firing
                e.m_dgError = NULL;
                e.m_fiberError.reset();
                e.m_dgIn = NULL;
                e.m_fiberIn.reset();
                e.m_dgOut = NULL;
                e.m_fiberOut.reset();
                event.events = 0;
                e.event.events = 0;
            }
            if ((event.events & EPOLLHUP) && (e.event.events & EPOLLHUP)) {
                if (e.m_dgClose)
                    e.m_schedulerError->schedule(e.m_dgClose);
                else
                    e.m_schedulerError->schedule(e.m_fiberClose);
                // Block write event from firing
                e.m_dgOut = NULL;
                e.m_fiberOut.reset();
                e.m_dgClose = NULL;
                e.m_fiberClose.reset();
                event.events &= EPOLLOUT;
                e.event.events &= EPOLLOUT;
                err = false;
            }

            if (((event.events & EPOLLIN) ||
                err) && (e.event.events & EPOLLIN)) {
                if (e.m_dgIn)
                    e.m_schedulerIn->schedule(e.m_dgIn);
                else
                    e.m_schedulerIn->schedule(e.m_fiberIn);
                e.m_dgIn = NULL;
                e.m_fiberIn.reset();
                event.events |= EPOLLIN;
            }
            if (((event.events & EPOLLOUT) ||
                err) && (e.event.events & EPOLLOUT)) {
                if (e.m_dgOut)
                    e.m_schedulerOut->schedule(e.m_dgOut);
                else
                    e.m_schedulerOut->schedule(e.m_fiberOut);
                e.m_dgOut = NULL;
                e.m_fiberOut.reset();
                event.events |= EPOLLOUT;
            }
            e.event.events &= ~event.events;

            int op = e.event.events == 0 ? EPOLL_CTL_DEL : EPOLL_CTL_MOD;
            int rc2 = epoll_ctl(m_epfd, op, event.data.fd,
                &e.event);
            MORDOR_LOG_LEVEL(g_log, rc2 ? Log::ERROR : Log::VERBOSE) << this
                << " epoll_ctl(" << m_epfd << ", " << (epoll_ctl_op_t)op << ", "
                << event.data.fd << ", " << (epoll_events_t)e.event.events << "): " << rc2
                << " (" << errno << ")";
            if (rc2)
                MORDOR_THROW_EXCEPTION_FROM_LAST_ERROR_API("epoll_ctl");
            if (op == EPOLL_CTL_DEL)
                m_pendingEvents.erase(it);
        }
        Fiber::yield();
    }
}
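The epoll examples also choose the log level at run time: MORDOR_LOG_LEVEL(g_log, rc < 0 ? Log::ERROR : Log::VERBOSE) lets a single statement cover both the failure and the success path of epoll_wait and epoll_ctl. Here is a small self-contained sketch of that idea; Level, logAt and loggedWait are stand-ins invented for illustration, not Mordor APIs.

#include <iostream>
#include <string>

// Stand-in for Mordor's log levels; the real MORDOR_LOG_LEVEL macro streams
// into a logger at whatever level is passed as its second argument.
enum Level { ERROR, VERBOSE };

void logAt(Level level, const std::string &msg)
{
    std::cerr << (level == ERROR ? "ERROR " : "VERBOSE ") << msg << std::endl;
}

// Mirrors the epoll_wait logging above: one log statement serves both
// outcomes, and only the severity changes with the return code.
int loggedWait(int pretendResult, int pretendErrno)
{
    logAt(pretendResult < 0 ? ERROR : VERBOSE,
          "epoll_wait(...): " + std::to_string(pretendResult) +
          " (" + std::to_string(pretendErrno) + ")");
    return pretendResult;
}

int main()
{
    loggedWait(3, 0);    // normal wakeup with three events: logged at VERBOSE
    loggedWait(-1, 9);   // failure, errno = EBADF: logged at ERROR
    return 0;
}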