Example #1
    /*
     * Returns true if all elements have been written to the output,
     * false if the merge was stopped early to share work with idle threads.
     */
    inline bool
    mergeToOutput(JobQueue& jobQueue)
    {
        for (size_t lastLength = length; length >= MERGE_BULK_SIZE; length -= MERGE_BULK_SIZE, output += MERGE_BULK_SIZE)
        {
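            // keep the global longest-job length in step with this job: if it
            // still holds our previous length, lower it to the current one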
            if (g_lengthOfLongestJob == lastLength)
                g_lengthOfLongestJob = length;

            if (g_lengthOfLongestJob < length)
                g_lengthOfLongestJob = length; // the following 'else if' avoids sharing work right after we just became the longest job
            else if (USE_WORK_SHARING &&
                     jobQueue.has_idle() &&
                     length > SHARE_WORK_THRESHOLD &&
                     g_lengthOfLongestJob == length)
                return false;

            loserTree.writeElementsToStream(output, MERGE_BULK_SIZE);
            lastLength = length;
        }

        loserTree.writeElementsToStream(output, length);

        return true;
    }
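
A caller is expected to react to a false return by splitting its remaining input into independent jobs for the idle threads. A minimal, hedged sketch of that pattern follows; runMergeJob() and createJobs() are hypothetical names used only for illustration and are not part of the code on this page:

    // sketch only: how a merge job might react to an interrupted merge
    bool runMergeJob(JobQueue& jobQueue)   // hypothetical wrapper
    {
        if (!mergeToOutput(jobQueue))
        {
            // merge stopped early: hand the remaining ranges to idle threads
            createJobs(jobQueue);          // assumed helper, not shown above
        }
        return true;                       // this job is finished either way
    }

The run() method below belongs to SmallsortJob8 (see its log message) and is independent of the merge job sketched above.
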
    virtual bool run(JobQueue& job_queue)
    {
        size_t n = strptr.size();

        LOGC(debug_jobs)
            << "Process SmallsortJob8 " << this << " of size " << n;

        strptr = strptr.copy_back();

        if (n < g_inssort_threshold) {
            inssort::inssort_generic(strptr.copy_back().active(), depth);
            return true;
        }

        Char* charcache = new Char[n];

        // std::deque is much slower than std::vector, so we use an artificial
        // pop_front index instead (a standalone sketch of this idiom follows
        // after this function).
        size_t pop_front = 0;
        std::vector<RadixStep8_CI> radixstack;
        radixstack.emplace_back(strptr, depth, charcache);

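        // work down the explicit recursion stack; levels below pop_front have
        // already been handed off to the job queue and are skipped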
        while (radixstack.size() > pop_front)
        {
            while (radixstack.back().idx < 255)
            {
                RadixStep8_CI& rs = radixstack.back();
                size_t b = ++rs.idx; // process the bucket rs.idx

                size_t bktsize = rs.bkt[b + 1] - rs.bkt[b];

                if (bktsize == 0)
                    continue;
                else if (bktsize < g_inssort_threshold)
                {
                    inssort::inssort_generic(
                        rs.strptr.sub(rs.bkt[b], bktsize).copy_back().active(),
                        depth + radixstack.size());
                }
                else
                {
                    radixstack.emplace_back(rs.strptr.sub(rs.bkt[b], bktsize),
                                            depth + radixstack.size(),
                                            charcache);
                }

                if (use_work_sharing && job_queue.has_idle())
                {
                    // convert top level of stack into independent jobs
                    LOGC(debug_jobs)
                        << "Freeing top level of SmallsortJob8's radixsort stack";

                    RadixStep8_CI& rt = radixstack[pop_front];

                    while (rt.idx < 255)
                    {
                        b = ++rt.idx; // enqueue the bucket rt.idx

                        size_t bktsize = rt.bkt[b + 1] - rt.bkt[b];

                        if (bktsize == 0) continue;
                        EnqueueSmallsortJob8(
                            job_queue, rt.strptr.sub(rt.bkt[b], bktsize),
                            depth + pop_front);
                    }

                    // shorten the current stack
                    ++pop_front;
                }
            }
            radixstack.pop_back();
        }

        delete[] charcache;

        return true;
    }
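
The "artificial pop_front" comment in run() refers to treating a std::vector plus an index as a queue whose front is never physically erased. A small self-contained sketch of the idiom, with made-up names purely for illustration:

    #include <cstddef>
    #include <vector>

    // sketch of the idiom: a vector-backed queue that "pops" the front by
    // advancing an index instead of erasing (no shifting, no reallocation)
    template <typename T>
    struct VectorFrontQueue {
        std::vector<T> items;           // grows at the back
        std::size_t pop_front = 0;      // index of the first live element

        bool empty() const { return items.size() <= pop_front; }
        T& front() { return items[pop_front]; }
        void drop_front() { ++pop_front; }  // O(1) "pop_front"
    };

In run() above, radixstack together with pop_front is exactly this pattern: levels below pop_front have been converted into queued jobs and are no longer touched by the loop.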