virtual void init(MemoryBuffer &data, MemoryBuffer &slaveData) override
    {
        isLocal = 0 != (TIWlocal & helper->getFlags());

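        // Deserialize the MP tags used to coordinate with the master, plus the
        // target logical filename and part descriptor when this slave is active in the build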
        mpTag = container.queryJobChannel().deserializeMPTag(data);
        mpTag2 = container.queryJobChannel().deserializeMPTag(data);
        data.read(active);
        if (active)
        {
            data.read(logicalFilename);
            partDesc.setown(deserializePartFileDescriptor(data));
        }

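        // Read the build options; a single part key never needs a separate top level key (TLK)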
        data.read(singlePartKey);
        data.read(refactor);
        if (singlePartKey)
            buildTlk = false;
        else
        {
            data.read(buildTlk);
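            // Only the first slave deals with the TLK: either a descriptor for a new one,
            // or the location of an existing TLK when building a global index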
            if (firstNode())
            {
                if (buildTlk)
                    tlkDesc.setown(deserializePartFileDescriptor(data));
                else if (!isLocal) // no new TLK being built, so locate the existing top level key part
                {
                    tlkDesc.setown(deserializePartFileDescriptor(data));
                    unsigned c;
                    data.read(c);
                    while (c--)
                    {
                        RemoteFilename rf;
                        rf.deserialize(data);
                        if (!existingTlkIFile)
                        {
                            Owned<IFile> iFile = createIFile(rf);
                            if (iFile->exists())
                                existingTlkIFile.set(iFile);
                        }
                    }
                    if (!existingTlkIFile)
                        throw MakeActivityException(this, TE_FileNotFound, "Top level key part does not exist for key");
                }
            }
        }

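        // Work out the maximum on-disk record size: variable-size records are capped
        // by MAXLENGTH if supplied, otherwise by the KEYBUILD_MAXLENGTH default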
        IOutputMetaData * diskSize = helper->queryDiskRecordSize();
        assertex(!(diskSize->getMetaFlags() & MDFneedserializedisk));
        if (diskSize->isVariableSize())
        {
            if (TIWmaxlength & helper->getFlags())
                maxDiskRecordSize = helper->getMaxKeySize();
            else
                maxDiskRecordSize = KEYBUILD_MAXLENGTH; //Current default behaviour, could be improved in the future
        }
        else
            maxDiskRecordSize = diskSize->getFixedSize();
        reportOverflow = false;
    }
Example #2
    virtual void init()
    {
        CMasterActivity::init();
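        // Expand the helper-supplied filename into a fully scoped logical filename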
        OwnedRoxieString helperFileName = helper->getFileName();
        StringBuffer expandedFileName;
        queryThorFileManager().addScope(container.queryJob(), helperFileName, expandedFileName, false);
        fileName.set(expandedFileName);
        dlfn.set(fileName);
        isLocal = 0 != (TIWlocal & helper->getFlags());
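        // Validate the index record size limits against the internal key build limit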
        IOutputMetaData * diskSize = helper->queryDiskRecordSize();
        unsigned minSize = diskSize->getMinRecordSize();
        if (minSize > KEYBUILD_MAXLENGTH)
            throw MakeActivityException(this, 0, "Index minimum record length (%d) exceeds %d internal limit", minSize, KEYBUILD_MAXLENGTH);
        unsigned maxSize;
        if (diskSize->isVariableSize())
        {
            if (TIWmaxlength & helper->getFlags())
                maxSize = helper->getMaxKeySize();
            else
                maxSize = KEYBUILD_MAXLENGTH; //Current default behaviour, could be improved in the future
        }
        else
            maxSize = diskSize->getFixedSize();
        if (maxSize > KEYBUILD_MAXLENGTH)
            throw MakeActivityException(this, 0, "Index maximum record length (%d) exceeds %d internal limit. Maximum size = %d, try adjusting index MAXLENGTH", maxSize, KEYBUILD_MAXLENGTH, minSize);

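        // Small indexes and external target files are written as a single part key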
        singlePartKey = 0 != (helper->getFlags() & TIWsmall) || dlfn.isExternal();
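        // Collect the list of target clusters the index will be written to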
        clusters.kill();
        unsigned idx=0;
        while (true)
        {
            OwnedRoxieString cluster(helper->getCluster(idx));
            if(!cluster)
                break;
            clusters.append(cluster);
            idx++;
        }
        IArrayOf<IGroup> groups;
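        // A single part key is built locally with no TLK; otherwise build a TLK for global
        // indexes, or for local indexes when @buildLocalTlks is enabled (the default)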
        if (singlePartKey)
        {
            isLocal = true;
            buildTlk = false;
        }
        else if (!isLocal || globals->getPropBool("@buildLocalTlks", true))
            buildTlk = true;

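        // Resolve the target cluster groups and, if a WIDTH was specified, check that
        // refactoring to a different number of parts is valid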
        fillClusterArray(container.queryJob(), fileName, clusters, groups);
        unsigned restrictedWidth = 0;
        if (TIWhaswidth & helper->getFlags())
        {
            restrictedWidth = helper->getWidth();
            if (restrictedWidth > container.queryJob().querySlaves())
                throw MakeActivityException(this, 0, "Unsupported, can't refactor to width(%d) larger than host cluster(%d)", restrictedWidth, container.queryJob().querySlaves());
            else if (restrictedWidth < container.queryJob().querySlaves())
            {
                if (!isLocal)
                    throw MakeActivityException(this, 0, "Unsupported, refactoring to few parts only supported for local indexes.");
                assertex(!singlePartKey);
                unsigned gwidth = groups.item(0).ordinality();
                if (0 != container.queryJob().querySlaves() % gwidth)
                    throw MakeActivityException(this, 0, "Unsupported, refactored target size (%d) must be factor of thor cluster width (%d)", groups.item(0).ordinality(), container.queryJob().querySlaves());
                if (0 == restrictedWidth)
                    restrictedWidth = gwidth;
                ForEachItemIn(g, groups)
                {
                    IGroup &group = groups.item(g);
                    if (gwidth != groups.item(g).ordinality())
                        throw MakeActivityException(this, 0, "Unsupported, cannot output multiple refactored widths, targeting cluster '%s' and '%s'", clusters.item(0), clusters.item(g));
                    if (gwidth != restrictedWidth)
                        groups.replace(*group.subset((unsigned)0, restrictedWidth), g);
                }
                refactor = true;
            }