/******************************************************************************
MODULE:  label

PURPOSE:  Label each cloud pixel with a cloud number for each segmented
          cloud within the scene

RETURN: None

HISTORY:
Date        Programmer       Reason
--------    ---------------  -------------------------------------
3/15/2013   Song Guo         Original Development

NOTES: 
******************************************************************************/
void label(unsigned char **cloud_mask, int nrows, int ncols, 
          cloud_node **cloud, int *obj_num)
{ 
    int row, col;
    int array[4];
    int min;
    int index;

    for (row = 0; row < nrows; row++)
    {
        for (col = 0; col < ncols; col++)
        {
            if (cloud_mask[row][col] == 1)
            {
                if (row > 0 && col > 0 && cloud_mask[row-1][col-1] == 1)
                    array[0] = cloud[row-1][col-1].value;
                else
                    array[0] = 0;
                if (row > 0 && cloud_mask[row-1][col] == 1)
                    array[1] = cloud[row-1][col].value;
                else
                    array[1] = 0;
                if (row > 0 && (col < ncols-1) && cloud_mask[row-1][col+1] == 1)
                    array[2] = cloud[row-1][col+1].value;
                else
                    array[2] = 0;
                if (col > 0 && cloud_mask[row][col-1] == 1)
                    array[3] = cloud[row][col-1].value;
                else
                    array[3] = 0;
              
                /* The cloud pixel is labeled as a new cloud if none of the
                   previously visited neighboring pixels are cloud pixels;
                   otherwise it is labeled with the lowest cloud number among
                   its neighbors */
                find_min(array, 4, &min, &index);
                if (min == 0)
                {
                    num_clouds++;    
                    cloud[row][col].value = num_clouds;
                }
                else
                {
                    cloud[row][col].value = min;
                    if (index == 0)
                        Union(Find(&cloud[row-1][col-1]), 
                              Find(&cloud[row][col]));
                    else if (index == 1)
                        Union(Find(&cloud[row-1][col]), 
                              Find(&cloud[row][col]));
                    else if (index == 2)
                        Union(Find(&cloud[row-1][col+1]), 
                              Find(&cloud[row][col]));
                    else if (index == 3)
                        Union(Find(&cloud[row][col-1]), 
                              Find(&cloud[row][col]));
                    else
                        continue;

                   /* If two neighboring pixels are labeled with different
                      cloud numbers, the two cloud pixels are relabeled as
                      the same cloud */
                   if ((row > 0 && col > 0 && cloud_mask[row-1][col-1] == 1) &&
                       (cloud[row-1][col-1].value != min))
                   {
                       if (index == 1)
                           Union(Find(&cloud[row-1][col]), 
                               Find(&cloud[row-1][col-1]));
                       else if (index == 2)
                           Union(Find(&cloud[row-1][col+1]), 
                               Find(&cloud[row-1][col-1]));
                       else if (index == 3)
                           Union(Find(&cloud[row][col-1]), 
                               Find(&cloud[row-1][col-1]));
                       else
                           continue;
                    }
                    if ((row > 0 && cloud_mask[row-1][col] == 1) && 
                        (cloud[row-1][col].value != min))
                    {
                        if (index == 0)
                            Union(Find(&cloud[row-1][col-1]), 
                               Find(&cloud[row-1][col]));
                        else if (index == 2)
                            Union(Find(&cloud[row-1][col+1]), 
                                Find(&cloud[row-1][col]));
                        else if (index == 3)
                            Union(Find(&cloud[row][col-1]), 
                                Find(&cloud[row-1][col]));
                        else
                            continue;
                     }
                     if ((row > 0 && (col < ncols-1) && 
                         cloud_mask[row-1][col+1] == 1) 
                         && (cloud[row-1][col+1].value != min))
                     {
                         if (index == 0)
                             Union(Find(&cloud[row-1][col-1]), 
                                Find(&cloud[row-1][col+1]));
                         else if (index == 1)
                             Union(Find(&cloud[row-1][col]), 
                                Find(&cloud[row-1][col+1]));
                         else if (index == 3)
                             Union(Find(&cloud[row][col-1]), 
                                   Find(&cloud[row-1][col+1]));
                         else
                             continue;
                     }
                     if ((col > 0 && cloud_mask[row][col-1] == 1) && 
                         (cloud[row][col-1].value != min))
                     {
                         if (index == 0)
                             Union(Find(&cloud[row-1][col-1]), 
                                 Find(&cloud[row][col-1]));
                         else if (index == 1)
                             Union(Find(&cloud[row-1][col]), 
                                 Find(&cloud[row][col-1]));
                         else if (index == 2)
                             Union(Find(&cloud[row-1][col+1]), 
                                 Find(&cloud[row][col-1]));
                         else
                             continue;
                     }
                }
            }
        }
    }

    /* The second pass labels all cloud pixels according to their root
       parent cloud pixel values */
    for (row = 0; row < nrows; row++)
    {
        for (col = 0; col < ncols; col++)
        {
            if (cloud_mask[row][col] == 1)
            {
                cloud[row][col].value = Find(&cloud[row][col])->value;
                obj_num[cloud[row][col].value]++;
            }
        }
    }
}      
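The label() routine above leans on Find() and Union() helpers and a cloud_node type declared elsewhere in the sources (num_clouds is likewise a file-scope counter). A minimal union-find sketch, assuming a pointer-based node that carries the provisional cloud number; the project's real definitions may differ:

typedef struct cloud_node_t
{
    int value;                      /* provisional cloud number          */
    struct cloud_node_t *parent;    /* union-find parent; NULL at a root */
} cloud_node;

/* Follow parent links to the root of the set, compressing the path. */
cloud_node *Find(cloud_node *node)
{
    cloud_node *root = node;
    while (root->parent != NULL && root->parent != root)
        root = root->parent;
    while (node != root)
    {
        cloud_node *next = node->parent;
        node->parent = root;        /* path compression */
        node = next;
    }
    return root;
}

/* Merge two sets, keeping the lower cloud number as the representative. */
void Union(cloud_node *a, cloud_node *b)
{
    if (a == b)
        return;
    if (a->value <= b->value)
        b->parent = a;
    else
        a->parent = b;
}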
Example #2
BBox Triangle::WorldBound() const {
	const Point &p1 = mMesh->p[mIndex[0]];
	const Point &p2 = mMesh->p[mIndex[1]];
	const Point &p3 = mMesh->p[mIndex[2]];
	return Union(BBox(p1, p2), p3);
}
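Triangle::WorldBound above (and most of the examples below) grow bounding boxes with pbrt-style Union overloads that are not shown on this page. A minimal sketch, assuming the usual pMin/pMax layout of pbrt's BBox; it is not code from any of the listed projects:

#include <algorithm>
#include <cmath>

struct Point { float x, y, z; };

struct BBox {
    Point pMin, pMax;
    // A default-constructed box is empty, so the first Union simply adopts its argument.
    BBox() : pMin{ INFINITY,  INFINITY,  INFINITY},
             pMax{-INFINITY, -INFINITY, -INFINITY} {}
    BBox(const Point &p1, const Point &p2)
        : pMin{std::min(p1.x, p2.x), std::min(p1.y, p2.y), std::min(p1.z, p2.z)},
          pMax{std::max(p1.x, p2.x), std::max(p1.y, p2.y), std::max(p1.z, p2.z)} {}
};

// Grow a box so that it also contains a point.
inline BBox Union(const BBox &b, const Point &p) {
    BBox r = b;
    r.pMin = {std::min(b.pMin.x, p.x), std::min(b.pMin.y, p.y), std::min(b.pMin.z, p.z)};
    r.pMax = {std::max(b.pMax.x, p.x), std::max(b.pMax.y, p.y), std::max(b.pMax.z, p.z)};
    return r;
}

// Grow a box so that it also contains another box.
inline BBox Union(const BBox &b1, const BBox &b2) {
    BBox r = Union(b1, b2.pMin);
    return Union(r, b2.pMax);
}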
Example #3
File: bvh.cpp  Project: yzhwang/pbrt-v3
BVHBuildNode *BVHAccel::HLBVHBuild(
    MemoryArena &arena, const std::vector<BVHPrimitiveInfo> &primitiveInfo,
    int *totalNodes,
    std::vector<std::shared_ptr<Primitive>> &orderedPrims) const {
    // Compute bounding box of all primitive centroids
    Bounds3f bounds;
    for (const BVHPrimitiveInfo &pi : primitiveInfo)
        bounds = Union(bounds, pi.centroid);

    // Compute Morton indices of primitives
    std::vector<MortonPrimitive> mortonPrims(primitiveInfo.size());
    ParallelFor([&](int i) {
        // Initialize _mortonPrims[i]_ for _i_th primitive
        constexpr int mortonBits = 10;
        constexpr int mortonScale = 1 << mortonBits;
        mortonPrims[i].primitiveIndex = primitiveInfo[i].primitiveNumber;
        Vector3f centroidOffset = bounds.Offset(primitiveInfo[i].centroid);
        mortonPrims[i].mortonCode = EncodeMorton3(centroidOffset * mortonScale);
    }, primitiveInfo.size(), 512);

    // Radix sort primitive Morton indices
    RadixSort(&mortonPrims);

    // Create LBVH treelets at bottom of BVH

    // Find intervals of primitives for each treelet
    std::vector<LBVHTreelet> treeletsToBuild;
    for (int start = 0, end = 1; end <= (int)mortonPrims.size(); ++end) {
        uint32_t mask = 0b00111111111111000000000000000000;
        if (end == (int)mortonPrims.size() ||
            ((mortonPrims[start].mortonCode & mask) !=
             (mortonPrims[end].mortonCode & mask))) {
            // Add entry to _treeletsToBuild_ for this treelet
            int nPrimitives = end - start;
            int maxBVHNodes = 2 * nPrimitives;
            BVHBuildNode *nodes = arena.Alloc<BVHBuildNode>(maxBVHNodes, false);
            treeletsToBuild.push_back({start, nPrimitives, nodes});
            start = end;
        }
    }

    // Create LBVHs for treelets in parallel
    std::atomic<int> atomicTotal(0), orderedPrimsOffset(0);
    orderedPrims.resize(primitives.size());
    ParallelFor([&](int i) {
        // Generate _i_th LBVH treelet
        int nodesCreated = 0;
        const int firstBitIndex = 29 - 12;
        LBVHTreelet &tr = treeletsToBuild[i];
        tr.buildNodes =
            emitLBVH(tr.buildNodes, primitiveInfo, &mortonPrims[tr.startIndex],
                     tr.nPrimitives, &nodesCreated, orderedPrims,
                     &orderedPrimsOffset, firstBitIndex);
        atomicTotal += nodesCreated;
    }, treeletsToBuild.size());
    *totalNodes = atomicTotal;

    // Create and return SAH BVH from LBVH treelets
    std::vector<BVHBuildNode *> finishedTreelets;
    finishedTreelets.reserve(treeletsToBuild.size());
    for (LBVHTreelet &treelet : treeletsToBuild)
        finishedTreelets.push_back(treelet.buildNodes);
    return buildUpperSAH(arena, finishedTreelets, 0, finishedTreelets.size(),
                         totalNodes);
}
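HLBVHBuild above quantizes each centroid to 10 bits per axis and interleaves them into a 30-bit Morton code; the mask 0b00111111111111000000000000000000 then groups primitives whose top 12 Morton bits agree into one treelet. A sketch of the bit interleaving that EncodeMorton3 is assumed to perform (this mirrors the standard "spread-the-bits" trick used in pbrt-v3; the real function takes a scaled Vector3f rather than raw integers):

#include <cstdint>

// Spread the low 10 bits of x so there are two zero bits between each of them.
inline uint32_t LeftShift3(uint32_t x) {
    if (x == (1 << 10)) --x;   // clamp 1024 down to 1023 so the value still fits in 10 bits
    x = (x | (x << 16)) & 0b00000011000000000000000011111111;
    x = (x | (x << 8))  & 0b00000011000000001111000000001111;
    x = (x | (x << 4))  & 0b00000011000011000011000011000011;
    x = (x | (x << 2))  & 0b00001001001001001001001001001001;
    return x;
}

// Interleave the quantized x, y and z coordinates into one 30-bit Morton code.
inline uint32_t EncodeMorton3(uint32_t x, uint32_t y, uint32_t z) {
    return (LeftShift3(z) << 2) | (LeftShift3(y) << 1) | LeftShift3(x);
}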
BBox TriangleMesh::ObjectBound() const {
    BBox objectBounds;
    for (int i = 0; i < nverts; i++)
        objectBounds = Union(objectBounds, (*WorldToObject)(p[i]));
    return objectBounds;
}
Example #5
File: bPlus.cpp  Project: tianzhuwei/-_-
void bPlus::Union(Node* node) {          // problem
	// find the sibling of this node and merge with it;
	if ((node->parent) == NULL)
	{
		for (int i = 0; i < node->key->size(); i++)
			node->child->at(i)->parent = NULL;
		
		head = head->child->at(0);
		delete node;
		return;
	}
	Node* brother;
	if (node->parent->child->back()==node)//;
	{
		brother = node->pre;
		// also consider borrowing from the sibling first, before merging;
		if (brother->key->size()>min)
		{
			BrrowKey(node, brother);
		}
		else
		{
			brother->key->insert(brother->key->end(), node->key->begin(), node->key->end());
			brother->child->insert(brother->child->end(), node->child->begin(), node->child->end());
			brother->next = node->next;
			if (node->next != NULL)
				node->next->pre = brother;
			for (int i = 0; i < node->key->size(); ++i)
				node->child->at(i)->parent = brother;
			delete node;
			node = NULL;
			vector<int>::iterator itKey = brother->parent->key->begin();
			vector<Node*>::iterator itChild = brother->parent->child->begin();
			int i = 0;
			while (brother->parent->child->at(i) != brother) {
				i++;
				itKey++;
				itChild++;
			}
			*itKey = brother->key->back();
			//brother->parent->key->at(i) = brother->key->back();
			brother->parent->key->erase(++itKey);
			brother->parent->child->erase(++itChild);
		if (brother->parent->key->size()<min)
			Union(brother->parent);
		}
	}
	else
	{
		brother = node->next;
		if (brother->key->size()>min)
		{
			BrrowKey(node, brother);
		}
		else
		{
			node->key->insert(node->key->end(), brother->key->begin(), brother->key->end());
			node->child->insert(node->child->end(), brother->child->begin(), brother->child->end());
			node->next = brother->next;
			if (brother->next!=NULL)
			{
				brother->next->pre = node;
			}
			for (int i = 0; i < brother->key->size(); i++)
				brother->child->at(i)->parent = node;
			delete brother;
			brother = NULL;
			vector<int>::iterator itKey = node->parent->key->begin();
			vector<Node*>::iterator itChild = node->parent->child->begin();
			while (*itChild != node) {
				itChild++;
				itKey++;
			}
			*(itKey) = node->key->back();
			node->parent->key->erase(++itKey);
			node->parent->child->erase(++itChild);
			if (node->parent->key->size() < min)
				Union(node->parent);
		}//else
	}
}
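bPlus::Union above merges an underflowing B+ tree node with a sibling (or borrows a key via BrrowKey). The Node type itself is not part of this snippet; a minimal sketch of the fields the code relies on, with names inferred from usage rather than taken from the project:

#include <vector>

struct Node {
    std::vector<int>*   key;     // separator keys held by this node
    std::vector<Node*>* child;   // child pointers (the code above indexes one child per key)
    Node* parent;                // NULL for the root
    Node* pre;                   // left sibling on the same level
    Node* next;                  // right sibling on the same level
};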
Example #6
MeasuredMaterial::MeasuredMaterial(string matName, const string &filename,
      Reference<Texture<float> > bump): Material(matName) {

   
    bumpMap = bump;
    const char *suffix = strrchr(filename.c_str(), '.');
    regularHalfangleData = NULL;
    thetaPhiData = NULL;
    if (!suffix)
        Error("No suffix in measured BRDF filename \"%s\".  "
              "Can't determine file type (.brdf / .merl)", filename.c_str());
    else if (!strcmp(suffix, ".brdf") || !strcmp(suffix, ".BRDF")) {
        // Load $(\theta, \phi)$ measured BRDF data
        if (loadedThetaPhi.find(filename) != loadedThetaPhi.end()) {
            thetaPhiData = loadedThetaPhi[filename];
            return;
        }
        
        vector<float> values;
        if (!ReadFloatFile(filename.c_str(), &values)) {
            Error("Unable to read BRDF data from file \"%s\"", filename.c_str());
            return;
        }
        
        uint32_t pos = 0;
        int numWls = int(values[pos++]);
        if ((values.size() - 1 - numWls) % (4 + numWls) != 0) {
            Error("Excess or insufficient data in theta, phi BRDF file \"%s\"",
                  filename.c_str());
            return;
        }

        vector<float> wls;
        for (int i = 0; i < numWls; ++i)
            wls.push_back(values[pos++]);
        
        BBox bbox;
        vector<IrregIsotropicBRDFSample> samples;
        while (pos < values.size()) {
            float thetai = values[pos++];
            float phii = values[pos++];
            float thetao = values[pos++];
            float phio = values[pos++];
            Vector wo = SphericalDirection(sinf(thetao), cosf(thetao), phio);
            Vector wi = SphericalDirection(sinf(thetai), cosf(thetai), phii);
            Spectrum s = Spectrum::FromSampled(&wls[0], &values[pos], numWls);
            pos += numWls;
            pbrt::Point p = BRDFRemap(wo, wi);
            samples.push_back(IrregIsotropicBRDFSample(p, s));
            bbox = Union(bbox, p);
        }
        loadedThetaPhi[filename] = thetaPhiData = new KdTree<IrregIsotropicBRDFSample>(samples);
    }
    else {
        // Load RegularHalfangle BRDF Data
        nThetaH = 90;
        nThetaD = 90;
        nPhiD = 180;
        
        if (loadedRegularHalfangle.find(filename) != loadedRegularHalfangle.end()) {
            regularHalfangleData = loadedRegularHalfangle[filename];
            return;
        }
        
        FILE *f = fopen(filename.c_str(), "rb");
        if (!f) {
            Error("Unable to open BRDF data file \"%s\"", filename.c_str());
            return;
        }
        int dims[3];
        if (fread(dims, sizeof(int), 3, f) != 3) {
            Error("Premature end-of-file in measured BRDF data file \"%s\"",
                  filename.c_str());
            fclose(f);
            return;
        }
        uint32_t n = dims[0] * dims[1] * dims[2];
        if (n != nThetaH * nThetaD * nPhiD)  {
            Error("Dimensions don't match\n");
            fclose(f);
            return;
        }
        
        regularHalfangleData = new float[3*n];
        const uint32_t chunkSize = 2*nPhiD;
        double *tmp = ALLOCA(double, chunkSize);
        uint32_t nChunks = n / chunkSize;
        Assert((n % chunkSize) == 0);
        float scales[3] = { 1.f/1500.f, 1.15f/1500.f, 1.66f/1500.f };
        for (int c = 0; c < 3; ++c) {
            int offset = 0;
            for (uint32_t i = 0; i < nChunks; ++i) {
                if (fread(tmp, sizeof(double), chunkSize, f) != chunkSize) {
                    Error("Premature end-of-file in measured BRDF data file \"%s\"",
                          filename.c_str());
                    delete[] regularHalfangleData;
                    regularHalfangleData = NULL;
                    fclose(f);
                    return;
                }
                for (uint32_t j = 0; j < chunkSize; ++j)
                    regularHalfangleData[3 * offset++ + c] = max(0., tmp[j] * scales[c]);
            }
        }
        
        loadedRegularHalfangle[filename] = regularHalfangleData;
        fclose(f);
    }
}
Example #7
//--------------------------------------------------------------------------------------
//
// ProcessVertices
//
// After all of the vertices have been loaded ProcessVertices is called to create the
// associated data with the hair vertices, which includes attributes like tangents,strand
// length, and transformations. Also the hair type is stored with each vertex which
// allows different simulation parameters for different sections of the hair.
//
//--------------------------------------------------------------------------------------
void TressFXAssetLoader::ProcessVertices()
{
    // construct local and global transforms for each hair strand.
    ConstructTransforms();

    // compute tangent vectors
    ComputeStrandTangent();

    // compute distances to root vertices
    ComputeDistanceToRoot();

    m_HairAsset.m_pVertices = new XMFLOAT4[m_HairAsset.m_NumTotalHairVertices];
    m_HairAsset.m_pHairStrandType = new int[m_HairAsset.m_NumTotalHairStrands];

    if (m_usingPerStrandTexCoords)
    {
        m_HairAsset.m_pStrandTexCoords = new XMFLOAT2[m_HairAsset.m_NumTotalHairStrands];
    }
    else
    {
        m_HairAsset.m_pStrandTexCoords = NULL;
    }

    m_HairAsset.m_pTangents = new XMFLOAT4[m_HairAsset.m_NumTotalHairVertices];

    // Initialize the hair strands and compute tangents
    for ( int i = 0; i < m_HairAsset.m_NumTotalHairVertices; i++ )
    {
        m_HairAsset.m_pTangents[i].x = m_Vertices[i].tangent.x;
        m_HairAsset.m_pTangents[i].y = m_Vertices[i].tangent.y;
        m_HairAsset.m_pTangents[i].z = m_Vertices[i].tangent.z;
    }

    m_HairAsset.m_pRestLengths = new float[m_HairAsset.m_NumTotalHairVertices];
    int index = 0;

    // Calculate rest lengths
    for ( int i = 0; i < m_HairAsset.m_NumTotalHairStrands; i++ )
    {
        int indexRootVert = i * m_HairAsset.m_NumOfVerticesInStrand;

        for ( int j = 0; j < m_HairAsset.m_NumOfVerticesInStrand - 1; j++ )
        {
            m_HairAsset.m_pRestLengths[index++] = (m_Vertices[indexRootVert + j].position - m_Vertices[indexRootVert + j + 1].position).Length();
        }

        // Since the number of edges is one less than the number of vertices in a hair strand, the line below acts as a placeholder.
        m_HairAsset.m_pRestLengths[index++] = 0;
    }

    assert(index == m_HairAsset.m_NumTotalHairVertices);

    m_HairAsset.m_pRefVectors = new XMFLOAT4[m_HairAsset.m_NumTotalHairVertices];
    m_HairAsset.m_pGlobalRotations = new XMFLOAT4[m_HairAsset.m_NumTotalHairVertices];
    m_HairAsset.m_pLocalRotations = new XMFLOAT4[m_HairAsset.m_NumTotalHairVertices];
    m_HairAsset.m_pTriangleVertices = new StrandVertex[m_HairAsset.m_NumTotalHairVertices];
    m_HairAsset.m_pThicknessCoeffs = new float[m_HairAsset.m_NumTotalHairVertices];
    m_HairAsset.m_pFollowRootOffset = new XMFLOAT4[m_HairAsset.m_NumTotalHairStrands];
    m_HairAsset.m_LineIndices.reserve(m_HairAsset.m_NumTotalHairVertices * 2);
    m_HairAsset.m_TriangleIndices.reserve(m_HairAsset.m_NumTotalHairVertices * 6);
    int id=0;
    index = 0;

    TressFXStrand* pGuideHair = NULL;
    int indexGuideHairStrand = -1;

    // initialize the rest of the hair data
    for ( int i = 0; i < m_HairAsset.m_NumTotalHairStrands; i++ )
    {
        int indexRootVert = i * m_HairAsset.m_NumOfVerticesInStrand;

        for ( int j = 0; j < m_HairAsset.m_NumOfVerticesInStrand - 1; j++ )
        {
            // line indices
            m_HairAsset.m_LineIndices.push_back(id);
            m_HairAsset.m_LineIndices.push_back(id + 1);

            // triangle indices
            m_HairAsset.m_TriangleIndices.push_back(2 * id);
            m_HairAsset.m_TriangleIndices.push_back(2 * id + 1);
            m_HairAsset.m_TriangleIndices.push_back(2 * id + 2);
            m_HairAsset.m_TriangleIndices.push_back(2 * id + 2);
            m_HairAsset.m_TriangleIndices.push_back(2 * id + 1);
            m_HairAsset.m_TriangleIndices.push_back(2 * id + 3);
            id++;
        }

        id++;

        TressFXHairVertex* vertices = &m_Vertices[indexRootVert];

        for ( int j = 0; j < m_HairAsset.m_NumOfVerticesInStrand; j++ )
        {
            // triangle vertices
            StrandVertex strandVertex;
            strandVertex.position = XMFLOAT3(vertices[j].position.x, vertices[j].position.y, vertices[j].position.z);
            strandVertex.tangent = XMFLOAT3(vertices[j].tangent.x, vertices[j].tangent.y, vertices[j].tangent.z);
            strandVertex.texcoord = XMFLOAT4(vertices[j].texcoord.x, vertices[j].texcoord.y, vertices[j].texcoord.z, 0);
            m_HairAsset.m_pTriangleVertices[index] = strandVertex;

            float tVal = m_HairAsset.m_pTriangleVertices[index].texcoord.z;
            m_HairAsset.m_pThicknessCoeffs[index] = sqrt(1.f - tVal * tVal);

            XMFLOAT4 v;

            // temp vertices
            v.x = vertices[j].position.x;
            v.y = vertices[j].position.y;
            v.z = vertices[j].position.z;
            v.w = vertices[j].invMass;
            m_HairAsset.m_pVertices[index] = v;

            // global rotations
            v.x = vertices[j].globalTransform.GetRotation().x;
            v.y = vertices[j].globalTransform.GetRotation().y;
            v.z = vertices[j].globalTransform.GetRotation().z;
            v.w = vertices[j].globalTransform.GetRotation().w;
            m_HairAsset.m_pGlobalRotations[index] = v;

            // local rotations
            v.x = vertices[j].localTransform.GetRotation().x;
            v.y = vertices[j].localTransform.GetRotation().y;
            v.z = vertices[j].localTransform.GetRotation().z;
            v.w = vertices[j].localTransform.GetRotation().w;
            m_HairAsset.m_pLocalRotations[index] = v;

            // ref vectors
            v.x = vertices[j].referenceVector.x;
            v.y = vertices[j].referenceVector.y;
            v.z = vertices[j].referenceVector.z;
            m_HairAsset.m_pRefVectors[index].x = v.x;
            m_HairAsset.m_pRefVectors[index].y = v.y;
            m_HairAsset.m_pRefVectors[index].z = v.z;

            index++;
        }

        int groupId = m_HairStrands[i].m_GroupID;
        m_HairAsset.m_pHairStrandType[i] = groupId;

        if ( m_usingPerStrandTexCoords )
        {
            m_HairAsset.m_pStrandTexCoords[i] = m_HairStrands[i].m_texCoord;
        }

        if ( m_HairStrands[i].m_bGuideHair )
        {
            indexGuideHairStrand = i;
            pGuideHair = &m_HairStrands[i];
            m_HairAsset.m_pFollowRootOffset[i] = XMFLOAT4(0, 0, 0, (float)indexGuideHairStrand); // fourth component is an index to the guide hair strand; for a guide hair, it points to itself.
        }
        else
        {
            assert(pGuideHair);
            tressfx_vec3& offset = m_SlaveOffset[i];
            m_HairAsset.m_pFollowRootOffset[i] = XMFLOAT4(offset.x, offset.y, offset.z, (float)indexGuideHairStrand);
        }
    }

    // Find the bounding sphere
    BBox bBox;

    for ( int i = 0; i < (int)m_Vertices.size(); ++i )
    {
        const TressFXHairVertex& vertex = m_Vertices[i];
        bBox = Union(bBox, Float3(vertex.position.x, vertex.position.y, vertex.position.z));
    }

    Float3 c; float radius;
    bBox.BoundingSphere(&c, &radius);
    m_HairAsset.m_bSphere.center = XMFLOAT3(c.x, c.y, c.z);
    m_HairAsset.m_bSphere.radius = radius;
}
Example #8
static AddError()
{
    a_sym *xsym, *sym;
    a_pro *xpro, *pro;
    a_state *x, **s, **t, *AddErrState();
    a_shift_action *tx, *ty, *trans;
    a_reduce_action *rx, *ry, *redun;
    int i;
    a_word *defined, *conflict, *rset;
    short int *at;

    trans = CALLOC( nsym, a_shift_action );
    rx = redun = CALLOC( npro + 1, a_reduce_action );
    rset = conflict = AllocSet( npro + 2 );
    for( i = 0; i <= npro; ++i ) {
        (rx++)->follow = rset;
        rset += GetSetSize( 1 );
    }
    defined = rset;
    s = CALLOC( nstate, a_state * );
    at = CALLOC( nstate, short int );
    s = t = CALLOC( nstate + 1, a_state * );
    for( x = statelist; x != NULL; x = x->next ) {
         Mark( *x );
         *t++ = x;
    }
    restart = AddErrState( &errsym->enter, s, t );
    for( x = restart; x != NULL; x = x->next ) {
        Clear( defined );
        Clear( conflict );
        xpro = NULL;
        for( i = 0; i < x->kersize; ++i ) {
            at[i] = 0;
            pro = x->name.state[i]->redun->pro;
            if( pro > xpro ) {
                xpro = pro;
            }
        }
        redun->pro = errpro;
        rx = redun + 1;
        if( x != restart )
            while( xpro ) {
                pro = xpro;
                xpro = NULL;
                Clear( rx->follow );
                for( i = 0; i < x->kersize; ++i ) {
                    ry = &x->name.state[i]->redun[at[i]];
                    if( ry->pro == pro ) {
                        Union( rx->follow, ry->follow );
                        ++(at[i]);
                        ++ry;
                    }
                    if( ry->pro > xpro ) {
                        xpro = ry->pro;
                    }
                }
                UnionAnd( conflict, rx->follow, defined );
                Union( defined, rx->follow );
                rx->pro = pro;
                ++rx;
            }
        xsym = NULL;
        for( i = 0; i < x->kersize; ++i ) {
            at[i] = 0;
            sym = x->name.state[i]->trans->sym;
            if( sym > xsym ) {
                xsym = sym;
            }
        }
        tx = trans;
        while( xsym ) {
            sym = xsym;
            xsym = NULL;
            t = s;
            for( i = 0; i < x->kersize; ++i ) {
                ty = &x->name.state[i]->trans[at[i]];
                if( ty->sym == sym ) {
                    if( !IsMarked( *ty->state ) ) {
                        Mark( *ty->state );
                        *t++ = ty->state;
                    }
                    ++(at[i]);
                    ++ty;
                }
                if( ty->sym > xsym ) {
                    xsym = ty->sym;
                }
            }
            tx->sym = sym;
            if( sym->pro != NULL ) {
                ++nvtrans;
            } else {
                if( IsBitSet( defined, sym->id ) ) {
                    SetBit( conflict, sym->id );
                    while( --t >= s ) {
                        Unmark( **t );
                    }
                    continue;
                } else {
                    SetBit( defined, sym->id );
                }
            }
            tx->state = AddErrState( &errsym->enter, s, t );
            ++tx;
        }
        x->trans = CALLOC( tx - trans + 1, a_shift_action );
        memcpy( x->trans, trans, ((char *) tx) - ((char *) trans) );
        if( Empty( conflict ) ) {
            redun->pro = NULL;
            i = 0;
        } else {
            i = 1;
        }
        while( --rx > redun ) {
            AndNot( rx->follow, conflict );
            if( Empty( rx->follow ) ) {
                rx->pro = NULL;
            } else {
                ++i;
            }
        }
        x->redun = CALLOC( i + 1, a_reduce_action );
        if( i ) {
            rset = AllocSet( i );
            rx = redun;
            while( i > 0 ) {
                if( rx->pro != NULL ) {
                    --i;
                    x->redun[i].pro = rx->pro;
                    x->redun[i].follow = rset;
                    Assign( rset, rx->follow );
                    rset += GetSetSize( 1 );
                }
                ++rx;
            }
        }
    }
    FREE( trans );
    FREE( redun );
    FREE( conflict );
    FREE( s );
    FREE( at );
}
Example #9
BBox TriangleMesh::ObjectBound() const {
    BBox bobj;
    for (int i = 0; i < nverts; i++)
        bobj = Union(bobj, (*WorldToObject)(p[i]));
    return bobj;
}
Example #10
void CCAnalyzer::labelCC()
{
	// Declaration
	point p;
	vector<point> neighbours;
	vector<int> L;

	// linked = []
	vector<int> linked;

	int nextLabel = 1;
    int currLabel;

	Rank->Add(0);	
	Parent->Add(-1); // dummy

	// First pass //
   
	//for column in row:
	for(int col=0; col<this->ysize; col++){
		// for row in data:
		for(int row=0; row<this->xsize; row++){
			// if data[row][col] is not Background
			if(!BArray[row][col]){
               // neighbours = connected elements with the current element's label
               // check the four already-scanned neighbours of the 8-neighbourhood
				neighbours.clear();
				L.clear();

				if(row-1>=0 && !BArray[row-1][col]){
					p.x = row-1;
					p.y = col;
					neighbours.push_back(p);
					L.push_back(LabelArray[row-1][col]);
				}
				if(row-1>=0 && col-1>=0 && !BArray[row-1][col-1]){
					p.x = row-1;
					p.y = col-1;
					neighbours.push_back(p);
					L.push_back(LabelArray[row-1][col-1]);
				}
				if(col-1>=0 && !BArray[row][col-1]){
					p.x = row;
					p.y = col-1;
					neighbours.push_back(p);
					L.push_back(LabelArray[row][col-1]);
				}		
				if(row+1<xsize && col-1>=0 &&!BArray[row+1][col-1]){
					p.x = row+1;
					p.y = col-1;
					neighbours.push_back(p);
					L.push_back(LabelArray[row+1][col-1]);
				}

                //if neighbours is empty
				if(neighbours.size() == 0){
					//linked[NextLabel] = set containing NextLabel
					linked.push_back(nextLabel);
					
					// labels[row][column] = nextLabel
					Parent->Add(nextLabel);
					Rank->Add(0);
					LabelArray[row][col] = nextLabel;
					
					// NextLabel += 1
					nextLabel++;               
				}else{                   
                   // Find the smallest label
                   // L = neighbours labels - Already containing the labels
                   LabelArray[row][col] = getMinVal(L);
                   currLabel = LabelArray[row][col];

				   /*for label in L
                       linked[label] = union(linked[label], L)*/
 		
				   for(int labelNum=0; labelNum < L.size(); labelNum++){
					   if(currLabel != L[labelNum]){
							Union(currLabel, L[labelNum]);
					   }
				   }

				} // if(neighbours.size() == 0){
			} // if(BArray[row][col] != 0){
		} // for(int row=1;
	} // for(int col=1; 

	// End of First pass //

	// Second pass

	//for column in row:
	for(int col=0; col<this->ysize; col++){
		// for row in data:
		for(int row=0; row<this->xsize; row++){
			// if labels[row][column] is not Background
			if(LabelArray[row][col] != 0){
				// labels[row][col] = Find(labels[row][col])     
				LabelArray[row][col] = Find(LabelArray[row][col]);	
			}
		}
	}

	// End of Second pass //
	numberOflabels = renumber_labels(1) - 1;

	/*
	System::IO::StreamWriter^ sr = gcnew System::IO::StreamWriter("C:\\renum.txt");
	int currLab;
	for (int i = 0; i < xsize; i++)
	{
		for (int j = 0; j < ysize; j++)
		{
			currLab = LabelArray[i][j];
			if (currLab>0)
				sr->Write(" "+currLab);
			else
				sr->Write(" "+0);
		}
		sr->WriteLine();
	}
	sr->Close();
	*/
}
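CCAnalyzer::labelCC above stores its union-find state in the Rank and Parent collections and resolves labels through integer-indexed Find() and Union() helpers defined elsewhere in the class. A minimal sketch over plain vectors, assuming union by rank with path compression (the real members are managed containers, so the details will differ):

#include <utility>
#include <vector>

std::vector<int> Parent;   // Parent[x] == x marks a root label
std::vector<int> Rank;

// Walk up to the root label, flattening the chain as we go (path halving).
int Find(int x) {
    while (Parent[x] != x) {
        Parent[x] = Parent[Parent[x]];
        x = Parent[x];
    }
    return x;
}

// Merge the sets containing labels a and b, attaching the shallower tree under the deeper one.
void Union(int a, int b) {
    int ra = Find(a), rb = Find(b);
    if (ra == rb) return;
    if (Rank[ra] < Rank[rb]) std::swap(ra, rb);
    Parent[rb] = ra;
    if (Rank[ra] == Rank[rb]) ++Rank[ra];
}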
Example #11
void BVHAccel::FindBestSplit(std::vector<BVHAccelTreeNode *> &list, unsigned int begin, unsigned int end, float *splitValue, unsigned int *bestAxis) {
	if (end - begin == 2) {
		// Trivial case with two elements
		*splitValue = (list[begin]->bbox.pMax[0] + list[begin]->bbox.pMin[0] +
				list[end - 1]->bbox.pMax[0] + list[end - 1]->bbox.pMin[0]) / 2;
		*bestAxis = 0;
	} else {
		// Calculate BBs mean center (times 2)
		Point mean2(0, 0, 0), var(0, 0, 0);
		for (unsigned int i = begin; i < end; i++)
			mean2 += list[i]->bbox.pMax + list[i]->bbox.pMin;
		mean2 /= static_cast<float>(end - begin);

		// Calculate variance
		for (unsigned int i = begin; i < end; i++) {
			Vector v = list[i]->bbox.pMax + list[i]->bbox.pMin - mean2;
			v.x *= v.x;
			v.y *= v.y;
			v.z *= v.z;
			var += v;
		}
		// Select axis with more variance
		if (var.x > var.y && var.x > var.z)
			*bestAxis = 0;
		else if (var.y > var.z)
			*bestAxis = 1;
		else
			*bestAxis = 2;

		if (costSamples > 1) {
			BBox nodeBounds;
			for (unsigned int i = begin; i < end; i++)
				nodeBounds = Union(nodeBounds, list[i]->bbox);

			Vector d = nodeBounds.pMax - nodeBounds.pMin;
			const float invTotalSA = 1.f / nodeBounds.SurfaceArea();

			// Sample cost for split at some points
			float increment = 2 * d[*bestAxis] / (costSamples + 1);
			float bestCost = INFINITY;
			for (float splitVal = 2 * nodeBounds.pMin[*bestAxis] + increment; splitVal < 2 * nodeBounds.pMax[*bestAxis]; splitVal += increment) {
				int nBelow = 0, nAbove = 0;
				BBox bbBelow, bbAbove;
				for (unsigned int j = begin; j < end; j++) {
					if ((list[j]->bbox.pMax[*bestAxis] + list[j]->bbox.pMin[*bestAxis]) < splitVal) {
						nBelow++;
						bbBelow = Union(bbBelow, list[j]->bbox);
					} else {
						nAbove++;
						bbAbove = Union(bbAbove, list[j]->bbox);
					}
				}
				const float pBelow = bbBelow.SurfaceArea() * invTotalSA;
				const float pAbove = bbAbove.SurfaceArea() * invTotalSA;
				float eb = (nAbove == 0 || nBelow == 0) ? emptyBonus : 0.f;
				float cost = traversalCost + isectCost * (1.f - eb) * (pBelow * nBelow + pAbove * nAbove);
				// Update best split if this is lowest cost so far
				if (cost < bestCost) {
					bestCost = cost;
					*splitValue = splitVal;
				}
			}
		} else {
			// Split in half around the mean center
			*splitValue = mean2[*bestAxis];
		}
	}
}
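The sampled search in FindBestSplit scores each candidate plane with the surface area heuristic: a fixed traversal cost plus, for each side, the chance of a ray hitting that side (its surface area over the node's) times the number of primitives it would hold. The cost line in the loop above boils down to this helper (a restatement for clarity, not a function from the project):

// Surface-area-heuristic cost of one candidate split.
// traversalCost, isectCost and emptyBonus are tuning constants of the accelerator.
inline float SahCost(float traversalCost, float isectCost, float emptyBonus,
                     float areaBelow, float areaAbove, float invTotalArea,
                     int nBelow, int nAbove) {
    const float pBelow = areaBelow * invTotalArea;  // probability of hitting the lower half
    const float pAbove = areaAbove * invTotalArea;  // probability of hitting the upper half
    const float eb = (nBelow == 0 || nAbove == 0) ? emptyBonus : 0.f;
    return traversalCost + isectCost * (1.f - eb) * (pBelow * nBelow + pAbove * nAbove);
}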
Example #12
File: grid.cpp  Project: acpa2691/cs348b
// GridAccel Method Definitions
GridAccel::GridAccel(const vector<Reference<Primitive> > &p,
		bool forRefined, bool refineImmediately)
	: gridForRefined(forRefined) {
	// Initialize _prims_ with primitives for grid
	vector<Reference<Primitive> > prims;
	if (refineImmediately)
		for (u_int i = 0; i < p.size(); ++i)
			p[i]->FullyRefine(prims);
	else
		prims = p;
	// Initialize mailboxes for grid
	nMailboxes = prims.size();
	mailboxes = (MailboxPrim *)AllocAligned(nMailboxes *
		sizeof(MailboxPrim));
	for (u_int i = 0; i < nMailboxes; ++i)
		new (&mailboxes[i]) MailboxPrim(prims[i]);
	// Compute bounds and choose grid resolution
	for (u_int i = 0; i < prims.size(); ++i)
		bounds = Union(bounds, prims[i]->WorldBound());
	Vector delta = bounds.pMax - bounds.pMin;
	// Find _voxelsPerUnitDist_ for grid
	int maxAxis = bounds.MaximumExtent();
	float invMaxWidth = 1.f / delta[maxAxis];
	Assert(invMaxWidth > 0.f); // NOBOOK
	float cubeRoot = 3.f * powf(float(prims.size()), 1.f/3.f);
	float voxelsPerUnitDist = cubeRoot * invMaxWidth;
	for (int axis = 0; axis < 3; ++axis) {
		NVoxels[axis] =
		     Round2Int(delta[axis] * voxelsPerUnitDist);
		NVoxels[axis] = Clamp(NVoxels[axis], 1, 64);
	}
	// Compute voxel widths and allocate voxels
	for (int axis = 0; axis < 3; ++axis) {
		Width[axis] = delta[axis] / NVoxels[axis];
		InvWidth[axis] =
		    (Width[axis] == 0.f) ? 0.f : 1.f / Width[axis];
	}
	int nVoxels = NVoxels[0] * NVoxels[1] * NVoxels[2];
	voxels = (Voxel **)AllocAligned(nVoxels * sizeof(Voxel *));
	memset(voxels, 0, nVoxels * sizeof(Voxel *));
	// Add primitives to grid voxels
	for (u_int i = 0; i < prims.size(); ++i) {
		// Find voxel extent of primitive
		BBox pb = prims[i]->WorldBound();
		int vmin[3], vmax[3];
		for (int axis = 0; axis < 3; ++axis) {
			vmin[axis] = PosToVoxel(pb.pMin, axis);
			vmax[axis] = PosToVoxel(pb.pMax, axis);
		}
		// Add primitive to overlapping voxels
		for (int z = vmin[2]; z <= vmax[2]; ++z)
			for (int y = vmin[1]; y <= vmax[1]; ++y)
				for (int x = vmin[0]; x <= vmax[0]; ++x) {
					int offset = Offset(x, y, z);
					if (!voxels[offset]) {
						// Allocate new voxel and store primitive in it
						voxels[offset] = new (voxelArena) Voxel(&mailboxes[i]);
					}
					else {
						// Add primitive to already-allocated voxel
						voxels[offset]->AddPrimitive(&mailboxes[i]);
					}
				}
		static StatsRatio nPrimitiveVoxels("Grid Accelerator", // NOBOOK
			"Voxels covered vs # / primitives"); // NOBOOK
		nPrimitiveVoxels.Add((1 + vmax[0]-vmin[0]) * (1 + vmax[1]-vmin[1]) * // NOBOOK
			(1 + vmax[2]-vmin[2]), 1); // NOBOOK
	}
	// Update grid statistics
	static StatsPercentage nEmptyVoxels("Grid Accelerator",
	                                    "Empty voxels");
	static StatsRatio avgPrimsInVoxel("Grid Accelerator",
		"Average # of primitives in voxel");
	static StatsCounter maxPrimsInVoxel("Grid Accelerator",
		"Max # of primitives in a grid voxel");
	nEmptyVoxels.Add(0, NVoxels[0] * NVoxels[1] * NVoxels[2]);
	avgPrimsInVoxel.Add(0,NVoxels[0] * NVoxels[1] * NVoxels[2]);
	for (int z = 0; z < NVoxels[2]; ++z)
		for (int y = 0; y < NVoxels[1]; ++y)
			for (int x = 0; x < NVoxels[0]; ++x) {
				int offset = Offset(x, y, z);
				if (!voxels[offset]) nEmptyVoxels.Add(1, 0);
				else {
				    int nPrims = voxels[offset]->nPrimitives;
					maxPrimsInVoxel.Max(nPrims);
					avgPrimsInVoxel.Add(nPrims, 0);
				}
			}
}
Example #13
void InitializeTokenSets()
{
	if ( ! bTokenSetInitialized ) {
		First_name.insert( IDENTIFIER );

		First_typeName.insert( IDENTIFIER );

		First_colon.insert( COLON );

		First_inputend.insert( INPUTEND );

		First_inv.insert( INV );

		First_right_parenthesis.insert( RIGHT_PARENTHESIS );

		First_left_parenthesis.insert( LEFT_PARENTHESIS );

		First_prePost.insert( CLASS_PREPOST );

		First_in.insert( INN );

		First_equal.insert( EQUAL );

		First_then.insert( THEN );

		First_else.insert( ELSE );

		First_endif.insert( ENDIF );

		First_delimiter.insert( DELIMITER );

		First_right_bracket.insert( RIGHT_BRACKET );

		First_left_brace.insert( LEFT_BRACE );

		First_right_brace.insert( RIGHT_BRACE );

		First_literal.insert( STRING );
		First_literal.insert( REAL );
		First_literal.insert( INTEGER );
		First_literal.insert( POUND );
		First_literal.insert( CLASS_BOOLEAN );

		First_primaryExpression = First_literal;
		First_primaryExpression .insert( IDENTIFIER );
		First_primaryExpression .insert( LEFT_PARENTHESIS );
		First_primaryExpression .insert( IF );

		First_contextDeclarationHelper.insert( IDENTIFIER );

		First_formalParameter.insert( IDENTIFIER );

		First_formalParameterList = First_formalParameter;

		First_featureCallParameters.insert( LEFT_PARENTHESIS );

		First_featureCall.insert( IDENTIFIER );

		First_postfixExpression = Union( First_primaryExpression, First_featureCall );

		First_unaryExpression = First_postfixExpression;
		First_unaryExpression.insert( CLASS_UNARY );

		First_multiplicativeExpression = First_unaryExpression;

		First_additiveExpression = First_multiplicativeExpression;

		First_relationalExpression = First_additiveExpression;

		First_andExpression = First_relationalExpression;

		First_xorExpression = First_andExpression;

		First_orExpression = First_xorExpression;

		First_implicationExpression = First_orExpression;

		First_letExpression.insert( LET );

		First_expression = Union( First_letExpression, First_implicationExpression );

		First_featureCallParametersHelper = First_expression;
		First_featureCallParametersHelper.insert( IDENTIFIER );
		First_featureCallParametersHelper.insert( RIGHT_PARENTHESIS );

		First_actualParameterList = First_expression;

		First_expressionListOrRange = First_expression;

		First_qualifiers.insert( LEFT_BRACKET );


//<udmoclpat changes
		First_fileNode.insert( PAT_OPEN );

		First_handleNode.insert( PAT_SWITCH );

		First_printNode.insert( PAT_PRINT );

		First_textNode.insert( SEPARATOR );

		First_extendedExpression = Union( First_expression, First_fileNode );
		First_extendedExpression = Union( First_extendedExpression, First_handleNode );
		First_extendedExpression = Union( First_extendedExpression, First_printNode );

		First_enumeratedExpression = Union( First_expression, First_left_brace );
//udmoclpat changes>


		bTokenSetInitialized = true;

		Last_contextDeclarationHelper.insert( CLASS_STEREOTYPES );

		Last_literal.insert( STRING );
		Last_literal.insert( REAL );
		Last_literal.insert( INTEGER );
		Last_literal.insert( IDENTIFIER );
		Last_literal.insert( CLASS_BOOLEAN );

		Last_primaryExpression = Last_literal;
		Last_primaryExpression.insert( RIGHT_BRACE );
		Last_primaryExpression.insert( RIGHT_PARENTHESIS );
		Last_primaryExpression.insert( ENDIF );

		Last_featureCall.insert( IDENTIFIER );
		Last_featureCall.insert( RIGHT_PARENTHESIS );

		Last_implicationExpression = Union( Last_primaryExpression, Last_featureCall );

		Last_expression = Last_implicationExpression;

		Last_featureCallParametersHelper = Last_expression;
	}
}
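InitializeTokenSets above builds FIRST/LAST token sets and combines them with a two-argument Union() helper that is not shown on this page. Assuming the token sets behave like std::set, the helper presumably amounts to something like:

#include <set>

// Return a new set containing every element of a together with every element of b.
template <typename T>
std::set<T> Union(const std::set<T> &a, const std::set<T> &b) {
    std::set<T> result = a;
    result.insert(b.begin(), b.end());
    return result;
}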
Example #14
void QBVHAccel::Init(const Mesh *m) {
  assert (!initialized);

  mesh = m;
  const unsigned int totalTriangleCount = mesh->GetTotalTriangleCount();

  // Temporary data for building
  u_int *primsIndexes = new u_int[totalTriangleCount + 3]; // For the case where
  // the last quad would begin at the last primitive
  // (or the second or third last primitive)

  // The number of nodes depends on the number of primitives,
  // and is bounded by 2 * nPrims - 1.
  // Even though there will normally be at least 4 primitives per leaf,
  // that is not always the case, so keep using the general bound.
  nNodes = 0;
  maxNodes = 1;
  for (u_int layer = ((totalTriangleCount + maxPrimsPerLeaf - 1)
      / maxPrimsPerLeaf + 3) / 4; layer != 1; layer = (layer + 3) / 4)
    maxNodes += layer;
  nodes = AllocAligned<QBVHNode> (maxNodes);
  for (u_int i = 0; i < maxNodes; ++i)
    nodes[i] = QBVHNode();

  // The arrays that will contain
  // - the bounding boxes for all triangles
  // - the centroids for all triangles
  BBox *primsBboxes = new BBox[totalTriangleCount];
  Point *primsCentroids = new Point[totalTriangleCount];
  // The bounding volume of all the centroids
  BBox centroidsBbox;

  const Point *verts = mesh->GetVertices();
  const Triangle *triangles = mesh->GetTriangles();

  // Fill each base array
  for (u_int i = 0; i < totalTriangleCount; ++i) {
    // This array will be reorganized during construction.
    primsIndexes[i] = i;

    // Compute the bounding box for the triangle
    primsBboxes[i] = triangles[i].WorldBound(verts);
    primsBboxes[i].Expand(RAY_EPSILON);
    primsCentroids[i] = (primsBboxes[i].pMin + primsBboxes[i].pMax) * .5f;

    // Update the global bounding boxes
    worldBound = Union(worldBound, primsBboxes[i]);
    centroidsBbox = Union(centroidsBbox, primsCentroids[i]);
  }

  // Arbitrarily take the last primitive for the last 3
  primsIndexes[totalTriangleCount] = totalTriangleCount - 1;
  primsIndexes[totalTriangleCount + 1] = totalTriangleCount - 1;
  primsIndexes[totalTriangleCount + 2] = totalTriangleCount - 1;

  // Recursively build the tree
  LR_LOG( "Building QBVH, primitives: " << totalTriangleCount << ", initial nodes: " << maxNodes);

  nQuads = 0;
  BuildTree(0, totalTriangleCount, primsIndexes, primsBboxes, primsCentroids,
      worldBound, centroidsBbox, -1, 0, 0);

  prims = AllocAligned<QuadTriangle> (nQuads);
  nQuads = 0;
  PreSwizzle(0, primsIndexes);

  LR_LOG( "QBVH completed with " << nNodes << "/" << maxNodes << " nodes");
  LR_LOG( "Total QBVH memory usage: " << nNodes * sizeof(QBVHNode) / 1024 << "Kbytes");
  LR_LOG( "Total QBVH QuadTriangle count: " << nQuads);
  LR_LOG( "Max. QBVH Depth: " << maxDepth);

  // Release temporary memory
  delete[] primsBboxes;
  delete[] primsCentroids;
  delete[] primsIndexes;

  initialized = true;
}
 virtual flatbuffers::Offset<DeadCreator::Action> getActionObject(flatbuffers::FlatBufferBuilder& builder) override
 {
     auto switchInfo = DeadCreator::CreateSwitchInfo2(builder,
                                                      builder.CreateString(_name.getSwitchName()),
                                                      static_cast<DeadCreator::SwitchStatus2>(_status.getSwitchStatus()),
                                                      _name.getSwitchIndex());
     
     auto obj = DeadCreator::CreateSetSwitch(builder, switchInfo);
     return DeadCreator::CreateAction(builder, DeadCreator::ActionBase_SetSwitch, obj.Union());
 }
Example #16
void QBVHAccel::BuildTree(u_int start, u_int end, u_int *primsIndexes,
		BBox *primsBboxes, Point *primsCentroids, const BBox &nodeBbox,
		const BBox &centroidsBbox, int32_t parentIndex, int32_t childIndex, int depth) {
	maxDepth = (depth >= maxDepth) ? depth : maxDepth; // Set depth so we know how much stack we need later.

	// Create a leaf ?
	//********
	if (depth > 64 || end - start <= maxPrimsPerLeaf) {
		if (depth > 64) {
//			LR_LOG(ctx, "Maximum recursion depth reached while constructing QBVH, forcing a leaf node");
			if (end - start > 64) {
				//LR_LOG(ctx, "QBVH unable to handle geometry, too many primitives in leaf");
			}
		}
		CreateTempLeaf(parentIndex, childIndex, start, end, nodeBbox);
		return;
	}

	int32_t currentNode = parentIndex;
	int32_t leftChildIndex = childIndex;
	int32_t rightChildIndex = childIndex + 1;

	// Number of primitives in each bin
	int bins[NB_BINS];
	// Bbox of the primitives in the bin
	BBox binsBbox[NB_BINS];

	//--------------
	// Fill in the bins, considering all the primitives when a given
	// threshold is reached, else considering only a portion of the
	// primitives for the binned-SAH process. Also compute the bins bboxes
	// for the primitives. 

	for (u_int i = 0; i < NB_BINS; ++i)
		bins[i] = 0;

	u_int step = (end - start < fullSweepThreshold) ? 1 : skipFactor;

	// Choose the split axis, taking the axis of maximum extent for the
	// centroids (else weird cases can occur, where the maximum extent axis
	// for the nodeBbox is an axis of 0 extent for the centroids one.).
	const int axis = centroidsBbox.MaximumExtent();

	// Precompute values that are constant with respect to the current
	// primitive considered.
	const float k0 = centroidsBbox.pMin[axis];
	const float k1 = NB_BINS / (centroidsBbox.pMax[axis] - k0);

	// If the bbox is a point, create a leaf, hoping there are not more
	// than 64 primitives that share the same center.
	if (k1 == INFINITY) {
		if (end - start > 64) {}
			//LR_LOG(ctx, "QBVH unable to handle geometry, too many primitives with the same centroid");
		CreateTempLeaf(parentIndex, childIndex, start, end, nodeBbox);
		return;
	}

	// Create an intermediate node if the depth indicates to do so.
	// Register the split axis.
	if (depth % 2 == 0) {
		currentNode = CreateIntermediateNode(parentIndex, childIndex, nodeBbox);
		leftChildIndex = 0;
		rightChildIndex = 2;
	}

	for (u_int i = start; i < end; i += step) {
		u_int primIndex = primsIndexes[i];

		// Binning is relative to the centroids bbox and to the
		// primitives' centroid.
		const int binId = Min(NB_BINS - 1, Floor2Int(k1 * (primsCentroids[primIndex][axis] - k0)));

		bins[binId]++;
		binsBbox[binId] = Union(binsBbox[binId], primsBboxes[primIndex]);
	}

	//--------------
	// Evaluate where to split.

	// Cumulative number of primitives in the bins from the first to the
	// ith, and from the last to the ith.
	int nbPrimsLeft[NB_BINS];
	int nbPrimsRight[NB_BINS];
	// The corresponding cumulative bounding boxes.
	BBox bboxesLeft[NB_BINS];
	BBox bboxesRight[NB_BINS];

	// The corresponding volumes.
	float vLeft[NB_BINS];
	float vRight[NB_BINS];

	BBox currentBboxLeft, currentBboxRight;
	int currentNbLeft = 0, currentNbRight = 0;

	for (int i = 0; i < NB_BINS; ++i) {
		//-----
		// Left side
		// Number of prims
		currentNbLeft += bins[i];
		nbPrimsLeft[i] = currentNbLeft;
		// Prims bbox
		currentBboxLeft = Union(currentBboxLeft, binsBbox[i]);
		bboxesLeft[i] = currentBboxLeft;
		// Surface area
		vLeft[i] = currentBboxLeft.SurfaceArea();

		//-----
		// Right side
		// Number of prims
		int rightIndex = NB_BINS - 1 - i;
		currentNbRight += bins[rightIndex];
		nbPrimsRight[rightIndex] = currentNbRight;
		// Prims bbox
		currentBboxRight = Union(currentBboxRight, binsBbox[rightIndex]);
		bboxesRight[rightIndex] = currentBboxRight;
		// Surface area
		vRight[rightIndex] = currentBboxRight.SurfaceArea();
	}

	int minBin = -1;
	float minCost = INFINITY;
	// Find the best split axis,
	// there must be at least a bin on the right side
	for (int i = 0; i < NB_BINS - 1; ++i) {
		float cost = vLeft[i] * nbPrimsLeft[i] +
				vRight[i + 1] * nbPrimsRight[i + 1];
		if (cost < minCost) {
			minBin = i;
			minCost = cost;
		}
	}

	//-----------------
	// Make the partition, in a "quicksort partitioning" way,
	// the pivot being the position of the split plane
	// (no more binId computation)
	// track also the bboxes (primitives and centroids)
	// for the left and right halves.

	// The split plane coordinate is the coordinate of the end of
	// the chosen bin along the split axis
	float splitPos = centroidsBbox.pMin[axis] + (minBin + 1) *
			(centroidsBbox.pMax[axis] - centroidsBbox.pMin[axis]) / NB_BINS;


	BBox leftChildBbox, rightChildBbox;
	BBox leftChildCentroidsBbox, rightChildCentroidsBbox;

	u_int storeIndex = start;
	for (u_int i = start; i < end; ++i) {
		u_int primIndex = primsIndexes[i];

		if (primsCentroids[primIndex][axis] <= splitPos) {
			// Swap
			primsIndexes[i] = primsIndexes[storeIndex];
			primsIndexes[storeIndex] = primIndex;
			++storeIndex;

			// Update the bounding boxes,
			// this triangle is on the left side
			leftChildBbox = Union(leftChildBbox, primsBboxes[primIndex]);
			leftChildCentroidsBbox = Union(leftChildCentroidsBbox, primsCentroids[primIndex]);
		} else {
			// Update the bounding boxes,
			// this triangle is on the right side.
			rightChildBbox = Union(rightChildBbox, primsBboxes[primIndex]);
			rightChildCentroidsBbox = Union(rightChildCentroidsBbox, primsCentroids[primIndex]);
		}
	}

	// Build recursively
	BuildTree(start, storeIndex, primsIndexes, primsBboxes, primsCentroids,
			leftChildBbox, leftChildCentroidsBbox, currentNode,
			leftChildIndex, depth + 1);
	BuildTree(storeIndex, end, primsIndexes, primsBboxes, primsCentroids,
			rightChildBbox, rightChildCentroidsBbox, currentNode,
			rightChildIndex, depth + 1);
}
Example #17
File: bvh.cpp  Project: joohaeng/pbrt-v2
BVHBuildNode *BVHAccel::recursiveBuild(MemoryArena &buildArena,
        vector<BVHPrimitiveInfo> &buildData, uint32_t start,
        uint32_t end, uint32_t *totalNodes,
        vector<Reference<Primitive> > &orderedPrims) {
    Assert(start != end);
    (*totalNodes)++;
    BVHBuildNode *node = buildArena.Alloc<BVHBuildNode>();
    // Compute bounds of all primitives in BVH node
    BBox bbox;
    for (uint32_t i = start; i < end; ++i) {
        uint32_t primNum = buildData[i].primitiveNumber;
        bbox = Union(bbox, primitives[primNum]->WorldBound());
    }
    uint32_t nPrimitives = end - start;
    if (nPrimitives == 1) {
        // Create leaf _BVHBuildNode_
        uint32_t firstPrimOffset = orderedPrims.size();
        for (uint32_t i = start; i < end; ++i) {
            uint32_t primNum = buildData[i].primitiveNumber;
            orderedPrims.push_back(primitives[primNum]);
        }
        node->InitLeaf(firstPrimOffset, nPrimitives, bbox);
    }
    else {
        // Compute bound of primitive centroids, choose split dimension _dim_
        BBox centroidBounds;
        for (uint32_t i = start; i < end; ++i)
            centroidBounds = Union(centroidBounds, buildData[i].centroid);
        int dim = centroidBounds.MaximumExtent();

        // Partition primitives into two sets and build children
        uint32_t mid = (start + end) / 2;
        if (centroidBounds.pMax[dim] == centroidBounds.pMin[dim]) {
            // Create leaf _BVHBuildNode_
            uint32_t firstPrimOffset = orderedPrims.size();
            for (uint32_t i = start; i < end; ++i) {
                uint32_t primNum = buildData[i].primitiveNumber;
                orderedPrims.push_back(primitives[primNum]);
            }
            node->InitLeaf(firstPrimOffset, nPrimitives, bbox);
            return node;
        }

        // Partition primitives based on _splitMethod_
        switch (splitMethod) {
        case SPLIT_MIDDLE: {
            // Partition primitives through node's midpoint
            float pmid = .5f * (centroidBounds.pMin[dim] + centroidBounds.pMax[dim]);
            BVHPrimitiveInfo *midPtr = std::partition(&buildData[start],
                                                      &buildData[end-1]+1,
                                                      CompareToMid(dim, pmid));
            mid = midPtr - &buildData[0];
            break;
        }
        case SPLIT_EQUAL_COUNTS: {
            // Partition primitives into equally-sized subsets
            mid = (start + end) / 2;
            std::nth_element(&buildData[start], &buildData[mid],
                             &buildData[end-1]+1, ComparePoints(dim));
            break;
        }
        case SPLIT_SAH: default: {
            // Partition primitives using approximate SAH
            if (end-start <= 4) {
                // Partition primitives into equally-sized subsets
                mid = (start + end) / 2;
                std::nth_element(&buildData[start], &buildData[mid],
                                 &buildData[end-1]+1, ComparePoints(dim));
            }
            else {
                // Allocate _BucketInfo_ for SAH partition buckets
                const int nBuckets = 12;
                struct BucketInfo {
                    BucketInfo() { count = 0; }
                    int count;
                    BBox bounds;
                };
                BucketInfo buckets[nBuckets];

                // Initialize _BucketInfo_ for SAH partition buckets
                for (uint32_t i = start; i < end; ++i) {
                    int b = nBuckets *
                        ((buildData[i].centroid[dim] - centroidBounds.pMin[dim]) /
                         (centroidBounds.pMax[dim] - centroidBounds.pMin[dim]));
                    if (b == nBuckets) b = nBuckets-1;
                    Assert(b >= 0 && b < nBuckets);
                    buckets[b].count++;
                    buckets[b].bounds = Union(buckets[b].bounds, buildData[i].bounds);
                }

                // Compute costs for splitting after each bucket
                float cost[nBuckets-1];
                for (int i = 0; i < nBuckets-1; ++i) {
                    BBox b0, b1;
                    int count0 = 0, count1 = 0;
                    for (int j = 0; j <= i; ++j) {
                        b0 = Union(b0, buckets[j].bounds);
                        count0 += buckets[j].count;
                    }
                    for (int j = i+1; j < nBuckets; ++j) {
                        b1 = Union(b1, buckets[j].bounds);
                        count1 += buckets[j].count;
                    }
                    cost[i] = count0 * b0.SurfaceArea() + count1 * b1.SurfaceArea();
                }

                // Find bucket to split at that minimizes SAH metric
                float minCost = cost[0];
                uint32_t minCostSplit = 0;
                for (int i = 1; i < nBuckets-1; ++i) {
                    if (cost[i] < minCost) {
                        minCost = cost[i];
                        minCostSplit = i;
                    }
                }

                // Either create leaf or split primitives at selected SAH bucket
                if (nPrimitives > maxPrimsInNode ||
                    minCost < (end-start) * bbox.SurfaceArea()) {
                    BVHPrimitiveInfo *pmid = std::partition(&buildData[start],
                        &buildData[end-1]+1,
                        CompareToBucket(minCostSplit, nBuckets, dim, centroidBounds));
                    mid = pmid - &buildData[0];
                }
                else {
                    // Create leaf _BVHBuildNode_
                    uint32_t firstPrimOffset = orderedPrims.size();
                    for (uint32_t i = start; i < end; ++i) {
                        uint32_t primNum = buildData[i].primitiveNumber;
                        orderedPrims.push_back(primitives[primNum]);
                    }
                    node->InitLeaf(firstPrimOffset, nPrimitives, bbox);
                }
            }
            break;
        }
        }
        node->InitInterior(dim,
                           recursiveBuild(buildArena, buildData, start, mid,
                                          totalNodes, orderedPrims),
                           recursiveBuild(buildArena, buildData, mid, end,
                                          totalNodes, orderedPrims));
    }
    return node;
}
Example #18
QBVHAccel::QBVHAccel(const unsigned int vCount, const unsigned int triangleCount, const Triangle *p, const Point *v, 
		u_int mp, u_int fst, u_int sf)  : Accellerator(vCount, triangleCount, p,v),fullSweepThreshold(fst),
		skipFactor(sf), maxPrimsPerLeaf(mp) {
	initialized = false;
	maxDepth = 0;
	int nPrims = triangleCount;
	vertices = v;
	triangles = p;

	// Temporary data for building
	u_int *primsIndexes = new u_int[nPrims + 3]; // For the case where
	// the last quad would begin at the last primitive
	// (or the second or third last primitive)

	// The number of nodes depends on the number of primitives,
	// and is bounded by 2 * nPrims - 1.
	// Even though there will normally be at least 4 primitives per leaf,
	// that is not always the case, so keep using the general bound.
	nNodes = 0;
	maxNodes = 1;
	for (u_int layer = ((nPrims + maxPrimsPerLeaf - 1) / maxPrimsPerLeaf + 3) / 4; layer != 1; layer = (layer + 3) / 4)
		maxNodes += layer;
	nodes = AllocAligned<QBVHNode>(maxNodes);
	for (u_int i = 0; i < maxNodes; ++i)
		nodes[i] = QBVHNode();

	// The arrays that will contain
	// - the bounding boxes for all triangles
	// - the centroids for all triangles	
	BBox *primsBboxes = new BBox[nPrims];
	Point *primsCentroids = new Point[nPrims];
	// The bounding volume of all the centroids
	BBox centroidsBbox;

	// Fill each base array
	for (u_int i = 0; i < triangleCount; ++i) {
		// This array will be reorganized during construction. 
		primsIndexes[i] = i;

		// Compute the bounding box for the triangle
		primsBboxes[i] = triangles[i].WorldBound(v);
		primsBboxes[i].Expand(RAY_EPSILON);
		primsCentroids[i] = (primsBboxes[i].pMin + primsBboxes[i].pMax) * .5f;

		// Update the global bounding boxes
		worldBound = Union(worldBound, primsBboxes[i]);
		centroidsBbox = Union(centroidsBbox, primsCentroids[i]);
	}

	// Arbitrarily take the last primitive for the last 3
	primsIndexes[nPrims] = nPrims - 1;
	primsIndexes[nPrims + 1] = nPrims - 1;
	primsIndexes[nPrims + 2] = nPrims - 1;

	// Recursively build the tree
	//LR_LOG(context, "Building QBVH, primitives: " << nPrims << ", initial nodes: " << maxNodes);

	nQuads = 0;
	BuildTree(0, nPrims, primsIndexes, primsBboxes, primsCentroids, worldBound, centroidsBbox, -1, 0, 0);

	prims = AllocAligned<QuadTriangle>(nQuads);
	nQuads = 0;
	PreSwizzle(0, primsIndexes);

	//LR_LOG(context, "QBVH completed with " << nNodes << "/" << maxNodes << " nodes");
	//LR_LOG(context, "Total QBVH memory usage: " << nNodes * sizeof(QBVHNode) / 1024 << "Kbytes");
	//LR_LOG(context, "Total QBVH QuadTriangle count: " << nQuads);

	// Release temporary memory
	delete[] primsBboxes;
	delete[] primsCentroids;
	delete[] primsIndexes;
}
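As a worked example of the maxNodes bound computed above (hypothetical input, not taken from the source: nPrims = 1000, maxPrimsPerLeaf = 4): the leaf level needs ceil(1000/4) = 250 quad leaves, so the interior levels contain ceil(250/4) = 63, then 16, then 4, then 1 (the root) nodes, and the loop therefore reserves maxNodes = 1 + 63 + 16 + 4 = 84 QBVHNodes, well below the generic 2 * nPrims - 1 bound mentioned in the comment.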
Example #19
AggregateRegion::AggregateRegion(const vector<Region *> &r) :
	Region("AggregateRegion-" + boost::lexical_cast<string>(this)) {
	regions = r;
	for (u_int i = 0; i < regions.size(); ++i)
		bound = Union(bound, regions[i]->WorldBound());
}
Example #20
 virtual flatbuffers::Offset<DeadCreator::Action> getActionObject(flatbuffers::FlatBufferBuilder& builder) override
 {
     auto obj = DeadCreator::CreateResumeGame(builder);
     return DeadCreator::CreateAction(builder, DeadCreator::ActionBase_ResumeGame, obj.Union());
 }
Example #21
File: R3Box.C Project: acplus/peptalk
void R3Box::
Union (const R3Sphere& sphere) 
{
    // Expand this to include sphere
    Union(sphere.BBox());
}
Example #22
AggregateVolume::AggregateVolume(const vector<VolumeRegion *> &r) {
    regions = r;
    for (uint32_t i = 0; i < regions.size(); ++i)
        bound = Union(bound, regions[i]->WorldBound());
}
Example #23
BBox TriangleMesh::WorldBound() const {
    BBox worldBounds;
    for (int i = 0; i < nverts; i++)
        worldBounds = Union(worldBounds, p[i]);
    return worldBounds;
}
Example #24
File: set.c Project: H1d3r/binary_blobs
      } else {
        Py_DECREF(item);
        Py_DECREF(b);
        Py_DECREF(a);
        return NULL;
      }
    }
    Py_DECREF(item);
  }
  Py_DECREF(b);
  return make_ordered_set(a);
}


static char union_doc[] = "\
Union(a, b) -> ordered-set\n\
\n\
Return the union of the two sets as a new set (i.e., all the elements\n\
that are in either set).";

static PyObject *set_union(PyObject *module, PyObject *args)
{
  PyObject *a, *b, *item;

  if (!PyArg_ParseTuple(args, "OO:Union", &a, &b))
    return NULL;

  if (PyObject_IsTrue(a) == 0) {
    /* empty set a, reuse b */
    return PySequence_List(b);
  } else if (PyObject_IsTrue(b) == 0) {
Example #25
void DipoleSubsurfaceIntegrator::Preprocess(const Scene *scene,
        const Camera *camera, const Renderer *renderer) {
    if (scene->lights.size() == 0) return;
    vector<SurfacePoint> pts;
    // Get _SurfacePoint_s for translucent objects in scene
    if (filename != "") {
        // Initialize _SurfacePoint_s from file
        vector<float> fpts;
        if (ReadFloatFile(filename.c_str(), &fpts)) {
            if ((fpts.size() % 8) != 0)
                Error("Excess values (%d) in points file \"%s\"", int(fpts.size() % 8),
                      filename.c_str());
            for (u_int i = 0; i < fpts.size(); i += 8)
                pts.push_back(SurfacePoint(Point(fpts[i], fpts[i+1], fpts[i+2]),
                                           Normal(fpts[i+3], fpts[i+4], fpts[i+5]),
                                           fpts[i+6], fpts[i+7]));
        }
    }
    if (pts.size() == 0) {
        Point pCamera = camera->CameraToWorld(camera->shutterOpen,
                                              Point(0, 0, 0));
        FindPoissonPointDistribution(pCamera, camera->shutterOpen,
                                     minSampleDist, scene, &pts);
    }

    // Compute irradiance values at sample points
    RNG rng;
    MemoryArena arena;
    PBRT_SUBSURFACE_STARTED_COMPUTING_IRRADIANCE_VALUES();
    ProgressReporter progress(pts.size(), "Computing Irradiances");
    for (uint32_t i = 0; i < pts.size(); ++i) {
        SurfacePoint &sp = pts[i];
        Spectrum E(0.f);
        for (uint32_t j = 0; j < scene->lights.size(); ++j) {
            // Add irradiance from light at point
            const Light *light = scene->lights[j];
            Spectrum Elight = 0.f;
            int nSamples = RoundUpPow2(light->nSamples);
            uint32_t scramble[2] = { rng.RandomUInt(), rng.RandomUInt() };
            uint32_t compScramble = rng.RandomUInt();
            for (int s = 0; s < nSamples; ++s) {
                float lpos[2];
                Sample02(s, scramble, lpos);
                float lcomp = VanDerCorput(s, compScramble);
                LightSample ls(lpos[0], lpos[1], lcomp);
                Vector wi;
                float lightPdf;
                VisibilityTester visibility;
                Spectrum Li = light->Sample_L(sp.p, sp.rayEpsilon,
                                              ls, camera->shutterOpen, &wi, &lightPdf, &visibility);
                if (Dot(wi, sp.n) <= 0.) continue;
                if (Li.IsBlack() || lightPdf == 0.f) continue;
                Li *= visibility.Transmittance(scene, renderer, NULL, rng, arena);
                if (visibility.Unoccluded(scene))
                    Elight += Li * AbsDot(wi, sp.n) / lightPdf;
            }
            E += Elight / nSamples;
        }
        irradiancePoints.push_back(IrradiancePoint(sp, E));
        PBRT_SUBSURFACE_COMPUTED_IRRADIANCE_AT_POINT(&sp, &E);
        arena.FreeAll();
        progress.Update();
    }
    progress.Done();
    PBRT_SUBSURFACE_FINISHED_COMPUTING_IRRADIANCE_VALUES();

    // Create octree of clustered irradiance samples
    octree = octreeArena.Alloc<SubsurfaceOctreeNode>();
    for (uint32_t i = 0; i < irradiancePoints.size(); ++i)
        octreeBounds = Union(octreeBounds, irradiancePoints[i].p);
    for (uint32_t i = 0; i < irradiancePoints.size(); ++i)
        octree->Insert(octreeBounds, &irradiancePoints[i], octreeArena);
    octree->InitHierarchy();
}
Example #26
void AutoCaptureMechanism::CaptureAll()
{
    // start from the first page
    m_notebook->SetSelection(0);
    wxYield();

#if defined(__INTEL_COMPILER) && 1 /* VDM auto patch */
#   pragma ivdep
#   pragma swp
#   pragma unroll
#   pragma prefetch
#   if 0
#       pragma simd noassert
#   endif
#endif /* VDM auto patch */
    for (ControlList::iterator it = m_controlList.begin();
         it != m_controlList.end();
         ++it)
    {
        Control &ctrl = *it;

        if (ctrl.flag == AJ_TurnPage)    // Turn to next page
        {
            m_notebook->SetSelection(m_notebook->GetSelection() + 1);
            wxYield();
            continue;
        }

        // create the screenshot
        wxBitmap screenshot(1, 1);
        Capture(&screenshot, ctrl);

        if(ctrl.flag & AJ_Union)
        {
            // union screenshots until AJ_UnionEnd
#if defined(__INTEL_COMPILER) && 1 /* VDM auto patch */
#   pragma ivdep
#   pragma swp
#   pragma unroll
#   pragma prefetch
#   if 0
#       pragma simd noassert
#   endif
#endif /* VDM auto patch */
            do
            {
                ++it;
                it->name = ctrl.name; //preserving the name
                wxBitmap screenshot2(1, 1);
                Capture(&screenshot2, *it);
                wxBitmap combined(1, 1);
                Union(&screenshot, &screenshot2, &combined);
                screenshot = combined;
            }
#if defined(__INTEL_COMPILER) && 0 /* VDM auto patch */
#   pragma ivdep
#   pragma swp
#   pragma unroll
#   pragma prefetch
#   if 0
#       pragma simd noassert
#   endif
#endif /* VDM auto patch */
            while(!(it->flag & AJ_UnionEnd));
        }

        // and save it
        Save(&screenshot, ctrl.name);
    }
}
Example #27
File: bvh.cpp Project: yzhwang/pbrt-v3
BVHBuildNode *BVHAccel::recursiveBuild(
    MemoryArena &arena, std::vector<BVHPrimitiveInfo> &primitiveInfo, int start,
    int end, int *totalNodes,
    std::vector<std::shared_ptr<Primitive>> &orderedPrims) {
    Assert(start != end);
    BVHBuildNode *node = arena.Alloc<BVHBuildNode>();
    (*totalNodes)++;
    // Compute bounds of all primitives in BVH node
    Bounds3f bounds;
    for (int i = start; i < end; ++i)
        bounds = Union(bounds, primitiveInfo[i].bounds);
    int nPrimitives = end - start;
    if (nPrimitives == 1) {
        // Create leaf _BVHBuildNode_
        int firstPrimOffset = orderedPrims.size();
        for (int i = start; i < end; ++i) {
            int primNum = primitiveInfo[i].primitiveNumber;
            orderedPrims.push_back(primitives[primNum]);
        }
        node->InitLeaf(firstPrimOffset, nPrimitives, bounds);
    } else {
        // Compute bound of primitive centroids, choose split dimension _dim_
        Bounds3f centroidBounds;
        for (int i = start; i < end; ++i)
            centroidBounds = Union(centroidBounds, primitiveInfo[i].centroid);
        int dim = centroidBounds.MaximumExtent();

        // Partition primitives into two sets and build children
        int mid = (start + end) / 2;
        if (centroidBounds.pMax[dim] == centroidBounds.pMin[dim]) {
            // Create leaf _BVHBuildNode_
            int firstPrimOffset = orderedPrims.size();
            for (int i = start; i < end; ++i) {
                int primNum = primitiveInfo[i].primitiveNumber;
                orderedPrims.push_back(primitives[primNum]);
            }
            node->InitLeaf(firstPrimOffset, nPrimitives, bounds);
        } else {
            // Partition primitives based on _splitMethod_
            switch (splitMethod) {
            case SplitMethod::Middle: {
                // Partition primitives through node's midpoint
                Float pmid =
                    (centroidBounds.pMin[dim] + centroidBounds.pMax[dim]) / 2;
                BVHPrimitiveInfo *midPtr = std::partition(
                    &primitiveInfo[start], &primitiveInfo[end - 1] + 1,
                    [dim, pmid](const BVHPrimitiveInfo &pi) {
                        return pi.centroid[dim] < pmid;
                    });
                mid = midPtr - &primitiveInfo[0];
                // For lots of prims with large overlapping bounding boxes, this
                // may fail to partition; in that case don't break and fall
                // through
                // to EqualCounts.
                if (mid != start && mid != end) break;
            }
            case SplitMethod::EqualCounts: {
                // Partition primitives into equally-sized subsets
                mid = (start + end) / 2;
                std::nth_element(&primitiveInfo[start], &primitiveInfo[mid],
                                 &primitiveInfo[end - 1] + 1,
                                 [dim](const BVHPrimitiveInfo &a,
                                       const BVHPrimitiveInfo &b) {
                                     return a.centroid[dim] < b.centroid[dim];
                                 });
                break;
            }
            case SplitMethod::SAH:
            default: {
                // Partition primitives using approximate SAH
                if (nPrimitives <= 4) {
                    // Partition primitives into equally-sized subsets
                    mid = (start + end) / 2;
                    std::nth_element(&primitiveInfo[start], &primitiveInfo[mid],
                                     &primitiveInfo[end - 1] + 1,
                                     [dim](const BVHPrimitiveInfo &a,
                                           const BVHPrimitiveInfo &b) {
                                         return a.centroid[dim] <
                                                b.centroid[dim];
                                     });
                } else {
                    // Allocate _BucketInfo_ for SAH partition buckets
                    constexpr int nBuckets = 12;
                    BucketInfo buckets[nBuckets];

                    // Initialize _BucketInfo_ for SAH partition buckets
                    for (int i = start; i < end; ++i) {
                        int b = nBuckets *
                                centroidBounds.Offset(
                                    primitiveInfo[i].centroid)[dim];
                        if (b == nBuckets) b = nBuckets - 1;
                        Assert(b >= 0 && b < nBuckets);
                        buckets[b].count++;
                        buckets[b].bounds =
                            Union(buckets[b].bounds, primitiveInfo[i].bounds);
                    }

                    // Compute costs for splitting after each bucket
                    Float cost[nBuckets - 1];
                    for (int i = 0; i < nBuckets - 1; ++i) {
                        Bounds3f b0, b1;
                        int count0 = 0, count1 = 0;
                        for (int j = 0; j <= i; ++j) {
                            b0 = Union(b0, buckets[j].bounds);
                            count0 += buckets[j].count;
                        }
                        for (int j = i + 1; j < nBuckets; ++j) {
                            b1 = Union(b1, buckets[j].bounds);
                            count1 += buckets[j].count;
                        }
                        cost[i] = .125f +
                                  (count0 * b0.SurfaceArea() +
                                   count1 * b1.SurfaceArea()) /
                                      bounds.SurfaceArea();
                    }

                    // Find bucket to split at that minimizes SAH metric
                    Float minCost = cost[0];
                    int minCostSplitBucket = 0;
                    for (int i = 1; i < nBuckets - 1; ++i) {
                        if (cost[i] < minCost) {
                            minCost = cost[i];
                            minCostSplitBucket = i;
                        }
                    }

                    // Either create leaf or split primitives at selected SAH
                    // bucket
                    Float leafCost = nPrimitives;
                    if (nPrimitives > maxPrimsInNode || minCost < leafCost) {
                        BVHPrimitiveInfo *pmid = std::partition(
                            &primitiveInfo[start], &primitiveInfo[end - 1] + 1,
                            [=](const BVHPrimitiveInfo &pi) {
                                int b = nBuckets *
                                        centroidBounds.Offset(pi.centroid)[dim];
                                if (b == nBuckets) b = nBuckets - 1;
                                Assert(b >= 0 && b < nBuckets);
                                return b <= minCostSplitBucket;
                            });
                        mid = pmid - &primitiveInfo[0];
                    } else {
                        // Create leaf _BVHBuildNode_
                        int firstPrimOffset = orderedPrims.size();
                        for (int i = start; i < end; ++i) {
                            int primNum = primitiveInfo[i].primitiveNumber;
                            orderedPrims.push_back(primitives[primNum]);
                        }
                        node->InitLeaf(firstPrimOffset, nPrimitives, bounds);
                        return node;
                    }
                }
                break;
            }
            }
            node->InitInterior(dim,
                               recursiveBuild(arena, primitiveInfo, start, mid,
                                              totalNodes, orderedPrims),
                               recursiveBuild(arena, primitiveInfo, mid, end,
                                              totalNodes, orderedPrims));
        }
    }
    return node;
}
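For comparison with the earlier bucketed example, the pbrt-v3 snippet above normalizes the same surface area heuristic by the node's surface area and charges a relative traversal cost of 1/8, while a leaf costs one intersection test per primitive; this is read directly from the cost loop and leafCost line above:

\[
  \mathrm{cost}[i] \;=\; \tfrac{1}{8} \;+\; \frac{N_0\,SA(B_0) + N_1\,SA(B_1)}{SA(B_{\mathrm{node}})},
  \qquad
  \mathrm{cost}_{\mathrm{leaf}} = N.
\]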
Example #28
FibHeapNode *FibHeap::ExtractMin()
{
FibHeapNode *Result;
FibHeap *ChildHeap = NULL;

// Remove minimum node and set MinRoot to next node

     if ((Result = Minimum()) == NULL)
          return NULL;

     MinRoot = Result->Right;
     Result->Right->Left = Result->Left;
     Result->Left->Right = Result->Right;
     Result->Left = Result->Right = NULL;

     NumNodes --;
     if (Result->Mark)
     {
	 NumMarkedNodes --;
         Result->Mark = 0;
     }
     Result->Degree = 0;

// Attach child list of Minimum node to the root list of the heap
// If there is no child list, then do no work

     if (Result->Child == NULL)
     {
	 if (MinRoot == Result)
	     MinRoot = NULL;
     }

// If MinRoot==Result then there was only one root tree, so the
// root list is simply the child list of that node (which is
// NULL if this is the last node in the list)

     else if (MinRoot == Result)
         MinRoot = Result->Child;

// If MinRoot is different, then the child list is pushed into a
// new temporary heap, which is then merged by Union() onto the
// root list of this heap.

     else 
     {
         ChildHeap = new FibHeap();
         ChildHeap->MinRoot = Result->Child;
     }

// Complete the disassociation of the Result node from the heap

     if (Result->Child != NULL)
	 Result->Child->Parent = NULL;
     Result->Child = Result->Parent = NULL;

// If there was a child list, then we now merge it with the
//	rest of the root list

     if (ChildHeap)
         Union(ChildHeap);

// Consolidate heap to find new minimum and do reorganize work

     if (MinRoot != NULL)
         _Consolidate();

// Return the minimum node, which is now disassociated with the heap
// It has Left, Right, Parent, Child, Mark and Degree cleared.

     return Result;
}
Example #29
File: bvh.cpp Project: yzhwang/pbrt-v3
BVHBuildNode *BVHAccel::emitLBVH(
    BVHBuildNode *&buildNodes,
    const std::vector<BVHPrimitiveInfo> &primitiveInfo,
    MortonPrimitive *mortonPrims, int nPrimitives, int *totalNodes,
    std::vector<std::shared_ptr<Primitive>> &orderedPrims,
    std::atomic<int> *orderedPrimsOffset, int bitIndex) const {
    Assert(nPrimitives > 0);
    if (bitIndex == -1 || nPrimitives < maxPrimsInNode) {
        // Create and return leaf node of LBVH treelet
        (*totalNodes)++;
        BVHBuildNode *node = buildNodes++;
        Bounds3f bounds;
        int firstPrimOffset = orderedPrimsOffset->fetch_add(nPrimitives);
        for (int i = 0; i < nPrimitives; ++i) {
            int primitiveIndex = mortonPrims[i].primitiveIndex;
            orderedPrims[firstPrimOffset + i] = primitives[primitiveIndex];
            bounds = Union(bounds, primitiveInfo[primitiveIndex].bounds);
        }
        node->InitLeaf(firstPrimOffset, nPrimitives, bounds);
        return node;
    } else {
        int mask = 1 << bitIndex;
        // Advance to next subtree level if there's no LBVH split for this bit
        if ((mortonPrims[0].mortonCode & mask) ==
            (mortonPrims[nPrimitives - 1].mortonCode & mask))
            return emitLBVH(buildNodes, primitiveInfo, mortonPrims, nPrimitives,
                            totalNodes, orderedPrims, orderedPrimsOffset,
                            bitIndex - 1);

        // Find LBVH split point for this dimension
        int searchStart = 0, searchEnd = nPrimitives - 1;
        while (searchStart + 1 != searchEnd) {
            Assert(searchStart != searchEnd);
            int mid = (searchStart + searchEnd) / 2;
            if ((mortonPrims[searchStart].mortonCode & mask) ==
                (mortonPrims[mid].mortonCode & mask))
                searchStart = mid;
            else {
                Assert((mortonPrims[mid].mortonCode & mask) ==
                       (mortonPrims[searchEnd].mortonCode & mask));
                searchEnd = mid;
            }
        }
        int splitOffset = searchEnd;
        Assert(splitOffset <= nPrimitives - 1);
        Assert((mortonPrims[splitOffset - 1].mortonCode & mask) !=
               (mortonPrims[splitOffset].mortonCode & mask));

        // Create and return interior LBVH node
        (*totalNodes)++;
        BVHBuildNode *node = buildNodes++;
        BVHBuildNode *lbvh[2] = {
            emitLBVH(buildNodes, primitiveInfo, mortonPrims, splitOffset,
                     totalNodes, orderedPrims, orderedPrimsOffset,
                     bitIndex - 1),
            emitLBVH(buildNodes, primitiveInfo, &mortonPrims[splitOffset],
                     nPrimitives - splitOffset, totalNodes, orderedPrims,
                     orderedPrimsOffset, bitIndex - 1)};
        int axis = bitIndex % 3;
        node->InitInterior(axis, lbvh[0], lbvh[1]);
        return node;
    }
}
Example #30
File: grid.cpp Project: jwzhang/pbrt-v2
// GridAccel Method Definitions
GridAccel::GridAccel(const vector<Reference<Primitive> > &p,
                     bool forRefined, bool refineImmediately)
    : gridForRefined(forRefined) {
    PBRT_GRID_STARTED_CONSTRUCTION(this, p.size());
    // Create reader-writer mutex for grid
    rwMutex = RWMutex::Create();

    // Initialize _primitives_ with primitives for grid
    if (refineImmediately)
        for (u_int i = 0; i < p.size(); ++i)
            p[i]->FullyRefine(primitives);
    else
        primitives = p;

    // Compute bounds and choose grid resolution
    for (u_int i = 0; i < primitives.size(); ++i)
        bounds = Union(bounds, primitives[i]->WorldBound());
    Vector delta = bounds.pMax - bounds.pMin;

    // Find _voxelsPerUnitDist_ for grid
    int maxAxis = bounds.MaximumExtent();
    float invMaxWidth = 1.f / delta[maxAxis];
    Assert(invMaxWidth > 0.f);
    float cubeRoot = 3.f * powf(float(primitives.size()), 1.f/3.f);
    float voxelsPerUnitDist = cubeRoot * invMaxWidth;
    for (int axis = 0; axis < 3; ++axis) {
        NVoxels[axis] = Round2Int(delta[axis] * voxelsPerUnitDist);
        NVoxels[axis] = Clamp(NVoxels[axis], 1, 64);
    }
    PBRT_GRID_BOUNDS_AND_RESOLUTION(&bounds, NVoxels);

    // Compute voxel widths and allocate voxels
    for (int axis = 0; axis < 3; ++axis) {
        Width[axis] = delta[axis] / NVoxels[axis];
        InvWidth[axis] = (Width[axis] == 0.f) ? 0.f : 1.f / Width[axis];
    }
    int nVoxels = NVoxels[0] * NVoxels[1] * NVoxels[2];
    voxels = AllocAligned<Voxel *>(nVoxels);
    memset(voxels, 0, nVoxels * sizeof(Voxel *));

    // Add primitives to grid voxels
    for (u_int i = 0; i < primitives.size(); ++i) {
        // Find voxel extent of primitive
        BBox pb = primitives[i]->WorldBound();
        int vmin[3], vmax[3];
        for (int axis = 0; axis < 3; ++axis) {
            vmin[axis] = posToVoxel(pb.pMin, axis);
            vmax[axis] = posToVoxel(pb.pMax, axis);
        }

        // Add primitive to overlapping voxels
        PBRT_GRID_VOXELIZED_PRIMITIVE(vmin, vmax);
        for (int z = vmin[2]; z <= vmax[2]; ++z)
            for (int y = vmin[1]; y <= vmax[1]; ++y)
                for (int x = vmin[0]; x <= vmax[0]; ++x) {
                    int o = offset(x, y, z);
                    if (!voxels[o]) {
                        // Allocate new voxel and store primitive in it
                        voxels[o] = voxelArena.Alloc<Voxel>();
                        *voxels[o] = Voxel(primitives[i]);
                    }
                    else {
                        // Add primitive to already-allocated voxel
                        voxels[o]->AddPrimitive(primitives[i]);
                    }
                }
    }
    PBRT_GRID_FINISHED_CONSTRUCTION(this);
}
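Summarizing the resolution choice in the constructor above (derived from the code itself, with N primitives and per-axis extent Delta): the grid aims for roughly 3 * N^(1/3) voxels along the longest axis and clamps every axis to at most 64 voxels:

\[
  \text{voxelsPerUnitDist} \;=\; \frac{3\sqrt[3]{N}}{\Delta_{\max}},
  \qquad
  \mathrm{NVoxels}[a] \;=\; \mathrm{clamp}\!\left(\mathrm{round}\!\left(\Delta_a \cdot \text{voxelsPerUnitDist}\right),\, 1,\, 64\right).
\]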