Example #1
// Segment tree build: each node stores (coordinate, index) pairs for the minimum and
// maximum x and y over its range; leaves read one point with scanf. minP/maxP are
// presumably helpers that pick the pair with the smaller/larger coordinate.
void build(int x, int l,int r){
    if(l == r){
        scanf("%d%d",&xmi[x].first,&ymi[x].first);
        xmi[x].second = ymi[x].second  = l;
        xma[x] = xmi[x];
        yma[x] = ymi[x];
        return;
    }
    int mid = (l+r)>>1;
    build(x<<1,l,mid);
    build(x<<1|1,mid+1,r);
    xmi[x] = minP(xmi[x<<1], xmi[x<<1|1]);
    xma[x] = maxP(xma[x<<1], xma[x<<1|1]);
    ymi[x] = minP(ymi[x<<1], ymi[x<<1|1]);
    yma[x] = maxP(yma[x<<1], yma[x<<1|1]);
}
Example #2
// Segment-tree range query: returns the (x coordinate, index) pair with the smallest x in [c, d].
pair askxmi(int x,int l,int r,int c,int d){
    if(c>d)
        return newPair(inf,0);
    if(c<=l&&r<=d)return xmi[x];
    int mid=(l+r)>>1;
    pair t = newPair(inf,0);
    if(c<=mid)
        t=askxmi(x<<1,l,mid,c,d);
    if(d>mid)
        t=minP(t, askxmi(x<<1|1,mid+1,r,c,d));
    return t;
}
Example #3
/**
 * Computes an axis-aligned (non-rotated) bounding box for a set of points
 * or a contour.
 */
ofRectangle tricks::math::getBoundingBox(vector<ofVec2f> &contour) {
	ofVec2f minP(FLT_MAX, FLT_MAX);
	ofVec2f maxP(-FLT_MAX, -FLT_MAX);	// FLT_MIN is the smallest positive float; -FLT_MAX also handles negative coordinates
	for(int i = 0; i < contour.size(); i++) {
		if(contour[i].x>maxP.x) maxP.x = contour[i].x;
		if(contour[i].y>maxP.y) maxP.y = contour[i].y;
		if(contour[i].x<minP.x) minP.x = contour[i].x;
		if(contour[i].y<minP.y) minP.y = contour[i].y;
	}
	
	return ofRectangle(minP.x, minP.y, maxP.x - minP.x, maxP.y - minP.y);
}
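// Usage sketch (hypothetical, not part of the scraped example): build a small contour and
// query its axis-aligned bounding box; ofVec2f/ofRectangle are openFrameworks types.
static void boundingBoxUsageSketch() {
	std::vector<ofVec2f> contour;
	contour.push_back(ofVec2f(1, 2));
	contour.push_back(ofVec2f(-3, 5));
	contour.push_back(ofVec2f(4, -1));
	ofRectangle box = tricks::math::getBoundingBox(contour);
	// box.x == -3, box.y == -1, box.width == 7, box.height == 6
}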
Example #4
 int maxProfit(vector<int> &prices) {
     if (prices.empty()) return 0;
     int n = (int)prices.size();
     // minP[i] = lowest price seen in prices[0..i]
     vector<int> minP(n, INT_MAX);
     for (int i = 0; i < n; i++) {
         minP[i] = (i == 0) ? prices[i] : min(minP[i-1], prices[i]);
     }
     // walk right-to-left: maxP is the highest price in prices[i..n-1]; the best single
     // transaction that sells at or after i buys at minP[i-1] and sells at maxP
     int res = 0;
     int maxP = prices[n-1];
     for (int i = n - 1; i >= 1; i--) {
         maxP = max(maxP, prices[i]);
         res = max(res, maxP - minP[i-1]);
     }
     return res;
 }
Example #5
Point getPupilPointFromGradient(Mat gx) {
    // track the most positive and most negative gradient values and where they occur
    double maxVal = -DBL_MAX;   // FLT_MIN is the smallest positive float, not a valid lower sentinel
    Point2i maxP(0, 0);
    double minVal = DBL_MAX;
    Point2i minP(0, 0);
    double * v;
    for (int i = 0; i < gx.rows; i++) {
        v = gx.ptr<double>(i);
        for (int j = 0; j < gx.cols; j++) {
            if (v[j] > maxVal) {
                maxVal = v[j];
                maxP.x = j;
                maxP.y = i;
            }
            if (v[j] < minVal) {   // separate check so the same element can update both extremes
                minVal = v[j];
                minP.x = j;
                minP.y = i;
            }
        }

    }
    return Point((minP.x + maxP.x) / 2, (minP.y + maxP.y) / 2);
}
Example #6
	dgInt32 UpdateBox (const dgVector* const position, const dgInt32* const faceIndices)
	{
		dgVector p0;
		dgVector p1;
		TriangleBox (position, faceIndices, p0, p1);

		p0 = p0.CompProduct3(dgVector (DG_DEFORMABLE_PADDING, DG_DEFORMABLE_PADDING, DG_DEFORMABLE_PADDING, dgFloat32 (0.0f)));
		p1 = p1.CompProduct3(dgVector (DG_DEFORMABLE_PADDING, DG_DEFORMABLE_PADDING, DG_DEFORMABLE_PADDING, dgFloat32 (0.0f)));

		dgVector minP (dgFloor (p0.m_x) * DG_DEFORMABLE_INV_PADDING, dgFloor (p0.m_y) * DG_DEFORMABLE_INV_PADDING, dgFloor (p0.m_z) * DG_DEFORMABLE_INV_PADDING, dgFloat32(0.0f));  
		dgVector maxP (dgFloor (p1.m_x + dgFloat32 (1.0f)) * DG_DEFORMABLE_INV_PADDING,  
					   dgFloor (p1.m_y + dgFloat32 (1.0f)) * DG_DEFORMABLE_INV_PADDING,  
					   dgFloor (p1.m_z + dgFloat32 (1.0f)) * DG_DEFORMABLE_INV_PADDING, dgFloat32(0.0f));

		dgInt32 state = dgCompareBox (minP, maxP, m_minBox, m_maxBox);
		if (state) {
			m_minBox = minP;
			m_maxBox = maxP;
			dgVector side0 (m_maxBox - m_minBox);
			dgVector side1 (side0.m_y, side0.m_z, side0.m_x, dgFloat32 (0.0f));
			m_surfaceArea = side0 % side1;
		}
		return state;
	}
Example #7
// Recursively builds an AABB tree over the point set: small groups become leaf clumps,
// larger ones are split at the mean value along the axis of largest variance.
dgAABBPointTree4d* dgConvexHull4d::BuildTree (dgAABBPointTree4d* const parent, dgHullVector* const points, dgInt32 count, dgInt32 baseIndex, dgInt8** memoryPool, dgInt32& maxMemSize) const
{
	dgAABBPointTree4d* tree = NULL;

	dgAssert (count);
	dgBigVector minP ( dgFloat32 (1.0e15f),  dgFloat32 (1.0e15f),  dgFloat32 (1.0e15f),  dgFloat32 (1.0e15f)); 
	dgBigVector maxP (-dgFloat32 (1.0e15f), -dgFloat32 (1.0e15f), -dgFloat32 (1.0e15f), -dgFloat32 (1.0e15f)); 
	if (count <= DG_VERTEX_CLUMP_SIZE_4D) {

		dgAABBPointTree4dClump* const clump = new (*memoryPool) dgAABBPointTree4dClump;
		*memoryPool += sizeof (dgAABBPointTree4dClump);
		maxMemSize -= sizeof (dgAABBPointTree4dClump);
		dgAssert (maxMemSize >= 0);

		dgAssert (clump);
		clump->m_count = count;
		for (dgInt32 i = 0; i < count; i ++) {
			clump->m_indices[i] = i + baseIndex;

			const dgBigVector& p = points[i];
			minP.m_x = dgMin (p.m_x, minP.m_x); 
			minP.m_y = dgMin (p.m_y, minP.m_y); 
			minP.m_z = dgMin (p.m_z, minP.m_z); 
			minP.m_w = dgMin (p.m_w, minP.m_w); 

			maxP.m_x = dgMax (p.m_x, maxP.m_x); 
			maxP.m_y = dgMax (p.m_y, maxP.m_y); 
			maxP.m_z = dgMax (p.m_z, maxP.m_z); 
			maxP.m_w = dgMax (p.m_w, maxP.m_w); 
		}

		clump->m_left = NULL;
		clump->m_right = NULL;
		tree = clump;

	} else {
		dgBigVector median (dgFloat32 (0.0f), dgFloat32 (0.0f), dgFloat32 (0.0f), dgFloat32 (0.0f));
		dgBigVector varian (dgFloat32 (0.0f), dgFloat32 (0.0f), dgFloat32 (0.0f), dgFloat32 (0.0f));
		for (dgInt32 i = 0; i < count; i ++) {

			const dgBigVector& p = points[i];
			minP.m_x = dgMin (p.m_x, minP.m_x); 
			minP.m_y = dgMin (p.m_y, minP.m_y); 
			minP.m_z = dgMin (p.m_z, minP.m_z); 
			minP.m_w = dgMin (p.m_w, minP.m_w); 

			maxP.m_x = dgMax (p.m_x, maxP.m_x); 
			maxP.m_y = dgMax (p.m_y, maxP.m_y); 
			maxP.m_z = dgMax (p.m_z, maxP.m_z); 
			maxP.m_w = dgMax (p.m_w, maxP.m_w); 

			median = median + p;
			varian = varian + p.CompProduct4(p);
		}

		varian = varian.Scale4 (dgFloat32 (count)) - median.CompProduct4(median);

		dgInt32 index = 0;
		dgFloat64 maxVarian = dgFloat64 (-1.0e10f);
		for (dgInt32 i = 0; i < 4; i ++) {
			if (varian[i] > maxVarian) {
				index = i;
				maxVarian = varian[i];
			}
		}
		dgBigVector center = median.Scale4 (dgFloat64 (1.0f) / dgFloat64 (count));

		dgFloat64 test = center[index];

		dgInt32 i0 = 0;
		dgInt32 i1 = count - 1;
		do {    
			for (; i0 <= i1; i0 ++) {
				dgFloat64 val = points[i0][index];
				if (val > test) {
					break;
				}
			}

			for (; i1 >= i0; i1 --) {
				dgFloat64 val = points[i1][index];
				if (val < test) {
					break;
				}
			}

			if (i0 < i1)	{
				dgSwap(points[i0], points[i1]);
				i0++; 
				i1--;
			}
		} while (i0 <= i1);

		if (i0 == 0){
			i0 = count / 2;
		}
		if (i0 >= (count - 1)){
			i0 = count / 2;
		}

		tree = new (*memoryPool) dgAABBPointTree4d;
		*memoryPool += sizeof (dgAABBPointTree4d);
		maxMemSize -= sizeof (dgAABBPointTree4d);
		dgAssert (maxMemSize >= 0);

		dgAssert (i0);
		dgAssert (count - i0);

		tree->m_left = BuildTree (tree, points, i0, baseIndex, memoryPool, maxMemSize);
		tree->m_right = BuildTree (tree, &points[i0], count - i0, i0 + baseIndex, memoryPool, maxMemSize);
	}

	dgAssert (tree);
	tree->m_parent = parent;
	tree->m_box[0] = minP - dgBigVector (dgFloat64 (1.0e-3f), dgFloat64 (1.0e-3f), dgFloat64 (1.0e-3f), dgFloat64 (1.0e-3f));
	tree->m_box[1] = maxP + dgBigVector (dgFloat64 (1.0e-3f), dgFloat64 (1.0e-3f), dgFloat64 (1.0e-3f), dgFloat64 (1.0e-3f));
	return tree;
}
Example #8
bool dgCollisionConvexHull::Create (dgInt32 count, dgInt32 strideInBytes, const dgFloat32* const vertexArray, dgFloat32 tolerance)
{
	dgInt32 stride = strideInBytes / sizeof (dgFloat32);
	dgStack<dgFloat64> buffer(3 * 2 * count);
	for (dgInt32 i = 0; i < count; i ++) {
		buffer[i * 3 + 0] = vertexArray[i * stride + 0];
		buffer[i * 3 + 1] = vertexArray[i * stride + 1];
		buffer[i * 3 + 2] = vertexArray[i * stride + 2];
	}

	dgConvexHull3d* convexHull =  new (GetAllocator()) dgConvexHull3d (GetAllocator(), &buffer[0], 3 * sizeof (dgFloat64), count, tolerance);
	if (!convexHull->GetCount()) {
		// the hull is degenerate; add some thickness so a flat or thin point set still yields a valid (thick) hull
		delete convexHull;

		dgStack<dgVector> tmp(3 * count);
		for (dgInt32 i = 0; i < count; i ++) {
			tmp[i][0] = dgFloat32 (buffer[i*3 + 0]);
			tmp[i][1] = dgFloat32 (buffer[i*3 + 1]);
			tmp[i][2] = dgFloat32 (buffer[i*3 + 2]);
			tmp[i][3] = dgFloat32 (0.0f);	// clear w; writing index 2 again would overwrite the z just stored
		}
	
		dgObb sphere;
		sphere.SetDimensions (&tmp[0][0], sizeof (dgVector), count);

		dgInt32 index = 0;
		dgFloat32 size = dgFloat32 (1.0e10f);
		for (dgInt32 i = 0; i < 3; i ++) {
			if (sphere.m_size[i] < size) {
				index = i;
				size = sphere.m_size[i];
			}
		}
		dgVector normal (dgFloat32 (0.0f), dgFloat32 (0.0f), dgFloat32 (0.0f), dgFloat32 (0.0f));
		normal[index] = dgFloat32 (1.0f);
		dgVector step = sphere.RotateVector (normal.Scale3 (dgFloat32 (0.05f)));
		for (dgInt32 i = 0; i < count; i ++) {
			dgVector p1 (tmp[i] + step);
			dgVector p2 (tmp[i] - step);

			buffer[i * 3 + 0] = p1.m_x;
			buffer[i * 3 + 1] = p1.m_y;
			buffer[i * 3 + 2] = p1.m_z;
			buffer[(i + count) * 3 + 0] = p2.m_x;
			buffer[(i + count) * 3 + 1] = p2.m_y;
			buffer[(i + count) * 3 + 2] = p2.m_z;
		}
		count *= 2;
		convexHull =  new (GetAllocator()) dgConvexHull3d (GetAllocator(), &buffer[0], 3 * sizeof (dgFloat64), count, tolerance);
		if (!convexHull->GetCount()) {
			delete convexHull;
			return false;
		}
	}

	// check for degenerated faces
	for (bool success = false; !success;  ) {
		success = true;
		const dgBigVector* const hullVertexArray = convexHull->GetVertexPool();

		dgStack<dgInt8> mask(convexHull->GetVertexCount());
		memset (&mask[0], 1, mask.GetSizeInBytes());
		for (dgConvexHull3d::dgListNode* node = convexHull->GetFirst(); node; node = node->GetNext()) {
			dgConvexHull3DFace& face = node->GetInfo();
			const dgBigVector& p0 = hullVertexArray[face.m_index[0]];
			const dgBigVector& p1 = hullVertexArray[face.m_index[1]];
			const dgBigVector& p2 = hullVertexArray[face.m_index[2]];
			dgBigVector p1p0 (p1 - p0);
			dgBigVector p2p0 (p2 - p0);
			dgBigVector normal (p2p0 * p1p0);
			dgFloat64 mag2 = normal % normal;
			if (mag2 < dgFloat64 (1.0e-6f * 1.0e-6f)) {
				success = false;
				dgInt32 index = -1;
				dgBigVector p2p1 (p2 - p1);
				dgFloat64 dist10 = p1p0 % p1p0;
				dgFloat64 dist20 = p2p0 % p2p0;
				dgFloat64 dist21 = p2p1 % p2p1;
				if ((dist10 >= dist20) && (dist10 >= dist21)) {
					index = 2;
				} else if ((dist20 >= dist10) && (dist20 >= dist21)) {
					index = 1;
				} else if ((dist21 >= dist10) && (dist21 >= dist20)) {
					index = 0;
				}
				dgAssert (index != -1);
				mask[face.m_index[index]] = 0;
			}
		}
		if (!success) {
			dgInt32 count = 0;
			dgInt32 vertexCount = convexHull->GetVertexCount();
			for (dgInt32 i = 0; i < vertexCount; i ++) {
				if (mask[i]) {
					buffer[count * 3 + 0] = hullVertexArray[i].m_x;
					buffer[count * 3 + 1] = hullVertexArray[i].m_y;
					buffer[count * 3 + 2] = hullVertexArray[i].m_z;
					count ++;
				}
			}
			delete convexHull;
			convexHull =  new (GetAllocator()) dgConvexHull3d (GetAllocator(), &buffer[0], 3 * sizeof (dgFloat64), count, tolerance);
		}
	}

	dgAssert (convexHull);
	dgInt32 vertexCount = convexHull->GetVertexCount();
	if (vertexCount < 4) {
		delete convexHull;
		return false;
	}
	

	const dgBigVector* const hullVertexArray = convexHull->GetVertexPool();

	dgPolyhedra polyhedra (GetAllocator());
	polyhedra.BeginFace();
	for (dgConvexHull3d::dgListNode* node = convexHull->GetFirst(); node; node = node->GetNext()) {
		dgConvexHull3DFace& face = node->GetInfo();
		polyhedra.AddFace (face.m_index[0], face.m_index[1], face.m_index[2]);
	}
	polyhedra.EndFace();

	if (vertexCount > 4) {
//		bool edgeRemoved = false;
//		while (RemoveCoplanarEdge (polyhedra, hullVertexArray)) {
//			edgeRemoved = true;
//		}
//		if (edgeRemoved) {
//			if (!CheckConvex (polyhedra, hullVertexArray)) {
//				delete convexHull;
//				return false;
//			}
//		}
		while (RemoveCoplanarEdge (polyhedra, hullVertexArray));
	}

	dgStack<dgInt32> vertexMap(vertexCount);
	memset (&vertexMap[0], -1, vertexCount * sizeof (dgInt32));

	dgInt32 mark = polyhedra.IncLRU();
	dgPolyhedra::Iterator iter (polyhedra);
	for (iter.Begin(); iter; iter ++) {
		dgEdge* const edge = &iter.GetNode()->GetInfo();
		if (edge->m_mark != mark) {
			if (vertexMap[edge->m_incidentVertex] == -1) {
				vertexMap[edge->m_incidentVertex] = m_vertexCount;
				m_vertexCount ++;
			}
			dgEdge* ptr = edge;
			do {
				ptr->m_mark = mark;
				ptr->m_userData = m_edgeCount;
				m_edgeCount ++;
				ptr = ptr->m_twin->m_next;
			} while (ptr != edge) ;
		}
	} 

	m_vertex = (dgVector*) m_allocator->Malloc (dgInt32 (m_vertexCount * sizeof (dgVector)));
	m_simplex = (dgConvexSimplexEdge*) m_allocator->Malloc (dgInt32 (m_edgeCount * sizeof (dgConvexSimplexEdge)));
	m_vertexToEdgeMapping = (const dgConvexSimplexEdge**) m_allocator->Malloc (dgInt32 (m_vertexCount * sizeof (dgConvexSimplexEdge*)));

	for (dgInt32 i = 0; i < vertexCount; i ++) {
		if (vertexMap[i] != -1) {
			m_vertex[vertexMap[i]] = hullVertexArray[i];
			m_vertex[vertexMap[i]].m_w = dgFloat32 (0.0f);
		}
	}
	delete convexHull;

	vertexCount = m_vertexCount;
	mark = polyhedra.IncLRU();
	for (iter.Begin(); iter; iter ++) {
		dgEdge* const edge = &iter.GetNode()->GetInfo();
		if (edge->m_mark != mark) {
			dgEdge *ptr = edge;
			do {
				ptr->m_mark = mark;
				dgConvexSimplexEdge* const simplexPtr = &m_simplex[ptr->m_userData];
				simplexPtr->m_vertex = vertexMap[ptr->m_incidentVertex];
				simplexPtr->m_next = &m_simplex[ptr->m_next->m_userData];
				simplexPtr->m_prev = &m_simplex[ptr->m_prev->m_userData];
				simplexPtr->m_twin = &m_simplex[ptr->m_twin->m_userData];

				ptr = ptr->m_twin->m_next;
			} while (ptr != edge) ;
		}
	} 

	
	m_faceCount = 0;
	dgStack<char> faceMarks (m_edgeCount);
	memset (&faceMarks[0], 0, m_edgeCount * sizeof (dgInt8));

	dgStack<dgConvexSimplexEdge*> faceArray (m_edgeCount);
	for (dgInt32 i = 0; i < m_edgeCount; i ++) {
		dgConvexSimplexEdge* const face = &m_simplex[i];
		if (!faceMarks[i]) {
			dgConvexSimplexEdge* ptr = face;
			do {
				dgAssert ((ptr - m_simplex) >= 0);
				faceMarks[dgInt32 (ptr - m_simplex)] = '1';
				ptr = ptr->m_next;
			} while (ptr != face);

			faceArray[m_faceCount] = face;
			m_faceCount ++;
		}
	}
	m_faceArray = (dgConvexSimplexEdge **) m_allocator->Malloc(dgInt32 (m_faceCount * sizeof(dgConvexSimplexEdge *)));
	memcpy (m_faceArray, &faceArray[0], m_faceCount * sizeof(dgConvexSimplexEdge *));
	
	if (vertexCount > DG_CONVEX_VERTEX_CHUNK_SIZE) {
		// create a face structure for support vertex
		dgStack<dgConvexBox> boxTree (vertexCount);
		dgTree<dgVector,dgInt32> sortTree(GetAllocator());
		dgStack<dgTree<dgVector,dgInt32>::dgTreeNode*> vertexNodeList(vertexCount);

		dgVector minP ( dgFloat32 (1.0e15f),  dgFloat32 (1.0e15f),  dgFloat32 (1.0e15f), dgFloat32 (0.0f)); 
		dgVector maxP (-dgFloat32 (1.0e15f), -dgFloat32 (1.0e15f), -dgFloat32 (1.0e15f), dgFloat32 (0.0f)); 	
		for (dgInt32 i = 0; i < vertexCount; i ++) {
			const dgVector& p = m_vertex[i];
			vertexNodeList[i] = sortTree.Insert (p, i);
			minP.m_x = dgMin (p.m_x, minP.m_x); 
			minP.m_y = dgMin (p.m_y, minP.m_y); 
			minP.m_z = dgMin (p.m_z, minP.m_z); 
			
			maxP.m_x = dgMax (p.m_x, maxP.m_x); 
			maxP.m_y = dgMax (p.m_y, maxP.m_y); 
			maxP.m_z = dgMax (p.m_z, maxP.m_z); 
		}

		boxTree[0].m_box[0] = minP;
		boxTree[0].m_box[1] = maxP;
		boxTree[0].m_leftBox = -1;
		boxTree[0].m_rightBox = -1;
		boxTree[0].m_vertexStart = 0;
		boxTree[0].m_vertexCount = vertexCount;
		dgInt32 boxCount = 1;

		dgInt32 stack = 1;
		dgInt32 stackBoxPool[64];
		stackBoxPool[0] = 0;

		while (stack) {
			stack --;
			dgInt32 boxIndex = stackBoxPool[stack];
			dgConvexBox& box = boxTree[boxIndex];
			if (box.m_vertexCount > DG_CONVEX_VERTEX_CHUNK_SIZE) {
				dgVector median (dgFloat32 (0.0f), dgFloat32 (0.0f), dgFloat32 (0.0f), dgFloat32 (0.0f));
				dgVector varian (dgFloat32 (0.0f), dgFloat32 (0.0f), dgFloat32 (0.0f), dgFloat32 (0.0f));
				for (dgInt32 i = 0; i < box.m_vertexCount; i ++) {
					dgVector& p = vertexNodeList[box.m_vertexStart + i]->GetInfo();
					minP.m_x = dgMin (p.m_x, minP.m_x); 
					minP.m_y = dgMin (p.m_y, minP.m_y); 
					minP.m_z = dgMin (p.m_z, minP.m_z); 

					maxP.m_x = dgMax (p.m_x, maxP.m_x); 
					maxP.m_y = dgMax (p.m_y, maxP.m_y); 
					maxP.m_z = dgMax (p.m_z, maxP.m_z); 

					median += p;
					varian += p.CompProduct3 (p);
				}

				varian = varian.Scale3 (dgFloat32 (box.m_vertexCount)) - median.CompProduct3(median);
				dgInt32 index = 0;
				dgFloat64 maxVarian = dgFloat64 (-1.0e10f);
				for (dgInt32 i = 0; i < 3; i ++) {
					if (varian[i] > maxVarian) {
						index = i;
						maxVarian = varian[i];
					}
				}
				dgVector center = median.Scale3 (dgFloat32 (1.0f) / dgFloat32 (box.m_vertexCount));
				dgFloat32 test = center[index];

				dgInt32 i0 = 0;
				dgInt32 i1 = box.m_vertexCount - 1;
				do {    
					for (; i0 <= i1; i0 ++) {
						dgFloat32 val = vertexNodeList[box.m_vertexStart + i0]->GetInfo()[index];
						if (val > test) {
							break;
						}
					}

					for (; i1 >= i0; i1 --) {
						dgFloat32 val = vertexNodeList[box.m_vertexStart + i1]->GetInfo()[index];
						if (val < test) {
							break;
						}
					}

					if (i0 < i1)	{
						dgSwap(vertexNodeList[box.m_vertexStart + i0], vertexNodeList[box.m_vertexStart + i1]);
						i0++; 
						i1--;
					}
				} while (i0 <= i1);

				if (i0 == 0){
					i0 = box.m_vertexCount / 2;
				}
				if (i0 >= (box.m_vertexCount - 1)){
					i0 = box.m_vertexCount / 2;
				}


				{
					dgVector minP ( dgFloat32 (1.0e15f),  dgFloat32 (1.0e15f),  dgFloat32 (1.0e15f), dgFloat32 (0.0f)); 
					dgVector maxP (-dgFloat32 (1.0e15f), -dgFloat32 (1.0e15f), -dgFloat32 (1.0e15f), dgFloat32 (0.0f)); 	
					for (dgInt32 i = i0; i < box.m_vertexCount; i ++) {
						const dgVector& p = vertexNodeList[box.m_vertexStart + i]->GetInfo();
						minP.m_x = dgMin (p.m_x, minP.m_x); 
						minP.m_y = dgMin (p.m_y, minP.m_y); 
						minP.m_z = dgMin (p.m_z, minP.m_z); 

						maxP.m_x = dgMax (p.m_x, maxP.m_x); 
						maxP.m_y = dgMax (p.m_y, maxP.m_y); 
						maxP.m_z = dgMax (p.m_z, maxP.m_z); 
					}

					box.m_rightBox = boxCount;
					boxTree[boxCount].m_box[0] = minP;
					boxTree[boxCount].m_box[1] = maxP;
					boxTree[boxCount].m_leftBox = -1;
					boxTree[boxCount].m_rightBox = -1;
					boxTree[boxCount].m_vertexStart = box.m_vertexStart + i0;
					boxTree[boxCount].m_vertexCount = box.m_vertexCount - i0;
					stackBoxPool[stack] = boxCount;
					stack ++;
					boxCount ++;
				}

				{
					dgVector minP ( dgFloat32 (1.0e15f),  dgFloat32 (1.0e15f),  dgFloat32 (1.0e15f), dgFloat32 (0.0f)); 
					dgVector maxP (-dgFloat32 (1.0e15f), -dgFloat32 (1.0e15f), -dgFloat32 (1.0e15f), dgFloat32 (0.0f)); 	
					for (dgInt32 i = 0; i < i0; i ++) {
						const dgVector& p = vertexNodeList[box.m_vertexStart + i]->GetInfo();
						minP.m_x = dgMin (p.m_x, minP.m_x); 
						minP.m_y = dgMin (p.m_y, minP.m_y); 
						minP.m_z = dgMin (p.m_z, minP.m_z); 

						maxP.m_x = dgMax (p.m_x, maxP.m_x); 
						maxP.m_y = dgMax (p.m_y, maxP.m_y); 
						maxP.m_z = dgMax (p.m_z, maxP.m_z); 
					}

					box.m_leftBox = boxCount;
					boxTree[boxCount].m_box[0] = minP;
					boxTree[boxCount].m_box[1] = maxP;
					boxTree[boxCount].m_leftBox = -1;
					boxTree[boxCount].m_rightBox = -1;
					boxTree[boxCount].m_vertexStart = box.m_vertexStart;
					boxTree[boxCount].m_vertexCount = i0;
					stackBoxPool[stack] = boxCount;
					stack ++;
					boxCount ++;
				}
			}
		}

		for (dgInt32 i = 0; i < m_vertexCount; i ++) {
			m_vertex[i] = vertexNodeList[i]->GetInfo();
			vertexNodeList[i]->GetInfo().m_w = dgFloat32 (i);
		}

		m_supportTreeCount = boxCount;
		m_supportTree = (dgConvexBox*) m_allocator->Malloc(dgInt32 (boxCount * sizeof(dgConvexBox)));		
		memcpy (m_supportTree, &boxTree[0], boxCount * sizeof(dgConvexBox));

		for (dgInt32 i = 0; i < m_edgeCount; i ++) {
			dgConvexSimplexEdge* const ptr = &m_simplex[i];
			dgTree<dgVector,dgInt32>::dgTreeNode* const node = sortTree.Find(ptr->m_vertex);
			dgInt32 index = dgInt32 (node->GetInfo().m_w);
			ptr->m_vertex = dgInt16 (index);
		}
	}

	for (dgInt32 i = 0; i < m_edgeCount; i ++) {
		dgConvexSimplexEdge* const edge = &m_simplex[i];
		m_vertexToEdgeMapping[edge->m_vertex] = edge;
	}


	SetVolumeAndCG ();
	return true;
}
Example #9
void App::onGraphics3D(RenderDevice* rd, Array<shared_ptr<Surface> >& allSurfaces) {
    // This implementation is equivalent to the default GApp's. It is repeated here to make it
    // easy to modify rendering. If you don't require custom rendering, just delete this
    // method from your application and rely on the base class.

    if (!scene()) {
        if ((submitToDisplayMode() == SubmitToDisplayMode::MAXIMIZE_THROUGHPUT) && (!rd->swapBuffersAutomatically())) {
            swapBuffers();
        }
        rd->clear();
        rd->pushState(); {
            rd->setProjectionAndCameraMatrix(activeCamera()->projection(), activeCamera()->frame());
            drawDebugShapes();
        } rd->popState();
        return;
    }

    updateAudioData();

    if (System::time() - m_lastInterestingEventTime > 10.0) {
        if (Random::common().uniform() < 0.001f) {
            if (m_sonicSculpturePieces.size() > 0) {
                int index = Random::common().integer(0, m_sonicSculpturePieces.size() - 1);
                generatePlayPulse(m_sonicSculpturePieces[index]);
                m_lastInterestingEventTime = System::time();
            }
        }
    }
    handlePlayPulses();

    GBuffer::Specification gbufferSpec = m_gbufferSpecification;
    extendGBufferSpecification(gbufferSpec);
    m_gbuffer->setSpecification(gbufferSpec);
    m_gbuffer->resize(m_framebuffer->width(), m_framebuffer->height());
    m_gbuffer->prepare(rd, activeCamera(), 0, -(float)previousSimTimeStep(), m_settings.depthGuardBandThickness, m_settings.colorGuardBandThickness);

    m_renderer->render(rd, m_framebuffer, m_depthPeelFramebuffer, scene()->lightingEnvironment(), m_gbuffer, allSurfaces);

    // Debug visualizations and post-process effects
    rd->pushState(m_framebuffer); {
        // Call to make the App show the output of debugDraw(...)
        rd->setProjectionAndCameraMatrix(activeCamera()->projection(), activeCamera()->frame());

        for (auto& piece : m_sonicSculpturePieces) {
            piece->render(rd, scene()->lightingEnvironment());
        }
        if (notNull(m_currentSonicSculpturePiece)) {
            m_currentSonicSculpturePiece->render(rd, scene()->lightingEnvironment());
        }
        for (int i = m_playPlanes.size() - 1; i >= 0; --i) {
	        const PlayPlane& pp = m_playPlanes[i];
	        if (pp.endWindowIndex < g_sampleWindowIndex) {
	            m_playPlanes.remove(i);
	            continue; // pp would now refer to removed data; skip drawing this expired plane
	        }

	        Point3 point = pp.origin + (pp.direction * METERS_PER_SAMPLE_WINDOW * (g_sampleWindowIndex-pp.beginWindowIndex));

	        Color4 solidColor(1.0f, .02f, .03f, .15f);
	        //	  Plane plane(point, pp.direction);
	        //	  Draw::plane(plane, rd, solidColor, Color4::clear());

	        CFrame planeFrame = pp.frame;
	        planeFrame.translation = point;
            Vector3 minP(finf(), finf(), finf());
            Vector3 maxP(-finf(), -finf(), -finf());
            for (auto& piece : m_sonicSculpturePieces) {
                piece->minMaxValue(planeFrame, minP, maxP);
            }
            Box b(Vector3(minP.xy()-Vector2(3,3), 0.0f), Vector3(maxP.xy() + Vector2(3, 3), 0.1f), planeFrame);

            Draw::box(b, rd, solidColor, Color4::clear());
        }


        drawDebugShapes();
        const shared_ptr<Entity>& selectedEntity = (notNull(developerWindow) && notNull(developerWindow->sceneEditorWindow)) ? developerWindow->sceneEditorWindow->selectedEntity() : shared_ptr<Entity>();
        scene()->visualize(rd, selectedEntity, allSurfaces, sceneVisualizationSettings(), activeCamera());

        // Post-process special effects
        m_depthOfField->apply(rd, m_framebuffer->texture(0), m_framebuffer->texture(Framebuffer::DEPTH), activeCamera(), m_settings.depthGuardBandThickness - m_settings.colorGuardBandThickness);

        m_motionBlur->apply(rd, m_framebuffer->texture(0), m_gbuffer->texture(GBuffer::Field::SS_EXPRESSIVE_MOTION),
            m_framebuffer->texture(Framebuffer::DEPTH), activeCamera(),
            m_settings.depthGuardBandThickness - m_settings.colorGuardBandThickness);
    } rd->popState();

    rd->push2D(m_framebuffer); {
        rd->setBlendFunc(RenderDevice::BLEND_SRC_ALPHA, RenderDevice::BLEND_ONE_MINUS_SRC_ALPHA);
        Array<SoundInstance> soundInstances;
        Synthesizer::global->getSoundInstances(soundInstances);
        int xOffset = 10;
        Vector2 dim(256,100);
        for (int i = 0; i < soundInstances.size(); ++i) {
            int yOffset = rd->height() - 120 - (120 * i);
            Draw::rect2D(Rect2D::xywh(Point2(xOffset, yOffset), dim), rd, Color3::white(), soundInstances[i].displayTexture());
            float playheadAlpha = ((float)soundInstances[i].currentPosition) / soundInstances[i].audioSample->buffer.size();
            float playheadX = xOffset + (playheadAlpha * dim.x);
            Draw::rect2D(Rect2D::xywh(Point2(playheadX, yOffset), Vector2(1, dim.y)), rd, Color3::yellow());
        }


	
	static shared_ptr<GFont> font = GFont::fromFile(System::findDataFile("arial.fnt"));
	float time = System::time() - m_initialTime;
	if (time < 10.0f) {
	  float fontAlpha = time < 9.0f ? 1.0f : 10.0f - time;
	  font->draw2D(rd, "Press Space to Sculpt", Vector2(rd->width()/2, rd->height()-100.0f), 30.0f, Color4(Color3::black(), fontAlpha), Color4(Color3::white()*0.6f, fontAlpha), GFont::XALIGN_CENTER);
	}
    } rd->pop2D();

    if ((submitToDisplayMode() == SubmitToDisplayMode::MAXIMIZE_THROUGHPUT) && (!renderDevice->swapBuffersAutomatically())) {
        // We're about to render to the actual back buffer, so swap the buffers now.
        // This call also allows the screenshot and video recording to capture the
        // previous frame just before it is displayed.
        swapBuffers();
    }

    // Clear the entire screen (needed even though we'll render over it, since
    // AFR uses clear() to detect that the buffer is not re-used.)
    rd->clear();

    // Perform gamma correction, bloom, and SSAA, and write to the native window frame buffer
    m_film->exposeAndRender(rd, activeCamera()->filmSettings(), m_framebuffer->texture(0));
}
Example #10
void RS_Line::draw(RS_Painter* painter, RS_GraphicView* view, double& patternOffset) {
	if (! (painter && view)) {
        return;
    }

    auto viewportRect = view->getViewRect();
    RS_VectorSolutions endPoints(0);
    endPoints.push_back(getStartpoint());
    endPoints.push_back(getEndpoint());

    RS_EntityContainer ec(nullptr);
    ec.addRectangle(viewportRect.minP(), viewportRect.maxP());

//    if (viewportRect.inArea(getStartpoint(), RS_TOLERANCE))
//         endPoints.push_back(getStartpoint());
//    if (viewportRect.inArea(getEndpoint(), RS_TOLERANCE))
//         endPoints.push_back(getEndpoint());

//    if (endPoints.size() < 2){
//        RS_VectorSolutions vpIts;
//        for(auto p: ec) {
//            auto const sol=RS_Information::getIntersection(this, p, true);
//            for (auto const& vp: sol) {
//                if (vpIts.getClosestDistance(vp) <= RS_TOLERANCE * 10.)
//                    continue;
//                vpIts.push_back(vp);
//            }
//        }
//        for (auto const& vp: vpIts) {
//            if (endPoints.getClosestDistance(vp) <= RS_TOLERANCE * 10.)
//                continue;
//            endPoints.push_back(vp);
//        }
//    }

//    if (endPoints.size()<2) return;

    if ((endPoints[0] - getStartpoint()).squared() >
            (endPoints[1] - getStartpoint()).squared() )
    {
        std::swap(endPoints[0],endPoints[1]);
    }

	RS_Vector pStart{view->toGui(endPoints.at(0))};
	RS_Vector pEnd{view->toGui(endPoints.at(1))};
    //    std::cout<<"draw line: "<<pStart<<" to "<<pEnd<<std::endl;
	RS_Vector direction = pEnd-pStart;

	if (isConstruction(true) && direction.squared() > RS_TOLERANCE){
        //extend line on a construction layer to fill the whole view
		RS_VectorSolutions vpIts;
		for(auto p: ec) {
            auto const sol=RS_Information::getIntersection(this, p, true);
			for (auto const& vp: sol) {
				if (vpIts.getClosestDistance(vp) <= RS_TOLERANCE * 10.)
					continue;
				vpIts.push_back(vp);
			}
		}

		//draw construction lines up to viewport border
		switch (vpIts.size()) {
		case 2:
			// no need to sort intersections
			break;
		case 3:
		case 4: {
			// will use the inner two points
			size_t i{0};
			for (size_t j = 0; j < vpIts.size(); ++j)
				if (viewportRect.inArea(vpIts.at(j), RS_TOLERANCE * 10.))
					std::swap(vpIts[j], vpIts[i++]);

		}
			break;
		default:
			//should not happen
			return;
		}
		pStart=view->toGui(vpIts.get(0));
		pEnd=view->toGui(vpIts.get(1));
		direction=pEnd-pStart;
    }
    double  length=direction.magnitude();
    patternOffset -= length;
    if (( !isSelected() && (
              getPen().getLineType()==RS2::SolidLine ||
              view->getDrawingMode()==RS2::ModePreview)) ) {
        // even if the length is very small, still attempt to draw the line (this may be a potential bug)
        painter->drawLine(pStart,pEnd);
        return;
    }
    //    double styleFactor = getStyleFactor(view);


    // Pattern:
    const RS_LineTypePattern* pat;
    if (isSelected()) {
//        styleFactor=1.;
        pat = &RS_LineTypePattern::patternSelected;
    } else {
        pat = view->getPattern(getPen().getLineType());
    }
	if (!pat) {
//        patternOffset -= length;
        RS_DEBUG->print(RS_Debug::D_WARNING,
                        "RS_Line::draw: Invalid line pattern");
        painter->drawLine(pStart,pEnd);
        return;
    }
//    patternOffset = remainder(patternOffset - length-0.5*pat->totalLength,pat->totalLength)+0.5*pat->totalLength;
    if(length<=RS_TOLERANCE){
        painter->drawLine(pStart,pEnd);
        return; //avoid division by zero
    }
    direction/=length; //cos(angle), sin(angle)
    // Pen to draw pattern is always solid:
    RS_Pen pen = painter->getPen();

    pen.setLineType(RS2::SolidLine);
    painter->setPen(pen);

	if (pat->num <= 0) {
		RS_DEBUG->print(RS_Debug::D_WARNING,"invalid line pattern for line, draw solid line instead");
		painter->drawLine(view->toGui(getStartpoint()),
						  view->toGui(getEndpoint()));
		return;
	}

	// pattern segment length:
	double patternSegmentLength = pat->totalLength;

	// create pattern:
	std::vector<RS_Vector> dp(pat->num);
	std::vector<double> ds(pat->num);
	double dpmm=static_cast<RS_PainterQt*>(painter)->getDpmm();
	for (size_t i=0; i < pat->num; ++i) {
		//        ds[j]=pat->pattern[i] * styleFactor;
		//fixme, styleFactor support needed

		ds[i]=dpmm*pat->pattern[i];
		if (fabs(ds[i]) < 1. ) ds[i] = copysign(1., ds[i]);
		dp[i] = direction*fabs(ds[i]);
	}
	double total= remainder(patternOffset-0.5*patternSegmentLength,patternSegmentLength) -0.5*patternSegmentLength;
    //    double total= patternOffset-patternSegmentLength;

	RS_Vector curP{pStart+direction*total};
	for (int j=0; total<length; j=(j+1)%pat->num) {

        // line segment (otherwise space segment)
		double const t2=total+fabs(ds[j]);
		RS_Vector const& p3=curP+dp[j];
        if (ds[j]>0.0 && t2 > 0.0) {
            // ds[j] < 0 marks a gap, so that whole pattern segment is skipped;
            // otherwise clamp the segment's end points to the start/end of the line
			RS_Vector const& p1 =(total > -0.5)?curP:pStart;
			RS_Vector const& p2 =(t2 < length+0.5)?p3:pEnd;
            painter->drawLine(p1,p2);
        }
        total=t2;
        curP=p3;
	}

}
Example #11
int main(int argc, char *argv[]) {
	if (argc < 3){
		printf("Usage: %s <image-file-name1> <image-file-name2>\n", argv[0]);
		exit(1);
	}

	IplImage* img1 = cvLoadImage(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
	if (!img1) {
		printf("Could not load image file: %s\n", argv[1]);
		exit(1);
	}
	IplImage* img1f = cvCreateImage(cvGetSize(img1), IPL_DEPTH_32F, 1);
	cvConvertScale(img1, img1f, 1.0 / 255.0);

	IplImage* img2 = cvLoadImage(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
	if (!img2) {
		printf("Could not load image file: %s\n", argv[2]);
		exit(1);
	}
	IplImage* img2f = cvCreateImage(cvGetSize(img1), IPL_DEPTH_32F, 1);
	cvConvertScale(img2, img2f, 1.0 / 255.0);

	/**
	 * Task: Homographies (5 points)
	 * 
	 * Assuming the images are taken with a distortion-free pinhole camera, views with
	 * different image planes but the same center of projection can be described by
	 * projective mappings, so-called homographies.
	 * 
	 * - Write down a translation as a homography (on paper!).
	 * - Shift the image plane of a test image 20 pixels to the right without changing
	 *   the center of projection. Use \code{cvWarpPerspective} for this.
	 * - How many point correspondences are at least required to determine a projective
	 *   mapping between two images uniquely up to scale? Why? (Answer in writing!)
	 */
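	/*
	 * Worked note (a sketch, not part of the original assignment hand-in): a pure
	 * translation by (tx, ty) in pixel coordinates is the homography
	 *
	 *        [ 1  0  tx ]
	 *    H = [ 0  1  ty ] ,   (x', y', 1)^T ~ H (x, y, 1)^T,
	 *        [ 0  0   1 ]
	 *
	 * and a general homography has 8 degrees of freedom (9 entries defined only up to scale),
	 * so with 2 equations per point correspondence at least 4 correspondences are needed.
	 */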

/* TODO */
	IplImage* img_moved = cvCreateImage(cvGetSize(img1), IPL_DEPTH_32F, 1);
	cv::Mat matImg1f_task1 = cv::Mat(img1f);
	cv::Mat matImgMoved = cv::Mat(img_moved);

	float data[] = { 1, 0, -20, 0, 1, 0, 0, 0, 1 };
	cv::Mat trans(3, 3, CV_32FC1, data);
	cv::warpPerspective(matImg1f_task1, matImgMoved, trans, matImgMoved.size());
	cv::namedWindow("mainWin", CV_WINDOW_AUTOSIZE);
	cv::Mat img_moved_final(img_moved);
	cv::imshow("mainWin", img_moved_final);
	cvWaitKey(0);

	/**
	 * Task: Panorama (15 points)
	 *
	 * The goal of this task is to construct a panorama from two given images.
	 * \begin{center}
	 * \includegraphics[width = 0.3\linewidth]{left.png}
	 * \includegraphics[width = 0.3\linewidth]{right.png}
	 * \end{center}
	 * 
	 * First, a perspective transformation has to be determined from the given point
	 * correspondences
	 * \begin{center}
	 * \begin{tabular}{|c|c|}
	 * \hline
	 * left image & right image \\
	 * $(x, y)$ & $(x, y)$ \\ \hline \hline
	 * (463, 164) & (225, 179)\\ \hline
	 * (530, 357) & (294, 370)\\ \hline
	 * (618, 357) & (379, 367)\\ \hline
	 * (610, 153) & (369, 168)\\ \hline
	 * \end{tabular}
	 * \end{center}
	 * with which the images can be transformed onto a common image plane.
	 * 
	 * - Compute the transformation from the given point correspondences. Use the
	 *   function \code{cvGetPerspectiveTransform}. What is the central idea of the
	 *   DLT algorithm as it was presented in the lecture?
	 */
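	/*
	 * Sketch of the DLT idea (added note, not part of the original hand-in): write each
	 * correspondence x_i <-> x_i' in homogeneous coordinates; x_i' ~ H x_i is equivalent to
	 * x_i' x (H x_i) = 0, which gives two independent linear equations in the 9 entries of H.
	 * Stacking the equations of all correspondences into A h = 0 (h = entries of H) and taking
	 * the null-space direction of A -- in practice the singular vector of the smallest singular
	 * value -- yields H up to scale; cvGetPerspectiveTransform solves the exact 4-point case.
	 */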

/* TODO */
	CvMat *P = cvCreateMat(3, 3, CV_32FC1);
	CvPoint points1[] = { cvPoint(463, 164), cvPoint(530, 357), cvPoint(618, 357), cvPoint(610, 153) };
	CvPoint points2[] = { cvPoint(225, 179), cvPoint(294, 370), cvPoint(379, 367), cvPoint(369, 168) };
	CvPoint2D32f pt1[4], pt2[4];
	for (int i = 0; i < 4; ++i) {
		pt2[i].x = points2[i].x;
		pt2[i].y = points2[i].y;
		pt1[i].x = points1[i].x;
		pt1[i].y = points1[i].y;
	}
	cvGetPerspectiveTransform(pt1, pt2, P);
	
	/**
	 * - Determine the image size required for the panorama image.
	 */
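	// Sketch of the idea behind the code below (added note): warp the four corners of the
	// left image with the homography P; the panorama must cover both those projected corners
	// and the full right image, so its size is the bounding box of all these points.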

/* TODO */
	int h = img1f->height - 1;
	int w = img1f->width - 1;
	float p1[] = { 0.0, 0.0, 1.0 };
	float p2[] = { 0.0, (float)(h), 1.0 };
	float p3[] = { (float)(w), (float)(h), 1.0 };
	float p4[] = { (float)(w), 0.0, 1.0 };

	
	cv::Mat P1 = P * cv::Mat(3, 1, CV_32FC1, p1);
	cv::Mat P2 = P * cv::Mat(3, 1, CV_32FC1, p2);
	cv::Mat P3 = P * cv::Mat(3, 1, CV_32FC1, p3);
	cv::Mat P4 = P * cv::Mat(3, 1, CV_32FC1, p4);

	// mustn't be zero
	assert(P1.at<float>(2,0) != 0 && P2.at<float>(2,0) != 0 && P3.at<float>(2,0) != 0 && P4.at<float>(2,0) != 0);

	P1 = P1 / P1.at<float>(2,0);
	P2 = P2 / P2.at<float>(2,0);
	P3 = P3 / P3.at<float>(2,0);
	P4 = P4 / P4.at<float>(2,0);



	/**
	 * - Project the left image into the image plane of the right image. Make sure
	 *   that the left image border is also projected into the panorama image.
	 */
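	// Sketch of the approach used below (added note): compose a translation T that shifts by
	// (-minP.x, -minP.y) with the homography P, so parts of the warped left image that would
	// land at negative coordinates stay inside the panorama; the left image is warped with
	// T*P and the right image with T alone.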

/* TODO */
	///////// There is probably an error somewhere here with the size...
	std::vector<cv::Mat*> matrices;
	matrices.push_back(&P1);
	matrices.push_back(&P2);
	matrices.push_back(&P3);
	matrices.push_back(&P4);
	cv::Point minP(P1.at<float>(0,0), P1.at<float>(1,0)), maxP(P1.at<float>(0,0), P1.at<float>(1,0));
	for(int i = 0; i < matrices.size(); ++i) {
			minP.x = (int)(min(matrices[i]->at<float>(0,0), (float)minP.x));
			minP.y = (int)(min(matrices[i]->at<float>(1,0), (float)minP.y));

			maxP.x = (int)(max(matrices[i]->at<float>(0,0), (float)maxP.x)+1.0);
			maxP.y = (int)(max(matrices[i]->at<float>(1,0), (float)maxP.y)+1.0);
	}

	minP.x = min(minP.x, 0); minP.y = min(minP.y, 0);
	maxP.x = max(maxP.x, img1f->width-1); maxP.y = max(maxP.y, img1f->height-1);
	// create image
	cv::Mat Panorama = cv::Mat(cv::Size(maxP-minP),  CV_32FC1, cv::Scalar(0.0));
	cv::Mat PLeft = cv::Mat(cv::Size(maxP-minP),  CV_32FC1, cv::Scalar(0.0));
	cv::Mat PRight = cv::Mat(cv::Size(maxP-minP),  CV_32FC1, cv::Scalar(0.0));

	cv::Mat matImg1f = cv::Mat( img1f);
	cv::Mat matImg2f = cv::Mat( img2f);
	for(int y=0; y < matImg1f.rows; ++y ) {
		for(int x=0; x < matImg1f.cols; ++x ) {
			PLeft.at<float>(y,x) = matImg1f.at<float>(y,x);
		}
	}
	for(int y=0; y < matImg2f.rows; ++y ) {
		for(int x=0; x < matImg2f.cols; ++x ) {
			PRight.at<float>(y,x) = matImg2f.at<float>(y,x);
		}
	}

	
	cv::imshow("mainWin", PLeft);
	cv::waitKey(0);
	cv::imshow("mainWin", PRight);
	cv::waitKey(0);

	float trans2[] = { 1.0, 0.0, -minP.x, 0.0, 1.0, -minP.y, 0.0, 0.0, 1.0};
	cv::Mat translation(3,3,CV_32FC1,trans2);
	//translate P
	cv::Mat Pnew = translation*cv::Mat(P);
	cv::warpPerspective(PLeft, Panorama, Pnew, Panorama.size());
	cv::warpPerspective(PRight, PLeft, translation, PLeft.size());
	PRight = PLeft.clone();

	cv::imshow("mainWin", PLeft);
	cv::waitKey(0);
	cv::imshow("mainWin", Panorama);
	cv::waitKey(0);
	/**
	 * - Compose the panorama image so that pixels for which two values are available
	 *   are assigned the mean of those values.
	 */
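	// Added note on the blending below: mask is 0.5 where both warped images have data and 0
	// elsewhere, so weighted = 1 - mask is 0.5 in the overlap and 1 outside it; multiplying
	// the summed images by weighted therefore averages the overlap and leaves single-image
	// regions unchanged.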

	cv::Mat mask = (Panorama > 0.0) & (PLeft > 0.0);
	cv::imshow("mainWin", mask);
	cv::waitKey(0);

	mask.convertTo(mask,CV_32FC1, 0.5/255.); 
	cv::Mat weighted = cv::Mat(Panorama.size(),  CV_32FC1, cv::Scalar(1.0)) - mask;

	Panorama = Panorama + PLeft;
	cv::multiply(Panorama, weighted, Panorama);

	cv::imshow("mainWin", Panorama);
	cv::waitKey(0);
/* TODO */

	/**
	 * - Display the panorama image.
	 */

/* TODO */

}