Пример #1
0
string predictDigits(Mat &originalImage) {
	// Scans a binarized image (digit pixels == 0 on a light background)
	// column by column, isolates each digit's bounding box, classifies it
	// with a pre-trained ANN and returns the digits concatenated as a
	// string.  Side effects: opens debug windows (blocking on waitKey)
	// and writes an annotated clone of the input image to disk.
	string numbers = "";
	Mat clon = originalImage.clone();

	// Read the model from the XML file and create the neural network.
	// NOTE(review): hard-coded absolute path — consider parameterizing.
	CvANN_MLP nnetwork;
	CvFileStorage* storage = cvOpenFileStorage(
			"/home/andersson/Escritorio/Temporales/neural_network.xml", 0,
			CV_STORAGE_READ);
	CvFileNode *n = cvGetFileNodeByName(storage, 0, "DigitOCR");
	nnetwork.read(storage, n);
	cvReleaseFileStorage(&storage);

	int rows = originalImage.rows;
	int cols = originalImage.cols;

	int lx = 0;            // left column of the current digit (inclusive)
	int ty = 0;            // top row (inclusive)
	int by = 0;            // bottom row (inclusive)
	int rx = 0;            // right column (inclusive)
	int flag = 0;          // marks "black pixel found" in the scan loops
	int currentColumn = 1; // column where the next digit search resumes
	bool temp = false;     // true once the right image edge is reached

	while (!temp) {
		/* Left X: first column >= currentColumn containing a black pixel.
		 * Reaching the last column sets `temp` and ends the outer loop. */
		for (int i = currentColumn; i < cols; i++) {
			for (int j = 1; j < rows; j++) {
				if (i != (cols - 1)) {
					if (originalImage.at<uchar> (j, i) == 0) {
						lx = i;
						flag = 1;
						break;
					}
				} else {
					temp = true;
					break;
				}
			}

			if (!temp) {
				if (flag == 1) {
					flag = 0;
					break;
				}
			} else {
				break;
			}
		}

		if (temp) {
			continue; // while-condition terminates the loop
		}

		/* Right X: first fully-white column after lx; the digit ends on
		 * the column just before it. */
		int tempNum;
		for (int i = lx; i < cols; i++) {
			tempNum = 0;
			for (int j = 1; j < rows; j++) {
				if (originalImage.at<uchar> (j, i) == 0) {
					tempNum += 1;
				}
			}

			if (tempNum == 0) {
				rx = (i - 1);
				break;
			}
		}

		currentColumn = rx + 1;

		/* Top Y: first row containing a black pixel within [lx, rx]. */
		for (int i = 1; i < rows; i++) {
			for (int j = lx; j <= rx; j++) {
				if (originalImage.at<uchar> (i, j) == 0) {
					ty = i;
					flag = 1;
					break;
				}
			}

			if (flag == 1) {
				flag = 0;
				break;
			}
		}

		/* Bottom Y: last row containing a black pixel within [lx, rx]. */
		for (int i = (rows - 1); i >= 1; i--) {
			for (int j = lx; j <= rx; j++) {
				if (originalImage.at<uchar> (i, j) == 0) {
					by = i;
					flag = 1;
					break;
				}
			}

			if (flag == 1) {
				flag = 0;
				break;
			}
		}

		// Fix: lx..rx and ty..by are inclusive bounds, so +1 keeps the
		// last black column/row inside the crop (previously cut off).
		int width = rx - lx + 1;
		int height = by - ty + 1;

		// Cropping image (view into originalImage)
		Mat crop(originalImage, Rect(lx, ty, width, height));

		// Cloning image so later processing can't touch the original
		Mat splittedImage;
		splittedImage = crop.clone();

		// Processing image: blur, threshold and scale down to the
		// ROWCOLUMN x ROWCOLUMN size the network expects.
		// NOTE(review): threshold maxval of ATTRIBUTES - 1 looks odd
		// (255 would be usual) — confirm against the training pipeline.
		Mat output;
		cv::GaussianBlur(splittedImage, output, cv::Size(5, 5), 0);
		cv::threshold(output, output, 50, ATTRIBUTES - 1, 0);
		cv::Mat scaledDownImage(ROWCOLUMN, ROWCOLUMN, CV_8U, cv::Scalar(0));
		scaleDownImage(output, scaledDownImage);

		int pixelValueArray[ATTRIBUTES];
		cv::Mat testSet(1, ATTRIBUTES, CV_32F);
		// Mat to Pixel Value Array
		convertToPixelValueArray(scaledDownImage, pixelValueArray);

		// Pixel Value Array to Mat CV_32F
		cv::Mat classificationResult(1, CLASSES, CV_32F);
		// Fix: was `i <= ATTRIBUTES`, which read one past the end of
		// pixelValueArray and wrote past testSet's last column.
		for (int i = 0; i < ATTRIBUTES; i++) {
			testSet.at<float> (0, i) = pixelValueArray[i];
		}

		// Predicting the number
		nnetwork.predict(testSet, classificationResult);

		// Selecting the class with the highest network response
		int maxIndex = 0;
		float value = 0.0f;
		float maxValue = classificationResult.at<float> (0, 0);
		for (int index = 1; index < CLASSES; index++) {
			value = classificationResult.at<float> (0, index);
			if (value > maxValue) {
				maxValue = value;
				maxIndex = index;
			}
		}

		printf("Class result: %d\n", maxIndex);
		numbers = numbers + convertIntToString(maxIndex);

		// Debug visualization — blocks until a key is pressed.
		Scalar colorRect = Scalar(0.0, 0.0, 255.0);
		rectangle(clon, Point(lx, ty), Point(rx, by), colorRect, 1, 8, 0);
		namedWindow("Clon", CV_WINDOW_NORMAL);
		imshow("Clon", clon);
		waitKey(0);

		namedWindow("Test", CV_WINDOW_NORMAL);
		imshow("Test", splittedImage);
		waitKey(0);
	}

	imwrite("/home/andersson/Escritorio/Temporales/clon.png", clon);

	return numbers;
}
Пример #2
0
void doCompile(FILE *inf, FILE *debOutf, FILE *resOutf, TextFile *cptDef, FILE *sve) {
	// Reads compact/object definitions from `inf`, converts them to the
	// binary compact format and writes two variants: a debug file
	// (`debOutf`, including symbol names and type fields) and a release
	// file (`resOutf`).  Also appends v0.0288 diff data, the save-ID list
	// (from `sve`) and per-version reset data.
	uint16 maxStrl = 0; // NOTE(review): never updated — printed as 0 below
	uint16 maxCptl = 0; // largest compact length seen (in 16-bit words)

	printf("Processing...\n");
	CptObj *resCpts;
	uint16 baseLists[NUM_DATA_LISTS];
	memset(baseLists, 0, NUM_DATA_LISTS * 2);
	resCpts = (CptObj *)malloc(MAX_CPTS * sizeof(CptObj));
	assert(resCpts); // fix: don't continue on allocation failure
	memset(resCpts, 0, MAX_CPTS * sizeof(CptObj));
	printf(" MainLists...\n");
	processMainLists(inf, resCpts, baseLists);
	printf(" Compacts...\n");
	processCpts(inf, resCpts);
	printf(" Turntables...\n");
	processTurntabs(inf, resCpts);
	printf(" Animation tables...\n");
	processBins(inf, resCpts, "ANIMSEQS", "ANIMSEQ", ANIMSEQ);
	printf(" Unknown binaries...\n");
	processBins(inf, resCpts, "MISCBINS", "MISCBIN", MISCBIN);
	printf(" Get To tables...\n");
	processBins(inf, resCpts, "GETTOTAB", "GET_TOS", GETTOTAB);
	printf(" Scratch buffers...\n");
	processBins(inf, resCpts, "SCRATCHR", "SCRATCH", ROUTEBUF);
	printf(" Symbolic links...\n");
	processSymlinks(inf, resCpts, baseLists);
	printf("Converting to binary data...\n");
	uint32 numCpts = 1; // slot 0 is implicit/reserved
	for (uint32 cnt = 1; cnt < MAX_CPTS; cnt++)
		if (resCpts[cnt].data || resCpts[cnt].dbgName || resCpts[cnt].len)
			numCpts++;

	// Length of each data list = highest used element index + 1.
	// Fix: zero-initialize — lists with no entries previously left their
	// slot uninitialized, and that garbage was used as a header value and
	// as a loop bound below.
	uint16 dataListLen[NUM_DATA_LISTS];
	memset(dataListLen, 0, sizeof(dataListLen));
	for (uint32 cnt = 0; cnt < NUM_DATA_LISTS; cnt++)
		for (uint16 elemCnt = 0; elemCnt < 0x1000; elemCnt++) {
			uint32 id = (cnt << 12) | elemCnt;
			if (resCpts[id].data || resCpts[id].dbgName || resCpts[id].len)
				dataListLen[cnt] = elemCnt + 1;
		}

	// write the header
	uint32 rev = 0;
	fwrite(&rev, 2, 1, debOutf);
	fwrite(&rev, 2, 1, resOutf);
	rev = NUM_DATA_LISTS;
	fwrite(&rev, 2, 1, debOutf);
	fwrite(&rev, 2, 1, resOutf);
	for (uint32 cnt = 0; cnt < NUM_DATA_LISTS; cnt++) {
		fwrite(dataListLen + cnt, 2, 1, debOutf);
		fwrite(dataListLen + cnt, 2, 1, resOutf);
	}

	// Placeholder size fields; seeked back to and filled in at the end.
	uint32 binSize = 0;
	uint32 binDest = ftell(debOutf);
	fwrite(&binSize, 1, 4, debOutf);
	fwrite(&binSize, 1, 4, resOutf);
	fwrite(&binSize, 1, 4, debOutf);
	fwrite(&binSize, 1, 4, resOutf);

	char *asciiBuf = (char *)malloc(ASCII_SIZE);
	assert(asciiBuf); // fix: guard allocation
	char *asciiPos = asciiBuf;

	// now process all the compacts
	uint32 cptSize[2];
	cptSize[0] = ftell(debOutf);
	cptSize[1] = ftell(resOutf);
	for (uint32 lcnt = 0; lcnt < NUM_DATA_LISTS; lcnt++) {
		for (uint32 eCnt = 0; eCnt < dataListLen[lcnt]; eCnt++) {
			uint32 cId = (lcnt << 12) | eCnt;
			CptObj *cpt = resCpts + cId;
			if (resCpts[cId].data || resCpts[cId].dbgName || resCpts[cId].len || resCpts[cId].type) {
				// the debug variant also stores the symbol name
				strcpy(asciiPos, cpt->dbgName);
				asciiPos += strlen(cpt->dbgName) + 1;

				assert(cpt->len < 0xFFFF);
				uint16 dlen = (uint16)cpt->len;
				if (dlen > maxCptl)
					maxCptl = dlen;
				binSize += dlen;
				assert(dlen != 0);

				fwrite(&dlen, 2, 1, debOutf);
				fwrite(&dlen, 2, 1, resOutf);

				// the type field only exists in the debug variant
				uint16 field = resCpts[cId].type;
				fwrite(&field, 2, 1, debOutf);

				fwrite(cpt->data, 2, dlen, debOutf);
				fwrite(cpt->data, 2, dlen, resOutf);
			} else {
				// empty slot: length 0
				uint16 tmp = 0;
				fwrite(&tmp, 2, 1, debOutf);
				fwrite(&tmp, 2, 1, resOutf);
			}
		}
		// fix: uint32 does not match %lu on all ABIs — cast explicitly
		printf("DEBUG lcnt: %lu Output File Position: 0x%08lX\r\n", (unsigned long)lcnt, ftell(debOutf));
	}
	cptSize[0] = ftell(debOutf) - cptSize[0];
	cptSize[1] = ftell(resOutf) - cptSize[1];
	assert(!(cptSize[0] & 1)); // everything written must be word-aligned
	assert(!(cptSize[1] & 1));
	cptSize[0] /= 2; // sizes are stored in 16-bit words
	cptSize[1] /= 2;

	for (uint32 cnt = 0; cnt < dlinkCount; cnt++) {
		strcpy(asciiPos, dlinkNames[cnt]);
		asciiPos += strlen(dlinkNames[cnt]) + 1;
	}

	// NOTE(review): asciiPos is never checked against ASCII_SIZE — the
	// name block can overflow if the combined names exceed the budget.
	uint32 asciiSize = (uint32)(asciiPos - asciiBuf);
	fwrite(&asciiSize, 1, 4, debOutf);
	fwrite(asciiBuf, 1, asciiSize, debOutf);
	free(asciiBuf);

	// the direct links...
	fwrite(&dlinkCount, 2, 1, debOutf);
	fwrite(&dlinkCount, 2, 1, resOutf);
	for (uint32 cnt = 0; cnt < dlinkCount; cnt++) {
		fwrite(dlinks + cnt * 2 + 0, 2, 1, debOutf);
		fwrite(dlinks + cnt * 2 + 0, 2, 1, resOutf);

		fwrite(dlinks + cnt * 2 + 1, 2, 1, debOutf);
		fwrite(dlinks + cnt * 2 + 1, 2, 1, resOutf);
	}
	printf("Processing diff data...\n");
	printf("DEBUG Output File Position: 0x%08lX\r\n", ftell(debOutf));
	// 288 diffdata: textual patch entries, either a single value
	// ("data_L[E][ofs] ... 0xVAL;") or a whole-compact replacement
	// ("$name len words...").
	FILE *dif = fopen("288diff.txt", "r");
	assert(dif);
	char line[1024];
	uint16 diff[8192];
	uint16 diffDest = 0;
	uint16 diffNo = 0;
	while (fgets(line, 1024, dif)) {
		crop(line);
		if (line[0] != '$') {
			// single-element patch
			assert(memcmp(line, "data_", 5) == 0);
			char *pos = line + 5;
			char *stopCh;
			uint16 lId = (uint16)strtoul(pos, &stopCh, 10);
			assert(*stopCh == '[');
			uint16 eId = (uint16)strtoul(stopCh + 1, &stopCh, 10);
			assert((stopCh[0] == ']') && (stopCh[1] == '[') && (eId <= 0xFFF) && (lId <= 7));
			uint16 id = (lId << 12) | eId;
			uint16 elemNo = (uint16)strtoul(stopCh + 2, &stopCh, 10);
			assert(*stopCh == ']');
			stopCh = strstr(stopCh, "0x") + 2;
			uint16 val = (uint16)strtoul(stopCh, &stopCh, 16);
			assert(*stopCh == ';');
			assert(diffDest + 4 <= 8192); // fix: bound-check before writing
			diff[diffDest++] = id;
			diff[diffDest++] = elemNo;
			diff[diffDest++] = 1; // one word follows
			diff[diffDest++] = val;
			diffNo++;
		} else {
			// whole-compact replacement
			char *pos = strchr(line, ' ');
			*pos = '\0';
			uint16 id = findCptId(line + 1, cptDef);
			assert(id);
			diff[diffDest++] = id;
			diff[diffDest++] = 0; // offset 0 == replace the whole compact
			pos++;
			uint16 len = (uint16)strtoul(pos, &pos, 10);
			diff[diffDest++] = len;
			assert(len);
			assert(resCpts[id].len == len);
			assert(diffDest + len <= 8192); // fix: bound-check before loop
			for (uint16 cnt = 0; cnt < len; cnt++) {
				assert(*pos == ' ');
				pos++;
				diff[diffDest++] = (uint16)strtoul(pos, &pos, 16);
			}
			assert(diff[diffDest - 1] == 0xFFFF); // compact terminator
			diffNo++;
		}
	}
	fclose(dif);
	free(resCpts);
	assert(diffDest <= 8192);
	fwrite(&diffNo, 1, 2, debOutf);
	fwrite(&diffDest, 1, 2, debOutf);
	fwrite(diff, 2, diffDest, debOutf);
	fwrite(&diffNo, 1, 2, resOutf);
	fwrite(&diffDest, 1, 2, resOutf);
	fwrite(diff, 2, diffDest, resOutf);

	printf("Converting Save data...\n");
	printf("DEBUG Output File Position: 0x%08lX\r\n", ftell(debOutf));
	// the IDs of the compacts to be saved
	char cptName[1024];
	uint16 saveIds[2048];
	uint16 numIds = 0;
	while (fgets(cptName, 1024, sve)) {
		crop(cptName);
		uint16 resId = findCptId(cptName, cptDef);
		if (!resId)
			printf("ERROR: Can't find definition of %s\n", cptName);
		else {
			assert(numIds < 2048); // fix: don't overflow saveIds
			saveIds[numIds] = resId;
			numIds++;
		}
	}
	printf("%d saveIds\n", numIds);
	fwrite(&numIds, 2, 1, debOutf);
	fwrite(saveIds, 2, numIds, debOutf);
	fwrite(&numIds, 2, 1, resOutf);
	fwrite(saveIds, 2, numIds, resOutf);

	printf("Converting Reset data...\n");
	// now append the reset data
	uint16 gameVers[7] = { 303, 331, 348, 365, 368, 372, 288 };
	// make sure all files exist
	bool filesExist = true;
	char inName[32];
	for (int i = 0; i < 7; i++) {
		sprintf(inName, "RESET.%03d", gameVers[i]);
		FILE *test = fopen(inName, "rb");
		if (test)
			fclose(test);
		else {
			filesExist = false;
			printf("File %s not found\n", inName);
		}
	}

	if (filesExist) {
		// RESET.288 is the baseline; other versions are stored as
		// word-level diffs against it.
		FILE *res288 = fopen("RESET.288", "rb");
		fseek(res288, 0, SEEK_END);
		assert((ftell(res288) / 2) < 65536);
		uint16 resSize = (uint16)(ftell(res288) / 2);
		fseek(res288, 0, SEEK_SET);
		uint16 *buf288 = (uint16 *)malloc(resSize * 2);
		assert(buf288); // fix: guard allocation
		fread(buf288, 2, resSize, res288);
		fclose(res288);
		fwrite(&resSize, 1, 2, debOutf);
		fwrite(buf288, 2, resSize, debOutf);

		uint16 tmp = 7; // number of game versions
		fwrite(&tmp, 2, 1, debOutf);
		tmp = 288;      // the baseline version needs no diff
		fwrite(&tmp, 2, 1, debOutf);
		tmp = 0;
		fwrite(&tmp, 2, 1, debOutf);

		printf("DEBUG Output File Position: 0x%08lX\r\n", ftell(debOutf));
		printf("reset destination: %ld\n", ftell(debOutf));
		for (int cnt = 0; cnt < 6; cnt++) {
			printf("Processing diff v0.0%03d\n", gameVers[cnt]);
			uint16 diffPos = 0;
			sprintf(inName, "RESET.%03d", gameVers[cnt]);
			FILE *resDiff = fopen(inName, "rb");
			fseek(resDiff, 0, SEEK_END);
			assert(ftell(resDiff) == (resSize * 2));
			fseek(resDiff, 0, SEEK_SET);
			uint16 *bufDif = (uint16 *)malloc(resSize * 2);
			assert(bufDif); // fix: guard allocation
			fread(bufDif, 2, resSize, resDiff);
			fclose(resDiff);
			// collect (position, value) pairs for every differing word
			for (uint16 eCnt = 0; eCnt < resSize; eCnt++)
				if (buf288[eCnt] != bufDif[eCnt]) {
					diff[diffPos++] = eCnt;
					diff[diffPos++] = bufDif[eCnt];
				}
			free(bufDif);
			fwrite(gameVers + cnt, 1, 2, debOutf);
			assert(!(diffPos & 1));
			diffPos /= 2; // stored as the number of pairs
			fwrite(&diffPos, 1, 2, debOutf);
			fwrite(diff, 2, 2 * diffPos, debOutf);
			printf("diff v0.0%03d: 2 * 2 * %d\n", gameVers[cnt], diffPos);
			printf("DEBUG Output File Position: 0x%08lX\r\n", ftell(debOutf));
		}
		free(buf288);
	} else {
		printf("Creating CPT file with Dummy reset data @ %ld\n", ftell(debOutf));
		uint16 resetFields16 = 4;
		fwrite(&resetFields16, 2, 1, debOutf);
		uint32 blah = 8;
		fwrite(&blah, 4, 1, debOutf); // size field: 8 bytes
		blah = (uint32)-1;
		fwrite(&blah, 4, 1, debOutf); // save file revision. -1 is unknown to scummvm, so it'll refuse to load it.
		resetFields16 = 0;
		fwrite(&resetFields16, 2, 1, debOutf); // numDiffs: 0, no further reset blocks.
	}

	// now fill the raw-compact-data-size header field
	fseek(resOutf, binDest, SEEK_SET);
	fseek(debOutf, binDest, SEEK_SET);
	fwrite(&binSize, 1, 4, debOutf);
	fwrite(&binSize, 1, 4, resOutf);
	fwrite(cptSize + 0, 1, 4, debOutf);
	fwrite(cptSize + 1, 1, 4, resOutf);

	printf("%d diffs\n", diffNo);
	// fix: uint32 with %ld is undefined on LP64 — cast to a matching type
	printf("%lu Compacts in total\n", (unsigned long)numCpts);
	printf("max strlen = %d\n", maxStrl);
	printf("raw size = 2 * %lu\n", (unsigned long)binSize);
	printf("max cptlen = %d\n", maxCptl);
}
Пример #3
0
/*
 * Iteratively partitions the particle array `ps` into a kd-tree, splitting
 * `node` until every leaf holds at most M particles.  Uses the global
 * `sortStack` as an explicit work stack instead of recursion.  Relies on
 * the PARTITION / NEW_NODE / split machinery declared elsewhere in this
 * file.  Always returns 0.
 */
static int sort(struct pqNode **ps, pTree *node, int M)
{
    int sp=0;
    pTree *pLeft=NULL, *pRight=NULL;
    int nBucket = 0;  /* NOTE(review): never used below — dead variable? */

    /*========================================================================
     * Allocate stack
     *======================================================================*/
    /* Upper bound on stack depth: ~log2 of the leaf count for NPARTICLES
     * particles with bucket size M (at least 1). */
    int ns = (int)MMAX(1, floor(log(((double)(NPARTICLES+1))/(M+1))/log(2.0)));
    sp = 0;
    sortStack = (pTree **)realloc(sortStack, ns * sizeof(pTree *));
    assert(sortStack != NULL);

    /* Node is already small enough to be a leaf — nothing to do. */
    if (node->iUpper - node->iLower + 1 <= M) 
        return 0;

    while (1)
    {
        int i, j;
        struct pqNode *t;
        int nl, nr;
        int d;
        FLOAT fSplit;

        /* Choose the split dimension d and split coordinate fSplit. */
        assert(node != NULL);
        split(node, &d, &fSplit);

        i = node->iLower;
        j = node->iUpper;

        /* In-place partition of ps[iLower..iUpper] around fSplit along
         * dimension d; afterwards i is the first index of the right side. */
        partitionCount++;
        PARTITION(ps, t, ->r[d], i,j, < fSplit,> fSplit);
        nl = i - node->iLower;           /* # particles on the left side */
        nr = node->iUpper - i + 1;       /* # particles on the right side */

        //fprintf(err, "nl=%i nr=%i\n", nl, nr);

        /*========================================================================
         * If both sides of the partition are not empty then create two new nodes
         * and crop the bounding boxes. Otherwise, undo the split operation.
         *======================================================================*/
        if (nl > 0 || nr > 0) {  // jpc changed this from && to || so that a split is always created.
            NEW_NODE(node, pLeft, node->iLower, i - 1);
            NEW_NODE(node, pRight, i, node->iUpper);
            nodeCount += 2;
#if 0
            crop(ps, node->pLeft, node->pLeft->iLower,  node->pLeft->iUpper);
            crop(ps, node->pRight, node->pRight->iLower, node->pRight->iUpper);
#else
            /* Instead of recomputing tight boxes, halve the parent box
             * along d and shift the centers to cover each half. */
            _3x_(i) node->pLeft->bnd.fCenter[i]  = node->bnd.fCenter[i];
            _3x_(i) node->pLeft->bnd.fMax[i]     = node->bnd.fMax[i];

            _3x_(i) node->pRight->bnd.fCenter[i] = node->bnd.fCenter[i];
            _3x_(i) node->pRight->bnd.fMax[i]    = node->bnd.fMax[i];

            node->pLeft->bnd.fMax[d]            *= 0.5;
            node->pLeft->bnd.fCenter[d]         -= node->pLeft->bnd.fMax[d];

            node->pRight->bnd.fMax[d]           *= 0.5;
            node->pRight->bnd.fCenter[d]        += node->pRight->bnd.fMax[d];
#endif

            pLeft = node->pLeft;
            pRight = node->pRight;
        }
        else
        {
            /* One side is empty: shrink this node's box toward the
             * occupied half instead of creating children. */
            node->bnd.fMax[d] *= 0.5;
            if (nl > 0) {
                node->bnd.fCenter[d] -= node->bnd.fMax[d];
                pLeft = node;
            }
            else {
                node->bnd.fCenter[d] += node->bnd.fMax[d];
                pRight = node;
            }
        }

        /*========================================================================
         * Now figure out which subfile to process next
         *======================================================================*/
        /* Recurse into the larger side; push the other onto the stack. */
        if (nl > M && nr > M) 
        {
            if (nr > nl) sortStack[sp++] = pRight, node = pLeft;
            else sortStack[sp++] = pLeft, node = pRight;
        }
        else 
        {
            if (nl > M) node = pLeft;
            //else if (nl > 0) pLeft->iLower = 0;

            if (nr > M) node = pRight;
            //else if (nr > 0) pRight->iLower = 0;
        }

        /* Both sides fit in a bucket: pop the next pending node or stop. */
        if (nl <= M && nr <= M) {
            if (sp) node = sortStack[--sp];      /* pop tn */
            else break;
        }

    }

    return 0;
}
    // Rasterizes one tile of the EM subpixel-refined disparity map:
    // crops left/right image patches (padded by the search range and the
    // kernel size), builds a Gaussian pyramid, runs m_subpixel_refine
    // coarse-to-fine, and returns the refined patch cropped back to the
    // caller's bbox coordinate frame.
    typename EMSubpixelCorrelatorView<ImagePixelT>::prerasterize_type
    EMSubpixelCorrelatorView<ImagePixelT>::prerasterize(BBox2i bbox) const {
      vw_out(InfoMessage, "stereo") << "EMSubpixelCorrelatorView: rasterizing image block " << bbox << ".\n";
      
      // Find the range of disparity values for this patch.
      // int num_good; // not used
      BBox2i search_range;
      try {
        search_range = get_disparity_range(crop(m_course_disparity, bbox));
      }
      catch ( std::exception &e ) {
        // No valid disparities in the patch — fall back to an empty range.
        search_range = BBox2i();
      }


#ifdef USE_GRAPHICS
      ImageWindow window;
      if(debug_level >= 0) {
        window = vw_create_window("disparity");
      }
#endif

      // The area in the right image that we'll be searching is
      // determined by the bbox of the left image plus the search
      // range.
      BBox2i left_crop_bbox(bbox);
      BBox2i right_crop_bbox(bbox.min() + search_range.min(),
                             bbox.max() + search_range.max());

      // The correlator requires the images to be the same size. The
      // search bbox will always be larger than the given left image
      // bbox, so we just make the left bbox the same size as the
      // right bbox.
      left_crop_bbox.max() = left_crop_bbox.min() + Vector2i(right_crop_bbox.width(), right_crop_bbox.height());

      // Finally, we must adjust both bounding boxes to account for
      // the size of the kernel itself.
      right_crop_bbox.min() -= Vector2i(m_kernel_size[0], m_kernel_size[1]);
      right_crop_bbox.max() += Vector2i(m_kernel_size[0], m_kernel_size[1]);
      left_crop_bbox.min() -= Vector2i(m_kernel_size[0], m_kernel_size[1]);
      left_crop_bbox.max() += Vector2i(m_kernel_size[0], m_kernel_size[1]);

      // We crop the images to the expanded bounding box and edge
      // extend in case the new bbox extends past the image bounds.
      ImageView<ImagePixelT> left_image_patch, right_image_patch;
      ImageView<disparity_pixel> disparity_map_patch_in;
      ImageView<result_type> disparity_map_patch_out;

      
      left_image_patch = crop(edge_extend(m_left_image, ZeroEdgeExtension()),
                              left_crop_bbox);
      right_image_patch = crop(edge_extend(m_right_image, ZeroEdgeExtension()),
                               right_crop_bbox);
      disparity_map_patch_in = crop(edge_extend(m_course_disparity, ZeroEdgeExtension()),
				    left_crop_bbox);
      disparity_map_patch_out.set_size(disparity_map_patch_in.cols(), disparity_map_patch_in.rows());
      
      
      // Adjust the disparities to be relative to the cropped
      // image pixel locations
      for (int v = 0; v < disparity_map_patch_in.rows(); ++v) {
        for (int u = 0; u < disparity_map_patch_in.cols(); ++u) {
          if (disparity_map_patch_in(u,v).valid())  {
            disparity_map_patch_in(u,v).child().x() -= search_range.min().x();
            disparity_map_patch_in(u,v).child().y() -= search_range.min().y();
          }
        }
      }


      double blur_sigma_progressive = .5; // 3*sigma = 1.5 pixels
      
      // create the pyramid first
      std::vector<ImageView<ImagePixelT> > left_pyramid(pyramid_levels), right_pyramid(pyramid_levels);
      std::vector<BBox2i> regions_of_interest(pyramid_levels);
      std::vector<ImageView<Matrix2x2> > warps(pyramid_levels);
      std::vector<ImageView<disparity_pixel> > disparity_map_pyramid(pyramid_levels);
      
      
      // initialize the pyramid at level 0 (full resolution)
      left_pyramid[0] = channels_to_planes(left_image_patch);
      right_pyramid[0] = channels_to_planes(right_image_patch);
      disparity_map_pyramid[0] = disparity_map_patch_in;
      regions_of_interest[0] = BBox2i(m_kernel_size[0], m_kernel_size[1],
                                      bbox.width(),bbox.height());
      

      // downsample the disparity map and the image pair to initialize the intermediate levels
      for(int i = 1; i < pyramid_levels; i++) {
        left_pyramid[i] = subsample(gaussian_filter(left_pyramid[i-1], blur_sigma_progressive), 2);
        right_pyramid[i] = subsample(gaussian_filter(right_pyramid[i-1], blur_sigma_progressive), 2);
	
        disparity_map_pyramid[i] = subsample_disp_map_by_two(disparity_map_pyramid[i-1]);
        regions_of_interest[i] = BBox2i(regions_of_interest[i-1].min()/2, regions_of_interest[i-1].max()/2);
      }
      
      // initialize warps at the lowest resolution level (identity)
      warps[pyramid_levels-1].set_size(left_pyramid[pyramid_levels-1].cols(),
				       left_pyramid[pyramid_levels-1].rows());
      for(int y = 0; y < warps[pyramid_levels-1].rows(); y++) {
        for(int x = 0; x < warps[pyramid_levels-1].cols(); x++) {
          warps[pyramid_levels-1](x, y).set_identity();
        }
      }      

#ifdef USE_GRAPHICS
      vw_initialize_graphics(0, NULL);
      if(debug_level >= 0) {
        for(int i = 0; i < pyramid_levels; i++) {
          vw_show_image(window, left_pyramid[i]);
          usleep((int)(.2*1000*1000));
        }
      }
#endif
      
      // go up the pyramid; first run refinement, then upsample result for the next level
      for(int i = pyramid_levels-1; i >=0; i--) {
        vw_out() << "processing pyramid level "
                  << i << " of " << pyramid_levels-1 << std::endl;
        
        if(debug_level >= 0) {
          std::stringstream stream;
          stream << "pyramid_level_" << i << ".tif";
          write_image(stream.str(), disparity_map_pyramid[i]);
        }
        
        ImageView<ImagePixelT> process_left_image = left_pyramid[i];
        ImageView<ImagePixelT> process_right_image = right_pyramid[i];
        
        if(i > 0) { // in this case take refine the upsampled disparity map from the previous level,
          // and upsample for the next level
          m_subpixel_refine(edge_extend(process_left_image, ZeroEdgeExtension()), edge_extend(process_right_image, ZeroEdgeExtension()),
                            disparity_map_pyramid[i], disparity_map_pyramid[i], warps[i],
                            regions_of_interest[i], false, debug_level == i);
          
          // upsample the warps and the refined map for the next level of processing
          int up_width = left_pyramid[i-1].cols();
          int up_height = left_pyramid[i-1].rows();
          warps[i-1] = copy(resize(warps[i], up_width , up_height, ConstantEdgeExtension(), NearestPixelInterpolation())); //upsample affine transforms
          disparity_map_pyramid[i-1] = copy(upsample_disp_map_by_two(disparity_map_pyramid[i], up_width, up_height));
        }
        else { // here there is no next level so we refine directly to the output patch
          m_subpixel_refine(edge_extend(process_left_image, ZeroEdgeExtension()), edge_extend(process_right_image, ZeroEdgeExtension()),
                            disparity_map_pyramid[i], disparity_map_patch_out, warps[i],
                            regions_of_interest[i], true, debug_level == i);
        }
      }
      
#ifdef USE_GRAPHICS
      if(debug_level >= 0) {
        vw_show_image(window, .5 + select_plane(channels_to_planes(disparity_map_patch_out)/6., 0));
        usleep(10*1000*1000);
      }
#endif

      // Undo the above adjustment (restore absolute disparity values)
      for (int v = 0; v < disparity_map_patch_out.rows(); ++v) {
        for (int u = 0; u < disparity_map_patch_out.cols(); ++u) {
          if (disparity_map_patch_out(u,v).valid())  {
            disparity_map_patch_out(u,v).child().x() += search_range.min().x();
            disparity_map_patch_out(u,v).child().y() += search_range.min().y();
          }
        }
      }

#ifdef USE_GRAPHICS
      if(debug_level >= 0 ) {
        vw_destroy_window(window);
      }
#endif
      
      // Crop away the kernel padding so the returned patch lines up with
      // the caller's requested bbox.
      return crop(disparity_map_patch_out, BBox2i(m_kernel_size[0]-bbox.min().x(),
						  m_kernel_size[1]-bbox.min().y(),
						  m_left_image.cols(),
						  m_left_image.rows()));
    }
Пример #5
0
// Construct an Image by loading `filename` from disk and immediately
// cropping it to `region`.  Delegates to the default constructor for
// member initialization.
Image::Image(const std::string& filename, const Rect& region)
	: Image()
{
	load(filename);  // read the image file into this object
	crop(region);    // keep only the pixels inside `region`
}
Пример #6
0
// ######################################################################
void GistEstimatorFFT::computeGistFeatureVector(Image<float> img)
{
  // get the current (normalized) input image
  // check whether it is not the same size as the previous one
  int w = img.getWidth()/WIN_NUM, h = img.getHeight()/WIN_NUM;
  Image<float> wImg(w,h, ZEROS);

  itsFftImage.resize(img.getDims());

  // reset the gabor masks initially or whenever dimension changed
  if((gaborMaskImg == NULL) ||
     (w != gaborMaskImg[0][0].getWidth() ) ||
     (h != gaborMaskImg[0][0].getHeight()))
    {
      //xw = new XWinManaged
      //  (Dims(2*img.getWidth(), img.getHeight()), 0, 0, "xw");
      //xw->drawImage(img,0,0); Raster::waitForKey();
      LDEBUG("w: %d, h: %d",w,h);
      setupFFTW(w,h);
    }
  //Raster::waitForKey();

  // get the features on each window
  Image<float> cImg(img.getWidth(), img.getHeight(), ZEROS);
  Image<float> dImg = img;
  float mn, mx; getMinMax(dImg,mn,mx);
  drawGrid(dImg, img.getWidth()/4,img.getHeight()/4,1,1, mx);

  Image<double>::iterator aptr = itsGistVector.beginw();
  for(uint ii = 0; ii < WIN_NUM; ii++)
    for(uint jj = 0; jj < WIN_NUM; jj++)
      {
        Point2D<int> p((ii*img.getWidth())/WIN_NUM,
                       (jj*img.getHeight())/WIN_NUM);
        Rectangle r(p, Dims(w,h));
        wImg = crop(img, r);

        //compute feature vector
        //Fourier Transform of the image
        fftCompute(wImg, itsOutFftwBuffer);

        // go through each gabor masks
        for(int i = 0; i < NUM_G_LEV; i++)
          for(int j = 0; j < NUM_G_DIR; j++)
            {
              // weighted sum of fft for a feature
              double sum = 0.0;
              for(int k = 0; k < h; k++)
                for(int l = 0; l < w/2+1; l++)
                  {
                    sum += log(1.0 + gaborMask[i][j][k][l]) *
                      log(1.0 + itsOutFftwBuffer[k][l]);
                  }
              *aptr++ = sum/(h*w/2+1);
            }

        // display Fourier Transform Image
        Image<float> temp = getFftImage(itsOutFftwBuffer, w, h);
        LDEBUG("w,h:%d:%d: i,j:%d:%d [%d %d]",
               ii*w, jj*h, temp.getWidth(), temp.getHeight(),
               itsFftImage.getWidth(), itsFftImage.getHeight());

        inplacePaste(itsFftImage, temp, Point2D<int>(ii*w, jj*h));
      }
}
Пример #7
0
//pack images, return list of positions
QList<QPoint> ImagePacker::pack(QList<packedImage> *im, int heur, uint w, uint h)
{
    int i, j, x, y;
    QList<packedImage*> images;
    for(i = 0; i < im->size(); i++)
        images << &im->operator [](i);
    crop(&images);

    QList<QPoint> out;

//    int maxRepeats = 30;
//    if(bruteForce == false)
//        maxRepeats = 1;

    //repeat trying to find best solution
//    for (int repeat = 0; repeat < maxRepeats; ++repeat)
    {
        sort(&images);
        out.clear();
        missingChars = 0;
        area = 0;
        mergedChars = 0;
        neededArea = 0;

        MaxRects rects;
        MaxRectsNode mrn;
        mrn.r = QRect(0, 0, w, h);
        mrn.i = NULL;
        rects.F << mrn;
        rects.heuristic = heur;
        rects.leftToRight = ltr;
        rects.w = w;
        rects.h = h;
        QPoint pt;
        bool t;
        for(i = 0; i < images.size(); i++)
        {
            images.at(i)->merged = false;
            t = false;
            for(j = 0; j < out.size(); j++)
            {
                if(compareImages(&images.at(j)->img, &images.at(i)->img, &x, &y))
                {
                    pt = out.at(j)+QPoint(x, y);
                    t = true;
                    images.at(i)->merged = true;
                    mergedChars++;
                    break;
                }
            }
            if(!t)
                pt = rects.insertNode(&images.operator [](i)->img);
            if(pt != QPoint(999999,999999))
            {
                if(!t)
                    area += images.at(i)->img.width() * images.at(i)->img.height();
            }
            else
                missingChars++;
            if(!t)
                neededArea += images.at(i)->img.width() * images.at(i)->img.height();
            out << pt;
            images.operator [](i)->rc = QRect(pt.x(), pt.y(), images.at(i)->rc.width(), images.at(i)->rc.height());
        }
//        if(missingChars == 0) break;
    }
    return out;
}
Пример #8
0
//-----------------------------------------------------------------------------
void DatPanel::toolLeft(QBoxLayout *l)
{
	QAction *a;
	QMenu *o;
	QToolButton *bb;

	// size menu
	o = menu->addMenu(tr("Sizes"));
	a = new QAction(QPixmap(":/png/document-new.png"), tr("Create new"), this);
	connect(a, SIGNAL(triggered()), this, SLOT(create()));
	a->setToolTip(tr("Recreate the data with new sizes and fill it by zeros (Ctrl+Shift+N)."));
	a->setShortcut(Qt::CTRL+Qt::SHIFT+Qt::Key_N);	o->addAction(a);
	bb = new QToolButton(this);	l->addWidget(bb);	bb->setDefaultAction(a);

	a = new QAction(QPixmap(size_xpm), tr("Resize"), this);
	connect(a, SIGNAL(triggered()), this, SLOT(reSize()));
	a->setToolTip(tr("Resize (interpolate) the data to specified sizes (Ctrl+Shift+R)."));
	a->setShortcut(Qt::CTRL+Qt::SHIFT+Qt::Key_R);	o->addAction(a);
	bb = new QToolButton(this);	l->addWidget(bb);	bb->setDefaultAction(a);

	a = new QAction(QPixmap(squize_xpm), tr("Squeeze"), this);
	connect(a, SIGNAL(triggered()), this, SLOT(squize()));
	a->setToolTip(tr("Keep only each n-th element of the data array."));
	o->addAction(a);
	bb = new QToolButton(this);	l->addWidget(bb);	bb->setDefaultAction(a);

	a = new QAction(QPixmap(crop_xpm), tr("Cro&p"), this);
	connect(a, SIGNAL(triggered()), this, SLOT(crop()));
	a->setToolTip(tr("Crop the data edges. Useful to cut off the zero-filled area."));
	o->addAction(a);
	bb = new QToolButton(this);	l->addWidget(bb);	bb->setDefaultAction(a);

	a = new QAction(QPixmap(oper_of_xpm), tr("Transform"), this);
	connect(a, SIGNAL(triggered()), this, SLOT(newdat()));
	a->setToolTip(tr("Transform data along dimension(s) (Ctrl+Shift+T)."));
	a->setShortcut(Qt::CTRL+Qt::SHIFT+Qt::Key_T);	o->addAction(a);
	bb = new QToolButton(this);	l->addWidget(bb);	bb->setDefaultAction(a);

	a = new QAction(QPixmap(oper_dir_xpm), tr("Make new (Ctrl+Shift+M)"), this);
	connect(a, SIGNAL(triggered()), this, SLOT(oper()));
	a->setToolTip(tr("Make another data."));
	a->setShortcut(Qt::CTRL+Qt::SHIFT+Qt::Key_M);	o->addAction(a);
	bb = new QToolButton(this);	l->addWidget(bb);	bb->setDefaultAction(a);

	a = new QAction(QPixmap(hist_xpm), tr("Histogram (Ctrl+Shift+H)"), this);
	connect(a, SIGNAL(triggered()), this, SLOT(hist()));
	a->setToolTip(tr("Find histogram of data."));
	a->setShortcut(Qt::CTRL+Qt::SHIFT+Qt::Key_H);	o->addAction(a);
	bb = new QToolButton(this);	l->addWidget(bb);	bb->setDefaultAction(a);

/*	a = new QAction(QPixmap(":/png/view-refresh.png"), tr("Refresh"), this);
	connect(a, SIGNAL(triggered()), this, SLOT(refresh()));
	a->setToolTip(tr("Refresh data values."));
	o->addAction(a);
	bb = new QToolButton(this);	l->addWidget(bb);	bb->setDefaultAction(a);*/

/*	a = new QAction(tr("Rearrange"), this);	// TODO: move in generalized dialog
	connect(a, SIGNAL(triggered()), this, SLOT(rearrange()));
	a->setToolTip(tr("Rearrange data sizes without changing data values."));
	o->addAction(a);
	a = new QAction(tr("Fill in range"), this);
	connect(a, SIGNAL(triggered()), this, SLOT(inrange()));
	a->setToolTip(tr("Fill data equidistantly from one value to another."));
	o->addAction(a);
	a = new QAction(tr("Normalize"), this);
	connect(a, SIGNAL(triggered()), this, SLOT(norm()));
	a->setToolTip(tr("Normalize data so that its minimal\nand maximal values be in specified range."));
	o->addAction(a);
	a = new QAction(tr("Norm. slices"), this);
	connect(a, SIGNAL(triggered()), this, SLOT(normsl()));
	a->setToolTip(tr("Normalize each data slice perpendicular to some direction\nso that its minimal and maximal values be in specified range."));
	o->addAction(a);*/

	l->addStretch(1);

	a = new QAction(QPixmap(":/png/tab-close.png"), tr("Close tab"), this);
	connect(a, SIGNAL(triggered()), this, SLOT(close()));
	a->setToolTip(tr("Close this data tab."));
	bb = new QToolButton(this);	l->addWidget(bb);	bb->setDefaultAction(a);
}
// Latch the next queued buffer (if any) from the consumer into
// mActiveBuffer and return the region of the layer that must be redrawn,
// expressed in window-manager space.  Sets recomputeVisibleRegions when
// anything affecting visibility changed (size, crop, transform, scaling
// mode, opacity, or the transparent region).  Returns an empty region
// when no buffer was latched (nothing queued, refresh pending, producer
// asked to present later, update error, or first buffer rejected).
Region Layer::latchBuffer(bool& recomputeVisibleRegions)
{
    ATRACE_CALL();

    Region outDirtyRegion;
    if (mQueuedFrames > 0) {

        // if we've already called updateTexImage() without going through
        // a composition step, we have to skip this layer at this point
        // because we cannot call updateTeximage() without a corresponding
        // compositionComplete() call.
        // we'll trigger an update in onPreComposition().
        if (mRefreshPending) {
            return outDirtyRegion;
        }

        // Capture the old state of the layer for comparisons later
        const bool oldOpacity = isOpaque();
        sp<GraphicBuffer> oldActiveBuffer = mActiveBuffer;

        // Rejecter handed to updateTexImage(): refuses buffers whose size
        // does not match the pending requested state, and — as a side
        // effect — latches a pending size transaction and the requested
        // transparent region when the incoming buffer makes that valid.
        struct Reject : public SurfaceFlingerConsumer::BufferRejecter {
            Layer::State& front;
            Layer::State& current;
            bool& recomputeVisibleRegions;
            Reject(Layer::State& front, Layer::State& current,
                    bool& recomputeVisibleRegions)
                : front(front), current(current),
                  recomputeVisibleRegions(recomputeVisibleRegions) {
            }

            virtual bool reject(const sp<GraphicBuffer>& buf,
                    const IGraphicBufferConsumer::BufferItem& item) {
                if (buf == NULL) {
                    return false;
                }

                uint32_t bufWidth  = buf->getWidth();
                uint32_t bufHeight = buf->getHeight();

                // check that we received a buffer of the right size
                // (Take the buffer's orientation into account)
                if (item.mTransform & Transform::ROT_90) {
                    swap(bufWidth, bufHeight);
                }

                // FREEZE means the buffer size must match the layer size;
                // any other scaling mode lets the hardware scale it.
                bool isFixedSize = item.mScalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE;
                if (front.active != front.requested) {

                    if (isFixedSize ||
                            (bufWidth == front.requested.w &&
                             bufHeight == front.requested.h))
                    {
                        // Here we pretend the transaction happened by updating the
                        // current and drawing states. Drawing state is only accessed
                        // in this thread, no need to have it locked
                        front.active = front.requested;

                        // We also need to update the current state so that
                        // we don't end-up overwriting the drawing state with
                        // this stale current state during the next transaction
                        //
                        // NOTE: We don't need to hold the transaction lock here
                        // because State::active is only accessed from this thread.
                        current.active = front.active;

                        // recompute visible region
                        recomputeVisibleRegions = true;
                    }

                    ALOGD_IF(DEBUG_RESIZE,
                            "latchBuffer/reject: buffer (%ux%u, tr=%02x), scalingMode=%d\n"
                            "  drawing={ active   ={ wh={%4u,%4u} crop={%4d,%4d,%4d,%4d} (%4d,%4d) }\n"
                            "            requested={ wh={%4u,%4u} crop={%4d,%4d,%4d,%4d} (%4d,%4d) }}\n",
                            bufWidth, bufHeight, item.mTransform, item.mScalingMode,
                            front.active.w, front.active.h,
                            front.active.crop.left,
                            front.active.crop.top,
                            front.active.crop.right,
                            front.active.crop.bottom,
                            front.active.crop.getWidth(),
                            front.active.crop.getHeight(),
                            front.requested.w, front.requested.h,
                            front.requested.crop.left,
                            front.requested.crop.top,
                            front.requested.crop.right,
                            front.requested.crop.bottom,
                            front.requested.crop.getWidth(),
                            front.requested.crop.getHeight());
                }

                if (!isFixedSize) {
                    if (front.active.w != bufWidth ||
                        front.active.h != bufHeight) {
                        // reject this buffer
                        //ALOGD("rejecting buffer: bufWidth=%d, bufHeight=%d, front.active.{w=%d, h=%d}",
                        //        bufWidth, bufHeight, front.active.w, front.active.h);
                        return true;
                    }
                }

                // if the transparent region has changed (this test is
                // conservative, but that's fine, worst case we're doing
                // a bit of extra work), we latch the new one and we
                // trigger a visible-region recompute.
                if (!front.activeTransparentRegion.isTriviallyEqual(
                        front.requestedTransparentRegion)) {
                    front.activeTransparentRegion = front.requestedTransparentRegion;

                    // We also need to update the current state so that
                    // we don't end-up overwriting the drawing state with
                    // this stale current state during the next transaction
                    //
                    // NOTE: We don't need to hold the transaction lock here
                    // because State::active is only accessed from this thread.
                    current.activeTransparentRegion = front.activeTransparentRegion;

                    // recompute visible region
                    recomputeVisibleRegions = true;
                }

                return false;
            }
        };


        Reject r(mDrawingState, getCurrentState(), recomputeVisibleRegions);

        status_t updateResult = mSurfaceFlingerConsumer->updateTexImage(&r);
        if (updateResult == BufferQueue::PRESENT_LATER) {
            // Producer doesn't want buffer to be displayed yet.  Signal a
            // layer update so we check again at the next opportunity.
            mFlinger->signalLayerUpdate();
            return outDirtyRegion;
        }

        // Decrement the queued-frames count.  Signal another event if we
        // have more frames pending.
        if (android_atomic_dec(&mQueuedFrames) > 1) {
            mFlinger->signalLayerUpdate();
        }

        if (updateResult != NO_ERROR) {
            // something happened!
            recomputeVisibleRegions = true;
            return outDirtyRegion;
        }

        // update the active buffer
        mActiveBuffer = mSurfaceFlingerConsumer->getCurrentBuffer();
        if (mActiveBuffer == NULL) {
            // this can only happen if the very first buffer was rejected.
            return outDirtyRegion;
        }

        mRefreshPending = true;
        mFrameLatencyNeeded = true;
        if (oldActiveBuffer == NULL) {
             // the first time we receive a buffer, we need to trigger a
             // geometry invalidation.
            recomputeVisibleRegions = true;
         }

        // Any change in crop, transform or scaling mode also requires a
        // visible-region recompute.
        Rect crop(mSurfaceFlingerConsumer->getCurrentCrop());
        const uint32_t transform(mSurfaceFlingerConsumer->getCurrentTransform());
        const uint32_t scalingMode(mSurfaceFlingerConsumer->getCurrentScalingMode());
        if ((crop != mCurrentCrop) ||
            (transform != mCurrentTransform) ||
            (scalingMode != mCurrentScalingMode))
        {
            mCurrentCrop = crop;
            mCurrentTransform = transform;
            mCurrentScalingMode = scalingMode;
            recomputeVisibleRegions = true;
        }

        // So does a change in buffer dimensions...
        if (oldActiveBuffer != NULL) {
            uint32_t bufWidth  = mActiveBuffer->getWidth();
            uint32_t bufHeight = mActiveBuffer->getHeight();
            if (bufWidth != uint32_t(oldActiveBuffer->width) ||
                bufHeight != uint32_t(oldActiveBuffer->height)) {
                recomputeVisibleRegions = true;
            }
        }

        // ...or in opacity, which can change with the pixel format.
        mCurrentOpacity = getOpacityForFormat(mActiveBuffer->format);
        if (oldOpacity != isOpaque()) {
            recomputeVisibleRegions = true;
        }

        // FIXME: postedRegion should be dirty & bounds
        const Layer::State& s(getDrawingState());
        Region dirtyRegion(Rect(s.active.w, s.active.h));

        // transform the dirty region to window-manager space
        outDirtyRegion = (s.transform.transform(dirtyRegion));
    }
    return outDirtyRegion;
}
Пример #10
0
// Interactive card-recognition test harness: grabs frames from an input
// series, segments a card with the Canny-contour segmenter, matches it
// against the "cards.vdb" object database, and prompts the user on stdin
// to label (and train) unknown cards.  Returns 0 on normal termination,
// 1 on a command-line parsing error.
int main(const int argc, const char **argv)
{
  MYLOGVERB = LOG_INFO;
  ModelManager *mgr = new ModelManager("Test ObjRec");

  nub::ref<OutputFrameSeries> ofs(new OutputFrameSeries(*mgr));
  mgr->addSubComponent(ofs);

  nub::ref<InputFrameSeries> ifs(new InputFrameSeries(*mgr));
  mgr->addSubComponent(ifs);

  nub::ref<EnvSegmenterCannyContour> seg(new EnvSegmenterCannyContour(*mgr));
  mgr->addSubComponent(seg);

  mgr->exportOptions(MC_RECURSE);

  if (mgr->parseCommandLine(
        (const int)argc, (const char**)argv, "", 0, 0) == false)
    return 1;

  mgr->start();

  // Tune the segmenter for card-like contours.
  seg->setModelParamVal("CannyMinCos", 1.0);
  seg->setModelParamVal("CannyMaxArea", 6000);   // NOTE(review): overwritten by the next line;
  seg->setModelParamVal("CannyMaxArea", 12000);  // the first call was presumably meant to be "CannyMinArea" — confirm.

  itsObjectDB.loadFrom("cards.vdb");
  while(1)
  {
    Image< PixRGB<byte> > inputImg;
    // FRAME_COMPLETE from updateNext() ends the input series.
    const FrameState is = ifs->updateNext();
    if (is == FRAME_COMPLETE)
      break;

    //grab the images
    GenericFrame input = ifs->readFrame();
    if (!input.initialized())
      break;
    inputImg = input.asRgb();

    Image<PixRGB<byte> > out;

    // Segment: cardbox is the focus-of-attention rectangle, out a
    // visualization of the segmentation.
    const Rectangle cardbox = seg->getFoa(inputImg, Point2D<int>(), NULL, &out);

    ofs->writeRGB(out, "input", FrameInfo("input", SRC_POS));

    if (cardbox.isValid())
    {
      // Crop to the card (clipped to the image bounds) and try to
      // recognize it; unknown cards are labeled interactively and trained.
      Image<PixRGB<byte> > card =
        crop(inputImg, cardbox.getOverlap(inputImg.getBounds()));

      std::string cardName = recCard(card);

      if (cardName.length() == 0)
      {
        LINFO("Enter name for card:");
        std::getline(std::cin, cardName, '\n');

        if (cardName.length() > 0)
          trainCard(card, cardName);
      }

      writeText(card, Point2D<int>(0,0), cardName.c_str(),
          PixRGB<byte>(255), PixRGB<byte>(127));

      ofs->writeRGB(card, "card", FrameInfo("card", SRC_POS));
    }

    ofs->updateNext();
  }
  mgr->stop();

  return 0;

}
// ######################################################################
// ICE message callback.  Dispatches on the concrete message type:
//  - LandmarkSearchQueueMessage : store the new input frame and its
//    salient regions, reset the per-request result arrays, and (re)fill
//    the landmark-matching job queue.
//  - LandmarkMatchResultMessage : record the first successful match for a
//    salient region.
//  - CancelSearchMessage        : ask the worker loop to drain its queue.
// Shared state is protected by its_input_info_mutex, its_results_mutex
// and its_job_queue_mutex, each locked only around its own section.
void Beobot2_GistSalLocalizerWorkerI::updateMessage
(const RobotSimEvents::EventMessagePtr& eMsg, const Ice::Current&)
{
  // Get a gist-sal message
  if(eMsg->ice_isA("::BeobotEvents::LandmarkSearchQueueMessage"))
  {
    BeobotEvents::LandmarkSearchQueueMessagePtr lsqMsg =
      BeobotEvents::LandmarkSearchQueueMessagePtr::dynamicCast(eMsg);

    //Get the current request ID
    int currRequestID = lsqMsg->RequestID;
    itsInputFnum = currRequestID;

    LINFO("Got an lsqMessage with Request ID = %d", currRequestID);

    // get the inputImage
    its_input_info_mutex.lock();
    itsInputImage = Ice2Image<PixRGB<byte> >(lsqMsg->currIma);
    //itsInputWin->setTitle(sformat("WM: %d",itsInputFnum).c_str());
    //itsInputWin->drawImage(itsInputImage, 0, 0);

    // get the salient region information
    itsInputVO.clear();
    itsVOKeypointsComputed.clear();
    itsInputObjOffset.clear();
    uint inputSize = lsqMsg->salientRegions.size();
    for(uint i = 0; i < inputSize; i++)
      {
        BeobotEvents::SalientRegion salReg = lsqMsg->salientRegions[i];
        LDEBUG("W[%4d] sp[%4d,%4d] rect[%4d,%4d,%4d,%4d]",
               i, salReg.salpt.i, salReg.salpt.j,
               salReg.objRect.tl.i, salReg.objRect.tl.j,
               salReg.objRect.br.i, salReg.objRect.br.j);

        // print the pre-attentive feature vector
        std::vector<float> features;
        uint fsize = salReg.salFeatures.size();
        for(uint j = 0; j < fsize; j++)
          {
            features.push_back(salReg.salFeatures[j]);
            LDEBUG("[%4d]:%7f", j, salReg.salFeatures[j]);
          }

        Point2D<int> salpt(salReg.salpt.i, salReg.salpt.j);
        Point2D<int> offset( salReg.objRect.tl.i, salReg.objRect.tl.j);
        Rectangle rect = Rectangle::tlbrO
          (salReg.objRect.tl.j, salReg.objRect.tl.i,
           salReg.objRect.br.j, salReg.objRect.br.i);

        // create a visual object for the salient region
        Image<PixRGB<byte> > objImg = crop(itsInputImage, rect);

        std::string testRunFPrefix("testRunFPrefix");
        std::string iname("iname");
        std::string saveFilePath("saveFilePath");

        std::string
          iName(sformat("%s_SAL_%07d_%02d",
                        testRunFPrefix.c_str(), currRequestID, i));
        std::string ifName = iName + std::string(".png");
        ifName = saveFilePath + ifName;
        // salpt - offset is the salient point expressed in the cropped
        // object image's coordinate frame (offset is the crop's top-left).
        rutz::shared_ptr<VisualObject>
          vo(new VisualObject
             (iName, ifName, objImg, salpt - offset, features,
              std::vector< rutz::shared_ptr<Keypoint> >(), false, false));
        itsInputVO.push_back(vo);
        itsVOKeypointsComputed.push_back(false);
        itsInputObjOffset.push_back(offset);

        LDEBUG("[%d] image[%d]: %s sal:[%d,%d] offset:[%d,%d]",
               currRequestID, i, iName.c_str(),
               (salpt - offset).i, (salpt - offset).j,
               offset.i, offset.j);
      }
    its_input_info_mutex.unlock();

    // reset the per-salient-region result slots for this request
    its_results_mutex.lock();
    itsMatchFound.clear();
    itsVOmatch.clear();         itsVOmatch.resize(inputSize);
    itsLmkMatch.clear();        itsLmkMatch.resize(inputSize);
    itsSegNumMatch.clear();     itsSegNumMatch.resize(inputSize);
    itsLenTravMatch.clear();    itsLenTravMatch.resize(inputSize);
    itsNumObjectSearch.clear(); itsNumObjectSearch.resize(inputSize);
    for(uint i = 0; i < inputSize; i++) itsMatchFound.push_back(false);
    for(uint i = 0; i < inputSize; i++) itsNumObjectSearch[i] = 0;
    itsNumJobsProcessed = 0;
    its_results_mutex.unlock();

    // fill the job queue
    its_job_queue_mutex.lock();
    itsJobQueue.clear();
    uint njobs = lsqMsg->jobs.size();
    for(uint i = 0; i < njobs; i++)
      {
        BeobotEvents::LandmarkSearchJob tempJob = lsqMsg->jobs[i];
        itsJobQueue.push_back
          (GSlocJobData(tempJob.inputSalRegID,
                        tempJob.dbSegNum,
                        tempJob.dbLmkNum,
                        tempJob.dbVOStart,
                        tempJob.dbVOEnd));
      }

    // print the job queue
    std::list<GSlocJobData>::iterator itr = itsJobQueue.begin();
    uint count = 0;
    while (itr != itsJobQueue.end())
      {
        LDEBUG("[%5d] match obj[%d] lDB[%3d][%3d]:[%3d,%3d]", count,
               (*itr).objNum, (*itr).segNum, (*itr).lmkNum,
               (*itr).voStartNum,(*itr).voEndNum);
        itr++; count++;
      }
    its_job_queue_mutex.unlock();
  }

  // Got a landmark match results - stop searching for that salient region
  else if(eMsg->ice_isA("::BeobotEvents::LandmarkMatchResultMessage"))
  {
    BeobotEvents::LandmarkMatchResultMessagePtr lmrMsg =
      BeobotEvents::LandmarkMatchResultMessagePtr::dynamicCast(eMsg);

    //Get the current request ID
    //int currRequestID = gistSalMsg->RequestID;

    BeobotEvents::LandmarkSearchJob tempJob = lmrMsg->matchInfo;
    LINFO("Got an lmrMessage");

    LINFO("LMR -> found match[%d]: with itsLandmarkDB[%d][%d]",
          tempJob.inputSalRegID, tempJob.dbSegNum, tempJob.dbLmkNum);

    // only the first match per salient region is kept.
    // NOTE(review): itsVOmatch/itsLmkMatch are resized above but never
    // written in this branch — confirm they are filled elsewhere.
    its_results_mutex.lock();
    if(!itsMatchFound[tempJob.inputSalRegID])
      {
        itsMatchFound[tempJob.inputSalRegID]   = true;
        itsSegNumMatch[tempJob.inputSalRegID]  = lmrMsg->segNumMatch;
        itsLenTravMatch[tempJob.inputSalRegID] = lmrMsg->lenTravMatch;
      }
    its_results_mutex.unlock();
  }

  else if(eMsg->ice_isA("::BeobotEvents::CancelSearchMessage"))
    {
      its_job_queue_mutex.lock();
      itsEmptyQueue = true;
      its_job_queue_mutex.unlock();

      its_results_mutex.lock();
      LINFO("CancelSearchMessage: %d processed here", itsNumJobsProcessed);
      its_results_mutex.unlock();
    }
}
Пример #12
0
void LayerBase::drawWithOpenGL(const Region& clip) const
{
    const DisplayHardware& hw(graphicPlane(0).displayHardware());
    const uint32_t fbHeight = hw.getHeight();
    const State& s(drawingState());

    GLenum src = mPremultipliedAlpha ? GL_ONE : GL_SRC_ALPHA;
    if (CC_UNLIKELY(s.alpha < 0xFF)) {
        const GLfloat alpha = s.alpha * (1.0f/255.0f);
        if (mPremultipliedAlpha) {
            glColor4f(alpha, alpha, alpha, alpha);
        } else {
            glColor4f(1, 1, 1, alpha);
        }
        glEnable(GL_BLEND);
        glBlendFunc(src, GL_ONE_MINUS_SRC_ALPHA);
        glTexEnvx(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
    } else {
        glColor4f(1, 1, 1, 1);
        glTexEnvx(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
        if (!isOpaque()) {
            glEnable(GL_BLEND);
            glBlendFunc(src, GL_ONE_MINUS_SRC_ALPHA);
        } else {
            glDisable(GL_BLEND);
        }
    }

    struct TexCoords {
        GLfloat u;
        GLfloat v;
    };

    Rect crop(s.active.w, s.active.h);
    if (!s.active.crop.isEmpty()) {
        crop = s.active.crop;
    }
    GLfloat left = GLfloat(crop.left) / GLfloat(s.active.w);
    GLfloat top = GLfloat(crop.top) / GLfloat(s.active.h);
    GLfloat right = GLfloat(crop.right) / GLfloat(s.active.w);
    GLfloat bottom = GLfloat(crop.bottom) / GLfloat(s.active.h);

    TexCoords texCoords[4];
    texCoords[0].u = left;
    texCoords[0].v = top;
    texCoords[1].u = left;
    texCoords[1].v = bottom;
    texCoords[2].u = right;
    texCoords[2].v = bottom;
    texCoords[3].u = right;
    texCoords[3].v = top;
    for (int i = 0; i < 4; i++) {
        texCoords[i].v = 1.0f - texCoords[i].v;
    }

    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glVertexPointer(2, GL_FLOAT, 0, mVertices);
    glTexCoordPointer(2, GL_FLOAT, 0, texCoords);
    glDrawArrays(GL_TRIANGLE_FAN, 0, mNumVertices);

    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    glDisable(GL_BLEND);
}
Пример #13
0
// Dequeue one V4L2 capture buffer, convert it from the negotiated pixel
// format into the configured output (RGB or grayscale, optionally cropped
// to the frame sub-window when cfg->frame is set), requeue the buffer and
// return a pointer to the converted pixels.  Returns NULL on dequeue or
// requeue failure (a dequeue failure also clears 'running').
// Note: several YUV420/YUV410 conversions are still marked TODO and
// silently produce no output for those branches.
unsigned char* V4Linux2Camera::getFrame()  {

    if (dev_handle<0) return NULL;

    // pull the next filled buffer from the driver
    if (ioctl(dev_handle, VIDIOC_DQBUF, &v4l2_buf)<0) {
        running = false;
        return NULL;
    }

    unsigned char *raw_buffer = (unsigned char*)buffers[v4l2_buf.index].start;
    if (raw_buffer==NULL) return NULL;

    if(cfg->color) {
        // color output, cropped to the frame sub-window
        if (cfg->frame) {
         if (pixelformat==V4L2_PIX_FMT_YUYV)
            crop_yuyv2rgb(cfg->cam_width,raw_buffer,frm_buffer);
         else if (pixelformat==V4L2_PIX_FMT_UYVY)
            crop_uyvy2rgb(cfg->cam_width,raw_buffer,frm_buffer);
         else if (pixelformat==V4L2_PIX_FMT_YUV420) { //TODO
         } else if (pixelformat==V4L2_PIX_FMT_YUV410) { //TODO
         } else if (pixelformat==V4L2_PIX_FMT_GREY)
            crop_gray2rgb(cfg->cam_width,raw_buffer, frm_buffer);
         else if ((pixelformat == V4L2_PIX_FMT_MJPEG) || (pixelformat == V4L2_PIX_FMT_JPEG)) {
                // JPEG: decompress to RGB into cam_buffer, then crop
                int jpegSubsamp;
                tjDecompressHeader2(_jpegDecompressor, raw_buffer, v4l2_buf.bytesused, &cfg->cam_width, &cfg->cam_height, &jpegSubsamp);
                tjDecompress2(_jpegDecompressor, raw_buffer, v4l2_buf.bytesused, cam_buffer, cfg->cam_width, 0, cfg->cam_height, TJPF_RGB, TJFLAG_FASTDCT);
                crop(cfg->cam_width, cfg->cam_height,cam_buffer,frm_buffer,3);
         }

        } else {
         // color output, full frame
         if (pixelformat==V4L2_PIX_FMT_YUYV)
            yuyv2rgb(cfg->cam_width,cfg->cam_height,raw_buffer,cam_buffer);
         else if (pixelformat==V4L2_PIX_FMT_UYVY)
            uyvy2rgb(cfg->cam_width,cfg->cam_height,raw_buffer,cam_buffer);
         else if (pixelformat==V4L2_PIX_FMT_YUV420) { //TODO
         } else if (pixelformat==V4L2_PIX_FMT_YUV410) { //TODO
         } else if (pixelformat==V4L2_PIX_FMT_GREY)
            gray2rgb(cfg->cam_width,cfg->cam_height,raw_buffer,cam_buffer);
         else if ((pixelformat == V4L2_PIX_FMT_MJPEG) || (pixelformat == V4L2_PIX_FMT_JPEG)) {
                int jpegSubsamp;
                tjDecompressHeader2(_jpegDecompressor, raw_buffer, v4l2_buf.bytesused, &cfg->cam_width, &cfg->cam_height, &jpegSubsamp);
                tjDecompress2(_jpegDecompressor, raw_buffer, v4l2_buf.bytesused, cam_buffer, cfg->cam_width, 0, cfg->cam_height, TJPF_RGB, TJFLAG_FASTDCT);
         }

        }

    } else {
        // grayscale output, cropped to the frame sub-window
        if (cfg->frame) {
            if (pixelformat==V4L2_PIX_FMT_YUYV)
                crop_yuyv2gray(cfg->cam_width,raw_buffer,frm_buffer);
            else if (pixelformat==V4L2_PIX_FMT_UYVY)
                crop_uyvy2gray(cfg->cam_width,raw_buffer,frm_buffer);
            else if (pixelformat==V4L2_PIX_FMT_YUV420)
                crop(cfg->cam_width, cfg->cam_height,raw_buffer,frm_buffer,1);
            else if (pixelformat==V4L2_PIX_FMT_YUV410)
                crop(cfg->cam_width, cfg->cam_height,raw_buffer,frm_buffer,1);
            else if (pixelformat==V4L2_PIX_FMT_GREY)
                crop(cfg->cam_width, cfg->cam_height,raw_buffer,frm_buffer,1);
            else if ((pixelformat == V4L2_PIX_FMT_MJPEG) || (pixelformat == V4L2_PIX_FMT_JPEG)) {

                int jpegSubsamp;
                tjDecompressHeader2(_jpegDecompressor, raw_buffer, v4l2_buf.bytesused, &cfg->cam_width, &cfg->cam_height, &jpegSubsamp);
                tjDecompress2(_jpegDecompressor, raw_buffer, v4l2_buf.bytesused, cam_buffer, cfg->cam_width, 0, cfg->cam_height, TJPF_GRAY, TJFLAG_FASTDCT);
                crop(cfg->cam_width, cfg->cam_height,cam_buffer,frm_buffer,1);
            }
        } else {
            // grayscale output, full frame (planar YUV: Y plane is a
            // ready-made grayscale image, so a memcpy suffices)
            if (pixelformat==V4L2_PIX_FMT_YUYV) yuyv2gray(cfg->cam_width, cfg->cam_height, raw_buffer, cam_buffer);
            else if (pixelformat==V4L2_PIX_FMT_UYVY) uyvy2gray(cfg->cam_width, cfg->cam_height, raw_buffer, cam_buffer);
            else if (pixelformat==V4L2_PIX_FMT_YUV420) memcpy(cam_buffer,raw_buffer,cfg->cam_width*cfg->cam_height);
            else if (pixelformat==V4L2_PIX_FMT_YUV410) memcpy(cam_buffer,raw_buffer,cfg->cam_width*cfg->cam_height);
            //else if (pixelformat==V4L2_PIX_FMT_GREY) memcpy(cam_buffer,raw_buffer,cam_width*cam_height);
            else if ((pixelformat == V4L2_PIX_FMT_MJPEG) || (pixelformat == V4L2_PIX_FMT_JPEG)) {

                int jpegSubsamp;
                tjDecompressHeader2(_jpegDecompressor, raw_buffer, v4l2_buf.bytesused, &cfg->cam_width, &cfg->cam_height, &jpegSubsamp);
                tjDecompress2(_jpegDecompressor, raw_buffer, v4l2_buf.bytesused, cam_buffer, cfg->cam_width, 0, cfg->cam_height, TJPF_GRAY, TJFLAG_FASTDCT);
            }
        }
    }

    // hand the buffer back to the driver for re-use
    if (-1 == ioctl (dev_handle, VIDIOC_QBUF, &v4l2_buf)) {
        printf("cannot unqueue buffer: %s\n", strerror(errno));
        return NULL;
    }

    // GREY grayscale full-frame needs no conversion: return the raw
    // mmap'ed buffer directly.
    if (cfg->frame) return frm_buffer;
    else if ((!cfg->color) && (pixelformat==V4L2_PIX_FMT_GREY)) return raw_buffer;
    else return cam_buffer;
}
Пример #14
0
bool photoController::capture(const char *filename) 
{
	if(checkCameraDetection() == false)
	{
		DEBUG_PRINTF(V_WARNING, "No camera detected.\n");
		return false;
	}
	int fd, retval;
	CameraFile *file;
	CameraFilePath camera_file_path;
	START_CHRONOMETER();
	DEBUG_PRINTF(V_MESSAGE, "Deleting old files.\n");
	retval = gp_camera_folder_delete_all(camera, "/", context);
	if(retval != GP_OK) // Error.
	{
		DEBUG_PRINTF(V_WARNING, "ERROR: Couldn't delete old files in camera memory. Code: %d\n", retval);
		return false;
	}
	DEBUG_PRINTF(V_MESSAGE, "Camera capture.\n");

	retval = gp_camera_capture(camera, GP_CAPTURE_IMAGE, &camera_file_path, context);
	if(retval != GP_OK) // Error.
	{
		if(retval == GP_ERROR_NOT_SUPPORTED)
			DEBUG_PRINTF(V_WARNING, "ERROR: This camera can not capture.\n");
		else
		{
			DEBUG_PRINTF(V_WARNING, "ERROR: Unexpected gp_camera_capture return. Code: %d\n", retval);
			releaseCamera(&camera, context);
			camera_detected = false;
			initCamera();
		}
		return false;
	}
	DEBUG_PRINTF(V_MESSAGE, "camera_file_path.folder %s!\n", camera_file_path.folder);
	DEBUG_PRINTF(V_MESSAGE, "Open %s!\n", filename);
	fd = open(filename, O_CREAT | O_WRONLY, 0644);
	if(fd < 0)  // Error.
	{
		DEBUG_PRINTF(V_WARNING, "Error opening file: %s!\n", strerror(errno));
		return false;
	}
	DEBUG_PRINTF(V_MESSAGE, "Create new CameraFile object from a file descriptor FD: %d.\n", fd);
	retval = gp_file_new_from_fd(&file, fd);
	if(retval != GP_OK) // Error.
	{
		DEBUG_PRINTF(V_WARNING, "ERROR: Unexpected gp_file_new_from_fd return. Code: %d\n", retval);
		gp_file_free(file);
		close(fd);
		return false;
	}
	if(checkCameraDetection() == false)
	{
		DEBUG_PRINTF(V_WARNING, "No camera detected 2.\n");
		gp_file_free(file);
		close(fd);
		return false;
	}
	DEBUG_PRINTF(V_MESSAGE, "Copy file from camera.\n");
	retval = gp_camera_file_get(camera, camera_file_path.folder, camera_file_path.name, GP_FILE_TYPE_NORMAL, file, context);
	if(retval != GP_OK) // Error.
	{
		if(retval == GP_ERROR_DIRECTORY_NOT_FOUND)
			DEBUG_PRINTF(V_WARNING, "Photo directory not found.\n");
		else if(retval == GP_ERROR_FILE_NOT_FOUND)
			DEBUG_PRINTF(V_WARNING, "Photo file name not found.\n");
		else
			DEBUG_PRINTF(V_WARNING, "ERROR: Unexpected gp_camera_file_get return. Code: %d\n", retval);
		gp_file_free(file);
		close(fd);
		return false;
	}
	if(checkCameraDetection() == false)
	{
		DEBUG_PRINTF(V_WARNING, "No camera detected 3.\n");
		gp_file_free(file);
		close(fd);
		return false;
	}
	DEBUG_PRINTF(V_MESSAGE, "Delete file from camera.\n");
	retval = gp_camera_file_delete(camera, camera_file_path.folder, camera_file_path.name, context);
	if(retval != GP_OK) // Error.
	{
		DEBUG_PRINTF(V_WARNING, "ERROR: Unexpected gp_camera_file_delete return. Code: %d\n", retval);
		gp_file_free(file);
		close(fd);
		return false;
	}

	DEBUG_PRINTF(V_MESSAGE, "Free CameraFile object.\n");
	gp_file_unref(file);
	close(fd);

	cv::Mat raw = cv::imread(filename);
	if(raw.data == NULL)
	{
		DEBUG_PRINTF(V_WARNING, "ERROR: OpenCV failed to open image file.\n");
		return false;
	}
	cv::Size raw_size = raw.size();
	DEBUG_PRINTF(V_MESSAGE, "capture() cv::Mat total=%u width=%d height=%d refcount=%d\n", raw.total(), raw_size.width, raw_size.height, (int)(void*)raw.refcount);
	crop(raw);
	raw.release();

	STOP_CHRONOMETER("Capture");
	return true;
}
Пример #15
0
// Latch the next pending frame (if any) from the layer's SurfaceTexture:
// updates the active buffer, current crop/transform/scaling mode and
// opacity, sets the posted dirty region, and flags visible-region /
// HWC-geometry recomputation when size, geometry or opacity changed.
void Layer::lockPageFlip(bool& recomputeVisibleRegions)
{
    ATRACE_CALL();

    if (mQueuedFrames > 0) {

        // if we've already called updateTexImage() without going through
        // a composition step, we have to skip this layer at this point
        // because we cannot call updateTeximage() without a corresponding
        // compositionComplete() call.
        // we'll trigger an update in onPreComposition().
        if (mRefreshPending) {
            mPostedDirtyRegion.clear();
            return;
        }

        // Capture the old state of the layer for comparisons later
        const bool oldOpacity = isOpaque();
        sp<GraphicBuffer> oldActiveBuffer = mActiveBuffer;

        // signal another event if we have more frames pending
        if (android_atomic_dec(&mQueuedFrames) > 1) {
            mFlinger->signalLayerUpdate();
        }

        // Buffer rejecter handed to updateTexImage(): while a resize
        // transaction is pending it either applies the pending size (when
        // the incoming buffer matches, or scaling is allowed) or rejects
        // stale fixed-size buffers of the wrong dimensions.
        struct Reject : public SurfaceTexture::BufferRejecter {
            Layer::State& front;
            Layer::State& current;
            bool& recomputeVisibleRegions;
            Reject(Layer::State& front, Layer::State& current,
                    bool& recomputeVisibleRegions)
                : front(front), current(current),
                  recomputeVisibleRegions(recomputeVisibleRegions) {
            }

            virtual bool reject(const sp<GraphicBuffer>& buf,
                    const BufferQueue::BufferItem& item) {
                if (buf == NULL) {
                    return false;
                }

                uint32_t bufWidth  = buf->getWidth();
                uint32_t bufHeight = buf->getHeight();

                // check that we received a buffer of the right size
                // (Take the buffer's orientation into account)
                if (item.mTransform & Transform::ROT_90) {
                    swap(bufWidth, bufHeight);
                }


                bool isFixedSize = item.mScalingMode != NATIVE_WINDOW_SCALING_MODE_FREEZE;
                if (front.active != front.requested) {

                    if (isFixedSize ||
                            (bufWidth == front.requested.w &&
                             bufHeight == front.requested.h))
                    {
                        // Here we pretend the transaction happened by updating the
                        // current and drawing states. Drawing state is only accessed
                        // in this thread, no need to have it locked
                        front.active = front.requested;

                        // We also need to update the current state so that
                        // we don't end-up overwriting the drawing state with
                        // this stale current state during the next transaction
                        //
                        // NOTE: We don't need to hold the transaction lock here
                        // because State::active is only accessed from this thread.
                        current.active = front.active;

                        // recompute visible region
                        recomputeVisibleRegions = true;
                    }

                    ALOGD_IF(DEBUG_RESIZE,
                            "lockPageFlip: (layer=%p), buffer (%ux%u, tr=%02x), scalingMode=%d\n"
                            "  drawing={ active   ={ wh={%4u,%4u} crop={%4d,%4d,%4d,%4d} (%4d,%4d) }\n"
                            "            requested={ wh={%4u,%4u} crop={%4d,%4d,%4d,%4d} (%4d,%4d) }}\n",
                            this, bufWidth, bufHeight, item.mTransform, item.mScalingMode,
                            front.active.w, front.active.h,
                            front.active.crop.left,
                            front.active.crop.top,
                            front.active.crop.right,
                            front.active.crop.bottom,
                            front.active.crop.getWidth(),
                            front.active.crop.getHeight(),
                            front.requested.w, front.requested.h,
                            front.requested.crop.left,
                            front.requested.crop.top,
                            front.requested.crop.right,
                            front.requested.crop.bottom,
                            front.requested.crop.getWidth(),
                            front.requested.crop.getHeight());
                }

                if (!isFixedSize) {
                    if (front.active.w != bufWidth ||
                        front.active.h != bufHeight) {
                        // reject this buffer
                        return true;
                    }
                }
                return false;
            }
        };


        Reject r(mDrawingState, currentState(), recomputeVisibleRegions);

        if (mSurfaceTexture->updateTexImage(&r) < NO_ERROR) {
            // something happened!
            recomputeVisibleRegions = true;
            return;
        }

        // update the active buffer
        mActiveBuffer = mSurfaceTexture->getCurrentBuffer();
        if (mActiveBuffer == NULL) {
            // this can only happen if the very first buffer was rejected.
            return;
        }

        mRefreshPending = true;
        mFrameLatencyNeeded = true;
        if (oldActiveBuffer == NULL) {
             // the first time we receive a buffer, we need to trigger a
             // geometry invalidation.
             mFlinger->invalidateHwcGeometry();
         }

        // Re-read crop/transform/scaling from the consumer and invalidate
        // HWC geometry if any of them changed since the last latched frame.
        Rect crop(mSurfaceTexture->getCurrentCrop());
        const uint32_t transform(mSurfaceTexture->getCurrentTransform());
        const uint32_t scalingMode(mSurfaceTexture->getCurrentScalingMode());
        if ((crop != mCurrentCrop) ||
            (transform != mCurrentTransform) ||
            (scalingMode != mCurrentScalingMode))
        {
            mCurrentCrop = crop;
            mCurrentTransform = transform;
            mCurrentScalingMode = scalingMode;
            mFlinger->invalidateHwcGeometry();
        }

        // A buffer-size change also invalidates HWC geometry.
        if (oldActiveBuffer != NULL) {
            uint32_t bufWidth  = mActiveBuffer->getWidth();
            uint32_t bufHeight = mActiveBuffer->getHeight();
            if (bufWidth != uint32_t(oldActiveBuffer->width) ||
                bufHeight != uint32_t(oldActiveBuffer->height)) {
                mFlinger->invalidateHwcGeometry();
            }
        }

        mCurrentOpacity = getOpacityForFormat(mActiveBuffer->format);
        if (oldOpacity != isOpaque()) {
            recomputeVisibleRegions = true;
        }

        // FIXME: mPostedDirtyRegion = dirty & bounds
        const Layer::State& front(drawingState());
        mPostedDirtyRegion.set(front.active.w, front.active.h);

        // clamp sampling at the texture edges for the external-image target
        glTexParameterx(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameterx(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    }
}
// Computes the effective content crop for this layer on display 'hw':
// starts from the content crop (the area of the content scaled to the
// layer's size), clips the window crop against the display viewport,
// and insets the content crop accordingly in buffer coordinates.
FloatRect Layer::computeCrop(const sp<const DisplayDevice>& hw) const {
    FloatRect crop(getContentCrop());

    const State& s(getDrawingState());

    // The window crop (active.crop) is the area of the window that gets
    // cropped -- but never scaled. When unset, it covers the whole window.
    Rect activeCrop = s.active.crop.isEmpty()
            ? Rect(s.active.w, s.active.h)
            : s.active.crop;

    // Clip the window crop against the projection's viewport in
    // layer-stack space, then map it back to layer space. With no window
    // scaling involved this maps to full pixels in the buffer.
    // FIXME: these 3 lines can produce slightly incorrect clipping when a
    // viewport clip and a window transform combine; floating point would fix it.
    activeCrop = s.transform.transform(activeCrop);
    activeCrop.intersect(hw->getViewport(), &activeCrop);
    activeCrop = s.transform.inverse().transform(activeCrop);

    // paranoia: keep the window crop inside the window's bounds
    activeCrop.intersect(Rect(s.active.w, s.active.h), &activeCrop);

    // drop the transparent region and snap to the bounds
    activeCrop = reduce(activeCrop, s.activeTransparentRegion);

    if (!activeCrop.isEmpty()) {
        // Express the window crop in the buffer's coordinate system by
        // applying the inverse of the transform currently set on the
        // SurfaceFlingerConsumer.
        uint32_t invTransform = mCurrentTransform;
        int winW = s.active.w;
        int winH = s.active.h;
        if (invTransform & NATIVE_WINDOW_TRANSFORM_ROT_90) {
            // a 90-degree rotation also swaps the meaning of the flips
            invTransform ^= NATIVE_WINDOW_TRANSFORM_FLIP_V |
                    NATIVE_WINDOW_TRANSFORM_FLIP_H;
            winW = s.active.h;
            winH = s.active.w;
        }
        const Rect winCrop = activeCrop.transform(
                invTransform, s.active.w, s.active.h);

        // Inset 'crop' by winCrop scaled into crop's coordinate space.
        const float xScale = crop.getWidth()  / float(winW);
        const float yScale = crop.getHeight() / float(winH);

        crop.left   += winCrop.left                 * xScale;
        crop.top    += winCrop.top                  * yScale;
        crop.right  -= (winW - winCrop.right ) * xScale;
        crop.bottom -= (winH - winCrop.bottom) * yScale;
    }
    return crop;
}
Пример #17
0
// Main function: extracts gist feature vectors from an input video stream
// (or a list of frames named by a "*_gistList.txt" file), feeding each
// frame through the StdBrain's GistEstimator and saving the per-frame
// gist vector with saveData(). Returns 0 on success, 1 on bad options.
int main(const int argc, const char **argv)
{

  MYLOGVERB = LOG_INFO;  // suppress debug messages

  // Instantiate a ModelManager:
  ModelManager manager("Gist Features Extraction");

  // we cannot use saveResults() on our various ModelComponent objects
  // here, so let's not export the related command-line options.
  manager.allowOptions(OPTEXP_ALL & (~OPTEXP_SAVE));

  // Instantiate our various ModelComponents:
  nub::soft_ref<SimEventQueueConfigurator>
    seqc(new SimEventQueueConfigurator(manager));
  manager.addSubComponent(seqc);

  nub::soft_ref<InputMPEGStream>
    ims(new InputMPEGStream(manager, "Input MPEG Stream", "InputMPEGStream"));
  manager.addSubComponent(ims);

  nub::soft_ref<StdBrain> brain(new StdBrain(manager));
  manager.addSubComponent(brain);

  nub::ref<SpatialMetrics> metrics(new SpatialMetrics(manager));
  manager.addSubComponent(metrics);

  manager.exportOptions(MC_RECURSE);
  metrics->setFOAradius(30); // FIXME
  metrics->setFoveaRadius(30); // FIXME

  // setting up the GIST ESTIMATOR
  manager.setOptionValString(&OPT_GistEstimatorType,"Std");
  //manager.setOptionValString(&OPT_GistEstimatorType,"FFT");

  // Request a bunch of option aliases (shortcuts to lists of options):
  REQUEST_OPTIONALIAS_NEURO(manager);

  // Parse command-line:
  if (manager.parseCommandLine(argc, argv, "<*.mpg or *_gistList.txt>",
                               1, 1) == false)
    return(1);

  nub::soft_ref<SimEventQueue> seq = seqc->getQ();

  // if the file passed ends with _gistList.txt
  // we have a different protocol
  bool isGistListInput = false;
  int ifLen = manager.getExtraArg(0).length();
  if(ifLen > 13 &&
     manager.getExtraArg(0).find("_gistList.txt",ifLen - 13) !=
     std::string::npos)
    isGistListInput = true;

  // NOTE: this could now be controlled by a command-line option
  // --preload-mpeg=true
  manager.setOptionValString(&OPT_InputMPEGStreamPreload, "true");

  // do post-command-line configs:
  // tag/start/num describe each frame sequence of a gistList input:
  // file-name prefix, first frame index, and frame count.
  std::vector<std::string> tag;
  std::vector<int> start;
  std::vector<int> num;
  unsigned int cLine = 0; int cIndex = 0;
  if(isGistListInput)
    {
      LINFO("we have a gistList input");
      getGistFileList(manager.getExtraArg(0).c_str(), tag, start, num);
      cIndex = start[0];
    }
  else
    {
      LINFO("we have an mpeg input");
      ims->setFileName(manager.getExtraArg(0));
      manager.setOptionValString(&OPT_InputFrameDims,
                                 convertToString(ims->peekDims()));
    }

  // frame delay in seconds
  //double fdelay = 33.3667/1000.0; // real time
  double fdelay = 3.3667/1000.0;

  // let's get all our ModelComponent instances started:
  manager.start();

  // get the GistEstimator
  // NOTE(review): ge is never assigned (the dynCastWeak call is commented
  // out), so the unconditional LFATAL below fires on every run -- this
  // path is clearly awaiting the marked fix.
  nub::soft_ref<GistEstimatorStd> ge;////// =
  ///////    dynCastWeak<GistEstimatorStd>(brain->getGE());
  LFATAL("fixme");
  if (ge.isInvalid()) LFATAL("I am useless without a GistEstimator");

  // MAIN LOOP
  SimTime prevstime = SimTime::ZERO();
  int fNum = 0;
  Image< PixRGB<byte> > inputImg;  Image< PixRGB<byte> > dispImg;
  Image<double> cgist;
  std::string folder =  "";
  std::string::size_type sPos = manager.getExtraArg(0).rfind("/",ifLen);
  if(sPos != std::string::npos)
    folder = manager.getExtraArg(0).substr(0,sPos+1);

  LINFO("let's start");
  while(1)
  {
    // has the time come for a new frame?
    if (fNum == 0 ||
        (seq->now() - 0.5 * (prevstime - seq->now())).secs() - fNum * fdelay > fdelay)
    {
      // load new frame
      std::string fName;
      if(isGistListInput)
        {
          if (cLine >= tag.size()) break;  // end of input list

          // open the current file
          char tNumStr[100]; sprintf(tNumStr,"%06d",cIndex);
          fName = folder + tag[cLine] + std::string(tNumStr) + ".ppm";

          inputImg = Raster::ReadRGB(fName);
          cIndex++;

          if(cIndex >= start[cLine] + num[cLine])
            {
              cLine++;
              if (cLine < tag.size()) cIndex = start[cLine];
            }

          // reformat the file name to a gist name
          int fNameLen = fName.length();
          unsigned int uPos = fName.rfind("_",fNameLen);
          fName = fName.substr(0,uPos)+ ".ppm";

        }
      else
        {
          fName = manager.getExtraArg(0);
          inputImg = ims->readRGB(); //Raster::ReadRGB(manager.getExtraArg(1));
          if (inputImg.initialized() == false) break;  // end of input stream
          // format new frame
          // NOTE(review): Dims(height, width-25+1) passes height as the
          // Dims width -- looks like swapped arguments; confirm intent.
          inputImg = crop(inputImg,
                          Rectangle(Point2D<int>(0,25),
                                    Dims(inputImg.getHeight(),
                                         inputImg.getWidth()-25+1)));
          cIndex = fNum+1;
        }

      dispImg = inputImg;
      LINFO("\nnew frame :%d",fNum);

      // pass input to brain:
      rutz::shared_ptr<SimEventInputFrame>
        e(new SimEventInputFrame(brain.get(), GenericFrame(inputImg), 0));
      seq->post(e); // post the image to the brain

      // get the gist feature vector
      cgist = ge->getGist();
      //for(uint k = 0; k < cgist.getSize(); k++) LINFO("%d: %f",k, cgist.getVal(0,k));

//       // setup display at the start of stream
//       if (fNum == 0)
//       {
//         int s = SQ_SIZE;
//         inputWin = new XWinManaged(Dims(w, h), 0, 0, manager.getExtraArg(0).c_str());
//         wList.add(inputWin);
//         gistWin = new XWinManaged(Dims(NUM_GIST_COL * s, NUM_GIST_FEAT * s), 0,0, "Gist");
//         wList.add(gistWin);
//       }

//       // display the input image and the gist histogram
//       drawGrid(dispImg, w/4,h/4,1,1,PixRGB<byte>(255,255,255));
//       inputWin->drawImage(dispImg,0,0);
//       gistWin->drawImage(ge->getGistHistogram(SQ_SIZE),0,0);

      // SAVE GIST FEATURES TO A FILE
      saveData(cgist, fName, cIndex-1);
      //LINFO("\nFrame number just saved:%d",fNum);Raster::waitForKey();

      // increase frame count
      fNum++;
    }

    // evolve brain:
    prevstime = seq->now(); // time before current step
    const SimStatus status = seq->evolve();
    if (SIM_BREAK == status) // Brain decided it's time to quit
      break;

  }

  // stop all our ModelComponents
  manager.stop();

  // all done!
  return 0;
}
Пример #18
0
// ######################################################################
// Reads the next frame from the XML-described test-image set.
// In object mode (itsGetObjects) each call returns one object of the
// current scene cropped to its polygon's bounding box, advancing to the
// next scene once all objects are consumed; otherwise it returns the
// whole scene image, optionally with object polygons and names drawn on
// it. Returns an empty GenericFrame when no scenes remain. The returned
// frame carries the scene description under the "SceneData" metadata key.
GenericFrame XMLInput::readFrame()
{
  if (itsTestImages.get() == 0)
    LFATAL("No scene data. Need xml file");

  if (!itsGetObjects.getVal())
    itsCurrentSceneNum = itsFrameNum;

  //If we dont have the frame number, then return an empty image
  if (itsCurrentSceneNum >= itsTestImages->getNumScenes())
  {
    LINFO("No more scenes");
    return GenericFrame();
  }


  //Get the scene


  // Copy the scene description into a fresh shared SceneData so callers
  // can keep it past this object's lifetime.
  TestImages::SceneData sceneData = itsTestImages->getSceneData(itsCurrentSceneNum);
  rutz::shared_ptr<TestImages::SceneData> scene(new TestImages::SceneData);
  scene->description = sceneData.description;
  scene->filename = sceneData.filename;
  scene->type = sceneData.type;
  scene->useType = sceneData.useType;

//  LINFO("Scene %s", sceneData.filename.c_str());
  Image<PixRGB<byte> > sceneImg;
  if (itsGetObjects.getVal())
  {

    if (itsObjectNum < sceneData.objects.size())
    {
      TestImages::ObjData objData = sceneData.objects[itsObjectNum];
      std::vector<Point2D<int> > objPoly = objData.polygon;

      Image<PixRGB<byte> > img = itsTestImages->getScene(itsCurrentSceneNum);

      //Get the bounding box
      Rectangle rect = findBoundingRect(objPoly, img.getDims());
      sceneImg = crop(img, rect);

      scene->objects.push_back(objData);
      itsObjectNum++;
      // Wrap to the next scene after the last object of this one.
      if (itsObjectNum >= sceneData.objects.size())
      {
        itsCurrentSceneNum++;
        itsObjectNum = 0;
      }
    }



  } else {
    scene->objects = sceneData.objects;
    sceneImg = itsTestImages->getScene(itsCurrentSceneNum);

    if (itsDrawPolygons.getVal())
    {
      // Outline every object (matching the name filter, if set) in green
      // and label it with its name.
      for(uint i=0; i<sceneData.objects.size(); i++)
      {
        TestImages::ObjData objData = sceneData.objects[i];

        if (itsFilterObjectName.getVal() == objData.name || itsFilterObjectName.getVal().empty())
        {
          std::vector<Point2D<int> > objPoly = objData.polygon;
          Point2D<int> p1 = objPoly[0];
          // NOTE(review): this inner 'i' shadows the outer object index;
          // harmless here but worth renaming.
          for(uint i=1; i<objPoly.size(); i++)
          {
            drawLine(sceneImg, p1, objPoly[i], PixRGB<byte>(0, 255, 0), 0);
            p1 = objPoly[i];
          }
          drawLine(sceneImg, p1, objPoly[0], PixRGB<byte>(0, 255, 0)); //close the polygon

          writeText(sceneImg, objPoly[0]+10, objData.name.c_str(), PixRGB<byte>(255,255,255), PixRGB<byte>(0,0,0));
        }

      }
    }

  }

  // Honor an explicit scene size from the XML, then record the final dims.
  if (!sceneData.dims.isEmpty())
    sceneImg = rescale(sceneImg, sceneData.dims);
  scene->dims = sceneImg.getDims();


  GenericFrame frame(sceneImg);
  frame.addMetaData(std::string("SceneData"), scene);

  return frame;
}
Пример #19
0
// Exports every rectangle cut on the stage image: each rect is cropped
// out of the source pixels and written as a PNG into the image dir, then
// two complex symbols are stored as JSON -- one containing all cut
// sprites ("<name>") and one containing only the user-marked subset
// ("<name>_part"). No-ops when there are no rects or no stage image.
void RectCutCMPT::OnOutputData(wxCommandEvent& event)
{
	auto op = std::dynamic_pointer_cast<RectCutOP>(m_editop);
	const std::vector<sm::rect*>& rects = op->GetRectMgr().GetAllRect();
	if (rects.empty()) {
		return;
	}

	const ee::SprConstPtr& spr = m_stage->GetImage();
	if (!spr) {
		return;
	}

	// FIX: the original read "...ImageSymbol>>(...)" (stray '>') and
	// declared a non-const shared_ptr for a cast to const -- neither
	// compiles; 'auto' deduces shared_ptr<const ee::ImageSymbol>.
	auto sym = std::dynamic_pointer_cast<const ee::ImageSymbol>(spr->GetSymbol());
	if (!sym) {
		return;
	}

	ee::Image* image = sym->GetImage();

	std::string img_dir = m_imagePath->GetValue();
	std::string json_dir = m_jsonPath->GetValue();

	// Default the pivot to the center of the clipped region when the
	// user hasn't picked one.
	sm::vec2 center = op->GetCenter();
	if (center == sm::vec2(0, 0)) {
		center.x = image->GetClippedRegion().Width() * 0.5f;
		center.y = image->GetClippedRegion().Height() * 0.5f;
	}

	auto img_data = ee::ImageDataMgr::Instance()->GetItem(sym->GetFilepath());
	assert(img_data->GetFormat() == GPF_RGB || img_data->GetFormat() == GPF_RGBA8);
	int channels = img_data->GetFormat() == GPF_RGB ? 3 : 4;
	pimg::Cropping crop(img_data->GetPixelData(), img_data->GetWidth(), img_data->GetHeight(), channels);

	std::string img_name = ee::FileHelper::GetFilename(image->GetFilepath());
	ecomplex::Symbol* complex_all = new ecomplex::Symbol;
	ecomplex::Symbol* complex_part = new ecomplex::Symbol;
	for (int i = 0, n = rects.size(); i < n; ++i)
	{
		const sm::rect& r = *rects[i];
		if (r.Width() == 0 || r.Height() == 0) {
			continue;
		}

		// NOTE(review): ownership of 'pixels' is unclear from here --
		// confirm whether Crop() transfers it and it should be freed.
		uint8_t* pixels = crop.Crop(r.xmin, r.ymin, r.xmax, r.ymax);
		sm::vec2 sz = r.Size();

		// NOTE(review): always exports GPF_RGBA8 even when the source is
		// 3-channel RGB -- confirm this is intended.
		std::string img_filename = img_dir + "\\" + img_name + "_" + ee::StringHelper::ToString(i) + ".png";
		gimg_export(img_filename.c_str(), pixels, sz.x, sz.y, GPF_RGBA8, true);

		// FIX: was "auto& spr = new ..." -- an lvalue reference cannot
		// bind to a prvalue pointer, and 'spr' shadowed the stage sprite.
		auto dummy_spr = new ee::DummySprite(new ee::DummySymbol(img_filename, sz.x, sz.y));
		sm::vec2 offset = r.Center() - center;
		dummy_spr->Translate(offset);
		complex_all->Add(dummy_spr);

		// Also add to the "part" symbol when this rect was marked.
		for (int j = 0, m = m_part_rects.size(); j < m; ++j) {
			if (m_part_rects[j] == r) {
				complex_part->Add(dummy_spr);
				break;
			}
		}
	}

	complex_all->name = img_name;
	complex_part->name = img_name + "_part";

	std::string tag = ee::SymbolFile::Instance()->Tag(s2::SYM_COMPLEX);

	std::string filename_all = json_dir + "\\" + img_name + "_" + tag + ".json";
	ecomplex::FileStorer::Store(filename_all.c_str(), complex_all, json_dir);
	delete complex_all;

	if (!complex_part->GetAllChildren().empty()) {
		std::string filename_part = json_dir + "\\" + img_name + "_part_" + tag + ".json";
		ecomplex::FileStorer::Store(filename_part.c_str(), complex_part, json_dir);
	}
	delete complex_part;

	ee::FinishDialog dlg(this);
	dlg.ShowModal();
}
Пример #20
0
/*
 * Meme generator driver.
 *
 * Usage: program <meme-file> <action-file>
 *
 * Reads the action (.act) file to learn the outfile, meme name, font name
 * and text attributes; reads the meme (.mem) file to locate the meme's
 * simp image and the fsf font description; loads the font's glyph images;
 * then renders each attribute's message by cropping/overlaying glyph
 * simps into a string image and overlays it onto the meme image, which is
 * finally written to the outfile. Returns 0 on success, 1 on any
 * open/parse/read failure (freeing everything via freeAll first).
 */
int main (int argc, char** argv) {

	FILE* meme_file = 0;
	FILE* action_file = 0;
	FILE* font_file = 0;
	FILE* font_simp_file = 0;
	FILE* simp_file = 0;
	FILE* outfile = 0;

	char* line = 0;
	char* name = 0;
	char* value = 0;
	char* file = 0;
	char* tmp_word = 0;
	char* tmp_value = 0;
	char* meme_filename = 0;
	char* action_filename = 0;

	size_t line_size = 0;

	int i = 0;
	int j = 0;
	int x = 0;
	int y = 0;
	int w = 0;
	int h = 0;
	int line_counter = 0;
	int search_flag = 0;

	char cur_char = 0;

	meme* meme_data = 0;
	font* font_data = 0;

	simp* meme_simp = 0;
	simp* font_simp = 0;
	simp* string_simp = 0;
	simp* temp_simp = 0;

	simp* temp_swap_ptr = 0;


	/* Check to make sure there are the proper number of argumnets. */
	if (argc != 3 ) {
		printf("Invalid number of arguments!\n");
		return 1;
	}

	meme_filename = argv[1];
	action_filename = argv[2];
	
	/* Open the files for reading. If one fails to open, then exit and return 1. */
	meme_file = fopen(meme_filename, "r");

	if (meme_file == 0) {
		printf("File %s failed to open!\n", meme_filename);
		return 1;
	}

	action_file = fopen(action_filename, "r");

	if (action_file == 0) {
		printf("File %s failed to open!\n", action_filename);

		freeAll("ccccmnssffffff", line, name, value, file, meme_data, font_data, font_simp, meme_simp, meme_file, action_file, font_file, font_simp_file, simp_file, outfile);

		return 1;
	}

	/* Create space for the meme and font data structure */
	meme_data = (meme*) malloc(sizeof(meme));
	font_data = (font*) malloc(sizeof(font));

	/* Create space for the strings */
	/* line = (char*) malloc(256); */
	name = (char*) malloc(128);
	value = (char*) malloc(128);
	file = (char*) malloc(128);

	line_counter = 0;

	/* Read through the act file */
	while (getline(&line, &line_size, action_file) != -1) {
		line_counter++;

		if (isspace(line[0])) continue;

		/* Split the line into a name and a value. */
		strcpy(name, strtok(line, ":\n"));
		strcpy(value, strtok(0, ":\n"));

		/* For each line, take action based on what it starts with */
		if (strncmp(line, "OUTFILE", 7) == 0) {
			
			/* Open the outfile for writing binary. */
			outfile = fopen(value, "wb");

			/* If the outfile doesn't open then close everything and exit */
			if (outfile == 0) {
		
				printf("The outfile from line %d of %s failed to open!\n", line_counter, action_filename);

				freeAll("ccccmnssffffff", line, name, value, file, meme_data, font_data, font_simp, meme_simp, meme_file, action_file, font_file, font_simp_file, simp_file, outfile);

				return 1;
			}

		} else if (strncmp(line, "MEME", 4) == 0) {

			/* Initialize the meme structure with the given name. */
			initMeme(meme_data, value);

		} else if (strncmp(line, "FONT", 4) == 0) {
			
			/* Initialize the font structure with the given name. */
			initFont(font_data, value);

		} else {

			/* If the meme structure already exists, add attributes. */
			addAttribute(meme_data, name, value, 0, 0);
		}
	}

	line_counter = 0;

	/* Read through the mem file */
	while (getline(&line, &line_size, meme_file) != -1) {
		line_counter++;

		if (line[0] == '\n') continue;

		/* Split the line into a name and a value. */
		strcpy(name, strtok(line, ":\n"));
		strcpy(value, strtok(0, ":\n"));
		
		/* For each line, take action based on what it starts with */
		if (strncmp(line, "MEMES", 5) == 0) {
			search_flag = 0;
			tmp_word = strtok(value, " \t\n\v\f\r");
			/* Check that at least of of the values matches meme_data->name. If none do, then exit the program. */
			while(tmp_word != 0 ) {
				if (strcmp(tmp_word, meme_data->name) == 0) {
					search_flag = 1;
					break;
				}
				tmp_word = strtok(0, " \t\n\v\f\r");
			}

			/* If the meme we are looking for is not included in this file, then exit. */
			if (!search_flag) {
				
				printf("The Meme %s is not included in the file %s on line %d!", meme_data->name, meme_filename, line_counter);

				freeAll("ccccmnssffffff", line, name, value, file, meme_data, font_data, font_simp, meme_simp, meme_file, action_file, font_file, font_simp_file, simp_file, outfile);

				return 1;
			}

		} else if (strncmp(line, "FONTS", 5) == 0) {

			
			/* Read the name of each one. If the name matches font_data->name, then keep that open as font_file and close all other fsf files. */
			tmp_word = strtok(value, " \t\n\v\f\r");

			search_flag = 0;

			/* Check that at least of of the values matches font_data->name. If none do, then exit the program. */
			while(tmp_word != 0) {

				/* Open each font file for reading */
				font_file = fopen(tmp_word, "r");

				/* If the font_file doesn't open, then close everything and exit. */
				if (font_file == 0) {
			
					printf("The file %s on line %d of %s failed to open!\n", tmp_word, line_counter, meme_filename);

					freeAll("ccccmnssffffff", line, name, value, file, meme_data, font_data, font_simp, meme_simp, meme_file, action_file, font_file, font_simp_file, simp_file, outfile);
				
					return 1;
				}

				
				/* Read the fsf file, and look for the name tag. */
				while (getline(&line, &line_size, font_file) != -1) {

					if (isspace(line[0])) continue;

					if (strncmp(line, "NAME", 4) == 0) {
						
						tmp_value = line;
						tmp_value = fustrtok(tmp_value, file, 128, ":\n");
						tmp_value = fustrtok(tmp_value, file, 128, ":\n");
						
						if (strcmp(file, font_data->name) == 0) {
							search_flag = 1;
							break;
						}

					}
				}

				if (search_flag) {
					break;
				}
				
				if (font_file) {
					fclose(font_file);
				}

				tmp_word = strtok(0, " \t\n\v\f\r");
				
			}

			/* If the meme we are looking for is not included in this file, then exit. */
			if (!search_flag) {
				
				printf("The Font %s on line %d is not included in the mem file!\n", font_data->name, line_counter);

				freeAll("ccccmnssffffff", line, name, value, file, meme_data, font_data, font_simp, meme_simp, meme_file, action_file, font_file, font_simp_file, simp_file, outfile);

				return 1;

			}

		} else if (strncmp(line, meme_data->name, strlen(meme_data->name)) == 0) {

			/* Check to see of the next word is "FILE". If it is then open that simp file, otherwise add the values to the associated attribute. */
			/* NOTE(review): tmp_word aliases name here, so the sscanf below
			   reads from and writes into the same buffer -- technically
			   overlapping objects; confirm this is safe on all targets. */
			tmp_word = name;
			sscanf(name, "%*s %s", tmp_word);

			if (strcmp(name, "FILE") == 0) {

				/* Open each font file for reading */
				simp_file = fopen(value, "rb");				

				/* If the simp_file doesn't open, then close everything and exit. */
				if (simp_file == 0) {
			
					printf("The simp file, %s, on line %d of %s failed to open!\n", value, line_counter, meme_filename);

					freeAll("ccccmnssffffff", line, name, value, file, meme_data, font_data, font_simp, meme_simp, meme_file, action_file, font_file, font_simp_file, simp_file, outfile);
			
					return 1;
				}

				meme_simp = (simp*) malloc(sizeof(simp));

				if (!readSimp(meme_simp, simp_file)) {
					
					printf("The meme simp file was unable to be read!\n");

					freeAll("ccccmnssffffff", line, name, value, file, meme_data, font_data, font_simp, meme_simp, meme_file, action_file, font_file, font_simp_file, simp_file, outfile);

					return 1;
				}

			} else {

				if (sscanf(value, "%d %d", &x, &y) != 2) {
					
					/* NOTE(review): format string has three conversions but
					   four data arguments; "%s:value" was probably meant to
					   be "%s:%s" so 'value' is printed too. */
					printf("Invalid argument(s) on line %d of %s: %s:value!\n", line_counter, meme_filename, line, value);

					freeAll("ccccmnssffffff", line, name, value, file, meme_data, font_data, font_simp, meme_simp, meme_file, action_file, font_file, font_simp_file, simp_file, outfile);

					return 1;
				}

				setAttrCoord(meme_data, tmp_word, x, y);
			}

		}
	}

	line_counter = 0;

	/* Read through the fsf file */
	while (getline(&line, &line_size, font_file) != -1) {
		line_counter++;

		if (isspace(line[0])) continue;

		/* For each line, take action based on what it starts with */
		if (strncmp(line, "NAME", 4) == 0) {
			
			/* This statement may be able to be removed because the NAME was already checked in the mem file read. */

		} else if (strncmp(line, "IMAGE", 5) == 0) {

			/* Split the line into a name and a value. */
			tmp_word = line;
			tmp_word = fustrtok(tmp_word, file, 128, ":\n");
			tmp_word = fustrtok(tmp_word, file, 128, ":\n");

			/* Open the simp image for editing */
			font_simp_file = fopen(file, "rb");

			/* If the simp_file doesn't open, then close everything and exit. */
			if (font_simp_file == 0) {
		
				printf("The simp file, %s, on line %d of the specified fsf file failed to open!\n", value, line_counter);

				freeAll("ccccmnssffffff", line, name, value, file, meme_data, font_data, font_simp, meme_simp, meme_file, action_file, font_file, font_simp_file, simp_file, outfile);
				
				return 1;
			}

			font_simp = (simp*) malloc(sizeof(simp));
			
			if (!readSimp(font_simp, font_simp_file)) {

				printf("The file %s from line %d of the fsf file was unable to be read!\nThe filetype may be incorrect or the file may be corrupted.\n", value, line_counter);

				freeAll("ccccmnssffffff", line, name, value, file, meme_data, font_data, font_simp, meme_simp, meme_file, action_file, font_file, font_simp_file, simp_file, outfile);
			
				return 1;

			}
			
		} else if (strncmp(line, "CHARACTER", 9) == 0) {

			if (!font_simp_file) {
				printf("The fsf IMAGE line must come before any CHARACTERn line!\n");

				freeAll("ccccmnssffffff", line, name, value, file, meme_data, font_data, font_simp, meme_simp, meme_file, action_file, font_file, font_simp_file, simp_file, outfile);
			
				return 1;

			}
			
			/* Check the character after CHARACTER. Crop the image at the given values and store it at the proper index. */

			if (sscanf(value, "%d %d %d %d", &x, &y, &w, &h) != 4) {
				printf("Invalid argument(s) on line %d of the fsf file!\n", line_counter);

				freeAll("ccccmnssffffff", line, name, value, file, meme_data, font_data, font_simp, meme_simp, meme_file, action_file, font_file, font_simp_file, simp_file, outfile);

				return 1;				
			}

			/* name[9] is the literal character following "CHARACTER". */
			addCharacter(font_simp, font_data, name[9], x, y, w, h);
			
		}
	}
	
	/* create a string_simp to overlay, and a temp_simp to hold the temporary crop. */
	string_simp = (simp*) malloc(sizeof(simp));
	temp_simp = (simp*) malloc(sizeof(simp));

	/* For each attribute in the meme */
	for (i = 0; i < meme_data->num_attr; i++) {
	
		/* TODO: take care of the scenario where there are zero letters in the message. */
		/* NOTE(review): glyphs are indexed directly by the (char) message
		   byte -- negative for high-bit chars on signed-char targets. */
		w = font_data->characters[meme_data->attr[i].msg[0]]->width;
		h = font_data->characters[meme_data->attr[i].msg[0]]->height;

		/* initialize the string_simp with the width of the first two letters. */
		initSimp(string_simp, w, h);
		crop(font_data->characters[meme_data->attr[i].msg[0]], string_simp, 0, 0, w, h);

		line_size = strlen(meme_data->attr[i].msg);

		/* For each letter in that attribute's message. */
		for (j = 1; j < line_size; j++) {

			cur_char = meme_data->attr[i].msg[j];
			w = font_data->characters[cur_char]->width;
			w += string_simp->width;
			h = font_data->characters[cur_char]->height;
			
			initSimp(temp_simp, w, h);
			
			/* Crop simp_string into temp_simp with simp_string->width + current character's width and the standard height. */
			crop(string_simp, temp_simp, 0, 0, w, h);

			/* Swap string_simp and temp_simp pointers. */
			temp_swap_ptr = string_simp;
			string_simp = temp_simp;
			temp_simp = temp_swap_ptr;

			/* overlay the new letter */
			x = w - font_data->characters[cur_char]->width;
			overlay(font_data->characters[cur_char], string_simp, x, 0);

			freeSimp(temp_simp);
		}

		/* Calculate the upper left corner based on the centers given. */
		y = meme_data->attr[i].y - string_simp->height;
		x = meme_data->attr[i].x - (string_simp->width / 2);

		/* Overlay the completed string_simp onto the meme_simp. */
		overlay(string_simp, meme_simp, x, y);

		/* Free the string_simp to use on the next attribute. */
		freeSimp(string_simp);
	}

	/* Write the meme_simp to the outfile */
	writeSimp(meme_simp, outfile);

	/* cleanup */
	freeAll("ccccmnssssffffff", line, name, value, file, meme_data, font_data, font_simp, meme_simp, string_simp, temp_simp, meme_file, action_file, font_file, font_simp_file, simp_file, outfile);

	return 0;
}
Пример #21
0
int main(int argc, char** argv)
{
    /**********
    bfs::create_directory(input_dir / "prova");
    std::ofstream file(input_dir / "prova/testo.txt");
    file << "ciao!";
    file.close();
    if (!bfs::exists(input_dir / "prova/testo.txt"))
        std::cout << "Something went wrong." << std::endl;
    else std::cout << "apposto" << std::endl;
    **************/
    static const char *faceCascadeFilename = "/usr/share/opencv/haarcascades/haarcascade_frontalface_alt2.xml";
    CvHaarClassifierCascade* faceCascade = (CvHaarClassifierCascade*)cvLoad(faceCascadeFilename, 0, 0, 0 );
     if( !faceCascade ) 
     {
       printf("Could not load Haar cascade Face detection classifier in '%s'.", faceCascadeFilename);
       exit(1);
     }   

    if (!bfs::exists(input_dir))
    { 
        std::cout << "Directory does not exist!" << std::endl;
        return -1; // check existence of input_dir
    }

    bfs::directory_iterator end_itr;
    //int pictures=0;
    std::vector<int> compression_params; //params for imwrite function
    compression_params.push_back(CV_IMWRITE_PXM_BINARY);
    compression_params.push_back(1);
    std::string filename;
    IplImage* img;
    //CvMat* mat;
    
    for (bfs::directory_iterator itr(input_dir); itr!=end_itr; itr++)
    {
        try
        { 
            filename = itr->path().string();
            img = cvLoadImage( filename.c_str(),1);
        }
        catch(int e)
        {
            std::cout << "An exception occured: exception n: " << e << std::endl;
        }
       CvRect rect = detectFaceInImage(img, faceCascade);
       std::cout << "3 cvrect ok" << std::endl;
       if (rect.x > 0 && rect.y > 0 && rect.height > 0 && rect.width > 0)
            img = crop(img,rect);
       
       //mat = cvCreateMat(img->height, img->width,CV_32FC3);
       //std::cout << "5 createMat ok" << std::endl;
       //cvConvert(img,mat);
       //std::cout << "6 convert ok" << std::endl;
       //cvNamedWindow( "check2", 1 );
       //cvShowImage( "check2", mat );
       //cv::waitKey(30);
       //std::string tmp;
       // std::cin>>tmp;
       if(cvSaveImage(filename.c_str(), img))
      // if (cv::imwrite(filename,(cv::InputArray)mat,compression_params))
           std::cout<<"image " << filename << " written" << std::endl;
       else
            std::cout<<"can't write image"<<std::endl;
      // std::cout << filename << std::endl;
        
    }
    

    
    return 0;
}
Пример #22
0
// ----------------------------------------------------------------------------
// Handles the action [id].
// Returns true if the action was handled, false otherwise.
// All "pgfx_*" actions operate on the currently displayed graphic entry and
// mark the entry as modified when the image data changes.
// ----------------------------------------------------------------------------
bool GfxEntryPanel::handleEntryPanelAction(std::string_view id)
{
	// We're only interested in "pgfx_" actions
	if (!StrUtil::startsWith(id, "pgfx_"))
		return false;

	// For pgfx_brush actions, the string after pgfx is a brush name
	if (StrUtil::startsWith(id, "pgfx_brush"))
	{
		gfx_canvas_->setBrush(SBrush::get(std::string{ id }));
		button_brush_->setIcon(StrUtil::afterFirst(id, '_'));
	}

	// Editing - drag mode
	else if (id == "pgfx_drag")
	{
		editing_ = false;
		gfx_canvas_->setEditingMode(GfxCanvas::EditMode::None);
	}

	// Editing - draw mode
	else if (id == "pgfx_draw")
	{
		editing_ = true;
		gfx_canvas_->setEditingMode(GfxCanvas::EditMode::Paint);
		gfx_canvas_->setPaintColour(cb_colour_->colour());
	}

	// Editing - erase mode
	else if (id == "pgfx_erase")
	{
		editing_ = true;
		gfx_canvas_->setEditingMode(GfxCanvas::EditMode::Erase);
	}

	// Editing - translate mode
	else if (id == "pgfx_magic")
	{
		editing_ = true;
		gfx_canvas_->setEditingMode(GfxCanvas::EditMode::Translate);
	}

	// Editing - set translation
	else if (id == "pgfx_settrans")
	{
		// Create translation editor dialog
		TranslationEditorDialog ted(
			theMainWindow, *theMainWindow->paletteChooser()->selectedPalette(), " Colour Remap", image());

		// Create translation to edit
		ted.openTranslation(edit_translation_);

		// Show the dialog
		if (ted.ShowModal() == wxID_OK)
		{
			// Set the translation
			edit_translation_.copy(ted.getTranslation());
			gfx_canvas_->setTranslation(&edit_translation_);
		}
	}

	// Editing - set brush
	else if (id == "pgfx_setbrush")
	{
		// Position the brush menu just below the brush toolbar button.
		// (Was `GetScreenPosition() -= GetScreenPosition()`, i.e. operator-=
		// on a temporary; plain subtraction is equivalent and clearer.)
		auto p = button_brush_->GetScreenPosition() - GetScreenPosition();
		p.y += button_brush_->GetMaxHeight();
		PopupMenu(menu_brushes_, p);
	}

	// Mirror
	else if (id == "pgfx_mirror")
	{
		// Mirror X
		image()->mirror(false);

		// Update UI
		gfx_canvas_->updateImageTexture();
		gfx_canvas_->Refresh();

		// Update variables
		image_data_modified_ = true;
		setModified();
	}

	// Flip
	else if (id == "pgfx_flip")
	{
		// Mirror Y
		image()->mirror(true);

		// Update UI
		gfx_canvas_->updateImageTexture();
		gfx_canvas_->Refresh();

		// Update variables
		image_data_modified_ = true;
		setModified();
	}

	// Rotate
	else if (id == "pgfx_rotate")
	{
		// Prompt for rotation angle
		wxString angles[] = { "90", "180", "270" };
		int      choice   = wxGetSingleChoiceIndex("Select rotation angle", "Rotate", 3, angles, 0);

		// Rotate image (choice is -1 if the prompt was cancelled)
		switch (choice)
		{
		case 0: image()->rotate(90); break;
		case 1: image()->rotate(180); break;
		case 2: image()->rotate(270); break;
		default: break;
		}

		// Update UI
		gfx_canvas_->updateImageTexture();
		gfx_canvas_->Refresh();

		// Update variables
		image_data_modified_ = true;
		setModified();
	}

	// Translate
	else if (id == "pgfx_remap")
	{
		// Create translation editor dialog
		auto                    pal = MainEditor::currentPalette();
		TranslationEditorDialog ted(theMainWindow, *pal, " Colour Remap", &gfx_canvas_->image());

		// Create translation to edit
		ted.openTranslation(prev_translation_);

		// Show the dialog
		if (ted.ShowModal() == wxID_OK)
		{
			// Apply translation to image
			image()->applyTranslation(&ted.getTranslation(), pal);

			// Update UI (the original called updateImageTexture twice here;
			// the redundant second call has been removed)
			gfx_canvas_->updateImageTexture();
			gfx_canvas_->Refresh();

			// Update variables
			image_data_modified_ = true;
			setModified();
			prev_translation_.copy(ted.getTranslation());
		}
	}

	// Colourise
	else if (id == "pgfx_colourise")
	{
		auto               pal = MainEditor::currentPalette();
		GfxColouriseDialog gcd(theMainWindow, entry_, *pal);
		gcd.setColour(last_colour);

		// Show colourise dialog
		if (gcd.ShowModal() == wxID_OK)
		{
			// Colourise image
			image()->colourise(gcd.colour(), pal);

			// Update UI
			gfx_canvas_->updateImageTexture();
			gfx_canvas_->Refresh();

			// Update variables
			image_data_modified_ = true;
			Refresh();
			setModified();
		}
		last_colour = gcd.colour().toString(ColRGBA::StringFormat::RGB);
	}

	// Tint
	else if (id == "pgfx_tint")
	{
		auto          pal = MainEditor::currentPalette();
		GfxTintDialog gtd(theMainWindow, entry_, *pal);
		gtd.setValues(last_tint_colour, last_tint_amount);

		// Show tint dialog
		if (gtd.ShowModal() == wxID_OK)
		{
			// Tint image
			image()->tint(gtd.colour(), gtd.amount(), pal);

			// Update UI
			gfx_canvas_->updateImageTexture();
			gfx_canvas_->Refresh();

			// Update variables
			image_data_modified_ = true;
			Refresh();
			setModified();
		}
		last_tint_colour = gtd.colour().toString(ColRGBA::StringFormat::RGB);
		last_tint_amount = (int)(gtd.amount() * 100.0);
	}

	// Crop
	else if (id == "pgfx_crop")
	{
		auto          image = this->image();
		auto          pal   = MainEditor::currentPalette();
		GfxCropDialog gcd(theMainWindow, image, pal);

		// Show crop dialog
		if (gcd.ShowModal() == wxID_OK)
		{
			// Prompt to adjust offsets
			auto crop = gcd.cropRect();
			if (crop.tl.x > 0 || crop.tl.y > 0)
			{
				if (wxMessageBox(
						"Do you want to adjust the offsets? This will keep the graphic in the same relative "
						"position it was before cropping.",
						"Adjust Offsets?",
						wxYES_NO)
					== wxYES)
				{
					image->setXOffset(image->offset().x - crop.tl.x);
					image->setYOffset(image->offset().y - crop.tl.y);
				}
			}

			// Crop image
			image->crop(crop.x1(), crop.y1(), crop.x2(), crop.y2());

			// Update UI
			gfx_canvas_->updateImageTexture();
			gfx_canvas_->Refresh();

			// Update variables
			image_data_modified_ = true;
			Refresh();
			setModified();
		}
	}

	// alPh/tRNS
	else if (id == "pgfx_alph" || id == "pgfx_trns")
	{
		setModified();
		Refresh();
	}

	// Optimize PNG
	else if (id == "pgfx_pngopt")
	{
		// This is a special case. If we set the entry as modified, SLADE will prompt
		// to save it, rewriting the entry and cancelling the optimization done...
		if (EntryOperations::optimizePNG(entry_))
			setModified(false);
		else
			wxMessageBox(
				"Warning: Couldn't optimize this image, check console log for info",
				"Warning",
				wxOK | wxCENTRE | wxICON_WARNING);
		Refresh();
	}

	// Extract all
	else if (id == "pgfx_extract")
	{
		extractAll();
	}

	// Convert
	else if (id == "pgfx_convert")
	{
		GfxConvDialog gcd(theMainWindow);
		gcd.CenterOnParent();
		gcd.openEntry(entry_);

		gcd.ShowModal();

		if (gcd.itemModified(0))
		{
			// Get image and conversion info
			auto image  = gcd.itemImage(0);
			auto format = gcd.itemFormat(0);

			// Write converted image back to entry
			format->saveImage(*image, entry_data_, gcd.itemPalette(0));
			// This makes the "save" button (and the setModified stuff) redundant and confusing!
			// The alternative is to save to entry effectively (uncomment the importMemChunk line)
			// but remove the setModified and image_data_modified lines, and add a call to refresh
			// to get the PNG tRNS status back in sync.
			// entry->importMemChunk(entry_data);
			image_data_modified_ = true;
			setModified();

			// Fix tRNS status if we converted to paletted PNG
			int MENU_GFXEP_PNGOPT      = SAction::fromId("pgfx_pngopt")->wxId();
			int MENU_GFXEP_ALPH        = SAction::fromId("pgfx_alph")->wxId();
			int MENU_GFXEP_TRNS        = SAction::fromId("pgfx_trns")->wxId();
			int MENU_ARCHGFX_EXPORTPNG = SAction::fromId("arch_gfx_exportpng")->wxId();
			if (format->name() == "PNG")
			{
				ArchiveEntry temp;
				temp.importMemChunk(entry_data_);
				temp.setType(EntryType::fromId("png"));
				menu_custom_->Enable(MENU_GFXEP_ALPH, true);
				menu_custom_->Enable(MENU_GFXEP_TRNS, true);
				menu_custom_->Check(MENU_GFXEP_TRNS, EntryOperations::gettRNSChunk(&temp));
				menu_custom_->Enable(MENU_ARCHGFX_EXPORTPNG, false);
				menu_custom_->Enable(MENU_GFXEP_PNGOPT, true);
				toolbar_->enableGroup("PNG", true);
			}
			else
			{
				menu_custom_->Enable(MENU_GFXEP_ALPH, false);
				menu_custom_->Enable(MENU_GFXEP_TRNS, false);
				menu_custom_->Enable(MENU_ARCHGFX_EXPORTPNG, true);
				menu_custom_->Enable(MENU_GFXEP_PNGOPT, false);
				toolbar_->enableGroup("PNG", false);
			}

			// Refresh
			this->image()->open(entry_data_, 0, format->id());
			gfx_canvas_->Refresh();
		}
	}

	// Unknown action
	else
		return false;

	// Action handled
	return true;
}
Пример #23
0
// ######################################################################
// Builds the raw descriptor vector (itsFV): for every submap of the attached
// complex channel, one feature value is sampled — either at the fovea centre
// or at the most salient point inside the fovea window of that submap — and
// appended to itsFV.
void DescriptorVec::buildRawDV()
{

  // NOTE(review): hardcoded to true, so the "!salientLocationWithinSubmaps"
  // branch below (whole-saliency-map search) is currently dead code.
  bool salientLocationWithinSubmaps = true;
  Point2D<int> objSalientLoc(-1,-1);  //the feature location

  const LevelSpec lspec = itsComplexChannel->getModelParamVal<LevelSpec>("LevelSpec");
  const int smlevel = lspec.mapLevel();

  // Scale fovea location and size from image coordinates down to
  // saliency-map coordinates (divide by 2^smlevel; +0.49 rounds to nearest).
  int x=int(itsFoveaLoc.i / double(1 << smlevel) + 0.49);
  int y=int(itsFoveaLoc.j / double(1 << smlevel) + 0.49);

  int foveaW = int(itsFoveaSize.getVal().w() / double(1 << smlevel) + 0.49);
  int foveaH = int(itsFoveaSize.getVal().h() / double(1 << smlevel) + 0.49);

  // Top-left corner of the fovea window, centred on (x, y)
  int tl_x = x - (foveaW/2);
  int tl_y = y - (foveaH/2);

  Dims mapDims = itsComplexChannel->getSubmap(0).getDims();

  //Shift the fovea location so we don't go outside the image
  //Shift the fovea position if necessary
  if (tl_x < 0) tl_x = 0; if (tl_y < 0) tl_y = 0;
  if (tl_x+foveaW > mapDims.w()) tl_x = mapDims.w() - foveaW;
  if (tl_y+foveaH > mapDims.h()) tl_y = mapDims.h() - foveaH;

  if (!salientLocationWithinSubmaps)
  {
    //Find the most salient location within the fovea
    Image<float> SMap = itsComplexChannel->getOutput();

    Image<float> tmp = SMap; //TODO need to resize to fovea
    //Find the max location within the fovea

    float maxVal; Point2D<int> maxLoc;
    findMax(tmp, maxLoc, maxVal);
    //convert back to original SMap cordinates
   // objSalientLoc.i=tl_x+maxLoc.i;
   // objSalientLoc.j=tl_y+maxLoc.j;
    objSalientLoc.i=x;
    objSalientLoc.j=y;
    itsAttentionLoc = objSalientLoc;
  }

  //Go through all the submaps building the DV
  itsFV.clear(); //clear the FV
  uint numSubmaps = itsComplexChannel->numSubmaps();
  for (uint i = 0; i < numSubmaps; i++)
  {
    //Image<float> submap = itsComplexChannel->getSubmap(i);
    Image<float> submap = itsComplexChannel->getRawCSmap(i);

    // resize submap to fixed scale if necessary:
    if (submap.getWidth() > mapDims.w())
      submap = downSize(submap, mapDims);
    else if (submap.getWidth() < mapDims.w())
      submap = rescale(submap, mapDims); //TODO convert to  quickInterpolate


    if (salientLocationWithinSubmaps) //get the location from the salient location within each submap
    {
      Image<float> tmp = submap;
      //get only the fovea region

      if (foveaW < tmp.getWidth()) //crop if our fovea is smaller
        tmp = crop(tmp, Point2D<int>(tl_x, tl_y), Dims(foveaW, foveaH));
     // tmp = maxNormalize(tmp, 0.0F, 10.0F, VCXNORM_MAXNORM);  //find salient locations

      //Find the max location within the fovea
      float maxVal; Point2D<int> maxLoc; findMax(tmp, maxLoc, maxVal);
      //LINFO("%i: Max val %f, loc(%i,%i)", i, maxVal, maxLoc.i, maxLoc.j);

      // Convert fovea-relative max location back to submap coordinates
      objSalientLoc.i=tl_x+maxLoc.i;
      objSalientLoc.j=tl_y+maxLoc.j;

    }

    // Clamp the sample location to the submap bounds
    if (objSalientLoc.i < 0) objSalientLoc.i = 0;
    if (objSalientLoc.j < 0) objSalientLoc.j = 0;

    if (objSalientLoc.i > submap.getWidth()-1) objSalientLoc.i = submap.getWidth()-1;
    if (objSalientLoc.j > submap.getHeight()-1) objSalientLoc.j = submap.getHeight()-1;



   // LINFO("Location from %i,%i: (%i,%i)", objSalientLoc.i, objSalientLoc.j,
    //    submap.getWidth(), submap.getHeight());
    // One feature value per submap goes into the descriptor vector
    float featureVal = submap.getVal(objSalientLoc.i,objSalientLoc.j);
    itsFV.push_back(featureVal);
 //   SHOWIMG(rescale(submap, 255, 255));

  }
}
Пример #24
0
/*
 * Lane-detection pipeline for a single video frame.
 *
 * Crops the frame to its lower half (the road region), converts it to
 * grayscale, smooths it, runs Canny edge detection, and feeds the edge image
 * to a probabilistic Hough transform; the resulting line segments are handed
 * to processLanes() for lane extraction/drawing.  Optionally publishes the
 * frame via POSIX shared memory.
 */
static void process_image_common(IplImage *frame)
{
  CvSize video_size;
#if defined(USE_POSIX_SHARED_MEMORY)
  video_size.height = *shrd_ptr_height;
  video_size.width  = *shrd_ptr_width;
#else
  // XXX These parameters should be set ROS parameters
  video_size.height = frame->height;
  video_size.width  = frame->width;
#endif
  /* Working images cover only the bottom half of the frame. */
  CvSize    frame_size = cvSize(video_size.width, video_size.height/2);
  IplImage *temp_frame = cvCreateImage(frame_size, IPL_DEPTH_8U, 3);
  IplImage *gray       = cvCreateImage(frame_size, IPL_DEPTH_8U, 1);
  IplImage *edges      = cvCreateImage(frame_size, IPL_DEPTH_8U, 1);

  CvMemStorage *houghStorage = cvCreateMemStorage(0);

  /* NOTE: the original code also initialized a CvFont and built a
   * half-resolution copy of the frame (cvPyrDown into half_frame); neither
   * result was ever used, so that dead work has been removed. */

  /* we're interested only in the road below the horizon - so crop the top image portion off */
  crop(frame, temp_frame, cvRect(0, frame_size.height, frame_size.width, frame_size.height));
  cvCvtColor(temp_frame, gray, CV_BGR2GRAY); // convert to grayscale

  /* Perform a Gaussian blur & detect edges */
  // smoothing image more strongly than the original program
  cvSmooth(gray, gray, CV_GAUSSIAN, 15, 15);
  cvCanny(gray, edges, CANNY_MIN_TRESHOLD, CANNY_MAX_TRESHOLD);

  /* do Hough transform to find lanes */
  double rho = 1;
  double theta = CV_PI/180;
  CvSeq *lines = cvHoughLines2(edges, houghStorage, CV_HOUGH_PROBABILISTIC,
                               rho, theta, HOUGH_TRESHOLD, HOUGH_MIN_LINE_LENGTH, HOUGH_MAX_LINE_GAP);

  processLanes(lines, edges, temp_frame, frame);

#ifdef SHOW_DETAIL
  /* show middle line */
  cvLine(temp_frame, cvPoint(frame_size.width/2, 0),
         cvPoint(frame_size.width/2, frame_size.height), CV_RGB(255, 255, 0), 1);

  // cvShowImage("Gray", gray);
  // cvShowImage("Edges", edges);
  // cvShowImage("Color", temp_frame);
  // cvShowImage("temp_frame", temp_frame);
  // cvShowImage("frame", frame);
#endif

#if defined(USE_POSIX_SHARED_MEMORY)
  setImage_toSHM(frame);
#endif

#ifdef SHOW_DETAIL
  // cvMoveWindow("Gray", 0, 0);
  // cvMoveWindow("Edges", 0, frame_size.height+25);
  // cvMoveWindow("Color", 0, 2*(frame_size.height+25));
#endif

  /* Release everything allocated for this frame. */
  cvReleaseMemStorage(&houghStorage);
  cvReleaseImage(&gray);
  cvReleaseImage(&edges);
  cvReleaseImage(&temp_frame);
}
// Copies the current values of interest from the given Memory's blocks
// (walk engine, body model, sensors, joint angles/commands, odometry, world
// objects) into the "graphable" block so the plot window can graph them.
// Every source block is looked up by name and silently skipped when absent.
void PlotWindow::populateGraphableBlock(Memory &memory) {
  GraphableBlock *graphable = NULL;
  memory.getOrAddBlockByName(graphable,"graphable");

  // Walk-engine state: foot targets, ZMP/pendulum tracking, step phase.
  WalkEngineBlock *walk_engine;
  memory.getBlockByName(walk_engine,"walk_engine",false);
  if (walk_engine != NULL) {
    graphable->addData("abs_left_foot.x",walk_engine->abs_left_foot_.translation.x);
    graphable->addData("abs_left_foot.y",walk_engine->abs_left_foot_.translation.y);
    graphable->addData("abs_left_foot.z",walk_engine->abs_left_foot_.translation.z);
    graphable->addData("abs_left_foot.rot",walk_engine->abs_left_foot_.rotation.getZAngle());
    graphable->addData("abs_right_foot.x",walk_engine->abs_right_foot_.translation.x);
    graphable->addData("abs_right_foot.y",walk_engine->abs_right_foot_.translation.y);
    graphable->addData("abs_right_foot.z",walk_engine->abs_right_foot_.translation.z);
    graphable->addData("abs_right_foot.rot",walk_engine->abs_right_foot_.rotation.getZAngle());

    graphable->addData("sensor_zmp.x",walk_engine->sensor_zmp_.x);
    graphable->addData("sensor_zmp.y",walk_engine->sensor_zmp_.y);
    graphable->addData("current_state_zmp.x",walk_engine->current_state_.zmp_.x);
    graphable->addData("current_state_zmp.y",walk_engine->current_state_.zmp_.y);
    graphable->addData("current_state_pen_pos.x",walk_engine->current_state_.pen_pos_.x);
    graphable->addData("current_state_pen_pos.y",walk_engine->current_state_.pen_pos_.y);
    graphable->addData("desired_state_zmp.x",walk_engine->desired_next_state_.zmp_.x);
    graphable->addData("desired_state_zmp.y",walk_engine->desired_next_state_.zmp_.y);
    graphable->addData("desired_state_pen_pos.x",walk_engine->desired_next_state_.pen_pos_.x);
    graphable->addData("desired_state_pen_pos.y",walk_engine->desired_next_state_.pen_pos_.y);
    graphable->addData("desired_state_pen_vel.x",walk_engine->desired_next_state_.pen_vel_.x);
    graphable->addData("desired_state_pen_vel.y",walk_engine->desired_next_state_.pen_vel_.y);
    graphable->addData("desired_state_open_zmp.x",walk_engine->desired_next_without_closed_loop_.zmp_.x);
    graphable->addData("desired_state_open_zmp.y",walk_engine->desired_next_without_closed_loop_.zmp_.y);
    graphable->addData("desired_state_open_pen_pos.x",walk_engine->desired_next_without_closed_loop_.pen_pos_.x);
    graphable->addData("desired_state_open_pen_pos.y",walk_engine->desired_next_without_closed_loop_.pen_pos_.y);
    graphable->addData("desired_state_open_pen_vel.x",walk_engine->desired_next_without_closed_loop_.pen_vel_.x);
    graphable->addData("desired_state_open_pen_vel.y",walk_engine->desired_next_without_closed_loop_.pen_vel_.y);
    graphable->addData("ref_zmp.x",walk_engine->zmp_ref_[0].x);
    graphable->addData("ref_zmp.y",walk_engine->zmp_ref_[0].y);
    graphable->addData("delayed_zmp.x",walk_engine->delayed_zmp_state_.x);
    graphable->addData("delayed_zmp.y",walk_engine->delayed_zmp_state_.y);
    graphable->addData("delayed_com.x",walk_engine->delayed_pen_state_.x);
    graphable->addData("delayed_com.y",walk_engine->delayed_pen_state_.y);
    graphable->addData("sensor_com.x",walk_engine->sensor_pen_.x);
    graphable->addData("sensor_com.y",walk_engine->sensor_pen_.y);
    
    graphable->addData("swing_target.x",walk_engine->swing_foot_.translation.x);
    graphable->addData("swing_target.y",walk_engine->swing_foot_.translation.y);
    graphable->addData("swing_target.z",walk_engine->swing_foot_.translation.z);
    graphable->addData("swing_target.rot",walk_engine->swing_foot_.rotation.getZAngle());

    graphable->addData("step_current.x",walk_engine->step_current_.position_.translation.x);
    graphable->addData("step_current.y",walk_engine->step_current_.position_.translation.y);
    graphable->addData("step_current.rot",RAD_T_DEG * walk_engine->step_current_.position_.rotation);

    // Encode the support foot as -10 (left) / +10 (right) / 0 (double support)
    // so it plots on the same axes as the other signals.
    float support_foot;
    if (walk_engine->step_current_.is_left_foot_)
      support_foot = -10;
    else
      support_foot = 10;
    WalkParamBlock *param_block(NULL);
    memory.getBlockByName(param_block,"walk_param",false);
    FrameInfoBlock *frame_info(NULL);
    memory.getBlockByName(frame_info,"frame_info",false);
    if (param_block != NULL && frame_info != NULL) {
      if (frame_info->frame_id < walk_engine->step_current_.frame_ + walk_engine->num_double_support_frames_)
        support_foot = 0;
  
      // Fraction of the current step phase elapsed, clamped to [-10, 10]
      float phase_frac = (frame_info->frame_id - walk_engine->step_current_.frame_) / (float)(walk_engine->step_next_.frame_ - walk_engine->step_current_.frame_);
      //std::cout << phase_frac << std::endl;
      phase_frac = crop(phase_frac,-10,10);

      //float single_support_frac = (phase_frac - param_block->params_.double_support_frac_ - 0.01 / param_block->params_.phase_length_) / (1.0 - param_block->params_.double_support_frac_);
      // Guard against a zero/negative step duration before dividing
      float denom = walk_engine->step_next_.frame_ - walk_engine->step_current_.frame_;
      if (denom <= 0)
        denom = 1;
      float single_support_frac = ((int)frame_info->frame_id - (int)walk_engine->step_current_.frame_ - (int)walk_engine->num_double_support_frames_) / denom;


      graphable->addData("phase_frac",phase_frac);
      graphable->addData("single_support_frac",single_support_frac);
    }
    graphable->addData("support_foot",support_foot);
  }

  // Body model: centre of mass and torso/foot tilt-roll estimates.
  BodyModelBlock *body_model;
  memory.getBlockByName(body_model,"body_model",false);
  if (body_model != NULL) {
    graphable->addData("com.x",body_model->center_of_mass_.x);
    graphable->addData("com.y",body_model->center_of_mass_.y);
    graphable->addData("com.z",body_model->center_of_mass_.z);
     
    if (walk_engine != NULL) {
      // Offsets from the abs frame to the stance/swing foot frames depend on
      // which foot is currently the support foot.
      Vector3<float> abs_to_stance_offset;
      Vector3<float> abs_to_swing_offset;
      if (walk_engine->step_current_.is_left_foot_) {
        abs_to_stance_offset = -body_model->abs_parts_[BodyPart::left_foot].translation;
        abs_to_swing_offset = -body_model->abs_parts_[BodyPart::right_foot].translation;
      } else {
        abs_to_stance_offset = -body_model->abs_parts_[BodyPart::right_foot].translation;
        abs_to_swing_offset = -body_model->abs_parts_[BodyPart::left_foot].translation;
      }

      graphable->addData("abs_to_stance_offset.x",abs_to_stance_offset.x);
      graphable->addData("abs_to_stance_offset.y",abs_to_stance_offset.y);
      graphable->addData("abs_to_swing_offset.x",abs_to_swing_offset.x);
      graphable->addData("abs_to_swing_offset.y",abs_to_swing_offset.y);

      // convert from abs to stance
      Pose3D com(0,0,0);
      com.translation = body_model->center_of_mass_ + abs_to_stance_offset;
      // convert from stance back to global
      com = com.relativeToGlobal(walk_engine->global_frame_offset_);
      com.translation.z += robot_dimensions_.footHeight;
      graphable->addData("global_com.x",com.translation.x);
      graphable->addData("global_com.y",com.translation.y);
      graphable->addData("global_com.z",com.translation.z);
    }

    graphable->addData("sensor_tilt", RAD_T_DEG*body_model->sensors_tilt_roll_.tilt_);
    graphable->addData("sensor_roll", RAD_T_DEG*body_model->sensors_tilt_roll_.roll_);
    graphable->addData("left_foot_tilt", RAD_T_DEG*body_model->left_foot_body_tilt_roll_.tilt_);
    graphable->addData("left_foot_roll",RAD_T_DEG* body_model->left_foot_body_tilt_roll_.roll_);
    graphable->addData("right_foot_tilt", RAD_T_DEG*body_model->right_foot_body_tilt_roll_.tilt_);
    graphable->addData("right_foot_roll", RAD_T_DEG*body_model->right_foot_body_tilt_roll_.roll_);
  }

  // Processed (filtered) sensor values.
  SensorBlock* sensors;
  memory.getBlockByName(sensors,"processed_sensors",false);
  if (sensors != NULL){
    graphable->addData("sensor_accel.x",sensors->values_[accelX]);
    graphable->addData("sensor_accel.y",sensors->values_[accelY]);
    graphable->addData("sensor_accel.z",sensors->values_[accelZ]);
    graphable->addData("sensor_tilt",sensors->values_[angleY]);
    graphable->addData("sensor_roll",sensors->values_[angleX]);
  }
  
  // Raw (unfiltered) sensor values for comparison against the processed ones.
  SensorBlock* raw_sensors;
  memory.getBlockByName(raw_sensors,"raw_sensors",false);
  if (raw_sensors != NULL) {
    graphable->addData("raw_sensor_tilt",raw_sensors->values_[angleY]);
    graphable->addData("raw_sensor_roll",raw_sensors->values_[angleX]);
  }

  // Measured leg joint angles (degrees).
  JointBlock *joint_angles;
  memory.getBlockByName(joint_angles,"processed_joint_angles",false);
  if (joint_angles != NULL) {
    graphable->addData("sensed_lhiproll",RAD_T_DEG * joint_angles->values_[LHipRoll]);
    graphable->addData("sensed_rhiproll",RAD_T_DEG * joint_angles->values_[RHipRoll]);
    graphable->addData("sensed_lhippitch",RAD_T_DEG * joint_angles->values_[LHipPitch]);
    graphable->addData("sensed_rhippitch",RAD_T_DEG * joint_angles->values_[RHipPitch]);

    graphable->addData("sensed_lanklepitch",RAD_T_DEG * joint_angles->values_[LAnklePitch]);
    graphable->addData("sensed_ranklepitch",RAD_T_DEG * joint_angles->values_[RAnklePitch]);
    graphable->addData("sensed_lankleroll",RAD_T_DEG * joint_angles->values_[LAnkleRoll]);
    graphable->addData("sensed_rankleroll",RAD_T_DEG * joint_angles->values_[RAnkleRoll]);
  }
  
  // Commanded leg joint angles (degrees), for sensed-vs-commanded plots.
  JointCommandBlock *joint_commands;
  memory.getBlockByName(joint_commands,"processed_joint_commands",false);
  if (joint_commands != NULL) {
    graphable->addData("commanded_lhiproll",RAD_T_DEG * joint_commands ->angles_[LHipRoll]);
    graphable->addData("commanded_rhiproll",RAD_T_DEG * joint_commands ->angles_[RHipRoll]);
    graphable->addData("commanded_lhippitch",RAD_T_DEG * joint_commands ->angles_[LHipPitch]);
    graphable->addData("commanded_rhippitch",RAD_T_DEG * joint_commands ->angles_[RHipPitch]);
    
    graphable->addData("commanded_lkneepitch",RAD_T_DEG * joint_commands ->angles_[LKneePitch]);
    graphable->addData("commanded_rkneepitch",RAD_T_DEG * joint_commands ->angles_[RKneePitch]);
    
    graphable->addData("commanded_lanklepitch",RAD_T_DEG * joint_commands->angles_[LAnklePitch]);
    graphable->addData("commanded_ranklepitch",RAD_T_DEG * joint_commands->angles_[RAnklePitch]);
    graphable->addData("commanded_lankleroll",RAD_T_DEG * joint_commands->angles_[LAnkleRoll]);
    graphable->addData("commanded_rankleroll",RAD_T_DEG * joint_commands->angles_[RAnkleRoll]);
  }
  
  OdometryBlock *odometry;
  memory.getBlockByName(odometry,"vision_odometry",false);
  if (odometry != NULL) {
    graphable->addData("odom.y",odometry->displacement.translation.y);
  }

  // Ball position from the world model, both fused and vision-only.
  WorldObjectBlock *world_object;
  memory.getBlockByName(world_object,"world_objects",false);
  if (world_object != NULL) {
    WorldObject &ball = world_object->objects_[WO_BALL];
    graphable->addData("rel_ball.y",ball.relPos.y);
    graphable->addData("vision_rel_ball.y",ball.visionDistance * sin(ball.visionBearing));
  }
}
Пример #26
0
/*
 * Reads input.bmp plus parameters from input.txt, then:
 *   1. finds colour clusters and writes their sizes to output.txt,
 *   2. writes a blurred copy of the image to output_blur.bmp,
 *   3. writes one cropped image per cluster (based on output.bmp's name).
 * Returns 0 on success.
 */
int main()
{
  FILE* intr = fopen("input.txt", "r");
  struct bmp_fileheader f_head;
  struct bmp_infoheader i_head;
  struct pixel ref, off;               /* reference and offset pixels */
  struct pixel **matrix, **matrix_cpy; /* pixel matrix and a copy of it */
  char red, padding = 0;               /* throwaway separator char and padding byte */
  char output_name[20] = {"output.bmp"};
  double P;                            /* percentage used for cluster detection */
  unsigned int i, j, k;
  unsigned int **matrix_clus, nr_clus = 0; /* cluster-marking matrix and cluster count */
  struct cluster *clus;                /* per-cluster metadata */

  /* Reference pixel, offset pixel and percentage ("red" swallows separators). */
  fscanf(intr, "%hhu%c%hhu%c%hhu%c", &ref.R, &red, &ref.G, &red, &ref.B, &red);
  fscanf(intr, "%hhu%c%hhu%c%hhu", &off.R, &red, &off.G, &red, &off.B);
  fscanf(intr, "%lf", &P);
  fclose(intr); /* input.txt is no longer needed */

/* Task 1 starts here: */
  FILE* g1 =fopen("output.txt", "w");
  FILE* bmp = fopen("input.bmp", "rb");
  /* Read the BMP file header: */
  fread(&f_head, sizeof(struct bmp_fileheader), 1, bmp);

  /* Read the BMP info header: */
  fread(&i_head, sizeof(struct bmp_infoheader), 1, bmp);
  i_head.biXPelsPerMeter = 0;
  i_head.biYPelsPerMeter = 0;

  matrix = calloc(i_head.height, sizeof(struct pixel*));
  for(i = 0; i < (unsigned int)i_head.height; i++)
    matrix[i] = calloc(i_head.width, sizeof(struct pixel));

  matrix_cpy = calloc(i_head.height, sizeof(struct pixel*));
  for(i = 0; i < (unsigned int)i_head.height; i++)
    matrix_cpy[i] = calloc(i_head.width, sizeof(struct pixel));

  matrix_clus = calloc(i_head.height, sizeof(int*));
  for(i = 0; i < (unsigned int)i_head.height; i++)
    matrix_clus[i] = calloc(i_head.width, sizeof(int));

  clus = calloc(i_head.height * i_head.width, sizeof(struct cluster));

/* Read the image matrix */
  fseek(bmp, f_head.imageDataOffset, SEEK_SET);
  unsigned int poz = f_head.imageDataOffset; /* file offset of the current row */
  for(i = 0; i < (unsigned int)i_head.height; i++)
  {
    for(j = 0; j < (unsigned int)i_head.width; j++)
    {
      pix_read(bmp, &matrix[i][j]);
      matrix_cpy[i][j] = matrix[i][j]; /* keep a copy of the image matrix */
    }
    poz += 3 * i_head.width + (4 - (3 * (i_head.width)) % 4) % 4; /* row + padding */
    /* BUG FIX: the original called fseek(bmp, 0, poz), passing the target
     * offset as the "whence" argument (so rows' padding was never skipped);
     * seek to "poz" relative to the start of the file instead. */
    fseek(bmp, poz, SEEK_SET);
  }
  fclose(bmp); /* done reading the input image */

  /* Print the answer for task 1: */
  struct param_clus x; /* bundles the parameters of the cluster function */
  x.f = g1;
  x.m = matrix;
  x.ref = ref;
  x.off = off;
  x.h = i_head.height;
  x.w = i_head.width;
  x.P = P;
  x.a = matrix_clus;
  x.v = clus;
  x.nr = &nr_clus;
  dim_clus(x); /* finds the clusters and prints their sizes
                  in increasing order */
  fclose(g1);

/* Task 2 starts here: */
  char nFisOut[20] = {"output_blur.bmp"}; /* name of the blurred image */

  FILE* g2 = fopen(nFisOut, "wb");

/* Generate and write the blurred matrix */
  blur(matrix_cpy, i_head.height, i_head.width, clus, nr_clus, matrix_clus);
  fwrite(&f_head, sizeof(struct bmp_fileheader), 1, g2);
  fwrite(&i_head, sizeof(struct bmp_infoheader), 1, g2);
  for(i = 0; i < (unsigned int)i_head.height; i++)
  {
    for(j = 0; j < (unsigned int)i_head.width; j++)
    {
      fwrite(&matrix_cpy[i][j].B, 1, 1, g2);
      fwrite(&matrix_cpy[i][j].G, 1, 1, g2);
      fwrite(&matrix_cpy[i][j].R, 1, 1, g2);
    }
    for(k = 0; k < (4 - (3 * (i_head.width)) % 4) % 4; k++)
      fwrite(&padding, 1, 1, g2); /* append the padding bytes */
  }
  fclose(g2);

/* Task 3 starts here: */
  /* Sort the clusters by order of appearance (index): */
  qsort(clus, nr_clus, sizeof(struct cluster), compare);
  crop(matrix, clus, nr_clus, output_name); /* write one image per cluster */

  for(i = 0; i < (unsigned int)i_head.height; i++)
    free(matrix[i]);
  free(matrix);

  for(i = 0; i < (unsigned int)i_head.height; i++)
    free(matrix_cpy[i]);
  free(matrix_cpy);

  for(i = 0; i < (unsigned int)i_head.height; i++)
    free(matrix_clus[i]);
  free(matrix_clus);

  free(clus);

  return 0;
}
Пример #27
0
bool ParticleFilter::update(Mat& img, Mat& img_prev, Rect& detection_new, int use_hist, int frame_num){
//! Update step for the tracker after prediction
/*!
	Here, update() function contains measurement step, resampling step, and get the mean value of all particles.

	\param img           current frame, forwarded to measurement()
	\param img_prev      previous frame; the appearance histogram is computed
	                     from the tracker region of this image
	\param detection_new newest detector bounding box for the target
	\param use_hist      appearance-model mode: 1 = one histogram over the whole
	                     region, 2 = three stacked thirds, 3 = three overlapping
	                     halves of the region
	\param frame_num     frame index, forwarded to measurement()
	\return false when all particle weights are zero (the filter is reset via
	        initialization()), true otherwise
*/
	// generate new particles
    Rect detection;; // NOTE(review): stray second ';' is an empty statement — harmless
    Mat HSVinterpolated;
    vector<Mat> MultiHSVInterpolated(3);
    // bounding box previous frame: shift the stored boxes one slot toward the front
    for (unsigned int i = 0; i < trackerbox.size()-1; i++) {
        trackerbox.at(i) = trackerbox.at(i+1);
    }
    // bounding box current frame goes into the last slot
    trackerbox.at(trackerbox.size()-1) = detection_new;
    // calculate the average width and height of all trackers
    int width_avg=0, height_avg=0;
    for (unsigned int i = 0; i < trackerbox.size(); i++) {
        width_avg += trackerbox[i].width;
        height_avg += trackerbox[i].height;
    }
    // integer division happens before the double cast, so the cast is a no-op
    width_avg = (int)((double)(width_avg/trackerbox.size()));
    height_avg = (int)((double)(height_avg/trackerbox.size()));
    // guard against a degenerate (too small) average: fall back to the
    // initial tracker size (width_/height_ presumably set at initialization
    // — TODO confirm)
    if (width_avg < width_/2) {
        width_avg = width_;
    }
    if (height_avg < height_/2) {
        height_avg = height_;
    }
    width_tracker = width_avg;
    height_tracker = height_avg;
    // if there is no occlusion
    if (rely_detection) {
        detection = detection_new;
    }
    else{ // if there is occlusion, keep using the last trusted detection
        detection = detectionprev;
    }
    // size of tracker and template should be same to be compared
    if (width_tracker > width_template) {
        width_tracker = width_template;
    }
    if (height_tracker > height_template) {
        height_tracker = height_template;
    }
    
    // region of interest centered on the current tracker position PFTrack
    Rect crop(PFTrack.x-width_tracker/2,PFTrack.y-height_tracker/2,width_tracker,height_tracker);
    
    if(use_hist==1){ // compute one region
        // if the crop falls (partly) outside the previous frame, fall back to
        // interpolating the stored template with itself (HSVInterp presumably
        // blends two histograms — TODO confirm its exact semantics)
        if ((crop.x < 0) || (crop.x+crop.width > img_prev.cols) || (crop.y < 0) || (crop.y+crop.height > img_prev.rows) ) {
            HSVInterp(HSVTemplate, HSVTemplate, HSVinterpolated);
        }
        else{
            // histogram of the tracked region in the previous frame, blended
            // with the template histogram
            Mat imHSV = img_prev(crop);
           Mat HSVbefore;
            computeHSVHist(imHSV, HSVbefore);
            HSVInterp(HSVbefore, HSVTemplate, HSVinterpolated);
            HSVbefore.release();
       }
    }
    else if (use_hist == 2){// compute three regions
        // out-of-bounds crop: blend each sub-template with itself, as above
        if ((crop.x < 0) || (crop.x+crop.width > img_prev.cols) || (crop.y < 0) || (crop.y+crop.height > img_prev.rows) ) {
            Mat HSVTemp;
            for (unsigned int i = 0; i < MultiHSVTemplate.size(); i++) {
                HSVInterp(MultiHSVTemplate[i], MultiHSVTemplate[i], HSVTemp);
                MultiHSVInterpolated.at(i) = HSVTemp;
            }
         }
        else
        {
            // split the region into three non-overlapping horizontal thirds
            // (top / middle / bottom rows) and histogram each one
            Mat imHSV = img_prev(crop);
            vector<Mat> MultiIMHSV;
             Mat tempHSV;Mat beforetemp;
            int third = imHSV.rows/3;
            tempHSV = imHSV.rowRange(0, third);
            computeHSVHist(tempHSV, beforetemp);
            MultiIMHSV.push_back(beforetemp);
            tempHSV = imHSV.rowRange(third,third+third);
            computeHSVHist(tempHSV, beforetemp);
            MultiIMHSV.push_back(beforetemp);
            tempHSV = imHSV.rowRange(third, imHSV.rows);
            computeHSVHist(tempHSV, beforetemp);
            MultiIMHSV.push_back(beforetemp);
            tempHSV.release();
            beforetemp.release();
            imHSV.release();
            
            // blend each region histogram with its matching template
            Mat temp;
            for (unsigned int i = 0; i < MultiHSVTemplate.size(); i++) {
                HSVInterp(MultiIMHSV[i], MultiHSVTemplate[i], temp);
                MultiHSVInterpolated.at(i) = temp;
            }
         }
    }
    else if (use_hist == 3){ // compute three regions and overlap
        // out-of-bounds crop: same self-blend fallback as the other modes
        if ((crop.x < 0) || (crop.x+crop.width > img_prev.cols) || (crop.y < 0) || (crop.y+crop.height > img_prev.rows) ) {
            Mat HSVTemp;
            for (unsigned int i = 0; i < MultiHSVTemplate.size(); i++) {
                HSVInterp(MultiHSVTemplate[i], MultiHSVTemplate[i], HSVTemp);
                MultiHSVInterpolated.at(i) = HSVTemp;
            }
        }
        else
        {
            // three overlapping horizontal bands: [0, half), [third, half+third),
            // [half, rows); each histogram is min-max normalized before blending
            Mat imHSV = img_prev(crop);
            vector<Mat> MultiIMHSV(3);
             Mat tempHSV;Mat beforetemp;
             int third = imHSV.rows/3;
            int half = imHSV.rows/2;
            tempHSV = imHSV.rowRange(0, half);
            computeHSVHist(tempHSV, beforetemp);
            normalize(beforetemp, beforetemp, 0, beforetemp.rows, NORM_MINMAX, -1, Mat() );
            
            MultiIMHSV.at(0) = beforetemp;
            tempHSV = imHSV.rowRange(third,half+third);
            computeHSVHist(tempHSV, beforetemp);
            normalize(beforetemp, beforetemp, 0, beforetemp.rows, NORM_MINMAX, -1, Mat() );
            
            MultiIMHSV.at(1) = beforetemp;
            tempHSV = imHSV.rowRange(half, imHSV.rows);
            computeHSVHist(tempHSV, beforetemp);
            normalize(beforetemp, beforetemp, 0, beforetemp.rows, NORM_MINMAX, -1, Mat() );
            
            MultiIMHSV.at(2) = beforetemp;
            Mat temp;
            for (unsigned int i = 0; i < MultiHSVTemplate.size(); i++) {
                HSVInterp(MultiIMHSV[i], MultiHSVTemplate[i], temp);
                MultiHSVInterpolated.at(i) = temp;
            }
        }
    }

    // measurement step: re-weights particles_new (fills `weight`) against the
    // appearance model built above
    measurement(particles_new, detection,HSVinterpolated, MultiHSVInterpolated, img, use_hist, frame_num);
    vector<Mat>().swap(MultiHSVInterpolated); // free the vector's capacity too
    // normalize weight
    double w_sum = 0.0;
    for (unsigned int i = 0; i < numParticles; i++) {
        w_sum += weight.at(i);
    }
    // check if all particles are invalid
    if (w_sum == 0.0) {
        cerr << "[Warning] none of the particles is valid. "
        << "the particle filter is reset." << endl;
        initialization(detection);
        return false;
    }
    vector<double> normalized_weight(numParticles,0.0);
    for (unsigned int i = 0; i < numParticles; i++) {
        normalized_weight[i] = weight.at(i)/w_sum;
    }
    //weighted mean pos
    double xpos = 0.0, ypos = 0.0, xvel = 0.0, yvel = 0.0;
    for (unsigned int i = 0; i < numParticles; i++) {
        particles_temp[i][X_POS] = particles_new[i][X_POS] * normalized_weight.at(i);
        particles_temp[i][Y_POS] = particles_new[i][Y_POS] * normalized_weight.at(i);
        particles_temp[i][X_VEL] = particles_new[i][X_VEL] * normalized_weight.at(i);
        particles_temp[i][Y_VEL] = particles_new[i][Y_VEL] * normalized_weight.at(i);
        
        xpos += particles_temp[i][X_POS];
        ypos += particles_temp[i][Y_POS];
        // NOTE(review): xvel/yvel are accumulated but never used below —
        // PFTrackVel is derived from positions instead
        xvel += particles_temp[i][X_VEL];
        yvel += particles_temp[i][Y_VEL];
    }
    detectionprev = detection;
    
    // Array PFTrackSeries will store tracker position from last 4 frames
    for (int pf =  (int)(PFTrackSeries.size()-1); pf >= 1; pf--) {
        PFTrackSeries[pf] = PFTrackSeries[pf-1];
    }
    PFTrackSeries[0] = PFTrack;
    // Assign tracker positiont to the next frame
    PFTrackPrev = PFTrack;
    PFTrack.x = xpos;
    PFTrack.y = ypos;
    // Calculate tracker velocity from position
    PFTrackVel = PFTrack - PFTrackPrev;
    
    // resample particles: systematicResampling overwrites indx with the
    // indices of the particles that survive (presumably low-variance
    // systematic resampling — TODO confirm)
    vector<int> indx(numParticles,0);
    for (unsigned int i = 0; i < numParticles; i++) {
        indx.at(i) = i;
    }
    systematicResampling(normalized_weight, indx);
    for (unsigned int i =0; i < numParticles; i++) {
        int id = indx[i];
        for (int j = 0; j < NUM_STATES; j++) {
            particles[i][j] = particles_new[id][j];
        }
    }

    return true;
}
Пример #28
0
// Top-level driver of the eblearn object-detection tool: loads a .conf
// configuration, spawns N detection threads, feeds them frames from a
// configurable camera source (directory, video, v4l2, opencv, shmem, kinect,
// datasource...), collects bounding boxes / bootstrapping samples, optionally
// displays and saves results, then shuts everything down.
// Usage: detect <config file> [directory or file]
MAIN_QTHREAD(int, argc, char **, argv) { // macro to enable multithreaded gui
#else // NOTE(review): the matching #ifdef (GUI on/off switch) is above this excerpt
  int main(int argc, char **argv) { // regular main without gui
#endif
    try {
      // check input parameters
      if ((argc != 2) && (argc != 3) ) {
	cerr << "warning: wrong number of parameters." << endl;
	cerr << "usage: detect <config file> [directory or file]" << endl;
	// NOTE(review): deliberately falls through on a bad argument count —
	// the early return below is commented out
	//	return -1;
      }
#ifdef __LINUX__
      feenableexcept(FE_DIVBYZERO | FE_INVALID); // enable float exceptions
#endif
      // load configuration
      configuration	conf(argv[1], true, true, false);
      if (conf.exists_true("fixed_randomization"))
	cout << "Using fixed seed: " << fixed_init_drand() << endl;
      else
	cout << "Using random seed: " << dynamic_init_drand(argc, argv) << endl;
      // default the trained-files directories to the config file's directory
      if (!conf.exists("root2") || !conf.exists("current_dir")) {
	string dir;
	dir << dirname(argv[1]) << "/";
	cout << "Looking for trained files in: " << dir << endl;
	conf.set("root2", dir.c_str());
	conf.set("current_dir", dir.c_str());
      }
      conf.set("run_type", "detect"); // tell conf that we are in detect mode
      conf.resolve(); // manual call to resolving variable
      bool              silent        = conf.exists_true("silent");
      if (conf.exists_true("show_conf") && !silent) conf.pretty();
      // output synchronization: wrap cout/cerr in mutex-guarded streams when
      // multiple threads share them
      bool sync = conf.exists_true("sync_outputs");
      mutex out_mutex;
      mutex_ostream mutout(std::cout, &out_mutex, "Thread M");
      mutex_ostream muterr(std::cerr, &out_mutex, "Thread M");
      ostream &mout = sync ? mutout : cout;
      ostream &merr = sync ? muterr : cerr;
      bootstrapping<t_net> boot(conf);
      // output dir
      string outdir = detection_thread<t_net>::get_output_directory(conf);
      mout << "Saving outputs to " << outdir << endl;
      // save conf to output dir (for reproducibility of this run)
      string cname = outdir;
      cname << filename(argv[1]);
      if (conf.write(cname.c_str()))
	mout << "Wrote configuration to " << cname << endl;
      // load classes of network
      idx<ubyte> classes(1,1);
      vector<string> sclasses;
      try { // try loading classes names but do not stop upon failure
	load_matrix<ubyte>(classes, conf.get_cstring("classes"));
      } catch(string &err) { merr << "warning: " << err << endl; }
      sclasses = ubyteidx_to_stringvector(classes);
      t_bbox_saving bbsaving = bbox_none;
      if (conf.exists("bbox_saving"))
	bbsaving = (t_bbox_saving) conf.get_int("bbox_saving");
      bboxes boxes(bbsaving, &outdir, mout, merr);

      uint              ipp_cores     = 1;
      if (conf.exists("ipp_cores")) ipp_cores = conf.get_uint("ipp_cores");
      ipp_init(ipp_cores); // limit IPP (if available) to 1 core
      // input/output toggles and frame geometry (-1 = keep native size)
      bool		save_video    = conf.exists_true("save_video");
      bool              save_detections = conf.exists_true("save_detections");
      int		height        = -1;
      int		width         = -1;
      if (conf.exists("input_height")) height = conf.get_int("input_height");
      if (conf.exists("input_width")) width = conf.get_int("input_width");
      bool              input_random  = conf.exists_true("input_random");
      uint              npasses       = 1;
      char              next_on_key   = 0;
      if (conf.exists("next_on_key")) {
	next_on_key = conf.get_char("next_on_key");
	mout << "Press " << next_on_key << " to process next frame." << endl;
      }
      uint skip_frames = conf.try_get_uint("skip_frames", 0);
      if (conf.exists("input_npasses"))
	npasses = conf.get_uint("input_npasses");
      string viddir;
      if (save_video) {
	viddir << outdir << "video/";
	mkdir_full(viddir);
      }
      bool precomputed_boxes = conf.exists("bbox_file");
      uint save_bbox_period = conf.try_get_uint("save_bbox_period", 500);
      // optional per-dimension crop of each grabbed frame
      idxdim crop(1, 1, 1);
      if (conf.exists("input_crop"))
	crop = string_to_idxdim(conf.get_string("input_crop"));

      string		cam_type;
#ifdef __LINUX__ // default camera for linux if not defined
      cam_type = "v4l2";
#endif
      if (conf.exists("camera"))
	cam_type = conf.get_string("camera");

      // allocate threads
      uint nthreads = 1;
      bool updated = false;
      idx<ubyte> detframe; // frame returned by detection thread
      uint frame_id = 0;
      svector<midx<t_net> > all_samples, samples; // extracted samples
      bboxes all_bbsamples, bbsamples; // boxes corresponding to samples
      if (conf.exists("nthreads"))
	nthreads = (std::max)((uint) 1, conf.get_uint("nthreads"));
      list<detection_thread<t_net>*>  threads;
      list<detection_thread<t_net>*>::iterator ithreads;
      idx<uint> total_saved(nthreads); // per-thread count of saved detections
      idx_clear(total_saved);
      mout << "Initializing " << nthreads << " detection threads." << endl;
      for (uint i = 0; i < nthreads; ++i) {
	detection_thread<t_net> *dt =
	  new detection_thread<t_net>(conf, &out_mutex, NULL, NULL, sync);
	threads.push_back(dt);
	dt->start();
      }
      // image search can be configured with a search pattern
      const char *fpattern = IMAGE_PATTERN_MAT;
      if (conf.exists("file_pattern"))
	fpattern = conf.get_cstring("file_pattern");

      // initialize camera (opencv, directory, shmem or video)
      idx<ubyte> frame(std::max(height, 1), std::max(width, 1), 3);
      camera<ubyte> *cam = NULL, *cam2 = NULL;
      if (!strcmp(cam_type.c_str(), "directory")) {
	string dir;
	if (argc >= 3) // read input dir from command line
	  dir = argv[2];
	else if (conf.exists("input_dir"))
	  dir = conf.get_string("input_dir");
	// given list
	list<string> files;
	if (conf.exists("input_list")) {
	  files = string_to_stringlist(conf.get_string("input_list"));
	  cam = new camera_directory<ubyte>(dir.c_str(), height, width,
					    input_random, npasses, mout, merr,
					    fpattern, &files);
	} else // given directory only
	  cam = new camera_directory<ubyte>(dir.c_str(), height, width,
					    input_random, npasses, mout, merr,
					    fpattern, &files);
      } else if (!strcmp(cam_type.c_str(), "opencv"))
	cam = new camera_opencv<ubyte>(-1, height, width);
#ifdef __LINUX__
      else if (!strcmp(cam_type.c_str(), "v4l2"))
	cam = new camera_v4l2<ubyte>(conf.get_cstring("device"),
				     height, width,
				     conf.exists_true("camera_grayscale"),
                                     conf.exists_true("camera_rgb"));
      else if (!strcmp(cam_type.c_str(), "mac"))
	cam = new camera_mac<ubyte>(conf.get_cstring("device"),
				     height, width,
				     conf.exists_true("camera_grayscale"),
                                     conf.exists_true("camera_rgb"));
      else if (!strcmp(cam_type.c_str(), "mcams")) {
        vector<string> devices = conf.get_all_strings("device");
	cam = new camera_mcams<ubyte>(conf, devices, height, width,
                                      conf.exists_true("camera_grayscale"),
                                      conf.exists_true("camera_rgb"));
      }
#endif
#ifdef __KINECT__
      else if (!strcmp(cam_type.c_str(), "kinect"))
	cam = new camera_kinect<ubyte>(height, width);
#endif
      else if (!strcmp(cam_type.c_str(), "shmem"))
	cam = new camera_shmem<ubyte>("shared-mem", height, width);
      else if (!strcmp(cam_type.c_str(), "video")) {
	if (argc >= 3)
	  cam = new camera_video<ubyte>
	    (argv[2], height, width, conf.get_uint("input_video_sstep"),
	     conf.get_uint("input_video_max_duration"));
	else eblerror("expected 2nd argument");
      } else if (!strcmp(cam_type.c_str(), "datasource")) {
        cam = new camera_datasource<ubyte,int>(conf);
      } else eblerror("unknown camera type, set \"camera\" in your .conf");
      // a camera directory may be used first, then switching to regular cam
      if (conf.exists_true("precamera"))
	cam2 = new camera_directory<ubyte>(conf.get_cstring("precamdir"),
					   height, width, input_random,
					   npasses, mout, merr, fpattern);
      if (conf.exists_true("camera_grayscale")) cam->set_grayscale();
      if (conf.exists_true("silent")) cam->set_silent();

      // answer variables & initializations
      bboxes bb;

      // gui
#ifdef __GUI__
      bool              bkey_msg      = false; // display key message
      bool display	     = conf.exists_bool("display");
      bool show_parts        = conf.exists_true("show_parts");
      bool bbox_show_conf = !conf.exists_false("bbox_show_conf");
      bool bbox_show_class = !conf.exists_false("bbox_show_class");
      // mindisplay     = conf.exists_bool("minimal_display");
      uint display_sleep  = 0;
      if (conf.exists("display_sleep"))
	display_sleep = conf.get_uint("display_sleep");
      // display_states = conf.exists_bool("display_states");
      // uint qstep1 = 0, qheight1 = 0, qwidth1 = 0,
      // 	qheight2 = 0, qwidth2 = 0, qstep2 = 0;
      // if (conf.exists_bool("queue1")) {
      // 	qstep1 = conf.get_uint("qstep1");
      // 	qheight1 = conf.get_uint("qheight1");
      // 	qwidth1 = conf.get_uint("qwidth1"); }
      // if (conf.exists_bool("queue2")) {
      // 	qstep2 = conf.get_uint("qstep2");
      // 	qheight2 = conf.get_uint("qheight2");
      // 	qwidth2 = conf.get_uint("qwidth2"); }
      // wid_states  = display_states ? new_window("network states"):0;
      // night_mode();
      uint wid  = display ? new_window("eblearn object recognition") : 0;
      night_mode();
      float display_transp = 0.0;
      if (conf.exists("display_bb_transparency"))
	display_transp = conf.get_float("display_bb_transparency");
      detector_gui<t_net> dgui(conf.exists_true("show_extracted"));
#endif
      // timing variables
      timer tpass, toverall, tstop;
      uint cnt = 0;
      bool stop = false, finished = false;

      // main loop: poll every detection thread, collect its results, and hand
      // it the next frame; exit once all threads report finished
      toverall.start();
      while (!finished) {
	// check for results and send new image for each thread
	uint i = 0;
	finished = true;
	for (ithreads = threads.begin();
	     ithreads != threads.end(); ++ithreads, ++i) {
	  // do nothing if thread is finished already
	  if ((*ithreads)->finished()) continue ;
	  finished = false; // a thread is not finished
	  string processed_fname;
	  uint processed_id = 0;
	  // retrieve new data if present
	  bool skipped = false;
	  updated = (*ithreads)->get_data
	    (bb, detframe, *(total_saved.idx_ptr() + i), processed_fname,
	     &processed_id, &samples, &bbsamples, &skipped);
	  if (skipped) cnt++; // a new skipped frame was received
	  // save bounding boxes
	  if (updated) {
	    idxdim d(detframe);
	    if (boot.activated()) bb.clear();
	    if (bbsaving != bbox_none) {
              if (!silent)
                mout << "Adding " << bb.size() << " boxes into new group: "
                     << processed_fname << " with id " << processed_id << endl;
	      boxes.new_group(d, &processed_fname, processed_id);
	      boxes.add(bb, d, &processed_fname, processed_id);
	      // periodically flush boxes to disk so a crash loses little work
	      if (cnt % save_bbox_period == 0) boxes.save();
	      // avoid sample accumulation if not using bootstrapping
	      if (boot.activated())
		mout << "Received " << samples.size()
		     << " bootstrapping samples." << endl;
	    }
	    if (conf.exists_true("bootstrapping_save")) {
	      all_samples.push_back_new(samples);
	      all_bbsamples.push_back_new(bbsamples);
	    }
            // datasource mode, check and log answers
            if (dynamic_cast<camera_datasource<ubyte,int>*>(cam)) {
              camera_datasource<ubyte,int>* dscam =
                  (camera_datasource<ubyte,int>*) cam;
              dscam->log_answers(bb);
            }
	    cnt++;
	    // display processed frame
#ifdef __GUI__
	    if (display) {
	      select_window(wid);
	      disable_window_updates();
	      clear_resize_window();
	      set_window_title(processed_fname.c_str());
	      uint h = 0, w = 0;
	      // display frame with resulting boxes
	      dgui.display_minimal
		(detframe, bb, ((*ithreads)->pdetect ?
				(*ithreads)->pdetect->get_labels() : sclasses),
		 h, w, 1, 0, 255, wid, show_parts, display_transp,
		 bbox_show_class, bbox_show_conf, &bbsamples);
	      // display extracted samples
	      if (boot.activated()) {
		dgui.display_preprocessed
		  (samples, bbsamples, ((*ithreads)->pdetect ?
					(*ithreads)->pdetect->get_labels() : sclasses),
		   h, w, 1, -1, 1);
	      }
	      enable_window_updates();
	      if (save_video && display) {
		string fname;
		fname << viddir << processed_fname;
		save_window(fname.c_str());
		if (!silent) mout << "saved " << fname << endl;
	      }
	    }
	    // sleep display
	    if (display_sleep > 0) {
	      mout << "sleeping for " << display_sleep << "ms." << endl;
	      millisleep(display_sleep);
	    }
#endif
            if (!silent) {
              // output info: progress, ETA and fps
              uint k = cnt, tot = cam->size() - cnt; // progress variables
              if (conf.exists("save_max")) tot = conf.get_uint("save_max");
              if (!silent) { // NOTE(review): redundant — already inside !silent
                if (save_detections) {
                  mout << "total_saved=" << idx_sum(total_saved);
                  if (conf.exists("save_max")) mout << " / " << tot;
                  mout << endl;
                }
              }
              if (boot.activated())
                mout << "total_bootstrapping=" << all_samples.size() << endl;
              mout << "remaining=" << (cam->size() - cnt)
                   << " elapsed=" << toverall.elapsed();
              if (cam->size() > 0)
                mout << " ETA=" << toverall.eta(cnt, cam->size());
              if (conf.exists("save_max") && save_detections) {
                k = idx_sum(total_saved);
                mout << " save_max_ETA=" << toverall.eta(k, tot);
              }
              mout << endl;
              mout << "i=" << cnt << " processing: " << tpass.elapsed_ms()
                   << " fps: " << cam->fps() << endl;
              // save progress
              if (!conf.exists_false("save_progress"))
                job::write_progress(k, tot);
            }
	  }
	  // check if ready to receive a new frame
	  if ((*ithreads)->available()) {
	    if (stop)
	      (*ithreads)->ask_stop(); // stop but let thread finish
	    else {
	      // grab a new frame if available
	      if (cam->empty()) {
		stop = true;
		tstop.start(); // start countdown timer
		(*ithreads)->ask_stop(); // ask this thread to stop
		millisleep(50);
	      } else {
#ifdef __GUI__
		// step-by-step mode: wait for the configured key press
		int key = gui.pop_key_pressed();
		// if thread has already received data, wait for next key
		if ((*ithreads)->fed() && next_on_key) {
		  // accept both the configured key and its other-case variant
		  // (+32 flips case in ASCII)
		  if ((int)next_on_key != key && (int)next_on_key != key + 32) {
		    if (!bkey_msg)
		      mout << "Press " << next_on_key
			   << " to process next frame." << endl;
		    bkey_msg = true;
		    continue ; // pause until key is pressed
		  } else {
		    mout << "Key pressed (" << key
			 << ") allowing next frame to process." << endl;
		    bkey_msg = false;
		    tpass.restart();
		  }
		}
#endif
		bool frame_grabbed = false;
		frame_id = cam->frame_id();
		// if the pre-camera is defined use it until empty
		if (cam2 && !cam2->empty())
		  frame = cam2->grab();
		else { // empty pre-camera, use regular camera
		  if (skip_frames > 0)
		    cam->skip(skip_frames); // skip frames if skip_frames > 0
		  if (cam->empty()) continue ;
		  if (precomputed_boxes && !save_video)
		    cam->next(); // move to next frame but without grabbing
		  else if (dynamic_cast<camera_directory<ubyte>*>(cam)) {
		    cam->grab_filename(); // just get the filename, no data
		  } else { // actually grab the frame
		    frame = cam->grab();
		    frame_grabbed = true;
		    // cropping
		    if (crop.nelements() > crop.order()) {
		      cout << "cropping frame from " << frame;
		      for (uint i = 0; i < crop.order(); ++i)
			if (crop.dim(i) > 1)
			  frame = frame.narrow(i, crop.dim(i), 0);
		      cout << " to " << frame << endl;
		    }
		  }
		}
		// send new frame to this thread (busy-wait until accepted)
		string ffname = cam->frame_fullname();
		string fname = cam->frame_name();
		if (frame_grabbed) {
		  while (!(*ithreads)->set_data(frame, ffname, fname, frame_id))
		  millisleep(5);
		} else {
		  while (!(*ithreads)->set_data(ffname, fname, frame_id))
		    millisleep(5);
		}
		// we just sent a new frame
		tpass.restart();
	      }
	    }
	  }
	}
	// global stop conditions: enough detections saved, or enough
	// bootstrapping samples collected
	if ((conf.exists("save_max") && !stop &&
	     idx_sum(total_saved) > conf.get_uint("save_max"))
	    || (boot.activated()
		&& (intg) all_samples.size() > boot.max_size())) {
	  mout << "Reached max number of detections, exiting." << endl;
	  stop = true; // limit number of detection saves
	  tstop.start(); // start countdown timer
	}
	// sleep a bit between each iteration
	millisleep(5);
	// check if stop countdown reached 0
	if (stop && tstop.elapsed_minutes() >= 20) {
	  cerr << "threads did not all return 20 min after request, stopping"
	       << endl;
	  break ; // program too long to stop, force exit
	}
      }
      // saving boxes
      if (bbsaving != bbox_none) boxes.save();
      mout << "Execution time: " << toverall.elapsed() << endl;
      if (save_video)
	cam->stop_recording(conf.exists_bool("use_original_fps") ?
			    cam->fps() : conf.get_uint("save_video_fps"),
			    outdir.c_str());
      // saving bootstrapping
      if (conf.exists_true("bootstrapping_save") && boot.activated())
	boot.save_dataset(all_samples, all_bbsamples, outdir, classes);
      // free variables (NOTE(review): cam2 is never deleted — leaks when
      // "precamera" is configured; harmless at process exit but worth fixing)
      if (cam) delete cam;
      for (ithreads = threads.begin(); ithreads != threads.end(); ++ithreads) {
	if (!(*ithreads)->finished())
	  (*ithreads)->stop(); // stop thread without waiting
	delete *ithreads;
      }
#ifdef __GUI__
      if (!conf.exists_true("no_gui_quit") && !conf.exists("next_on_key")) {
	mout << "Closing windows..." << endl;
	quit_gui(); // close all windows
	mout << "Windows closed." << endl;
      }
#endif
      job::write_finished(); // declare job finished
      mout << "Detection finished." << endl;
      // evaluation of bbox: run an external evaluation command in the output
      // directory if configured
      if (conf.exists_true("evaluate") && conf.exists("evaluate_cmd")) {
	string cmd;
	cmd << "cd " << outdir << " && " << conf.get_string("evaluate_cmd");
	int res = std::system(cmd.c_str());
	if (res != 0)
	  cerr << "bbox evaluation failed with command " << cmd << endl;
      }
    } eblcatcherror();
  return 0;
}