Example #1
void VideoPlayback::decodeVideoFrame()
{
    try {
        unique_lock<mutex> lock(m_access);
        while (m_threadRunning) {
            if (m_videoFrames.size() == 0) {
                m_condition.wait(lock);
                continue;
            }

            // Take the oldest encoded frame, then release the lock while decoding and displaying
            EncodedVideoFrame encodedFrame = m_videoFrames.front();
            m_videoFrames.pop_front();
            lock.unlock();

            // decode the frame; -1 (IMREAD_UNCHANGED) keeps the image as stored in the buffer
            vector<char> buf(encodedFrame.data.get(), encodedFrame.data.get() + encodedFrame.dataLength);
            cv::Mat frame = imdecode(buf, -1);

            // display the decoded frame
            try {
                imshow(m_windowName, frame);
            }
            catch (std::exception& e) {
                cout << "VideoPlayback: Imshow Exception caught:" << endl << e.what() << endl;
            }
            cv::waitKey(1);
            lock.lock();
        }
    }
    catch (thread_interrupted&) {
    }
}
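For context, a minimal sketch of the producer side this consumer loop expects, using the same m_access, m_videoFrames and m_condition members shown above; the method name queueEncodedFrame is hypothetical and not part of the original class.
void VideoPlayback::queueEncodedFrame(EncodedVideoFrame frame)   // hypothetical enqueue method
{
    {
        std::lock_guard<std::mutex> lock(m_access);   // same mutex the decoder holds
        m_videoFrames.push_back(std::move(frame));    // same queue the decoder pops from
    }
    m_condition.notify_one();   // wakes the decoder blocked in m_condition.wait(lock)
}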
Example #2
//JNIEXPORT jbyteArray JNICALL Java_com_ocr_jni_Mser_detectIdNum
JNIEXPORT jbyteArray JNICALL Java_org_smirkcat_plateocr_Mser_detectIdNum
(JNIEnv *env, jobject obj, jbyteArray image){
	jboolean isCopy = JNI_FALSE;
	int size = env->GetArrayLength(image);
	jbyte* imagebuffer = env->GetByteArrayElements(image, &isCopy);
	if (NULL == imagebuffer)
	{
		return NULL;
	}
	vector<uchar> outputdata;
	Mser fun;
	Mat src = imdecode(Mat(1, size, CV_8U, imagebuffer), IMREAD_COLOR);
	// imdecode copies the buffer, so the pinned Java array can be released here,
	// ahead of the early returns below.
	env->ReleaseByteArrayElements(image, imagebuffer, 0);
	if (!src.data)
		return NULL;

	Mat dst = fun.detectNumber(src);
	if (!dst.data)
		return NULL;

	imencode(".bmp", dst, outputdata);

	jbyteArray outarray = env->NewByteArray(outputdata.size());
	env->SetByteArrayRegion(outarray, 0, outputdata.size(), (jbyte*)&outputdata[0]);

	return outarray;
}
Example #3
/**
 * @brief FacialModule::onScoreRequest
 * Loads the model stored under the given file name,
 * stores the incoming image of the person in a Mat,
 * then attempts the recognition.
 * @param p
 */
void FacialModule::onScoreRequest(Packet *p) {

    ScoreResultPacket pReturn(p);
    ScoringVector *score = new ScoringVector();

    FacialUtils::loadFaceRecognizer(_faceRecognizer, FACERECO);

    vector<uint8> img(p->getData(), p->getData() + p->getBodySize());

    Mat m = imdecode(img, CV_LOAD_IMAGE_GRAYSCALE);

    // Retrieve the prediction
    double confidence = 0.0;
    int predictedLabel = -1;

    try {
        _faceRecognizer->predict(m, predictedLabel, confidence);
    } catch (const Exception &) {
        *this << WARNING << "Unable to predict a score" << endl;
        score->push_back(Score("Inconnu", confidence));
    }

    map<int, string> names = FacialUtils::reloadFromCSVFile(FICHIER);

    if(predictedLabel != -1) {
        score->push_back(Score(names[predictedLabel], confidence));
        //*this << INFO << names[predictedLabel] << confidence << endl;
    }

    pReturn.setScoringVector(score)->doSend();
}
Example #4
Mat JpegImage::toOpenCvMat(Image& image)
{
	// imdecode expects the compressed JPEG byte stream as a flat 1 x N byte buffer;
	// wrapping it with the decoded width/height would describe the wrong buffer size.
	vector<char> imageData = image.getImageData();
	Mat buf(1, (int)imageData.size(), CV_8U, &imageData[0]);
	Mat matImage = imdecode(buf, CV_LOAD_IMAGE_COLOR);
	return matImage;
}
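As a side note, cv::imdecode also accepts a byte vector directly (ImageLoader::loadImage below does exactly that), so an equivalent variant of this helper could skip the wrapper Mat. This is only a sketch; toOpenCvMatDirect is a hypothetical name, and it assumes getImageData() returns the compressed JPEG bytes.
Mat JpegImage::toOpenCvMatDirect(Image& image)   // hypothetical alternative helper
{
	vector<char> imageData = image.getImageData();             // compressed JPEG bytes (assumption)
	Mat matImage = imdecode(imageData, CV_LOAD_IMAGE_COLOR);   // empty Mat if decoding fails
	return matImage;
}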
Example #5
  MatPtr Util::imageToMat(const char *data, int len) {
    if (!data || len <= 0) {
      return nullptr;
    }

    // Copy the raw bytes into a vector and hand them to imdecode.
    std::vector<char> vec(data, data + len);

    auto mat = MatPtr(new cv::Mat);
    // Three-argument overload of imdecode: decodes directly into *mat (left empty on failure).
    imdecode(cv::Mat(vec), CV_LOAD_IMAGE_COLOR, mat.get());
    return mat;
  }
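A usage sketch for Util::imageToMat, assuming it can be called as a static member and that MatPtr is a smart pointer to cv::Mat (as MatPtr(new cv::Mat) above suggests); the caller, file name and headers (<fstream>, <vector>, <iterator>) are illustrative, not from the original project.
  MatPtr loadPhoto() {                                        // hypothetical caller
    std::ifstream in("photo.jpg", std::ios::binary);          // hypothetical input file
    std::vector<char> bytes((std::istreambuf_iterator<char>(in)),
                            std::istreambuf_iterator<char>());
    MatPtr mat = Util::imageToMat(bytes.data(), static_cast<int>(bytes.size()));
    if (!mat || mat->empty()) {
      // nullptr for bad input; an empty Mat if imdecode could not parse the buffer
      return nullptr;
    }
    return mat;
  }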
Example #6
/**
 * Image file reader.
 *
 * @FUTURE: RAW format reader.
 */
void DigitalCameraCapture::readFrameFromFile(CameraFile * file, OutputArray outputFrame)
{
    // FUTURE: OpenCV cannot read RAW files right now.
    const char * data;
    unsigned long int size;
    CR(gp_file_get_data_and_size(file, &data, &size));
    if (size > 0)
    {
        Mat buf = Mat(1, size, CV_8UC1, (void *) data);
        if(!buf.empty())
        {
            frame = imdecode(buf, CV_LOAD_IMAGE_UNCHANGED);
        }
        frame.copyTo(outputFrame);
    }
}
Example #7
u_int32_t ImageLoader::loadImage(unsigned i_imgSize, char *p_imgData, Mat &img)
{
    vector<char> imgData(i_imgSize);
    memcpy(imgData.data(), p_imgData, i_imgSize);

    try
    {
        img = imdecode(imgData, CV_LOAD_IMAGE_GRAYSCALE);
    }
    catch (cv::Exception& e) // The decoding of an image can raise an exception.
    {
        const char* err_msg = e.what();
        cout << "Exception caught: " << err_msg << endl;
        return IMAGE_NOT_DECODED;
    }

    if (!img.data)
    {
        cout << "Error reading the image." << std::endl;
        return IMAGE_NOT_DECODED;
    }

    unsigned i_imgWidth = img.cols;
    unsigned i_imgHeight = img.rows;

    if (i_imgWidth > 1000 || i_imgHeight > 1000)
    {
        cout << "Image too large." << endl;
        return IMAGE_SIZE_TOO_BIG;
    }

    if (i_imgWidth < 200 || i_imgHeight < 200)
    {
        cout << "Image too small." << endl;
        return IMAGE_SIZE_TOO_SMALL;
    }

    return OK;
}
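A usage sketch for ImageLoader::loadImage built only from what the example shows: the (size, data, Mat&) signature and the return codes OK, IMAGE_NOT_DECODED, IMAGE_SIZE_TOO_BIG and IMAGE_SIZE_TOO_SMALL. The caller function and the assumption that loadImage is an ordinary member are illustrative.
u_int32_t decodeReceivedImage(ImageLoader &loader, char *p_buf, unsigned i_bufSize, Mat &img)
{
    // Forward the raw buffer to loadImage and propagate its status code.
    u_int32_t rc = loader.loadImage(i_bufSize, p_buf, img);
    if (rc != OK)
    {
        // rc is IMAGE_NOT_DECODED, IMAGE_SIZE_TOO_BIG or IMAGE_SIZE_TOO_SMALL
        return rc;
    }
    // Here img holds a grayscale image between 200x200 and 1000x1000 pixels.
    return OK;
}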
Example #8
/**
 * @brief FacialModule::onTrainRequest
 * Look up the last label in the CSV file and check that the submitted
 * name is not already present in it.
 *
 * 2 - Person not yet known
 * --- open the CSV file for writing and record the person as
 * --- "lastlabel;name", then train the model and update the
 * --- information stored in the CSV file
 *
 * 1 - Person already known
 * --- simply update the model with the person's image and its
 * --- associated label
 *
 * @param p
 */
void FacialModule::onTrainRequest(Packet *p) {

    *this << DEBUG << "Train Request" << endl;

    *this << DEBUG << "Loading FaceRecognizer..." << endl;
    FacialUtils::loadFaceRecognizer(_faceRecognizer, FACERECO);
    *this << DEBUG << "FaceRecognizer loaded" << endl;
    TrainRequestPacket trp(p);
    string name = trp.getPerson()->getId(); // the person's name
    vector<uint8> img(trp.getTrainData(), trp.getTrainData() + trp.getTrainDataSize());

    Mat m = imdecode(img, CV_LOAD_IMAGE_GRAYSCALE);

    int labelName = FacialUtils::labelFromName(FICHIER, name);

    vector<Mat> newImage;
    vector<int> newLabel;

    newImage.push_back(m);

    if(labelName == -1) {
        labelName = FacialUtils::lastClassLabel(FICHIER);
        FacialUtils::newUser(FICHIER, labelName, name);
    }

    newLabel.push_back(labelName);

    _faceRecognizer->update(newImage, newLabel);

    *this << DEBUG << "Train Request complete" << endl;

    _faceRecognizer->save(FACERECO);

    TrainResultPacket pReturn(p);
    pReturn.doSend();

}
Example #9
File: Job.cpp Project: sijp/NASH
		/*
		 * Downloads the image from the manager via the download URL.
		 */
		bool Job::download(Employee &employee, HttpLineInterperter *resConfiguration)
		{
			cout<<"Resource is:"<<resConfiguration->getResource()<<endl;
			string downloadRequest = "GET /photos/" +
									resConfiguration->getResource() +
									"?rep=" + this->repDownload +
									" HTTP/1.1";
			employee.send(downloadRequest);
			employee.send("Host: "+employee.getHost()+"\n");
			
			HttpLineInterperter downResponsetInterperter;
			string s;
			while(employee.getFrameAscii(s) && s.size()>1)
			{
				downResponsetInterperter.insertLine(s);
				s.clear();
			}
			//if we succeeded in downloading the image
			if(downResponsetInterperter.getStatus() == "200 OK")
			{
				//init a new byte array in the size of the content length
				int byteLength = downResponsetInterperter.getContentLength();
				uchar* dataByte = new uchar[byteLength];
				employee.getBytes(dataByte , byteLength);
				//dataBytes holds the bytes for the image
				this->mimeType = downResponsetInterperter.getContentType();
				//cout<<dataByte<<endl;
				vector<uchar> vecByte(dataByte , dataByte + byteLength/sizeof(uchar));
				//-1 as a flag means we get the image as is.
				this->image=imdecode(Mat(vecByte) , -1);
				
				cout<<"BLJKADSLVGJASDLFAS"<<endl;

				this->gP.setImage(&(this->image));
				delete [] dataByte;
				return true;
			}
			return false;
		}
Example #10
void MainWindow::decrypt()
{
    FILE *f = fopen(filename.c_str(), "rb");
    if (!f) {
        QMessageBox::critical(0, "Error", "No image file is loaded!!");
        return;
    }

    isEncryption = false;

    QString text = QInputDialog::getText(this, "Password",
                     "Please enter your password for decryption",
                      QLineEdit::Password, QString());

    string pwd = string((const char *)text.toLocal8Bit());
    MD5 md5(pwd);
    key = md5.getDigest();

    AES aes(key);

    unsigned char info[16] = "";
    fseek(f, -16, SEEK_END);
    fread(info, 1, 16, f);

    // verify key and info
    aes.InvCipher(info);
    if (memcmp(info+8, "SEAcret", 7)) {
        QMessageBox::critical(0, "Error", "Incorrect password or there is no secret in the image");
        fclose(f);   // close the file before bailing out
        return;
    }

    int buf_size[2];
    memcpy(buf_size, info, sizeof(int)*2);
    fseek(f, -16-buf_size[0]-buf_size[1], SEEK_END);
    unsigned char *temp;

    vector<unsigned char> mask_buf(buf_size[0]);
    vector<unsigned char> secret_buf(buf_size[1]);
    fread(&mask_buf[0], 1, buf_size[0], f);
    fread(&secret_buf[0], 1, buf_size[1], f);
    fclose(f);

    for (int i = 0; i < buf_size[0]; i+=16) {
        temp = &mask_buf[0]+i;
        aes.InvCipher(temp);
    }

    Mat mask = imdecode((Mat)mask_buf, CV_LOAD_IMAGE_GRAYSCALE);
    mask = mask > 128;

    for (int i = 0; i < buf_size[1]; i+=16) {
        temp = &secret_buf[0]+i;
        aes.InvCipher(temp);
    }

    Mat secret = imdecode((Mat)secret_buf, 1);

    _dst = imread(filename, 1);
    secret.copyTo(_dst, mask);

    imshow("Result", _dst);
    waitKey(0);
    destroyAllWindows();
}
Example #11
EasyOcr::EasyOcr(std::vector<char> &imgData) {
  this->mat = imdecode(cv::Mat(imgData), CV_LOAD_IMAGE_COLOR);
  this->result = "";
}
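A construction sketch based solely on the constructor above; the helper function is hypothetical. Note that the vector is taken by non-const reference, so an lvalue buffer has to be passed.
void runOcr(std::vector<char> &jpegBytes)   // hypothetical helper; jpegBytes holds a compressed image
{
  EasyOcr ocr(jpegBytes);   // the constructor decodes the bytes into the object's internal cv::Mat
  // ... call whatever recognition method EasyOcr exposes (not shown in this example) ...
}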
Example #12
		int analyse(const char* filename, const char* outFile){
			Mat img = cv_imread(filename), imgG, imgTmp;
			if(!img.data){
				//cerr << "invalid image" << endl;
				throw "[ghosts] invalid image";
			}
			cvtColor(img, imgG, CV_BGR2GRAY);
			int bCntX = (img.cols - w ) / bSize, bCntY = (img.rows - w) / bSize;
			int bCnt = bCntX * bCntY;
			int bCntR = bCnt;
			int res = 0;
			vector<vector<vector<double> > > bGhosts(bCnt);
			vector<vector<vector<double> > > bInv(bCnt);
			vector<double> bPredict(bCnt);
			for(int i = 0; i < bCnt; i++){
				bGhosts[i].resize(w * w);
				for(int j = 0; j < w * w; j++)
					bGhosts[i][j].resize(101);
			}
			double elaAv = 0.0;
			for(int q = min(qNorm, qMin); q <= qMax; q++){
				if(!(q == qNorm || (q >= qMin && q <= qMax)))
					continue;
				for(int j = 0; j < w * w; j++){
					Rect curRoi = Rect(j % w, j / w, img.cols - j % w, img.rows - j / w);
					vector<int> compParams;
					vector<uchar> buff;
					compParams.push_back(CV_IMWRITE_JPEG_QUALITY);
					compParams.push_back(q);
					imencode(".jpg", imgG(curRoi), buff, compParams); 
					imgTmp = imdecode(buff, 0);
					for(int i = 0; i < bCnt; i++){
						bGhosts[i][j][q] = calcDiff(imgG(curRoi), imgTmp, bCoordX, bCoordY, bCoordX + bSize, bCoordY + bSize);
						if(q == qNorm && j == 1)
							elaAv += bGhosts[i][j][q];
					}
				}
				fprintf(stderr, "\r%d", q);
			}
			elaAv /= bCnt;
			for(int i = 0; i < bCnt; i++){
				vector<double> predictArgsV;
				predictArgsV.push_back(bCoordX);
				predictArgsV.push_back(bCoordY);
				predictArgsV.push_back(elaAv);
				predictArgsV.push_back(bGhosts[i][1][qNorm]);
				bPredict[i] = predictF(imgG, predictArgsV);
				if(bPredict[i] == 1)
					bCntR--;
			}
			for(int i = 0; i < bCnt; i++)
				for(int j = 0; j < w * w; j++)
					bGhosts[i][j] = nrm(bGhosts[i][j]);
			if(1){
				for(int i = 0; i < bCnt; i++){
					bInv[i].resize(101);
					if(bPredict[i] == 1)
						continue;
					vector<double> m1V(101), m2V(101);
					for(int q = qMin; q <= qMax; q++){
						for(int j = 1; j < w * w; j++) // -- j = 1
							m1V[q] += bGhosts[i][j][q];
						m1V[q] /= w * w - 1;
					}
					for(int q = qMin; q <= qMax; q++){
						for(int j = 1; j < w * w; j++) // -- j = 1
							m2V[q] += bGhosts[i][j][q] * bGhosts[i][j][q];
						m2V[q] = sqrt(m2V[q] / (w * w - 1) - m1V[q] * m1V[q]);
					}
					for(int q = qMin; q <= qMax; q++){
						double maxD = -1, minD = 2;
						for(int j = 1; j < w * w; j++){ // -- j = 1
							if(bGhosts[i][j][q] < minD)
								minD = bGhosts[i][j][q];	
							if(bGhosts[i][j][q] > maxD)
								maxD = bGhosts[i][j][q];
						}
						double dlt = max((maxD - minD) / 3.0, 0.01);
						double dlt1 = max((maxD - minD) / 6.0, 0.004);
						if((bGhosts[i][0][q] < minD - dlt || bGhosts[i][0][q] > maxD + dlt)/* && q <= qMax - 3 && q >= qMin + 3*/){
							bInv[i][q].push_back(1);	
						}else
							bInv[i][q].push_back(0);
						if((bGhosts[i][0][q] < minD - dlt1 || bGhosts[i][0][q] > maxD + dlt1)/* && q <= qMax - 3 && q >= qMin + 3*/){
							bInv[i][q].push_back(1);	
						}else
							bInv[i][q].push_back(0);
					}
				}
				vector<double> qMask(101);
				int qMaskCnt = 0;
				for(int q = qMin; q <= qMax; q++){
					for(int i = 0; i < bCnt; i++){
						if(bPredict[i] == 1)
							continue;
						if(bInv[i][q][0] == 1)
							qMask[q]++;
					}
					if(qMask[q] / bCntR > 0.5){
						qMask[q] = 1;
						qMaskCnt++;
					}else
						qMask[q] = 0;
					cerr << qMask[q];
				}
				cerr << endl;
				if(qMaskCnt < 3){
					//cerr << "don't want to analyse" << endl;
					//throw "[ghosts] don't want to analyse";
					return -1;
				}
				for(int i = 0; i < bCnt; i++){
					int clr;
					if(bPredict[i] == 1)
						clr = 0;
					else{
						int tmpI2 = 0;
						for(int q = qMin; q <= qMax; q++)
							if(qMask[q] == 1 && bInv[i][q][1] == 1){
								tmpI2++;
							}
						if((tmpI2 <= 0 && qMaskCnt >= 3) ||
							(tmpI2 <= 1 && qMaskCnt >= 4) ||
							(tmpI2 <= 2 && qMaskCnt >= 5) || 
							(tmpI2 <= 3 && qMaskCnt >= 8))
							tmpI2 = 0;
						clr = (tmpI2 == 0);
					}
					if(clr){
						res++;
						for(int ix = bCoordX; ix < bCoordX + bSize; ix++)
							for(int iy = bCoordY; iy < bCoordY + bSize; iy++)
								img.at<Vec3b>(iy, ix)[2] = 255;
					}
				}
				if(outFile)
					cv_imwrite(outFile, img);
			}
			return res;
		}