Example #1
//	cv::Mat segment(cv::Mat inputFrame) {
	PyObject *segment(PyObject *_inputFrame) {
		// Convert the incoming numpy array to a cv::Mat
		NDArrayConverter cvt;
		cv::Mat inputFrame = cvt.toMat(_inputFrame);

		// Crop the bounding box of the corner points p0, p1 and p3
		// (defined elsewhere) into its own patch
		cv::Mat patch;
		inputFrame(cv::Rect(p0.x, p0.y, p1.x - p0.x, p3.y - p0.y)).copyTo(patch);

		cv::imshow("Video Captured", inputFrame);
		cv::imshow("Patch", patch);

		// Convert the patch back to a numpy array for the Python caller
		return cvt.toNDArray(patch);
	}
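
If the corner points can fall outside the frame, OpenCV will raise an error on the crop above; a defensive variant intersects the rectangle with the image bounds first. A minimal sketch (the corner parameters stand in for the globals p0/p1/p3 used above, which this listing never defines):

#include <opencv2/opencv.hpp>

static cv::Mat cropPatch(const cv::Mat &frame, cv::Point p0, cv::Point p1, cv::Point p3) {
	cv::Rect roi(p0.x, p0.y, p1.x - p0.x, p3.y - p0.y);
	roi &= cv::Rect(0, 0, frame.cols, frame.rows); // clamp the ROI to the frame
	cv::Mat patch;
	if (roi.area() > 0)
		frame(roi).copyTo(patch);
	return patch; // empty if the ROI lies entirely outside the frame
}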
Example #2
AlgorithmStatus FrameCutter::process() {
  bool lastFrame = false;

  EXEC_DEBUG("process()");

  // if _streamIndex < _startIndex, we need to advance into the stream until we
  // arrive at _startIndex
  if (_streamIndex < _startIndex) {
    // to make sure we can skip that many, use frameSize (the buffer has been
    // resized to accommodate at least that many samples before processing starts)
    int skipSize = _frameSize;
    int howmuch = min(_startIndex - _streamIndex, skipSize);
    _audio.setAcquireSize(howmuch);
    _audio.setReleaseSize(howmuch);
    _frames.setAcquireSize(0);
    _frames.setReleaseSize(0);

    if (acquireData() != OK) return NO_INPUT;

    releaseData();
    _streamIndex += howmuch;

    return OK;
  }

  // need to know whether we have to zero-pad on the left, i.e., _startIndex < 0
  int zeropadSize = 0;
  int acquireSize = _frameSize;
  int releaseSize = min(_hopSize, _frameSize); // in case hopsize > framesize
  int available = _audio.available();

  // we need this check because at the very end of the stream we might try to
  // acquire 0 tokens for our last frame, which would (unfortunately) succeed,
  // so rule that case out right now
  if (available == 0) return NO_INPUT;

  if (_startIndex < 0) {
    // left zero-padding: only acquire _frameSize + _startIndex tokens and release zero
    acquireSize = _frameSize + _startIndex;
    releaseSize = 0;
    zeropadSize = -_startIndex;
  }

  // if there might not be enough tokens left in the stream (acquireSize >= available):
  if (acquireSize >= available) { // has to be >= in case the size of the audio fits exactly with frameSize & hopSize
    if (!shouldStop()) return NO_INPUT; // not end of stream -> return and wait for more data to come

    acquireSize = available; // need to acquire what's left
    releaseSize = _startIndex >= 0 ? min(available, _hopSize) : 0; // cannot release more tokens than there are available
    if (_startFromZero) {
      if (_lastFrameToEndOfFile) {
        if (_startIndex >= _streamIndex+available) lastFrame = true;
      }
      else lastFrame = true;
    }
    else {
      if (_startIndex + _frameSize/2 >= _streamIndex + available) // center of frame >= end of stream
        lastFrame = true;
    }
  }

  _frames.setAcquireSize(1);
  _frames.setReleaseSize(1);
  _audio.setAcquireSize(acquireSize);
  _audio.setReleaseSize(releaseSize);

  /*
  EXEC_DEBUG("zeropadSize: " << zeropadSize
             << "\tacquireSize: " << acquireSize
             << "\treleaseSize: " << releaseSize
             << "\tavailable: " << available
             << "\tlast frame: " << lastFrame
             << "\tstartIndex: " << _startIndex
             << "\tstreamIndex: " << _streamIndex);
  */

  AlgorithmStatus status = acquireData();
  EXEC_DEBUG("data acquired (audio: " << acquireSize << " - frames: 1)");

  if (status != OK) {
    if (status == NO_INPUT) return NO_INPUT;
    if (status == NO_OUTPUT) return NO_OUTPUT;
    throw EssentiaException("FrameCutter: something weird happened.");
  }

  // some semantic description to not get mixed up between the 2 meanings
  // of a vector<Real> (which acts both as a stream of Real tokens at the
  // input and as a single vector<Real> token at the output)
  typedef vector<AudioSample> Frame;

  // get the audio input and copy it as a frame to the output
  const vector<AudioSample>& audio = _audio.tokens();
  Frame& frame = _frames.firstToken();

  frame.resize(_frameSize);

  // left zero-padding of the frame
  int idxInFrame = 0;
  for (; idxInFrame < zeropadSize; idxInFrame++) {
    frame[idxInFrame] = (Real)0.0;
  }

  fastcopy(frame.begin()+idxInFrame, audio.begin(), acquireSize);
  idxInFrame += acquireSize;

  // check if the idxInFrame is below the threshold (this would only happen
  // for the last frame in the stream) and if so, don't produce data
  if (idxInFrame < _validFrameThreshold) {
    E_INFO("FrameCutter: dropping incomplete frame");

    // release inputs (advance to next frame), but not the output frame (we didn't produce anything)
    _audio.release(_audio.releaseSize());
    return NO_INPUT;
  }

  // right zero-padding on the last frame
  for (; idxInFrame < _frameSize; idxInFrame++) {
    frame[idxInFrame] = (Real)0.0;
  }

  _startIndex += _hopSize;

  if (isSilent(frame)) {
    switch (_silentFrames) {
    case DROP:
      E_INFO("FrameCutter: dropping silent frame");

      // release inputs (advance to next frame), but not the output frame (we didn't produce anything)
      _audio.release(_audio.releaseSize());
      return OK;

    case ADD_NOISE: {
      vector<AudioSample> inputFrame(_frameSize, 0.0);
      fastcopy(&inputFrame[0]+zeropadSize, &frame[0], acquireSize);
      _noiseAdder->input("signal").set(inputFrame);
      _noiseAdder->output("signal").set(frame);
      _noiseAdder->compute();
      break;
    }

    // otherwise, do nothing...
    case KEEP:
    default:
      ;
    }
  }

  EXEC_DEBUG("produced frame; releasing");
  releaseData();
  _streamIndex += _audio.releaseSize();

  EXEC_DEBUG("released");

  if (lastFrame) return PASS;

  return OK;
}
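
For orientation, a minimal sketch (plain integers, no Essentia types; the sizes are illustrative) of the frame/hop arithmetic process() implements: with startFromZero the first frame begins at sample 0, otherwise it is centered on sample 0 (start index -frameSize/2) and the missing halves are zero-padded.

#include <cstdio>

int main() {
  const int frameSize = 1024, hopSize = 512, signalLen = 4096;
  // startFromZero == false: the first frame is centered on sample 0
  for (int start = -frameSize / 2; start < signalLen; start += hopSize) {
    int padLeft  = start < 0 ? -start : 0;                // left zero-padding
    int end      = start + frameSize;
    int padRight = end > signalLen ? end - signalLen : 0; // right zero-padding
    std::printf("frame [%d, %d): pad %d left, %d right\n",
                start, end, padLeft, padRight);
  }
  return 0;
}

Example #3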
int main(int argc, char *argv[]) {
	// Parse command-line arguments (all six options are required; the argc
	// check below enforces this)
	int c;
	int DEVICE, SEGMENTATION_RATIO, OFFSET, ANGLE, THRESHOLD, RESOLUTION;
	while ((c = getopt(argc, argv, "v:s:o:a:t:r:")) != -1) {
		switch (c) {
			case 'v':
				DEVICE = atoi(optarg);
				break;
			case 's':
				SEGMENTATION_RATIO = atoi(optarg);
				break;
			case 'o':
				OFFSET = atoi(optarg);
				break;
			case 'a':
				ANGLE = atoi(optarg);
				break;
			case 't':
				THRESHOLD = atoi(optarg);
				break;
			case 'r':
				RESOLUTION = atoi(optarg);
				break;
			default: // '?'
				std::cout << "Usage: " << argv[0] << " [-v device] [-s segmentation_ratio] [-o offset] [-a angle] [-t threshold] [-r resolution]" << std::endl;
				return -1;
		}
	}
	if (argc != 13) {
		std::cout << "Usage: " << argv[0] << " [-v device] [-s segmentation_ratio] [-o offset] [-a angle] [-t threshold] [-r resolution]" << std::endl;
		return -1;
	}

	/*ROS*/
	// r_init/r_hdlr/r_newPub are project macros (presumably wrapping
	// ros::init, ros::NodeHandle and advertise<challenge1::road>)
	r_init( Road_Detecter);
	r_hdlr( hdl);
	r_newPub( pubRoad, hdl, challenge1::road, Road, 1000);
	ros::Rate loop_rate(10);

	challenge1::road rd;
	/**/

	cv::VideoCapture myVideoCapture(DEVICE);
	if (!myVideoCapture.isOpened()) {
		std::cout << "ERROR: Cannot open device " << DEVICE << std::endl;
		return -1;
	}

	cv::namedWindow("Video Captured");
	cv::namedWindow("Road Detected");

	int imgCenter = myVideoCapture.get(CV_CAP_PROP_FRAME_WIDTH) / 2;
	int preCenter = imgCenter;
	int movingDirection = GO_STRAIGHT;
	//while (1) {
	while( ros::ok()){
		int key = cv::waitKey(1);
		if ((key & 0xFF) == 27) // 'Esc' key
			return 0;

		cv::Mat inputFrame, segmentedInputFrame;
		myVideoCapture >> inputFrame;
		inputFrame(cv::Range(inputFrame.rows - inputFrame.rows / SEGMENTATION_RATIO, inputFrame.rows - 1), cv::Range::all()).copyTo(segmentedInputFrame);

		// Detect road. In general, there are only vertical lines
		cv::Mat road = detectRoad(segmentedInputFrame, OFFSET, ANGLE);

		// Compute the road center
		int curCenter = computeCenter(road);

		// Compute the moving direction
		movingDirection = computeMovingDirection(imgCenter, preCenter, curCenter, movingDirection, THRESHOLD, RESOLUTION);
		std::cout << movingDirection << std::endl;

		cv::imshow("Video Captured", segmentedInputFrame);
		cv::imshow("Road Detected", road);

		/*ROS*/
		/*switch( movingDirection){
			case GO_STRAIGHT:
			case TURN_LEFT:
			case TURN_RIGHT:
			default:
				break;
		}*/
		rd.direction = movingDirection;

		pubRoad.publish( rd);
		ros::spinOnce();
		loop_rate.sleep();
		/*\ROS*/
	}

	return 0;
}
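
computeCenter() is not shown in this listing; a minimal sketch (an assumption, not the project's implementation) of a center estimate in the same spirit, taking the mean column of the non-zero pixels of the road mask and falling back to the image center when the mask is empty:

#include <opencv2/opencv.hpp>

static int computeCenterSketch(const cv::Mat &road) {
	// road is assumed to be a single-channel binary mask of road pixels
	cv::Moments m = cv::moments(road, /* binaryImage */ true);
	if (m.m00 == 0.0)
		return road.cols / 2;               // no road detected
	return static_cast<int>(m.m10 / m.m00); // x-coordinate of the centroid
}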
Example #4
bool NcpSpi::SpiTransactionComplete(uint8_t *aOutputBuf,
                                    uint16_t aOutputLen,
                                    uint8_t *aInputBuf,
                                    uint16_t aInputLen,
                                    uint16_t aTransLen)
{
    // This can be executed from an interrupt context, therefore we cannot
    // use any OpenThread APIs here. If further processing is needed, the
    // returned value `shouldProcess` is set to `true` to indicate to the
    // platform SPI slave driver that it should invoke the
    // `SpiTransactionProcess()` callback, which, unlike this callback, must
    // be called from the same OS context in which OpenThread APIs/callbacks
    // are executed.

    uint16_t transDataLen;
    bool     shouldProcess = false;
    SpiFrame outputFrame(aOutputBuf);
    SpiFrame inputFrame(aInputBuf);
    SpiFrame sendFrame(mSendFrame);

    VerifyOrExit((aTransLen >= kSpiHeaderSize) && (aInputLen >= kSpiHeaderSize) && (aOutputLen >= kSpiHeaderSize));
    VerifyOrExit(inputFrame.IsValid() && outputFrame.IsValid());

    transDataLen = aTransLen - kSpiHeaderSize;

    if (!mHandlingRxFrame)
    {
        uint16_t rxDataLen = inputFrame.GetHeaderDataLen();

        // A new frame is successfully received if the input frame
        // indicates that there is data and the "data len" is not
        // larger than the "accept len" we provided in the exchanged
        // output frame.

        if ((rxDataLen > 0) && (rxDataLen <= transDataLen) && (rxDataLen <= outputFrame.GetHeaderAcceptLen()))
        {
            mHandlingRxFrame = true;
            shouldProcess    = true;
        }
    }

    if (mTxState == kTxStateSending)
    {
        uint16_t txDataLen = outputFrame.GetHeaderDataLen();

        // Frame transmission is successful if the master indicates
        // in the input frame that it can accept the frame length
        // that was exchanged, i.e., the "data len" in the output
        // frame is smaller than or equal to the "accept len" in the
        // input frame received from the master.

        if ((txDataLen > 0) && (txDataLen <= transDataLen) && (txDataLen <= inputFrame.GetHeaderAcceptLen()))
        {
            mTxState      = kTxStateHandlingSendDone;
            shouldProcess = true;
        }
    }

exit:
    // Determine the input and output frames to prepare a new transaction.

    if (mResetFlag && (aTransLen > 0) && (aOutputLen > 0))
    {
        mResetFlag = false;
        sendFrame.SetHeaderFlagByte(/* aResetFlag */ false);
        SpiFrame(mEmptySendFrameFullAccept).SetHeaderFlagByte(/* aResetFlag */ false);
        SpiFrame(mEmptySendFrameZeroAccept).SetHeaderFlagByte(/* aResetFlag */ false);
    }

    if (mTxState == kTxStateSending)
    {
        aOutputBuf = mSendFrame;
        aOutputLen = mSendFrameLength;
    }
    else
    {
        aOutputBuf = mHandlingRxFrame ? mEmptySendFrameZeroAccept : mEmptySendFrameFullAccept;
        aOutputLen = kSpiHeaderSize;
    }

    if (mHandlingRxFrame)
    {
        aInputBuf = mEmptyReceiveFrame;
        aInputLen = kSpiHeaderSize;
    }
    else
    {
        aInputBuf = mReceiveFrame;
        aInputLen = kSpiBufferSize;
    }

    sendFrame.SetHeaderAcceptLen(aInputLen - kSpiHeaderSize);

    otPlatSpiSlavePrepareTransaction(aOutputBuf, aOutputLen, aInputBuf, aInputLen, (mTxState == kTxStateSending));

    return shouldProcess;
}
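
The checks above read three header fields through SpiFrame. A minimal sketch of that 5-byte header access (the field offsets and little-endian encoding are assumptions inferred from how this code uses kSpiHeaderSize, not a copy of OpenThread's SpiFrame):

#include <stdint.h>

enum { kSpiHeaderSizeSketch = 5 };

static uint16_t ReadUint16Le(const uint8_t *aBuf)
{
    return (uint16_t)(aBuf[0] | ((uint16_t)aBuf[1] << 8));
}

// Assumed layout: [0] flag byte (reset flag and frame-pattern bits),
// [1..2] "accept len", [3..4] "data len".
static uint16_t GetHeaderAcceptLen(const uint8_t *aFrame) { return ReadUint16Le(aFrame + 1); }
static uint16_t GetHeaderDataLen(const uint8_t *aFrame)   { return ReadUint16Le(aFrame + 3); }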