Esempio n. 1
0
// Encodes a polyline as a Google-style encoded string wrapped in a JSON
// string value.  The first coordinate is emitted absolutely; every later
// segment flagged `necessary` contributes lat/lon (and optionally
// elevation) deltas relative to the previously emitted coordinate.
JSON::String PolylineCompressor::printEncodedString(
    const std::vector<SegmentInformation> &polyline, const bool use_elevation) const
{
    std::string output;
    if (polyline.empty())
    {
        return JSON::String(output);
    }

    // Flat list of signed numbers the encoder consumes: absolute first
    // point, then per-segment deltas.
    std::vector<int> delta_numbers;
    FixedPointCoordinate previous = polyline.front().location;
    delta_numbers.emplace_back(previous.lat);
    delta_numbers.emplace_back(previous.lon);
    if (use_elevation)
    {
        delta_numbers.emplace_back(previous.getEle());
    }

    // Start at index 1: element 0 was already emitted as absolutes.
    for (std::size_t i = 1; i < polyline.size(); ++i)
    {
        const SegmentInformation &segment = polyline[i];
        if (!segment.necessary)
        {
            continue; // skipped segments do not advance `previous`
        }
        delta_numbers.emplace_back(segment.location.lat - previous.lat);
        delta_numbers.emplace_back(segment.location.lon - previous.lon);
        if (use_elevation)
        {
            delta_numbers.emplace_back(segment.location.getEle() - previous.getEle());
        }
        previous = segment.location;
    }

    encodeVectorSignedNumber(delta_numbers, output);
    return JSON::String(output);
}
		//	Reads a string from the curses window.  For size = 0, a maximum
		//	of _buffer_size characters is read; otherwise at most `size`
		//	characters.  Returns an empty string when no window is attached.
		std::string	PDCursesWindow::read_string( const unsigned int size )
		{
			std::string	return_value;


			if ( _window != NULL )
			{
				// Choose the capacity: caller-supplied size, or the
				// window's default buffer size when size == 0.
				const unsigned int	buffer_size = ( size == 0 ) ? _buffer_size : size;

				// Use a std::string as the scratch buffer instead of a raw
				// new[]/delete[] pair: the storage is released automatically
				// (RAII) and arrives already zero-filled, so no memset is
				// needed.  (On allocation failure this now throws
				// std::bad_alloc instead of silently returning "".)
				std::string	buffer( buffer_size + 1, '\0' );

				// wgetnstr reads at most buffer_size chars and
				// NUL-terminates within the buffer.
				wgetnstr( _window, &buffer[0], buffer_size );

				// Truncate at the first NUL so trailing padding is dropped.
				return_value = buffer.c_str();
			}


			return return_value;
		}
Esempio n. 3
0
  // Emits LLVM IR that validates the caller-supplied argument count against
  // this method's signature (required args, optional args, splat).  On
  // mismatch control branches to an error block that calls the runtime
  // helper rbx_arg_error; otherwise execution continues at "import_args".
  // Leaves the builder positioned at the continuation block on return.
  void MethodBuilder::check_arity() {
    // Load Arguments::total (the number of args actually passed).
    Value* total_offset = b().CreateConstGEP2_32(info_.args(), 0,
        offset::args_total, "total_pos");
    Value* total = b().CreateLoad(total_offset, "arg.total");

    // For others to use.
    arg_total = total;

    BasicBlock* arg_error = info_.new_block("arg_error");
    BasicBlock* cont = info_.new_block("import_args");

    // Check arguments
    //
    // if there is a splat..
    if(vmm_->splat_position >= 0) {
      if(vmm_->required_args > 0) {
        // Make sure we got at least the required args; the splat absorbs
        // any surplus, so there is no upper bound to check.
        Value* cmp = b().CreateICmpSLT(total,
            cint(vmm_->required_args), "arg_cmp");
        b().CreateCondBr(cmp, arg_error, cont);
      } else {
        // Only splat or optionals, no handling!
        b().CreateBr(cont);
      }

      // No splat, a precise number of args
    } else if(vmm_->required_args == vmm_->total_args) {
      // Make sure we got the exact number of arguments
      Value* cmp = b().CreateICmpNE(total,
          cint(vmm_->required_args), "arg_cmp");
      b().CreateCondBr(cmp, arg_error, cont);

      // No splat, with optionals
    } else {
      // Accept any count in [required_args, total_args]; error outside it.
      Value* c1 = b().CreateICmpSLT(total,
          cint(vmm_->required_args), "arg_cmp");
      Value* c2 = b().CreateICmpSGT(total,
          cint(vmm_->total_args), "arg_cmp");

      Value* cmp = b().CreateOr(c1, c2, "arg_combine");
      b().CreateCondBr(cmp, arg_error, cont);
    }

    b().SetInsertPoint(arg_error);

    // Call our arg_error helper: Object rbx_arg_error(VM, CallFrame,
    // Arguments, Int32) — the Int32 is the expected total_args.
    Signature sig(ls_, "Object");

    sig << "VM";
    sig << "CallFrame";
    sig << "Arguments";
    sig << ls_->Int32Ty;

    Value* call_args[] = {
      info_.vm(),
      info_.previous(),
      info_.args(),
      cint(vmm_->total_args)
    };

    Value* val = sig.call("rbx_arg_error", call_args, 4, "ret", b());
    return_value(val);

    // Switch to using continuation
    b().SetInsertPoint(cont);
  }
Esempio n. 4
0
// Builds a wrapped function object for the callable pmf.
// NOTE(review): the template header declaring F is outside this chunk —
// presumably `template <class F>` immediately above; confirm in context.
// return_value(pmf) is used purely as a type tag so the overloaded helper
// can deduce the callable's return type.
inline function* new_wrapped_function(F pmf)
{
    // Deduce the return type and pass it off to the helper function above
	return new_wrapped_function_aux(return_value(pmf), pmf);
}
Esempio n. 5
0
// Estimates the ball's (x, y) floor position in the NAO robot frame
// (FRAME_ROBOT) from its pixel coordinates in a camera image.
//
// motionProxy  NAOqi motion proxy used to query the camera transform/position.
// ballPosCam   (u, v) pixel coordinates of the ball in the image.
// imgWidth/imgHeight  image dimensions in pixels.
// camera       0 = top camera, 1 = bottom camera.
// returns      (x, y) of the ball on the floor plane z = 0, robot frame.
//
// NOTE(review): img_WFOV / img_HFOV are file-scope constants (field of
// view); presumably in radians since they feed sin/cos — confirm.
std::pair<float, float> worldBallPosFromImgCoords(AL::ALMotionProxy motionProxy,
                                                  std::pair<int, int> ballPosCam,
                                                  int imgWidth, int imgHeight,
                                                  int camera)
{
	std::string cameraName = "CameraTop";
	if (camera == 1)
	{
		cameraName = "CameraBottom";
	}

	// Image coordinates of ball
	int u = ballPosCam.first;
	int v = ballPosCam.second;

	// Angles of observation of the ball: offset from image centre,
	// normalized by image size and scaled by the field of view.
	float phi = ((float)u-((float)imgWidth)/2)/(float)imgWidth * img_WFOV;
	float theta = ((float)v-((float)imgHeight)/2)/(float)imgHeight * img_HFOV;

	// Select the right coordinates for the NAO system!
	// x outward from camera, y to the left and z vertically upwards

	// Coefficients for line-equation going from NAO camera to the ball
	float b_c = -sin(phi);
	float c_c = -sin(theta);
	float a_c = sqrt((cos(phi)*cos(phi)+cos(theta)*cos(theta))/2);

	int space = 2; //FRAME_ROBOT
	bool useSensorValues = true;
	std::vector<float> transVec =
		motionProxy.getTransform(cameraName, space, useSensorValues); // Transform camera -> FRAME_ROBOT
	std::vector<float> cameraPos =
		motionProxy.getPosition(cameraName, space, useSensorValues); // Camera position in FRAME_ROBOT


	// std::cout << "Position of bottom camera: " << std::endl;
	// std::cout << cameraPos.at(0) << " " << cameraPos.at(1) << " " << cameraPos.at(2) << std::endl;
	// std::cout << cameraPos.at(3) << " " << cameraPos.at(4) << " " << cameraPos.at(5) << std::endl;


	// Put the camera transform into an Eigen matrix for easy multiplication.
	// getTransform returns a row-major 4x4 homogeneous matrix as 16 floats.
	Eigen::Matrix4f trans;
	trans <<
		transVec[0] , transVec[1] , transVec[2] , transVec[3] ,
		transVec[4] , transVec[5] , transVec[6] , transVec[7] ,
		transVec[8] , transVec[9] , transVec[10], transVec[11],
		transVec[12], transVec[13], transVec[14], transVec[15];

	// Homogeneous point (w = 1) along the viewing ray in camera coordinates.
	Eigen::Vector4f vec(a_c, b_c, c_c, 1);

	// Transform the line equation from NAO camera coordinate system into FRAME_ROBOT
	Eigen::Vector4f transformedLine = trans*vec;
	// std::cout << "trans*vec = " << transformedLine << std::endl;

	// Solve line-plane intersection with plane at z (floor)
	// Solution from Wikipedia line-plane intersection article:
	// solve [la-lb | p01 | p02] * [t u v]^T = la - p0, where the line runs
	// from the camera position (la) to the transformed ray point (lb) and
	// the plane z = 0 is spanned by unit vectors in x and y.
	float z = 0.00;

	Eigen::Matrix3f lineMat;
	lineMat <<
		cameraPos.at(0)-transformedLine[0], 1.0-0.0, 0.0-0.0,
		cameraPos.at(1)-transformedLine[1], 0.0-0.0, 1.0-0.0,
		cameraPos.at(2)-transformedLine[2], z  -  z, z  -  z;

	Eigen::Vector3f lineVec;
	lineVec << cameraPos.at(0)-0.0, cameraPos.at(1)-0.0, cameraPos.at(2)-z;
	// txy = [t u v]; u and v are the floor-plane coordinates of the hit.
	Eigen::Vector3f txy = lineMat.inverse()*lineVec;
	std::cout << "Ball is at (x, y): (" << txy[1] << ", " << txy[2] << ")" << std::endl;
	std::pair<float, float> return_value(txy[1], txy[2]);
	return return_value; //Return ball position (x, y)
}
Esempio n. 6
0
// Thresholds img_BGR in HSV space (optionally OR-ing in a second HSV
// window, useful when a hue wraps around the 0/180 boundary), cleans the
// mask with morphology + contour filling, blob-detects on the result, and
// returns the mean blob centre in pixel coordinates — or (-1, -1) when no
// blobs are found.  The largest blob radius seen is written through
// nr_pixels_ptr when it exceeds the current value.  img_THR receives the
// final binary mask; debug overlays are drawn onto img_BGR.
//
// NOTE(review): relies on file-scope globals defined elsewhere in this
// file: manual, lowerH/lowerS/lowerV, upperH/upperS/upperV, blobDetector,
// X_obj_old, Y_obj_old.
std::pair<int, int> GetThresholdedImage(
	cv::Mat& img_BGR, cv::Mat& img_THR,
	int& HueLow, int& HueHigh, int& SatLow, int& SatHigh, int& ValueLow, int& ValueHigh,
	cv::Scalar Color, int *nr_pixels_ptr,
	int HueLow2, int HueHigh2, int SatLow2, int SatHigh2, int ValueLow2, int ValueHigh2)
{
	std::cout << "GetThresholdedImage starting" << std::endl;
	cv::RNG rng(12345);
	// Convert the image into an HSV image
	cv::Mat img_HSV; cv::cvtColor(img_BGR, img_HSV, CV_BGR2HSV);

	if (manual)
	{
		// Manual mode: threshold with the globally tuned slider values.
		cv::inRange(img_HSV, cv::Scalar(lowerH, lowerS, lowerV),
		            cv::Scalar(upperH, upperS, upperV), img_THR);
	}
	else
	{
		cv::Mat img_THR1;
		cv::Mat img_THR2;
		cv::inRange(img_HSV, cv::Scalar(HueLow, SatLow, ValueLow),
		            cv::Scalar(HueHigh, SatHigh, ValueHigh), img_THR1);

		// The second HSV window is active only when ALL six optional
		// parameters are set (sentinel -1 means "unused").
		if (HueLow2 != -1   &&
		    HueHigh2 != -1  &&
		    SatLow2 != -1   &&
		    SatHigh2 != -1  &&
		    ValueLow2 != -1 &&
		    ValueHigh2 != -1)
		{
			cv::inRange(img_HSV, cv::Scalar(HueLow2, SatLow2, ValueLow2),
			            cv::Scalar(HueHigh2, SatHigh2, ValueHigh2), img_THR2);
			cv::bitwise_or(img_THR1, img_THR2, img_THR);
		}
		else
		{
			img_THR = img_THR1;
		}
	}

	// Morphological cleanup: dilate then erode to close small holes.
	int kernel_size = 5;
	cv::Mat kernel = cv::Mat::ones( kernel_size, kernel_size, CV_32F ) / (float)(kernel_size * kernel_size);
	cv::dilate(img_THR, img_THR, kernel, cv::Point(-1,-1), 3, cv::BORDER_CONSTANT, cv::morphologyDefaultBorderValue());
	cv::erode (img_THR, img_THR, kernel, cv::Point(-1,-1), 4, cv::BORDER_CONSTANT, cv::morphologyDefaultBorderValue());

	// Detect edges using canny
	cv::Mat canny_output; int thresh = 100;
	cv::Canny(img_THR, canny_output, thresh, thresh * 2, 3);
	// Find contours and fill them so the mask becomes solid regions
	// instead of outlines.
	std::vector<std::vector<cv::Point> > contours;
	std::vector<cv::Vec4i> hierarchy;
	cv::findContours(canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));
	for (unsigned int i = 0; i < contours.size(); i++)
	{
		cv::drawContours(img_THR, contours, i, 255, CV_FILLED, 8, hierarchy );
	}
	cv::medianBlur(img_THR, img_THR, 5);

	// Blur image, then re-threshold to smooth region boundaries.
	cv::GaussianBlur(img_THR, img_THR, cv::Size(7,7), 15000, 15000, cv::BORDER_DEFAULT);
	cv::threshold(img_THR, img_THR, 100, 250, cv::THRESH_BINARY);

	// Find contours on the cleaned mask.
	findContours(img_THR, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));

	// Find the convex hull object for each contour
	std::vector<std::vector<cv::Point> > hull(contours.size());
	for(unsigned int i = 0; i < contours.size(); i++)
	{
		convexHull(cv::Mat(contours[i]), hull[i], false);
	}

	// Draw contours + hull results (visual debugging overlay).
	for(unsigned int i = 0; i< contours.size(); i++)
	{
		cv::Scalar color = cv::Scalar(rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255));
		drawContours(img_BGR, contours, i, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point());
		drawContours(img_THR,     hull, i, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point());
	}

	// Detect blobs on the final mask and mark them on the colour image.
	std::vector<cv::KeyPoint> keyPoints;
	blobDetector->detect(img_THR, keyPoints);
	cv::drawKeypoints(img_BGR, keyPoints, img_BGR,
	                  CV_RGB(rand()&255, rand()&255, rand()&255),
	                  cv::DrawMatchesFlags::DEFAULT);

	float X_obj = 0; float Y_obj = 0;

	std::cout << "*Keypoints " << keyPoints.size() << "  *Contours  " << contours.size()  << std::endl;
	for (unsigned int i = 0; i < keyPoints.size(); i++)
	{
		float X = keyPoints[i].pt.x;
		float Y = keyPoints[i].pt.y;
		float R = keyPoints[i].size;
		int intR = (int)R;
		// Track the largest blob radius through the out-parameter.
		if (intR > *nr_pixels_ptr) *nr_pixels_ptr = intR;
		circle(img_BGR, cv::Point(X, Y), R + 5, Color, 8, 0);
		X_obj += X; Y_obj += Y;
		std::cout << "  i: " << i << "  (X -|- Y) : (" << X << " -|- " << Y << ") Radius: " << R << std::endl;
	}

	std::pair<int, int> return_value(-1, -1);

	if (keyPoints.size() != 0)
	{
		// BUG FIX: average only when blobs exist.  The original divided by
		// keyPoints.size() unconditionally, producing NaN (0.0f / 0) when
		// no blobs were detected.
		X_obj /= keyPoints.size();
		Y_obj /= keyPoints.size();
		X_obj_old = X_obj; Y_obj_old = Y_obj;
		return_value.first = X_obj;
		return_value.second = Y_obj;
		circle(img_BGR, cv::Point(X_obj, Y_obj), 5, CV_RGB(255,255,255), 4, 8, 0);
	}
	else
	{
		std::cout << "******************** NO BLOBS FOUND ********************" << std::endl;
		// Fall back to the last known position for the debug marker only;
		// the returned value stays (-1, -1).
		circle(img_BGR, cv::Point(X_obj_old, Y_obj_old), 5, CV_RGB(255,255,255), 4, 8, 0);
	}

	return return_value;
}
/*
 * Dispatches one mixer-protocol command packet from a client.
 *
 * Responses are produced through the helpers visible in the cases below:
 * return_value() for a single numeric result, send_response_long() for a
 * structure payload, send_error() for failures, and send_ack() when the
 * packet requested an acknowledgement (pack->ack_rq).
 *
 * NOTE(review): the typedef name ossmix_commad_packet_t ("commad") is
 * spelled that way in the project headers — do not "fix" it here alone.
 */
static void
serve_command (ossmix_commad_packet_t * pack)
{
  switch (pack->cmd)
    {
    case OSSMIX_CMD_INIT:
      /* Reset event polling; ack only if the client asked for one. */
      polling_started = 0;
      if (pack->ack_rq)
	send_ack ();
      break;

    case OSSMIX_CMD_EXIT:
      //fprintf(stderr, "Exit\n");
      /* Stop polling and mark every mixer closed. */
      polling_started = 0;
      memset(mixer_open_mask, 0, sizeof(mixer_open_mask));
      if (pack->ack_rq)
	send_ack ();
      break;

    case OSSMIX_CMD_START_EVENTS:
      polling_started = 1;
      break;

    case OSSMIX_CMD_GET_NMIXERS:
      /* Also caches the count in the global num_mixers. */
      return_value (num_mixers=ossmix_get_nmixers ());
      break;

    case OSSMIX_CMD_GET_MIXERINFO:
      {
	oss_mixerinfo mi;

	if (ossmix_get_mixerinfo (pack->p1, &mi) < 0)
	  send_error ("Cannot get mixer info\n");
	else
	  send_response_long (OSSMIX_CMD_OK, 0, 0, 0, 0, 0, (void *) &mi,
			      sizeof (mi), 0);
      }
      break;

    case OSSMIX_CMD_OPEN_MIXER:
      /* Set bit p1 in the open-mask before opening the device. */
      mixer_open_mask[pack->p1 / 8] |= (1<<(pack->p1 % 8)); // Open
      return_value (ossmix_open_mixer (pack->p1));
      break;

    case OSSMIX_CMD_CLOSE_MIXER:
      mixer_open_mask[pack->p1 / 8] &= ~(1<<(pack->p1 % 8)); // Closed
      ossmix_close_mixer (pack->p1);
      break;

    case OSSMIX_CMD_GET_NREXT:
      /* Number of extension nodes for mixer p1. */
      return_value (ossmix_get_nrext (pack->p1));
      break;

    case OSSMIX_CMD_GET_NODEINFO:
      {
	oss_mixext ext;

	/* p2..p3 is a node range; a real range is delegated to the
	   multi-node sender, a single node (p3 <= p2) is handled inline. */
	if (pack->p3 > pack->p2)
	  {
	    send_multiple_nodes (pack);
	    break;
	  }

	if (ossmix_get_nodeinfo (pack->p1, pack->p2, &ext) < 0)
	  send_error ("Cannot get mixer node info\n");
	else
	  {
	    /* Cache the node locally before replying. */
	    mixc_add_node (pack->p1, pack->p2, &ext);
	    send_response_long (OSSMIX_CMD_OK, 0, 0, 0, 0, 0, (void *) &ext,
				sizeof (ext), 0);
	  }
      }
      break;

    case OSSMIX_CMD_GET_ENUMINFO:
      {
	oss_mixer_enuminfo desc;

	if (ossmix_get_enuminfo (pack->p1, pack->p2, &desc) < 0)
	  send_error ("Cannot get mixer enum strings\n");
	else
	  send_response_long (OSSMIX_CMD_OK, 0, 0, 0, 0, 0, (void *) &desc,
			      sizeof (desc), 0);
      }
      break;

    case OSSMIX_CMD_GET_DESCRIPTION:
      {
	oss_mixer_enuminfo desc;

	if (ossmix_get_description (pack->p1, pack->p2, &desc) < 0)
	  send_error ("Cannot get mixer description\n");
	else
	  send_response_long (OSSMIX_CMD_OK, 0, 0, 0, 0, 0, (void *) &desc,
			      sizeof (desc), 0);
      }
      break;

    case OSSMIX_CMD_GET_VALUE:
      /* Single control value: mixer p1, control p2, timestamp p3. */
      return_value (ossmix_get_value (pack->p1, pack->p2, pack->p3));
      break;

    case OSSMIX_CMD_GET_ALL_VALUES:
      {
	int n;
	value_packet_t value_packet;

	/* Refresh, then ship every (changed) value record in one reply
	   and clear the change flags afterwards. */
	update_values (pack->p1);
	n = mixc_get_all_values (pack->p1, value_packet, 0);

	send_response_long (OSSMIX_CMD_GET_ALL_VALUES, n, pack->p1, 0, 0, 0,
			    (void *) &value_packet,
			    n * sizeof (value_record_t), 0);
	mixc_clear_changeflags (pack->p1);
      }
      break;

    case OSSMIX_CMD_SET_VALUE:
      /* Fire-and-forget: no response is sent for value writes. */
      ossmix_set_value (pack->p1, pack->p2, pack->p3, pack->p4);
      break;

    default:

      if (pack->ack_rq)
	send_error ("Unrecognized request");
    }
}