C_RESULT
vp_stages_frame_pipe_receiver_transform(vp_stages_frame_pipe_config_t *cfg, vp_api_io_data_t *in, vp_api_io_data_t *out)
{
  vp_os_mutex_lock(&out->lock);

  if(out->status == VP_API_STATUS_INIT)
  {
    out->numBuffers = 1;
    out->indexBuffer = 0;
    out->status = VP_API_STATUS_PROCESSING;
  }

  if( out->status == VP_API_STATUS_PROCESSING )
  {
    vp_os_mutex_lock(&(cfg->pipe_mut));
    {
      if (cfg->pipe_state != SENDER_ERROR)
      {
        if (cfg->pipe_state == FETCH_PAUSE)
        {
          // a frame has already been fetched by a fetch stage
          cfg->pipe_state = PAUSE;
        }
        else
        {
          // no fetch stage was executed, ask for a frame transfer
          cfg->pipe_state = WAIT_RECEPTION;
          vp_os_cond_wait (&(cfg->buffer_sent));
        }

        if (cfg->mode == FAST && cfg->pipe_state != SENDER_ERROR)
        {
          // at this point a new frame is available in the copy buffer
          // swap the two output buffers to make the new picture available in outPicture
          vp_api_picture_t temp = cfg->outPicture;
          cfg->outPicture = cfg->copyPicture;
          cfg->copyPicture = temp;

          // trigger a new frame transfer
          cfg->pipe_state = FETCH;
        }

        // at this point, we are sure that cfg->outPicture was initialized by the sender
        out->buffers = &(cfg->outPicture.raw);
        out->size = cfg->frame_size;
      }
    }
    vp_os_mutex_unlock(&(cfg->pipe_mut));

    if (cfg->callback != NULL)
      cfg->callback();
  }

  vp_os_mutex_unlock(&out->lock);

  return C_OK;
}
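/* The *_transform functions collected here all follow the VP_Api stage contract:
 * they are registered in a pipeline and called once per vp_api_run() iteration.
 * The sketch below shows one plausible wiring for the receiver stage above; it is
 * an assumption, not SDK code: the funcs table name vp_stages_frame_pipe_receiver_funcs,
 * the stage type and the single-stage pipeline are placeholders. */
#define NB_STAGES 1

static vp_stages_frame_pipe_config_t pipe_cfg;      /* filled elsewhere (hypothetical) */

void run_receiver_pipeline(void)
{
  vp_api_io_pipeline_t pipeline;
  vp_api_io_data_t     out;
  vp_api_io_stage_t    stages[NB_STAGES];
  PIPELINE_HANDLE      handle;

  stages[0].type  = VP_API_FILTER_DECODER;                 /* stage role (assumed) */
  stages[0].cfg   = (void *) &pipe_cfg;
  stages[0].funcs = vp_stages_frame_pipe_receiver_funcs;   /* hypothetical funcs table */

  pipeline.nb_stages = NB_STAGES;
  pipeline.stages    = &stages[0];

  out.status = VP_API_STATUS_INIT;                         /* transform does its one-time init on first run */

  vp_api_open(&pipeline, &handle);
  while (SUCCEED(vp_api_run(&pipeline, &out)))
    ;                                                      /* one decoded frame per iteration */
  vp_api_close(&pipeline, &handle);
}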
DEFINE_THREAD_ROUTINE(score_logic, data){
    
    //NOTE: the drone and the enemy have a certain amount of "life".
    //Every time the enemy hits the drone, the drone's life decreases by one.
    //Every time the drone hits the enemy, the enemy's life decreases by one.
    //If the drone finds the required number of hills, the drone wins.
    //If the enemy "kills" the drone, the enemy wins.
    //If time runs out and the drone has found at least one hill, the drone wins.
    
    while(game_active){
        
        //This happens if the enemy hits the drone
        vp_os_mutex_lock(&drone_score_mutex);
            if(drone_lose_score){
                if(drone_score > 0){
                    drone_score--;
                    vp_os_mutex_lock(&drone_wound_mutex);
                        drone_wounded = 1;
                    vp_os_mutex_unlock(&drone_wound_mutex);
                } else{
                    //TODO: else the drone is dead, game over!
                }
                drone_lose_score = 0;
            }
        vp_os_mutex_unlock(&drone_score_mutex);
        
        //This happens if the drone hits the enemy
        vp_os_mutex_lock(&enemy_score_mutex);
            if(enemy_lose_score){
                if(enemy_score > 0){
                    enemy_score--;
                    //TODO make the enemy aware that he's being hit
                } else {
                    //TODO: enemy is dead!!
                }
                
                enemy_lose_score = 0;
            }
        vp_os_mutex_unlock(&enemy_score_mutex);
          
        //This happens if the drone finds a hill
        //when the hill points reach a certain amount, the drone wins!!
        vp_os_mutex_lock(&drone_score_mutex);
            if(drone_add_score){
                drone_hill_score++;
                drone_add_score = 0;
            }
            
            if(drone_hill_score > 5){
                //TODO game over, the drone wins!!
            }
        vp_os_mutex_unlock(&drone_score_mutex);
    }
    
    return C_OK;
}
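/* score_logic only consumes the drone_lose_score / enemy_lose_score / drone_add_score
 * flags; some other thread has to raise them. A minimal sketch of that producer side,
 * assuming the same globals and mutexes are declared elsewhere in the project
 * (function names and flag types here are assumptions). */
extern vp_os_mutex_t drone_score_mutex;
extern int drone_lose_score;   /* type assumed */
extern int drone_add_score;    /* type assumed */

/* Called by the collision-detection code when the enemy hits the drone. */
void report_drone_hit(void)
{
    vp_os_mutex_lock(&drone_score_mutex);
    drone_lose_score = 1;      /* score_logic clears the flag once it has been handled */
    vp_os_mutex_unlock(&drone_score_mutex);
}

/* Called by the vision code when the drone finds a hill. */
void report_hill_found(void)
{
    vp_os_mutex_lock(&drone_score_mutex);
    drone_add_score = 1;
    vp_os_mutex_unlock(&drone_score_mutex);
}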
C_RESULT overlay_stage_transform(vlib_stage_decoding_config_t *cfg, vp_api_io_data_t *in, vp_api_io_data_t *out)
{
	vp_os_mutex_lock( &out->lock );

	if(out->status == VP_API_STATUS_INIT)
	{
		out->status = VP_API_STATUS_PROCESSING;
		out->numBuffers   = 1;
		out->indexBuffer  = 0;
		out->lineSize     = NULL;
	}

	if( in->status == VP_API_STATUS_ENDED )
	{
		out->status = in->status;
	}

	if(in->status == VP_API_STATUS_STILL_RUNNING)
	{
		out->status = VP_API_STATUS_PROCESSING;
	}

	if(out->status == VP_API_STATUS_PROCESSING )
	{
		vp_os_mutex_lock( &overlay_video_config.mutex );

		pixbuf_data = (uint8_t*)in->buffers[0];

		if (overlay_video_config.showFPS) drawFPS(pixbuf_data);

		drawBattery(pixbuf_data);
		drawFlyMode(pixbuf_data);
		drawRecording(pixbuf_data);

//		if (overlay_video_config.showInternalTrackingPoints) drawInternaltrackingPoints(pixbuf_data);
		if (overlay_video_config.showExternalTrackingPoints) drawExternaltrackingPoints(pixbuf_data);


		out->numBuffers = in->numBuffers;
		out->indexBuffer = in->indexBuffer;
		out->size     = in->size;
		out->status   = in->status;
		out->buffers  = in->buffers;

		vp_os_mutex_unlock( &overlay_video_config.mutex );
	}

	vp_os_mutex_unlock( &out->lock );

	return C_OK;
}
Example #4
void videoServer_endBroadcaster() {
	/* Signal broadcaster thread */
	vp_os_mutex_lock(&frameBufferMutex);
	availVideoBuffer = NULL;
	vp_os_cond_signal(&frameBufferCond);
	vp_os_mutex_unlock(&frameBufferMutex);
}
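/* videoServer_endBroadcaster only signals the broadcaster thread; the matching wait
 * loop is not shown here. The sketch below is an assumption about what that loop
 * could look like, reusing the same frameBufferMutex / frameBufferCond /
 * availVideoBuffer globals and treating availVideoBuffer == NULL as the shutdown request. */
extern vp_os_mutex_t frameBufferMutex;
extern vp_os_cond_t  frameBufferCond;
extern uint8_t      *availVideoBuffer;               /* type assumed */

DEFINE_THREAD_ROUTINE(videoServer_broadcaster, data)
{
	while (1)
	{
		vp_os_mutex_lock(&frameBufferMutex);
		vp_os_cond_wait(&frameBufferCond);            /* woken by a new frame or by endBroadcaster */
		uint8_t *frame = availVideoBuffer;
		vp_os_mutex_unlock(&frameBufferMutex);

		if (frame == NULL)
			break;                                    /* endBroadcaster requested shutdown */

		/* broadcast `frame` to the connected clients (omitted in this sketch) */
	}
	return C_OK;
}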
// ROS service callback to turn camera recording on and off
bool SetRecordCallback(ardrone_autonomy::RecordEnable::Request &request,
                       ardrone_autonomy::RecordEnable::Response& response)
{
  char record_command[ARDRONE_DATE_MAXSIZE + 64];
  int32_t new_codec;

  if (request.enable == true)
  {
    char date[ARDRONE_DATE_MAXSIZE];
    time_t t = time(NULL);
    // For some reason the linker can't find this, so we'll just do it manually, cutting and pasting
    //    ardrone_time2date(t, ARDRONE_FILE_DATE_FORMAT, date);
    strftime(date, ARDRONE_DATE_MAXSIZE, ARDRONE_FILE_DATE_FORMAT, localtime(&t));
    snprintf(record_command, sizeof(record_command), "%d,%s", USERBOX_CMD_START, date);
    new_codec = MP4_360P_H264_720P_CODEC;
  }
  else
  {
    snprintf(record_command, sizeof(record_command), "%d", USERBOX_CMD_STOP);
    new_codec = H264_360P_CODEC;
  }

  vp_os_mutex_lock(&twist_lock);
  ARDRONE_TOOL_CONFIGURATION_ADDEVENT(video_codec, &new_codec, NULL);
  ARDRONE_TOOL_CONFIGURATION_ADDEVENT(userbox_cmd, record_command, NULL);
  vp_os_mutex_unlock(&twist_lock);

  response.result = true;
  return true;
}
C_RESULT output_rendering_device_stage_transform( void *cfg, vp_api_io_data_t *in, vp_api_io_data_t *out)
{
  vlib_stage_decoding_config_t* vec = (vlib_stage_decoding_config_t*)cfg;

  vp_os_mutex_lock(&video_update_lock);
 
  /* Get a reference to the last decoded picture */
  pixbuf_data      = (uint8_t*)in->buffers[0];
  
  /** ======= INSERT USER CODE HERE ========== **/

  // Send the decoded video frame to the DirectX renderer.
  // This is an example; do here whatever you want to do
  //  with the decoded frame.

  /* Send the actual video resolution to the rendering module */
  //D3DChangeTextureSize(vec->controller.width,vec->controller.height);
  /* Send video picture to the rendering module */
  //D3DChangeTexture(pixbuf_data);

  /** ======= INSERT USER CODE HERE ========== **/
  bot_ardrone_ardronelib_process_frame(pixbuf_data, vec->controller.width, vec->controller.height);
  
  vp_os_mutex_unlock(&video_update_lock);
  return (VP_SUCCESS);
}
C_RESULT academy_connect(const char *username, const char *password, academy_callback callback)
{
	C_RESULT result = C_FAIL;

	if(academy_upload_started && (academy_upload.state.state == ACADEMY_STATE_NONE))
    {
        if(!academy_upload.connected)
        {
            _ftp_t *academy_ftp = NULL;
            _ftp_status status;
            strcpy(academy_upload.user.username, username ? username : "");
            strcpy(academy_upload.user.password, password ? password : "");

            if(callback != NULL)
                academy_upload.callback = callback;

            academy_ftp = ftpConnectFromName(ACADEMY_SERVERNAME, ACADEMY_PORT, academy_upload.user.username, academy_upload.user.password, &status);
            if(academy_ftp != NULL)
            {
                ftpClose(&academy_ftp);
                vp_os_mutex_lock(&academy_upload.mutex);
                academy_upload.connected = TRUE;
                vp_os_cond_signal(&academy_upload.cond);
                vp_os_mutex_unlock(&academy_upload.mutex);
                result = C_OK;
            }
        }
        else
            result = C_OK;
    }
    
	return result;
}
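/* Hedged usage sketch for academy_connect: the credentials are placeholders, and
 * passing NULL as the callback keeps whatever callback was registered previously
 * (see the NULL check above). academy_upload_started is assumed to have been set
 * by the surrounding academy code before this call. */
void start_academy_upload(void)
{
	if (academy_connect("myuser", "mypassword", NULL) == C_OK)
		PRINT("Connected to the AR.Drone Academy server\n");
	else
		PRINT("Academy connection failed or upload not started\n");
}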
Example #8
static inline C_RESULT ardrone_navdata_process( const navdata_unpacked_t* const navdata )
{
	if( writeToFile )
	{
		if( navdata_file == NULL )
		{
			ardrone_navdata_file_init(root_dir);
			
			PRINT("Saving in %s file\n", root_dir);
		}
		ardrone_navdata_file_process( navdata );
	}
	else
	{
		if(navdata_file != NULL)
			ardrone_navdata_file_release();			
	}
	
	vp_os_mutex_lock( &inst_nav_mutex);
/*	inst_nav.ardrone_state = navdata->ardrone_state;
	inst_nav.vision_defined = navdata->vision_defined;
	vp_os_memcpy(&inst_nav.navdata_demo, &navdata->navdata_demo, sizeof(navdata_demo_t));
	vp_os_memcpy(&inst_nav.navdata_vision_detect, &navdata->navdata_vision_detect, sizeof(navdata_vision_detect_t));
*/
	vp_os_memcpy(&inst_nav, navdata, sizeof(navdata_unpacked_t));
	vp_os_mutex_unlock( &inst_nav_mutex );

	return C_OK;
}
// added by Alex
bool MagnitoCalibCallback(std_srvs::Empty::Request &request, std_srvs::Empty::Response &response)
{
    vp_os_mutex_lock(&twist_lock);
    ardrone_at_set_calibration(0);
    vp_os_mutex_unlock(&twist_lock);
    fprintf(stderr, "\nMagneto calibration.\n");
    return true;
}
Example #10
C_RESULT output_gtk_stage_transform( void *cfg, vp_api_io_data_t *in, vp_api_io_data_t *out)
{
  vp_os_mutex_lock(&video_update_lock);
 
  /* Get a reference to the last decoded picture */
  
  pixbuf_data  = (uint8_t*)in->buffers[0];
  n = write(newsockfd, pixbuf_data , IMG_BUFSIZE);

  //vp_stages_YUV420P_to_RGB565_QCIF_to_QVGA(vp_stages_yuv2rgb_config_t *cfg, vp_api_picture_t *picture, uint8_t *dst, uint32_t dst_rbytes)

  //printf("%s\n", pixbuf_data);
  //printf("%d  sizeof *\n", sizeof(*pixbuf_data));
  //printf("%d  sizeof\n", sizeof(pixbuf_data));
  //printf("%d  sizeofuint\n", sizeof(uint8_t*));

  //printf("2 sockfd, %d\n", sockfd);
  //n = write(newsockfd, pixbuf_data , sizeof(pixbuf_data));
  //printf("%d --- %d\n", n, errno);

  vp_os_mutex_unlock(&video_update_lock);

  return (SUCCESS);
}
DEFINE_THREAD_ROUTINE( comm_control, data )
{
    int seq_no = 0,prev_start = 0;
    control_data_t l_control;

    PRINT( "Initilizing Thread 1\n" );
    while( end_all_threads == 0 )
    {
    vp_os_delay(10);
    //PRINT("E:%d,S:%d\n\n",end_all_threads,control_data.start );
    vp_os_mutex_lock( &control_data_lock ); //Taking Control Mutex
    	l_control = control_data;
    vp_os_mutex_unlock( &control_data_lock );

    if( prev_start == 0 && l_control.start == 1 ) //To Start Drone
    {
        ardrone_tool_set_ui_pad_start( 1 );
        prev_start = 1;
    }
    else if( prev_start == 1 && l_control.start == 0 ) //To Stop the Drone
    {
        ardrone_tool_set_ui_pad_start( 0 );
        prev_start = 0;
    }
    //PRINT("YAW : %f\n\n",l_control.yaw);
    ardrone_at_set_progress_cmd( ++seq_no,l_control.roll,l_control.pitch,l_control.gaz,l_control.yaw); //command to make drone move.
    //ardrone_at_set_progress_cmd( ++seq_no,0,0,0,-1);
    }
    PRINT( "Communication Thread Ending\n" );
    return C_OK;
}
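/* comm_control only reads the shared control_data struct; a hedged sketch of the
 * writer side, assuming the same control_data_t fields (start, roll, pitch, gaz, yaw)
 * and the control_data_lock mutex. The function name and field types are assumptions. */
extern control_data_t control_data;
extern vp_os_mutex_t  control_data_lock;

void set_drone_command(int start, float roll, float pitch, float gaz, float yaw)
{
    vp_os_mutex_lock( &control_data_lock );
    control_data.start = start;
    control_data.roll  = roll;
    control_data.pitch = pitch;
    control_data.gaz   = gaz;
    control_data.yaw   = yaw;
    vp_os_mutex_unlock( &control_data_lock );
}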
//extract unpacked navdata from the navdata slot
inline void extract_navdata_slot(volatile navdata_slot_t *navdata_slot, navdata_unpacked_t *navdata) {
    if (CANREAD(navdata_slot)) {
        vp_os_mutex_lock(&navdata_slot_mutex);
        NAVDATA_READ(*navdata, navdata_slot);
        vp_os_mutex_unlock(&navdata_slot_mutex);
    }
}
//fill the navdata slot with unpacked navdata
inline void fill_navdata_slot(const navdata_unpacked_t * const navdata, volatile navdata_slot_t* navdata_slot) {
    if (CANWRITE(navdata_slot)) {
        vp_os_mutex_lock(&navdata_slot_mutex);
        NAVDATA_WRITE(*navdata, navdata_slot);
        vp_os_mutex_unlock(&navdata_slot_mutex);
    }
}
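/* CANREAD / CANWRITE / NAVDATA_READ / NAVDATA_WRITE are project macros that are not
 * shown in this snippet. The definitions below are only one plausible single-slot
 * implementation, given here as an assumption about what navdata_slot_t might be. */
typedef struct {
    navdata_unpacked_t data;
    volatile int       fresh;   /* 1 = written and not yet read */
} navdata_slot_t;

#define CANWRITE(slot)            ((slot)->fresh == 0)
#define CANREAD(slot)             ((slot)->fresh == 1)
#define NAVDATA_WRITE(nd, slot)   do { (slot)->data = (nd); (slot)->fresh = 1; } while (0)
#define NAVDATA_READ(nd, slot)    do { (nd) = (slot)->data; (slot)->fresh = 0; } while (0)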
Example #14
void video_render(long tick, int width, int height)
{
    opengl_video_stage_config_t *config;
    static int num = 0;

    screen_width = width;
    screen_height = height;

    //glViewport(0, 0, width, height);

    glBindTexture(GL_TEXTURE_2D, texture);

    config = opengl_video_stage_get();
    // check config before dereferencing it, then only upload when a new frame was decoded
    if ((config != NULL) && (config->data != NULL) &&
        (config->num_picture_decoded > 0) && (config->num_picture_decoded != num))
    {
        /* a new frame is available, update the texture */
        vp_os_mutex_lock( &config->mutex );
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, VIDEO_WIDTH, VIDEO_HEIGHT,
                        GL_RGB, GL_UNSIGNED_SHORT_5_6_5, config->data );
        num = config->num_picture_decoded;
        vp_os_mutex_unlock( &config->mutex );
    }

    /* and draw it on the screen */
    __android_log_print( ANDROID_LOG_INFO, "ARDrone", "screen_width=%d, screen_height=%d\n", screen_width, screen_height );
    glDrawTexiOES(0, 0, 0, screen_width, screen_height);
}
static C_RESULT vp_com_serial_write_sync(vp_com_serial_config_t* config, vp_com_socket_t* socket)
{
  uint32_t i;
  uint8_t c;

  vp_os_mutex_lock(&write_sync_mutex);

  if(!config->sync_done)
    {
      // note: sizeof(VP_COM_SYNC_STRING) includes the terminating '\0', which is sent as well
      for(i = 0 ; i < sizeof(VP_COM_SYNC_STRING) ; i++)
        {
          c = (VP_COM_SYNC_STRING)[i];
          if(-1 == write((int)socket->priv, &c, sizeof(int8_t)))
            {
              // release the mutex before bailing out, otherwise the next caller deadlocks
              vp_os_mutex_unlock(&write_sync_mutex);
              return (FAIL);
            }
        }
    }

  config->sync_done = 1;

  vp_os_mutex_unlock(&write_sync_mutex);

  return (SUCCESS);
}
static C_RESULT vp_com_serial_wait_sync(vp_com_serial_config_t* config, vp_com_socket_t* socket)
{
  uint32_t nb;
  uint8_t c;

  vp_os_mutex_lock(&wait_sync_mutex);

  if(!config->sync_done)
    {
      nb = 0;
      do
        {
          if(-1 == read((int)socket->priv, &c, sizeof(int8_t)))
            {
              // release the mutex before bailing out, otherwise the next caller deadlocks
              vp_os_mutex_unlock(&wait_sync_mutex);
              return (FAIL);
            }
          if(c == (VP_COM_SYNC_STRING)[nb])
            nb++;
          else
            nb = 0;
        }
      while(nb != sizeof(VP_COM_SYNC_STRING));

      config->sync_done = 1;
    }

  vp_os_mutex_unlock(&wait_sync_mutex);

  return (SUCCESS);
}
C_RESULT
vp_stages_input_com_stage_transform(vp_stages_input_com_config_t *cfg, vp_api_io_data_t *in, vp_api_io_data_t *out)
{
  vp_os_mutex_lock(&out->lock);

  if(out->status == VP_API_STATUS_INIT)
  {
    out->numBuffers = 1;
    out->size = cfg->buffer_size;
    out->buffers = (int8_t **) vp_os_malloc (sizeof(int8_t *)+out->size*sizeof(int8_t));
    out->buffers[0] = (int8_t *)(out->buffers+1);
    out->indexBuffer = 0;
    // out->lineSize not used

    out->status = VP_API_STATUS_PROCESSING;
  }

  if(out->status == VP_API_STATUS_PROCESSING && cfg->read != NULL)
  {
    out->size = cfg->buffer_size;

    if(VP_FAILED(cfg->read(&cfg->socket_client, out->buffers[0], &out->size)))
      out->status = VP_API_STATUS_ERROR;
  }

  vp_os_mutex_unlock(&out->lock);

  return (VP_SUCCESS);
}
C_RESULT
vp_stages_output_com_stage_transform(vp_stages_output_com_config_t *cfg, vp_api_io_data_t *in, vp_api_io_data_t *out)
{
  vp_os_mutex_lock(&out->lock);

  if(out->status == VP_API_STATUS_INIT)
  {
    out->status = VP_API_STATUS_PROCESSING;
  }

  // \todo test
  if(out->status != VP_API_STATUS_ERROR)
  {
    out->size = in->size;
    if(in->size > 0 && cfg->write != NULL)
    {
      cfg->write(&cfg->socket_client, &in->buffers[in->indexBuffer][0], &out->size);
    }

    out->status = in->status;
  }
  // \todo test

  vp_os_mutex_unlock(&out->lock);

  return (VP_SUCCESS);
}
/*---------------------------------------------------------------------------------------------------------------------
Navdata handling function, which is called every time navdata are received
---------------------------------------------------------------------------------------------------------------------*/
inline C_RESULT demo_navdata_client_process( const navdata_unpacked_t* const navdata )
{
	static int cpt=0;
	int i;
	const navdata_demo_t* const nd = &navdata->navdata_demo;

	/** ======= INSERT USER CODE HERE ========== **/
	ARWin32Demo_AcquireConsole();
	if ((cpt++)==90) { system("cls"); cpt=0; }

	ARWin32Demo_SetConsoleCursor(0,0);  // Print at the top of the console
	printf("=================================\n");
	printf("Navdata for flight demonstrations\n");
	printf("=================================\n");

	printf("Control state : %s                                      \n",ctrl_state_str(nd->ctrl_state));
	printf("Battery level : %i/100          \n",nd->vbat_flying_percentage);
	printf("Orientation   : [Theta] %4.3f  [Phi] %4.3f  [Psi] %4.3f          \n",nd->theta,nd->phi,nd->psi);
	printf("Altitude      : %i          \n",nd->altitude);
	printf("Speed         : [vX] %4.3f  [vY] %4.3f  [vZ] %4.3f          \n",nd->vx,nd->vy,nd->vz);

	vp_os_mutex_lock(&qr_update_lock);
	for(i = 0; i < qr_count; i++) {
		printf("QR #%d: %s\n",i,string_buffers[i]);
	}
	vp_os_mutex_unlock(&qr_update_lock);

	ARWin32Demo_ReleaseConsole();
	/** ======= INSERT USER CODE HERE ========== **/

	return C_OK;
}
C_RESULT mjpeg_stage_decoding_transform(mjpeg_stage_decoding_config_t *cfg, vp_api_io_data_t *in, vp_api_io_data_t *out)
{
  bool_t got_image;

  vp_os_mutex_lock( &out->lock );

  if(out->status == VP_API_STATUS_INIT)
  {
    out->numBuffers   = 1;
    out->buffers      = (int8_t**)&cfg->picture;
    out->indexBuffer  = 0;
    out->lineSize     = 0;

    out->status = VP_API_STATUS_PROCESSING;
  }

  if( in->status == VP_API_STATUS_ENDED )
    out->status = in->status;

  // Several cases must be handled in this stage
  // 1st: Input buffer is too small to decode a complete picture
  // 2nd: Input buffer is big enough to decode 1 frame
  // 3rd: Input buffer is so big we can decode more than 1 frame

  if( out->status == VP_API_STATUS_PROCESSING )
  {
    // Reinit stream with new data
    stream_config( &cfg->stream, in->size, in->buffers[in->indexBuffer] );
  }

  if(out->status == VP_API_STATUS_PROCESSING || out->status == VP_API_STATUS_STILL_RUNNING)
  {
    // If out->size == 1 it means picture is ready
    out->size = 0;
    out->status = VP_API_STATUS_PROCESSING;

    mjpeg_decode( &cfg->mjpeg, cfg->picture, &cfg->stream, &got_image );

    if( got_image )
    {
      // we got one picture (handle case 1)
      out->size = 1;

      PRINT( "%d picture decoded\n", cfg->mjpeg.num_frames );

      // handle case 2 & 3
      if( FAILED(stream_is_empty( &cfg->stream )) )
      {
        // Some data are still in stream
        // Next time we run this stage we don't want this data to be lost
        // So flag it!
        out->status = VP_API_STATUS_STILL_RUNNING;
      }
    }
  }

  vp_os_mutex_unlock( &out->lock );

  return C_OK;
}
C_RESULT
buffer_to_picture_transform(buffer_to_picture_config_t *cfg, vp_api_io_data_t *in, vp_api_io_data_t *out)
{
  vp_os_mutex_lock(&out->lock);


  if(out->status == VP_API_STATUS_INIT)
  {
    out->numBuffers   = 1;
    out->size         = (ACQ_WIDTH*ACQ_HEIGHT*3)/2;
    out->buffers      = (int8_t **) cfg->picture;
    out->indexBuffer  = 0;
    out->status       = VP_API_STATUS_PROCESSING;
  }

  if(out->status == VP_API_STATUS_ENDED)
  {
  }

  if(out->status == VP_API_STATUS_PROCESSING)
  {
    vp_os_memcpy( cfg->picture->y_buf, in->buffers[0], ACQ_WIDTH*ACQ_HEIGHT );
    vp_os_memcpy( cfg->picture->cb_buf, in->buffers[0] + ACQ_WIDTH*ACQ_HEIGHT, ACQ_WIDTH*ACQ_HEIGHT/4 );
    vp_os_memcpy( cfg->picture->cr_buf, in->buffers[0] + ACQ_WIDTH*ACQ_HEIGHT + ACQ_WIDTH*ACQ_HEIGHT/4, ACQ_WIDTH*ACQ_HEIGHT/4 );
  }

  out->status = in->status;

  vp_os_mutex_unlock(&out->lock);

  return (SUCCESS);
}
void ardrone_at_set_vicon_data(struct timeval time, int32_t frame_number, float32_t latency, vector31_t global_translation, vector31_t global_rotation_euler)
{
	float_or_int_t _latency, _global_translation_x, _global_translation_y, _global_translation_z, _global_rotation_euler_x, _global_rotation_euler_y,
					_global_rotation_euler_z;

	if (!at_init)
	 return;

	_latency.f = latency;
	_global_translation_x.f = global_translation.x;
	_global_translation_y.f = global_translation.y;
	_global_translation_z.f = global_translation.z;
	_global_rotation_euler_x.f = global_rotation_euler.x;
	_global_rotation_euler_y.f = global_rotation_euler.y;
	_global_rotation_euler_z.f = global_rotation_euler.z;

	vp_os_mutex_lock(&at_mutex);
	ATcodec_Queue_Message_valist( ids.AT_MSG_ATCMD_VICON_EXE,
								++nb_sequence,
								(int)time.tv_sec,
								(int)time.tv_usec,
								(int)frame_number,
								_latency.i,
								_global_translation_x.i,
								_global_translation_y.i,
								_global_translation_z.i,
								_global_rotation_euler_x.i,
								_global_rotation_euler_y.i,
								_global_rotation_euler_z.i);

	vp_os_mutex_unlock(&at_mutex);
}
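/* The float_or_int_t fields above rely on type punning: each float32_t argument is
 * reinterpreted as the int32_t with the same 32-bit pattern and queued as an integer
 * AT-command parameter. The union is roughly the following; the exact SDK typedef
 * may differ. */
typedef union {
	float32_t f;   /* value written by the caller                 */
	int32_t   i;   /* same 32 bits, queued as an integer argument */
} float_or_int_t;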
C_RESULT
picture_to_buffer_transform(buffer_to_picture_config_t *cfg, vp_api_io_data_t *in, vp_api_io_data_t *out)
{
  vp_os_mutex_lock(&out->lock);

  if(out->status == VP_API_STATUS_INIT)
  {
    out->numBuffers   = 1;
    out->size         = (ACQ_WIDTH*ACQ_HEIGHT*3)/2;
    out->buffers      = (int8_t **) vp_os_malloc(out->size*sizeof(int8_t) + sizeof(int8_t*));
    out->indexBuffer  = 0;
    out->status       = VP_API_STATUS_PROCESSING;

    out->buffers[0]   = (int8_t *)(out->buffers+1);
  }

  if(out->status == VP_API_STATUS_PROCESSING)
  {
    if( in->size == 1 )
    {
      // got a picture
      vp_os_memcpy( out->buffers[0], cfg->picture->y_buf, ACQ_WIDTH*ACQ_HEIGHT );
      vp_os_memcpy( out->buffers[0] + ACQ_WIDTH*ACQ_HEIGHT, cfg->picture->cb_buf, ACQ_WIDTH*ACQ_HEIGHT/4);
      vp_os_memcpy( out->buffers[0] + ACQ_WIDTH*ACQ_HEIGHT + ACQ_WIDTH*ACQ_HEIGHT/4, cfg->picture->cr_buf, ACQ_WIDTH*ACQ_HEIGHT/4);
    }
  }

  // out->status = in->status;

  vp_os_mutex_unlock(&out->lock);

  return (SUCCESS);
}
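/* Both transforms above rely on the planar YUV 4:2:0 layout: a W x H frame is W*H
 * bytes of Y followed by two quarter-size chroma planes, i.e. (W*H*3)/2 bytes total.
 * The helper below just makes that offset arithmetic explicit; it is an illustration,
 * not project code. */
#include <stddef.h>

typedef struct {
  size_t y_offset, cb_offset, cr_offset, total_size;
} yuv420_layout_t;

static yuv420_layout_t yuv420_layout(size_t w, size_t h)
{
  yuv420_layout_t l;
  l.y_offset   = 0;
  l.cb_offset  = w * h;                 /* Cb starts right after the Y plane    */
  l.cr_offset  = w * h + (w * h) / 4;   /* Cr follows the quarter-size Cb plane */
  l.total_size = (w * h * 3) / 2;       /* e.g. 320x240 -> 115200 bytes         */
  return l;
}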
Example #24
static inline C_RESULT navdata_process( const navdata_unpacked_t* const navdata )
{
	if( writeToFile )
	{
		if( navdata_file == NULL )
		{
			ardrone_navdata_file_data data;
			data.filename = NULL;
            data.print_header = NULL;
            data.print = NULL;
			ardrone_navdata_file_init(NULL);
		}
		ardrone_navdata_file_process( navdata );
	}
	else
	{
		if(navdata_file != NULL)
			ardrone_navdata_file_release();			
	}
	
	vp_os_mutex_lock( &inst_nav_mutex);
	vp_os_memcpy(&inst_nav, navdata, sizeof(navdata_unpacked_t));
	vp_os_mutex_unlock( &inst_nav_mutex );

	return C_OK;
}
/**
 * @param flag 1: use pitch/roll; 0: ignore pitch/roll.
 * @param phi (roll) Floating-point value between -1 and +1.
 * @param theta (pitch) Floating-point value between -1 and +1.
 * @param gaz Floating-point value between -1 and +1.
 * @param yaw Floating-point value between -1 and +1.
 * @param magneto_psi Floating-point value between -1 and +1.
 * @param magneto_psi_accuracy Floating-point value between -1 and +1.
 *
 * @brief
 *
 * @DESCRIPTION
 *
 *******************************************************************/
void ardrone_at_set_progress_cmd_with_magneto(int32_t flag, float32_t phi, float32_t theta,float32_t gaz, float32_t yaw, float32_t magneto_psi,float32_t magneto_psi_accuracy) {

	float_or_int_t _phi, _theta, _gaz, _yaw, _magneto_psi, _magneto_psi_accuracy;

	if (!at_init)
		return;

	_phi.f = phi;
	_theta.f = theta;
	_gaz.f = gaz;
	_yaw.f = yaw;
	_magneto_psi.f = magneto_psi;
	_magneto_psi_accuracy.f = magneto_psi_accuracy;

	// Saving values to set them in navdata_file
	nd_iphone_flag = flag;
	nd_iphone_phi = phi;
	nd_iphone_theta = theta;
	nd_iphone_gaz = gaz;
	nd_iphone_yaw = yaw;
	nd_iphone_magneto_psi = magneto_psi;
	nd_iphone_magneto_psi_accuracy = magneto_psi_accuracy;

	//printf("Sent : psi_iphone = %.4f    acc = %.4f\n",magneto_psi,magneto_psi_accuracy);

	vp_os_mutex_lock(&at_mutex);
	ATcodec_Queue_Message_valist(ids.AT_MSG_ATCMD_PCMD_MAG_EXE, ++nb_sequence,
			flag, _phi.i, _theta.i, _gaz.i, _yaw.i, _magneto_psi.i, _magneto_psi_accuracy.i);
	vp_os_mutex_unlock(&at_mutex);
}
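/* Hedged usage example for ardrone_at_set_progress_cmd_with_magneto: flag=1 enables
 * pitch/roll as documented above, and the numeric values are placeholders inside the
 * documented [-1, +1] range. The function name hover_turn_right is hypothetical. */
void hover_turn_right(void)
{
	ardrone_at_set_progress_cmd_with_magneto(1,     /* flag: use pitch/roll    */
			0.0f,                                   /* phi (roll)              */
			0.0f,                                   /* theta (pitch)           */
			0.0f,                                   /* gaz (vertical speed)    */
			0.2f,                                   /* yaw: gentle turn right  */
			0.0f,                                   /* magneto_psi             */
			0.0f);                                  /* magneto_psi_accuracy    */
}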
void video_stage_resume_thread(void)
{
	vp_os_mutex_lock(&video_stage_mutex);
	vp_os_cond_signal(&video_stage_condition);
	video_stage_in_pause = FALSE;
	vp_os_mutex_unlock(&video_stage_mutex);	
}
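/* video_stage_resume_thread signals a condition that the paused video pipeline thread
 * is expected to be waiting on; the pause/wait side is not shown in this snippet.
 * The sketch below is an assumption reusing the same video_stage_mutex,
 * video_stage_condition and video_stage_in_pause globals. */
extern vp_os_mutex_t video_stage_mutex;
extern vp_os_cond_t  video_stage_condition;
extern bool_t        video_stage_in_pause;

void video_stage_suspend_thread(void)
{
	vp_os_mutex_lock(&video_stage_mutex);
	video_stage_in_pause = TRUE;
	vp_os_mutex_unlock(&video_stage_mutex);
}

/* Called from the video pipeline loop: blocks while the stage is paused. */
void video_stage_wait_if_paused(void)
{
	vp_os_mutex_lock(&video_stage_mutex);
	while (video_stage_in_pause == TRUE)
		vp_os_cond_wait(&video_stage_condition);    /* woken by video_stage_resume_thread */
	vp_os_mutex_unlock(&video_stage_mutex);
}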
Example #27
void
vp_os_thread_priority(THREAD_HANDLE handle, int32_t priority)
{
  vp_os_mutex_lock(&thread_mutex);

  pthread_data_t* freeSlot = findThread( handle );

  vp_os_mutex_unlock(&thread_mutex);

  if( freeSlot != NULL )
  {
    int rc, policy = SCHED_OTHER;
    struct sched_param param;

    vp_os_memset(&param, 0, sizeof(param));

    rc = pthread_getschedparam(handle, &policy, &param);

    if( policy == SCHED_OTHER)
    {
      policy = SCHED_FIFO;
    }
    param.sched_priority = 99 - priority;   // POSIX member name; __sched_priority is a glibc-internal alias
    rc = pthread_setschedparam(handle, policy, &param);
  }
}
bool flatTrimCallback(std_srvs::Empty::Request &request, std_srvs::Empty::Response &response)
{
    vp_os_mutex_lock(&twist_lock);
    ardrone_at_set_flat_trim();
    vp_os_mutex_unlock(&twist_lock);
    fprintf(stderr, "\nFlat Trim Set.\n");
    return true;
}
Example #29
C_RESULT navdata_custom_process(const navdata_unpacked_t * const pnd) {
	vp_os_mutex_lock(&navdata_lock);

	shared_raw_navdata = (navdata_unpacked_t*)pnd;

	vp_os_mutex_unlock(&navdata_lock);
	return C_OK;
}
C_RESULT ardrone_navdata_client_suspend(void)
{
    vp_os_mutex_lock(&navdata_client_mutex);
    navdata_thread_in_pause = TRUE;
    vp_os_mutex_unlock(&navdata_client_mutex);

    return C_OK;
}