Example #1
void load_defaults (void)
{
	while (1)
	{
		LCD_Clear();
		LCD_SetXY(0,0);
		LCD_WriteString("------NOTICE------");
		LCD_SetXY(0,1);
		LCD_WriteString("Are you sure to load defaults?Press OK to continue,or back to cancel");
		
		refresh();
		delay(LCD_DELAY);
		
		if(KeyReady==1)
		{
			KeyReady=0;
			switch(KeyValue)
			{
				case 18: // OK: restore factory defaults and persist them
					flash_save[0].u16_data[0]=800; // AGV_speed
					flash_save[0].u16_data[1]=800; // delta_x
					flash_save[0].u16_data[2]=800; // delta_y
					flash_save[0].u16_data[3]=200; // Stop_time
					Data_Save();
					init_parameter();
					return;
				case keyback:
					return;
			}
		}
	}
}
Example #2
/* Load prosody information */
void setProsFile( char *rel, char *filename )
{
	int error;

	strcpy( slot_Speak_stat, "PROCESSING" );
	if( prop_Speak_stat == AutoOutput )  inqSpeakStat();

	if( strcmp(rel,"=")==0 )  {
		strcpy( slot_Pros_file, filename );
		if( prop_ProsFile == AutoOutput )  inqProsFile();

		refresh();
		/* Read the various parameters into prosBuf */
		error = read_pros_file( filename );
		if( ! error )  {

			text_analysis( input_text );	/* text analysis */
			if( prop_Text_text == AutoOutput )  inqTextText();
			if( prop_Speak_text == AutoOutput )  inqSpeakText();

/*		parameter_generation();		*/

			init_parameter(); /* prepare for parameter generation */
			make_duration(); /* generate state durations from the plain text */
			modify_duration(); /* adjust durations (tag processing) */

			/* If the phoneme durations have been modified,
			   recompute the state durations. */
			update_duration();
			
			/* At this point the phoneme durations are set from the prosBuf data. */

			make_cumul_time(); /* compute the cumulative phoneme durations */
			modify_voice(); /* speaker switching, alpha-parameter changes (tag processing) */

			if( prop_Text_pho == AutoOutput )  inqTextPho();
			if( prop_Speak_pho == AutoOutput )  inqSpeakPho();
			if( prop_Text_dur == AutoOutput )  inqTextDur();
			if( prop_Speak_dur == AutoOutput )  inqSpeakDur();

			make_parameter(); /* run parameter generation */

			modify_f0(); /* adjust F0 (tag processing) */
			modify_power(); /* adjust power (tag processing) */

/*		parameter_generation();	 up to here	*/

			/* Update F0 and c0 on the generated parameters */
			error = set_f0_and_power( filename );
			if( ! error )  {
				do_synthesis();		/* generate the synthesized waveform */
			}
		}
	} else {
		unknown_com();
	}
	strcpy( slot_Speak_stat, "READY" );
	if( prop_Speak_stat == AutoOutput )  inqSpeakStat();
}
Example #3
void parameter_setting (void)
{
	while(1)
	{
		LCD_Clear();
		LCD_SetXY(0,0);
		LCD_WriteString("PARAMETER SETTING");	
		
		LCD_SetXY(0,1);
		LCD_WriteString("1.SPEED:");
		LCD_WriteDouble(AGV_speed,1);
		
		LCD_SetXY(0,2);
		LCD_WriteString("2.DELTA:");
		LCD_WriteDouble(delta_x,0);
		LCD_WriteString("/");
		LCD_WriteDouble(delta_y,0);
				
		LCD_SetXY(0,3);
		LCD_WriteString("3.STOPTIME:");
		LCD_WriteInt(Stop_time);
		
		refresh();
		
		if (KeyReady==1)
		{
			KeyReady=0;
			
			switch (KeyValue)
			{
				case 1:
					Input_FloatValue(&AGV_speed,"SPEED");
					flash_save[0].u16_data[0]=AGV_speed;
					break;
				
				case 2:
					Input_FloatValue(&delta_x,"DELTA_X");
					flash_save[0].u16_data[1]=delta_x;
					Input_FloatValue(&delta_y,"DELTA_Y");
					flash_save[0].u16_data[2]=delta_y;
					break;

				case 3:
					Input_IntValue(&Stop_time,"STOPTIME");
					flash_save[0].u16_data[3]=Stop_time;
					break;
				
				case 5:
					return;
				
			}
			Data_Save();
			init_parameter();
		}
		
		delay(LCD_DELAY);
		
	}
}
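Examples #1 and #3 both funnel their settings through flash_save[0].u16_data before calling Data_Save() and init_parameter(). The declaration of flash_save is not part of the listings; below is a minimal sketch of one plausible layout, assuming a union is used so the parameters can be addressed as 16-bit fields while the flash driver writes the same memory as raw words (flash_save_t and FLASH_SAVE_SIZE are assumptions, not taken from the source).

/* Hypothetical layout for the flash_save buffer used in Examples #1 and #3.
 * FLASH_SAVE_SIZE and the word view are assumed, not from the original code. */
#include <stdint.h>

#define FLASH_SAVE_SIZE 8 /* number of 16-bit slots; assumed value */

typedef union {
	uint16_t u16_data[FLASH_SAVE_SIZE];     /* [0]=AGV_speed, [1]=delta_x, [2]=delta_y, [3]=Stop_time */
	uint32_t u32_data[FLASH_SAVE_SIZE / 2]; /* word-sized view for the flash write routine */
} flash_save_t;

flash_save_t flash_save[1];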
Example #4
void parameter_generation()
{
/* Initial setup for speech synthesis */
	init_parameter();

/* Determine phoneme durations */
	make_duration();
/* Modify phoneme durations */
	modify_duration();
	make_cumul_time();
	modify_voice();

	if( prop_Text_pho == AutoOutput )  inqTextPho();
	if( prop_Speak_pho == AutoOutput )  inqSpeakPho();
	if( prop_Text_dur == AutoOutput )  inqTextDur();
	if( prop_Speak_dur == AutoOutput )  inqSpeakDur();

/* Parameter generation: F0 and the MLSA filter coefficients */
	make_parameter();

/* Modify F0 and the gain b(0) */
	modify_f0();
	modify_power();
}
Example #5
int main(void)
{

	int frame = 0, ret = 0, got_picture = 0, frameFinished = 0, videoStream = 0, check_yuv = 0;
	int frame_size = 0, bitrate = 0;
	int streamIdx = 0;
	unsigned i=0;
	enum AVMediaType mediaType;
	struct SwsContext *sws_ctx = NULL;
	AVStream *video_st = NULL;
	AVCodecContext    *pCodecCtx = NULL, *ctxEncode = NULL;
	AVFrame           *pFrame = NULL;
	AVPacket          input_pkt, output_pkt;

	check_yuv = check_file();

	// Register all formats and codecs
	av_register_all();

	if (open_input_file(check_yuv) < 0) exit(1);
	if (open_output_file() < 0) exit(1);

	init_parameter(&input_pkt, &output_pkt); // initialize the input and output packets
	pictureEncoded_init();

	// initialize SWS context for software scaling
	sws_ctx = sws_getContext(inFmtCtx->streams[streamIdx]->codec->width, inFmtCtx->streams[streamIdx]->codec->height, inFmtCtx->streams[streamIdx]->codec->pix_fmt, clip_width, clip_height, PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);

	while (av_read_frame(inFmtCtx, &input_pkt) >= 0) {

		streamIdx = input_pkt.stream_index;
		mediaType = inFmtCtx->streams[streamIdx]->codec->codec_type;

		av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %d\n", streamIdx);
		av_log(NULL, AV_LOG_DEBUG, "Going to reencode\n");

		if (mediaType == AVMEDIA_TYPE_VIDEO) {

			pFrame = av_frame_alloc();
			if (!pFrame) {
				ret = AVERROR(ENOMEM);
				break;
			}

			// Rescale packet timestamps from the stream time base to the codec time base
			av_packet_rescale_ts(&input_pkt, inFmtCtx->streams[streamIdx]->time_base, inFmtCtx->streams[streamIdx]->codec->time_base);

			// Decode the video frame (input_pkt -> pFrame)
			ret = avcodec_decode_video2(inFmtCtx->streams[streamIdx]->codec, pFrame, &frameFinished, &input_pkt);
			if (ret < 0) {
				av_frame_free(&pFrame);
				av_free_packet(&input_pkt);
				av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
				break;
			}

			if (frameFinished) {

				frame_num++;

				// Convert the decoded frame to the encoder's size and pixel format
				sws_scale(sws_ctx, (const uint8_t * const *)pFrame->data, pFrame->linesize, 0, clip_height, pictureEncoded->data, pictureEncoded->linesize);

				pictureEncoded->pts = av_frame_get_best_effort_timestamp(pFrame);

				// Encode: pictureEncoded -> output_pkt
				avcodec_encode_video2(ofmt_ctx->streams[streamIdx]->codec, &output_pkt, pictureEncoded, &got_picture);

				if (got_picture) {

					printf("Encoding %d \n", frame_use);
					frame_use++;

					av_packet_rescale_ts(&output_pkt, ofmt_ctx->streams[streamIdx]->codec->time_base, ofmt_ctx->streams[streamIdx]->time_base);

					ret = av_interleaved_write_frame(ofmt_ctx, &output_pkt);
					if (ret < 0) {
						fprintf(stderr, "Error muxing packet\n");
						break;
					}
				}
			}

			av_frame_free(&pFrame);
			av_free_packet(&output_pkt);
		}

		// Free the demuxed packet for every stream type, not only video
		av_free_packet(&input_pkt);
	}

	//flush encoders
	for (i = 0; i < inFmtCtx->nb_streams; i++)
	{
		if (inFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			
			ret = flush_encoder(i);
			if (ret < 0)
			{
				av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
				exit(1);
			}
		}
	}

	printf("\n\n total frame_num : %d , frame_encode:  %d \n", frame_num-1, frame_use-1);


	/* Write the trailer, if any. The trailer must be written before you
	* close the CodecContexts open when you wrote the header; otherwise
	* av_write_trailer() may try to use memory that was freed on
	* av_codec_close(). */
	av_write_trailer(ofmt_ctx);

	// Free the YUV frame
	av_frame_free(&pFrame);
	av_frame_free(&pictureEncoded);

	// Close the codecs
	//avcodec_close(pCodecCtx);

	// Close the video file
	avformat_close_input(&inFmtCtx);
	//avcodec_close(ctxEncode);

	return 0;
}
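Example #5 hands both AVPackets to init_parameter() before the demux loop, but its body is not part of the listing. With the legacy FFmpeg API used here (av_free_packet, avcodec_decode_video2), a minimal sketch would just be the standard packet initialization; everything beyond that is an assumption.

/* Hypothetical body for init_parameter() as called in Example #5.
 * Assumption: it only prepares the two packets for the legacy FFmpeg API. */
#include <libavcodec/avcodec.h>

static void init_parameter(AVPacket *input_pkt, AVPacket *output_pkt)
{
	av_init_packet(input_pkt);   /* set default fields */
	input_pkt->data = NULL;      /* the demuxer will allocate the payload */
	input_pkt->size = 0;

	av_init_packet(output_pkt);
	output_pkt->data = NULL;     /* the encoder will allocate the payload */
	output_pkt->size = 0;
}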
Example #6
void art_jni_onload(JNIEnv* env, int version) {
	registerNativeMethods(env, JNIHOOK_CLASS, gMethods, sizeof(gMethods) / sizeof(gMethods[0]));
	init_parameter(version);
}
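Example #6 registers a native-method table and then forwards the ART runtime version to init_parameter(). The listing does not show gMethods or registerNativeMethods; below is a minimal sketch of how they are commonly defined, assuming a single hooked method. JNIHOOK_CLASS, the table entry, and nativeHook() are placeholders, not taken from the source.

/* Hypothetical definitions backing Example #6. The class path, method
 * name/signature, and hook body are assumptions. */
#include <jni.h>

#define JNIHOOK_CLASS "com/example/JniHook" /* assumed class path */

static void nativeHook(JNIEnv *env, jclass clazz) {
	/* hook body would go here */
}

static JNINativeMethod gMethods[] = {
	{ "nativeHook", "()V", (void *)nativeHook },
};

static int registerNativeMethods(JNIEnv *env, const char *className,
                                 JNINativeMethod *methods, int numMethods) {
	jclass clazz = (*env)->FindClass(env, className);
	if (clazz == NULL) return JNI_FALSE;
	if ((*env)->RegisterNatives(env, clazz, methods, numMethods) < 0) return JNI_FALSE;
	return JNI_TRUE;
}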