Code example #1
File: player.c  Project: DC-SWAT/DreamShell
int aica_audio_write(char *buffer, int len) {
 
	/* If this stuff works, get rid of that bit left from the old output system. */
	if (len == -1) {
		return 0;
	} 
	
retry:

	mutex_lock(audio_mut);	
		
	if (sndptr + len > BUFFER_MAX_FILL) {
		
		mutex_unlock(audio_mut);
		
		if(!aud_set) {
			start_audio();
		}
		
		thd_pass();
		goto retry;
	}

	memcpy(tmpbuf + sndptr, buffer, len);
	sndptr += len;
	mutex_unlock(audio_mut);
	
	if(!aud_set && sndptr >= sbsize) {
		start_audio();
	}
	return 0;
	//return len;
}
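
For context, a minimal sketch of the drain side this writer pairs with (hypothetical; the callback name and signature are illustrative assumptions, and only tmpbuf, sndptr, and audio_mut come from the example above):

/* Hypothetical consumer sketch -- not DreamShell's actual driver callback.
 * It drains the shared buffer under the same mutex, which is what lets the
 * writer's retry loop above make progress once space is freed. */
static int audio_drain(char *out, int req) {
	mutex_lock(audio_mut);
	int n = (sndptr < req) ? sndptr : req;   /* take what is buffered */
	memcpy(out, tmpbuf, n);
	memmove(tmpbuf, tmpbuf + n, sndptr - n); /* shift the remainder down */
	sndptr -= n;
	mutex_unlock(audio_mut);
	return n;
}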
Code example #2
File: ogg_common.c  Project: Jheengut/gmerlin
int bg_ogg_encoder_start(void * data)
  {
  int i;
  bg_ogg_encoder_t * e = data;

  /* Start encoders and write identification headers */
  for(i = 0; i < e->num_video_streams; i++)
    {
    if(!start_video(e, i))
      return 0;
    }
  for(i = 0; i < e->num_audio_streams; i++)
    {
    if(!start_audio(e, i))
      return 0;
    }

  /* Write remaining header pages */
  for(i = 0; i < e->num_video_streams; i++)
    {
    bg_ogg_stream_t * s = &e->video_streams[i];
    if(bg_ogg_stream_flush(s, 1) < 0)
      return 0;
    }
  for(i = 0; i < e->num_audio_streams; i++)
    {
    bg_ogg_stream_t * s = &e->audio_streams[i];
    if(bg_ogg_stream_flush(s, 1) < 0)
      return 0;
    }
  
  return 1;
  }
Code example #3
  void new_data(const unsigned char* data, UInt32 numBytes)
  {
    MutexHolder mh(m_mutex);

    // Buffer at most 2 s of audio samples
    const size_t MAX_BUFFER = static_cast<size_t>(2*m_input_format.mBytesPerFrame*m_format.mSampleRate);
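    // e.g. 16-bit stereo (4 bytes per frame) at 44100 Hz: 2 * 4 * 44100 = 352800 bytes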

    assert(numBytes < MAX_BUFFER);

    if (m_buffer.size() + numBytes > MAX_BUFFER)
      {
	const size_t toRemove = my_min(m_buffer.size(),
				       m_buffer.size()+numBytes-MAX_BUFFER);

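	// Drop the oldest samples so the new block fits within MAX_BUFFER.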
	m_buffer.erase(m_buffer.begin(),
		       m_buffer.begin() + toRemove);
      }

    m_buffer.insert(m_buffer.end(),
		    data,
		    data + numBytes);

    assert(m_buffer.size() <= MAX_BUFFER);

    if (m_outputProcState == kOff &&
	m_buffer.size() >= m_min_buffer)
      {
	start_audio();
      }
  }
Code example #4
File: player.cpp  Project: Drenn1/GameYob
void play_samples( const short* samples, long count )
{
	if ( count )
	{
		start_audio();
		//if ( audio.sample_count() >= 1024 )
		//	scope.draw( samples, scope_width + 1, chan_count ); // assumes count >= scope_width
		audio.write( samples, count );
	}
	
	//poll_events();
}
Code example #5
File: main.c  Project: dsqmoore/dsra
int parse_data(uint8_t * buffer, size_t size)
{
	static struct params prm;
	static int prm_valid = 0;
	if (!size) return 1;
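	// Header packet: (re)parse the stream parameters and restart playback.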
	if (buffer[0] == DSRA_SIG)
	{
		if ((prm_valid = (read_header(buffer,size,&prm) == 0)))
			start_audio(&prm);
		return 1;
	}
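	// Data packet: queue samples only once a valid header has started audio.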
	if (buffer[0] == DSRA_SIG_DATA)
	{
		if (!audio_started)
			return 1;
		return queue_data(buffer,size);
	}
	return 1;
}
Code example #6
File: android.c  Project: tigerking/psone
static void *sound_thread_play(void *none)
{
#ifndef NOSOUND
#if 0
	if( enable_audio == 0 ) return NULL;
	pthread_mutex_init(&sound_mutex, NULL);
	pthread_cond_init(&sound_cond, NULL);
  start_audio();
  while(!sound_thread_exit && !bEndThread)
  {
  	pthread_mutex_lock(&sound_mutex);
  	pthread_cond_wait(&sound_cond, &sound_mutex);
  	pthread_mutex_unlock(&sound_mutex);
  	if(pSpuBuffer)
      render_audio((s16*)pSpuBuffer, sound_buffer_bytes);
  }
  sound_thread_exit = 0;
  pthread_cond_destroy(&sound_cond);
  pthread_mutex_destroy(&sound_mutex);
  end_audio();
#endif
#endif
  return NULL;
}
Code example #7
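/* Per JNI name mangling, this is the native implementation of the static
   method `int startAudio()` on class cn.edu.hust.buildingtalkback.jni.NativeInterface. */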
JNIEXPORT jint JNICALL Java_cn_edu_hust_buildingtalkback_jni_NativeInterface_startAudio(
		JNIEnv *env, jclass clazz)
{
	LOG_FUNC();
	return start_audio();
}
Code example #8
File: main.c  Project: ahmedlogic/vipcw-spectrogram
int main(int argc, char *argv[])
{
    if(debug)
        printf("IN: main:main()\n");

    int opt;
    char* d = DEFAULT_D;
    int p = DEFAULT_P;
    int r = DEFAULT_R;
    int s = DEFAULT_S;
    int f = DEFAULT_F;
    double t = DEFAULT_T;
    double w = DEFAULT_W;
    char a = DEFAULT_A;
    char c = DEFAULT_C;

    /* Read arguments */ 
    while(-1 != (opt = getopt(argc, argv, "d:p:r:s:cf:at:w:h"))) {
        switch(opt) {
        case 'd':
            d = optarg;
            break;
        case 'p':
            p = atoi(optarg);
            break;
        case 'r':
            r = atoi(optarg);
            break;
        case 's':
            s = atoi(optarg);
            break;
        case 'f':
            f = atoi(optarg);
            break;
        case 'c':
            c = 0;
            break;
        case 'a':
            a = 1;
            break;
        case 't':
            t = atof(optarg);
            break;
        case 'w':
            w = atof(optarg);
            break;
        case 'h':
            /* Fall through */
        default:
            print_help_and_exit();
            break;
        }
    }

    printf("Source Settings\n");
    printf("d: %s\n", d);
    printf("p: %d\n", p);
    printf("r: %d\n", r);
    printf("s: %d\n", s);
    printf("c: %s\n", c?"Left Channel":"Right Channel");
    printf("f: %d\n", f);
    printf("a: %s\n", a?"Off":"On");
    printf("t: %.3f\n", t);
    printf("w: %.3f\n", w);
    printf("\n");

    int sockfd, portno, newsockfd;
    struct sockaddr_in serv_addr, cli_addr;
    socklen_t clilen;
    // portno = 51717;
    portno = p;
    sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (sockfd < 0) 
       err("ERROR opening socket");
    //server = gethostbyname(argv[1]);
    bzero((char *) &serv_addr, sizeof(serv_addr));
    serv_addr.sin_family = AF_INET;
    serv_addr.sin_addr.s_addr = INADDR_ANY;
    serv_addr.sin_port = htons(portno);
    if (bind(sockfd, (struct sockaddr *) &serv_addr, sizeof(serv_addr)) < 0)
        err("ERROR on binding");
    printf("Listening for connections..\n");
    listen(sockfd, 5);
    clilen = sizeof(cli_addr);

    printf("Accepting connection..\n");
    newsockfd = accept(sockfd, (struct sockaddr *) &cli_addr, &clilen);
    printf("Accepted a connection..\n");
    if (newsockfd < 0)
            err("ERROR on accept");
    else 
        start_audio(newsockfd, d, r, s, f, a, t, w, c);

    return 0;
}
Code example #9
File: audio.cpp  Project: djyt/cannonball
void Audio::init()
{
    if (config.sound.enabled)
        start_audio();
}
Code example #10
File: TheoraPlayer.cpp  Project: binji/drod-nacl
bool CTheoraPlayer::playVideo(
//Plays specified OGG Theora file to screen surface.
//If screen == NULL, then this method will test that the file is playable
//by decoding it as fast as possible but not displaying anything.
//
//Returns: whether playback was successful
	CStretchyBuffer& buffer, SDL_Surface *screen,
	const int x, const int y) //[default=(0,0)]
{
	//init
	theora_p = vorbis_p = 0;
	startticks = 0;
	bool bSkippedLastFrame = false;

	// start up Ogg stream synchronization layer
	ogg_sync_init(&oy);

	// init supporting Vorbis structures needed in header parsing
	vorbis_info_init(&vi);
	vorbis_comment_init(&vc);

	// init supporting Theora structures needed in header parsing
	theora_comment_init(&tc);
	theora_info_init(&ti);
	if (!screen)
		ti.quick_p = 1;
	ti.quality = 63;

	if (!parseHeaders(buffer))
		return false;

	// force audio off
	vorbis_p = 0;

	// initialize decoders
	if (theora_p) {
		theora_decode_init(&td,&ti);
#if 0
		printf("Ogg logical stream %x is Theora %dx%d %.02f fps video\n"
			  "  Frame content is %dx%d with offset (%d,%d).\n",
			to.serialno,ti.width,ti.height, (double)ti.fps_numerator/ti.fps_denominator,
			ti.frame_width, ti.frame_height, ti.offset_x, ti.offset_y);
		//report_colorspace(&ti); //we're not using this info for anything
		dump_comments(&tc);
#endif
	} else {
		// tear down the partial theora setup
		theora_info_clear(&ti);
		theora_comment_clear(&tc);
	}
	if(vorbis_p) {
		vorbis_synthesis_init(&vd,&vi);
		vorbis_block_init(&vd,&vb);  
		printf("Ogg logical stream %lx is Vorbis %d channel %ld Hz audio.\n",
			vo.serialno,vi.channels,vi.rate);
	} else {
		// tear down the partial vorbis setup
		vorbis_info_clear(&vi);
		vorbis_comment_clear(&vc);
	}

	// open audio
	if (vorbis_p)
		open_audio();

	// open video
	SDL_Overlay *yuv_overlay = NULL;
	if (theora_p && screen)
		yuv_overlay = open_video(screen);
  
	// single frame video buffering
	ogg_packet op;
	ogg_int64_t  videobuf_granulepos=-1;
	double       videobuf_time=0;
	double last_frame_time = 0;
	bool hasdatatobuffer = true;

	// Main loop
	bool audiobuf_ready=false;
	bool videobuf_ready=false;
	bool playbackdone = (yuv_overlay == NULL);
	bool isPlaying = false;
	bool bBreakout = false;
	while (!playbackdone)
	{
		// break out on SDL quit event
		SDL_Event event;
		if (SDL_PollEvent(&event))
		{
			switch (event.type)
			{
				case SDL_QUIT: playbackdone = bBreakout = true; break;
				case SDL_KEYDOWN:
					if (event.key.keysym.sym == SDLK_ESCAPE)
						playbackdone = bBreakout = true;
				break;
				default: break;
			}
		}

		while (theora_p && !videobuf_ready) {
			// get one video packet...
			if (ogg_stream_packetout(&to,&op)>0)
			{
				theora_decode_packetin(&td,&op);

				videobuf_granulepos=td.granulepos;
				videobuf_time=theora_granule_time(&td,videobuf_granulepos);

#if 0
				//Without sound channels to synch to, don't need to worry about skipping frames when slow.
				// update the frame counter
				//++frameNum;

				// check if this frame time has not passed yet.
				//	If the frame is late we need to decode additional
				//	ones and keep looping, since theora at this stage
				//	needs to decode all frames.
				const double now=get_time();
				const double delay=videobuf_time-now;
				if(delay>=0.0){
					/// got a good frame, not late, ready to break out
					videobuf_ready=true;
				} else if(now-last_frame_time>=1.0) {
					// display at least one frame per second, regardless
					videobuf_ready=true;
				} else {
					//Need to catch up -- no time to display frame.
					if (bSkippedLastFrame) //only allow skipping one frame in a row
						videobuf_ready = true; //show anyway
					else
						bSkippedLastFrame = true;
					//printf("dropping frame %d (%.3fs behind)\n", frameNum, -delay);
				}
#else
				videobuf_ready = true; //show every frame
#endif
			} else {
				// need more data
				break;
			}
		}

		if (!hasdatatobuffer && !videobuf_ready && !audiobuf_ready) {
			isPlaying = false;
			playbackdone = true;
		}

		//If we're set for the next frame, sleep.
		//In other words, don't show frames too rapidly. 
		if((!theora_p || videobuf_ready) && 
			(!vorbis_p || audiobuf_ready))
		{
			const int ticks = (int)(1000*(videobuf_time-get_time()));
			if(ticks>0 && screen) //don't need to sleep if only testing file
				SDL_Delay(ticks);
		}
 
		if (videobuf_ready)
		{
			// time to write our cached frame
			if (screen)
			{
				const bool bRes = video_write(screen, yuv_overlay, x, y);
				if (!bRes) //couldn't display image
					playbackdone = bBreakout = true;
			}
			videobuf_ready=false;
			last_frame_time=get_time();
			bSkippedLastFrame = false;

			// if audio has not started (first frame) then start it
			if ((!isPlaying)&&(vorbis_p)) {
				start_audio();
				isPlaying = true;
			}
		}

		// HACK: always look for more audio data
		audiobuf_ready=false;

		// buffer compressed data every loop
		if (hasdatatobuffer) {
			hasdatatobuffer = buffer_data(&oy, buffer) > 0;
			if (!hasdatatobuffer) {
				//printf("Ogg buffering stopped, end of file reached.\n");
			}
		}
    
		if (ogg_sync_pageout(&oy,&og)>0)
			queue_page(&og);

	} // playbackdone

	// show number of video frames decoded
	//printf("\nFrames decoded: %d\n", frameNum);

	// deinit
	if (vorbis_p) {
		audio_close();

		ogg_stream_clear(&vo);
		vorbis_block_clear(&vb);
		vorbis_dsp_clear(&vd);
		vorbis_comment_clear(&vc);
		vorbis_info_clear(&vi); 
	}
	if (theora_p) {
		if (yuv_overlay)
			SDL_FreeYUVOverlay(yuv_overlay);

		ogg_stream_clear(&to);
		theora_clear(&td);
		theora_comment_clear(&tc);
		theora_info_clear(&ti);
	}
	ogg_sync_clear(&oy);

	//If broken out of testing, return false since entire file was not verified.
	return !bBreakout || screen != NULL;
}
Code example #11
File: player.c  Project: DC-SWAT/DreamShell
int ffplay(const char *filename, const char *force_format) {

	char errbuf[256];
	int r = 0;
	
	int frameFinished;
	AVPacket packet;
	int audio_buf_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
	int16_t *audio_buf = (int16_t *) malloc((AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2);
	
	if(!audio_buf) {
		ds_printf("DS_ERROR: No free memory\n");
		return -1;
	}
	
	memset(audio_buf, 0, (AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2);
	
	AVFormatContext *pFormatCtx = NULL;
	AVFrame *pFrame = NULL;
	AVCodecContext *pVideoCodecCtx = NULL, *pAudioCodecCtx = NULL;
	AVInputFormat *file_iformat = NULL;
	
	video_txr_t movie_txr;
	int videoStream = -1, audioStream = -1;
	
	maple_device_t *cont = NULL;
	cont_state_t *state = NULL;
	int pause = 0, done = 0;
	
	char fn[MAX_FN_LEN];
	sprintf(fn, "ds:%s", filename);

	memset(&movie_txr, 0, sizeof(movie_txr));
	
	if(!codecs_inited) {
		avcodec_register_all();
		avcodec_register(&mp1_decoder);
		avcodec_register(&mp2_decoder);
		avcodec_register(&mp3_decoder);
		avcodec_register(&vorbis_decoder);
		//avcodec_register(&mpeg4_decoder);
		codecs_inited = 1;
	}
	
	if(force_format)
		file_iformat = av_find_input_format(force_format);
	else
		file_iformat = NULL;


	// Open video file
	ds_printf("DS_PROCESS_FFMPEG: Opening file: %s\n", filename);
	if((r = av_open_input_file((AVFormatContext**)(&pFormatCtx), fn, file_iformat, /*FFM_PACKET_SIZE*/0, NULL)) != 0) {
		av_strerror(r, errbuf, 256);
		ds_printf("DS_ERROR_FFMPEG: %s\n", errbuf);
		free(audio_buf);
		return -1; // Couldn't open file
	}
	
	// Retrieve stream information
	ds_printf("DS_PROCESS_FFMPEG: Retrieve stream information...\n");
	if((r = av_find_stream_info(pFormatCtx)) < 0) {
		av_strerror(r, errbuf, 256);
		ds_printf("DS_ERROR_FFMPEG: %s\n", errbuf);
		av_close_input_file(pFormatCtx);
		free(audio_buf);
		return -1; // Couldn't find stream information
	}

	// Dump information about file onto standard error
	dump_format(pFormatCtx, 0, filename, 0);
	//thd_sleep(5000);
	
	pVideoCodecCtx = findDecoder(pFormatCtx, AVMEDIA_TYPE_VIDEO, &videoStream);
	pAudioCodecCtx = findDecoder(pFormatCtx, AVMEDIA_TYPE_AUDIO, &audioStream);
	
	//LockInput();
	
	if(pVideoCodecCtx) {
		
		//LockVideo();
		ShutdownVideoThread();
		SDL_DS_FreeScreenTexture(0);
		int format = 0;
		
		switch(pVideoCodecCtx->pix_fmt) {
			case PIX_FMT_YUV420P:
			case PIX_FMT_YUVJ420P:
			
				format = PVR_TXRFMT_YUV422;
#ifdef USE_HW_YUV				
				yuv_conv_init();
#endif
				break;
				
			case PIX_FMT_UYVY422:
			case PIX_FMT_YUVJ422P:
			
				format = PVR_TXRFMT_YUV422;
				break;
				
			default:
				format = PVR_TXRFMT_RGB565;
				break;
		}
		
		MakeVideoTexture(&movie_txr, pVideoCodecCtx->width, pVideoCodecCtx->height, format | PVR_TXRFMT_NONTWIDDLED, PVR_FILTER_BILINEAR);
		
#ifdef USE_HW_YUV				
		yuv_conv_setup(movie_txr.addr, PVR_YUV_MODE_MULTI, PVR_YUV_FORMAT_YUV420, movie_txr.width, movie_txr.height);
		pvr_dma_init();
#endif

	} else {
		ds_printf("DS_ERROR: Didn't find a video stream.\n");
	}
	
	
	if(pAudioCodecCtx) {
		
#ifdef USE_DIRECT_AUDIO
		audioinit(pAudioCodecCtx);
#else

		sprintf(fn, "%s/firmware/aica/ds_stream.drv", getenv("PATH"));
		
		if(snd_init_fw(fn) < 0) {
			goto exit_free;
		}
	
		if(aica_audio_open(pAudioCodecCtx->sample_rate, pAudioCodecCtx->channels, 8192) < 0) {
			goto exit_free;
		}
		//snd_cpu_clock(0x19);
		//snd_init_decoder(8192);
#endif
		
	} else {
		ds_printf("DS_ERROR: Didn't find a audio stream.\n");
	}
	
	//ds_printf("FORMAT: %d\n", pVideoCodecCtx->pix_fmt);

	// Allocate video frame
	pFrame = avcodec_alloc_frame();

	if(pFrame == NULL) {
		ds_printf("DS_ERROR: Can't alloc memory\n");
		goto exit_free;
	}
	
	int pressed = 0, framecnt = 0;
	uint32 fa = 0;
	
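	/* Optional handlers exported by other modules: each is looked up at
	   runtime and called inside an exception guard, so a broken export is
	   ignored rather than crashing playback. */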
	fa = GET_EXPORT_ADDR("ffplay_format_handler");
	
	if(fa > 0 && fa != 0xffffffff) {
		EXPT_GUARD_BEGIN;
			void (*ff_format_func)(AVFormatContext *, AVCodecContext *, AVCodecContext *) = 
				(void (*)(AVFormatContext *, AVCodecContext *, AVCodecContext *))fa;
			ff_format_func(pFormatCtx, pVideoCodecCtx, pAudioCodecCtx);
		EXPT_GUARD_CATCH;
		EXPT_GUARD_END;
	}
	
	fa = GET_EXPORT_ADDR("ffplay_frame_handler");
	void (*ff_frame_func)(AVFrame *) = NULL;
	
	if(fa > 0 && fa != 0xffffffff) {
		EXPT_GUARD_BEGIN;
			ff_frame_func = (void (*)(AVFrame *))fa;
			// Test call
			ff_frame_func(NULL);
		EXPT_GUARD_CATCH;
			ff_frame_func = NULL;
		EXPT_GUARD_END;
	}
	
	fa = GET_EXPORT_ADDR("ffplay_render_handler");
	
	if(fa > 0 && fa != 0xffffffff) {
		EXPT_GUARD_BEGIN;
			movie_txr.render_cb = (void (*)(void *))fa;
			// Test call
			movie_txr.render_cb(NULL);
		EXPT_GUARD_CATCH;
			movie_txr.render_cb = NULL;
		EXPT_GUARD_END;
	}
	
	while(av_read_frame(pFormatCtx, &packet) >= 0 && !done) {
		
		do {
			if(ff_frame_func) 
				ff_frame_func(pFrame);
					
			cont = maple_enum_type(0, MAPLE_FUNC_CONTROLLER);
			framecnt++;

			if(cont) {
				state = (cont_state_t *)maple_dev_status(cont);
				
				if (!state) {
					break;
				}
				if (state->buttons & CONT_START || state->buttons & CONT_B) {
					av_free_packet(&packet);
					done = 1;
				}
				if (state->buttons & CONT_A) {
					if((framecnt - pressed) > 10) {
						pause = pause ? 0 : 1;
						if(pause) {
#ifdef USE_DIRECT_AUDIO
							audio_end();
#else
							stop_audio();
#endif
						} else {
#ifndef USE_DIRECT_AUDIO
							start_audio();
#endif
						}
					}
					pressed = framecnt;
				}
				
				if(state->buttons & CONT_DPAD_LEFT) {
					//av_seek_frame(pFormatCtx, -1, timestamp * ( AV_TIME_BASE / 1000 ), AVSEEK_FLAG_BACKWARD);
				}
				
				if(state->buttons & CONT_DPAD_RIGHT) {
					//av_seek_frame(pFormatCtx, -1, timestamp * ( AV_TIME_BASE / 1000 ), AVSEEK_FLAG_BACKWARD);
				}
			}
			
			if(pause) thd_sleep(100);
			
		} while(pause);
		
		//printf("Packet: size: %d data: %02x%02x%02x pst: %d\n", packet.size, packet.data[0], packet.data[1], packet.data[2], pFrame->pts);
		
		// Is this a packet from the video stream?
		if(packet.stream_index == videoStream) {
			//printf("video\n");
			// Decode video frame
			if((r = avcodec_decode_video2(pVideoCodecCtx, pFrame, &frameFinished, &packet)) < 0) {
				//av_strerror(r, errbuf, 256);
				//printf("DS_ERROR_FFMPEG: %s\n", errbuf);
			} else {
				
				// Did we get a video frame?
				if(frameFinished && !pVideoCodecCtx->hurry_up) {
					RenderVideo(&movie_txr, pFrame, pVideoCodecCtx);
				}
			}

		} else if(packet.stream_index == audioStream) {
			//printf("audio\n");
			//snd_decode((uint8*)audio_buf, audio_buf_size, AICA_CODEC_MP3);
			
			audio_buf_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
			if((r = avcodec_decode_audio3(pAudioCodecCtx, audio_buf, &audio_buf_size, &packet)) < 0) {
				//av_strerror(r, errbuf, 256);
				//printf("DS_ERROR_FFMPEG: %s\n", errbuf);
				//continue;
			} else {
				
				if(audio_buf_size > 0 && !pAudioCodecCtx->hurry_up) {

#ifdef USE_DIRECT_AUDIO
					audio_write(pAudioCodecCtx, audio_buf, audio_buf_size);
#else
					aica_audio_write((char*)audio_buf, audio_buf_size);
#endif
				}
			}
		}

		// Free the packet that was allocated by av_read_frame
		av_free_packet(&packet);
	}
	
exit_free:

	if(pFrame)
		av_free(pFrame);
	
	if(pFormatCtx)
		av_close_input_file(pFormatCtx);
	
	if(audioStream > -1) {
		if(pAudioCodecCtx)
			avcodec_close(pAudioCodecCtx);
#ifdef USE_DIRECT_AUDIO
		audio_end();
#else
		aica_audio_close();
		sprintf(fn, "%s/firmware/aica/kos_stream.drv", getenv("PATH"));
		snd_init_fw(fn);
#endif
	}
	
	if(audio_buf) {
		free(audio_buf);
	}
	
	if(videoStream > -1) {
		if(pVideoCodecCtx)
			avcodec_close(pVideoCodecCtx);
		FreeVideoTexture(&movie_txr);
		SDL_DS_AllocScreenTexture(GetScreen());
		InitVideoThread();
		//UnlockVideo();
	}
	
	//UnlockInput();
	ProcessVideoEventsUpdate(NULL);
	return 0;
}
Code example #12
int main(int argc,char **argv)
{
	int ret;
	pthread_t view_id;
	pid_t view_pid=0,audio_pid=0;

	// Initialize the LCD framebuffer
	fb_init();
	// Initialize the touchscreen
	ts_init();
	// Show a startup logo
	show_jpglogo("./image/welcome.jpg");
	sleep(3);
	// Show the main control menu
	show_menu();


	ret = pthread_create(&view_id, NULL, picview_thread, NULL);
	if(ret != 0)	/* pthread_create returns an error number on failure, not -1 */
		goto pthread_create_err;



	while(1)
	{
		
		// Wait to get the touch coordinates
		pthread_mutex_lock(&mutex);
		printf(" main get lock!\n");
	
		m = get_motion();
		
		printf(" main put lock!\n");
		pthread_mutex_unlock(&mutex);
		usleep(1000);


		switch(m)
		{

			case button1:
				printf("button1 Prev\n");
				if(audio_pid != 0)
					kill(audio_pid,SIGKILL);
				audio_pid = start_audio(BACKWARD);
				//kill(view_pid,SIGUSR1);
				break;
				
			case button2:
				printf("button2 Next\n");
				if(audio_pid != 0)
					kill(audio_pid,SIGKILL);
				audio_pid = start_audio(FORWARD);
				//kill(view_pid,SIGUSR2);				
				break;
				
			case button3:
				printf("button3 Stop\n");
				if(audio_pid != 0)
					kill(audio_pid,SIGSTOP);
				//kill(view_pid,SIGSTOP);
				break;
				
			case button4:
				printf("button4 Continue\n");
				if(audio_pid != 0)
					kill(audio_pid,SIGCONT);
				//kill(view_pid,SIGCONT);
				break;

			case button5:
				printf("button5 Load\n");
				traverse_file_dir("/photo/dir");
				//view_pid = start_picview();
				signal(SIGCHLD,waitp);
				break;
				
		}


	}

	fb_uninit();
	ts_uninit();
	
	return 0;
	
pthread_create_err:
	printf("  pthread_create  err!\n");
	return -1;

}
Code example #13
File: player.cpp  Project: Drenn1/GameYob
void play_stereo_samples( const short* samples, long count )
{
	start_audio( 2 );
	play_samples( samples, count );
}
Code example #14
File: gap_sdl_audioplayer.c  Project: GNOME/gimp-gap
/* -------------------------------------
 * gap_sdl_cmd
 * -------------------------------------
 * Perform a simple audio_player command.
 * Note: some of the commands do not make sense
 *       in this SDL based implementation
 *       and are just dummies for compatibility with the
 *       API (that was designed based on the older wavplay client functions)
 */
int
gap_sdl_cmd(int cmd,int flags,GapSdlErrFunc erf) {
  static char *sdl_no_error_available = "";
  char *sdl_error;
  int rc = 1;
  AudioPlaybackThreadUserData  *usrPtr;


  sdl_error = sdl_no_error_available;
  usrPtr = getUsrPtr();
  if (usrPtr != NULL)
  {
    if(gap_debug)
    {
      printf("gap_sdl_cmd cmd:%d (%s) flags:%d SdlAudioStatus:%d\n"
            , cmd
           , msg_name(cmd)
           , flags
           , (int)SDL_GetAudioStatus()
           );
    }
    switch(cmd)
    {
      case GAP_SDL_CMD_Bye:
        stop_audio();
        close_files();
        rc = 0;  /* OK */
        break;
      case GAP_SDL_CMD_Play:
        rc = start_audio();
        sdl_error = SDL_GetError();  /* SDL_GetError returns a statically allocated message that must NOT be freed */
        break;
      case GAP_SDL_CMD_Pause:
        stop_audio();
        rc = 0;  /* OK */
        break;
      case GAP_SDL_CMD_Stop:
        stop_audio();
        close_files();
        rc = 0;  /* OK */
        break;
      case GAP_SDL_CMD_Restore:
        stop_audio();
        rc = 0;  /* OK */
        break;
      case GAP_SDL_CMD_SemReset:
        rc = 0;  /* OK */
        break;
    }

  }


  if ((rc != 0) && (erf != NULL ))
  {
    call_errfunc(erf, "%s: Sending cmd%d to audio_player failed (rc:%d) err:%s",
                        msg_name(cmd),
                        cmd,
                        rc,
                        sdl_error);
  }
  if(gap_debug)
  {
    printf("gap_sdl_cmd cmd:%d (%s) flags:%d retcode:%d\n", cmd, msg_name(cmd), flags, rc);
  }
  return rc;  /* Zero indicates success */

}  /* end gap_sdl_cmd */
Code example #15
File: splayer.c  Project: kazutomi/xiphqt
int main( int argc, char* argv[] ){

  int i,j;
  ogg_packet op;
  SDL_Event event;
  int hasdatatobuffer = 1;
  int playbackdone = 0;
  double now, delay, last_frame_time = 0;

  int frameNum=0;
  int skipNum=0;

  /* takes first argument as file to play */
  /* this works better on Windows and is more convenient
     for drag and drop ogg files over the .exe */

  if( argc != 2 )
  {
    usage();
    exit(0);
  }

  infile  = fopen( argv[1], "rb" );
  if( !infile ){
    fprintf(stderr, "Unable to open '%s'.\n", argv[1]);
    exit(1);
  }

  /* start up Ogg stream synchronization layer */
  ogg_sync_init(&oy);

  /* init supporting Vorbis structures needed in header parsing */
  vorbis_info_init(&vi);
  vorbis_comment_init(&vc);

  /* init supporting Theora structures needed in header parsing */
  theora_comment_init(&tc);
  theora_info_init(&ti);

  parseHeaders();

  /* force audio off */
  /* vorbis_p = 0; */

  /* initialize decoders */
  if(theora_p){
    theora_decode_init(&td,&ti);
    printf("Ogg logical stream %x is Theora %dx%d %.02f fps video\n"
           "  Frame content is %dx%d with offset (%d,%d).\n",
	   to.serialno,ti.width,ti.height, (double)ti.fps_numerator/ti.fps_denominator,
	   ti.frame_width, ti.frame_height, ti.offset_x, ti.offset_y);
    report_colorspace(&ti);
    dump_comments(&tc);
  }else{
    /* tear down the partial theora setup */
    theora_info_clear(&ti);
    theora_comment_clear(&tc);
  }
  if(vorbis_p){
    vorbis_synthesis_init(&vd,&vi);
    vorbis_block_init(&vd,&vb);  
    printf("Ogg logical stream %x is Vorbis %d channel %d Hz audio.\n",
	   vo.serialno,vi.channels,vi.rate);
  }else{
    /* tear down the partial vorbis setup */
    vorbis_info_clear(&vi);
    vorbis_comment_clear(&vc);
  }
  /* open audio */
  if(vorbis_p)open_audio();
  /* open video */
  if(theora_p)open_video();
  
  /* our main loop */
  while(!playbackdone){

    /* break out on SDL quit event */
    if ( SDL_PollEvent ( &event ) )
    {
      if ( event.type == SDL_QUIT ) break ;
    }

    /* get some audio data */
    while(vorbis_p && !audiobuf_ready){
      int ret;
      float **pcm;
      int count = 0;
      int maxBytesToWrite;

      /* is there pending audio? does it fit our circular buffer without blocking? */
      ret=vorbis_synthesis_pcmout(&vd,&pcm);
      maxBytesToWrite = GetAudioStreamWriteable(aOutStream);

      if (maxBytesToWrite<=FRAMES_PER_BUFFER){
        /* break out until there is a significant amount of
           data to avoid a series of small write operations. */
        break;
      }
      /* if there's pending, decoded audio, grab it */
      if((ret>0)&&(maxBytesToWrite>0)){
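	/* interleave the float PCM into 16-bit samples, clipping to the
	   signed 16-bit range */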

	for(i=0;i<ret && i<(maxBytesToWrite/vi.channels);i++)
	  for(j=0;j<vi.channels;j++){
	    int val=(int)(pcm[j][i]*32767.f);
	    if(val>32767)val=32767;
	    if(val<-32768)val=-32768;
	    samples[count]=val;
	    count++;
	  }
	if(WriteAudioStream( aOutStream, samples, i )) {
	  if(count==maxBytesToWrite){
	    audiobuf_ready=1;
	  }
	}
        vorbis_synthesis_read(&vd,i);

	if(vd.granulepos>=0)
	  audiobuf_granulepos=vd.granulepos-ret+i;
	else
	  audiobuf_granulepos+=i;

      }else{
	
	/* no pending audio; is there a pending packet to decode? */
	if(ogg_stream_packetout(&vo,&op)>0){
	  if(vorbis_synthesis(&vb,&op)==0) /* test for success! */
	   vorbis_synthesis_blockin(&vd,&vb);
	}else	/* we need more data; break out to suck in another page */
	  break;
      }
    } /* end audio cycle */

    while(theora_p && !videobuf_ready){
      /* get one video packet... */
      if(ogg_stream_packetout(&to,&op)>0){
      
        theora_decode_packetin(&td,&op);

	  videobuf_granulepos=td.granulepos;
	  videobuf_time=theora_granule_time(&td,videobuf_granulepos);
	  /* update the frame counter */
	  frameNum++;

	  /* check if this frame time has not passed yet.
	     If the frame is late we need to decode additional
	     ones and keep looping, since theora at this stage
	     needs to decode all frames */
	  now=get_time();
	  delay=videobuf_time-now;
	  if(delay>=0.0){
		/* got a good frame, not late, ready to break out */
		videobuf_ready=1;
	  }else if(now-last_frame_time>=1.0){
		/* display at least one frame per second, regardless */
		videobuf_ready=1;
	  }else{
		fprintf(stderr, "dropping frame %d (%.3fs behind)\n",
			frameNum, -delay);
	   }
      }else{
	/* need more data */
	break;
      }
    }

    if(!hasdatatobuffer && !videobuf_ready && !audiobuf_ready){
      isPlaying = 0;
      playbackdone = 1;
    }

    /* if we're set for the next frame, sleep */
    if((!theora_p || videobuf_ready) && 
       (!vorbis_p || audiobuf_ready)){
        int ticks = 1.0e3*(videobuf_time-get_time());
	if(ticks>0)
          SDL_Delay(ticks);
    }
 
    if(videobuf_ready){
      /* time to write our cached frame */
      video_write();
      videobuf_ready=0;
      last_frame_time=get_time();

      /* if audio has not started (first frame) then start it */
      if ((!isPlaying)&&(vorbis_p)){
        start_audio();
        isPlaying = 1;
      }
    }

    /* HACK: always look for more audio data */
    audiobuf_ready=0;

    /* buffer compressed data every loop */
    if(hasdatatobuffer){
      hasdatatobuffer=buffer_data(&oy);
      if(hasdatatobuffer==0){
        printf("Ogg buffering stopped, end of file reached.\n");
      }
    }
    
    if (ogg_sync_pageout(&oy,&og)>0){
      queue_page(&og);
    }

  } /* playbackdone */

  /* show number of video frames decoded */
  printf( "\n");
  printf( "Frames decoded: %d", frameNum );
  if(skipNum)
    printf( " (only %d shown)", frameNum-skipNum);
  printf( "\n" );

  /* tear it all down */
  fclose( infile );

  if(vorbis_p){
    audio_close();

    ogg_stream_clear(&vo);
    vorbis_block_clear(&vb);
    vorbis_dsp_clear(&vd);
    vorbis_comment_clear(&vc);
    vorbis_info_clear(&vi); 
  }
  if(theora_p){
    ogg_stream_clear(&to);
    theora_clear(&td);
    theora_comment_clear(&tc);
    theora_info_clear(&ti);
  }
  ogg_sync_clear(&oy);

  printf("\r                                                              "
	 "\nDone.\n");
	 
  SDL_Quit();

  return(0);

}