示例#1
0
static void
finalize (GObject *object)
{
  GeglProperties *o = GEGL_PROPERTIES (object);

  /* Tear the muxer down only if process() ever created it. */
  if (o->user_data != NULL)
    {
      Priv *priv = (Priv *) o->user_data;

      /* Drain buffered frames, then emit the container trailer. */
      flush_audio (o);
      flush_video (o);
      av_write_trailer (priv->oc);

      /* Close codecs before the format context that owns the streams. */
      if (priv->video_st)
        close_video (priv, priv->oc, priv->video_st);
      if (priv->audio_st)
        close_audio (priv, priv->oc, priv->audio_st);

      avio_closep (&priv->oc->pb);
      avformat_free_context (priv->oc);

      g_free (o->user_data);
      o->user_data = NULL;
    }

  /* Chain up to the parent class' finalize. */
  G_OBJECT_CLASS (g_type_class_peek_parent (G_OBJECT_GET_CLASS (object)))->finalize (object);
}
示例#2
0
/* Shut the libretro core down: close the audio, voice and display
 * subsystems and release the screen bitmap. */
void retro_deinit(void)
{
   close_audio();
   close_voice();
   close_display();
   retro_destroybmp();
}
/*
 * Finalize a finished recording: write the container trailer, close the
 * codecs, free the streams, close the output file and free the muxing
 * context.  Every out-parameter is reset to NULL so callers cannot
 * accidentally reuse the freed objects.
 *
 * The trailer must be written before you close the CodecContexts open
 * when you wrote the header; otherwise av_write_trailer() may try to
 * use memory that was freed on av_codec_close().
 */
void finalizeAVFormatContext(AVFormatContext **out_oc, AVStream **out_video_st, AVFrame **out_picture, AVStream **out_audio_st, int16_t **out_samples){
	unsigned int i;

	/* Bail out: the original fell through after logging and then
	 * dereferenced the NULL out-parameters below. */
	if(!out_oc || !out_video_st || !out_picture || !out_audio_st || !out_samples){
		LOGE("finalizeAVFormatContext argument is null");
		return;
	}

	AVFormatContext *oc = *out_oc;
	AVStream *video_st = *out_video_st;
	AVFrame *picture = *out_picture;
	AVStream *audio_st = *out_audio_st;
	int16_t *samples = *out_samples;

	av_write_trailer(oc);

	/* Close each codec. */
	if (video_st)
		close_video(oc, video_st, picture);
	if (audio_st)
		close_audio(oc, audio_st, samples);

	/* Free the streams.  ('i' was never declared in the original.) */
	for (i = 0; i < oc->nb_streams; i++) {
		av_freep(&oc->streams[i]->codec);
		av_freep(&oc->streams[i]);
	}

	/* Close the output file unless the muxer writes nowhere.  The
	 * original tested an undeclared 'fmt'; the flags live on
	 * oc->oformat. */
	if (!(oc->oformat->flags & AVFMT_NOFILE))
		avio_close(oc->pb);

	/* Free the muxing context itself. */
	av_free(oc);

	*out_oc = NULL;
	*out_video_st = NULL;
	*out_picture = NULL;
	*out_audio_st = NULL;
	*out_samples = NULL;
}
示例#4
0
/*
 * Tear down the call state that application 'ap' holds for bchannel
 * 'bc', dispatching on the application's current mode.  Always
 * returns 0.
 */
static int
clear_call(iapplication_t *ap, bchannel_t *bc)
{
	bchannel_t	*peer = NULL;

	if (ap->mode == AP_MODE_INTERN_CALL) {
		/* Find the other leg of the internal call and detach bc. */
		if (ap->data1 == bc) {
			peer = ap->data2;
			ap->data1 = NULL;
		} else if (ap->data2 == bc) {
			peer = ap->data1;
			ap->data2= NULL;
		}
		bc->rbuf = NULL;
		if (bc->sbuf)
			bc->sbuf->rsem = &bc->work;
		if (peer) {
			/* Propagate the hangup cause to the surviving leg and
			 * ask the manager to hang it up. */
			peer->Flags |= FLG_BC_PROGRESS;
			peer->cause_loc = bc->cause_loc;
			peer->cause_val = bc->cause_val;
			peer->rbuf = NULL;
			if (peer->sbuf)
				peer->sbuf->rsem = &peer->work;
			ap->mgr->app_bc(ap->mgr, PR_APP_HANGUP, peer);
		} else {
			/* No peer left: the application itself is done. */
			free_application(ap);
		}
		if (bc)
			bc->app = NULL;
	} else if (ap->mode == AP_MODE_AUDIO_CALL) {
		/* Stop audio if it is running, then release the application. */
		if (ap->Flags & AP_FLG_AUDIO_ACTIV) {
			close_audio(ap, bc);
			ap->Flags &= ~AP_FLG_AUDIO_ACTIV;
			ap->vapp->flags &= ~AP_FLG_AUDIO_USED;
		}
		if (bc)
			bc->app = NULL;
		free_application(ap);
	} else if (ap->mode == AP_MODE_VOIP_OCALL) {
		/* Outgoing VoIP call: close the media path, release the
		 * signalling and go idle. */
		if (ap->Flags & AP_FLG_VOIP_ACTIV) {
			close_voip(ap, bc);
		}
		release_voip(ap, bc);
		ap->mode = AP_MODE_IDLE;
		free_application(ap);
	} else if (ap->mode == AP_MODE_VOIP_ICALL) {
		/* Incoming VoIP call: same teardown as the outgoing case. */
		if (ap->Flags & AP_FLG_VOIP_ACTIV) {
			close_voip(ap, bc);
		}
		release_voip(ap, bc);
		ap->mode = AP_MODE_IDLE;
		free_application(ap);
	}
	return(0);
}
示例#5
0
File: main.c  Project: sigvef/Sinth
/* Entry point: set up the mixer and player, load the MIDI file named on
 * the command line (or a bundled default), and run the SDL event loop
 * until the window is closed or Escape is pressed. */
int main(int argc, char**argv)
{
    int keep_running = 1;

    /* 'm', 'p' and 'event' are file-scope globals. */
    m = create_mixer();
    p = create_player(m);

    /* One optional argument: the MIDI file to play. */
    player_load(p, argc == 2 ? argv[1] : "res/silent_light.mid");

    /* (A block of manual mixer_add_instrument() calls used to live
     * here; it was commented out.) */

    //SDL_SetVideoMode( 640, 480, 32, SDL_SWSURFACE );
    // Open the audio
    open_audio();

    /* Pump events; quit on window close or Escape. */
    while (keep_running) {
        while (SDL_PollEvent(&event)) {
            if (event.type == SDL_QUIT) {
                keep_running = 0;
            } else if (event.type == SDL_KEYDOWN &&
                       event.key.keysym.sym == SDLK_ESCAPE) {
                keep_running = 0;
            }
        }
        SDL_Delay(1);
    }

    // Close audio
    close_audio();
    return 0;
}
示例#6
0
/* GDestroyNotify callback: release one tracked call entry and the
 * audio path associated with its modem. */
static void destroy_call(gpointer data)
{
	struct call_data *cd = (struct call_data *) data;

	g_print("call removed (%s)\n", cd->path);

	close_audio(cd->modem);

	g_free(cd->path);
	g_free(cd);
}
/* Stop playback and release every resource owned by ctx: signal the
 * worker threads to exit, join them, close the decoders, then free the
 * demuxer, pack file and the context itself. */
void webm_close(webm_context *ctx)
{
    quit_video = 1; /* tells the demux/video threads to wind down */
    thread_join(ctx->the_demux_thread);
    thread_join(ctx->the_video_thread);
    close_video(&(ctx->video_ctx));
    /* Audio is optional: join/close only if a track was opened. */
    if (ctx->the_audio_thread) thread_join(ctx->the_audio_thread);
    if (ctx->audio_track >= 0) close_audio(&(ctx->audio_ctx));
    nestegg_destroy(ctx->nestegg_ctx);
    closepackfile(ctx->packhandle);
    free(ctx);
}
//close and clean up the video, audio, and input systems on the platform
//  returns int - negative on failure, otherwise success
int platform_close(void) {
  int rc;

  /* Shut the subsystems down in order; stop at the first failure and
   * propagate its (negative) error code. */
  if ((rc = close_video()) < 0)
    return rc;
  if ((rc = close_audio()) < 0)
    return rc;
  if ((rc = close_input()) < 0)
    return rc;

  return 0;
}
示例#9
0
File: vmachine.c  Project: montjoie/o2em2
/*============================================================================*/
/* Main emulation loop: execute CPU instructions until key_done is set,
 * dropping into the (currently disabled) debugger when requested, then
 * shut the audio, voice and display subsystems down. */
void run()
{
	while(!key_done) {
		if (key_debug) {
			/* Switch to text mode and silence output for the debugger. */
			app_data.debug=1;
			set_textmode();
			mute_audio();
			mute_voice();
/*			debug(); TODO: re-enable this later; it was removed only to silence warnings */
			grmode();
			app_data.debug=0;
			o2em_init_keyboard();
			init_sound_stream();
		}
		cpu_exec();
	}
	close_audio();
	close_voice();
	close_display();
	/*o2em_clean_quit(EXIT_SUCCESS);*/
}
示例#10
0
/* Main emulation loop: run the CPU until key_done is set, entering the
 * debugger on demand, then close audio, voice and display. */
void run(void){
	while(!key_done) {

		if (key_debug) {
			/* Debugger round-trip: text mode + muted output, then
			 * restore graphics, keyboard and the sound stream. */
			app_data.debug=1;
			set_textmode();
			mute_audio();
			mute_voice();
			debug();
			grmode();
			app_data.debug=0;
			init_keyboard();
			init_sound_stream();
		}

		cpu_exec();

	}
	close_audio();
	close_voice();
	close_display();
}
示例#11
0
/* Finish an encoding job: write the trailer (or close the M2TS file),
 * close the codecs, free the streams, close the output and free the
 * muxing context. */
void shut_down(EncoderJob &jobSpec) {
	/* write the trailer, if any.  the trailer must be written
	* before you close the CodecContexts open when you wrote the
	* header; otherwise write_trailer may try to use memory that
	* was freed on av_codec_close() */

#ifdef NEW_M2TS
	// nothing
	jobSpec.p->CloseFile();
#else
	av_write_trailer(jobSpec.oc);
#endif

	/* close each codec */ // one of these (or both) are failing.
	// NOTE(review): the outbuf checks guard against closing a codec
	// whose buffers were never allocated -- confirm against open_video/open_audio.
	if (jobSpec.video_st && jobSpec.video_outbuf) close_video(jobSpec, jobSpec.oc, jobSpec.video_st);
	if (jobSpec.audio_st && jobSpec.audio_outbuf) close_audio(jobSpec, jobSpec.oc, jobSpec.audio_st);

	/* free the streams */
	for(unsigned int i = 0; i < jobSpec.oc->nb_streams; i++) {
		av_freep(&jobSpec.oc->streams[i]->codec);
		av_freep(&jobSpec.oc->streams[i]);
	}


#ifdef NEW_M2TS
	// nothing
#else
	if (!(jobSpec.fmt->flags & AVFMT_NOFILE)) {
		// close the output file
		url_fclose(jobSpec.oc->pb);
	}
#endif

	// free the stream
	av_free(jobSpec.oc);
	delete jobSpec.p;
}
示例#12
0
// Change the capture sample rate.  The device is closed and reopened
// so the new rate takes effect; 'audio' holds the handle/status that
// close_audio()/init_audio() return.
void AudioALSA::setSampleFreq(int f)
{
  sampfreq = f;
  audio = close_audio();
  audio = init_audio();
}
示例#13
0
/* Global teardown: stop audio, shut SDL down and persist the config.
 * NOTE(review): saveconfig() runs after SDL_Quit(); confirm it does not
 * rely on any SDL subsystem still being initialized. */
void st_shutdown(void)
{
  close_audio();
  SDL_Quit();
  saveconfig();
}
示例#14
0
// Destructor: release the audio device held by this instance.
soundLib::~soundLib()
{
	close_audio();
}
示例#15
0
File: flockBumps.c  Project: scrime/FoB
/*
 * flockBumps: read position records from a Flock of Birds tracker and
 * play a burst of noise on the sound card whenever a bird "bumps"
 * (a sharp x/z motion followed by a downward z move).
 *
 * Fixes vs. the original:
 *  - data->xset/xcount were read before ever being initialized (UB);
 *  - perror(__FUNCTION__ ": select") does not compile: __FUNCTION__ is
 *    not a string literal and cannot be concatenated;
 *  - data_of_birds could reach the 'terminate' label uninitialized and
 *    the malloc result was never checked or freed.
 */
int
main (int argc, char ** argv)
{
  char * device;
  int number_of_birds;
  double noise_level;
  flock_t flock = NULL;
  bird_data_t data_of_birds = NULL;  /* NULL so cleanup is always safe */
  int c;
  int count;
  int result;
  int flockfd = -1;
  int audiofd = -1;
  int maxfd;
  fd_set input_fd_set;
  fd_set output_fd_set;
  int play_noise;

  device = DEFAULT_FLOCK_DEVICE;
  number_of_birds = atoi (DEFAULT_NUMBER_OF_BIRDS);
  noise_level = atof (DEFAULT_NOISE_LEVEL);

  /* Parsing arguments. */
  opterr = 0;

  while ((c = getopt (argc, argv, "d:b:n:")) != -1)
    switch (c)
      {
      case 'd':
        device = optarg;
        break;
      case 'b':
        number_of_birds = atoi (optarg);
        break;
      case 'n':
        noise_level = atof (optarg);
        break;
      default:
        break;
      }

  if (argc - optind != 0)
    {
      usage (argv[0]);
      exit (EXIT_FAILURE);
    }

  flock = NULL;
  result = EXIT_SUCCESS;

  get_more_priority ();
  signal (SIGINT, handle_signal);

  fprintf (stderr, "Opening sound card.\n");

  /* NOTE(review): open_audio() failure is signalled by 0 here, while -1
   * is used as the "not open" sentinel below -- confirm its contract. */
  if ((audiofd = open_audio ()) == 0)
    {
      result = EXIT_FAILURE;
      goto terminate;
    }

  play_noise = 0;

  fprintf (stderr, "Preparing flock device: %s, number of birds: %d.\n",
           device, number_of_birds);

  if ((flock = flock_hl_open (device, number_of_birds,
                              flock_bird_record_mode_position_angles,
                              1, 1)) == NULL)
    {
      result = EXIT_FAILURE;
      goto terminate;
    }

  data_of_birds = (bird_data_t)
    malloc (number_of_birds * sizeof (struct bird_data_s));
  if (data_of_birds == NULL)
    {
      fprintf (stderr, "Out of memory.\n");
      result = EXIT_FAILURE;
      goto terminate;
    }

  flockfd = flock_get_file_descriptor (flock);
  maxfd = (audiofd < flockfd) ? flockfd : audiofd;
  FD_ZERO (&input_fd_set);
  FD_SET (flockfd, &input_fd_set);
  FD_ZERO (&output_fd_set);
  FD_SET (audiofd, &output_fd_set);

  fprintf (stderr, "Getting data... (Hit Ctrl-C to stop.)\n");

  count = 0;

  /* First values: seed each bird's state from its first record. */
  {
    bird_data_t data;
    int bird;

    if (flock_next_record (flock, 1) == 0)
      {
        fprintf (stderr, "Can't get response from flock.\n");
        result = EXIT_FAILURE;
        goto terminate;
      }

    count++;

    for (bird = 0, data = data_of_birds;
         bird < number_of_birds;
         bird++, data++)
      {
        memcpy (&data->rec,
                flock_get_record (flock, bird + 1),
                sizeof (data->rec));
        /* xset/xcount were left uninitialized in the original but are
         * read in the detection loop below. */
        data->xset = 0;
        data->xcount = 0;
        data->zset = 0;
        data->zcount = 0;
        data->maxdz = 0;
        data->lastbump = 0;
        data->bumpcount = 0;
      }
  }

  while (!terminate)
    {
      fd_set read_fd_set;
      fd_set write_fd_set;

      read_fd_set = input_fd_set;
      write_fd_set = output_fd_set;

      /* Block until new data is available from the flock or we can
         write to the sound card. */
      if (select (maxfd + 1, &read_fd_set, &write_fd_set, NULL, NULL) == -1)
        {
          perror ("main: select");
          result = EXIT_FAILURE;
          goto terminate;
        }

      if (FD_ISSET (flockfd, &read_fd_set))
        {
          bird_data_t data;
          int bird;

          if (flock_next_record (flock, 1) == 0)
            {
              result = EXIT_FAILURE;
              goto terminate;
            }

          count++;

          for (bird = 0, data = data_of_birds;
               bird < number_of_birds;
               bird++, data++)
            {
              double dx, dy, dz;

              /* Shifting previous record. */
              memcpy (&data->prev_rec,
                      &data->rec,
                      sizeof (data->rec));

              /* Copy bird's record. */
              memcpy (&data->rec,
                      flock_get_record (flock, bird + 1),
                      sizeof (data->rec));

              dx = data->rec.values.pa.x - data->prev_rec.values.pa.x;
              dy = data->rec.values.pa.y - data->prev_rec.values.pa.y;
              dz = data->rec.values.pa.z - data->prev_rec.values.pa.z;

              if (dx < xthreshold)
                {
                  data->xset = 1;
                  data->xcount = count;
                }

              if (dz > zthreshold)
                {
                  data->zset = 1;
                  data->zcount = count;
                  if (data->maxdz < dz)
                    data->maxdz = dz;
                }

              if (!(data->xset && data->zset))
                continue;

              /* Q: is this really useful? */
              if (((count - data->xcount) > after_threshold_delay) ||
                  ((count - data->zcount) > after_threshold_delay))
                {
                  data->xset = data->zset = 0;
                  data->maxdz = 0;
                  continue;
                }

              /* Proposition: delay could depend on maxdz. */
              if ((dz < 0) && ((count - data->lastbump) > after_bump_delay))
                {
                  fprintf (stderr, "bird %d bumps (%g).\n", bird + 1, data->maxdz);
                  data->xset = data->zset = 0;
                  data->maxdz = 0;
                  data->lastbump = count;
                  data->bumpcount++;

                  play_noise = 1;
                }
            }
        }

      if (FD_ISSET (audiofd, &write_fd_set))
        {
          double buffer[AUDIO_BLOCK_SIZE];

          memset (buffer, 0, sizeof (buffer));

          if (play_noise)
            {
              int i;

              play_noise = 0;
              for (i = 0; i < sizeof (buffer) / sizeof (*buffer); i++)
                buffer[i] = ((double) RAND_MAX - 2 * random ()) / 2.0;
            }

          write_audio (audiofd, buffer);
        }
    }

 terminate:

  fprintf (stderr, "Exiting.\n");

  if (flock != NULL)
    flock_hl_close (flock);

  if (audiofd != -1)
    close_audio (audiofd);

  free (data_of_birds);

  return result;
}
示例#16
0
/* libavformat muxing example: generate a synthetic audio and a
 * synthetic video stream, encode them, and mux both into the output
 * file named on the command line. */
int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    AVCodec *audio_codec, *video_codec;
    double audio_time, video_time;
    int flush, ret;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;

    if (fmt->video_codec != AV_CODEC_ID_NONE)
        video_st = add_stream(oc, &video_codec, fmt->video_codec);
    if (fmt->audio_codec != AV_CODEC_ID_NONE)
        audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_codec, video_st);
    if (audio_st)
        open_audio(oc, audio_codec, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    /* flush becomes 1 once both streams reach STREAM_DURATION; the
     * encoders are then drained until both signal EOF. */
    flush = 0;
    while ((video_st && !video_is_eof) || (audio_st && !audio_is_eof)) {
        /* Compute current audio and video time. */
        audio_time = (audio_st && !audio_is_eof) ? audio_st->pts.val * av_q2d(audio_st->time_base) : INFINITY;
        video_time = (video_st && !video_is_eof) ? video_st->pts.val * av_q2d(video_st->time_base) : INFINITY;

        if (!flush &&
            (!audio_st || audio_time >= STREAM_DURATION) &&
            (!video_st || video_time >= STREAM_DURATION)) {
            flush = 1;
        }

        /* write interleaved audio and video frames, whichever lags */
        if (audio_st && !audio_is_eof && audio_time <= video_time) {
            write_audio_frame(oc, audio_st, flush);
        } else if (video_st && !video_is_eof && video_time < audio_time) {
            write_video_frame(oc, video_st, flush);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
示例#17
0
File: audio.c  Project: BitchX/BitchX-SVN
/*
 * Play each MP3 file named in inFileStr through the audio device.
 * Fix vs. the original: every error path that hit 'continue' after a
 * successful fopen() leaked the FILE*; each now closes in_file first.
 * TODO: add some kind of error reporting here
 */
void play(char *inFileStr)
{
char *f;
long totalframes = 0;
long tseconds = 0;
struct AUDIO_HEADER header;
int bitrate, fs, g, cnt = 0;

	while ((f = new_next_arg(inFileStr, &inFileStr)))
	{
		if (!f || !*f)
			return;	
		if ((in_file=fopen(f,"r"))==NULL) 
		{
			if (!do_hook(MODULE_LIST, "AMP ERROR open %s", f))
				put_it("Could not open file: %s\n", f);
			continue;
		}



		filesize = file_size(f);
		initialise_globals();

		if ((g=gethdr(&header))!=0) 
		{
			report_header_error(g);
			fclose(in_file);	/* was leaked on this path */
			continue;
		}

		if (header.protection_bit==0) 
			getcrc();

		if (setup_audio(&header)!=0) 
		{
			yell("Cannot set up audio. Exiting");
			fclose(in_file);	/* was leaked on this path */
			continue;
		}
	
		/* NOTE(review): sizeof(header) is the size of the in-memory
		 * struct, not of the on-disk frame header -- confirm. */
		filesize -= sizeof(header);

		switch (header.layer)
		{
			case 1:
			{
				/* NOTE(review): layer 1 dispatching to layer3_frame()
				 * looks suspicious -- confirm against the decoder. */
				if (layer3_frame(&header,cnt)) 
				{
					yell(" error. blip.");
					fclose(in_file);	/* was leaked on this path */
					continue;
				}
				break;
			} 
			case 2:
			{
				if (layer2_frame(&header,cnt)) 
				{
					yell(" error. blip.");
					fclose(in_file);	/* was leaked on this path */
					continue;
				}
				break;
			}
			default:
				fclose(in_file);	/* was leaked on this path */
				continue;
		}

		bitrate=t_bitrate[header.ID][3-header.layer][header.bitrate_index];
	       	fs=t_sampling_frequency[header.ID][header.sampling_frequency];

	        if (header.ID) 
        		framesize=144000*bitrate/fs;
	       	else 
       			framesize=72000*bitrate/fs;



		totalframes = (filesize / (framesize + 1)) - 1;
		tseconds = (totalframes * 1152/
		    t_sampling_frequency[header.ID][header.sampling_frequency]);
                
		if (A_AUDIO_PLAY)
		{
			char *p = strrchr(f, '/');
			if (!p) p = f; else p++;
			if (!do_hook(MODULE_LIST, "AMP PLAY %lu %lu %s", tseconds, filesize, p))
				bitchsay("Playing: %s\n", p);
		}

		/*
		 * Print the trailing ID3v1 tag (last 128 bytes), if present,
		 * then rewind for decoding.
		 */
		if (!(fseek(in_file, 0, SEEK_END)))
		{
			char id3_tag[256];
			if (!fseek(in_file, -128, SEEK_END) && (fread(id3_tag,128, 1, in_file) == 1))
			{
				if (!strncmp(id3_tag, "TAG", 3))
					print_id3_tag(in_file, id3_tag);
			}
			fseek(in_file,0,SEEK_SET);
		}
		decodeMPEG(&header);
		do_hook(MODULE_LIST, "AMP CLOSE %s", f);
		close_audio();
		fclose(in_file);
	}
}
示例#18
0
/*
 * Command-line front end: parse options, then dispatch on exactly one
 * of the four commands -- play (-p), show info (-i), change channel
 * count in place (-M/-S), or decode to .wav/.raw (the default).
 */
int main(int argc, char *argv[])
{
	int c, i;
	char *fn, *fn2 = NULL;
	int cmd_decode = 0;
	int cmd_chg_channels = 0;
	int cmd_info = 0, cmd_play = 0;
	int cf_set_chans = 0;

	while ((c = getopt(argc, argv, "pdiMSqhrmsnvo:")) != -1) {
		switch (c) {
		case 'h':
			usage(0);
			break;
		case 'd':
			cmd_decode = 1;
			break;
		case 'i':
			cmd_info = 1;
			break;
		case 'p':
			cmd_play = 1;
			break;
		case 'M':
			cmd_chg_channels = 1;
			cf_set_chans = 1;
			break;
		case 'S':
			cmd_chg_channels = 1;
			cf_set_chans = 2;
			break;
		case 'q':
			cf_quiet = 1;
			break;
		case 'm':
			cf_force_chans = 1;
			break;
		case 's':
			cf_force_chans = 2;
			break;
		case 'r':
			cf_raw = 1;
			break;
		case 'n':
			cf_no_output = 1;
			break;
		case 'o':
			fn2 = optarg;
			break;
		case 'v':
			printf("%s\n", version);
			exit(0);
		default:
			fprintf(stderr, "bad arg: -%c\n", c);
			usage(1);
		}
	}
	/* Exactly one command may be selected. */
	i = cmd_chg_channels + cmd_info + cmd_decode + cmd_play;
	if (i != 1) {
		fprintf(stderr, "only one command at a time please\n");
		usage(1);
	}

	/* play file */
	if (cmd_play) {
#ifdef HAVE_AO
		ao_initialize();
		for (i = optind; i < argc; i++)
			play_file(argv[i]);
		close_audio();
		ao_shutdown();
		return 0;
#else
		fprintf(stderr, "For audio output, please compile with libao.\n");
		return 1;
#endif
	}

	/* show info */
	if (cmd_info) {
		for (i = optind; i < argc; i++)
			show_info(argv[i]);
		return 0;
	}
	
	/* channel changing */
	if (cmd_chg_channels) {
		for (i = optind; i < argc; i++)
			set_channels(argv[i], cf_set_chans);
		return 0;
	}
	
	/* regular converting: -o names the single output file, otherwise
	 * each input gets a derived .wav/.raw name */
	if (optind == argc)
		usage(1);
	if (fn2) {
		if (optind + 1 != argc)
			usage(1);
		fn = argv[optind];
		decode_file(fn, fn2);
	} else {
		while (optind < argc) {
			fn = argv[optind++];
			fn2 = makefn(fn, cf_raw ? ".raw" : ".wav");
			decode_file(fn, fn2);
			free(fn2);
		}
	}
	return 0;
}
示例#19
0
/*
 * MP3 player entry point: open the file, spawn a loader thread that
 * feeds a FIFO, decode frames and play them, with an ncurses-style
 * window showing tag/lyric data.
 *
 * Fix vs. the original: on fifo_open() failure the error code was
 * assigned AFTER the goto ("goto L1; ret = -ENOMEM;") and therefore
 * never executed, so the function returned an uninitialized/stale ret.
 */
int main(int argc, char *argv[])
{
	int ret;
	const char *url;
	struct sound_file_info *file;
	struct decode *dec;
	struct fifo *fifo;
	pthread_t tid;
	struct load_thread_arg arg;
	u8 *lrc;
	u8 *icon;
	size_t lrc_size;
	size_t icon_size;
	u8 mp3_buff[MP3_BUFF_SIZE];
	u8 raw_buff[RAW_BUFF_SIZE];
	int mp3_size, raw_size;
	struct mp3_param mp3_pm;
	struct audio_output *out;
	struct window_info *win_info;

	if (argc < 2) {
		fprintf(stderr, "Usage: %s PATH\n", argv[0]);
		return -EINVAL;
	}

	url = argv[1];

	file = sound_file_open(url);
	if (NULL == file) {
		fprintf(stderr, "Fail to open sound file \"%s\"!\n", url);
		return -ENODEV;
	}

	fifo = fifo_open();
	if (NULL == fifo) {
		ret = -ENOMEM;	/* was dead code after the goto */
		goto L1;
	}

	ret = parse_mp3_tag(file, &lrc, &lrc_size, &icon, &icon_size);
	if (ret < 0) {
		DPRINT("\n");
		goto L2;
	}

	DPRINT("mp3_start = %lu, mp3_end = %lu, "
			"lrc = %p, lrc_size = %lu, icon = %p, icon_size = %lu\n",
			file->mp3_data_start, file->mp3_data_end,
			lrc, lrc_size, icon, icon_size);

	arg.fifo = fifo;
	arg.file = file;
	ret = pthread_create(&tid, NULL, load_mp3_data_to_fifo, &arg);
	if (ret < 0) {
		DPRINT("\n");
		goto L2;
	}

	dec = decode_open(MPAUDEC); // fixme!
	if (NULL == dec) {
		ret = -ENODEV;
		goto L2;
	}

	/* Let the loader pre-fill a third of the FIFO before probing. */
	while (fifo->used < fifo->size / 3) usleep(1000);
	mp3_size = fifo_read(fifo, mp3_buff, sizeof(mp3_buff));

	get_mp3_param(dec, mp3_buff, mp3_size, &mp3_pm);

	win_info = window_init();
	win_info->icon = icon;
	win_info->icon_size = icon_size;
	win_info->lrc = lrc;
	win_info->lrc_size = lrc_size;
	/* Duration from data size and bit rate, split into s and µs. */
	win_info->total.tv_sec = (file->mp3_data_end - file->mp3_data_start) * 8 / mp3_pm.bit_rate;
	win_info->total.tv_usec = (file->mp3_data_end - file->mp3_data_start) * 8 * 1000000 / mp3_pm.bit_rate % 1000000;
	win_info->param = &mp3_pm;

	DPRINT("rate = %d, channels = %d, bps = %d, bitrate = %d\n",
			mp3_pm.rate, mp3_pm.channels, mp3_pm.bits_per_sample, mp3_pm.bit_rate);

	out = open_audio(AUDIO_ALSA, &mp3_pm);
	if (NULL == out) {
		ret = -ENODEV;
		goto L3;
	}

	/* Decode/play loop: drain the FIFO until the file is exhausted. */
	while (1) {
		if (file->mp3_data_end == file->offset && mp3_size == 0)
			break;

		if (mp3_size > 0) {
			ret = decode(dec, raw_buff, &raw_size, mp3_buff, mp3_size);
			mp3_size -= ret;
			memmove(mp3_buff, mp3_buff + ret, mp3_size);
		}

		play_frames(out, raw_buff, raw_size, &mp3_pm);

		ret = fifo_read(fifo, mp3_buff + mp3_size, sizeof(mp3_buff) - mp3_size);

		mp3_size += ret;
	}

	close_audio(out);
	window_destroy();
L3:
	decode_close(dec);
L2:
	fifo_close(fifo);
L1:
	sound_file_close(file);

	return ret;
}
示例#20
0
/*
 * libavformat muxing example (old API): generate synthetic audio and
 * video, encode them and mux into the file named on the command line.
 * Fix vs. the original: the return value of avformat_write_header()
 * was ignored; a failed header write now aborts instead of muxing into
 * a broken file.
 */
int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    double audio_pts, video_pts;
    int i;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* Autodetect the output format from the name. default is MPEG. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        return 1;
    }

    /* Allocate the output media context. */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        return 1;
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec);
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec);
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_st);
    if (audio_st)
        open_audio(oc, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* Write the stream header, if any (result was ignored before). */
    if (avformat_write_header(oc, NULL) < 0) {
        fprintf(stderr, "Error occurred when writing header to '%s'\n",
                filename);
        return 1;
    }

    for (;;) {
        /* Compute current audio and video time. */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num /
                        video_st->time_base.den;
        else
            video_pts = 0.0;

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* Free the streams. */
    for (i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    av_free(oc);

    return 0;
}
示例#21
0
// Destructor: close the audio device.  The original stored the result
// in an unused local 'ret'; a destructor cannot report it anyway.
AudioALSA::~AudioALSA()
{
  close_audio();
}
示例#22
0
// Switch capture to a different DSP device: close the current device,
// record the new name, then reopen the stream on it.
void AudioALSA::setDSPName(QString name)
{
  audio = close_audio();
  dsp_devicename = name;
  audio = init_audio();
}
示例#23
0
/*
 * Tracker module player entry point: parse environment and options,
 * open the audio device, then load and play each module named on the
 * command line.
 *
 * Fixes vs. the original:
 *  - 'song' was only assigned inside a switch with no default, so an
 *    unexpected pref.type would have read it uninitialized; it is now
 *    initialized each iteration and the switch has a default;
 *  - getopt_long_only() returns -1 at the end of options, not EOF
 *    (they coincide on most platforms, but -1 is the documented value).
 */
int
main (int argc, char **argv)
{

  int frequency, oversample, stereo;
  struct song *song;
  int index;
  int c;
  int opt, error_flag;


  /* supposed to make wildcard expansion */
  _wildcard(&argc,&argv);

  /* 2 = SIGINT, 3 = SIGQUIT */
  signal (2, nextsong);
  signal (3, goodbye);

  printf("Tracker1 V0.91 for the SBOS2 package\n");

  /* Read environment variables */
  frequency = read_env ("FREQUENCY", 0);
  oversample = read_env ("OVERSAMPLE", 1);
  transpose = read_env ("TRANSPOSE", 0);

  if (getenv ("MONO"))
    pref.stereo = 0;
  else if (getenv ("STEREO"))
    pref.stereo = 1;
  else
    pref.stereo = DEFAULT_CHANNELS - 1;
  pref.type = BOTH;
  pref.repeats = 1;
  pref.speed = 50;
  pref.tolerate = 2;
  pref.verbose = 0;
  set_mix (DEFAULT_MIX);        /* 0 = full stereo, 100 = mono */

  error_flag = 0;
  while ((opt = getopt_long_only (argc, argv, "", long_options, NULL)) != -1)
    {
      switch (opt)
        {
        case 'H':               /* help */
          error_flag++;
          break;
        case 'Q':               /* quiet */
          quiet++;
          break;
        case 'P':               /* abort on faults (be picky) */
          pref.tolerate = 0;
          break;
        case 'N':               /* new tracker type */
          pref.type = NEW;
          break;
        case 'O':               /* old tracker type */
          pref.type = OLD;
          break;
        case 'B':               /* both tracker types */
          pref.type = BOTH;
          break;
        case 'M':               /* mono */
          pref.stereo = 0;
          break;
        case 'S':               /* stereo */
          pref.stereo = 1;
          break;
        case 'V':
          pref.verbose = 1;
          break;
        case 'f':               /* frequency */
          frequency = atoi (optarg);
          break;
        case 'o':               /* oversampling */
          oversample = atoi (optarg);
          break;
        case 't':               /* transpose half-steps*/
          transpose = atoi (optarg);
          break;
        case 'r':               /* number of repeats */
          pref.repeats = atoi (optarg);
          break;
        case 's':               /* speed */
          pref.speed = atoi (optarg);
          break;
        case 'm':               /* % of channel mix.  100=mono */
          set_mix (atoi (optarg));
          break;
        default:                /* ??? */
          error_flag++;
          break;
        }
    }
  if (error_flag || !argv[optind])
    {
      fprintf (stderr, "Usage: %s " USAGE, argv[0]);
      exit(1);
    }

  frequency = open_audio (frequency);
  init_player (oversample, frequency);

  while (argv[optind])
    {
      song = NULL;  /* was read uninitialized if no case matched */
      switch (pref.type)
        {
        case BOTH:
          song = do_read_song (argv[optind], NEW);
          if (!song)
            song = do_read_song (argv[optind], OLD);
          break;
        case OLD:
          song = do_read_song (argv[optind], pref.type);
          break;
        case NEW:
          /* this is explicitly flagged as a new module,
           * so we don't need to look for a signature.
           */
          song = do_read_song (argv[optind], NEW_NO_CHECK);
          break;
        default:
          /* pref.type is only ever BOTH/OLD/NEW; skip the file. */
          break;
        }
      optind++;
      if (song == NULL)
        continue;

      dump_song (song);
      play_song (song, &pref);
      release_song (song);

      /* flush out anything remaining in DMA buffers */
      flush_DMA_buffers();

    }

  close_audio ();
  return 0;
}