Example #1
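Appends the incoming GeglAudioFragment samples to an internal audio track, then encodes as many codec-sized audio frames as the buffered samples allow and muxes the resulting packets into the output AVFormatContext. Note that it uses the older FFmpeg encoding API (st->codec, avcodec_encode_audio2 (), av_free_packet ()), which has since been deprecated in favour of avcodec_send_frame ()/avcodec_receive_packet () and av_packet_unref ().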
void
write_audio_frame (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  Priv *p = (Priv*)o->user_data;
  AVCodecContext *c = st->codec;
  int sample_count = 100000;
  static AVPacket  pkt = { 0 };

  if (pkt.size == 0)
  {
    av_init_packet (&pkt);
  }

  /* first we add incoming frames audio samples */
  {
    int i;
    int sample_count = gegl_audio_fragment_get_sample_count (o->audio);
    GeglAudioFragment *af = gegl_audio_fragment_new (gegl_audio_fragment_get_sample_rate (o->audio),
                                                     gegl_audio_fragment_get_channels (o->audio),
                                                     gegl_audio_fragment_get_channel_layout (o->audio),
                                                     sample_count);
    gegl_audio_fragment_set_sample_count (af, sample_count);
    for (i = 0; i < sample_count; i++)
      {
        af->data[0][i] = o->audio->data[0][i];
        af->data[1][i] = o->audio->data[1][i];
      }
    gegl_audio_fragment_set_pos (af, p->audio_pos);
    p->audio_pos += sample_count;
    p->audio_track = g_list_append (p->audio_track, af);
  }

  if (!(c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
    sample_count = c->frame_size;

  /* then we encode as much as we can in a loop using the codec frame size */

  while (p->audio_pos - p->audio_read_pos > sample_count)
  {
    long i;
    int ret;
    int got_packet = 0;
    AVFrame *frame = alloc_audio_frame (c->sample_fmt, c->channel_layout,
                                        c->sample_rate, sample_count);

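    /* fill the AVFrame with queued stereo samples, converting them to the
       encoder's sample format (packed or planar, float or integer) */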
    switch (c->sample_fmt) {
      case AV_SAMPLE_FMT_FLT:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[c->channels*i+0] = left;
          ((float*)frame->data[0])[c->channels*i+1] = right;
        }
        break;
      case AV_SAMPLE_FMT_FLTP:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[i] = left;
          ((float*)frame->data[1])[i] = right;
        }
        break;
      case AV_SAMPLE_FMT_S16:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[c->channels*i+0] = left * (1<<15);
          ((int16_t*)frame->data[0])[c->channels*i+1] = right * (1<<15);
        }
        break;
      case AV_SAMPLE_FMT_S32:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int32_t*)frame->data[0])[c->channels*i+0] = left * 2147483647.0;
          ((int32_t*)frame->data[0])[c->channels*i+1] = right * 2147483647.0;
        }
        break;
      case AV_SAMPLE_FMT_S32P:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int32_t*)frame->data[0])[i] = left * 2147483647.0;
          ((int32_t*)frame->data[1])[i] = right * 2147483647.0;
        }
        break;
      case AV_SAMPLE_FMT_S16P:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[i] = left * (1<<15);
          ((int16_t*)frame->data[1])[i] = right * (1<<15);
        }
        break;
      default:
        fprintf (stderr, "eeeek unhandled audio format\n");
        break;
    }
    frame->pts = p->next_apts;
    p->next_apts += sample_count;

    av_frame_make_writable (frame);
    ret = avcodec_encode_audio2 (c, &pkt, frame, &got_packet);

    if (ret < 0) {
      fprintf (stderr, "Error encoding audio frame: %s\n", av_err2str (ret));
    }

    if (got_packet)
    {
      av_packet_rescale_ts (&pkt, st->codec->time_base, st->time_base);
      pkt.stream_index = st->index;
      av_interleaved_write_frame (oc, &pkt);
      av_free_packet (&pkt);
    }

    av_frame_free (&frame);
    p->audio_read_pos += sample_count;
  }
}
Example #2
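Maintains a list of decoded GeglAudioFragment samples covering the requested presentation-time range (pts1, pts2): it seeks backwards and flushes the decoder when the request jumps well past the last decoded timestamp, then reads packets and decodes them with the (likewise deprecated) avcodec_decode_audio4 () API until the range is covered.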
/* maintain list of audio samples */
static int
decode_audio (GeglOperation *operation,
              gdouble        pts1,
              gdouble        pts2)
{
  GeglProperties *o = GEGL_PROPERTIES (operation);
  Priv       *p = (Priv*)o->user_data;

  pts1 -= 2.0;
  if (pts1 < 0.0)
    pts1 = 0.0;

  if (pts1 - 15.0 > p->prevapts)
    {
      int64_t seek_target = av_rescale_q (pts1 * AV_TIME_BASE, AV_TIME_BASE_Q,
                                          p->audio_stream->time_base);
      clear_audio_track (o);
      p->prevapts = 0.0;

      if (av_seek_frame (p->audio_fcontext, p->audio_stream->index, seek_target, AVSEEK_FLAG_BACKWARD) < 0)
        fprintf (stderr, "audio seek error!\n");
      else
        avcodec_flush_buffers (p->audio_stream->codec);
    }

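  /* read and decode packets until the decoded audio reaches pts2 */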
  while (p->prevapts <= pts2)
    {
      AVPacket  pkt = {0,};
      int       decoded_bytes;

      if (av_read_frame (p->audio_fcontext, &pkt) < 0)
         {
           av_free_packet (&pkt);
           return -1;
         }
      if (pkt.stream_index == p->audio_index && p->audio_stream)
        {
          static AVFrame frame;
          int got_frame;

          decoded_bytes = avcodec_decode_audio4 (p->audio_stream->codec,
                                                 &frame, &got_frame, &pkt);

          if (decoded_bytes < 0)
            {
              fprintf (stderr, "avcodec_decode_audio4 failed for %s\n",
                                o->path);
            }

          if (got_frame) {
            int samples_left = frame.nb_samples;
            int si = 0;

            while (samples_left)
            {
               int sample_count = samples_left;
               int channels = MIN(p->audio_stream->codec->channels, GEGL_MAX_AUDIO_CHANNELS);
               GeglAudioFragment *af = gegl_audio_fragment_new (o->audio_sample_rate, channels,
                                                                AV_CH_LAYOUT_STEREO, samples_left);
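               /* convert this frame's samples from the decoder's sample format
                  into GEGL's planar float representation */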
               switch (p->audio_stream->codec->sample_fmt)
               {
                 case AV_SAMPLE_FMT_FLT:
                   for (gint i = 0; i < sample_count; i++)
                     for (gint c = 0; c < channels; c++)
                       af->data[c][i] = ((float *)frame.data[0])[(i + si) * channels + c];
                   break;
                 case AV_SAMPLE_FMT_FLTP:
                   for (gint i = 0; i < sample_count; i++)
                     for (gint c = 0; c < channels; c++)
                       {
                         af->data[c][i] = ((float *)frame.data[c])[i + si];
                       }
                   break;
                 case AV_SAMPLE_FMT_S16:
                   for (gint i = 0; i < sample_count; i++)
                     for (gint c = 0; c < channels; c++)
                       af->data[c][i] = ((int16_t *)frame.data[0])[(i + si) * channels + c] / 32768.0;
                   break;
                 case AV_SAMPLE_FMT_S16P:
                   for (gint i = 0; i < sample_count; i++)
                     for (gint c = 0; c < channels; c++)
                       af->data[c][i] = ((int16_t *)frame.data[c])[i + si] / 32768.0;
                   break;
                 case AV_SAMPLE_FMT_S32:
                   for (gint i = 0; i < sample_count; i++)
                     for (gint c = 0; c < channels; c++)
                       af->data[c][i] = ((int32_t *)frame.data[0])[(i + si) * channels + c] / 2147483648.0;
                   break;
                 case AV_SAMPLE_FMT_S32P:
                   for (gint i = 0; i < sample_count; i++)
                     for (gint c = 0; c < channels; c++)
                       af->data[c][i] = ((int32_t *)frame.data[c])[i + si] / 2147483648.0;
                   break;
                 default:
                   g_warning ("unhandled sample format");
               }

               gegl_audio_fragment_set_sample_count (af, sample_count);
               gegl_audio_fragment_set_pos (af,
                   (long int)av_rescale_q (pkt.pts, p->audio_stream->time_base, AV_TIME_BASE_Q) *
                   o->audio_sample_rate / AV_TIME_BASE);

               p->audio_pos += sample_count;
               p->audio_track = g_list_append (p->audio_track, af);

               samples_left -= sample_count;
               si += sample_count;
            }

            p->prevapts = pkt.pts * av_q2d (p->audio_stream->time_base);
          }
        }
      av_free_packet (&pkt);
    }
  return 0;
}