Example #1
File: esmerge.c  Project: OpenQCam/qcam
/*
 * Merge the given elementary streams to the given output.
 *
 * Returns 0 if all goes well, 1 if something goes wrong.
 */
static int merge_with_h264(access_unit_context_p  video_context,
                           int                    audio_file,
                           TS_writer_p            output1,
 //                          TS_writer_p            output2,
                           int                    audio_type,
                           int                    audio_samples_per_frame,
                           int                    audio_sample_rate,
                           int                    video_frame_rate,
                           int                    pat_pmt_freq,
                           int                    quiet,
                           int                    verbose,
                           int                    debugging,
                           int                    serialno,
                           uint32_t               *rete)
{
  int  ii;
  int  err;
  uint32_t prog_pids[2];
  byte     prog_type[2];

  int video_frame_count = 0; //serialno/2*125;
  int audio_frame_count = 0;

  uint32_t video_pts_increment = 90000 / video_frame_rate;
  uint32_t audio_pts_increment = (90000 * audio_samples_per_frame) / audio_sample_rate;
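  // For illustration: at 30 fps the video PTS step is 90000/30 = 3000 ticks,
  // and 1024-sample audio frames at 48000 Hz give an audio PTS step of
  // (90000*1024)/48000 = 1920 ticks (PTS values count a 90 kHz clock).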
  uint64_t video_pts = 0;//900000*serialno/2;
  if (serialno != 0)
  {
    // Resume from the PTS saved by the previous segment (if any)
    FILE *pFilePTS = fopen("PTSCache.txt", "r");
    if (pFilePTS != NULL)
    {
      unsigned long long cached_pts = 0;
      if (fscanf(pFilePTS, "%llu", &cached_pts) == 1)
        video_pts = cached_pts;
      fclose(pFilePTS);
    }
  }
  uint64_t video_dts = 0;
  uint64_t audio_pts = 0;

  // The "actual" times are just for information, so we aren't too worried
  // about accuracy - thus floating point should be OK.
  double audio_time = 0.0;
  double video_time = 0.0;

  int got_video = TRUE;
  int got_audio = FALSE;

  if (verbose)
    fprint_msg("Video PTS increment %u\n"
               "Audio PTS increment %u\n",video_pts_increment,audio_pts_increment);

  // Start off our output with some null packets - this is in case the
  // reader needs some time to work out its byte alignment before it starts
  // looking for 0x47 bytes
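  // (Each TS packet is 188 bytes long and begins with the sync byte 0x47, so
  // a handful of null packets gives a downstream reader spare data to lock
  // onto packet boundaries before any real content arrives.)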
  for (ii=0; ii<8; ii++)
  {
    err = write_TS_null_packet(output1);
  //  err = write_TS_null_packet(output2);
    if (err) return 1;
  }

  // Then write some program data
  // @@@ later on we might want to repeat this every so often
  prog_pids[0] = DEFAULT_VIDEO_PID;
  prog_pids[1] = DEFAULT_AUDIO_PID;
  prog_type[0] = AVC_VIDEO_STREAM_TYPE;

  /*switch (audio_type)
  {
  case AUDIO_ADTS:
  case AUDIO_ADTS_MPEG2:
  case AUDIO_ADTS_MPEG4:
    prog_type[1] = ADTS_AUDIO_STREAM_TYPE;
    break;
  case AUDIO_L2:
    prog_type[1] = MPEG2_AUDIO_STREAM_TYPE;
    break;
  case AUDIO_AC3:
    prog_type[1] = ATSC_DOLBY_AUDIO_STREAM_TYPE;
    break;
  default:              // what else can we do?
    prog_type[1] = ADTS_AUDIO_STREAM_TYPE;
    break;
  }*/

  prog_type[1] = MPEG2_AUDIO_STREAM_TYPE;  // still passed to the PMT below, so it must be initialised
  err = write_TS_program_data2(output1,
                               1, // transport stream id
                               1, // program number
                               DEFAULT_PMT_PID,
                               DEFAULT_VIDEO_PID,  // PCR pid
                               2,prog_pids,prog_type);
/*  err = write_TS_program_data2(output2,
                               1, // transport stream id
                               1, // program number
                               DEFAULT_PMT_PID,
                               DEFAULT_VIDEO_PID,  // PCR pid
                               2,prog_pids,prog_type);*/
  if (err)
  {
    print_err("### Error writing out TS program data\n");
    return 1;
  }

  //uint32_t restrictTime=serialno/2*125+125;
  uint32_t restrictTime=300;
  while ((got_video || got_audio) && video_frame_count < restrictTime)
  {
    access_unit_p  access_unit;
    audio_frame_p  aframe;

    // Start with a video frame
    if (got_video)
    {
      err = get_next_h264_frame(video_context,quiet,debugging,&access_unit);
      //printf("index of access_unit %d\n", access_unit->index);
      if (err == EOF)
      {
        if (verbose)
          print_msg("EOF: no more video data\n");
        got_video = FALSE;
      }
      else if (err)
        return 1;
    }

    if (got_video)
    {
      video_time = video_frame_count / (double) video_frame_rate;
      video_pts += video_pts_increment;
      video_dts += video_pts_increment;
      video_frame_count ++;
      if (verbose)
        fprint_msg("\n%s video frame %5d (@ %.2fs, " LLU_FORMAT ")\n",
                   (is_I_or_IDR_frame(access_unit)?"**":"++"),
                   video_frame_count,video_time,video_pts);

      if (pat_pmt_freq && !(video_frame_count % pat_pmt_freq ))
        {
          printf("**********write PAT PMT**********\n");
          if (verbose)
            {
              fprint_msg("\nwriting PAT and PMT (frame = %d, freq = %d).. ",
                         video_frame_count, pat_pmt_freq);
            }
          //if(video_frame_count<restrictTime/2) {
          err = write_TS_program_data2(output1,
                                       1, // tsid
                                       1, // Program number
                                       DEFAULT_PMT_PID,
                                       DEFAULT_VIDEO_PID, // PCR pid
                                       2, prog_pids, prog_type);
         /* }else {
          err = write_TS_program_data2(output2,
                                       1, // tsid
                                       1, // Program number
                                       DEFAULT_PMT_PID,
                                       DEFAULT_VIDEO_PID, // PCR pid
                                       2, prog_pids, prog_type);
          }*/
        }


      // PCR counts frames as seen in the stream, so is easy
      // The presentation and decoding time for B frames (if we ever get any)
      // could reasonably be the same as the PCR.
      // The presentation and decoding time for I and IDR frames is unlikely to
      // be the same as the PCR (since frames come out later...), but it may
      // work to pretend the PTS is the PCR plus a delay time (for decoding)...

      // We could output the timing information every video frame,
      // but might as well only do it on index frames.
      if (!is_I_or_IDR_frame(access_unit)){
        //if(video_frame_count<restrictTime/2) {
        err = write_access_unit_as_TS_with_pts_dts(access_unit,video_context,
                                                   output1,DEFAULT_VIDEO_PID,
                                                   //TRUE,video_pts+45000,
                                                   TRUE,video_pts,
                                                   TRUE,video_pts);
        /*}else {
        err = write_access_unit_as_TS_with_pts_dts(access_unit,video_context,
                                                   output2,DEFAULT_VIDEO_PID,
                                                   //TRUE,video_pts+45000,
                                                   TRUE,video_pts,
                                                   TRUE,video_pts);
        }*/
      }else {
        //if(video_frame_count<restrictTime/2) {
        err = write_access_unit_as_TS_with_PCR(access_unit,video_context,
                                               output1,DEFAULT_VIDEO_PID,
                                               video_pts,0);
        /*}else {
        err = write_access_unit_as_TS_with_PCR(access_unit,video_context,
                                               output2,DEFAULT_VIDEO_PID,
                                               video_pts,0);
        }*/
      }

      if (err)
      {
        free_access_unit(&access_unit);
        print_err("### Error writing access unit (frame)\n");
        return 1;
      }
      free_access_unit(&access_unit);

      // Did the logical video stream end after the last access unit?
      if (video_context->end_of_stream)
      {
        if (verbose)
          print_msg("Found End-of-stream NAL unit\n");
        got_video = FALSE;
      }
    }

    if (!got_audio)
      continue;

    // Then output enough audio frames to make up to a similar time
    while (audio_pts < video_pts || !got_video)
    {
    //  err = read_next_audio_frame(audio_file,audio_type,&aframe);
      if (!got_video)
        err = EOF;
      if (err == EOF)
      {
        if (verbose)
          print_msg("EOF: no more audio data\n");
        got_audio = FALSE;
        break;
      }
      else if (err)
        return 1;

      audio_time = audio_frame_count *
        audio_samples_per_frame / (double)audio_sample_rate;
      audio_pts += audio_pts_increment;
      audio_frame_count ++;
      if (verbose)
        fprint_msg("** audio frame %5d (@ %.2fs, " LLU_FORMAT ")\n",
                   audio_frame_count,audio_time,audio_pts);

/*      err = write_ES_as_TS_PES_packet_with_pts_dts(output,aframe->data,
                                                   aframe->data_len,
                                                   DEFAULT_AUDIO_PID,
                                                   DEFAULT_AUDIO_STREAM_ID,
                                                   TRUE,audio_pts,
                                                   TRUE,audio_pts);*/
      if (err)
      {
        free_audio_frame(&aframe);
        print_err("### Error writing audio frame\n");
        return 1;
      }
      free_audio_frame(&aframe);
    }
  }

  printf("video_pts is %lld",video_pts);
  FILE *pFilePTS = fopen ("PTSCache.txt","w");
  fprintf(pFilePTS, "%lld", video_pts);
  fclose(pFilePTS);
  /*uint32_t video_elapsed = 100*video_frame_count/video_frame_rate;
  FILE *pFileLen = fopen ("videoLength.txt","w");
  fprintf(pFileLen, "%.2f", video_elapsed/100.0);
  fclose(pFileLen);*/
  *rete = 100*video_frame_count/video_frame_rate;

  if (!quiet)
  {
    uint32_t video_elapsed = 100*video_frame_count/video_frame_rate;
    uint32_t audio_elapsed = 100*audio_frame_count*
      audio_samples_per_frame/audio_sample_rate;
    fprint_msg("Read %d video frame%s, %.2fs elapsed (%dm %.2fs)\n",
               video_frame_count,(video_frame_count==1?"":"s"),
               video_elapsed/100.0,video_elapsed/6000,(video_elapsed%6000)/100.0);
    fprint_msg("Read %d audio frame%s, %.2fs elapsed (%dm %.2fs)\n",
               audio_frame_count,(audio_frame_count==1?"":"s"),
               audio_elapsed/100.0,audio_elapsed/6000,(audio_elapsed%6000)/100.0);
  }

  return 0;

}
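The segment hand-over above relies on a small side file, PTSCache.txt: the last video PTS of one run is written out at the end and read back at the start of the next run when serialno is non-zero. A minimal, self-contained sketch of that round-trip (the file name is taken from the code above; all other names and values are illustrative assumptions, not part of the project):

#include <stdio.h>
#include <inttypes.h>

/* Sketch of the PTS hand-over used by merge_with_h264: one segment saves its
 * final 90 kHz PTS to PTSCache.txt and the next segment resumes from it. */
static int save_pts(const char *path, uint64_t pts)
{
  FILE *f = fopen(path, "w");
  if (f == NULL) return 1;
  fprintf(f, "%" PRIu64, pts);
  fclose(f);
  return 0;
}

static uint64_t load_pts(const char *path)
{
  uint64_t pts = 0;
  FILE *f = fopen(path, "r");
  if (f != NULL)
  {
    if (fscanf(f, "%" SCNu64, &pts) != 1)
      pts = 0;                       // fall back to a zero start on parse failure
    fclose(f);
  }
  return pts;
}

int main(void)
{
  uint64_t end_of_segment = 300 * 3600;   // e.g. 300 frames at 25 fps (3600 ticks each)
  save_pts("PTSCache.txt", end_of_segment);
  printf("next segment starts at PTS %" PRIu64 "\n", load_pts("PTSCache.txt"));
  return 0;
}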
Example #2
/*
 * Merge the given elementary streams to the given output.
 *
 * Returns 0 if all goes well, 1 if something goes wrong.
 */
static int merge_with_avs(avs_context_p  video_context,
                          int            audio_file,
                          TS_writer_p    output,
                          int            audio_type,
                          int            audio_samples_per_frame,
                          int            audio_sample_rate,
                          double         video_frame_rate,
                          int            pat_pmt_freq,
                          int            quiet,
                          int            verbose,
                          int            debugging)
{
  int  ii;
  int  err;
  uint32_t prog_pids[2];
  byte     prog_type[2];

  int video_frame_count = 0;
  int audio_frame_count = 0;

  uint32_t video_pts_increment = (uint32_t)(90000.0 / video_frame_rate);
  //uint32_t audio_pts_increment = (90000 * audio_samples_per_frame) / audio_sample_rate;
  uint32_t audio_pts_increment = (uint32_t)(90000.0 / video_frame_rate);
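  // Note: here the audio PTS step follows the video frame rate rather than
  // audio_samples_per_frame, so the catch-up loop below pushes out roughly
  // one audio frame per video frame.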
  uint64_t video_pts = 0;
  uint64_t audio_pts = 0;

  // The "actual" times are just for information, so we aren't too worried
  // about accuracy - thus floating point should be OK.
  double audio_time = 0.0;
  double video_time = 0.0;

  int got_video = TRUE;
  int got_audio = TRUE;

  if (verbose)
    printf("Video PTS increment %u\n"
           "Audio PTS increment %u\n",video_pts_increment,audio_pts_increment);

  // Start off our output with some null packets - this is in case the
  // reader needs some time to work out its byte alignment before it starts
  // looking for 0x47 bytes
  for (ii=0; ii<8; ii++)
  {
    err = write_TS_null_packet(output);
    if (err) return 1;
  }

  // Then write some program data
  // @@@ later on we might want to repeat this every so often
  prog_pids[0] = DEFAULT_VIDEO_PID;
  prog_pids[1] = DEFAULT_AUDIO_PID;
  prog_type[0] = AVS_VIDEO_STREAM_TYPE;

  switch (audio_type)
  {
  case AUDIO_ADTS:
  case AUDIO_ADTS_MPEG2:
  case AUDIO_ADTS_MPEG4:
    prog_type[1] = ADTS_AUDIO_STREAM_TYPE;
    break;
  case AUDIO_L2:
    prog_type[1] = MPEG2_AUDIO_STREAM_TYPE;
    break;
  case AUDIO_AC3:
    prog_type[1] = ATSC_DOLBY_AUDIO_STREAM_TYPE;
    break;
  default:              // what else can we do?
    prog_type[1] = ADTS_AUDIO_STREAM_TYPE;
    break;
  }
  err = write_TS_program_data2(output,
                               1, // transport stream id
                               1, // program number
                               DEFAULT_PMT_PID,
                               DEFAULT_VIDEO_PID,  // PCR pid
                               2,prog_pids,prog_type);
  if (err)
  {
    fprintf(stderr,"### Error writing out TS program data\n");
    return 1;
  }

  while (got_video || got_audio)
  {
    avs_frame_p    avs_frame;
    audio_frame_p  aframe;

    // Start with a video frame
    if (got_video)
    {
      err = get_next_avs_frame(video_context,quiet,debugging,&avs_frame);
      if (err == EOF)
      {
        if (verbose)
          printf("EOF: no more video data\n");
        got_video = FALSE;
      }
      else if (err)
        return 1;

      if (got_video && !avs_frame->is_frame)
      {
        // It's not actually a *picture*
        // If we can, update the video frame rate to what we're told
        if (avs_frame->is_sequence_header)
          video_frame_rate = avs_frame_rate(avs_frame->frame_rate_code);
        // And output the data right away
        err = write_avs_frame_as_TS(output,avs_frame,DEFAULT_VIDEO_PID);
        if (err)
        {
          free_avs_frame(&avs_frame);
          fprintf(stderr,"### Error writing AVS frame (sequence header/end)\n");
          return 1;
        }
        continue;               // look for a "proper" frame
      }
    }

    if (got_video)
    {
      video_time = video_frame_count / video_frame_rate;
      video_pts += video_pts_increment;
      video_frame_count ++;
      if (verbose)
        printf("\n%s video frame %5d (@ %.2fs, " LLU_FORMAT ")\n",
               (is_avs_I_frame(avs_frame)?"**":"++"),
               video_frame_count,video_time,video_pts);

      if (pat_pmt_freq && !(video_frame_count % pat_pmt_freq))
        {
          if (verbose)
            {
              printf("\nwriting PAT and PMT (frame = %d, freq = %d).. ", 
                     video_frame_count, pat_pmt_freq);
            }

          err = write_TS_program_data2(output, 
                                       1, // tsid
                                       1, // Program number
                                       DEFAULT_PMT_PID,
                                       DEFAULT_VIDEO_PID, // PCR pid
                                       2, prog_pids, prog_type);
        }

      // PCR counts frames as seen in the stream, so is easy
      // The presentation and decoding time for B frames (if we ever get any)
      // could reasonably be the same as the PCR.
      // The presentation and decoding time for I and IDR frames is unlikely to
      // be the same as the PCR (since frames come out later...), but it may
      // work to pretend the PTS is the PCR plus a delay time (for decoding)...

      // We could output the timing information every video frame,
      // but might as well only do it on index frames.

      // (Actually, we *could* work out the proper PTS for I frames, but it's
      // easier just to add a delay to allow for progress through the decoder)
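      // (The fixed 30000-tick offset used below is a 1/3-second presentation
      //  delay on the 90 kHz clock, added to the PTS relative to the DTS.)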
      if (is_avs_I_frame(avs_frame))
        err = write_avs_frame_as_TS_with_pts_dts(avs_frame,
                                                 output,DEFAULT_VIDEO_PID,
                                                 TRUE,video_pts + 30000,
                                                 TRUE,video_pts);
      else
        err = write_avs_frame_as_TS_with_PCR(avs_frame,
                                             output,DEFAULT_VIDEO_PID,
                                             video_pts,0);
      if (err)
      {
        free_avs_frame(&avs_frame);
        fprintf(stderr,"### Error writing AVS frame\n");
        return 1;
      }
      free_avs_frame(&avs_frame);
    }

    if (!got_audio)
      continue;

    // Then output enough audio frames to make up to a similar time
    while (audio_pts < video_pts || !got_video)
    {
      err = read_next_audio_frame(audio_file,audio_type,&aframe);
      if (err == EOF)
      {
        if (verbose)
          printf("EOF: no more audio data\n");
        got_audio = FALSE;
        break;
      }
      else if (err)
        return 1;

      audio_time = audio_frame_count *
        audio_samples_per_frame / (double)audio_sample_rate;
      audio_pts += audio_pts_increment;
      audio_frame_count ++;
      if (verbose)
        printf("** audio frame %5d (@ %.2fs, " LLU_FORMAT ")\n",
               audio_frame_count,audio_time,audio_pts);

      err = write_ES_as_TS_PES_packet_with_pts_dts(output,aframe->data,
                                                   aframe->data_len,
                                                   DEFAULT_AUDIO_PID,
                                                   DEFAULT_AUDIO_STREAM_ID,
                                                   TRUE,audio_pts,
                                                   TRUE,audio_pts);
      if (err)
      {
        free_audio_frame(&aframe);
        fprintf(stderr,"### Error writing audio frame\n");
        return 1;
      }
      free_audio_frame(&aframe);
    }    
  }

  if (!quiet)
  {
    uint32_t video_elapsed = (uint32_t)((double)(100*video_frame_count)/video_frame_rate);
    uint32_t audio_elapsed = 100*audio_frame_count*
      audio_samples_per_frame/audio_sample_rate;
    printf("Read %d video frame%s, %.2fs elapsed (%dm %.2fs)\n",
           video_frame_count,(video_frame_count==1?"":"s"),
           video_elapsed/100.0,video_elapsed/6000,(video_elapsed%6000)/100.0);
    printf("Read %d audio frame%s, %.2fs elapsed (%dm %.2fs)\n",
           audio_frame_count,(audio_frame_count==1?"":"s"),
           audio_elapsed/100.0,audio_elapsed/6000,(audio_elapsed%6000)/100.0);
  }

  return 0;

}
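Both merge variants report elapsed time in hundredths of a second (100*frame_count/frame_rate) and then split it into minutes and seconds with /6000 and %6000. A short standalone check of that arithmetic (the frame count and rate below are illustrative values only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  // 4500 frames at 25 fps -> 18000 centiseconds = 180.00 s; 6000 centiseconds per minute
  int      video_frame_count = 4500;
  double   video_frame_rate  = 25.0;
  uint32_t video_elapsed = (uint32_t)((double)(100 * video_frame_count) / video_frame_rate);

  printf("Read %d video frames, %.2fs elapsed (%dm %.2fs)\n",
         video_frame_count,
         video_elapsed / 100.0,                 // 180.00 s
         (int)(video_elapsed / 6000),           // 3 minutes
         (video_elapsed % 6000) / 100.0);       // 0.00 s remainder
  return 0;
}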