Example #1
bool ExportFFmpeg::EncodeAudioFrame(int16_t *pFrame, int frameSize)
{
   AVPacket pkt;
   int nBytesToWrite = 0;
   uint8_t *pRawSamples = NULL;
   int nAudioFrameSizeOut = default_frame_size * mEncAudioCodecCtx->channels * sizeof(int16_t);
   int ret;

   nBytesToWrite = frameSize;
   pRawSamples  = (uint8_t*)pFrame;
   av_fifo_realloc2(mEncAudioFifo, av_fifo_size(mEncAudioFifo) + frameSize);

   // Put the raw audio samples into the FIFO.
   ret = av_fifo_generic_write(mEncAudioFifo, pRawSamples, nBytesToWrite, NULL);

   wxASSERT(ret == nBytesToWrite);

   if (nAudioFrameSizeOut > mEncAudioFifoOutBufSiz) {
      wxLogError(wxT("FFmpeg : ERROR - nAudioFrameSizeOut too large."));
      return false;
   }

   // Read raw audio samples out of the FIFO in nAudioFrameSizeOut byte-sized groups to encode.
   while ((ret = av_fifo_size(mEncAudioFifo)) >= nAudioFrameSizeOut)
   {
      ret = av_fifo_generic_read(mEncAudioFifo, mEncAudioFifoOutBuf, nAudioFrameSizeOut, NULL);

      av_init_packet(&pkt);

      ret = encode_audio(mEncAudioCodecCtx,
         &pkt,                          // out
         (int16_t*)mEncAudioFifoOutBuf, // in
         default_frame_size);
      if (ret < 0)
      {
         wxLogError(wxT("FFmpeg : ERROR - Can't encode audio frame."));
         return false;
      }
      if (ret == 0)
         continue;

      // Rescale from the codec time_base to the AVStream time_base.
      if (pkt.pts != int64_t(AV_NOPTS_VALUE))
         pkt.pts = av_rescale_q(pkt.pts, mEncAudioCodecCtx->time_base, mEncAudioStream->time_base);
      if (pkt.dts != int64_t(AV_NOPTS_VALUE))
         pkt.dts = av_rescale_q(pkt.dts, mEncAudioCodecCtx->time_base, mEncAudioStream->time_base);
      //wxLogDebug(wxT("FFmpeg : (%d) Writing audio frame with PTS: %lld."), mEncAudioCodecCtx->frame_number, (long long) pkt.pts);

      pkt.stream_index = mEncAudioStream->index;

      // Write the encoded audio frame to the output file.
      if ((ret = av_interleaved_write_frame(mEncFormatCtx, &pkt)) < 0)
      {
         wxLogError(wxT("FFmpeg : ERROR - Failed to write audio frame to file."));
         return false;
      }
      av_free_packet(&pkt);
   }
   return true;
}
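
Examples #1 and #9 call a local encode_audio() helper whose definition is not shown. The following is a minimal sketch of a compatible helper, assuming the legacy avcodec_encode_audio2() API (FFmpeg < 4.0) and interleaved S16 input; the signature and the return convention (packet size on success, 0 when the encoder merely buffered the input, negative on error) are inferred from the call sites, not taken from the original project.

/* Hypothetical helper, inferred from the call sites in Examples #1 and #9. */
static int encode_audio(AVCodecContext *avctx, AVPacket *pkt,
                        int16_t *audio_samples, int nb_samples)
{
   int got_packet = 0;
   int ret;

   pkt->data = NULL;  /* let avcodec_encode_audio2() allocate the payload */
   pkt->size = 0;

   if (audio_samples != NULL)
   {
      AVFrame *frame = av_frame_alloc();
      if (frame == NULL)
         return AVERROR(ENOMEM);
      frame->nb_samples = nb_samples;
      frame->format     = avctx->sample_fmt;  /* assumed AV_SAMPLE_FMT_S16 */

      /* Wrap the caller's interleaved buffer in the frame without copying. */
      ret = avcodec_fill_audio_frame(frame, avctx->channels, avctx->sample_fmt,
               (const uint8_t*)audio_samples,
               nb_samples * avctx->channels * sizeof(int16_t), 0);
      if (ret >= 0)
         ret = avcodec_encode_audio2(avctx, pkt, frame, &got_packet);
      av_frame_free(&frame);
   }
   else
   {
      /* A NULL frame drains samples still buffered inside the encoder. */
      ret = avcodec_encode_audio2(avctx, pkt, NULL, &got_packet);
   }

   if (ret < 0)
      return ret;
   return got_packet ? pkt->size : 0;
}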
Example #2
void finish_audio(void)
{
    OSStatus theError;
    
    // flush buffer
    if (audioBuffer.used > 0)
        encode_audio(&audioBuffer);
    
    // End media editing
    theError = EndMediaEdits(audioMedia);
    if (theError)
        log_debug("quicktime_audio: error ending media edits");

    theError = ExtendMediaDecodeDurationToDisplayEndTime(audioMedia, NULL);
    if (theError)
        log_debug("quicktime_audio: error setting decode duration!");

    // Add media to track
    theError = InsertMediaIntoTrack(audioTrack, 0, 0, GetMediaDisplayDuration(audioMedia), fixed1);
    if (theError)
        log_debug("quicktime_audio: error inserting media into track!");

    audioTrack = NULL;
    audioMedia = NULL;
    
    DisposeHandle((Handle)soundDescriptionHandle);

    // free buffer
    if (audioBuffer.buffer != NULL) {
        free(audioBuffer.buffer);
        audioBuffer.buffer = NULL;
    }

    audio_ready = 0;
}
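
Example #2 never shows the audioBuffer it flushes. A plausible shape, inferred solely from the member accesses above (.used, .buffer) and the free() call; the names and types are guesses, not the original declarations.

/* Hypothetical state behind finish_audio(); inferred from usage. */
typedef struct {
    unsigned char *buffer;  /* malloc'd PCM bytes awaiting encoding */
    size_t         size;    /* capacity of buffer in bytes */
    size_t         used;    /* bytes currently filled; encode_audio() drains it */
} audio_buffer_t;

static audio_buffer_t audioBuffer;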
Example #3
static void ffmpeg_flush_audio(ffmpeg_t *handle, void *audio_buf,
      size_t audio_buf_size)
{
   size_t avail = fifo_read_avail(handle->audio_fifo);

   if (avail)
   {
      struct ffemu_audio_data aud = {0};

      fifo_read(handle->audio_fifo, audio_buf, avail);

      aud.frames = avail / (sizeof(int16_t) * handle->params.channels);
      aud.data = audio_buf;

      ffmpeg_push_audio_thread(handle, &aud, false);
   }

   for (;;)
   {
      AVPacket pkt;
      if (!encode_audio(handle, &pkt, true) || !pkt.size ||
            av_interleaved_write_frame(handle->muxer.ctx, &pkt) < 0)
         break;
   }
}
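
Examples #3, #6, and #7 share an encode_audio(handle, &pkt, dry) helper that is not shown. Below is a sketch of a compatible implementation, pieced together from the fields those examples touch and the legacy avcodec_encode_audio2() API; every detail, including the pts handling, is an assumption rather than the project's actual code.

static bool encode_audio(ffmpeg_t *handle, AVPacket *pkt, bool dry)
{
   AVFrame *frame = NULL;
   int got_packet = 0;

   av_init_packet(pkt);
   pkt->data = NULL;  /* let the encoder allocate the payload */
   pkt->size = 0;

   if (!dry)
   {
      frame = av_frame_alloc();
      if (!frame)
         return false;

      frame->nb_samples = handle->audio.frames_in_buffer;
      frame->format     = handle->audio.codec->sample_fmt;
      frame->pts        = handle->audio.frame_cnt; /* assumes a 1/sample_rate time base */

      if (avcodec_fill_audio_frame(frame, handle->params.channels,
            handle->audio.codec->sample_fmt, handle->audio.buffer,
            handle->audio.frames_in_buffer * handle->params.channels *
            handle->audio.sample_size, 0) < 0)
      {
         av_frame_free(&frame);
         return false;
      }
   }

   /* frame == NULL (the "dry" path) drains packets buffered in the codec. */
   if (avcodec_encode_audio2(handle->audio.codec, pkt, frame, &got_packet) < 0)
   {
      av_frame_free(&frame);
      return false;
   }
   if (!got_packet)
      pkt->size = 0;  /* callers test pkt.size to detect "no output yet" */

   av_frame_free(&frame);
   return true;
}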
Example #4
void CConferenceInfo::sendAudioFrame(const CLockMap<CConferenceMember*, CMemberData::pointer> & audios)
{
	if (audios.empty()) return;

	boost::mutex::scoped_lock lock(m_members.mutex());
	CLockMap<void*, CConferenceMember::pointer>::iterator iter;
	for (iter=m_members.begin(); iter!= m_members.end(); iter++)
	{
		// skip members that don't receive audio
		if (!iter->second->getAudioRecv())
			continue;

		int timestamp = 0;
		if (mix_member_frame(iter->second, audios, timestamp))
		{
			DoRtpHandler::pointer pDoRtpHandler = iter->second->getAudioHandler();

			long sendSize = 0;
			unsigned char * tempBuffer = 0;
			encode_audio(pDoRtpHandler->doGetMediaType(), iter->second->audioBuffer(), &tempBuffer, sendSize);

			pDoRtpHandler->doSendData(tempBuffer, sendSize, timestamp);
			delete[] tempBuffer;
		}
	}

}
Example #5
File: ffmpeg.c Project: wedesoft/aiscm
SCM ffmpeg_encode_audio(SCM scm_self)
{
  struct ffmpeg_t *self = get_self(scm_self);
  AVCodecContext *codec = audio_codec_ctx(self);
  AVFrame *audio_frame = self->audio_target_frame;
  audio_frame->pts = av_rescale_q(self->samples_count, (AVRational){1, codec->sample_rate}, codec->time_base);
  self->samples_count += audio_frame->nb_samples;

  encode_audio(self, self->audio_target_frame);
  return SCM_UNSPECIFIED;
}
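
The pts assignment above converts a running sample count into the codec's time base. As a self-contained illustration (not project code):

#include <libavutil/rational.h>
#include <libavutil/mathematics.h>

/* A count of samples at sample_rate Hz, expressed in time_base units.
 * With time_base = 1/sample_rate this is the identity; with, say,
 * time_base = 1/90000 and sample_rate = 44100, 44100 samples (one second)
 * map to a pts of 90000. */
static int64_t samples_to_pts(int64_t samples_count, int sample_rate,
                              AVRational time_base)
{
  return av_rescale_q(samples_count, (AVRational){1, sample_rate}, time_base);
}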
Example #6
static bool ffmpeg_push_audio_thread(ffmpeg_t *handle,
      struct ffemu_audio_data *aud, bool require_block)
{
   size_t written_frames = 0;

   ffmpeg_audio_resample(handle, aud);

   while (written_frames < aud->frames)
   {
      AVPacket pkt;
      size_t can_write    = handle->audio.codec->frame_size - 
         handle->audio.frames_in_buffer;
      size_t write_left   = aud->frames - written_frames;
      size_t write_frames = write_left > can_write ? can_write : write_left;
      size_t write_size   = write_frames * 
         handle->params.channels * handle->audio.sample_size;

      size_t bytes_in_buffer = handle->audio.frames_in_buffer * 
         handle->params.channels * handle->audio.sample_size;
      size_t written_bytes   = written_frames * 
         handle->params.channels * handle->audio.sample_size;

      memcpy(handle->audio.buffer + bytes_in_buffer,
            (const uint8_t*)aud->data + written_bytes,
            write_size);

      written_frames                 += write_frames;
      handle->audio.frames_in_buffer += write_frames;

      if ((handle->audio.frames_in_buffer 
               < (size_t)handle->audio.codec->frame_size) && require_block)
         break;

      if (!encode_audio(handle, &pkt, false))
         return false;

      handle->audio.frame_cnt       += handle->audio.frames_in_buffer;
      handle->audio.frames_in_buffer = 0;

      if (pkt.size)
      {
         if (av_interleaved_write_frame(handle->muxer.ctx, &pkt) < 0)
            return false;
      }
   }

   return true;
}
Example #7
File: ffemu.c Project: Wyrick/RetroArch
static bool ffemu_push_audio_thread(ffemu_t *handle, const struct ffemu_audio_data *data, bool require_block)
{
   size_t written_frames = 0;
   while (written_frames < data->frames)
   {
      size_t can_write = handle->audio.codec->frame_size - handle->audio.frames_in_buffer;
      size_t write_left = data->frames - written_frames;
      size_t write_frames = write_left > can_write ? can_write : write_left;
      size_t write_size = write_frames * handle->params.channels * sizeof(int16_t);

      size_t samples_in_buffer = handle->audio.frames_in_buffer * handle->params.channels;
      size_t written_samples = written_frames * handle->params.channels;

      memcpy(handle->audio.buffer + samples_in_buffer,
            data->data + written_samples,
            write_size);

      written_frames += write_frames;
      handle->audio.frames_in_buffer += write_frames;

      if ((handle->audio.frames_in_buffer < (size_t)handle->audio.codec->frame_size) && require_block)
         continue;

      AVPacket pkt;
      if (!encode_audio(handle, &pkt, false))
         return false;

      handle->audio.frames_in_buffer = 0;
      handle->audio.frame_cnt += handle->audio.codec->frame_size;

      if (pkt.size)
      {
         if (av_interleaved_write_frame(handle->muxer.ctx, &pkt) < 0)
            return false;
      }
   }

   return true;
}
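
Examples #6 and #7 appear to be two revisions of the same RetroArch routine, and two differences are worth noting. First, when a partial block remains and require_block is set, #6 breaks out of the loop, leaving the samples buffered for the next call, while #7 reaches the same effect with continue (at that point written_frames has already caught up with data->frames, so the loop condition fails). Second, #6 advances frame_cnt by the frames actually buffered, whereas #7 adds the codec's nominal frame_size, which overcounts on a final partial block.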
Example #8
int mgcp_transcoding_process_rtp(struct mgcp_endpoint *endp,
				 struct mgcp_rtp_end *dst_end,
				 char *data, int *len, int buf_size)
{
	struct mgcp_process_rtp_state *state;
	const size_t rtp_hdr_size = sizeof(struct rtp_hdr);
	struct rtp_hdr *rtp_hdr = (struct rtp_hdr *) data;
	char *payload_data = (char *) &rtp_hdr->data[0];
	int payload_len = *len - rtp_hdr_size;
	uint8_t *src = (uint8_t *)payload_data;
	uint8_t *dst = (uint8_t *)payload_data;
	size_t nbytes = payload_len;
	size_t nsamples;
	size_t max_samples;
	uint32_t ts_no;
	int rc;

	state = check_transcode_state(endp, dst_end, rtp_hdr);
	if (!state)
		return 0;

	if (state->src_fmt == state->dst_fmt) {
		if (!state->dst_packet_duration)
			return 0;

		/* TODO: repackage without transcoding */
	}

	/* If the remaining samples do not fit into a fixed ptime,
	 * a) discard them, if the next packet is much later
	 * b) add silence and send it, if the current packet is not
	 *    yet too late
	 * c) append the sample data, if the timestamp matches exactly
	 */

	/* TODO: check payload type (-> G.711 comfort noise) */

	if (payload_len > 0) {
		ts_no = ntohl(rtp_hdr->timestamp);
		if (!state->is_running) {
			state->next_seq = ntohs(rtp_hdr->sequence);
			state->next_time = ts_no;
			state->is_running = 1;
		}


		if (state->sample_cnt > 0) {
			int32_t delta = ts_no - state->next_time;
			/* TODO: check sequence? reordering? packet loss? */

			if (delta > state->sample_cnt) {
				/* There is a time gap between the last packet
				 * and the current one. Just discard the
				 * partial data that is left in the buffer.
				 * TODO: This can be improved by adding silence
				 * instead if the delta is small enough.
				 */
				LOGP(DMGCP, LOGL_NOTICE,
					"0x%x dropping sample buffer due delta=%d sample_cnt=%d\n",
					ENDPOINT_NUMBER(endp), delta, state->sample_cnt);
				state->sample_cnt = 0;
				state->next_time = ts_no;
			} else if (delta < 0) {
				LOGP(DMGCP, LOGL_NOTICE,
				     "RTP time jumps backwards, delta = %d, "
				     "discarding buffered samples\n",
				     delta);
				state->sample_cnt = 0;
				state->sample_offs = 0;
				return -EAGAIN;
			}

			/* Make sure the samples start without offset */
			if (state->sample_offs && state->sample_cnt)
				memmove(&state->samples[0],
					&state->samples[state->sample_offs],
					state->sample_cnt *
					sizeof(state->samples[0]));
		}

		state->sample_offs = 0;

		/* Append decoded audio to samples */
		decode_audio(state, &src, &nbytes);

		if (nbytes > 0)
			LOGP(DMGCP, LOGL_NOTICE,
			     "Skipped audio frame in RTP packet: %d octets\n",
			     nbytes);
	} else
		ts_no = state->next_time;

	if (state->sample_cnt < state->dst_packet_duration)
		return -EAGAIN;

	max_samples =
		state->dst_packet_duration ?
		state->dst_packet_duration : state->sample_cnt;

	nsamples = state->sample_cnt;

	rc = encode_audio(state, dst, buf_size, max_samples);
	/*
	 * There were no samples to encode?
	 * TODO: how does this work for comfort noise?
	 */
	if (rc == 0)
		return -ENOMSG;
	/* Any other error during the encoding */
	if (rc < 0)
		return rc;

	nsamples -= state->sample_cnt;

	*len = rtp_hdr_size + rc;
	rtp_hdr->sequence = htons(state->next_seq);
	rtp_hdr->timestamp = htonl(ts_no);

	state->next_seq += 1;
	state->next_time = ts_no + nsamples;

	/*
	 * XXX: At this point we should always have consumed
	 * samples. So doing OSMO_ASSERT(nsamples > 0) and returning
	 * rtp_hdr_size should be fine.
	 */
	return nsamples ? rtp_hdr_size : 0;
}
Example #9
bool ExportFFmpeg::Finalize()
{
   int i, nEncodedBytes;

   // Flush the audio FIFO and encoder.
   for (;;)
   {
      AVPacket pkt;
      int nFifoBytes = av_fifo_size(mEncAudioFifo); // any bytes left in audio FIFO?

      av_init_packet(&pkt);

      nEncodedBytes = 0;
      int nAudioFrameSizeOut = default_frame_size * mEncAudioCodecCtx->channels * sizeof(int16_t);

      if (nAudioFrameSizeOut > mEncAudioFifoOutBufSiz || nFifoBytes > mEncAudioFifoOutBufSiz) {
         wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Too much remaining data.")),
                      _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
         return false;
      }

      // Flush the audio FIFO first if necessary. It won't contain a _full_ audio frame because
   // if it did we'd have pulled it from the FIFO during the last EncodeAudioFrame() call -
      // the encoder must support short/incomplete frames for this to work.
      if (nFifoBytes > 0)
      {
         // Fill audio buffer with zeroes. If codec tries to read the whole buffer,
         // it will just read silence. If not - who cares?
         memset(mEncAudioFifoOutBuf, 0, mEncAudioFifoOutBufSiz);
         const AVCodec *codec = mEncAudioCodecCtx->codec;

         // We have an incomplete buffer of samples left.  Is it OK to encode it?
         // If codec supports CODEC_CAP_SMALL_LAST_FRAME, we can feed it with smaller frame
         // Or if codec is FLAC, feed it anyway (it doesn't have CODEC_CAP_SMALL_LAST_FRAME, but it works)
         // Or if frame_size is 1, then it's some kind of PCM codec, they don't have frames and will be fine with the samples
         // Or if user configured the exporter to pad with silence, then we'll send audio + silence as a frame.
         if ((codec->capabilities & (CODEC_CAP_SMALL_LAST_FRAME|CODEC_CAP_VARIABLE_FRAME_SIZE))
            || mEncAudioCodecCtx->frame_size <= 1
            || gPrefs->Read(wxT("/FileFormats/OverrideSmallLastFrame"), true)
            )
         {
            int frame_size = default_frame_size;

            // The last frame is going to contain a smaller than usual number of samples.
            // For codecs without CODEC_CAP_SMALL_LAST_FRAME use normal frame size
            if (codec->capabilities & (CODEC_CAP_SMALL_LAST_FRAME|CODEC_CAP_VARIABLE_FRAME_SIZE))
               frame_size = nFifoBytes / (mEncAudioCodecCtx->channels * sizeof(int16_t));

            wxLogDebug(wxT("FFmpeg : Audio FIFO still contains %d bytes, writing %d sample frame ..."),
               nFifoBytes, frame_size);

            // Pull the bytes out from the FIFO and feed them to the encoder.
            if (av_fifo_generic_read(mEncAudioFifo, mEncAudioFifoOutBuf, nFifoBytes, NULL) == 0)
            {
               nEncodedBytes = encode_audio(mEncAudioCodecCtx, &pkt, (int16_t*)mEncAudioFifoOutBuf, frame_size);
            }
         }
      }

      // Now flush the encoder.
      if (nEncodedBytes <= 0)
         nEncodedBytes = encode_audio(mEncAudioCodecCtx, &pkt, NULL, 0);

      if (nEncodedBytes <= 0)
         break;

      pkt.stream_index = mEncAudioStream->index;

      // Set presentation time of frame (currently in the codec's timebase) in the stream timebase.
      if (pkt.pts != int64_t(AV_NOPTS_VALUE))
         pkt.pts = av_rescale_q(pkt.pts, mEncAudioCodecCtx->time_base, mEncAudioStream->time_base);
      if (pkt.dts != int64_t(AV_NOPTS_VALUE))
         pkt.dts = av_rescale_q(pkt.dts, mEncAudioCodecCtx->time_base, mEncAudioStream->time_base);

      if (av_interleaved_write_frame(mEncFormatCtx, &pkt) != 0)
      {
         wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Couldn't write last audio frame to output file.")),
                      _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
         break;
      }
      av_free_packet(&pkt);
   }

   // Write any file trailers.
   av_write_trailer(mEncFormatCtx);

   // Close the codecs.
   if (mEncAudioStream != NULL)
      avcodec_close(mEncAudioStream->codec);

   for (i = 0; i < (int)mEncFormatCtx->nb_streams; i++)
   {
      av_freep(&mEncFormatCtx->streams[i]->codec);
      av_freep(&mEncFormatCtx->streams[i]);
   }

   // Close the output file if we created it.
   if (!(mEncFormatDesc->flags & AVFMT_NOFILE))
      ufile_close(mEncFormatCtx->pb);

   // Free any buffers or structures we allocated.
   av_free(mEncFormatCtx);

   av_freep(&mEncAudioFifoOutBuf);
   mEncAudioFifoOutBufSiz = 0;

   av_fifo_free(mEncAudioFifo);

   mEncAudioFifo = NULL;

   return true;
}
Example #10
PyObject*
encoders_encode_shn(PyObject *dummy,
                    PyObject *args, PyObject *keywds)
{
    static char *kwlist[] = {"filename",
                             "pcmreader",
                             "is_big_endian",
                             "signed_samples",
                             "header_data",
                             "footer_data",
                             "block_size",
                             NULL};
    char *filename;
    FILE *output_file;
    BitstreamWriter* writer;
    pcmreader* pcmreader;
    int is_big_endian = 0;
    int signed_samples = 0;
    char* header_data;
#ifdef PY_SSIZE_T_CLEAN
    Py_ssize_t header_size;
#else
    int header_size;
#endif
    char* footer_data = NULL;
#ifdef PY_SSIZE_T_CLEAN
    Py_ssize_t footer_size = 0;
#else
    int footer_size = 0;
#endif
    unsigned block_size = 256;
    unsigned bytes_written = 0;
    unsigned i;

    /*fetch arguments*/
    if (!PyArg_ParseTupleAndKeywords(args, keywds, "sO&iis#|s#I",
                                     kwlist,
                                     &filename,
                                     pcmreader_converter,
                                     &pcmreader,
                                     &is_big_endian,
                                     &signed_samples,
                                     &header_data,
                                     &header_size,

                                     &footer_data,
                                     &footer_size,
                                     &block_size))
        return NULL;

    /*ensure PCMReader is compatible with Shorten*/
    if ((pcmreader->bits_per_sample != 8) &&
        (pcmreader->bits_per_sample != 16)) {
        pcmreader->del(pcmreader);
        PyErr_SetString(PyExc_ValueError, "unsupported bits per sample");
        return NULL;
    }

    /*open given filename for writing*/
    if ((output_file = fopen(filename, "wb")) == NULL) {
        PyErr_SetFromErrnoWithFilename(PyExc_IOError, filename);
        pcmreader->del(pcmreader);
        return NULL;
    } else {
        writer = bw_open(output_file, BS_BIG_ENDIAN);
    }

    /*write magic number and version*/
    writer->build(writer, "4b 8u", "ajkg", 2);

    bw_add_callback(writer, byte_counter, &bytes_written);

    /*write Shorten header*/
    write_header(writer,
                 pcmreader->bits_per_sample,
                 is_big_endian,
                 signed_samples,
                 pcmreader->channels,
                 block_size);

    /*issue initial VERBATIM command with header data*/
    write_unsigned(writer, COMMAND_SIZE, FN_VERBATIM);
    write_unsigned(writer, VERBATIM_SIZE, header_size);
    for (i = 0; i < header_size; i++)
        write_unsigned(writer, VERBATIM_BYTE_SIZE, (uint8_t)header_data[i]);

    /*process PCM frames*/
    if (encode_audio(writer, pcmreader, signed_samples, block_size))
        goto error;

    /*if there's footer data, issue a VERBATIM command for it*/
    if ((footer_data != NULL) && (footer_size > 0)) {
        write_unsigned(writer, COMMAND_SIZE, FN_VERBATIM);
        write_unsigned(writer, VERBATIM_SIZE, footer_size);
        for (i = 0; i < footer_size; i++)
            write_unsigned(writer, VERBATIM_BYTE_SIZE, (uint8_t)footer_data[i]);
    }

    /*issue QUIT command*/
    write_unsigned(writer, COMMAND_SIZE, FN_QUIT);

    /*pad output (not including header) to a multiple of 4 bytes*/
    writer->byte_align(writer);
    while ((bytes_written % 4) != 0) {
        writer->write(writer, 8, 0);
    }

    /*deallocate temporary buffers and close files*/
    pcmreader->del(pcmreader);
    writer->close(writer);

    Py_INCREF(Py_None);
    return Py_None;
 error:
    pcmreader->del(pcmreader);
    writer->close(writer);

    return NULL;
}
Example #11
avcomp_error avcomp_encode_data(avcomp_state *state, const UINT8 *source, UINT8 *dest, UINT32 *complength)
{
	const UINT8 *metastart, *videostart, *audiostart[MAX_CHANNELS];
	UINT32 metasize, channels, samples, width, height;
	UINT32 audioxor, videoxor, videostride;
	avcomp_error err;
	UINT32 dstoffs;
	int chnum;

	/* extract data from source if present */
	if (source != NULL)
	{
		/* validate the header */
		if (source[0] != 'c' || source[1] != 'h' || source[2] != 'a' || source[3] != 'v')
			return AVCERR_INVALID_DATA;

		/* extract info from the header */
		metasize = source[4];
		channels = source[5];
		samples = (source[6] << 8) + source[7];
		width = (source[8] << 8) + source[9];
		height = (source[10] << 8) + source[11];

		/* determine the start of each piece of data */
		source += 12;
		metastart = source;
		source += metasize;
		for (chnum = 0; chnum < channels; chnum++)
		{
			audiostart[chnum] = source;
			source += 2 * samples;
		}
		videostart = source;

		/* data is assumed to be big-endian already */
		audioxor = videoxor = 0;
		videostride = 2 * width;
	}

	/* otherwise, extract from the state */
	else
	{
		UINT16 betest = 0;

		/* extract metadata information */
		metastart = state->compress.metadata;
		metasize = state->compress.metalength;
		if ((metastart == NULL && metasize != 0) || (metastart != NULL && metasize == 0))
			return AVCERR_INVALID_CONFIGURATION;

		/* extract audio information */
		channels = state->compress.channels;
		samples = state->compress.samples;
		for (chnum = 0; chnum < channels; chnum++)
			audiostart[chnum] = (const UINT8 *)state->compress.audio[chnum];

		/* extract video information */
		videostart = NULL;
		videostride = width = height = 0;
		if (state->compress.video != NULL)
		{
			videostart = (const UINT8 *)state->compress.video->base;
			videostride = state->compress.video->rowpixels * 2;
			width = state->compress.video->width;
			height = state->compress.video->height;
		}

		/* data is assumed to be native-endian */
		*(UINT8 *)&betest = 1;
		audioxor = videoxor = (betest == 1) ? 1 : 0;
	}

	/* validate the info from the header */
	if (width > state->maxwidth || height > state->maxheight)
		return AVCERR_VIDEO_TOO_LARGE;
	if (channels > state->maxchannels)
		return AVCERR_AUDIO_TOO_LARGE;

	/* write the basics to the new header */
	dest[0] = metasize;
	dest[1] = channels;
	dest[2] = samples >> 8;
	dest[3] = samples;
	dest[4] = width >> 8;
	dest[5] = width;
	dest[6] = height >> 8;
	dest[7] = height;

	/* starting offsets */
	dstoffs = 10 + 2 * channels;

	/* copy the metadata first */
	if (metasize > 0)
	{
		memcpy(dest + dstoffs, metastart, metasize);
		dstoffs += metasize;
	}

	/* encode the audio channels */
	if (channels > 0)
	{
		/* encode the audio */
		err = encode_audio(state, channels, samples, audiostart, audioxor, dest + dstoffs, &dest[8]);
		if (err != AVCERR_NONE)
			return err;

		/* advance the pointers past the data */
		dstoffs += (dest[8] << 8) + dest[9];
		for (chnum = 0; chnum < channels; chnum++)
			dstoffs += (dest[10 + 2 * chnum] << 8) + dest[11 + 2 * chnum];
	}

	/* encode the video data */
	if (width > 0 && height > 0)
	{
		UINT32 vidlength = 0;

		/* encode the video */
		err = encode_video(state, width, height, videostart, videostride, videoxor, dest + dstoffs, &vidlength);
		if (err != AVCERR_NONE)
			return err;

		/* advance the pointers past the data */
		dstoffs += vidlength;
	}

	/* set the total compression */
	*complength = dstoffs;
	return AVCERR_NONE;
}
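
For reference, the 12-byte source header that Example #11 parses, with offsets read directly off the code above:

/* Source header layout ("chav" container), as parsed above:
 *   bytes  0-3   magic "chav"
 *   byte   4     metadata size in bytes
 *   byte   5     audio channel count
 *   bytes  6-7   samples per channel, big-endian
 *   bytes  8-9   video width in pixels, big-endian
 *   bytes 10-11  video height in pixels, big-endian
 * The payload follows as metadata, then channels * (2 * samples) bytes of
 * audio, then the video data. The compressed header written to dest keeps
 * the same fields minus the magic (dest[0..7]); dest[8..9] and each
 * dest[10+2*ch..11+2*ch] pair receive per-stream compressed sizes filled
 * in by encode_audio(). */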
Example #12
File: ffmpeg.c Project: wedesoft/aiscm
SCM ffmpeg_destroy(SCM scm_self)
{
  struct ffmpeg_t *self = get_self_no_check(scm_self);

  if (self->header_written) {
    // Clear audio encoder pipeline
    if (self->audio_codec_ctx)
      while (encode_audio(self, NULL));

    // Clear video encoder pipeline
    if (self->video_codec_ctx)
      while (encode_video(self, NULL));
  };

  if (self->video_target_frame) {
    av_frame_unref(self->video_target_frame);
    av_frame_free(&self->video_target_frame);
    self->video_target_frame = NULL;
  };

  if (self->audio_packed_frame) {
    av_frame_unref(self->audio_packed_frame);
    av_frame_free(&self->audio_packed_frame);
    self->audio_packed_frame = NULL;
  };

  if (self->audio_target_frame) {
    av_frame_unref(self->audio_target_frame);
    av_frame_free(&self->audio_target_frame);
    self->audio_target_frame = NULL;
  };

  if (self->audio_buffer.buffer) {
    ringbuffer_destroy(&self->audio_buffer);
    self->audio_buffer.buffer = NULL;
  };

  if (self->header_written) {
    av_write_trailer(self->fmt_ctx);
    self->header_written = 0;
  };

  if (self->orig_pkt.data) {
    av_packet_unref(&self->orig_pkt);
    self->orig_pkt.data = NULL;
  };

  if (self->audio_codec_ctx) {
    avcodec_close(self->audio_codec_ctx);
    self->audio_codec_ctx = NULL;
  };

  if (self->video_codec_ctx) {
    avcodec_close(self->video_codec_ctx);
    self->video_codec_ctx = NULL;
  };

  if (self->output_file) {
    avio_close(self->fmt_ctx->pb);
    self->output_file = 0;
  };

  if (self->fmt_ctx) {
    if (is_input_context(self))
      avformat_close_input(&self->fmt_ctx);
    else
      avformat_free_context(self->fmt_ctx);
    self->fmt_ctx = NULL;
  };

  return SCM_UNSPECIFIED;
}
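
Examples #5 and #12 rely on an aiscm-internal encode_audio(self, frame) whose return value stays non-zero while the codec keeps emitting packets, so that while (encode_audio(self, NULL)); drains the pipeline. A hedged sketch of that contract using the legacy FFmpeg API; the body is a guess from the call sites, not the project's code.

static int encode_audio(struct ffmpeg_t *self, AVFrame *frame)
{
  AVPacket pkt;
  int got_packet = 0;

  av_init_packet(&pkt);
  pkt.data = NULL;  /* encoder allocates the payload */
  pkt.size = 0;

  /* frame == NULL asks the codec to flush buffered samples. */
  if (avcodec_encode_audio2(self->audio_codec_ctx, &pkt, frame, &got_packet) < 0)
    return 0;

  if (got_packet) {
    /* A real implementation would also set pkt.stream_index and rescale
     * pkt.pts/pkt.dts into the stream time base before writing. */
    av_interleaved_write_frame(self->fmt_ctx, &pkt);
    av_packet_unref(&pkt);
  }
  return got_packet;  /* 0 once the encoder is fully drained */
}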