Example #1
void audio_callback(void *userdata, Uint8 *stream, int len) 
{

  AudioState *audio = userdata;
  int len1, audio_size;
  double pts;

  while(len > 0) {
    if(audio->audio_buf_index >= audio->audio_buf_size) {
      /* We have already sent all our data; get more */
      audio_size = decode_audio(audio, audio->audioBuf, sizeof(audio->audioBuf), &pts);
      if(audio_size < 0) {
	/* If error, output silence */
	audio->audio_buf_size = 1024;
	memset(audio->audioBuf, 0, audio->audio_buf_size);
      } else {
	audio->audio_buf_size = audio_size;
      }
      audio->audio_buf_index = 0;
    }
    len1 = audio->audio_buf_size - audio->audio_buf_index;
    if(len1 > len)
      len1 = len;
    memcpy(stream, (uint8_t *)audio->audioBuf + audio->audio_buf_index, len1);
    len -= len1;
    stream += len1;
    audio->audio_buf_index += len1;
  }
}
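Example #1 above is the callback half of SDL's pull-model audio API. For context only, here is a minimal, hypothetical sketch of how such a callback is typically registered; AudioState and audio_callback() are assumed to be the ones from the example, and the open_audio() wrapper name is made up for illustration.

#include <stdio.h>
#include <string.h>
#include <SDL.h>

/* Assumed to exist as in Example #1. */
typedef struct AudioState AudioState;
void audio_callback(void *userdata, Uint8 *stream, int len);

/* Hypothetical helper: open the SDL audio device and start the callback. */
int open_audio(AudioState *audio, int sample_rate, int channels)
{
    SDL_AudioSpec wanted, obtained;
    memset(&wanted, 0, sizeof(wanted));

    wanted.freq     = sample_rate;
    wanted.format   = AUDIO_S16SYS;   /* signed 16-bit, native byte order */
    wanted.channels = channels;
    wanted.samples  = 1024;           /* callback granularity in sample frames */
    wanted.callback = audio_callback; /* SDL invokes this from its audio thread */
    wanted.userdata = audio;          /* becomes the callback's first argument */

    if (SDL_OpenAudio(&wanted, &obtained) < 0) {
        fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
        return -1;
    }
    SDL_PauseAudio(0);                /* unpause: SDL starts pulling data via the callback */
    return 0;
}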
Example #2
	// runs in the sound mixer thread
	void as_netstream::audio_callback(Uint8* stream, int len)
	{
		while (len > 0 && m_status == PLAY)
		{
			// decode sound
			if (m_sound == NULL)
			{
				gc_ptr<av_packet> audio;
				if (m_aq.pop(&audio))
				{
					Sint16* sample;
					int size;
					decode_audio(audio->get_packet(), &sample, &size);
					m_sound = new decoded_sound(sample, size);
					continue;
				}
				break;
			}
			else
			{
				int n = m_sound->extract(stream, len);
				stream += n;
				len -= n;
				if (m_sound->size() == 0)
				{
					m_sound = NULL;
				}
			}
		}

	}
Example #3
File: ffmpeg.c  Project: wedesoft/aiscm
SCM ffmpeg_decode_audio_video(SCM scm_self)
{
  SCM retval = SCM_BOOL_F;

  struct ffmpeg_t *self = get_self(scm_self);

  if (!is_input_context(self))
    scm_misc_error("ffmpeg-decode-audio/video", "Attempt to read frame from FFmpeg output video", SCM_EOL);

  while (scm_is_false(retval)) {
    if (packet_empty(self)) read_packet(self);

    int reading_cache = packet_empty(self);

    if (self->pkt.stream_index == self->audio_stream_idx) {
      av_frame_unref(self->audio_target_frame);
      retval = decode_audio(self, &self->pkt, self->audio_target_frame);
    } else if (self->pkt.stream_index == self->video_stream_idx) {
      av_frame_unref(self->video_target_frame);
      retval = decode_video(self, &self->pkt, self->video_target_frame);
    } else
      consume_packet_data(&self->pkt, self->pkt.size);

    if (scm_is_false(retval) && reading_cache) break;
  }

  return retval;
}
Example #4
static int init(sh_audio_t *sh_audio)
{
    int ret=decode_audio(sh_audio,sh_audio->a_buffer,4096,sh_audio->a_buffer_size);
    if(ret<0){
        mp_msg(MSGT_DECAUDIO,MSGL_INFO,"ACM decoding error: %d\n",ret);
        return 0;
    }
    sh_audio->a_buffer_len=ret;
    return 1;
}
Example #5
File: qplayvid.c  Project: macpod/openlase
void *decoder_thread(void *arg)
{
	PlayerCtx *ctx = arg;
	AVPacket packet;
	AVPacket cpacket;
	size_t decoded_bytes;
	int seekid = ctx->cur_seekid;

	printf("Decoder thread started\n");

	memset(&packet, 0, sizeof(packet));
	memset(&cpacket, 0, sizeof(cpacket));

	while (!ctx->exit) {
		int new_packet = 0;
		if (cpacket.size == 0) {
			if (packet.data)
				av_free_packet(&packet);
			pthread_mutex_lock(&ctx->seek_mutex);
			if (ctx->cur_seekid > seekid) {
				printf("Seek! %f\n", ctx->seek_pos);
				av_seek_frame(ctx->fmt_ctx, -1, (int64_t)(ctx->seek_pos * AV_TIME_BASE), 0);
				seekid = ctx->cur_seekid;
				// HACK! Avoid deadlock by waking up the video waiter
				pthread_mutex_lock(&ctx->v_buf_mutex);
				pthread_cond_signal(&ctx->v_buf_not_empty);
				pthread_mutex_unlock(&ctx->v_buf_mutex);
				if (ctx->audio_idx != -1)
					avcodec_flush_buffers(ctx->a_codec_ctx);
				avcodec_flush_buffers(ctx->v_codec_ctx);
			}
			if (av_read_frame(ctx->fmt_ctx, &packet) < 0) {
				fprintf(stderr, "EOF!\n");
				push_eof(ctx, seekid);
				pthread_cond_wait(&ctx->seek_cond, &ctx->seek_mutex);
				pthread_mutex_unlock(&ctx->seek_mutex);
				continue;
			}
			pthread_mutex_unlock(&ctx->seek_mutex);
			cpacket = packet;
			new_packet = 1;
		}
		if (ctx->audio_idx != -1 && cpacket.stream_index == ctx->audio_idx) {
			decoded_bytes = decode_audio(ctx, &cpacket, new_packet, seekid);
		} else if (cpacket.stream_index == ctx->video_idx) {
			decoded_bytes = decode_video(ctx, &cpacket, new_packet, seekid);
		} else {
			decoded_bytes = cpacket.size;
		}

		cpacket.data += decoded_bytes;
		cpacket.size -= decoded_bytes;
	}
	return NULL;
}
Example #6
static int control(sh_audio_t *sh, int cmd, void* arg, ...) {
  if (cmd == ADCTRL_RESYNC_STREAM) {
    unsigned char *buf = malloc(MAX_FRAMESIZE);
    int i;
    int nr_ok = 0;
    for (i = 0; i < MAX_SEEK_DISCARD; i++) {
      int len = decode_audio(sh, buf, 0, MAX_FRAMESIZE);
      if (check_clip(buf, len)) nr_ok++; else nr_ok = 0;
      if (nr_ok > MIN_SEEK_GOOD) break;
    }
    free(buf);
  }
  return CONTROL_UNKNOWN;
}
Example #7
static int init(sh_audio_t *sh_audio)
{
    struct MPOpts *opts = sh_audio->opts;
    AVCodecContext *lavc_context;
    AVCodec *lavc_codec;

    if (sh_audio->codec->dll) {
        lavc_codec = avcodec_find_decoder_by_name(sh_audio->codec->dll);
        if (!lavc_codec) {
            mp_tmsg(MSGT_DECAUDIO, MSGL_ERR,
                    "Cannot find codec '%s' in libavcodec...\n",
                    sh_audio->codec->dll);
            return 0;
        }
    } else if (!sh_audio->libav_codec_id) {
        mp_tmsg(MSGT_DECAUDIO, MSGL_INFO, "No Libav codec ID known. "
                "Generic lavc decoder is not applicable.\n");
        return 0;
    } else {
        lavc_codec = avcodec_find_decoder(sh_audio->libav_codec_id);
        if (!lavc_codec) {
            mp_tmsg(MSGT_DECAUDIO, MSGL_INFO, "Libavcodec has no decoder "
                   "for this codec\n");
            return 0;
        }
    }

    sh_audio->codecname = lavc_codec->long_name;
    if (!sh_audio->codecname)
        sh_audio->codecname = lavc_codec->name;

    struct priv *ctx = talloc_zero(NULL, struct priv);
    sh_audio->context = ctx;
    lavc_context = avcodec_alloc_context3(lavc_codec);
    ctx->avctx = lavc_context;
    ctx->avframe = avcodec_alloc_frame();

    // Always try to set - option only exists for AC3 at the moment
    av_opt_set_double(lavc_context, "drc_scale", opts->drc_level,
                      AV_OPT_SEARCH_CHILDREN);
    lavc_context->sample_rate = sh_audio->samplerate;
    lavc_context->bit_rate = sh_audio->i_bps * 8;
    if (sh_audio->wf) {
        lavc_context->channels = sh_audio->wf->nChannels;
        lavc_context->sample_rate = sh_audio->wf->nSamplesPerSec;
        lavc_context->bit_rate = sh_audio->wf->nAvgBytesPerSec * 8;
        lavc_context->block_align = sh_audio->wf->nBlockAlign;
        lavc_context->bits_per_coded_sample = sh_audio->wf->wBitsPerSample;
    }
    lavc_context->request_channels = opts->audio_output_channels;
    lavc_context->codec_tag = sh_audio->format; //FOURCC
    lavc_context->codec_type = AVMEDIA_TYPE_AUDIO;
    lavc_context->codec_id = lavc_codec->id; // not sure if required, imho not --A'rpi

    /* alloc extra data */
    if (sh_audio->wf && sh_audio->wf->cbSize > 0) {
        lavc_context->extradata = av_mallocz(sh_audio->wf->cbSize + FF_INPUT_BUFFER_PADDING_SIZE);
        lavc_context->extradata_size = sh_audio->wf->cbSize;
        memcpy(lavc_context->extradata, sh_audio->wf + 1,
               lavc_context->extradata_size);
    }

    // for QDM2
    if (sh_audio->codecdata_len && sh_audio->codecdata &&
            !lavc_context->extradata) {
        lavc_context->extradata = av_malloc(sh_audio->codecdata_len +
                                            FF_INPUT_BUFFER_PADDING_SIZE);
        lavc_context->extradata_size = sh_audio->codecdata_len;
        memcpy(lavc_context->extradata, (char *)sh_audio->codecdata,
               lavc_context->extradata_size);
    }

    /* open it */
    if (avcodec_open2(lavc_context, lavc_codec, NULL) < 0) {
        mp_tmsg(MSGT_DECAUDIO, MSGL_ERR, "Could not open codec.\n");
        uninit(sh_audio);
        return 0;
    }
    mp_msg(MSGT_DECAUDIO, MSGL_V, "INFO: libavcodec \"%s\" init OK!\n",
           lavc_codec->name);

    if (sh_audio->format == 0x3343414D) {
        // MACE 3:1
        sh_audio->ds->ss_div = 2 * 3; // 1 samples/packet
        sh_audio->ds->ss_mul = 2 * sh_audio->wf->nChannels; // 1 byte*ch/packet
    } else if (sh_audio->format == 0x3643414D) {
        // MACE 6:1
        sh_audio->ds->ss_div = 2 * 6; // 1 samples/packet
        sh_audio->ds->ss_mul = 2 * sh_audio->wf->nChannels; // 1 byte*ch/packet
    }

    // Decode at least 1 byte:  (to get header filled)
    for (int tries = 0;;) {
        int x = decode_audio(sh_audio, sh_audio->a_buffer, 1,
                             sh_audio->a_buffer_size);
        if (x > 0) {
            sh_audio->a_buffer_len = x;
            break;
        }
        if (++tries >= 5) {
            mp_msg(MSGT_DECAUDIO, MSGL_ERR,
                   "ad_ffmpeg: initial decode failed\n");
            uninit(sh_audio);
            return 0;
        }
    }

    sh_audio->i_bps = lavc_context->bit_rate / 8;
    if (sh_audio->wf && sh_audio->wf->nAvgBytesPerSec)
        sh_audio->i_bps = sh_audio->wf->nAvgBytesPerSec;

    return 1;
}
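The per-field copying of WAVEFORMATEX values into the AVCodecContext seen in Example #7 (and in the later ad_ffmpeg variants below) predates FFmpeg's AVCodecParameters API. For comparison only, a minimal sketch of the modern equivalent; the open_audio_decoder() helper and its arguments are hypothetical and assume an AVFormatContext that has already been opened and probed.

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

/* Hypothetical helper: open a decoder for audio stream `idx` of `fmt`.
 * Returns NULL on failure. */
static AVCodecContext *open_audio_decoder(AVFormatContext *fmt, int idx)
{
    AVCodecParameters *par = fmt->streams[idx]->codecpar;
    const AVCodec *dec = avcodec_find_decoder(par->codec_id);
    if (!dec)
        return NULL;

    AVCodecContext *ctx = avcodec_alloc_context3(dec);
    if (!ctx)
        return NULL;

    /* Copies sample rate, channel layout, block_align, extradata, etc. in one
     * call, replacing the manual field-by-field copies in the older examples. */
    if (avcodec_parameters_to_context(ctx, par) < 0 ||
        avcodec_open2(ctx, dec, NULL) < 0) {
        avcodec_free_context(&ctx);
        return NULL;
    }
    return ctx;
}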
Example #8
int mgcp_transcoding_process_rtp(struct mgcp_endpoint *endp,
				struct mgcp_rtp_end *dst_end,
			     char *data, int *len, int buf_size)
{
	struct mgcp_process_rtp_state *state;
	const size_t rtp_hdr_size = sizeof(struct rtp_hdr);
	struct rtp_hdr *rtp_hdr = (struct rtp_hdr *) data;
	char *payload_data = (char *) &rtp_hdr->data[0];
	int payload_len = *len - rtp_hdr_size;
	uint8_t *src = (uint8_t *)payload_data;
	uint8_t *dst = (uint8_t *)payload_data;
	size_t nbytes = payload_len;
	size_t nsamples;
	size_t max_samples;
	uint32_t ts_no;
	int rc;

	state = check_transcode_state(endp, dst_end, rtp_hdr);
	if (!state)
		return 0;

	if (state->src_fmt == state->dst_fmt) {
		if (!state->dst_packet_duration)
			return 0;

		/* TODO: repackage without transcoding */
	}

	/* If the remaining samples do not fit into a fixed ptime,
	 * a) discard them, if the next packet is much later
	 * b) add silence and * send it, if the current packet is not
	 *    yet too late
	 * c) append the sample data, if the timestamp matches exactly
	 */

	/* TODO: check payload type (-> G.711 comfort noise) */

	if (payload_len > 0) {
		ts_no = ntohl(rtp_hdr->timestamp);
		if (!state->is_running) {
			state->next_seq = ntohs(rtp_hdr->sequence);
			state->next_time = ts_no;
			state->is_running = 1;
		}


		if (state->sample_cnt > 0) {
			int32_t delta = ts_no - state->next_time;
			/* TODO: check sequence? reordering? packet loss? */

			if (delta > state->sample_cnt) {
				/* There is a time gap between the last packet
				 * and the current one. Just discard the
				 * partial data that is left in the buffer.
				 * TODO: This can be improved by adding silence
				 * instead if the delta is small enough.
				 */
				LOGP(DMGCP, LOGL_NOTICE,
					"0x%x dropping sample buffer due delta=%d sample_cnt=%d\n",
					ENDPOINT_NUMBER(endp), delta, state->sample_cnt);
				state->sample_cnt = 0;
				state->next_time = ts_no;
			} else if (delta < 0) {
				LOGP(DMGCP, LOGL_NOTICE,
				     "RTP time jumps backwards, delta = %d, "
				     "discarding buffered samples\n",
				     delta);
				state->sample_cnt = 0;
				state->sample_offs = 0;
				return -EAGAIN;
			}

			/* Make sure the samples start without offset */
			if (state->sample_offs && state->sample_cnt)
				memmove(&state->samples[0],
					&state->samples[state->sample_offs],
					state->sample_cnt *
					sizeof(state->samples[0]));
		}

		state->sample_offs = 0;

		/* Append decoded audio to samples */
		decode_audio(state, &src, &nbytes);

		if (nbytes > 0)
			LOGP(DMGCP, LOGL_NOTICE,
			     "Skipped audio frame in RTP packet: %d octets\n",
			     nbytes);
	} else
		ts_no = state->next_time;

	if (state->sample_cnt < state->dst_packet_duration)
		return -EAGAIN;

	max_samples =
		state->dst_packet_duration ?
		state->dst_packet_duration : state->sample_cnt;

	nsamples = state->sample_cnt;

	rc = encode_audio(state, dst, buf_size, max_samples);
	/*
	 * There were no samples to encode?
	 * TODO: how does this work for comfort noise?
	 */
	if (rc == 0)
		return -ENOMSG;
	/* Any other error during the encoding */
	if (rc < 0)
		return rc;

	nsamples -= state->sample_cnt;

	*len = rtp_hdr_size + rc;
	rtp_hdr->sequence = htons(state->next_seq);
	rtp_hdr->timestamp = htonl(ts_no);

	state->next_seq += 1;
	state->next_time = ts_no + nsamples;

	/*
	 * XXX: At this point we should always have consumed
	 * samples. So doing OSMO_ASSERT(nsamples > 0) and returning
	 * rtp_hdr_size should be fine.
	 */
	return nsamples ? rtp_hdr_size : 0;
}
Example #9
File: audio.c  Project: wrl/mpv
int fill_audio_out_buffers(struct MPContext *mpctx, double endpts)
{
    struct MPOpts *opts = mpctx->opts;
    struct ao *ao = mpctx->ao;
    int playsize;
    int playflags = 0;
    bool audio_eof = false;
    bool signal_eof = false;
    bool partial_fill = false;
    sh_audio_t * const sh_audio = mpctx->sh_audio;
    bool modifiable_audio_format = !(ao->format & AF_FORMAT_SPECIAL_MASK);
    int unitsize = ao->channels.num * af_fmt2bits(ao->format) / 8;

    if (mpctx->paused)
        playsize = 1;   // just initialize things (audio pts at least)
    else
        playsize = ao_get_space(ao);

    // Coming here with hrseek_active still set means audio-only
    if (!mpctx->sh_video || !mpctx->sync_audio_to_video)
        mpctx->syncing_audio = false;
    if (!opts->initial_audio_sync || !modifiable_audio_format) {
        mpctx->syncing_audio = false;
        mpctx->hrseek_active = false;
    }

    int res;
    if (mpctx->syncing_audio || mpctx->hrseek_active)
        res = audio_start_sync(mpctx, playsize);
    else
        res = decode_audio(sh_audio, &ao->buffer, playsize);

    if (res < 0) {  // EOF, error or format change
        if (res == -2) {
            /* The format change isn't handled too gracefully. A more precise
             * implementation would require draining buffered old-format audio
             * while displaying video, then doing the output format switch.
             */
            if (!mpctx->opts->gapless_audio)
                uninit_player(mpctx, INITIALIZED_AO);
            reinit_audio_chain(mpctx);
            return -1;
        } else if (res == ASYNC_PLAY_DONE)
            return 0;
        else if (demux_stream_eof(mpctx->sh_audio->gsh))
            audio_eof = true;
    }

    if (endpts != MP_NOPTS_VALUE && modifiable_audio_format) {
        double bytes = (endpts - written_audio_pts(mpctx) + mpctx->audio_delay)
                       * ao->bps / opts->playback_speed;
        if (playsize > bytes) {
            playsize = MPMAX(bytes, 0);
            audio_eof = true;
            partial_fill = true;
        }
    }

    assert(ao->buffer.len % unitsize == 0);
    if (playsize > ao->buffer.len) {
        partial_fill = true;
        playsize = ao->buffer.len;
    }
    playsize -= playsize % unitsize;
    if (!playsize)
        return partial_fill && audio_eof ? -2 : -partial_fill;

    if (audio_eof && partial_fill) {
        if (opts->gapless_audio) {
            // With gapless audio, delay this to ao_uninit. There must be only
            // 1 final chunk, and that is handled when calling ao_uninit().
            signal_eof = true;
        } else {
            playflags |= AOPLAY_FINAL_CHUNK;
        }
    }

    assert(ao->buffer_playable_size <= ao->buffer.len);
    int played = write_to_ao(mpctx, ao->buffer.start, playsize, playflags,
                             written_audio_pts(mpctx));
    ao->buffer_playable_size = playsize - played;

    if (played > 0) {
        ao->buffer.len -= played;
        memmove(ao->buffer.start, ao->buffer.start + played, ao->buffer.len);
    } else if (!mpctx->paused && audio_eof && ao_get_delay(ao) < .04) {
        // Sanity check to avoid hanging in case current ao doesn't output
        // partial chunks and doesn't check for AOPLAY_FINAL_CHUNK
        signal_eof = true;
    }

    return signal_eof ? -2 : -partial_fill;
}
Example #10
File: audio.c  Project: wrl/mpv
static int audio_start_sync(struct MPContext *mpctx, int playsize)
{
    struct ao *ao = mpctx->ao;
    struct MPOpts *opts = mpctx->opts;
    sh_audio_t * const sh_audio = mpctx->sh_audio;
    int res;

    // Timing info may not be set without
    res = decode_audio(sh_audio, &ao->buffer, 1);
    if (res < 0)
        return res;

    int bytes;
    bool did_retry = false;
    double written_pts;
    double bps = ao->bps / opts->playback_speed;
    bool hrseek = mpctx->hrseek_active;   // audio only hrseek
    mpctx->hrseek_active = false;
    while (1) {
        written_pts = written_audio_pts(mpctx);
        double ptsdiff;
        if (hrseek)
            ptsdiff = written_pts - mpctx->hrseek_pts;
        else
            ptsdiff = written_pts - mpctx->sh_video->pts - mpctx->delay
                      - mpctx->audio_delay;
        bytes = ptsdiff * bps;
        bytes -= bytes % (ao->channels.num * af_fmt2bits(ao->format) / 8);

        // ogg demuxers give packets without timing
        if (written_pts <= 1 && sh_audio->pts == MP_NOPTS_VALUE) {
            if (!did_retry) {
                // Try to read more data to see packets that have pts
                res = decode_audio(sh_audio, &ao->buffer, ao->bps);
                if (res < 0)
                    return res;
                did_retry = true;
                continue;
            }
            bytes = 0;
        }

        if (fabs(ptsdiff) > 300 || isnan(ptsdiff))   // pts reset or just broken?
            bytes = 0;

        if (bytes > 0)
            break;

        mpctx->syncing_audio = false;
        int a = MPMIN(-bytes, MPMAX(playsize, 20000));
        res = decode_audio(sh_audio, &ao->buffer, a);
        bytes += ao->buffer.len;
        if (bytes >= 0) {
            memmove(ao->buffer.start,
                    ao->buffer.start + ao->buffer.len - bytes, bytes);
            ao->buffer.len = bytes;
            if (res < 0)
                return res;
            return decode_audio(sh_audio, &ao->buffer, playsize);
        }
        ao->buffer.len = 0;
        if (res < 0)
            return res;
    }
    if (hrseek)
        // Don't add silence in audio-only case even if position is too late
        return 0;
    int fillbyte = 0;
    if ((ao->format & AF_FORMAT_SIGN_MASK) == AF_FORMAT_US)
        fillbyte = 0x80;
    if (bytes >= playsize) {
        /* This case could fall back to the one below with
         * bytes = playsize, but then silence would keep accumulating
         * in a_out_buffer if the AO accepts less data than it asks for
         * in playsize. */
        char *p = malloc(playsize);
        memset(p, fillbyte, playsize);
        write_to_ao(mpctx, p, playsize, 0, written_pts - bytes / bps);
        free(p);
        return ASYNC_PLAY_DONE;
    }
    mpctx->syncing_audio = false;
    decode_audio_prepend_bytes(&ao->buffer, bytes, fillbyte);
    return decode_audio(sh_audio, &ao->buffer, playsize);
}
Example #11
File: toxav.c  Project: 13693100472/toxcore
static void *toxav_decoding(void *arg)
{
    void **pp = arg;
    ToxAv *av = pp[0];
    CallSpecific *call = pp[1];
    free(pp);

    while (1) {
        DECODE_PACKET *p;
        _Bool video = 0;

        pthread_mutex_lock(&call->decode_cond_mutex);

        if (call->exit) {
            break;
        }

        uint8_t r;

        /* first check for available packets, otherwise wait for condition*/
        r = call->audio_decode_read;
        p = call->audio_decode_queue[r];

        if (!p) {
            r = call->video_decode_read;
            p = call->video_decode_queue[r];

            if (!p) {
                pthread_cond_wait(&call->decode_cond, &call->decode_cond_mutex);
                r = call->audio_decode_read;
                p = call->audio_decode_queue[r];

                if (!p) {
                    r = call->video_decode_read;
                    p = call->video_decode_queue[r];
                    video = 1;
                }
            } else {
                video = 1;
            }
        }

        if (video) {
            if (p) {
                call->video_decode_queue[r] = NULL;
                call->video_decode_read = (r + 1) % VIDEO_DECODE_QUEUE_SIZE;
            }
        } else {
            call->audio_decode_queue[r] = NULL;
            call->audio_decode_read = (r + 1) % AUDIO_DECODE_QUEUE_SIZE;
        }

        pthread_mutex_unlock(&call->decode_cond_mutex);

        if (p) {
            if (video) {
                decode_video(av, call, p);
            } else {
                decode_audio(av, call, p);
            }
        }
    }

    call->exit = 0;
    pthread_cond_signal(&call->decode_cond);
    pthread_mutex_unlock(&call->decode_cond_mutex);

    return NULL;
}
Example #12
File: ad_ffmpeg.c  Project: HermiG/mplayer2
static int init(sh_audio_t *sh_audio)
{
    struct MPOpts *opts = sh_audio->opts;
    AVCodecContext *lavc_context;
    AVCodec *lavc_codec;

    mp_msg(MSGT_DECAUDIO,MSGL_V,"FFmpeg's libavcodec audio codec\n");

    lavc_codec = avcodec_find_decoder_by_name(sh_audio->codec->dll);
    if(!lavc_codec){
	mp_tmsg(MSGT_DECAUDIO,MSGL_ERR,"Cannot find codec '%s' in libavcodec...\n",sh_audio->codec->dll);
	return 0;
    }

    lavc_context = avcodec_alloc_context();
    sh_audio->context=lavc_context;

    lavc_context->drc_scale = opts->drc_level;
    lavc_context->sample_rate = sh_audio->samplerate;
    lavc_context->bit_rate = sh_audio->i_bps * 8;
    if(sh_audio->wf){
	lavc_context->channels = sh_audio->wf->nChannels;
	lavc_context->sample_rate = sh_audio->wf->nSamplesPerSec;
	lavc_context->bit_rate = sh_audio->wf->nAvgBytesPerSec * 8;
	lavc_context->block_align = sh_audio->wf->nBlockAlign;
	lavc_context->bits_per_coded_sample = sh_audio->wf->wBitsPerSample;
    }
    lavc_context->request_channels = opts->audio_output_channels;
    lavc_context->codec_tag = sh_audio->format; //FOURCC
    lavc_context->codec_type = AVMEDIA_TYPE_AUDIO;
    lavc_context->codec_id = lavc_codec->id; // not sure if required, imho not --A'rpi

    /* alloc extra data */
    if (sh_audio->wf && sh_audio->wf->cbSize > 0) {
        lavc_context->extradata = av_mallocz(sh_audio->wf->cbSize + FF_INPUT_BUFFER_PADDING_SIZE);
        lavc_context->extradata_size = sh_audio->wf->cbSize;
        memcpy(lavc_context->extradata, sh_audio->wf + 1,
               lavc_context->extradata_size);
    }

    // for QDM2
    if (sh_audio->codecdata_len && sh_audio->codecdata && !lavc_context->extradata)
    {
        lavc_context->extradata = av_malloc(sh_audio->codecdata_len +
                                            FF_INPUT_BUFFER_PADDING_SIZE);
        lavc_context->extradata_size = sh_audio->codecdata_len;
        memcpy(lavc_context->extradata, (char *)sh_audio->codecdata,
               lavc_context->extradata_size);
    }

    /* open it */
    if (avcodec_open(lavc_context, lavc_codec) < 0) {
        mp_tmsg(MSGT_DECAUDIO,MSGL_ERR, "Could not open codec.\n");
        return 0;
    }
   mp_msg(MSGT_DECAUDIO,MSGL_V,"INFO: libavcodec \"%s\" init OK!\n", lavc_codec->name);

//   printf("\nFOURCC: 0x%X\n",sh_audio->format);
   if(sh_audio->format==0x3343414D){
       // MACE 3:1
       sh_audio->ds->ss_div = 2*3; // 1 samples/packet
       sh_audio->ds->ss_mul = 2*sh_audio->wf->nChannels; // 1 byte*ch/packet
   } else
   if(sh_audio->format==0x3643414D){
       // MACE 6:1
       sh_audio->ds->ss_div = 2*6; // 1 samples/packet
       sh_audio->ds->ss_mul = 2*sh_audio->wf->nChannels; // 1 byte*ch/packet
   }

   // Decode at least 1 byte:  (to get header filled)
   for (int tries = 0;;) {
       int x = decode_audio(sh_audio, sh_audio->a_buffer, 1,
                            sh_audio->a_buffer_size);
       if (x > 0) {
           sh_audio->a_buffer_len = x;
           break;
       }
       if (++tries >= 5) {
           mp_msg(MSGT_DECAUDIO, MSGL_ERR,
                  "ad_ffmpeg: initial decode failed\n");
           return 0;
       }
   }

  sh_audio->i_bps=lavc_context->bit_rate/8;
  if (sh_audio->wf && sh_audio->wf->nAvgBytesPerSec)
      sh_audio->i_bps=sh_audio->wf->nAvgBytesPerSec;

  switch (lavc_context->sample_fmt) {
      case SAMPLE_FMT_U8:
      case SAMPLE_FMT_S16:
      case SAMPLE_FMT_S32:
      case SAMPLE_FMT_FLT:
          break;
      default:
          return 0;
  }
  return 1;
}
Example #13
static int init(sh_audio_t *sh_audio)
{
    int x;
    AVCodecContext *lavc_context;
    AVCodec *lavc_codec;

    mp_msg(MSGT_DECAUDIO,MSGL_V,"FFmpeg's libavcodec audio codec\n");
    if(!avcodec_inited){
      avcodec_init();
      avcodec_register_all();
      avcodec_inited=1;
    }
    
    lavc_codec = (AVCodec *)avcodec_find_decoder_by_name(sh_audio->codec->dll);
    if(!lavc_codec){
	mp_msg(MSGT_DECAUDIO,MSGL_ERR,MSGTR_MissingLAVCcodec,sh_audio->codec->dll);
	return 0;
    }
    
    lavc_context = avcodec_alloc_context();
    sh_audio->context=lavc_context;

    if(sh_audio->wf){
	lavc_context->channels = sh_audio->wf->nChannels;
	lavc_context->sample_rate = sh_audio->wf->nSamplesPerSec;
	lavc_context->bit_rate = sh_audio->wf->nAvgBytesPerSec * 8;
	lavc_context->block_align = sh_audio->wf->nBlockAlign;
	lavc_context->bits_per_sample = sh_audio->wf->wBitsPerSample;
    }
    lavc_context->codec_tag = sh_audio->format; //FOURCC
    lavc_context->codec_id = lavc_codec->id; // not sure if required, imho not --A'rpi

    /* alloc extra data */
    if (sh_audio->wf && sh_audio->wf->cbSize > 0) {
        lavc_context->extradata = av_mallocz(sh_audio->wf->cbSize + FF_INPUT_BUFFER_PADDING_SIZE);
        lavc_context->extradata_size = sh_audio->wf->cbSize;
        memcpy(lavc_context->extradata, (char *)sh_audio->wf + sizeof(WAVEFORMATEX), 
               lavc_context->extradata_size);
    }

    // for QDM2
    if (sh_audio->codecdata_len && sh_audio->codecdata && !lavc_context->extradata)
    {
        lavc_context->extradata = av_malloc(sh_audio->codecdata_len);
        lavc_context->extradata_size = sh_audio->codecdata_len;
        memcpy(lavc_context->extradata, (char *)sh_audio->codecdata, 
               lavc_context->extradata_size);	
    }

    /* open it */
    if (avcodec_open(lavc_context, lavc_codec) < 0) {
        mp_msg(MSGT_DECAUDIO,MSGL_ERR, MSGTR_CantOpenCodec);
        return 0;
    }
   mp_msg(MSGT_DECAUDIO,MSGL_V,"INFO: libavcodec init OK!\n");
   
//   printf("\nFOURCC: 0x%X\n",sh_audio->format);
   if(sh_audio->format==0x3343414D){
       // MACE 3:1
       sh_audio->ds->ss_div = 2*3; // 1 samples/packet
       sh_audio->ds->ss_mul = 2*sh_audio->wf->nChannels; // 1 byte*ch/packet
   } else
   if(sh_audio->format==0x3643414D){
       // MACE 6:1
       sh_audio->ds->ss_div = 2*6; // 1 samples/packet
       sh_audio->ds->ss_mul = 2*sh_audio->wf->nChannels; // 1 byte*ch/packet
   }

   // Decode at least 1 byte:  (to get header filled)
   x=decode_audio(sh_audio,sh_audio->a_buffer,1,sh_audio->a_buffer_size);
   if(x>0) sh_audio->a_buffer_len=x;

  sh_audio->channels=lavc_context->channels;
  sh_audio->samplerate=lavc_context->sample_rate;
  sh_audio->i_bps=lavc_context->bit_rate/8;
  if(sh_audio->wf){
      // If the decoder uses the wrong number of channels all is lost anyway.
      // sh_audio->channels=sh_audio->wf->nChannels;
      if (sh_audio->wf->nSamplesPerSec)
      sh_audio->samplerate=sh_audio->wf->nSamplesPerSec;
      if (sh_audio->wf->nAvgBytesPerSec)
      sh_audio->i_bps=sh_audio->wf->nAvgBytesPerSec;
  }
  sh_audio->samplesize=2;
  return 1;
}
Example #14
static int init(sh_audio_t *sh_audio)
{
    int tries = 0;
    int x;
    AVCodecContext *lavc_context;
    AVCodec *lavc_codec;

    mp_msg(MSGT_DECAUDIO,MSGL_V,"FFmpeg's libavcodec audio codec\n");
    if(!avcodec_initialized){
      avcodec_init();
      avcodec_register_all();
      avcodec_initialized=1;
    }

    lavc_codec = (AVCodec *)avcodec_find_decoder_by_name(sh_audio->codec->dll);
    if(!lavc_codec){
	mp_tmsg(MSGT_DECAUDIO,MSGL_ERR,"Cannot find codec '%s' in libavcodec...\n",sh_audio->codec->dll);
	return 0;
    }

    lavc_context = avcodec_alloc_context();
    sh_audio->context=lavc_context;

    lavc_context->sample_rate = sh_audio->samplerate;
    lavc_context->bit_rate = sh_audio->i_bps * 8;
    if(sh_audio->wf){
	lavc_context->channels = sh_audio->wf->nChannels;
	lavc_context->sample_rate = sh_audio->wf->nSamplesPerSec;
	lavc_context->bit_rate = sh_audio->wf->nAvgBytesPerSec * 8;
	lavc_context->block_align = sh_audio->wf->nBlockAlign;
	lavc_context->bits_per_coded_sample = sh_audio->wf->wBitsPerSample;
    }
    lavc_context->request_channels = audio_output_channels;
    lavc_context->codec_tag = sh_audio->format; //FOURCC
    lavc_context->codec_type = CODEC_TYPE_AUDIO;
    lavc_context->codec_id = lavc_codec->id; // not sure if required, imho not --A'rpi

    /* alloc extra data */
    if (sh_audio->wf && sh_audio->wf->cbSize > 0) {
        lavc_context->extradata = av_mallocz(sh_audio->wf->cbSize + FF_INPUT_BUFFER_PADDING_SIZE);
        lavc_context->extradata_size = sh_audio->wf->cbSize;
        memcpy(lavc_context->extradata, (char *)sh_audio->wf + sizeof(WAVEFORMATEX),
               lavc_context->extradata_size);
    }

    // for QDM2
    if (sh_audio->codecdata_len && sh_audio->codecdata && !lavc_context->extradata)
    {
        lavc_context->extradata = av_malloc(sh_audio->codecdata_len);
        lavc_context->extradata_size = sh_audio->codecdata_len;
        memcpy(lavc_context->extradata, (char *)sh_audio->codecdata,
               lavc_context->extradata_size);
    }

    /* open it */
    if (avcodec_open(lavc_context, lavc_codec) < 0) {
        mp_tmsg(MSGT_DECAUDIO,MSGL_ERR, "Could not open codec.\n");
        return 0;
    }
   mp_msg(MSGT_DECAUDIO,MSGL_V,"INFO: libavcodec \"%s\" init OK!\n", lavc_codec->name);

//   printf("\nFOURCC: 0x%X\n",sh_audio->format);
   if(sh_audio->format==0x3343414D){
       // MACE 3:1
       sh_audio->ds->ss_div = 2*3; // 1 samples/packet
       sh_audio->ds->ss_mul = 2*sh_audio->wf->nChannels; // 1 byte*ch/packet
   } else
   if(sh_audio->format==0x3643414D){
       // MACE 6:1
       sh_audio->ds->ss_div = 2*6; // 1 samples/packet
       sh_audio->ds->ss_mul = 2*sh_audio->wf->nChannels; // 1 byte*ch/packet
   }

   // Decode at least 1 byte:  (to get header filled)
   do {
       x=decode_audio(sh_audio,sh_audio->a_buffer,1,sh_audio->a_buffer_size);
   } while (x <= 0 && tries++ < 5);
   if(x>0) sh_audio->a_buffer_len=x;

  sh_audio->channels=lavc_context->channels;
  sh_audio->samplerate=lavc_context->sample_rate;
  sh_audio->i_bps=lavc_context->bit_rate/8;
  switch (lavc_context->sample_fmt) {
      case SAMPLE_FMT_U8:  sh_audio->sample_format = AF_FORMAT_U8;       break;
      case SAMPLE_FMT_S16: sh_audio->sample_format = AF_FORMAT_S16_NE;   break;
      case SAMPLE_FMT_S32: sh_audio->sample_format = AF_FORMAT_S32_NE;   break;
      case SAMPLE_FMT_FLT: sh_audio->sample_format = AF_FORMAT_FLOAT_NE; break;
      default:
          mp_msg(MSGT_DECAUDIO, MSGL_FATAL, "Unsupported sample format\n");
          return 0;
  }
  /* If the audio is AAC the container level data may be unreliable
   * because of SBR handling problems (possibly half real sample rate at
   * container level). Default AAC decoding with ad_faad has used codec-level
   * values for a long time without generating complaints so it should be OK.
   */
  if (sh_audio->wf && lavc_context->codec_id != CODEC_ID_AAC) {
      // If the decoder uses the wrong number of channels all is lost anyway.
      // sh_audio->channels=sh_audio->wf->nChannels;
      if (sh_audio->wf->nSamplesPerSec)
      sh_audio->samplerate=sh_audio->wf->nSamplesPerSec;
      if (sh_audio->wf->nAvgBytesPerSec)
      sh_audio->i_bps=sh_audio->wf->nAvgBytesPerSec;
  }
  sh_audio->samplesize=af_fmt2bits(sh_audio->sample_format)/ 8;
  return 1;
}
Example #15
static gboolean
process (GeglOperation       *operation,
         GeglBuffer          *output,
         const GeglRectangle *result,
         gint                 level)
{
  GeglProperties *o = GEGL_PROPERTIES (operation);
  Priv       *p = (Priv*)o->user_data;

  {
    if (p->video_fcontext && !decode_frame (operation, o->frame))
      {
        long sample_start = 0;

        if (p->audio_stream)
        {
          int sample_count;
          gegl_audio_fragment_set_sample_rate (o->audio, p->audio_stream->codec->sample_rate);
          gegl_audio_fragment_set_channels    (o->audio, 2);
          gegl_audio_fragment_set_channel_layout    (o->audio, GEGL_CH_LAYOUT_STEREO);

          sample_count = samples_per_frame (o->frame,
               o->frame_rate, p->audio_stream->codec->sample_rate,
               &sample_start);
          gegl_audio_fragment_set_sample_count (o->audio, sample_count);

	  decode_audio (operation, p->prevpts, p->prevpts + 5.0);
          {
            int i;
            for (i = 0; i < sample_count; i++)
            {
              get_sample_data (p, sample_start + i, &o->audio->data[0][i],
                                  &o->audio->data[1][i]);
            }
          }
        }
	
        if (p->video_stream->codec->pix_fmt == AV_PIX_FMT_RGB24)
        {
          GeglRectangle extent = {0,0,p->width,p->height};
          gegl_buffer_set (output, &extent, 0, babl_format("R'G'B' u8"), p->lavc_frame->data[0], GEGL_AUTO_ROWSTRIDE);
        }
        else
        {
          struct SwsContext *img_convert_ctx;
          GeglRectangle extent = {0,0,p->width,p->height};

          img_convert_ctx = sws_getContext(p->width, p->height, p->video_stream->codec->pix_fmt,
                                           p->width, p->height, AV_PIX_FMT_RGB24,
                                           SWS_BICUBIC, NULL, NULL, NULL);
          if (!p->rgb_frame)
            p->rgb_frame = alloc_picture (AV_PIX_FMT_RGB24, p->width, p->height);
          sws_scale (img_convert_ctx, (void*)p->lavc_frame->data,
                     p->lavc_frame->linesize, 0, p->height, p->rgb_frame->data, p->rgb_frame->linesize);
          gegl_buffer_set (output, &extent, 0, babl_format("R'G'B' u8"), p->rgb_frame->data[0], GEGL_AUTO_ROWSTRIDE);
          sws_freeContext (img_convert_ctx);
        }
      }
  }
  return  TRUE;
}
Example #16
size_t FFmpegDecoderAudio::decodeFrame(void * const buffer, const size_t size)
{
    for (;;)
    {
        // Decode current packet

        while (m_bytes_remaining > 0)
        {
            int data_size = size;

            const int bytes_decoded = decode_audio(m_context, reinterpret_cast<int16_t*>(buffer), &data_size, m_packet_data, m_bytes_remaining);

            if (bytes_decoded < 0)
            {
                // if error, skip frame
                m_bytes_remaining = 0;
                break;
            }

            m_bytes_remaining -= bytes_decoded;
            m_packet_data += bytes_decoded;

            // If we have some data, return it and come back for more later.
            if (data_size > 0)
                return data_size;
        }

        // Get next packet

        if (m_packet.valid())
            m_packet.clear();

        if (m_exit)
            return 0;

        bool is_empty = true;
        m_packet = m_packets.tryPop(is_empty);

        if (is_empty)
            return 0;

        if (m_packet.type == FFmpegPacket::PACKET_DATA)
        {
            if (m_packet.packet.pts != int64_t(AV_NOPTS_VALUE))
            {
                const double pts = av_q2d(m_stream->time_base) * m_packet.packet.pts;
                m_clocks.audioSetBufferEndPts(pts);
            }

            m_bytes_remaining = m_packet.packet.size;
            m_packet_data = m_packet.packet.data;
        }
        else if (m_packet.type == FFmpegPacket::PACKET_END_OF_STREAM)
        {
            m_end_of_stream = true;
        }
        else if (m_packet.type == FFmpegPacket::PACKET_FLUSH)
        {
            avcodec_flush_buffers(m_context);
        }

        // just output silence when we reached the end of stream
        if (m_end_of_stream)
        {
            memset(buffer, 0, size);
            return size;
        }
    }
}
Example #17
static void *adec_armdec_loop(void *args)
{
    int ret;
	int rlen = 0;
	int inlen = 0;
	int dlen = 0;
	int declen = 0;
    aml_audio_dec_t *audec;
    audio_out_operations_t *aout_ops;
    adec_cmd_t *msg = NULL;
	AVPacket apkt;
	char *inbuf = NULL;
	char apkt_end[APACKET_END_SIZE];
	char outbuf[AVCODEC_MAX_AUDIO_FRAME_SIZE];
	int outlen = 0;
	AVCodecContext *ctxCodec = NULL;
	AVCodec *acodec = NULL;
	int in_ape_fp = -1;
	int out_ape_fp = -1;
	int audio_handle = -1;
	
	adec_print("adec_armdec_loop start!\n");

	audec = (aml_audio_dec_t *)args;
    aout_ops = &audec->aout_ops;
	av_init_packet(&apkt); 
	//memset(inbuf, 0, READ_ABUFFER_SIZE);
	memset(outbuf, 0, AVCODEC_MAX_AUDIO_FRAME_SIZE);

	//buffer_stream_t init and set adsp_ops param
	audec->bs=malloc(sizeof(buffer_stream_t));
	int ret_value=init_buff(audec->bs);
	if(ret_value==1)
		adec_print("=====pcm buffer init ok buf_size:%d buf_data:0x%x  end:0x%x !\n",audec->bs->buf_length,audec->bs->data,audec->bs->data+1024*1024);
	audec->adsp_ops.dsp_on=1;
	aout_ops->init(audec);
	aout_ops->start(audec);

	ctxCodec = avcodec_alloc_context();
	if(!ctxCodec) {
		adec_print("APE AVCodecContext allocate error!\n");
		ctxCodec = NULL;
	}
	adec_print("ctxCodec!\n");

	
	adec_print("adec_armdec_loop   audec->pcodec = %d, audec->pcodec->ctxCodec = %d!\n", audec->pcodec, audec->pcodec->ctxCodec);
		
	ctxCodec = audec->pcodec->ctxCodec;
	ctxCodec->codec_type = CODEC_TYPE_AUDIO;
		
	adec_print("open codec_id = %d--\n",ctxCodec->codec_id);
	acodec = avcodec_find_decoder(ctxCodec->codec_id);
	if (!acodec) {
		adec_print("acodec not found\n");
	}
	adec_print("open codec_id = %d----------------------------------\n",ctxCodec->codec_id);
		
	if (avcodec_open(ctxCodec, acodec) < 0) {
		adec_print("Could not open acodec = %d\n", acodec);
	}

	out_ape_fp = open("./dump/123.dat", O_CREAT | O_RDWR);
	if (out_ape_fp < 0) {
        adec_print("Create input file failed! fd=%d------------------------------\n", out_ape_fp);
    }
    adec_print("out_ape_fp = %d!", out_ape_fp);
	in_ape_fp = open("./dump/in.dat", O_CREAT | O_RDWR);
	if (in_ape_fp < 0) {
        adec_print("Create input file failed! fd=%d------------------------------\n", out_ape_fp);
    }
    adec_print("in_ape_fp = %d!", in_ape_fp);

	ret = uio_init();
	if (ret < 0){
		adec_print("uio init error! \n");
		goto error;
	}
		
    while (1){

		if (inlen > 0) {
			if (inbuf) {
				free(inbuf);
				inbuf = NULL;
			}
			inbuf = malloc(READ_ABUFFER_SIZE + inlen);
			memcpy(inbuf, apkt_end, inlen);
			rlen = read_buffer(inbuf+inlen, READ_ABUFFER_SIZE);
			rlen += inlen;
		}
		else {
			if (inbuf) {
				free(inbuf);
				inbuf = NULL;
			}
			inbuf = malloc(READ_ABUFFER_SIZE);
			rlen = read_buffer(inbuf+inlen, READ_ABUFFER_SIZE);
		}
		if (out_ape_fp >= 0) {
			write(in_ape_fp, inbuf, rlen);
			adec_print("write ape data in rlen = %d bytes\n",rlen);
		}
		declen = 0;
		if (rlen > 0){
			inlen = rlen;
			while (declen<rlen) {	
				outlen = AVCODEC_MAX_AUDIO_FRAME_SIZE;
				dlen = decode_audio(ctxCodec, outbuf, &outlen, inbuf+declen, inlen);
				if (dlen <= 0){
					adec_print("dlen = %d error----\n",dlen);
					if (inlen > 0) {
						adec_print("packet end %d bytes----\n",inlen);
						memcpy(apkt_end, (uint8_t *)(inbuf+declen), inlen);
					}
					break;
				}
				declen += dlen;
				inlen -= dlen;
				write_pcm_buffer(outbuf, audec->bs,outlen);
				//write_buffer(outbuf, outlen);
				if (outlen > 0) {
					if (out_ape_fp >= 0) {
						write(out_ape_fp, outbuf, outlen);
					}
				}
			} 
		
#if 0			
			outsize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
			apkt.data = inbuffer;
			apkt.size = rlen;
			while (apkt.size > 0){
				dlen = avcodec_decode_audio3(ctxCodec, (int16_t *)outbuffer, &outsize, &apkt);
				if (dlen <= 0){
					adec_print("dlen = %d error----\n",dlen);
				}
				if (outsize > 0) {
					if (out_ape_fp >= 0) {
						write(out_ape_fp, outbuffer, outsize);
						adec_print("write ape data%d bytes\n UUUUUUUU----\n",outsize);
					}
				}				
				declen += dlen;
				apkt.size -= dlen;					
				if (apkt.size > 0){
					apkt.data += dlen;
				}
				else if (apkt.size < 0){
					adec_print("wrong aptk.size = %d, declen = %d, dlen = %d!", apkt.size, declen, dlen);
				}					
			}
#endif			
		}
		else {			
			adec_print("rlen = %d", rlen);
			break;
		}
	}
    close(out_ape_fp);
    close(in_ape_fp);
	
    adec_print("Exit adec_armdec_loop Thread!");

error:	
    pthread_exit(NULL);
    return NULL;
}
Example #18
static int init(sh_audio_t *sh_audio)
{
    int tries = 0;
    int x;
    AVCodecContext *lavc_context;
    AVCodec *lavc_codec;
    AVDictionary *opts = NULL;
    char tmpstr[50];

    mp_msg(MSGT_DECAUDIO,MSGL_V,"FFmpeg's libavcodec audio codec\n");
    init_avcodec();

    lavc_codec = avcodec_find_decoder_by_name(sh_audio->codec->dll);
    if(!lavc_codec){
	mp_msg(MSGT_DECAUDIO,MSGL_ERR,MSGTR_MissingLAVCcodec,sh_audio->codec->dll);
	return 0;
    }

    lavc_context = avcodec_alloc_context3(lavc_codec);
    sh_audio->context=lavc_context;

    snprintf(tmpstr, sizeof(tmpstr), "%f", drc_level);
    av_dict_set(&opts, "drc_scale", tmpstr, 0);
    lavc_context->sample_rate = sh_audio->samplerate;
    lavc_context->bit_rate = sh_audio->i_bps * 8;
    if(sh_audio->wf){
	lavc_context->channels = sh_audio->wf->nChannels;
	lavc_context->sample_rate = sh_audio->wf->nSamplesPerSec;
	lavc_context->bit_rate = sh_audio->wf->nAvgBytesPerSec * 8;
	lavc_context->block_align = sh_audio->wf->nBlockAlign;
	lavc_context->bits_per_coded_sample = sh_audio->wf->wBitsPerSample;
    }
    lavc_context->request_channels = audio_output_channels;
    lavc_context->codec_tag = sh_audio->format; //FOURCC
    lavc_context->codec_id = lavc_codec->id; // not sure if required, imho not --A'rpi

    /* alloc extra data */
    if (sh_audio->wf && sh_audio->wf->cbSize > 0) {
        lavc_context->extradata = av_mallocz(sh_audio->wf->cbSize + FF_INPUT_BUFFER_PADDING_SIZE);
        lavc_context->extradata_size = sh_audio->wf->cbSize;
        memcpy(lavc_context->extradata, sh_audio->wf + 1,
               lavc_context->extradata_size);
    }

    // for QDM2
    if (sh_audio->codecdata_len && sh_audio->codecdata && !lavc_context->extradata)
    {
        lavc_context->extradata = av_malloc(sh_audio->codecdata_len);
        lavc_context->extradata_size = sh_audio->codecdata_len;
        memcpy(lavc_context->extradata, (char *)sh_audio->codecdata,
               lavc_context->extradata_size);
    }

    /* open it */
    if (avcodec_open2(lavc_context, lavc_codec, &opts) < 0) {
        mp_msg(MSGT_DECAUDIO,MSGL_ERR, MSGTR_CantOpenCodec);
        return 0;
    }
    av_dict_free(&opts);
   mp_msg(MSGT_DECAUDIO,MSGL_V,"INFO: libavcodec \"%s\" init OK!\n", lavc_codec->name);

//   printf("\nFOURCC: 0x%X\n",sh_audio->format);
   if(sh_audio->format==0x3343414D){
       // MACE 3:1
       sh_audio->ds->ss_div = 2*3; // 1 samples/packet
       sh_audio->ds->ss_mul = 2*sh_audio->wf->nChannels; // 1 byte*ch/packet
   } else
   if(sh_audio->format==0x3643414D){
       // MACE 6:1
       sh_audio->ds->ss_div = 2*6; // 1 samples/packet
       sh_audio->ds->ss_mul = 2*sh_audio->wf->nChannels; // 1 byte*ch/packet
   }

   // Decode at least 1 byte:  (to get header filled)
   do {
       x=decode_audio(sh_audio,sh_audio->a_buffer,1,sh_audio->a_buffer_size);
   } while (x <= 0 && tries++ < 5);
   if(x>0) sh_audio->a_buffer_len=x;

  sh_audio->i_bps=lavc_context->bit_rate/8;
  if (sh_audio->wf && sh_audio->wf->nAvgBytesPerSec)
      sh_audio->i_bps=sh_audio->wf->nAvgBytesPerSec;

  switch (lavc_context->sample_fmt) {
      case AV_SAMPLE_FMT_U8:
      case AV_SAMPLE_FMT_S16:
      case AV_SAMPLE_FMT_S32:
      case AV_SAMPLE_FMT_FLT:
          break;
      default:
          return 0;
  }
  return 1;
}
Example #19
/* pkt = NULL means EOF (needed to flush decoder buffers) */
static int output_packet(InputStream *ist, const AVPacket *pkt)
{
    int ret = 0, i;
    int got_output = 0;

    AVPacket avpkt;
    if (!ist->saw_first_ts) {
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt != NULL && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    if (pkt == NULL) {
        /* EOF handling */
        av_init_packet(&avpkt);
        avpkt.data = NULL;
        avpkt.size = 0;
        goto handle_eof;
    } else {
        avpkt = *pkt;
    }

    if (pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
        int duration;
    handle_eof:

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        if (avpkt.size && avpkt.size != pkt->size &&
            !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
            av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                   "Multiple frames in a packet from stream %d\n", pkt->stream_index);
            ist->showed_multi_packet_warning = 1;
        }

        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = decode_audio    (ist, &avpkt, &got_output);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, &avpkt, &got_output);
            if (avpkt.duration) {
                duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->time_base.num != 0 && ist->dec_ctx->time_base.den != 0) {
                int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                duration = ((int64_t)AV_TIME_BASE *
                                ist->dec_ctx->time_base.num * ticks) /
                                ist->dec_ctx->time_base.den;
            } else
                duration = 0;

            if(ist->dts != AV_NOPTS_VALUE && duration) {
                ist->next_dts += duration;
            }else
                ist->next_dts = AV_NOPTS_VALUE;

            if (got_output)
                ist->next_pts += duration; //FIXME the duration is not correct in some cases
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ret = transcode_subtitles(ist, &avpkt, &got_output);
            break;
        default:
            return -1;
        }

        if (ret < 0)
            return ret;

        avpkt.dts=
        avpkt.pts= AV_NOPTS_VALUE;

        // touch data and size only if not EOF
        if (pkt) {
            if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
                ret = avpkt.size;
            avpkt.data += ret;
            avpkt.size -= ret;
        }
        if (!got_output) {
            continue;
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed) {
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                             ist->dec_ctx->sample_rate;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->time_base.num != 0) {
                int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->time_base.num * ticks) /
                                  ist->dec_ctx->time_base.den;
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    }
    for (i = 0; pkt && i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return 0;
}
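The decode_audio() helpers in these examples are written against several generations of the libavcodec API (avcodec_decode_audio3, the ffmpeg.c internals, and so on). For comparison only, a minimal sketch of the same job using the current send/receive API; the decode_audio_packet() name and the on_frame callback are hypothetical and not taken from any project listed above.

#include <libavcodec/avcodec.h>

/* Hypothetical helper: decode one packet with the post-3.1 send/receive API
 * and hand each decoded AVFrame to a caller-supplied callback.  Returns the
 * number of frames produced, or a negative AVERROR code. */
static int decode_audio_packet(AVCodecContext *ctx, const AVPacket *pkt,
                               void (*on_frame)(const AVFrame *frame, void *opaque),
                               void *opaque)
{
    int ret = avcodec_send_packet(ctx, pkt);   /* pkt == NULL drains the decoder */
    if (ret < 0)
        return ret;

    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return AVERROR(ENOMEM);

    int nframes = 0;
    while ((ret = avcodec_receive_frame(ctx, frame)) >= 0) {
        on_frame(frame, opaque);               /* decoded PCM is in frame->data[] */
        av_frame_unref(frame);
        nframes++;
    }

    av_frame_free(&frame);
    /* EAGAIN (needs more input) and EOF (fully drained) are the normal exits. */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? nframes : ret;
}

One packet may yield zero frames (the decoder buffered it) or several, which is why the examples above all loop until the decoder stops producing output.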