Example #1
/**
 * Reads a frame via <code>av_read_frame(AVFormatContext, AVPacket)</code>,
 * decodes the resulting packet, and writes the decoded audio to the
 * Java-side <code>nativeBuffer</code>.
 *
 * @param aio   current FFAudioIO
 * @return  a negative number if something went wrong
 */
int ff_fill_buffer(FFAudioIO *aio) {
    int res = 0;
    int read_frame = 0;

    aio->timestamp += aio->decode_packet.duration;

    while (res == 0 && read_frame >= 0) {
        read_frame = av_read_frame(aio->format_context, &aio->decode_packet);
        if (read_frame >= 0) {
            res = decode_packet(aio, 0);
    #ifdef DEBUG
            fprintf(stderr, "res       : %i\n", res);
            fprintf(stderr, "read_frame: %i\n", read_frame);
            fprintf(stderr, "duration  : %i\n", aio->decode_packet.duration);
            fprintf(stderr, "timestamp : %" PRId64 "\n", aio->timestamp);
            fprintf(stderr, "pts       : %" PRId64 "\n", aio->decode_packet.pts);
            fprintf(stderr, "dts       : %" PRId64 "\n", aio->decode_packet.dts);
    #endif
            av_packet_unref(&(aio->decode_packet));
        } else {
    #ifdef DEBUG
            fprintf(stderr, "Reading cached frames: %i\n", aio->got_frame);
    #endif
            // flush cached frames
            av_packet_unref(&(aio->decode_packet));
            res = decode_packet(aio, 1);
        }
    }

    return res;
}
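The decode_packet(aio, flush) helper itself is not shown. A minimal sketch of the contract this loop relies on, written against the same deprecated avcodec_decode_audio4() API the example uses; the decode_context and decode_frame fields of FFAudioIO are assumptions:

/* Hypothetical decode_packet(): decode one packet (or drain cached frames
 * when flush is set) and report errors as negative numbers, matching the
 * loop in ff_fill_buffer() above. */
static int decode_packet(FFAudioIO *aio, int flush) {
    int got_frame = 0;
    AVPacket *pkt = &aio->decode_packet;
    if (flush) {
        pkt->data = NULL;       /* empty packet drains cached frames */
        pkt->size = 0;
    }
    int ret = avcodec_decode_audio4(aio->decode_context, aio->decode_frame,
                                    &got_frame, pkt);
    if (ret < 0)
        return ret;             /* negative number: something went wrong */
    aio->got_frame = got_frame;
    /* resampling and the copy into the Java-side nativeBuffer would
     * happen here when got_frame is set */
    return 0;
}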
Example #2
void packet_handler(const u_char *pkt_data, int caplen) {
    char command_o[100];
    int commandlen = 0;

    int identified = 0;
    int ac = 0;

    for (ac = 0; ac + 5 <= caplen; ac++) {
        if (pkt_data[ac] == 'I' && pkt_data[ac+1] == 'N' && pkt_data[ac+2] == 'T' &&
            pkt_data[ac+3] == 'C' && pkt_data[ac+4] == 'P') {
            identified = 1;
            break;
        }
    }

    if (identified) {
        if (decode_packet(&gcry_hd, &pkt_data[54 + ac], caplen - (54 + ac) - 10,
                          command_o, &commandlen, &seq) == 0) {
            //printf("\r\n SENT %s (%i) ->%i \t", command_o, strlen(command_o), seq);
            //fflush(stdout);
            sendto(sock, command_o, strlen(command_o), 0,
                   (const struct sockaddr *)&server, sizeof(struct sockaddr_in));
            lastseq = seq;
        }
    }

}
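The marker scan above walks the raw capture byte by byte; the same search factored into a bounded helper that can never read past caplen, as a sketch:

#include <string.h>

/* Return the offset of the 5-byte "INTCP" marker in pkt, or -1 if it
 * is absent. Never dereferences bytes beyond caplen. */
static int find_marker(const u_char *pkt, int caplen) {
    int i;
    for (i = 0; i + 5 <= caplen; i++)
        if (memcmp(pkt + i, "INTCP", 5) == 0)
            return i;
    return -1;
}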
Example #3
//This function inserts dummy data into the cache, fooling it into
//believing that data is being served locally
//This does not really require a callback
uint8_t madn_register_server(MADN_INSTANCE *env, MADN_DATAID id)
{
    srand(time(NULL));
    while(check_decoded_cache(env, id) != 1)
    {
        
        MADN_PKT_DATA* data1 = create_data_pkt();
        set_packet_node((MADN_PTR) data1, env->globals->NODE_ID);
        strncpy((char*) &data1->bloom, "abcdefghijklmno", 16);
        data1->id.chunk = id.chunk;
        data1->id.stripe = id.stripe;
        int j;
        for (j = 0; j < PIECES_DIM/sizeof(int); j++)
        {
            data1->pieces[j] = rand();
        }
        //strncpy((char*) &data1->pieces, "abcdefghijklmnopqrstuvwxyzABCDE", 32);
        strcpy((char*) &data1->data, "Mary had a little lamb.");

        //add_coded_packet_cache(env, data1);
        decode_packet(env, data1);

        free_data_pkt(&data1);
        //assert(ret != 0);
    }

    return 0;
}
Example #4
// Return the packet type. Returns MQTT_FAILURE if no packet is available,
// or MQTT_READ_ERROR on timeout or if the packet does not fit in readbuf.
static int read_packet(mqtt_client_t* c, mqtt_timer_t* timer)
{
    int rc = MQTT_FAILURE;
    mqtt_header_t header = {0};
    int len = 0;
    int rem_len = 0;

    /* 1. read the header byte.  This has the packet type in it */
    if (c->ipstack->mqttread(c->ipstack, c->readbuf, 1, mqtt_timer_left_ms(timer)) != 1)
        goto exit;
    len = 1;
    /* 2. read the remaining length.  This is variable in itself */
    len += decode_packet(c, &rem_len, mqtt_timer_left_ms(timer));
    if (len <= 1 || len + rem_len > c->readbuf_size) /* if packet is too big to fit in our readbuf, abort */
    {
        rc = MQTT_READ_ERROR;
        goto exit;
    }
    mqtt_packet_encode(c->readbuf + 1, rem_len); /* put the original remaining length back into the buffer */
    /* 3. read the rest of the buffer using a callback to supply the rest of the data */
    if (rem_len > 0 && (c->ipstack->mqttread(c->ipstack, c->readbuf + len, rem_len, mqtt_timer_left_ms(timer)) != rem_len))
    {
        rc = MQTT_READ_ERROR;
        goto exit;
    }
    header.byte = c->readbuf[0];
    rc = header.bits.type;
exit:
    return rc;
}
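The decode_packet() call here reads MQTT's variable-length Remaining Length field (step 2). The encoding is fixed by the MQTT specification: seven value bits per byte, the high bit as a continuation flag, and at most four bytes. A standalone decoder over an in-memory buffer, as a sketch:

/* Decode an MQTT Remaining Length (variable byte integer) from buf.
 * Returns the number of bytes consumed, or -1 on malformed input. */
static int mqtt_decode_remaining_length(const unsigned char *buf, int *value) {
    int multiplier = 1;
    int len = 0;
    unsigned char digit;
    *value = 0;
    do {
        if (len >= 4)               /* the spec allows at most 4 bytes */
            return -1;
        digit = buf[len++];
        *value += (digit & 0x7F) * multiplier;
        multiplier *= 128;
    } while (digit & 0x80);
    return len;
}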
Example #5
G_GNUC_INTERNAL
void stream_h264_data(display_stream *st)
{
	int width, height;
	uint8_t *data;
	int size = stream_get_current_frame(st, &data);

	stream_get_dimensions(st, &width, &height);

	if(st->stream_width != width || st->stream_height != height) {
		st->stream_width = width;
		st->stream_height = height;

		stream_h264_finit(st);
		stream_h264_init(st);	

		st->rgba_buf = g_malloc0(st->stream_width * st->stream_height * 4);
	}

	st->out_frame = st->rgba_buf;

	av_init_packet(&st->packet);
	st->packet.data = data;
	st->packet.size = size;
	decode_packet(st, width, height);
	av_free_packet(&st->packet);
}
Example #6
ssize_t NextPacket(int fill1, void *buffer, size_t buffer_size, int fill2, 
		struct sockaddr *sock, socklen_t *size) {

// ssize_t NextPacket(void *buffer, size_t buffer_size) {
struct pcap_pkthdr *header;
u_char             *pkt_data;
int 			   i;

	i = pcap_next_ex(pcap_handle, &header, (const u_char**)&pkt_data);
	if ( i != 1 )
		return -2;

	*size = sizeof(struct sockaddr_in);
	return decode_packet(header, pkt_data, buffer, sock);
}
Example #7
int attempt_recieve()
{
    char buffer[DEVICE_STATE_SIZE];
    int length;
    struct timeval start, end;
    long usec;
    double intpart;
    struct timeval timeout;

    timeout.tv_sec = update_time/1000;
    timeout.tv_usec = (update_time%1000) * 1000;

    gettimeofday(&start, NULL);
    while(1)
    {
        if (setsockopt (client_tcp_sock, SOL_SOCKET, SO_RCVTIMEO, (char *)&timeout, sizeof(timeout)) < 0)
        {
            LOGERRNO("setsockopt", "Failed to set timeout for recv socket\n");
            sleep(5);
        }

        length = recv(client_tcp_sock, buffer, 11, 0);
        if(length < 0)
        {
            if(errno == EAGAIN || errno == EWOULDBLOCK)
                break;
            LOGERRNO("recv", "Error when attempt to receive on TCP socket\n");
        }
        else
        {
            decode_packet(buffer, length);
        }

        gettimeofday(&end, NULL);

        usec = (end.tv_sec - start.tv_sec)*1000000;
        usec += end.tv_usec - start.tv_usec;

        usec = (update_time*1000) - usec;

        if(usec < 0)
            break;

        timeout.tv_sec = (int) usec/1000000;
        timeout.tv_usec = (long) usec % 1000000;
    }

    return 0;
}
Example #8
int anonprint_process(struct anonflow *flow, void *internal_data, unsigned char *dev_pkt,
		      anon_pkthdr_t * pkt_head)
{
	struct pcap_pkthdr pkthdr;
	anonpacket      decoded_pkt;

	pkthdr.caplen = pkt_head->caplen;
	pkthdr.len = pkt_head->wlen;
	pkthdr.ts.tv_sec = pkt_head->ts.tv_sec;
	pkthdr.ts.tv_usec = pkt_head->ts.tv_usec;

	decode_packet(flow->link_type, flow->cap_length, &pkthdr, (unsigned char *)dev_pkt,
		      &decoded_pkt);
	PrintPacket(stdout, &decoded_pkt, flow->link_type);

	return 1;
}
Example #9
static rx_spi_received_e readrx(uint8_t *packet)
{
    if (!(NRF24L01_ReadReg(NRF24L01_07_STATUS) & BV(NRF24L01_07_STATUS_RX_DR))) {
        uint32_t t = micros() - packet_timer;
        if (t > rx_timeout) {
            switch_channel();
            packet_timer = micros();
        }
        return RX_SPI_RECEIVED_NONE;
    }
    packet_timer = micros();
    NRF24L01_WriteReg(NRF24L01_07_STATUS, BV(NRF24L01_07_STATUS_RX_DR)); // clear the RX_DR flag
    NRF24L01_ReadPayload(packet, V2X2_PAYLOAD_SIZE);
    NRF24L01_FlushRx();

    switch_channel();
    return decode_packet(packet);
}
Example #10
static int ffdec_push_packet(struct decoder_ctx *ctx, const AVPacket *pkt)
{
    int ret;
    const int flush = !pkt->size;
    AVPacket avpkt = *pkt;
    int got_frame;

    TRACE(ctx, "received packet of size %d", pkt->size);
    do {
        ret = decode_packet(ctx, &avpkt, &got_frame);
        if (ret < 0)
            break;
        avpkt.data += ret;
        avpkt.size -= ret;
    } while (avpkt.size > 0 || (flush && got_frame));
    if (ret == 0 && flush && !got_frame)
        return sxpi_decoding_queue_frame(ctx->decoding_ctx, NULL);
    return ret;
}
Example #11
int main(int argc, char *argv[])
{
	const char *ifname = "xxx";
	struct pollfd p;
	int fd;

	fd = create_tap(ifname);
	if (fd < 0)
		return 1;

	if (inet_ifup(ifname) < 0) {
		close(fd);
		return 1;
	}

	memset(&p, 0, sizeof(p));
	p.fd = fd;
	p.events = POLLHUP | POLLIN;

	while (1) {
		unsigned char buf[2048];
		int len;

		len = poll(&p, 1, -1);
		if (len < 0)
			break;
		if (len == 0)
			continue;

		len = read(fd, buf, sizeof(buf));
		if (len < 0)
			break;

		decode_packet(buf, len);
	}

	return 0;
}
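create_tap() and inet_ifup() are not shown. On Linux the usual way to obtain a TAP file descriptor is through /dev/net/tun and the TUNSETIFF ioctl; a sketch of what create_tap() might look like under that assumption:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Hypothetical create_tap(): open a TAP device with the given name and
 * return its file descriptor, or -1 on failure. */
static int create_tap(const char *ifname) {
    struct ifreq ifr;
    int fd = open("/dev/net/tun", O_RDWR);
    if (fd < 0)
        return -1;
    memset(&ifr, 0, sizeof(ifr));
    ifr.ifr_flags = IFF_TAP | IFF_NO_PI;  /* raw Ethernet frames, no extra header */
    strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
    if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
        close(fd);
        return -1;
    }
    return fd;
}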
Example #12
void readrx(void)
{
    int chan;
    uint16_t data[8];

    if (!(NRF24L01_ReadReg(NRF24L01_07_STATUS) & BV(NRF24L01_07_RX_DR))) {
        uint32_t t = lib_timers_gettimermicroseconds(packet_timer);
        if (t > rx_timeout) {
            if (boundprotocol != PROTO_NONE) {
                if (++missed_packets > 500 && bind_phase == PHASE_JUST_BOUND) {
                    valid_packets = missed_packets = bad_packets = 0;
                    bind_phase = PHASE_LOST_BINDING;
                    prepare_to_bind();
                }
            } else switch_channel();
            packet_timer = lib_timers_starttimer();
        }
        return;
    }
    packet_timer = lib_timers_starttimer();
    NRF24L01_WriteReg(NRF24L01_07_STATUS, BV(NRF24L01_07_RX_DR));
    NRF24L01_ReadPayload(packet, V2X2_PAYLOAD_SIZE);
    NRF24L01_FlushRx();
    switch_channel();
    if (!decode_packet(packet, data))
        return;
    
    for (chan = 0; chan < 8; ++chan) {
//        data = pwmRead(chan);
//    if (data < 750 || data > 2250)
//        data = 1500;

        // convert from 1000-2000 range to -1 to 1 fixedpointnum range and low pass filter to remove glitches
        lib_fp_lowpassfilter(&global.rxvalues[chan], ((fixedpointnum) data[chan] - 1500) * 131L, global.timesliver, FIXEDPOINTONEOVERONESIXTYITH, TIMESLIVEREXTRASHIFT);
    }
    // reset the failsafe timer
    global.failsafetimer = lib_timers_starttimer();
}
Example #13
File: packet.c Project: Akasurde/krb5
krb5_error_code
krad_packet_decode_request(krb5_context ctx, const char *secret,
                           const krb5_data *buffer, krad_packet_iter_cb cb,
                           void *data, const krad_packet **duppkt,
                           krad_packet **reqpkt)
{
    const krad_packet *tmp = NULL;
    krb5_error_code retval;

    retval = decode_packet(ctx, secret, buffer, reqpkt);
    if (cb != NULL && retval == 0) {
        for (tmp = (*cb)(data, FALSE); tmp != NULL; tmp = (*cb)(data, FALSE)) {
            if (pkt_id_get(*reqpkt) == pkt_id_get(tmp))
                break;
        }
    }

    if (cb != NULL && (retval != 0 || tmp != NULL))
        (*cb)(data, TRUE);

    *duppkt = tmp;
    return retval;
}
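The iterator callback protocol is visible from the loop above: the callback is invoked repeatedly with FALSE to yield each outstanding request packet, and once with TRUE to end the walk early. A sketch of a matching callback over a plain array (the container type and its fields are assumptions for illustration):

/* Hypothetical iterator state and callback for krad_packet_decode_request(). */
struct pkt_list {
    const krad_packet **pkts;
    size_t count;
    size_t pos;
};

static const krad_packet *next_pkt(void *data, krb5_boolean cancel) {
    struct pkt_list *list = data;
    if (cancel || list->pos >= list->count) {
        list->pos = 0;          /* reset so the list can be walked again */
        return NULL;
    }
    return list->pkts[list->pos++];
}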
Example #14
static void ssm_app_modem_work_fn(struct work_struct *work)
{
	int sz, rc;
	struct ssm_common_msg pkt;
	struct ssm_driver *ssm;

	ssm = container_of(work, struct ssm_driver, ipc_work);

	mutex_lock(&ssm->mutex);
	sz = smd_cur_packet_size(ssm->ch);
	if ((sz < SSM_MSG_FIELD_LEN) || (sz > ATOM_MSG_LEN)) {
		dev_dbg(ssm_drv->dev, "Garbled message size\n");
		goto unlock;
	}

	if (smd_read_avail(ssm->ch) < sz) {
		dev_err(ssm_drv->dev, "SMD error data in channel\n");
		goto unlock;
	}

	if (smd_read(ssm->ch, ssm->smd_buffer, sz) != sz) {
		dev_err(ssm_drv->dev, "Incomplete data\n");
		goto unlock;
	}

	rc = decode_packet(ssm->smd_buffer, &pkt);
	if (rc < 0) {
		dev_err(ssm_drv->dev, "Corrupted header\n");
		goto unlock;
	}

	process_message(pkt, ssm);

unlock:
	mutex_unlock(&ssm->mutex);
}
Example #15
File: packet.c Project: Akasurde/krb5
krb5_error_code
krad_packet_decode_response(krb5_context ctx, const char *secret,
                            const krb5_data *buffer, krad_packet_iter_cb cb,
                            void *data, const krad_packet **reqpkt,
                            krad_packet **rsppkt)
{
    uchar auth[AUTH_FIELD_SIZE];
    const krad_packet *tmp = NULL;
    krb5_error_code retval;

    retval = decode_packet(ctx, secret, buffer, rsppkt);
    if (cb != NULL && retval == 0) {
        for (tmp = (*cb)(data, FALSE); tmp != NULL; tmp = (*cb)(data, FALSE)) {
            if (pkt_id_get(*rsppkt) != pkt_id_get(tmp))
                continue;

            /* Response */
            retval = auth_generate_response(ctx, secret, *rsppkt,
                                            pkt_auth(tmp), auth);
            if (retval != 0) {
                krad_packet_free(*rsppkt);
                break;
            }

            /* If the authenticator matches, then the response is valid. */
            if (memcmp(pkt_auth(*rsppkt), auth, sizeof(auth)) == 0)
                break;
        }
    }

    if (cb != NULL && (retval != 0 || tmp != NULL))
        (*cb)(data, TRUE);

    *reqpkt = tmp;
    return retval;
}
Example #16
File: decoder.c Project: eepp/babeltrace
static
int ctf_metadata_decoder_packetized_file_stream_to_buf_with_mdec(
		struct ctf_metadata_decoder *mdec, FILE *fp,
		char **buf, int byte_order)
{
	FILE *out_fp;
	size_t size;
	int ret = 0;
	int tret;
	size_t packet_index = 0;

	out_fp = bt_open_memstream(buf, &size);
	if (out_fp == NULL) {
		BT_LOGE("Cannot open memory stream: %s: mdec-addr=%p",
			strerror(errno), mdec);
		goto error;
	}

	for (;;) {
		if (feof(fp) != 0) {
			break;
		}

		tret = decode_packet(mdec, fp, out_fp, byte_order);
		if (tret) {
			BT_LOGE("Cannot decode packet: index=%zu, mdec-addr=%p",
				packet_index, mdec);
			goto error;
		}

		packet_index++;
	}

	/* Make sure the whole string ends with a null character */
	tret = fputc('\0', out_fp);
	if (tret == EOF) {
		BT_LOGE("Cannot append '\\0' to the decoded metadata buffer: "
			"mdec-addr=%p", mdec);
		goto error;
	}

	/* Close stream, which also flushes the buffer */
	ret = bt_close_memstream(buf, &size, out_fp);
	/*
	 * See fclose(3). Further access to out_fp after both success
	 * and error, even through another bt_close_memstream(), results
	 * in undefined behavior. Nullify out_fp to ensure we don't
	 * fclose it twice on error.
	 */
	out_fp = NULL;
	if (ret < 0) {
		BT_LOGE("Cannot close memory stream: %s: mdec-addr=%p",
			strerror(errno), mdec);
		goto error;
	}

	goto end;

error:
	ret = -1;

	if (out_fp) {
		if (bt_close_memstream(buf, &size, out_fp)) {
			BT_LOGE("Cannot close memory stream: %s: mdec-addr=%p",
				strerror(errno), mdec);
		}
	}

	if (*buf) {
		free(*buf);
		*buf = NULL;
	}

end:
	return ret;
}
Example #17
int main (int argc, char **argv)
{
    int ret = 0, got_frame;
    if (argc != 4 && argc != 5) {
        fprintf(stderr, "usage: %s [-refcount] input_file video_output_file audio_output_file\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "video frames to a rawvideo file named video_output_file, and decoded\n"
                "audio frames to a rawaudio file named audio_output_file.\n\n"
                "If the -refcount option is specified, the program use the\n"
                "reference counting frame system which allows keeping a copy of\n"
                "the data for longer than one decode call.\n"
                "\n", argv[0]);
        exit(1);
    }
    if (argc == 5 && !strcmp(argv[1], "-refcount")) {
        refcount = 1;
        argv++;
    }
    src_filename = argv[1];
    video_dst_filename = argv[2];
    audio_dst_filename = argv[3];
    /* register all formats and codecs */
    av_register_all();
    /* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }
    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }
    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;
        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }
        /* allocate image where the decoded image will be put */
        width = video_dec_ctx->width;
        height = video_dec_ctx->height;
        pix_fmt = video_dec_ctx->pix_fmt;
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             width, height, pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        video_dst_bufsize = ret;
    }
    if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dec_ctx = audio_stream->codec;
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
            ret = 1;
            goto end;
        }
    }
    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);
    if (!audio_stream && !video_stream) {
        fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }
    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
    if (audio_stream)
        printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        AVPacket orig_pkt = pkt;
        do {
            ret = decode_packet(&got_frame, 0);
            if (ret < 0)
                break;
            pkt.data += ret;
            pkt.size -= ret;
        } while (pkt.size > 0);
        av_packet_unref(&orig_pkt);
    }
    /* flush cached frames */
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);
    printf("Demuxing succeeded.\n");
    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(pix_fmt), width, height,
               video_dst_filename);
    }
    if (audio_stream) {
        enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
        int n_channels = audio_dec_ctx->channels;
        const char *fmt;
        if (av_sample_fmt_is_planar(sfmt)) {
            const char *packed = av_get_sample_fmt_name(sfmt);
            printf("Warning: the sample format the decoder produced is planar "
                   "(%s). This example will output the first channel only.\n",
                   packed ? packed : "?");
            sfmt = av_get_packed_sample_fmt(sfmt);
            n_channels = 1;
        }
        if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
            goto end;
        printf("Play the output audio file with the command:\n"
               "ffplay -f %s -ac %d -ar %d %s\n",
               fmt, n_channels, audio_dec_ctx->sample_rate,
               audio_dst_filename);
    }
end:
    avcodec_close(video_dec_ctx);
    avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    if (audio_dst_file)
        fclose(audio_dst_file);
    av_frame_free(&frame);
    av_free(video_dst_data[0]);
    return ret < 0;
}
Example #18
File: main.cpp Project: jwatte/robotcode
void main_loop(void *) {
    toggle_debug();
    if (uart_available()) {
        PORTD |= LED_YELLOW;
        unsigned char ch = (unsigned char)uart_getch();
        if (recv_ptr == 0) {
            if (ch != 0xed) {
                //  not a sync byte
            }
            else {
                buf[0] = ch;
                recv_ptr = 1;
            }
        }
        else if (recv_ptr == 1) {
            if (ch > sizeof(buf) - 2) {
                //  not a proper packet
                recv_ptr = 0;
                recv_end = 0;
            }
            else {
                buf[1] = ch;
                recv_end = 2 + ch;
                recv_ptr = 2;
            }
        }
        else {
            buf[recv_ptr] = ch;
            ++recv_ptr;
            if (recv_ptr == recv_end) {
                decode_packet();
                recv_ptr = 0;
                recv_end = 0;
            }
        }
        PORTD &= ~LED_YELLOW;
    }
    unsigned short now = uread_timer();
    unsigned short delta = now - prev;
    prev = now;
    //  if this loop takes 10 milliseconds, we're already in trouble...
    if (delta > 10000) {
        delta = 10000;
    }
    //uart_force_out(((unsigned char *)&delta)[0]);
    //uart_force_out(((unsigned char *)&delta)[1]);
    unsigned char mask = 1;
    bool change = false;
    for (unsigned char i = 0; i != 6; ++i) {
        if (targets[i] != counts[i]) {
            stepphases[i] += delta;
            if ((steps & mask) || (stepphases[i] >= steprates[i])) {
                change = true;
                if (!(steps & mask)) {
                    stepphases[i] -= steprates[i];
                }
                //  avoid too much accumulation of phase -- this 
                //  means a limit on slew rate
                if (stepphases[i] > 40000) {
                    stepphases[i] = 40000;
                }
                steps = steps ^ mask;
                if ((short)(targets[i] - counts[i]) > 0) {
                    directions |= mask;
                    if (!(steps & mask)) {
                        counts[i]++;
                    }
                }
                else {
                    directions &= ~mask;
                    if (!(steps & mask)) {
                        counts[i]--;
                    }
                }
            }
        }
        mask = mask << 1;
    }
    DIR_PORT = directions;
    if (change) {
        PORTD |= LED_RED;
    }
    else {
        PORTD &= ~LED_RED;
    }
    blue_timeout += delta;
    if (blue_timeout > 2000000) {
        PORTD |= LED_BLUE;
        first = true;
    }
    after(0, main_loop, 0);
    STEP_PORT = steps;
}
Example #19
int main (int argc, char **argv)
{
    int ret = 0, got_frame;

    if (argc != 3 && argc != 4) {
        fprintf(stderr, "usage: %s [-refcount] input_file video_output_file audio_output_file\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "video frames to a rawvideo file named video_output_file, and decoded\n"
                "audio frames to a rawaudio file named audio_output_file.\n\n"
                "If the -refcount option is specified, the program use the\n"
                "reference counting frame system which allows keeping a copy of\n"
                "the data for longer than one decode call.\n"
                "\n", argv[0]);
        exit(1);
    }
    // if (argc == 4 && !strcmp(argv[1], "-refcount")) {
    //     refcount = 1;
    //     argv++;
    // }
    src_filename = argv[1];
    video_dst_filename = argv[2];

    /* register all formats and codecs */
    avdevice_register_all();
    av_register_all();

    // const char* format_name = "avfoundation";
    AVInputFormat* input_format = av_find_input_format("avfoundation");
    printf("input_format: %p\n", input_format);
    // printf("input_format: %s", input_format->long_name);

    AVDictionary* open_options = NULL;
    av_dict_set(&open_options, "pixel_format", "uyvy422", 0);
    av_dict_set(&open_options, "framerate", "30.000030", 0);
    av_dict_set(&open_options, "video_size", "1280x720", 0);

    /* open input file, and allocate format context */
    // if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
    if (avformat_open_input(&fmt_ctx, src_filename, input_format, &open_options) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }

    printf("fmt_ctx: %p\n", fmt_ctx);
    video_stream = fmt_ctx->streams[0];
    printf("video_stream: %p\n", video_stream);
    video_dec_ctx = video_stream->codec;
    printf("video_dec_ctx: %p\n", video_dec_ctx);
    /* allocate image where the decoded image will be put */
    width = video_dec_ctx->width;
    height = video_dec_ctx->height;
    pix_fmt = video_dec_ctx->pix_fmt;

    printf("width: %d\n", width);
    printf("height: %d\n", height);
    printf("pix_fmt: %d\n", pix_fmt);

    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }


    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;

        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        /* allocate image where the decoded image will be put */
        width = video_dec_ctx->width;
        height = video_dec_ctx->height;
        pix_fmt = video_dec_ctx->pix_fmt;
        printf("width: %d\n", width);
        printf("height: %d\n", height);
        printf("pix_fmt: %d\n", pix_fmt);
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             width, height, pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        video_dst_bufsize = ret;
    }

    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);

    if (!video_stream) {
        fprintf(stderr, "Could not find video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);

    /* read frames from the file */
    int frame_index = 0;
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        AVPacket orig_pkt = pkt;
        do {
            ret = decode_packet(&got_frame, 0);
            if (ret < 0)
                break;
            pkt.data += ret;
            pkt.size -= ret;
        } while (pkt.size > 0);
        av_free_packet(&orig_pkt);

        frame_index++;
        if (frame_index > 5) {
            break;
        }
    }

    /* flush cached frames */
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);

    printf("Demuxing succeeded.\n");

    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(pix_fmt), width, height,
               video_dst_filename);
    }

end:
    avcodec_close(video_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    av_frame_free(&frame);
    av_free(video_dst_data[0]);

    return ret < 0;
}
Example #20
File: wmapro.c Project: ntj/rockbox
/* this is the codec entry point */
enum codec_status codec_main(void)
{
    uint32_t elapsedtime;
    int retval;
    asf_waveformatex_t wfx;     /* Holds the stream properties */
    size_t resume_offset;
    int res;                    /* Return values from asf_read_packet() and decode_packet() */
    uint8_t* audiobuf;          /* Pointer to the payload of one wma pro packet */
    int audiobufsize;           /* Payload size */
    int packetlength = 0;       /* Logical packet size (minus the header size) */
    int outlen = 0;             /* Number of bytes written to the output buffer */
    int pktcnt = 0;             /* Count of the packets played */
    uint8_t *data;              /* Pointer to decoder input buffer */
    int size;                   /* Size of the input frame to the decoder */

    /* Generic codec initialisation */
    ci->configure(DSP_SET_SAMPLE_DEPTH, WMAPRO_DSP_SAMPLE_DEPTH);


next_track:

    /* Wait for the metadata to be read */
    while (!*ci->taginfo_ready && !ci->stop_codec)
        ci->sleep(1);

    retval = CODEC_OK;

    /* Remember the resume position */
    resume_offset = ci->id3->offset;
restart_track:
    if (codec_init()) {
        LOGF("(WMA PRO) Error: Error initialising codec\n");
        retval = CODEC_ERROR;
        goto done;
    }

    /* Copy the format metadata we've stored in the id3 TOC field.  This
       saves us from parsing it again here. */
    memcpy(&wfx, ci->id3->toc, sizeof(wfx));

    ci->configure(DSP_SWITCH_FREQUENCY, wfx.rate);
    ci->configure(DSP_SET_STEREO_MODE, wfx.channels == 1 ?
                  STEREO_MONO : STEREO_NONINTERLEAVED);
    codec_set_replaygain(ci->id3);

    if (decode_init(&wfx) < 0) {
        LOGF("(WMA PRO) Error: Unsupported or corrupt file\n");
        retval = CODEC_ERROR;
        goto done;
    }

    /* Now advance the file position to the first frame */
    ci->seek_buffer(ci->id3->first_frame_offset);

    elapsedtime = 0;
    resume_offset = 0;

    /* The main decoding loop */

    while (pktcnt < wfx.numpackets)
    {
        ci->yield();
        if (ci->stop_codec || ci->new_track) {
            goto done;
        }

        /* Deal with any pending seek requests */
        if (ci->seek_time) {

            if (ci->seek_time == 1) {
                ci->seek_complete();
                goto restart_track; /* Pretend you never saw this... */
            }

            elapsedtime = asf_seek(ci->seek_time, &wfx);
            if (elapsedtime < 1) {
                ci->seek_complete();
                goto next_track;
            }

            ci->set_elapsed(elapsedtime);
            ci->seek_complete();
        }

        res = asf_read_packet(&audiobuf, &audiobufsize, &packetlength, &wfx);

        if (res < 0) {
            LOGF("(WMA PRO) Warning: asf_read_packet returned %d", res);
            goto done;
        } else {
            data = audiobuf;
            size = audiobufsize;
            pktcnt++;

            /* We now loop on the packet, decoding and outputting the subframes
             * one-by-one. For more information about how wma pro structures its
             * audio frames, see libwmapro/wmaprodec.c */
            while(size > 0)
            {
                res = decode_packet(&wfx, dec, &outlen, data, size);
                if(res < 0) {
                    LOGF("(WMA PRO) Error: decode_packet returned %d", res);
                    goto done;
                }
                data += res;
                size -= res;
                if(outlen) {
                    ci->yield ();
                    outlen /= (wfx.channels);
                    ci->pcmbuf_insert(dec[0], dec[1], outlen );
                    elapsedtime += outlen*10/(wfx.rate/100);
                    ci->set_elapsed(elapsedtime);
                    ci->yield ();
                }
            }

        }

        /* Advance to the next logical packet */
        ci->advance_buffer(packetlength);
    }
    retval = CODEC_OK;

done:
    if (ci->request_next_track())
        goto next_track;

    return retval;
}
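The elapsed-time arithmetic in the inner loop is worth spelling out: after outlen has been divided by the channel count it is a per-channel sample count, so milliseconds elapsed = outlen * 1000 / rate. The codec writes that as outlen*10/(rate/100), presumably to keep the intermediate product within 32-bit range on the targets Rockbox runs on. Factored out:

/* Same arithmetic as `elapsedtime += outlen*10/(wfx.rate/100)` above:
 * convert a per-channel sample count to milliseconds at `rate` Hz. */
static unsigned long samples_to_ms(unsigned long samples, unsigned long rate) {
    return samples * 10 / (rate / 100);
}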
Example #21
int main(int argc, const char *argv[]) {
  int ret = 0, got_frame, got_output;
  int video_stream_idx = -1;
  int video_dst_bufsize;
  const char *src_filename;
  const char *dst_filename;
  FILE *dst_file                  = NULL;
  AVCodec *codec_enc              = NULL;
  AVFormatContext *fmt_ctx        = NULL;
  AVStream *video_stream          = NULL;
  AVCodecContext *video_dec_ctx   = NULL;
  AVCodecContext *video_enc_ctx   = NULL;
  AVFrame *frame                  = NULL;
  AVPacket pkt_dec, pkt_enc;
  uint8_t *video_dst_data[4]      = {NULL};
  int video_dst_linesize[4];
  
  if (argc != 3) {
    printf("Usage: %s <in_file> <out_file>\n", argv[0]);
    exit(1);
  }
  
  av_register_all();
  av_log_set_level(AV_LOG_DEBUG);
  
  src_filename = argv[1];
  dst_filename = argv[2];
  
  codec_enc = avcodec_find_encoder(AV_CODEC_ID_JPEG2000);
  if (!codec_enc) {
      fprintf(stderr, "Codec not found\n");
      exit(1);
  }
  
  video_enc_ctx = avcodec_alloc_context3(codec_enc);
  if (!video_enc_ctx) {
      fprintf(stderr, "Could not allocate video codec context\n");
      exit(1);
  }
//   j2kenc_init(video_enc_ctx);
  
  /* open input file, and allocate format context */
  if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
    fprintf(stderr, "Could not open source file %s\n", src_filename);
    exit(1);
  }
  
  /* retrieve stream information */
  if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
    fprintf(stderr, "Could not find stream information\n");
    exit(1);
  }
  
  if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO, src_filename) >= 0) {
    video_stream = fmt_ctx->streams[video_stream_idx];
    video_dec_ctx = video_stream->codec;
    
    video_enc_ctx->width = video_dec_ctx->width;
    video_enc_ctx->height = video_dec_ctx->height;
    video_enc_ctx->pix_fmt = video_dec_ctx->pix_fmt;
    
    // make ffmpeg not complain about j2k being experimental
    video_enc_ctx->strict_std_compliance = -2;
    
//     printf("About to open encoder\n");
    if (avcodec_open2(video_enc_ctx, codec_enc, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    dst_file = fopen(dst_filename, "wb");
    if (!dst_file) {
      fprintf(stderr, "Could not open destination file %s\n", dst_filename);
      ret = 1;
      goto end;
    }

    /* allocate image where the decoded image will be put */
    ret = av_image_alloc(video_dst_data, video_dst_linesize,
              video_dec_ctx->width, video_dec_ctx->height,
              video_dec_ctx->pix_fmt, 1);
    if (ret < 0) {
      fprintf(stderr, "Could not allocate raw video buffer\n");
      goto end;
    }
    video_dst_bufsize = ret;
  }
  
  /* dump input information to stderr */
  av_dump_format(fmt_ctx, 0, src_filename, 0);
  
  frame = av_frame_alloc();
  if (!frame) {
    fprintf(stderr, "Could not allocate frame\n");
    ret = AVERROR(ENOMEM);
    goto end;
  }
  
  /* initialize packet, set data to NULL, let the demuxer fill it */
  av_init_packet(&pkt_dec);
  pkt_dec.data = NULL;
  pkt_dec.size = 0;

  if (video_stream)
    printf("Demuxing video from file '%s' into '%s'\n", src_filename, dst_filename);
  
  /* read frames from the file */
  while (av_read_frame(fmt_ctx, &pkt_dec) >= 0) {
//     AVPacket orig_pkt = pkt;
    do {
      ret = decode_packet(&got_frame, 0, &pkt_dec, video_dec_ctx, frame);
      if (ret < 0)
        break;
      pkt_dec.data += ret;
      pkt_dec.size -= ret;
    } while (pkt_dec.size > 0);
//     av_free_packet(&orig_pkt);
  }
  /* flush cached frames */
  pkt_dec.data = NULL;
  pkt_dec.size = 0;
  do {
    decode_packet(&got_frame, 1, &pkt_dec, video_dec_ctx, frame);
    if (got_frame) {
      // DO SOME ENCODING HERE
      av_init_packet(&pkt_enc);
      pkt_enc.data = NULL;
      pkt_enc.size = 0;
      
      ret = avcodec_encode_video2(video_enc_ctx, &pkt_enc, frame, &got_output);
      if (ret < 0) {
        fprintf(stderr, "Error encoding frame\n");
        goto end;
      }

      if (got_output) {
        printf("Write frame (size=%5d)\n", pkt_enc.size);
        fwrite(pkt_enc.data, 1, pkt_enc.size, dst_file);
      }
    }
  } while (got_frame);
  
  printf("Demuxing succeeded.\n");
  
end:
  av_free_packet(&pkt_enc);
  av_free_packet(&pkt_dec);
  if (video_dec_ctx)
    avcodec_close(video_dec_ctx);
  if (video_enc_ctx)
    avcodec_close(video_enc_ctx);
//   if (codec_enc)
//     av_free(codec_enc);
  avformat_close_input(&fmt_ctx);
  if (dst_file)
    fclose(dst_file);
  av_frame_free(&frame);
  av_free(video_dst_data[0]);

  return ret < 0;
}
Example #22
int main (int argc, char **argv)
{
	double v1 = 1 / (29.97 * 90000);
	double v2 =  1 / 29.97 * 90000;


	AVInputFormat *fmtInput;
	char deviceBuffer[256] = {0};
    int ret = 0, got_frame;

    //if (argc != 4) {
    //    fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
    //            "API example program to show how to read frames from an input file.\n"
    //            "This program reads frames from a file, decodes them, and writes decoded\n"
    //            "video frames to a rawvideo file named video_output_file, and decoded\n"
    //            "audio frames to a rawaudio file named audio_output_file.\n"
    //            "\n", argv[0]);
    //    exit(1);
    //}
    //src_filename = argv[1];
    //video_dst_filename = argv[2];
    //audio_dst_filename = argv[3];

	//src_filename = "rtsp://*****:*****@192.168.0.6/ch1/main/av_stream"; //"d:\\wildlife.wmv";
	//src_filename = "video=Integrated Camera";
	//src_filename = "d:\\pgm\\2.jpg";
	src_filename = "dummy";
	video_dst_filename = "d:\\pgm\\wildlife.pgm";
	audio_dst_filename = "d:\\pgm\\wildlife.pcm";

    /* register all formats and codecs */
     
	av_register_all();
	avdevice_register_all();
	avformat_network_init();
	 

	 fmtInput = av_find_input_format("dshow");
	

    /* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, src_filename, fmtInput, NULL) < 0) {
	//if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }

    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }

    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;

        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        /* allocate image where the decoded image will be put */
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             video_dec_ctx->width, video_dec_ctx->height,
                             video_dec_ctx->pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        video_dst_bufsize = ret;
    }

    if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        int nb_planes;

        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dec_ctx = audio_stream->codec;
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        nb_planes = av_sample_fmt_is_planar(audio_dec_ctx->sample_fmt) ?
            audio_dec_ctx->channels : 1;
        audio_dst_data = av_mallocz(sizeof(uint8_t *) * nb_planes);
        if (!audio_dst_data) {
            fprintf(stderr, "Could not allocate audio data buffers\n");
            ret = AVERROR(ENOMEM);
            goto end;
        }
    }

    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);

    if (!audio_stream && !video_stream) {
        fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }

    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
    if (audio_stream)
        printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);

    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        decode_packet(&got_frame, 0);
        av_free_packet(&pkt);
    }

    /* flush cached frames */
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);

    printf("Demuxing succeeded.\n");

    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
               video_dst_filename);
    }

    if (audio_stream) {
        const char *fmt;

        if ((ret = get_format_from_sample_fmt(&fmt, audio_dec_ctx->sample_fmt)) < 0)
            goto end;
        printf("Play the output audio file with the command:\n"
               "ffplay -f %s -ac %d -ar %d %s\n",
               fmt, audio_dec_ctx->channels, audio_dec_ctx->sample_rate,
               audio_dst_filename);
    }

end:
    if (video_dec_ctx)
        avcodec_close(video_dec_ctx);
    if (audio_dec_ctx)
        avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    if (audio_dst_file)
        fclose(audio_dst_file);
    av_free(frame);
    av_free(video_dst_data[0]);
    av_free(audio_dst_data);

    return ret < 0;
}
Example #23
int main(int argc, char **argv)
{
    int ret = 0, got_frame;

    if (argc != 2) {
        fprintf(stderr, "Usage: %s <video>\n", argv[0]);
        exit(1);
    }
    src_filename = argv[1];

    av_register_all();

    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }

    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }

    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;
    }

    //av_dump_format(fmt_ctx, 0, src_filename, 0);

    if (!video_stream) {
        fprintf(stderr, "Could not find video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    //printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    /* print the csv header */
    printf(";Bildtyp: ;Eingangsbitrate: ;Kodierte Bildgröße: ;Zeitstempel des Bildes: ;Aktuelles Bild: ;Anzeigedauer des Bildes: ;Bisherige Spielzeit: ;Quelldateiname: ;Decoderbildrate: ;Decoder: ;Filmbildrate:\n");
    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        AVPacket orig_pkt = pkt;
        do {
            ret = decode_packet(&got_frame, 0);
            if (ret < 0)
                break;
            pkt.data += ret;
            pkt.size -= ret;
        } while (pkt.size > 0);
        av_free_packet(&orig_pkt);
    }

    /* flush cached frames */
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);

end:
    avcodec_close(video_dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    return ret < 0;
}
Example #24
static void log_loop(tUartHandle hUart)
{
    double last, curr;
    int count = 0;
    uint8_t msg[1000];
  
    last = hires_time();        

    do 
    {
        double waited;

        uint8_t receive;
        int got = 1;

        UartRead(hUart, &receive, &got);
        curr = hires_time();        
        waited = curr - last;
        last = curr;
        if (got == 0)
            continue; // should never happen
        
        if (waited > 0.01)
        {
            if (count)
            {
                dump(msg, count);
                printf("  (stale bytes)");
                count = 0;
            }
            printf("\n"); // block separator, for nicer readability
        }

        msg[count] = receive;
        count++;

        if (count >= 4) // minimum packet length
        {
            int i;
            // CRC candidate
            uint16_t cand = msg[count-2] + msg[count-1] * 256; // little endian

            // search for size + CRC match, assume packet end if both matches
            for (i=0; i<count-2; i++)
            {
                int packetlen = msg[i+1] & 0x7F; // from msg candidate
                if (packetlen == count-i-4 && crc16(msg+i, count-2-i) == cand)
                {   
                    if (i>0)
                    { // dump heading garbage
                        dump(msg, i);
                        printf(" Garbage (possible collision)\n");
                    }
                    {
                        // print timestamp
                        char *timeline;
#ifdef WIN32
                        struct _timeb timebuffer;
                        _ftime( &timebuffer );
#else
                        struct timeb timebuffer;
                        ftime( &timebuffer );
#endif
                        timeline = ctime( & ( timebuffer.time ) );
                        printf( "%.8s.%03hu  ", &timeline[11], timebuffer.millitm);
                    }

                    dump (msg+i, count-i); // dump the packet
                    decode_packet(msg+i, count-i);
                    printf("\n");
                    //out_rcv_packet(&out, msg+i, count-i); // pass to emulation
                    count = 0;
                } // if crc match
            } // for i
        } // if (count > 2)


    } while(1);
}
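crc16() itself is not shown, so its polynomial is unknown; one common choice that fits a little-endian 16-bit trailer like the one matched above is CRC-16/CCITT-FALSE. A sketch under that assumption:

#include <stdint.h>

/* CRC-16/CCITT-FALSE (poly 0x1021, init 0xFFFF). Whether this matches
 * the logger's crc16() is an assumption; swap in the real polynomial
 * if the checksums disagree. */
static uint16_t crc16_ccitt(const uint8_t *data, int len) {
    uint16_t crc = 0xFFFF;
    int i, b;
    for (i = 0; i < len; i++) {
        crc ^= (uint16_t)data[i] << 8;
        for (b = 0; b < 8; b++)
            crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x1021)
                                 : (uint16_t)(crc << 1);
    }
    return crc;
}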
Example #25
int main (int argc, char **argv)
{
    int ret = 0;
    const char *src_filename = NULL;
    const char *dst_filename = NULL;
    char* format             = NULL;
    char* codec              = NULL;

    if (argc != 5 && argc != 3) {
        fprintf(stderr, "usage: %s input_file output_file [format codec]\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "frames to a rawvideo/rawaudio file named output_file.\n"
                "Optionally format and codec can be specified.\n\n", argv[0]);
        exit(1);
    }
    src_filename = argv[1];
    dst_filename = argv[2];
    if (argc == 5) {
        format = argv[3];
        codec  = argv[4];
    }

    /* log all debug messages */
    av_log_set_level(AV_LOG_DEBUG);

    /* register all formats and codecs */
    av_register_all();

#ifdef __AFL_HAVE_MANUAL_CONTROL
    while (__AFL_LOOP(1000))
#endif
    {
        AVFormatContext *fmt_ctx = NULL;
        AVInputFormat *fmt       = NULL;
        AVCodecContext *dec_ctx  = NULL;
        FILE *dst_file           = NULL;
        AVFrame *frame           = NULL;
        int got_frame            = 0;
        int frame_count          = 0;
        AVPacket pkt             = { 0 };
        AVDictionary *opts       = NULL;
        ret = 0;
        width = 0;
        height = 0;
        pix_fmt = AV_PIX_FMT_NONE;
        video_dst_bufsize = 0;
        memset(video_dst_data, 0, sizeof(video_dst_data));
        memset(video_dst_linesize, 0, sizeof(video_dst_linesize));

        /* set the whitelists for formats and codecs */
        if (av_dict_set(&opts, "codec_whitelist", codec, 0) < 0) {
            fprintf(stderr, "Could not set codec_whitelist.\n");
            ret = 1;
            goto end;
        }
        if (av_dict_set(&opts, "format_whitelist", format, 0) < 0) {
            fprintf(stderr, "Could not set format_whitelist.\n");
            ret = 1;
            goto end;
        }

        if (format) {
            fmt = av_find_input_format(format);
            if (!fmt) {
                fprintf(stderr, "Could not find input format %s\n", format);
                ret = 1;
                goto end;
            }
        }

        /* open input file, and allocate format context */
        if (avformat_open_input(&fmt_ctx, src_filename, fmt, &opts) < 0) {
            fprintf(stderr, "Could not open source file %s\n", src_filename);
            ret = 1;
            goto end;
        }

        /* retrieve stream information */
        if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
            fprintf(stderr, "Could not find stream information\n");
        }

        /* find stream with specified codec */
        if (open_codec_context(&dec_ctx, fmt_ctx, codec) < 0) {
            fprintf(stderr, "Could not open any stream in input file '%s'\n",
                    src_filename);
            ret = 1;
            goto end;
        }

        /* open output file */
        dst_file = fopen(dst_filename, "wb");
        if (!dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", dst_filename);
            ret = 1;
            goto end;
        }

        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
            /* allocate image where the decoded image will be put */
            width = dec_ctx->width;
            height = dec_ctx->height;
            pix_fmt = dec_ctx->pix_fmt;
            video_dst_bufsize = av_image_alloc(video_dst_data, video_dst_linesize,
                                 width, height, pix_fmt, 1);
            if (video_dst_bufsize < 0) {
                fprintf(stderr, "Could not allocate raw video buffer\n");
                ret = 1;
                goto end;
            }
        }

        /* dump input information to stderr */
        av_dump_format(fmt_ctx, 0, src_filename, 0);

        /* allocate frame */
        frame = av_frame_alloc();
        if (!frame) {
            fprintf(stderr, "Could not allocate frame\n");
            ret = 1;
            goto end;
        }

        printf("Demuxing from file '%s' into '%s'\n", src_filename, dst_filename);

        /* read frames from the file */
        while (av_read_frame(fmt_ctx, &pkt) >= 0) {
            do {
                int decoded = decode_packet(dec_ctx, dst_file, frame, &got_frame, &frame_count, &pkt);
                if (decoded < 0)
                    break;
                /* increase data pointer and decrease size of remaining data buffer */
                pkt.data += decoded;
                pkt.size -= decoded;
            } while (pkt.size > 0);
            av_free_packet(&pkt);
        }

        printf("Flushing cached frames.\n");
        pkt.data = NULL;
        pkt.size = 0;
        do {
            decode_packet(dec_ctx, dst_file, frame, &got_frame, &frame_count, &pkt);
        } while (got_frame);

        printf("Demuxing done.\n");

end:
        /* free allocated memory */
        av_dict_free(&opts);
        avcodec_close(dec_ctx);
        avformat_close_input(&fmt_ctx);
        if (dst_file)
            fclose(dst_file);
        av_frame_free(&frame);
        av_free(video_dst_data[0]);
    }

    return ret;
}
Example #26
static int netcam_read_rtsp_image(netcam_context_ptr netcam)
{
  if (netcam->rtsp == NULL) {
    if (rtsp_connect(netcam) < 0) {
      return -1;
    }
  }

  AVCodecContext *cc = netcam->rtsp->codec_context;
  AVFormatContext *fc = netcam->rtsp->format_context;
  netcam_buff_ptr buffer;

  /* Point to our working buffer. */
  buffer = netcam->receiving;
  buffer->used = 0;

  AVFrame *frame = avcodec_alloc_frame();

  AVPacket packet;
  
  av_init_packet(&packet);

  packet.data = NULL;
  packet.size = 0;

  int size_decoded = 0;
  static int usual_size_decoded = 0;

  while (size_decoded == 0 && av_read_frame(fc, &packet) >= 0) {

    if(packet.stream_index != netcam->rtsp->video_stream_index) {
      // not our packet, skip
      continue;
    }

    size_decoded = decode_packet(&packet, buffer, frame, cc);
  }

  if (size_decoded == 0) {
    // something went wrong, end of stream?
    MOTION_LOG(ERR, TYPE_NETCAM, SHOW_ERRNO, "%s: invalid frame!");
    return -1;
  }

  if (size_decoded != usual_size_decoded) {
    MOTION_LOG(WRN, TYPE_NETCAM, SHOW_ERRNO, "%s: unusual frame size of %d!", size_decoded);
    usual_size_decoded = size_decoded;
  }

  // at this point, we are finished with the packet and frame, so free them.
  av_free_packet(&packet);
  av_free(frame);
  
  struct timeval curtime;
  
  if (gettimeofday(&curtime, NULL) < 0) {
    MOTION_LOG(WRN, TYPE_NETCAM, SHOW_ERRNO, "%s: gettimeofday");
  }
  
  netcam->receiving->image_time = curtime;
  
  /*
   * Calculate our "running average" time for this netcam's
   * frame transmissions (except for the first time).
   * Note that the average frame time is held in microseconds.
   */
  if (netcam->last_image.tv_sec) {
    netcam->av_frame_time = ((9.0 * netcam->av_frame_time) + 1000000.0 *
			     (curtime.tv_sec - netcam->last_image.tv_sec) +
			     (curtime.tv_usec- netcam->last_image.tv_usec)) / 10.0;
    
    MOTION_LOG(DBG, TYPE_NETCAM, NO_ERRNO, "%s: Calculated frame time %f",
	       netcam->av_frame_time);
  }
  
  netcam->last_image = curtime;
  
  netcam_buff *xchg;
  
  /*
   * read is complete - set the current 'receiving' buffer atomically
   * as 'latest', and make the buffer previously in 'latest' become
   * the new 'receiving'.
   */
  pthread_mutex_lock(&netcam->mutex);

  xchg = netcam->latest;
  netcam->latest = netcam->receiving;
  netcam->receiving = xchg;
  netcam->imgcnt++;
  
  /*
   * We have a new frame ready.  We send a signal so that
   * any thread (e.g. the motion main loop) waiting for the
   * next frame to become available may proceed.
   */
  pthread_cond_signal(&netcam->pic_ready);
  
  pthread_mutex_unlock(&netcam->mutex);
  
  return 0;
}
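The running average above is an exponential moving average with a 9:1 weighting, i.e. new_avg = (9 * old_avg + latest_frame_time_us) / 10, where the latest frame time is the tv_sec/tv_usec delta converted to microseconds. Factored out:

/* The same smoothing step used for netcam->av_frame_time above. */
static double update_av_frame_time(double avg, double frame_time_us) {
    return (9.0 * avg + frame_time_us) / 10.0;
}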
Example #27
bool mp_decode_next(struct mp_decode *d)
{
	bool eof = d->m->eof;
	int got_frame;
	int ret;

	d->frame_ready = false;

	if (!eof && !d->packets.size)
		return true;

	while (!d->frame_ready) {
		if (!d->packet_pending) {
			if (!d->packets.size) {
				if (eof) {
					d->pkt.data = NULL;
					d->pkt.size = 0;
				} else {
					return true;
				}
			} else {
				circlebuf_pop_front(&d->packets, &d->orig_pkt,
						sizeof(d->orig_pkt));
				d->pkt = d->orig_pkt;
				d->packet_pending = true;
			}
		}

		ret = decode_packet(d, &got_frame);

		if (!got_frame && ret == 0) {
			d->eof = true;
			return true;
		}
		if (ret < 0) {
#ifdef DETAILED_DEBUG_INFO
			blog(LOG_DEBUG, "MP: decode failed: %s",
					av_err2str(ret));
#endif

			if (d->packet_pending) {
				av_packet_unref(&d->orig_pkt);
				av_init_packet(&d->orig_pkt);
				av_init_packet(&d->pkt);
				d->packet_pending = false;
			}
			return true;
		}

		d->frame_ready = !!got_frame;

		if (d->packet_pending) {
			if (d->pkt.size) {
				d->pkt.data += ret;
				d->pkt.size -= ret;
			}

			if (d->pkt.size <= 0) {
				av_packet_unref(&d->orig_pkt);
				av_init_packet(&d->orig_pkt);
				av_init_packet(&d->pkt);
				d->packet_pending = false;
			}
		}
	}

	if (d->frame_ready) {
		int64_t last_pts = d->frame_pts;

		if (d->frame->best_effort_timestamp == AV_NOPTS_VALUE)
			d->frame_pts = d->next_pts;
		else
			d->frame_pts = av_rescale_q(
					d->frame->best_effort_timestamp,
					d->stream->time_base,
					(AVRational){1, 1000000000});

		int64_t duration = d->frame->pkt_duration;
		if (!duration)
			duration = get_estimated_duration(d, last_pts);
		else
			duration = av_rescale_q(duration,
					d->stream->time_base,
					(AVRational){1, 1000000000});

		d->last_duration = duration;
		d->next_pts = d->frame_pts + duration;
	}

	return true;
}
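The loop above consumes packets with the legacy decode API, advancing pkt.data by however many bytes each decode_packet() call reports. For comparison, a hedged sketch of the same step with FFmpeg's newer send/receive API (avcodec_send_packet()/avcodec_receive_frame(), FFmpeg >= 3.1), which consumes whole packets and removes the manual pointer bookkeeping; this is a sketch, not OBS code:

#include <libavcodec/avcodec.h>

/*
 * Returns 1 when a frame is ready, 0 when the decoder needs more input
 * or is fully drained, a negative AVERROR on failure. Passing pkt ==
 * NULL starts flushing. Note: if avcodec_send_packet() reports EAGAIN,
 * the packet was not consumed, and a full loop would resend it after
 * draining pending frames.
 */
static int decode_next_frame(AVCodecContext *ctx, const AVPacket *pkt,
                             AVFrame *frame)
{
    int ret = avcodec_send_packet(ctx, pkt);
    if (ret < 0 && ret != AVERROR(EAGAIN))
        return ret;

    ret = avcodec_receive_frame(ctx, frame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;
    return ret < 0 ? ret : 1;
}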
示例#28
int main (int argc, char **argv)
{
    int ret = 0, got_frame;

    if (argc != 4) {
        fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "video frames to a rawvideo file named video_output_file, and decoded\n"
                "audio frames to a rawaudio file named audio_output_file.\n"
                "\n", argv[0]);
        exit(1);
    }
    src_filename = argv[1];
    video_dst_filename = argv[2];
    audio_dst_filename = argv[3];

    /* register all formats and codecs */
    av_register_all();

    /* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }

    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }

    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;

        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        /* allocate image where the decoded image will be put */
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             video_dec_ctx->width, video_dec_ctx->height,
                             video_dec_ctx->pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        video_dst_bufsize = ret;
    }

    if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        int nb_planes;

        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dec_ctx = audio_stream->codec;
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        nb_planes = av_sample_fmt_is_planar(audio_dec_ctx->sample_fmt) ? audio_dec_ctx->channels : 1;
        audio_dst_data = (uint8_t**)av_mallocz(sizeof(uint8_t *) * nb_planes);
        if (!audio_dst_data) {
            fprintf(stderr, "Could not allocate audio data buffers\n");
            ret = AVERROR(ENOMEM);
            goto end;
        }
    }

    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);

    if (!audio_stream && !video_stream) {
        fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }

    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
    if (audio_stream)
        printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);

    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        //decode_packet(&got_frame, 0);
        if (audio_stream_idx == pkt.stream_index)
            printf("Frame found!\n");
        else
            continue;
        fwrite(pkt.data, 1, pkt.size, audio_dst_file);
    }

    /* flush cached frames */
#if 0
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);
#endif

    printf("Demuxing succeeded.\n");

    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
               video_dst_filename);
    }

    if (audio_stream) {
        const char *fmt;

        if ((ret = get_format_from_sample_fmt(&fmt, audio_dec_ctx->sample_fmt) < 0))
            goto end;
        printf("Play the output audio file with the command:\n"
               "ffplay -f %s -ac %d -ar %d %s\n",
               fmt, audio_dec_ctx->channels, audio_dec_ctx->sample_rate,
               audio_dst_filename);
    }

end:
    if (video_dec_ctx)
        avcodec_close(video_dec_ctx);
    if (audio_dec_ctx)
        avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    if (audio_dst_file)
        fclose(audio_dst_file);
    av_free(frame);
    av_free(video_dst_data[0]);
    av_free(audio_dst_data);

    return ret < 0;
}
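The open_codec_context() helper called above is not shown in this snippet. A sketch of what such a helper typically looks like, modeled on FFmpeg's old demuxing example to match the stream->codec usage here (error handling trimmed, names assumed):

static int open_codec_context(int *stream_idx, AVFormatContext *fmt_ctx,
                              enum AVMediaType type)
{
    /* pick the "best" stream of the requested media type */
    int ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    if (ret < 0)
        return ret;
    *stream_idx = ret;

    /* find and open a decoder for that stream (pre-AVCodecParameters API) */
    AVCodecContext *dec_ctx = fmt_ctx->streams[*stream_idx]->codec;
    AVCodec *dec = avcodec_find_decoder(dec_ctx->codec_id);
    if (!dec)
        return AVERROR(EINVAL);
    return avcodec_open2(dec_ctx, dec, NULL);
}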
示例#29
int main (int argc, char **argv){
    int ret = 0, got_frame;
    AVFormatContext *ofmt_ctx = NULL;
    AVOutputFormat *ofmt = NULL;
    


    uint8_t *sample_buf = NULL; /* allocated once the audio encoder is set up */
    
    
    if (argc != 4 && argc != 5) {
        fprintf(stderr, "input  1.source file:%s\n"
                "2.output_video\n"
                "3.output_audio\n"
                "4.mux video file(Optional)\n"
                "\n", argv[0]);
        exit(1);
    }
    
    src_filename = argv[1];
    video_dst_filename = argv[2];
    audio_dst_filename = argv[3];
    //optional mux to any type video
    if(argc == 5){
        out_filename = argv[4];
    }
    
    /* register all formats and codecs */
    av_register_all();
    //for network stream
    avformat_network_init();
    
    ret = init_input();
    if(ret){
        goto end;
    }


    ret = init_video_out_context();
    if(ret){
        goto end;
    }


    ret = init_audio_out_context(sample_buf);
    if(ret){
        goto end;
    }else{
        int aud_buffer_size;
        //alloc frame and packet
        AudFrame = av_frame_alloc();
        AudFrame->nb_samples     = AudCodecCtx->frame_size;
        AudFrame->format         = AudCodecCtx->sample_fmt;
        AudFrame->channel_layout = AudCodecCtx->channel_layout;
        
        aud_buffer_size = av_samples_get_buffer_size(NULL, AudCodecCtx->channels,AudCodecCtx->frame_size,AudCodecCtx->sample_fmt, 1);
        sample_buf = (uint8_t *)av_malloc(aud_buffer_size);
        avcodec_fill_audio_frame(AudFrame, AudCodecCtx->channels, AudCodecCtx->sample_fmt,(const uint8_t*)sample_buf, aud_buffer_size, 1);
        av_new_packet(&AudPkt,aud_buffer_size);
    }
    
    
    if(argc == 5){
        //alloc memory
        avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
        if (!ofmt_ctx) {
            printf( "Could not create output context\n");
            ret = AVERROR_UNKNOWN;
            return 1;
        }
        ofmt = ofmt_ctx->oformat;

        ret = init_output(ofmt_ctx);
        if(ret){
            printf("Init output ERROR\n");
            goto end;
        }
    }
    
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            printf( "Could not open output file '%s'", out_filename);
            goto end;
        }
    }

    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        printf( "Error occurred when opening output file\n");
        goto end;
    }
    
    //this will fill up by decoder(|read frame|->packet->|decoder|->frame)
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }
    
    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
    if (audio_stream)
        printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
    
    
    //Write video Header
    avformat_write_header(pFormatCtx,NULL);
    //Write audio Header
    avformat_write_header(AudFormatCtx,NULL);
    
    //alloc packet to get copy from pkt
    av_new_packet(&epkt,picture_size);
    
    /*setup the convert parameter
     *due to input sample format AV_SAMPLE_FMT_FLTP
     *can't be converted to AV_SAMPLE_FMT_S16
     *which only accepted by the aac encoder
     */
    swr = swr_alloc();
    av_opt_set_int(swr, "in_channel_layout",  audio_dec_ctx->channel_layout, 0);
    av_opt_set_int(swr, "out_channel_layout", AudCodecCtx->channel_layout,  0);
    av_opt_set_int(swr, "in_sample_rate",     audio_dec_ctx->sample_rate, 0);
    av_opt_set_int(swr, "out_sample_rate",    AudCodecCtx->sample_rate, 0);
    av_opt_set_sample_fmt(swr, "in_sample_fmt",  AV_SAMPLE_FMT_FLTP, 0);
    av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16,  0);
    swr_init(swr);

    /* start reading frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        //do demux & decode -> encode -> output h264 & aac file
        ret = decode_packet();

        if (ret < 0)
            break;
        if(argc == 5){
            remux_packet(ofmt_ctx,&pkt);
        }
        
        av_free_packet(&pkt);
    }
    
    /* flush cached frames */
    pkt.data = NULL;
    pkt.size = 0;
    
    
    //Flush Encoder
    ret = flush_encoder(pFormatCtx,0);
    if (ret < 0) {
        printf("Flushing encoder failed\n");
        return -1;
    }
    
    //Write video trailer
    av_write_trailer(pFormatCtx);
    
    //Write audio Trailer
    av_write_trailer(AudFormatCtx);
    
    //Write remux Trailer
    if(argc == 5){
        av_write_trailer(ofmt_ctx);
    }
    
    
    printf("Output succeeded!!!!\n");
    
    
    
    
    
    
    
    
    
end:
    //free remux
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    
    //free audio
    if (audio_st){
        avcodec_close(audio_st->codec);
        av_free(AudFrame);
        av_free(sample_buf);
    }
    avio_close(AudFormatCtx->pb);
    avformat_free_context(AudFormatCtx);
    
    //free video
    if (video_st){
        avcodec_close(video_st->codec);
        av_free(pFrame);
        av_free(picture_buf);
    }
    avio_close(pFormatCtx->pb);  
    avformat_free_context(pFormatCtx);
    
    //free decode
    avcodec_close(video_dec_ctx);
    avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    if (audio_dst_file)
        fclose(audio_dst_file);    
    av_frame_free(&frame);
    return ret < 0;
}
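The SwrContext configured before the read loop implies a per-frame conversion somewhere inside decode_packet(). A hedged sketch of that step, converting the planar-float decoder output to the interleaved S16 the AAC encoder accepts ('decoded' stands in for the frame returned by the decoder; AudFrame and sample_buf are the example's encoder-side buffers):

/* convert FLTP decoder samples into AudFrame's S16 buffer */
int out_samples = swr_convert(swr,
                              AudFrame->data, AudFrame->nb_samples,
                              (const uint8_t **) decoded->data,
                              decoded->nb_samples);
if (out_samples < 0) {
    /* conversion failed; propagate the error */
}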
示例#30
jint Java_org_telegram_ui_Components_AnimatedFileDrawable_getVideoFrame(JNIEnv *env, jclass clazz, jobject ptr, jobject bitmap, jintArray data) {
    if (ptr == nullptr || bitmap == nullptr) {
        return 0;
    }
    VideoInfo *info = (VideoInfo *) ptr;
    int ret = 0;
    int got_frame = 0;
    
    while (true) {
        if (info->pkt.size == 0) {
            ret = av_read_frame(info->fmt_ctx, &info->pkt);
            //LOGD("got packet with size %d", info->pkt.size);
            if (ret >= 0) {
                info->orig_pkt = info->pkt;
            }
        }
        
        if (info->pkt.size > 0) {
            ret = decode_packet(info, &got_frame);
            if (ret < 0) {
                if (info->has_decoded_frames) {
                    ret = 0;
                }
                info->pkt.size = 0;
            } else {
                //LOGD("read size %d from packet", ret);
                info->pkt.data += ret;
                info->pkt.size -= ret;
            }
            
            if (info->pkt.size == 0) {
                av_free_packet(&info->orig_pkt);
            }
        } else {
            info->pkt.data = NULL;
            info->pkt.size = 0;
            ret = decode_packet(info, &got_frame);
            if (ret < 0) {
                LOGE("can't decode packet flushed %s", info->src);
                return 0;
            }
            if (got_frame == 0) {
                if (info->has_decoded_frames) {
                    //LOGD("file end reached %s", info->src);
                    if ((ret = avformat_seek_file(info->fmt_ctx, -1, std::numeric_limits<int64_t>::min(), 0, std::numeric_limits<int64_t>::max(), 0)) < 0) {
                        LOGE("can't seek to begin of file %s, %s", info->src, av_err2str(ret));
                        return 0;
                    } else {
                        avcodec_flush_buffers(info->video_dec_ctx);
                    }
                }
            }
        }
        if (ret < 0) {
            return 0;
        }
        if (got_frame) {
            //LOGD("decoded frame with w = %d, h = %d, format = %d", info->frame->width, info->frame->height, info->frame->format);
            if (info->frame->format == AV_PIX_FMT_YUV420P || info->frame->format == AV_PIX_FMT_BGRA || info->frame->format == AV_PIX_FMT_YUVJ420P) {
                jint *dataArr = env->GetIntArrayElements(data, 0);
                if (dataArr != nullptr) {
                    dataArr[3] = (int) (1000 * info->frame->pkt_pts * av_q2d(info->video_stream->time_base));
                    env->ReleaseIntArrayElements(data, dataArr, 0);
                }
                
                void *pixels;
                if (AndroidBitmap_lockPixels(env, bitmap, &pixels) >= 0) {
                    if (info->frame->format == AV_PIX_FMT_YUV420P || info->frame->format == AV_PIX_FMT_YUVJ420P) {
                        //LOGD("y %d, u %d, v %d, width %d, height %d", info->frame->linesize[0], info->frame->linesize[2], info->frame->linesize[1], info->frame->width, info->frame->height);
                        libyuv::I420ToARGB(info->frame->data[0], info->frame->linesize[0], info->frame->data[2], info->frame->linesize[2], info->frame->data[1], info->frame->linesize[1], (uint8_t *) pixels, info->frame->width * 4, info->frame->width, info->frame->height);
                    } else if (info->frame->format == AV_PIX_FMT_BGRA) {
                        libyuv::ABGRToARGB(info->frame->data[0], info->frame->linesize[0], (uint8_t *) pixels, info->frame->width * 4, info->frame->width, info->frame->height);
                    }
                    AndroidBitmap_unlockPixels(env, bitmap);
                }
            }
            info->has_decoded_frames = true;
            av_frame_unref(info->frame);
            return 1;
        }
    }
    return 0;
}
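The rewind performed at end-of-file above is the standard trick for looping playback: seek the demuxer back to the start with the widest possible timestamp range, then drop whatever frames the decoder still buffers. Isolated as a sketch with assumed names (INT64_MIN/INT64_MAX from <stdint.h> stand in for the std::numeric_limits calls):

static int rewind_to_start(AVFormatContext *fmt_ctx, AVCodecContext *dec_ctx)
{
    /* stream_index -1 lets FFmpeg pick a default stream for the seek */
    int ret = avformat_seek_file(fmt_ctx, -1, INT64_MIN, 0, INT64_MAX, 0);
    if (ret < 0)
        return ret;
    avcodec_flush_buffers(dec_ctx); /* discard frames buffered pre-seek */
    return 0;
}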