static int demux_thread(void *data) { webm_context *ctx = (webm_context *)data; nestegg_packet *pkt; int r; while ((r = nestegg_read_packet(ctx->nestegg_ctx, &pkt)) > 0) { unsigned int track; nestegg_packet_track(pkt, &track); if (track == ctx->audio_track) { if (queue_insert(ctx->audio_ctx.packet_queue, pkt) < 0) { nestegg_free_packet(pkt); break; } } else if (track == ctx->video_track) { if (queue_insert(ctx->video_ctx.packet_queue, pkt) < 0) { nestegg_free_packet(pkt); break; } } if (quit_video) break; } queue_insert(ctx->video_ctx.packet_queue, NULL); if (ctx->audio_track >= 0) queue_insert(ctx->audio_ctx.packet_queue, NULL); return 0; }
/*
 * Release all nestegg resources held by a WebmInputContext.  Safe to call
 * with a NULL context or one that was never initialized.  The freed
 * pointers are cleared so a second webm_free() (or a stray later use)
 * cannot double-free or touch a destroyed context.
 */
void webm_free(struct WebmInputContext *webm_ctx) {
  if (webm_ctx && webm_ctx->nestegg_ctx) {
    if (webm_ctx->pkt) {
      nestegg_free_packet(webm_ctx->pkt);
      webm_ctx->pkt = NULL;
    }
    nestegg_destroy(webm_ctx->nestegg_ctx);
    webm_ctx->nestegg_ctx = NULL;
  }
}
/*
 * Return the next video frame chunk from the WebM stream.
 *
 * On success, *buffer and *bytes_in_buffer describe the chunk's data,
 * which points into the current nestegg packet (valid until the next call
 * frees it).  buffer_size is accepted for interface compatibility but not
 * used by the WebM path.
 *
 * Returns 0 on success, 1 on end-of-stream or read error.
 */
int webm_read_frame(struct WebmInputContext *webm_ctx, uint8_t **buffer,
                    size_t *bytes_in_buffer, size_t *buffer_size) {
  /* All chunks of the current packet consumed?  Pull the next packet that
   * belongs to the video track, discarding anything else. */
  if (webm_ctx->chunk >= webm_ctx->chunks) {
    uint32_t track = 0;
    for (;;) {
      /* Release the exhausted packet and clear the pointer so a failed
       * read below can't leave a dangling reference behind. */
      if (webm_ctx->pkt) {
        nestegg_free_packet(webm_ctx->pkt);
        webm_ctx->pkt = NULL;
      }
      if (nestegg_read_packet(webm_ctx->nestegg_ctx, &webm_ctx->pkt) <= 0 ||
          nestegg_packet_track(webm_ctx->pkt, &track)) {
        return 1;
      }
      if (track == webm_ctx->video_track)
        break;
    }
    if (nestegg_packet_count(webm_ctx->pkt, &webm_ctx->chunks))
      return 1;
    webm_ctx->chunk = 0;
  }

  if (nestegg_packet_data(webm_ctx->pkt, webm_ctx->chunk, buffer,
                          bytes_in_buffer)) {
    return 1;
  }
  webm_ctx->chunk++;
  return 0;
}
int webm_guess_framerate(struct WebmInputContext *webm_ctx, struct VpxInputContext *vpx_ctx) { uint32_t i; uint64_t tstamp = 0; /* Check to see if we can seek before we parse any data. */ if (nestegg_track_seek(webm_ctx->nestegg_ctx, webm_ctx->video_track, 0)) { warn("Failed to guess framerate (no Cues), set to 30fps.\n"); vpx_ctx->framerate.numerator = 30; vpx_ctx->framerate.denominator = 1; return 0; } /* Guess the framerate. Read up to 1 second, or 50 video packets, * whichever comes first. */ for (i = 0; tstamp < 1000000000 && i < 50;) { nestegg_packet *pkt; uint32_t track; if (nestegg_read_packet(webm_ctx->nestegg_ctx, &pkt) <= 0) break; nestegg_packet_track(pkt, &track); if (track == webm_ctx->video_track) { nestegg_packet_tstamp(pkt, &tstamp); ++i; } nestegg_free_packet(pkt); } if (nestegg_track_seek(webm_ctx->nestegg_ctx, webm_ctx->video_track, 0)) goto fail; vpx_ctx->framerate.numerator = (i - 1) * 1000000; vpx_ctx->framerate.denominator = (int)(tstamp / 1000); return 0; fail: nestegg_destroy(webm_ctx->nestegg_ctx); webm_ctx->nestegg_ctx = NULL; rewind(vpx_ctx->file); return 1; }
/*
 * Tear down the audio side of playback: drain and destroy the packet
 * queue, shut down the Vorbis decoder, and restore the soundmix music
 * channel to its pre-playback state.
 */
static void close_audio(audio_context *audio_ctx)
{
    int i;

    /* Drain any packets still queued and release each one. */
    while (audio_ctx->packet_queue->size)
    {
        nestegg_packet *pkt = queue_get(audio_ctx->packet_queue);
        if (pkt)
            nestegg_free_packet(pkt);
    }
    queue_destroy(audio_ctx->packet_queue);

    /* Shut down the Vorbis decoding context. */
    vorbis_destroy(&audio_ctx->vorbis_ctx);

    /* Return soundmix to how it was before playback started. */
    musicchannel.active = 0;
    for (i = 0; i < MUSIC_NUM_BUFFERS; i++)
    {
        free(musicchannel.buf[i]);
        musicchannel.buf[i] = NULL;
    }
}
/*
 * Fill audio_buf with buf_size bytes of decoded 16-bit interleaved PCM.
 * Pulls compressed packets from the audio packet queue and feeds them to
 * the Vorbis decoder whenever the decoder's sample reservoir runs dry.
 *
 * Returns buf_size on success, or -1 once the queue is drained (the
 * demuxer's NULL sentinel was received).
 */
static int audio_decode_frame(audio_context *audio_ctx, uint8_t *audio_buf,
                              int buf_size)
{
    vorbis_context *vc = &audio_ctx->vorbis_ctx;
    /* 2 bytes per sample per channel. */
    int wanted = buf_size / (vc->channels * 2);

    audio_ctx->last_samples = wanted;

    while (wanted)
    {
        if (audio_ctx->avail_samples == 0)
        {
            nestegg_packet *pkt;
            uint64_t timestamp;
            unsigned chunk, num_chunks;

            debug_printf("audio queue size=%i\n", audio_ctx->packet_queue->size);
            pkt = queue_get(audio_ctx->packet_queue);
            if (pkt == NULL)
                return -1; /* NULL sentinel: demuxer is done */

            nestegg_packet_tstamp(pkt, &timestamp);
            nestegg_packet_count(pkt, &num_chunks);
            for (chunk = 0; chunk < num_chunks; chunk++)
            {
                unsigned char *data;
                size_t data_size;
                nestegg_packet_data(pkt, chunk, &data, &data_size);
                /* NOTE(review): avail_samples is overwritten for each
                 * chunk; presumably vorbis_packet() buffers decoded PCM
                 * internally and returns the running total — confirm. */
                audio_ctx->avail_samples = vorbis_packet(vc, data, data_size);
            }
            nestegg_free_packet(pkt);
        }

        {
            int grabbed = MIN(audio_ctx->avail_samples, wanted);
            vorbis_getpcm(vc, audio_buf, grabbed);
            audio_buf += 2 * vc->channels * grabbed;
            audio_ctx->avail_samples -= grabbed;
            wanted -= grabbed;
        }
    }
    return buf_size;
}
/*
 * Tear down the video side of playback: destroy the libvpx decoder, then
 * drain and destroy the compressed-packet queue and the decoded-frame
 * queue (either queue may be absent).
 */
static void close_video(video_context *video_ctx)
{
    /* Destroy the libvpx decoder; report but don't abort on failure. */
    if (vpx_codec_destroy(&video_ctx->vpx_ctx))
    {
        printf("Warning: failed to destroy libvpx context: %s\n",
               vpx_codec_error(&video_ctx->vpx_ctx));
    }

    if (video_ctx->packet_queue)
    {
        while (video_ctx->packet_queue->size)
        {
            nestegg_packet *pkt = queue_get(video_ctx->packet_queue);
            if (pkt)
                nestegg_free_packet(pkt);
        }
        queue_destroy(video_ctx->packet_queue);
    }

    if (video_ctx->frame_queue)
    {
        while (video_ctx->frame_queue->size)
        {
            yuv_frame_destroy((yuv_frame *)queue_get(video_ctx->frame_queue));
        }
        queue_destroy(video_ctx->frame_queue);
    }
}
static int read_frame(struct input_ctx *input, uint8_t **buf, size_t *buf_sz, size_t *buf_alloc_sz) { char raw_hdr[IVF_FRAME_HDR_SZ]; size_t new_buf_sz; FILE *infile = input->infile; enum file_kind kind = input->kind; if (kind == WEBM_FILE) { if (input->chunk >= input->chunks) { unsigned int track; do { /* End of this packet, get another. */ if (input->pkt) nestegg_free_packet(input->pkt); if (nestegg_read_packet(input->nestegg_ctx, &input->pkt) <= 0 || nestegg_packet_track(input->pkt, &track)) return 1; } while (track != input->video_track); if (nestegg_packet_count(input->pkt, &input->chunks)) return 1; input->chunk = 0; } if (nestegg_packet_data(input->pkt, input->chunk, buf, buf_sz)) return 1; input->chunk++; return 0; } /* For both the raw and ivf formats, the frame size is the first 4 bytes * of the frame header. We just need to special case on the header * size. */ else if (fread(raw_hdr, kind == IVF_FILE ? IVF_FRAME_HDR_SZ : RAW_FRAME_HDR_SZ, 1, infile) != 1) { if (!feof(infile)) fprintf(stderr, "Failed to read frame size\n"); new_buf_sz = 0; } else { new_buf_sz = mem_get_le32(raw_hdr); if (new_buf_sz > 256 * 1024 * 1024) { fprintf(stderr, "Error: Read invalid frame size (%u)\n", (unsigned int)new_buf_sz); new_buf_sz = 0; } if (kind == RAW_FILE && new_buf_sz > 256 * 1024) fprintf(stderr, "Warning: Read invalid frame size (%u)" " - not a raw file?\n", (unsigned int)new_buf_sz); if (new_buf_sz > *buf_alloc_sz) { uint8_t *new_buf = realloc(*buf, 2 * new_buf_sz); if (new_buf) { *buf = new_buf; *buf_alloc_sz = 2 * new_buf_sz; } else { fprintf(stderr, "Failed to allocate compressed data buffer\n"); new_buf_sz = 0; } } } *buf_sz = new_buf_sz; if (!feof(infile)) { if (fread(*buf, 1, *buf_sz, infile) != *buf_sz) { fprintf(stderr, "Failed to read full frame\n"); return 1; } return 0; } return 1; }
/*
 * nestegg demo driver: opens the WebM file named by argv[1], dumps track
 * metadata, optionally exercises seeking (built with -DSEEK_TEST), then
 * iterates over every packet in the file.  All diagnostic output is
 * compiled in only when DEBUG is defined.
 *
 * Returns EXIT_SUCCESS on a full pass, EXIT_FAILURE on bad usage, open
 * failure, or parser-initialization failure.
 */
int main(int argc, char * argv[])
{
  FILE * fp;
  int r, type;
  nestegg * ctx;
  nestegg_audio_params aparams;
  nestegg_packet * pkt;
  nestegg_video_params vparams;
  size_t length, size;
  uint64_t duration, tstamp, pkt_tstamp;
  unsigned char * codec_data, * ptr;
  unsigned int cnt, i, j, track, tracks, pkt_cnt, pkt_track;
  unsigned int data_items = 0;
  /* I/O callbacks over a stdio FILE; userdata is filled in after fopen. */
  nestegg_io io = { stdio_read, stdio_seek, stdio_tell, NULL };

  if (argc != 2)
    return EXIT_FAILURE;

  fp = fopen(argv[1], "rb");
  if (!fp)
    return EXIT_FAILURE;

  io.userdata = fp;

  ctx = NULL;
  r = nestegg_init(&ctx, io, log_callback, -1);
  if (r != 0)
    return EXIT_FAILURE;

  nestegg_track_count(ctx, &tracks);
  r = nestegg_duration(ctx, &duration);
  if (r == 0) {
#if defined(DEBUG)
    fprintf(stderr, "media has %u tracks and duration %fs\n",
            tracks, duration / 1e9);
#endif
  } else {
#if defined(DEBUG)
    fprintf(stderr, "media has %u tracks and unknown duration, using 10s default\n",
            tracks);
#endif
    /* Unknown duration: assume 10 seconds (value is in nanoseconds) so
       the SEEK_TEST targets below still have something to aim at. */
    duration = 10000000000;
  }

  /* Dump per-track metadata: type, codec, private codec-data blobs, and
     the video or audio parameters as appropriate. */
  for (i = 0; i < tracks; ++i) {
    type = nestegg_track_type(ctx, i);
#if defined(DEBUG)
    fprintf(stderr, "track %u: type: %d codec: %d", i, type,
            nestegg_track_codec_id(ctx, i));
#endif
    nestegg_track_codec_data_count(ctx, i, &data_items);
    for (j = 0; j < data_items; ++j) {
      nestegg_track_codec_data(ctx, i, j, &codec_data, &length);
#if defined(DEBUG)
      fprintf(stderr, " (%p, %u)", codec_data, (unsigned int) length);
#endif
    }
    if (type == NESTEGG_TRACK_VIDEO) {
      nestegg_track_video_params(ctx, i, &vparams);
#if defined(DEBUG)
      fprintf(stderr, " video: %ux%u (d: %ux%u %ux%ux%ux%u)",
              vparams.width, vparams.height,
              vparams.display_width, vparams.display_height,
              vparams.crop_top, vparams.crop_left,
              vparams.crop_bottom, vparams.crop_right);
#endif
    } else if (type == NESTEGG_TRACK_AUDIO) {
      nestegg_track_audio_params(ctx, i, &aparams);
#if defined(DEBUG)
      fprintf(stderr, " audio: %.2fhz %u bit %u channels",
              aparams.rate, aparams.depth, aparams.channels);
#endif
    }
#if defined(DEBUG)
    fprintf(stderr, "\n");
#endif
  }

#if defined(SEEK_TEST)
  /* Seek to the middle of the media and read one packet. */
#if defined(DEBUG)
  fprintf(stderr, "seek to middle\n");
#endif
  r = nestegg_track_seek(ctx, 0, duration / 2);
  if (r == 0) {
#if defined(DEBUG)
    fprintf(stderr, "middle ");
#endif
    r = nestegg_read_packet(ctx, &pkt);
    if (r == 1) {
      nestegg_packet_track(pkt, &track);
      nestegg_packet_count(pkt, &cnt);
      nestegg_packet_tstamp(pkt, &tstamp);
#if defined(DEBUG)
      fprintf(stderr, "* t %u pts %f frames %u\n", track, tstamp / 1e9, cnt);
#endif
      nestegg_free_packet(pkt);
    } else {
#if defined(DEBUG)
      fprintf(stderr, "middle seek failed\n");
#endif
    }
  }

  /* Seek near the end (90% of the duration) and read one packet. */
#if defined(DEBUG)
  fprintf(stderr, "seek to ~end\n");
#endif
  r = nestegg_track_seek(ctx, 0, duration - (duration / 10));
  if (r == 0) {
#if defined(DEBUG)
    fprintf(stderr, "end ");
#endif
    r = nestegg_read_packet(ctx, &pkt);
    if (r == 1) {
      nestegg_packet_track(pkt, &track);
      nestegg_packet_count(pkt, &cnt);
      nestegg_packet_tstamp(pkt, &tstamp);
#if defined(DEBUG)
      fprintf(stderr, "* t %u pts %f frames %u\n", track, tstamp / 1e9, cnt);
#endif
      nestegg_free_packet(pkt);
    } else {
#if defined(DEBUG)
      fprintf(stderr, "end seek failed\n");
#endif
    }
  }

  /* Seek back near the start (10% of the duration) and read one packet. */
#if defined(DEBUG)
  fprintf(stderr, "seek to ~start\n");
#endif
  r = nestegg_track_seek(ctx, 0, duration / 10);
  if (r == 0) {
#if defined(DEBUG)
    fprintf(stderr, "start ");
#endif
    r = nestegg_read_packet(ctx, &pkt);
    if (r == 1) {
      nestegg_packet_track(pkt, &track);
      nestegg_packet_count(pkt, &cnt);
      nestegg_packet_tstamp(pkt, &tstamp);
#if defined(DEBUG)
      fprintf(stderr, "* t %u pts %f frames %u\n", track, tstamp / 1e9, cnt);
#endif
      nestegg_free_packet(pkt);
    } else {
#if defined(DEBUG)
      fprintf(stderr, "start seek failed\n");
#endif
    }
  }
#endif

  /* Main packet loop: walk every remaining packet, printing (in DEBUG
     builds) its track, timestamp, and the size of each data chunk. */
  while (nestegg_read_packet(ctx, &pkt) > 0) {
    nestegg_packet_track(pkt, &pkt_track);
    nestegg_packet_count(pkt, &pkt_cnt);
    nestegg_packet_tstamp(pkt, &pkt_tstamp);
#if defined(DEBUG)
    fprintf(stderr, "t %u pts %f frames %u: ",
            pkt_track, pkt_tstamp / 1e9, pkt_cnt);
#endif
    for (i = 0; i < pkt_cnt; ++i) {
      nestegg_packet_data(pkt, i, &ptr, &size);
#if defined(DEBUG)
      fprintf(stderr, "%u ", (unsigned int) size);
#endif
    }
#if defined(DEBUG)
    fprintf(stderr, "\n");
#endif
    nestegg_free_packet(pkt);
  }

  nestegg_destroy(ctx);
  fclose(fp);

  return EXIT_SUCCESS;
}
/*
 * Video decoder thread: pulls compressed packets from the packet queue,
 * decodes each chunk with libvpx, copies every decoded image into a
 * yuv_frame, and pushes the frames onto the frame queue for presentation.
 * Exits on the demuxer's NULL sentinel packet, on decode error, or when
 * quit_video is set; pushes a NULL sentinel frame on the way out.
 *
 * data: a video_context pointer.  Always returns 0 (thread exit code).
 */
static int video_thread(void *data)
{
    video_context *ctx = (video_context*) data;
    uint64_t timestamp;

    while(!quit_video)
    {
        unsigned int chunk, chunks;
        nestegg_packet *pkt;

        debug_printf("video queue size=%i\n", ctx->packet_queue->size);
        pkt = queue_get(ctx->packet_queue);
        /* NULL packet is the demuxer's end-of-stream sentinel. */
        if (quit_video || pkt == NULL)
            break;

        nestegg_packet_count(pkt, &chunks);
        nestegg_packet_tstamp(pkt, &timestamp);
        for (chunk = 0; chunk < chunks; ++chunk)
        {
            unsigned char *data;
            size_t data_size;
            nestegg_packet_data(pkt, chunk, &data, &data_size);

            vpx_image_t *img;
            vpx_codec_iter_t iter = NULL;
            if (vpx_codec_decode(&ctx->vpx_ctx, data, data_size, NULL, 0))
            {
                /* Decode error: request a global shutdown and stop
                 * processing this packet's remaining chunks. */
                printf("Error: libvpx failed to decode chunk\n");
                quit_video = 1;
                break;
            }
            /* One decode call may yield several frames; drain them all. */
            while((img = vpx_codec_get_frame(&ctx->vpx_ctx, &iter)))
            {
                assert(img->d_w == ctx->width);
                assert(img->d_h == ctx->height);
                yuv_frame *frame = yuv_frame_create(img->d_w, img->d_h);
                frame->timestamp = timestamp;
                int y;
                /* Copy the luma plane row by row: the decoder's stride
                 * can be wider than the display width. */
                for(y = 0; y < img->d_h; y++)
                    memcpy(frame->lum+(y*img->d_w),
                           img->planes[0]+(y*img->stride[0]), img->d_w);
                /* Chroma planes are half-height/half-width (4:2:0).
                 * NOTE(review): planes[1] lands in frame->cr and
                 * planes[2] in frame->cb — presumably intentional for the
                 * consumer's expected layout; confirm against yuv_frame. */
                for(y = 0; y < img->d_h / 2; y++)
                {
                    memcpy(frame->cr+(y*img->d_w/2),
                           img->planes[1]+(y*img->stride[1]), img->d_w / 2);
                    memcpy(frame->cb+(y*img->d_w/2),
                           img->planes[2]+(y*img->stride[2]), img->d_w / 2);
                }
                if (queue_insert(ctx->frame_queue, (void *)frame) < 0)
                {
                    /* Queue rejected the frame (e.g. shutting down);
                     * free it ourselves and stop emitting frames. */
                    debug_printf("destroying last frame\n");
                    yuv_frame_destroy(frame);
                    break;
                }
                /* Space successive frames from the same packet by the
                 * nominal frame delay. */
                timestamp += ctx->frame_delay;
            }
        }
        nestegg_free_packet(pkt);
    }
    /* NULL sentinel: tell the presentation side no more frames are coming. */
    queue_insert(ctx->frame_queue, NULL);
    return 0;
}