static int read_chomp_line(AVIOContext *s, char *buf, int maxlen)
{
    int len = ff_get_line(s, buf, maxlen);
    /* strip trailing whitespace; cast to unsigned char so isspace() is
     * well-defined for bytes >= 0x80 */
    while (len > 0 && isspace((unsigned char)buf[len - 1]))
        buf[--len] = '\0';
    if (len == 0) {
        if (url_feof(s))
            return AVERROR_EOF;
        if (url_ferror(s))
            return url_ferror(s);
    }
    return len;
}
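/* A minimal usage sketch, not part of the original source: drain a text
 * stream line by line with read_chomp_line(). The dump_lines() caller is
 * hypothetical; it relies only on the contract above (>= 0 is a line length,
 * AVERROR_EOF at end of stream, another negative value on I/O error). */
static void dump_lines(AVIOContext *pb)
{
    char line[1024];
    int len;

    while ((len = read_chomp_line(pb, line, sizeof(line))) >= 0)
        av_log(NULL, AV_LOG_INFO, "line (%d bytes): %s\n", len, line);
    /* len < 0 here: AVERROR_EOF on normal termination, or an I/O error */
}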
/**
 * @return bytes read, 0 on end of file, or <0 on error
 */
static int wtvfile_read_packet(void *opaque, uint8_t *buf, int buf_size)
{
    WtvFile *wf = opaque;
    ByteIOContext *pb = wf->pb_filesystem;
    int nread = 0;

    if (wf->error || url_ferror(pb))
        return -1;
    if (wf->position >= wf->length || url_feof(pb))
        return 0;

    buf_size = FFMIN(buf_size, wf->length - wf->position);
    while (nread < buf_size) {
        int n;
        int remaining_in_sector = (1 << wf->sector_bits) - (wf->position & ((1 << wf->sector_bits) - 1));
        int read_request        = FFMIN(buf_size - nread, remaining_in_sector);

        n = get_buffer(pb, buf, read_request);
        if (n <= 0)
            break;
        nread        += n;
        buf          += n;
        wf->position += n;
        if (n == remaining_in_sector) {
            int i = wf->position >> wf->sector_bits;
            /* seek only when the next logical sector is not physically contiguous */
            if (i >= wf->nb_sectors ||
                (wf->sectors[i] != wf->sectors[i - 1] + (1 << (wf->sector_bits - WTV_SECTOR_BITS)) &&
                 url_fseek(pb, (int64_t)wf->sectors[i] << WTV_SECTOR_BITS, SEEK_SET) < 0)) {
                wf->error = 1;
                break;
            }
        }
    }
    return nread;
}
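/* Worked example of the sector arithmetic above (an illustration, not part of
 * the original source). Assuming wf->sector_bits == 12, i.e. 4096-byte
 * sectors: at position 5000, position & 4095 == 904, so remaining_in_sector
 * is 4096 - 904 == 3192 bytes, the most that can be read before the next
 * sector lookup. When a read exhausts the sector, the code checks whether
 * logical sector i is physically contiguous with sector i - 1 (their on-disk
 * indices differ by exactly 1 << (sector_bits - WTV_SECTOR_BITS)); only
 * non-contiguous sectors require an explicit url_fseek() to the next mapped
 * location. */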
int queue_av_pkt(void *arg)
{
    LOGI("created reading thread");
    AVPacket *pkt = av_mallocz(sizeof(AVPacket));
    State *state = (State *)arg;

    init_queue(&state->audio->raw_data_buf);
    init_queue(&state->video->raw_data_buf);

    while (true) {
        if (state->video->raw_data_buf.size > MAX_VIDEO_QUEUE_SIZE ||
            state->audio->raw_data_buf.size > MAX_AUDIO_QUEUE_SIZE) {
            SDL_Delay(10);
            // LOGW("queue is full, waiting for decoding");
            continue;
        }
        if (av_read_frame(state->content->format_ctx, pkt) < 0) {
            if (url_ferror(state->content->format_ctx->pb) == 0) {
                SDL_Delay(100);
                continue;
            } else {
                LOGW("read frame error, maybe end of content : queue_av_pkt");
                break; // error or end of content
            }
        }
        // put packet into AV queue
        if (pkt->stream_index == state->video->track) {
            add_to_queue(&state->video->raw_data_buf, pkt);
        } else if (pkt->stream_index == state->audio->track) {
            add_to_queue(&state->audio->raw_data_buf, pkt);
        } else {
            LOGV("neither audio nor video track : queue_av_pkt");
            av_free_packet(pkt);
        }
    }
    return 0;
}
/*
 * Read up to len samples of type sox_sample_t from file into buf[].
 * Return number of samples read.
 */
static size_t read_samples(sox_format_t * ft, sox_sample_t *buf, size_t len)
{
  priv_t * ffmpeg = (priv_t *)ft->priv;
  AVPacket *pkt = &ffmpeg->audio_pkt;
  int ret;
  size_t nsamp = 0, nextra;

  /* Read data repeatedly until buf is full or no more can be read */
  do {
    /* If input buffer empty, read more data. audio_buf_index counts int16
     * samples while audio_buf_size counts bytes, hence the factor of 2. */
    if (ffmpeg->audio_buf_index * 2 >= ffmpeg->audio_buf_size) {
      if ((ret = av_read_frame(ffmpeg->ctxt, pkt)) < 0 &&
          (ret == AVERROR_EOF || url_ferror(ffmpeg->ctxt->pb)))
        break;
      ffmpeg->audio_buf_size = audio_decode_frame(ffmpeg, ffmpeg->audio_buf_aligned, AVCODEC_MAX_AUDIO_FRAME_SIZE);
      ffmpeg->audio_buf_index = 0;
    }

    /* Convert data into SoX samples up to size of buffer */
    nextra = min((ffmpeg->audio_buf_size - ffmpeg->audio_buf_index) / 2, (int)(len - nsamp));
    for (; nextra > 0; nextra--)
      buf[nsamp++] = SOX_SIGNED_16BIT_TO_SAMPLE(((int16_t *)ffmpeg->audio_buf_aligned)[ffmpeg->audio_buf_index++], ft->clips);
  } while (nsamp < len && nextra > 0);

  return nsamp;
}
int queue_av_pkt(void *arg)
{
    AVPacket *pkt = av_mallocz(sizeof(AVPacket));
    State *state = (State *)arg;

    init_queue(&state->audio->audioq);
    init_queue(&state->video->videoq);

    while (true) {
        // if queue is full, wait for the decoder to drain it
        if (state->video->videoq.size > MAX_VIDEO_QUEUE_SIZE ||
            state->audio->audioq.size > MAX_AUDIO_QUEUE_SIZE) {
            SDL_Delay(5);
            continue;
        }
        // if packet is valid
        if (av_read_frame(state->file->pFormatCtx, pkt) < 0) {
            if (url_ferror(state->file->pFormatCtx->pb) == 0) {
                SDL_Delay(100);
                continue;
            } else {
                break; // error or end of file
            }
        }
        // put packet into AV queue
        if (pkt->stream_index == state->video->videoTrack) {
            add_to_queue(&state->video->videoq, pkt);
        } else if (pkt->stream_index == state->audio->audioTrack) {
            add_to_queue(&state->audio->audioq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    return 0; // declared int, so return a value when the loop exits
}
static int cin_read_frame_header(CinDemuxContext *cin, ByteIOContext *pb)
{
    CinFrameHeader *hdr = &cin->frame_header;

    hdr->video_frame_type = get_byte(pb);
    hdr->audio_frame_type = get_byte(pb);
    hdr->pal_colors_count = get_le16(pb);
    hdr->video_frame_size = get_le32(pb);
    hdr->audio_frame_size = get_le32(pb);

    if (url_feof(pb) || url_ferror(pb))
        return AVERROR(EIO);

    if (get_le32(pb) != 0xAA55AA55)
        return AVERROR_INVALIDDATA;

    return 0;
}
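/* The on-disk frame-header layout implied by the reads above (derived from
 * the code, not part of the original source):
 *
 *   offset  size  field
 *   0       1     video_frame_type
 *   1       1     audio_frame_type
 *   2       2     pal_colors_count  (little-endian)
 *   4       4     video_frame_size  (little-endian)
 *   8       4     audio_frame_size  (little-endian)
 *   12      4     magic 0xAA55AA55  (little-endian)
 *
 * EOF/error is checked before validating the magic, so a short read is
 * reported as an I/O error rather than as invalid data. */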
bool FFmpegDecoder::readNextPacketNormal()
{
    AVPacket packet;

    if (! m_pending_packet)
    {
        bool end_of_stream = false;

        // Read the next frame packet
        if (av_read_frame(m_format_context.get(), &packet) < 0)
        {
            if (url_ferror(m_format_context->pb) == 0)
                end_of_stream = true;
            else
                throw std::runtime_error("av_read_frame() failed");
        }

        if (end_of_stream)
        {
            // If we reach the end of the stream, change the decoder state
            if (loop())
            {
                m_clocks.reset(m_start);
                rewindButDontFlushQueues();
            }
            else
                m_state = END_OF_STREAM;

            return false;
        }
        else
        {
            // Make the packet data available beyond av_read_frame() logical scope.
            if (av_dup_packet(&packet) < 0)
                throw std::runtime_error("av_dup_packet() failed");

            m_pending_packet = FFmpegPacket(packet);
        }
    }

    // Send data packet
    if (m_pending_packet.type == FFmpegPacket::PACKET_DATA)
    {
        if (m_pending_packet.packet.stream_index == m_audio_index)
        {
            if (m_audio_queue.timedPush(m_pending_packet, 10))
            {
                m_pending_packet.release();
                return true;
            }
        }
        else if (m_pending_packet.packet.stream_index == m_video_index)
        {
            if (m_video_queue.timedPush(m_pending_packet, 10))
            {
                m_pending_packet.release();
                return true;
            }
        }
        else
        {
            m_pending_packet.clear();
            return true;
        }
    }

    return false;
}
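/* Note on the pattern above (an observation, not from the original source):
 * because timedPush() can time out when the consumer queue is full, the
 * packet is parked in m_pending_packet rather than dropped, and the next call
 * retries the push without reading another frame, so no data is lost under
 * backpressure. av_dup_packet() copies the packet's data into its own buffer
 * so it can safely outlive av_read_frame()'s internal storage across calls. */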
static void *decode_thread(ALLEGRO_THREAD *t, void *arg)
{
   VideoState *is = (VideoState *) arg;
   AVFormatContext *format_context = is->format_context;
   AVPacket pkt1, *packet = &pkt1;

   is->videoStream = -1;
   is->audioStream = -1;

   if (is->audio_index >= 0) {
      stream_component_open(is, is->audio_index);
   }
   if (is->video_index >= 0) {
      stream_component_open(is, is->video_index);
   }

   if (is->videoStream < 0 && is->audioStream < 0) {
      ALLEGRO_ERROR("%s: could not open codecs\n", is->filename);
      goto fail;
   }

   for (;;) {
      if (is->quit) {
         break;
      }

      if (is->seek_req) {
         int stream_index = -1;
         int64_t seek_target = is->seek_pos;

         if (is->videoStream >= 0)
            stream_index = is->videoStream;
         else if (is->audioStream >= 0)
            stream_index = is->audioStream;

         if (stream_index >= 0) {
            seek_target = av_rescale_q(seek_target, AV_TIME_BASE_Q,
               format_context->streams[stream_index]->time_base);
         }

         if (av_seek_frame(is->format_context, stream_index, seek_target,
               is->seek_flags) < 0) {
            ALLEGRO_WARN("%s: error while seeking (%d, %lu)\n",
               is->format_context->filename, stream_index, seek_target);
         }
         else {
            if (is->audioStream >= 0) {
               packet_queue_flush(&is->audioq);
               packet_queue_put(&is->audioq, &flush_pkt);
            }
            if (is->videoStream >= 0) {
               packet_queue_flush(&is->videoq);
               packet_queue_put(&is->videoq, &flush_pkt);
            }
         }
         is->seek_req = 0;
         is->after_seek_sync = true;
      }

      if (is->audioq.size > MAX_AUDIOQ_SIZE || is->videoq.size > MAX_VIDEOQ_SIZE) {
         al_rest(0.01);
         continue;
      }

      if (av_read_frame(is->format_context, packet) < 0) {
#ifdef FFMPEG_0_8
         if (!format_context->pb->eof_reached && !format_context->pb->error) {
#else
         if (url_ferror((void *)&format_context->pb) == 0) {
#endif
            al_rest(0.01);
            continue;
         }
         else {
            break;
         }
      }

      // Is this a packet from the video stream?
      if (packet->stream_index == is->videoStream) {
         packet_queue_put(&is->videoq, packet);
      }
      else if (packet->stream_index == is->audioStream) {
         packet_queue_put(&is->audioq, packet);
      }
      else {
         av_free_packet(packet);
      }
   }

   /* all done - wait for it */
   while (!is->quit) {
      al_rest(0.1);
   }

fail:
   return t;
}

/* We want to be able to send an event to the user exactly at the time
 * a new video frame should be displayed.
 */
static void *timer_thread(ALLEGRO_THREAD *t, void *arg)
{
   VideoState *is = (VideoState *) arg;
   double ot = 0, nt = 0;

   while (!is->quit) {
      ALLEGRO_EVENT event;
      double d;

      /* Wait here until someone signals to us when a new frame was
       * scheduled at is->show_next.
       */
      al_lock_mutex(is->timer_mutex);
      al_wait_cond(is->timer_cond, is->timer_mutex);
      al_unlock_mutex(is->timer_mutex);

      if (is->quit)
         break;

      /* Wait until that time. This wait is why we have our own thread
       * here so the user doesn't need to do it.
       */
      while (1) {
         d = is->show_next - get_master_clock(is);
         if (d <= 0)
            break;
         //printf("waiting %4.1f ms\n", d * 1000);
         al_rest(d);
      }

      nt = get_master_clock(is);
      //printf("event after %4.1f ms\n", (nt - ot) * 1000);
      ot = nt;

      /* Now is the time. */
      event.type = ALLEGRO_EVENT_VIDEO_FRAME_SHOW;
      event.user.data1 = (intptr_t)is->video;
      al_emit_user_event(&is->video->es, &event, NULL);
   }
   return t;
}
int decode_thread(void *arg) {

  VideoState *is = (VideoState *)arg;
  AVFormatContext *pFormatCtx;
  AVPacket pkt1, *packet = &pkt1;

  int video_index = -1;
  int audio_index = -1;
  int i;

  is->videoStream = -1;
  is->audioStream = -1;

  global_video_state = is;
  // will interrupt blocking functions if we quit!
  url_set_interrupt_cb(decode_interrupt_cb);

  // Open video file
  if(av_open_input_file(&pFormatCtx, is->filename, NULL, 0, NULL) != 0)
    return -1; // Couldn't open file

  is->pFormatCtx = pFormatCtx;

  // Retrieve stream information
  if(av_find_stream_info(pFormatCtx) < 0)
    return -1; // Couldn't find stream information

  // Dump information about file onto standard error
  dump_format(pFormatCtx, 0, is->filename, 0);

  // Find the first video stream
  for(i = 0; i < pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO &&
       video_index < 0) {
      video_index = i;
    }
    if(pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO &&
       audio_index < 0) {
      audio_index = i;
    }
  }
  if(audio_index >= 0) {
    stream_component_open(is, audio_index);
  }
  if(video_index >= 0) {
    stream_component_open(is, video_index);
  }

  if(is->videoStream < 0 || is->audioStream < 0) {
    fprintf(stderr, "%s: could not open codecs\n", is->filename);
    goto fail;
  }

  // main decode loop
  for(;;) {
    if(is->quit) {
      break;
    }
    // seek stuff goes here
    if(is->seek_req) {
      int stream_index = -1;
      int64_t seek_target = is->seek_pos;

      if(is->videoStream >= 0)
        stream_index = is->videoStream;
      else if(is->audioStream >= 0)
        stream_index = is->audioStream;

      if(stream_index >= 0) {
        seek_target = av_rescale_q(seek_target, AV_TIME_BASE_Q,
                                   pFormatCtx->streams[stream_index]->time_base);
      }
      // av_seek_frame() returns >= 0 on success, so failure is < 0
      if(av_seek_frame(is->pFormatCtx, stream_index, seek_target,
                       is->seek_flags) < 0) {
        fprintf(stderr, "%s: error while seeking\n", is->pFormatCtx->filename);
      } else {
        if(is->audioStream >= 0) {
          packet_queue_flush(&is->audioq);
          packet_queue_put(&is->audioq, &flush_pkt);
        }
        if(is->videoStream >= 0) {
          packet_queue_flush(&is->videoq);
          packet_queue_put(&is->videoq, &flush_pkt);
        }
      }
      is->seek_req = 0;
    }

    if(is->audioq.size > MAX_AUDIOQ_SIZE ||
       is->videoq.size > MAX_VIDEOQ_SIZE) {
      SDL_Delay(10);
      continue;
    }
    if(av_read_frame(is->pFormatCtx, packet) < 0) {
      if(url_ferror(&pFormatCtx->pb) == 0) {
        SDL_Delay(100); /* no error; wait for user input */
        continue;
      } else {
        break;
      }
    }
    // Is this a packet from the video stream?
    if(packet->stream_index == is->videoStream) {
      packet_queue_put(&is->videoq, packet);
    } else if(packet->stream_index == is->audioStream) {
      packet_queue_put(&is->audioq, packet);
    } else {
      av_free_packet(packet);
    }
  }
  /* all done - wait for it */
  while(!is->quit) {
    SDL_Delay(100);
  }

fail: {
    SDL_Event event;
    event.type = FF_QUIT_EVENT;
    event.user.data1 = is;
    SDL_PushEvent(&event);
  }
  return 0;
}
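/* Reference sketch, not part of the tutorial source: url_ferror(),
 * url_set_interrupt_cb() and the other url_* calls above belong to the old
 * pre-0.8 FFmpeg API. Against modern FFmpeg the same "wait or bail out"
 * decision after av_read_frame() would look roughly like this; AVERROR_EOF
 * signals end of file and AVIOContext.error carries genuine I/O errors. */
static int read_or_wait(AVFormatContext *fmt, AVPacket *packet)
{
    int ret = av_read_frame(fmt, packet);
    if (ret >= 0)
        return 1;                                    /* got a packet */
    if (ret == AVERROR_EOF || (fmt->pb && fmt->pb->error))
        return 0;                                    /* EOF or real I/O error: stop */
    return -1;                                       /* transient (e.g. AVERROR(EAGAIN)): retry later */
}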
/*
 * Export data out of GPDB.
 */
Datum
gphdfsprotocol_export(PG_FUNCTION_ARGS)
{
    URL_FILE *myData;
    char     *data;
    int       datlen;
    size_t    wrote = 0;
    static char ebuf[512] = {0};
    int       ebuflen = 512;

    /* Must be called via the external table format manager */
    if (!CALLED_AS_EXTPROTOCOL(fcinfo))
        elog(ERROR, "cannot execute gphdfsprotocol_export outside protocol manager");

    /* Get our internal description of the protocol */
    myData = (URL_FILE *) EXTPROTOCOL_GET_USER_CTX(fcinfo);

    /* =======================================================================
     *                            DO CLOSE
     * ======================================================================= */
    if (EXTPROTOCOL_IS_LAST_CALL(fcinfo))
    {
        if (myData)
            url_fclose(myData, true, "gphdfs protocol");
        PG_RETURN_INT32(0);
    }

    /* =======================================================================
     *                            DO OPEN
     * ======================================================================= */
    if (myData == NULL)
    {
        myData = gphdfs_fopen(fcinfo, true);
        EXTPROTOCOL_SET_USER_CTX(fcinfo, myData);

        /* add schema info to pipe */
        StringInfo schema_data = makeStringInfo();

        Relation relation = FORMATTER_GET_RELATION(fcinfo);
        ExtTableEntry *exttbl = GetExtTableEntry(relation->rd_id);
        if (fmttype_is_avro(exttbl->fmtcode) || fmttype_is_parquet(exttbl->fmtcode))
        {
            int relNameLen = strlen(relation->rd_rel->relname.data);
            appendIntToBuffer(schema_data, relNameLen);
            appendBinaryStringInfo(schema_data, relation->rd_rel->relname.data, relNameLen);

            int ncolumns = relation->rd_att->natts;
            appendIntToBuffer(schema_data, ncolumns);
            int i = 0;
            for (; i < ncolumns; i++)
            {
                Oid type = relation->rd_att->attrs[i]->atttypid;

                /* add attname, atttypid, attnotnull, attndims to the schema_data field */
                int attNameLen = strlen(relation->rd_att->attrs[i]->attname.data);
                appendIntToBuffer(schema_data, attNameLen);
                appendBinaryStringInfo(schema_data, relation->rd_att->attrs[i]->attname.data, attNameLen);
                appendIntToBuffer(schema_data, type);

                bool notNull = relation->rd_att->attrs[i]->attnotnull;
                appendInt1ToBuffer(schema_data, notNull ? 1 : 0);
                appendIntToBuffer(schema_data, relation->rd_att->attrs[i]->attndims);

                /* add type delimiter; for a UDT it can be any char */
                char delim = 0;
                int16 typlen;
                bool typbyval;
                char typalign;
                Oid typioparam;
                Oid func;
                get_type_io_data(type, IOFunc_input, &typlen, &typbyval,
                                 &typalign, &delim, &typioparam, &func);
                appendInt1ToBuffer(schema_data, delim);
            }

            StringInfo schema_head = makeStringInfo();
            appendIntToBuffer(schema_head, schema_data->len + 2);
            appendInt2ToBuffer(schema_head, 2);

            url_execute_fwrite(schema_head->data, schema_head->len, myData, NULL);
            url_execute_fwrite(schema_data->data, schema_data->len, myData, NULL);

            pfree(schema_head->data);
            pfree(schema_data->data);
        }
    }

    /* =======================================================================
     *                            DO THE EXPORT
     * ======================================================================= */
    data   = EXTPROTOCOL_GET_DATABUF(fcinfo);
    datlen = EXTPROTOCOL_GET_DATALEN(fcinfo);

    if (datlen > 0)
        wrote = url_execute_fwrite(data, datlen, myData, NULL);

    if (url_ferror(myData, wrote, ebuf, ebuflen))
    {
        ereport(ERROR,
                (errcode_for_file_access(),
                 strlen(ebuf) > 0 ?
                    errmsg("could not write to external resource:\n%s", ebuf) :
                    errmsg("could not write to external resource: %m")));
    }

    PG_RETURN_INT32((int) wrote);
}
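/* Shape of the schema header written above (derived from the code, not part
 * of the original source; the integer widths follow the appendInt*ToBuffer
 * helper names):
 *
 *   int32  total length (schema_data->len + 2)
 *   int16  version marker (2)
 *   int32  relNameLen, followed by relNameLen bytes of relation name
 *   int32  ncolumns
 *   per column:
 *     int32  attNameLen, followed by attNameLen bytes of attribute name
 *     int32  atttypid
 *     int8   attnotnull flag (0 or 1)
 *     int32  attndims
 *     int8   type delimiter obtained from get_type_io_data()
 */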
int slimaudio_decoder_aac_process(slimaudio_t *audio) {
    char streamformat[16];
    int out_size;
    int len = 0;
    int iRC;
    u8_t *outbuf;
    u8_t *inbuf;

    /* It is not really correct to assume that all MP4 files (which were not
     * otherwise recognized as ALAC or MOV by the scanner) are AAC, but that
     * is the current server side status.
     *
     * Container type and bitstream format:
     *
     * '1' (adif),
     * '2' (adts),
     * '3' (latm within loas),
     * '4' (rawpkts),
     * '5' (mp4ff),
     * '6' (latm within rawpkts)
     *
     * This is a hack that assumes:
     * (1) If the original content-type of the track is MP4 or SLS then we
     *     are streaming an MP4 file (without any transcoding);
     * (2) All other AAC streams will be adts.
     *
     * So the server will only set aac_format to '2' or '5'.
     */
    DEBUGF("aac: decoder_format:%d '%c'\n", audio->aac_format, audio->aac_format);

    int audioStream = 0; /* Always zero for aac decoder */

    switch (audio->aac_format) {
    case '2':
        strncpy(streamformat, "aac", sizeof(streamformat));
        break;
    case '5':
        strncpy(streamformat, "m4a", sizeof(streamformat));
        break;
    default:
        fprintf(stderr, "aac: unknown container type: %c\n", audio->aac_format);
        return -1;
    }

    DEBUGF("aac: play audioStream: %d\n", audioStream);

    inbuf = av_malloc(AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!inbuf) {
        DEBUGF("aac: inbuf alloc failed.\n");
        return -1;
    }

    AVIOContext *AVIOCtx;
    AVIOCtx = avio_alloc_context(inbuf, AUDIO_CHUNK_SIZE, 0, audio, av_read_data, NULL, NULL);
    if (AVIOCtx == NULL) {
        DEBUGF("aac: avio_alloc_context failed.\n");
        return -1;
    } else {
        AVIOCtx->is_streamed = 1;
    }

    AVInputFormat *pAVInputFormat = av_find_input_format(streamformat);
    if (!pAVInputFormat) {
        DEBUGF("aac: probe failed\n");
        return -1;
    } else {
        DEBUGF("aac: probe ok name:%s lname:%s\n",
               pAVInputFormat->name, pAVInputFormat->long_name);
    }

    AVFormatContext *pFormatCtx;
    pFormatCtx = avformat_alloc_context();
    if (pFormatCtx == NULL) {
        DEBUGF("aac: avformat_alloc_context failed.\n");
    } else {
        pFormatCtx->pb = AVIOCtx;
    }

    AVCodecContext *pCodecCtx;
    iRC = avformat_open_input(&pFormatCtx, "", pAVInputFormat, NULL);
    if (iRC < 0) {
        DEBUGF("aac: input stream open failed:%d\n", iRC);
        return -1;
    } else {
        iRC = av_find_stream_info(pFormatCtx);
        if (iRC < 0) {
            DEBUGF("aac: find stream info failed:%d\n", iRC);
            return -1;
        } else {
            /* <= so a file with no streams at all is rejected too */
            if (pFormatCtx->nb_streams <= audioStream) {
                DEBUGF("aac: invalid stream.\n");
                return -1;
            }
            if (pFormatCtx->streams[audioStream]->codec->codec_type != CODEC_TYPE_AUDIO) {
                DEBUGF("aac: stream: %d is not audio.\n", audioStream);
                return -1;
            } else {
                pCodecCtx = pFormatCtx->streams[audioStream]->codec;
            }
        }
    }

    AVCodec *pCodec;

    /* Find the AAC audio decoder */
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (!pCodec) {
        DEBUGF("aac: codec not found.\n");
        return -1;
    }

    /* Open codec */
    iRC = avcodec_open(pCodecCtx, pCodec);
    if (iRC < 0) {
        DEBUGF("aac: could not open codec:%d\n", iRC);
        return -1;
    }

    outbuf = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
    if (!outbuf) {
        DEBUGF("aac: outbuf alloc failed.\n");
        return -1;
    }

    bool eos = false;
    AVPacket avpkt;

    while (!eos) {
        iRC = av_read_frame(pFormatCtx, &avpkt);

        /* Some decoders fail to read the last packet so additional handling is required */
        if (iRC < 0) {
            DEBUGF("aac: av_read_frame error: %d\n", iRC);

            if (iRC == AVERROR_EOF) {
                DEBUGF("aac: AVERROR_EOF\n");
                eos = true;
            }

            if (pFormatCtx->pb->eof_reached) {
                DEBUGF("aac: url_feof\n");
                eos = true;
            }

            if (url_ferror(pFormatCtx->pb)) {
                DEBUGF("aac: url_ferror\n");
#if 0
                break;
#endif
            }
        }

        out_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;

        len = avcodec_decode_audio3(pCodecCtx, (int16_t *)outbuf, &out_size, &avpkt);
        if (len < 0) {
            DEBUGF("aac: no audio to decode\n");
            //av_free_packet (&avpkt);
            //break;
        }

        if (out_size > 0) {
            /* if a frame has been decoded, output it */
            slimaudio_buffer_write(audio->output_buffer, (char *)outbuf, out_size);
        }

        av_free_packet(&avpkt);
    }

    if (inbuf != NULL)
        av_free(inbuf);
    if (outbuf != NULL)
        av_free(outbuf);

    DEBUGF("aac: avcodec_close\n");
    avcodec_close(pCodecCtx);

    /* Close the stream */
    DEBUGF("aac: av_close_input_stream\n");
    av_close_input_stream(pFormatCtx);

    return 0;
}
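/* The av_read_data callback handed to avio_alloc_context() above is not shown
 * in this snippet. A minimal sketch of the contract such a callback must
 * satisfy in this generation of the API; fill_from_stream() is a hypothetical
 * helper standing in for slimaudio's real input-buffer read: */
static int av_read_data(void *opaque, uint8_t *buf, int buf_size)
{
    slimaudio_t *audio = (slimaudio_t *) opaque;

    /* copy up to buf_size bytes of compressed input into buf */
    int n = fill_from_stream(audio, buf, buf_size); /* hypothetical helper */

    /* in this API generation, returning 0 signals EOF to avio and a
     * negative value reports an error; otherwise return the byte count */
    return n;
}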