/* MSFilter process callback for the H264 decoder (ffmpeg backend).
 * Depayloads incoming RTP packets (RFC 3984) into NAL units, rebuilds a
 * start-code prefixed bitstream and decodes it with avcodec, pushing the
 * resulting YUV pictures to the filter output. */
static void dec_process(MSFilter *f){
	DecData *d=(DecData*)f->data;
	mblk_t *im;
	MSQueue nalus;
	AVFrame orig;
	ms_queue_init(&nalus);
	while((im=ms_queue_get(f->inputs[0]))!=NULL){
		/*push the sps/pps given in sprop-parameter-sets if any*/
		if (d->packet_num==0 && d->sps && d->pps){
			/* stamp the out-of-band parameter sets like the first incoming
			 * packet before feeding them to the unpacker */
			mblk_set_timestamp_info(d->sps,mblk_get_timestamp_info(im));
			mblk_set_timestamp_info(d->pps,mblk_get_timestamp_info(im));
			rfc3984_unpack(&d->unpacker,d->sps,&nalus);
			rfc3984_unpack(&d->unpacker,d->pps,&nalus);
			/* ownership was handed to the unpacker: forget the pointers */
			d->sps=NULL;
			d->pps=NULL;
		}
		rfc3984_unpack(&d->unpacker,im,&nalus);
		if (!ms_queue_empty(&nalus)){
			int size;
			uint8_t *p,*end;
			bool_t need_reinit=FALSE;
			/* concatenate the NALUs into d->bitstream; need_reinit is set when
			 * the parameter sets changed (e.g. new video size) */
			size=nalusToFrame(d,&nalus,&need_reinit);
			if (need_reinit)
				dec_reinit(d);
			p=d->bitstream;
			end=d->bitstream+size;
			/* a frame may contain several access units: decode until the
			 * whole bitstream has been consumed */
			while (end-p>0) {
				int len;
				int got_picture=0;
				AVPacket pkt;
				avcodec_get_frame_defaults(&orig);
				av_init_packet(&pkt);
				pkt.data = p;
				pkt.size = end-p;
				len=avcodec_decode_video2(&d->av_context,&orig,&got_picture,&pkt);
				if (len<=0) {
					ms_warning("ms_AVdecoder_process: error %i.",len);
					/* rate-limit the decoding-error notification to one per 5 s */
					if ((f->ticker->time - d->last_error_reported_time)>5000 || d->last_error_reported_time==0) {
						d->last_error_reported_time=f->ticker->time;
						ms_filter_notify_no_arg(f,MS_VIDEO_DECODER_DECODING_ERRORS);
					}
					break;
				}
				if (got_picture) {
					ms_queue_put(f->outputs[0],get_as_yuvmsg(f,d,&orig));
					if (!d->first_image_decoded) {
						ms_filter_notify_no_arg(f,MS_VIDEO_DECODER_FIRST_IMAGE_DECODED);
						d->first_image_decoded = TRUE;
					}
				}
				p+=len; /* advance past the bytes the decoder consumed */
			}
		}
		d->packet_num++;
	}
}
static void dtmfgen_process(MSFilter *f){ mblk_t *m; DtmfGenState *s=(DtmfGenState*)f->data; int nsamples; ms_filter_lock(f); if (ms_queue_empty(f->inputs[0])){ s->nosamples_time+=f->ticker->interval; if ((s->playing || s->silence!=0) && s->nosamples_time>NO_SAMPLES_THRESHOLD){ /*after 100 ms without stream we decide to generate our own sample instead of writing into incoming stream samples*/ nsamples=(f->ticker->interval*s->rate)/1000; m=allocb(nsamples*s->nchannels*2,0); if (s->silence==0){ if (s->pos==0){ MSDtmfGenEvent ev; ev.tone_start_time=f->ticker->time; strncpy(ev.tone_name,s->current_tone.tone_name,sizeof(ev.tone_name)); ms_filter_notify(f,MS_DTMF_GEN_EVENT,&ev); } write_dtmf(s,(int16_t*)m->b_wptr,nsamples); }else{ memset(m->b_wptr,0,nsamples*s->nchannels*2); s->silence-=f->ticker->interval; if (s->silence<0) s->silence=0; } m->b_wptr+=nsamples*s->nchannels*2; ms_queue_put(f->outputs[0],m); } }else{ s->nosamples_time=0; if (s->current_tone.interval > 0) { s->silence-=f->ticker->interval; if (s->silence<0) s->silence=0; } else s->silence=0; while((m=ms_queue_get(f->inputs[0]))!=NULL){ if (s->playing && s->silence==0){ if (s->pos==0){ MSDtmfGenEvent ev; ev.tone_start_time=f->ticker->time; strncpy(ev.tone_name,s->current_tone.tone_name,sizeof(ev.tone_name)); ms_filter_notify(f,MS_DTMF_GEN_EVENT,&ev); } nsamples=(int)(m->b_wptr-m->b_rptr)/(2*s->nchannels); write_dtmf(s, (int16_t*)m->b_rptr,nsamples); } ms_queue_put(f->outputs[0],m); } } ms_filter_unlock(f); }
/*process NALUs and pack them into rtp payloads */
void rfc3984_pack(Rfc3984Context *ctx, MSQueue *naluq, MSQueue *rtpq, uint32_t ts){
	mblk_t *m,*prevm=NULL;   /* prevm: small NALU kept pending for STAP-A aggregation */
	int prevsz=0,sz;         /* prevsz: payload size accumulated in prevm so far */
	bool_t end;
	while((m=ms_queue_get(naluq))!=NULL){
		end=ms_queue_empty(naluq); /* TRUE when m is the last NALU of the frame */
		sz=m->b_wptr-m->b_rptr;
		if (prevm!=NULL){
			/* a small NALU is pending: aggregate this one with it as long as
			 * the resulting STAP-A stays below the maximum payload size */
			if ((prevsz+sz)<(ctx->maxsz-2)){
				prevm=concat_nalus(prevm,m);
				m=NULL;
				prevsz+=sz+2;/*+2 for the stapa size field*/
				continue;
			}else{
				/*send prevm packet: either single nal or STAP-A*/
				if (prevm->b_cont!=NULL){
					ms_debug("Sending STAP-A");
				}else
					ms_debug("Sending previous msg as single NAL");
				/* more NALUs follow in this frame, so no end-of-frame flag */
				send_packet(rtpq,ts,prevm,FALSE);
				prevm=NULL;
				prevsz=0;
			}
		}
		if (sz<(ctx->maxsz/2)){
			/*try to aggregate it with next packet*/
			prevm=m;
			prevsz=sz+3; /*STAP-A header + size*/
			m=NULL;
		}else{
			/*send as single nal or FU-A*/
			if (sz>ctx->maxsz){
				/* NALU too big for one payload: fragment it */
				ms_debug("Sending FU-A packets");
				frag_nalu_and_send(rtpq,ts,m,end, ctx->maxsz);
			}else{
				ms_debug("Sending Single NAL");
				send_packet(rtpq,ts,m,end);
			}
		}
	}
	if (prevm){
		/* flush the pending NALU; it is necessarily the last of the frame */
		ms_debug("Sending Single NAL (2)");
		send_packet(rtpq,ts,prevm,TRUE);
	}
}
/*process incoming rtp data and output NALUs, whenever possible*/
void rfc3984_unpack(Rfc3984Context *ctx, mblk_t *im, MSQueue *out){
	uint8_t type=nal_header_get_type(im->b_rptr);
	uint8_t *p;
	/* Save the marker bit now: every branch below either frees im (STAP-A),
	 * may consume it (FU-A aggregation) or enqueues it, so im must not be
	 * dereferenced afterwards — the original code read it after freemsg(im),
	 * a use-after-free. */
	int marker=mblk_get_marker_info(im);
	if (im->b_cont) msgpullup(im,-1); /* make the payload contiguous */
	if (type==TYPE_STAP_A){
		ms_message("Receiving STAP-A");
		/*split into nalus*/
		uint16_t sz;
		uint8_t *buf=(uint8_t*)&sz;
		mblk_t *nal;
		for(p=im->b_rptr+1;p<im->b_wptr;){
			/* the two-byte size field itself must fit inside the packet;
			 * without this check a truncated packet makes p[1] read one
			 * byte past the buffer */
			if (p+2>im->b_wptr){
				ms_error("Malformed STAP-A packet");
				break;
			}
			buf[0]=p[0];
			buf[1]=p[1];
			sz=ntohs(sz);
			/* each sub-NALU is a zero-copy view (dupb) into im's buffer */
			nal=dupb(im);
			p+=2;
			nal->b_rptr=p;
			p+=sz;
			nal->b_wptr=p;
			if (p>im->b_wptr){
				/* declared size runs past the packet end */
				ms_error("Malformed STAP-A packet");
				freemsg(nal);
				break;
			}
			ms_queue_put(&ctx->q,nal);
		}
		freemsg(im);
	}else if (type==TYPE_FU_A){
		/* fragmentation unit: reassemble across packets */
		ms_message("Receiving FU-A");
		mblk_t *o=aggregate_fua(ctx,im);
		if (o) ms_queue_put(&ctx->q,o);
	}else{
		/*single nal unit*/
		ms_message("Receiving single NAL");
		ms_queue_put(&ctx->q,im);
	}
	if (marker){
		/*end of frame, output everything*/
		while(!ms_queue_empty(&ctx->q)){
			ms_queue_put(out,ms_queue_get(&ctx->q));
		}
	}
}
/* DTMF generator process callback.
 * Writes the tone over the incoming audio when the input is active; once the
 * input has been quiet for more than NO_SAMPLES_THRESHOLD ms, it synthesizes
 * its own sample blocks instead. */
static void dtmfgen_process(MSFilter *f){
	DtmfGenState *state=(DtmfGenState*)f->data;
	mblk_t *msg;
	int sample_count;
	ms_filter_lock(f);
	if (!ms_queue_empty(f->inputs[0])){
		/* audio is flowing: overwrite the incoming samples with the tone */
		state->nosamples_time=0;
		state->silence=0;
		while((msg=ms_queue_get(f->inputs[0]))!=NULL){
			if (state->dtmf!=0){
				sample_count=(msg->b_wptr-msg->b_rptr)/2;
				write_dtmf(state,(int16_t*)msg->b_rptr,sample_count);
			}
			ms_queue_put(f->outputs[0],msg);
		}
	}else{
		state->nosamples_time+=f->ticker->interval;
		if ((state->dtmf!=0 || state->silence!=0) && state->nosamples_time>NO_SAMPLES_THRESHOLD){
			/* after 100 ms without stream we generate our own sample block
			 * instead of writing into incoming stream samples */
			int nbytes;
			sample_count=(f->ticker->interval*state->rate)/1000;
			nbytes=sample_count*2;
			msg=allocb(nbytes,0);
			if (state->silence!=0){
				/* inside the inter-tone silence: emit zeroes */
				memset(msg->b_wptr,0,nbytes);
				state->silence-=f->ticker->interval;
				if (state->silence<0) state->silence=0;
			}else{
				write_dtmf(state,(int16_t*)msg->b_wptr,sample_count);
			}
			msg->b_wptr+=nbytes;
			ms_queue_put(f->outputs[0],msg);
		}
	}
	ms_filter_unlock(f);
}
/* MSFilter process callback for the MediaCodec H264 decoder.
 * Pass 1: depayload the RTP packets (RFC 3984) and submit the rebuilt H264
 * bitstream to the codec's input queue.
 * Pass 2 (run on every tick): drain all decoded frames available on the
 * codec's output queue and push them downstream as YUV buffers. */
static void dec_process(MSFilter *f){
	DecData *d=(DecData*)f->data;
	MSPicture pic = {0};
	mblk_t *im,*om = NULL;
	ssize_t oBufidx = -1;
	size_t bufsize;
	bool_t need_reinit=FALSE;
	bool_t request_pli=FALSE;
	MSQueue nalus;
	AMediaCodecBufferInfo info;
	ms_queue_init(&nalus);
	while((im=ms_queue_get(f->inputs[0]))!=NULL){
		/* push the sps/pps given in sprop-parameter-sets if any, stamped
		 * like the first incoming packet */
		if (d->packet_num==0 && d->sps && d->pps){
			mblk_set_timestamp_info(d->sps,mblk_get_timestamp_info(im));
			mblk_set_timestamp_info(d->pps,mblk_get_timestamp_info(im));
			rfc3984_unpack(&d->unpacker, d->sps, &nalus);
			rfc3984_unpack(&d->unpacker, d->pps, &nalus);
			d->sps=NULL;
			d->pps=NULL;
		}
		if(rfc3984_unpack(&d->unpacker,im,&nalus) <0){
			/* depayloading problem: ask the sender for a key frame */
			request_pli=TRUE;
		}
		if (!ms_queue_empty(&nalus)){
			int size;
			uint8_t *buf=NULL;
			ssize_t iBufidx;
			size=nalusToFrame(d,&nalus,&need_reinit);
			if (need_reinit) {
				//In case of rotation, the decoder needs to flushed in order to restart with the new video size
				AMediaCodec_flush(d->codec);
				d->first_buffer_queued = FALSE;
			}
			/*First put our H264 bitstream into the decoder*/
			iBufidx = AMediaCodec_dequeueInputBuffer(d->codec, TIMEOUT_US);
			if (iBufidx >= 0) {
				buf = AMediaCodec_getInputBuffer(d->codec, iBufidx, &bufsize);
				if(buf == NULL) {
					ms_error("MSMediaCodecH264Dec: AMediaCodec_getInputBuffer() returned NULL");
					break;
				}
				if((size_t)size > bufsize) {
					ms_error("Cannot copy the bitstream into the input buffer size : %i and bufsize %i",size,(int) bufsize);
					break;
				} else {
					/* timestamp in microseconds derived from the monotonic
					 * clock, passed to the codec as presentation time */
					struct timespec ts;
					clock_gettime(CLOCK_MONOTONIC, &ts);
					memcpy(buf,d->bitstream,(size_t)size);
					AMediaCodec_queueInputBuffer(d->codec, iBufidx, 0, (size_t)size, (ts.tv_nsec/1000) + 10000LL, 0);
					d->first_buffer_queued = TRUE;
				}
			}else if (iBufidx == AMEDIA_ERROR_UNKNOWN){
				ms_error("MSMediaCodecH264Dec: AMediaCodec_dequeueInputBuffer() had an exception");
			}
		}
		d->packet_num++;
		/* keep requesting a key frame until both parameter sets are known */
		if (d->sps && d->pps) request_pli = FALSE;
		else request_pli = TRUE;
	}
	/*secondly try to get decoded frames from the decoder, this is performed every tick*/
	while (d->first_buffer_queued && (oBufidx = AMediaCodec_dequeueOutputBuffer(d->codec, &info, TIMEOUT_US)) >= 0){
		AMediaFormat *format;
		int width = 0, height = 0, color = 0;
		uint8_t *buf = AMediaCodec_getOutputBuffer(d->codec, oBufidx, &bufsize);
		if(buf == NULL){
			ms_filter_notify_no_arg(f,MS_VIDEO_DECODER_DECODING_ERRORS);
			ms_error("MSMediaCodecH264Dec: AMediaCodec_getOutputBuffer() returned NULL");
		}
		/* query the codec for the actual decoded geometry/color format */
		format = AMediaCodec_getOutputFormat(d->codec);
		if(format != NULL){
			AMediaFormat_getInt32(format, "width", &width);
			AMediaFormat_getInt32(format, "height", &height);
			AMediaFormat_getInt32(format, "color-format", &color);
			d->vsize.width=width;
			d->vsize.height=height;
			AMediaFormat_delete(format);
		}
		if(buf != NULL && d->sps && d->pps){
			/*some decoders output garbage while no sps or pps have been received yet !*/
			if(width != 0 && height != 0 ){
				if(color == 19) {
					//YUV
					/* planar 4:2:0: copy the three planes straight */
					int ysize = width*height;
					int usize = ysize/4;
					om = ms_yuv_buf_allocator_get(d->buf_allocator,&pic,width,height);
					memcpy(pic.planes[0],buf,ysize);
					memcpy(pic.planes[1],buf+ysize,usize);
					memcpy(pic.planes[2],buf+ysize+usize,usize);
				} else {
					/* semi-planar output: deinterleave the CbCr plane */
					uint8_t* cbcr_src = (uint8_t*) (buf + width * height);
					om = copy_ycbcrbiplanar_to_true_yuv_with_rotation_and_down_scale_by_2(d->buf_allocator, buf, cbcr_src, 0, width, height, width, width, TRUE, FALSE);
				}
				if (!d->first_image_decoded) {
					ms_message("First frame decoded %ix%i",width,height);
					d->first_image_decoded = true;
					ms_filter_notify_no_arg(f, MS_VIDEO_DECODER_FIRST_IMAGE_DECODED);
				}
				ms_queue_put(f->outputs[0], om);
			}else{
				ms_error("MSMediaCodecH264Dec: width and height are not known !");
			}
		}
		/* always hand the output buffer back to the codec */
		AMediaCodec_releaseOutputBuffer(d->codec, oBufidx, FALSE);
	}
	if (oBufidx == AMEDIA_ERROR_UNKNOWN){
		ms_error("MSMediaCodecH264Dec: AMediaCodec_dequeueOutputBuffer() had an exception");
	}
	if (d->avpf_enabled && request_pli) {
		ms_filter_notify_no_arg(f, MS_VIDEO_DECODER_SEND_PLI);
	}
	ms_queue_flush(f->inputs[0]);
}
void MSOpenH264Decoder::feed() { if (!isInitialized()) { ms_error("MSOpenH264Decoder::feed(): not initialized"); ms_queue_flush(mFilter->inputs[0]); return; } MSQueue nalus; ms_queue_init(&nalus); mblk_t *im; while ((im = ms_queue_get(mFilter->inputs[0])) != NULL) { if ((getIDRPicId() == 0) && (mSPS != 0) && (mPPS != 0)) { // Push the sps/pps given in sprop-parameter-sets if any mblk_set_timestamp_info(mSPS, mblk_get_timestamp_info(im)); mblk_set_timestamp_info(mPPS, mblk_get_timestamp_info(im)); rfc3984_unpack(mUnpacker, mSPS, &nalus); rfc3984_unpack(mUnpacker, mPPS, &nalus); mSPS = 0; mPPS = 0; } rfc3984_unpack(mUnpacker, im, &nalus); if (!ms_queue_empty(&nalus)) { void * pData[3] = { 0 }; SBufferInfo sDstBufInfo = { 0 }; int len = nalusToFrame(&nalus); DECODING_STATE state = mDecoder->DecodeFrame2(mBitstream, len, (uint8_t**)pData, &sDstBufInfo); if (state != dsErrorFree) { ms_error("OpenH264 decoder: DecodeFrame2 failed: 0x%x", state); if (((mFilter->ticker->time - mLastErrorReportTime) > 5000) || (mLastErrorReportTime == 0)) { mLastErrorReportTime = mFilter->ticker->time; ms_filter_notify_no_arg(mFilter, MS_VIDEO_DECODER_DECODING_ERRORS); } } if (sDstBufInfo.iBufferStatus == 1) { uint8_t * pDst[3] = { 0 }; pDst[0] = (uint8_t *)pData[0]; pDst[1] = (uint8_t *)pData[1]; pDst[2] = (uint8_t *)pData[2]; // Update video size and (re)allocate YUV buffer if needed if ((mWidth != sDstBufInfo.UsrData.sSystemBuffer.iWidth) || (mHeight != sDstBufInfo.UsrData.sSystemBuffer.iHeight)) { if (mYUVMsg) { freemsg(mYUVMsg); } mWidth = sDstBufInfo.UsrData.sSystemBuffer.iWidth; mHeight = sDstBufInfo.UsrData.sSystemBuffer.iHeight; mYUVMsg = ms_yuv_buf_alloc(&mOutbuf, mWidth, mHeight); ms_filter_notify_no_arg(mFilter,MS_FILTER_OUTPUT_FMT_CHANGED); } // Scale/copy frame to destination mblk_t for (int i = 0; i < 3; i++) { uint8_t *dst = mOutbuf.planes[i]; uint8_t *src = pDst[i]; int h = mHeight >> (( i > 0) ? 
1 : 0); for(int j = 0; j < h; j++) { memcpy(dst, src, mOutbuf.strides[i]); dst += mOutbuf.strides[i]; src += sDstBufInfo.UsrData.sSystemBuffer.iStride[(i == 0) ? 0 : 1]; } } ms_queue_put(mFilter->outputs[0], dupmsg(mYUVMsg)); // Update average FPS if (ms_average_fps_update(&mFPS, mFilter->ticker->time)) { ms_message("OpenH264 decoder: Frame size: %dx%d", mWidth, mHeight); } // Notify first decoded image if (!mFirstImageDecoded) { mFirstImageDecoded = true; ms_filter_notify_no_arg(mFilter, MS_VIDEO_DECODER_FIRST_IMAGE_DECODED); } #if MSOPENH264_DEBUG ms_message("OpenH264 decoder: IDR pic id: %d, Frame num: %d, Temporal id: %d, VCL NAL: %d", getIDRPicId(), getFrameNum(), getTemporalId(), getVCLNal()); #endif } }
/* MSFilter process callback for the MediaCodec H264 decoder.
 * Depayloads RTP into NALUs, queues the rebuilt frame into the Android
 * MediaCodec, and pushes any decoded picture to the filter output. */
static void dec_process(MSFilter *f){
	DecData *d=(DecData*)f->data;
	MSPicture pic = {0};
	mblk_t *im,*om = NULL;
	bool_t need_reinit=FALSE;
	bool_t request_pli=FALSE;
	MSQueue nalus;
	ms_queue_init(&nalus);
	while((im=ms_queue_get(f->inputs[0]))!=NULL){
		/* push the sps/pps given in sprop-parameter-sets if any, stamped
		 * like the first incoming packet */
		if (d->packet_num==0 && d->sps && d->pps){
			mblk_set_timestamp_info(d->sps,mblk_get_timestamp_info(im));
			mblk_set_timestamp_info(d->pps,mblk_get_timestamp_info(im));
			rfc3984_unpack(&d->unpacker, d->sps, &nalus);
			rfc3984_unpack(&d->unpacker, d->pps, &nalus);
			d->sps=NULL;
			d->pps=NULL;
		}
		if(rfc3984_unpack(&d->unpacker,im,&nalus) <0){
			request_pli=TRUE; /* depayloading error: ask for a key frame */
		}
		if (!ms_queue_empty(&nalus)){
			AMediaCodecBufferInfo info;
			int size;
			int width = 0, height = 0, color = 0;
			uint8_t *buf=NULL;
			size_t bufsize;
			ssize_t iBufidx, oBufidx;
			size=nalusToFrame(d,&nalus,&need_reinit);
			if (need_reinit) {
				/* In case of rotation, the decoder needs to be flushed in
				 * order to restart with the new video size */
				AMediaCodec_flush(d->codec);
			}
			/* first, feed the H264 bitstream to the decoder */
			iBufidx = AMediaCodec_dequeueInputBuffer(d->codec, TIMEOUT_US);
			if (iBufidx >= 0) {
				buf = AMediaCodec_getInputBuffer(d->codec, iBufidx, &bufsize);
				if(buf == NULL) {
					break;
				}
				if((size_t)size > bufsize) {
					ms_error("Cannot copy the bitstream into the input buffer size : %i and bufsize %i",size,(int) bufsize);
				} else {
					memcpy(buf,d->bitstream,(size_t)size);
					AMediaCodec_queueInputBuffer(d->codec, iBufidx, 0, (size_t)size, TIMEOUT_US, 0);
				}
			}
			/* then, try to retrieve a decoded frame.
			 * Reset buf first: it must only be non-NULL when it points at a
			 * valid OUTPUT buffer — the original code let the stale
			 * input-buffer pointer reach the rendering block below whenever
			 * dequeueing the output failed. */
			buf = NULL;
			oBufidx = AMediaCodec_dequeueOutputBuffer(d->codec, &info, TIMEOUT_US);
			if(oBufidx >= 0){
				AMediaFormat *format;
				buf = AMediaCodec_getOutputBuffer(d->codec, oBufidx, &bufsize);
				if(buf == NULL){
					ms_filter_notify_no_arg(f,MS_VIDEO_DECODER_DECODING_ERRORS);
					/* hand the dequeued index back even though mapping failed */
					AMediaCodec_releaseOutputBuffer(d->codec, oBufidx, FALSE);
					break;
				}
				/* query the codec for the decoded geometry/color format */
				format = AMediaCodec_getOutputFormat(d->codec);
				if(format != NULL){
					AMediaFormat_getInt32(format, "width", &width);
					AMediaFormat_getInt32(format, "height", &height);
					AMediaFormat_getInt32(format, "color-format", &color);
					d->vsize.width=width;
					d->vsize.height=height;
					AMediaFormat_delete(format);
				}
			}
			if(buf != NULL){
				if(width != 0 && height != 0 ){
					if(color == 19) {
						//YUV
						/* planar 4:2:0: copy the three planes straight */
						int ysize = width*height;
						int usize = ysize/4;
						om=ms_yuv_buf_alloc(&pic,width,height);
						memcpy(pic.planes[0],buf,ysize);
						memcpy(pic.planes[1],buf+ysize,usize);
						memcpy(pic.planes[2],buf+ysize+usize,usize);
					} else {
						/* semi-planar output: deinterleave the CbCr plane */
						uint8_t* cbcr_src = (uint8_t*) (buf + width * height);
						om = copy_ycbcrbiplanar_to_true_yuv_with_rotation_and_down_scale_by_2(d->buf_allocator, buf, cbcr_src, 0, width, height, width, width, TRUE, FALSE);
					}
					if (!d->first_image_decoded) {
						ms_message("First frame decoded %ix%i",width,height);
						d->first_image_decoded = true;
						ms_filter_notify_no_arg(f, MS_VIDEO_DECODER_FIRST_IMAGE_DECODED);
					}
					ms_queue_put(f->outputs[0], om);
				}
				/* index 0 is a valid output buffer index: the original
				 * `oBufidx > 0` test leaked the buffer whenever the codec
				 * returned index 0 */
				if(oBufidx >= 0) {
					AMediaCodec_releaseOutputBuffer(d->codec, oBufidx, FALSE);
				}
			}
		}
		d->packet_num++;
	}
	if (d->avpf_enabled && request_pli) {
		ms_filter_notify_no_arg(f, MS_VIDEO_DECODER_SEND_PLI);
	}
}