// Decode a single AMR-WB frame from the internal byte buffer.
// Incoming packet payloads are appended behind any leftover bytes; an
// optional "#!AMR-WB\n" raw-file magic is skipped; then one frame is
// decoded, sized by the frame-type nibble of its header byte.
// Returns ERR_NONE with PCM in p->Synth, or ERR_NEED_MORE_DATA.
static int Process(amrwb* p, const packet* Packet, const flowstate* State)
{
    int Avail;
    int FrameSize;

    if (!Packet)
        p->Codec.Packet.RefTime = TIME_UNKNOWN;
    else
    {
        // queue the new payload; carry over the packet timestamp if valid
        if (Packet->RefTime >= 0)
            p->Codec.Packet.RefTime = Packet->RefTime;
        BufferPack(&p->Buffer,0);
        BufferWrite(&p->Buffer,Packet->Data[0],Packet->Length,256);
    }

    Avail = p->Buffer.WritePos - p->Buffer.ReadPos;
    if (Avail < 1)
        return ERR_NEED_MORE_DATA;

    // raw .awb streams begin with the 9-byte magic "#!AMR-WB\n"; skip it
    if (p->Buffer.Data[p->Buffer.ReadPos] == '#' && Avail > 9 &&
        memcmp(p->Buffer.Data+p->Buffer.ReadPos,"#!AMR-WB\n",9)==0)
    {
        p->Buffer.ReadPos += 9;
        Avail -= 9;
    }

    // bits 3..6 of the header byte select the frame type / block size
    // NOTE(review): assumes block_size[] entries are >0 for every index,
    // otherwise ReadPos would not advance — TODO confirm table contents
    FrameSize = block_size[(p->Buffer.Data[p->Buffer.ReadPos] >> 3) & 0xF];
    if (Avail < FrameSize)
        return ERR_NEED_MORE_DATA;

    D_IF_decode(p->Decoder, p->Buffer.Data+p->Buffer.ReadPos, p->Synth, _good_frame);
    p->Buffer.ReadPos += FrameSize;

    // publish the synthesized samples on the codec's output packet
    p->Codec.Packet.Data[0] = p->Synth;
    p->Codec.Packet.Length = sizeof(p->Synth);
    return ERR_NONE;
}
// Decode one video frame through libavcodec (legacy avcodec_decode_video API).
// Handles player-driven frame dropping (SkipToKey/DropToKey/hurry_up),
// RealVideo slice reassembly, lazy output-format renegotiation, and publishes
// the decoded plane pointers on the output packet. Returns ERR_NONE on a
// decoded picture; ERR_NEED_MORE_DATA, ERR_DROPPING or ERR_INVALID_DATA otherwise.
static int Process( ffmpeg_video* p, const packet* Packet, const flowstate* State )
{
    int Picture; // libavcodec "got picture" output flag
    int Len;     // bytes consumed by the decoder, or <0 on error

    if (Packet)
    {
        // NOTE(review): State is dereferenced without a NULL check here,
        // while the no-packet branch below does test State — presumably a
        // valid State always accompanies a packet; confirm against callers.
        if (State->DropLevel)
        {
            if (State->DropLevel>1)
            {
                // hard drop: skip everything until the next keyframe
                p->SkipToKey = 1;
                p->DropToKey = 1;
                p->Dropping = 1;
                p->Context->hurry_up = 5; // legacy ffmpeg skip knob: maximum skipping
            }
            else
                p->Context->hurry_up = 1; // mild hurry-up
            if (!SupportDrop(p))
                p->Context->hurry_up = 0; // this codec can't skip safely; decode fully
        }
        else
            p->Context->hurry_up = 0;

        if (!Packet->Key && p->DropToKey)
        {
            // still waiting for a keyframe: tell downstream a frame was dropped
            if (p->Dropping)
            {
                flowstate DropState;
                DropState.CurrTime = TIME_UNKNOWN;
                DropState.DropLevel = 1;
                p->Codec.Out.Process(p->Codec.Out.Pin.Node,NULL,&DropState);
            }
            if (SupportDrop(p))
                avcodec_flush_buffers(p->Context); // discard decoder state before resync
            return ERR_DROPPING;
        }

        if (p->DropToKey)
            p->DropToKey = 0; // keyframe arrived; resume normal decoding

        if (Packet->RefTime >= 0)
            p->Codec.Packet.RefTime = Packet->RefTime;

        BufferPack(&p->Buffer,0);

        // RealVideo frames arrive as slices and must be reassembled first
        if(p->Codec.In.Pin.Node->Class==FOURCC('R','V','_','0')&&
            (p->Codec.In.Format.Format.Video.Pixel.FourCC == FOURCC('R','V','1','0')||
             p->Codec.In.Format.Format.Video.Pixel.FourCC == FOURCC('R','V','2','0')||
             p->Codec.In.Format.Format.Video.Pixel.FourCC == FOURCC('R','V','3','0')||
             p->Codec.In.Format.Format.Video.Pixel.FourCC == FOURCC('R','V','4','0')))
        {
            int32_t ret = rm_assemble_video_frame(p,Packet);
            if(ret != ERR_NONE)
                return ERR_NEED_MORE_DATA; // frame not complete yet
        }
        else
        {
            BufferWrite(&p->Buffer,Packet->Data[0],Packet->Length,2048);
        }
    }
    else
    {
        // no input packet: flushing buffered/delayed frames out of the decoder
        if (p->FrameTime<0)
            p->Codec.Packet.RefTime = TIME_UNKNOWN;
        else
        if (!State)
            p->Codec.Packet.RefTime += p->FrameTime; // extrapolate one frame ahead
        if (!State && p->Buffer.WritePos == p->Buffer.ReadPos)
            return ERR_NEED_MORE_DATA; // nothing buffered and nothing to flush
    }

    // clear pict_type so we can detect below whether the decoder actually
    // parsed a frame while we are skipping toward a keyframe
    if (p->SkipToKey)
        p->Picture->pict_type = 0;

    Len = avcodec_decode_video(p->Context, p->Picture, &Picture,
                               p->Buffer.Data + p->Buffer.ReadPos,
                               p->Buffer.WritePos - p->Buffer.ReadPos);

    if (Len < 0)
    {
        BufferDrop(&p->Buffer); // throw away the bad data to resync
        return ERR_INVALID_DATA;
    }

    p->Buffer.ReadPos += Len;

    if (!Picture)
    {
        // decoder consumed data but produced no picture yet
        if (p->SkipToKey>1 && p->Picture->pict_type)
            --p->SkipToKey;
        return ERR_NEED_MORE_DATA;
    }

    if (p->SkipToKey>0)
    {
        if ((!p->Picture->key_frame && p->Picture->pict_type) || p->SkipToKey>1)
        {
            if (p->SkipToKey>1)
                --p->SkipToKey;
            if (p->Dropping)
            {
                // report the dropped frame downstream
                flowstate DropState;
                DropState.CurrTime = TIME_UNKNOWN;
                DropState.DropLevel = 1;
                p->Codec.Out.Process(p->Codec.Out.Pin.Node,NULL,&DropState);
            }
            return ERR_DROPPING;
        }
        p->SkipToKey = 0; // reached a usable keyframe; stop skipping
    }

    // renegotiate the output connection if any decoder-reported property changed
    if (p->Context->pix_fmt != p->PixelFormat ||
        p->Context->sample_aspect_ratio.num != p->Aspect.num ||
        p->Context->sample_aspect_ratio.den != p->Aspect.den ||
        p->Context->width != p->Codec.Out.Format.Format.Video.Width ||
        p->Context->height != p->Codec.Out.Format.Format.Video.Height ||
        p->Picture->linesize[0] != p->Codec.Out.Format.Format.Video.Pitch)
    {
        if (!BuildOutputFormat(p))
            return ERR_INVALID_DATA;
        ConnectionUpdate(&p->Codec.Node,CODEC_OUTPUT,p->Codec.Out.Pin.Node,p->Codec.Out.Pin.No);
    }

    // publish the decoded picture's plane pointers on the output packet
    p->Codec.Packet.Data[0] = p->Picture->data[0];
    p->Codec.Packet.Data[1] = p->Picture->data[1];
    p->Codec.Packet.Data[2] = p->Picture->data[2];
    return ERR_NONE;
}
// Feed one (possibly fragmented) coded frame to the IDCT-based decoder.
// First tries to flush a pending decoded frame to the IDCT output; then
// either accumulates bytes and splits them into frames via p->FindNext
// (fragmented input) or decodes the packet payload directly. Returns
// ERR_NONE, ERR_NEED_MORE_DATA, ERR_BUFFER_FULL (caller must resend the
// packet) or another error code.
static int Process(codecidct* p, const packet* Packet, const flowstate* State)
{
    int Result;
    idct* IDCT = p->IDCT.Ptr;

    // dimensions are known but no IDCT buffers exist -> cannot decode
    if (p->IDCT.Count<=0 && p->IDCT.Width>0 && p->IDCT.Height>0)
        return ERR_INVALID_DATA;

    // NOTE(review): State is dereferenced unconditionally here — unlike
    // Packet it is presumably never NULL; confirm against callers.
    p->State.CurrTime = State->CurrTime;

    if (State->DropLevel > 1)
        Discontinuity(p);

    if (p->Show>=0) // pending frame?
    {
        // try to push the previously decoded frame downstream
        Result = IDCT->Send(IDCT,p->RefTime,&p->State);
        if (Result == ERR_BUFFER_FULL)
            return Result; // output still full; retry on the next call
        p->Show = -1;
    }

    if (!Packet) // end of file or dropped
        return IDCT->Null(IDCT,State,0);

    if ((p->In.Format.Video.Pixel.Flags & PF_FRAGMENTED) && p->FindNext)
    {
        // fragmented stream: frames may span packets, so accumulate bytes
        // and let FindNext locate complete frame boundaries
        bool_t Processed = 0; // has this packet's payload been buffered yet?
        if (p->RefTime >= 0)
            p->RefTime += p->FrameTime; // advance one frame by default
        for (;;)
        {
            if (!p->FindNext(p))
            {
                // no complete frame in the buffer yet
                if (Processed)
                {
                    Result = ERR_NEED_MORE_DATA;
                    break;
                }
                // append this packet's payload and search again
                p->FrameEnd -= p->Buffer.ReadPos; // keep FrameEnd valid across the pack
                BufferPack(&p->Buffer,0);
                BufferWrite(&p->Buffer,Packet->Data[0],Packet->Length,32768);
                Processed = 1;
                if (Packet->RefTime >= 0)
                {
                    p->RefTime = Packet->RefTime;
                    // if (p->IDCT.Count >= 3 && p->FrameTime>0 && p->RefTime >= p->FrameTime)
                    //     p->RefTime -= p->FrameTime;
                }
            }
            else
            {
                // complete frame found in [ReadPos,FrameEnd): decode or drop it;
                // soft-drop when the frame is already older than the clock
                p->State.DropLevel = p->RefTime >= 0 && State->CurrTime >= 0 &&
                                     p->RefTime < (State->CurrTime - p->DropTolerance);
                if (State->DropLevel > 1)
                {
                    p->IDCT.Ptr->Null(p->IDCT.Ptr,NULL,0); // hard drop: emit null frame
                    Result = ERR_NONE;
                }
                else
                    Result = p->Frame(p,p->Buffer.Data+p->Buffer.ReadPos,p->FrameEnd-p->Buffer.ReadPos);
                p->Buffer.ReadPos = p->FrameEnd;
                if (Result==ERR_NONE && p->Show>=0)
                {
                    if (!Processed)
                        Result = ERR_BUFFER_FULL; // resend packet next time
                    break;
                }
            }
        }
    }
    else
    {
        // whole frames arrive one per packet: decode the payload directly
        if (State->DropLevel > 1)
        {
            p->IDCT.Ptr->Null(p->IDCT.Ptr,NULL,0);
            Result = ERR_NONE;
        }
        else
        {
            p->State.DropLevel = State->DropLevel;
            p->RefTime = Packet->RefTime;
            // if (p->IDCT.Count >= 3 && p->FrameTime>0 && p->RefTime >= p->FrameTime)
            //     p->RefTime -= p->FrameTime;
            Result = p->Frame(p,Packet->Data[0],Packet->Length);
        }
    }

    // if a frame became ready, try to send it now; keep it pending when full
    if (p->Show>=0 && IDCT->Send(IDCT,p->RefTime,&p->State) != ERR_BUFFER_FULL)
        p->Show = -1;

    return Result;
}