// MT safe int sio_readexpect(std::string &msg, const char * prefix, int * preflen, int timeout) { std::deque<std::string> rejected; // true timeout int64 stoptime = time64() + timeout*1000; int rv; while (stoptime > time64()) { int delta = (int)( (stoptime - time64()) / 1000 ); rv=sio_read(msg, (delta>0)?delta:0); if (rv <= 0) break; // error or timeout rv = sio_matchpref(msg.c_str(), prefix, preflen); if (rv > 0) break; // found it rejected.push_back(msg); //printf("rejected: [%s] (%d)\n", msg.c_str(), rejected.size()); }; // push back the unanswered messages... std::string tmp; while (!rejected.empty()) { tmp = rejected.back(); rejected.pop_back(); postponed.push_front(tmp); //printf("postponed: [%s] (%d)\n", tmp.c_str(), postponed.size()); }; //printf("MESSAGE [%s] is number %d in list of\n%s\n", msg.c_str(), rv, prefix); return rv; };
/* Modula-3 binding for the C library time()/time64(): returns the current
 * calendar time and, when `tloc` is non-null, also stores the result back
 * through `tloc`, round-tripping via the platform's native time type. */
M3_DLL_EXPORT m3_time_t __cdecl Utime__time(m3_time_t* tloc)
{
#ifdef _TIME64_T
    time64_t native = tloc ? (time64_t)*tloc : 0;
    const time64_t now = time64(tloc ? &native : 0);
#else
    time_t native = tloc ? (time_t)*tloc : 0;
    const time_t now = time(tloc ? &native : 0);
#endif
    if (tloc)
        *tloc = native;
    return now;
}
/**
 * @brief Encoder thread body: pulls raw video frames from the provider
 *        queue, rescales/reformats them with libswscale, encodes them as
 *        H.264 via libavcodec and distributes the encoded frames.  The
 *        first encoded frame is parsed for SEI/SPS/PPS NAL units to seed
 *        the stream parameters (mSei/mSps/mPps, AVC level/profile).
 *
 * @return thread exit status
 */
int H264Encoder::run()
{
    // TODO - This section needs to be rewritten to read the configuration from the values saved
    // for the streams via the web gui
    AVDictionary *opts = NULL;
    //avSetH264Preset( &opts, "default" );
    //avSetH264Profile( &opts, "main" );
    //avDictSet( &opts, "level", "4.1" );
    avSetH264Preset( &opts, "ultrafast" );
    //avSetH264Profile( &opts, "baseline" );
    avDictSet( &opts, "level", "31" );
    avDictSet( &opts, "g", "24" );   // GOP length, matches gop_size below
    //avDictSet( &opts, "b", (int)mBitRate );
    //avDictSet( &opts, "bitrate", (int)mBitRate );
    //avDictSet( &opts, "crf", "24" );
    //avDictSet( &opts, "framerate", (double)mFrameRate );
    //avDictSet( &opts, "fps", (double)mFrameRate );
    //avDictSet( &opts, "r", (double)mFrameRate );
    //avDictSet( &opts, "timebase", "1/90000" );
    avDumpDict( opts );

    // Make sure ffmpeg is compiled with libx264 support
    AVCodec *codec = avcodec_find_encoder( CODEC_ID_H264 );
    if ( !codec )
        Fatal( "Can't find encoder codec" );

    // Configure the encoder context from the member settings
    mCodecContext = avcodec_alloc_context3( codec );
    mCodecContext->width = mWidth;
    mCodecContext->height = mHeight;
    //mCodecContext->time_base = TimeBase( 1, 90000 );
    mCodecContext->time_base = mFrameRate.timeBase();
    mCodecContext->bit_rate = mBitRate;
    mCodecContext->pix_fmt = mPixelFormat;
    mCodecContext->gop_size = 24;
    //mCodecContext->max_b_frames = 1;

    Debug( 2, "Time base = %d/%d", mCodecContext->time_base.num, mCodecContext->time_base.den );
    Debug( 2, "Pix fmt = %d", mCodecContext->pix_fmt );

    /* open it */
    if ( avcodec_open2( mCodecContext, codec, &opts ) < 0 )
        Fatal( "Unable to open encoder codec" );
    avDumpDict( opts );   // any options left in `opts` were not consumed by the codec

    AVFrame *inputFrame = avcodec_alloc_frame();

    Info( "%s:Waiting", cidentity() );
    if ( waitForProviders() )
    {
        Info( "%s:Waited", cidentity() );
        // Find the source codec context
        uint16_t inputWidth = videoProvider()->width();
        uint16_t inputHeight = videoProvider()->height();
        PixelFormat inputPixelFormat = videoProvider()->pixelFormat();
        //FrameRate inputFrameRate = videoProvider()->frameRate();

        //Info( "CONVERT: %d-%dx%d -> %d-%dx%d",
        //inputPixelFormat, inputWidth, inputHeight,
        //mPixelFormat, mWidth, mHeight
        //);
        // Make space for anything that is going to be output
        AVFrame *outputFrame = avcodec_alloc_frame();
        ByteBuffer outputBuffer;
        outputBuffer.size( avpicture_get_size( mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height ) );
        // Point outputFrame's data planes into outputBuffer
        avpicture_fill( (AVPicture *)outputFrame, outputBuffer.data(), mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height );

        // Prepare for image format and size conversions
        struct SwsContext *convertContext = sws_getContext( inputWidth, inputHeight, inputPixelFormat, mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL );
        if ( !convertContext )
            Fatal( "Unable to create conversion context for encoder" );

        int outSize = 0;
        uint64_t timeInterval = mFrameRate.intervalUsec(); // output frame spacing in usec
        uint64_t currTime = time64();
        uint64_t nextTime = currTime;
        //outputFrame->pts = currTime;
        outputFrame->pts = 0;
        // PTS step per output frame in a 90kHz clock
        uint32_t ptsInterval = 90000/mFrameRate.toInt();
        //uint32_t ptsInterval = mFrameRate.intervalPTS( mCodecContext->time_base );
        while ( !mStop )
        {
            // Synchronise the output with the desired output frame rate
            while ( currTime < nextTime )
            {
                currTime = time64();
                usleep( 1000 );
            }
            nextTime += timeInterval;

            // Grab the most recent queued frame (if any) and drop the rest;
            // frames are only taken once the initial frame exists or there
            // are consumers attached
            FramePtr framePtr;
            mQueueMutex.lock();
            if ( !mFrameQueue.empty() )
            {
                if ( mInitialFrame.empty() || !mConsumers.empty() )
                {
                    FrameQueue::iterator iter = mFrameQueue.begin();
                    framePtr = *iter;
                }
                mFrameQueue.clear();
            }
            mQueueMutex.unlock();

            if ( framePtr.get() )
            {
                const FeedFrame *frame = framePtr.get();
                const VideoFrame *inputVideoFrame = dynamic_cast<const VideoFrame *>(frame);
                //Info( "Provider: %s, Source: %s, Frame: %d", inputVideoFrame->provider()->cidentity(), inputVideoFrame->originator()->cidentity(), inputVideoFrame->id() );
                //Info( "PF:%d @ %dx%d", inputVideoFrame->pixelFormat(), inputVideoFrame->width(), inputVideoFrame->height() );

                // Wrap the provider's buffer in inputFrame without copying
                avpicture_fill( (AVPicture *)inputFrame, inputVideoFrame->buffer().data(), inputPixelFormat, inputWidth, inputHeight );
                //outputFrame->pts = currTime;
                //Debug( 5, "PTS %jd", outputFrame->pts );

                // Reformat the input frame to fit the desired output format
                //Info( "SCALE: %d -> %d", int(inputFrame->data[0])%16, int(outputFrame->data[0])%16 );
                if ( sws_scale( convertContext, inputFrame->data, inputFrame->linesize, 0, inputHeight, outputFrame->data, outputFrame->linesize ) < 0 )
                    Fatal( "Unable to convert input frame (%d@%dx%d) to output frame (%d@%dx%d) at frame %ju", inputPixelFormat, inputWidth, inputHeight, mCodecContext->pix_fmt, mCodecContext->width, mCodecContext->height, mFrameCount );

                // Encode the image
                outSize = avcodec_encode_video( mCodecContext, outputBuffer.data(), outputBuffer.capacity(), outputFrame );
                Debug( 5, "Encoding reports %d bytes", outSize );
                if ( outSize > 0 )
                {
                    //Info( "CPTS: %jd", mCodecContext->coded_frame->pts );
                    outputBuffer.size( outSize );
                    //Debug( 5, "PTS2 %jd", mCodecContext->coded_frame->pts );
                    if ( mInitialFrame.empty() )
                    {
                        // First encoded frame: walk the NAL units to capture
                        // SEI/SPS/PPS and the AVC level/profile bytes
                        Debug( 3, "Looking for H.264 stream info" );
                        const uint8_t *startPos = outputBuffer.head();
                        startPos = h264StartCode( startPos, outputBuffer.tail() );
                        while ( startPos < outputBuffer.tail() )
                        {
                            // Skip over the zero bytes of the start code
                            while( !*(startPos++) )
                                ;
                            const uint8_t *nextStartPos = h264StartCode( startPos, outputBuffer.tail() );
                            int frameSize = nextStartPos-startPos;
                            unsigned char type = startPos[0] & 0x1F;  // NAL unit type (low 5 bits)
                            unsigned char nri = startPos[0] & 0x60;   // nal_ref_idc bits
                            Debug( 1, "Type %d, NRI %d (%02x)", type, nri>>5, startPos[0] );
                            if ( type == NAL_SEI )
                            {
                                // SEI
                                mSei.assign( startPos, frameSize );
                            }
                            else if ( type == NAL_SPS )
                            {
                                // SPS
                                Hexdump( 2, startPos, frameSize );
                                mSps.assign( startPos, frameSize );
                                if ( frameSize < 4 )
                                    Panic( "H.264 NAL type 7 frame too short (%d bytes) to extract level/profile", frameSize );
                                mAvcLevel = startPos[3];
                                mAvcProfile = startPos[1];
                                Debug( 2, "Got AVC level %d, profile %d", mAvcLevel, mAvcProfile );
                            }
                            else if ( type == NAL_PPS )
                            {
                                // PPS
                                Hexdump( 2, startPos, frameSize );
                                mPps.assign( startPos, frameSize );
                            }
                            startPos = nextStartPos;
                        }
                        mInitialFrame = outputBuffer;
                        //VideoFrame *outputVideoFrame = new VideoFrame( this, ++mFrameCount, mCodecContext->coded_frame->pts, mInitialFrame );
                    }
                    else
                    {
                        //av_rescale_q(cocontext->coded_frame->pts, cocontext->time_base, videostm->time_base);
                        VideoFrame *outputVideoFrame = new VideoFrame( this, ++mFrameCount, mCodecContext->coded_frame->pts, outputBuffer );
                        distributeFrame( FramePtr( outputVideoFrame ) );
                    }
                }
                // NOTE(review): pts only advances when a frame was dequeued, so
                // output timestamps stall while the queue is empty — confirm intended.
                outputFrame->pts += ptsInterval; ///< FIXME - This can't be right, but it works... }
// Thin forwarding shim: exposes time64() under a wrapper carrying the
// tcat exception specification expected by its callers.
uint64 time64wrap() throw(tcat::exception::base)
{
    const uint64 now = time64();
    return now;
}
/**
 * @brief Output thread body: drains the frame queue, runs an alarm state
 *        machine (IDLE -> ALARM -> ALERT -> IDLE) driven by motion frames,
 *        and writes the accompanying H.264 frames to an MP4 file that is
 *        opened on the first keyframe after an alarm and closed on the
 *        first keyframe after returning to IDLE.
 *
 * @return 0 on normal termination
 */
int Mp4FileOutput::run()
{
    //const int MAX_EVENT_HEAD_AGE = 2; ///< Number of seconds of video before event to save
    const int MAX_EVENT_TAIL_AGE = 3; ///< Number of seconds of video after event to save

    typedef enum { IDLE, PREALARM, ALARM, ALERT } AlarmState;

    if ( waitForProviders() )
    {
        /* auto detect the output format from the name. default is mpeg. */
        AVOutputFormat *outputFormat = av_guess_format( mExtension.c_str(), NULL, NULL );
        if ( !outputFormat )
            Fatal( "Could not deduce output format from '%s'", mExtension.c_str() );

        //AVFormatContext *outputContext = openFile( outputFormat );
        AVFormatContext *outputContext = NULL;   // non-NULL only while a file is open

        double videoTimeOffset = 0.0L;   // seconds of video written since file open
        uint64_t videoFrameCount = 0;    // frames written since file open (drives pts)
        AlarmState alarmState = IDLE;
        uint64_t alarmTime = 0;          // time64() of the most recent alarmed frame
        int eventCount = 0;
        while( !mStop )
        {
            while( !mStop )
            {
                mQueueMutex.lock();
                if ( !mFrameQueue.empty() )
                {
                    for ( FrameQueue::iterator iter = mFrameQueue.begin(); iter != mFrameQueue.end(); iter++ )
                    {
                        const FeedFrame *frame = iter->get();

                        Debug( 3, "Frame type %d", frame->mediaType() );
                        // NOTE(review): VIDEO-typed frames are treated as motion
                        // detection frames, everything else as encoded H.264 data
                        // (see the else branch) — confirm against the providers.
                        if ( frame->mediaType() == FeedFrame::FRAME_TYPE_VIDEO )
                        {
                            // This is an alarm detection frame
                            const MotionFrame *motionFrame = dynamic_cast<const MotionFrame *>(frame);
                            //const VideoProvider *provider = dynamic_cast<const VideoProvider *>(frame->provider());

                            AlarmState lastAlarmState = alarmState;
                            uint64_t now = time64();

                            Debug( 3, "Motion frame, alarmed %d", motionFrame->alarmed() );
                            if ( motionFrame->alarmed() )
                            {
                                alarmState = ALARM;
                                alarmTime = now;
                                if ( lastAlarmState == IDLE )
                                {
                                    // Create new event: snapshot the triggering frame as a JPEG
                                    eventCount++;
                                    std::string path = stringtf( "%s/img-%s-%d-%ju.jpg", mLocation.c_str(), mName.c_str(), eventCount, motionFrame->id() );
                                    //Info( "PF:%d @ %dx%d", motionFrame->pixelFormat(), motionFrame->width(), motionFrame->height() );
                                    Image image( motionFrame->pixelFormat(), motionFrame->width(), motionFrame->height(), motionFrame->buffer().data() );
                                    image.writeJpeg( path.c_str() );
                                }
                            }
                            else if ( lastAlarmState == ALARM )
                            {
                                // Alarm just cleared; keep recording the tail
                                alarmState = ALERT;
                            }
                            else if ( lastAlarmState == ALERT )
                            {
                                Debug( 3, "Frame age %.2lf", frame->age( alarmTime ) );
                                // NOTE(review): age() relative to alarmTime is negated
                                // here, implying age() returns a negative value for
                                // frames after alarmTime — verify in FeedFrame::age.
                                if ( (0.0l-frame->age( alarmTime )) > MAX_EVENT_TAIL_AGE )
                                    alarmState = IDLE;
                            }
                            else
                            {
                                alarmState = IDLE;
                            }
                            Debug( 3, "Alarm state %d (%d)", alarmState, lastAlarmState );
                        }
                        else
                        {
                            // Encoded H.264 frame: scan its NAL units to detect keyframes
                            bool keyFrame = false;
                            const uint8_t *startPos = h264StartCode( frame->buffer().head(), frame->buffer().tail() );
                            while ( startPos < frame->buffer().tail() )
                            {
                                // Skip over the zero bytes of the start code
                                while( !*(startPos++) )
                                    ;
                                const uint8_t *nextStartPos = h264StartCode( startPos, frame->buffer().tail() );
                                int frameSize = nextStartPos-startPos;
                                unsigned char type = startPos[0] & 0x1F;  // NAL unit type
                                unsigned char nri = startPos[0] & 0x60;   // nal_ref_idc bits
                                Debug( 3, "Frame Type %d, NRI %d (%02x), %d bytes, ts %jd", type, nri>>5, startPos[0], frameSize, frame->timestamp() );
                                if ( type == NAL_IDR_SLICE )
                                    keyFrame = true;
                                startPos = nextStartPos;
                            }
                            // Advance the running time by one frame interval
                            videoTimeOffset += (double)mVideoParms.frameRate().num / mVideoParms.frameRate().den;
                            if ( keyFrame )
                            {
                                // We can do file opening/closing now
                                if ( alarmState != IDLE && !outputContext )
                                {
                                    outputContext = openFile( outputFormat );
                                    videoTimeOffset = 0.0L;
                                    videoFrameCount = 0;
                                }
                                else if ( alarmState == IDLE && outputContext )
                                {
                                    closeFile( outputContext );
                                    outputContext = NULL;
                                }
                            }
                            /*if ( keyFrame && (videoTimeOffset >= mMaxLength) ) { closeFile( outputContext ); outputContext = openFile( outputFormat ); videoTimeOffset = 0.0L; videoFrameCount = 0; }*/
                            if ( outputContext )
                            {
                                AVStream *videoStream = outputContext->streams[0];
                                AVCodecContext *videoCodecContext = videoStream->codec;

                                AVPacket packet;
                                av_init_packet(&packet);

                                packet.flags |= keyFrame ? AV_PKT_FLAG_KEY : 0;
                                packet.stream_index = videoStream->index;
                                packet.data = (uint8_t*)frame->buffer().data();
                                packet.size = frame->buffer().size();
                                //packet.pts = packet.dts = AV_NOPTS_VALUE;
                                // Rescale the frame count into the stream's codec time base
                                packet.pts = packet.dts = (videoFrameCount * mVideoParms.frameRate().num * videoCodecContext->time_base.den) / (mVideoParms.frameRate().den * videoCodecContext->time_base.num);
                                Info( "vfc: %ju, vto: %.2lf, kf: %d, pts: %jd", videoFrameCount, videoTimeOffset, keyFrame, packet.pts );
                                int result = av_interleaved_write_frame(outputContext, &packet);
                                if ( result != 0 )
                                    Fatal( "Error while writing video frame: %d", result );
                            }
                            videoFrameCount++;
                        }
                    }
                    mFrameQueue.clear();
                }
                mQueueMutex.unlock();
                checkProviders();
                usleep( INTERFRAME_TIMEOUT );
            }
        }
        if ( outputContext )
            closeFile( outputContext );
    }
    cleanup();
    return 0;
}
/**
 * @brief Parse and dispatch one RTSP/1.0 request: validates the request
 *        line and headers, then handles OPTIONS/DESCRIBE locally (creating
 *        or reusing a pooled encoder for DESCRIBE) and forwards
 *        SETUP/PLAY/GET_PARAMETER/TEARDOWN to the RTSP session layer.
 *
 * @param request  the complete raw request text
 *
 * @return true if a (successful) response was sent, false on parse errors
 *         or when an error response was generated
 */
bool RtspConnection::handleRequest( const std::string &request )
{
    Debug( 2, "Handling RTSP request: %s (%zd bytes)", request.c_str(), request.size() );
    StringTokenList lines( request, "\r\n" );
    if ( lines.size() <= 0 )
    {
        Error( "Unable to split request '%s' into tokens", request.c_str() );
        return( false );
    }
    // Request line must be: <method> <url> <version>
    StringTokenList parts( lines[0], " " );
    if ( parts.size() != 3 )
    {
        Error( "Unable to split request part '%s' into tokens", lines[0].c_str() );
        return( false );
    }
    std::string requestType = parts[0];
    Debug( 4, "Got request '%s'", requestType.c_str() );
    std::string requestUrl = parts[1];
    Debug( 4, "Got requestUrl '%s'", requestUrl.c_str() );
    std::string requestVer = parts[2];
    Debug( 4, "Got requestVer '%s'", requestVer.c_str() );
    if ( requestVer != "RTSP/1.0" )
    {
        Error( "Unexpected RTSP version '%s'", requestVer.c_str() );
        return( false );
    }

    // Extract headers from request
    Headers requestHeaders;
    for ( int i = 1; i < lines.size(); i++ )
    {
        // NOTE(review): requires exactly one ": " split — a header whose
        // value itself contains ": " would be rejected here.
        StringTokenList parts( lines[i], ": " );
        if ( parts.size() != 2 )
        {
            Error( "Unable to split request header '%s' into tokens", lines[i].c_str() );
            return( false );
        }
        Debug( 4, "Got header '%s', value '%s'", parts[0].c_str(), parts[1].c_str() );
        requestHeaders.insert( Headers::value_type( parts[0], parts[1] ) );
    }

    // CSeq is mandatory and is echoed on every response
    if ( requestHeaders.find("CSeq") == requestHeaders.end() )
    {
        Error( "No CSeq header found" );
        return( false );
    }
    Debug( 4, "Got sequence number %s", requestHeaders["CSeq"].c_str() );

    // Session id is parsed as hex and used to look up an existing session
    uint32_t session = 0;
    if ( requestHeaders.find("Session") != requestHeaders.end() )
    {
        Debug( 4, "Got session header, '%s', passing to session", requestHeaders["Session"].c_str() );
        session = strtol( requestHeaders["Session"].c_str(), NULL, 16 );
    }

    Headers responseHeaders;
    responseHeaders.insert( Headers::value_type( "CSeq", requestHeaders["CSeq"] ) );

    if ( requestType == "OPTIONS" )
    {
        responseHeaders.insert( Headers::value_type( "Public", "DESCRIBE, SETUP, PLAY, GET_PARAMETER, TEARDOWN" ) );
        return( sendResponse( responseHeaders ) );
    }
    else if ( requestType == "DESCRIBE" )
    {
        FeedProvider *provider = validateRequestUrl( requestUrl );
        if ( !provider )
        {
            sendResponse( responseHeaders, "", 404, "Not Found" );
            return( false );
        }
        const VideoProvider *videoProvider = dynamic_cast<const VideoProvider *>(provider);
        // Hard-coded stream parameters; codec fixed to MPEG4 so the H.264
        // branches below are currently unreachable
        int codec = AV_CODEC_ID_MPEG4;
        int width = videoProvider->width();
        int height = videoProvider->height();
        FrameRate frameRate = 15;
        //FrameRate frameRate = videoProvider->frameRate();
        int bitRate = 90000;
        int quality = 70;

        // SDP session-level preamble; media lines come from the encoder
        std::string sdpFormatString =
            "v=0\r\n"
            "o=- %jd %jd IN IP4 %s\r\n"
            "s=ZoneMinder Stream\r\n"
            "i=Media Streamers\r\n"
            "c=IN IP4 0.0.0.0\r\n"
            "t=0 0\r\n"
            "a=control:*\r\n"
            "a=range:npt=0.000000-\r\n";
        uint64_t now64 = time64();
        char hostname[HOST_NAME_MAX] = "";
        if ( gethostname( hostname, sizeof(hostname) ) < 0 )
            Fatal( "Can't gethostname: %s", strerror(errno) );
        std::string sdpString = stringtf( sdpFormatString, now64, now64, hostname );
        if ( codec == AV_CODEC_ID_H264 )
        {
            if ( provider->cl4ss() == "RawH264Input" )
            {
                // Source is already H.264: reuse or create a pooled relay
                std::string encoderKey = H264Relay::getPoolKey( provider->identity(), width, height, frameRate, bitRate, quality );
                if ( !(mEncoder = Encoder::getPooledEncoder( encoderKey )) )
                {
                    H264Relay *h264Relay = NULL;
                    mEncoder = h264Relay = new H264Relay( provider->identity(), width, height, frameRate, bitRate, quality );
                    mEncoder->registerProvider( *provider );
                    Encoder::poolEncoder( mEncoder );
                    h264Relay->start();
                }
                sdpString += mEncoder->sdpString( 1 ); // XXX - Should be variable
                responseHeaders.insert( Headers::value_type( "Content-length", stringtf( "%zd", sdpString.length() ) ) );
            }
            else
            {
                // Source needs transcoding: reuse or create a pooled H.264 encoder
                std::string encoderKey = H264Encoder::getPoolKey( provider->identity(), width, height, frameRate, bitRate, quality );
                if ( !(mEncoder = Encoder::getPooledEncoder( encoderKey )) )
                {
                    H264Encoder *h264Encoder = NULL;
                    mEncoder = h264Encoder = new H264Encoder( provider->identity(), width, height, frameRate, bitRate, quality );
                    mEncoder->registerProvider( *provider );
                    Encoder::poolEncoder( mEncoder );
                    h264Encoder->start();
                }
                sdpString += mEncoder->sdpString( 1 ); // XXX - Should be variable
                responseHeaders.insert( Headers::value_type( "Content-length", stringtf( "%zd", sdpString.length() ) ) );
            }
        }
        else if ( codec == AV_CODEC_ID_MPEG4 )
        {
            // Reuse or create a pooled MPEG-4 encoder
            std::string encoderKey = MpegEncoder::getPoolKey( provider->identity(), width, height, frameRate, bitRate, quality );
            if ( !(mEncoder = Encoder::getPooledEncoder( encoderKey )) )
            {
                MpegEncoder *mpegEncoder = NULL;
                mEncoder = mpegEncoder = new MpegEncoder( provider->identity(), width, height, frameRate, bitRate, quality );
                mEncoder->registerProvider( *provider );
                Encoder::poolEncoder( mEncoder );
                mpegEncoder->start();
            }
            sdpString += mEncoder->sdpString( 1 ); // XXX - Should be variable
            responseHeaders.insert( Headers::value_type( "Content-length", stringtf( "%zd", sdpString.length() ) ) );
        }
        return( sendResponse( responseHeaders, sdpString ) );
    }
    else if ( requestType == "SETUP" )
    {
        // These commands are handled by RTSP session so pass them on and send any required responses
        RtspSession *rtspSession = 0;
        if ( session )
        {
            rtspSession = mRtspController->getSession( session );
        }
        else
        {
            // No session header: start a new session bound to this connection's encoder
            rtspSession = mRtspController->newSession( this, mEncoder );
        }
        if ( rtspSession->recvRequest( requestType, requestUrl, requestHeaders, responseHeaders ) )
            return( sendResponse( responseHeaders ) );
        return( false );
    }
    else if ( requestType == "PLAY" || requestType == "GET_PARAMETER" || requestType == "TEARDOWN" )
    {
        // These commands are handled by RTSP session so pass them on and send any required responses
        RtspSession *rtspSession = 0;
        if ( session )
        {
            rtspSession = mRtspController->getSession( session );
            if ( rtspSession && rtspSession->recvRequest( requestType, requestUrl, requestHeaders, responseHeaders ) )
                return( sendResponse( responseHeaders ) );
        }
        return( sendResponse( responseHeaders, "", 454, "Session Not Found" ) );
    }
    Error( "Unrecognised RTSP command '%s'", requestType.c_str() );
    return( sendResponse( responseHeaders, "", 405, "Method not implemented" ) );
}
int sio_read(std::string &result, int timeout, int pollfd) { int once = 1; GLOB_LOCK(); if (!postponed.empty()) { result = postponed.front(); postponed.pop_front(); //printf("accepted: [%s] (%d)\n", result.c_str(), postponed.size()); GLOB_UNLOCK(); return result.length(); }; if (inside_read) { GLOB_UNLOCK(); sio_write(SIO_ERROR, "recursive sio_read"); return -1; }; int64 stoptime = time64() + timeout*1000; while (once || (stoptime > time64())) { once = 0; int eol_pos = readbuf.find(eol_char); if (eol_pos != std::string::npos) { result = readbuf.substr(0, eol_pos); readbuf = readbuf.substr(eol_pos+1); if (sio_commdump & 2) { printf("READ\t%s\n", result.c_str()); }; GLOB_UNLOCK(); int rv = _sio_handlemsg(result); // might call other functs if (rv == 0) { GLOB_LOCK(); continue; }; if (rv < 0) return rv; return result.length(); }; inside_read = 1; GLOB_UNLOCK(); // wait for some more data... fd_set fdr; struct timeval tv; int delta = (int)( (stoptime - time64()) / 1000 ); if (delta < 0) delta = 0; tv.tv_sec = delta/1000; tv.tv_usec = (delta%1000)*1000; FD_ZERO(&fdr); FD_SET(ser_fd_read, &fdr); if (pollfd!=-1) FD_SET(pollfd, &fdr); int rv = select(max(ser_fd_read, pollfd)+1, &fdr, 0, 0, (timeout>=0)?&tv:0); if ( (rv<0) && (errno != EINTR) ) { inside_read = 0; return -1; // error }; if (pollfd != -1) if (FD_ISSET(pollfd, &fdr)) { inside_read = 0; return -3; // pollfd is select'ed }; GLOB_LOCK(); inside_read = 0; char bf[257]; rv = read(ser_fd_read, bf, sizeof(bf)-1); if ( (rv<=0) && (errno!=EINTR) && (errno != EAGAIN)) { GLOB_UNLOCK(); return -1; // error or timeout }; if (rv<0) continue; if (rv==0) { GLOB_UNLOCK(); return -1; // EOF from server }; bf[rv]=0; //printf("\n\nREAD[[[%s]]]\n", bf); readbuf = readbuf + bf; }; GLOB_UNLOCK(); return 0; // timeout };