Code example #1
File: AudioResampler.cpp Project: mmanley/Antares
void
AudioResampler::SetSource(AudioReader* source)
{
	if (!source) {
		TRACE("AudioResampler::SetSource() - NULL source\n");
		return;
	}
		
	if (source->Format().type != B_MEDIA_RAW_AUDIO) {
		TRACE("AudioResampler::SetSource() - not B_MEDIA_RAW_AUDIO\n");
		return;
	}

	uint32 hostByteOrder
		= (B_HOST_IS_BENDIAN) ? B_MEDIA_BIG_ENDIAN : B_MEDIA_LITTLE_ENDIAN;
	if (source->Format().u.raw_audio.byte_order != hostByteOrder) {
		TRACE("AudioResampler::SetSource() - not host byte order\n");
		return;
	}
	
	float frameRate = FrameRate();
		// don't overwrite previous audio frame rate
	fSource = source;
	fFormat = source->Format();
	fFormat.u.raw_audio.frame_rate = frameRate;
}
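
A minimal caller-side sketch of the guard clauses above; the reader and its construction are hypothetical, only SetSource() and FrameRate() come from the listing:

// "reader" stands for any AudioReader whose Format() reports host-endian
// B_MEDIA_RAW_AUDIO; for NULL or any other format SetSource() only traces
// and returns, leaving fSource untouched.
AudioReader* reader = NULL;  // obtained elsewhere (hypothetical)
AudioResampler resampler;    // assumes a default constructor exists
resampler.SetSource(reader); // NULL here: traced and ignored
// On success the resampler adopts the reader's format but keeps its own
// previously configured output frame rate (FrameRate()).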
Code example #2
File: utils.cpp Project: janelia-idf/bias
FrameRateList getListOfFrameRates()
{
    FrameRateList list;
    for (int i=0; i < int(NUMBER_OF_FRAMERATE); i++)
    {
        list.push_back(FrameRate(i));
    }
    return list;
}
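
A short consumption sketch; getFrameRateString() is a hypothetical helper assumed to map each FrameRate enum value to printable text:

#include <iostream>

void printFrameRates()
{
    FrameRateList rates = getListOfFrameRates();
    for (FrameRateList::iterator it = rates.begin(); it != rates.end(); ++it)
    {
        std::cout << getFrameRateString(*it) << std::endl; // hypothetical helper
    }
}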
Code example #3
File: FrameRate.cpp Project: simophin/remotevision
FrameRate FrameRate::fromString(const String & str)
{
	std::vector<String> args;
	args = Utils::split< std::vector<String>  >(str,'/');
	if (args.size() != 2) return FrameRate();
	FrameRate ret;
	ret.num = Utils::stringToInteger(args.at(0));
	ret.den = Utils::stringToInteger(args.at(1));
	return ret;
}
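
fromString() accepts exactly one "numerator/denominator" pair; anything else falls back to a default-constructed FrameRate. A round-trip sketch using only the num/den fields visible above:

FrameRate ntsc = FrameRate::fromString("30000/1001"); // NTSC: ~29.97 fps
// ntsc.num == 30000, ntsc.den == 1001
FrameRate bad = FrameRate::fromString("29.97");       // no '/': default FrameRate()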
Code example #4
void
VideoDeviceImpl::setDeviceParams(const DeviceParams& params)
{
    if (params.width and params.height) {
        auto pmt = capMap_.at(std::make_pair(params.width, params.height));
        if (pmt != nullptr) {
            ((VIDEOINFOHEADER*) pmt->pbFormat)->AvgTimePerFrame = (FrameRate(1e7) / params.framerate).real();
            if (FAILED(cInterface->streamConf_->SetFormat(pmt))) {
                RING_ERR("Could not set settings.");
            }
        }
    }
}
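
DirectShow's AvgTimePerFrame field is measured in 100 ns units, which is why the code divides FrameRate(1e7) by the requested rate. The arithmetic spelled out with plain integers (no project types):

// frame interval in 100 ns units = 1e7 / fps
//   30 fps                 -> 10000000 / 30           ~= 333333 (33.3 ms)
//   29.97 fps (30000/1001) -> 10000000 * 1001 / 30000 ~= 333667
long long avgTimePerFrame = (long long)(1e7 / 30.0 + 0.5); // 333333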
Code example #5
File: OnLoop.cpp Project: d1m1tur/Tower-Defense
void Game::OnLoop()
{
    //Gets the start ticks
    OnStartup();

    //Set default cursor every frame
    SDL_SetCursor(SDL_GetDefaultCursor());


    if(LeftButtonPressed == true)
    {
        Map1.building = true;
    }

    //Caps the frame rate depending on the ticks that have passed
    FrameRate(FPS);
}
Code example #6
File: example6.cpp Project: jianhongwei/ozonebase
//
// Run motion detection on a saved file, provide debug images in matrix mode over HTTP
//
int main( int argc, const char *argv[] )
{
    debugInitialise( "example6", "", 5 );

    Info( "Starting" );

    avInit();

    Application app;

    NetworkAVInput input( "input", "/tmp/movie.mp4" );
    app.addThread( &input );

    MotionDetector motionDetector( "modect" );
    motionDetector.registerProvider( input );
    //EventRecorder eventRecorder( "/transfer/ozx" );
    app.addThread( &motionDetector );

    MatrixVideo matrixVideo( "matrix", PIX_FMT_YUV420P, 640, 480, FrameRate( 1, 10 ), 2, 2 );
    matrixVideo.registerProvider( *motionDetector.refImageSlave() );
    matrixVideo.registerProvider( *motionDetector.compImageSlave() );
    matrixVideo.registerProvider( *motionDetector.deltaImageSlave() );
    matrixVideo.registerProvider( *motionDetector.varImageSlave() );
    app.addThread( &matrixVideo );

    Listener listener;
    app.addThread( &listener );

    HttpController httpController( "p8080", 8080 );
    listener.addController( &httpController );

    httpController.addStream( "file", input );
    httpController.addStream( "debug", SlaveVideo::cClass() );
    httpController.addStream( "debug", matrixVideo );
    httpController.addStream( "debug", motionDetector );

    app.run();
}
Code example #7
File: Modulation.cpp Project: idiap/tracter
Tracter::Modulation::Modulation(
    Component<float>* iInput, const char* iObjectName
)
{
    mObjectName = iObjectName;
    mInput = iInput;
    Connect(iInput);

    mFrame.size = 1;
    assert(iInput->Frame().size == 1);

    /* For a 100Hz frame rate and bin 1 = 4Hz, we have nBins = 100/4 =
     * 25 */
    float freq = GetEnv("Freq", 4.0f);
    int bin = GetEnv("Bin", 1);
    mNBins = (int)(FrameRate() / freq + 0.5f);
    mDFT.SetRotation(bin, mNBins);
    mLookAhead = mNBins / 2; // Round down
    mLookBehind = mNBins - mLookAhead - 1;
    MinSize(mInput, mNBins, mLookAhead);
    mIndex = -1;

    Verbose(2, "NBins=%d (-%d+%d)\n", mNBins, mLookBehind, mLookAhead);
}
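
With the defaults shown (100 Hz frame rate, Freq = 4 Hz) the window is 25 bins, split into 12 look-ahead and 12 look-behind frames around the current one; the same split recomputed standalone:

int nBins      = (int)(100.0f / 4.0f + 0.5f); // 25 bins
int lookAhead  = nBins / 2;                   // 12 (rounds down)
int lookBehind = nBins - lookAhead - 1;       // 12; plus the current frame = 25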
Code example #8
File: AVFormatReader.cpp Project: RAZVOR/haiku
status_t
AVFormatReader::Stream::Init(int32 virtualIndex)
{
	TRACE("AVFormatReader::Stream::Init(%ld)\n", virtualIndex);

	status_t ret = StreamBase::Init(virtualIndex);
	if (ret != B_OK)
		return ret;

	// Get a pointer to the AVCodecContext for the stream at streamIndex.
	AVCodecContext* codecContext = fStream->codec;

#if 0
// stippi: Here I was experimenting with the question whether some fields of the
// AVCodecContext change (or get filled out at all) once the AVCodec is opened.
	class CodecOpener {
	public:
		CodecOpener(AVCodecContext* context)
		{
			fCodecContext = context;
			AVCodec* codec = avcodec_find_decoder(context->codec_id);
			fCodecOpen = avcodec_open(context, codec) >= 0;
			if (!fCodecOpen)
				TRACE("  failed to open the codec!\n");
		}
		~CodecOpener()
		{
			if (fCodecOpen)
				avcodec_close(fCodecContext);
		}
	private:
		AVCodecContext*		fCodecContext;
		bool				fCodecOpen;
	} codecOpener(codecContext);
#endif

	// initialize the media_format for this stream
	media_format* format = &fFormat;
	memset(format, 0, sizeof(media_format));

	media_format_description description;

	// Set format family and type depending on codec_type of the stream.
	switch (codecContext->codec_type) {
		case AVMEDIA_TYPE_AUDIO:
			if ((codecContext->codec_id >= CODEC_ID_PCM_S16LE)
				&& (codecContext->codec_id <= CODEC_ID_PCM_U8)) {
				TRACE("  raw audio\n");
				format->type = B_MEDIA_RAW_AUDIO;
				description.family = B_ANY_FORMAT_FAMILY;
				// This will then apparently be handled by the (built into
				// BMediaTrack) RawDecoder.
			} else {
				TRACE("  encoded audio\n");
				format->type = B_MEDIA_ENCODED_AUDIO;
				description.family = B_MISC_FORMAT_FAMILY;
				description.u.misc.file_format = 'ffmp';
			}
			break;
		case AVMEDIA_TYPE_VIDEO:
			TRACE("  encoded video\n");
			format->type = B_MEDIA_ENCODED_VIDEO;
			description.family = B_MISC_FORMAT_FAMILY;
			description.u.misc.file_format = 'ffmp';
			break;
		default:
			TRACE("  unknown type\n");
			format->type = B_MEDIA_UNKNOWN_TYPE;
			return B_ERROR;
			break;
	}

	if (format->type == B_MEDIA_RAW_AUDIO) {
		// We cannot describe all raw-audio formats, some are unsupported.
		switch (codecContext->codec_id) {
			case CODEC_ID_PCM_S16LE:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_SHORT;
				format->u.raw_audio.byte_order
					= B_MEDIA_LITTLE_ENDIAN;
				break;
			case CODEC_ID_PCM_S16BE:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_SHORT;
				format->u.raw_audio.byte_order
					= B_MEDIA_BIG_ENDIAN;
				break;
			case CODEC_ID_PCM_U16LE:
//				format->u.raw_audio.format
//					= media_raw_audio_format::B_AUDIO_USHORT;
//				format->u.raw_audio.byte_order
//					= B_MEDIA_LITTLE_ENDIAN;
				return B_NOT_SUPPORTED;
				break;
			case CODEC_ID_PCM_U16BE:
//				format->u.raw_audio.format
//					= media_raw_audio_format::B_AUDIO_USHORT;
//				format->u.raw_audio.byte_order
//					= B_MEDIA_BIG_ENDIAN;
				return B_NOT_SUPPORTED;
				break;
			case CODEC_ID_PCM_S8:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_CHAR;
				break;
			case CODEC_ID_PCM_U8:
				format->u.raw_audio.format
					= media_raw_audio_format::B_AUDIO_UCHAR;
				break;
			default:
				return B_NOT_SUPPORTED;
				break;
		}
	} else {
		if (description.family == B_MISC_FORMAT_FAMILY)
			description.u.misc.codec = codecContext->codec_id;

		BMediaFormats formats;
		status_t status = formats.GetFormatFor(description, format);
		if (status < B_OK)
			TRACE("  formats.GetFormatFor() error: %s\n", strerror(status));

		format->user_data_type = B_CODEC_TYPE_INFO;
		*(uint32*)format->user_data = codecContext->codec_tag;
		format->user_data[4] = 0;
	}

	format->require_flags = 0;
	format->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;

	switch (format->type) {
		case B_MEDIA_RAW_AUDIO:
			format->u.raw_audio.frame_rate = (float)codecContext->sample_rate;
			format->u.raw_audio.channel_count = codecContext->channels;
			format->u.raw_audio.channel_mask = codecContext->channel_layout;
			format->u.raw_audio.byte_order
				= avformat_to_beos_byte_order(codecContext->sample_fmt);
			format->u.raw_audio.format
				= avformat_to_beos_format(codecContext->sample_fmt);
			format->u.raw_audio.buffer_size = 0;

			// Read one packet and mark it for later re-use. (So our first
			// GetNextChunk() call does not read another packet.)
			if (_NextPacket(true) == B_OK) {
				TRACE("  successfully determined audio buffer size: %d\n",
					fPacket.size);
				format->u.raw_audio.buffer_size = fPacket.size;
			}
			break;

		case B_MEDIA_ENCODED_AUDIO:
			format->u.encoded_audio.bit_rate = codecContext->bit_rate;
			format->u.encoded_audio.frame_size = codecContext->frame_size;
			// Fill in some info about possible output format
			format->u.encoded_audio.output
				= media_multi_audio_format::wildcard;
			format->u.encoded_audio.output.frame_rate
				= (float)codecContext->sample_rate;
			// Channel layout bits match in Be API and FFmpeg.
			format->u.encoded_audio.output.channel_count
				= codecContext->channels;
			format->u.encoded_audio.multi_info.channel_mask
				= codecContext->channel_layout;
			format->u.encoded_audio.output.byte_order
				= avformat_to_beos_byte_order(codecContext->sample_fmt);
			format->u.encoded_audio.output.format
				= avformat_to_beos_format(codecContext->sample_fmt);
			if (codecContext->block_align > 0) {
				format->u.encoded_audio.output.buffer_size
					= codecContext->block_align;
			} else {
				format->u.encoded_audio.output.buffer_size
					= codecContext->frame_size * codecContext->channels
						* (format->u.encoded_audio.output.format
							& media_raw_audio_format::B_AUDIO_SIZE_MASK);
			}
			break;

		case B_MEDIA_ENCODED_VIDEO:
// TODO: Specifying any of these seems to throw off the format matching
// later on.
//			format->u.encoded_video.avg_bit_rate = codecContext->bit_rate;
//			format->u.encoded_video.max_bit_rate = codecContext->bit_rate
//				+ codecContext->bit_rate_tolerance;

//			format->u.encoded_video.encoding
//				= media_encoded_video_format::B_ANY;

//			format->u.encoded_video.frame_size = 1;
//			format->u.encoded_video.forward_history = 0;
//			format->u.encoded_video.backward_history = 0;

			format->u.encoded_video.output.field_rate = FrameRate();
			format->u.encoded_video.output.interlace = 1;

			format->u.encoded_video.output.first_active = 0;
			format->u.encoded_video.output.last_active
				= codecContext->height - 1;
				// TODO: Maybe libavformat actually provides that info
				// somewhere...
			format->u.encoded_video.output.orientation
				= B_VIDEO_TOP_LEFT_RIGHT;

			// Calculate the display aspect ratio
			AVRational displayAspectRatio;
			if (codecContext->sample_aspect_ratio.num != 0) {
				av_reduce(&displayAspectRatio.num, &displayAspectRatio.den,
					codecContext->width
						* codecContext->sample_aspect_ratio.num,
					codecContext->height
						* codecContext->sample_aspect_ratio.den,
					1024 * 1024);
				TRACE("  pixel aspect ratio: %d/%d, "
					"display aspect ratio: %d/%d\n",
					codecContext->sample_aspect_ratio.num,
					codecContext->sample_aspect_ratio.den,
					displayAspectRatio.num, displayAspectRatio.den);
			} else {
				av_reduce(&displayAspectRatio.num, &displayAspectRatio.den,
					codecContext->width, codecContext->height, 1024 * 1024);
				TRACE("  no display aspect ratio (%d/%d)\n",
					displayAspectRatio.num, displayAspectRatio.den);
			}
			format->u.encoded_video.output.pixel_width_aspect
				= displayAspectRatio.num;
			format->u.encoded_video.output.pixel_height_aspect
				= displayAspectRatio.den;

			format->u.encoded_video.output.display.format
				= pixfmt_to_colorspace(codecContext->pix_fmt);
			format->u.encoded_video.output.display.line_width
				= codecContext->width;
			format->u.encoded_video.output.display.line_count
				= codecContext->height;
			TRACE("  width/height: %d/%d\n", codecContext->width,
				codecContext->height);
			format->u.encoded_video.output.display.bytes_per_row = 0;
			format->u.encoded_video.output.display.pixel_offset = 0;
			format->u.encoded_video.output.display.line_offset = 0;
			format->u.encoded_video.output.display.flags = 0; // TODO

			break;

		default:
			// This is an unknown format to us.
			break;
	}

	// Add the meta data, if any
	if (codecContext->extradata_size > 0) {
		format->SetMetaData(codecContext->extradata,
			codecContext->extradata_size);
		TRACE("  extradata: %p\n", format->MetaData());
	}

	TRACE("  extradata_size: %d\n", codecContext->extradata_size);
//	TRACE("  intra_matrix: %p\n", codecContext->intra_matrix);
//	TRACE("  inter_matrix: %p\n", codecContext->inter_matrix);
//	TRACE("  get_buffer(): %p\n", codecContext->get_buffer);
//	TRACE("  release_buffer(): %p\n", codecContext->release_buffer);

#ifdef TRACE_AVFORMAT_READER
	char formatString[512];
	if (string_for_format(*format, formatString, sizeof(formatString)))
		TRACE("  format: %s\n", formatString);

	uint32 encoding = format->Encoding();
	TRACE("  encoding '%.4s'\n", (char*)&encoding);
#endif

	return B_OK;
}
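
The display-aspect-ratio branch multiplies the pixel aspect ratio into the frame dimensions before reducing. A worked instance of that av_reduce() call, assuming PAL DV-style input:

// 720x576 with a 16/15 sample (pixel) aspect ratio:
//   num = 720 * 16 = 11520, den = 576 * 15 = 8640 -> reduced to 4/3
AVRational dar;
av_reduce(&dar.num, &dar.den, 720 * 16, 576 * 15, 1024 * 1024); // dar = 4/3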
Code example #9
File: AVFormatReader.cpp Project: RAZVOR/haiku
status_t
StreamBase::Seek(uint32 flags, int64* frame, bigtime_t* time)
{
	BAutolock _(fStreamLock);

	if (fContext == NULL || fStream == NULL)
		return B_NO_INIT;

	TRACE_SEEK("StreamBase::Seek(%ld,%s%s%s%s, %lld, "
		"%lld)\n", VirtualIndex(),
		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
		*frame, *time);

	double frameRate = FrameRate();
	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
		// Seeking is always based on time, initialize it when client seeks
		// based on frame.
		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);
	}

	int64_t timeStamp = *time;

	int searchFlags = AVSEEK_FLAG_BACKWARD;
	if ((flags & B_MEDIA_SEEK_CLOSEST_FORWARD) != 0)
		searchFlags = 0;

	if (fSeekByBytes) {
		searchFlags |= AVSEEK_FLAG_BYTE;

		BAutolock _(fSourceLock);
		int64_t fileSize;
		if (fSource->GetSize(&fileSize) != B_OK)
			return B_NOT_SUPPORTED;
		int64_t duration = Duration();
		if (duration == 0)
			return B_NOT_SUPPORTED;

		timeStamp = int64_t(fileSize * ((double)timeStamp / duration));
		if ((flags & B_MEDIA_SEEK_CLOSEST_BACKWARD) != 0) {
			timeStamp -= 65536;
			if (timeStamp < 0)
				timeStamp = 0;
		}

		bool seekAgain = true;
		bool seekForward = true;
		bigtime_t lastFoundTime = -1;
		int64_t closestTimeStampBackwards = -1;
		while (seekAgain) {
			if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp,
				INT64_MAX, searchFlags) < 0) {
				TRACE("  avformat_seek_file() (by bytes) failed.\n");
				return B_ERROR;
			}
			seekAgain = false;

			// Our last packet is toast in any case. Read the next one so we
			// know where we really sought.
			fReusePacket = false;
			if (_NextPacket(true) == B_OK) {
				while (fPacket.pts == kNoPTSValue) {
					fReusePacket = false;
					if (_NextPacket(true) != B_OK)
						return B_ERROR;
				}
				if (fPacket.pos >= 0)
					timeStamp = fPacket.pos;
				bigtime_t foundTime
					= _ConvertFromStreamTimeBase(fPacket.pts);
				if (foundTime != lastFoundTime) {
					lastFoundTime = foundTime;
					if (foundTime > *time) {
						if (closestTimeStampBackwards >= 0) {
							timeStamp = closestTimeStampBackwards;
							seekAgain = true;
							seekForward = false;
							continue;
						}
						int64_t diff = int64_t(fileSize
							* ((double)(foundTime - *time) / (2 * duration)));
						if (diff < 8192)
							break;
						timeStamp -= diff;
						TRACE_SEEK("  need to seek back (%lld) (time: %.2f "
							"-> %.2f)\n", timeStamp, *time / 1000000.0,
							foundTime / 1000000.0);
						if (timeStamp < 0)
							foundTime = 0;
						else {
							seekAgain = true;
							continue;
						}
					} else if (seekForward && foundTime < *time - 100000) {
						closestTimeStampBackwards = timeStamp;
						int64_t diff = int64_t(fileSize
							* ((double)(*time - foundTime) / (2 * duration)));
						if (diff < 8192)
							break;
						timeStamp += diff;
						TRACE_SEEK("  need to seek forward (%lld) (time: "
							"%.2f -> %.2f)\n", timeStamp, *time / 1000000.0,
							foundTime / 1000000.0);
						if (timeStamp > duration)
							foundTime = duration;
						else {
							seekAgain = true;
							continue;
						}
					}
				}
				TRACE_SEEK("  found time: %lld -> %lld (%.2f)\n", *time,
					foundTime, foundTime / 1000000.0);
				*time = foundTime;
				*frame = (uint64)(*time * frameRate / 1000000LL + 0.5);
				TRACE_SEEK("  seeked frame: %lld\n", *frame);
			} else {
				TRACE_SEEK("  _NextPacket() failed!\n");
				return B_ERROR;
			}
		}
	} else {
		// We may not get a PTS from the next packet after seeking, so
		// we try to get an expected time from the index.
		int64_t streamTimeStamp = _ConvertToStreamTimeBase(*time);
		int index = av_index_search_timestamp(fStream, streamTimeStamp,
			searchFlags);
		if (index < 0) {
			TRACE("  av_index_search_timestamp() failed\n");
		} else {
			if (index > 0) {
				const AVIndexEntry& entry = fStream->index_entries[index];
				streamTimeStamp = entry.timestamp;
			} else {
				// Some demuxers use the first index entry to store some
				// other information, like the total playing time for example.
				// Assume the timeStamp of the first entry is always 0.
				// TODO: Handle start-time offset?
				streamTimeStamp = 0;
			}
			bigtime_t foundTime = _ConvertFromStreamTimeBase(streamTimeStamp);
			bigtime_t timeDiff = foundTime > *time
				? foundTime - *time : *time - foundTime;

			if (timeDiff > 1000000
				&& (fStreamBuildsIndexWhileReading
					|| index == fStream->nb_index_entries - 1)) {
				// If the stream is building the index on the fly while parsing
				// it, we only have entries in the index for positions already
				// decoded, i.e. we cannot seek into the future. In that case,
				// just assume that we can seek where we want and leave
				// time/frame unmodified. Since successfully seeking one time
				// will generate index entries for the sought position, we
				// need to remember this in fStreamBuildsIndexWhileReading,
				// since when seeking back there will be later index entries,
				// but we still want to ignore the found entry.
				fStreamBuildsIndexWhileReading = true;
				TRACE_SEEK("  Not trusting generic index entry. "
					"(Current count: %d)\n", fStream->nb_index_entries);
			} else {
				// If we found a reasonable time, write it into *time.
				// After seeking, we will try to read the sought time from
				// the next packet. If the packet has no PTS value, we may
				// still have a more accurate time from the index lookup.
				*time = foundTime;
			}
		}

		if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp, INT64_MAX,
				searchFlags) < 0) {
			TRACE("  avformat_seek_file() failed.\n");
			// Try to fall back to av_seek_frame()
			timeStamp = _ConvertToStreamTimeBase(timeStamp);
			if (av_seek_frame(fContext, fStream->index, timeStamp,
				searchFlags) < 0) {
				TRACE("  avformat_seek_frame() failed as well.\n");
				// Fall back to seeking to the beginning by bytes
				timeStamp = 0;
				if (av_seek_frame(fContext, fStream->index, timeStamp,
						AVSEEK_FLAG_BYTE) < 0) {
					TRACE("  avformat_seek_frame() by bytes failed as "
						"well.\n");
					// Do not propagate error in any case. We fail if we can't
					// read another packet.
				} else
					*time = 0;
			}
		}

		// Our last packet is toast in any case. Read the next one so
		// we know where we really sought.
		bigtime_t foundTime = *time;

		fReusePacket = false;
		if (_NextPacket(true) == B_OK) {
			if (fPacket.pts != kNoPTSValue)
				foundTime = _ConvertFromStreamTimeBase(fPacket.pts);
			else
				TRACE_SEEK("  no PTS in packet after seeking\n");
		} else
			TRACE_SEEK("  _NextPacket() failed!\n");

		*time = foundTime;
		TRACE_SEEK("  sought time: %.2fs\n", *time / 1000000.0);
		*frame = (uint64)(*time * frameRate / 1000000.0 + 0.5);
		TRACE_SEEK("  sought frame: %lld\n", *frame);
	}

	return B_OK;
}
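
The two conversions Seek() relies on, assuming a constant frame rate (times are in microseconds throughout this reader):

int64 frame = 250;       // example frame index
double frameRate = 25.0; // example rate
bigtime_t time = (bigtime_t)(frame * 1000000.0 / frameRate + 0.5); // 10000000 µs
int64 back = (int64)(time * frameRate / 1000000.0 + 0.5);          // 250 again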
Code example #10
File: AVFormatReader.cpp Project: RAZVOR/haiku
status_t
AVFormatReader::Stream::FindKeyFrame(uint32 flags, int64* frame,
	bigtime_t* time) const
{
	BAutolock _(&fLock);

	if (fContext == NULL || fStream == NULL)
		return B_NO_INIT;

	TRACE_FIND("AVFormatReader::Stream::FindKeyFrame(%ld,%s%s%s%s, "
		"%lld, %lld)\n", VirtualIndex(),
		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
		*frame, *time);

	bool inLastRequestedRange = false;
	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
		if (fLastReportedKeyframe.reportedFrame
			<= fLastReportedKeyframe.requestedFrame) {
			inLastRequestedRange
				= *frame >= fLastReportedKeyframe.reportedFrame
					&& *frame <= fLastReportedKeyframe.requestedFrame;
		} else {
			inLastRequestedRange
				= *frame >= fLastReportedKeyframe.requestedFrame
					&& *frame <= fLastReportedKeyframe.reportedFrame;
		}
	} else if ((flags & B_MEDIA_SEEK_TO_FRAME) == 0) {
		if (fLastReportedKeyframe.reportedTime
			<= fLastReportedKeyframe.requestedTime) {
			inLastRequestedRange
				= *time >= fLastReportedKeyframe.reportedTime
					&& *time <= fLastReportedKeyframe.requestedTime;
		} else {
			inLastRequestedRange
				= *time >= fLastReportedKeyframe.requestedTime
					&& *time <= fLastReportedKeyframe.reportedTime;
		}
	}

	if (inLastRequestedRange) {
		*frame = fLastReportedKeyframe.reportedFrame;
		*time = fLastReportedKeyframe.reportedTime;
		TRACE_FIND("  same as last reported keyframe\n");
		return B_OK;
	}

	double frameRate = FrameRate();
	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0)
		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);

	status_t ret;
	if (fGhostStream == NULL) {
		BAutolock _(fSourceLock);

		fGhostStream = new(std::nothrow) StreamBase(fSource, fSourceLock,
			&fLock);
		if (fGhostStream == NULL) {
			TRACE("  failed to allocate ghost stream\n");
			return B_NO_MEMORY;
		}

		ret = fGhostStream->Open();
		if (ret != B_OK) {
			TRACE("  ghost stream failed to open: %s\n", strerror(ret));
			return B_ERROR;
		}

		ret = fGhostStream->Init(fVirtualIndex);
		if (ret != B_OK) {
			TRACE("  ghost stream failed to init: %s\n", strerror(ret));
			return B_ERROR;
		}
	}
	fLastReportedKeyframe.requestedFrame = *frame;
	fLastReportedKeyframe.requestedTime = *time;
	fLastReportedKeyframe.seekFlags = flags;

	ret = fGhostStream->Seek(flags, frame, time);
	if (ret != B_OK) {
		TRACE("  ghost stream failed to seek: %s\n", strerror(ret));
		return B_ERROR;
	}

	fLastReportedKeyframe.reportedFrame = *frame;
	fLastReportedKeyframe.reportedTime = *time;

	TRACE_FIND("  found time: %.2fs\n", *time / 1000000.0);
	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
		*frame = int64_t(*time * FrameRate() / 1000000.0 + 0.5);
		TRACE_FIND("  found frame: %lld\n", *frame);
	}

	return B_OK;
}
Code example #11
File: AVFormatReader.cpp Project: RAZVOR/haiku
status_t
AVFormatReader::Stream::GetStreamInfo(int64* frameCount,
	bigtime_t* duration, media_format* format, const void** infoBuffer,
	size_t* infoSize) const
{
	BAutolock _(&fLock);

	TRACE("AVFormatReader::Stream::GetStreamInfo(%ld)\n",
		VirtualIndex());

	double frameRate = FrameRate();
	TRACE("  frameRate: %.4f\n", frameRate);

	#ifdef TRACE_AVFORMAT_READER
	if (fStream->start_time != kNoPTSValue) {
		bigtime_t startTime = _ConvertFromStreamTimeBase(fStream->start_time);
		TRACE("  start_time: %lld or %.5fs\n", startTime,
			startTime / 1000000.0);
		// TODO: Handle start time in FindKeyFrame() and Seek()?!
	}
	#endif // TRACE_AVFORMAT_READER

	*duration = Duration();

	TRACE("  duration: %lld or %.5fs\n", *duration, *duration / 1000000.0);

	#if 0
	if (fStream->nb_index_entries > 0) {
		TRACE("  dump of index entries:\n");
		int count = 5;
		int firstEntriesCount = min_c(fStream->nb_index_entries, count);
		int i = 0;
		for (; i < firstEntriesCount; i++) {
			AVIndexEntry& entry = fStream->index_entries[i];
			bigtime_t timeGlobal = entry.timestamp;
			bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
			TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
				timeNative / 1000000.0f, timeGlobal / 1000000.0f);
		}
		if (fStream->nb_index_entries - count > i) {
			i = fStream->nb_index_entries - count;
			TRACE("    ...\n");
			for (; i < fStream->nb_index_entries; i++) {
				AVIndexEntry& entry = fStream->index_entries[i];
				bigtime_t timeGlobal = entry.timestamp;
				bigtime_t timeNative = _ConvertFromStreamTimeBase(timeGlobal);
				TRACE("    [%d] native: %.5fs global: %.5fs\n", i,
					timeNative / 1000000.0f, timeGlobal / 1000000.0f);
			}
		}
	}
	#endif

	*frameCount = fStream->nb_frames;
//	if (*frameCount == 0) {
		// Calculate from duration and frame rate
		*frameCount = (int64)(*duration * frameRate / 1000000LL);
		TRACE("  frameCount calculated: %lld, from context: %lld\n",
			*frameCount, fStream->nb_frames);
//	} else
//		TRACE("  frameCount: %lld\n", *frameCount);

	*format = fFormat;

	*infoBuffer = fStream->codec->extradata;
	*infoSize = fStream->codec->extradata_size;

	return B_OK;
}
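
A quick check of the frame-count fallback (duration is in microseconds): a 90 s stream at 25 fps yields 2250 frames:

bigtime_t duration = 90 * 1000000LL; // 90 s in µs
double frameRate = 25.0;
int64 frameCount = (int64)(duration * frameRate / 1000000LL); // 2250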
Code example #12
TestQuant (int origin=0)
  : FixedFrameQuantiser( FrameRate(GAVL_TIME_SCALE, 3), TimeValue(origin))
  { }
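
Read as a rational frames-per-second value, FrameRate(GAVL_TIME_SCALE, 3) makes each frame span exactly 3 internal time ticks (den / num seconds at GAVL_TIME_SCALE ticks per second); a sketch of the grid arithmetic such a quantiser presumably performs (values and names hypothetical):

long long ticksPerFrame = 3;         // 3 / GAVL_TIME_SCALE seconds per frame
long long rawTicks = 10, origin = 0; // example values
long long frameIndex = (rawTicks - origin) / ticksPerFrame; // 3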
Code example #13
File: ogg.c Project: Erikhht/TCPMP
static bool_t OGMHeader(ogg* p, oggstream* s, const char* Data, int Length )
{
	int i;
	if (Length<40 || (*Data & PACKET_TYPE_BITS) != PACKET_TYPE_HEADER)
		return 0;

	if (strncmp(Data+1, "Direct Show Samples embedded in Ogg", 35) == 0)
	{
		// old header

		if (INT32LE(*(int32_t*)(Data+96)) == 0x05589F80)
		{
			PacketFormatClear(&s->Stream.Format);
			s->Stream.Format.Type = PACKET_VIDEO;
			s->Stream.Format.Format.Video.Width = INT32LE(*(int32_t*)(Data+176));
			s->Stream.Format.Format.Video.Height = INT32LE(*(int32_t*)(Data+180));
			s->Stream.Format.Format.Video.Pixel.FourCC = INT32LE(*(int32_t*)(Data+68));
			s->Stream.Format.Format.Video.Pixel.BitCount = INT16LE(*(int16_t*)(Data+182));

			i = INT16LE(*(int16_t*)(Data+136));	// extrasize
			if (i && PacketFormatExtra(&s->Stream.Format,i))
				memcpy(s->Stream.Format.Extra,Data+142,s->Stream.Format.ExtraLength);

			s->MediaRateDen = INT64LE(*(int32_t*)(Data+164))*TICKSPERSEC;
			s->MediaRateNum = 10000000;
			s->DefaultLen = 1;

			FrameRate(&s->Stream.Format.PacketRate,s->MediaRateNum,s->MediaRateDen/TICKSPERSEC);
			return 1;
		}

		if (INT32LE(*(int32_t*)(Data+96)) == 0x05589F81)
		{
			PacketFormatClear(&s->Stream.Format);
			s->Stream.Format.Type = PACKET_AUDIO;
			s->Stream.Format.Format.Audio.Format = INT16LE(*(int16_t*)(Data+124));
			s->Stream.Format.Format.Audio.Channels = INT16LE(*(int16_t*)(Data+126));
			s->Stream.Format.Format.Audio.BlockAlign = INT16LE(*(int16_t*)(Data+136));
			s->Stream.Format.Format.Audio.Bits = INT16LE(*(int16_t*)(Data+138));
			s->Stream.Format.Format.Audio.SampleRate = INT32LE(*(int32_t*)(Data+128));
			s->Stream.Format.ByteRate = INT32LE(*(int32_t*)(Data+132));

			i = INT16LE(*(int16_t*)(Data+136));	// extrasize
			if (i && PacketFormatExtra(&s->Stream.Format,i))
				memcpy(s->Stream.Format.Extra,Data+142,s->Stream.Format.ExtraLength);

			s->MediaRateDen = TICKSPERSEC;
			s->MediaRateNum = INT32LE(*(int32_t*)(Data+128));
			s->DefaultLen = 1;
			return 1;
		}
	}
	else
	if (Length >= sizeof(ogm_header)+1)
	{
		ogm_header Head;
		memcpy(&Head,Data+1,sizeof(Head));

		// new header
		if (strncmp(Head.streamtype, MT_Video, strlen(MT_Video)) == 0)
		{
			PacketFormatClear(&s->Stream.Format);
			s->Stream.Format.Type = PACKET_VIDEO;
			s->Stream.Format.Format.Video.Width = INT32LE(Head.format.video.width);
			s->Stream.Format.Format.Video.Height = INT32LE(Head.format.video.height);
			s->Stream.Format.Format.Video.Pixel.FourCC = INT32LE(*(int32_t*)Head.subtype);
			s->Stream.Format.Format.Video.Pixel.BitCount = INT16LE(Head.bits_per_sample);

			s->MediaRateDen = INT64LE(Head.time_unit)*TICKSPERSEC;
			s->MediaRateNum = INT64LE(Head.samples_per_unit) * 10000000;
			s->DefaultLen = INT32LE(Head.default_len);

			FrameRate(&s->Stream.Format.PacketRate,s->MediaRateNum,s->MediaRateDen/TICKSPERSEC);
			i = Length - (sizeof(ogm_header)+1);
			if (i && PacketFormatExtra(&s->Stream.Format,i))
				memcpy(s->Stream.Format.Extra,Data+1+sizeof(ogm_header),s->Stream.Format.ExtraLength);
			return 1;
		}
		
		if (strncmp(Head.streamtype, MT_Audio, strlen(MT_Audio)) == 0)
		{
			PacketFormatClear(&s->Stream.Format);
			s->Stream.Format.Type = PACKET_AUDIO;
			s->Stream.Format.Format.Audio.Format = 0;
			for (i=0;i<4;++i)
				if (Head.subtype[i])
					s->Stream.Format.Format.Audio.Format = s->Stream.Format.Format.Audio.Format*16+Hex(Head.subtype[i]);
			s->Stream.Format.Format.Audio.Channels = INT16LE(Head.format.audio.channels);
			s->Stream.Format.Format.Audio.Bits = INT16LE(Head.bits_per_sample);
			s->Stream.Format.Format.Audio.BlockAlign = INT16LE(Head.format.audio.blockalign);
			s->Stream.Format.ByteRate = INT32LE(Head.format.audio.avgbytespersec);
			s->Stream.Format.Format.Audio.SampleRate = (int)INT64LE(Head.samples_per_unit);

			s->MediaRateDen = INT64LE(Head.time_unit)*TICKSPERSEC;
			s->MediaRateNum = INT64LE(Head.samples_per_unit) * 10000000;
			s->DefaultLen = INT32LE(Head.default_len);

			i = Length - (sizeof(ogm_header)+1);
			if (i && PacketFormatExtra(&s->Stream.Format,i))
				memcpy(s->Stream.Format.Extra,Data+1+sizeof(ogm_header),s->Stream.Format.ExtraLength);
			return 1;
		}

		if (strncmp(Data+1, MT_Text,  strlen(MT_Text)) == 0)
		{
			PacketFormatClear(&s->Stream.Format);
			s->Stream.Format.Type = PACKET_SUBTITLE;
			s->Stream.Format.Format.Subtitle.FourCC = SUBTITLE_OEM; //???

			s->MediaRateDen = INT64LE(Head.time_unit)*TICKSPERSEC;
			s->MediaRateNum = INT64LE(Head.samples_per_unit) * 10000000;
			s->DefaultLen = INT32LE(Head.default_len);

			i = Length - (sizeof(ogm_header)+1);
			if (i && PacketFormatExtra(&s->Stream.Format,i))
				memcpy(s->Stream.Format.Extra,Data+1+sizeof(ogm_header),s->Stream.Format.ExtraLength);
			return 1;
		}
	}

	return 0;
}
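
In both video branches above the packet rate comes from the same rational, FrameRate(MediaRateNum, MediaRateDen/TICKSPERSEC), with time_unit in 100 ns units. Checking the arithmetic for 25 fps video (time_unit = 400000, samples_per_unit = 1):

// MediaRateNum = samples_per_unit * 10000000 = 10000000
// MediaRateDen = time_unit * TICKSPERSEC     = 400000 * TICKSPERSEC
// FrameRate(num, den / TICKSPERSEC)          -> 10000000 / 400000 = 25 fps
long long timeUnit = 400000;           // 100 ns units per frame
long long fps = 10000000LL / timeUnit; // 25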