Example #1
void AVIDecoder::readNextPacket() {
	uint32 nextTag = _fileStream->readUint32BE();
	uint32 size = _fileStream->readUint32LE();

	if (_fileStream->eos())
		return;

	if (nextTag == ID_LIST) {
		// A list of audio/video chunks
		int32 startPos = _fileStream->pos();

		if (_fileStream->readUint32BE() != ID_REC)
			error("Expected 'rec ' LIST");

		size -= 4; // subtract list type

		// Decode chunks in the list
		while (_fileStream->pos() < startPos + (int32)size)
			readNextPacket();

		return;
	} else if (nextTag == ID_JUNK || nextTag == ID_IDX1) {
		skipChunk(size);
		return;
	}

	Track *track = getTrack(getStreamIndex(nextTag));

	if (!track)
		error("Cannot get track from tag '%s'", tag2str(nextTag));

	Common::SeekableReadStream *chunk = 0;

	if (size != 0) {
		chunk = _fileStream->readStream(size);
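		// RIFF chunks are word-aligned, so skip the pad byte after an odd-sized chunk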
		_fileStream->skip(size & 1);
	}

	if (track->getTrackType() == Track::kTrackTypeAudio) {
		if (getStreamType(nextTag) != kStreamTypeAudio)
			error("Invalid audio track tag '%s'", tag2str(nextTag));

		assert(chunk);
		((AVIAudioTrack *)track)->queueSound(chunk);
	} else {
		AVIVideoTrack *videoTrack = (AVIVideoTrack *)track;

		if (getStreamType(nextTag) == kStreamTypePaletteChange) {
			// Palette Change
			videoTrack->loadPaletteFromChunk(chunk);
		} else if (getStreamType(nextTag) == kStreamTypeRawVideo) {
			// TODO: Check if this really is uncompressed. Many videos
			// falsely put compressed data in here.
			error("Uncompressed AVI frame found");
		} else {
			// Otherwise, assume it's a compressed frame
			videoTrack->decodeFrame(chunk);
		}
	}
}
Example #2
void HDTSpecForm::fillHDTSpecification(hdt::HDTSpecification &hdt)
{
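    // Combo index 0 corresponds to SPO, so shift by one past TripleComponentOrder::Unknown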
    hdt.set("triplesOrder", hdt::getOrderStr((hdt::TripleComponentOrder)(ui->triplesOrderCombo->currentIndex()+1)));

    hdt.set("header.type", hdt::HDTVocabulary::DICTIONARY_TYPE_PLAIN);

    switch(ui->dictionaryTypeCombo->currentIndex()) {
    case 0:
        // FourSectionDictionary
        hdt.set("dictionary.type", hdt::HDTVocabulary::DICTIONARY_TYPE_FOUR);
        break;
    case 1:
        // PlainDictionary
        hdt.set("dictionary.type", hdt::HDTVocabulary::DICTIONARY_TYPE_PLAIN);
        break;
    case 2:
        // LiteralDictionary
        hdt.set("dictionary.type", hdt::HDTVocabulary::DICTIONARY_TYPE_LITERAL);
        break;
    }

    switch(ui->triplesTypeCombo->currentIndex()) {
    case 0:
        // BitmapTriples
        hdt.set("triples.type", hdt::HDTVocabulary::TRIPLES_TYPE_BITMAP);
        break;
    case 1:
        // TriplesList
        hdt.set("triples.type", hdt::HDTVocabulary::TRIPLES_TYPE_TRIPLESLIST);
        break;
    case 2:
        // PlainTriples
        hdt.set("triples.type", hdt::HDTVocabulary::TRIPLES_TYPE_PLAIN);
        break;
    case 3:
        // CompactTriples
        hdt.set("triples.type", hdt::HDTVocabulary::TRIPLES_TYPE_COMPACT);
        break;
    }

    if(ui->streamXcombo->isEnabled()) {
        hdt.set("stream.x", getStreamType(ui->streamXcombo->currentIndex()));
    }

    if(ui->streamYcombo->isEnabled()) {
        hdt.set("stream.y", getStreamType(ui->streamYcombo->currentIndex()));
    }

    if(ui->streamZcombo->isEnabled()) {
        hdt.set("stream.z", getStreamType(ui->streamZcombo->currentIndex()));
    }
}
Example #3
P2pRpcNetwork::P2pRpcNetwork(PeerNetworkSender& networkSender,
    ACE_Message_Block& recvBlock, ACE_Message_Block& sendBlock,
    bool useBitPacking) :
    networkSender_(networkSender),
    useBitPacking_(useBitPacking),
    rstreamBuffer_(new MessageBlockStreamBuffer(&recvBlock)),
    istream_(
        srpc::StreamFactory::createIStream(shouldUseUtf8ForString,
            getStreamType(useBitPacking_), *rstreamBuffer_)),
    wstreamBuffer_(new MessageBlockStreamBuffer(&sendBlock)),
    ostream_(
        srpc::StreamFactory::createOStream(shouldUseUtf8ForString,
            getStreamType(useBitPacking_), *wstreamBuffer_))
{
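    // Nothing else to do; the RPC streams were fully wired up in the initializer list.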
}
Example #4
size_t ConnectOutputToInput(IOutput* prev, ModuleType* next, IProcessExecutor * const executor = defaultExecutor) {
	auto prevMetadata = safe_cast<const IMetadataCap>(prev)->getMetadata();
	auto nextMetadata = next->getMetadata();
	if (prevMetadata && nextMetadata) {
		if (prevMetadata->getStreamType() != nextMetadata->getStreamType())
			throw std::runtime_error("Module connection: incompatible types");
		Log::msg(Info, "--------- Connect: metadata OK");
	} else {
		if (prevMetadata && !nextMetadata) {
#if 0 //rely on data to propagate type instead of inputs or outputs - this way sent data type is on the output, processed data is on the input
			next->setMetadata(prevMetadata);
			log(Info, "--------- Connect: metadata Propagate to next");
#endif
		} else if (!prevMetadata && nextMetadata) {
			safe_cast<IMetadataCap>(prev)->setMetadata(nextMetadata);
			Log::msg(Info, "--------- Connect: metadata Propagate to prev (backward)");
		} else {
			Log::msg(Info, "--------- Connect: no metadata");
		}
	}

	next->connect();
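	// Forward each data sample downstream and schedule the next module's process() on the executor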
	return prev->getSignal().connect(
		[=](Data data) {
			next->push(data);
			(*executor)(MEMBER_FUNCTOR_PROCESS(next));
		});
}
Example #5
Array File::getMetaData() {
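  // Mirrors PHP's stream_get_meta_data(); fields like "blocked" and "timed_out" are fixed defaults here.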
  Array ret = Array::Create();
  ret.set("wrapper_type", o_getClassName());
  ret.set("stream_type",  getStreamType());
  ret.set("mode",         String(m_mode));
  ret.set("unread_bytes", 0);
  ret.set("seekable",     seekable());
  ret.set("uri",          String(m_name));
  ret.set("timed_out",    false);
  ret.set("blocked",      true);
  ret.set("eof",          eof());
  return ret;
}
Example #6
void
Engine::transportStateChanged()
{
    std::string newTransportState = transportState();

    if (newTransportState == "Ended") {
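        // Map the engine's "Ended" state to STOPPED and schedule the end-of-stream callback shortly after.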
        newTransportState = AvTransportArgument::TRANSPORT_STATE_STOPPED;
        _pEndOfStreamTimer = new Poco::Timer(10);
        _pEndOfStreamTimer->start(Poco::TimerCallback<Engine> (*this, &Engine::endOfStream));
    }

    Variant val;
    val.setValue(newTransportState);
    LOG(upnpav, debug, "engine sets new transport state: " + newTransportState);
    // notify via upnp events over network of new transport state (useful to update controllers' view of remote renderers)
    _pAVTransportImpl->_pLastChange->setStateVar(_instanceId, AvTransportEventedStateVar::TRANSPORT_STATE, val);
    // also notify locally within the process of the new transport state and stream type (useful for the GUI to e.g. bring the video window to front)
    std::string streamType = StreamTypeOther;
    if (newTransportState == AvTransportArgument::TRANSPORT_STATE_PLAYING) {
        streamType = getStreamType();
    }
    Poco::NotificationCenter::defaultCenter().postNotification(new StreamTypeNotification(_instanceId, newTransportState, streamType));
}
Example #7
const Graphics::Surface *AviDecoder::decodeNextFrame() {
	uint32 nextTag = _fileStream->readUint32BE();

	if (_fileStream->eos())
		return NULL;

	if (_curFrame == -1)
		_startTime = g_system->getMillis();

	if (nextTag == ID_LIST) {
		// A list of audio/video chunks
		uint32 listSize = _fileStream->readUint32LE() - 4;
		int32 startPos = _fileStream->pos();

		if (_fileStream->readUint32BE() != ID_REC)
			error ("Expected 'rec ' LIST");

		// Decode chunks in the list and see if we get a frame
		const Graphics::Surface *frame = NULL;
		while (_fileStream->pos() < startPos + (int32)listSize) {
			const Graphics::Surface *temp = decodeNextFrame();
			if (temp)
				frame = temp;
		}

		return frame;
	} else if (getStreamType(nextTag) == 'wb') {
		// Audio Chunk
		uint32 chunkSize = _fileStream->readUint32LE();
		queueAudioBuffer(chunkSize);
		_fileStream->skip(chunkSize & 1); // Alignment
	} else if (getStreamType(nextTag) == 'dc' || getStreamType(nextTag) == 'id' ||
	           getStreamType(nextTag) == 'AM' || getStreamType(nextTag) == '32' ||
	           getStreamType(nextTag) == 'iv') {
		// Compressed Frame
		_curFrame++;
		uint32 chunkSize = _fileStream->readUint32LE();

		if (chunkSize == 0) // Keep last frame on screen
			return NULL;

		Common::SeekableReadStream *frameData = _fileStream->readStream(chunkSize);
		const Graphics::Surface *surface = _videoCodec->decodeImage(frameData);
		delete frameData;
		_fileStream->skip(chunkSize & 1); // Alignment
		return surface;
	} else if (getStreamType(nextTag) == 'pc') {
		// Palette Change
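		// Chunk layout: first entry index, entry count, a reserved word, then 4 bytes (r, g, b, flags) per entry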
		_fileStream->readUint32LE(); // Chunk size, not needed here
		byte firstEntry = _fileStream->readByte();
		uint16 numEntries = _fileStream->readByte();
		_fileStream->readUint16LE(); // Reserved

		// 0 entries means all colors are going to be changed
		if (numEntries == 0)
			numEntries = 256;

		for (uint16 i = firstEntry; i < numEntries + firstEntry; i++) {
			_palette[i * 3] = _fileStream->readByte();
			_palette[i * 3 + 1] = _fileStream->readByte();
			_palette[i * 3 + 2] = _fileStream->readByte();
			_fileStream->readByte(); // Flags that don't serve us any purpose
		}

		_dirtyPalette = true;

		// No alignment necessary. It's always even.
	} else if (nextTag == ID_JUNK) {
		runHandle(ID_JUNK);
	} else if (nextTag == ID_IDX1) {
		runHandle(ID_IDX1);
	} else
		error("Tag = \'%s\', %d", tag2str(nextTag), _fileStream->pos());

	return NULL;
}
Example #8
bool AVIDecoder::seekIntern(const Audio::Timestamp &time) {
	// Can't seek beyond the end
	if (time > getDuration())
		return false;

	// Track down our video track (optionally audio too).
	// We only support seeking with one track right now.
	AVIVideoTrack *videoTrack = 0;
	AVIAudioTrack *audioTrack = 0;
	int videoIndex = -1;
	int audioIndex = -1;
	uint trackID = 0;

	for (TrackListIterator it = getTrackListBegin(); it != getTrackListEnd(); it++, trackID++) {
		if ((*it)->getTrackType() == Track::kTrackTypeVideo) {
			if (videoTrack) {
				// Already have one
				// -> Not supported
				return false;
			}

			videoTrack = (AVIVideoTrack *)*it;
			videoIndex = trackID;
		} else if ((*it)->getTrackType() == Track::kTrackTypeAudio) {
			if (audioTrack) {
				// Already have one
				// -> Not supported
				return false;
			}

			audioTrack = (AVIAudioTrack *)*it;
			audioIndex = trackID;
		}
	}

	// Need a video track to go forwards
	// If there isn't a video track, why would anyone be using AVI then?
	if (!videoTrack)
		return false;

	// If we seek directly to the end, just mark the tracks as over
	if (time == getDuration()) {
		videoTrack->setCurFrame(videoTrack->getFrameCount() - 1);

		if (audioTrack)
			audioTrack->resetStream();

		return true;
	}

	// Get the frame we should be on at this time
	uint frame = videoTrack->getFrameAtTime(time);

	// Reset any palette, if necessary
	videoTrack->useInitialPalette();

	int lastKeyFrame = -1;
	int frameIndex = -1;
	int lastRecord = -1;
	uint curFrame = 0;

	// Go through and figure out where we should be
	// If there's a palette, we need to find the palette too
	for (uint32 i = 0; i < _indexEntries.size(); i++) {
		const OldIndex &index = _indexEntries[i];

		if (index.id == ID_REC) {
			// Keep track of any records we find
			lastRecord = i;
		} else {
			if (getStreamIndex(index.id) != videoIndex)
				continue;

			uint16 streamType = getStreamType(index.id);

			if (streamType == kStreamTypePaletteChange) {
				// We need to handle any palette change we see since there's no
				// flag to tell if this is a "key" palette.
				// Decode the palette
				_fileStream->seek(_indexEntries[i].offset + 8);
				Common::SeekableReadStream *chunk = 0;

				if (_indexEntries[i].size != 0)
					chunk = _fileStream->readStream(_indexEntries[i].size);

				videoTrack->loadPaletteFromChunk(chunk);
			} else {
				// Check to see if this is a keyframe
				// The first frame has to be a keyframe
				if ((_indexEntries[i].flags & AVIIF_INDEX) || curFrame == 0)
					lastKeyFrame = i;

				// Did we find the target frame?
				if (frame == curFrame) {
					frameIndex = i;
					break;
				}

				curFrame++;
			}
		}
	}

	if (frameIndex < 0) // This shouldn't happen.
		return false;

	if (audioTrack) {
		// We need to find where the start of audio should be.
		// Which is exactly 'initialFrames' audio chunks back from where
		// our found frame is.

		// Recreate the audio stream
		audioTrack->resetStream();

		uint framesNeeded = _header.initialFrames;
		uint startAudioChunk = 0;
		int startAudioSearch = (lastRecord < 0) ? (frameIndex - 1) : (lastRecord - 1);
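		// Walk backwards from the target frame (or its record) counting off 'initialFrames' audio chunks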

		for (int i = startAudioSearch; i >= 0; i--) {
			if (getStreamIndex(_indexEntries[i].id) != audioIndex)
				continue;

			assert(getStreamType(_indexEntries[i].id) == kStreamTypeAudio);

			framesNeeded--;

			if (framesNeeded == 0) {
				startAudioChunk = i;
				break;
			}
		}

		// Now go forward and queue them all
		for (int i = startAudioChunk; i <= startAudioSearch; i++) {
			if (_indexEntries[i].id == ID_REC)
				continue;

			if (getStreamIndex(_indexEntries[i].id) != audioIndex)
				continue;

			assert(getStreamType(_indexEntries[i].id) == kStreamTypeAudio);

			_fileStream->seek(_indexEntries[i].offset + 8);
			Common::SeekableReadStream *chunk = _fileStream->readStream(_indexEntries[i].size);
			audioTrack->queueSound(chunk);
		}

		// Skip any audio to bring us to the right time
		audioTrack->skipAudio(time, videoTrack->getFrameTime(frame));
	}

	// Decode from keyFrame to curFrame - 1
	for (int i = lastKeyFrame; i < frameIndex; i++) {
		if (_indexEntries[i].id == ID_REC)
			continue;

		if (getStreamIndex(_indexEntries[i].id) != videoIndex)
			continue;

		uint16 streamType = getStreamType(_indexEntries[i].id);

		// Ignore palettes, they were already handled
		if (streamType == kStreamTypePaletteChange)
			continue;

		// Frame, hopefully
		_fileStream->seek(_indexEntries[i].offset + 8);
		Common::SeekableReadStream *chunk = 0;

		if (_indexEntries[i].size != 0)
			chunk = _fileStream->readStream(_indexEntries[i].size);

		videoTrack->decodeFrame(chunk);
	}

	// Seek to the right spot
	// To the beginning of the last record, or frame if that doesn't exist
	if (lastRecord >= 0)
		_fileStream->seek(_indexEntries[lastRecord].offset);
	else
		_fileStream->seek(_indexEntries[frameIndex].offset);

	videoTrack->setCurFrame((int)frame - 1);

	return true;
}
Example #9
void AVIDecoder::readNextPacket() {
	uint32 nextTag = _fileStream->readUint32BE();
	uint32 size = _fileStream->readUint32LE();

	if (_fileStream->eos())
		return;

	if (nextTag == ID_LIST) {
		// A list of audio/video chunks
		int32 startPos = _fileStream->pos();

		if (_fileStream->readUint32BE() != ID_REC)
			error("Expected 'rec ' LIST");

		size -= 4; // subtract list type

		// Decode chunks in the list
		while (_fileStream->pos() < startPos + (int32)size)
			readNextPacket();

		return;
	} else if (nextTag == ID_JUNK || nextTag == ID_IDX1) {
		skipChunk(size);
		return;
	}

	Track *track = getTrack(getStreamIndex(nextTag));

	if (!track)
		error("Cannot get track from tag '%s'", tag2str(nextTag));

	Common::SeekableReadStream *chunk = 0;

	if (size != 0) {
		chunk = _fileStream->readStream(size);
		_fileStream->skip(size & 1);
	}

	if (track->getTrackType() == Track::kTrackTypeAudio) {
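		// AVI data chunks are tagged '##wb' (audio), '##dc' (compressed video), '##db' (uncompressed video) or '##pc' (palette change)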
		if (getStreamType(nextTag) != MKTAG16('w', 'b'))
			error("Invalid audio track tag '%s'", tag2str(nextTag));

		assert(chunk);
		((AVIAudioTrack *)track)->queueSound(chunk);
	} else {
		AVIVideoTrack *videoTrack = (AVIVideoTrack *)track;

		if (getStreamType(nextTag) == MKTAG16('p', 'c')) {
			// Palette Change
			assert(chunk);
			byte firstEntry = chunk->readByte();
			uint16 numEntries = chunk->readByte();
			chunk->readUint16LE(); // Reserved

			// 0 entries means all colors are going to be changed
			if (numEntries == 0)
				numEntries = 256;

			byte *palette = const_cast<byte *>(videoTrack->getPalette());

			for (uint16 i = firstEntry; i < numEntries + firstEntry; i++) {
				palette[i * 3] = chunk->readByte();
				palette[i * 3 + 1] = chunk->readByte();
				palette[i * 3 + 2] = chunk->readByte();
				chunk->readByte(); // Flags that don't serve us any purpose
			}

			delete chunk;
			videoTrack->markPaletteDirty();
		} else if (getStreamType(nextTag) == MKTAG16('d', 'b')) {
			// TODO: Check if this really is uncompressed. Many videos
			// falsely put compressed data in here.
			error("Uncompressed AVI frame found");
		} else {
			// Otherwise, assume it's a compressed frame
			videoTrack->decodeFrame(chunk);
		}
	}
}
Example #10
File: D2V.cpp Project: dubhater/D2VWitch
bool D2V::printSettings() {
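    // Map the libavformat demuxer name to the d2v Stream_Type value.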
    int stream_type = getStreamType(f->fctx->iformat->name);

    int video_id = video_stream->id;
    int audio_id = 0;
    int64_t ts_packetsize = 0;
    if (stream_type == TRANSPORT_STREAM) {
        const AVStream *audio_stream = nullptr;
        for (unsigned i = 0; i < f->fctx->nb_streams; i++) {
            if (f->fctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
                audio_stream = f->fctx->streams[i];
                break;
            }
        }

        if (audio_stream)
            audio_id = audio_stream->id;

        if (av_opt_get_int(f->fctx, "ts_packetsize", AV_OPT_SEARCH_CHILDREN, &ts_packetsize) < 0)
            ts_packetsize = 0;
    }

    int mpeg_type = 0;
    if (video_stream->codec->codec_id == AV_CODEC_ID_MPEG1VIDEO)
        mpeg_type = 1;
    else if (video_stream->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO)
        mpeg_type = 2;

    int yuvrgb_scale = input_range == ColourRangeLimited ? 1 : 0;

    int width, height;
    if (av_opt_get_image_size(video_stream->codec, "video_size", 0, &width, &height) < 0)
        width = height = -1;

    AVRational sar;
    if (av_opt_get_q(video_stream->codec, "aspect", 0, &sar) < 0)
        sar = { 1, 1 };
    AVRational dar = av_mul_q(av_make_q(width, height), sar);
    av_reduce(&dar.num, &dar.den, dar.num, dar.den, 1024);

    // No AVOption for framerate?
    AVRational frame_rate = video_stream->codec->framerate;

    std::string settings;

    settings += "Stream_Type=" + std::to_string(stream_type) + "\n";
    if (stream_type == TRANSPORT_STREAM) {
        char pids[100] = { 0 };
        snprintf(pids, 100, "%x,%x,%x", video_id, audio_id, 0);
        settings += "MPEG2_Transport_PID=";
        settings += pids;
        settings += "\n";

        settings += "Transport_Packet_Size=" + std::to_string(ts_packetsize) + "\n";
    }
    settings += "MPEG_Type=" + std::to_string(mpeg_type) + "\n";
    settings += "iDCT_Algorithm=6\n"; // "32-bit SSEMMX (Skal)". No one cares anyway.
    settings += "YUVRGB_Scale=" + std::to_string(yuvrgb_scale) + "\n";
    settings += "Luminance_Filter=0,0\n"; // We don't care.
    settings += "Clipping=0,0,0,0\n"; // We don't crop here.
    settings += "Aspect_Ratio=" + std::to_string(dar.num) + ":" + std::to_string(dar.den) + "\n";
    settings += "Picture_Size=" + std::to_string(width) + "x" + std::to_string(height) + "\n";
    settings += "Field_Operation=0\n"; // Always tell them honor the pulldown flags.
    settings += "Frame_Rate=" + std::to_string((int)((float)frame_rate.num * 1000 / frame_rate.den)) + " (" + std::to_string(frame_rate.num) + "/" + std::to_string(frame_rate.den) + ")\n";
    settings += "Location=0,0,0,0\n"; // Whatever.

    if (fprintf(d2v_file, "%s", settings.c_str()) < 0) {
        error = "Failed to print d2v settings section: fprintf() failed.";
        return false;
    }

    return true;
}
Example #11
bool AVIDecoder::seekIntern(const Audio::Timestamp &time) {
	// Can't seek beyond the end
	if (time > getDuration())
		return false;

	// Get our video
	AVIVideoTrack *videoTrack = (AVIVideoTrack *)_videoTracks[0].track;
	uint32 videoIndex = _videoTracks[0].index;

	// If we seek directly to the end, just mark the tracks as over
	if (time == getDuration()) {
		videoTrack->setCurFrame(videoTrack->getFrameCount() - 1);

		for (TrackListIterator it = getTrackListBegin(); it != getTrackListEnd(); it++)
			if ((*it)->getTrackType() == Track::kTrackTypeAudio)
				((AVIAudioTrack *)*it)->resetStream();

		return true;
	}

	// Get the frame we should be on at this time
	uint frame = videoTrack->getFrameAtTime(time);

	// Reset any palette, if necessary
	videoTrack->useInitialPalette();

	int lastKeyFrame = -1;
	int frameIndex = -1;
	uint curFrame = 0;

	// Go through and figure out where we should be
	// If there's a palette, we need to find the palette too
	for (uint32 i = 0; i < _indexEntries.size(); i++) {
		const OldIndex &index = _indexEntries[i];

		// We don't care about RECs
		if (index.id == ID_REC)
			continue;

		// We're only looking at entries for this track
		if (getStreamIndex(index.id) != videoIndex)
			continue;

		uint16 streamType = getStreamType(index.id);

		if (streamType == kStreamTypePaletteChange) {
			// We need to handle any palette change we see since there's no
			// flag to tell if this is a "key" palette.
			// Decode the palette
			_fileStream->seek(_indexEntries[i].offset + 8);
			Common::SeekableReadStream *chunk = 0;

			if (_indexEntries[i].size != 0)
				chunk = _fileStream->readStream(_indexEntries[i].size);

			videoTrack->loadPaletteFromChunk(chunk);
		} else {
			// Check to see if this is a keyframe
			// The first frame has to be a keyframe
			if ((_indexEntries[i].flags & AVIIF_INDEX) || curFrame == 0)
				lastKeyFrame = i;

			// Did we find the target frame?
			if (frame == curFrame) {
				frameIndex = i;
				break;
			}

			curFrame++;
		}
	}

	if (frameIndex < 0) // This shouldn't happen.
		return false;

	// Update all the audio tracks
	for (uint32 i = 0; i < _audioTracks.size(); i++) {
		AVIAudioTrack *audioTrack = (AVIAudioTrack *)_audioTracks[i].track;

		// Recreate the audio stream
		audioTrack->resetStream();

		// Set the chunk index for the track
		audioTrack->setCurChunk(frame);

		uint32 chunksFound = 0;
		for (uint32 j = 0; j < _indexEntries.size(); j++) {
			const OldIndex &index = _indexEntries[j];

			// Continue ignoring RECs
			if (index.id == ID_REC)
				continue;

			if (getStreamIndex(index.id) == _audioTracks[i].index) {
				if (chunksFound == frame) {
					_fileStream->seek(index.offset + 8);
					Common::SeekableReadStream *audioChunk = _fileStream->readStream(index.size);
					audioTrack->queueSound(audioChunk);
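					// Resume the chunk search right after this audio chunk next time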
					_audioTracks[i].chunkSearchOffset = (j == _indexEntries.size() - 1) ? _movieListEnd : _indexEntries[j + 1].offset;
					break;
				}

				chunksFound++;
			}
		}

		// Skip any audio to bring us to the right time
		audioTrack->skipAudio(time, videoTrack->getFrameTime(frame));
	}

	// Decode from keyFrame to curFrame - 1
	for (int i = lastKeyFrame; i < frameIndex; i++) {
		if (_indexEntries[i].id == ID_REC)
			continue;

		if (getStreamIndex(_indexEntries[i].id) != videoIndex)
			continue;

		uint16 streamType = getStreamType(_indexEntries[i].id);

		// Ignore palettes, they were already handled
		if (streamType == kStreamTypePaletteChange)
			continue;

		// Frame, hopefully
		_fileStream->seek(_indexEntries[i].offset + 8);
		Common::SeekableReadStream *chunk = 0;

		if (_indexEntries[i].size != 0)
			chunk = _fileStream->readStream(_indexEntries[i].size);

		videoTrack->decodeFrame(chunk);
	}

	// Set the video track's frame
	videoTrack->setCurFrame((int)frame - 1);

	// Set the video track's search offset to the right spot
	_videoTracks[0].chunkSearchOffset = _indexEntries[frameIndex].offset;
	return true;
}
Example #12
void AVIDecoder::handleNextPacket(TrackStatus &status) {
	// If there's no more to search, bail out
	if (status.chunkSearchOffset + 8 >= _movieListEnd) {
		if (status.track->getTrackType() == Track::kTrackTypeVideo) {
			// Horrible AVI video has a premature end
			// Force the frame to be the last frame
			debug(7, "Forcing end of AVI video");
			((AVIVideoTrack *)status.track)->forceTrackEnd();
		}

		return;
	}

	// See if audio needs to be buffered and break out if not
	if (status.track->getTrackType() == Track::kTrackTypeAudio && !shouldQueueAudio(status))
		return;

	// Seek to where we shall start searching
	_fileStream->seek(status.chunkSearchOffset);

	for (;;) {
		// If there's no more to search, bail out
		if ((uint32)_fileStream->pos() + 8 >= _movieListEnd) {
			if (status.track->getTrackType() == Track::kTrackTypeVideo) {
				// Horrible AVI video has a premature end
				// Force the frame to be the last frame
				debug(7, "Forcing end of AVI video");
				((AVIVideoTrack *)status.track)->forceTrackEnd();
			}

			break;
		}

		uint32 nextTag = _fileStream->readUint32BE();
		uint32 size = _fileStream->readUint32LE();

		if (nextTag == ID_LIST) {
			// A list of audio/video chunks
			if (_fileStream->readUint32BE() != ID_REC)
				error("Expected 'rec ' LIST");

			continue;
		} else if (nextTag == ID_JUNK || nextTag == ID_IDX1) {
			skipChunk(size);
			continue;
		}

		// Only accept chunks for this stream
		uint32 streamIndex = getStreamIndex(nextTag);
		if (streamIndex != status.index) {
			skipChunk(size);
			continue;
		}

		Common::SeekableReadStream *chunk = 0;

		if (size != 0) {
			chunk = _fileStream->readStream(size);
			_fileStream->skip(size & 1);
		}

		if (status.track->getTrackType() == Track::kTrackTypeAudio) {
			if (getStreamType(nextTag) != kStreamTypeAudio)
				error("Invalid audio track tag '%s'", tag2str(nextTag));

			assert(chunk);
			((AVIAudioTrack *)status.track)->queueSound(chunk);

			// Break out if we have enough audio
			if (!shouldQueueAudio(status))
				break;
		} else {
			AVIVideoTrack *videoTrack = (AVIVideoTrack *)status.track;

			if (getStreamType(nextTag) == kStreamTypePaletteChange) {
				// Palette Change
				videoTrack->loadPaletteFromChunk(chunk);
			} else {
				// Otherwise, assume it's a compressed frame
				videoTrack->decodeFrame(chunk);
				break;
			}
		}
	}

	// Start us off in this position next time
	status.chunkSearchOffset = _fileStream->pos();
}
Example #13
// Returns the stream data
CStreamData CSCJsonParser::getStreamData(const string& json) const
{
    CStreamData result;
    value u;

    // If the string is empty
    if (json == ""){
        // treat it as "no stream"
        result.m_type = CStreamData::TYPE_NOSTREAM;
        return result;
    }

    // Get the stream type
    value v = parseSCJson(json);
    CStreamData::TYPE type = getStreamType(v);

    // Branch on the stream type
    switch (type){
    case CStreamData::TYPE_MESSAGE_ADD: // A message was posted

        // Get the message
        u = v.get("message");

        // Extract the values
        result.m_type = CStreamData::TYPE_MESSAGE_ADD;
        result.m_message.m_id = u.get("id").get<double>();
        result.m_message.m_username = wxString(u.get("user_name").get<string>().c_str(), wxConvUTF8);
        result.m_message.m_body = wxString(u.get("body").get<string>().c_str(), wxConvUTF8);
        result.m_message.m_channel = wxString(u.get("channel_name").get<string>().c_str(), wxConvUTF8);
        result.m_message.m_time = (time_t)u.get("created_at").get<double>();
        if (u.get("temporary_nick")){ // テンポラリニックネーム(あれば)
            result.m_message.m_tempNick = wxString(u.get("temporary_nick").get<string>().c_str(), wxConvUTF8);
        }
        break;

    case CStreamData::TYPE_CHANNEL_MEMBER_ADD: // A member joined the channel

        result.m_type = CStreamData::TYPE_CHANNEL_MEMBER_ADD;
        result.m_member.m_name = wxString(v.get("user_name").get<string>().c_str(), wxConvUTF8);
        result.m_channel.m_name = wxString(v.get("channel_name").get<string>().c_str(), wxConvUTF8);
        break;

    case CStreamData::TYPE_CHANNEL_MEMBER_SUB: // A member left the channel

        result.m_type = CStreamData::TYPE_CHANNEL_MEMBER_SUB;
        result.m_member.m_name = wxString(v.get("user_name").get<string>().c_str(), wxConvUTF8);
        result.m_channel.m_name = wxString(v.get("channel_name").get<string>().c_str(), wxConvUTF8);
        break;

    case CStreamData::TYPE_CHANNEL_UPDATE: // Channel information updated

        u = v.get("channel");

        result.m_type = CStreamData::TYPE_CHANNEL_UPDATE;
        result.m_channel.m_name = wxString(u.get("name").get<string>().c_str(), wxConvUTF8);
        result.m_channel.m_topic = wxString(u.get("topic").get("body").get<string>().c_str(), wxConvUTF8);
        break;

    case CStreamData::TYPE_USER_UPDATE: // User information updated

        u =  v.get("user");

        result.m_type = CStreamData::TYPE_USER_UPDATE;
        result.m_member.m_name = wxString(u.get("name").get<string>().c_str(), wxConvUTF8);
        result.m_member.m_nick = wxString(u.get("nick").get<string>().c_str(), wxConvUTF8);
        break;

    default: // Could not be parsed
        result.m_type = CStreamData::TYPE_UNKNOWN;
        break;
    }

    return result;
}
Example #14
void AVIDecoder::readNextPacket() {
	if ((uint32)_fileStream->pos() >= _movieListEnd) {
		// Ugh, reached the end prematurely.
		forceVideoEnd();
		return;
	}

	uint32 nextTag = _fileStream->readUint32BE();
	uint32 size = _fileStream->readUint32LE();

	if (_fileStream->eos()) {
		// Also a premature end.
		forceVideoEnd();
		return;
	}

	if (nextTag == ID_LIST) {
		// A list of audio/video chunks
		int32 startPos = _fileStream->pos();

		if (_fileStream->readUint32BE() != ID_REC)
			error("Expected 'rec ' LIST");

		size -= 4; // subtract list type

		// Decode chunks in the list
		while (_fileStream->pos() < startPos + (int32)size)
			readNextPacket();

		return;
	} else if (nextTag == ID_JUNK || nextTag == ID_IDX1) {
		skipChunk(size);
		return;
	}

	Track *track = getTrack(getStreamIndex(nextTag));

	if (!track)
		error("Cannot get track from tag '%s'", tag2str(nextTag));

	Common::SeekableReadStream *chunk = 0;

	if (size != 0) {
		chunk = _fileStream->readStream(size);
		_fileStream->skip(size & 1);
	}

	if (track->getTrackType() == Track::kTrackTypeAudio) {
		if (getStreamType(nextTag) != kStreamTypeAudio)
			error("Invalid audio track tag '%s'", tag2str(nextTag));

		assert(chunk);
		((AVIAudioTrack *)track)->queueSound(chunk);
	} else {
		AVIVideoTrack *videoTrack = (AVIVideoTrack *)track;

		if (getStreamType(nextTag) == kStreamTypePaletteChange) {
			// Palette Change
			videoTrack->loadPaletteFromChunk(chunk);
		} else {
			// Otherwise, assume it's a compressed frame
			videoTrack->decodeFrame(chunk);
		}
	}
}