Example #1
CCActionInterval* CCFlash::reverse() {
    return CCFlash::create(getDuration(), m_color);
}
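
CCFlash appears to be a custom cocos2d-x interval action; a flash is symmetric in time, so reverse() can simply re-create the same action. A minimal usage sketch, assuming the cocos2d-x 2.x API and a hypothetical CCSprite* named sprite:

// Hypothetical usage: play the flash forward, then its reverse.
CCFlash* flash = CCFlash::create(0.5f, ccWHITE);  // duration, color (signature taken from reverse() above)
CCSequence* seq = CCSequence::create(flash, flash->reverse(), NULL);
sprite->runAction(seq);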
Example #2
ostream& HumdrumLine::printXml(ostream& out, int level, const string& indent) {

	if (hasSpines()) {
		out << Convert::repeatString(indent, level) << "<frame";
		out << " n=\"" << getLineIndex() << "\"";
		out << " xml:id=\"" << getXmlId() << "\"";
		out << ">\n";
		level++;

		out << Convert::repeatString(indent, level) << "<frameInfo>\n";
		level++;

		out << Convert::repeatString(indent, level) << "<fieldCount>";
		out << getTokenCount() << "</fieldCount>\n";

		out << Convert::repeatString(indent, level);
		out << "<frameStart";
		out << Convert::getHumNumAttributes(getDurationFromStart());
		out << "/>\n";

		out << Convert::repeatString(indent, level);
		out << "<frameDuration";
		out << Convert::getHumNumAttributes(getDuration());
		out << "/>\n";

		out << Convert::repeatString(indent, level) << "<frameType>";
		if (isData()) {
			out << "data";
		} else if (isBarline()) {
			out << "barline";
		} else if (isInterpretation()) {
			out << "interpretation";
		} else if (isLocalComment()) {
			out << "local-comment";
		}
		out << "</frameType>\n";

		if (isBarline()) {
			// print the duration to the next barline or to the end of the score
			// if there is no barline at the end of the score.
			out << Convert::repeatString(indent, level);
			out << "<barlineDuration";
			out << Convert::getHumNumAttributes(getBarlineDuration());
			out << "/>\n";
		}

		bool bstart = isKernBoundaryStart();
		bool bend   = isKernBoundaryEnd();
		if (bstart || bend) {
			out << Convert::repeatString(indent, level);
			cout << "<kernBoundary";
			cout << " start=\"";
			if (bstart) {
				cout << "true";
			} else {
				cout << "false";
			}
			cout << "\"";
			cout << " end=\"";
			if (bend) {
				cout << "true";
			} else {
				cout << "false";
			}
			cout << "\"";
			cout << "/>\n";
		}

		level--;
		out << Convert::repeatString(indent, level) << "</frameInfo>\n";

		out << Convert::repeatString(indent, level) << "<fields>\n";
		level++;
		for (int i=0; i<getFieldCount(); i++) {
			token(i)->printXml(out, level, indent);
		}
		level--;
		out << Convert::repeatString(indent, level) << "</fields>\n";
		
		level--;
		out << Convert::repeatString(indent, level) << "</frame>\n";

	} else {
		// global comments, reference records, or blank lines print here.
		out << Convert::repeatString(indent, level) << "<metaFrame";
		out << " n=\"" << getLineIndex() << "\"";
		out << " token=\"" << Convert::encodeXml(((string)(*this))) << "\"";
		out << " xml:id=\"" << getXmlId() << "\"";
		out << "/>\n";
		level++;

		out << Convert::repeatString(indent, level) << "<frameInfo>\n";
		level++;

		out << Convert::repeatString(indent, level);
		out << "<startTime";
		out << Convert::getHumNumAttributes(getDurationFromStart());
		out << "/>\n";

		out << Convert::repeatString(indent, level) << "<frameType>";
		if (isReference()) {
			out << "reference";
		} else if (isBlank()) {
			out << "empty";
		} else {
			out << "global-comment";
		}
		out << "</frameType>\n";

		if (isReference()) {
			out << Convert::repeatString(indent, level);
			out << "<referenceKey>" << Convert::encodeXml(getReferenceKey());
			out << "</referenceKey>\n";

			out << Convert::repeatString(indent, level);
			out << "<referenceValue>" << Convert::encodeXml(getReferenceValue());
			out << "</referenceValue>\n";
		}

		level--;
		out << Convert::repeatString(indent, level) << "<frameInfo>\n";


		level--;
		out << Convert::repeatString(indent, level) << "</metaFrame>\n";
	}

	return out;
}
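
For orientation, a line with spines serialized by this function produces XML shaped roughly as follows (values and field contents elided; the duration attributes come from Convert::getHumNumAttributes):

<frame n="12" xml:id="...">
	<frameInfo>
		<fieldCount>4</fieldCount>
		<frameStart .../>
		<frameDuration .../>
		<frameType>data</frameType>
	</frameInfo>
	<fields>
		...one element per token, via each token's printXml...
	</fields>
</frame>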
Example #3
bool AVIDecoder::seekIntern(const Audio::Timestamp &time) {
	// Can't seek beyond the end
	if (time > getDuration())
		return false;

	// Track down our video track.
	// We only support seeking with one video track right now.
	AVIVideoTrack *videoTrack = 0;
	int videoIndex = -1;
	uint trackID = 0;

	for (TrackListIterator it = getTrackListBegin(); it != getTrackListEnd(); it++, trackID++) {
		if ((*it)->getTrackType() == Track::kTrackTypeVideo) {
			if (videoTrack) {
				// Already have one
				// -> Not supported
				return false;
			}

			videoTrack = (AVIVideoTrack *)*it;
			videoIndex = trackID;
		}
	}

	// Need a video track to go forwards
	// If there isn't a video track, why would anyone be using AVI then?
	if (!videoTrack)
		return false;

	// If we seek directly to the end, just mark the tracks as over
	if (time == getDuration()) {
		videoTrack->setCurFrame(videoTrack->getFrameCount() - 1);

		for (TrackListIterator it = getTrackListBegin(); it != getTrackListEnd(); it++)
			if ((*it)->getTrackType() == Track::kTrackTypeAudio)
				((AVIAudioTrack *)*it)->resetStream();

		return true;
	}

	// Get the frame we should be on at this time
	uint frame = videoTrack->getFrameAtTime(time);

	// Reset any palette, if necessary
	videoTrack->useInitialPalette();

	int lastKeyFrame = -1;
	int frameIndex = -1;
	int lastRecord = -1;
	uint curFrame = 0;

	// Go through and figure out where we should be
	// If there's a palette, we need to find the palette too
	for (uint32 i = 0; i < _indexEntries.size(); i++) {
		const OldIndex &index = _indexEntries[i];

		if (index.id == ID_REC) {
			// Keep track of any records we find
			lastRecord = i;
		} else {
			if (getStreamIndex(index.id) != videoIndex)
				continue;

			uint16 streamType = getStreamType(index.id);

			if (streamType == kStreamTypePaletteChange) {
				// We need to handle any palette change we see since there's no
				// flag to tell if this is a "key" palette.
				// Decode the palette
				_fileStream->seek(_indexEntries[i].offset + 8);
				Common::SeekableReadStream *chunk = 0;

				if (_indexEntries[i].size != 0)
					chunk = _fileStream->readStream(_indexEntries[i].size);

				videoTrack->loadPaletteFromChunk(chunk);
			} else {
				// Check to see if this is a keyframe
				// The first frame has to be a keyframe
				if ((_indexEntries[i].flags & AVIIF_INDEX) || curFrame == 0)
					lastKeyFrame = i;

				// Did we find the target frame?
				if (frame == curFrame) {
					frameIndex = i;
					break;
				}

				curFrame++;
			}
		}
	}

	if (frameIndex < 0) // This shouldn't happen.
		return false;

	// Update all the audio tracks
	uint audioIndex = 0;

	for (TrackListIterator it = getTrackListBegin(); it != getTrackListEnd(); it++, audioIndex++) {
		if ((*it)->getTrackType() != Track::kTrackTypeAudio)
			continue;

		AVIAudioTrack *audioTrack = (AVIAudioTrack *)*it;

		// We need to find where the start of audio should be.
		// Which is exactly 'initialFrames' audio chunks back from where
		// our found frame is.

		// Recreate the audio stream
		audioTrack->resetStream();

		uint framesNeeded = _header.initialFrames;
		uint startAudioChunk = 0;
		int startAudioSearch = (lastRecord < 0) ? (frameIndex - 1) : (lastRecord - 1);

		for (int i = startAudioSearch; i >= 0; i--) {
			if (getStreamIndex(_indexEntries[i].id) != audioIndex)
				continue;

			assert(getStreamType(_indexEntries[i].id) == kStreamTypeAudio);

			framesNeeded--;

			if (framesNeeded == 0) {
				startAudioChunk = i;
				break;
			}
		}

		// Now go forward and queue them all
		for (int i = startAudioChunk; i <= startAudioSearch; i++) {
			if (_indexEntries[i].id == ID_REC)
				continue;

			if (getStreamIndex(_indexEntries[i].id) != audioIndex)
				continue;

			assert(getStreamType(_indexEntries[i].id) == kStreamTypeAudio);

			_fileStream->seek(_indexEntries[i].offset + 8);
			Common::SeekableReadStream *chunk = _fileStream->readStream(_indexEntries[i].size);
			audioTrack->queueSound(chunk);
		}

		// Skip any audio to bring us to the right time
		audioTrack->skipAudio(time, videoTrack->getFrameTime(frame));
	}

	// Decode from keyFrame to curFrame - 1
	for (int i = lastKeyFrame; i < frameIndex; i++) {
		if (_indexEntries[i].id == ID_REC)
			continue;

		if (getStreamIndex(_indexEntries[i].id) != videoIndex)
			continue;

		uint16 streamType = getStreamType(_indexEntries[i].id);

		// Ignore palettes, they were already handled
		if (streamType == kStreamTypePaletteChange)
			continue;

		// Frame, hopefully
		_fileStream->seek(_indexEntries[i].offset + 8);
		Common::SeekableReadStream *chunk = 0;

		if (_indexEntries[i].size != 0)
			chunk = _fileStream->readStream(_indexEntries[i].size);

		videoTrack->decodeFrame(chunk);
	}

	// Seek to the right spot
	// To the beginning of the last record, or frame if that doesn't exist
	if (lastRecord >= 0)
		_fileStream->seek(_indexEntries[lastRecord].offset);
	else
		_fileStream->seek(_indexEntries[frameIndex].offset);

	videoTrack->setCurFrame((int)frame - 1);

	return true;
}
Example #4
void Animate3D::update(float t)
{
    if (_target)
    {
        if (_state == Animate3D::Animate3DState::FadeIn && _lastTime > 0.f)
        {
            _accTransTime += (t - _lastTime) * getDuration();
            
            _weight = _accTransTime / _transTime;
            if (_weight >= 1.0f)
            {
                _accTransTime = _transTime;
                _weight = 1.0f;
                _state = Animate3D::Animate3DState::Running;
                s_fadeInAnimates.erase(_target);
                s_runningAnimates[_target] = this;
            }
        }
        else if (_state == Animate3D::Animate3DState::FadeOut && _lastTime > 0.f)
        {
            _accTransTime += (t - _lastTime) * getDuration();
            
            _weight = 1 - _accTransTime / _transTime;
            if (_weight <= 0.0f)
            {
                _accTransTime = _transTime;
                _weight = 0.0f;
                
                s_fadeOutAnimates.erase(_target);
                _target->stopAction(this);
                return;
            }
        }
        float lastTime = _lastTime;
        _lastTime = t;
        
        if (_quality != Animate3DQuality::QUALITY_NONE)
        {
            if (_weight > 0.0f)
            {
                float transDst[3], rotDst[4], scaleDst[3];
                float* trans = nullptr, *rot = nullptr, *scale = nullptr;
                if (_playReverse){
                    t = 1 - t;
                    lastTime = 1.0 - lastTime;
                }
                
                t = _start + t * _last;
                lastTime = _start + lastTime * _last;
                
                for (const auto& it : _boneCurves) {
                    auto bone = it.first;
                    auto curve = it.second;
                    if (curve->translateCurve)
                    {
                        curve->translateCurve->evaluate(t, transDst, _translateEvaluate);
                        trans = &transDst[0];
                    }
                    if (curve->rotCurve)
                    {
                        curve->rotCurve->evaluate(t, rotDst, _roteEvaluate);
                        rot = &rotDst[0];
                    }
                    if (curve->scaleCurve)
                    {
                        curve->scaleCurve->evaluate(t, scaleDst, _scaleEvaluate);
                        scale = &scaleDst[0];
                    }
                    bone->setAnimationValue(trans, rot, scale, this, _weight);
                }
                
                for (const auto& it : _nodeCurves)
                {
                    auto node = it.first;
                    auto curve = it.second;
                    Mat4 transform;
                    if (curve->translateCurve)
                    {
                        curve->translateCurve->evaluate(t, transDst, _translateEvaluate);
                        transform.translate(transDst[0], transDst[1], transDst[2]);
                    }
                    if (curve->rotCurve)
                    {
                        curve->rotCurve->evaluate(t, rotDst, _roteEvaluate);
                        Quaternion qua(rotDst[0], rotDst[1], rotDst[2], rotDst[3]);
                        transform.rotate(qua);
                    }
                    if (curve->scaleCurve)
                    {
                        curve->scaleCurve->evaluate(t, scaleDst, _scaleEvaluate);
                        transform.scale(scaleDst[0], scaleDst[1], scaleDst[2]);
                    }
                    node->setAdditionalTransform(&transform);
                }
                if (!_keyFrameUserInfos.empty()){
                    float prekeyTime = lastTime * getDuration() * _frameRate;
                    float keyTime = t * getDuration() * _frameRate;
                    std::vector<Animate3DDisplayedEventInfo*> eventInfos;
                    for (auto keyFrame : _keyFrameUserInfos)
                    {
                        if ((!_playReverse && keyFrame.first >= prekeyTime && keyFrame.first < keyTime)
                            || (_playReverse && keyFrame.first >= keyTime && keyFrame.first < prekeyTime))
                            {
                                auto& frameEvent = _keyFrameEvent[keyFrame.first];
                                if (frameEvent == nullptr)
                                    frameEvent = new (std::nothrow) EventCustom(Animate3DDisplayedNotification);
                                auto eventInfo = &_displayedEventInfo[keyFrame.first];
                                eventInfo->target = _target;
                                eventInfo->frame = keyFrame.first;
                                eventInfo->userInfo = &_keyFrameUserInfos[keyFrame.first];
                                eventInfos.push_back(eventInfo);
                                frameEvent->setUserData((void*)eventInfo);
                            }
                    }
                    std::sort(eventInfos.begin(), eventInfos.end(), _playReverse ? cmpEventInfoDes : cmpEventInfoAsc);
                    for (auto eventInfo : eventInfos) {
                        Director::getInstance()->getEventDispatcher()->dispatchEvent(_keyFrameEvent[eventInfo->frame]);
                    }
                }
            }
        }
    }
}
Example #5
// Rendering the PVA_FF_Atom in proper format (bitlengths, etc.) to an ostream.
bool
PVA_FF_TrackHeaderAtom::renderToFileStream(MP4_AUTHOR_FF_FILE_IO_WRAP *fp)
{
    int32 rendered = 0; // Keep track of number of bytes rendered
    uint32 trackID = 0;
    // Render PVA_FF_Atom type and size
    if (!renderAtomBaseMembers(fp))
    {
        return false;
    }
    rendered += getDefaultSize();

    if (!PVA_FF_AtomUtils::render32(fp, getCreationTime()))
    {
        return false;
    }
    if (!PVA_FF_AtomUtils::render32(fp, getModificationTime()))
    {
        return false;
    }

    trackID = getTrackID();


    if (!PVA_FF_AtomUtils::render32(fp, trackID))
    {
        return false;
    }
    rendered += 12;

    if (!PVA_FF_AtomUtils::render32(fp, _reserved1))
    {
        return false;
    }
    rendered += 4;

    /*
     * To ensure that the total track duration includes the duration of the
     * last sample as well, which in our case is same as the last but one.
     */
    //uint32 totalDuration = getDuration() + _deltaTS;
    uint32 totalDuration = getDuration();
    if (!PVA_FF_AtomUtils::render32(fp, totalDuration))
    {
        return false;
    }
    rendered += 4;

    if (!PVA_FF_AtomUtils::render32(fp, _reserved2[0]))
    {
        return false;
    }
    if (!PVA_FF_AtomUtils::render32(fp, _reserved2[1]))
    {
        return false;
    }
    if (!PVA_FF_AtomUtils::render32(fp, _reserved2[2]))
    {
        return false;
    }
    if (!PVA_FF_AtomUtils::render16(fp, _reserved3))
    {
        return false;
    }
    if (!PVA_FF_AtomUtils::render16(fp, _reserved4))
    {
        return false;
    }
    if (!PVA_FF_AtomUtils::render32(fp, _reserved5[0]))
    {
        return false;
    }
    if (!PVA_FF_AtomUtils::render32(fp, _reserved5[1]))
    {
        return false;
    }
    if (!PVA_FF_AtomUtils::render32(fp, _reserved5[2]))
    {
        return false;
    }
    if (!PVA_FF_AtomUtils::render32(fp, _reserved5[3]))
    {
        return false;
    }
    if (!PVA_FF_AtomUtils::render32(fp, _reserved5[4]))
    {
        return false;
    }
    if (!PVA_FF_AtomUtils::render32(fp, _reserved5[5]))
    {
        return false;
    }
    if (!PVA_FF_AtomUtils::render32(fp, _reserved5[6]))
    {
        return false;
    }
    if (!PVA_FF_AtomUtils::render32(fp, _reserved5[7]))
    {
        return false;
    }
    if (!PVA_FF_AtomUtils::render32(fp, _reserved5[8]))
    {
        return false;
    }
    if (!PVA_FF_AtomUtils::render32(fp, _reserved6))
    {
        return false;
    }
    if (!PVA_FF_AtomUtils::render32(fp, _reserved7))
    {
        return false;
    }
    rendered += 60;

    return true;
}
Example #6
void TSThread::setTime(F32 time)
{
   setPos(timeScale * time/getDuration());
}
Example #7
bool RTSPSource::isLiveStream() {
    int64_t duration = 0;
    getDuration(&duration);
    return duration == 0;
}
Example #8
double ASplineVec3::getNormalizedTime(double t) const 
{
    return (t / getDuration());
}
Example #9
void TextureAtlasAnimation::playFromCenter(double now)
{
    float t = now / getDuration();
    drawFromCenter(t);
}
Example #10
void EnvDisplay::paint(Graphics &g) {
    int h = getHeight();
    char *rates = pvalues;
    char *levels = pvalues + 4;
    
    double d[4];
    double keyoff = 0.0;
    double release = 0.0;
    d[0] = getDuration(rates[0], levels[3], levels[0]);
    d[1] = getDuration(rates[1], levels[0], levels[1]);
    d[2] = getDuration(rates[2], levels[1], levels[2]);
    
    double ko = 0.0;
    for(int j=0; j<3; ++j)
        ko += d[j];
    if (ko>keyoff)
        keyoff=ko;
    
    d[3] = getDuration(rates[3], levels[2], levels[3]);
    if ( d[3]>release ) {
        release = d[3];
    }
    keyoff += 10.0;
    double w = getWidth() / (keyoff + release);

    g.setColour(Colour(0xF0000000));
    g.fillRoundedRectangle (keyoff*w, 0.0f, (float) getWidth(), (float) getHeight(), 1.0f);
    g.setColour(Colours::white);

    int x, y;
    
    Path p;
    p.startNewSubPath(0, 32);
    
    // 0
    x = 0;
    y = h - h / 99.0 * levels[3];
    p.lineTo(x, y);
    if ( vPos == 0 || vPos == 1 ) {
        g.fillEllipse(x-2, y-2, 4, 4);
    }
    
    // 1
    x = d[0]*w;
    y = h - h / 99.0 * levels[0];
    p.lineTo( x, y );
    if ( vPos == 1 || vPos == 2 ) {
        g.fillEllipse(x-2, y-2, 4, 4);
    }
    
    // 2
    x = (d[0]+d[1])*w;
    y = h - h / 99.0 * levels[1];
    p.lineTo( x, y );
    if ( vPos == 2 || vPos == 3 ) {
        g.fillEllipse(x-2, y-2, 4, 4);
    }
    
    // 3
    x = (d[0]+d[1]+d[2])*w;
    y = h - h / 99.0 * levels[2];
    p.lineTo( x, y );
    if ( vPos == 3 || vPos == 4) {
        g.fillEllipse(x-2, y-2, 4, 4);
    }
    
    // 4
    x = keyoff*w;
    y = h - h / 99.0 * levels[2];
    p.lineTo( x, y );
    if ( vPos == 4 ) {
        g.fillEllipse(x-2, y-2, 4, 4);
    }
    
    // 5
    p.lineTo( (d[0]+d[1]+d[2]+keyoff+d[3])*w, h - h / 99.0 * levels[3] );
    
    p.lineTo(96,32);
    p.lineTo(0, 32);

    g.setColour(DXLookNFeel::fillColour);
    g.fillPath(p);
    
    g.setColour(Colour(0xFFFFFFFF));
    String len;
    len << ((int) vPos);
    g.drawText(len, 5, 1, 72, 14, Justification::left, true);
}
Example #11
void ArmRaiseProcess::advance(int delta)
{
    Process::advance(delta);
    if (getElapsedTime() == delta) {
        ProcessList::add(new Animation(Interpolation::Linear, m_arm->armAttitude.pitch, ARM_LOWERED_PITCH, getDuration()));
    }
}
Example #12
bool MxmlMeasure::parseMeasure(xml_node mel) {
	bool output = true;
	vector<vector<int> > staffVoiceCounts;
	setStartTimeOfMeasure();

	HumNum starttime = getStartTime();
	HumNum st   = starttime;
	HumNum maxst = starttime;

	xml_node nextel;
	for (auto el = mel.first_child(); el; el = el.next_sibling()) {
		MxmlEvent* event = new MxmlEvent(this);
		m_events.push_back(event);
		nextel = el.next_sibling();
		output &= event->parseEvent(el, nextel, starttime);
		starttime += event->getDuration();
		if (starttime > maxst) {
			maxst = starttime;
		}
	}
	setDuration(maxst - st);

	// Should no longer be needed:
	// calculateDuration();

	bool needdummy = false;

	MxmlMeasure* pmeasure = getPreviousMeasure();
	if (getTimeSigDur() <= 0) {
		if (pmeasure) {
			setTimeSigDur(pmeasure->getTimeSigDur());
		}
	}

	if (getDuration() == 0) {
		if (pmeasure) {
			setDuration(pmeasure->getTimeSigDur());
		} else {
			setDuration(getTimeSigDur());
		}
		needdummy = true;
	}

	// Maybe check for overfull measures around here

	if (needdummy || getEventCount() == 0) {
		// if the duration of the measure is zero, then set the duration
		// of the measure to the duration of the time signature
		// This is needed for certain cases of multi-measure rests, where no
		// full-measure rest is given in the measure (Sibelius does this).
		setDuration(getTimeSigDur());
		addDummyRest();
	}

	// Need to check for empty voice/layers occurring lower in the
	// voice index list than layers which contain notes.  For example
	// if voice/layer 2 contains notes, but voice/layer 1 does not, then
	// a dummy full-measure rest should fill voice/layer 1.  The voice
	// layer 1 should be filled with the duration of the measure according
	// to the other voice/layers in the measure.  This is done later
	// after a voice analysis has been done in
	// musicxml2hum_interface::insertMeasure(), specifically:
	// musicxml2hum_interface::checkForDummyRests().

	sortEvents();

	return output;
}
Example #13
File: item.cpp  Project: cp1337/devland
xmlNodePtr Item::serialize()
{
	xmlNodePtr nodeItem = xmlNewNode(NULL,(const xmlChar*)"item");

	std::stringstream ss;
	ss.str("");
	ss << getID();
	xmlSetProp(nodeItem, (const xmlChar*)"id", (const xmlChar*)ss.str().c_str());

	if(hasSubType()){
		ss.str("");
		ss << (int32_t)getSubType();
		xmlSetProp(nodeItem, (const xmlChar*)"count", (const xmlChar*)ss.str().c_str());
	}

	if(getSpecialDescription() != ""){
		ss.str("");
		ss << getSpecialDescription();
		xmlSetProp(nodeItem, (const xmlChar*)"special_description", (const xmlChar*)ss.str().c_str());
	}


	if(getText() != ""){
		ss.str("");
		ss << getText();
		xmlSetProp(nodeItem, (const xmlChar*)"text", (const xmlChar*)ss.str().c_str());
	}

	if(getWrittenDate() != 0){
		ss.str("");
		ss << getWrittenDate();
		xmlSetProp(nodeItem, (const xmlChar*)"written_date", (const xmlChar*)ss.str().c_str());
	}

	if(getWriter() != ""){
		ss.str("");
		ss << getWriter();
		xmlSetProp(nodeItem, (const xmlChar*)"writer", (const xmlChar*)ss.str().c_str());
	}

	if(!isNotMoveable() /*moveable*/){
		if(getActionId() != 0){
			ss.str("");
			ss << getActionId();
			xmlSetProp(nodeItem, (const xmlChar*)"actionId", (const xmlChar*)ss.str().c_str());
		}
	}

	if(hasAttribute(ATTR_ITEM_DURATION)){
		uint32_t duration = getDuration();
		ss.str("");
		ss << duration;
		xmlSetProp(nodeItem, (const xmlChar*)"duration", (const xmlChar*)ss.str().c_str());
	}

	uint32_t decayState = getDecaying();
	if(decayState == DECAYING_TRUE || decayState == DECAYING_PENDING){
		ss.str("");
		ss << decayState;
		xmlSetProp(nodeItem, (const xmlChar*)"decayState", (const xmlChar*)ss.str().c_str());
	}

	return nodeItem;
}
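
A hedged sketch of how a caller might emit the node returned by serialize(), using standard libxml2 calls (the document setup is an assumption, not part of the original):

// Hypothetical caller: wrap the serialized item in a document and print it.
xmlDocPtr doc = xmlNewDoc((const xmlChar*)"1.0");
xmlDocSetRootElement(doc, item->serialize());  // 'item' is an existing Item*
xmlSaveFormatFile("-", doc, 1);                // "-" writes indented XML to stdout
xmlFreeDoc(doc);                               // frees the document and the attached nodes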
Example #14
status_t NuPlayer::RTSPSource::dequeueAccessUnit(
        bool audio, sp<ABuffer> *accessUnit) {
    if (mBuffering) {
        if (!haveSufficientDataOnAllTracks()) {
            return -EWOULDBLOCK;
        }

        mBuffering = false;

        sp<AMessage> notify = dupNotify();
        notify->setInt32("what", kWhatBufferingEnd);
        notify->post();
    }

    sp<AnotherPacketSource> source = getSource(audio);

    if (source == NULL) {
        return -EWOULDBLOCK;
    }

    status_t finalResult;
    if (!source->hasBufferAvailable(&finalResult)) {
        if (finalResult == OK) {
            int64_t mediaDurationUs = 0;
            getDuration(&mediaDurationUs);
            sp<AnotherPacketSource> otherSource = getSource(!audio);
            status_t otherFinalResult;

            // If other source already signaled EOS, this source should also signal EOS
            if (otherSource != NULL &&
                    !otherSource->hasBufferAvailable(&otherFinalResult) &&
                    otherFinalResult == ERROR_END_OF_STREAM) {
                source->signalEOS(ERROR_END_OF_STREAM);
                return ERROR_END_OF_STREAM;
            }

            // If this source has detected near end, give it some time to retrieve more
            // data before signaling EOS
            if (source->isFinished(mediaDurationUs)) {
                int64_t eosTimeout = audio ? mEOSTimeoutAudio : mEOSTimeoutVideo;
                if (eosTimeout == 0) {
                    setEOSTimeout(audio, ALooper::GetNowUs());
                } else if ((ALooper::GetNowUs() - eosTimeout) > kNearEOSTimeoutUs) {
                    setEOSTimeout(audio, 0);
                    source->signalEOS(ERROR_END_OF_STREAM);
                    return ERROR_END_OF_STREAM;
                }
                return -EWOULDBLOCK;
            }

            if (!(otherSource != NULL && otherSource->isFinished(mediaDurationUs))) {
                // We should not enter buffering mode
                // if any of the sources already have detected EOS.
                mBuffering = true;

                sp<AMessage> notify = dupNotify();
                notify->setInt32("what", kWhatBufferingStart);
                notify->post();
            }

            return -EWOULDBLOCK;
        }
        return finalResult;
    }

    setEOSTimeout(audio, 0);

    return source->dequeueAccessUnit(accessUnit);
}
Example #15
F32 TSThread::getScaledDuration()
{
   return getDuration() / mFabs(timeScale);
}
Example #16
void TextureAtlasAnimation::play(double now, float rx, float ry)
{
    float t = now / getDuration();
    draw(t, rx, ry);
}
Example #17
void TSThread::advanceTime(F32 delta)
{
   advancePos(timeScale * delta / getDuration());
}
Example #18
void Animate3D::update(float t)
{
    if (_target)
    {
        if (_state == Animate3D::Animate3DState::FadeIn && _lastTime > 0.f)
        {
            _accTransTime += (t - _lastTime) * getDuration();
            
            _weight = _accTransTime / _transTime;
            if (_weight >= 1.0f)
            {
                _accTransTime = _transTime;
                _weight = 1.0f;
                _state = Animate3D::Animate3DState::Running;
                s_fadeInAnimates.erase(_target);
                s_runningAnimates[_target] = this;
            }
        }
        else if (_state == Animate3D::Animate3DState::FadeOut && _lastTime > 0.f)
        {
            _accTransTime += (t - _lastTime) * getDuration();
            
            _weight = 1 - _accTransTime / _transTime;
            if (_weight <= 0.0f)
            {
                _accTransTime = _transTime;
                _weight = 0.0f;
                
                s_fadeOutAnimates.erase(_target);
            }
        }
        _lastTime = t;
        
        if (_weight > 0.0f)
        {
            float transDst[3], rotDst[4], scaleDst[3];
            float* trans = nullptr, *rot = nullptr, *scale = nullptr;
            if (_playReverse)
                t = 1 - t;
            
            t = _start + t * _last;
 
            for (const auto& it : _boneCurves) {
                auto bone = it.first;
                auto curve = it.second;
                if (curve->translateCurve)
                {
                    curve->translateCurve->evaluate(t, transDst, _translateEvaluate);
                    trans = &transDst[0];
                }
                if (curve->rotCurve)
                {
                    curve->rotCurve->evaluate(t, rotDst, _roteEvaluate);
                    rot = &rotDst[0];
                }
                if (curve->scaleCurve)
                {
                    curve->scaleCurve->evaluate(t, scaleDst, _scaleEvaluate);
                    scale = &scaleDst[0];
                }
                bone->setAnimationValue(trans, rot, scale, this, _weight);
            }
            
            for (const auto& it : _nodeCurves)
            {
                auto node = it.first;
                auto curve = it.second;
                Mat4 transform;
                if (curve->translateCurve)
                {
                    curve->translateCurve->evaluate(t, transDst, _translateEvaluate);
                    transform.translate(transDst[0], transDst[1], transDst[2]);
                }
                if (curve->rotCurve)
                {
                    curve->rotCurve->evaluate(t, rotDst, _roteEvaluate);
                    Quaternion qua(rotDst[0], rotDst[1], rotDst[2], rotDst[3]);
                    transform.rotate(qua);
                }
                if (curve->scaleCurve)
                {
                    curve->scaleCurve->evaluate(t, scaleDst, _scaleEvaluate);
                    transform.scale(scaleDst[0], scaleDst[1], scaleDst[2]);
                }
                node->setAdditionalTransform(&transform);
            }
        }
    }
}
Example #19
CCActionInterval* CCClipIn::reverse() {
    return CCClipOut::create(getDuration(), ccp(-m_direction.x, -m_direction.y));
}
Example #20
int main( int argc, char ** argv ) {

	size_t requestNumber = 1000000000;
	int c;	

	while( (c = getopt( argc, argv, "i:" )) != -1 ) {
		switch( c ) {
		case ('i'):
			requestNumber = strtoll( optarg, NULL, 10 );
			break;
		case ('?'):
			if( optopt == 'i' ) {
				fprintf( stderr, "-i requires an iteration argument\n" );
			} else {
				fprintf( stderr, "Unknown option '-%c'.\n", optopt );
			}
			return -1;
		default:
			abort();
		}
	}

	/*if( argc != 2 ) {
		fprintf( stderr, "Too many parameters %d--Takes only iterations.\n", argc - 1 );
		return -1;
	}*/

	const size_t iterations = requestNumber; //strtoll( argv[ 1 ], NULL, 10 );

	enum Tests {
		BRANCH,
		JUMP,
		CONTROL,
		NTESTS
	};

	struct timeval begin;
	struct timeval end;
	struct timeval results[ NTESTS ];

	/* branch test */
	size_t * branchCondition = NULL;
	void * tmpPtr;

	gettimeofday( &begin, NULL );

	for( size_t itr = 0; itr < iterations; itr++ ) {
		if( branchCondition == NULL ) {
			branchCondition = TEST_DO( sizeof( size_t ) );
		}
		tmpPtr = TEST_DO( sizeof( size_t ) );
		TEST_UNDO( tmpPtr );
	}

	gettimeofday( &end, NULL );

	TEST_UNDO( branchCondition );	

	getDuration( &begin, &end, &results[ BRANCH ] );

	/* jump test */
	void * (*mallocPtr)( size_t ) = &TEST_DO;
	void ( * freePtr )( void * ) = &TEST_UNDO;

	gettimeofday( &begin, NULL );

	for( size_t itr = 0; itr < iterations; itr++ ) {
		tmpPtr = mallocPtr( sizeof( size_t ) );
		freePtr( tmpPtr );
	}

	gettimeofday( &end, NULL );

	getDuration( &begin, &end, &results[ JUMP ] );

	/* direct test */
	gettimeofday( &begin, NULL );

	for( size_t itr = 0; itr < iterations; itr++ ) {
		tmpPtr = TEST_DO( sizeof( size_t ) );
		TEST_UNDO( tmpPtr );
	}

	gettimeofday( &end, NULL );

	getDuration( &begin, &end, &results[ CONTROL ] );

	const char * testNames[ NTESTS ] = { "Branch", "Jump", "Control" };

	/* results */
	for( size_t index = 0; index < NTESTS; index++ ) {
		printf( "Test '%s': %llu.%05llu sec for %llu iterations\n", 
			testNames[ index ],
			(unsigned long long) results[ index ].tv_sec, 
			(unsigned long long) results[ index ].tv_usec, 
			(unsigned long long) iterations );
	}

	return 0;
}
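
The benchmark above calls a getDuration helper that is not shown; a minimal sketch consistent with its call sites (begin, end, out-parameter for the difference) might look like this (an assumption, not the original implementation):

#include <sys/time.h>

/* Hypothetical helper: store (end - begin) in result, normalizing microseconds. */
static void getDuration(struct timeval *begin, struct timeval *end, struct timeval *result) {
	result->tv_sec  = end->tv_sec  - begin->tv_sec;
	result->tv_usec = end->tv_usec - begin->tv_usec;
	if (result->tv_usec < 0) {  /* borrow one second if microseconds underflowed */
		result->tv_sec  -= 1;
		result->tv_usec += 1000000;
	}
}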
Example #21
void Animate3D::update(float t)
{
    if (_target)
    {
        if (_state == Animate3D::Animate3DState::FadeIn && _lastTime > 0.f)
        {
            _accTransTime += (t - _lastTime) * getDuration();
            
            _weight = _accTransTime / _transTime;
            if (_weight >= 1.0f)
            {
                _accTransTime = _transTime;
                _weight = 1.0f;
                _state = Animate3D::Animate3DState::Running;
                Sprite3D* sprite = static_cast<Sprite3D*>(_target);
                s_fadeInAnimates.erase(sprite);
                s_runningAnimates[sprite] = this;
            }
        }
        else if (_state == Animate3D::Animate3DState::FadeOut && _lastTime > 0.f)
        {
            _accTransTime += (t - _lastTime) * getDuration();
            
            _weight = 1 - _accTransTime / _transTime;
            if (_weight <= 0.0f)
            {
                _accTransTime = _transTime;
                _weight = 0.0f;
                
                Sprite3D* sprite = static_cast<Sprite3D*>(_target);
                s_fadeOutAnimates.erase(sprite);
            }
        }
        _lastTime = t;
        
        if (_weight > 0.0f)
        {
            float transDst[3], rotDst[4], scaleDst[3];
            float* trans = nullptr, *rot = nullptr, *scale = nullptr;
            if (_playReverse)
                t = 1 - t;
            
            t = _start + t * _last;
            for (const auto& it : _boneCurves) {
                auto bone = it.first;
                auto curve = it.second;
                if (curve->translateCurve)
                {
                    curve->translateCurve->evaluate(t, transDst, EvaluateType::INT_LINEAR);
                    trans = &transDst[0];
                }
                if (curve->rotCurve)
                {
                    curve->rotCurve->evaluate(t, rotDst, EvaluateType::INT_QUAT_SLERP);
                    rot = &rotDst[0];
                }
                if (curve->scaleCurve)
                {
                    curve->scaleCurve->evaluate(t, scaleDst, EvaluateType::INT_LINEAR);
                    scale = &scaleDst[0];
                }
                bone->setAnimationValue(trans, rot, scale, this, _weight);
            }
        }
    }
}
Example #22
	// return 0 to do nothing
	// return 1 to play last_state
	// return 2 to play silence
	int update( _DataType control_bit )
	{
		__DEBUG__NORMAL__ printf( "%u: Updating %s\n", timer_counter, GAUtil::Str<_DataType>::str( control_bit ).c_str() );

		int result = 0;
		bool do_commit = false;
		switch ( control_bit.toggle_type )
		{
		//bool - toggle if( value )
		case AudioBitEncodings::commit:
			do_commit = control_bit.toggle_value;
			if ( do_commit ) __DEBUG__NORMAL__ printf( "Saving current state:\n%s\n", state.toString().c_str() );
			// save our current state for reference; start making changes to our new state
			last_state = state;
			has_previous_state = true;
			break;
		case AudioBitEncodings::key_rotate:
			state.key_index += control_bit.toggle_value ? 1 : -1;
			range( state.key_index, (int) 0, (int) 12, true );
			__DEBUG__NORMAL__ printf( "Updated key_index: %u\n", state.key_index );
			break;
		case AudioBitEncodings::key_flip:
			if ( control_bit.toggle_value ) state.key_is_major = !state.key_is_major;
			__DEBUG__NORMAL__ printf( "Updated key_is_major: %u\n", state.key_is_major );
			break;

			// int - increment by value
		case AudioBitEncodings::duration:
			state.duration_index += control_bit.toggle_value;
			range( state.duration_index, (int) 0, (int) 4 );
			__DEBUG__NORMAL__ printf( "Updated duration_index: %u\n", state.duration_index );
			break;
		case AudioBitEncodings::time:
			state.beat_frequency_index += control_bit.toggle_value;
			range( state.beat_frequency_index, (int) 0, (int) 3 );
			__DEBUG__NORMAL__ printf( "Updated beat_frequency_index: %u\n", state.beat_frequency_index );
			break;
		case AudioBitEncodings::pitch:
			state.pitch_index += control_bit.toggle_value;
			range( state.pitch_index, (int) 21, (int) 68 );
			__DEBUG__NORMAL__ printf( "Updated pitch_index: %u\n", state.pitch_index );
			break;
		}

		if ( timer_enabled ) ++timer_counter;
		if ( timer_counter == timer_max ) timer_enabled = false;

		if ( !has_previous_state ) return 0;

		// commit reached
		if ( do_commit )
		{
			// our counter now contains the number of cycles for which we should play silence
			if ( counting_silence )
			{
				result = 2;
				last_note_duration = timer_counter;
				__DEBUG__NORMAL__ printf( "We should play silence for %u cycles\n", last_note_duration );
			}
			// we interrupted playback of the previous note; our counter shows how many cycles we should play it for
			else
			{
				result = 1;
				last_note_duration = timer_counter;
				__DEBUG__NORMAL__ printf( "Note interrupted. We should play sound for %u cycles\n", last_note_duration );
			}

			// start a timer to count the number of cycles the current note is active
			startTimer( getDuration( state.duration_index ) );
		}
		// we timed out, store how long we were playing the note for and start tracking how long we should play silence
		else if ( !timer_enabled )
		{
			result = 1;
			counting_silence = true;
			last_note_duration = timer_counter;
			__DEBUG__NORMAL__ printf( "Note finished playing normally. We should play sound for %u cycles\n", last_note_duration );
			startTimer();
		}

		return result;
	}
Example #23
bool AVIDecoder::seekIntern(const Audio::Timestamp &time) {
	// Can't seek beyond the end
	if (time > getDuration())
		return false;

	// Get our video
	AVIVideoTrack *videoTrack = (AVIVideoTrack *)_videoTracks[0].track;
	uint32 videoIndex = _videoTracks[0].index;

	// If we seek directly to the end, just mark the tracks as over
	if (time == getDuration()) {
		videoTrack->setCurFrame(videoTrack->getFrameCount() - 1);

		for (TrackListIterator it = getTrackListBegin(); it != getTrackListEnd(); it++)
			if ((*it)->getTrackType() == Track::kTrackTypeAudio)
				((AVIAudioTrack *)*it)->resetStream();

		return true;
	}

	// Get the frame we should be on at this time
	uint frame = videoTrack->getFrameAtTime(time);

	// Reset any palette, if necessary
	videoTrack->useInitialPalette();

	int lastKeyFrame = -1;
	int frameIndex = -1;
	uint curFrame = 0;

	// Go through and figure out where we should be
	// If there's a palette, we need to find the palette too
	for (uint32 i = 0; i < _indexEntries.size(); i++) {
		const OldIndex &index = _indexEntries[i];

		// We don't care about RECs
		if (index.id == ID_REC)
			continue;

		// We're only looking at entries for this track
		if (getStreamIndex(index.id) != videoIndex)
			continue;

		uint16 streamType = getStreamType(index.id);

		if (streamType == kStreamTypePaletteChange) {
			// We need to handle any palette change we see since there's no
			// flag to tell if this is a "key" palette.
			// Decode the palette
			_fileStream->seek(_indexEntries[i].offset + 8);
			Common::SeekableReadStream *chunk = 0;

			if (_indexEntries[i].size != 0)
				chunk = _fileStream->readStream(_indexEntries[i].size);

			videoTrack->loadPaletteFromChunk(chunk);
		} else {
			// Check to see if this is a keyframe
			// The first frame has to be a keyframe
			if ((_indexEntries[i].flags & AVIIF_INDEX) || curFrame == 0)
				lastKeyFrame = i;

			// Did we find the target frame?
			if (frame == curFrame) {
				frameIndex = i;
				break;
			}

			curFrame++;
		}
	}

	if (frameIndex < 0) // This shouldn't happen.
		return false;

	// Update all the audio tracks
	for (uint32 i = 0; i < _audioTracks.size(); i++) {
		AVIAudioTrack *audioTrack = (AVIAudioTrack *)_audioTracks[i].track;

		// Recreate the audio stream
		audioTrack->resetStream();

		// Set the chunk index for the track
		audioTrack->setCurChunk(frame);

		uint32 chunksFound = 0;
		for (uint32 j = 0; j < _indexEntries.size(); j++) {
			const OldIndex &index = _indexEntries[j];

			// Continue ignoring RECs
			if (index.id == ID_REC)
				continue;

			if (getStreamIndex(index.id) == _audioTracks[i].index) {
				if (chunksFound == frame) {
					_fileStream->seek(index.offset + 8);
					Common::SeekableReadStream *audioChunk = _fileStream->readStream(index.size);
					audioTrack->queueSound(audioChunk);
					_audioTracks[i].chunkSearchOffset = (j == _indexEntries.size() - 1) ? _movieListEnd : _indexEntries[j + 1].offset;
					break;
				}

				chunksFound++;
			}
		}

		// Skip any audio to bring us to the right time
		audioTrack->skipAudio(time, videoTrack->getFrameTime(frame));
	}

	// Decode from keyFrame to curFrame - 1
	for (int i = lastKeyFrame; i < frameIndex; i++) {
		if (_indexEntries[i].id == ID_REC)
			continue;

		if (getStreamIndex(_indexEntries[i].id) != videoIndex)
			continue;

		uint16 streamType = getStreamType(_indexEntries[i].id);

		// Ignore palettes, they were already handled
		if (streamType == kStreamTypePaletteChange)
			continue;

		// Frame, hopefully
		_fileStream->seek(_indexEntries[i].offset + 8);
		Common::SeekableReadStream *chunk = 0;

		if (_indexEntries[i].size != 0)
			chunk = _fileStream->readStream(_indexEntries[i].size);

		videoTrack->decodeFrame(chunk);
	}

	// Set the video track's frame
	videoTrack->setCurFrame((int)frame - 1);

	// Set the video track's search offset to the right spot
	_videoTracks[0].chunkSearchOffset = _indexEntries[frameIndex].offset;
	return true;
}
Example #24
/**
 * @brief TimeManipulator::isDuration
 * @param duration
 * @return
 */
bool TimeManipulator::isDuration(std::string duration)
{
  return TimeManipulator::isDuration(getDuration(duration));
}
Example #25
status_t BnMediaPlayer::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case DISCONNECT: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            disconnect();
            return NO_ERROR;
        } break;
        case SET_VIDEO_SURFACE: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            sp<ISurface> surface = interface_cast<ISurface>(data.readStrongBinder());
            reply->writeInt32(setVideoSurface(surface));
            return NO_ERROR;
        } break;
        case PREPARE_ASYNC: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(prepareAsync());
            return NO_ERROR;
        } break;
        case START: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(start());
            return NO_ERROR;
        } break;
        case STOP: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(stop());
            return NO_ERROR;
        } break;
        case IS_PLAYING: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            bool state;
            status_t ret = isPlaying(&state);
            reply->writeInt32(state);
            reply->writeInt32(ret);
            return NO_ERROR;
        } break;
        case PAUSE: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(pause());
            return NO_ERROR;
        } break;
        case SEEK_TO: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(seekTo(data.readInt32()));
            return NO_ERROR;
        } break;
        case GET_CURRENT_POSITION: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            int msec;
            status_t ret = getCurrentPosition(&msec);
            reply->writeInt32(msec);
            reply->writeInt32(ret);
            return NO_ERROR;
        } break;
        case GET_DURATION: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            int msec;
            status_t ret = getDuration(&msec);
            reply->writeInt32(msec);
            reply->writeInt32(ret);
            return NO_ERROR;
        } break;
        case RESET: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(reset());
            return NO_ERROR;
        } break;
        case SET_AUDIO_STREAM_TYPE: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(setAudioStreamType(data.readInt32()));
            return NO_ERROR;
        } break;
        case SET_LOOPING: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(setLooping(data.readInt32()));
            return NO_ERROR;
        } break;
        case SET_VOLUME: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(setVolume(data.readFloat(), data.readFloat()));
            return NO_ERROR;
        } break;
        case INVOKE: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            invoke(data, reply);
            return NO_ERROR;
        } break;
        case SET_METADATA_FILTER: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(setMetadataFilter(data));
            return NO_ERROR;
        } break;
        case SUSPEND: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(suspend());
            return NO_ERROR;
        } break;
        case RESUME: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(resume());
            return NO_ERROR;
        } break;
        case GET_METADATA: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            const status_t retcode = getMetadata(data.readInt32(), data.readInt32(), reply);
            reply->setDataPosition(0);
            reply->writeInt32(retcode);
            reply->setDataPosition(0);
            return NO_ERROR;
        } break;
        case SET_AUX_EFFECT_SEND_LEVEL: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(setAuxEffectSendLevel(data.readFloat()));
            return NO_ERROR;
        } break;
        case ATTACH_AUX_EFFECT: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(attachAuxEffect(data.readInt32()));
            return NO_ERROR;
        } break;
        case SET_PARAMETERS: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(setParameters(data.readString8()));
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}
Example #26
inline void rise(){
  period = pos;
  fallMark = ((uint32_t)period*getDuration())>>12;
  pos = 0;
  on();
}
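
The shift by 12 suggests getDuration() returns a duty value on a 4096-step (12-bit) fixed-point scale. A small worked sketch under that assumption:

// Assumption: getDuration() yields a duty value in [0, 4095].
uint32_t dutyToFallMark(uint32_t period, uint32_t duty12) {
    // fallMark = period * duty / 4096; e.g. (2000 * 1024) >> 12 == 500 ticks (25% duty)
    return (period * duty12) >> 12;
}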
Example #27
void
Sound_as::loadSound(const std::string& file, bool streaming)
{
    if (!_mediaHandler || !_soundHandler) {
        log_debug("No media or sound handlers, won't load any sound");
        return;
    }

    /// If we are already streaming stop doing so as we'll replace
    /// the media parser
    if (_inputStream) {
        _soundHandler->unplugInputStream(_inputStream);
        _inputStream = 0;
    }
    
    /// Mark sound as not being loaded
    // TODO: should we check for _soundLoaded == true?
    _soundLoaded = false;

    /// Delete any media parser being used (make sure we have detached!)
    _mediaParser.reset();

    /// Start at offset 0, in case a previous ::start() call
    /// changed that.
    _startTime = 0;

    const RunResources& rr = getRunResources(owner());
    URL url(file, rr.streamProvider().baseURL());

    const RcInitFile& rcfile = RcInitFile::getDefaultInstance();

    const StreamProvider& streamProvider = rr.streamProvider();
    std::auto_ptr<IOChannel> inputStream(streamProvider.getStream(url,
                rcfile.saveStreamingMedia()));

    if (!inputStream.get()) {
        log_error(_("Gnash could not open this URL: %s"), url );
        // dispatch onLoad (false)
        callMethod(&owner(), NSV::PROP_ON_LOAD, false);
        return;
    }

    externalSound = true;
    isStreaming = streaming;

    _mediaParser.reset(_mediaHandler->createMediaParser(inputStream).release());
    if (!_mediaParser) {
        log_error(_("Unable to create parser for Sound at %s"), url);
        // not necessarily correct, the stream might have been found...
        // dispatch onLoad (false)
        callMethod(&owner(), NSV::PROP_ON_LOAD, false);
        return;
    }

    // TODO: use global _soundbuftime
    if (isStreaming) {
        _mediaParser->setBufferTime(60000); // one minute buffer... should be fine
    } else {
        // If this is an event sound, we must not limit buffering (parsing),
        // because onLoad will not be called until we have finished doing so.
        _mediaParser->setBufferTime(std::numeric_limits<boost::uint64_t>::max());
    }

    startProbeTimer();

    owner().set_member(NSV::PROP_DURATION, getDuration());
    owner().set_member(NSV::PROP_POSITION, getPosition());
}
Example #28
CCActionInterval* CBJumpBy::reverse() {
    return CBJumpBy::create(getDuration(), -m_delta, m_height, m_nJumps, m_autoHeadOn, 180 - m_initAngle);
}