// Speak aText with the best-matching voice for (aUri, aLang), driving aTask.
// On no voice match, dispatches an error on the task and returns.
// For indirect-audio services the task is flagged accordingly; otherwise the
// task is bound to a lazily-created track-union MediaStream shared by this
// registry. Finally forwards the request to the chosen voice's service.
void nsSynthVoiceRegistry::Speak(const nsAString& aText, const nsAString& aLang, const nsAString& aUri, const float& aVolume, const float& aRate, const float& aPitch, nsSpeechTask* aTask)
{
  LOG(LogLevel::Debug,
      ("nsSynthVoiceRegistry::Speak text='%s' lang='%s' uri='%s' rate=%f pitch=%f",
       NS_ConvertUTF16toUTF8(aText).get(), NS_ConvertUTF16toUTF8(aLang).get(),
       NS_ConvertUTF16toUTF8(aUri).get(), aRate, aPitch));

  VoiceData* voice = FindBestMatch(aUri, aLang);

  if (!voice) {
    // No registered voice can satisfy the request; report failure on the task.
    NS_WARNING("No voices found.");
    aTask->DispatchError(0, 0);
    return;
  }

  aTask->SetChosenVoiceURI(voice->mUri);

  LOG(LogLevel::Debug, ("nsSynthVoiceRegistry::Speak - Using voice URI: %s",
                        NS_ConvertUTF16toUTF8(voice->mUri).get()));

  SpeechServiceType serviceType;

  // GetServiceType is only expected to fail in abnormal cases; the result is
  // checked in debug builds but the code proceeds regardless.
  DebugOnly<nsresult> rv = voice->mService->GetServiceType(&serviceType);
  NS_WARN_IF_FALSE(NS_SUCCEEDED(rv), "Failed to get speech service type");

  if (serviceType == nsISpeechService::SERVICETYPE_INDIRECT_AUDIO) {
    // The service produces audio itself; the task only relays events.
    aTask->SetIndirectAudio(true);
  } else {
    // Direct audio: lazily create the shared output stream and bind the task.
    if (!mStream) {
      mStream = MediaStreamGraph::GetInstance()->CreateTrackUnionStream(nullptr);
    }
    aTask->BindStream(mStream);
  }

  voice->mService->Speak(aText, voice->mUri, aVolume, aRate, aPitch, aTask);
}
// Scan `image` for every feature in `features` and return all matches found.
//
// For a feature with maxCount == 1, each sprite is template-matched once and
// any positive-valued best match is kept. Otherwise all matches above the
// sprite's detection threshold are collected (maxCount is not yet enforced —
// see FIXME below). Timing of the whole scan is logged via qDebug.
//
// Fixes vs. previous revision:
//  - the inner `else` branch redeclared `int found = 0;`, shadowing the outer
//    counter (bugprone; clang-tidy would flag it) — the outer counter is now
//    reused for both branches;
//  - `a.elapsed() / i` divided by zero when no sprite was searched (empty
//    feature list, or features with no sprites) — now guarded.
const std::list<FeatureMatch> match_features(const cv::Mat &image, const std::list<const Feature*> &features)
{
    QTime a;
    int searches = 0;  // total number of template searches, for the timing log
    a.start();

    std::list<FeatureMatch> feature_matches;

    for (const Feature *feature : features) {
        int found = 0;

        if (feature->maxCount == 1) {
            for (const Sprite &sprite : feature->sprites) {
                searches++;
                const Match m = FindBestMatch(image, MatchTemplate(sprite.img, sprite.mask), CV_TM_CCORR_NORMED);
                if (m.value > 0) {
                    feature_matches.push_back(FeatureMatch(feature, &sprite, m));
                    found++;
                }
            }
            if (!found) {
                qDebug() << "Not found" << feature->humanName;
            }
        } else {
            for (const Sprite &sprite : feature->sprites) {
                // FIXME: limited maxCount
                searches++;
                const std::list<Match> matches = FindAllMatches(image, MatchTemplate(sprite.img, sprite.mask), CV_TM_CCORR_NORMED, sprite.detectionThreshold);
                for (const Match &m : matches) {
                    feature_matches.push_back(FeatureMatch(feature, &sprite, m));
                }
                found += matches.size();
            }
            qDebug() << "Found" << found << "of" << feature->humanName;
        }
    }

    // Guard against division by zero when nothing was searched.
    qDebug() << "elapsed" << a.elapsed() << "per search"
             << (searches ? a.elapsed() / searches : 0);
    return feature_matches;
}
/*===========================================================================*
 *
 * BMotionSearchExhaust
 *
 *	does an exhaustive search for B-frame motion vectors
 *	see BMotionSearch for generic description
 *
 * DESCRIPTION:
 *	1)  find best backward and forward vectors
 *	2)  use exhaustive search to find best interpolating vectors
 *	3)  return the best of the 3 choices
 *
 * RETURNS:	one of MOTION_FORWARD, MOTION_BACKWARD, MOTION_INTERPOLATE
 *
 *===========================================================================*/
static int
BMotionSearchExhaust(LumBlock currentBlock, MpegFrame *prev, MpegFrame *next,
                     int by, int bx, int *fmy, int *fmx, int *bmy, int *bmx,
                     int oldMode)
{
    register int mx, my;
    int32 diff, bestDiff;
    int stepSize;
    LumBlock forwardBlock;
    int32 forwardErr, backErr;
    int newbmy, newbmx;
    int leftMY, leftMX;
    int rightMY, rightMX;
    /* Fix: was declared `boolean`, but it holds MOTION_* mode codes and is
     * returned from an int-returning function — declare it as int. */
    int result;

    /* STEP 1: best pure-forward and pure-backward vectors; seed bestDiff with
     * whichever of the two errors is smaller. */
    BMotionSearchNoInterp(currentBlock, prev, next, by, bx, fmy, fmx,
                          &forwardErr, bmy, bmx, &backErr, FALSE);

    if ( forwardErr <= backErr ) {
        bestDiff = forwardErr;
        result = MOTION_FORWARD;
    } else {
        bestDiff = backErr;
        result = MOTION_BACKWARD;
    }

    /* STEP 2: exhaustively try every forward vector in the search window;
     * for each, FindBestMatch finds the best backward partner. */
    stepSize = (pixelFullSearch ? 2 : 1);

    COMPUTE_MOTION_BOUNDARY(by,bx,stepSize,leftMY,leftMX,rightMY,rightMX);
    if ( searchRangeB < rightMY ) {
        rightMY = searchRangeB;
    }
    if ( searchRangeB < rightMX ) {
        rightMX = searchRangeB;
    }

    for ( my = -searchRangeB; my < rightMY; my += stepSize ) {
        if ( my < leftMY ) {
            continue;   /* outside the frame boundary */
        }

        for ( mx = -searchRangeB; mx < rightMX; mx += stepSize ) {
            if ( mx < leftMX ) {
                continue;
            }

            ComputeBMotionLumBlock(prev, next, by, bx, MOTION_FORWARD,
                                   my, mx, 0, 0, forwardBlock);

            /* seed the backward search at the current (my, mx) */
            newbmy = my;
            newbmx = mx;

            diff = FindBestMatch(forwardBlock, currentBlock, next, by, bx,
                                 &newbmy, &newbmx, bestDiff, searchRangeB);

            if ( diff < bestDiff ) {
                *fmy = my;
                *fmx = mx;
                *bmy = newbmy;
                *bmx = newbmx;
                bestDiff = diff;
                result = MOTION_INTERPOLATE;
            }
        }
    }

    return result;
}
/*===========================================================================*
 *
 * BMotionSearchCross2
 *
 *	does a cross-2 search for B-frame motion vectors
 *	see BMotionSearch for generic description
 *
 * DESCRIPTION:
 *	1)  find best backward and forward vectors
 *	2)  find best matching interpolating vectors
 *	3)  return the best of the 4 choices
 *
 *===========================================================================*/
static int
BMotionSearchCross2(LumBlock currentBlock, MpegFrame *prev, MpegFrame *next,
                    int by, int bx, int *fmy, int *fmx, int *bmy, int *bmx,
                    int oldMode)
{
    LumBlock forwardBlock, backBlock;
    int32 forwardErr, backErr, interpErr;
    int newfmy, newfmx, newbmy, newbmx;
    int32 interpErr2;
    int32 bestErr;

    /* STEP 1: best pure-forward and pure-backward vectors; bestErr is the
     * better of the two and bounds the interpolation searches below. */
    BMotionSearchNoInterp(currentBlock, prev, next, by, bx, fmy, fmx,
                          &forwardErr, bmy, bmx, &backErr, TRUE);

    bestErr = min(forwardErr, backErr);

    /* STEP 2: build the two half-predictions from the step-1 vectors. */
    ComputeBMotionLumBlock(prev, next, by, bx, MOTION_FORWARD,
                           *fmy, *fmx, 0, 0, forwardBlock);
    ComputeBMotionLumBlock(prev, next, by, bx, MOTION_BACKWARD,
                           0, 0, *bmy, *bmx, backBlock);

    /* try a cross-search; total of 4 local searches:
     * (a) fix the forward vector, search for the best backward partner;
     * (b) fix the backward vector, search for the best forward partner. */
    newbmy = *bmy;
    newbmx = *bmx;
    newfmy = *fmy;
    newfmx = *fmx;

    interpErr = FindBestMatch(forwardBlock, currentBlock, next, by, bx,
                              &newbmy, &newbmx, bestErr, searchRangeB);

    bestErr = min(bestErr, interpErr);

    interpErr2 = FindBestMatch(backBlock, currentBlock, prev, by, bx,
                               &newfmy, &newfmx, bestErr, searchRangeB);

    /* STEP 3: keep whichever cross-search won; the loser's searched vector
     * is reset to its step-1 value so (newf*, newb*) form a consistent pair. */
    if ( interpErr <= interpErr2 ) {
        newfmy = *fmy;          /* (a) won: forward vector stays as step 1 */
        newfmx = *fmx;
    } else {
        newbmy = *bmy;          /* (b) won: backward vector stays as step 1 */
        newbmx = *bmx;
        interpErr = interpErr2;
    }

    /* return the best of interpolate / forward / backward */
    if ( interpErr <= forwardErr ) {
        if ( interpErr <= backErr ) {
            *fmy = newfmy;
            *fmx = newfmx;
            *bmy = newbmy;
            *bmx = newbmx;
            return MOTION_INTERPOLATE;
        } else
            return MOTION_BACKWARD;
    } else if ( forwardErr <= backErr ) {
        return MOTION_FORWARD;
    } else {
        return MOTION_BACKWARD;
    }
}
// Evaluate whether a movement transition (juke/stop/start etc. — see the
// transition tables loaded into m_transitions) should trigger this frame.
//
// Builds STransitionSelectionParams from the current movement samples, finds
// the best matching transition and, if found, lets it update the movement
// request/body target. Returns the resulting transition state (eMTS_None when
// transitions are disabled, data is invalid, or nothing matched).
//
// The #ifndef _RELEASE tail only logs/draws debug output and never changes
// the returned state.
EMovementTransitionState CMovementTransitions::Update(
	const uint8 allowedTransitionFlags,
	const Lineseg& safeLine,
	const CTimeValue runningDuration,
	const bool bHasLockedBodyTarget,
	const Vec3& playerPos,
	const SMovementTransitionsSample& oldSample,
	const SMovementTransitionsSample& newSample,
	const float entitySpeed2D,
	const float entitySpeed2DAvg,
	const SExactPositioningTarget*const pExactPositioningTarget,
	CMovementTransitionsController*const pController,
	CPlayer*const pPlayer,
	CMovementRequest*const pRequest,
	SActorFrameMovementParams*const pMoveParams,
	float*const pJukeTurnRateFraction,
	Vec3*const pBodyTarget,
	const char**const pBodyTargetType ) const
{
	// Early out: transitions globally disabled via CVar, or transition data
	// failed to load/validate.
	if (!g_pGame->GetCVars()->g_movementTransitions_enable)
		return eMTS_None;

	if (!m_isDataValid)
		return eMTS_None;

	// Direction toward the current body target; falls back to the sample's
	// body direction when the target coincides with the player position.
	Vec3 targetBodyDirection = (*pBodyTarget - playerPos).GetNormalizedSafe(newSample.bodyDirection);

	// Prefer the stance the controller is transitioning into, if any.
	const EStance upcomingStance = pController->GetUpcomingStance();
	const EStance stance = (upcomingStance == STANCE_NULL) ? pPlayer->GetStance() : upcomingStance;

	STransitionSelectionParams transParams(
		*this,
		*pPlayer,
		*pRequest,
		playerPos,
		oldSample,
		newSample,
		bHasLockedBodyTarget,
		targetBodyDirection,
		safeLine,
		runningDuration,
		allowedTransitionFlags,
		entitySpeed2D,
		entitySpeed2DAvg,
		pExactPositioningTarget,
		stance,
		pMoveParams);

	const STransition* pTransition = NULL;
	int index = -1;
	STransitionMatch bestMatch;
	EMovementTransitionState newState = eMTS_None;

	if (transParams.m_transitionType != eTT_None)
	{
		// Select the best matching transition for the computed parameters and
		// let it drive the movement request / body target.
		FindBestMatch(transParams, &pTransition, &index, &bestMatch);

		if (pTransition)
		{
			newState = pTransition->Update(
				*this, transParams, bestMatch, playerPos,
				oldSample.moveDirection, newSample.moveDirection,
				pJukeTurnRateFraction, pBodyTarget, pBodyTargetType,
				pPlayer, pController);
		}
	}

#ifndef _RELEASE
	{
		bool bSignaled = (newState == eMTS_Requesting_Succeeded);

		// Log
		if (g_pGame->GetCVars()->g_movementTransitions_log && pTransition && bSignaled)
		{
			CRY_ASSERT(index < (int)m_transitions.size());
			CryLog("Transition\tentity=%s\tindex=%i\t%s\t%s", pPlayer->GetEntity()->GetName(), index, pTransition->GetDescription().c_str(), transParams.m_bPredicted?"Predicted":"Immediate");
		}

		// Debug
		if (g_pGame->GetCVars()->g_movementTransitions_debug)
		{
			// Reset the on-screen debug cursor once per rendered frame.
			if (MovementTransitionsDebug::s_debug_frame != gEnv->pRenderer->GetFrameID())
			{
				MovementTransitionsDebug::s_debug_frame = gEnv->pRenderer->GetFrameID();
				MovementTransitionsDebug::s_debug_y = 50;
			}

			float dist = transParams.m_transitionDistance;
			if (pRequest->HasMoveTarget())
			{
				dist = sqrtf(pRequest->GetMoveTarget().GetSquaredDistance2D(playerPos));
			}

			gEnv->pRenderer->Draw2dLabel(
				8.f, (float)MovementTransitionsDebug::s_debug_y, 1.5f,
				bSignaled ? MovementTransitionsDebug::s_dbg_color_signaled : MovementTransitionsDebug::s_dbg_color_unsignaled,
				false,
				"entity=%s\ttype=%s\tspeed=%s\tdist=%3.2f\tTAngle=%3.2f\tarrivalAngle=%3.2f\ttargTAngle=%3.2f\tjukeAngle=%3.2f\tflags=%d",
				pPlayer->GetEntity()->GetName(),
				MovementTransitionsDebug::GetTransitionTypeName(transParams.m_transitionType),
				MovementTransitionsDebug::GetPseudoSpeedName(transParams.m_pseudoSpeed),
				dist,
				RAD2DEG(transParams.m_travelAngle),
				RAD2DEG(transParams.m_arrivalAngle),
				RAD2DEG(transParams.m_targetTravelAngle),
				RAD2DEG(transParams.m_jukeAngle),
				(const unsigned int)allowedTransitionFlags );
			MovementTransitionsDebug::s_debug_y += 14;

			if (pTransition)
			{
				gEnv->pRenderer->Draw2dLabel(
					8.f, (float)MovementTransitionsDebug::s_debug_y, 1.5f,
					bSignaled ? MovementTransitionsDebug::s_dbg_color_signaled : MovementTransitionsDebug::s_dbg_color_unsignaled,
					false,
					"Transition\tindex=%i\t%s\tangleDiff=%3.2f",
					index, pTransition->GetDescription().c_str(), RAD2DEG(bestMatch.angleDifference) );
			}
			MovementTransitionsDebug::s_debug_y += 14;
		}
	}
#endif

	return newState;
}
// Walk every video timing mode / depth mode combination that walkDevice's
// slot resources advertise (via the Slot Manager) and, for each combination
// that Display Manager 1.0 reports as valid, ask FindBestMatch whether it is
// a better fit for the request than what requestRecPtr currently holds; if
// so, record the mode and its switch info in requestRecPtr.
void GetRequestTheDM1Way (VideoRequestRecPtr requestRecPtr, GDHandle walkDevice)
{
	AuxDCEHandle	myAuxDCEHandle;
	unsigned long	depthMode;
	unsigned long	displayMode;
	OSErr			error;
	OSErr			errorEndOfTimings;
	short			height;		// NOTE(review): assigned below but otherwise unused
	short			jCount = 0;
	Boolean			modeOk;
	SpBlock			spAuxBlock;
	SpBlock			spBlock;
	unsigned long	switchFlags;
	VPBlock			*vpData;
	short			width;		// NOTE(review): assigned below but otherwise unused

	// Seed the slot parameter block from the driver's device control entry.
	myAuxDCEHandle = (AuxDCEHandle) GetDCtlEntry((**walkDevice).gdRefNum);
	spBlock.spSlot = (**myAuxDCEHandle).dCtlSlot;
	spBlock.spID = (**myAuxDCEHandle).dCtlSlotId;
	spBlock.spExtDev = (**myAuxDCEHandle).dCtlExtDev;
	spBlock.spHwDev = 0;					// we are going to get this pup
	spBlock.spParamData = 1<<foneslot;		// this slot, enabled, and it better be here.
	spBlock.spTBMask = 3;					// don't have constants for this yet
	errorEndOfTimings = SGetSRsrc(&spBlock);

	// get the spDrvrHW so we know the ID of this puppy. This is important
	// since some video cards support more than one display, and the spDrvrHW
	// ID can, and will, be used to differentiate them.
	if ( noErr == errorEndOfTimings )
	{
		// reinit the param block for the SGetTypeSRsrc loop, keep the spDrvrHW we just got
		spBlock.spID = 0;			// start at zero,
		spBlock.spTBMask = 2;		// 0b0010 - ignore DrvrSW - why ignore the SW side? Is it not important for video?
		spBlock.spParamData = (1<<fall) + (1<<foneslot) + (1<<fnext);	// 0b0111 - this slot, enabled or disabled, so we even get 640x399 on Blackbird
		spBlock.spCategory=catDisplay;
		spBlock.spCType=typeVideo;
		errorEndOfTimings = SGetTypeSRsrc(&spBlock);	// but only on 7.0 systems, not a problem since we require DM1.0

		// now, loop through all the timings for this GDevice
		if ( noErr == errorEndOfTimings ) do
		{
			// now, loop through all possible depth modes for this timing mode
			displayMode = (unsigned char)spBlock.spID;	// "timing mode, ie:resource ref number"

			for (jCount = firstVidMode; jCount<= sixthVidMode; jCount++)
			{
				depthMode = jCount;						// vid mode
				error = DMCheckDisplayMode(walkDevice,displayMode,depthMode,&switchFlags,0,&modeOk);

				// only if the mode is safe or we override it with the kAllValidModesBit request flag
				if (	noErr == error &&
						modeOk &&
						(	switchFlags & 1<<kNoSwitchConfirmBit ||
							requestRecPtr->requestFlags & 1<<kAllValidModesBit
						)
					)
				{
					// have a good displayMode/depthMode combo - now lets look inside
					spAuxBlock = spBlock;				// don't ruin the iteration spBlock!!
					spAuxBlock.spID = depthMode;		// vid mode
					error=SFindStruct(&spAuxBlock);		// get back a new spsPointer

					if (noErr == error)					// keep going if no error...
					{
						spAuxBlock.spID = 0x01;			// mVidParams request
						error=SGetBlock (&spAuxBlock);	// use the new spPointer and get back...a NewPtr'ed spResult

						if (noErr == error)				// ...keep going if no error...
						{
							// We have data! Let's have a look.
							vpData = (VPBlock*)spAuxBlock.spResult;
							height = vpData->vpBounds.bottom;	// left and top are usually zero
							width = vpData->vpBounds.right;

							// Better match than what we have recorded so far?
							if (FindBestMatch (requestRecPtr, vpData->vpPixelSize, vpData->vpBounds.right, vpData->vpBounds.bottom))
							{
								requestRecPtr->screenDevice = walkDevice;
								requestRecPtr->availBitDepth = vpData->vpPixelSize;
								requestRecPtr->availHorizontal = vpData->vpBounds.right;
								requestRecPtr->availVertical = vpData->vpBounds.bottom;
								requestRecPtr->displayMode = displayMode;
								requestRecPtr->depthMode = depthMode;

								// fill in for completeness
								requestRecPtr->switchInfo.csMode = depthMode;
								requestRecPtr->switchInfo.csData = displayMode;
								requestRecPtr->switchInfo.csPage = 0;
								requestRecPtr->switchInfo.csBaseAddr = 0;
								requestRecPtr->switchInfo.csReserved = 0;

								if (switchFlags & 1<<kNoSwitchConfirmBit)
									requestRecPtr->availFlags = 0;							// mode safe
								else
									requestRecPtr->availFlags = 1<<kModeValidNotSafeBit;	// mode valid but not safe, requires user validation of mode switch
							}

							// SGetBlock allocated spResult with NewPtr; release it.
							if (spAuxBlock.spResult)
								DisposePtr ((Ptr)spAuxBlock.spResult);	// toss this puppy when done
						}
					}
				}
			}

			// go around again, looking for timing modes for this GDevice
			spBlock.spTBMask = 2;		// ignore DrvrSW
			spBlock.spParamData = (1<<fall) + (1<<foneslot) + (1<<fnext);	// next resource, this slot, whether enabled or disabled
			errorEndOfTimings = SGetTypeSRsrc(&spBlock);
		} while ( noErr == errorEndOfTimings );		// until the end of this GDevice
	}
}
void TextMateAnnotator::Tokenize( const char* docStart, const char* docEnd ) { TextMatePatterns* patterns = m_language.defaultPatterns; std::vector<TextMatePatterns*> patternsStack; uint32_t style = m_defaultStyle; std::vector<uint32_t> styleStack; const char* lineStart = docStart; const char* searchStart = lineStart; const char* lineEnd = std::find( lineStart, docEnd, 0x0A ); bool isFirstSearch = true; while ( searchStart != docEnd ) { TextMateBestMatch match; if ( searchStart != lineEnd ) match = FindBestMatch( lineStart, searchStart, lineEnd, isFirstSearch, patterns->regexes ); if ( match.index >= 0 ) { AddTokensUpTo( lineStart - docStart + match.start, style ); AddTokens( patterns->regexes[match.index], patterns->captures[match.index], patterns->styles[match.index], lineStart - docStart ); searchStart = lineStart + match.start + match.length; if ( match.index == 0 && !patternsStack.empty() ) { style = styleStack.back(); styleStack.pop_back(); patterns = patternsStack.back(); patternsStack.pop_back(); isFirstSearch = true; } else if ( patterns->patterns[match.index] ) { styleStack.push_back( style ); style = patterns->styles[match.index]; patternsStack.push_back( patterns ); patterns = patterns->patterns[match.index]; isFirstSearch = true; } } else { lineStart = (std::min)( lineEnd + 1, docEnd ); searchStart = lineStart; lineEnd = std::find( lineStart, docEnd, 0x0A ); isFirstSearch = true; } } AddTokensUpTo( docEnd - docStart, style ); }
//take input pattern, process it and remember the output
//pattern is array of length InputCount containing the input pattern
//
// Returns false only when a new sequence could not be stored (memory full and
// no low-usage slot available). Outputs (NameOutput / cMemIndex) are set to
// the index of the best-matching memory, or NeoParameters::OutputNone on
// storage failure.
bool SubRegion::FeedForward( unsigned *pattern, bool memorize, unsigned pLowUsageThreshold )
{
	bool lRetCode = true;

	// CurrentSequence is the single sequence that is being worked on
	// Sequence length is always one.
	CurrentSequence->AddPattern(pattern);

	if(CurrentSequence->Complete())
	{
		unsigned int lLearnedIndex;
		double lMatchPrecision;
		BestMatch lBestMatch;

		// Find the closest previously-learned sequence and its precision.
		FindBestMatch(*CurrentSequence, lLearnedIndex, lMatchPrecision);
		lBestMatch.Precision = lMatchPrecision;
		lBestMatch.Index = lLearnedIndex;

		if(memorize && lBestMatch.Precision < 1)  // potentially treat as a new sequence
		{
			int lRetIndex = 0;
			lRetIndex = MyRegion.AddSequence(*CurrentSequence);

			if ( -1 == lRetIndex )
			{
				// New at version 1.4
				// Memory full so add it to the best place we can find
				// But only for a reasonably good match
				//
				if ( lBestMatch.Precision > 0.95 )
				{
					// Close enough: reinforce the existing memory instead.
					MyRegion.Memory(lBestMatch.Index).IncFrequency();
				}
				else
				{
					// Try to evict a rarely-used memory to make room.
					int lLowUsageIndex = FindLowUsage(pLowUsageThreshold);

					if ( lLowUsageIndex != -1 )
					{
						// Replace the memory with the one at the top
						MyRegion.ForgetMemory( lLowUsageIndex );

						// Add the new sequence to the top
						lRetIndex = MyRegion.AddSequence(*CurrentSequence);

						if ( -1 == lRetIndex )
						{
							// Still no room even after eviction.
							lRetCode = false;
							lBestMatch.Index = NeoParameters::OutputNone;
						}
						else
						{
							lBestMatch.Index = (unsigned int) lRetIndex;	// this should be the top of memory
						}
					}
					else
					{
						// Sequence was not added.
						lRetCode = false;
						lBestMatch.Index = NeoParameters::OutputNone;

						// Would be nice to log - but we do not have logging in this class right now.
						//std::ostringstream lLogStream;
						//lLogStream << "Cannot find a low-usage slot for new sequence." << " Memory is full."; //<< std::endl;
						//cGuiUtils->Log( std::string( lLogStream.str() ) );
					}
				}
			}
			else
			{
				// Memory already had space for the new sequence (frequency is already set to 1)
				lBestMatch.Index = (unsigned int) lRetIndex;
			}
		}
		else
		{
			if ( memorize && lBestMatch.Precision == 1 )
			{
				// Match precision is 100%
				MyRegion.Memory(lBestMatch.Index).IncFrequency();
			}
		}

		NameOutput = lBestMatch.Index;	//index of the best match for the new sequence
		cMemIndex = lBestMatch.Index;	//used in Contextual

		CurrentSequence->Clear();
	}

	return lRetCode;
}