// update updates the volume and frequency according to the user's intervention
// and resets the reference time
//
// now - current timestamp (same time base as lastUpdate / lastVolumeChange);
//       used to throttle user-driven changes through SOUND_LATENCY
void Audio::update(int now) {

    long dz = 0;
    int delta = now - lastUpdate;

    // joystick input
    // NOTE(review): dz and delta are computed but never used below --
    // presumably leftovers from a removed feature. The context->get() call is
    // kept in case it has side effects; confirm before deleting.
    int iy = context->get(GF_CT_POSY);
    if (iy)
        dz = (long)(iy * CTR_DISPLACEMENT_FACTOR);

    // throttle: accept at most one volume/frequency adjustment per SOUND_LATENCY
    if (now - lastVolumeChange > SOUND_LATENCY)
    {
        lastVolumeChange = now;

        // volume keys
        if (context->pressed(AUD_VOLUME_DEC))
            attenuate(-1);
        if (context->pressed(AUD_VOLUME_INC))
            attenuate(1);

        // drop frequency if AUD_SLOW_DOWN, clamped at MIN_FREQUENCY
        if (context->pressed(AUD_SLOW_DOWN)) {
            int newfreq = freq - STEP_FREQUENCY;
            freq = newfreq < MIN_FREQUENCY ? MIN_FREQUENCY : newfreq;
            context->set(GF_AU_FREQ, freq);
        }
        // raise frequency if AUD_SPEED_UP, clamped at MAX_FREQUENCY
        else if (context->pressed(AUD_SPEED_UP)) {
            int newfreq = freq + STEP_FREQUENCY;
            // BUG FIX: the original test was inverted
            // (newfreq < MAX_FREQUENCY ? MAX_FREQUENCY : newfreq), which
            // jumped straight to MAX_FREQUENCY on the first key press
            // instead of stepping up by STEP_FREQUENCY.
            freq = newfreq > MAX_FREQUENCY ? MAX_FREQUENCY : newfreq;
            context->set(GF_AU_FREQ, freq);
        }
    }

    // update the Sound objects
    for (int i = 0; i < noSounds; i++)
        if (sound[i])
            sound[i]->update();

    // reset reference time
    lastUpdate = now;
}
void main() {
	// distance-based light attenuation for this fragment
	float lightFactor = attenuate(distance(lightPosition, varPosition), 4.0f, 6.0f);
	// sample the diffuse texture at the interpolated coordinates
	vec4 texel = texture2D(diffuse, texCoordVar);
	// modulate the texel by brightness, but keep the texture's original alpha
	gl_FragColor = texel * lightFactor;
	gl_FragColor.a = texel.a;
}
// Example #3
// 0
/** 
 * motionMagnify	-	eulerian motion magnification
 *
 * Processes the currently opened video frame by frame: builds a Laplacian
 * pyramid per frame, temporally filters each level with an IIR filter,
 * amplifies the filtered bands, reconstructs a motion image, and adds it
 * back onto the source frame. The result is written to a temp file which
 * then replaces the current input. Progress/completion is reported via the
 * updateProcessProgress / revert / closeProgressDialog signals.
 *
 * Uses member state: levels, curLevel, fnumber, alpha, lambda_c, lambda,
 * delta, exaggeration_factor, lowpass1/lowpass2 (IIR filter state),
 * tempWriter/tempFile, curPos, stop, modify.
 */
void VideoProcessor::motionMagnify()
{
    // set filter
    setSpatialFilter(LAPLACIAN);
    setTemporalFilter(IIR);

    // create a temp file
    createTemp();

    // current frame
    cv::Mat input;
    // output frame
    cv::Mat output;

    // motion image
    cv::Mat motion;

    // per-frame Laplacian pyramid and its temporally-filtered counterpart
    std::vector<cv::Mat> pyramid;
    std::vector<cv::Mat> filtered;

    // if no capture device has been set
    if (!isOpened())
        return;

    // set the modify flag to be true
    modify = true;

    // is processing
    stop = false;

    // save the current position
    long pos = curPos;
    // jump to the first frame
    jumpTo(0);

    while (!isStop()) {

        // read next frame if any
        if (!getNextFrame(input))
            break;

        // normalize to float [0,1] before color conversion
        input.convertTo(input, CV_32FC3, 1.0/255.0f);

        // 1. convert to Lab color space
        cv::cvtColor(input, input, CV_BGR2Lab);

        // 2. spatial filtering one frame
        // (clone so the pyramid build does not alias the source frame,
        // which is later combined with the motion image)
        cv::Mat s = input.clone();
        spatialFilter(s, pyramid);

        // 3. temporal filtering one frame's pyramid
        // and amplify the motion
        if (fnumber == 0){      // is first frame
            // seed the IIR filter state with the first pyramid;
            // the first frame itself is passed through unamplified
            lowpass1 = pyramid;
            lowpass2 = pyramid;
            filtered = pyramid;
        } else {
            // temporalFilter() reads curLevel to pick the matching
            // lowpass1/lowpass2 state for each pyramid level
            for (int i=0; i<levels; ++i) {
                curLevel = i;
                temporalFilter(pyramid.at(i), filtered.at(i));
            }

            // amplify each spatial frequency bands
            // according to Figure 6 of paper            
            cv::Size filterSize = filtered.at(0).size();
            int w = filterSize.width;
            int h = filterSize.height;

            // spatial-cutoff bound on amplification (members read by amplify())
            delta = lambda_c/8.0/(1.0+alpha);
            // the factor to boost alpha above the bound
            // (for better visualization)
            exaggeration_factor = 2.0;

            // compute the representative wavelength lambda
            // for the lowest spatial frequency band of Laplacian pyramid
            lambda = sqrt(w*w + h*h)/3;  // 3 is experimental constant

            // NOTE(review): iterates levels..0 inclusive, i.e. levels+1
            // images -- presumably the pyramid holds `levels` band-pass
            // images plus the final lowpass residual; confirm against
            // spatialFilter()/reconImgFromLaplacianPyramid()
            for (int i=levels; i>=0; i--) {
                curLevel = i;

                amplify(filtered.at(i), filtered.at(i));

                // go one level down on pyramid
                // representative lambda will reduce by factor of 2
                lambda /= 2.0;
            }
        }

        // 4. reconstruct motion image from filtered pyramid
        reconImgFromLaplacianPyramid(filtered, levels, motion);

        // 5. attenuate I, Q channels
        // NOTE(review): comment mentions I,Q but the frame is in Lab here --
        // presumably attenuate() damps the two chroma channels (a,b);
        // the I/Q wording looks left over from a YIQ implementation
        attenuate(motion, motion);

        // 6. combine source frame and motion image
        if (fnumber > 0)    // don't amplify first frame
            s += motion;

        // 7. convert back to rgb color space and CV_8UC3
        output = s.clone();
        cv::cvtColor(output, output, CV_Lab2BGR);
        // scale [0,1] float back to [0,255] bytes
        // NOTE(review): the 1.0/255.0 offset term looks unintentional
        // (adds ~0.004 before rounding) -- confirm before changing
        output.convertTo(output, CV_8UC3, 255.0, 1.0/255.0);

        // write the frame to the temp file
        tempWriter.write(output);

        // update process
        std::string msg= "Processing...";
        emit updateProcessProgress(msg, floor((fnumber++) * 100.0 / length));
    }
    // only revert the UI if processing ran to completion (not user-stopped)
    if (!isStop()){
        emit revert();
    }
    emit closeProgressDialog();

    // release the temp writer
    tempWriter.release();

    // change the video to the processed video 
    setInput(tempFile);

    // jump back to the original position
    jumpTo(pos);
}