/* Example #1 */
/**
 * Originally a part of xf86PostMotionEvent; modifies valuators
 * in-place. Retained mostly for embedded scenarios.
 *
 * Scales the X (valuator 0) and Y (valuator 1) deltas by the pointer
 * feedback ratio num/den.  With a configured threshold, motion below it
 * passes through untouched (classic two-speed acceleration); without a
 * threshold, a power-law profile is applied instead.
 *
 * @param dev     device whose ptrfeed control supplies num/den/threshold
 * @param val     valuator mask holding the X/Y deltas; updated in place
 * @param ignored unused (kept for signature compatibility)
 */
void
acceleratePointerLightweight(
    DeviceIntPtr dev,
    ValuatorMask* val,
    CARD32 ignored)
{
    double mult = 0.0, tmpf;
    double dx = 0.0, dy = 0.0;

    if (valuator_mask_isset(val, 0)) {
        dx = valuator_mask_get(val, 0);
    }

    if (valuator_mask_isset(val, 1)) {
        dy = valuator_mask_get(val, 1);
    }

    /* No valuators at all, or a pure zero motion: nothing to accelerate.
     * Returning early on zero deltas also avoids evaluating
     * pow(0, negative) in the profile branch below, which would raise a
     * spurious FE_DIVBYZERO and yield +inf whenever num < den — the
     * original code computed that mult and then never used it. */
    if (valuator_mask_num_valuators(val) == 0 || (dx == 0.0 && dy == 0.0))
        return;

    if (dev->ptrfeed && dev->ptrfeed->ctrl.num) {
        /* modeled from xf86Events.c */
        if (dev->ptrfeed->ctrl.threshold) {
            /* two-speed scheme: accelerate only once the summed
             * magnitude reaches the threshold */
            if ((fabs(dx) + fabs(dy)) >= dev->ptrfeed->ctrl.threshold) {
                if (dx != 0.0) {
                    tmpf = (dx * (double)(dev->ptrfeed->ctrl.num)) /
                           (double)(dev->ptrfeed->ctrl.den);
                    valuator_mask_set_double(val, 0, tmpf);
                }

                if (dy != 0.0) {
                    tmpf = (dy * (double)(dev->ptrfeed->ctrl.num)) /
                           (double)(dev->ptrfeed->ctrl.den);
                    valuator_mask_set_double(val, 1, tmpf);
                }
            }
        }
        else {
            /* polynomial profile: mult = |v|^(num/den - 1) / 2, with
             * |v|^2 = dx^2 + dy^2 (exponent halved to skip the sqrt) */
            mult = pow(dx * dx + dy * dy,
                       ((double)(dev->ptrfeed->ctrl.num) /
                        (double)(dev->ptrfeed->ctrl.den) - 1.0) /
                       2.0) / 2.0;
            if (dx != 0.0)
                valuator_mask_set_double(val, 0, mult * dx);
            if (dy != 0.0)
                valuator_mask_set_double(val, 1, mult * dy);
        }
    }
}
/**
 * Originally a part of xf86PostMotionEvent; modifies valuators
 * in-place. Retained mostly for embedded scenarios.
 *
 * Integer variant of the lightweight scheme: the X (valuator 0) and Y
 * (valuator 1) deltas are scaled by the pointer feedback ratio num/den,
 * and the truncated sub-pixel fraction is carried between calls in
 * dev->last.remainder[0..1] so slow motion is not lost to rounding.
 * 'ignored' is unused.
 * NOTE(review): duplicates the name of the double-based variant above —
 * only one of the two can be compiled into the same translation unit.
 */
void
acceleratePointerLightweight(
    DeviceIntPtr dev,
    ValuatorMask* val,
    CARD32 ignored)
{
    float mult = 0.0, tmpf;
    int dx = 0, dy = 0, tmpi;

    if (valuator_mask_isset(val, 0)) {
        dx = valuator_mask_get(val, 0);
    }

    if (valuator_mask_isset(val, 1)) {
        dy = valuator_mask_get(val, 1);
    }

    /* zero motion: nothing to scale (also keeps pow(0, ...) below away) */
    if (!dx && !dy)
        return;

    if (dev->ptrfeed && dev->ptrfeed->ctrl.num) {
        /* modeled from xf86Events.c */
        if (dev->ptrfeed->ctrl.threshold) {
            /* two-speed scheme: scale only at/above the threshold;
             * slower motion passes through unchanged */
            if ((abs(dx) + abs(dy)) >= dev->ptrfeed->ctrl.threshold) {
                /* scaled delta plus the fraction left over last time */
                tmpf = ((float)dx *
                        (float)(dev->ptrfeed->ctrl.num)) /
                       (float)(dev->ptrfeed->ctrl.den) +
                       dev->last.remainder[0];
                if (dx) {
                    tmpi = (int) tmpf;
                    valuator_mask_set(val, 0, tmpi);
                    /* stash the truncated fraction for the next event */
                    dev->last.remainder[0] = tmpf - (float)tmpi;
                }

                tmpf = ((float)dy *
                        (float)(dev->ptrfeed->ctrl.num)) /
                       (float)(dev->ptrfeed->ctrl.den) +
                       dev->last.remainder[1];
                if (dy) {
                    tmpi = (int) tmpf;
                    valuator_mask_set(val, 1, tmpi);
                    dev->last.remainder[1] = tmpf - (float)tmpi;
                }
            }
        }
        else {
            /* polynomial profile: mult = |v|^(num/den - 1) / 2, with
             * |v|^2 = dx^2 + dy^2 (exponent halved to avoid a sqrt) */
            mult = pow((float)dx * (float)dx + (float)dy * (float)dy,
                       ((float)(dev->ptrfeed->ctrl.num) /
                        (float)(dev->ptrfeed->ctrl.den) - 1.0) /
                       2.0) / 2.0;
            if (dx) {
                /* accelerated delta plus carried fraction, truncated;
                 * the new fraction is saved for the next event */
                tmpf = mult * (float)dx +
                       dev->last.remainder[0];
                tmpi = (int) tmpf;
                valuator_mask_set(val, 0, tmpi);
                dev->last.remainder[0] = tmpf - (float)tmpi;
            }
            if (dy) {
                tmpf = mult * (float)dy +
                       dev->last.remainder[1];
                tmpi = (int)tmpf;
                valuator_mask_set(val, 1, tmpi);
                dev->last.remainder[1] = tmpf - (float)tmpi;
            }
        }
    }
}
/**
 * Modifies valuators in-place.
 * This version employs a velocity approximation algorithm to
 * enable fine-grained predictable acceleration profiles.
 *
 * @param dev    device being accelerated; dev->last.remainder[0..1]
 *               carries the sub-pixel fractions across events
 * @param val    valuator mask with the X/Y deltas; updated in place
 * @param evtime event timestamp, fed to the velocity tracker
 */
void
acceleratePointerPredictable(
    DeviceIntPtr dev,
    ValuatorMask* val,
    CARD32 evtime)
{
    float fdx, fdy, tmp, mult; /* no need to init */
    int dx = 0, dy = 0, tmpi;
    DeviceVelocityPtr velocitydata = GetDevicePredictableAccelData(dev);
    Bool soften = TRUE;

    if (!velocitydata)
        return;

    if (velocitydata->statistics.profile_number == AccelProfileNone &&
        velocitydata->const_acceleration == 1.0f) {
        return; /*we're inactive anyway, so skip the whole thing.*/
    }

    if (valuator_mask_isset(val, 0)) {
        dx = valuator_mask_get(val, 0);
    }

    if (valuator_mask_isset(val, 1)) {
        dy = valuator_mask_get(val, 1);
    }

    if (dx || dy){
        /* reset non-visible state? */
        if (ProcessVelocityData2D(velocitydata, dx , dy, evtime)) {
            soften = FALSE;
        }

        if (dev->ptrfeed && dev->ptrfeed->ctrl.num) {
            /* invoke acceleration profile to determine acceleration */
            mult = ComputeAcceleration (dev, velocitydata,
                                        dev->ptrfeed->ctrl.threshold,
                                        (float)dev->ptrfeed->ctrl.num /
                                            (float)dev->ptrfeed->ctrl.den);

            if(mult != 1.0f || velocitydata->const_acceleration != 1.0f) {
                ApplySofteningAndConstantDeceleration(velocitydata,
                                                      dx, dy,
                                                      &fdx, &fdy,
                                                      (mult > 1.0f) && soften);

                if (dx) {
                    tmp = mult * fdx + dev->last.remainder[0];
                    /* Since it may not be apparent: lrintf() does not offer
                     * strong statements about rounding; however because we
                     * process each axis conditionally, there's no danger
                     * of a toggling remainder. Its lack of guarantees likely
                     * makes it faster on the average target. */
                    tmpi = lrintf(tmp);
                    valuator_mask_set(val, 0, tmpi);
                    dev->last.remainder[0] = tmp - (float)tmpi;
                }
                if (dy) {
                    tmp = mult * fdy + dev->last.remainder[1];
                    tmpi = lrintf(tmp);
                    valuator_mask_set(val, 1, tmpi);
                    dev->last.remainder[1] = tmp - (float)tmpi;
                }
                /* Fixed: the old trace referenced *px / *py, locals from a
                 * pre-ValuatorMask signature that no longer exist here and
                 * broke the build whenever debug logging was enabled. */
                DebugAccelF("delta x:%.3f y:%.3f remainders x: %.3f y: %.3f\n",
                            fdx, fdy,
                            dev->last.remainder[0], dev->last.remainder[1]);
            }
        }
    }
    /* remember last motion delta (for softening/slow movement treatment) */
    velocitydata->last_dx = dx;
    velocitydata->last_dy = dy;
}