Example #1
0
// moc-style meta-call dispatcher for Viewfinder: routes a meta-method index
// (_id) to the corresponding invokable method, after letting the QWidget base
// class consume any indices it owns.
// _a is the moc argument array: _a[0] is the return-value slot, _a[1..] hold
// the marshalled arguments.
// NOTE: the case order and the final offset (15) must match the method order
// in this class's staticMetaObject — do not reorder by hand.
int Viewfinder::qt_metacall(QMetaObject::Call _c, int _id, void **_a)
{
    // Base class gets first chance; it returns the remaining local index,
    // or a negative value when it fully handled the call.
    _id = QWidget::qt_metacall(_c, _id, _a);
    if (_id < 0)
        return _id;
    if (_c == QMetaObject::InvokeMetaMethod) {
        switch (_id) {
        // case 0 is the only method taking an argument: an int at _a[1].
        case 0: processFrames((*reinterpret_cast< int(*)>(_a[1]))); break;
        case 1: processFrame(); break;
        case 2: toggleCube(); break;
        case 3: toggleGourd(); break;
        case 4: paintCube(); break;
        case 5: paintGourd(); break;
        case 6: changeX(); break;
        case 7: changeY(); break;
        case 8: changeZ(); break;
        case 9: rotateX(); break;
        case 10: rotateY(); break;
        case 11: rotateZ(); break;
        case 12: plus(); break;
        case 13: minus(); break;
        case 14: openDirectory(); break;
        default: ;
        }
        // 15 methods handled here; pass any higher indices on to subclasses.
        _id -= 15;
    }
    return _id;
}
Example #2
0
int getFrames(int id, char* fname, callback cb) {

  state s;

  s.id = id;
  s.buffer = NULL;
  s.pCodec = NULL;
  s.pFrame = NULL; 
  s.sws_ctx = NULL;
  s.pCodecCtx = NULL;
  s.pFrameRGB = NULL;
  s.pFormatCtx = NULL;
  s.optionsDict = NULL;

  if (-1 == initState(fname, &s)) {
    return -1;
  }

  processFrames(&s, cb);

  cleanupState(&s);
  return 7;
}
// Fetch the next buffer of time-stretched audio for the caller.
//
// Strategy: serve requests out of mLocalBufferData. If previously processed
// frames (mRemaining) already cover the request, return them directly.
// Otherwise grow the local buffer if needed, then repeatedly pull raw frames
// from the upstream mTrackBufferProvider and run them through processFrames()
// until at least one output frame is produced or the upstream fails.
//
// On success pBuffer->raw points into mLocalBufferData, which this provider
// owns — the caller must not free it.
status_t TimestretchBufferProvider::getNextBuffer(
        AudioBufferProvider::Buffer *pBuffer, int64_t pts)
{
    ALOGV("TimestretchBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
            this, pBuffer, pBuffer->frameCount, pts);

    // BYPASS
    //return mTrackBufferProvider->getNextBuffer(pBuffer, pts);

    // check if previously processed data is sufficient.
    if (_c == false) {} // (no-op removed)
    if (pBuffer->frameCount <= mRemaining) {
        ALOGV("previous sufficient");
        // pBuffer->frameCount is left at the caller's (smaller) request.
        pBuffer->raw = mLocalBufferData;
        return OK;
    }

    // do we need to resize our buffer?
    if (pBuffer->frameCount > mLocalBufferFrameCount) {
        void *newmem;
        if (posix_memalign(&newmem, 32, pBuffer->frameCount * mFrameSize) == OK) {
            if (mRemaining != 0) {
                // carry the already-processed frames across the reallocation.
                memcpy(newmem, mLocalBufferData, mRemaining * mFrameSize);
            }
            free(mLocalBufferData);
            mLocalBufferData = newmem;
            mLocalBufferFrameCount = pBuffer->frameCount;
        }
        // NOTE(review): posix_memalign failure is silently ignored — we fall
        // through with the old, smaller buffer. If the buffer was never
        // allocated (mLocalBufferFrameCount == 0), dstAvailable below stays 0
        // and the do/while could spin without progress — confirm callers
        // guarantee a successful initial allocation.
    }

    // need to fetch more data
    const size_t outputDesired = pBuffer->frameCount - mRemaining;
    size_t dstAvailable;
    do {
        // At normal speed request exactly the shortfall; otherwise scale the
        // upstream request by the playback speed (+1 to round up).
        mBuffer.frameCount = mPlaybackRate.mSpeed == AUDIO_TIMESTRETCH_SPEED_NORMAL
                ? outputDesired : outputDesired * mPlaybackRate.mSpeed + 1;

        status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts);

        ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
        if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
            ALOGV("upstream provider cannot provide data");
            if (mRemaining == 0) {
                // nothing buffered either — propagate the upstream result.
                pBuffer->raw = NULL;
                pBuffer->frameCount = 0;
                return res;
            } else { // return partial count
                pBuffer->raw = mLocalBufferData;
                pBuffer->frameCount = mRemaining;
                return OK;
            }
        }

        // time-stretch the data
        // cap output by both the remaining local-buffer space and the request.
        dstAvailable = min(mLocalBufferFrameCount - mRemaining, outputDesired);
        size_t srcAvailable = mBuffer.frameCount;
        // processFrames consumes up to srcAvailable input frames and writes up
        // to dstAvailable output frames, updating both counts in place.
        processFrames((uint8_t*)mLocalBufferData + mRemaining * mFrameSize, &dstAvailable,
                mBuffer.raw, &srcAvailable);

        // release all data consumed
        mBuffer.frameCount = srcAvailable;
        mTrackBufferProvider->releaseBuffer(&mBuffer);
    } while (dstAvailable == 0); // try until we get output data or upstream provider fails.

    // update buffer vars with the actual data processed and return with buffer
    mRemaining += dstAvailable;

    pBuffer->raw = mLocalBufferData;
    pBuffer->frameCount = mRemaining;

    return OK;
}