void BufferQueue::freeAllBuffersLocked() {
    ALOGW_IF(!mQueue.isEmpty(),
            "freeAllBuffersLocked called but mQueue is not empty");
    mQueue.clear();
    mBufferHasBeenQueued = false;
    for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
        freeBufferLocked(i);
    }
}
bool audio_track_cblk_t::stepServer(size_t stepCount, size_t frameCount, bool isOut)
{
    ALOGV("stepserver %08x %08x %d", user, server, stepCount);

    if (!tryLock()) {
        ALOGW("stepServer() could not lock cblk");
        return false;
    }

    uint32_t s = server;
    bool flushed = (s == user);

    s += stepCount;
    if (isOut) {
        // Mark that we have read the first buffer so that next time stepUser() is called
        // we switch to normal obtainBuffer() timeout period
        if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS) {
            bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS - 1;
        }
        // It is possible that we receive a flush()
        // while the mixer is processing a block: in this case,
        // stepServer() is called after the flush() has reset u & s and
        // we have s > u
        if (flushed) {
            ALOGW("stepServer occurred after track reset");
            s = user;
        }
    }

    if (s >= loopEnd) {
        ALOGW_IF(s > loopEnd, "stepServer: s %u > loopEnd %u", s, loopEnd);
        s = loopStart;
        if (--loopCount == 0) {
            loopEnd = UINT_MAX;
            loopStart = UINT_MAX;
        }
    }

    if (s >= frameCount) {
        // common case, server didn't just wrap
        if (s - frameCount >= serverBase ) {
            serverBase += frameCount;
        }
    } else if (s >= serverBase + frameCount) {
        // server just wrapped
        serverBase += frameCount;
    }

    server = s;

    if (!(flags & CBLK_INVALID)) {
        cv.signal();
    }
    lock.unlock();
    return true;
}
status_t EmulatedQemuCameraDevice::getCurrentPreviewFrame(void* buffer)
{
    ALOGW_IF(mPreviewFrame == NULL, "%s: No preview frame", __FUNCTION__);
    if (mPreviewFrame != NULL) {
        memcpy(buffer, mPreviewFrame, mTotalPixels * 4);
        return 0;
    } else {
        return EmulatedCameraDevice::getCurrentPreviewFrame(buffer);
    }
}
status_t GraphicBufferMapper::unlock(buffer_handle_t handle)
{
    ATRACE_CALL();
    status_t err;

    err = mAllocMod->unlock(mAllocMod, handle);

    ALOGW_IF(err, "unlock(...) failed %d (%s)", err, strerror(-err));
    return err;
}
status_t GraphicBufferMapper::unregisterBuffer(buffer_handle_t handle)
{
    status_t err;

    err = mAllocMod->unregisterBuffer(mAllocMod, handle);

    ALOGW_IF(err, "unregisterBuffer(%p) failed %d (%s)",
            handle, err, strerror(-err));
    return err;
}
HWComposer::HWComposer(
        const sp<SurfaceFlinger>& flinger,
        EventHandler& handler,
        nsecs_t refreshPeriod)
    : mFlinger(flinger),
      mModule(0), mHwc(0), mList(0), mCapacity(0),
      mNumOVLayers(0), mNumFBLayers(0),
      mDpy(EGL_NO_DISPLAY), mSur(EGL_NO_SURFACE),
      mEventHandler(handler),
      mRefreshPeriod(refreshPeriod),
      mVSyncCount(0), mDebugForceFakeVSync(false)
{
#ifdef OMAP_ENHANCEMENT
    mListExt = NULL;
#endif
    char value[PROPERTY_VALUE_MAX];
    property_get("debug.sf.no_hw_vsync", value, "0");
    mDebugForceFakeVSync = atoi(value);

    bool needVSyncThread = false;
    int err = hw_get_module(HWC_HARDWARE_MODULE_ID, &mModule);
    ALOGW_IF(err, "%s module not found", HWC_HARDWARE_MODULE_ID);
    if (err == 0) {
        err = hwc_open(mModule, &mHwc);
        ALOGE_IF(err, "%s device failed to initialize (%s)",
                HWC_HARDWARE_COMPOSER, strerror(-err));
        if (err == 0) {
            if (mHwc->registerProcs) {
                mCBContext.hwc = this;
                mCBContext.procs.invalidate = &hook_invalidate;
                mCBContext.procs.vsync = &hook_vsync;
#ifdef OMAP_ENHANCEMENT
                mCBContext.procs.extension_cb = &hook_extension_cb;
#endif
                mHwc->registerProcs(mHwc, &mCBContext.procs);
                memset(mCBContext.procs.zero, 0, sizeof(mCBContext.procs.zero));
            }
            if (mHwc->common.version >= HWC_DEVICE_API_VERSION_0_3) {
                if (mDebugForceFakeVSync) {
                    // make sure to turn h/w vsync off in "fake vsync" mode
                    mHwc->methods->eventControl(mHwc, HWC_EVENT_VSYNC, 0);
                }
            } else {
                needVSyncThread = true;
            }
        }
    } else {
        needVSyncThread = true;
    }

    if (needVSyncThread) {
        // we don't have VSYNC support, we need to fake it
        mVSyncThread = new VSyncThread(*this);
    }
}
EGLBoolean egl_display_t::terminate() {

    Mutex::Autolock _l(lock);

    if (refs == 0) {
        /*
         * From the EGL spec (3.2):
         * "Termination of a display that has already been terminated,
         *  (...), is allowed, but the only effect of such a call is
         *  to return EGL_TRUE (...)
         */
        return EGL_TRUE;
    }

    // this is specific to Android, display termination is ref-counted.
    if (refs > 1) {
        refs--;
        return EGL_TRUE;
    }

    EGLBoolean res = EGL_FALSE;
    egl_connection_t* const cnx = &gEGLImpl;
    if (cnx->dso && disp.state == egl_display_t::INITIALIZED) {
        if (cnx->egl.eglTerminate(disp.dpy) == EGL_FALSE) {
            ALOGW("eglTerminate(%p) failed (%s)", disp.dpy,
                    egl_tls_t::egl_strerror(cnx->egl.eglGetError()));
        }
        // REVISIT: it's unclear what to do if eglTerminate() fails
        disp.state = egl_display_t::TERMINATED;
        res = EGL_TRUE;
    }

    mHibernation.setDisplayValid(false);

    // Reset the extension string since it will be regenerated if we get
    // reinitialized.
    mExtensionString.setTo("");

    // Mark all objects remaining in the list as terminated, unless
    // there are no references to them, in which case we're free to
    // delete them.
    size_t count = objects.size();
    ALOGW_IF(count, "eglTerminate() called w/ %zu objects remaining", count);
    for (size_t i=0 ; i<count ; i++) {
        egl_object_t* o = objects.itemAt(i);
        o->destroy();
    }

    // this marks all object handles as "terminated"
    objects.clear();

    refs--;
    return res;
}
status_t GraphicBufferMapper::lock(buffer_handle_t handle, 
        int usage, const Rect& bounds, void** vaddr)
{
    status_t err;

    err = mAllocMod->lock(mAllocMod, handle, usage,
            bounds.left, bounds.top, bounds.width(), bounds.height(),
            vaddr);

    ALOGW_IF(err, "lock(...) failed %d (%s)", err, strerror(-err));
    return err;
}
status_t GraphicBufferAllocator::alloc(uint32_t w, uint32_t h, PixelFormat format,
        int usage, buffer_handle_t* handle, int32_t* stride)
{
    ATRACE_CALL();
    // make sure not to allocate an N x 0 or 0 x N buffer; since this is
    // allowed from an API standpoint, allocate a 1x1 buffer instead.
    if (!w || !h)
        w = h = 1;

    // we have a h/w allocator and h/w buffer is requested
    status_t err;

    // If too many async frees are queued up then wait for some of them to
    // complete before attempting to allocate more memory.  This is exercised
    // by the android.opengl.cts.GLSurfaceViewTest CTS test.
    BufferLiberatorThread::maybeWaitForLiberation();

    err = mAllocDev->alloc(mAllocDev, w, h, format, usage, handle, stride);

    if (err != NO_ERROR) {
        ALOGW("WOW! gralloc alloc failed, waiting for pending frees!");
        BufferLiberatorThread::waitForLiberation();
        err = mAllocDev->alloc(mAllocDev, w, h, format, usage, handle, stride);
    }

    ALOGW_IF(err, "alloc(%u, %u, %d, %08x, ...) failed %d (%s)",
            w, h, format, usage, err, strerror(-err));

    if (err == NO_ERROR) {
        Mutex::Autolock _l(sLock);
        KeyedVector<buffer_handle_t, alloc_rec_t>& list(sAllocList);
        int bpp = bytesPerPixel(format);
        if (bpp < 0) {
            // probably a HAL custom format. in any case, we don't know
            // what its pixel size is.
            bpp = 0;
        }
        alloc_rec_t rec;
        rec.w = w;
        rec.h = h;
        rec.s = *stride;
        rec.format = format;
        rec.usage = usage;
        rec.size = h * stride[0] * bpp;
        // [MTK] {{{
        rec.pid = IPCThreadState::self()->getCallingPid();
        // [MTK] }}}
        list.add(*handle, rec);
    }

    return err;
}
static void *loadExtractorPlugin() {
    void *ret = NULL;
    char lib[PROPERTY_VALUE_MAX];
    if (property_get("media.stagefright.extractor-plugin", lib, "libFFmpegExtractor.so")) {
        if (void *extractorLib = ::dlopen(lib, RTLD_LAZY)) {
            ret = ::dlsym(extractorLib, "getExtractorPlugin");
            ALOGW_IF(!ret, "Failed to find symbol, dlerror: %s", ::dlerror());
        } else {
            ALOGV("Failed to load %s, dlerror: %s", lib, ::dlerror());
        }
    }
    return ret;
}
EmulatedCameraDevice::WorkerThread::SelectRes
EmulatedCameraDevice::WorkerThread::Select(int fd, int timeout)
{
    fd_set fds[1];
    struct timeval tv, *tvp = NULL;

    const int fd_num = (fd >= 0) ? max(fd, mControlFD) + 1 :
                                   mControlFD + 1;
    FD_ZERO(fds);
    FD_SET(mControlFD, fds);
    if (fd >= 0) {
        FD_SET(fd, fds);
    }
    if (timeout) {
        tv.tv_sec = timeout / 1000000;
        tv.tv_usec = timeout % 1000000;
        tvp = &tv;
    }
    int res = TEMP_FAILURE_RETRY(select(fd_num, fds, NULL, NULL, tvp));
    if (res < 0) {
        ALOGE("%s: select returned %d and failed: %d -> %s",
             __FUNCTION__, res, errno, strerror(errno));
        return ERROR;
    } else if (res == 0) {
        /* Timeout. */
        return TIMEOUT;
    } else if (FD_ISSET(mControlFD, fds)) {
        /* A control event. Lets read the message. */
        ControlMessage msg;
        res = TEMP_FAILURE_RETRY(read(mControlFD, &msg, sizeof(msg)));
        if (res != sizeof(msg)) {
            ALOGE("%s: Unexpected message size %d, or an error %d -> %s",
                 __FUNCTION__, res, errno, strerror(errno));
            return ERROR;
        }
        /* THREAD_STOP is the only message expected here. */
        if (msg == THREAD_STOP) {
            ALOGV("%s: THREAD_STOP message is received", __FUNCTION__);
            return EXIT_THREAD;
        } else {
            ALOGE("Unknown worker thread message %d", msg);
            return ERROR;
        }
    } else {
        /* Must be an FD. */
        ALOGW_IF(fd < 0 || !FD_ISSET(fd, fds), "%s: Undefined 'select' result",
                __FUNCTION__);
        return READY;
    }
}
Allocation::~Allocation()
{
    size_t freedOffset = getOffset();
    size_t freedSize   = getSize();
    if (freedSize) {
        /* NOTE: it's VERY important to not free allocations of size 0 because
         * they're special as they don't have any record in the allocator
         * and could alias some real allocation (their offset is zero). */

        // first round the range outward, so the size to unmap is computed in excess
        size_t pagesize = getpagesize();
        size_t start = freedOffset;
        size_t end = start + freedSize;
        start &= ~(pagesize-1);
        end = (end + pagesize-1) & ~(pagesize-1);

        // give back to the kernel the pages we don't need
        size_t free_start = freedOffset;
        size_t free_end = free_start + freedSize;
        if (start < free_start)
            start = free_start;
        if (end > free_end)
            end = free_end;
        start = (start + pagesize-1) & ~(pagesize-1);
        end &= ~(pagesize-1);

        if (start < end) {
            void* const start_ptr = (void*)(intptr_t(getHeap()->base()) + start);
            size_t size = end-start;

#ifndef NDEBUG
            memset(start_ptr, 0xdf, size);
#endif

            // MADV_REMOVE is not defined on Dapper based Goobuntu
#ifdef MADV_REMOVE
            if (size) {
                int err = madvise(start_ptr, size, MADV_REMOVE);
                ALOGW_IF(err, "madvise(%p, %u, MADV_REMOVE) returned %s",
                        start_ptr, size, err<0 ? strerror(errno) : "Ok");
            }
#endif
        }

        // This should be done after madvise(MADV_REMOVE), otherwise madvise()
        // might kick out the memory region that's allocated and/or written
        // right after the deallocation.
        mDealer->deallocate(freedOffset);
    }
}
NuPlayer::DecoderPassThrough::DecoderPassThrough(
        const sp<AMessage> &notify,
        const sp<Source> &source,
        const sp<Renderer> &renderer)
    : DecoderBase(notify),
      mSource(source),
      mRenderer(renderer),
      mSkipRenderingUntilMediaTimeUs(-1ll),
      mReachedEOS(true),
      mPendingAudioErr(OK),
      mPendingBuffersToDrain(0),
      mCachedBytes(0),
      mComponentName("pass through decoder") {
    ALOGW_IF(renderer == NULL, "expect a non-NULL renderer");
}
status_t GraphicBufferAllocator::free(buffer_handle_t handle)
{
    ATRACE_CALL();
    status_t err;

    err = mAllocDev->free(mAllocDev, handle);

    ALOGW_IF(err, "free(...) failed %d (%s)", err, strerror(-err));
    if (err == NO_ERROR) {
        Mutex::Autolock _l(sLock);
        KeyedVector<buffer_handle_t, alloc_rec_t>& list(sAllocList);
        list.removeItem(handle);
    }

    return err;
}
status_t GraphicBufferAllocator::alloc(uint32_t w, uint32_t h, PixelFormat format,
        int usage, buffer_handle_t* handle, int32_t* stride)
{
    ATRACE_CALL();
    // make sure not to allocate an N x 0 or 0 x N buffer; since this is
    // allowed from an API standpoint, allocate a 1x1 buffer instead.
    if (!w || !h)
        w = h = 1;

    // we have a h/w allocator and h/w buffer is requested
    status_t err; 

#ifdef EXYNOS4_ENHANCEMENTS
    if ((format == 0x101) || (format == 0x105) || (format == 0x107)) {
        // 0x101 = HAL_PIXEL_FORMAT_YCbCr_420_P (Samsung-specific pixel format)
        // 0x105 = HAL_PIXEL_FORMAT_YCbCr_420_SP (Samsung-specific pixel format)
        // 0x107 = HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED (Samsung-specific pixel format)
        usage |= GRALLOC_USAGE_HW_FIMC1; // Exynos HWC wants FIMC-friendly memory allocation
    }
#endif

    err = mAllocDev->alloc(mAllocDev, w, h, format, usage, handle, stride);

    ALOGW_IF(err, "alloc(%u, %u, %d, %08x, ...) failed %d (%s)",
            w, h, format, usage, err, strerror(-err));
    
    if (err == NO_ERROR) {
        Mutex::Autolock _l(sLock);
        KeyedVector<buffer_handle_t, alloc_rec_t>& list(sAllocList);
        int bpp = bytesPerPixel(format);
        if (bpp < 0) {
            // probably a HAL custom format. in any case, we don't know
            // what its pixel size is.
            bpp = 0;
        }
        alloc_rec_t rec;
        rec.w = w;
        rec.h = h;
        rec.s = *stride;
        rec.format = format;
        rec.usage = usage;
        rec.size = h * stride[0] * bpp;
        list.add(*handle, rec);
    }

    return err;
}
status_t GraphicBufferAllocator::alloc(uint32_t w, uint32_t h, PixelFormat format,
        int usage, buffer_handle_t* handle, int32_t* stride)
{
    ATRACE_CALL();
    // make sure not to allocate an N x 0 or 0 x N buffer; since this is
    // allowed from an API standpoint, allocate a 1x1 buffer instead.
    if (!w || !h)
        w = h = 1;

    // we have a h/w allocator and h/w buffer is requested
    status_t err; 
    
    err = mAllocDev->alloc(mAllocDev, w, h, format, usage, handle, stride);

    ALOGW_IF(err, "alloc(%u, %u, %d, %08x, ...) failed %d (%s)",
            w, h, format, usage, err, strerror(-err));
    
    if (err == NO_ERROR) {
        Mutex::Autolock _l(sLock);
        KeyedVector<buffer_handle_t, alloc_rec_t>& list(sAllocList);
        int bpp = bytesPerPixel(format);
        if (bpp < 0) {
            // probably a HAL custom format. in any case, we don't know
            // what its pixel size is.
            bpp = 0;
        }
        alloc_rec_t rec;
        rec.w = w;
        rec.h = h;
        rec.s = *stride;
        rec.format = format;
        rec.usage = usage;
        rec.size = h * stride[0] * bpp;
        list.add(*handle, rec);
    }

#ifdef MTK_AOSP_ENHANCEMENT 
    // dump call stack here after handle value got
    if (true == mIsDumpCallStack) {
        ALOGD("[GraphicBufferAllocator::alloc] handle:%p", *handle);
        CallStack stack("    ");
    }
#endif

    return err;
}
status_t EmulatedCameraDevice::WorkerThread::readyToRun()
{
    ALOGV("Starting emulated camera device worker thread...");

    ALOGW_IF(mThreadControl >= 0 || mControlFD >= 0,
            "%s: Thread control FDs are opened", __FUNCTION__);
    /* Create a pair of FDs that would be used to control the thread. */
    int thread_fds[2];
    if (pipe(thread_fds) == 0) {
        mThreadControl = thread_fds[1];
        mControlFD = thread_fds[0];
        ALOGV("Emulated device's worker thread has been started.");
        return NO_ERROR;
    } else {
        ALOGE("%s: Unable to create thread control FDs: %d -> %s",
             __FUNCTION__, errno, strerror(errno));
        return errno;
    }
}
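// A minimal counterpart sketch (not taken from the original sources) showing how the
// control pipe created in readyToRun() above might be used: the controlling thread
// writes a ControlMessage to mThreadControl, and Select() reads it from mControlFD and
// reacts to THREAD_STOP. The function name sendControlMessage() is an assumption; only
// mThreadControl, ControlMessage and THREAD_STOP come from the code shown here.
status_t EmulatedCameraDevice::WorkerThread::sendControlMessage(ControlMessage msg)
{
    const ssize_t written =
            TEMP_FAILURE_RETRY(write(mThreadControl, &msg, sizeof(msg)));
    if (written != static_cast<ssize_t>(sizeof(msg))) {
        ALOGE("%s: Unable to send message %d: %d -> %s",
             __FUNCTION__, static_cast<int>(msg), errno, strerror(errno));
        return errno ? errno : UNKNOWN_ERROR;
    }
    return NO_ERROR;
}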
status_t GraphicBufferMapper::unregisterBuffer(buffer_handle_t handle)
{
    // [MTK] {{{
    if (true == mIsDumpCallStack) {
        XLOGD("[GraphicBufferMapper::unregisterBuffer] handle:%p", handle);
        CallStack cs;
        cs.update();
        cs.dump("    ");
    }    
    // [MTK] }}}

    ATRACE_CALL();
    status_t err;

    err = mAllocMod->unregisterBuffer(mAllocMod, handle);

    ALOGW_IF(err, "unregisterBuffer(%p) failed %d (%s)",
            handle, err, strerror(-err));
    return err;
}
HWComposer::HWComposer(const sp<SurfaceFlinger>& flinger)
    : mFlinger(flinger),
      mModule(0), mHwc(0), mList(0), mCapacity(0),
      mNumOVLayers(0), mNumFBLayers(0),
      mDpy(EGL_NO_DISPLAY), mSur(EGL_NO_SURFACE)
{
    int err = hw_get_module(HWC_HARDWARE_MODULE_ID, &mModule);
    ALOGW_IF(err, "%s module not found", HWC_HARDWARE_MODULE_ID);
    if (err == 0) {
        err = hwc_open(mModule, &mHwc);
        ALOGE_IF(err, "%s device failed to initialize (%s)",
                HWC_HARDWARE_COMPOSER, strerror(-err));
        if (err == 0) {
            if (mHwc->registerProcs) {
                mCBContext.hwc = this;
                mCBContext.procs.invalidate = &hook_invalidate;
                mHwc->registerProcs(mHwc, &mCBContext.procs);
            }
        }
    }
}
OMX_U8* OMXVideoDecoderAVCSecure::MemAllocDataBuffer(OMX_U32 nSizeBytes) {

    ALOGW_IF(nSizeBytes != INPORT_BUFFER_SIZE,
        "%s: size of memory to allocate is %" PRIu32 ", but will allocate %zu",
        __FUNCTION__, nSizeBytes, sizeof(ProtectedDataBuffer));
    
    if (mNumInportBuffers >= INPORT_ACTUAL_BUFFER_COUNT)
    {
        ALOGE("%s: cannot allocate buffer: number of inport buffers is %u, which is already at maximum",
            __FUNCTION__, mNumInportBuffers);
        return NULL;
    }


    int fd = ashmem_create_region("protected-content-buffer", sizeof(ProtectedDataBuffer));
    if (fd < 0) {
        ALOGE("Unable to create ashmem region");
        return NULL;
    }

    native_handle_t *native = native_handle_create(1, 2);
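    // The handle carries 1 fd (the ashmem region) plus 2 ints: the mapped buffer
    // pointer and a random buffer id, filled in below.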

    native->data[0] = fd;
    ProtectedDataBuffer *pBuffer =(ProtectedDataBuffer *) mmap(NULL, sizeof(ProtectedDataBuffer), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (pBuffer == MAP_FAILED) {
        ALOGE("%s: mmap failed", __FUNCTION__);
        return NULL;
    }
    native->data[1] = (int) pBuffer;
    // Use a random value as the buffer id
    native->data[2] = rand();
    ++mNumInportBuffers;

    Init_ProtectedDataBuffer(pBuffer);
    
    pBuffer->size = INPORT_BUFFER_SIZE;

    ALOGV("Allocating native=[%p] buffer = %#x, data = %#x data_end=  %#x size=%d",(OMX_U8 *)native,(uint32_t)pBuffer, (uint32_t)pBuffer->data, (uint32_t)pBuffer->data + sizeof(ProtectedDataBuffer) ,sizeof(ProtectedDataBuffer));
    return (OMX_U8 *) native;
}
status_t GraphicBufferAllocator::free(buffer_handle_t handle)
{
#ifdef MTK_AOSP_ENHANCEMENT 
    if (true == mIsDumpCallStack) {
        ALOGD("[GraphicBufferAllocator::free] handle:%p", handle);
        CallStack stack("    ");
    }
#endif

    ATRACE_CALL();
    status_t err;

    err = mAllocDev->free(mAllocDev, handle);

    ALOGW_IF(err, "free(...) failed %d (%s)", err, strerror(-err));
    if (err == NO_ERROR) {
        Mutex::Autolock _l(sLock);
        KeyedVector<buffer_handle_t, alloc_rec_t>& list(sAllocList);
        list.removeItem(handle);
    }

    return err;
}
// outputs a number of frames less than or equal to *outFrameCount and updates *outFrameCount
// with the actual number of frames produced.
int resampler_resample_from_provider(struct resampler_itfe *resampler,
                       int16_t *out,
                       size_t *outFrameCount)
{
    struct resampler *rsmp = (struct resampler *)resampler;

    if (rsmp == NULL || out == NULL || outFrameCount == NULL) {
        return -EINVAL;
    }
    if (rsmp->provider == NULL) {
        *outFrameCount = 0;
        return -ENOSYS;
    }

    size_t framesRq = *outFrameCount;
    // update and cache the number of frames needed at the input sampling rate to produce
    // the number of frames requested at the output sampling rate
    if (framesRq != rsmp->frames_rq) {
        rsmp->frames_needed = (framesRq * rsmp->in_sample_rate) / rsmp->out_sample_rate + 1;
        rsmp->frames_rq = framesRq;
    }

    size_t framesWr = 0;
    spx_uint32_t inFrames = 0;
    while (framesWr < framesRq) {
        if (rsmp->frames_in < rsmp->frames_needed) {
            // make sure that the number of frames present in rsmp->in_buf (rsmp->frames_in) is at
            // least the number of frames needed to produce the number of frames requested at
            // the output sampling rate
            if (rsmp->in_buf_size < rsmp->frames_needed) {
                rsmp->in_buf_size = rsmp->frames_needed;
                rsmp->in_buf = (int16_t *)realloc(rsmp->in_buf,
                                        rsmp->in_buf_size * rsmp->channel_count * sizeof(int16_t));
            }
            struct resampler_buffer buf;
            buf.frame_count = rsmp->frames_needed - rsmp->frames_in;
            rsmp->provider->get_next_buffer(rsmp->provider, &buf);
            if (buf.raw == NULL) {
                break;
            }
            memcpy(rsmp->in_buf + rsmp->frames_in * rsmp->channel_count,
                    buf.raw,
                    buf.frame_count * rsmp->channel_count * sizeof(int16_t));
            rsmp->frames_in += buf.frame_count;
            rsmp->provider->release_buffer(rsmp->provider, &buf);
        }

        spx_uint32_t outFrames = framesRq - framesWr;
        inFrames = rsmp->frames_in;
        if (rsmp->channel_count == 1) {
            speex_resampler_process_int(rsmp->speex_resampler,
                                        0,
                                        rsmp->in_buf,
                                        &inFrames,
                                        out + framesWr,
                                        &outFrames);
        } else {
            speex_resampler_process_interleaved_int(rsmp->speex_resampler,
                                        rsmp->in_buf,
                                        &inFrames,
                                        out + framesWr * rsmp->channel_count,
                                        &outFrames);
        }
        framesWr += outFrames;
        rsmp->frames_in -= inFrames;
        ALOGW_IF((framesWr != framesRq) && (rsmp->frames_in != 0),
                "ReSampler::resample() remaining %zu frames in and %zu frames out",
                rsmp->frames_in, (framesRq - framesWr));
    }
    if (rsmp->frames_in) {
        memmove(rsmp->in_buf,
                rsmp->in_buf + inFrames * rsmp->channel_count,
                rsmp->frames_in * rsmp->channel_count * sizeof(int16_t));
    }
    *outFrameCount = framesWr;

    return 0;
}
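// A minimal usage sketch (an illustration, not part of the original file) of the
// resampler interface that resampler_resample_from_provider() implements. It assumes
// the audio_utils resampler API (<audio_utils/resampler.h>: create_resampler(),
// release_resampler(), RESAMPLER_QUALITY_DEFAULT and the resample_from_provider hook);
// the provider, sample rates and frame counts are placeholders.
static int pull_resampled_frames(struct resampler_buffer_provider *provider,
                                 int16_t *out, size_t maxOutFrames)
{
    struct resampler_itfe *rsmp = NULL;
    int ret = create_resampler(48000 /* in Hz */, 44100 /* out Hz */, 2 /* channels */,
                               RESAMPLER_QUALITY_DEFAULT, provider, &rsmp);
    if (ret != 0) {
        return ret;
    }
    // on input, outFrames is the capacity of out[]; on output, the frames produced
    size_t outFrames = maxOutFrames;
    ret = rsmp->resample_from_provider(rsmp, out, &outFrames);
    release_resampler(rsmp);
    return ret;
}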
//-----------------------------------------------------------------------------
static void player_handleMediaPlayerEventNotifications(int event, int data1, int data2, void* user)
{

    // FIXME This code is derived from similar code in sfplayer_handlePrefetchEvent.  The two
    // versions are quite similar, but still different enough that they need to be separate.
    // At some point they should be re-factored and merged if feasible.
    // As with other OpenMAX AL implementation code, this copy mostly uses SL_ symbols
    // rather than XA_ unless the difference is significant.

    if (NULL == user) {
        return;
    }

    CMediaPlayer* mp = (CMediaPlayer*) user;
    if (!android::CallbackProtector::enterCbIfOk(mp->mCallbackProtector)) {
        // it is not safe to enter the callback (the media player is about to go away)
        return;
    }
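    // unpack the event code into its four characters so the SL_LOGV below can print
    // it both as text and as a number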
    union {
        char c[sizeof(int)];
        int i;
    } u;
    u.i = event;
    SL_LOGV("player_handleMediaPlayerEventNotifications(event='%c%c%c%c' (%d), data1=%d, data2=%d, "
            "user=%p) from AVPlayer", u.c[3], u.c[2], u.c[1], u.c[0], event, data1, data2, user);
    switch(event) {

      case android::GenericPlayer::kEventPrepared: {
        SL_LOGV("Received GenericPlayer::kEventPrepared for CMediaPlayer %p", mp);

        // assume no callback
        slPrefetchCallback callback = NULL;
        void* callbackPContext;
        XAuint32 events;

        object_lock_exclusive(&mp->mObject);

        // mark object as prepared; same state is used for successful or unsuccessful prepare
        assert(mp->mAndroidObjState == ANDROID_PREPARING);
        mp->mAndroidObjState = ANDROID_READY;

        if (PLAYER_SUCCESS == data1) {
            // Most of successful prepare completion for mp->mAVPlayer
            // is handled by GenericPlayer and its subclasses.
        } else {
            // AVPlayer prepare() failed prefetching, there is no event in XAPrefetchStatus to
            //  indicate a prefetch error, so we signal it by sending simultaneously two events:
            //  - SL_PREFETCHEVENT_FILLLEVELCHANGE with a level of 0
            //  - SL_PREFETCHEVENT_STATUSCHANGE with a status of SL_PREFETCHSTATUS_UNDERFLOW
            SL_LOGE(ERROR_PLAYER_PREFETCH_d, data1);
            if (IsInterfaceInitialized(&mp->mObject, MPH_XAPREFETCHSTATUS)) {
                mp->mPrefetchStatus.mLevel = 0;
                mp->mPrefetchStatus.mStatus = SL_PREFETCHSTATUS_UNDERFLOW;
                if (!(~mp->mPrefetchStatus.mCallbackEventsMask &
                        (SL_PREFETCHEVENT_FILLLEVELCHANGE | SL_PREFETCHEVENT_STATUSCHANGE))) {
                    callback = mp->mPrefetchStatus.mCallback;
                    callbackPContext = mp->mPrefetchStatus.mContext;
                    events = SL_PREFETCHEVENT_FILLLEVELCHANGE | SL_PREFETCHEVENT_STATUSCHANGE;
                }
            }
        }

        object_unlock_exclusive(&mp->mObject);

        // callback with no lock held
        if (NULL != callback) {
            (*callback)(&mp->mPrefetchStatus.mItf, callbackPContext, events);
        }

        break;
      }

      case android::GenericPlayer::kEventHasVideoSize: {
        SL_LOGV("Received AVPlayer::kEventHasVideoSize (%d,%d) for CMediaPlayer %p",
                data1, data2, mp);

        object_lock_exclusive(&mp->mObject);

        // remove an existing video info entry (here we only have one video stream)
        for(size_t i=0 ; i < mp->mStreamInfo.mStreamInfoTable.size() ; i++) {
            if (XA_DOMAINTYPE_VIDEO == mp->mStreamInfo.mStreamInfoTable.itemAt(i).domain) {
                mp->mStreamInfo.mStreamInfoTable.removeAt(i);
                break;
            }
        }
        // update the stream information with a new video info entry
        StreamInfo streamInfo;
        streamInfo.domain = XA_DOMAINTYPE_VIDEO;
        streamInfo.videoInfo.codecId = 0;// unknown, we don't have that info FIXME
        streamInfo.videoInfo.width = (XAuint32)data1;
        streamInfo.videoInfo.height = (XAuint32)data2;
        streamInfo.videoInfo.bitRate = 0;// unknown, we don't have that info FIXME
        streamInfo.videoInfo.frameRate = 0;
        streamInfo.videoInfo.duration = XA_TIME_UNKNOWN;
        StreamInfo &contInfo = mp->mStreamInfo.mStreamInfoTable.editItemAt(0);
        contInfo.containerInfo.numStreams = 1;
        ssize_t index = mp->mStreamInfo.mStreamInfoTable.add(streamInfo);

        // callback is unconditional; there is no bitmask of enabled events
        xaStreamEventChangeCallback callback = mp->mStreamInfo.mCallback;
        void* callbackPContext = mp->mStreamInfo.mContext;

        object_unlock_exclusive(&mp->mObject);

        // enqueue notification (outside of lock) that the stream information has been updated
        if ((NULL != callback) && (index >= 0)) {
#ifndef USE_ASYNCHRONOUS_STREAMCBEVENT_PROPERTYCHANGE_CALLBACK
            (*callback)(&mp->mStreamInfo.mItf, XA_STREAMCBEVENT_PROPERTYCHANGE /*eventId*/,
                    1 /*streamIndex, only one stream supported here, 0 is reserved*/,
                    NULL /*pEventData, always NULL in OpenMAX AL 1.0.1*/,
                    callbackPContext /*pContext*/);
#else
            SLresult res = EnqueueAsyncCallback_piipp(mp, callback,
                    /*p1*/ &mp->mStreamInfo.mItf,
                    /*i1*/ XA_STREAMCBEVENT_PROPERTYCHANGE /*eventId*/,
                    /*i2*/ 1 /*streamIndex, only one stream supported here, 0 is reserved*/,
                    /*p2*/ NULL /*pEventData, always NULL in OpenMAX AL 1.0.1*/,
                    /*p3*/ callbackPContext /*pContext*/);
            ALOGW_IF(SL_RESULT_SUCCESS != res,
                        "Callback %p(%p, XA_STREAMCBEVENT_PROPERTYCHANGE, 1, NULL, %p) dropped",
                        callback, &mp->mStreamInfo.mItf, callbackPContext);
#endif
        }
        break;
      }

      case android::GenericPlayer::kEventEndOfStream: {
        SL_LOGV("Received AVPlayer::kEventEndOfStream for CMediaPlayer %p", mp);

        object_lock_exclusive(&mp->mObject);
        // should be xaPlayCallback but we're sharing the itf between SL and AL
        slPlayCallback playCallback = NULL;
        void * playContext = NULL;
        // XAPlayItf callback or no callback?
        if (mp->mPlay.mEventFlags & XA_PLAYEVENT_HEADATEND) {
            playCallback = mp->mPlay.mCallback;
            playContext = mp->mPlay.mContext;
        }
        mp->mPlay.mState = XA_PLAYSTATE_PAUSED;
        object_unlock_exclusive(&mp->mObject);

        // enqueue callback with no lock held
        if (NULL != playCallback) {
#ifndef USE_ASYNCHRONOUS_PLAY_CALLBACK
            (*playCallback)(&mp->mPlay.mItf, playContext, XA_PLAYEVENT_HEADATEND);
#else
            SLresult res = EnqueueAsyncCallback_ppi(mp, playCallback, &mp->mPlay.mItf, playContext,
                    XA_PLAYEVENT_HEADATEND);
            ALOGW_IF(SL_RESULT_SUCCESS != res,
                    "Callback %p(%p, %p, SL_PLAYEVENT_HEADATEND) dropped", playCallback,
                    &mp->mPlay.mItf, playContext);
#endif
        }
        break;
      }

      case android::GenericPlayer::kEventChannelCount: {
        SL_LOGV("kEventChannelCount channels = %d", data1);
        object_lock_exclusive(&mp->mObject);
        if (UNKNOWN_NUMCHANNELS == mp->mNumChannels && UNKNOWN_NUMCHANNELS != data1) {
            mp->mNumChannels = data1;
            android_Player_volumeUpdate(mp);
        }
        object_unlock_exclusive(&mp->mObject);
      }
      break;

      case android::GenericPlayer::kEventPrefetchFillLevelUpdate: {
        SL_LOGV("kEventPrefetchFillLevelUpdate");
        if (!IsInterfaceInitialized(&mp->mObject, MPH_XAPREFETCHSTATUS)) {
            break;
        }
        slPrefetchCallback callback = NULL;
        void* callbackPContext = NULL;

        // SLPrefetchStatusItf callback or no callback?
        interface_lock_exclusive(&mp->mPrefetchStatus);
        if (mp->mPrefetchStatus.mCallbackEventsMask & SL_PREFETCHEVENT_FILLLEVELCHANGE) {
            callback = mp->mPrefetchStatus.mCallback;
            callbackPContext = mp->mPrefetchStatus.mContext;
        }
        mp->mPrefetchStatus.mLevel = (SLpermille)data1;
        interface_unlock_exclusive(&mp->mPrefetchStatus);

        // callback with no lock held
        if (NULL != callback) {
            (*callback)(&mp->mPrefetchStatus.mItf, callbackPContext,
                    SL_PREFETCHEVENT_FILLLEVELCHANGE);
        }
      }
      break;

      case android::GenericPlayer::kEventPrefetchStatusChange: {
        SL_LOGV("kEventPrefetchStatusChange");
        if (!IsInterfaceInitialized(&mp->mObject, MPH_XAPREFETCHSTATUS)) {
            break;
        }
        slPrefetchCallback callback = NULL;
        void* callbackPContext = NULL;

        // SLPrefetchStatusItf callback or no callback?
        object_lock_exclusive(&mp->mObject);
        if (mp->mPrefetchStatus.mCallbackEventsMask & SL_PREFETCHEVENT_STATUSCHANGE) {
            callback = mp->mPrefetchStatus.mCallback;
            callbackPContext = mp->mPrefetchStatus.mContext;
        }
        if (data1 >= android::kStatusIntermediate) {
            mp->mPrefetchStatus.mStatus = SL_PREFETCHSTATUS_SUFFICIENTDATA;
        } else if (data1 < android::kStatusIntermediate) {
            mp->mPrefetchStatus.mStatus = SL_PREFETCHSTATUS_UNDERFLOW;
        }
        object_unlock_exclusive(&mp->mObject);

        // callback with no lock held
        if (NULL != callback) {
            (*callback)(&mp->mPrefetchStatus.mItf, callbackPContext, SL_PREFETCHEVENT_STATUSCHANGE);
        }
      }
      break;

      case android::GenericPlayer::kEventPlay: {
        SL_LOGV("kEventPlay");

        interface_lock_shared(&mp->mPlay);
        slPlayCallback callback = mp->mPlay.mCallback;
        void* callbackPContext = mp->mPlay.mContext;
        interface_unlock_shared(&mp->mPlay);

        if (NULL != callback) {
            (*callback)(&mp->mPlay.mItf, callbackPContext, (SLuint32) data1); // SL_PLAYEVENT_HEAD*
        }
      }
      break;

      case android::GenericPlayer::kEventErrorAfterPrepare: {
        SL_LOGV("kEventErrorAfterPrepare");

        // assume no callback
        slPrefetchCallback callback = NULL;
        void* callbackPContext = NULL;

        object_lock_exclusive(&mp->mObject);
        if (IsInterfaceInitialized(&mp->mObject, MPH_XAPREFETCHSTATUS)) {
            mp->mPrefetchStatus.mLevel = 0;
            mp->mPrefetchStatus.mStatus = SL_PREFETCHSTATUS_UNDERFLOW;
            if (!(~mp->mPrefetchStatus.mCallbackEventsMask &
                    (SL_PREFETCHEVENT_FILLLEVELCHANGE | SL_PREFETCHEVENT_STATUSCHANGE))) {
                callback = mp->mPrefetchStatus.mCallback;
                callbackPContext = mp->mPrefetchStatus.mContext;
            }
        }
        object_unlock_exclusive(&mp->mObject);

        // FIXME there's interesting information in data1, but no API to convey it to client
        SL_LOGE("Error after prepare: %d", data1);

        // callback with no lock held
        if (NULL != callback) {
            (*callback)(&mp->mPrefetchStatus.mItf, callbackPContext,
                    SL_PREFETCHEVENT_FILLLEVELCHANGE | SL_PREFETCHEVENT_STATUSCHANGE);
        }

      }
      break;

      default: {
        SL_LOGE("Received unknown event %d, data %d from AVPlayer", event, data1);
      }
    }

    mp->mCallbackProtector->exitCb();
}
void NuPlayer::DecoderPassThrough::onSetRenderer(
        const sp<Renderer> &renderer) {
    // renderer can't be changed during offloading
    ALOGW_IF(renderer != mRenderer,
            "ignoring request to change renderer");
}
Device::~Device() {
    int status = audio_hw_device_close(mDevice);
    ALOGW_IF(status, "Error closing audio hw device %p: %s", mDevice,
             strerror(-status));
    mDevice = nullptr;
}