Example #1
void dvmHeapSourceRegisterNativeAllocation(int bytes)
{
    /* If we have just done a GC, ensure that the finalizers are done and update
     * the native watermarks.
     */
    if (gHs->nativeNeedToRunFinalization) {
        dvmRunFinalization();
        dvmHeapSourceUpdateMaxNativeFootprint();
        gHs->nativeNeedToRunFinalization = false;
    }

    android_atomic_add(bytes, &gHs->nativeBytesAllocated);

    if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintGCWatermark) {
        /* The second watermark is higher than the GC watermark. If you hit
         * it, you are allocating native objects faster than the GC can keep
         * up with; in that case we do a GC for alloc.
         */
        if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
            Thread* self = dvmThreadSelf();
            dvmRunFinalization();
            if (dvmCheckException(self)) {
                return;
            }
            dvmLockHeap();
            bool waited = dvmWaitForConcurrentGcToComplete();
            dvmUnlockHeap();
            if (waited) {
                // Just finished a GC, attempt to run finalizers.
                dvmRunFinalization();
                if (dvmCheckException(self)) {
                    return;
                }
            }

            // If we still are over the watermark, attempt a GC for alloc and run finalizers.
            if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
                dvmLockHeap();
                dvmWaitForConcurrentGcToComplete();
                dvmCollectGarbageInternal(GC_FOR_MALLOC);
                dvmUnlockHeap();
                dvmRunFinalization();
                gHs->nativeNeedToRunFinalization = false;
                if (dvmCheckException(self)) {
                    return;
                }
            }
            /* We have just run finalizers, update the native watermark since
             * it is very likely that finalizers released native managed
             * allocations.
             */
            dvmHeapSourceUpdateMaxNativeFootprint();
        } else {
            dvmSignalCond(&gHs->gcThreadCond);
        }
    }
}
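
For context, the release side of this native bookkeeping plausibly just backs
the counter out with a CAS loop; a minimal sketch, assuming the same
gHs->nativeBytesAllocated field and an android_atomic_cas-style primitive
(neither the free path nor its exact atomics appear in this example):

void dvmHeapSourceRegisterNativeFree(int bytes)
{
    int expected_size, new_size;
    do {
        expected_size = gHs->nativeBytesAllocated;
        new_size = expected_size - bytes;
        if (new_size < 0) {
            /* Never let the counter go negative. */
            break;
        }
    } while (android_atomic_cas(expected_size, new_size,
                                &gHs->nativeBytesAllocated));
}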
Example #2
/*
 * Explicitly initiate garbage collection.
 */
void dvmCollectGarbage(bool collectSoftReferences)
{
    dvmLockHeap();
    while (gDvm.gcHeap->gcRunning) {
        dvmWaitForConcurrentGcToComplete();
    }
    dvmCollectGarbageInternal(collectSoftReferences, GC_EXPLICIT);
    dvmUnlockHeap();
}
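
The dvmLockHeap()/dvmUnlockHeap() pair seen here guards the GC heap lock; a
minimal sketch of the lock side, assuming a gDvm.gcHeapLock mutex and the
usual trick of dropping to THREAD_VMWAIT while contended so a suspend-all
cannot deadlock on this thread:

void dvmLockHeap()
{
    if (dvmTryLockMutex(&gDvm.gcHeapLock) != 0) {
        /* Contended: become safely suspendable while we block. */
        Thread *self = dvmThreadSelf();
        ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmLockMutex(&gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }
}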
Example #3
/*
 * Explicitly initiate garbage collection.
 */
void dvmCollectGarbage()
{
    if (gDvm.disableExplicitGc) {
        return;
    }
    dvmLockHeap();
    dvmWaitForConcurrentGcToComplete();
    dvmCollectGarbageInternal(GC_EXPLICIT);
    dvmUnlockHeap();
}
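
Note the difference from example #2: here dvmWaitForConcurrentGcToComplete()
is called once rather than in a gcRunning loop, which suggests the wait loops
internally in this version. A minimal sketch of that shape, assuming a
gDvm.gcHeapCond condition variable that the GC signals when it finishes:

bool dvmWaitForConcurrentGcToComplete()
{
    bool waited = gDvm.gcHeap->gcRunning;
    Thread *self = dvmThreadSelf();
    while (gDvm.gcHeap->gcRunning) {
        /* Sleeping on the condition releases the heap lock, letting the
         * GC finish; drop to VMWAIT so suspension cannot deadlock on us. */
        ThreadStatus oldStatus = dvmChangeStatus(self, THREAD_VMWAIT);
        dvmWaitCond(&gDvm.gcHeapCond, &gDvm.gcHeapLock);
        dvmChangeStatus(self, oldStatus);
    }
    return waited;    /* true if we actually had to block */
}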
Example #4
/*
 * Removes any growth limits.  Allows the user to allocate up to the
 * maximum heap size.
 */
void dvmClearGrowthLimit()
{
    HS_BOILERPLATE();
    dvmLockHeap();
    dvmWaitForConcurrentGcToComplete();
    gDvm.gcHeap->cardTableLength = gDvm.gcHeap->cardTableMaxLength;
    gHs->growthLimit = gHs->maximumSize;
    size_t overhead = oldHeapOverhead(gHs, false);
    gHs->heaps[0].maximumSize = gHs->maximumSize - overhead;
    gHs->heaps[0].limit = gHs->heaps[0].base + gHs->heaps[0].maximumSize;
    dvmUnlockHeap();
}
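
This entry point is typically reached from managed code; a minimal sketch of
the glue, assuming the internal-native calling convention used by
dalvik.system.VMRuntime (the function name follows that convention but is
assumed here, not taken from this example):

static void Dalvik_dalvik_system_VMRuntime_clearGrowthLimit(const u4* args,
    JValue* pResult)
{
    UNUSED_PARAMETER(args);
    /* Lift the startup growth limit so the app can use the full heap. */
    dvmClearGrowthLimit();
    RETURN_VOID();
}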
Example #5
/* Runs the main thread daemon loop, looking for incoming messages from its
 * parallel thread telling it what action to take. */
static void* thread_daemon(void* pself) {
  Thread* self = (Thread*)pself;
  while(1) {
    u1 event = offReadU1(self);
    if(!gDvm.offConnected) {
      ALOGI("THREAD %d LOST CONNECTION", self->threadId);
      return NULL;
    }
    ALOGI("THREAD %d GOT EVENT %d", self->threadId, event);
    switch(event) {
      case OFF_ACTION_RESUME: {
        /* We got a resume message, drop back to our caller. */
        return NULL;
      } break;
      case OFF_ACTION_LOCK: {
        offPerformLock(self);
      } break;
      case OFF_ACTION_NOTIFY: {
        offPerformNotify(self);
      } break;
      case OFF_ACTION_BROADCAST: {
        offPerformNotifyAll(self);
      } break;
      case OFF_ACTION_DEX_QUERYDEX: {
        offPerformQueryDex(self);
      } break;
      case OFF_ACTION_SYNC: {
        offSyncPull();
        offWriteU1(self, 0);
      } break;
      case OFF_ACTION_INTERRUPT: {
        offPerformInterrupt(self);
      } break;
      case OFF_ACTION_TRIMGC: {
        dvmLockHeap();
        self->offTrimSignaled = true;
        if (gDvm.gcHeap->gcRunning) {
          dvmWaitForConcurrentGcToComplete();
        }
        dvmCollectGarbageInternal(GC_BEFORE_OOM);
        self->offTrimSignaled = false;
        dvmUnlockHeap();
      } break;
      case OFF_ACTION_GRABVOL: {
        offPerformGrabVolatiles(self);
      } break;
      case OFF_ACTION_MIGRATE: {
        if(offPerformMigrate(self)) {
          return NULL;
        }
      } break;
      case OFF_ACTION_CLINIT: {
        offPerformClinit(self);
      } break;
      case OFF_ACTION_DEATH: {
        self->offFlagDeath = true;
        return NULL;
      } break;
      default: {
        ALOGE("Unknown action %d sent to thread %d",
             event, self->threadId);
        dvmAbort();
      }
    }
  }
}
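
For the OFF_ACTION_SYNC case above, the requesting endpoint presumably writes
the action byte and then blocks until the handler acknowledges with the zero
byte it writes after offSyncPull(). A hypothetical sketch of that sender
(offSendSync is an assumed name; offWriteU1/offReadU1 appear above):

static bool offSendSync(Thread* remote)
{
    offWriteU1(remote, OFF_ACTION_SYNC);
    u1 ack = offReadU1(remote);          /* handler replies with 0 */
    return gDvm.offConnected && ack == 0;
}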
Example #6
/* Try as hard as possible to allocate some memory.
 */
static void *tryMalloc(size_t size)
{
    void *ptr;

    /* Don't try too hard if there's no way the allocation is
     * going to succeed.  We have to collect SoftReferences before
     * throwing an OOME, though.
     */
    if (size >= gDvm.heapGrowthLimit) {
        ALOGW("%zd byte allocation exceeds the %zd byte maximum heap size",
             size, gDvm.heapGrowthLimit);
        ptr = NULL;
        goto collect_soft_refs;
    }

//TODO: figure out better heuristics
//    There will be a lot of churn if someone allocates a bunch of
//    big objects in a row, and we hit the frag case each time.
//    A full GC for each.
//    Maybe we grow the heap in bigger leaps
//    Maybe we skip the GC if the size is large and we did one recently
//      (number of allocations ago) (watch for thread effects)
//    DeflateTest allocs a bunch of ~128k buffers w/in 0-5 allocs of each other
//      (or, at least, there are only 0-5 objects swept each time)

    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /*
     * The allocation failed.  If the GC is running, block until it
     * completes and retry.
     */
    if (gDvm.gcHeap->gcRunning) {
        /*
         * The GC is concurrently tracing the heap.  Release the heap
     * lock, wait for the GC to complete, and retry the allocation.
         */
        dvmWaitForConcurrentGcToComplete();
        ptr = dvmHeapSourceAlloc(size);
        if (ptr != NULL) {
            return ptr;
        }
    }
    /*
     * Another failure.  Our thread was starved or there may be too
     * many live objects.  Try a foreground GC.  This will have no
     * effect if the concurrent GC is already running.
     */
    gcForMalloc(false);
    ptr = dvmHeapSourceAlloc(size);
    if (ptr != NULL) {
        return ptr;
    }

    /* Even that didn't work;  this is an exceptional state.
     * Try harder, growing the heap if necessary.
     */
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        size_t newHeapSize;

        newHeapSize = dvmHeapSourceGetIdealFootprint();
//TODO: may want to grow a little bit more so that the amount of free
//      space is equal to the old free space + the utilization slop for
//      the new allocation.
        LOGI_HEAP("Grow heap (frag case) to "
                "%zu.%03zuMB for %zu-byte allocation",
                FRACTIONAL_MB(newHeapSize), size);
        return ptr;
    }

    /* Most allocations should have succeeded by now, so the heap
     * is really full, really fragmented, or the requested size is
     * really big.  Do another GC, collecting SoftReferences this
     * time.  The VM spec requires that all SoftReferences have
     * been collected and cleared before throwing an OOME.
     */
//TODO: wait for the finalizers from the previous GC to finish
collect_soft_refs:
    LOGI_HEAP("Forcing collection of SoftReferences for %zu-byte allocation",
            size);
    gcForMalloc(true);
    ptr = dvmHeapSourceAllocAndGrow(size);
    if (ptr != NULL) {
        return ptr;
    }
//TODO: maybe wait for finalizers and try one last time

    LOGE_HEAP("Out of memory on a %zd-byte allocation.", size);
//TODO: tell the HeapSource to dump its state
    dvmDumpThread(dvmThreadSelf(), false);

    return NULL;
}
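
The gcForMalloc(bool) helper used above maps the clear-soft-references flag
onto a GC spec; a minimal sketch of that shape, reusing the GC_FOR_MALLOC and
GC_BEFORE_OOM specs seen in examples #1 and #5 (the helper's body is not
shown in this example):

static void gcForMalloc(bool clearSoftReferences)
{
    /* Clearing SoftReferences is the last resort before throwing an
     * OOME; otherwise run an ordinary allocation-driven collection. */
    if (clearSoftReferences) {
        dvmCollectGarbageInternal(GC_BEFORE_OOM);
    } else {
        dvmCollectGarbageInternal(GC_FOR_MALLOC);
    }
}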
Example #7
/*
 * The heap worker thread sits quietly until the GC tells it there's work
 * to do.
 */
static void* heapWorkerThreadStart(void* arg)
{
    Thread *self = dvmThreadSelf();

    UNUSED_PARAMETER(arg);

    LOGV("HeapWorker thread started (threadid=%d)\n", self->threadId);

    /* tell the main thread that we're ready */
    lockMutex(&gDvm.heapWorkerLock);
    gDvm.heapWorkerReady = true;
    dvmSignalCond(&gDvm.heapWorkerCond);
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    lockMutex(&gDvm.heapWorkerLock);
    while (!gDvm.haltHeapWorker) {
        struct timespec trimtime;
        bool timedwait = false;

        /* We're done running interpreted code for now. */
        dvmChangeStatus(NULL, THREAD_VMWAIT);

        /* Signal anyone who wants to know when we're done. */
        dvmBroadcastCond(&gDvm.heapWorkerIdleCond);

        /* Trim the heap if we were asked to. */
        trimtime = gDvm.gcHeap->heapWorkerNextTrim;
        if (trimtime.tv_sec != 0 && trimtime.tv_nsec != 0) {
            struct timespec now;

#ifdef HAVE_TIMEDWAIT_MONOTONIC
            clock_gettime(CLOCK_MONOTONIC, &now);       // relative time
#else
            struct timeval tvnow;
            gettimeofday(&tvnow, NULL);                 // absolute time
            now.tv_sec = tvnow.tv_sec;
            now.tv_nsec = tvnow.tv_usec * 1000;
#endif

            if (trimtime.tv_sec < now.tv_sec ||
                (trimtime.tv_sec == now.tv_sec &&
                 trimtime.tv_nsec <= now.tv_nsec))
            {
                size_t madvisedSizes[HEAP_SOURCE_MAX_HEAP_COUNT];

                /*
                 * Acquire the gcHeapLock.  This requires releasing the
                 * heapWorkerLock before the gcHeapLock is acquired.
                 * It is possible that the gcHeapLock may be acquired
                 * during a concurrent GC in which case heapWorkerLock
                 * is held by the GC and we are unable to make forward
                 * progress.  We avoid deadlock by releasing the
                 * gcHeapLock and then waiting to be signaled when the
                 * GC completes.  There is no guarantee that the next
                 * time we are run will coincide with GC inactivity so
                 * the check and wait must be performed within a loop.
                 */
                dvmUnlockMutex(&gDvm.heapWorkerLock);
                dvmLockHeap();
                while (gDvm.gcHeap->gcRunning) {
                    dvmWaitForConcurrentGcToComplete();
                }
                dvmLockMutex(&gDvm.heapWorkerLock);

                memset(madvisedSizes, 0, sizeof(madvisedSizes));
                dvmHeapSourceTrim(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);
                dvmLogMadviseStats(madvisedSizes, HEAP_SOURCE_MAX_HEAP_COUNT);

                dvmUnlockHeap();

                trimtime.tv_sec = 0;
                trimtime.tv_nsec = 0;
                gDvm.gcHeap->heapWorkerNextTrim = trimtime;
            } else {
                timedwait = true;
            }
        }

        /* sleep until signaled */
        if (timedwait) {
            int cc __attribute__ ((__unused__));
#ifdef HAVE_TIMEDWAIT_MONOTONIC
            cc = pthread_cond_timedwait_monotonic(&gDvm.heapWorkerCond,
                    &gDvm.heapWorkerLock, &trimtime);
#else
            cc = pthread_cond_timedwait(&gDvm.heapWorkerCond,
                    &gDvm.heapWorkerLock, &trimtime);
#endif
            assert(cc == 0 || cc == ETIMEDOUT);
        } else {
            dvmWaitCond(&gDvm.heapWorkerCond, &gDvm.heapWorkerLock);
        }

        /*
         * Return to the running state before doing heap work.  This
         * will block if the GC has initiated a suspend.  We release
         * the heapWorkerLock beforehand for the GC to make progress
         * and wait to be signaled after the GC completes.  There is
         * no guarantee that the next time we are run will coincide
         * with GC inactivity so the check and wait must be performed
         * within a loop.
         */
        dvmUnlockMutex(&gDvm.heapWorkerLock);
        dvmChangeStatus(NULL, THREAD_RUNNING);
        dvmLockHeap();
        while (gDvm.gcHeap->gcRunning) {
            dvmWaitForConcurrentGcToComplete();
        }
        dvmLockMutex(&gDvm.heapWorkerLock);
        dvmUnlockHeap();
        LOGV("HeapWorker is awake\n");

        /* Process any events in the queue.
         */
        doHeapWork(self);
    }
    dvmUnlockMutex(&gDvm.heapWorkerLock);

    if (gDvm.verboseShutdown)
        LOGD("HeapWorker thread shutting down\n");
    return NULL;
}
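
The trim itself is armed by whoever stores a deadline in
gDvm.gcHeap->heapWorkerNextTrim and signals the worker. A minimal sketch of
such a scheduler, mirroring the clock handling in the loop above
(scheduleHeapTrim is an assumed name, not the VM's own API):

static void scheduleHeapTrim(time_t timeoutSec)
{
    struct timespec when;
#ifdef HAVE_TIMEDWAIT_MONOTONIC
    clock_gettime(CLOCK_MONOTONIC, &when);
#else
    struct timeval now;
    gettimeofday(&now, NULL);
    when.tv_sec = now.tv_sec;
    when.tv_nsec = now.tv_usec * 1000;
#endif
    when.tv_sec += timeoutSec;

    lockMutex(&gDvm.heapWorkerLock);
    gDvm.gcHeap->heapWorkerNextTrim = when;
    dvmSignalCond(&gDvm.heapWorkerCond);   /* wake the worker to re-arm */
    dvmUnlockMutex(&gDvm.heapWorkerLock);
}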