void dvmDdmSendHeapSegments(bool shouldLock, bool native)
{
    u1 heapId[sizeof(u4)];
    GcHeap *gcHeap = gDvm.gcHeap;
    int when, what;
    bool merge;

    /* Don't even grab the lock if there's nothing to do when we're called.
     */
    if (!native) {
        when = gcHeap->ddmHpsgWhen;
        what = gcHeap->ddmHpsgWhat;
        if (when == HPSG_WHEN_NEVER) {
            return;
        }
    } else {
        when = gcHeap->ddmNhsgWhen;
        what = gcHeap->ddmNhsgWhat;
        if (when == HPSG_WHEN_NEVER) {
            return;
        }
    }
    if (shouldLock && !dvmLockHeap()) {
        LOGW("Can't lock heap for DDM HPSx dump\n");
        return;
    }

    /* Figure out what kind of chunks we'll be sending.
     */
    if (what == HPSG_WHAT_MERGED_OBJECTS) {
        merge = true;
    } else if (what == HPSG_WHAT_DISTINCT_OBJECTS) {
        merge = false;
    } else {
        assert(!"bad HPSG.what value");
        return;
    }

    /* First, send a heap start chunk.
     */
    set4BE(heapId, DEFAULT_HEAP_ID);
    dvmDbgDdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"),
        sizeof(u4), heapId);

    /* Send a series of heap segment chunks.
     */
    walkHeap(merge, native);

    /* Finally, send a heap end chunk.
     */
    dvmDbgDdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"),
        sizeof(u4), heapId);

    if (shouldLock) {
        dvmUnlockHeap();
    }
}
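/*
 * The set2BE/set4BE/set8BE calls used throughout this file come from
 * Dalvik's byte-order helpers. A minimal sketch of their behavior
 * (assumed here, not the canonical Bits.h definitions): store a value
 * into a possibly unaligned buffer in big-endian (network) order, as
 * the DDM wire format expects.
 */
static void sketch_set2BE(u1* buf, u2 val)
{
    buf[0] = (u1)(val >> 8);
    buf[1] = (u1)(val);
}

static void sketch_set4BE(u1* buf, u4 val)
{
    buf[0] = (u1)(val >> 24);
    buf[1] = (u1)(val >> 16);
    buf[2] = (u1)(val >> 8);
    buf[3] = (u1)(val);
}

static void sketch_set8BE(u1* buf, u8 val)
{
    sketch_set4BE(buf, (u4)(val >> 32));
    sketch_set4BE(buf + 4, (u4)(val));
}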
/*
 * Send a notification when a thread starts or stops.
 *
 * Because we broadcast the full set of threads when the notifications are
 * first enabled, it's possible for "thread" to be actively executing.
 */
void dvmDdmSendThreadNotification(Thread* thread, bool started)
{
    if (!gDvm.ddmThreadNotification)
        return;

    StringObject* nameObj = NULL;
    Object* threadObj = thread->threadObj;

    if (threadObj != NULL) {
        nameObj = (StringObject*)
            dvmGetFieldObject(threadObj, gDvm.offJavaLangThread_name);
    }

    int type, len;
    u1 buf[256];

    if (started) {
        const u2* chars;
        u2* outChars;
        size_t stringLen;

        type = CHUNK_TYPE("THCR");

        if (nameObj != NULL) {
            stringLen = dvmStringLen(nameObj);
            chars = dvmStringChars(nameObj);
        } else {
            stringLen = 0;
            chars = NULL;
        }

        /* leave room for the two integer fields */
        if (stringLen > (sizeof(buf) - sizeof(u4)*2) / 2)
            stringLen = (sizeof(buf) - sizeof(u4)*2) / 2;
        len = stringLen*2 + sizeof(u4)*2;

        set4BE(&buf[0x00], thread->threadId);
        set4BE(&buf[0x04], stringLen);

        /* copy the UTF-16 string, transforming to big-endian */
        outChars = (u2*) &buf[0x08];
        while (stringLen--)
            set2BE((u1*) (outChars++), *chars++);
    } else {
        type = CHUNK_TYPE("THDE");

        len = 4;

        set4BE(&buf[0x00], thread->threadId);
    }

    dvmDbgDdmSendChunk(type, len, buf);
}
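/*
 * Usage sketch: the comment above notes that the full set of threads is
 * broadcast when notifications are first enabled. A hypothetical
 * broadcast loop would look roughly like the following; the names
 * dvmLockThreadList/dvmUnlockThreadList and the gDvm.threadList "next"
 * linkage are assumptions about the surrounding VM, not shown in this
 * excerpt.
 */
static void sketch_broadcastExistingThreads(void)
{
    Thread* thread;

    dvmLockThreadList(NULL);
    for (thread = gDvm.threadList; thread != NULL; thread = thread->next) {
        /* report each live thread as if it had just started */
        dvmDdmSendThreadNotification(thread, true);
    }
    dvmUnlockThreadList();
}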
/*
 * Send a notification when a thread's name changes.
 */
void dvmDdmSendThreadNameChange(int threadId, StringObject* newName)
{
    if (!gDvm.ddmThreadNotification)
        return;

    size_t stringLen = dvmStringLen(newName);
    const u2* chars = dvmStringChars(newName);

    /*
     * Output format:
     *  (4b) thread ID
     *  (4b) stringLen
     *  (xb) string chars
     */
    int bufLen = 4 + 4 + (stringLen * 2);
    u1 buf[bufLen];

    set4BE(&buf[0x00], threadId);
    set4BE(&buf[0x04], stringLen);

    u2* outChars = (u2*) &buf[0x08];
    while (stringLen--)
        set2BE((u1*) (outChars++), *chars++);

    dvmDbgDdmSendChunk(CHUNK_TYPE("THNM"), bufLen, buf);
}
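/*
 * For reference, a hypothetical receiver-side decode of the THNM
 * payload built above (pure illustration; DDMS does this on the host
 * side, and this helper is not part of the VM):
 */
static void sketch_parseThreadNameChange(const u1* buf)
{
    /* [u4] thread ID, big-endian */
    u4 threadId = ((u4)buf[0] << 24) | ((u4)buf[1] << 16) |
                  ((u4)buf[2] << 8)  |  (u4)buf[3];
    /* [u4] name length, in UTF-16 code units */
    u4 nameLen  = ((u4)buf[4] << 24) | ((u4)buf[5] << 16) |
                  ((u4)buf[6] << 8)  |  (u4)buf[7];
    /* nameLen big-endian UTF-16 code units follow at buf + 8 */
    (void)threadId;
    (void)nameLen;
}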
/*
 * Walk the heap, sending a series of HPSG/HPSO (or NHSG) chunks.
 */
static void walkHeap(bool merge, bool native)
{
    HeapChunkContext ctx;

    memset(&ctx, 0, sizeof(ctx));
    ctx.bufLen = HPSx_CHUNK_SIZE;
    ctx.buf = (u1 *)malloc(ctx.bufLen);
    if (ctx.buf == NULL) {
        return;
    }

    ctx.merge = merge;
    if (native) {
        ctx.type = CHUNK_TYPE("NHSG");
    } else {
        if (ctx.merge) {
            ctx.type = CHUNK_TYPE("HPSG");
        } else {
            ctx.type = CHUNK_TYPE("HPSO");
        }
    }

    ctx.p = ctx.buf;
    ctx.needHeader = true;
    if (native) {
#ifdef USE_DLMALLOC
        dlmalloc_inspect_all(heap_chunk_callback, (void*)&ctx);
#endif
    } else {
        dvmHeapSourceWalk(heap_chunk_callback, (void *)&ctx);
    }
    if (ctx.p > ctx.buf) {
        flush_hpsg_chunk(&ctx);
    }

    free(ctx.buf);
}
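/*
 * HeapChunkContext and flush_hpsg_chunk() aren't shown in this excerpt.
 * This is a sketch reconstructed from how the fields are used in the
 * functions above and below; treat the exact layout as an assumption.
 */
typedef struct SketchHeapChunkContext {
    u1* buf;                     /* transmission buffer */
    u1* p;                       /* write cursor within buf */
    u1* pieceLenField;           /* where the segment length is patched in */
    size_t bufLen;
    size_t totalAllocationUnits;
    u4 type;                     /* HPSG/HPSO/NHSG chunk type */
    bool merge;
    bool needHeader;
    void* startOfNextMemoryChunk;
} SketchHeapChunkContext;

/*
 * Patch the "length of piece" header field, ship the buffer to DDMS,
 * and reset the context for the next segment.
 */
static void sketch_flush_hpsg_chunk(SketchHeapChunkContext* ctx)
{
    if (ctx->pieceLenField != NULL) {
        set4BE(ctx->pieceLenField, ctx->totalAllocationUnits);
    }
    dvmDbgDdmSendChunk(ctx->type, ctx->p - ctx->buf, ctx->buf);

    ctx->p = ctx->buf;
    ctx->needHeader = true;
    ctx->totalAllocationUnits = 0;
}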
void dvmDdmSendHeapInfo(int reason, bool shouldLock)
{
    struct timeval now;
    u8 nowMs;
    u1 *buf, *b;

    buf = (u1 *)malloc(HPIF_SIZE(1));
    if (buf == NULL) {
        return;
    }
    b = buf;

    /* If there's a one-shot 'when', reset it.
     */
    if (reason == gDvm.gcHeap->ddmHpifWhen) {
        if (shouldLock && ! dvmLockHeap()) {
            ALOGW("%s(): can't lock heap to clear when", __func__);
            goto skip_when;
        }
        if (reason == gDvm.gcHeap->ddmHpifWhen) {
            if (gDvm.gcHeap->ddmHpifWhen == HPIF_WHEN_NEXT_GC) {
                gDvm.gcHeap->ddmHpifWhen = HPIF_WHEN_NEVER;
            }
        }
        if (shouldLock) {
            dvmUnlockHeap();
        }
    }
skip_when:

    /* The current time, in milliseconds since 0:00 GMT, 1/1/70.
     */
    if (gettimeofday(&now, NULL) < 0) {
        nowMs = 0;
    } else {
        nowMs = (u8)now.tv_sec * 1000 + now.tv_usec / 1000;
    }

    /* number of heaps */
    set4BE(b, 1); b += 4;

    /* For each heap (of which there is one) */
    {
        /* heap ID */
        set4BE(b, DEFAULT_HEAP_ID); b += 4;

        /* timestamp */
        set8BE(b, nowMs); b += 8;

        /* 'when' value */
        *b++ = (u1)reason;

        /* max allowed heap size in bytes */
        set4BE(b, dvmHeapSourceGetMaximumSize()); b += 4;

        /* current heap size in bytes */
        set4BE(b, dvmHeapSourceGetValue(HS_FOOTPRINT, NULL, 0)); b += 4;

        /* number of bytes allocated */
        set4BE(b, dvmHeapSourceGetValue(HS_BYTES_ALLOCATED, NULL, 0)); b += 4;

        /* number of objects allocated */
        set4BE(b, dvmHeapSourceGetValue(HS_OBJECTS_ALLOCATED, NULL, 0)); b += 4;
    }
    assert((intptr_t)b == (intptr_t)buf + (intptr_t)HPIF_SIZE(1));

    dvmDbgDdmSendChunk(CHUNK_TYPE("HPIF"), b - buf, buf);

    /* the chunk has been transmitted; release our buffer */
    free(buf);
}
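/*
 * HPIF_SIZE() isn't shown above. From the fields written (a u4 heap
 * count, then per heap: a u4 heap ID, a u8 timestamp, a u1 reason, and
 * four u4 statistics), a matching definition would be the following
 * sketch (an assumption, 4 + n * 29 bytes):
 */
#define SKETCH_HPIF_SIZE(numHeaps) \
    (sizeof(u4) + (numHeaps) * (5 * sizeof(u4) + sizeof(u1) + sizeof(u8)))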
/*
 * Called by dlmalloc_inspect_all. If used_bytes != 0 then start is
 * the start of a malloc-ed piece of memory of size used_bytes. If
 * used_bytes is 0 then start is the beginning of any free space not
 * including dlmalloc's book keeping and end the start of the next
 * dlmalloc chunk. Regions purely containing book keeping don't
 * callback.
 */
static void heap_chunk_callback(void* start, void* end, size_t used_bytes,
                                void* arg)
{
    u1 state;
    HeapChunkContext *ctx = (HeapChunkContext *)arg;
    UNUSED_PARAMETER(end);

    if (used_bytes == 0) {
        if (start == NULL) {
            // Reset for start of new heap.
            ctx->startOfNextMemoryChunk = NULL;
            flush_hpsg_chunk(ctx);
        }
        // Only process in use memory so that free region information
        // also includes dlmalloc book keeping.
        return;
    }

    /* If we're looking at the native heap, we'll just return
     * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
     */
    bool native = ctx->type == CHUNK_TYPE("NHSG");

    if (ctx->startOfNextMemoryChunk != NULL) {
        // Transmit any pending free memory. Native free memory of
        // over kMaxFreeLen could be because of the use of mmaps, so
        // don't report. If not free memory then start a new segment.
        bool flush = true;
        if (start > ctx->startOfNextMemoryChunk) {
            const size_t kMaxFreeLen = 2 * SYSTEM_PAGE_SIZE;
            void* freeStart = ctx->startOfNextMemoryChunk;
            void* freeEnd = start;
            size_t freeLen = (char*)freeEnd - (char*)freeStart;
            if (!native || freeLen < kMaxFreeLen) {
                append_chunk(ctx, HPSG_STATE(SOLIDITY_FREE, 0),
                             freeStart, freeLen);
                flush = false;
            }
        }
        if (flush) {
            ctx->startOfNextMemoryChunk = NULL;
            flush_hpsg_chunk(ctx);
        }
    }

    const Object *obj = (const Object *)start;

    /* It's an allocated chunk. Figure out what it is.
     */
//TODO: if ctx.merge, see if this chunk is different from the last chunk.
//      If it's the same, we should combine them.
    if (!native && dvmIsValidObject(obj)) {
        ClassObject *clazz = obj->clazz;
        if (clazz == NULL) {
            /* The object was probably just created
             * but hasn't been initialized yet.
             */
            state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
        } else if (dvmIsTheClassClass(clazz)) {
            state = HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
        } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
            if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
                state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
            } else {
                switch (clazz->elementClass->primitiveType) {
                case PRIM_BOOLEAN:
                case PRIM_BYTE:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
                    break;
                case PRIM_CHAR:
                case PRIM_SHORT:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
                    break;
                case PRIM_INT:
                case PRIM_FLOAT:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
                    break;
                case PRIM_DOUBLE:
                case PRIM_LONG:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
                    break;
                default:
                    assert(!"Unknown GC heap object type");
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
                    break;
                }
            }
        } else {
            state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
        }
    } else {
        obj = NULL; // it's not actually an object
        state = HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
    }

    append_chunk(ctx, state, start, used_bytes + HEAP_SOURCE_CHUNK_OVERHEAD);
    ctx->startOfNextMemoryChunk =
        (char*)start + used_bytes + HEAP_SOURCE_CHUNK_OVERHEAD;
}
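/*
 * HPSG_STATE() packs a solidity code and a kind code into the single
 * state byte that describes each run of allocation units. A sketch
 * consistent with its use in this file (the 3-bit/3-bit split and the
 * top-bit HPSG_PARTIAL flag are assumptions based on the HPSG wire
 * format, not definitions copied from this source):
 */
#define SKETCH_HPSG_PARTIAL  (1 << 7)
#define SKETCH_HPSG_STATE(solidity, kind) \
    ((u1)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))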
bool KPngPlugin::readInfo(KFileMetaInfo& info, uint what)
{
    if (info.path().isEmpty()) // remote file
        return false;

    QFile f(info.path());
    if (!f.open(IO_ReadOnly))
        return false;

    QIODevice::Offset fileSize = f.size();
    if (fileSize < 29)
        return false;
    // the technical group will be read from the first 29 bytes. If the file
    // is smaller, we can't even read this.

    bool readComments = false;
    if (what & (KFileMetaInfo::Fastest |
                KFileMetaInfo::DontCare |
                KFileMetaInfo::ContentInfo))
        readComments = true;
    else
        fileSize = 29; // No need to read more

    uchar *data = new uchar[fileSize+1];
    f.readBlock(reinterpret_cast<char*>(data), fileSize);
    data[fileSize] = '\n';

    // find the start
    if (data[0] == 137 && data[1] == 80 && data[2] == 78 && data[3] == 71 &&
        data[4] == 13 && data[5] == 10 && data[6] == 26 && data[7] == 10)
    {
        // ok, this is a PNG signature
        // the IHDR chunk should be the first
        if (!strncmp((char*)&data[12], "IHDR", 4)) {
            // we found it, get the dimensions
            ulong x, y;
            x = (data[16]<<24) + (data[17]<<16) + (data[18]<<8) + data[19];
            y = (data[20]<<24) + (data[21]<<16) + (data[22]<<8) + data[23];

            uint type = data[25];
            uint bpp  = data[24];

            kdDebug(7034) << "dimensions " << x << "*" << y << endl;

            // the bit depth is only per channel, so we need to multiply it
            // by the channel count
            switch (type) {
            case 0: break;           // Grayscale
            case 2: bpp *= 3; break; // RGB
            case 3: break;           // palette
            case 4: bpp *= 2; break; // grayscale w. alpha
            case 6: bpp *= 4; break; // RGBA
            default:                 // we don't get any sensible value here
                bpp = 0;
            }

            KFileMetaInfoGroup techgroup = appendGroup(info, "Technical");

            appendItem(techgroup, "Dimensions", QSize(x, y));
            appendItem(techgroup, "BitDepth", bpp);
            appendItem(techgroup, "ColorMode",
                       (type < sizeof(colors)/sizeof(colors[0]))
                       ? i18n(colors[data[25]]) : i18n("Unknown"));
            appendItem(techgroup, "Compression",
                       (data[26] < sizeof(compressions)/sizeof(compressions[0]))
                       ? i18n(compressions[data[26]]) : i18n("Unknown"));
            appendItem(techgroup, "InterlaceMode",
                       (data[28] < sizeof(interlaceModes)/sizeof(interlaceModes[0]))
                       ? i18n(interlaceModes[data[28]]) : i18n("Unknown"));
        }

        // look for a tEXt chunk
        if (readComments) {
            uint index = 8;
            index += CHUNK_SIZE(data, index) + CHUNK_HEADER_SIZE;
            KFileMetaInfoGroup commentGroup = appendGroup(info, "Comment");

            while (index < fileSize - 12) {
                while (index < fileSize - 12 &&
                       strncmp((char*)CHUNK_TYPE(data,index), "tEXt", 4) &&
                       strncmp((char*)CHUNK_TYPE(data,index), "zTXt", 4))
                {
                    if (!strncmp((char*)CHUNK_TYPE(data,index), "IEND", 4))
                        goto end;
                    index += CHUNK_SIZE(data, index) + CHUNK_HEADER_SIZE;
                }

                if (index < fileSize - 12) {
                    // we found a tEXt or zTXt field

                    // get the key, it's a null terminated string at the
                    // chunk start
                    uchar* key = &CHUNK_DATA(data, index, 0);

                    int keysize = 0;
                    for (; key[keysize] != 0; keysize++)
                        // look if we reached the end of the file
                        // (it might be corrupted)
                        if (8 + index + keysize >= fileSize)
                            goto end;

                    QByteArray arr;
                    if (!strncmp((char*)CHUNK_TYPE(data,index), "zTXt", 4)) {
                        kdDebug(7034) << "We found a zTXt field\n";
                        // we get the compression method after the key
                        uchar* compressionMethod =
                            &CHUNK_DATA(data, index, keysize+1);
                        if (*compressionMethod != 0x00) {
                            // then it isn't zlib compressed and we are sunk
                            kdDebug(7034) << "Non-standard compression method."
                                          << endl;
                            goto end;
                        }

                        // compressed string after the compression technique spec
                        uchar* compressedText =
                            &CHUNK_DATA(data, index, keysize+2);
                        uint compressedTextSize =
                            CHUNK_SIZE(data, index) - keysize - 2;

                        // security check, also considering overflow wraparound
                        // from the addition -- we may end up with a /smaller/
                        // index if we wrap all the way around
                        uint firstIndex = (uint)(compressedText - data);
                        uint onePastLastIndex = firstIndex + compressedTextSize;
                        if (onePastLastIndex > fileSize ||
                            onePastLastIndex <= firstIndex)
                            goto end;

                        uLongf uncompressedLen =
                            compressedTextSize * 2; // just a starting point
                        int zlibResult;
                        do {
                            arr.resize(uncompressedLen);
                            zlibResult = uncompress((Bytef*)arr.data(),
                                                    &uncompressedLen,
                                                    compressedText,
                                                    compressedTextSize);
                            if (Z_OK == zlibResult) {
                                // then it is all OK
                                arr.resize(uncompressedLen);
                            } else if (Z_BUF_ERROR == zlibResult) {
                                // the uncompressed array needs to be larger
                                // kdDebug(7034) << "doubling size for decompression" << endl;
                                uncompressedLen *= 2;

                                // DoS protection: can't be bigger than 128k
                                if (uncompressedLen > 131072)
                                    break;
                            } else {
                                // something bad happened
                                goto end;
                            }
                        } while (Z_BUF_ERROR == zlibResult);

                        if (Z_OK != zlibResult)
                            goto end;
                    } else if (!strncmp((char*)CHUNK_TYPE(data,index), "tEXt", 4)) {
                        kdDebug(7034) << "We found a tEXt field\n";
                        // the text comes after the key, but isn't null terminated
                        uchar* text = &CHUNK_DATA(data, index, keysize+1);
                        uint textsize = CHUNK_SIZE(data, index) - keysize - 1;

                        // security check, also considering overflow wraparound
                        // from the addition -- we may end up with a /smaller/
                        // index if we wrap all the way around
                        uint firstIndex = (uint)(text - data);
                        uint onePastLastIndex = firstIndex + textsize;
                        if (onePastLastIndex > fileSize ||
                            onePastLastIndex <= firstIndex)
                            goto end;

                        arr.resize(textsize);
                        arr = QByteArray(textsize).duplicate((const char*)text,
                                                             textsize);
                    } else {
                        kdDebug(7034) << "We found a field, not expected though\n";
                        goto end;
                    }

                    appendItem(commentGroup,
                               QString(reinterpret_cast<char*>(key)),
                               QString(arr));

                    kdDebug(7034) << "adding " << key << " / "
                                  << QString(arr) << endl;

                    index += CHUNK_SIZE(data, index) + CHUNK_HEADER_SIZE;
                }
            }
        }
    }

end:
    delete[] data;
    return true;
}
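/*
 * The CHUNK_* helpers used by readInfo() aren't shown. PNG chunks are
 * laid out as a 4-byte big-endian data length, a 4-byte type, the data,
 * and a 4-byte CRC (hence 12 bytes of non-data overhead per chunk). A
 * sketch of matching definitions (assumed, not the plugin's originals):
 */
#define SKETCH_CHUNK_SIZE(data, index) \
    ((((data)[(index)]) << 24) + (((data)[(index)+1]) << 16) + \
     (((data)[(index)+2]) << 8) + ((data)[(index)+3]))
#define SKETCH_CHUNK_TYPE(data, index)          (&(data)[(index)+4])
#define SKETCH_CHUNK_DATA(data, index, offset)  ((data)[(index)+8+(offset)])
#define SKETCH_CHUNK_HEADER_SIZE 12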
static void heap_chunk_callback(const void *chunkptr, size_t chunklen,
                                const void *userptr, size_t userlen, void *arg)
{
    HeapChunkContext *ctx = (HeapChunkContext *)arg;
    u1 state;

    UNUSED_PARAMETER(userlen);

    assert((chunklen & (ALLOCATION_UNIT_SIZE-1)) == 0);

    /* Make sure there's enough room left in the buffer.
     * We need to use two bytes for every fractional 256
     * allocation units used by the chunk.
     */
    {
        size_t needed = (((chunklen/ALLOCATION_UNIT_SIZE + 255) / 256) * 2);
        size_t bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
        if (bytesLeft < needed) {
            flush_hpsg_chunk(ctx);
        }
        bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
        if (bytesLeft < needed) {
            LOGW("chunk is too big to transmit (chunklen=%zd, %zd bytes)\n",
                chunklen, needed);
            return;
        }
    }

//TODO: notice when there's a gap and start a new heap, or at least a new range.
    if (ctx->needHeader) {
        /*
         * Start a new HPSx chunk.
         */

        /* [u4]: heap ID */
        set4BE(ctx->p, DEFAULT_HEAP_ID);
        ctx->p += 4;

        /* [u1]: size of allocation unit, in bytes */
        *ctx->p++ = 8;

        /* [u4]: virtual address of segment start */
        set4BE(ctx->p, (uintptr_t)chunkptr);
        ctx->p += 4;

        /* [u4]: offset of this piece (relative to the virtual address) */
        set4BE(ctx->p, 0);
        ctx->p += 4;

        /* [u4]: length of piece, in allocation units
         * We won't know this until we're done, so save the offset
         * and stuff in a dummy value.
         */
        ctx->pieceLenField = ctx->p;
        set4BE(ctx->p, 0x55555555);
        ctx->p += 4;

        ctx->needHeader = false;
    }

    /* Determine the type of this chunk.
     */
    if (userptr == NULL) {
        /* It's a free chunk.
         */
        state = HPSG_STATE(SOLIDITY_FREE, 0);
    } else {
        const DvmHeapChunk *hc = (const DvmHeapChunk *)userptr;
        const Object *obj = chunk2ptr(hc);

        /* If we're looking at the native heap, we'll just return
         * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
         */
        bool native = ctx->type == CHUNK_TYPE("NHSG");

        /* It's an allocated chunk. Figure out what it is.
         */
//TODO: if ctx.merge, see if this chunk is different from the last chunk.
//      If it's the same, we should combine them.
        if (!native && dvmIsValidObject(obj)) {
            ClassObject *clazz = obj->clazz;
            if (clazz == NULL) {
                /* The object was probably just created
                 * but hasn't been initialized yet.
                 */
                state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
            } else if (clazz == gDvm.unlinkedJavaLangClass ||
                       clazz == gDvm.classJavaLangClass)
            {
                state = HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
            } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
                if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
                } else {
                    switch (clazz->elementClass->primitiveType) {
                    case PRIM_BOOLEAN:
                    case PRIM_BYTE:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
                        break;
                    case PRIM_CHAR:
                    case PRIM_SHORT:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
                        break;
                    case PRIM_INT:
                    case PRIM_FLOAT:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
                        break;
                    case PRIM_DOUBLE:
                    case PRIM_LONG:
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
                        break;
                    default:
                        assert(!"Unknown GC heap object type");
                        state = HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
                        break;
                    }
                }
            } else {
                state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
            }
        } else {
            obj = NULL; // it's not actually an object
            state = HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
        }
    }

    /* Write out the chunk description.
     */
    chunklen /= ALLOCATION_UNIT_SIZE;   // convert to allocation units
    ctx->totalAllocationUnits += chunklen;
    while (chunklen > 256) {
        *ctx->p++ = state | HPSG_PARTIAL;
        *ctx->p++ = 255;    // length - 1
        chunklen -= 256;
    }
    *ctx->p++ = state;
    *ctx->p++ = chunklen - 1;
}
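/*
 * The dlmalloc-based callback earlier in this file calls append_chunk()
 * rather than writing records inline. A sketch that factors out the
 * record-writing tail of the function above (header emission elided;
 * the 17-byte header allowance in "needed" is an assumption):
 */
static void sketch_append_chunk(HeapChunkContext *ctx, u1 state,
                                void* ptr, size_t length)
{
    UNUSED_PARAMETER(ptr);

    /* Flush if the run-length records, plus a possible new segment
     * header, won't fit in what's left of the buffer.
     */
    size_t needed = (((length / ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17;
    size_t bytesLeft = ctx->bufLen - (size_t)(ctx->p - ctx->buf);
    if (bytesLeft < needed) {
        flush_hpsg_chunk(ctx);
    }

    /* (A full implementation would emit the segment header here when
     * ctx->needHeader is set, exactly as in the callback above.)
     */

    /* Encode the chunk as (state, runLength-1) byte pairs, splitting
     * runs longer than 256 allocation units with the HPSG_PARTIAL flag.
     */
    length /= ALLOCATION_UNIT_SIZE;
    ctx->totalAllocationUnits += length;
    while (length > 256) {
        *ctx->p++ = state | HPSG_PARTIAL;
        *ctx->p++ = 255;    /* length - 1 */
        length -= 256;
    }
    *ctx->p++ = state;
    *ctx->p++ = (u1)(length - 1);
}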
/*
 * Finish up the hprof dump. Returns true on success.
 */
bool hprofShutdown(hprof_context_t *tailCtx)
{
    FILE *fp = NULL;

    /* flush output to the temp file, then prepare the output file */
    hprofFlushCurrentRecord(tailCtx);

    LOGI("hprof: dumping heap strings to \"%s\".\n", tailCtx->fileName);
    if (!tailCtx->directToDdms) {
        fp = fopen(tailCtx->fileName, "w");
        if (fp == NULL) {
            LOGE("can't open %s: %s\n", tailCtx->fileName, strerror(errno));
            hprofFreeContext(tailCtx);
            return false;
        }
    }

    /*
     * Create a new context struct for the start of the file. We
     * heap-allocate it so we can share the "free" function.
     */
    hprof_context_t *headCtx = malloc(sizeof(*headCtx));
    if (headCtx == NULL) {
        LOGE("hprof: can't allocate context.\n");
        if (fp != NULL)
            fclose(fp);
        hprofFreeContext(tailCtx);
        return false;
    }
    hprofContextInit(headCtx, strdup(tailCtx->fileName), fp, true,
        tailCtx->directToDdms);

    hprofDumpStrings(headCtx);
    hprofDumpClasses(headCtx);

    /* Write a dummy stack trace record so the analysis
     * tools don't freak out.
     */
    hprofStartNewRecord(headCtx, HPROF_TAG_STACK_TRACE, HPROF_TIME);
    hprofAddU4ToRecord(&headCtx->curRec, HPROF_NULL_STACK_TRACE);
    hprofAddU4ToRecord(&headCtx->curRec, HPROF_NULL_THREAD);
    hprofAddU4ToRecord(&headCtx->curRec, 0);    // no frames

#if WITH_HPROF_STACK
    hprofDumpStackFrames(headCtx);
    hprofDumpStacks(headCtx);
#endif

    hprofFlushCurrentRecord(headCtx);

    hprofShutdown_Class();
    hprofShutdown_String();
#if WITH_HPROF_STACK
    hprofShutdown_Stack();
    hprofShutdown_StackFrame();
#endif

    if (tailCtx->directToDdms) {
        /* flush to ensure memstream pointer and size are updated */
        fflush(headCtx->fp);
        fflush(tailCtx->fp);

        /* send the data off to DDMS */
        struct iovec iov[2];
        iov[0].iov_base = headCtx->fileDataPtr;
        iov[0].iov_len = headCtx->fileDataSize;
        iov[1].iov_base = tailCtx->fileDataPtr;
        iov[1].iov_len = tailCtx->fileDataSize;
        dvmDbgDdmSendChunkV(CHUNK_TYPE("HPDS"), iov, 2);
    } else {
        /*
         * Append the contents of the temp file to the output file. The temp
         * file was removed immediately after being opened, so it will vanish
         * when we close it.
         */
        rewind(tailCtx->fp);
        if (!copyFileToFile(headCtx->fp, tailCtx->fp)) {
            LOGW("hprof: file copy failed, hprof data may be incomplete\n");
            /* finish up anyway */
        }
    }

    hprofFreeContext(headCtx);
    hprofFreeContext(tailCtx);

    /* throw out a log message for the benefit of "runhat" */
    LOGI("hprof: heap dump completed, temp file removed\n");

    return true;
}
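/*
 * copyFileToFile() isn't shown in this excerpt. A minimal sketch of the
 * behavior hprofShutdown() relies on (append everything readable from
 * "src" onto "dst"; the real Dalvik helper may differ in signature and
 * error reporting):
 */
static bool sketch_copyFileToFile(FILE *dst, FILE *src)
{
    char buf[4096];
    size_t n;

    while ((n = fread(buf, 1, sizeof(buf), src)) > 0) {
        if (fwrite(buf, 1, n, dst) != n)
            return false;   /* short write */
    }
    return ferror(src) == 0;
}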