SmartCardMonitoringThread::SmartCardMonitoringThread(SECMODModule *module_)
  : mThread(nsnull)
{
  mModule = SECMOD_ReferenceModule(module_);
  // simple hash functions, most modules have less than 3 slots, so 10 buckets
  // should be plenty
  mHash = PL_NewHashTable(10, unity, PL_CompareValues, PL_CompareStrings,
                          nsnull, 0);
}
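/*
 * Hedged sketch, not taken from the original source: the constructor above
 * passes a custom PLHashFunction named `unity`. An identity-style hash of the
 * following shape would satisfy that role, assuming the keys are small
 * integer slot IDs stored directly in the key pointer.
 */
#include <stdint.h>   /* uintptr_t */
#include "plhash.h"   /* PLHashNumber */

static PLHashNumber
identity_hash_sketch(const void *key)
{
    /* Reinterpret the key pointer's bits as the hash value. */
    return (PLHashNumber)(uintptr_t)key;
}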
static void
RecreateBloatView()
{
  gBloatView = PL_NewHashTable(256,
                               PL_HashString,
                               PL_CompareStrings,
                               PL_CompareValues,
                               &bloatViewHashAllocOps,
                               nullptr);
}
// static
nsresult
nsHTMLTags::AddRefTable(void)
{
  if (gTableRefCount++ == 0) {
    NS_ASSERTION(!gTagTable, "pre existing hash!");
    gTagTable = PL_NewHashTable(64, HTMLTagsHashCodeUCPtr,
                                HTMLTagsKeyCompareUCPtr, PL_CompareValues,
                                nsnull, nsnull);
    NS_ENSURE_TRUE(gTagTable, NS_ERROR_OUT_OF_MEMORY);

    // Fill in gTagTable with the above static PRUnichar strings as
    // keys and the value of the corresponding enum as the value in
    // the table.
    PRInt32 i;
    for (i = 0; i < NS_HTML_TAG_MAX; ++i) {
      PRUint32 len = nsCRT::strlen(kTagUnicodeTable[i]);
      PL_HashTableAdd(gTagTable, kTagUnicodeTable[i], NS_INT32_TO_PTR(i + 1));
      if (len > sMaxTagNameLength) {
        sMaxTagNameLength = len;
      }
    }

    //NS_ASSERTION(sMaxTagNameLength == NS_HTMLTAG_NAME_MAX_LENGTH,
    //             "NS_HTMLTAG_NAME_MAX_LENGTH not set correctly!");

    // Fill in our static atom pointers
    NS_RegisterStaticAtoms(kTagAtoms_info, NS_ARRAY_LENGTH(kTagAtoms_info));

#ifdef DEBUG
    {
      // let's verify that all names in the table are lowercase...
      for (i = 0; i < NS_HTML_TAG_MAX; ++i) {
        nsCAutoString temp1(kTagAtoms_info[i].mString);
        nsCAutoString temp2(kTagAtoms_info[i].mString);
        ToLowerCase(temp1);
        NS_ASSERTION(temp1.Equals(temp2), "upper case char in table");
      }

      // let's verify that all names in the unicode strings above are
      // correct.
      for (i = 0; i < NS_HTML_TAG_MAX; ++i) {
        nsAutoString temp1(kTagUnicodeTable[i]);
        nsAutoString temp2;
        temp2.AssignWithConversion(kTagAtoms_info[i].mString);
        NS_ASSERTION(temp1.Equals(temp2), "Bad unicode tag name!");
      }
    }
#endif
  }

  return NS_OK;
}
/*
 * nssHash_create
 *
 */
NSS_IMPLEMENT nssHash *
nssHash_Create(NSSArena *arenaOpt, PRUint32 numBuckets, PLHashFunction keyHash,
               PLHashComparator keyCompare, PLHashComparator valueCompare)
{
    nssHash *rv;
    NSSArena *arena;
    PRBool i_alloced;

#ifdef NSSDEBUG
    if (arenaOpt && PR_SUCCESS != nssArena_verifyPointer(arenaOpt)) {
        nss_SetError(NSS_ERROR_INVALID_POINTER);
        return (nssHash *)NULL;
    }
#endif /* NSSDEBUG */

    if (arenaOpt) {
        arena = arenaOpt;
        i_alloced = PR_FALSE;
    } else {
        arena = nssArena_Create();
        i_alloced = PR_TRUE;
    }

    rv = nss_ZNEW(arena, nssHash);
    if ((nssHash *)NULL == rv) {
        goto loser;
    }

    rv->mutex = PZ_NewLock(nssILockOther);
    if ((PZLock *)NULL == rv->mutex) {
        goto loser;
    }

    rv->plHashTable = PL_NewHashTable(numBuckets, keyHash, keyCompare,
                                      valueCompare, &nssArenaHashAllocOps,
                                      arena);
    if ((PLHashTable *)NULL == rv->plHashTable) {
        (void)PZ_DestroyLock(rv->mutex);
        goto loser;
    }

    rv->count = 0;
    rv->arena = arena;
    rv->i_alloced_arena = i_alloced;

    return rv;

loser:
    (void)nss_ZFreeIf(rv);
    return (nssHash *)NULL;
}
// Must be single-threaded here, early in primordial thread.
static void InitAutoLockStatics()
{
    (void) PR_NewThreadPrivateIndex(&LockStackTPI, 0);
    OrderTable = PL_NewHashTable(64, _hash_pointer,
                                 PL_CompareValues, PL_CompareValues,
                                 &_hash_alloc_ops, 0);
    if (OrderTable && !(OrderTableLock = PR_NewLock())) {
        PL_HashTableDestroy(OrderTable);
        OrderTable = 0;
    }
    PR_CSetOnMonitorRecycle(OnSemaphoreRecycle);
}
/*
 * nssCKFWHash_Create
 *
 */
NSS_IMPLEMENT nssCKFWHash *
nssCKFWHash_Create(NSSCKFWInstance *fwInstance, NSSArena *arena, CK_RV *pError)
{
    nssCKFWHash *rv;

#ifdef NSSDEBUG
    if (!pError) {
        return (nssCKFWHash *)NULL;
    }

    if (PR_SUCCESS != nssArena_verifyPointer(arena)) {
        *pError = CKR_ARGUMENTS_BAD;
        return (nssCKFWHash *)NULL;
    }
#endif /* NSSDEBUG */

    rv = nss_ZNEW(arena, nssCKFWHash);
    if (!rv) {
        *pError = CKR_HOST_MEMORY;
        return (nssCKFWHash *)NULL;
    }

    rv->mutex = nssCKFWInstance_CreateMutex(fwInstance, arena, pError);
    if (!rv->mutex) {
        if (CKR_OK == *pError) {
            (void)nss_ZFreeIf(rv);
            *pError = CKR_GENERAL_ERROR;
        }
        return (nssCKFWHash *)NULL;
    }

    rv->plHashTable = PL_NewHashTable(0, nss_ckfw_identity_hash,
                                      PL_CompareValues, PL_CompareValues,
                                      &nssArenaHashAllocOps, arena);
    if (!rv->plHashTable) {
        (void)nssCKFWMutex_Destroy(rv->mutex);
        (void)nss_ZFreeIf(rv);
        *pError = CKR_HOST_MEMORY;
        return (nssCKFWHash *)NULL;
    }

    rv->count = 0;

    return rv;
}
nsresult
nsHttpAuthCache::Init()
{
    NS_ENSURE_TRUE(!mDB, NS_ERROR_ALREADY_INITIALIZED);

    LOG(("nsHttpAuthCache::Init\n"));

    mDB = PL_NewHashTable(128, (PLHashFunction) PL_HashString,
                          (PLHashComparator) PL_CompareStrings,
                          (PLHashComparator) 0, &gHashAllocOps, this);
    if (!mDB)
        return NS_ERROR_OUT_OF_MEMORY;

    return NS_OK;
}
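/*
 * Hedged sketch, not the actual gHashAllocOps from nsHttpAuthCache: a minimal
 * PLHashAllocOps table that routes table and entry storage through
 * malloc/free. The Sketch* names below are illustrative only; the real ops
 * table in the source may also copy keys or carry per-table state via `pool`.
 */
#include <stdlib.h>
#include "plhash.h"

static void *SketchAllocTable(void *pool, PRSize size) { return malloc(size); }
static void SketchFreeTable(void *pool, void *item) { free(item); }
static PLHashEntry *SketchAllocEntry(void *pool, const void *key)
{
    return (PLHashEntry *)malloc(sizeof(PLHashEntry));
}
static void SketchFreeEntry(void *pool, PLHashEntry *he, PRUintn flag)
{
    /* HT_FREE_ENTRY asks the callback to release the entry itself;
       HT_FREE_VALUE would only ask it to release the stored value. */
    if (flag == HT_FREE_ENTRY)
        free(he);
}

static const PLHashAllocOps sketchHashAllocOps = {
    SketchAllocTable, SketchFreeTable, SketchAllocEntry, SketchFreeEntry
};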
tmreader *tmreader_new(const char *program, void *data)
{
    tmreader *tmr;

    tmr = calloc(1, sizeof *tmr);
    if (!tmr)
        return NULL;
    tmr->program = program;
    tmr->data = data;
    PL_INIT_ARENA_POOL(&tmr->arena, "TMReader", 256*1024);
    tmr->libraries = PL_NewHashTable(100, hash_serial, PL_CompareValues,
                                     PL_CompareStrings, &graphnode_hashallocops,
                                     &tmr->arena);
    tmr->filenames = PL_NewHashTable(100, hash_serial, PL_CompareValues,
                                     PL_CompareStrings, &filename_hashallocops,
                                     &tmr->arena);
    tmr->components = PL_NewHashTable(10000, PL_HashString, PL_CompareStrings,
                                      PL_CompareValues, &component_hashallocops,
                                      &tmr->arena);
    tmr->methods = PL_NewHashTable(10000, hash_serial, PL_CompareValues,
                                   PL_CompareStrings, &method_hashallocops,
                                   &tmr->arena);
    tmr->callsites = PL_NewHashTable(200000, hash_serial, PL_CompareValues,
                                     PL_CompareValues, &callsite_hashallocops,
                                     &tmr->arena);
    tmr->calltree_root.entry.value = (void*) strdup("root");

    if (!tmr->libraries || !tmr->components || !tmr->methods ||
        !tmr->callsites || !tmr->calltree_root.entry.value || !tmr->filenames) {
        tmreader_destroy(tmr);
        return NULL;
    }

    return tmr;
}
/** **************************************************************************
 * Internal initialization. Always called, from lib/httpdaemon/WebServer.cpp
 *
 */
void reqlimit_init_crits()
{
    reqlimit_crit = crit_init();

    anon_bucket.count = 0;
    anon_bucket.time = time(NULL);
    anon_bucket.state = REQ_NOACTION;
    anon_bucket.conc = 0;

    hashtable = PL_NewHashTable(0, PL_HashString, PL_CompareStrings,
                                PL_CompareValues, NULL, NULL);

    next_timeout = time(NULL) + purge_timeout;

    // Initializes a "slot" for our reqlimit_conc_done() function. See
    // call to request_set_data() in check_request_limits() for more.
    req_cleanup = request_alloc_slot(&reqlimit_conc_done);
    assert(req_cleanup != -1);
}
nsTopProgressManager::nsTopProgressManager(MWContext* context)
    : nsProgressManager(context),
      fActualStart(PR_Now()),
      fProgressBarStart(PR_Now()),
      fDefaultStatus(NULL)
{
    fURLs = PL_NewHashTable(OBJECT_TABLE_INIT_SIZE,
                            pm_HashURL, pm_CompareURLs, PL_CompareValues,
                            &AllocOps, NULL);

    // Start the progress manager
    fTimeout = FE_SetTimeout(nsTopProgressManager::TimeoutCallback,
                             (void*) this, 500);
    PR_ASSERT(fTimeout);

    // to avoid "strobe" mode...
    fProgress = 1;
    FE_SetProgressBarPercent(fContext, fProgress);
}
ReplicaUpdateDNList
replica_groupdn_list_new(const Slapi_ValueSet *vs)
{
    PLHashTable *hash;

    if (vs == NULL) {
        return NULL;
    }

    /* allocate table */
    hash = PL_NewHashTable(4, PL_HashString, PL_CompareStrings,
                           updatedn_compare_dns, NULL, NULL);
    if (hash == NULL) {
        slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
                      "replica_new_updatedn_list - "
                      "Failed to allocate hash table; NSPR error - %d\n",
                      PR_GetError());
        return NULL;
    }

    replica_updatedn_list_delete(hash, NULL); /* delete all values */
    replica_updatedn_list_add_ext(hash, vs, 1);
    return (ReplicaUpdateDNList)hash;
}
nsNodeInfoManager::nsNodeInfoManager()
  : mDocument(nsnull),
    mPrincipal(nsnull),
    mTextNodeInfo(nsnull),
    mCommentNodeInfo(nsnull),
    mDocumentNodeInfo(nsnull),
    mBindingManager(nsnull)
{
  nsLayoutStatics::AddRef();

#ifdef PR_LOGGING
  if (!gNodeInfoManagerLeakPRLog)
    gNodeInfoManagerLeakPRLog = PR_NewLogModule("NodeInfoManagerLeak");

  if (gNodeInfoManagerLeakPRLog)
    PR_LOG(gNodeInfoManagerLeakPRLog, PR_LOG_DEBUG,
           ("NODEINFOMANAGER %p created", this));
#endif

  mNodeInfoHash = PL_NewHashTable(32, GetNodeInfoInnerHashValue,
                                  NodeInfoInnerKeyCompare,
                                  PL_CompareValues, nsnull, nsnull);
}
static void
InitTraceLog(void)
{
  if (gInitialized)
    return;
  gInitialized = true;

  bool defined;
  defined = InitLog("XPCOM_MEM_BLOAT_LOG", "bloat/leaks", &gBloatLog);
  if (!defined)
    gLogLeaksOnly = InitLog("XPCOM_MEM_LEAK_LOG", "leaks", &gBloatLog);
  if (defined || gLogLeaksOnly) {
    RecreateBloatView();
    if (!gBloatView) {
      NS_WARNING("out of memory");
      gBloatLog = nullptr;
      gLogLeaksOnly = false;
    }
  }

  (void)InitLog("XPCOM_MEM_REFCNT_LOG", "refcounts", &gRefcntsLog);

  (void)InitLog("XPCOM_MEM_ALLOC_LOG", "new/delete", &gAllocLog);

  defined = InitLog("XPCOM_MEM_LEAKY_LOG", "for leaky", &gLeakyLog);
  if (defined) {
    gLogToLeaky = true;
    PRFuncPtr p = nullptr, q = nullptr;
#ifdef HAVE_DLOPEN
    {
      PRLibrary *lib = nullptr;
      p = PR_FindFunctionSymbolAndLibrary("__log_addref", &lib);
      if (lib) {
        PR_UnloadLibrary(lib);
        lib = nullptr;
      }
      q = PR_FindFunctionSymbolAndLibrary("__log_release", &lib);
      if (lib) {
        PR_UnloadLibrary(lib);
      }
    }
#endif
    if (p && q) {
      leakyLogAddRef = (void (*)(void*,int,int)) p;
      leakyLogRelease = (void (*)(void*,int,int)) q;
    } else {
      gLogToLeaky = false;
      fprintf(stdout, "### ERROR: XPCOM_MEM_LEAKY_LOG defined, but can't locate __log_addref and __log_release symbols\n");
      fflush(stdout);
    }
  }

  const char* classes = getenv("XPCOM_MEM_LOG_CLASSES");

#ifdef HAVE_CPP_DYNAMIC_CAST_TO_VOID_PTR
  if (classes) {
    (void)InitLog("XPCOM_MEM_COMPTR_LOG", "nsCOMPtr", &gCOMPtrLog);
  } else {
    if (getenv("XPCOM_MEM_COMPTR_LOG")) {
      fprintf(stdout, "### XPCOM_MEM_COMPTR_LOG defined -- but XPCOM_MEM_LOG_CLASSES is not defined\n");
    }
  }
#else
  const char* comptr_log = getenv("XPCOM_MEM_COMPTR_LOG");
  if (comptr_log) {
    fprintf(stdout, "### XPCOM_MEM_COMPTR_LOG defined -- but it will not work without dynamic_cast\n");
  }
#endif

  if (classes) {
    // if XPCOM_MEM_LOG_CLASSES was set to some value, the value is interpreted
    // as a list of class names to track
    gTypesToLog = PL_NewHashTable(256,
                                  PL_HashString,
                                  PL_CompareStrings,
                                  PL_CompareValues,
                                  &typesToLogHashAllocOps, NULL);
    if (!gTypesToLog) {
      NS_WARNING("out of memory");
      fprintf(stdout, "### XPCOM_MEM_LOG_CLASSES defined -- unable to log specific classes\n");
    } else {
      fprintf(stdout, "### XPCOM_MEM_LOG_CLASSES defined -- only logging these classes: ");
      const char* cp = classes;
      for (;;) {
        char* cm = (char*) strchr(cp, ',');
        if (cm) {
          *cm = '\0';
        }
        PL_HashTableAdd(gTypesToLog, nsCRT::strdup(cp), (void*)1);
        fprintf(stdout, "%s ", cp);
        if (!cm) break;
        *cm = ',';
        cp = cm + 1;
      }
      fprintf(stdout, "\n");
    }

    gSerialNumbers = PL_NewHashTable(256,
                                     HashNumber,
                                     PL_CompareValues,
                                     PL_CompareValues,
                                     &serialNumberHashAllocOps, NULL);
  }

  const char* objects = getenv("XPCOM_MEM_LOG_OBJECTS");
  if (objects) {
    gObjectsToLog = PL_NewHashTable(256,
                                    HashNumber,
                                    PL_CompareValues,
                                    PL_CompareValues,
                                    NULL, NULL);

    if (!gObjectsToLog) {
      NS_WARNING("out of memory");
      fprintf(stdout, "### XPCOM_MEM_LOG_OBJECTS defined -- unable to log specific objects\n");
    } else if (!(gRefcntsLog || gAllocLog || gCOMPtrLog)) {
      fprintf(stdout, "### XPCOM_MEM_LOG_OBJECTS defined -- but none of XPCOM_MEM_(REFCNT|ALLOC|COMPTR)_LOG is defined\n");
    } else {
      fprintf(stdout, "### XPCOM_MEM_LOG_OBJECTS defined -- only logging these objects: ");
      const char* cp = objects;
      for (;;) {
        char* cm = (char*) strchr(cp, ',');
        if (cm) {
          *cm = '\0';
        }
        intptr_t top = 0;
        intptr_t bottom = 0;
        while (*cp) {
          if (*cp == '-') {
            bottom = top;
            top = 0;
            ++cp;
          }
          top *= 10;
          top += *cp - '0';
          ++cp;
        }
        if (!bottom) {
          bottom = top;
        }
        for (intptr_t serialno = bottom; serialno <= top; serialno++) {
          PL_HashTableAdd(gObjectsToLog, (const void*)serialno, (void*)1);
          fprintf(stdout, "%ld ", serialno);
        }
        if (!cm) break;
        *cm = ',';
        cp = cm + 1;
      }
      fprintf(stdout, "\n");
    }
  }

  if (gBloatLog || gRefcntsLog || gAllocLog || gLeakyLog || gCOMPtrLog) {
    gLogging = true;
  }

  gTraceLock = PR_NewLock();
}
static void
InitTraceLog()
{
  if (gInitialized) {
    return;
  }
  gInitialized = true;

  bool defined = InitLog("XPCOM_MEM_BLOAT_LOG", "bloat/leaks", &gBloatLog);
  if (!defined) {
    gLogLeaksOnly = InitLog("XPCOM_MEM_LEAK_LOG", "leaks", &gBloatLog);
  }
  if (defined || gLogLeaksOnly) {
    RecreateBloatView();
    if (!gBloatView) {
      NS_WARNING("out of memory");
      maybeUnregisterAndCloseFile(gBloatLog);
      gLogLeaksOnly = false;
    }
  }

  InitLog("XPCOM_MEM_REFCNT_LOG", "refcounts", &gRefcntsLog);

  InitLog("XPCOM_MEM_ALLOC_LOG", "new/delete", &gAllocLog);

  const char* classes = getenv("XPCOM_MEM_LOG_CLASSES");

#ifdef HAVE_CPP_DYNAMIC_CAST_TO_VOID_PTR
  if (classes) {
    InitLog("XPCOM_MEM_COMPTR_LOG", "nsCOMPtr", &gCOMPtrLog);
  } else {
    if (getenv("XPCOM_MEM_COMPTR_LOG")) {
      fprintf(stdout, "### XPCOM_MEM_COMPTR_LOG defined -- but XPCOM_MEM_LOG_CLASSES is not defined\n");
    }
  }
#else
  const char* comptr_log = getenv("XPCOM_MEM_COMPTR_LOG");
  if (comptr_log) {
    fprintf(stdout, "### XPCOM_MEM_COMPTR_LOG defined -- but it will not work without dynamic_cast\n");
  }
#endif

  if (classes) {
    // if XPCOM_MEM_LOG_CLASSES was set to some value, the value is interpreted
    // as a list of class names to track
    gTypesToLog = PL_NewHashTable(256,
                                  PL_HashString,
                                  PL_CompareStrings,
                                  PL_CompareValues,
                                  &typesToLogHashAllocOps, nullptr);
    if (!gTypesToLog) {
      NS_WARNING("out of memory");
      fprintf(stdout, "### XPCOM_MEM_LOG_CLASSES defined -- unable to log specific classes\n");
    } else {
      fprintf(stdout, "### XPCOM_MEM_LOG_CLASSES defined -- only logging these classes: ");
      const char* cp = classes;
      for (;;) {
        char* cm = (char*)strchr(cp, ',');
        if (cm) {
          *cm = '\0';
        }
        PL_HashTableAdd(gTypesToLog, strdup(cp), (void*)1);
        fprintf(stdout, "%s ", cp);
        if (!cm) {
          break;
        }
        *cm = ',';
        cp = cm + 1;
      }
      fprintf(stdout, "\n");
    }

    gSerialNumbers = PL_NewHashTable(256,
                                     HashNumber,
                                     PL_CompareValues,
                                     PL_CompareValues,
                                     &serialNumberHashAllocOps, nullptr);
  }

  const char* objects = getenv("XPCOM_MEM_LOG_OBJECTS");
  if (objects) {
    gObjectsToLog = PL_NewHashTable(256,
                                    HashNumber,
                                    PL_CompareValues,
                                    PL_CompareValues,
                                    nullptr, nullptr);

    if (!gObjectsToLog) {
      NS_WARNING("out of memory");
      fprintf(stdout, "### XPCOM_MEM_LOG_OBJECTS defined -- unable to log specific objects\n");
    } else if (!(gRefcntsLog || gAllocLog || gCOMPtrLog)) {
      fprintf(stdout, "### XPCOM_MEM_LOG_OBJECTS defined -- but none of XPCOM_MEM_(REFCNT|ALLOC|COMPTR)_LOG is defined\n");
    } else {
      fprintf(stdout, "### XPCOM_MEM_LOG_OBJECTS defined -- only logging these objects: ");
      const char* cp = objects;
      for (;;) {
        char* cm = (char*)strchr(cp, ',');
        if (cm) {
          *cm = '\0';
        }
        intptr_t top = 0;
        intptr_t bottom = 0;
        while (*cp) {
          if (*cp == '-') {
            bottom = top;
            top = 0;
            ++cp;
          }
          top *= 10;
          top += *cp - '0';
          ++cp;
        }
        if (!bottom) {
          bottom = top;
        }
        for (intptr_t serialno = bottom; serialno <= top; serialno++) {
          PL_HashTableAdd(gObjectsToLog, (const void*)serialno, (void*)1);
          fprintf(stdout, "%" PRIdPTR " ", serialno);
        }
        if (!cm) {
          break;
        }
        *cm = ',';
        cp = cm + 1;
      }
      fprintf(stdout, "\n");
    }
  }

  if (gBloatLog) {
    gLogging = OnlyBloatLogging;
  }

  if (gRefcntsLog || gAllocLog || gCOMPtrLog) {
    gLogging = FullLogging;
  }

  gTraceLock = PR_NewLock();
}
/*********************************************************************
 *
 * m a i n
 */
int
main(int argc, char *argv[])
{
    PRBool readOnly;
    int retval = 0;

    outputFD = PR_STDOUT;
    errorFD = PR_STDERR;

    progName = argv[0];

    if (argc < 2) {
        Usage();
    }

    excludeDirs = PL_NewHashTable(10, PL_HashString, PL_CompareStrings,
                                  PL_CompareStrings, NULL, NULL);
    extensions = PL_NewHashTable(10, PL_HashString, PL_CompareStrings,
                                 PL_CompareStrings, NULL, NULL);

    if (parse_args(argc, argv)) {
        retval = -1;
        goto cleanup;
    }

    /* Parse the command file if one was given */
    if (cmdFile) {
        if (ProcessCommandFile()) {
            retval = -1;
            goto cleanup;
        }
    }

    /* Set up output redirection */
    if (outfile) {
        if (PR_Access(outfile, PR_ACCESS_EXISTS) == PR_SUCCESS) {
            /* delete the file if it is already present */
            PR_fprintf(errorFD,
                       "warning: %s already exists and will be overwritten.\n",
                       outfile);
            warningCount++;
            if (PR_Delete(outfile) != PR_SUCCESS) {
                PR_fprintf(errorFD, "ERROR: unable to delete %s.\n", outfile);
                errorCount++;
                exit(ERRX);
            }
        }
        outputFD = PR_Open(outfile,
                           PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE, 0777);
        if (!outputFD) {
            PR_fprintf(errorFD, "ERROR: Unable to create %s.\n", outfile);
            errorCount++;
            exit(ERRX);
        }
        errorFD = outputFD;
    }

    /* This seems to be a fairly common user error */
    if (verify && list_certs > 0) {
        PR_fprintf(errorFD, "%s: Can't use -l and -v at the same time\n",
                   PROGRAM_NAME);
        errorCount++;
        retval = -1;
        goto cleanup;
    }

    /* -J assumes -Z now */
    if (javascript && zipfile) {
        PR_fprintf(errorFD, "%s: Can't use -J and -Z at the same time\n",
                   PROGRAM_NAME);
        PR_fprintf(errorFD, "%s: -J option will create the jar files for you\n",
                   PROGRAM_NAME);
        errorCount++;
        retval = -1;
        goto cleanup;
    }

    /* -X needs -Z */
    if (xpi_arc && !zipfile) {
        PR_fprintf(errorFD, "%s: option XPI (-X) requires option jarfile (-Z)\n",
                   PROGRAM_NAME);
        errorCount++;
        retval = -1;
        goto cleanup;
    }

    /* Less common mixing of -L with various options */
    if (list_certs > 0 &&
        (tell_who || zipfile || javascript || scriptdir || extensionsGiven ||
         exclusionsGiven || install_script)) {
        PR_fprintf(errorFD, "%s: Can't use -l or -L with that option\n",
                   PROGRAM_NAME);
        errorCount++;
        retval = -1;
        goto cleanup;
    }

    if (!cert_dir)
        cert_dir = get_default_cert_dir();
    VerifyCertDir(cert_dir, keyName);

    if (compression_level < MIN_COMPRESSION_LEVEL ||
        compression_level > MAX_COMPRESSION_LEVEL) {
        PR_fprintf(errorFD, "Compression level must be between %d and %d.\n",
                   MIN_COMPRESSION_LEVEL, MAX_COMPRESSION_LEVEL);
        errorCount++;
        retval = -1;
        goto cleanup;
    }

    if (jartree && !keyName) {
        PR_fprintf(errorFD, "You must specify a key with which to sign.\n");
        errorCount++;
        retval = -1;
        goto cleanup;
    }

    readOnly = (genkey == NULL); /* only key generation requires write */
    if (InitCrypto(cert_dir, readOnly)) {
        PR_fprintf(errorFD, "ERROR: Cryptographic initialization failed.\n");
        errorCount++;
        retval = -1;
        goto cleanup;
    }

    if (enableOCSP) {
        SECStatus rv = CERT_EnableOCSPChecking(CERT_GetDefaultCertDB());
        if (rv != SECSuccess) {
            PR_fprintf(errorFD, "ERROR: Attempt to enable OCSP Checking failed.\n");
            errorCount++;
            retval = -1;
        }
    }

    if (verify) {
        if (VerifyJar(verify)) {
            errorCount++;
            retval = -1;
            goto cleanup;
        }
    } else if (list_certs) {
        if (ListCerts(keyName, list_certs)) {
            errorCount++;
            retval = -1;
            goto cleanup;
        }
    } else if (list_modules) {
        JarListModules();
    } else if (genkey) {
        if (GenerateCert(genkey, keySize, token)) {
            errorCount++;
            retval = -1;
            goto cleanup;
        }
    } else if (tell_who) {
        if (JarWho(tell_who)) {
            errorCount++;
            retval = -1;
            goto cleanup;
        }
    } else if (javascript && jartree) {
        /* make sure directory exists */
        PRDir *dir;
        dir = PR_OpenDir(jartree);
        if (!dir) {
            PR_fprintf(errorFD, "ERROR: unable to open directory %s.\n",
                       jartree);
            errorCount++;
            retval = -1;
            goto cleanup;
        } else {
            PR_CloseDir(dir);
        }

        /* undo junk from prior runs of signtool*/
        if (RemoveAllArc(jartree)) {
            PR_fprintf(errorFD, "Error removing archive directories under %s\n",
                       jartree);
            errorCount++;
            retval = -1;
            goto cleanup;
        }

        /* traverse all the htm|html files in the directory */
        if (InlineJavaScript(jartree, !noRecurse)) {
            retval = -1;
            goto cleanup;
        }

        /* sign any resultant .arc directories created in above step */
        if (SignAllArc(jartree, keyName, javascript, metafile, install_script,
                       optimize, !noRecurse)) {
            retval = -1;
            goto cleanup;
        }

        if (!leaveArc) {
            RemoveAllArc(jartree);
        }

        if (errorCount > 0 || warningCount > 0) {
            PR_fprintf(outputFD, "%d error%s, %d warning%s.\n",
                       errorCount, errorCount == 1 ? "" : "s",
                       warningCount, warningCount == 1 ? "" : "s");
        } else {
            PR_fprintf(outputFD, "Directory %s signed successfully.\n",
                       jartree);
        }
    } else if (jartree) {
        SignArchive(jartree, keyName, zipfile, javascript, metafile,
                    install_script, optimize, !noRecurse);
    } else
        Usage();

cleanup:
    if (extensions) {
        PL_HashTableDestroy(extensions);
        extensions = NULL;
    }
    if (excludeDirs) {
        PL_HashTableDestroy(excludeDirs);
        excludeDirs = NULL;
    }
    if (outputFD != PR_STDOUT) {
        PR_Close(outputFD);
    }
    rm_dash_r(TMP_OUTPUT);
    if (retval == 0) {
        if (NSS_Shutdown() != SECSuccess) {
            exit(1);
        }
    }

    return retval;
}
void
collateHistory (RDF r, RDF_Resource u, PRBool byDateFlag)
{
  HASHINFO hash = { 4*1024, 0, 0, 0, 0, 0};
  DBT key, data;
  time_t last, first, numaccess;
  PRBool firstOne = 0;
  DB* db = CallDBOpenUsingFileURL(gGlobalHistoryURL, O_RDONLY, 0600,
                                  DB_HASH, &hash);
  grdf = r;
  if (db != NULL) {
    if (!byDateFlag) {
      hostHash = PL_NewHashTable(500, idenHash, PL_CompareValues,
                                 PL_CompareValues, null, null);
    } else ByDateOpened = 1;
    while (0 == (*db->seq)(db, &key, &data, (firstOne ? R_NEXT : R_FIRST))) {
      char* title = ((char*)data.data + 16);   /* title */
      char* url = (char*)key.data;             /* url */
      int32 flag = (int32)*((char*)data.data + 3*sizeof(int32));
      firstOne = 1;
#ifdef XP_UNIX
      if ((/*1 == flag &&*/ displayHistoryItem((char*)key.data))) {
#else
      if (1 == flag && displayHistoryItem((char*)key.data)) {
#endif
        COPY_INT32(&last, (time_t *)((char *)data.data));
        COPY_INT32(&first, (time_t *)((char *)data.data + 4));
        COPY_INT32(&numaccess, (time_t *)((char *)data.data + 8));
        collateOneHist(r, u, url, title, last, first, numaccess, byDateFlag);
      }
    }
    (*db->close)(db);
  }
}

void
collateOneHist (RDF r, RDF_Resource u, char* url, char* title,
                time_t lastAccessDate, time_t firstAccessDate,
                uint32 numAccesses, PRBool byDateFlag)
{
  RDF_Resource hostUnit, urlUnit;
  char* existingName = NULL;
  if (startsWith("404", title)) return;
  urlUnit = HistCreate(url, 1);
  existingName = nlocalStoreGetSlotValue(gLocalStore, urlUnit,
                                         gCoreVocab->RDF_name,
                                         RDF_STRING_TYPE, 0, 1);
  if (existingName == NULL) {
    if (title[0] != '\0') remoteAddName(urlUnit, title);
  } else freeMem(existingName);
  if (byDateFlag) {
    hostUnit = hostUnitOfDate(r, u, lastAccessDate);
  } else {
    hostUnit = hostUnitOfURL(r, u, urlUnit, title);
  }
  if (hostUnit == NULL) return;
  if (hostUnit != urlUnit) remoteAddParent(urlUnit, hostUnit);
  remoteStoreAdd(gRemoteStore, urlUnit, gWebData->RDF_lastVisitDate,
                 (void *)lastAccessDate, RDF_INT_TYPE, 1);
  remoteStoreAdd(gRemoteStore, urlUnit, gWebData->RDF_firstVisitDate,
                 (void *)firstAccessDate, RDF_INT_TYPE, 1);
  if (numAccesses == 0) ++numAccesses;
  remoteStoreAdd(gRemoteStore, urlUnit, gWebData->RDF_numAccesses,
                 (void *)numAccesses, RDF_INT_TYPE, 1);
}
int
main(int argc, char **argv)
{
    if (argc != 2) {
        fprintf(stderr,
                "Expected usage: %s <sd-leak-file>\n"
                "  sd-leak-file: Output of --shutdown-leaks=<file> option.\n",
                argv[0]);
        return 1;
    }

    NS_InitXPCOM2(NULL, NULL, NULL);

    ADLog log;
    if (!log.Read(argv[1])) {
        fprintf(stderr, "%s: Error reading input file %s.\n",
                argv[0], argv[1]);
    }

    const size_t count = log.count();

    PLHashTable *memory_map =
        PL_NewHashTable(count * 8, hash_pointer, PL_CompareValues,
                        PL_CompareValues, 0, 0);
    if (!memory_map) {
        fprintf(stderr, "%s: Out of memory.\n", argv[0]);
        return 1;
    }

    // Create one |AllocationNode| object for each log entry, and create
    // entries in the hashtable pointing to it for each byte it occupies.
    AllocationNode *nodes = new AllocationNode[count];
    if (!nodes) {
        fprintf(stderr, "%s: Out of memory.\n", argv[0]);
        return 1;
    }

    {
        AllocationNode *cur_node = nodes;
        for (ADLog::const_iterator entry = log.begin(), entry_end = log.end();
             entry != entry_end; ++entry, ++cur_node) {
            const ADLog::Entry *e = cur_node->entry = *entry;
            cur_node->reached = false;

            for (ADLog::Pointer p = e->address,
                                p_end = e->address + e->datasize;
                 p != p_end; ++p) {
                PLHashEntry *e = PL_HashTableAdd(memory_map, p, cur_node);
                if (!e) {
                    fprintf(stderr, "%s: Out of memory.\n", argv[0]);
                    return 1;
                }
            }
        }
    }

    // Construct graph based on pointers.
    for (AllocationNode *node = nodes, *node_end = nodes + count;
         node != node_end; ++node) {
        const ADLog::Entry *e = node->entry;
        for (const char *d = e->data,
                        *d_end = e->data + e->datasize -
                                 e->datasize % sizeof(ADLog::Pointer);
             d != d_end; d += sizeof(ADLog::Pointer)) {
            AllocationNode *target = (AllocationNode*)
                PL_HashTableLookup(memory_map, *(void**)d);
            if (target) {
                target->pointers_from.AppendElement(node);
                node->pointers_to.AppendElement(target);
            }
        }
    }

    // Do a depth-first search on the graph (i.e., by following
    // |pointers_to|) and assign the post-order index to |index|.
    {
        PRUint32 dfs_index = 0;
        nsVoidArray stack;

        for (AllocationNode *n = nodes, *n_end = nodes + count;
             n != n_end; ++n) {
            if (n->reached) {
                continue;
            }
            stack.AppendElement(n);

            do {
                PRUint32 pos = stack.Count() - 1;
                AllocationNode *n = static_cast<AllocationNode*>(stack[pos]);
                if (n->reached) {
                    n->index = dfs_index++;
                    stack.RemoveElementAt(pos);
                } else {
                    n->reached = true;

                    // When doing post-order processing, we have to be
                    // careful not to put reached nodes into the stack.
                    nsVoidArray &pt = n->pointers_to;
                    for (PRInt32 i = pt.Count() - 1; i >= 0; --i) {
                        if (!static_cast<AllocationNode*>(pt[i])->reached) {
                            stack.AppendElement(pt[i]);
                        }
                    }
                }
            } while (stack.Count() > 0);
        }
    }

    // Sort the nodes by their DFS index, in reverse, so that the first
    // node is guaranteed to be in a root SCC.
    AllocationNode **sorted_nodes = new AllocationNode*[count];
    if (!sorted_nodes) {
        fprintf(stderr, "%s: Out of memory.\n", argv[0]);
        return 1;
    }

    {
        for (size_t i = 0; i < count; ++i) {
            sorted_nodes[i] = nodes + i;
        }
        NS_QuickSort(sorted_nodes, count, sizeof(AllocationNode*),
                     sort_by_reverse_index, 0);
    }

    // Put the nodes into their strongly-connected components.
    PRUint32 num_sccs = 0;
    {
        for (size_t i = 0; i < count; ++i) {
            nodes[i].reached = false;
        }
        nsVoidArray stack;
        for (AllocationNode **sn = sorted_nodes,
                            **sn_end = sorted_nodes + count;
             sn != sn_end; ++sn) {
            if ((*sn)->reached) {
                continue;
            }

            // We found a new strongly connected index.
            stack.AppendElement(*sn);
            do {
                PRUint32 pos = stack.Count() - 1;
                AllocationNode *n = static_cast<AllocationNode*>(stack[pos]);
                stack.RemoveElementAt(pos);
                if (!n->reached) {
                    n->reached = true;
                    n->index = num_sccs;
                    stack.AppendElements(n->pointers_from);
                }
            } while (stack.Count() > 0);
            ++num_sccs;
        }
    }

    // Identify which nodes are leak roots by using DFS, and watching
    // for component transitions.
    PRUint32 num_root_nodes = count;
    {
        for (size_t i = 0; i < count; ++i) {
            nodes[i].is_root = true;
        }

        nsVoidArray stack;
        for (AllocationNode *n = nodes, *n_end = nodes + count;
             n != n_end; ++n) {
            if (!n->is_root) {
                continue;
            }

            // Loop through pointers_to, and add any that are in a
            // different SCC to stack:
            for (int i = n->pointers_to.Count() - 1; i >= 0; --i) {
                AllocationNode *target =
                    static_cast<AllocationNode*>(n->pointers_to[i]);
                if (n->index != target->index) {
                    stack.AppendElement(target);
                }
            }

            while (stack.Count() > 0) {
                PRUint32 pos = stack.Count() - 1;
                AllocationNode *n = static_cast<AllocationNode*>(stack[pos]);
                stack.RemoveElementAt(pos);
                if (n->is_root) {
                    n->is_root = false;
                    --num_root_nodes;
                    stack.AppendElements(n->pointers_to);
                }
            }
        }
    }

    // Sort the nodes by their SCC index.
    NS_QuickSort(sorted_nodes, count, sizeof(AllocationNode*),
                 sort_by_index, 0);

    // Print output.
    {
        printf("<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01//EN\">\n"
               "<html>\n"
               "<head>\n"
               "<title>Leak analysis</title>\n"
               "<style type=\"text/css\">\n"
               "  .root { background: white; color: black; }\n"
               "  .nonroot { background: #ccc; color: black; }\n"
               "</style>\n"
               "</head>\n");
        printf("<body>\n\n"
               "<p>Generated %d entries (%d in root SCCs) and %d SCCs.</p>\n\n",
               count, num_root_nodes, num_sccs);

        for (size_t i = 0; i < count; ++i) {
            nodes[i].reached = false;
        }

        // Loop over the sorted nodes twice, first printing the roots
        // and then the non-roots.
        for (PRInt32 root_type = true;
             root_type == true || root_type == false; --root_type) {
            if (root_type) {
                printf("\n\n"
                       "<div class=\"root\">\n"
                       "<h1 id=\"root\">Root components</h1>\n");
            } else {
                printf("\n\n"
                       "<div class=\"nonroot\">\n"
                       "<h1 id=\"nonroot\">Non-root components</h1>\n");
            }
            PRUint32 component = (PRUint32)-1;
            bool one_object_component;
            for (const AllocationNode *const* sn = sorted_nodes,
                                       *const* sn_end = sorted_nodes + count;
                 sn != sn_end; ++sn) {
                const AllocationNode *n = *sn;
                if (n->is_root != root_type)
                    continue;
                const ADLog::Entry *e = n->entry;

                if (n->index != component) {
                    component = n->index;
                    one_object_component =
                        sn + 1 == sn_end || (*(sn+1))->index != component;
                    if (!one_object_component)
                        printf("\n\n<h2 id=\"c%d\">Component %d</h2>\n",
                               component, component);
                }

                if (one_object_component) {
                    printf("\n\n<div id=\"c%d\">\n", component);
                    printf("<h2 id=\"o%d\">Object %d "
                           "(single-object component %d)</h2>\n",
                           n - nodes, n - nodes, component);
                } else {
                    printf("\n\n<h3 id=\"o%d\">Object %d</h3>\n",
                           n - nodes, n - nodes);
                }
                printf("<pre>\n");
                printf("%p <%s> (%d)\n", e->address, e->type, e->datasize);
                for (size_t d = 0; d < e->datasize;
                     d += sizeof(ADLog::Pointer)) {
                    AllocationNode *target = (AllocationNode*)
                        PL_HashTableLookup(memory_map, *(void**)(e->data + d));
                    if (target) {
                        printf(" <a href=\"#o%d\">0x%08X</a> <%s>",
                               target - nodes,
                               *(unsigned int*)(e->data + d),
                               target->entry->type);
                        if (target->index != n->index) {
                            printf(", component %d", target->index);
                        }
                        printf("\n");
                    } else {
                        printf(" 0x%08X\n", *(unsigned int*)(e->data + d));
                    }
                }
                if (n->pointers_from.Count()) {
                    printf("\nPointers from:\n");
                    for (PRUint32 i = 0, i_end = n->pointers_from.Count();
                         i != i_end; ++i) {
                        AllocationNode *t = static_cast<AllocationNode*>
                            (n->pointers_from[i]);
                        const ADLog::Entry *te = t->entry;
                        printf(" <a href=\"#o%d\">%s</a> (Object %d, ",
                               t - nodes, te->type, t - nodes);
                        if (t->index != n->index) {
                            printf("component %d, ", t->index);
                        }
                        if (t == n) {
                            printf("self)\n");
                        } else {
                            printf("%p)\n", te->address);
                        }
                    }
                }

                print_escaped(stdout, e->allocation_stack);

                printf("</pre>\n");
                if (one_object_component) {
                    printf("</div>\n");
                }
            }
            printf("</div>\n");
        }
        printf("</body>\n"
               "</html>\n");
    }

    delete [] sorted_nodes;
    delete [] nodes;

    NS_ShutdownXPCOM(NULL);

    return 0;
}
State()
{
    m_pHash = PL_NewHashTable(32, PL_HashString, PL_CompareStrings,
                              PL_CompareValues, &m_sAllocOps, NULL);
};
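/*
 * Hedged usage sketch, not from any of the sources above: the basic
 * create/add/lookup/remove/destroy cycle for a string-keyed PLHashTable,
 * using the default allocator (NULL alloc ops). The "answer"/42 pair is
 * purely illustrative.
 */
#include <stdio.h>
#include "plhash.h"

static void PLHashUsageSketch(void)
{
    PLHashTable *table = PL_NewHashTable(16, PL_HashString,
                                         PL_CompareStrings, PL_CompareValues,
                                         NULL, NULL);
    if (!table)
        return;

    /* Keys and values are stored as untyped pointers. */
    PL_HashTableAdd(table, "answer", (void *)42);

    void *value = PL_HashTableLookup(table, "answer");
    printf("answer -> %ld\n", (long)value);

    PL_HashTableRemove(table, "answer");
    PL_HashTableDestroy(table);
}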