int main(int argc, char *argv[]) /* The program */ { struct psl *pslList = NULL, *psl; struct hash *queryHash, *targetHash; struct lineFile *vulg; aaSeq *querySeqs; struct dnaSeq *targetSeqs; if (argc != 5) usage(); /* Load up everything at beginning */ vulg = lineFileOpen(argv[1], TRUE); querySeqs = dnaLoadAll(argv[2]); targetSeqs = dnaLoadAll(argv[3]); queryHash = seqHash(querySeqs); targetHash = seqHash(targetSeqs); /* Main business */ pslList = vulgarToPsl(vulg, queryHash, targetHash); pslWriteAll(pslList, argv[4], FALSE); /* Free up everything */ freeDnaSeqList(&querySeqs); freeDnaSeqList(&targetSeqs); freeHash(&targetHash); freeHash(&queryHash); pslFreeList(&pslList); lineFileClose(&vulg); return 0; }
void g2gSeqOverlap(char *pairFileName, char *g2gPsl, char *outName) /* g2gSeqOverlap - make a big .fa file with overlap sequence. */ { boolean firstA; FILE *f = mustOpen(outName, "w"); struct hash *cloneHash = newHash(16); struct hash *pairHash = newHash(17); struct seqPair *pair, *pairList; int dotty = 0; printf("Reading %s. ", pairFileName); fflush(stdout); pairList = readPairList(pairFileName, cloneHash, pairHash); printf("Got %d pairs\n", slCount(pairList)); printf("Processing %s\n", g2gPsl); fillInPsls(g2gPsl, pairHash); printf("Writing overlaps to %s\n", outName); for (pair = pairList; pair != NULL; pair = pair->next) { if ((++dotty % 100) == 0) { printf("."); fflush(stdout); } if (pair->a.overlap >= pair->b.overlap) writeOverlaps(f, &pair->a); else writeOverlaps(f, &pair->b); } printf("\n"); fclose(f); freeHash(&cloneHash); freeHash(&pairHash); }
static void checkTagIsInside(struct htmlPage *page, char *outsiders, char *insiders, struct htmlTag *startTag, struct htmlTag *endTag) /* Check that insiders are all bracketed by outsiders. */ { char *outDupe = cloneString(outsiders); char *inDupe = cloneString(insiders); char *line, *word; int depth = 0; struct htmlTag *tag; struct hash *outOpen = newHash(8); struct hash *outClose = newHash(8); struct hash *inHash = newHash(8); char buf[256]; /* Create hashes of all insiders */ line = inDupe; while ((word = nextWord(&line)) != NULL) { touppers(word); hashAdd(inHash, word, NULL); } /* Create hash of open and close outsiders. */ line = outDupe; while ((word = nextWord(&line)) != NULL) { touppers(word); hashAdd(outOpen, word, NULL); safef(buf, sizeof(buf), "/%s", word); hashAdd(outClose, buf, NULL); } /* Stream through tags making sure that insiders are * at least one deep inside of outsiders. */ for (tag = startTag; tag != NULL; tag = tag->next) { char *type = tag->name; if (hashLookup(outOpen, type )) ++depth; else if (hashLookup(outClose, type)) --depth; else if (hashLookup(inHash, type)) { if (depth <= 0) { if (!startsWith("<INPUT TYPE=HIDDEN NAME=", tag->start)) // one exception hardwired tagAbort(page, tag, "%s outside of any of %s", type, outsiders); } } } freeHash(&inHash); freeHash(&outOpen); freeHash(&outClose); freeMem(outDupe); freeMem(inDupe); }
void jkUniq(char *fileName) /* Remove dupe lines from file. */ { struct slName *lineList = NULL, *lineEl; struct lineFile *lf = lineFileOpen(fileName, TRUE); char *line; int lineSize; struct hash *hash = newHash(0); FILE *f; while (lineFileNext(lf, &line, &lineSize)) { if (!hashLookup(hash, line)) { hashAdd(hash, line, NULL); lineEl = newSlName(line); slAddHead(&lineList, lineEl); } } slReverse(&lineList); lineFileClose(&lf); f = mustOpen(fileName, "w"); for (lineEl = lineList; lineEl != NULL; lineEl = lineEl->next) { fputs(lineEl->name, f); fputc('\n', f); } fclose(f); slFreeList(&lineList); freeHash(&hash); }
int main (int argc, char *argv[]) { char benchmarkPath[BUFFERSIZE], auxFile[BUFFERSIZE], placefile[BUFFERSIZE]; if(argc != 4) { printf("Usage: %s <benchmark_dir> <aux_file> <placement_file>\n", argv[0]); printf(" <benchmark_dir> is the benchmark file directory.\n"); printf(" <aux_file> is the bookshelf format auxiliary file"); printf(" (assume in <benchmark_dir>).\n"); printf(" <placement_file> is the placement file"); printf(" (assume in current directory).\n"); exit(1); } strcpy(benchmarkPath, argv[1]); strcpy(auxFile, argv[2]); strcpy(placefile, argv[3]); readAuxFile(benchmarkPath, auxFile); createHash(benchmarkPath, nodesFile); readNodesFile(benchmarkPath, nodesFile); readNetsFile(benchmarkPath, netsFile); readPlFile(".", placefile); freeHash(); readLUT(); printf("Half-perimeter wirelength: %.2f\n", HPwl()); printf("FLUTE wirelength : %.2f\n", flutewl()); }
void findStanAlignments(char *db, char *stan, char *image, char *pslOut) { struct hash *iHash = newHash(5); struct stanMad *smList = NULL, *sm = NULL; FILE *out = mustOpen(pslOut, "w"); int count =0; struct sqlConnection *conn = NULL; warn("Getting sql Connection..."); conn = hAllocConn(db); warn("Reading in image clones..."); readInImageHash(iHash, image); warn("Loading Stanford Alignments.."); smList = stanMadLoadAll(stan); warn("Finding best Alignments..."); for(sm = smList; sm != NULL; sm = sm->next) { if(differentString(sm->type,"Control")) { if((count++ % 10000) ==0) { printf("."); fflush(stdout); } outputAlignmentForStan(conn, sm, iHash, out); } } printf("\n"); warn("Done. Cleaning up..."); stanMadFreeList(&smList); freeHash(&iHash); hFreeConn(&conn); }
struct dgEdgeRef *dgFindSubEdges(struct diGraph *dg, struct dgNodeRef *subGraph) /* Return list of edges in graph that connected together nodes in subGraph. */ { struct hash *hash = newHash(0); struct dgNodeRef *nr; struct dgConnection *con; struct dgEdgeRef *erList = NULL, *er; struct dgNode *node; /* Build up hash of nodes in subGraph. */ for (nr = subGraph; nr != NULL; nr = nr->next) { node = nr->node; hashAdd(hash, node->name, node); } for (nr = subGraph; nr != NULL; nr = nr->next) { node = nr->node; for (con = node->nextList; con != NULL; con = con->next) { if (hashLookup(hash, con->node->name)) { AllocVar(er); er->edge = con->edgeOnList->val; slAddHead(&erList, er); } } } freeHash(&hash); return erList; }
void tabPepPred(char *database, int fileCount, char *fileNames[], char *table) /* Load a tab separated peptide file. */ { struct hash *uniq = newHash(16); struct lineFile *lf = lineFileOpen(fileNames[0], TRUE); char *words[2]; if (fileCount != 1) errAbort("Only one file allowed for tab separated peptides"); makeCustomTable(database, table, createString); printf("Processing %s\n", fileNames[0]); while (lineFileRow(lf, words)) { char *upperCase; if (hashLookupUpperCase(uniq, words[0]) != NULL) errAbort("Duplicate (case insensitive) '%s' line %d of %s", words[0], lf->lineIx, lf->fileName); upperCase = cloneString(words[0]); touppers(upperCase); hashAdd(uniq, upperCase, NULL); freeMem(upperCase); } lineFileClose(&lf); printf("Loading %s\n", fileNames[0]); loadTableFromTabFile(database, table, fileNames[0]); freeHash(&uniq); }
static struct trackDb * pruneRelease(struct trackDb *tdbList) /* Prune out alternate track entries for another release */ { /* Build up list that only includes things in this release. Release * can be inherited from parents. */ struct trackDb *tdb; struct trackDb *relList = NULL; struct hash *haveHash = hashNew(3); while ((tdb = slPopHead(&tdbList)) != NULL) { char *rel = trackDbSetting(tdb, "release"); unsigned trackRelBits = buildReleaseBits(rel); if (trackRelBits & releaseBit) { /* we want to include this track, check to see if we already have it */ struct hashEl *hel; if ((hel = hashLookup(haveHash, tdb->track)) != NULL) errAbort("found two copies of table %s: one with release %s, the other %s\n", tdb->track, (char *)hel->val, release); hashAdd(haveHash, tdb->track, rel); hashRemove(tdb->settingsHash, "release"); slAddHead(&relList, tdb); } else verbose(3,"pruneRelease: removing '%s', release: '%s' != '%s'\n", tdb->track, rel, release); } freeHash(&haveHash); return relList; }
void agpHashFree(struct hash **pAgpHash)
/* Free up the hash created with agpLoadAll. */
{
    /* Free each stored element first, then the table itself
     * (freeHash also NULLs *pAgpHash). */
    hashTraverseEls(*pAgpHash, freeAgpHashEl);
    freeHash(pAgpHash);
}
void joinerFree(struct joiner **pJoiner)
/* Free up memory associated with joiner */
{
    struct joiner *joiner = *pJoiner;
    if (joiner == NULL)
        return;
    /* Release owned sub-structures, then the joiner itself. */
    freeMem(joiner->fileName);
    joinerSetFreeList(&joiner->jsList);
    freeHashAndVals(&joiner->symHash);
    hashFreeList(&joiner->exclusiveSets);
    freeHash(&joiner->databasesChecked);
    freeHash(&joiner->databasesIgnored);
    joinerDependencyFreeList(&joiner->dependencyList);
    joinerIgnoreFreeList(&joiner->tablesIgnored);
    freez(pJoiner);
}
static void associationCellPrint(struct column *col, struct genePos *gp,
                                 struct sqlConnection *conn)
/* Print cell in association table.
 * Emits one HTML <TD> cell listing every match of col->queryOne for this
 * gene, space separated.  Items containing whitespace are wrapped in
 * single quotes; each item is optionally hyperlinked via col->itemUrl
 * (row[1] supplies the link target).  Prints "n/a" when no rows match. */
{
    char query[1024];
    struct sqlResult *sr;
    char **row;
    boolean gotOne = FALSE;
    /* Protein-keyed columns query by protein id (looked up for KG_III),
     * other columns query by gene name. */
    char *key = (col->protKey
        ? (kgVersion == KG_III ? lookupProtein(conn, gp->name) : gp->protein)
        : gp->name);
    struct hash *uniqHash = NULL;
    if (col->weedDupes)
        uniqHash = newHash(8);  /* suppress repeated values when configured */
    hPrintf("<TD>");
    safef(query, sizeof(query), col->queryOne, key);
    sr = sqlGetResult(conn, query);
    while ((row = sqlNextRow(sr)) != NULL)
    {
        char *s = row[0];
        boolean needQuote;
        /* Skip values already printed when de-duplication is on. */
        if (uniqHash != NULL)
        {
            if (hashLookup(uniqHash, s))
                continue;
            else
                hashAdd(uniqHash, s, NULL);
        }
        needQuote = hasWhiteSpace(s);
        if (!gotOne)
            gotOne = TRUE;
        else
            hPrintf(" ");  /* separator between items */
        if (needQuote)
            hPrintf("'");
        if (col->itemUrl)
        {
            hPrintf("<A HREF=\"");
            hPrintf(col->itemUrl, row[1]);  /* itemUrl is a printf template */
            hPrintf("\" TARGET=_blank>");
        }
        hPrintEncodedNonBreak(s);
        if (col->itemUrl)
        {
            hPrintf("</A>");
        }
        if (needQuote)
            hPrintf("'");
    }
    sqlFreeResult(&sr);
    if (!gotOne)
    {
        hPrintf("n/a");
    }
    hPrintf("</TD>");
    freeHash(&uniqHash);  /* no-op when weedDupes is off (uniqHash NULL) */
}
void getAllSplices(char *database, FILE *f)
/* Write out table linking flybase genes with BDGP transcripts --
 * unfortunately bdgpGeneInfo lacks -R* transcript/isoform identifiers,
 * so strip those off of bdgpGene.name.
 * This is not necessary with flyBaseGene/flyBase2004Xref where -R*'s
 * are preserved. */
{
    struct sqlConnection *conn = sqlConnect(database);
    struct sqlResult *sr;
    char query[256], **row;
    struct geneAlt *altList = NULL, *alt;
    struct hash *bdgpHash = newHash(16);  /* Keyed by bdgp gene id. */
    struct slName *n;

    /* First build up list of all genes with flybase and bdgp ids. */
    sqlSafef(query, sizeof(query), "select bdgpName,flyBaseId from bdgpGeneInfo");
    sr = sqlGetResult(conn, query);
    while ((row = sqlNextRow(sr)) != NULL)
    {
        AllocVar(alt);
        alt->bdgpName = cloneString(row[0]);
        alt->fbName = cloneString(row[1]);
        slAddHead(&altList, alt);
        hashAdd(bdgpHash, alt->bdgpName, alt);  /* hash shares the alt structs */
    }
    sqlFreeResult(&sr);
    slReverse(&altList);  /* restore query order after slAddHead */

    /* Now associate splicing variants. */
    sqlSafef(query, sizeof(query), "select name from %s", geneTable);
    sr = sqlGetResult(conn, query);
    while ((row = sqlNextRow(sr)) != NULL)
    {
        char *s = row[0];
        /* Strip trailing "-R*" isoform suffix (if any) to recover the
         * bare BDGP gene id used as the hash key. */
        char *e = rStringIn("-R", s);
        int size = e ? (e - s) : strlen(s);
        char bdgpGene[16];
        if (size >= sizeof(bdgpGene))
            errAbort("'%s' too big", s);
        memcpy(bdgpGene, s, size);
        bdgpGene[size] = 0;
        alt = hashMustFindVal(bdgpHash, bdgpGene);  /* aborts if unknown gene */
        n = slNameNew(s);
        slAddTail(&alt->isoformList, n);
    }
    sqlFreeResult(&sr);
    sqlDisconnect(&conn);

    /* One output line per (flybase gene, isoform) pair. */
    for (alt = altList; alt != NULL; alt = alt->next)
    {
        for (n = alt->isoformList; n != NULL; n = n->next)
            fprintf(f, "%s\t%s\n", alt->fbName, n->name);
    }
    freeHash(&bdgpHash);
}
void checkOurDir(char *ourDir, struct contig *contigList, struct hash *hash) /* Check that our directories look ok. */ { struct us { struct us *next; /* Next in list */ char *contig; /* NT_XXXXXX or NG_XXXXXX */ char *chrom; /* 1, 2, 3, etc. */ }; struct hash *ourHash = newHash(0); struct us *usList = NULL, *us; struct fileInfo *chromList = NULL, *chromFi, *ctgList = NULL, *ctgFi; char chromDir[512], ctgDir[512]; struct contig *contig; int problemCount = 0; /* Build up a hash that says where each contig is. */ chromList = listDirX(ourDir, "*", FALSE); for (chromFi = chromList; chromFi != NULL; chromFi = chromFi->next) { if (chromFi->isDir && strlen(chromFi->name) <= 2) { sprintf(chromDir, "%s/%s", ourDir, chromFi->name); ctgList = listDirX(chromDir, "N?_*", FALSE); for (ctgFi = ctgList; ctgFi != NULL; ctgFi = ctgFi->next) { if (ctgFi->isDir) { AllocVar(us); slAddHead(&usList, us); us->contig = ctgFi->name; us->chrom = chromFi->name; hashAdd(ourHash, us->contig, us); } } } } printf("We have %d contigs\n", slCount(usList)); /* Check each contig. */ for (contig = contigList; contig != NULL; contig = contig->next) { if ((us = hashFindVal(ourHash, contig->name)) == NULL) { ++problemCount; printf("%s is not in %s\n", contig->name, ourDir); } else { sprintf(ctgDir, "%s/%s/%s", ourDir, us->chrom, us->contig); problemCount += checkOurContig(ctgDir, contig); } } freeHash(&ourHash); }
void freeHashAndVals(struct hash **pHash)
/* Free up hash table and all values associated with it.
 * (Just calls freeMem on each hel->val) */
{
    if (*pHash != NULL)
    {
        hashTraverseVals(*pHash, freeMem);
        freeHash(pHash);  /* frees table and NULLs *pHash */
    }
}
void dgFree(struct diGraph **pGraph)
/* Free a directed graph. */
{
    struct diGraph *graph = *pGraph;
    if (graph != NULL)
    {
        /* Nodes, edges, and the name hash own the bulk of the memory. */
        freeHash(&graph->nodeHash);
        dgNodeFreeList(&graph->nodeList);
        freeDlListAndVals(&graph->edgeList);
        freez(pGraph);
    }
}
void makeCdnaToGene(struct cdnaInfo *cdnaList)
/* Make cdna to gene translation file.
 * Groups good, non-duplicate fine alignments by gene name, then writes
 * one line per gene to cdnaToGeneFile: the gene name followed by its
 * cDNA names ordered by chromosome offset. */
{
    struct hash *hash = newHash(12);   /* gene name -> geneHitList */
    struct cdnaInfo *ci;
    struct fineAli *fa;
    struct geneHit *gh;
    struct geneHitList *geneHitList = NULL;
    struct geneHitList *ghl;
    struct hashEl *he;

    uglyf("Making cdnaToGene file<BR>\n");
    for (ci = cdnaList; ci != NULL; ci = ci->next)
    {
        if (ci->isDupe)
            continue;
        for (fa = ci->fineAli; fa != NULL; fa = fa->next)
        {
            if (fa->isDupe || !fa->isGood)
                continue;
            /* Find (or create) the per-gene hit list for this alignment. */
            if ((he = hashLookup(hash, fa->geneName)) == NULL)
            {
                AllocVar(ghl);
                ghl->geneName = fa->geneName;
                ghl->next = geneHitList;
                geneHitList = ghl;
                he = hashAdd(hash, fa->geneName, ghl);
            }
            ghl = (struct geneHitList *)(he->val);
            /* Prepend this cDNA hit; order is fixed up after the scan. */
            AllocVar(gh);
            gh->cdnaName = ci->name;
            gh->chromOffset = fa->hStart;
            gh->next = ghl->hits;
            ghl->hits = gh;
        }
    }
    slSort(&geneHitList, cmpGhlName);   /* genes in name order */

    for (ghl = geneHitList; ghl != NULL; ghl = ghl->next)
    {
        /* Restore insertion order, then sort hits by chromosome offset. */
        slReverse(&ghl->hits);
        slSort(&ghl->hits, cmpGhOffset);
        fprintf(cdnaToGeneFile, "%s ", ghl->geneName);
        for (gh = ghl->hits; gh != NULL; gh = gh->next)
            fprintf(cdnaToGeneFile, "%s ", gh->cdnaName);
        fprintf(cdnaToGeneFile, "\n");
    }
    freeHash(&hash);
    slFreeList(&geneHitList);
    uglyf("Done making cdnaToGene file<BR>\n");
}
void raToStructReaderFree(struct raToStructReader **pReader)
/* Free up memory associated with reader. */
{
    struct raToStructReader *reader = *pReader;
    if (reader != NULL)
    {
        freeMem(reader->name);
        /* freeHash() frees the table and NULLs reader->fieldIds, so the
         * old follow-up freeMem(reader->fieldIds) was always a no-op on
         * NULL and has been removed. */
        freeHash(&reader->fieldIds);
        freeMem(reader->fieldsObserved);
        freez(pReader);
    }
}
void genomeRangeTreeFree(struct genomeRangeTree **pTree) /* Free up genomeRangeTree. */ { /* need to manually free object due to thee way rbTreeNewDetailed is done */ struct hashCookie hc = hashFirst((*pTree)->hash); struct hashEl *hel; while ((hel = hashNext(&hc)) != NULL) freeMem(hel->val); lmCleanup(&((*pTree)->lm)); /* clean up all the memory for all nodes for all trees */ freeHash(&((*pTree)->hash)); /* free the hash table including names (trees are freed by lmCleanup) */ freez(pTree); /* free this */ }
int main(int argc, char *argv[])
/* Entry point: read psl alignments, build beds, append one experiment per
 * extra command-line file, then write experiment records and bed output.
 * Arguments (from the code): argv[1]=psl input, argv[2]=expRecord output,
 * argv[3]=bed output, argv[4..]=experiment files. */
{
    struct hash *pslHash = newHash(5);
    struct hash *bedHash = newHash(5);
    struct expRecord *erList = NULL, *er=NULL;
    int i;
    FILE *erOut = NULL;
    /* NOTE(review): argv[4] is passed to createBeds below and the loop
     * starts at i=4, so this check probably should be argc < 5; with
     * argc == 4 createBeds receives argv[4] == NULL and a count of 0 —
     * confirm against usage(). */
    if(argc <4)
        usage();
    warn("Reading in psls...");
    readInPslHash(pslHash, argv[1]);
    warn("Creating beds...");
    createBeds(bedHash, pslHash, argv[4], (argc-4));
    warn("Appending Experiements...");
    /* One experiment per remaining command-line file. */
    for(i=4; i < argc; i++)
    {
        printf("%d,",i-4);
        fflush(stdout);
        appendNewExperiment(argv[i], bedHash, pslHash,&erList, (i-4));
    }
    warn("\tDone.");
    warn("Writing to files...");
    erOut = mustOpen(argv[2],"w");
    bedOut= mustOpen(argv[3],"w");   /* bedOut is a file-scope global */
    for(er = erList; er != NULL; er = er->next)
    {
        expRecordTabOut(er, erOut);
    }
    /* Average values per bed, then emit each bed (both write via bedOut). */
    hashTraverseVals(bedHash, averageValues);
    hashTraverseVals(bedHash, bedHashOutput);
    carefulClose(&erOut);
    carefulClose(&bedOut);
    freeHash(&pslHash);
    freeHash(&bedHash);
    warn("Finished.");
    return 0;
}
void eisenInput(char *database, char *outFile) /* eisenInput - Create input for Eisen-style cluster program. */ { struct slName *chromList = NULL, *chromEl; FILE *f = mustOpen(outFile, "w"); char *chrom; struct hash *refLinkHash = hashNew(0); struct refLink *refLinkList; struct hash *erHash = hashNew(0); struct expRecord *erList = NULL, *er; /* Load info good for all chromosomes. */ refLinkList = loadRefLink(database, refLinkHash); erList = loadExpRecord(expRecordTable, "hgFixed"); for (er = erList; er != NULL; er = er->next) { char sid[16]; snprintf(sid, sizeof(sid), "%u", er->id); hashAdd(erHash, sid, er); } /* Do it chromosome by chromosome. */ chromList = hAllChromNames(database); for (chromEl = chromList; chromEl != NULL; chromEl = chromEl->next) { chrom = chromEl->name; uglyf("%s\n", chrom); oneChromInput(database, chrom, hChromSize(database, chrom), "rnaCluster", expTrack, refLinkHash, erHash, f); } /* Cleanup time! */ expRecordFreeList(&erList); freeHash(&erHash); refLinkFreeList(&refLinkList); freeHash(&refLinkHash); }
void hgClonePos(char *database, char *ooDir, char *seqInfoName, char *gsDir)
/* hgClonePos - create clonePos table in browser database. */
{
    struct hash *cloneHash = newHash(16);
    struct clonePos *cloneList = readClonesFromOoDir(ooDir, cloneHash);

    /* addStageInfo(gsDir, cloneHash); */
    addSeqInfo(seqInfoName, cloneHash);
    checkClonePos(cloneList);
    saveClonePos(cloneList, database);

    freeHash(&cloneHash);
    clonePosFreeList(&cloneList);
}
SEXP BWGFile_fromWIG(SEXP r_infile, SEXP r_seqlengths, SEXP r_outfile)
/* Parse a WIG file and write it out as a bigWig; returns the output path. */
{
    struct lm *lm;
    struct hash *lenHash;
    struct bwgSection *sections;

    pushRHandlers();
    lm = lmInit(0);
    lenHash = createIntHash(r_seqlengths);
    sections = bwgParseWig((char *)CHAR(asChar(r_infile)), FALSE, lenHash,
                           itemsPerSlot, lm);
    bwgCreate(sections, lenHash, blockSize, itemsPerSlot, TRUE, TRUE, TRUE,
              (char *)CHAR(asChar(r_outfile)));
    lmCleanup(&lm);
    freeHash(&lenHash);
    popRHandlers();
    return r_outfile;
}
void prune(int *args, Hash **hashTable, unsigned int time)
/* Rebuild *hashTable, keeping only entries whose age (time - entry time)
 * is at most args[2].  The replacement table is sized 2^(args[1]+1);
 * unless args[3] is set, the 256 fixed slots (indices 4..259) are
 * re-seeded as EMPTY entries first.
 * Fix: the original dereferenced an unchecked malloc; on allocation
 * failure we now leave the existing table untouched and return. */
{
    Hash *oldHash = *hashTable;
    Hash *rebuilt = malloc(sizeof *rebuilt);
    int i = 4;

    if (rebuilt == NULL)
        return;   /* OOM: keep the current table rather than crash */
    initialize(rebuilt, 1 << (args[1] + 1), 1, time);

    /* Re-seed the fixed character slots unless suppressed by args[3]. */
    if (!args[3])
    {
        for (i = 4; i < 260; i++)
            insert(rebuilt, EMPTY, (char)(i - 4), oldHash->shadowArray[i]->time);
    }
    /* Copy over every surviving (young enough) entry. */
    for (; i < oldHash->numElements; i++)
    {
        if ((time - oldHash->shadowArray[i]->time) <= args[2])
            recInsert(oldHash->shadowArray[i], oldHash, rebuilt,
                      rebuilt->numElements + 1);
    }
    freeHash(oldHash);
    *hashTable = rebuilt;
}
static struct qaSeq *qaFaRead(char *qaName, char *faName, boolean mustReadQa)
/* Read both QA(C) and FA files. */
{
    struct hash *nameHash = newHash(0);
    struct qaSeq *list = NULL, *qa;
    struct qaSeq seq;
    FILE *faFile = mustOpen(faName, "r");

    /* Read in all the .fa records, keeping only the first of any
     * duplicate name. */
    while (faFastReadNext(faFile, &seq.dna, &seq.size, &seq.name))
    {
        if (hashLookup(nameHash, seq.name) != NULL)
        {
            warn("Duplicate %s, ignoring all but first.", seq.name);
            continue;
        }
        AllocVar(qa);
        hashAdd(nameHash, seq.name, qa);
        qa->name = cloneString(seq.name);
        qa->dna = cloneMem(seq.dna, seq.size + 1);
        qa->size = seq.size;
        slAddHead(&list, qa);
    }
    fclose(faFile);

    /* Read in corresponding .qa files and make sure they correspond.
     * If no file exists then fake it. */
    if (qaName)
    {
        if (!mustReadQa && !fileExists(qaName))
        {
            warn("No quality file %s", qaName);
            for (qa = list; qa != NULL; qa = qa->next)
                qaMakeFake(qa);
        }
        else if (isQacFile(qaName))
            fillInQac(qaName, nameHash, list);
        else
            fillInQa(qaName, nameHash, list);
    }
    freeHash(&nameHash);
    slReverse(&list);   /* back to input order */
    return list;
}
void endRedoHash()
/* Write out the contents of the global redoHash to redo.txt, one
 * "name value" pair per line, then free the hash.
 * Fix: the output file was never closed, so buffered output could be
 * lost and the handle leaked. */
{
    char *redoName = "redo.txt";
    FILE *redoFile = mustOpen(redoName, "w");
    struct hashEl *hel;
    int i;

    /* Walk every bucket chain of the hash table directly. */
    for (i = 0; i < redoHash->size; ++i)
    {
        for (hel = redoHash->table[i]; hel != NULL; hel = hel->next)
            fprintf(redoFile, "%s %s\n", hel->name, (char *)hel->val);
    }
    fclose(redoFile);   /* was missing: flush and release the file */
    freeHash(&redoHash);
}
/* --- .Call ENTRY POINT --- */
SEXP BWGSectionList_write(SEXP r_sections, SEXP r_seqlengths, SEXP r_compress,
                          SEXP r_file)
/* Write accumulated bigWig sections to r_file; returns r_file.
 * Fix: the argument to slReverse had been corrupted by HTML-entity
 * mangling ("&sect" rendered as '\u00a7'); restored to &sections. */
{
    struct bwgSection *sections = NULL;
    struct hash *lenHash = createIntHash(r_seqlengths);
    if (r_sections != R_NilValue) {
        sections = R_ExternalPtrAddr(r_sections);
        slReverse(&sections);   /* sections were accumulated head-first */
    }
    pushRHandlers();
    bwgCreate(sections, lenHash, blockSize, itemsPerSlot,
              asLogical(r_compress), (char *)CHAR(asChar(r_file)));
    freeHash(&lenHash);
    popRHandlers();
    return r_file;
}
void ctgInfoFreeHash(struct hash **hash)
/* Free a hash of dynamically allocated ctgInfo's */
{
    struct hashCookie cookie = hashFirst(*hash);
    struct hashEl *hel;

    /* Free each stored ctgInfo before tearing down the table itself. */
    while ((hel = hashNext(&cookie)) != NULL)
    {
        struct ctgInfo *info = (struct ctgInfo *) hel->val;
        ctgInfoFree(&info);
    }
    freeHash(hash);
}
static struct hgFindSpec * pruneRelease(struct hgFindSpec *hfsList)
/* Prune out alternate track entries for another release */
{
    /* Build up list that only includes things in this release. Release
     * can be inherited from parents. */
    struct hgFindSpec *hfs;
    struct hgFindSpec *relList = NULL;
    struct hash *haveHash = hashNew(3);   /* searchNames already kept */

    while ((hfs = slPopHead(&hfsList)) != NULL)
    {
        char *rel = hgFindSpecSetting(hfs, "release");
        unsigned hfsRelBits = buildReleaseBits(rel);

        if (hfsRelBits & releaseBit)
        {
            /* we want to include this track, check to see if we already have it */
            struct hashEl *hel;
            if ((hel = hashLookup(haveHash, hfs->searchName)) != NULL)
            {
                // TODO restore this warning to errAbort
                // This has been temporarily changed to a warning to avoid everybody being held up.
                char *one = (char *)hel->val;
                char *other = release;
                if (!one)
                    one = "none";
                if (!other)
                    other = "none";
                /* Duplicate within this release: warn and drop the later copy. */
                warn("ERROR: found two or more copies of %s: one with release %s, the other %s\n",
                     hfs->searchName, one, other);
            }
            else
            {
                /* First occurrence: remember its release and keep it. */
                hashAdd(haveHash, hfs->searchName, rel);
                hashRemove(hfs->settingsHash, "release");
                slAddHead(&relList, hfs);
            }
        }
        else
            verbose(3,"pruneRelease: removing '%s', release: '%s' != '%s'\n",
                    hfs->searchName, rel, release);
    }
    freeHash(&haveHash);
    return relList;   /* NOTE: list order is reversed relative to input */
}
char *associationCellVal(struct column *col, struct genePos *gp, struct sqlConnection *conn) /* Make comma separated list of matches to association table. */ { char query[1024]; struct sqlResult *sr; char **row; boolean gotOne = FALSE; struct dyString *dy = newDyString(512); char *result = NULL; char *key = (col->protKey ? (kgVersion == KG_III ? lookupProtein(conn, gp->name) : gp->protein) : gp->name); struct hash *uniqHash = NULL; if (col->weedDupes) uniqHash = newHash(8); safef(query, sizeof(query), col->queryOne, key); sr = sqlGetResult(conn, query); while ((row = sqlNextRow(sr)) != NULL) { char *s = row[0]; boolean needQuote; if (uniqHash != NULL) { if (hashLookup(uniqHash, s)) continue; else hashAdd(uniqHash, s, NULL); } needQuote = hasWhiteSpace(s); if (needQuote) dyStringAppendC(dy, '\''); dyStringAppend(dy, s); if (needQuote) dyStringAppendC(dy, '\''); dyStringAppend(dy, ","); gotOne = TRUE; } sqlFreeResult(&sr); if (gotOne) result = cloneString(dy->string); dyStringFree(&dy); freeHash(&uniqHash); return result; }