struct entity *mergeEntities(struct entity *entityList) /* Merge two entities. */ { struct entity *root = entityList, *ent; struct dlNode *node; if (root == NULL || root->next == NULL) return root; for (ent = root->next; ent != NULL; ent = ent->next) { node = ent->node; if (node != NULL) { dlRemove(node); ent->node = NULL; } root->exonList = slCat(root->exonList, ent->exonList); root->intronList = slCat(root->intronList, ent->intronList); root->cdaRefList = slCat(root->cdaRefList, ent->cdaRefList); if (root->start > ent->start) root->start = ent->start; if (root->end < ent->end) root->end = ent->end; } slSort(&root->exonList, cmpExons); root->exonList = weedDupeExons(root->exonList); slSort(&root->intronList, cmpIntrons); root->intronList = weedDupeIntrons(root->intronList); return root; }
struct gpFx *gpFxPredEffect(struct variant *variant, struct genePred *pred, struct dnaSeq *transcriptSequence, struct lm *lm) // return the predicted effect(s) of a variation list on a genePred { struct gpFx *effectsList = NULL; // make sure we can deal with the variants that are coming in checkVariantList(variant); for (; variant != NULL; variant = variant->next) { // If only the reference allele has been observed, skip it: //#*** Some might like to keep variants e.g. in VCF output... //#*** aha, Ensembl has requested a term for 'no change' from SONG. //#*** Add that to soTerm when it exists... if (! hasAltAllele(variant->alleles)) return NULL; // check to see if SNP is up or downstream effectsList = slCat(effectsList, gpFxCheckUpDownstream(variant, pred, lm)); // check to see if SNP is in the transcript effectsList = slCat(effectsList, gpFxCheckTranscript(variant, pred, transcriptSequence, lm)); } return effectsList; }
static struct gpFx *gpFxInExon(struct variant *variant, struct txCoords *txc, int exonIx,
                               struct genePred *pred, boolean predIsNmd,
                               struct dnaSeq *transcriptSeq, struct lm *lm)
/* Given a variant that overlaps an exon of pred, figure out what each allele does.
 * Returns an lm-allocated list of effects (may be NULL if only reference alleles). */
{
struct gpFx *effectsList = NULL;
struct allele *allele = variant->alleles;
for ( ; allele ; allele = allele->next)
    {
    /* Reference alleles by definition cause no change; skip them. */
    if (!allele->isReference)
        {
        /* cdsStart != cdsEnd means this is a coding transcript. */
        if (pred->cdsStart != pred->cdsEnd)
            {
            // first find effects of allele in UTR, if any
            effectsList = slCat(effectsList,
                                gpFxCheckUtr(allele, pred, txc, exonIx, predIsNmd, lm));
            /* startInCds >= 0 means the variant actually touches the CDS. */
            if (txc->startInCds >= 0)
                effectsList = slCat(effectsList,
                                    gpFxChangedCds(allele, pred, txc, exonIx, predIsNmd,
                                                   transcriptSeq, lm));
            }
        else
            effectsList = slCat(effectsList,
                                gpFxChangedNoncodingExon(allele, pred, txc, exonIx, lm));
        /* NMD transcripts don't get exon-loss / splice-region annotations here. */
        if (!predIsNmd)
            {
            // Was entire exon deleted?
            /* exonIx is in transcript order; exonStarts/exonEnds are in genomic
             * order, so flip the index for '-' strand transcripts. */
            int exonNumPos = exonIx;
            if (pred->strand[0] == '-')
                exonNumPos = pred->exonCount - 1 - exonIx;
            uint exonStart = pred->exonStarts[exonNumPos], exonEnd = pred->exonEnds[exonNumPos];
            /* Variant spans the whole exon (coords appear half-open -- TODO confirm). */
            if (variant->chromStart <= exonStart && variant->chromEnd >= exonEnd)
                {
                struct gpFx *effect = gpFxNew(allele->sequence, pred->name, exon_loss,
                                              nonCodingExon, lm);
                setNCExonVals(effect, exonIx, txc->startInCdna);
                slAddTail(&effectsList, effect);
                }
            else
                {
                // If variant is in exon *but* within 3 bases of splice site,
                // it also qualifies as splice_region_variant:
                /* First clause: near the exon's 3' (genomic) edge, only if there is
                 * a following exon; second clause: near the 5' edge, only if there
                 * is a preceding exon (no splice site at transcript ends). */
                if ((variant->chromEnd > exonEnd-3 && variant->chromStart < exonEnd &&
                     exonIx < pred->exonCount - 1) ||
                    (variant->chromEnd > exonStart && variant->chromStart < exonStart+3 &&
                     exonIx > 0))
                    {
                    struct gpFx *effect = gpFxNew(allele->sequence, pred->name,
                                                  splice_region_variant, nonCodingExon, lm);
                    setNCExonVals(effect, exonIx, txc->startInCdna);
                    slAddTail(&effectsList, effect);
                    }
                }
            }
        }
    }
return effectsList;
}
struct slName* getTableList(struct sqlConnection *conn)
/* Collect the names of all existing genbank tables in the database. */
{
/* slCat(NULL, x) just yields x, so the metadata and alignment table
 * lists can be concatenated directly. */
struct slName *tableList = slCat(gbMetaDataListTables(conn),
                                 gbAlignDataListTables(conn));
gbAddTableIfExists(conn, "gbLoaded", &tableList);
gbAddTableIfExists(conn, "gbStatus", &tableList); /* must be last */
slReverse(&tableList);
return tableList;
}
struct chain *aggregateChains( struct chain *chainIn) { struct chain *outChain = NULL; struct chain *chain1, *chain2, *chain1Next; //printf("%s%c%s\n",chainIn->qName, chainIn->qStrand, chainIn->tName); for(chain1 = chainIn; chain1 ; chain1 = chain1Next) { chain1Next = chain1->next; for(chain2 = chain1Next; chain2 ; chain2 = chain2->next) { if ((chain1->tStart <= chain2->tStart) && (chain1->tEnd + maxGap > chain2->tStart) && (chain1->qStart <= chain2->qStart) && (chain1->qEnd + maxGap > chain2->qStart) && (chain1->qEnd < chain2->qStart) && (chain1->tEnd < chain2->tStart)) { assert(chain1->tStart < chain1->tEnd); assert(chain1->qStart < chain1->qEnd); assert(chain2->tStart < chain2->tEnd); assert(chain2->qStart < chain2->qEnd); assert(chain1->qEnd < chain2->qStart); assert(chain1->qEnd < chain2->qStart); chain2->tStart = chain1->tStart; chain2->qStart = chain1->qStart; chain2->tEnd = max(chain1->tEnd, chain2->tEnd); chain2->qEnd = max(chain1->qEnd, chain2->qEnd); chain2->blockList = slCat(chain1->blockList,chain2->blockList); // slSort(&chain2->blockList, boxInCmpBoth); freez(&chain1); break; } else if ((chain1->tStart >= chain2->tEnd) && (chain1->tStart - maxGap < chain2->tEnd) && (chain1->qStart >= chain2->qEnd) && (chain1->qStart - maxGap < chain2->qEnd)) { errAbort("shouldn't get here\n"); chain2->tEnd = chain1->tEnd; chain2->qEnd = chain1->qEnd; chain2->blockList = slCat(chain2->blockList,chain1->blockList); freez(&chain1); break; } } if (chain2 == NULL) { slAddHead(&outChain, chain1); } } return outChain; }
static void addToBigBundleList(struct ssBundle **pOneList, struct hash *bunHash,
                               struct ssBundle **pBigList, struct dnaSeq *query)
/* Fold the bundles in *pOneList into *pBigList, consolidating bundles that
 * refer to the same target sequence (keyed by name in bunHash).  Destroys
 * *pOneList in the process. */
{
struct ssBundle *bun;
for (bun = *pOneList; bun != NULL; bun = bun->next)
    {
    char *targetName = bun->genoSeq->name;
    struct ssBundle *merged = hashFindVal(bunHash, targetName);
    if (merged == NULL)
        {
        /* First bundle seen for this target: start a consolidated bundle. */
        AllocVar(merged);
        slAddHead(pBigList, merged);
        hashAdd(bunHash, targetName, merged);
        merged->qSeq = query;
        merged->genoSeq = bun->genoSeq;
        merged->isProt = bun->isProt;
        merged->avoidFuzzyFindKludge = bun->avoidFuzzyFindKludge;
        }
    /* Move (not copy) the alignments into the consolidated bundle. */
    merged->ffList = slCat(merged->ffList, bun->ffList);
    bun->ffList = NULL;
    }
ssBundleFreeList(pOneList);
}
void deleteOutdated(struct sqlConnection *conn, struct gbSelect* select,
                    struct gbStatusTbl* statusTbl, char* tmpDir)
/* delete outdated alignments and metadata from the database.
 * The step order below is deliberate: alignments first, then metadata,
 * and only then the gbStatus rows (see comment before that step). */
{
gbVerbEnter(3, "delete outdated");
/* first the alignments */
gbVerbMsg(4, "delete outdated alignments");
gbAlignDataDeleteOutdated(gDatabase, conn, select, statusTbl, &gOptions, tmpDir);
/* now drop metadata entries */
gbVerbMsg(4, "delete outdated metadata");
gbMetaDataDeleteOutdated(conn, select, statusTbl, &gOptions, tmpDir);
/* Now it's safe to drop deleted entries from the database status table. */
gbVerbMsg(4, "delete outdated gbStatus");
gbStatusTblRemoveDeleted(statusTbl, conn);
/* orphaned now become new: move the orphan list onto the new list and
 * transfer the counts so orphans are reprocessed as new entries */
statusTbl->newList = slCat(statusTbl->newList, statusTbl->orphanList);
statusTbl->orphanList = NULL;
statusTbl->numNew += statusTbl->numOrphan;
statusTbl->numOrphan = 0;
gbVerbLeave(3, "delete outdated");
}
void addGroup(struct listOfList *list, struct pairList *pairs, struct group **groups) { if (list == NULL) { struct group *group; AllocVar(group); slAddHead(groups, group); group->pairs = pairs; //group->viewHash = newHash(10); // printf("adding group: "); // for(;pairs; pairs = pairs->next) // printf("%s + %s,",pairs->name, pairs->val); // printf("\n"); return; } struct pairList *pair; for (pair=list->pairs; pair; pair = pair->next) { struct pairList *newPairs = dupList(pairs); struct pairList *newPair; struct pairList *newList = NULL; AllocVar(newPair); newPair->name = pair->name; newPair->val = pair->val; newList = slCat(newPairs, newPair); addGroup( list->next, newList, groups); } }
void tackOnFrag(struct chain *chain, struct chain *frag)
/* Make sure chain and frag belong together; then expand chain's range
 * to encompass frag's range and give frag's blockList and score to chain. */
{
/* Sanity checks: both pieces must carry the same id, sequence names and
 * strand, or merging them would produce a corrupt chain. */
if (chain->id != frag->id)
    errAbort("tackOnFrag: chain->id (%d) must be equal to frag->id (%d)",
             chain->id, frag->id);
if (!sameString(chain->tName, frag->tName))
    errAbort("Inconsistent tName for chain id %d: %s vs. %s",
             chain->id, chain->tName, frag->tName);
if (!sameString(chain->qName, frag->qName))
    errAbort("Inconsistent qName for chain id %d: %s vs. %s",
             chain->id, chain->qName, frag->qName);
if (chain->qStrand != frag->qStrand)
    errAbort("Inconsistent qStrand for chain id %d: %c vs. %c",
             chain->id, chain->qStrand, frag->qStrand);
/* Widen chain's target and query ranges to cover frag's. */
if (frag->tStart < chain->tStart)
    chain->tStart = frag->tStart;
if (frag->tEnd > chain->tEnd)
    chain->tEnd = frag->tEnd;
if (frag->qStart < chain->qStart)
    chain->qStart = frag->qStart;
if (frag->qEnd > chain->qEnd)
    chain->qEnd = frag->qEnd;
/* Take ownership of frag's blocks (frag itself remains caller-owned). */
chain->blockList = slCat(chain->blockList, frag->blockList);
frag->blockList = NULL;
chain->score += frag->score;
}
void extendAroundBestBlock(struct splatAlign *ali, struct axtScoreScheme *scoreScheme) /* Return realignment created by extending in both directions from the mid-point of the * best block in the existing alignment. */ { int maxSingleGap = 9; int maxTotalGaps = 3*maxSingleGap; struct chain *chain = ali->chain; struct cBlock *anchor = bestBlock(chain); int qMid = (anchor->qStart + anchor->qEnd)/2; int tMid = (anchor->tStart + anchor->tEnd)/2; int symAlloc = chain->qSize * 2; char *qSym, *tSym; AllocArray(qSym, symAlloc); AllocArray(tSym, symAlloc); int qBeforeSize = qMid; struct cBlock *blocksBefore = extendInRegion(ali->qDna, 0, qMid, ali->tDna, tMid - qBeforeSize - maxTotalGaps, tMid, -1, qSym, tSym, symAlloc, scoreScheme, maxSingleGap); int qAfterSize = chain->qSize - qMid; struct cBlock *blocksAfter = extendInRegion(ali->qDna, qMid, chain->qSize, ali->tDna, tMid, tMid + qAfterSize + maxTotalGaps, 1, qSym, tSym, symAlloc, scoreScheme, maxSingleGap); struct cBlock *allBlocks = slCat(blocksBefore, blocksAfter); slFreeList(&chain->blockList); chain->blockList = allBlocks; chainMergeAbutting(chain); chainCalcBounds(chain); chainAddAxtScore(chain, ali->qDna, ali->tDna, scoreScheme); freeMem(qSym); freeMem(tSym); }
void serverStart(char *files[], int fileCount) /* Load DNA. Build up indexes, set up listing port, and fall into * accept loop. */ { struct blatzIndex *indexList = NULL; int i; int acceptor; struct bzp *bzp = bzpDefault(); /* Daemonize self. */ bzpSetOptions(bzp); /* Load up all sequences. */ for (i=0; i<fileCount; ++i) { struct dnaLoad *dl = dnaLoadOpen(files[i]); struct blatzIndex *oneList = blatzIndexDl(dl, bzp->weight, bzp->unmask); indexList = slCat(indexList, oneList); dnaLoadClose(&dl); } bzpTime("Loaded and indexed %d sequences", slCount(indexList)); verbose(1, "Ready for queries\n"); /* Turn self into proper daemon. */ logDaemonize("blatzServer"); acceptor = netAcceptingSocket(port, 100); serviceLoop(acceptor, bzp, indexList); }
void cgapSageLoadItems(struct track *tg) /* This function loads the beds in the current window into a linkedFeatures list. */ /* Each bed entry may turn into multiple linkedFeatures because one is made for */ /* each library at a given tag (bed). */ { struct linkedFeatures *itemList = NULL; struct sqlConnection *conn = hAllocConn(database); struct hash *libHash = libTissueHash(conn); struct hash *libTotHash = getTotTagsHashFromTable(conn); struct sqlResult *sr = NULL; char **row; int rowOffset; sr = hOrderedRangeQuery(conn, tg->table, chromName, winStart, winEnd, NULL, &rowOffset); if ((winEnd - winStart) > CGAP_SAGE_DENSE_GOVERNOR) tg->visibility = tvDense; while ((row = sqlNextRow(sr)) != NULL) { struct cgapSage *tag = cgapSageLoad(row+rowOffset); struct linkedFeatures *oneLfList = cgapSageToLinkedFeatures(tag, libHash, libTotHash, tg->visibility); itemList = slCat(oneLfList, itemList); } slReverse(&itemList); sqlFreeResult(&sr); hFreeConn(&conn); tg->items = itemList; }
static struct chromRange *buildRanges(int size, struct chromSize *chroms)
/* Build ranges of the given read size for every chromosome, then sort the
 * combined list with chromRangeCmp (randomized ordering). */
{
struct chromRange *rangeList = NULL;
struct chromSize *cs;
for (cs = chroms; cs != NULL; cs = cs->next)
    rangeList = slCat(rangeList, buildChromRanges(size, cs));
slSort(&rangeList, chromRangeCmp);
return rangeList;
}
static void pslPartsAdd(struct pslParts *parts, struct psl* newPart, char *outDir) /* add a new partition, writing out pending parts if max size reached, * and adding new ones. */ { int newSize = slCount(newPart); if (((parts->size + newSize) > gPartSize) && (parts->psls != NULL)) pslPartsWrite(parts, outDir); parts->psls = slCat(parts->psls, newPart); parts->size += newSize; }
struct genePred *loadGenes(int numGenePreds, char **genePredFiles)
/* Load genes from all genePred files into one list and sort it. */
{
struct genePred *geneList = NULL;
int fileIx;
for (fileIx = 0; fileIx < numGenePreds; fileIx++)
    geneList = slCat(geneList, genePredReaderLoadFile(genePredFiles[fileIx], NULL));
slSort(&geneList, genePredCmp);
return geneList;
}
static struct hashEl *buildDefaulted(struct gbConf *conf) /* build list of conf elements, with defaults filled in. A bit of work and * guessing because the default mechanism is designed around explicitly asking * for values */ { struct hash* prefixMap = splitByPrefix(conf); struct prefixElems *defaultElems = hashMustFindVal(prefixMap, "default"); struct hashCookie cookie = hashFirst(prefixMap); struct hashEl *prefixMapEl; struct hashEl *confEls = NULL; // build list of entries while ((prefixMapEl = hashNext(&cookie)) != NULL) { struct prefixElems *prefixElems = prefixMapEl->val; confEls = slCat(confEls, hashElCloneList(prefixElems->elems)); if (isGenomeDb(prefixElems->prefix)) confEls = slCat(confEls, genomeDbGetDefaults(prefixElems, defaultElems)); } return confEls; }
static struct hgFindSpec *loadFindSpecs(char *db, char *where) /* Load find specs for the given where. */ { struct hgFindSpec *hfsList = NULL; struct slName *hgFindSpecList = hgFindSpecNameList(db); struct slName *oneSpec; for (oneSpec = hgFindSpecList; oneSpec != NULL; oneSpec = oneSpec->next) hfsList = slCat(hfsList, loadFindSpecsTbl(db, oneSpec->name, where)); slSort(&hfsList, hgFindSpecPriCmp); return(hfsList); }
struct trackDb *hubCollectTracks( char *database, struct grp **pGroupList)
/* Generate trackDb structures for all the tracks in attached hubs.
 * Make grp structures for each hub. Returned group list is reversed.
 * Results are cached in function statics, so the hubs are only contacted
 * once per process; later calls return the cached lists. */
{
// return the cached copy if it exists
static struct trackDb *hubTrackDbs;
static struct grp *hubGroups;
if (hubTrackDbs != NULL)
    {
    if (pGroupList != NULL)
        *pGroupList = hubGroups;
    return hubTrackDbs;
    }
struct hubConnectStatus *hub, *hubList = hubConnectGetHubs();
struct trackDb *tdbList = NULL;
for (hub = hubList; hub != NULL; hub = hub->next)
    {
    // Only try hubs that don't already have a recorded error.
    if (isEmpty(hub->errorMessage))
        {
        /* error catching in so it won't just abort */
        struct errCatch *errCatch = errCatchNew();
        if (errCatchStart(errCatch))
            {
            struct trackDb *thisList = hubAddTracks(hub, database);
            tdbList = slCat(tdbList, thisList);
            }
        errCatchEnd(errCatch);
        if (errCatch->gotError)
            {
            // Loading failed: warn and record the error on the hub so the
            // UI can report it on the next page load.
            warn("%s", errCatch->message->string);
            hubUpdateStatus( errCatch->message->string, hub);
            }
        else
            {
            // Hub loaded cleanly: give it a group (unless the database
            // itself is a hub) and clear any stale error status.
            if (!trackHubDatabase(database))
                {
                struct grp *grp = grpFromHub(hub);
                slAddHead(&hubGroups, grp);
                }
            hubUpdateStatus(NULL, hub);
            }
        errCatchFree(&errCatch);
        }
    }
hubTrackDbs = tdbList;
if (pGroupList != NULL)
    *pGroupList = hubGroups;
return tdbList;
}
static void addTablesAccordingToTrackType(char *db, struct slName **pList, struct hash *uniqHash, struct trackDb *track) /* Parse out track->type and if necessary add some tables from it. */ { struct slName *name; char *trackDupe = cloneString(track->type); if (trackDupe != NULL && trackDupe[0] != 0) { char *s = trackDupe; char *type = nextWord(&s); if (sameString(type, "wigMaf")) { static char *wigMafAssociates[] = {"frames", "summary"}; int i; for (i=0; i<ArraySize(wigMafAssociates); ++i) { char *setting = wigMafAssociates[i]; char *table = trackDbSetting(track, setting); if (table != NULL) { name = slNameNew(table); slAddHead(pList, name); hashAdd(uniqHash, table, NULL); } } /* include conservation wiggle tables */ struct consWiggle *wig, *wiggles = wigMafWiggles(db, track); slReverse(&wiggles); for (wig = wiggles; wig != NULL; wig = wig->next) { name = slNameNew(wig->table); slAddHead(pList, name); hashAdd(uniqHash, wig->table, NULL); } } if (track->subtracks) { struct slName *subList = NULL; struct slRef *tdbRefList = trackDbListGetRefsToDescendantLeaves(track->subtracks); slSort(&tdbRefList, trackDbRefCmp); struct slRef *tdbRef; for (tdbRef = tdbRefList; tdbRef != NULL; tdbRef = tdbRef->next) { struct trackDb *subTdb = tdbRef->val; name = slNameNew(subTdb->table); slAddTail(&subList, name); hashAdd(uniqHash, subTdb->table, NULL); } pList = slCat(pList, subList); } } freez(&trackDupe); }
struct bed *hWholeTrackAsBedList(char *track)
/* Get the entire track, across all chromosomes, as one list of beds. */
{
struct bed *bedList = NULL;
struct slName *chromList = hAllChromNames();
struct slName *chrom;
for (chrom = chromList; chrom != NULL; chrom = chrom->next)
    bedList = slCat(hGetBedRange(track, chrom->name, 0, 0, NULL), bedList);
slFreeList(&chromList);
return bedList;
}
static void mergeOrAddEdge(struct rbTree *edgeTree, struct edge *edge)
/* Put edge back into the tree if it is still unique; otherwise move its
 * evidence onto the matching edge already in the tree. */
{
struct edge *match = rbTreeFind(edgeTree, edge);
if (match == NULL)
    rbTreeAdd(edgeTree, edge);
else
    {
    /* Duplicate edge: transfer its evidence to the tree's copy. */
    match->evList = slCat(match->evList, edge->evList);
    edge->evList = NULL;
    }
}
struct bed *cookedBedsOnRegions(struct sqlConnection *conn, char *table,
                                struct region *regionList, struct lm *lm,
                                int *retFieldCount)
/* Gather cooked (filtered/intersected) beds over every region in the list. */
{
struct bed *allBeds = NULL;
struct region *r;
for (r = regionList; r != NULL; r = r->next)
    allBeds = slCat(allBeds, getIntersectedBeds(conn, table, r, lm, retFieldCount));
return allBeds;
}
static struct ssBundle *gfTransTransFindBundles(struct genoFind *gfs[3], struct dnaSeq *qSeq,
                                                struct hash *t3Hash, boolean isRc,
                                                int minMatch, boolean isRna)
/* Look for alignment to three translations of qSeq in three translated reading frames.
 * Returns the stitched alignments as a list of ssBundles.
 * NOTE(review): the original header mentioned outFunction/outData, but results
 * are returned as the bundle list -- that part of the comment appears stale.
 * NOTE(review): isRc is not used in this body -- presumably handled by caller. */
{
struct trans3 *qTrans = trans3New(qSeq);
int qFrame, tFrame;
struct gfClump *clumps[3][3], *clump;
struct gfRange *rangeList = NULL, *range;
int tileSize = gfs[0]->tileSize;
bioSeq *targetSeq;
struct ssBundle *bun, *bunList = NULL;
int hitCount;
struct lm *lm = lmInit(0);
enum ffStringency stringency = (isRna ? ffCdna : ffLoose);
/* Find clumps of tile hits for each (query frame, target frame) pair. */
gfTransTransFindClumps(gfs, qTrans->trans, clumps, lm, &hitCount);
for (qFrame = 0; qFrame<3; ++qFrame)
    {
    for (tFrame=0; tFrame<3; ++tFrame)
        {
        for (clump = clumps[qFrame][tFrame]; clump != NULL; clump = clump->next)
            {
            struct gfRange *rangeSet = NULL;
            /* Expand the clump into HSP ranges in translated space... */
            clumpToHspRange(clump, qTrans->trans[qFrame], tileSize, tFrame, NULL,
                            &rangeSet, TRUE, FALSE);
            /* ...then map the ranges back to nucleotide coordinates. */
            untranslateRangeList(rangeSet, qFrame, tFrame, t3Hash, NULL, 0);
            rangeList = slCat(rangeSet, rangeList);
            }
        }
    }
/* Bundle ranges that land close together (within 2000) on the same target. */
slSort(&rangeList, gfRangeCmpTarget);
rangeList = gfRangesBundle(rangeList, 2000);
for (range = rangeList; range != NULL; range = range->next)
    {
    targetSeq = range->tSeq;
    AllocVar(bun);
    bun->qSeq = qSeq;
    bun->genoSeq = targetSeq;
    bun->ffList = gfRangesToFfItem(range->components, qSeq);
    /* Stitch compatible alignment fragments into full alignments. */
    ssStitch(bun, stringency, minMatch, ssAliCount);
    slAddHead(&bunList, bun);
    }
/* Clean up all intermediates; bundles keep only what they need. */
for (qFrame = 0; qFrame<3; ++qFrame)
    for (tFrame=0; tFrame<3; ++tFrame)
        gfClumpFreeList(&clumps[qFrame][tFrame]);
gfRangeFreeList(&rangeList);
trans3Free(&qTrans);
lmCleanup(&lm);
slReverse(&bunList);
return bunList;
}
struct boxClump *boxLump(struct boxIn **pBoxList)
/* Convert list of boxes to a list of lumps.  The lumps
 * are a smaller number of boxes that between them contain
 * all of the input boxes.  Note that
 * the original boxList is overwritten as the boxes
 * are moved from it to the lumps. */
{
struct boxClump *qClumpList = NULL, *tClumpList = NULL, *tClump;
if (*pBoxList == NULL)
    return NULL;
/* First pass: lump along the target dimension only. */
tClumpList = lumpOneDimension(*pBoxList, FALSE);
for (tClump = tClumpList; tClump != NULL; tClump = tClump->next)
    {
    /* Within each target lump, lump again along the query dimension. */
    struct boxClump *oneList = lumpOneDimension(tClump->boxList, TRUE);
    if (slCount(oneList) > 1)
        {
        /* The query pass split this target lump, so each piece may be
         * splittable on target again: recurse on each piece's boxes. */
        struct boxClump *clump;
        for (clump = oneList; clump != NULL; clump = clump->next)
            {
            struct boxClump *subList = boxLump(&clump->boxList);
            qClumpList = slCat(subList, qClumpList);
            }
        /* Recursion took the boxes; free the now-emptied clump shells. */
        boxClumpFreeList(&oneList);
        }
    else
        {
        /* Query pass did not split this lump: it is final. */
        qClumpList = slCat(oneList, qClumpList);
        }
    /* Boxes have been handed off; clear so the free below doesn't touch them. */
    tClump->boxList = NULL;
    }
boxClumpFreeList(&tClumpList);
*pBoxList = NULL;
return qClumpList;
}
static void addAttrVals(struct gff3Ann *g3a, char *attr, char *valStr) /* Add an attribute to the list of attributes. If attribute has already been * specified, values are merged. Attribute name must already be unescaped, * attribute values will be split and then unescaped. */ { struct gff3AttrVals *attrVals = gff3AnnFindAttr(g3a, attr); if (attrVals == NULL) { attrVals = gff3FileAlloc(g3a->file, sizeof(struct gff3AttrVals)); attrVals->attr = gff3FileCloneStr(g3a->file, attr); slAddHead(&g3a->attrs, attrVals); } attrVals->vals = slCat(attrVals->vals, parseAttrVals(g3a, attr, valStr)); }
boolean rElmTreeClimb(const struct elmNode *node, struct slList *parent, void *extra,
                      elmNodePickFunction *elmPick, struct slList **results)
// Recursively climbs tree and examines every node using the supplied function.
// Each call on a node iterates through its siblings, recursively calling itself on any children.
// This function might be used to build a list of objects from each or a subset of nodes.
// If all examinations resulted in a structure, then the list will be in REVERSE traversal order.
// If you immediately slReverse(results) then the list will ascend to the furthest leaf before
// moving on to sibling leaves, twigs and branches.
// Note: if results are returned, then "parent" is filled with nearest parent's result.
// Return FALSE from the elmPick function to stop the traversal.  Thus, a complete traversal
// returns TRUE, but one that has been stopped (after finding one node?) returns FALSE.
{
/* At the root, start with its first child; otherwise start at node itself. */
const struct elmNode *sibling = elmNodeIsRoot(node) ? node->firstChild: node;
for (;sibling!=NULL;sibling = sibling->sibling)
    { // Some nodes are subsets rather than alleles
    struct slList *localParent = NULL;
    struct slList *result = NULL;
    boolean ret = elmPick(sibling->content,parent,extra,&result);
    if (result)
        {
        //assert(results != NULL);
        slAddHead(results,result);
        /* This node produced a result, so it becomes the "nearest parent
         * result" passed down to the children below. */
        localParent = result;
        result = NULL;
        }
    else
        localParent = parent;    /* No result here: children inherit ours. */
    if (!ret)
        return FALSE; // Stop traversing
    // a node points to only one child, but that child may have siblings
    struct elmNode *child = sibling->firstChild;  // additional children are siblings
    if (child)
        {
        assert(child->content != NULL);
        /* Recurse; the subtree's results come back through 'result'. */
        ret = rElmTreeClimb(child, localParent, extra, elmPick, &result);
        if (result != NULL)
            {
            //assert(results != NULL);
            *results = slCat(result,*results); // newer results go to front!
            }
        if (!ret)
            return FALSE; // Stop traversing
        }
    }
return TRUE; // continue traversing
}
static void hgPar(char *db, char *parSpecFile, char *parTable, boolean fileOutput)
/* hgPar - create PAR track: convert each PAR spec into beds, sort them,
 * then either write a file or load a database table. */
{
struct parSpec *specs = parSpecLoadAll(parSpecFile);
checkSpecs(db, specs);
struct bed4 *bedList = NULL;
struct parSpec *spec;
for (spec = specs; spec != NULL; spec = spec->next)
    bedList = slCat(bedList, convertParSpec(spec));
slSort(&bedList, bedCmp);
if (fileOutput)
    writeTable(bedList, parTable);
else
    loadTable(bedList, db, parTable);
}
struct mdbObj *getMdbList(char *databases[], int databaseCount)
/* Get the combined list of metaDb objects across all given databases. */
{
struct mdbObj *mdbList = NULL;
int i;
for (i=0; i<databaseCount; ++i)
    {
    /* Grab list of all metaDb obj. */
    char *database = databases[i];
    struct sqlConnection *conn = sqlConnect(database);
    struct mdbObj *oneList = mdbObjsQueryAll(conn, metaTable);
    /* Bug fix: was slCount(mdbList), which reported the running total from
     * previous databases (and 0 for the first) rather than the per-database
     * count the message describes. */
    verbose(2, "%d objects in %s.%s\n", slCount(oneList), database, metaTable);
    mdbList = slCat(mdbList, oneList);
    sqlDisconnect(&conn);
    }
return mdbList;
}
void motifSig(char *outName, char *seqDir, char *motifDir, int controlCount, char *controls[]) /* motifSig - Combine info from multiple control runs and main improbizer run. */ { FILE *f = mustOpen(outName, "w"); struct slName *mfList = listDir(motifDir, "*"), *mf; struct improbRunInfo *iriList = NULL, *iriSmallList = NULL, *iri; for (mf = mfList; mf != NULL; mf = mf->next) { iriSmallList = analyseOneMotifRun(mf->name, seqDir, motifDir, controlCount, controls); iriList = slCat(iriList, iriSmallList); } for (iri = iriList; iri != NULL; iri = iri->next) improbRunInfoTabOut(iri, f); carefulClose(&f); }
static struct trackDb *getFullTrackList(struct cart *cart, char *db, struct grp **pHubGroups)
/* Assemble the complete trackDb list for db: native tracks (minus
 * downloads-only and, depending on access control, 'tableBrowser off'
 * tracks), the wiki track if enabled, hub tracks, and finally the user's
 * custom tracks at the head of the list. */
{
struct trackDb *list = hTrackDb(db);
struct customTrack *ctList, *ct;
/* exclude any track with a 'tableBrowser off' setting */
struct trackDb *tdb, *nextTdb, *newList = NULL;
for (tdb = list; tdb != NULL; tdb = nextTdb)
    {
    nextTdb = tdb->next;
    if (tdbIsDownloadsOnly(tdb) || tdb->table == NULL)
        {
        //freeMem(tdb); // should not free tdb's.
        // While hdb.c should and says it does cache the tdbList, it doesn't.
        // The most notable reason that the tdbs are not cached is this hgTables CGI !!!
        // It needs to be rewritten to make tdbRef structures for the lists it creates here!
        continue;
        }
    char *tbOff = trackDbSetting(tdb, "tableBrowser");
    // NOTE(review): useAC and forbiddenTrackList appear to be file-scope
    // globals; when access control is on, off-limits tracks are diverted
    // to forbiddenTrackList instead of being returned -- confirm semantics.
    if (useAC && tbOff != NULL && startsWithWord("off", tbOff))
        slAddHead(&forbiddenTrackList, tdb);
    else
        slAddHead(&newList, tdb);
    }
slReverse(&newList);
list = newList;
/* add wikiTrack if enabled */
if (wikiTrackEnabled(db, NULL))
    slAddHead(&list, wikiTrackDb());
slSort(&list, trackDbCmp);
// Append hub tracks (earlier comment said "at head", but slCat puts them at the end)
struct trackDb *hubTdbList = hubCollectTracks(db, pHubGroups);
list = slCat(list, hubTdbList);
// Add custom tracks at head of list
ctList = customTracksParseCart(db, cart, NULL, NULL);
for (ct = ctList; ct != NULL; ct = ct->next)
    {
    slAddHead(&list, ct->tdb);
    }
return list;
}