boolean checkWigDataFilter(char *db, char *table, char **constraint, double *ll, double *ul) /* check if filter exists, return its values, call with db="ct" for * custom tracks */ { char varPrefix[128]; struct hashEl *varList, *var; char *pat = NULL; char *cmp = NULL; if (constraint != NULL) *constraint = NULL; // Make sure return variable gets set to something at least. if (isCustomTrack(table)) db = "ct"; safef(varPrefix, sizeof(varPrefix), "%s%s.%s.", hgtaFilterVarPrefix, db, table); varList = cartFindPrefix(cart, varPrefix); if (varList == NULL) return FALSE; /* check varList, look for dataValue.pat and dataValue.cmp */ for (var = varList; var != NULL; var = var->next) { if (endsWith(var->name, ".pat")) { char *name; name = cloneString(var->name); tolowers(name); /* make sure we are actually looking at datavalue */ if (stringIn("datavalue", name) || stringIn("score", name)) { pat = cloneString(var->val); } freeMem(name); } if (endsWith(var->name, ".cmp")) { char *name; name = cloneString(var->name); tolowers(name); /* make sure we are actually looking at datavalue */ if (stringIn("datavalue", name) || stringIn("score", name)) { cmp = cloneString(var->val); tolowers(cmp); if (stringIn("ignored", cmp)) freez(&cmp); } freeMem(name); } } /* Must get them both for this to work */ if (cmp && pat) { int wordCount = 0; char *words[2]; char *dupe = cloneString(pat); wordCount = chopString(dupe, ", \t\n", words, ArraySize(words)); switch (wordCount) { case 2: if (ul) *ul = sqlDouble(words[1]); case 1: if (ll) *ll = sqlDouble(words[0]); break; default: warn("dataValue filter must be one or two numbers (two for 'in range'). " "Please click the filter edit button and either set the comparison to 'ignored' " "or set the dataValue threshold."); } if (sameWord(cmp,"in range") && (wordCount != 2)) errAbort("'in range' dataValue filter must have two numbers input\n"); if (constraint) *constraint = cmp; return TRUE; } else return FALSE; } /* static boolean checkWigDataFilter() */
static void fetchIntoBuf(struct bbiFile *bwf, char *chrom, bits32 start, bits32 end,
	struct bigWigValsOnChrom *chromVals)
/* Fetch big-wig data for chrom:start-end directly into chromVals->covBuf
 * (per-base coverage bitmap) and chromVals->valBuf (per-base double values),
 * decoding all three bigWig section encodings (bedGraph, variableStep,
 * fixedStep).  NOTE(review): the original header comment said "Return list
 * allocated out of lm", which looks like stale copy-paste -- nothing here
 * allocates from an lm; all output lands in chromVals' buffers. */
{
/* A lot of code duplicated with bigWigIntervalQuery, but here the clipping
 * is simplified since always working across full chromosome, and the output is
 * different. Since both of these are in inner loops and speed critical, it's hard
 * to factor out without perhaps making it worse than the bit of duplication. */
if (bwf->typeSig != bigWigSig)
    errAbort("Trying to do fetchIntoBuf on a non big-wig file.");
bbiAttachUnzoomedCir(bwf);
struct fileOffsetSize *blockList = bbiOverlappingBlocks(bwf, bwf->unzoomedCir,
	chrom, start, end, NULL);
struct fileOffsetSize *block, *beforeGap, *afterGap;
struct udcFile *udc = bwf->udc;
boolean isSwapped = bwf->isSwapped;
float val;
int i;
Bits *covBuf = chromVals->covBuf;
double *valBuf = chromVals->valBuf;

/* Set up for uncompression optionally. */
char *uncompressBuf = NULL;
if (bwf->uncompressBufSize > 0)
    uncompressBuf = needLargeMem(bwf->uncompressBufSize);

/* This loop is a little complicated because we merge the read requests for efficiency, but we
 * have to then go back through the data one unmerged block at a time. */
for (block = blockList; block != NULL; )
    {
    /* Find contiguous blocks and read them into mergedBuf with one seek+read. */
    fileOffsetSizeFindGap(block, &beforeGap, &afterGap);
    bits64 mergedOffset = block->offset;
    bits64 mergedSize = beforeGap->offset + beforeGap->size - mergedOffset;
    udcSeek(udc, mergedOffset);
    char *mergedBuf = needLargeMem(mergedSize);
    udcMustRead(udc, mergedBuf, mergedSize);
    char *blockBuf = mergedBuf;

    /* Loop through individual blocks within merged section. */
    for (; block != afterGap; block = block->next)
	{
	/* Uncompress if necessary. */
	char *blockPt, *blockEnd;
	if (uncompressBuf)
	    {
	    blockPt = uncompressBuf;
	    int uncSize = zUncompress(blockBuf, block->size, uncompressBuf, bwf->uncompressBufSize);
	    blockEnd = blockPt + uncSize;
	    }
	else
	    {
	    blockPt = blockBuf;
	    blockEnd = blockPt + block->size;
	    }

	/* Deal with insides of block.  blockPt advances as fields are read. */
	struct bwgSectionHead head;
	bwgSectionHeadFromMem(&blockPt, &head, isSwapped);
	switch (head.type)
	    {
	    case bwgTypeBedGraph:
		{
		/* Each item is (start, end, value): paint value over [s,e). */
		for (i=0; i<head.itemCount; ++i)
		    {
		    bits32 s = memReadBits32(&blockPt, isSwapped);
		    bits32 e = memReadBits32(&blockPt, isSwapped);
		    bitSetRange(covBuf, s, e-s);
		    val = memReadFloat(&blockPt, isSwapped);
		    bits32 j;
		    for (j=s; j<e; ++j)
			valBuf[j] = val;
		    }
		break;
		}
	    case bwgTypeVariableStep:
		{
		/* Each item is (start, value); span comes from the section header. */
		for (i=0; i<head.itemCount; ++i)
		    {
		    bits32 s = memReadBits32(&blockPt, isSwapped);
		    val = memReadFloat(&blockPt, isSwapped);
		    bitSetRange(covBuf, s, head.itemSpan);
		    bits32 e = s + head.itemSpan;
		    bits32 j;
		    for (j=s; j<e; ++j)
			valBuf[j] = val;
		    }
		break;
		}
	    case bwgTypeFixedStep:
		{
		/* Do a little optimization for the most common and worst case - step1/span1 */
		if (head.itemStep == 1 && head.itemSpan == 1)
		    {
		    bits32 s = head.start;
		    bits32 e = head.end;
		    bitSetRange(covBuf, s, e-s);
		    bits32 j;
		    for (j=s; j<e; ++j)
			valBuf[j] = memReadFloat(&blockPt, isSwapped);
		    }
		else
		    {
		    /* General fixed-step case: only values are stored; positions
		     * advance by itemStep, each covering itemSpan bases. */
		    bits32 s = head.start;
		    bits32 e = s + head.itemSpan;
		    for (i=0; i<head.itemCount; ++i)
			{
			bitSetRange(covBuf, s, head.itemSpan);
			val = memReadFloat(&blockPt, isSwapped);
			bits32 j;
			for (j=s; j<e; ++j)
			    valBuf[j] = val;
			s += head.itemStep;
			e += head.itemStep;
			}
		    }
		break;
		}
	    default:
		internalErr();
		break;
	    }
	assert(blockPt == blockEnd);	/* every byte of the section must be consumed */
	blockBuf += block->size;
	}
    freeMem(mergedBuf);
    }
freeMem(uncompressBuf);
slFreeList(&blockList);
}
void bigBedTabOut(char *db, char *table, struct sqlConnection *conn, char *fields, FILE *f)
/* Print out selected fields from Big Bed.  If fields is NULL, then print out all fields.
 * NOTE(review): despite the header comment, a NULL fields pointer would be passed
 * straight to chopByChar -- callers appear to always supply a comma-separated
 * list; confirm before relying on the NULL case. */
{
if (f == NULL)
    f = stdout;

/* Convert comma separated list of fields to array. */
int fieldCount = chopByChar(fields, ',', NULL, 0);
char **fieldArray;
AllocArray(fieldArray, fieldCount);
chopByChar(fields, ',', fieldArray, fieldCount);

/* Get list of all fields in big bed and turn it into a hash of column indexes keyed by
 * column name. */
struct hash *fieldHash = hashNew(0);
struct slName *bb, *bbList = bigBedGetFields(table, conn);
int i;
for (bb = bbList, i=0; bb != NULL; bb = bb->next, ++i)
    hashAddInt(fieldHash, bb->name, i);

// If bigBed has name column, look up pasted/uploaded identifiers if any:
struct hash *idHash = NULL;
if (slCount(bbList) >= 4)	/* >= 4 standard BED fields implies a name column */
    idHash = identifierHash(db, table);

/* Create an array of column indexes corresponding to the selected field list.
 * hashIntVal aborts if a requested field name isn't in the bigBed. */
int *columnArray;
AllocArray(columnArray, fieldCount);
for (i=0; i<fieldCount; ++i)
    {
    columnArray[i] = hashIntVal(fieldHash, fieldArray[i]);
    }

/* Output row of labels */
fprintf(f, "#%s", fieldArray[0]);
for (i=1; i<fieldCount; ++i)
    fprintf(f, "\t%s", fieldArray[i]);
fprintf(f, "\n");

/* Open up bigBed file. */
char *fileName = bigBedFileName(table, conn);
struct bbiFile *bbi = bigBedFileOpen(fileName);
struct asObject *as = bigBedAsOrDefault(bbi);
struct asFilter *filter = NULL;
if (anyFilter())
    {
    filter = asFilterFromCart(cart, db, table, as);
    if (filter)
	{
	fprintf(f, "# Filtering on %d columns\n", slCount(filter->columnList));
	}
    }

/* Loop through outputting each region */
struct region *region, *regionList = getRegions();
for (region = regionList; region != NULL; region = region->next)
    {
    struct lm *lm = lmInit(0);
    struct bigBedInterval *iv, *ivList = bigBedIntervalQuery(bbi, region->chrom,
	region->start, region->end, 0, lm);
    char *row[bbi->fieldCount];
    char startBuf[16], endBuf[16];
    for (iv = ivList; iv != NULL; iv = iv->next)
	{
	bigBedIntervalToRow(iv, region->chrom, startBuf, endBuf, row, bbi->fieldCount);
	if (asFilterOnRow(filter, row))
	    {
	    /* row[3] is the standard BED name column; skip rows whose name is
	     * not in the user's pasted/uploaded identifier list. */
	    if ((idHash != NULL) && (hashLookup(idHash, row[3]) == NULL))
		continue;
	    int i;
	    fprintf(f, "%s", row[columnArray[0]]);
	    for (i=1; i<fieldCount; ++i)
		fprintf(f, "\t%s", row[columnArray[i]]);
	    fprintf(f, "\n");
	    }
	}
    lmCleanup(&lm);	/* frees all intervals for this region at once */
    }

/* Clean up and exit. */
bbiFileClose(&bbi);
hashFree(&fieldHash);
freeMem(fieldArray);
freeMem(columnArray);
}
void hgncFree(struct hgnc **pEl)
/* Free a single dynamically allocated hgnc such as created with hgncLoad().
 * Safe to call with *pEl == NULL; on return *pEl is set to NULL. */
{
struct hgnc *rec = *pEl;

if (rec == NULL)
    return;

/* Release every dynamically allocated string field of the record. */
freeMem(rec->hgncId);
freeMem(rec->symbol);
freeMem(rec->name);
freeMem(rec->status);
freeMem(rec->locusType);
freeMem(rec->locusGroup);
freeMem(rec->prvSymbols);
freeMem(rec->prvNames);
freeMem(rec->synonyms);
freeMem(rec->nameSyns);
freeMem(rec->chrom);
freeMem(rec->dateApprv);
freeMem(rec->dateMod);
freeMem(rec->dateSymChange);
freeMem(rec->dateNmChange);
freeMem(rec->accession);
freeMem(rec->enzymeIds);
freeMem(rec->entrezId);
freeMem(rec->ensId);
freeMem(rec->mgdId);
freeMem(rec->miscDbs);
freeMem(rec->miscIds);
freeMem(rec->pubMed);
freeMem(rec->refSeqIds);
freeMem(rec->geneFamilyNm);
freeMem(rec->geneFamilyDesc);
freeMem(rec->recType);
freeMem(rec->primaryId);
freeMem(rec->secondaryId);
freeMem(rec->ccdsId);
freeMem(rec->vegaId);
freeMem(rec->locusDbs);
freeMem(rec->gdbMapped);
freeMem(rec->entrezMapped);
freeMem(rec->omimMapped);
freeMem(rec->refSeqMapped);
freeMem(rec->uniProtMapped);
freeMem(rec->ensMapped);
freeMem(rec->ucscMapped);
freeMem(rec->mgiMapped);
freeMem(rec->rgdMapped);

/* Free the record itself and NULL out the caller's pointer. */
freez(pEl);
}
void rdf_complete(NET_StreamClass *stream)
/* Stream-completion callback for an RDF fetch.  When the completed stream is
 * the nav-center URL, either fall back to the built-in default content (if
 * nothing was parsed) or read the browser's updateID/updateURL/fileSize slots
 * and kick off a smart-update download.  Always releases the RDFFile's
 * per-parse working buffers at the end. */
{
  RDFFile f = (RDFFile)stream->data_object;

  if (strcmp(f->url, gNavCntrUrl) == 0) {
    if (f->resourceCount == 0) {
      /* Remote navcntr file produced no resources: parse the built-in
       * default block instead. */
      parseNextRDFXMLBlob(stream, gDefaultNavcntr, strlen(gDefaultNavcntr));
    } else {
      RDF_Resource browser = RDF_GetResource(NULL, "netscape:browser", 1);
      RDF_Resource updateID = RDF_GetResource(NULL, "updateID", 1);
      char* id = RDF_GetSlotValue(gNCDB, browser, updateID, RDF_STRING_TYPE, false, true);
      RDF_Resource updateFrom = RDF_GetResource(NULL, "updateURL", 1);
      char* uf = RDF_GetSlotValue(gNCDB, browser, updateFrom, RDF_STRING_TYPE, false, true);
      RDF_Resource fileSize = RDF_GetResource(NULL, "fileSize", 1);
      char* fs = RDF_GetSlotValue(gNCDB, browser, fileSize, RDF_STRING_TYPE, false, true);
      uint32 fSize = 3000;  /* default when size is absent or unparsable */
      if (fs != NULL) {
        /* BUG FIX: the original call was sscanf("%lu", fs, &fSize) -- the
         * format string and the input were swapped, so fSize was never
         * actually parsed.  Also parse into an unsigned long to match %lu,
         * then narrow to uint32. */
        unsigned long parsedSize;
        if (sscanf(fs, "%lu", &parsedSize) == 1)
          fSize = (uint32)parsedSize;
        freeMem(fs);
      }
      if ((uf != NULL) && (id != NULL)) {
#ifdef MOZ_SMARTUPDATE
        AutoUpdateConnnection autoupdt;
        autoupdt = AutoUpdate_Setup(FE_GetRDFContext(), id, uf, fSize,
                                    "http://warp/u/raman/docs/js/download.html");
        autoupdate_Resume(autoupdt);
#endif /* MOZ_SMARTUPDATE */
      }
      /* Free unconditionally: the original leaked uf/id when only one of the
       * two slot values was present. */
      if (uf) freeMem(uf);
      if (id) freeMem(id);

      /* A temporary hack to demo AutoUpdate on windows */
#ifndef MOZ_SMARTUPDATE
#ifndef XP_MAC
/*    {
        AutoUpdate_LoadMainScript(FE_GetRDFContext(),
                                  "http://warp/u/raman/docs/js/download.html");
      } */
#endif /* !XP_MAC */
#endif /* MOZ_SMARTUPDATE */
    }
  }

  /* Release the parse-state buffers owned by the RDFFile. */
  if (f) {
    freeMem(f->line);
    freeMem(f->currentSlot);
    freeMem(f->holdOver);
    freeNamespaces(f);
    f->line = NULL;
    f->currentSlot = NULL;
    f->holdOver = NULL;
  }
}
void trimUniq(bioSeq *seqList)
/* Check that all seq's in list have a unique name.  Try and
 * abbreviate longer sequence names.  Each name is first scrubbed down to a
 * safe character set, empty results become "YourSeq", long '|'-separated
 * NCBI/Ensembl names are abbreviated, and finally every name is registered
 * in a hash (hashAddUnique presumably aborts on a duplicate -- that is the
 * uniqueness enforcement; confirm against the hash library). */
{
struct hash *hash = newHash(0);
bioSeq *seq;

for (seq = seqList; seq != NULL; seq = seq->next)
    {
    char *saferString = needMem(strlen(seq->name)+1);
    char *c, *s;

    /* Some chars are safe to allow through, other chars cause
     * problems.  It isn't necessarily a URL safe string that is
     * being calculated here.  The original problem was a user had
     * the fasta header line of:
     * chr8|59823648:59825047|+
     * The plus sign was being taken as the query name and this
     * created problems as that name was passed on to hgc via
     * the ss cart variable.  The + sign became part of a URL
     * eventually.  This loop allows only isalnum and =_/.:;_|
     * to get through as part of the header name.  These characters
     * all proved to be safe as single character names, or all
     * together. */
    s = saferString;
    for (c = seq->name; *c != '\0'; ++c)
	{
	/* NOTE(review): "c && (*c != '\0')" is redundant here -- c can't be
	 * NULL inside this loop and *c != '\0' is the loop condition. */
	if (c && (*c != '\0'))
	    {
	    if ( isalnum(*c) || (*c == '=') || (*c == '-') || (*c == '/') ||
		(*c == '.') || (*c == ':') || (*c == ';') || (*c == '_') ||
		    (*c == '|') )
		*s++ = *c;
	    }
	}
    *s = '\0';
    freeMem(seq->name);
    if (*saferString == '\0')
	{
	/* Everything was stripped; substitute a placeholder name. */
	freeMem(saferString);
	saferString = cloneString("YourSeq");
	}
    seq->name = saferString;

    if (strlen(seq->name) > 14)	/* Try and get rid of long NCBI .fa cruft. */
	{
	char *nameClone = NULL;
	char *abbrv = NULL;
	char *words[32];
	int wordCount;
	boolean isEns = (stringIn("ENSEMBL:", seq->name) != NULL);

	nameClone = cloneString(seq->name);	/* chopString modifies its input */
	wordCount = chopString(nameClone, "|", words, ArraySize(words));
	if (wordCount > 1)	/* Looks like it's an Ensembl/NCBI
				 * long name alright. */
	    {
	    if (isEns)
		{
		/* Ensembl: first non-empty field is the short name. */
		abbrv = words[0];
		if (abbrv[0] == 0)
		    abbrv = words[1];
		}
	    else if (sameString(words[1], "dbSNP"))
		{
		/* dbSNP: the rs id is in the third field when present. */
		if (wordCount > 2)
		    abbrv = words[2];
		else
		    abbrv = nameClone;
		}
	    else
		{
		/* NCBI style: last non-empty field is the accession/name. */
		abbrv = words[wordCount-1];
		if (abbrv[0] == 0)
		    abbrv = words[wordCount-2];
		}
	    /* Only use the abbreviation if it doesn't collide with a name
	     * already seen; otherwise keep the long (unique) name. */
	    if (hashLookup(hash, abbrv) == NULL)
		{
		freeMem(seq->name);
		seq->name = cloneString(abbrv);
		}
	    freez(&nameClone);
	    }
	}
    hashAddUnique(hash, seq->name, hash);
    }
freeHash(&hash);
}
void doMiddle()
/* CGI entry point: look up the requested sequence name (CGI var "geneName")
 * as a gene, cDNA, or chromosome range, and print its DNA (and, when the
 * litLink option is on for a gene hit, database links plus a protein
 * translation) as HTML to stdout. */
{
char *seqName;
boolean intronsLowerCase = TRUE;
/* NOTE(review): intronsParenthesized is read from CGI below but never used
 * in this function. */
boolean intronsParenthesized = FALSE;
boolean hiliteNear = FALSE;
int startRange = 0;
int endRange = 0;
boolean gotRange = FALSE;
struct dnaSeq *cdnaSeq;
boolean isChromRange = FALSE;
DNA *dna;
char *translation = NULL;

/* Gather CGI options. */
seqName = cgiString("geneName");
seqName = trimSpaces(seqName);
if (cgiVarExists("intronsLowerCase"))
    intronsLowerCase = cgiBoolean("intronsLowerCase");
if (cgiVarExists("intronsParenthesized"))
    intronsParenthesized = cgiBoolean("intronsParenthesized");
if (cgiVarExists("startRange") && cgiVarExists("endRange" ))
    {
    startRange = cgiInt("startRange");
    endRange = cgiInt("endRange");
    gotRange = TRUE;
    }
if (cgiVarExists("hiliteNear"))
    {
    hiliteNear = TRUE;
    }
fprintf(stdout, "<P><TT>\n");

/* The logic here is a little complex to optimize speed.
 * If we can decide what type of thing the name refers to by
 * simply looking at the name we do.  Otherwise we have to
 * search the database in various ways until we get a hit. */
if (wormIsNamelessCluster(seqName))
    {
    isChromRange = TRUE;
    }
else if (wormIsChromRange(seqName))
    {
    isChromRange = TRUE;
    }
else if (getWormGeneDna(seqName, &dna, TRUE))
    {
    if (cgiBoolean("litLink"))
	{
	/* Print a header of literature/database links, and prepare a
	 * protein translation of the coding (uppercase) part of the DNA. */
	char nameBuf[64];
	char *geneName = NULL;
	char *productName = NULL;
	char *coding;
	int transSize;
	struct wormCdnaInfo info;
	printf("<H3>Information and Links for %s</H3>\n", seqName);
	if (wormInfoForGene(seqName, &info))
	    {
	    if (info.description)
		printf("<P>%s</P>\n", info.description);
	    geneName = info.gene;
	    productName = info.product;
	    }
	else
	    {
	    if (wormIsGeneName(seqName))
		geneName = seqName;
	    else if (wormGeneForOrf(seqName, nameBuf, sizeof(nameBuf)))
		geneName = nameBuf;
	    }
	/* Translate only the uppercase (exonic) bases. */
	coding = cloneUpperOnly(dna);
	transSize = 1 + (strlen(coding)+2)/3;
	translation = needMem(1+strlen(coding)/3);
	dnaTranslateSome(coding, translation, transSize);
	freez(&coding);
	if (geneName)
	    {
	    printf("<A HREF=\"http://www.ncbi.nlm.nih.gov/htbin-post/Entrez/query?form=4&db=m"
		   "&term=C+elegans+%s&dispmax=50&relentrezdate=No+Limit\">", geneName);
	    printf("PubMed search on gene: </A>%s<BR>\n", geneName);
	    }
	if (productName)
	    {
	    char *encoded = cgiEncode(productName);
	    printf("<A HREF=\"http://www.ncbi.nlm.nih.gov/htbin-post/Entrez/query?form=4&db=m"
		   "&term=%s&dispmax=50&relentrezdate=No+Limit\">", encoded);
	    printf("PubMed search on product:</A> %s<BR>\n", productName);
	    freeMem(encoded);
	    }
	/* Process name to get rid of isoform letter for Proteome. */
	if (geneName)
	    strcpy(nameBuf, geneName);
	else
	    {
	    strcpy(nameBuf, seqName);
#ifdef NEVER
	    /* Sometimes Proteome requires the letter after the orf name
	     * in alt-spliced cases, sometimes it can't handle it.... */
	    nameLen = strlen(nameBuf);
	    if (wormIsOrfName(nameBuf) && isalpha(nameBuf[nameLen-1]))
		{
		char *dotPos = strrchr(nameBuf, '.');
		if (dotPos != NULL && isdigit(dotPos[1]))
		    nameBuf[nameLen-1] = 0;
		}
#endif /* NEVER */
	    }
	printf("<A HREF=\"http://www.wormbase.org/db/seq/sequence?name=%s;class=Sequence\">", seqName);
	printf("WormBase link on:</A> %s<BR>\n", seqName);
	printf("<A HREF=\"http://www.proteome.com/databases/WormPD/reports/%s.html\">", nameBuf);
	printf("Proteome link on:</A> %s<BR>\n<BR>\n", nameBuf);
	printf("<A HREF=#DNA>Genomic DNA Sequence</A><BR>\n");
	if (hiliteNear)
	    printf("<A HREF=\"#CLICKED\">Shortcut to where you clicked in gene</A><BR>");
	printf("<A HREF=#protein>Translated Protein Sequence</A><BR>\n");
	htmlHorizontalLine();
	printf("<A NAME=DNA></A>");
	printf("<H3>%s Genomic DNA sequence</H3>", seqName);
	}
    /* NOTE(review): when the user turns OFF intronsLowerCase this lowercases
     * the whole sequence -- verify that is the intended "uniform case"
     * behavior rather than an inverted condition. */
    if (!intronsLowerCase)
	tolowers(dna);
    if (hiliteNear)
	{
	if (!gotRange)
	    {
	    /* hiliteNear is a fraction of the way through the gene; turn it
	     * into a small window (radius 5 bases) clipped to the sequence. */
	    double nearPos = cgiDouble("hiliteNear");
	    int rad = 5;
	    int dnaSize = strlen(dna);
	    long mid = (int)(dnaSize * nearPos);
	    startRange = mid - rad;
	    if (startRange < 0) startRange = 0;
	    endRange = mid + rad;
	    if (endRange >= dnaSize) endRange = dnaSize - 1;
	    }
	}
    outputSeq(dna, strlen(dna), hiliteNear, startRange, endRange, stdout);
    freez(&dna);
    }
else if (wormCdnaSeq(seqName, &cdnaSeq, NULL))
    {
    outputSeq(cdnaSeq->dna, cdnaSeq->size, FALSE, 0, 0, stdout);
    }
else
    {
    /* Nothing matched directly; fall through to chromosome-range lookup. */
    isChromRange = TRUE;
    }
if (isChromRange)
    {
    char *chromId;
    int start, end;
    char strand = '+';
    int size;

    if (!wormGeneRange(seqName, &chromId, &strand, &start, &end))
	errAbort("Can't find %s",seqName);
    size = end - start;
    if (intronsLowerCase)
	dna = wormChromPartExonsUpper(chromId, start, size);
    else
	{
	dna = wormChromPart(chromId, start, size);
	touppers(dna);
	}
    /* An explicit CGI strand overrides whatever wormGeneRange found. */
    if (cgiVarExists("strand"))
	strand = cgiString("strand")[0];
    if (strand == '-')
	reverseComplement(dna, size);
    outputSeq(dna, size, FALSE, 0, 0, stdout);
    }
if (translation != NULL)
    {
    htmlHorizontalLine();
    printf("<A NAME=protein></A>");
    printf("<H3>Translated Protein of %s</H3>\n", seqName);
    outputSeq(translation, strlen(translation), FALSE, 0, 0, stdout);
    freez(&translation);
    }
fprintf(stdout, "</TT></P>\n");
}
// Print a labelled free-memory report to the serial port in the form
// "<message>:<TAB><value>".  The value comes from the freeMem() diagnostic
// helper; `biggest` is presumably a global it fills with the largest free
// block -- confirm against the memory-utility library in use.
void printFreeMemToSerial(char* message) {
  Serial.print(message);
  Serial.print(":\t");
  Serial.println(freeMem(&biggest));
}
~shared_array() {
    // BUG FIX: the original body was "*refCount--;" which, by C++ precedence,
    // parses as "*(refCount--)" -- it decremented the POINTER (not the shared
    // count) and then tested the count through the moved pointer.  Correct
    // behavior: decrement the shared reference count itself, and release the
    // owned storage when the last reference goes away.
    if (--(*refCount) == 0)
        freeMem();
}
void finalizeReads()
/* Tear down the read-loading module: close the input file handles (plain or
 * gzipped, and the second mate file only when it is distinct from the first),
 * then release every module-level buffer.  freeMem here is the project's
 * accounting variant taking (pointer, size) -- the sizes are bookkeeping
 * hints, presumably mirroring the original getMem sizes; confirm against the
 * allocator. */
{
	if (!seqCompressed)
	{
		fclose(_r_fp1);
		/* In paired-end mode the mates may share one file handle. */
		if ( pairedEndMode && _r_fp2 != _r_fp1 )
		{
			fclose(_r_fp2);
		}
	}
	else
	{
		gzclose(_r_gzfp1);
		if ( pairedEndMode && _r_gzfp2 != _r_gzfp1)
		{
			gzclose(_r_gzfp2);
		}
	}
	freeMem(_r_seq, sizeof(Read)*_r_maxSeqCnt);
	/* NOTE(review): _r_samplingLocs was presumably allocated with
	 * _r_samplingLocsSize+1 entries (see the +1 here), while the arrays
	 * below use _r_samplingLocsSize -- verify against initReads(). */
	freeMem(_r_samplingLocs, sizeof(int)*(_r_samplingLocsSize+1));
	int size = sizeof(int)*_r_samplingLocsSize;
	freeMem(_r_samplingLocsSeg, size);
	freeMem(_r_samplingLocsOffset, size);
	freeMem(_r_samplingLocsLen, size);
	freeMem(_r_samplingLocsLenFull, size);
	freeMem(_r_alphIndex, 128);

	/* Double-buffering: the second buffer exists only for distinct
	 * paired-end inputs. */
	if (pairedEndMode && _r_buf1 != _r_buf2)
	{
		freeMem(_r_buf2, 10000000);
		freeMem(_r_buf2_pos, sizeof(int));
		freeMem(_r_buf2_size, sizeof(int));
	}
	freeMem(_r_buf1, 10000000);
	freeMem(_r_buf1_pos, sizeof(int));
	freeMem(_r_buf1_size, sizeof(int));

	if (!nohitDisabled)
	{
		fclose(_r_umfp);	/* unmapped-reads output file */
	}
}
char *mdbSelectsHtmlRows(struct sqlConnection *conn,struct slPair *mdbSelects,
                         struct slPair *mdbVars,int cols,boolean fileSearch)
// Generates the html for the table rows containing mdb var and val selects.
// Assume tableSearch unless fileSearch.
// Returns a dyString's contents via dyStringCannibalize; caller owns the
// returned string and should freeMem it.
{
struct dyString *output = dyStringNew(1024);

/* Section header row. */
dyStringPrintf(output,"<tr><td colspan='%d' align='right' class='lineOnTop' style='height:20px; "
                      "max-height:20px;'><em style='color:%s; width:200px;'>ENCODE terms</em>"
                      "</td></tr>\n", cols,COLOR_DARKGREY);

struct slPair *mdbSelect = mdbSelects;
int row = 0;
/* One table row per requested mdb var/val pair. */
for (;mdbSelect != NULL; mdbSelect = mdbSelect->next)
    {
    char buf[256];
    char *dropDownHtml = NULL;

/* Helper macros for the per-row [+]/[-] buttons; value '+' adds a row after,
 * '-' deletes the row. */
#define PLUS_MINUS_BUTTON "<input type='button' id='%sButton%d' value='%c' " \
                          "style='font-size:.7em;' title='%s' " \
                          "onclick='findTracks.mdbSelectPlusMinus(this,%d)'>"
#define ADD_PM_BUTTON(type,num,value) \
        dyStringPrintf(output,PLUS_MINUS_BUTTON, (type), (num), (value), \
                       ((value) == '+' ? "add another row after":"delete"), (num))

    dyStringAppend(output,"<tr valign='top' class='mdbSelect'><td nowrap>\n");
    row++;
    /* Only offer a delete button when there is more than a minimal set of rows. */
    if (slCount(mdbSelects) > 2 || row > 2)
        ADD_PM_BUTTON("minus", row, '-');
    else
        dyStringAppend(output," ");
    ADD_PM_BUTTON("plus", row, '+');

    dyStringAppend(output,"</td><td>and </td><td colspan=3 nowrap>\n");
    safef(buf, sizeof(buf), "%s%i", METADATA_NAME_PREFIX, row);

    // Left side select of vars
    dropDownHtml = cgiMakeSingleSelectDropList(buf, mdbVars,mdbSelect->name, NULL,"mdbVar",
                            "style='font-size:.9em;' onchange='findTracks.mdbVarChanged(this);'");
    if (dropDownHtml)
        {
        dyStringAppend(output,dropDownHtml);
        freeMem(dropDownHtml);
        }

    // Right side select of vals; the widget depends on how this var is searched.
    safef(buf, sizeof(buf), "%s%i", METADATA_VALUE_PREFIX, row);
    enum cvSearchable searchBy = cvSearchMethod(mdbSelect->name);
    if (searchBy == cvSearchBySingleSelect || searchBy == cvSearchByMultiSelect)
        {
        dyStringPrintf(output,"</td>\n<td align='right' id='isLike%i' style='width:10px; "
                              "white-space:nowrap;'>is%s</td>\n<td nowrap id='%s' "
                              "style='max-width:600px;'>\n",
                       row,(searchBy == cvSearchByMultiSelect?" among":""),buf);
        struct slPair *pairs = mdbValLabelSearch(conn, mdbSelect->name, MDB_VAL_STD_TRUNCATION,
                                                 FALSE, !fileSearch, fileSearch); // not tags, either a file or table search
        if (slCount(pairs) > 0)
            {
            char *dropDownHtml = cgiMakeSelectDropList((searchBy == cvSearchByMultiSelect), buf,
                                                       pairs,mdbSelect->val, ANYLABEL,"mdbVal",
                                          "style='min-width:200px; font-size:.9em;' "
                                          "onchange='findTracks.mdbValChanged(this);'");
            if (dropDownHtml)
                {
                dyStringAppend(output,dropDownHtml);
                freeMem(dropDownHtml);
                }
            slPairFreeList(&pairs);
            }
        }
    else if (searchBy == cvSearchByFreeText)
        {
        /* Free-text vars get a plain text input ("contains" semantics). */
        dyStringPrintf(output,"</td><td align='right' id='isLike%i' style='width:10px; "
                              "white-space:nowrap;'>contains</td>\n<td nowrap id='%s' "
                              "style='max-width:600px;'>\n",row,buf);
        dyStringPrintf(output,"<input type='text' name='%s' value='%s' class='mdbVal freeText' "
                              "style='max-width:310px; width:310px; font-size:.9em;' "
                              "onchange='findTracks.mdbVarChanged(true);'>\n",
                       buf,(mdbSelect->val ? (char *)mdbSelect->val: ""));
        }
    else if (searchBy == cvSearchByWildList)
        {
        /* Wild-list vars get a comma-separated list input ("is among"). */
        dyStringPrintf(output,"</td><td align='right' id='isLike%i' style='width:10px; "
                              "white-space:nowrap;'>is among</td>\n<td nowrap id='%s' "
                              "style='max-width:600px;'>\n",row,buf);
        dyStringPrintf(output,"<input type='text' name='%s' value='%s' class='mdbVal wildList' "
                              "title='enter comma separated list of values' "
                              "style='max-width:310px; width:310px; font-size:.9em;' "
                              "onchange='findTracks.mdbVarChanged(true);'>\n",
                       buf,(mdbSelect->val ? (char *)mdbSelect->val: ""));
        }
    //else if (searchBy == cvSearchByDateRange || searchBy == cvSearchByIntegerRange)
    //    {
    //    // TO BE IMPLEMENTED
    //    }
    dyStringPrintf(output,"<span id='helpLink%i'> </span></td>\n", row);
    dyStringPrintf(output,"</tr>\n");
    }

/* Spacer row below the selects. */
dyStringPrintf(output,"<tr><td colspan='%d' align='right' style='height:10px; "
                      "max-height:10px;'> </td></tr>", cols);
return dyStringCannibalize(&output);
}
void *preProcessReads(int *idp)
/* Thread worker (pthread-style signature): preprocess this thread's slice of
 * the global read array _r_seq -- uppercase each read, build its reverse
 * complement, count base composition, compress both strands, and compute
 * (hash, checksum) pairs at each sampling location for both strands.  The
 * pairs are then sorted and collapsed into this thread's read index
 * (_r_readIndex[id] / _r_readIndexSize[id]).  Always returns NULL. */
{
	int id = *idp;	/* thread number; selects the slice of reads to process */
	int i=0, j=0, pos=0, tmpSize=0;
	int32_t hvtmp, cstmp;
	/* Slice size = ceil(_r_seqCnt / THREAD_COUNT). */
	int div = _r_seqCnt / THREAD_COUNT;
	div += (_r_seqCnt % THREAD_COUNT)?1:0;
	/* Two entries per sampling location per read (forward + reverse). */
	Pair *tmp = getMem(sizeof(Pair)*(div * _r_samplingLocsSize*2));
	char alphCnt[5];	/* counts of A,C,G,T and (index 4) ambiguous bases */
	char *a, *b;
	for (i=id*div; i<div*(id+1) && i<_r_seqCnt; i++)
	{
		alphCnt[0]=alphCnt[1]=alphCnt[2]=alphCnt[3]=alphCnt[4]=0;
		a = _r_seq[i].seq;
		b = _r_seq[i].rseq+SEQ_LENGTH;
		*(b--)='\0';	/* terminate reverse strand, then fill backwards */
		for (j = 0; j<SEQ_LENGTH; j++)
		{
			*a = toupper(*a);
			*b = reverseCompleteChar(*a);
			/* NOTE(review): *a is a (possibly signed) char index into
			 * _r_alphIndex; assumes input is ASCII A/C/G/T/N -- confirm
			 * the table covers anything else the reader lets through. */
			alphCnt[_r_alphIndex[*a]]++;
			a++;
			b--;
		}
		/* Too many ambiguous bases: flag the read so it is skipped below. */
		if (alphCnt[4]>errThreshold)
			_r_seq[i].hits[0]=1;
		_r_seq[i].alphCnt[0] = alphCnt[0];
		_r_seq[i].alphCnt[1] = alphCnt[1];
		_r_seq[i].alphCnt[2] = alphCnt[2];
		_r_seq[i].alphCnt[3] = alphCnt[3];
		compressSequence(_r_seq[i].seq, SEQ_LENGTH, _r_seq[i].cseq);
		compressSequence(_r_seq[i].rseq, SEQ_LENGTH, _r_seq[i].crseq);
		if (_r_seq[i].hits[0] == 1)	// marked reads are not indexed
		{
			_r_seq[i].hits[0] = 0;
			/* Emit placeholder (-1) entries so seqInfo numbering stays
			 * aligned with the read's slot in the slice. */
			for (j=0; j< 2*_r_samplingLocsSize; j++)
			{
				tmp[pos].hv = -1;
				tmp[pos].checksum = 0;
				tmp[pos].seqInfo = pos +(div*id*2*_r_samplingLocsSize);
				pos++;
			}
		}
		else
		{
			/* Forward strand: one (hash, checksum) per sampling location. */
			for (j=0; j< _r_samplingLocsSize; j++)
			{
				hvtmp = hashVal(_r_seq[i].seq+_r_samplingLocs[j]);
				cstmp = checkSumVal(_r_seq[i].seq+_r_samplingLocs[j]+WINDOW_SIZE);
				if (hvtmp == -1 || cstmp == -1)
				{
					tmp[pos].hv = -1;
					tmp[pos].checksum = 0;
				}
				else
				{
					tmp[pos].hv = hvtmp;
					tmp[pos].checksum = cstmp;
				}
				tmp[pos].seqInfo = pos +(div*id*2*_r_samplingLocsSize);
				pos++;
			}
			/* Reverse strand: same, over rseq. */
			for (j=0; j<_r_samplingLocsSize; j++)
			{
				hvtmp = hashVal(_r_seq[i].rseq+_r_samplingLocs[j]);
				cstmp = checkSumVal(_r_seq[i].rseq+_r_samplingLocs[j]+WINDOW_SIZE);
				if (hvtmp == -1 || cstmp == -1)
				{
					tmp[pos].hv = -1;
					tmp[pos].checksum = 0;
				}
				else
				{
					tmp[pos].hv = hvtmp;
					tmp[pos].checksum = cstmp;
				}
				tmp[pos].seqInfo = pos+(div*id*2*_r_samplingLocsSize);
				pos++;
			}
		}
		tmpSize+=2*_r_samplingLocsSize;
	}

	/* Sort all pairs by hash value so equal hashes are adjacent. */
	introSortPair( tmp, 0, tmpSize-1);

	/* First pass: count distinct hash values. */
	int uniq = 0;
	int prev = -2;
	int beg = -1;
	int end = -1;
	for (i=0; i<tmpSize; i++)
	{
		if (prev != tmp[i].hv)
		{
			uniq ++;
			prev = tmp[i].hv;
		}
	}
	_r_readIndexSize[id] = uniq;
	_r_readIndex[id] = getMem(sizeof(ReadIndexTable)*_r_readIndexSize[id]);

	/* Second pass: one index entry per run of equal hash values; list[0].info
	 * holds the run length, entries 1..len carry (seqInfo, checksum). */
	prev = -2;
	j=0;
	beg =0;
	while (beg < tmpSize)
	{
		end = beg;
		while (end+1<tmpSize && tmp[end+1].hv==tmp[beg].hv)
			end++;
		_r_readIndex[id][j].hv = tmp[beg].hv;
		_r_readIndex[id][j].list = getMem(sizeof(GeneralIndex)*(end-beg+2));
		_r_readIndex[id][j].list[0].info = end-beg+1;
		for (i=1; i <= _r_readIndex[id][j].list[0].info; i++)
		{
			_r_readIndex[id][j].list[i].info=tmp[beg+i-1].seqInfo;
			_r_readIndex[id][j].list[i].checksum=tmp[beg+i-1].checksum;
		}
		j++;
		beg = end+1;
	}
	freeMem(tmp, sizeof(Pair)*(div*_r_samplingLocsSize*2));
	return NULL;
}
void doSummaryStatsWiggle(struct sqlConnection *conn)
/* Put up page showing summary stats for wiggle track.  For a single region
 * the full statsOut() table is shown; for multiple regions each region's
 * stats are streamed as computed (to keep the browser connection alive) and
 * an overall SUMMARY row is accumulated from per-region sums.  Note the
 * WIG_INIT macro below sets several of the locals declared here (ct,
 * isCustom, hasConstraint, wds, table2, dataConstraint, ll, ul). */
{
// grab the right trackDb for the current table. The curTrack variable
// has the composite trackDb in it
struct trackDb *track = hTrackDbForTrack(database, curTable);

char *table = curTable;
struct region *region, *regionList = getRegions();
char *regionName = getRegionName();
long long regionSize = 0;
long long gapTotal = 0;
long startTime = 0, wigFetchTime = 0;
char splitTableOrFileName[HDB_MAX_TABLE_STRING];
struct customTrack *ct = NULL;
boolean isCustom = FALSE;
struct wiggleDataStream *wds = NULL;
unsigned long long valuesMatched = 0;
int regionCount = 0;
int regionsDone = 0;
unsigned span = 0;
char *dataConstraint;
double ll = 0.0;
double ul = 0.0;
boolean hasConstraint = FALSE;
char *table2 = NULL;
boolean fullGenome = FALSE;
boolean statsHeaderDone = FALSE;
boolean gotSome = FALSE;
char *shortLabel = table;
long long statsItemCount = 0;	/* global accumulators for overall */
int statsSpan = 0;		/* stats summary on a multiple region */
double statsSumData = 0.0;	/* output */
double statsSumSquares = 0.0;	/* " " */
double lowerLimit = INFINITY;	/* " " */
double upperLimit = -1.0 * INFINITY;	/* " " */

startTime = clock1000();
if (track != NULL)
    shortLabel = track->shortLabel;

/* Count the regions, when only one, we can do more stats */
for (region = regionList; region != NULL; region = region->next)
    ++regionCount;

htmlOpen("%s (%s) Wiggle Summary Statistics", shortLabel, table);

if (anySubtrackMerge(database, curTable))
    hPrintf("<P><EM><B>Note:</B> subtrack merge is currently ignored on this "
	    "page (not implemented yet). Statistics shown here are only for "
	    "the primary table %s (%s).</EM>", shortLabel, table);

fullGenome = fullGenomeRegion();

WIG_INIT;	/* ct, isCustom, hasConstraint, wds and table2 are set here */

for (region = regionList; region != NULL; region = region->next)
    {
    struct bed *intersectBedList = NULL;
    int operations;

    ++regionsDone;

    if (table2)
	intersectBedList = bedTable2(conn, region, table2);

    operations = wigFetchStats;
#if defined(NOT)
    /* can't do the histogram now, that operation times out */
    if (1 == regionCount)
	operations |= wigFetchAscii;
#endif

    wds->setChromConstraint(wds, region->chrom);

    /* (0,0) means "whole chromosome" to the wiggle data stream. */
    if (fullGenome)
	wds->setPositionConstraint(wds, 0, 0);
    else
	wds->setPositionConstraint(wds, region->start, region->end);

    if (hasConstraint)
	wds->setDataConstraint(wds, dataConstraint, ll, ul);

    /* depending on what is coming in on regionList, we may need to be
     * smart about how often we call getData for these custom tracks
     * since that is potentially a large file read each time. */
    if (isCustom)
	{
	if (ct->dbTrack)
	    {
	    /* Database-backed custom track: read via the trash database. */
	    struct sqlConnection *trashConn = hAllocConn(CUSTOM_TRASH);
	    struct trackDb *tdb = findTdbForTable(database, curTrack, table, ctLookupName);
	    span = minSpan(trashConn, splitTableOrFileName, region->chrom,
		region->start, region->end, cart, tdb);
	    wds->setSpanConstraint(wds, span);
	    valuesMatched = getWigglePossibleIntersection(wds, region,
		CUSTOM_TRASH, table2, &intersectBedList,
		splitTableOrFileName, operations);
	    hFreeConn(&trashConn);
	    }
	else
	    {
	    /* File-backed custom track. */
	    valuesMatched = getWigglePossibleIntersection(wds, region, NULL,
		table2, &intersectBedList, splitTableOrFileName, operations);

	    /* XXX We need to properly get the smallest span for custom tracks */
	    /* This is not necessarily the correct answer here */
	    if (wds->stats)
		span = wds->stats->span;
	    else
		span = 1;
	    }
	}
    else
	{
	if (hFindSplitTable(database, region->chrom, table, splitTableOrFileName, sizeof splitTableOrFileName, NULL))
	    {
	    span = minSpan(conn, splitTableOrFileName, region->chrom,
		region->start, region->end, cart, track);
	    wds->setSpanConstraint(wds, span);
	    valuesMatched = getWigglePossibleIntersection(wds, region,
		database, table2, &intersectBedList, splitTableOrFileName,
		operations);
	    if (intersectBedList)
		span = 1;
	    }
	}
    /* when doing multiple regions, we need to print out each result as
     * it happens to keep the connection open to the browser and
     * prevent any timeout since this could take a while.
     * (worst case test is quality track on panTro1) */
    if (wds->stats)
	statsItemCount += wds->stats->count;
    if (wds->stats && (regionCount > 1) && (valuesMatched > 0))
	{
	/* Recover sum and sum-of-squares from mean/variance so they can be
	 * accumulated across regions for the overall SUMMARY row. */
	double sumData = wds->stats->mean * wds->stats->count;
	double sumSquares;

	if (wds->stats->count > 1)
	    sumSquares = (wds->stats->variance * (wds->stats->count - 1))
		+ ((sumData * sumData)/wds->stats->count);
	else
	    sumSquares = sumData * sumData;

	/* global accumulators for overall summary */
	statsSpan = wds->stats->span;
	statsSumData += sumData;
	statsSumSquares += sumSquares;
	if (wds->stats->lowerLimit < lowerLimit)
	    lowerLimit = wds->stats->lowerLimit;
	if ((wds->stats->lowerLimit + wds->stats->dataRange) > upperLimit)
	    upperLimit = wds->stats->lowerLimit + wds->stats->dataRange;

	/* Print the table header only once; each region adds rows. */
	if (statsHeaderDone)
	    wds->statsOut(wds, database, "stdout", TRUE, TRUE, FALSE, TRUE);
	else
	    {
	    wds->statsOut(wds, database, "stdout", TRUE, TRUE, TRUE, TRUE);
	    statsHeaderDone = TRUE;
	    }
	wds->freeStats(wds);
	gotSome = TRUE;
	}
    if ((regionCount > MAX_REGION_DISPLAY) &&
	    (regionsDone >= MAX_REGION_DISPLAY))
	{
	hPrintf("<TR><TH ALIGN=CENTER COLSPAN=12> Can not display more "
	    "than %d regions, <BR> would take too much time </TH></TR>\n",
	    MAX_REGION_DISPLAY);
	break;	/* exit this for loop */
	}
    }	/*for (region = regionList; region != NULL; region = region->next) */

if (hasConstraint)
    freeMem(dataConstraint);	/* been cloned into wds */

if (1 == regionCount)
    {
    /* Single region: one complete stats table. */
    statsPreamble(wds, regionList->chrom, regionList->start, regionList->end,
	span, valuesMatched, table2);
    /* 3 X TRUE = sort results, html table output, with header,
     * the FALSE means close the table after printing, no more rows to
     * come.  The case in the if() statement was already taken care of
     * in the statsPreamble() printout.  No need to do that again. */
    if ( ! ((valuesMatched == 0) && table2) )
	wds->statsOut(wds, database, "stdout", TRUE, TRUE, TRUE, FALSE);
    regionSize = basesInRegion(regionList,0);
    gapTotal = gapsInRegion(conn, regionList,0);
    }
else
    {
    /* this is a bit of a kludge here since these printouts are done in the
     * library source wigDataStream.c statsOut() function and
     * this is a clean up of that.  That function should be
     * pulled out of there and made independent and more
     * versatile. */
    long long realSize;
    double variance;
    double stddev;

    /* Too expensive to lookup the numbers for thousands of regions */
    regionSize = basesInRegion(regionList,MAX_REGION_DISPLAY);
    gapTotal = gapsInRegion(conn, regionList,MAX_REGION_DISPLAY);
    realSize = regionSize - gapTotal;

    /* close the table which was left open in the loop above */
    if (!gotSome)
	hPrintf("<TR><TH ALIGN=CENTER COLSPAN=12> No data found matching this request </TH></TR>\n");

    /* Overall SUMMARY row computed from the accumulated sums. */
    hPrintf("<TR><TH ALIGN=LEFT> SUMMARY: </TH>\n");
    hPrintf("\t<TD> </TD>\n");	/* chromStart */
    hPrintf("\t<TD> </TD>\n");	/* chromEnd */
    hPrintf("\t<TD ALIGN=RIGHT> ");
    printLongWithCommas(stdout, statsItemCount);
    hPrintf(" </TD>\n" );
    hPrintf("\t<TD ALIGN=RIGHT> %d </TD>\n", statsSpan);
    hPrintf("\t<TD ALIGN=RIGHT> ");
    printLongWithCommas(stdout, statsItemCount*statsSpan);
    hPrintf(" (%.2f%%) </TD>\n",
	100.0*(double)(statsItemCount*statsSpan)/(double)realSize);
    hPrintf("\t<TD ALIGN=RIGHT> %g </TD>\n", lowerLimit);
    hPrintf("\t<TD ALIGN=RIGHT> %g </TD>\n", upperLimit);
    hPrintf("\t<TD ALIGN=RIGHT> %g </TD>\n", upperLimit - lowerLimit);
    if (statsItemCount > 0)
	hPrintf("\t<TD ALIGN=RIGHT> %g </TD>\n", statsSumData/statsItemCount);
    else
	hPrintf("\t<TD ALIGN=RIGHT> 0.0 </TD>\n");
    /* Sample variance from sum and sum-of-squares (n-1 denominator). */
    stddev = 0.0;
    variance = 0.0;
    if (statsItemCount > 1)
	{
	variance = (statsSumSquares -
	    ((statsSumData * statsSumData)/(double) statsItemCount)) /
	    (double) (statsItemCount - 1);
	if (variance > 0.0)
	    stddev = sqrt(variance);
	}
    hPrintf("\t<TD ALIGN=RIGHT> %g </TD>\n", variance);
    hPrintf("\t<TD ALIGN=RIGHT> %g </TD>\n", stddev);
    hPrintf("</TR>\n");
    wigStatsTableHeading(stdout, TRUE);
    hPrintf("</TABLE></TD></TR></TABLE></P>\n");
    }

#if defined(NOT)
/* can't do the histogram now, that operation times out */
/* Single region, we can do the histogram */
if ((valuesMatched > 1) && (1 == regionCount))
    {
    float *valuesArray = NULL;
    size_t valueCount = 0;
    struct histoResult *histoGramResult;

    /* convert the ascii data listings to one giant float array */
    valuesArray = wds->asciiToDataArray(wds, valuesMatched, &valueCount);

    /* histoGram() may return NULL if it doesn't work */
    histoGramResult = histoGram(valuesArray, valueCount,
	    NAN, (unsigned) 0, NAN, (float) wds->stats->lowerLimit,
	    (float) (wds->stats->lowerLimit + wds->stats->dataRange),
	    (struct histoResult *)NULL);

    printHistoGram(histoGramResult, TRUE);	/* TRUE == html output */

    freeHistoGram(&histoGramResult);
    wds->freeAscii(wds);
    wds->freeArray(wds);
    }
#endif

wds->freeStats(wds);
wiggleDataStreamFree(&wds);

/* Trailer section: where/how long this page took. */
wigFetchTime = clock1000() - startTime;
webNewSection("Region and Timing Statistics");
hTableStart();
stringStatRow("region", regionName);
numberStatRow("bases in region", regionSize);
numberStatRow("bases in gaps", gapTotal);
floatStatRow("load and calc time", 0.001*wigFetchTime);
wigFilterStatRow(conn);
stringStatRow("intersection", cartUsualString(cart, hgtaIntersectTable, "off"));
hTableEnd();
htmlClose();
}	/* void doSummaryStatsWiggle(struct sqlConnection *conn) */
struct bed *getWiggleAsBed(
    char *db, char *table, 	/* Database and table. */
    struct region *region,	/* Region to get data for. */
    char *filter, 		/* Filter to add to SQL where clause if any. */
    struct hash *idHash, 	/* Restrict to id's in this hash if non-NULL. */
    struct lm *lm,		/* Where to allocate memory. */
    struct sqlConnection *conn)	/* SQL connection to work with */
/* Return a bed list of all items in the given range in table.
 * Cleanup result via lmCleanup(&lm) rather than bedFreeList. */
/* filter, idHash and lm are currently unused, perhaps future use */
{
struct bed *bedList=NULL;
char splitTableOrFileName[HDB_MAX_TABLE_STRING];
struct customTrack *ct = NULL;
boolean isCustom = FALSE;
boolean hasConstraint = FALSE;
struct wiggleDataStream *wds = NULL;
unsigned long long valuesMatched = 0;
int operations = wigFetchBed;	/* ask the stream for bed elements, not ascii */
char *dataConstraint;
double ll = 0.0;
double ul = 0.0;
char *table2 = NULL;
struct bed *intersectBedList = NULL;
int maxOut;

WIG_INIT;	/* ct, isCustom, hasConstraint, wds and table2 are set here */
if (hasConstraint)
    freeMem(dataConstraint);	/* been cloned into wds */

/* Bound output size and restrict the stream to the requested window. */
maxOut = bigFileMaxOutput();
wds->setMaxOutput(wds, maxOut);
wds->setChromConstraint(wds, region->chrom);
wds->setPositionConstraint(wds, region->start, region->end);

/* When intersecting with a second table, fetch its bed list first. */
if (table2)
    intersectBedList = bedTable2(conn, region, table2);

if (isCustom)
    {
    if (ct->dbTrack)
	{
	/* Database-backed custom track: data lives in the trash database. */
	unsigned span = 0;
	struct sqlConnection *trashConn = hAllocConn(CUSTOM_TRASH);
	struct trackDb *tdb = findTdbForTable(database, curTrack, table, ctLookupName);
	valuesMatched = getWigglePossibleIntersection(wds, region,
	    CUSTOM_TRASH, table2, &intersectBedList, splitTableOrFileName,
		operations);
	/* NOTE(review): the span constraint is set only after the fetch
	 * above has already run — confirm this ordering is intended
	 * (compare with the database branch below, which sets span first). */
	span = minSpan(trashConn, splitTableOrFileName, region->chrom,
	    region->start, region->end, cart, tdb);
	wds->setSpanConstraint(wds, span);
	hFreeConn(&trashConn);
	}
    else
	/* File-backed custom track: NULL database means read from file. */
	valuesMatched = getWigglePossibleIntersection(wds, region, NULL,
	    table2, &intersectBedList, splitTableOrFileName, operations);
    }
else
    {
    if (conn == NULL)
	errAbort( "getWiggleAsBed: NULL conn given for database table");

    if (hFindSplitTable(database, region->chrom, table,
	splitTableOrFileName, sizeof splitTableOrFileName, NULL))
	{
	struct trackDb *tdb = findTdbForTable(database, curTrack, table, ctLookupName);
	unsigned span = 0;

	/* XXX TBD, watch for a span limit coming in as an SQL filter */
	span = minSpan(conn, splitTableOrFileName, region->chrom,
	    region->start, region->end, cart, tdb);
	wds->setSpanConstraint(wds, span);
	valuesMatched = getWigglePossibleIntersection(wds, region, database,
	    table2, &intersectBedList, splitTableOrFileName, operations);
	}
    }

if (valuesMatched > 0)
    {
    struct bed *bed;

    /* Clone results into the caller's local memory pool; slAddHead
     * reverses order, so slReverse restores sorted order. */
    wds->sortResults(wds);
    for (bed = wds->bed; bed != NULL; bed = bed->next)
	{
	struct bed *copy = lmCloneBed(bed, lm);
	slAddHead(&bedList, copy);
	}
    slReverse(&bedList);
    }

wiggleDataStreamFree(&wds);

return bedList;
}	/* struct bed *getWiggleAsBed() */
// ###################################################################### BitObject::BitObject() { freeMem(); }
void liftChain(char *destFile, struct hash *liftHash, int sourceCount, char *sources[], boolean querySide)
/* Lift up coordinates in .chain file.  Reads every chain from each file in
 * sources[], looks up the chain's query-side (or target-side, depending on
 * querySide) sequence name in liftHash, rewrites coordinates, name and size
 * per the matching liftSpec, and writes the result to destFile.  Chains with
 * no lift spec are dropped unless the global 'how' is carryMissing, in which
 * case they are written through unchanged. */
{
FILE *f = mustOpen(destFile, "w");
int sourceIx;
int dotMod = dots;	/* progress-dot countdown, see doDots() */
for (sourceIx = 0; sourceIx < sourceCount; ++sourceIx)
    {
    char *source = sources[sourceIx];
    struct lineFile *lf = lineFileOpen(source, TRUE);
    struct chain *chain;
    lineFileSetMetaDataOutput(lf, f);
    verbose(1, "Lifting %s\n", source);
    while ((chain = chainRead(lf)) != NULL)
	{
	struct liftSpec *spec;
	char *seqName = querySide ? chain->qName : chain->tName;
	spec = findLift(liftHash, seqName, lf);
	if (spec == NULL)
	    {
	    /* No lift spec: drop the chain unless we carry missing
	     * entries through unchanged (falls through to chainWrite). */
	    if (how != carryMissing)
		{
		chainFree(&chain);
		continue;
		}
	    }
	else
	    {
	    struct cBlock *b = NULL;
	    int offset = spec->offset;
	    if (spec->strand == '-')
		{
		/* Lift spec maps onto the minus strand of the new sequence. */
		if (querySide)
		    {
		    int qSpan = chain->qEnd - chain->qStart;
		    if (chain->qStrand == '-')
			chain->qStart += spec->offset;
		    else
			{
			chain->qStart = spec->newSize - spec->offset - (chain->qSize - chain->qStart);
			}
		    chain->qEnd = chain->qStart + qSpan;
		    chain->qStrand = flipStrand(chain->qStrand);
		    freeMem(chain->qName);
		    chain->qName = cloneString(spec->newName);
		    chain->qSize = spec->newSize;
		    /* We don't need to mess with the blocks here
		     * since they are all relative to the start. */
		    }
		else
		    {
		    /* We try and keep the target strand positive, so we end up
		     * flipping in both target and query and flipping the target
		     * strand. */
		    reverseIntRange(&chain->qStart, &chain->qEnd, chain->qSize);
		    reverseIntRange(&chain->tStart, &chain->tEnd, chain->tSize);
		    chain->qStrand = flipStrand(chain->qStrand);

		    /* Flip around blocks and add offset. */
		    for (b=chain->blockList; b != NULL; b=b->next)
			{
			reverseIntRange(&b->qStart, &b->qEnd, chain->qSize);
			reverseIntRange(&b->tStart, &b->tEnd, chain->tSize);
			b->tStart += offset;
			b->tEnd += offset;
			}
		    slReverse(&chain->blockList);

		    /* On target side add offset as well and update name and
		     * size. */
		    chain->tStart += offset;
		    chain->tEnd += offset;
		    freeMem(chain->tName);
		    chain->tName = cloneString(spec->newName);
		    chain->tSize = spec->newSize;
		    }
		}
	    else
		{
		/* Plus-strand lift spec: shift coordinates by offset. */
		if (querySide)
		    {
		    /* For a minus-strand query the offset is measured from
		     * the other end of the new sequence. */
		    if (chain->qStrand == '-')
			offset = spec->newSize - (spec->offset + spec->oldSize);
		    freeMem(chain->qName);
		    chain->qName = cloneString(spec->newName);
		    chain->qSize = spec->newSize;
		    chain->qStart += offset;
		    chain->qEnd += offset;
		    for (b=chain->blockList; b != NULL; b=b->next)
			{
			b->qStart += offset;
			b->qEnd += offset;
			}
		    }
		else
		    {
		    freeMem(chain->tName);
		    chain->tName = cloneString(spec->newName);
		    chain->tSize = spec->newSize;
		    chain->tStart += offset;
		    chain->tEnd += offset;
		    for (b=chain->blockList; b != NULL; b=b->next)
			{
			b->tStart += offset;
			b->tEnd += offset;
			}
		    }
		}
	    }
	chainWrite(chain, f);
	chainFree(&chain);
	doDots(&dotMod);
	}
    lineFileClose(&lf);
    if (dots)
        verbose(1, "\n");
    }
}
void firstPass(char *aList, char *bList, char *outName) /* Do first pass - find areas of homology between a and b, * save to outName. */ { char *aNameBuf, **aNames; char *bNameBuf, **bNames; int aCount, bCount; struct nt4Seq **bNts, *bNt, *bNtList = NULL; int bNtCount; int i; FILE *out = mustOpen(outName, "w"); /* Read in fa file lists . */ readAllWordsOrFa(aList, &aNames, &aCount, &aNameBuf); readAllWordsOrFa(bList, &bNames, &bCount, &bNameBuf); /* Convert second list to nt4 (packed) format in memory. */ printf("Loading and packing dna in %s\n", bList); for (i=0; i<bCount; ++i) { char *bName = bNames[i]; struct dnaSeq *seqList, *seq; seqList = faReadAllDna(bName); for (seq = seqList; seq != NULL; seq = seq->next) { char uniqName[512]; sprintf(uniqName, "%s@%s", seq->name, bName); bNt = newNt4(seq->dna, seq->size, uniqName); slAddHead(&bNtList, bNt); } freeDnaSeqList(&seqList); } slReverse(&bNtList); bNtCount = slCount(bNtList); AllocArray(bNts, bNtCount); for (i=0, bNt=bNtList; i<bNtCount; ++i, bNt=bNt->next) bNts[i] = bNt; printf("Loaded %d contigs from %d files\n", bNtCount, bCount); /* Align elements of A list one at a time against B list. */ for (i=0; i<aCount; ++i) { char *aName = aNames[i]; struct dnaSeq *seqList, *seq; printf("Aligning %s against %s\n", aName, bList); seqList = faReadAllDna(aName); for (seq = seqList; seq != NULL; seq = seq->next) { doCrude(aName, seq, bNts, bNtCount, out); } printf("\n"); freeDnaSeqList(&seqList); } /* Cleanup time. */ for (i=0; i<bNtCount; ++i) freeNt4(&bNts[i]); freeMem(bNts); freeMem(aNames); freeMem(bNames); freeMem(aNameBuf); freeMem(bNameBuf); fclose(out); }
// Destructor: release all dynamically allocated ACADO objects via the
// shared cleanup routine freeMem() (also invoked at the top of init()).
AcadoIntegratorInternal::~AcadoIntegratorInternal(){
  freeMem();
}
void blatSeq(char *userSeq, char *organism) /* Blat sequence user pasted in. */ { FILE *f; struct dnaSeq *seqList = NULL, *seq; struct tempName pslTn, faTn; int maxSingleSize, maxTotalSize, maxSeqCount; int minSingleSize = minMatchShown; char *genome, *db; char *type = cgiString("type"); char *seqLetters = cloneString(userSeq); struct serverTable *serve; int conn; int oneSize, totalSize = 0, seqCount = 0; boolean isTx = FALSE; boolean isTxTx = FALSE; boolean txTxBoth = FALSE; struct gfOutput *gvo; boolean qIsProt = FALSE; enum gfType qType, tType; struct hash *tFileCache = gfFileCacheNew(); boolean feelingLucky = cgiBoolean("Lucky"); getDbAndGenome(cart, &db, &genome, oldVars); if(!feelingLucky) cartWebStart(cart, db, "%s BLAT Results", trackHubSkipHubName(organism)); /* Load user sequence and figure out if it is DNA or protein. */ if (sameWord(type, "DNA")) { seqList = faSeqListFromMemText(seqLetters, TRUE); uToT(seqList); isTx = FALSE; } else if (sameWord(type, "translated RNA") || sameWord(type, "translated DNA")) { seqList = faSeqListFromMemText(seqLetters, TRUE); uToT(seqList); isTx = TRUE; isTxTx = TRUE; txTxBoth = sameWord(type, "translated DNA"); } else if (sameWord(type, "protein")) { seqList = faSeqListFromMemText(seqLetters, FALSE); isTx = TRUE; qIsProt = TRUE; } else { seqList = faSeqListFromMemTextRaw(seqLetters); isTx = !seqIsDna(seqList); if (!isTx) { for (seq = seqList; seq != NULL; seq = seq->next) { seq->size = dnaFilteredSize(seq->dna); dnaFilter(seq->dna, seq->dna); toLowerN(seq->dna, seq->size); subChar(seq->dna, 'u', 't'); } } else { for (seq = seqList; seq != NULL; seq = seq->next) { seq->size = aaFilteredSize(seq->dna); aaFilter(seq->dna, seq->dna); toUpperN(seq->dna, seq->size); } qIsProt = TRUE; } } if (seqList != NULL && seqList->name[0] == 0) { freeMem(seqList->name); seqList->name = cloneString("YourSeq"); } trimUniq(seqList); /* If feeling lucky only do the first on. 
*/ if(feelingLucky && seqList != NULL) { seqList->next = NULL; } /* Figure out size allowed. */ maxSingleSize = (isTx ? 10000 : 75000); maxTotalSize = maxSingleSize * 2.5; #ifdef LOWELAB maxSeqCount = 200; #else maxSeqCount = 25; #endif /* Create temporary file to store sequence. */ trashDirFile(&faTn, "hgSs", "hgSs", ".fa"); faWriteAll(faTn.forCgi, seqList); /* Create a temporary .psl file with the alignments against genome. */ trashDirFile(&pslTn, "hgSs", "hgSs", ".pslx"); f = mustOpen(pslTn.forCgi, "w"); gvo = gfOutputPsl(0, qIsProt, FALSE, f, FALSE, TRUE); serve = findServer(db, isTx); /* Write header for extended (possibly protein) psl file. */ if (isTx) { if (isTxTx) { qType = gftDnaX; tType = gftDnaX; } else { qType = gftProt; tType = gftDnaX; } } else { qType = gftDna; tType = gftDna; } pslxWriteHead(f, qType, tType); if (qType == gftProt) { minSingleSize = 14; } else if (qType == gftDnaX) { minSingleSize = 36; } /* Loop through each sequence. */ for (seq = seqList; seq != NULL; seq = seq->next) { printf(" "); fflush(stdout); /* prevent apache cgi timeout by outputting something */ oneSize = realSeqSize(seq, !isTx); if ((seqCount&1) == 0) // Call bot delay every 2nd time starting with first time hgBotDelay(); if (++seqCount > maxSeqCount) { warn("More than 25 input sequences, stopping at %s.", seq->name); break; } if (oneSize > maxSingleSize) { warn("Sequence %s is %d letters long (max is %d), skipping", seq->name, oneSize, maxSingleSize); continue; } if (oneSize < minSingleSize) { warn("Warning: Sequence %s is only %d letters long (%d is the recommended minimum)", seq->name, oneSize, minSingleSize); // we could use "continue;" here to actually enforce skipping, // but let's give the short sequence a chance, it might work. // minimum possible length = tileSize+stepSize, so mpl=16 for dna stepSize=5, mpl=10 for protein. 
if (qIsProt && oneSize < 1) // protein does not tolerate oneSize==0 continue; } totalSize += oneSize; if (totalSize > maxTotalSize) { warn("Sequence %s would take us over the %d letter limit, stopping here.", seq->name, maxTotalSize); break; } conn = gfConnect(serve->host, serve->port); if (isTx) { gvo->reportTargetStrand = TRUE; if (isTxTx) { gfAlignTransTrans(&conn, serve->nibDir, seq, FALSE, 5, tFileCache, gvo, !txTxBoth); if (txTxBoth) { reverseComplement(seq->dna, seq->size); conn = gfConnect(serve->host, serve->port); gfAlignTransTrans(&conn, serve->nibDir, seq, TRUE, 5, tFileCache, gvo, FALSE); } } else { gfAlignTrans(&conn, serve->nibDir, seq, 5, tFileCache, gvo); } } else { gfAlignStrand(&conn, serve->nibDir, seq, FALSE, minMatchShown, tFileCache, gvo); reverseComplement(seq->dna, seq->size); conn = gfConnect(serve->host, serve->port); gfAlignStrand(&conn, serve->nibDir, seq, TRUE, minMatchShown, tFileCache, gvo); } gfOutputQuery(gvo, f); } carefulClose(&f); showAliPlaces(pslTn.forCgi, faTn.forCgi, serve->db, qType, tType, organism, feelingLucky); if(!feelingLucky) cartWebEnd(); gfFileCacheFree(&tFileCache); }
// Initialize the ACADO integrator wrapper: (re)allocate all ACADO objects
// for the current problem dimensions.  Safe to call repeatedly — existing
// allocations are released first via freeMem()/setNull().
void AcadoIntegratorInternal::init(){
  // Call the base class init
  IntegratorInternal::init();

  // Free memory and set pointers to NULL
  freeMem();
  setNull();

  // The following will make sure that no data lingers in ACADO
  ACADO::AlgebraicState().clearStaticCounters();
  ACADO::Control().clearStaticCounters();
  ACADO::DifferentialState().clearStaticCounters();
  ACADO::DifferentialStateDerivative().clearStaticCounters();
  ACADO::Disturbance().clearStaticCounters();
  ACADO::IntegerControl().clearStaticCounters();
  ACADO::IntegerParameter().clearStaticCounters();
  ACADO::IntermediateState().clearStaticCounters();
  ACADO::Parameter().clearStaticCounters();

  // Get the number of differential and algebraic equations
  nxa_ = getOption("num_algebraic");
  nxd_ = nx_ - nxa_;
  // One extra argument slot when the ODE depends explicitly on time
  nt_ = bool(getOption("time_dependence")) ? 1 : 0;

  // Create wrapper function
  rhs_ = AcadoFunction(f_);
  rhs_.init();

  // Declare ACADO variables
  t_ = new ACADO::TIME();
  xd_ = new ACADO::DifferentialState[nxd_];
  xa_ = new ACADO::AlgebraicState[nxa_];
  p_ = new ACADO::Parameter[np_];

  // Temporary vector
  x_tmp_ = new ACADO::Vector(nxd_);
  if(np_>0) p_tmp_ = new ACADO::Vector(np_);

  // Augmented state vector: [time, differential, algebraic, parameters]
  arg_ = new ACADO::IntermediateState(nt_+nxd_+nxa_+np_);
  int ind=0;
  // t_ is a single TIME object; nt_ is at most 1, so t_[i] only ever
  // reads t_[0].
  for(int i=0; i<nt_; ++i)  (*arg_)(ind++) = t_[i];
  for(int i=0; i<nxd_; ++i) (*arg_)(ind++) = xd_[i];
  for(int i=0; i<nxa_; ++i) (*arg_)(ind++) = xa_[i];
  for(int i=0; i<np_; ++i)  (*arg_)(ind++) = p_[i];

  // Differential equation
  diff_eq_ = new ACADO::DifferentialEquation();
  *diff_eq_ << (*rhs_.fcn_)(*arg_);

  // Allocate an integrator
  integrator_ = new ACADO::IntegratorBDF(*diff_eq_);

  // Grid points
  num_grid_points_ = getOption("num_grid_points");
  interval_ = new ACADO::Grid( t0_, tf_, num_grid_points_);

  // Variablesgrid for the solution
  if(nxd_>0) differentialStates_ = new ACADO::VariablesGrid();
  if(nxa_>0) algebraicStates_ = new ACADO::VariablesGrid();

  // Temporary
  tmp_ = new ACADO::Vector();
}
/* Destructor for an instance: free its private data record first, then
 * the instance object itself, and decrement the live-instance counter. */
static void __del(void* inst) {
  iOFBackData data = Data(inst);
  freeMem( data );
  freeMem( inst );
  instCnt--;
}
int crunchOne(char *input, FILE *f, char *initialSeq, int minPos) /* Transform output to crunched format and append to file. */ { long offset = 0, lastOffset = 0; struct lineFile *lf = lineFileOpen(input, TRUE); char *words[4]; int wordCount; char *data = NULL; char *seq = initialSeq, *lastSeq = NULL; boolean newSeq; verbose(1, "%s\n", input); while ((wordCount = lineFileChop(lf, words)) != 0) { /* Read line that may already be crunched. */ if (wordCount == 1) { ++offset; data = words[0]; } else if (wordCount == 2) { offset = lineFileNeedNum(lf, words, 0); data = words[1]; } else if (wordCount == 3) { seq = words[0]; offset = lineFileNeedNum(lf, words, 1); data = words[2]; } else { errAbort("Expecting no more than 3 words, got %d line %d of %s", wordCount, lf->lineIx, lf->fileName); } if (wordCount != 3) seq = lastSeq; /* Memory here will last until next line. */ /* If we've gotten this far, seq should be defined or it's a syntax error. */ if (seq == NULL) { errAbort("No sequence name defined line %d of %s", lf->lineIx, lf->fileName); } /* See if it's a new sequence, and if so reset last offset. */ newSeq = (lastSeq == NULL || !sameString(seq, lastSeq) ); if (newSeq) minPos = lastOffset = 0; /* Check for stepping backwards. Either error out, or if * command line is set, skip over things until we go forward * again. */ if (offset < lastOffset && offset >= minPos) { if (clFixOverlap) { minPos = lastOffset; verbose(1, "Removing overlap %d-%d line %d of %s\n", lastOffset, offset, lf->lineIx, lf->fileName); } else { errAbort("Offsets going backwards line %d of %s", lf->lineIx, lf->fileName); } } /* Check to see we are not screening out this as part of an overlap. * If not, print it. 
*/ if (offset >= minPos) { if (clOneToThree) { if (newSeq) { fprintf(f, "%s\t", seq); fprintf(f, "%ld\t", offset); } else if (lastOffset + 1 != offset) fprintf(f, "%ld\t", offset); } else { if (newSeq || lastOffset + 1 != offset) { fprintf(f, "fixedStep chrom=%s start=%ld step=1\n", seq, offset); } } fprintf(f, "%s\n", data); } /* Update variables that keep track of previous line. */ if (newSeq) { freeMem(lastSeq); lastSeq = cloneString(seq); } lastOffset = offset; } freeMem(lastSeq); lineFileClose(&lf); return offset; }
/* Kick off a network fetch of the RDF file (or its update URL when
 * refreshing).  Returns 1 when a load was started or queued, 0 on any
 * early-out.  On the Mozilla thread the load starts directly; from any
 * other thread a MozillaEvent_rdf_GetURL event is posted instead.
 * NOTE(review): on the "mozilla_event_queue == NULL" / "event == NULL"
 * early returns the already-created urls struct is not freed — verify
 * whether that leak is intentional. */
int rdf_GetURL (MWContext *cx, int method, Net_GetUrlExitFunc *exit_routine, RDFFile rdfFile)
{
  MozillaEvent_rdf_GetURL *event;
  URL_Struct *urls = NULL;
  char *url;
#ifdef DEBUG_gagan
  return 0;
#endif
  if (cx == NULL) return 0;
  /* Prefer the update URL when this file is being refreshed. */
  if (rdfFile->refreshingp && rdfFile->updateURL)
    {
      url = rdfFile->updateURL;
    }
  else
    {
      url = rdfFile->url;
    }
  /* The navigation-center URL is only loaded from cache; drop the
   * struct if it is not cached. */
  if (strcmp(url, gNavCntrUrl) == 0)
    {
      urls = NET_CreateURLStruct(url, NET_CACHE_ONLY_RELOAD);
#ifdef NU_CACHE
      if (!CacheManager_Contains(url))
	{
#else
      if (NET_IsURLInDiskCache(urls) || NET_IsURLInMemCache(urls))
	{
	}
      else
	{
#endif
	  NET_FreeURLStruct(urls);
	  urls = NULL;
	}
    }
  if (!urls)
    urls = NET_CreateURLStruct(url, (rdfFile->refreshingp ? NET_SUPER_RELOAD : NET_NORMAL_RELOAD));
  if (urls == NULL) return 0;
  urls->fe_data = rdfFile;
  if (method) urls->method = method;
  if (PR_CurrentThread() == mozilla_thread)
    {
      htLoadBegins(urls, url);
      NET_GetURL(urls, FO_CACHE_AND_RDF, cx, rdf_GetUrlExitFunc);
    }
  else
    {
      /* send event to Mozilla thread */
      if (mozilla_event_queue == NULL) return(0);
      event = PR_NEW(MozillaEvent_rdf_GetURL);
      if (event == NULL) return(0);
      PR_InitEvent(&(event->ce.event), cx,
		   (PRHandleEventProc)rdf_HandleEvent_GetURL,
		   (PRDestroyEventProc)rdf_DisposeEvent_GetURL);
      event->url = copyString(url);
      event->urls = urls;
      event->method = FO_CACHE_AND_RDF;
      event->cx = cx;
      event->exitFunc = rdf_GetUrlExitFunc;
      PR_PostEvent(mozilla_event_queue, &(event->ce.event));
    }
  return 1;
}
#endif /* MOZILLA_CLIENT */

/* Timer callback: refresh RDF files.  The self-rescheduling call is
 * commented out (disabled for legal reasons), so after the first firing
 * no further automatic rereads occur. */
void possiblyRereadRDFFiles (void* data)
{
  possiblyRefreshRDFFiles();
  /* timerID = FE_SetTimeout(possiblyRereadRDFFiles, NULL, 1000 * 60 * 10);
     once every 10 minutes - disabled for legal reasons. */
}

/* One-time RDF glue setup: schedule the reread timer and (re)load the
 * related-links forbidden-domains preference into gRLForbiddenDomains. */
void RDFglueInitialize()
{
#ifdef MOZILLA_CLIENT
  timerID = FE_SetTimeout(possiblyRereadRDFFiles, NULL, 1000 * 60 * 10);
  /* once every 10 minutes */
  if (gRLForbiddenDomains != NULL)
    {
      freeMem(gRLForbiddenDomains);
      gRLForbiddenDomains = NULL;
    }
  if (PREF_CopyCharPref("browser.relatedLinksDisabledForDomains",
			&gRLForbiddenDomains) != PREF_OK)
    {
      gRLForbiddenDomains = NULL;
    }
#endif /* MOZILLA_CLIENT */
}
/* Writer thread: drains the LocoNet write queue onto the serial port.
 * A packet is sent only when the line is not busy and the echo of the
 * previous packet has been seen (subSendEcho).  Both the echo wait and
 * the busy flag are guarded by ~1s timeouts (100 iterations of the 10ms
 * sleep) so a lost echo or stuck busy flag cannot stall the queue
 * forever.  Queued packets are [len|payload...] with len in byte 0. */
static void __writer( void* threadinst ) {
  iOThread th = (iOThread)threadinst;
  iOLocoNet loconet = (iOLocoNet)ThreadOp.getParm( th );
  iOLocoNetData data = Data(loconet);
  char ln[0x7F];
  int echoTimer = 0;
  int busyTimer = 0;

  TraceOp.trc( "ulni", TRCLEVEL_INFO, __LINE__, 9999, "ULNI writer started." );

  while( data->run ) {
    Boolean ok = False;

    /* No serial port yet: back off and retry. */
    if( data->serial == NULL ) {
      ThreadOp.sleep(1000);
      continue;
    }

    /* TODO: copy packet for the reader to compare */
    if( !data->busy && data->subSendEcho && !QueueOp.isEmpty(data->subWriteQueue) ) {
      byte* p = (byte*)QueueOp.get(data->subWriteQueue);
      int size = p[0] & 0x7F;  /* length lives in byte 0, max 127 */
      busyTimer = 0;
      MemOp.copy( ln, &p[1], min(size, 127) );
      freeMem(p);
      ok = SerialOp.write( data->serial, (char*)ln, size );
      if(ok) {
        TraceOp.dump ( "ulni", TRCLEVEL_BYTE, (char*)ln, size );
        /* Remember what was sent so the reader can match the echo. */
        echoTimer = 0;
        data->subSendLen = size;
        MemOp.copy( data->subSendPacket, ln, min(size,127) );
        data->subSendEcho = False;
      }
    }
    else {
      TraceOp.trc( "ulni", TRCLEVEL_DEBUG, __LINE__, 9999,
                   "could not read queue %d", QueueOp.count(data->subWriteQueue) );
    }

    /* Echo watchdog: give up waiting after ~1s and allow the next send. */
    if( !data->subSendEcho ) {
      echoTimer++;
      if( echoTimer >= 100 ) {
        TraceOp.trc( "ulni", TRCLEVEL_EXCEPTION, __LINE__, 9999,
                     "echo timer timed out for OPCODE 0x%02X", data->subSendPacket[0] & 0xFF );
        echoTimer = 0;
        data->subSendEcho = True;
      }
    }

    /* Busy watchdog: clear a stuck busy flag after ~1s. */
    if( data->busy ) {
      busyTimer++;
      if( busyTimer >= 100 ) {
        TraceOp.trc( "ulni", TRCLEVEL_EXCEPTION, __LINE__, 9999, "busy timer timed out" );
        busyTimer = 0;
        data->busy = False;
      }
    }

    ThreadOp.sleep(10);
  };

  TraceOp.trc( "ulni", TRCLEVEL_INFO, __LINE__, 9999, "ULNI writer stopped." );
}
/* Dump heap bookkeeping over the debug channel, one tagged line each:
 * 'B' = current break (__brkval), 'S'/'E' = malloc heap start/end,
 * 'F' = free bytes as reported by freeMem().  Values are printed as
 * 5-digit decimals.  (Presumably AVR-libc heap symbols; the uint16_t
 * casts assume a 16-bit address space — confirm for the target MCU.) */
void getfreemem(char *unused)
{
  DC('B'); DU((uint16_t)__brkval, 5); DNL();
  DC('S'); DU((uint16_t)__malloc_heap_start,5); DNL();
  DC('E'); DU((uint16_t)__malloc_heap_end, 5); DNL();
  DC('F'); DU((uint16_t)freeMem(), 5); DNL();
}
/** ------------------------------------------------------------
  * public main()
  * usage: po2lang -i <po-file> -o <xml-file> [-l <lang>]
  *
  * Merges the translations of a gettext .po file into an existing
  * language XML file: the XML is read and parsed, the .po entries are
  * applied to it (__po2lang), and the result is written back over the
  * same XML file.  Note the output file must already exist, since it is
  * read first.  A -p <package> option is also read (into the global
  * 'package') although the usage text does not mention it.
  *
  * @param argc Number of commandline arguments.
  * @param argv Commandline arguments.
  * @return Applications exit code (0, or -1 if the XML does not parse).
  */
int main( int argc, const char* argv[] ) {
  int rc = 0;
  iOCmdLn arg = NULL;
  const char* infile = NULL;
  const char* outfile = NULL;
  const char* lang = NULL;
  iOTrace trc = TraceOp.inst( TRCLEVEL_INFO, "po2lang", True );
  TraceOp.setAppID( trc, "a" );

  /* Enable coredump for Linux platforms. */
#if defined __linux__
  {
    struct rlimit rl;
    getrlimit( RLIMIT_CORE, &rl );
    /* Default = 0 */
    rl.rlim_cur = 10240 * 10240;
    setrlimit( RLIMIT_CORE, &rl );
  }
#endif

  /* Resets memory statistics. */
  MemOp.resetDump();

  /* Check commandline arguments. */
  arg = CmdLnOp.inst( argc, argv );
  infile = CmdLnOp.getStrDef( arg, "-i", NULL );
  outfile = CmdLnOp.getStrDef( arg, "-o", NULL );
  lang = CmdLnOp.getStrDef( arg, "-l", NULL );
  package = CmdLnOp.getStrDef( arg, "-p", NULL );

  /* Read const.xml */
  if( infile != NULL && FileOp.exist(infile) && lang != NULL && outfile != NULL && FileOp.exist(outfile) ) {
    TraceOp.println( "Processing %s...", infile );
    {
      /* Read and parse the existing language XML (the -o file). */
      iOFile constXml = FileOp.inst( outfile, OPEN_READONLY );
      char* xmlStr = allocMem( FileOp.size( constXml ) + 1 );
      iODoc doc = NULL;
      iONode root = NULL;
      TraceOp.println( "Reading %s...", outfile );
      FileOp.read( constXml, xmlStr, FileOp.size( constXml ) );
      FileOp.close( constXml );
      FileOp.base.del( constXml );
      TraceOp.println( "Parsing %s...", outfile );
      doc = DocOp.parse( xmlStr );
      freeMem( xmlStr );
      if( doc == NULL ) {
        TraceOp.println( "invalid file %s...", outfile );
        return -1;
      }
      root = DocOp.getRootNode( doc );
      __createMap(root);
      TraceOp.println( "Reading %s...", infile );
      {
        /* Read the .po file, merge it into the tree, and write the
         * updated XML back over the output file. */
        iOFile pofile = FileOp.inst( infile, OPEN_READONLY );
        char* po = allocMem( FileOp.size( pofile ) + 1 );
        FileOp.read( pofile, po, FileOp.size( pofile ) );
        FileOp.close( pofile );
        FileOp.base.del( pofile );
        __po2lang( po, root, lang );
        iOFile constXml = FileOp.inst( outfile, OPEN_WRITE );
        TraceOp.println( "serialize..." );
        xmlStr = NodeOp.base.toString(root);
        TraceOp.println( "writing %s...", outfile );
        FileOp.write( constXml, xmlStr, StrOp.len( xmlStr ) );
        FileOp.close( constXml );
        FileOp.base.del( constXml );
      }
    }
  }
  else {
    TraceOp.println( "usage: po2lang -i <po-file> -o <xml-file> -l <lang>" );
  }
  return rc;
}
char *linkGetUrl(struct link *link, struct sqlConnection *conn, char *geneId)
/* Return URL string if possible or NULL if not.  FreeMem this when done.
 * Handles three special-cased link names (family, protBrowser, tbSchema)
 * before the generic path, which runs link->idSql with geneId substituted
 * and formats the first result row into link->url. */
{
char query[512];
struct sqlResult *sr;
char **row;
char *url = NULL;

/* Some special case code here for things that need to
 * do more than check a table. */
if (sameString(link->name, "family"))
    {
    if (!hgNearOk(database))
	return NULL;
    }
if (sameString(link->name, "protBrowser"))
    {
    if (!hgPbOk(database))
	return NULL;
    /* special processing for PB, since we need the protein ID,
     * instead everything key off from gene ID */
    /* use UniProt accession instead of displayID, because display ID
     * sometimes changes */
    if (swissProtAcc == NULL || swissProtAcc[0] == 0)
	return NULL;
    if (isRgdGene(conn))
	{
	safef(query, sizeof(query), "../cgi-bin/pbGlobal?proteinID=%s", swissProtAcc);
	}
    else
	{
	safef(query, sizeof(query), "../cgi-bin/pbTracks?db=%s&proteinID=%s", database, swissProtAcc);
	}
    return(cloneString(query));
    }
if (sameString(link->name, "tbSchema"))
    {
    char *geneTable = genomeSetting("knownGene");
    struct trackDb *tdb = hTrackDbForTrack(sqlGetDatabase(conn), geneTable);
    struct dyString *dy = NULL;
    if (tdb == NULL)
	return NULL;
    dy = newDyString(256);
    dyStringPrintf(dy, link->url, tdb->grp, geneTable, geneTable);
    trackDbFree(&tdb);
    addLinkExtras(link, dy);
    return dyStringCannibalize(&dy);
    }
geneId = cloneAndCut(geneId, link->preCutAt);
/* NOTE(review): link->idSql and link->url are non-literal format strings
 * coming from configuration — confirm they are trusted/admin-controlled,
 * since a stray %s here would read garbage or inject SQL. */
safef(query, sizeof(query), link->idSql, geneId);
sr = sqlGetResult(conn, query);
row = sqlNextRow(sr);
if (row != NULL && row[0][0] != 0) /* If not null or empty */
    {
    struct dyString *dy = newDyString(0);
    char *name = cloneAndCut(row[0], link->postCutAt);
    /* row[1..3] are passed unconditionally; printf only consumes as many
     * as link->url references — assumes idSql selects enough columns for
     * the url's conversions (TODO confirm per link record). */
    dyStringPrintf(dy, link->url, name, row[1], row[2], row[3]);
    addLinkExtras(link, dy);
    url = dyStringCannibalize(&dy);
    freez(&name);
    }
sqlFreeResult(&sr);
freeMem(geneId);
return url;
}
void freeHashEl(struct hashEl *hel)
/* Dispose of a single hash element: release its name string first, then
 * the element itself.  Use only on the non-local-memory (freeMem
 * allocated) version of hash elements. */
{
char *elName = hel->name;
freeMem(elName);
freeMem(hel);
}
void showSchemaBigBed(char *table, struct trackDb *tdb)
/* Show schema on bigBed.  Prints an HTML description of the file's
 * fields (from its autoSql, or a default BED definition) plus up to 10
 * sample rows taken from the first chromosome. */
{
/* Figure out bigBed file name and open it.  Get contents for first
 * chromosome as an example. */
struct sqlConnection *conn = NULL;
if (!trackHubDatabase(database))
    conn = hAllocConn(database);
char *fileName = bigBedFileName(table, conn);
struct bbiFile *bbi = bigBedFileOpen(fileName);
struct bbiChromInfo *chromList = bbiChromList(bbi);
struct lm *lm = lmInit(0);
struct bigBedInterval *ivList = getNElements(bbi, chromList, lm, 10);

/* Get description of columns, making it up from BED records if need be. */
struct asObject *as = bigBedAsOrDefault(bbi);

hPrintf("<B>Database:</B> %s", database);
hPrintf(" <B>Primary Table:</B> %s<br>", table);
hPrintf("<B>Big Bed File:</B> %s", fileName);
if (bbi->version >= 2)
    {
    hPrintf("<BR><B>Item Count:</B> ");
    printLongWithCommas(stdout, bigBedItemCount(bbi));
    }
hPrintf("<BR>\n");
hPrintf("<B>Format description:</B> %s<BR>", as->comment);

/* Put up table that describes fields. */
hTableStart();
hPrintf("<TR><TH>field</TH>");
if (ivList != NULL)
    hPrintf("<TH>example</TH>");
hPrintf("<TH>description</TH> ");
puts("</TR>\n");
struct asColumn *col;
int colCount = 0;
/* row holds one parsed example record; VLA sized by the file's field count. */
char *row[bbi->fieldCount];
char startBuf[16], endBuf[16];
if (ivList != NULL)
    {
    char *dupeRest = lmCloneString(lm, ivList->rest);	/* Manage rest-stomping side-effect */
    bigBedIntervalToRow(ivList, chromList->name, startBuf, endBuf, row, bbi->fieldCount);
    ivList->rest = dupeRest;
    }
/* NOTE(review): if the autoSql declares more columns than
 * bbi->fieldCount, row[colCount] below reads past the filled array —
 * confirm as->columnList can never exceed the file's field count. */
for (col = as->columnList; col != NULL; col = col->next)
    {
    hPrintf("<TR><TD><TT>%s</TT></TD>", col->name);
    if (ivList != NULL)
	hPrintf("<TD>%s</TD>", row[colCount]);
    hPrintf("<TD>%s</TD></TR>", col->comment);
    ++colCount;
    }

/* If more fields than descriptions put up minimally helpful info (at
 * least has example). */
for ( ; colCount < bbi->fieldCount; ++colCount)
    {
    hPrintf("<TR><TD><TT>column%d</TT></TD>", colCount+1);
    if (ivList != NULL)
	hPrintf("<TD>%s</TD>", row[colCount]);
    hPrintf("<TD>n/a</TD></TR>\n");
    }
hTableEnd();

if (ivList != NULL)
    {
    /* Put up another section with sample rows. */
    webNewSection("Sample Rows");
    hTableStart();

    /* Print field names as column headers for example */
    hPrintf("<TR>");
    int colIx = 0;
    for (col = as->columnList; col != NULL; col = col->next)
	{
	hPrintf("<TH>%s</TH>", col->name);
	++colIx;
	}
    for (; colIx < colCount; ++colIx)
	hPrintf("<TH>column%d</TH>", colIx+1);
    hPrintf("</TR>\n");

    /* Print sample lines. */
    struct bigBedInterval *iv;
    for (iv=ivList; iv != NULL; iv = iv->next)
	{
	bigBedIntervalToRow(iv, chromList->name, startBuf, endBuf, row, bbi->fieldCount);
	hPrintf("<TR>");
	for (colIx=0; colIx<colCount; ++colIx)
	    {
	    writeHtmlCell(row[colIx]);
	    }
	hPrintf("</TR>\n");
	}
    hTableEnd();
    }
printTrackHtml(tdb);

/* Clean up and go home. */
/* NOTE(review): chromList and as are not explicitly freed here — verify
 * whether bbiFileClose owns them or this is a (small, per-CGI) leak. */
lmCleanup(&lm);
bbiFileClose(&bbi);
freeMem(fileName);
hFreeConn(&conn);
}
static int wigOutRegion(char *table, struct sqlConnection *conn,
	struct region *region, int maxOut,
	enum wigOutputType wigOutType,
	struct wigAsciiData **data, int spanConstraint)
/* Write out wig data in region.  Write up to maxOut elements.
 * Returns number of elements written.  For wigDataNoPrint nothing is
 * printed; results are instead appended to *data (when non-NULL). */
{
int linesOut = 0;
char splitTableOrFileName[HDB_MAX_TABLE_STRING];
struct customTrack *ct = NULL;
boolean isCustom = FALSE;
boolean hasConstraint = FALSE;
struct wiggleDataStream *wds = NULL;
unsigned long long valuesMatched = 0;
int operations = wigFetchAscii;
char *dataConstraint;
double ll = 0.0;
double ul = 0.0;
char *table2 = NULL;
struct bed *intersectBedList = NULL;

/* Choose the fetch operation matching the requested output form. */
switch (wigOutType)
    {
    case wigOutBed:
	operations = wigFetchBed;
	break;
    default:
    case wigDataNoPrint:
    case wigOutData:
	operations = wigFetchAscii;
	break;
    };

WIG_INIT;	/* ct, isCustom, hasConstraint, wds and table2 are set here */
if (hasConstraint)
    freeMem(dataConstraint);	/* been cloned into wds */

/* Bound output size and restrict the stream to the requested window. */
wds->setMaxOutput(wds, maxOut);
wds->setChromConstraint(wds, region->chrom);
wds->setPositionConstraint(wds, region->start, region->end);

/* When intersecting with a second table, fetch its bed list first. */
if (table2)
    intersectBedList = bedTable2(conn, region, table2);

if (isCustom)
    {
    if (ct->dbTrack)
	{
	/* Database-backed custom track: honor an explicit span
	 * constraint, otherwise pick the minimum span in range. */
	if (spanConstraint)
	    wds->setSpanConstraint(wds,spanConstraint);
	else
	    {
	    struct sqlConnection *trashConn = hAllocConn(CUSTOM_TRASH);
	    struct trackDb *tdb = findTdbForTable(database, curTrack, table, ctLookupName);
	    unsigned span = minSpan(trashConn, splitTableOrFileName,
		region->chrom, region->start, region->end, cart, tdb);
	    wds->setSpanConstraint(wds, span);
	    hFreeConn(&trashConn);
	    }
	valuesMatched = getWigglePossibleIntersection(wds, region,
	    CUSTOM_TRASH, table2, &intersectBedList, splitTableOrFileName,
		operations);
	}
    else
	/* File-backed custom track: NULL database means read from file. */
	valuesMatched = getWigglePossibleIntersection(wds, region, NULL,
	    table2, &intersectBedList, splitTableOrFileName, operations);
    }
else
    {
    if (hFindSplitTable(database, region->chrom, table,
	splitTableOrFileName, sizeof splitTableOrFileName, NULL))
	{
	/* XXX TBD, watch for a span limit coming in as an SQL filter */
	if (intersectBedList)
	    {
	    struct trackDb *tdb = findTdbForTable(database, curTrack, table, ctLookupName);
	    unsigned span;
	    span = minSpan(conn, splitTableOrFileName, region->chrom,
		region->start, region->end, cart, tdb);
	    wds->setSpanConstraint(wds, span);
	    }
	else if (spanConstraint)
	    wds->setSpanConstraint(wds,spanConstraint);

	valuesMatched = getWigglePossibleIntersection(wds, region, database,
	    table2, &intersectBedList, splitTableOrFileName, operations);
	}
    }

switch (wigOutType)
    {
    case wigDataNoPrint:
	if (data)
	    {
	    if (*data != NULL)	/* no exercise of this function yet */
		{
		/* data not null, add to existing list: move the caller's
		 * elements onto the stream so sortResults covers both. */
		struct wigAsciiData *asciiData;
		struct wigAsciiData *next;
		for (asciiData = *data; asciiData; asciiData = next)
		    {
		    next = asciiData->next;
		    slAddHead(&wds->ascii, asciiData);
		    }
		}
	    wds->sortResults(wds);
	    *data = wds->ascii;	/* moving the list to *data */
	    wds->ascii = NULL;	/* gone as far as wds is concerned */
	    }
	linesOut = valuesMatched;
	break;
    case wigOutBed:
	linesOut = wds->bedOut(wds, "stdout", TRUE);	/* TRUE == sort output */
	break;
    default:
    case wigOutData:
	linesOut = wds->asciiOut(wds, database, "stdout", TRUE, FALSE);
	break;	/* TRUE == sort output, FALSE == not raw data out */
    };

wiggleDataStreamFree(&wds);

return linesOut;
}	/* static int wigOutRegion() */