void lineFileExpandBuf(struct lineFile *lf, int newSize)
/* Grow the line file's input buffer to newSize bytes, preserving the
 * bytes already read.  newSize must be strictly larger than the current
 * buffer size. */
{
assert(newSize > lf->bufSize);
lf->bufSize = newSize;
lf->buf = needMoreMem(lf->buf, lf->bytesInBuf, newSize);
}
static unsigned expandHeader(struct gbFa *fa)
/* Expand or initially allocate header memory, return new capacity. */
{
unsigned newSize = 2*fa->headerCap;
/* Doubling a zero capacity would stay zero forever, so the promised
 * initial allocation needs an explicit starting size. */
if (newSize == 0)
    newSize = 256;
fa->headerBuf = needMoreMem(fa->headerBuf, fa->headerCap, newSize);
fa->headerCap = newSize;
return fa->headerCap;
}
static unsigned expandSeq(struct gbFa *fa)
/* Expand or initially allocate seq memory, return new capacity. */
{
unsigned newSize = 2*fa->seqCap;
/* Doubling a zero capacity would stay zero forever, so the promised
 * initial allocation needs an explicit starting size. */
if (newSize == 0)
    newSize = 256;
fa->seqBuf = needMoreMem(fa->seqBuf, fa->seqCap, newSize);
fa->seqCap = newSize;
return fa->seqCap;
}
void chainStitchId(char *inChain, char *outChain) /* chainStitchId - Join chain fragments with the same chain ID into a single chain per ID. */ { struct lineFile *lf = lineFileOpen(inChain, TRUE); struct chain *chain = NULL, *chainList = NULL; FILE *f = mustOpen(outChain, "w"); int idArrLen = 64 * 1024 * 1024; struct chain **idArr = needLargeZeroedMem(idArrLen * sizeof(struct chain *)); int i=0; /* Build up an array of chains, indexed by IDs. Agglomerate chains with same * ID as we go. */ while ((chain = chainRead(lf)) != NULL) { while (chain->id >= idArrLen) { idArr = needMoreMem(idArr, idArrLen, idArrLen*2*sizeof(idArr[0])); idArrLen *= 2; } if (idArr[chain->id] == NULL) idArr[chain->id] = chain; else { tackOnFrag(idArr[chain->id], chain); chainFree(&chain); } } lineFileClose(&lf); /* Clean up each agglomerated chain and add to head of list (but step * backwards so the resulting list is in order by chain id). */ for (i = idArrLen-1; i >= 0; i--) { chain = idArr[i]; if (chain != NULL) { slSort(&(chain->blockList), cBlockCmpTarget); slAddHead(&chainList, chain); } } /* Ordering by original chain id gets us most of the way to sorting by * score, but not all the way: sort and finally write out the chains. */ slSort(&chainList, chainCmpScore); for (chain = chainList; chain != NULL; chain = chain->next) { chainWrite(chain, f); /* could free here, but program is about to end so why waste the time. */ } carefulClose(&f); }
static struct phyloTree *newEdge(struct phyloTree *parent, struct phyloTree *child) { parent->numEdges++; if (parent->numEdges > parent->allocedEdges) { int oldSize = parent->allocedEdges * sizeof (struct phyloTree *); int newSize; parent->allocedEdges += 5; newSize = parent->allocedEdges * sizeof (struct phyloTree *); parent->edges = needMoreMem(parent->edges, oldSize, newSize); } child->parent = parent; return parent->edges[parent->numEdges -1 ] = child; }
static struct phyloTree *newEdge(struct phyloTree *parent, struct phyloTree *child) { parent->numEdges++; if (parent->numEdges > parent->allocedEdges) { int oldSize = parent->allocedEdges * sizeof (struct phyloTree *); int newSize; parent->allocedEdges += 5; newSize = parent->allocedEdges * sizeof (struct phyloTree *); parent->edges = needMoreMem(parent->edges, oldSize, newSize); } if (!child) errAbort("unexpected error: child is null in phyloTree.c::newEdge()"); child->parent = parent; return parent->edges[parent->numEdges -1 ] = child; }
static void pslArrayInsert(unsigned **destArray, int numDestBlocks, unsigned *srcArray, int numSrcBlocks, int insertIdx)
/* Grow one of the psl arrays and move contents down
 * to make room, and copy new entries */
{
int total = numDestBlocks + numSrcBlocks;
int i;

*destArray = needMoreMem(*destArray, numDestBlocks*sizeof(unsigned),
			 total*sizeof(unsigned));

/* Shift the tail of the destination upward, walking backwards so
 * overlapping entries are not clobbered. */
for (i = numDestBlocks-1; i >= insertIdx; i--)
    {
    assert(i+numSrcBlocks < total);
    (*destArray)[i+numSrcBlocks] = (*destArray)[i];
    }

/* Drop the new entries into the gap just opened up. */
for (i = 0; i < numSrcBlocks; i++)
    {
    assert(i+insertIdx < total);
    (*destArray)[i+insertIdx] = srcArray[i];
    }
}
void bgOutWrite(struct bgOut *out, char *chrom, int start, int end, double val) /* Store data to output stream buffering and maybe collapsing a tiny bit. */ { if (clNoCollapse || !out->gotData || start != out->end || val != out->val || !sameString(chrom, out->chrom)) { bgOutFlush(out); int len = strlen(chrom) + 1; if (len > out->chromAlloc) { out->chromAlloc = len; out->chrom = needMoreMem(out->chrom, 0, len); } strcpy(out->chrom, chrom); out->start = start; out->end = end; out->val = val; out->gotData = TRUE; } else { out->end = end; } }
struct sage *loadSageTags(char *fileName, int numExps)
/* Load sage tags from tab-separated fileName (uniId, description, tag).
 * Consecutive rows sharing a uniId are collected onto one sage record.
 * numExps is passed through to createNewSage.  Returns the list of
 * records (most recently started group first). */
{
struct sage *sgList=NULL, *sg=NULL;
char *words[3];
struct lineFile *lf = lineFileOpen(fileName, TRUE);
while(lineFileNextRow(lf, words, 3))
    {
    if(sg == NULL || sg->uni != atoi(words[0]))
	{
	if(sg != NULL)
	    slSafeAddHead(&sgList, sg);
	sg = createNewSage(numExps);
	sg->uni = atoi(words[0]);
	snprintf(sg->gb, sizeof(sg->gb), "unknown");
	snprintf(sg->gi, sizeof(sg->gi), "unknown"); /* was sizeof(sg->gb): copy-paste typo */
	sg->description = cloneString(words[1]);
	sg->numTags = 1;
	assert(strlen(words[2]) <= 10);	/* tags are stored in 11-byte slots */
	sg->tags = needMem(sizeof(char*) * 1);
	sg->tags[0] = needMem(sizeof(char) * 11);
	strcpy(sg->tags[0], words[2]);
	}
    else
	{
	sg->tags = needMoreMem(sg->tags, (sg->numTags*sizeof(char*)),
			       ((sg->numTags+1)*sizeof(char*)));
	sg->tags[sg->numTags] = needMem(sizeof(char) * 11);
	strcpy(sg->tags[sg->numTags], words[2]);
	sg->numTags++;
	}
    }
/* The loop only adds a record when the NEXT group starts, so the final
 * group was silently dropped before: add it here. */
if (sg != NULL)
    slSafeAddHead(&sgList, sg);
lineFileClose(&lf);	/* was leaked */
return(sgList);
}
static void dyStringExpandBuf(struct dyString *ds, int newSize)
/* Expand buffer to new size. */
{
int liveBytes = ds->stringSize + 1;	/* current contents plus NUL */
ds->string = needMoreMem(ds->string, liveBytes, newSize + 1);
ds->bufSize = newSize;
}
static void filterBed(struct track *tg, struct linkedFeatures **pLfList) /* Apply filters if any to mRNA linked features. */ { struct linkedFeatures *lf, *next, *newList = NULL, *oldList = NULL; struct mrnaUiData *mud = tg->extraUiData; struct mrnaFilter *fil; char *type; boolean anyFilter = FALSE; boolean colorIx = 0; boolean isExclude = FALSE; boolean andLogic = TRUE; if (*pLfList == NULL || mud == NULL) return; /* First make a quick pass through to see if we actually have * to do the filter. */ for (fil = mud->filterList; fil != NULL; fil = fil->next) { fil->pattern = cartUsualStringClosestToHome(cart, tg->tdb, FALSE, fil->suffix, ""); if (fil->pattern[0] != 0) anyFilter = TRUE; } if (!anyFilter) return; type = cartUsualStringClosestToHome(cart, tg->tdb, FALSE, mud->filterTypeSuffix, "red"); if (sameString(type, "exclude")) isExclude = TRUE; else if (sameString(type, "include")) isExclude = FALSE; else colorIx = getFilterColor(type, MG_BLACK); type = cartUsualStringClosestToHome(cart, tg->tdb, FALSE, mud->logicTypeSuffix, "and"); andLogic = sameString(type, "and"); /* Make a pass though each filter, and start setting up search for * those that have some text. */ for (fil = mud->filterList; fil != NULL; fil = fil->next) { if (fil->pattern[0] != 0) // Already retrieved above. fil->hash = newHash(10); } /* Scan tables id/name tables to build up hash of matching id's. 
*/ for (fil = mud->filterList; fil != NULL; fil = fil->next) { struct hash *hash = fil->hash; int wordIx, wordCount; char *words[128]; if (hash != NULL) { boolean anyWild; char *dupPat = cloneString(fil->pattern); wordCount = chopLine(dupPat, words); for (wordIx=0; wordIx <wordCount; ++wordIx) { char *pattern = cloneString(words[wordIx]); if (lastChar(pattern) != '*') { int len = strlen(pattern)+1; pattern = needMoreMem(pattern, len, len+1); pattern[len-1] = '*'; } anyWild = (strchr(pattern, '*') != NULL || strchr(pattern, '?') != NULL); touppers(pattern); for(lf = *pLfList; lf != NULL; lf=lf->next) { char copy[SMALLBUF]; boolean gotMatch; safef(copy, sizeof(copy), "%s", lf->name); touppers(copy); if (anyWild) gotMatch = wildMatch(pattern, copy); else gotMatch = sameString(pattern, copy); if (gotMatch) { hashAdd(hash, lf->name, NULL); } } freez(&pattern); } freez(&dupPat); } } /* Scan through linked features coloring and or including/excluding ones that * match filter. */ for (lf = *pLfList; lf != NULL; lf = next) { boolean passed = andLogic; next = lf->next; for (fil = mud->filterList; fil != NULL; fil = fil->next) { if (fil->hash != NULL) { if (hashLookup(fil->hash, lf->name) == NULL) { if (andLogic) passed = FALSE; } else { if (!andLogic) passed = TRUE; } } } if (passed ^ isExclude) { slAddHead(&newList, lf); if (colorIx > 0) lf->filterColor = colorIx; } else { slAddHead(&oldList, lf); } } slReverse(&newList); slReverse(&oldList); if (colorIx > 0) { /* Draw stuff that passes filter first in full mode, last in dense. */ if (tg->visibility == tvDense) { newList = slCat(oldList, newList); } else { newList = slCat(newList, oldList); } } *pLfList = newList; tg->limitedVisSet = FALSE; /* Need to recalculate this after filtering. */ /* Free up hashes, etc. */ for (fil = mud->filterList; fil != NULL; fil = fil->next) { hashFree(&fil->hash); } }
boolean qaFastReadNext(struct lineFile *lf, UBYTE **retQ, int *retSize, char **retName)
/* Read in next QA entry as fast as we can. Return FALSE at EOF.
 * The returned QA info and name will be overwritten by the next call
 * to this function. */
{
char *line, *words[256], *s = NULL;
static char name[256];
static int bufSize = 0;	/* quality buffer persists across calls */
static UBYTE *buf;
int bufIx = 0;
int lineSize, wordCount;
int i;

initShortNum();

/* Read to first non-space line. Complain if it doesn't start with '>' */
while (lineFileNext(lf, &line, &lineSize))
    {
    wordCount = chopLine(line, words);
    if (wordCount != 0)
	{
	s = words[0];
	if (s[0] != '>')
	    errAbort("Expecting '>' line %d of %s", lf->lineIx, lf->fileName);
	if (s[1] != 0)
	    s += 1;	/* name glued onto '>' */
	else
	    {
	    if (wordCount == 1)
		errAbort("Expecting '>name' line %d of %s", lf->lineIx, lf->fileName);
	    else
		s = words[1];
	    }
	break;
	}
    }
if (s == NULL)
    return FALSE;
strncpy(name, s, sizeof(name));
name[sizeof(name)-1] = '\0';	/* strncpy leaves no NUL when s fills the buffer */

/* Read numbers until next '>' line or end of file. */
while (lineFileNext(lf, &line, &lineSize))
    {
    if (line[0] == '>')
	{
	lineFileReuse(lf);	/* belongs to the next record */
	break;
	}
    wordCount = chopLine(line, words);
    for (i=0; i<wordCount; ++i)
	{
	if (bufIx >= bufSize)
	    {
	    /* Double the buffer, starting at 16k on first use. */
	    int newSize = bufSize + bufSize;
	    if (newSize == 0)
		newSize = 16*1024;
	    buf = needMoreMem(buf, bufSize, newSize);
	    bufSize = newSize;
	    }
	buf[bufIx++] = readShortNum(words[i], lf);
	}
    }
*retQ = buf;
*retSize = bufIx;
*retName = name;
return TRUE;
}
char *tokenizerNext(struct tokenizer *tkz) /* Return token's next string (also available as tkz->string) or * NULL at EOF. */ { char *start, *end; char c, *s; int size; if (tkz->reuse) { tkz->reuse = FALSE; return tkz->string; } for (;;) /* Skip over white space and comments. */ { int lineSize; s = start = skipLeadingSpaces(tkz->linePt); if ((c = start[0]) != 0) { if (tkz->uncommentC && c == '/') { if (start[1] == '/') ; /* Keep going in loop effectively ignoring rest of line. */ else if (start[1] == '*') { start += 2; for (;;) { char *end = stringIn("*/", start); if (end != NULL) { tkz->linePt = end+2; break; } if (!lineFileNext(tkz->lf, &tkz->curLine, &lineSize)) errAbort("End of file (%s) in comment", tokenizerFileName(tkz)); start = tkz->curLine; } continue; } else break; } else if (tkz->uncommentShell && c == '#') ; /* Keep going in loop effectively ignoring rest of line. */ else break; /* Got something real. */ } if (!lineFileNext(tkz->lf, &tkz->curLine, &lineSize)) { tkz->eof = TRUE; return NULL; } tkz->linePt = tkz->curLine; } if (isalnum(c) || (c == '_')) { for (;;) { s++; if (!(isalnum(*s) || (*s == '_'))) break; } end = s; } else if (c == '"' || c == '\'') { char quot = c; if (tkz->leaveQuotes) start = s++; else start = ++s; for (;;) { c = *s; if (c == quot) { if (s[-1] == '\\') { if (s >= start+2 && s[-2] == '\\') break; } else break; } else if (c == 0) { break; } ++s; } end = s; if (c != 0) ++s; if (tkz->leaveQuotes) end += 1; } else { end = ++s; } tkz->linePt = s; size = end - start; if (size >= tkz->sAlloc) { tkz->sAlloc = size+128; tkz->string = needMoreMem(tkz->string, 0, tkz->sAlloc); } memcpy(tkz->string, start, size); tkz->string[size] = 0; return tkz->string; }