struct slName *thoseInHash(struct hash *wordHash, struct slName *list, boolean inverse)
/* Given a hash and a list, return the members of list that are also keys in the
 * hash.  If inverse is TRUE return instead the members NOT in the hash.
 * Result is a new list in the input's relative order; caller frees. */
{
struct slName *newList = NULL;
struct slName *cur;
for (cur = list; cur != NULL; cur = cur->next)
    {
    /* One lookup per element; the original did the same lookup twice. */
    boolean inHash = (hashLookup(wordHash, cur->name) != NULL);
    if (inHash != inverse)
        slNameAddHead(&newList, cur->name);
    }
slReverse(&newList);
return newList;
}
struct slName *vgImageFileOrganisms(struct sqlConnection *conn, int imageFile) /* Return unique alphabetical list of all organisms associated with image file. */ { struct hash *uniqHash = hashNew(0); struct slInt *imageList, *image; struct slName *orgList=NULL; imageList = visiGeneImagesForFile(conn, imageFile); for (image = imageList; image != NULL; image = image->next) { char *organism = visiGeneOrganism(conn, image->val); char *org = cloneString(shortOrgName(organism)); freeMem(organism); if (hashLookup(uniqHash, org)) freeMem(org); else { hashAdd(uniqHash, org, NULL); slNameAddHead(&orgList, org); } } slFreeList(&imageList); hashFree(&uniqHash); slSort(&orgList, slNameCmp); return orgList; }
static struct slName *getListFromCgapSageLibs(struct sqlConnection *conn, char *column, boolean returnIds, boolean distinct)
/* Return [unique] list of values from the given column of cgapSageLib, sorted
 * by that column.  If returnIds is TRUE, return the libId column instead
 * (second field of the result).  Caller frees the list. */
{
struct slName *list = NULL;
struct dyString *dy = dyStringNew(0);
char **row;
struct sqlResult *sr;
/* Consistency fix: build the entire SQL string via sqlDyStringPrintf; the
 * original mixed in plain dyStringAppend, bypassing jksql's safe-string
 * bookkeeping for parts of the query. */
sqlDyStringPrintf(dy, "select ");
if (distinct)
    sqlDyStringPrintf(dy, "distinct ");
sqlDyStringPrintf(dy, "%s", column);
if (returnIds)
    sqlDyStringPrintf(dy, ",libId");
sqlDyStringPrintf(dy, " from cgapSageLib order by %s", column);
sr = sqlGetResult(conn, dy->string);
while ((row = sqlNextRow(sr)) != NULL)
    {
    char *word = (returnIds) ? row[1] : row[0];
    slNameAddHead(&list, word);
    }
slReverse(&list);
sqlFreeResult(&sr);
dyStringFree(&dy);
return list;
}
void doGzippedSomethingToBigBed(struct encode2Manifest *mi, char *sourcePath, char *assembly, char *destDir, char *destFileName, char *bedConverter, char *tempNameRoot, struct slName **pTargetList, FILE *f, FILE *manF) /* Convert something that has a bed-converter program to bigBed. */ { char bigBedName[PATH_LEN]; safef(bigBedName, sizeof(bigBedName), "%s%s%s", destDir, destFileName, ".bigBed"); char tempBigBed[PATH_LEN]; safef(tempBigBed, sizeof(tempBigBed), "%s.tmp", bigBedName); char *tempBed = veryTempName(tempDir, tempNameRoot, ".bed"); char *sortedTempBed = veryTempName(tempDir, tempNameRoot, ".sorted"); fprintf(f, "%s: %s\n", bigBedName, sourcePath); fprintf(f, "\t%s %s %s\n", bedConverter, sourcePath, tempBed); fprintf(f, "\tsort -k1,1 -k2,2n %s > %s\n", tempBed, sortedTempBed); fprintf(f, "\trm %s\n", tempBed); char *clippedTempBed = veryTempName(tempDir, tempNameRoot, ".clipped"); fprintf(f, "\tbedClip %s %s/%s/chrom.sizes %s\n", sortedTempBed, dataDir, assembly, clippedTempBed); fprintf(f, "\trm %s\n", sortedTempBed); fprintf(f, "\tbedToBigBed %s %s/%s/chrom.sizes %s\n", clippedTempBed, dataDir, assembly, tempBigBed); fprintf(f, "\trm %s\n", clippedTempBed); fprintf(f, "\tmv %s %s\n", tempBigBed, bigBedName); slNameAddHead(pTargetList, bigBedName); /* Print out info about bigBed we made to new manifest files. */ char localFileName[PATH_LEN+8]; // a little extra for .bigBed safef(localFileName, PATH_LEN, "%s", mi->fileName); chopSuffix(localFileName); strcat(localFileName, ".bigBed"); mi->fileName = localFileName; encode2ManifestShortTabOut(mi, manF); }
struct slName *randomBamIds(char *table, struct sqlConnection *conn, int count) /* Return some semi-random qName based IDs from a BAM file. */ { /* Read 10000 items from bam file, or if they ask for a big list, then 4x what they ask for. */ char *fileName = bamFileName(table, conn, NULL); samfile_t *fh = bamOpen(fileName, NULL); struct lm *lm = lmInit(0); int orderedCount = count * 4; if (orderedCount < 10000) orderedCount = 10000; struct samAlignment *sam, *samList = bamReadNextSamAlignments(fh, orderedCount, lm); /* Shuffle list and extract qNames from first count of them. */ shuffleList(&samList); struct slName *randomIdList = NULL; int i; for (i=0, sam = samList; i<count && sam != NULL; ++i, sam = sam->next) slNameAddHead(&randomIdList, sam->qName); /* Clean up and go home. */ lmCleanup(&lm); bamClose(&fh); freez(&fileName); return randomIdList; }
struct slName *customPpCloneSkippedLines(struct customPp *cpp)
/* Return a clone of most recent nonempty skipped (comment/header) lines from cpp,
 * which will still have them. slFreeList when done. */
{
struct slName *skippedLines = NULL, *sl;
/* cpp->skippedLines is itself built with slNameAddHead (see customPpNextReal),
 * so it is stored most-recent-first.  Head-adding again while copying reverses
 * it back, so the returned clone is in original file order -- that is why no
 * slReverse is needed here. */
for (sl = cpp->skippedLines; sl != NULL; sl = sl->next)
    slNameAddHead(&skippedLines, sl->name);
return skippedLines;
}
void doGzippedGffToBigBed(struct encode2Manifest *mi, char *sourcePath, char *destPath, char *assembly, char *destDir, char *destFileName, struct slName **pTargetList, FILE *f, FILE *manF) /* Do both copy and conversion to bigBed. Also do some doctoring. */ { /* First handle the straight up copy. */ fprintf(f, "%s: %s\n", destPath, sourcePath); fprintf(f, "\tln -s %s %s\n", sourcePath, destPath); slNameAddHead(pTargetList, destPath); encode2ManifestShortTabOut(mi, manF); /* Now convert to big bed. */ char *tempNameRoot = "gff2bb"; char bigBedName[PATH_LEN]; safef(bigBedName, sizeof(bigBedName), "%s%s%s", destDir, destFileName, ".bigBed"); char tempBigBed[PATH_LEN]; safef(tempBigBed, sizeof(tempBigBed), "%s.tmp", bigBedName); char *fixedGff = veryTempName(tempDir, tempNameRoot, ".gff"); char *tempBed = veryTempName(tempDir, tempNameRoot, ".bed"); char *sortedTempBed = veryTempName(tempDir, tempNameRoot, ".sorted"); char *clippedTempBed = veryTempName(tempDir, tempNameRoot, ".clipped"); fprintf(f, "%s: %s\n", bigBedName, sourcePath); fprintf(f, "\tencode2GffDoctor %s %s\n", sourcePath, fixedGff); fprintf(f, "\tgffToBed %s %s\n", fixedGff, tempBed); fprintf(f, "\trm %s\n", fixedGff); fprintf(f, "\tsort -k1,1 -k2,2n %s > %s\n", tempBed, sortedTempBed); fprintf(f, "\trm %s\n", tempBed); fprintf(f, "\tbedClip %s %s/%s/chrom.sizes %s\n", sortedTempBed, dataDir, assembly, clippedTempBed); fprintf(f, "\trm %s\n", sortedTempBed); fprintf(f, "\tbedToBigBed %s %s/%s/chrom.sizes %s\n", clippedTempBed, dataDir, assembly, tempBigBed); fprintf(f, "\trm %s\n", clippedTempBed); fprintf(f, "\tmv %s %s\n", tempBigBed, bigBedName); slNameAddHead(pTargetList, bigBedName); /* Print out info about bigBed we made to new manifest files. 
*/ char localFileName[PATH_LEN+8]; // a little extra for .bigBed safef(localFileName, PATH_LEN, "%s", mi->fileName); chopSuffix(localFileName); strcat(localFileName, ".bigBed"); mi->fileName = localFileName; mi->format = "bigBed"; encode2ManifestShortTabOut(mi, manF); }
static struct slName* getExonFrames(char *table, struct sqlConnection *conn, struct bed *bedList)
/* get real exonFrames if they are available */
{
struct slName* list = NULL;
struct bed *bed;
for (bed = bedList; bed != NULL; bed = bed->next)
    {
    // be super specific, the same name may align to multiple locations
    // or even the same location with alternate splicing or exon structure.

    // convert bed block coordinates to exonStarts, exonEnds
    int i;
    struct dyString *exonStarts = newDyString(256);
    struct dyString *exonEnds = newDyString(256);
    for( i = 0 ; i < bed->blockCount; i++ )
	{
	int exonStart = bed->chromStart + bed->chromStarts[i];
	int exonEnd = exonStart + bed->blockSizes[i];
	dyStringPrintf(exonStarts, "%d,", exonStart);
	dyStringPrintf(exonEnds,   "%d,", exonEnd);
	}
    /* VLA sized to hold the fixed query text plus both coordinate lists. */
    char sql[4096+strlen(exonStarts->string)+strlen(exonEnds->string)];
    sqlSafef(sql, sizeof sql,
	"select exonFrames "
	"from %s where "
	"name = '%s' and "
	"chrom = '%s' and "
	"strand = '%c' and "
	"txStart = %d and "
	"txEnd = %d and "
	"cdsStart = %d and "
	"cdsEnd = %d and "
	"exonCount = %d and "
	"exonStarts = '%s' and "
	"exonEnds = '%s'"
	, table, bed->name, bed->chrom, bed->strand[0],
	bed->chromStart, bed->chromEnd,
	bed->thickStart, bed->thickEnd,
	bed->blockCount, exonStarts->string, exonEnds->string
	);
    /* NOTE(review): sqlQuickString returns NULL when no row matches; that NULL
     * is passed straight to slNameAddHead here.  Confirm whether the caller
     * guarantees a match for every bed, or whether a NULL guard is needed. */
    char *exonFrames = sqlQuickString(conn, sql);
    slNameAddHead(&list, exonFrames);
    dyStringFree(&exonStarts);
    dyStringFree(&exonEnds);
    }
slReverse(&list);
return list;
}
struct slName *bigBedListExtraIndexes(struct bbiFile *bbi) /* Return list of names of extra indexes beyond primary chrom:start-end one" */ { struct udcFile *udc = bbi->udc; boolean isSwapped = bbi->isSwapped; /* See if we have any extra indexes, and if so seek to there. */ bits64 offset = bbi->extraIndexListOffset; if (offset == 0) return NULL; udcSeek(udc, offset); /* Construct list of field that are being indexed. List is list of * field numbers within asObj. */ int i; struct slInt *intList = NULL, *intEl; for (i=0; i<bbi->extraIndexCount; ++i) { bits16 type,fieldCount; type = udcReadBits16(udc, isSwapped); fieldCount = udcReadBits16(udc, isSwapped); udcSeekCur(udc, sizeof(bits64)); // skip over fileOffset udcSeekCur(udc, 4); // skip over reserved bits if (fieldCount == 1) { bits16 fieldId = udcReadBits16(udc, isSwapped); udcSeekCur(udc, 2); // skip over reserved bits intEl = slIntNew(fieldId); slAddHead(&intList, intEl); } else { warn("Not yet understanding indexes on multiple fields at once."); internalErr(); } } /* Now have to make an asObject to find out name that corresponds to this field. */ struct asObject *as = bigBedAsOrDefault(bbi); /* Make list of field names out of list of field numbers */ struct slName *nameList = NULL; for (intEl = intList; intEl != NULL; intEl = intEl->next) { struct asColumn *col = slElementFromIx(as->columnList, intEl->val); if (col == NULL) { warn("Inconsistent bigBed file %s", bbi->fileName); internalErr(); } slNameAddHead(&nameList, col->name); } asObjectFree(&as); return nameList; }
struct slName *hashToList(struct hash *wordHash)
/* convert the hash back to a list. */
{
struct hashEl *el, *elList = hashElListHash(wordHash);
struct slName *result = NULL;
for (el = elList; el != NULL; el = el->next)
    slNameAddHead(&result, el->name);	/* collect each key */
slNameSort(&result);
hashElFreeList(&elList);
return result;
}
static void rPathsInDirAndSubdirs(char *dir, char *wildcard, struct slName **pList)
/* Recursively add directory contents that match wildcard (* for all) to list */
{
struct fileInfo *fiList = listDirX(dir, wildcard, TRUE);
struct fileInfo *fi;
for (fi = fiList; fi != NULL; fi = fi->next)
    {
    if (fi->isDir)
        rPathsInDirAndSubdirs(fi->name, wildcard, pList);	/* descend */
    else
        slNameAddHead(pList, fi->name);				/* record file */
    }
slFreeList(&fiList);
}
struct slName *subtractLists(struct slName *listA, struct slName *listB)
/* Subtract two lists. */
{
struct hash *bHash = hashList(listB);
struct slName *el, *result = NULL;
/* Keep each element of listA that has no counterpart in listB. */
for (el = listA; el != NULL; el = el->next)
    if (hashLookup(bHash, el->name) == NULL)
        slNameAddHead(&result, el->name);
freeHashAndVals(&bHash);
slReverse(&result);
return result;
}
void shuffleLines(char *in, char *out)
/* shuffleLines - Create a version of file with lines shuffled. */
{
struct lineFile *lf = lineFileOpen(in, TRUE);
FILE *f = mustOpen(out, "w");
struct slName *list = NULL, *el;
char *line;
while (lineFileNext(lf, &line, NULL))
    slNameAddHead(&list, line);
lineFileClose(&lf);	/* fix: input handle was leaked in the original */
shuffleList(&list);
for (el = list; el != NULL; el = el->next)
    fprintf(f, "%s\n", el->name);
carefulClose(&f);
slNameFreeList(&list);	/* fix: line list was leaked in the original */
}
int main(int argc, char *argv[])
/* Process command line. */
{
optionInit(&argc, argv, options);
if (argc < 4)
    usage();
/* Build the file list from argv[2]..argv[argc-1].  Walking backwards with
 * head-adds yields the list in command-line order without a final reverse. */
struct slName *fileList = NULL;
int i;
for (i = argc - 1; i >= 2; i--)
    slNameAddHead(&fileList, argv[i]);
sets(argv[1], fileList);
slNameFreeList(&fileList);
return 0;
}
static void rListFields(struct tagStanza *stanzaList, struct hash *uniq, struct slName **retList)
/* Recurse through stanzas adding tags we've never seen before to uniq hash and *retList */
{
struct tagStanza *stanza;
for (stanza = stanzaList; stanza != NULL; stanza = stanza->next)
    {
    struct slPair *tag;
    for (tag = stanza->tagList; tag != NULL; tag = tag->next)
        {
        if (hashLookup(uniq, tag->name) == NULL)
            {
            /* First time we see this tag name: remember it. */
            hashAdd(uniq, tag->name, NULL);
            slNameAddHead(retList, tag->name);
            }
        }
    rListFields(stanza->children, uniq, retList);
    }
}
static void rGetAllFields(struct tagStanza *list, struct hash *uniqHash, struct slName **pList)
/* Recursively add all fields in tag-storm to *pList, using uniqHash to avoid
 * duplicates. */
{
struct tagStanza *stanza;
for (stanza = list; stanza != NULL; stanza = stanza->next)
    {
    struct slPair *pair;
    for (pair = stanza->tagList; pair != NULL; pair = pair->next)
        {
        if (hashLookup(uniqHash, pair->name) == NULL)
            {
            hashAdd(uniqHash, pair->name, pair->val);
            slNameAddHead(pList, pair->name);
            }
        }
    /* Bug fix: recurse once per stanza, not once per tag.  The original had
     * this call inside the tag loop, which repeated the child traversal for
     * every tag and -- worse -- skipped children entirely whenever a stanza
     * had no tags of its own.  (Compare the correct sibling rListFields.) */
    rGetAllFields(stanza->children, uniqHash, pList);
    }
}
char *customPpNextReal(struct customPp *cpp)
/* Return next line that's nonempty, non-space and not a comment.
 * Save skipped comment lines to cpp->skippedLines. */
{
slFreeList(&cpp->skippedLines);
char *line;
while ((line = customPpNext(cpp)) != NULL)
    {
    char *firstNonSpace = skipLeadingSpaces(line);
    if (*firstNonSpace == '#')
        slNameAddHead(&cpp->skippedLines, line);	/* remember comment */
    else if (*firstNonSpace != 0)
        return line;					/* real content line */
    /* blank or whitespace-only lines are silently skipped */
    }
return NULL;
}
void tagStormWriteAsFlatTab(struct tagStorm *tagStorm, char *fileName, char *idTag, boolean withParent, int maxDepth)
/* Write tag storm flattening out hierarchy so kids have all of parents tags in .ra format */
{
FILE *f = mustOpen(fileName, "w");
struct slName *fieldList = getAllFields(tagStorm);
if (withParent && slNameFind(fieldList, "parent") == NULL)
    slNameAddHead(&fieldList, "parent");
/* Header line: '#' then tab-separated field names. */
fputc('#', f);
char *sep = "";
struct slName *field;
for (field = fieldList; field != NULL; field = field->next)
    {
    fprintf(f, "%s%s", sep, field->name);
    sep = "\t";
    }
fputc('\n', f);
rTsWriteAsFlatTab(tagStorm->forest, fieldList, f, idTag, withParent, maxDepth, 0);
carefulClose(&f);
}
static struct slName *hgFindSpecNameList(char *db)
/* Return the hgFindSpec table name(s) to use (based on trackDb name). */
{
struct slName *trackDbList = hTrackDbList();
struct slName *specNameList = NULL;
struct slName *tdbName;
for (tdbName = trackDbList; tdbName != NULL; tdbName = tdbName->next)
    {
    /* Derive the hgFindSpec table name from the trackDb table name. */
    char *specName = replaceChars(tdbName->name, "trackDb", "hgFindSpec");
    if (hTableExists(db, specName))
        slNameAddHead(&specNameList, specName);
    freez(&specName);
    }
if (specNameList == NULL)
    specNameList = slNameNew("hgFindSpec");	/* fall back to default table */
else
    slReverse(&specNameList);
return specNameList;
}
static struct slName *wrapUrlList(struct slName *labelList, struct slName *idList, char *url) /* Return list that substitutes id into url and wraps * url as a hyperlink around label. */ { struct slName *list = NULL, *label, *id; struct dyString *dy = dyStringNew(0); for (id = idList, label = labelList; id != NULL && label != NULL; id = id->next, label = label->next) { dyStringClear(dy); dyStringAppend(dy, "<A HREF=\""); dyStringPrintf(dy, url, id->name); dyStringAppend(dy, "\" target=_blank>"); dyStringAppend(dy, label->name); dyStringAppend(dy, "</A>"); slNameAddHead(&list, dy->string); } slReverse(&list); return list; }
struct slName *tagsInAny(struct meta *metaList) /* Return list of variables that are used in any node in list. */ { struct hash *tagHash = hashNew(6); struct slName *tag, *tagList = NULL; struct meta *meta; for (meta = metaList; meta != NULL; meta = meta->next) { struct metaTagVal *v; for (v = meta->tagList; v != NULL; v = v->next) { if (!hashLookup(tagHash, v->tag)) { tag = slNameAddHead(&tagList, v->tag); hashAdd(tagHash, tag->name, tag); } } } hashFree(&tagHash); return tagList; }
struct slName *varsInAnyNode(struct metaNode *nodeList) /* Return list of variables that are used in any node in list. */ { struct hash *varHash = hashNew(6); struct slName *var, *varList = NULL; struct metaNode *node; for (node = nodeList; node != NULL; node = node->next) { struct mdbVar *v; for (v = node->vars; v != NULL; v = v->next) { if (!hashLookup(varHash, v->var)) { var = slNameAddHead(&varList, v->var); hashAdd(varHash, var->name, var); } } } hashFree(&varHash); return varList; }
void dropDownAdvFilterControls(struct column *col, struct sqlConnection *conn) /* Print out controls for dropdown list filter. */ { struct sqlResult *sr; char **row; char query[256]; struct slName *list=NULL, *el; sqlSafef(query, sizeof(query), "select distinct %s from gisaidSubjInfo", col->name); sr = sqlGetResult(conn, query); while ((row = sqlNextRow(sr)) != NULL) { char *val = row[0]; if (col->remap) val = hashFindVal(col->remap,val); slNameAddHead(&list, val); } sqlFreeResult(&sr); slNameSort(&list); hPrintf("choose: "); hPrintf("<SELECT NAME=\"%s\"", "countVarName"); hPrintf(">\n"); hPrintf("<OPTION VALUE=\"\">"); for (el = list; el; el = el->next) { hPrintf("<OPTION VALUE=\"%s\"", el->name); if (sameString(el->name, "displayCountString")) hPrintf(" SELECTED"); hPrintf(">%s\n", el->name); } hPrintf("</SELECT>\n"); slFreeList(&list); }
struct slName *valsForVar(char *varName, struct taggedFile *tfList)
/* Return all values for given variable. */
{
struct hash *seenHash = hashNew(7);
struct slName *valList = NULL;
struct taggedFile *tf;
for (tf = tfList; tf != NULL; tf = tf->next)
    {
    char *val = metaTagValFindVal(tf->tagList, varName);
    if (val == NULL)
        continue;	/* this file doesn't set the variable */
    if (hashLookup(seenHash, val) == NULL)
        {
        hashAdd(seenHash, val, NULL);
        slNameAddHead(&valList, val);
        }
    }
hashFree(&seenHash);
slNameSort(&valList);
return valList;
}
void writeTrackDbTxt(struct sqlConnection *conn, struct eapGraph *eg, struct fullExperiment *expList, char *assembly, char *outRa) /* Write out a trackDb that represents the replicated hotspot experiments we have on the * given assembly */ { uglyf("%d in expList\n", slCount(expList)); /* Make up a list and hash of unique biosamples. */ struct hash *bioHash = hashNew(0); struct slName *bioList=NULL; struct fullExperiment *exp; for (exp = expList; exp != NULL; exp = exp->next) { char *name = exp->exp->biosample; if (!hashLookup(bioHash, name)) { slNameAddHead(&bioList, name); hashAdd(bioHash, name, NULL); } } slSort(&bioList, slNameCmp); writeTrackDb(conn, eg, expList, bioList, outRa); }
char *wordTreeString(struct wordTree *wt) /* Return something like '(a b c)' where c would be the value at wt itself, and * a and b would be gotten by following parents. */ { struct slName *list = NULL, *el; for (;wt != NULL; wt = wt->parent) { char *word = wt->info->word; if (!isEmpty(word)) // Avoid blank great grandparent slNameAddHead(&list, word); } struct dyString *dy = dyStringNew(0); dyStringAppendC(dy, '('); for (el = list; el != NULL; el = el->next) { dyStringPrintf(dy, "%s", el->name); if (el->next != NULL) dyStringAppendC(dy, ' '); } dyStringAppendC(dy, ')'); slFreeList(&list); return dyStringCannibalize(&dy); }
void showListOfFilterValues(struct column *col, struct sqlConnection *conn) /* Print out list of values availabe for filter. */ { struct sqlResult *sr; char **row; char query[256]; struct slName *list=NULL, *el; sqlSafef(query, sizeof(query), "select distinct %s from gisaidSubjInfo", col->name); sr = sqlGetResult(conn, query); while ((row = sqlNextRow(sr)) != NULL) { char *val = row[0]; if (col->remap) val = hashFindVal(col->remap,val); slNameAddHead(&list, val); } sqlFreeResult(&sr); slNameSort(&list); hPrintf("<BR>\n"); hPrintf("<B>Available Values:</B><BR>\n"); hPrintf("<TABLE>\n"); for (el = list; el; el = el->next) { hPrintf("<TR><TD>%s</TD></TR>\n", el->name); } hPrintf("</TABLE>\n"); slFreeList(&list); }
void cdwCheckDataset(char *outputFile) /* cdwCheckDataset - Look at all the submissions for a given dataset and print helpful stats.. */ { FILE *f = mustOpen(outputFile,"w"); struct sqlConnection *conn = sqlConnect("cdw"); char query[1024]; struct cdwSubmitDir *submitDir; // Get the id associated with the submission directory. if (gWorkingDir == NULL) { char cwd[1024]; getcwd(cwd, sizeof(cwd)); sqlSafef(query, sizeof(query), "select * from cdwSubmitDir where url = '%s';", cwd); submitDir = cdwSubmitDirLoadByQuery(conn, query); } else { sqlSafef(query, sizeof(query), "select * from cdwSubmitDir where url = '%s';", gWorkingDir); submitDir = cdwSubmitDirLoadByQuery(conn, query); } if (!submitDir) uglyAbort("There are no submissions associated with the submission directory provided."); // Get all cdwSubmit entries associated with the submission directory. sqlSafef(query, sizeof(query), "select * from cdwSubmit where submitDirId = '%i';", submitDir->id); struct cdwSubmit *submitList = cdwSubmitLoadByQuery(conn, query); struct cdwSubmit *submission; // Keep track of the # of submissions, meta files and mani files. int count = 0; long long int totalFiles = 0, totalBytes = 0; // Count the total files and bytes. struct slName *submitters = NULL, *user; // Keep track of the submitters. // Loop over all cdwSubmit entries and gather/print out stats. for (submission = submitList; ; submission = submission->next) { ++count; sqlSafef(query,sizeof(query), "select * from cdwUser where id = '%i';", (int)submission->userId); struct cdwUser *user = cdwUserLoadByQuery(conn,query); if(!slNameInList(submitters, user->email)) // If a submitter has not been seen add it. slNameAddHead(&submitters, user->email); // Print out submission stats. 
fprintf(f, "Sub #: %i | Sub id: %i | New files: %i | New bytes: %lld | Mani id: %i |", count, submission->id, submission->newFiles, submission->newBytes, submission->manifestFileId); fprintf(f, " Meta id: %i | Files with new meta data values: %i | User email: %s | Wrangler: %s\n", submission->metaFileId, submission->metaChangeCount, user->email, submission->wrangler); totalFiles += submission->newFiles; totalBytes += submission->newBytes; if (submission->next == NULL) break; } sqlSafef(query, sizeof(query), "select distinct metaFileId from cdwSubmit where submitDirId=%i", submitDir->id); fprintf(f,"Total submissions:\t%i\n", count); fprintf(f,"Total files:\t%lld\n",totalFiles); // Math magic for pretty printing the file size. long long int tBytes, gBytes, mBytes, kBytes, bytes; tBytes = totalBytes/1000000000000; gBytes = (totalBytes - tBytes * 1000000000000)/1000000000; mBytes = (totalBytes - tBytes * 1000000000000 - gBytes * 1000000000)/1000000; kBytes = (totalBytes - tBytes * 1000000000000 - gBytes * 1000000000 - mBytes * 1000000)/1000; bytes = totalBytes - tBytes * 1000000000000 - gBytes * 1000000000 - mBytes * 1000000 - kBytes * 1000; fprintf(f,"Total bytes:\t%lld T %lld G %lld M %lld K %lld B\n", tBytes, gBytes, mBytes, kBytes, bytes); fprintf(f,"Submitters: "); // Print out the submitters, ideally it will only be one person but in practice... Not so much. for (user = submitters; ; user = user->next) { fprintf(f,"%s", user->name); if (user->next == NULL) break; fprintf(f,", "); } fprintf(f,"\n"); sqlDisconnect(&conn); }
int hubSettingsCheckInit(struct trackHub *hub, struct trackHubCheckOptions *options, struct dyString *errors)
/* Initialize settings-checking state for a hub: resolve version/level, then
 * build options->settings (hash of specs) and options->suggest (sorted list of
 * setting names).  Appends messages to errors; returns non-zero on problems. */
{
int retVal = 0;

/* Let the hub's declared version win when the caller didn't force one. */
if (hub->version != NULL && options->version == NULL)
    options->version = hub->version;

struct trackHubSettingSpec *hubLevel = NULL;
int level = 0;
if (hub->version != NULL)
    {
    AllocVar(hubLevel);
    /* NOTE(review): hubLevel is freshly zeroed here, yet the error message
     * below reports hub->level -- it looks like hubLevel was meant to be
     * populated from hub->level before calling trackHubSettingLevel().
     * Confirm against upstream hubCheck.  Also 'level' is never read after
     * this block and hubLevel is never freed. */
    if ((level = trackHubSettingLevel(hubLevel)) < 0)
        {
        dyStringPrintf(errors, "Unknown hub support level: %s. Defaulting to 'all'.\n", hub->level);
        retVal = 1;
        }
    else
        options->level = hub->level;
    }
verbose(2, "Checking hub '%s'", hub->longLabel);
if (options->level)
    verbose(2, " for compliance to level '%s' (use -settings to view)", options->level);
verbose(2, "\n");

struct errCatch *errCatch = errCatchNew();
if (errCatchStart(errCatch))
    {
    /* make hash of settings for this version, saving in options */
    struct trackHubSettingSpec *setting, *settings =
        trackHubSettingsForVersion(options->specHost, options->version);
    options->settings = newHash(0);
    options->suggest = NULL;
    for (setting = settings; setting != NULL; setting = setting->next)
        {
        hashAdd(options->settings, setting->name, setting);
        slNameAddHead(&options->suggest, setting->name);
        }
    /* TODO: ? also need to check settings not in this list (other tdb fields) */
    // TODO: move extra file handling here (out of hubCheck)
    if (options->extra != NULL)
        {
        struct hashCookie cookie = hashFirst(options->extra);
        struct hashEl *hel;
        while ((hel = hashNext(&cookie)) != NULL)
            slNameAddHead(&options->suggest, hel->name);
        }
    slNameSort(&options->suggest);
    verbose(3, "Suggest list has %d settings\n", slCount(options->suggest));
    }
errCatchEnd(errCatch);
if (errCatch->gotError)
    {
    retVal = 1;
    dyStringPrintf(errors, "%s", errCatch->message->string);
    }
errCatchFree(&errCatch);
return retVal;
}
void verifyGreatAssemblies()
/* Check that the current database is one of the assemblies GREAT supports
 * (listed one per line in the greatData file); if not, emit a logging script
 * and abort with a message listing the supported assemblies. */
{
// First read in the assembly name and description information into name lists
struct slName* supportedAssemblies = NULL;
struct lineFile *lf = lineFileOpen(greatData, TRUE);
int fieldCount = 1;
char* row[fieldCount];
int wordCount;
while ((wordCount = lineFileChopTab(lf, row)) != 0)
    {
    if (wordCount != fieldCount)
        errAbort("The %s file is not properly formatted.\n", greatData);
    slNameAddHead(&supportedAssemblies, row[0]);
    }
lineFileClose(&lf);

boolean invalidAssembly = TRUE;
struct slName* currAssembly;
for (currAssembly = supportedAssemblies; currAssembly != NULL; currAssembly = currAssembly->next)
    {
    /* Every supported assembly must itself be active, even ones we don't match. */
    if (!hDbIsActive(currAssembly->name))
        {
        errAbort("Assembly %s in supported assembly file is not an active assembly.\n",
            currAssembly->name);
        }
    if (sameOk(database, currAssembly->name))
        {
        invalidAssembly = FALSE;
        break;
        }
    }

if (invalidAssembly)
    {
    /* List was head-added above, so reverse to present in file order. */
    slReverse(&supportedAssemblies);
    currAssembly = supportedAssemblies;
    struct dyString* dy = dyStringNew(0);
    addAssemblyToSupportedList(dy, currAssembly->name);
    currAssembly = currAssembly->next;
    while (currAssembly != NULL)
        {
        dyStringAppend(dy, ", ");
        if (currAssembly->next == NULL)
            dyStringAppend(dy, "and ");
        addAssemblyToSupportedList(dy, currAssembly->name);
        currAssembly = currAssembly->next;
        }
    /* Emit a small script that logs the rejected species to the GREAT server. */
    hPrintf("<script type='text/javascript'>\n");
    hPrintf("function logSpecies() {\n");
    hPrintf("try {\n");
    hPrintf("var r = new XMLHttpRequest();\n");
    hPrintf("r.open('GET', 'http://great.stanford.edu/public/cgi-bin/logSpecies.php?species=%s');\n", database);
    hPrintf("r.send(null);\n");
    hPrintf("} catch (err) { }\n");
    hPrintf("}\n");
    hPrintf("window.onload = logSpecies;\n");
    hPrintf("</script>\n");
    errAbort("GREAT only supports the %s assemblies."
        "\nPlease go back and ensure that one of those assemblies is chosen.",
        dyStringContents(dy));
    /* NOTE(review): errAbort in the kent library does not normally return, so
     * the two statements below appear unreachable -- if htmlClose() is needed
     * it probably belongs before the errAbort.  Confirm intent. */
    htmlClose();
    dyStringFree(&dy);
    }
slNameFreeList(&supportedAssemblies);
}