static struct trackHub *fetchHub(struct hubConnectStatus *hubStatus, char **errorMessage) { struct errCatch *errCatch = errCatchNew(); struct trackHub *tHub = NULL; boolean gotWarning = FALSE; char *url = hubStatus->hubUrl; char hubName[64]; safef(hubName, sizeof(hubName), "hub_%d", hubStatus->id); if (errCatchStart(errCatch)) tHub = trackHubOpen(url, cloneString(hubName)); // open hub errCatchEnd(errCatch); if (errCatch->gotError) { gotWarning = TRUE; *errorMessage = cloneString(errCatch->message->string); } errCatchFree(&errCatch); if (gotWarning) { return NULL; } return tHub; }
static void getSubmittedFile(struct sqlConnection *conn, struct edwFile *bf, char *submitDir, char *submitUrl, int submitId) /* We know the submission, we know what the file is supposed to look like. Fetch it. * If things go badly catch the error, attach it to the submission record, and then * keep throwing. */ { struct errCatch *errCatch = errCatchNew(); if (errCatchStart(errCatch)) { if (freeSpaceOnFileSystem(edwRootDir) < 2*bf->size) errAbort("No space left in warehouse!!"); int hostId=0, submitDirId = 0; int fd = edwOpenAndRecordInDir(conn, submitDir, bf->submitFileName, submitUrl, &hostId, &submitDirId); int fileId = edwFileFetch(conn, bf, fd, submitUrl, submitId, submitDirId, hostId); close(fd); edwAddQaJob(conn, fileId); tellSubscribers(conn, submitDir, bf->submitFileName, fileId); } errCatchEnd(errCatch); if (errCatch->gotError) { handleSubmitError(conn, submitId, errCatch->message->string); /* The handleSubmitError will keep on throwing. */ } errCatchFree(&errCatch); }
void doVcfDetailsCore(struct trackDb *tdb, char *fileOrUrl, boolean isTabix)
/* Show item details using fileOrUrl. */
{
genericHeader(tdb, NULL);
int itemStart = cartInt(cart, "o");
int itemEnd = cartInt(cart, "t");
int vcfMaxErr = -1;     // no cap on parse-error count
struct vcfFile *vcff = NULL;
/* protect against temporary network or parsing error */
struct errCatch *errCatch = errCatchNew();
if (errCatchStart(errCatch))
    {
    vcff = isTabix ? vcfTabixFileMayOpen(fileOrUrl, seqName, itemStart, itemEnd, vcfMaxErr, -1)
                   : vcfFileMayOpen(fileOrUrl, seqName, itemStart, itemEnd, vcfMaxErr, -1, TRUE);
    }
errCatchEnd(errCatch);
if (errCatch->gotError && isNotEmpty(errCatch->message->string))
    warn("%s", errCatch->message->string);
errCatchFree(&errCatch);
if (vcff == NULL)
    printf("Sorry, unable to open %s<BR>\n", fileOrUrl);
else
    {
    struct vcfRecord *rec;
    for (rec = vcff->records; rec != NULL; rec = rec->next)
        if (rec->chromStart == itemStart && rec->chromEnd == itemEnd) // in pgSnp mode, don't get name
            vcfRecordDetails(tdb, rec);
    }
printTrackHtml(tdb);
}
int hubCheckGenome(struct trackHub *hub, struct trackHubGenome *genome, struct trackHubCheckOptions *options, struct dyString *errors) /* Check out genome within hub. */ { struct errCatch *errCatch = errCatchNew(); struct trackDb *tdbList = NULL; int retVal = 0; if (errCatchStart(errCatch)) { tdbList = trackHubTracksForGenome(hub, genome); tdbList = trackDbLinkUpGenerations(tdbList); tdbList = trackDbPolishAfterLinkup(tdbList, genome->name); trackHubPolishTrackNames(hub, tdbList); } errCatchEnd(errCatch); if (errCatch->gotError) { retVal = 1; dyStringPrintf(errors, "%s", errCatch->message->string); } if (errCatch->gotWarning && !errCatch->gotError) dyStringPrintf(errors, "%s", errCatch->message->string); errCatchFree(&errCatch); verbose(2, "%d tracks in %s\n", slCount(tdbList), genome->name); struct trackDb *tdb; for (tdb = tdbList; tdb != NULL; tdb = tdb->next) { retVal |= hubCheckTrack(hub, genome, tdb, options, errors); } return retVal; }
int hubCheckTrack(struct trackHub *hub, struct trackHubGenome *genome, struct trackDb *tdb, struct trackHubCheckOptions *options, struct dyString *errors) /* Check track settings and optionally, files */ { int retVal = 0; if (options->checkSettings && options->settings) { //verbose(3, "Found %d settings to check to spec\n", slCount(settings)); verbose(3, "Checking track: %s\n", tdb->shortLabel); verbose(3, "Found %d settings to check to spec\n", hashNumEntries(tdb->settingsHash)); struct hashEl *hel; struct hashCookie cookie = hashFirst(tdb->settingsHash); while ((hel = hashNext(&cookie)) != NULL) retVal |= hubCheckTrackSetting(hub, tdb, hel->name, options, errors); /* TODO: ? also need to check settings not in this list (other tdb fields) */ } if (options->printMeta) { struct slPair *metaPairs = trackDbMetaPairs(tdb); if (metaPairs != NULL) { printf("%s\n", trackHubSkipHubName(tdb->track)); struct slPair *pair; for(pair = metaPairs; pair; pair = pair->next) { printf("\t%s : %s\n", pair->name, (char *)pair->val); } printf("\n"); } slPairFreeValsAndList(&metaPairs); } if (!options->checkFiles) return retVal; struct errCatch *errCatch = errCatchNew(); if (errCatchStart(errCatch)) { hubCheckBigDataUrl(hub, genome, tdb); } errCatchEnd(errCatch); if (errCatch->gotError) { retVal = 1; dyStringPrintf(errors, "%s", errCatch->message->string); } errCatchFree(&errCatch); return retVal; }
struct trackDb *hubCollectTracks( char *database, struct grp **pGroupList) /* Generate trackDb structures for all the tracks in attached hubs. * Make grp structures for each hub. Returned group list is reversed. */ { // return the cached copy if it exists static struct trackDb *hubTrackDbs; static struct grp *hubGroups; if (hubTrackDbs != NULL) { if (pGroupList != NULL) *pGroupList = hubGroups; return hubTrackDbs; } struct hubConnectStatus *hub, *hubList = hubConnectGetHubs(); struct trackDb *tdbList = NULL; for (hub = hubList; hub != NULL; hub = hub->next) { if (isEmpty(hub->errorMessage)) { /* error catching in so it won't just abort */ struct errCatch *errCatch = errCatchNew(); if (errCatchStart(errCatch)) { struct trackDb *thisList = hubAddTracks(hub, database); tdbList = slCat(tdbList, thisList); } errCatchEnd(errCatch); if (errCatch->gotError) { warn("%s", errCatch->message->string); hubUpdateStatus( errCatch->message->string, hub); } else { if (!trackHubDatabase(database)) { struct grp *grp = grpFromHub(hub); slAddHead(&hubGroups, grp); } hubUpdateStatus(NULL, hub); } errCatchFree(&errCatch); } } hubTrackDbs = tdbList; if (pGroupList != NULL) *pGroupList = hubGroups; return tdbList; }
static void gtfGroupToGenePred(struct gffFile *gtf, struct gffGroup *group, FILE *gpFh, FILE *infoFh) /* convert one gtf group to a genePred */ { unsigned optFields = (clGenePredExt ? genePredAllFlds : 0); struct errCatch *errCatch = errCatchNew(); if (errCatchStart(errCatch)) { struct genePred *gp = genePredFromGroupedGtf(gtf, group, group->name, optFields, clGxfOptions); if (gp == NULL) { if (!clIgnoreGroupsWithoutExons) { char *msg = "no exons defined for group %s, feature %s (perhaps try -ignoreGroupsWithoutExons)"; if (clAllErrors) { fprintf(stderr, msg, group->name, group->lineList->feature); fputc('\n', stderr); badGroupCount++; } else errAbort(msg, group->name, group->lineList->feature); } } else { genePredTabOut(gp, gpFh); genePredFree(&gp); } } errCatchEnd(errCatch); if (errCatch->gotError) { // drop trailing newline in caught message if (endsWith(errCatch->message->string, "\n")) dyStringResize(errCatch->message, dyStringLen(errCatch->message)-1); if (clAllErrors) { fprintf(stderr, "%s\n", errCatch->message->string); badGroupCount++; } else errAbort("%s", errCatch->message->string); } else { if (infoFh != NULL) writeInfo(infoFh, group); } errCatchFree(&errCatch); }
struct htmlPage *htmlPageForwardedNoAbort(char *url, struct htmlCookie *cookies)
/* Try and get an HTML page.  Print warning and return NULL if there's a problem. */
{
struct htmlPage *result = NULL;
struct errCatch *catcher = errCatchNew();
if (errCatchStart(catcher))
    result = htmlPageForwarded(url, cookies);
errCatchEnd(catcher);
if (catcher->gotError)
    warn("%s", catcher->message->string);
errCatchFree(&catcher);
return result;
}
static void vcfTabixLoadItems(struct track *tg) /* Load items in window from VCF file using its tabix index file. */ { char *fileOrUrl = NULL; /* Figure out url or file name. */ if (tg->parallelLoading) { /* do not use mysql during parallel-fetch load */ fileOrUrl = trackDbSetting(tg->tdb, "bigDataUrl"); } else { struct sqlConnection *conn = hAllocConnTrack(database, tg->tdb); fileOrUrl = bbiNameFromSettingOrTableChrom(tg->tdb, conn, tg->table, chromName); hFreeConn(&conn); } if (isEmpty(fileOrUrl)) return; int vcfMaxErr = -1; struct vcfFile *vcff = NULL; boolean hapClustEnabled = cartOrTdbBoolean(cart, tg->tdb, VCF_HAP_ENABLED_VAR, TRUE); /* protect against temporary network error */ struct errCatch *errCatch = errCatchNew(); if (errCatchStart(errCatch)) { vcff = vcfTabixFileMayOpen(fileOrUrl, chromName, winStart, winEnd, vcfMaxErr, -1); if (vcff != NULL) { filterRecords(vcff, tg->tdb); if (hapClustEnabled && vcff->genotypeCount > 1 && vcff->genotypeCount < 3000 && (tg->visibility == tvPack || tg->visibility == tvSquish)) vcfHapClusterOverloadMethods(tg, vcff); else { tg->items = vcfFileToPgSnp(vcff, tg->tdb); // pgSnp bases coloring/display decision on count of items: tg->customInt = slCount(tg->items); } // Don't vcfFileFree here -- we are using its string pointers! } } errCatchEnd(errCatch); if (errCatch->gotError || vcff == NULL) { if (isNotEmpty(errCatch->message->string)) tg->networkErrMsg = cloneString(errCatch->message->string); tg->drawItems = bigDrawWarning; tg->totalHeight = bigWarnTotalHeight; } errCatchFree(&errCatch); }
struct bigBedInterval *bigBedSelectRange(struct track *track, char *chrom, int start, int end, struct lm *lm) /* Return list of intervals in range. */ { struct bigBedInterval *result = NULL; /* protect against temporary network error */ struct errCatch *errCatch = errCatchNew(); if (errCatchStart(errCatch)) { struct bbiFile *bbi = fetchBbiForTrack(track); int maxItems = min(BIGBEDMAXIMUMITEMS, maximumTrackItems(track)); // do not allow it to exceed BIGBEDMAXIMUMITEMS for bigBed result = bigBedIntervalQuery(bbi, chrom, start, end, maxItems + 1, lm); if (slCount(result) > maxItems) { track->limitedVis = tvDense; track->limitedVisSet = TRUE; result = NULL; AllocArray(track->summary, insideWidth); if (bigBedSummaryArrayExtended(bbi, chrom, start, end, insideWidth, track->summary)) { char *denseCoverage = trackDbSettingClosestToHome(track->tdb, "denseCoverage"); if (denseCoverage != NULL) { double endVal = atof(denseCoverage); if (endVal <= 0) { AllocVar(track->sumAll); *track->sumAll = bbiTotalSummary(bbi); } } } else freez(&track->summary); } bbiFileClose(&bbi); track->bbiFile = NULL; } errCatchEnd(errCatch); if (errCatch->gotError) { track->networkErrMsg = cloneString(errCatch->message->string); track->drawItems = bigDrawWarning; track->totalHeight = bigWarnTotalHeight; result = NULL; } errCatchFree(&errCatch); return result; }
static void vcfLoadItems(struct track *tg) /* Load items in window from VCF file. */ { int vcfMaxErr = -1; struct vcfFile *vcff = NULL; boolean hapClustEnabled = cartOrTdbBoolean(cart, tg->tdb, VCF_HAP_ENABLED_VAR, TRUE); char *table = tg->table; struct customTrack *ct = tg->customPt; struct sqlConnection *conn; if (ct == NULL) conn = hAllocConnTrack(database, tg->tdb); else { conn = hAllocConn(CUSTOM_TRASH); table = ct->dbTableName; } char *vcfFile = bbiNameFromSettingOrTable(tg->tdb, conn, table); hFreeConn(&conn); /* protect against parse error */ struct errCatch *errCatch = errCatchNew(); if (errCatchStart(errCatch)) { vcff = vcfFileMayOpen(vcfFile, chromName, winStart, winEnd, vcfMaxErr, -1, TRUE); if (vcff != NULL) { filterRecords(vcff, tg->tdb); if (hapClustEnabled && vcff->genotypeCount > 1 && vcff->genotypeCount < 3000 && (tg->visibility == tvPack || tg->visibility == tvSquish)) vcfHapClusterOverloadMethods(tg, vcff); else { tg->items = vcfFileToPgSnp(vcff, tg->tdb); // pgSnp bases coloring/display decision on count of items: tg->customInt = slCount(tg->items); } // Don't vcfFileFree here -- we are using its string pointers! } } errCatchEnd(errCatch); if (errCatch->gotError || vcff == NULL) { if (isNotEmpty(errCatch->message->string)) tg->networkErrMsg = cloneString(errCatch->message->string); tg->drawItems = bigDrawWarning; tg->totalHeight = bigWarnTotalHeight; } errCatchFree(&errCatch); }
int edwOpenAndRecordInDir(struct sqlConnection *conn, char *submitDir, char *submitFile, char *url, int *retHostId, int *retDirId) /* Return a low level read socket handle on URL if possible. Consult and * update the edwHost and edwDir tables to help log and troubleshoot remote * problems. The url parameter should be just a concatenation of submitDir and * submitFile. */ { /* Wrap routine to open network file in errCatch and remember whether it works. */ struct errCatch *errCatch = errCatchNew(); int sd = -1; boolean success = TRUE; if (errCatchStart(errCatch)) { sd = netUrlMustOpenPastHeader(url); } errCatchEnd(errCatch); if (errCatch->gotError) { success = FALSE; warn("Error: %s", trimSpaces(errCatch->message->string)); } /* Parse url into pieces */ struct netParsedUrl npu; ZeroVar(&npu); netParseUrl(url, &npu); char urlDir[PATH_LEN], urlFileName[PATH_LEN], urlExtension[PATH_LEN]; splitPath(npu.file, urlDir, urlFileName, urlExtension); /* Record success of open attempt in host and submitDir tables. */ int hostId = edwGetHost(conn, npu.host); recordIntoHistory(conn, hostId, "edwHost", success); int submitDirId = edwGetSubmitDir(conn, hostId, submitDir); recordIntoHistory(conn, submitDirId, "edwSubmitDir", success); /* Finish up error processing, bailing out of further processing if there was an error. */ errCatchFree(&errCatch); if (!success) noWarnAbort(); /* Update optional return variables and return socket to read from. */ if (retHostId != NULL) *retHostId = hostId; if (retDirId != NULL) *retDirId = submitDirId; return sd; }
static char *maybeGetDescriptionText(char *db)
/* Slurp the description.html file for db into a string (if possible, don't die if
 * we can't read it) and return it.  Returns NULL when the file is missing or
 * unreadable.  Caller owns the returned string. */
{
struct errCatch *errCatch = errCatchNew();
char *descText = NULL;
if (errCatchStart(errCatch))
    {
    char *htmlPath = hHtmlPath(db);
    if (isNotEmpty(htmlPath))
        descText = udcFileReadAll(htmlPath, NULL, 0, NULL);
    }
errCatchEnd(errCatch);
// Just ignore errors for now.
errCatchFree(&errCatch);   // fix: errCatch was previously leaked (never freed)
return descText;
}
void cartJsonExecute(struct cartJson *cj)
/* Get commands from cgi, print Content-type, execute commands, print results as JSON. */
{
cartJsonPushErrHandlers();
puts("Content-Type:text/javascript\n");
// Initialize response JSON object:
jsonWriteObjectStart(cj->jw, NULL);
// Always send back hgsid:
jsonWriteString(cj->jw, cartSessionVarName(), cartSessionId(cj->cart));
char *commandJson = cgiOptionalString(CARTJSON_COMMAND);
if (commandJson)
    {
    // Parse and dispatch each command inside an errCatch so an errAbort in a
    // handler becomes an "error" member of the JSON response instead of a crash.
    struct errCatch *errCatch = errCatchNew();
    if (errCatchStart(errCatch))
        {
        struct jsonElement *commandObj = jsonParse(commandJson);
        struct hash *commandHash = jsonObjectVal(commandObj, "commandObj");
        // change* commands need to go first!  Really we need an ordered map type here...
        // for now, just make a list and sort to put change commands at the front.
        struct slPair *commandList = NULL, *cmd;
        struct hashCookie cookie = hashFirst(commandHash);
        struct hashEl *hel;
        while ((hel = hashNext(&cookie)) != NULL)
            slAddHead(&commandList, slPairNew(hel->name, hel->val));
        slSort(&commandList, commandCmp);
        for (cmd = commandList; cmd != NULL; cmd = cmd->next)
            doOneCommand(cj, cmd->name, (struct jsonElement *)cmd->val);
        }
    errCatchEnd(errCatch);
    if (errCatch->gotError)
        {
        // Unwind any partially written JSON structure back to the top level
        // so the error member lands in a well-formed object.
        jsonWritePopToLevel(cj->jw, 1);
        //#*** TODO: move jsonStringEscape inside jsonWriteString
        char *encoded = jsonStringEscape(errCatch->message->string);
        jsonWriteString(cj->jw, "error", encoded);
        }
    errCatchFree(&errCatch);
    }
cartJsonPrintWarnings(cj->jw);
jsonWriteObjectEnd(cj->jw);
puts(cj->jw->dy->string);
cartJsonPopErrHandlers();
}
int trackHubCrawl(char *hubUrl) /* Crawl a track data hub and output strings useful in a search */ { struct errCatch *errCatch = errCatchNew(); struct trackHub *hub = NULL; int retVal = 0; if (errCatchStart(errCatch)) { hub = trackHubOpen(hubUrl, "hub_0"); } errCatchEnd(errCatch); if (errCatch->gotError) { retVal = 1; fprintf(stderr, "%s\n", errCatch->message->string); } errCatchFree(&errCatch); if (hub == NULL) return 1; FILE *searchFp =stdout; struct trackHubGenome *genomeList = hub->genomeList; for(; genomeList ; genomeList = genomeList->next) fprintf(searchFp, "%s\t%s\n",hub->url, trackHubSkipHubName(genomeList->name)); fprintf(searchFp, "%s\t%s\t%s\n",hub->url, hub->shortLabel, hub->longLabel); if (hub->descriptionUrl != NULL) { char *html = netReadTextFileIfExists(hub->descriptionUrl); char *stripHtml =htmlTextStripTags(html); strSwapChar(stripHtml, '\n', ' '); strSwapChar(stripHtml, '\t', ' '); strSwapChar(stripHtml, '\015', ' '); strSwapChar(stripHtml, ')', ' '); strSwapChar(stripHtml, '(', ' '); strSwapChar(stripHtml, '[', ' '); strSwapChar(stripHtml, ']', ' '); fprintf(searchFp, "%s\t%s\n",hub->url, stripHtml); } trackHubClose(&hub); return retVal; }
static void tryHubOpen(unsigned id) /* try to open hub, leaks trackHub structure */ { /* try opening this again to reset error */ struct sqlConnection *conn = hConnectCentral(); struct errCatch *errCatch = errCatchNew(); struct hubConnectStatus *hub = NULL; if (errCatchStart(errCatch)) hub = hubConnectStatusForId(conn, id); errCatchEnd(errCatch); if (errCatch->gotError) hubUpdateStatus( errCatch->message->string, NULL); else hubUpdateStatus(NULL, hub); errCatchFree(&errCatch); hDisconnectCentral(&conn); }
boolean errCatchFinish(struct errCatch **pErrCatch)
/* Finish up error catching.  Report error if there is a
 * problem and return FALSE.  If no problem return TRUE.
 * This handles errCatchEnd and errCatchFree. */
{
struct errCatch *errCatch = *pErrCatch;
if (errCatch == NULL)
    return TRUE;
errCatchEnd(errCatch);
boolean ok = !errCatch->gotError;
if (!ok)
    warn("%s", errCatch->message->string);
errCatchFree(pErrCatch);
return ok;
}
int oneHubTrackSettings(char *hubUrl, struct hash *totals) /* Read hub trackDb files, noting settings used */ { struct trackHub *hub = NULL; struct errCatch *errCatch = errCatchNew(); if (errCatchStart(errCatch)) hub = trackHubOpen(hubUrl, "hub_0"); errCatchEnd(errCatch); errCatchFree(&errCatch); if (hub == NULL) return 1; printf("%s (%s)\n", hubUrl, hub->shortLabel); struct trackHubGenome *genome; struct hash *counts; if (totals) counts = totals; else counts = newHash(0); struct hashEl *el; for (genome = hub->genomeList; genome != NULL; genome = genome->next) { struct trackDb *tdb, *tdbs = trackHubTracksForGenome(hub, genome); for (tdb = tdbs; tdb != NULL; tdb = tdb->next) { struct hashCookie cookie = hashFirst(trackDbHashSettings(tdb)); verbose(2, " track: %s\n", tdb->shortLabel); while ((el = hashNext(&cookie)) != NULL) { int count = hashIntValDefault(counts, el->name, 0); count++; hashReplace(counts, el->name, intToPt(count)); } } } if (!totals) printCounts(counts); trackHubClose(&hub); return 0; }
boolean doPcr(struct pcrServer *server, struct targetPcrServer *targetServer,
              char *fPrimer, char *rPrimer,
              int maxSize, int minPerfect, int minGood, boolean flipReverse)
/* Do the PCR, and show results. */
{
boolean ok = FALSE;
hgBotDelay();
if (flipReverse)
    reverseComplement(rPrimer, strlen(rPrimer));
struct errCatch *errCatch = errCatchNew();
if (errCatchStart(errCatch))
    {
    struct gfPcrInput *gpi;
    AllocVar(gpi);
    gpi->fPrimer = fPrimer;
    gpi->rPrimer = rPrimer;
    if (server != NULL)
        doQuery(server, gpi, maxSize, minPerfect, minGood);
    if (targetServer != NULL)
        doTargetQuery(targetServer, gpi, maxSize, minPerfect, minGood);
    ok = TRUE;
    }
errCatchEnd(errCatch);
if (errCatch->gotError)
    warn("%s", errCatch->message->string);
errCatchFree(&errCatch);
if (flipReverse)
    reverseComplement(rPrimer, strlen(rPrimer));  // undo flip so caller's buffer is unchanged
webNewSection("Primer Melting Temperatures");
printf("<TT>");
printf("<B>Forward:</B> %4.1f C %s<BR>\n", oligoTm(fPrimer, 50.0, 50.0), fPrimer);
printf("<B>Reverse:</B> %4.1f C %s<BR>\n", oligoTm(rPrimer, 50.0, 50.0), rPrimer);
printf("</TT>");
printf("The temperature calculations are done assuming 50 mM salt and 50 nM annealing "
       "oligo concentration. The code to calculate the melting temp comes from "
       "<A HREF=\"http://frodo.wi.mit.edu/primer3/input.htm\" target=_blank>"
       "Primer3</A>.");
return ok;
}
static void checkTrackDbs(struct hubConnectStatus *hubList) { struct hubConnectStatus *hub = hubList; for(; hub; hub = hub->next) { struct errCatch *errCatch = errCatchNew(); if (errCatchStart(errCatch)) { hubAddTracks(hub, database); } errCatchEnd(errCatch); if (errCatch->gotError) { hub->errorMessage = cloneString(errCatch->message->string); hubUpdateStatus( errCatch->message->string, hub); } else hubUpdateStatus(NULL, hub); } }
struct qaStatus *qaPageGet(char *url, struct htmlPage **retPage)
/* Get info on given url, (and return page if retPage non-null). */
{
long startTime = clock1000();
struct htmlPage *page = NULL;
struct errCatch *errCatch = errCatchNew();
if (errCatchStart(errCatch))
    {
    page = htmlPageGet(url);
    htmlPageValidateOrAbort(page);
    }
else
    {
    /* Control lands here after an errAbort; drop the half-built page. */
    htmlPageFree(&page);
    }
errCatchEnd(errCatch);
struct qaStatus *qs = qaStatusOnPage(errCatch, page, startTime, retPage);
errCatchFree(&errCatch);
return qs;
}
bigWig_t * bigwig_load(const char * filename, const char * udc_dir) { bigWig_t * bigwig = NULL; struct errCatch * err; /* set cache */ if (udc_dir != NULL) udcSetDefaultDir((char*) udc_dir); /* setup error management & try to open file */ err = errCatchNew(); if (errCatchStart(err)) bigwig = bigWigFileOpen((char*)filename); errCatchEnd(err); if (err->gotError) { fprintf(stderr, "error: %s\n", err->message->string); errCatchFree(&err); return NULL; } errCatchFree(&err); return bigwig; }
struct qaStatus *qaPageFromForm(struct htmlPage *origPage, struct htmlForm *form,
    char *buttonName, char *buttonVal, struct htmlPage **retPage)
/* Get update to form based on pressing a button. */
{
long startTime = clock1000();
struct htmlPage *page = NULL;
struct errCatch *errCatch = errCatchNew();
if (errCatchStart(errCatch))
    {
    page = htmlPageFromForm(origPage, form, buttonName, buttonVal);
    htmlPageValidateOrAbort(page);
    }
else
    {
    /* Control lands here after an errAbort; drop the half-built page. */
    htmlPageFree(&page);
    }
errCatchEnd(errCatch);
struct qaStatus *qs = qaStatusOnPage(errCatch, page, startTime, retPage);
errCatchFree(&errCatch);
return qs;
}
int trackHubCheck(char *hubUrl, struct trackHubCheckOptions *options, struct dyString *errors) /* Check a track data hub for integrity. Put errors in dyString. * return 0 if hub has no errors, 1 otherwise * if options->checkTracks is TRUE, check remote files of individual tracks */ { struct errCatch *errCatch = errCatchNew(); struct trackHub *hub = NULL; int retVal = 0; if (errCatchStart(errCatch)) { hub = trackHubOpen(hubUrl, "hub_0"); } errCatchEnd(errCatch); if (errCatch->gotError) { retVal = 1; dyStringPrintf(errors, "%s\n", errCatch->message->string); } if (errCatch->gotWarning && !errCatch->gotError) dyStringPrintf(errors, "%s", errCatch->message->string); errCatchFree(&errCatch); if (hub == NULL) return 1; if (options->checkSettings) retVal |= hubSettingsCheckInit(hub, options, errors); struct trackHubGenome *genome; for (genome = hub->genomeList; genome != NULL; genome = genome->next) { retVal |= hubCheckGenome(hub, genome, options, errors); } trackHubClose(&hub); return retVal; }
int hubPublicAdd(char *table, char *url) /* hubPublicAdd -- add url to hubPublic table */ { struct errCatch *errCatch = errCatchNew(); boolean gotWarning = FALSE; struct trackHub *tHub = NULL; int dbCount = 0; if (errCatchStart(errCatch)) tHub = trackHubOpen(url, "hub_1"); errCatchEnd(errCatch); if (errCatch->gotError) { gotWarning = TRUE; warn("%s", errCatch->message->string); } errCatchFree(&errCatch); if (gotWarning) return 1; struct hashEl *hel; struct hashCookie cookie = hashFirst(tHub->genomeHash); struct dyString *dy = newDyString(1024); while ((hel = hashNext(&cookie)) != NULL) { dbCount++; dyStringPrintf(dy, "%s,", trackHubSkipHubName(hel->name)); } printf("insert into %s (hubUrl,descriptionUrl,shortLabel,longLabel,registrationTime,dbCount,dbList) values (\"%s\",\"%s\", \"%s\", \"%s\", now(),%d, \"%s\");\n", table, url, tHub->descriptionUrl, tHub->shortLabel, tHub->longLabel, dbCount, dy->string); return 0; }
void edwSubmit(char *submitUrl, char *email)
/* edwSubmit - Submit URL with validated.txt to warehouse.  Downloads the manifest,
 * records it in the database, then fetches each listed file that is not already
 * present.  Errors inside the main errCatch are recorded on the submit row and
 * rethrown by handleSubmitError. */
{
/* Parse out url a little into submitDir and submitFile */
char *lastSlash = strrchr(submitUrl, '/');
if (lastSlash == NULL)
    errAbort("%s is not a valid URL - it has no '/' in it.", submitUrl);
char *submitFile = lastSlash+1;
int submitDirSize = submitFile - submitUrl;   // directory part includes the trailing '/'
char submitDir[submitDirSize+1];
memcpy(submitDir, submitUrl, submitDirSize);
submitDir[submitDirSize] = 0;  // Add trailing zero

/* Make sure user has access. */
struct sqlConnection *conn = edwConnectReadWrite();
struct edwUser *user = edwMustGetUserFromEmail(conn, email);
int userId = user->id;

/* See if we are already running on same submission.  If so council patience and quit. */
notOverlappingSelf(conn, submitUrl);

/* Make a submit record. */
int submitId = makeNewEmptySubmitRecord(conn, submitUrl, userId);

/* The next errCatch block will fill these in if all goes well. */
struct submitFileRow *sfrList = NULL, *oldList = NULL, *newList = NULL;
int oldCount = 0;
long long oldBytes = 0, newBytes = 0, byteCount = 0;

/* Start catching errors from here and writing them in submitId.  If we don't
 * throw we'll end up having a list of all files in the submit in sfrList. */
struct errCatch *errCatch = errCatchNew();
char query[1024];
if (errCatchStart(errCatch))
    {
    /* Make sure they got a bit of space, enough for a reasonable submit file.
     * We do this here just because we can make error message more informative. */
    long long diskFreeSpace = freeSpaceOnFileSystem(edwRootDir);
    if (diskFreeSpace < 4*1024*1024)
        errAbort("No space left in warehouse!");

    /* Open remote submission file.  This is most likely where we will fail. */
    int hostId=0, submitDirId = 0;
    long long startUploadTime = edwNow();
    int remoteFd = edwOpenAndRecordInDir(conn, submitDir, submitFile, submitUrl,
        &hostId, &submitDirId);

    /* Copy to local temp file. */
    char tempSubmitFile[PATH_LEN];
    fetchFdToTempFile(remoteFd, tempSubmitFile);
    mustCloseFd(&remoteFd);
    long long endUploadTime = edwNow();

    /* Calculate MD5 sum, and see if we already have such a file. */
    char *md5 = md5HexForFile(tempSubmitFile);
    int fileId = findFileGivenMd5AndSubmitDir(conn, md5, submitDirId);

    /* If we already have it, then delete temp file, otherwise put file in file table. */
    char submitLocalPath[PATH_LEN];
    if (fileId != 0)
        {
        remove(tempSubmitFile);
        char submitRelativePath[PATH_LEN];
        sqlSafef(query, sizeof(query), "select edwFileName from edwFile where id=%d", fileId);
        sqlNeedQuickQuery(conn, query, submitRelativePath, sizeof(submitRelativePath));
        safef(submitLocalPath, sizeof(submitLocalPath), "%s%s", edwRootDir, submitRelativePath);
        }
    else
        {
        /* Looks like it's the first time we've seen this submission file, so
         * save the file itself.  We'll get to the records inside the file in a bit. */
        fileId = makeNewEmptyFileRecord(conn, submitId, submitDirId, submitFile, 0);

        /* Get file/path names for submission file inside warehouse. */
        char edwFile[PATH_LEN];
        edwMakeFileNameAndPath(fileId, submitFile, edwFile, submitLocalPath);

        /* Move file to final resting place and get update time and size from local file system. */
        mustRename(tempSubmitFile, submitLocalPath);
        time_t updateTime = fileModTime(submitLocalPath);
        off_t size = fileSize(submitLocalPath);

        /* Update file table which now should be complete including updateTime. */
        sqlSafef(query, sizeof(query),
            "update edwFile set "
            " updateTime=%lld, size=%lld, md5='%s', edwFileName='%s',"
            " startUploadTime=%lld, endUploadTime=%lld"
            " where id=%u\n",
            (long long)updateTime, (long long)size, md5, edwFile,
            startUploadTime, endUploadTime, fileId);
        sqlUpdate(conn, query);
        }

    /* By now there is a submit file on the local file system.  We parse it out. */
    edwParseSubmitFile(conn, submitLocalPath, submitUrl, &sfrList);

    /* Save our progress so far to submit table. */
    sqlSafef(query, sizeof(query),
        "update edwSubmit"
        " set submitFileId=%lld, submitDirId=%lld, fileCount=%d where id=%d",
        (long long)fileId, (long long)submitDirId, slCount(sfrList), submitId);
    sqlUpdate(conn, query);

    /* Weed out files we already have. */
    struct submitFileRow *sfr, *sfrNext;
    for (sfr = sfrList; sfr != NULL; sfr = sfrNext)
        {
        sfrNext = sfr->next;
        struct edwFile *bf = sfr->file;
        // NOTE(review): this long long fileId shadows the outer int fileId of
        // the submit file above -- intentional-looking but worth confirming.
        long long fileId;
        if ((fileId = edwGotFile(conn, submitDir, bf->submitFileName, bf->md5, bf->size)) >= 0)
            {
            ++oldCount;
            oldBytes += bf->size;
            sfr->md5MatchFileId = fileId;
            slAddHead(&oldList, sfr);
            }
        else
            slAddHead(&newList, sfr);
        byteCount += bf->size;
        }
    sfrList = NULL;
    slReverse(&newList);
    slReverse(&oldList);

    /* Update database with oldFile count. */
    sqlSafef(query, sizeof(query),
        "update edwSubmit set oldFiles=%d,oldBytes=%lld,byteCount=%lld where id=%u",
        oldCount, oldBytes, byteCount, submitId);
    sqlUpdate(conn, query);

    /* Deal with old files.  This may throw an error.  We do it before downloading new
     * files since we want to fail fast if we are going to fail. */
    int updateCount = handleOldFileTags(conn, oldList, doUpdate);
    sqlSafef(query, sizeof(query),
        "update edwSubmit set metaChangeCount=%d where id=%u", updateCount, submitId);
    sqlUpdate(conn, query);
    }
errCatchEnd(errCatch);
if (errCatch->gotError)
    {
    handleSubmitError(conn, submitId, errCatch->message->string);
    /* The handleSubmitError will keep on throwing. */
    }
errCatchFree(&errCatch);

/* Go through list attempting to load the files if we don't already have them. */
struct submitFileRow *sfr;
for (sfr = newList; sfr != NULL; sfr = sfr->next)
    {
    if (edwSubmitShouldStop(conn, submitId))
        break;
    struct edwFile *bf = sfr->file;
    int submitUrlSize = strlen(submitDir) + strlen(bf->submitFileName) + 1;
    char submitUrl[submitUrlSize];
    safef(submitUrl, submitUrlSize, "%s%s", submitDir, bf->submitFileName);
    if (edwGotFile(conn, submitDir, bf->submitFileName, bf->md5, bf->size)<0)
        {
        /* We can't get a ID for this file.  There's two possible reasons -
         * either somebody is in the middle of fetching it or nobody's started.
         * If somebody is in the middle of fetching it, assume they died
         * if they took more than an hour, and start up another fetch.
         * So here we fetch unless somebody else is fetching recently. */
        if (edwGettingFile(conn, submitDir, bf->submitFileName) < 0)
            {
            verbose(1, "Fetching %s\n", bf->submitFileName);
            getSubmittedFile(conn, bf, submitDir, submitUrl, submitId);
            newBytes += bf->size;
            sqlSafef(query, sizeof(query),
                "update edwSubmit set newFiles=newFiles+1,newBytes=%lld where id=%d",
                newBytes, submitId);
            sqlUpdate(conn, query);
            }
        }
    else
        {
        verbose(2, "Already got %s\n", bf->submitFileName);
        sqlSafef(query, sizeof(query),
            "update edwSubmit set oldFiles=oldFiles+1 where id=%d", submitId);
        sqlUpdate(conn, query);
        }

    if (sfr->replacesFile != 0)
        {
        /* What happens when the replacement doesn't validate? */
        verbose(2, "Replacing %s with %s\n", sfr->replaces, bf->submitFileName);
        sqlSafef(query, sizeof(query),
            "update edwFile set replacedBy=%u, deprecated='%s' where id=%u",
            bf->id, sfr->replaceReason, sfr->replacesFile);
        sqlUpdate(conn, query);
        }
    }

/* If we made it here, update submit endUploadTime */
sqlSafef(query, sizeof(query),
    "update edwSubmit set endUploadTime=%lld where id=%d", edwNow(), submitId);
sqlUpdate(conn, query);

/* Get a real submission record and then set things up so mail user when all done. */
struct edwSubmit *submit = edwSubmitFromId(conn, submitId);
sqlDisconnect(&conn);  // We'll be waiting a while so free connection
waitForValidationAndSendEmail(submit, email);
}
void bamLoadItemsCore(struct track *tg, boolean isPaired)
/* Load BAM data into tg->items item list, unless zoomed out so far
 * that the data would just end up in dense mode and be super-slow.
 * In paired mode, reads are accumulated into a hash keyed per pair and
 * emitted as linkedFeaturesSeries; in single mode each read becomes an item.
 * Any error thrown inside the catch block (network, parsing, errAbort) is
 * converted into tg->networkErrMsg plus warning draw handlers, so the track
 * renders a warning instead of killing the CGI. */
{
/* protect against temporary network error */
struct errCatch *errCatch = errCatchNew();
if (errCatchStart(errCatch))
    {
    /* pairHash collects mate pairs; only needed in paired mode. */
    struct hash *pairHash = isPaired ? hashNew(18) : NULL;
    /* Display knobs from cart (user settings) falling back to trackDb defaults. */
    int minAliQual = atoi(cartOrTdbString(cart, tg->tdb, BAM_MIN_ALI_QUAL, BAM_MIN_ALI_QUAL_DEFAULT));
    char *colorMode = cartOrTdbString(cart, tg->tdb, BAM_COLOR_MODE, BAM_COLOR_MODE_DEFAULT);
    char *grayMode = cartOrTdbString(cart, tg->tdb, BAM_GRAY_MODE, BAM_GRAY_MODE_DEFAULT);
    char *userTag = cartOrTdbString(cart, tg->tdb, BAM_COLOR_TAG, BAM_COLOR_TAG_DEFAULT);
    /* Default shade ranges; trackDb settings may narrow/widen them. */
    int aliQualShadeMin = 0, aliQualShadeMax = 99, baseQualShadeMin = 0, baseQualShadeMax = 40;
    parseIntRangeSetting(tg->tdb, "aliQualRange", &aliQualShadeMin, &aliQualShadeMax);
    parseIntRangeSetting(tg->tdb, "baseQualRange", &baseQualShadeMin, &baseQualShadeMax);
    /* Bundle everything the per-read callbacks (addBam/addBamPaired) need. */
    struct bamTrackData btd = {tg, pairHash, minAliQual, colorMode, grayMode, userTag,
                               aliQualShadeMin, aliQualShadeMax, baseQualShadeMin, baseQualShadeMax};
    /* Resolve the BAM file: explicit bigDataUrl, else look it up from the table. */
    char *fileName = trackDbSetting(tg->tdb, "bigDataUrl");
    if (fileName == NULL)
        {
        if (tg->customPt)
            {
            /* Custom tracks must carry their own bigDataUrl; no table to fall back on. */
            errAbort("bamLoadItemsCore: can't find bigDataUrl for custom track %s", tg->track);
            }
        else
            {
            struct sqlConnection *conn = hAllocConnTrack(database, tg->tdb);
            fileName = bamFileNameFromTable(conn, tg->table, chromName);
            hFreeConn(&conn);
            }
        }
    /* hReplaceGbdb may rewrite a local /gbdb path to a URL; result is ours to free. */
    char *fileName2 = hReplaceGbdb(fileName);
    char posForBam[512];
    safef(posForBam, sizeof(posForBam), "%s:%d-%d", chromName, winStart, winEnd);
    /* cramRef/refUrl support CRAM reference lookup; NULL is fine for plain BAM. */
    char *cacheDir = cfgOption("cramRef");
    char *refUrl = trackDbSetting(tg->tdb, "refUrl");
    if (!isPaired)
        bamFetchPlus(fileName2, posForBam, addBam, &btd, NULL, refUrl, cacheDir);
    else
        {
        /* Widen the fetched window so mates just outside the view are still paired. */
        char *setting = trackDbSettingClosestToHomeOrDefault(tg->tdb, "pairSearchRange", "20000");
        int pairSearchRange = atoi(setting);
        if (pairSearchRange > 0)
            safef(posForBam, sizeof(posForBam), "%s:%d-%d", chromName,
                  max(0, winStart-pairSearchRange), winEnd+pairSearchRange);
        bamFetchPlus(fileName2, posForBam, addBamPaired, &btd, NULL, refUrl, cacheDir);
        /* Walk the pair hash and keep only features overlapping the visible window. */
        struct hashEl *hel;
        struct hashCookie cookie = hashFirst(btd.pairHash);
        while ((hel = hashNext(&cookie)) != NULL)
            {
            struct linkedFeatures *lf = hel->val;
            if (lf->start < winEnd && lf->end > winStart)
                slAddHead(&(tg->items), lfsFromLf(lf));
            }
        }
    freez(&fileName2);
    /* NOTE(review): pairHash itself is never freed here — presumably reclaimed with
     * the CGI's lifetime/memory pool; confirm before adding a hashFree. */
    if (tg->visibility != tvDense)
        {
        /* slAddHead built the list backwards; restore order, then sort per color mode. */
        slReverse(&(tg->items));
        if (isPaired)
            slSort(&(tg->items), linkedFeaturesSeriesCmp);
        else if (sameString(colorMode, BAM_COLOR_MODE_STRAND))
            slSort(&(tg->items), linkedFeaturesCmpOri);
        else if (sameString(colorMode, BAM_COLOR_MODE_GRAY) &&
                 sameString(grayMode, BAM_GRAY_MODE_ALI_QUAL))
            slSort(&(tg->items), linkedFeaturesCmpScore);
        else
            slSort(&(tg->items), linkedFeaturesCmpStart);
        if (slCount(tg->items) > MAX_ITEMS_FOR_MAPBOX)
            {
            // flag drawItems to make a mapBox for the whole track
            tg->customInt = 1;
            tg->mapItem = dontMapItem;
            }
        }
    }
errCatchEnd(errCatch);
if (errCatch->gotError)
    {
    /* Record the error and swap in warning-drawing handlers so the browser
     * shows a message in the track area instead of failing outright. */
    tg->networkErrMsg = cloneString(errCatch->message->string);
    tg->drawItems = bigDrawWarning;
    tg->totalHeight = bigWarnTotalHeight;
    }
errCatchFree(&errCatch);
}
int hubSettingsCheckInit(struct trackHub *hub, struct trackHubCheckOptions *options, struct dyString *errors)
/* Prepare options for checking this hub: adopt the hub's declared version and
 * support level, then load the setting specs for that version into a hash
 * (options->settings) and a suggestion list (options->suggest).
 * Appends any problems to errors; returns 0 on success, 1 if anything failed. */
{
int retVal = 0;
/* Hub's declared version wins only when the caller didn't force one. */
if (hub->version != NULL && options->version == NULL)
    options->version = hub->version;
struct trackHubSettingSpec *hubLevel = NULL;
int level = 0;
if (hub->version != NULL)
    {
    /* NOTE(review): AllocVar yields a zeroed spec, yet trackHubSettingLevel(hubLevel)
     * is called without ever assigning hubLevel->level from hub->level — and the
     * guard tests hub->version while the message prints hub->level. Looks like the
     * intent was: if (hub->level != NULL) { AllocVar(hubLevel);
     * hubLevel->level = hub->level; ... } — confirm against trackHubSettingLevel.
     * Also, hubLevel is never freed (leak, though small and one-shot). */
    AllocVar(hubLevel);
    if ((level = trackHubSettingLevel(hubLevel)) < 0)
        {
        dyStringPrintf(errors, "Unknown hub support level: %s. Defaulting to 'all'.\n", hub->level);
        retVal = 1;
        }
    else
        options->level = hub->level;
    }
verbose(2, "Checking hub '%s'", hub->longLabel);
if (options->level)
    verbose(2, " for compliance to level '%s' (use -settings to view)", options->level);
verbose(2, "\n");
/* Fetching the setting specs may hit the network; catch any errAbort. */
struct errCatch *errCatch = errCatchNew();
if (errCatchStart(errCatch))
    {
    /* make hash of settings for this version, saving in options */
    struct trackHubSettingSpec *setting, *settings = trackHubSettingsForVersion(options->specHost, options->version);
    options->settings = newHash(0);
    options->suggest = NULL;
    for (setting = settings; setting != NULL; setting = setting->next)
        {
        hashAdd(options->settings, setting->name, setting);
        slNameAddHead(&options->suggest, setting->name);
        }
    /* TODO: ? also need to check settings not in this list (other tdb fields) */
    // TODO: move extra file handling here (out of hubCheck)
    if (options->extra != NULL)
        {
        /* User-supplied extra settings also become suggestions. */
        struct hashCookie cookie = hashFirst(options->extra);
        struct hashEl *hel;
        while ((hel = hashNext(&cookie)) != NULL)
            slNameAddHead(&options->suggest, hel->name);
        }
    slNameSort(&options->suggest);
    verbose(3, "Suggest list has %d settings\n", slCount(options->suggest));
    }
errCatchEnd(errCatch);
if (errCatch->gotError)
    {
    /* Report the failure but keep going; caller decides what to do with retVal. */
    retVal = 1;
    dyStringPrintf(errors, "%s", errCatch->message->string);
    }
errCatchFree(&errCatch);
return retVal;
}
int hubPublicCheck(char *table) /* hubPublicCheck - checks that the labels in hubPublic match what is in the hub labels. */ { struct sqlConnection *conn = hConnectCentral(); char query[512]; bool hasDescriptionUrl = sqlColumnExists(conn, table, "descriptionUrl"); if (hasDescriptionUrl) sqlSafef(query, sizeof(query), "select hubUrl, shortLabel,longLabel,dbList,descriptionUrl from %s", table); else sqlSafef(query, sizeof(query), "select hubUrl, shortLabel,longLabel,dbList from %s", table); struct sqlResult *sr = sqlGetResult(conn, query); char **row; int differences = 0; while ((row = sqlNextRow(sr)) != NULL) { char *url = row[0], *shortLabel = row[1], *longLabel = row[2], *dbList = row[3], *descriptionUrl = row[4]; struct errCatch *errCatch = errCatchNew(); boolean gotWarning = FALSE; struct trackHub *tHub = NULL; if (errCatchStart(errCatch)) tHub = trackHubOpen(url, "hub_1"); errCatchEnd(errCatch); if (errCatch->gotError) { gotWarning = TRUE; warn("%s", errCatch->message->string); } errCatchFree(&errCatch); if (gotWarning) { continue; } if (!sameString(shortLabel, tHub->shortLabel)) { differences++; printf("update %s set shortLabel=\"%s\" where hubUrl=\"%s\";\n",table, tHub->shortLabel, url); } if (!sameString(longLabel, tHub->longLabel)) { differences++; printf("update %s set longLabel=\"%s\" where hubUrl=\"%s\";\n",table, tHub->longLabel, url); } struct hashCookie cookie = hashFirst(tHub->genomeHash); struct dyString *dy = newDyString(1024); struct hashEl *hel; while ((hel = hashNext(&cookie)) != NULL) dyStringPrintf(dy, "%s,", trackHubSkipHubName(hel->name)); if (!sameString(dy->string, dbList)) { differences++; printf("update %s set dbList=\"%s\" where hubUrl=\"%s\";\n",table, dy->string, url); } if (hasDescriptionUrl && !isEmpty(tHub->descriptionUrl) && ((descriptionUrl == NULL) || !sameString(descriptionUrl, tHub->descriptionUrl))) { differences++; printf("update %s set descriptionUrl=\"%s\" where hubUrl=\"%s\";\n",table, tHub->descriptionUrl, url); } 
trackHubClose(&tHub); } return differences; }
void cartJsonGetGroupedTrackDb(struct cartJson *cj, struct hash *paramHash) /* Translate trackDb list (only a subset of the fields) into JSON array of track group objects; * each group contains an array of track objects that may have subtracks. Send it in a wrapper * object that includes the database from which it was taken; it's possible that by the time * this reaches the client, the user might have switched to a new db. */ { struct jsonWrite *jw = cj->jw; struct trackDb *fullTrackList = NULL; struct grp *fullGroupList = NULL; struct errCatch *errCatch = errCatchNew(); if (errCatchStart(errCatch)) { cartTrackDbInit(cj->cart, &fullTrackList, &fullGroupList, /* useAccessControl=*/TRUE); } errCatchEnd(errCatch); if (errCatch->gotError) { warn("%s", errCatch->message->string); jsonWriteObjectStart(jw, "groupedTrackDb"); jsonWriteString(jw, "db", cartString(cj->cart, "db")); jsonWriteListStart(jw, "groupedTrackDb"); jsonWriteListEnd(jw); jsonWriteObjectEnd(jw); return; } errCatchFree(&errCatch); struct hash *groupedTrackRefList = hashTracksByGroup(fullTrackList); // If the optional param 'fields' is given, hash the field names that should be returned. 
char *fields = cartJsonOptionalParam(paramHash, "fields"); struct hash *fieldHash = hashFromCommaString(fields); char *excludeTypes = cartJsonOptionalParam(paramHash, "excludeTypes"); struct hash *excludeTypesHash = hashFromCommaString(excludeTypes); // Also check for optional parameter 'maxDepth': int maxDepth = -1; char *maxDepthStr = cartJsonOptionalParam(paramHash, "maxDepth"); if (isNotEmpty(maxDepthStr)) maxDepth = atoi(maxDepthStr); jsonWriteObjectStart(jw, "groupedTrackDb"); jsonWriteString(jw, "db", cartString(cj->cart, "db")); jsonWriteListStart(jw, "groupedTrackDb"); int nonEmptyGroupCount = 0; struct grp *grp; for (grp = fullGroupList; grp != NULL; grp = grp->next) { struct slRef *tdbRefList = hashFindVal(groupedTrackRefList, grp->name); if (writeGroupedTrack(jw, grp->name, grp->label, fieldHash, excludeTypesHash, maxDepth, tdbRefList)) { nonEmptyGroupCount++; } } if (nonEmptyGroupCount == 0) { // Catch-all for assembly hubs that don't declare groups for their tracks: add All Tracks struct slRef *allTracks = sortedAllTracks(fullTrackList); (void)writeGroupedTrack(jw, "allTracks", "All Tracks", fieldHash, excludeTypesHash, maxDepth, allTracks); } jsonWriteListEnd(jw); jsonWriteObjectEnd(jw); }