// GET / — renders the blog's query page. Parses the "q" query-string
// parameter into a filter, runs it against the repo, and streams a
// chunked HTML page of previews for the matching files.
// Returns 0 on success, a positive HTTP status code to report an error
// (403/500/501), or -1 if the request doesn't match this route.
// TODO: This is the most complicated function in the whole program.
// It's unbearable.
static int GET_query(BlogRef const blog, SLNSessionRef const session, HTTPConnectionRef const conn, HTTPMethod const method, strarg_t const URI, HTTPHeadersRef const headers) {
	if(HTTP_GET != method && HTTP_HEAD != method) return -1;
	strarg_t qs = NULL;
	if(0 != uripathcmp("/", URI, &qs)) return -1;
	if(HTTP_HEAD == method) return 501; // TODO

	str_t *query = NULL;
	str_t *query_HTMLSafe = NULL;
	str_t *parsed_HTMLSafe = NULL;
	SLNFilterRef filter = NULL;
	int rc;

	// Pull the raw query text out of the URL's query string and take
	// ownership of it (values[0] is nulled so QSValuesCleanup won't
	// free it out from under us).
	static strarg_t const fields[] = {
		"q",
	};
	str_t *values[numberof(fields)] = {};
	QSValuesParse(qs, values, fields, numberof(fields));
	query = values[0]; values[0] = NULL;
	// Fix: escape the query we just took ownership of. The old code
	// passed values[0], which had just been set to NULL, so the
	// template's {{query}} field was always empty.
	query_HTMLSafe = htmlenc(query);
	rc = SLNUserFilterParse(session, query, &filter);
	QSValuesCleanup(values, numberof(values));
	if(DB_EACCES == rc) {
		FREE(&query);
		FREE(&query_HTMLSafe);
		return 403;
	}
	// An unparseable query degrades to "everything visible".
	if(DB_EINVAL == rc) rc = SLNFilterCreate(session, SLNVisibleFilterType, &filter);
	if(rc < 0) {
		FREE(&query);
		FREE(&query_HTMLSafe);
		return 500;
	}

	// Pretty-print the parsed filter back to the user via an in-memory
	// stream over a stack buffer.
	str_t tmp[URI_MAX];
	FILE *parsed = fmemopen(tmp, sizeof(tmp), "w");
	if(!parsed) {
		FREE(&query);
		FREE(&query_HTMLSafe);
		SLNFilterFree(&filter); // Fix: the filter was leaked on this path.
		return 500;
	}
	SLNFilterPrintUser(filter, parsed, 0);
	fclose(parsed);
	tmp[sizeof(tmp)-1] = '\0'; // fmemopen(3) says this isn't guaranteed.
	parsed_HTMLSafe = htmlenc(tmp);

	// A query that is just a file URI shows that file's own preview
	// followed by everything that links to it ("backlinks" view).
	str_t *primaryURI = NULL;
	SLNFilterRef core = SLNFilterUnwrap(filter);
	SLNFilterType const filtertype = SLNFilterGetType(core);
	if(SLNURIFilterType == filtertype) {
		primaryURI = strdup(SLNFilterGetStringArg(core, 0));
		assert(primaryURI); // TODO
		SLNFilterRef alt;
		rc = SLNFilterCreate(session, SLNLinksToFilterType, &alt);
		assert(rc >= 0); // TODO
		SLNFilterAddStringArg(alt, primaryURI, -1);
		SLNFilterFree(&filter);
		filter = alt; alt = NULL;
	}
	core = NULL;
//	SLNFilterPrint(filter, 0); // DEBUG

	// Run the filter. Direction and start position come from the query
	// string; the page size is clamped to [1, RESULTS_MAX].
	SLNFilterPosition pos[1] = {{ .dir = -1 }};
	str_t *URIs[RESULTS_MAX];
	uint64_t max = numberof(URIs);
	int outdir = -1;
	SLNFilterParseOptions(qs, pos, &max, &outdir, NULL);
	if(max < 1) max = 1;
	if(max > numberof(URIs)) max = numberof(URIs);
	bool const has_start = !!pos->URI;
	uint64_t const t1 = uv_hrtime();
	ssize_t const count = SLNFilterCopyURIs(filter, session, pos, outdir, false, URIs, (size_t)max);
	SLNFilterPositionCleanup(pos);
	if(count < 0) {
		FREE(&query);
		FREE(&query_HTMLSafe);
		FREE(&parsed_HTMLSafe);
		FREE(&primaryURI); // Fix: primaryURI was leaked on this path.
		SLNFilterFree(&filter);
		if(DB_NOTFOUND == count) {
			// Possibly a filter age-function bug.
			alogf("Invalid start parameter? %s\n", URI);
			return 500;
		}
		alogf("Filter error: %s\n", sln_strerror(count));
		return 500;
	}
	SLNFilterFree(&filter);
	uint64_t const t2 = uv_hrtime();

	// Build the HTML-escaped template fields.
	str_t *reponame_HTMLSafe = htmlenc(SLNRepoGetName(blog->repo));
	snprintf(tmp, sizeof(tmp), "Queried in %.6f seconds", (t2-t1) / 1e9);
	str_t *querytime_HTMLSafe = htmlenc(tmp);
	str_t *account_HTMLSafe;
	if(0 == SLNSessionGetUserID(session)) {
		account_HTMLSafe = htmlenc("Log In");
	} else {
		strarg_t const user = SLNSessionGetUsername(session);
		snprintf(tmp, sizeof(tmp), "Account: %s", user);
		account_HTMLSafe = htmlenc(tmp);
	}

	// Pagination links. prev/next anchor on the first/last URI of the
	// current page depending on the output direction.
	// TODO: Write a real function for building query strings
	// Don't use ?: GNUism
	// Preserve other query parameters like `dir`
	str_t *query_encoded = !query ? NULL : QSEscape(query, strlen(query), true);
	FREE(&query);
	str_t *firstpage_HTMLSafe = NULL;
	str_t *prevpage_HTMLSafe = NULL;
	str_t *nextpage_HTMLSafe = NULL;
	str_t *lastpage_HTMLSafe = NULL;
	snprintf(tmp, sizeof(tmp), "?q=%s&start=-", query_encoded ?: "");
	firstpage_HTMLSafe = htmlenc(tmp);
	str_t *p = !count ? NULL : URIs[outdir > 0 ? 0 : count-1];
	str_t *n = !count ? NULL : URIs[outdir > 0 ? count-1 : 0];
	if(p) p = QSEscape(p, strlen(p), 1);
	if(n) n = QSEscape(n, strlen(n), 1);
	snprintf(tmp, sizeof(tmp), "?q=%s&start=%s", query_encoded ?: "", p ?: "");
	prevpage_HTMLSafe = htmlenc(tmp);
	snprintf(tmp, sizeof(tmp), "?q=%s&start=-%s", query_encoded ?: "", n ?: "");
	nextpage_HTMLSafe = htmlenc(tmp);
	snprintf(tmp, sizeof(tmp), "?q=%s", query_encoded ?: "");
	lastpage_HTMLSafe = htmlenc(tmp);
	FREE(&query_encoded);
	FREE(&p);
	FREE(&n);
	str_t *qs_HTMLSafe = htmlenc(qs);

	TemplateStaticArg const args[] = {
		{"reponame", reponame_HTMLSafe},
		{"querytime", querytime_HTMLSafe},
		{"account", account_HTMLSafe},
		{"query", query_HTMLSafe},
		{"parsed", parsed_HTMLSafe},
		{"firstpage", firstpage_HTMLSafe},
		{"prevpage", prevpage_HTMLSafe},
		{"nextpage", nextpage_HTMLSafe},
		{"lastpage", lastpage_HTMLSafe},
		{"qs", qs_HTMLSafe},
		{NULL, NULL},
	};

	// Stream the response. Logged-out pages are publicly cacheable;
	// logged-in pages are private.
	HTTPConnectionWriteResponse(conn, 200, "OK");
	HTTPConnectionWriteHeader(conn, "Content-Type", "text/html; charset=utf-8");
	HTTPConnectionWriteHeader(conn, "Transfer-Encoding", "chunked");
	if(0 == SLNSessionGetUserID(session)) {
		HTTPConnectionWriteHeader(conn, "Cache-Control", "no-cache, public");
	} else {
		HTTPConnectionWriteHeader(conn, "Cache-Control", "no-cache, private");
	}
	HTTPConnectionBeginBody(conn);
	TemplateWriteHTTPChunk(blog->header, &TemplateStaticCBs, args, conn);

	if(primaryURI) {
		SLNFileInfo info[1];
		rc = SLNSessionGetFileInfo(session, primaryURI, info);
		if(rc >= 0) {
			str_t *preferredURI = SLNFormatURI(SLN_INTERNAL_ALGO, info->hash);
			str_t *previewPath = BlogCopyPreviewPath(blog, info->hash);
			send_preview(blog, conn, session, preferredURI, previewPath);
			FREE(&preferredURI);
			FREE(&previewPath);
			SLNFileInfoCleanup(info);
		} else if(DB_NOTFOUND == rc) {
			TemplateWriteHTTPChunk(blog->notfound, &TemplateStaticCBs, args, conn);
		}
		if(count || has_start) {
			TemplateWriteHTTPChunk(blog->backlinks, &TemplateStaticCBs, args, conn);
		}
	}
	if(0 == count && (!primaryURI || has_start)) {
		TemplateWriteHTTPChunk(blog->noresults, &TemplateStaticCBs, args, conn);
	}
	// count >= 0 here; cast avoids a signed/unsigned comparison.
	for(size_t i = 0; i < (size_t)count; i++) {
		str_t algo[SLN_ALGO_SIZE]; // SLN_INTERNAL_ALGO
		str_t hash[SLN_HASH_SIZE];
		SLNParseURI(URIs[i], algo, hash);
		str_t *previewPath = BlogCopyPreviewPath(blog, hash);
		rc = send_preview(blog, conn, session, URIs[i], previewPath);
		FREE(&previewPath);
		if(rc < 0) break;
	}
	FREE(&primaryURI);

	TemplateWriteHTTPChunk(blog->footer, &TemplateStaticCBs, args, conn);
	FREE(&reponame_HTMLSafe);
	FREE(&querytime_HTMLSafe);
	FREE(&account_HTMLSafe);
	FREE(&query_HTMLSafe);
	FREE(&parsed_HTMLSafe);
	FREE(&firstpage_HTMLSafe);
	FREE(&prevpage_HTMLSafe);
	FREE(&nextpage_HTMLSafe);
	FREE(&lastpage_HTMLSafe);
	FREE(&qs_HTMLSafe);

	HTTPConnectionWriteChunkEnd(conn);
	HTTPConnectionEnd(conn);

	for(size_t i = 0; i < (size_t)count; i++) FREE(&URIs[i]);
	assert_zeroed(URIs, count);
	return 0;
}
// Finalizes a submission: computes its content hashes, verifies it, and
// hard-links the temp file into the repo's content-addressed store.
// Returns 0 on success or a negative UV_* error code. Idempotent with
// respect to duplicates: if a file with the same internal hash already
// exists (UV_EEXIST from the link), the temp file is discarded and the
// call still succeeds, keeping the oldest copy.
int SLNSubmissionEnd(SLNSubmissionRef const sub) {
	if(!sub) return 0;
	if(sub->size <= 0) return UV_EINVAL; // Nothing was ever written.
	assert(sub->tmppath);
	assert(sub->tmpfile >= 0);

	// Finish hashing and take ownership of the results. The hasher is
	// freed here regardless, so this function must not be called twice.
	sub->URIs = SLNHasherEnd(sub->hasher);
	sub->internalHash = strdup(SLNHasherGetInternalHash(sub->hasher));
	SLNHasherFree(&sub->hasher);
	if(!sub->URIs || !sub->internalHash) return UV_ENOMEM;
	// NOTE(review): these early returns skip the tmpfile unlink below —
	// presumably the caller's cleanup handles the temp file; confirm.

	SLNRepoRef const repo = SLNSubmissionGetRepo(sub);
	str_t *internalPath = NULL;
	bool worker = false;
	int rc = 0;

	rc = verify(sub);
	if(rc < 0) goto cleanup;

	internalPath = SLNRepoCopyInternalPath(repo, sub->internalHash);
	if(!internalPath) rc = UV_ENOMEM;
	if(rc < 0) goto cleanup;

	// Blocking filesystem work happens on the async worker pool.
	async_pool_enter(NULL); worker = true;

	// Flush file data to disk before publishing it under its hash.
	rc = async_fs_fdatasync(sub->tmpfile);
	if(rc < 0) goto cleanup;

	// We use link(2) rather than rename(2) because link gives an error
	// if there's a name collision, rather than overwriting. We want to
	// keep the oldest file for any given hash, rather than the newest.
	rc = async_fs_link_mkdirp(sub->tmppath, internalPath);
	if(UV_EEXIST == rc) {
		// Duplicate content: not an error, just drop the temp copy.
		rc = 0;
		goto cleanup;
	}
	if(rc < 0) {
		alogf("SLNSubmission couldn't move '%s' to '%s' (%s)\n", sub->tmppath, internalPath, sln_strerror(rc));
		goto cleanup;
	}

	// Persist the new directory entry as well; rc from this sync is the
	// function's result on the success path.
	rc = async_fs_sync_dirname(internalPath);

cleanup:
	if(worker) { async_pool_leave(NULL); worker = false; }
	FREE(&internalPath);
	// The temp file is always removed; on success its data lives on via
	// the hard link created above. Unlink errors are ignored (best effort).
	async_fs_unlink(sub->tmppath);
	FREE(&sub->tmppath);
	return rc;
}