// Queues a meta-file URI/target pair into the current sync batch.
// Blocks while the batch is full (BATCH_MAX entries). Takes copies of
// both strings; the batch owns the copies on success.
// Returns 0 on success or a negative UV_* error code.
int EFSSyncMetaFileAvailable(EFSSyncRef const sync, strarg_t const URI, strarg_t const target) {
	if(!sync) return 0;
	if(!URI) return UV_EINVAL;
	if(!target) return UV_EINVAL;
	str_t *URICopy = strdup(URI);
	str_t *targetCopy = strdup(target);
	if(!URICopy || !targetCopy) {
		FREE(&URICopy);
		FREE(&targetCopy);
		return UV_ENOMEM;
	}
	int rc; // fix: was used without a declaration
	async_mutex_lock(sync->mutex);
	while(BATCH_MAX == sync->cur->metaFileURICount) {
		rc = async_cond_wait(sync->cond, sync->mutex);
		if(rc < 0) {
			// Wait failed (e.g. cancellation): don't leak the copies.
			async_mutex_unlock(sync->mutex);
			FREE(&URICopy);
			FREE(&targetCopy);
			return rc;
		}
	}
	sync->cur->metaFileURIs[sync->cur->metaFileURICount] = URICopy;
	sync->cur->metaFileTargets[sync->cur->metaFileURICount] = targetCopy;
	sync->cur->metaFileURICount++;
	async_cond_broadcast(sync->cond);
	async_mutex_unlock(sync->mutex);
	return 0;
}
// Reader task: reads URIs line-by-line from the shared remote connection,
// reserves an ordered slot in the queue for each, then downloads via
// import(). Runs until pull->stop is set; decrements pull->tasks on exit.
// Lock order is connlock then mutex; both are released in reverse order.
static void reader(SLNPullRef const pull) {
	HTTPConnectionRef conn = NULL; // per-reader download connection, owned here
	int rc;
	for(;;) {
		if(pull->stop) goto stop;

		str_t URI[URI_MAX];
		// connlock serializes access to the shared pull->conn line stream.
		async_mutex_lock(pull->connlock);
		rc = HTTPConnectionReadBodyLine(pull->conn, URI, sizeof(URI));
		if(rc < 0) {
			// Read failed: retry reconnecting every 5s until success or stop.
			for(;;) {
				if(pull->stop) break;
				if(reconnect(pull) >= 0) break;
				if(pull->stop) break;
				async_sleep(1000 * 5);
			}
			async_mutex_unlock(pull->connlock);
			continue;
		}
		if('#' == URI[0]) { // Comment line.
			async_mutex_unlock(pull->connlock);
			continue;
		}

		// Reserve the next queue slot; block while the queue is full.
		// Still holding connlock so slot order matches line order.
		async_mutex_lock(pull->mutex);
		while(pull->count + 1 > QUEUE_SIZE) {
			async_cond_wait(pull->cond, pull->mutex);
			if(pull->stop) {
				// Release both locks (reverse order) before exiting.
				async_mutex_unlock(pull->mutex);
				async_mutex_unlock(pull->connlock);
				goto stop;
			}
		}
		size_t pos = (pull->cur + pull->count) % QUEUE_SIZE;
		pull->count += 1;
		async_mutex_unlock(pull->mutex);
		async_mutex_unlock(pull->connlock);

		// Download outside the locks; retry every 5s until success or stop.
		for(;;) {
			if(import(pull, URI, pos, &conn) >= 0) break;
			if(pull->stop) goto stop;
			async_sleep(1000 * 5);
		}
	}

stop:
	HTTPConnectionFree(&conn);
	// Check out: announce task completion so SLNPullStop can finish.
	async_mutex_lock(pull->mutex);
	assertf(pull->stop, "Reader ended early");
	assert(pull->tasks > 0);
	pull->tasks--;
	async_cond_broadcast(pull->cond);
	async_mutex_unlock(pull->mutex);
}
// Queues a file URI into the current sync batch. Blocks while the batch
// is full (BATCH_MAX entries). Takes a copy of the string; the batch owns
// the copy on success. Returns 0 on success or a negative UV_* error code.
int EFSSyncFileAvailable(EFSSyncRef const sync, strarg_t const URI) {
	if(!sync) return 0;
	if(!URI) return UV_EINVAL;
	str_t *URICopy = strdup(URI);
	if(!URICopy) return UV_ENOMEM;
	int rc; // fix: was used without a declaration
	async_mutex_lock(sync->mutex);
	while(BATCH_MAX == sync->cur->fileURICount) {
		rc = async_cond_wait(sync->cond, sync->mutex);
		if(rc < 0) {
			// Wait failed (e.g. cancellation): don't leak the copy.
			async_mutex_unlock(sync->mutex);
			FREE(&URICopy);
			return rc;
		}
	}
	sync->cur->fileURIs[sync->cur->fileURICount++] = URICopy;
	async_cond_broadcast(sync->cond);
	async_mutex_unlock(sync->mutex);
	return 0;
}
// Stops a running pull: signals every reader/writer task, waits for them
// all to exit, then frees the connection and drains the submission queue.
// Safe to call on a NULL or already-stopped pull.
void SLNPullStop(SLNPullRef const pull) {
	if(!pull) return;
	if(pull->stop) return;

	async_mutex_lock(pull->mutex);
	pull->stop = true;
	async_cond_broadcast(pull->cond);
	// Block until every task has checked out (decremented pull->tasks).
	while(pull->tasks > 0) async_cond_wait(pull->cond, pull->mutex);
	async_mutex_unlock(pull->mutex);

	HTTPConnectionFree(&pull->conn);

	// Discard anything still queued and reset the ring buffer state.
	for(size_t slot = 0; slot < QUEUE_SIZE; slot++) {
		SLNSubmissionFree(&pull->queue[slot]);
		pull->filled[slot] = false;
	}
	pull->cur = 0;
	pull->count = 0;
}
// Generates a preview for URI at the given cache path, coordinating with
// other threads through the blog->pending[] slot table so at most one
// thread generates a given path at a time (and at most PENDING_MAX
// generations run concurrently).
static void gen_preview(BlogRef const blog, SLNSessionRef const session, strarg_t const URI, strarg_t const path) {
	// It's okay to accidentally regenerate a preview
	// It's okay to send an error if another thread tried to gen and failed
	// We want to minimize false positives and false negatives
	// In particular, if a million connections request a new file at once,
	// we want to avoid starting gen for each connection before any of them
	// have finished
	// Capping the total number of concurrent gens to PENDING_MAX is not
	// a bad side effect
	bool beat_us_to_it = false;
	size_t slot = SIZE_MAX;
	async_mutex_lock(blog->pending_mutex);
	// Loop re-waits on the cond between checks. If another thread is
	// already generating this path, remember that and wait it out; once
	// it finishes we return rather than regenerate. Otherwise claim a slot.
	for(;; async_cond_wait(blog->pending_cond, blog->pending_mutex)) {
		if(gen_pending(blog, path)) {
			beat_us_to_it = true;
			continue;
		}
		if(beat_us_to_it) break;
		if(gen_available(blog, path, &slot)) break;
	}
	async_mutex_unlock(blog->pending_mutex);
	if(beat_us_to_it) return; // Note: we don't know their return status.

	assert(slot < PENDING_MAX);
	SLNFileInfo src[1];
	int rc = SLNSessionGetFileInfo(session, URI, src);
	if(rc >= 0) {
		// Try generators in order; the first one to succeed short-circuits
		// the rest. rc starts at -1 so BlogConvert is always attempted.
		rc = -1;
		rc = rc >= 0 ? rc : BlogConvert(blog, session, path, NULL, URI, src);
		rc = rc >= 0 ? rc : BlogGeneric(blog, session, path, URI, src);
		SLNFileInfoCleanup(src);
	}

	// Release our pending slot and wake any threads waiting on this path.
	async_mutex_lock(blog->pending_mutex);
	assert(path == blog->pending[slot]);
	blog->pending[slot] = NULL;
	async_cond_broadcast(blog->pending_cond);
	async_mutex_unlock(blog->pending_mutex);
}
// Downloads the file identified by URI from the remote host and stages it
// as a submission in queue slot pos. Always fills the reserved slot on
// success — with NULL (a "bubble") when there is nothing to do (bad URI,
// or file already present). Returns 0 on success, -1 on failure; on
// failure the shared connection is freed so the caller reconnects.
static int import(SLNPullRef const pull, strarg_t const URI, size_t const pos, HTTPConnectionRef *const conn) {
	if(!pull) return 0;

	// TODO: Even if there's nothing to do, we have to enqueue something to fill up our reserved slots. I guess it's better than doing a lot of work inside the connection lock, but there's got to be a better way.

	SLNSubmissionRef sub = NULL;
	HTTPHeadersRef headers = NULL;

	if(!URI) goto enqueue;

	str_t algo[SLN_ALGO_SIZE];
	str_t hash[SLN_HASH_SIZE];
	if(SLNParseURI(URI, algo, hash) < 0) goto enqueue;

	// Skip files we already have locally.
	int rc = SLNSessionGetFileInfo(pull->session, URI, NULL);
	if(rc >= 0) goto enqueue;
	db_assertf(DB_NOTFOUND == rc, "Database error: %s", sln_strerror(rc));

	// TODO: We're logging out of order when we do it like this...
	// alogf("Pulling %s\n", URI);

	// Lazily (re)establish this reader's download connection.
	if(!*conn) {
		rc = HTTPConnectionCreateOutgoing(pull->host, 0, conn);
		if(rc < 0) {
			alogf("Pull import connection error: %s\n", sln_strerror(rc));
			goto fail;
		}
	}

	str_t *path = aasprintf("/sln/file/%s/%s", algo, hash);
	if(!path) {
		alogf("Pull aasprintf error\n");
		goto fail;
	}
	rc = HTTPConnectionWriteRequest(*conn, HTTP_GET, path, pull->host);
	assert(rc >= 0); // TODO
	FREE(&path);
	HTTPConnectionWriteHeader(*conn, "Cookie", pull->cookie);
	HTTPConnectionBeginBody(*conn);
	rc = HTTPConnectionEnd(*conn);
	if(rc < 0) {
		alogf("Pull import request error: %s\n", sln_strerror(rc));
		goto fail;
	}
	int const status = HTTPConnectionReadResponseStatus(*conn);
	if(status < 0) {
		alogf("Pull import response error: %s\n", sln_strerror(status));
		goto fail;
	}
	if(status < 200 || status >= 300) {
		alogf("Pull import status error: %d\n", status);
		goto fail;
	}

	rc = HTTPHeadersCreateFromConnection(*conn, &headers);
	assert(rc >= 0); // TODO
/*	if(rc < 0) {
		alogf("Pull import headers error %s\n", sln_strerror(rc));
		goto fail;
	}*/
	strarg_t const type = HTTPHeadersGet(headers, "content-type");

	rc = SLNSubmissionCreate(pull->session, URI, &sub);
	if(rc < 0) {
		alogf("Pull submission error: %s\n", sln_strerror(rc));
		goto fail;
	}
	rc = SLNSubmissionSetType(sub, type);
	if(rc < 0) {
		alogf("Pull submission type error: %s\n", sln_strerror(rc));
		goto fail;
	}
	// Stream the response body into the submission, bailing if stopped.
	for(;;) {
		if(pull->stop) goto fail;
		uv_buf_t buf[1] = {};
		rc = HTTPConnectionReadBody(*conn, buf);
		if(rc < 0) {
			alogf("Pull download error: %s\n", sln_strerror(rc));
			goto fail;
		}
		if(0 == buf->len) break; // End of body.
		rc = SLNSubmissionWrite(sub, (byte_t *)buf->base, buf->len);
		if(rc < 0) {
			alogf("Pull write error\n");
			goto fail;
		}
	}
	rc = SLNSubmissionEnd(sub);
	if(rc < 0) {
		alogf("Pull submission error: %s\n", sln_strerror(rc));
		goto fail;
	}

enqueue:
	HTTPHeadersFree(&headers);
	// Hand ownership of sub (possibly NULL) to the queue slot and mark it
	// filled so the writer can consume it.
	async_mutex_lock(pull->mutex);
	pull->queue[pos] = sub; sub = NULL;
	pull->filled[pos] = true;
	async_cond_broadcast(pull->cond);
	async_mutex_unlock(pull->mutex);
	return 0;

fail:
	HTTPHeadersFree(&headers);
	SLNSubmissionFree(&sub);
	// Drop the connection so the next attempt starts fresh.
	HTTPConnectionFree(conn);
	return -1;
}
// Writer task: consumes filled slots from the shared queue in FIFO order,
// accumulates them into a local batch, and commits each batch with
// SLNSubmissionStoreBatch. Runs until pull->stop is set; decrements
// pull->tasks on exit.
static void writer(SLNPullRef const pull) {
	SLNSubmissionRef queue[QUEUE_SIZE]; // local batch being assembled
	size_t count = 0;   // submissions in the local batch
	size_t skipped = 0; // bubbles (NULL slots) consumed this batch
	double time = uv_now(async_loop) / 1000.0;
	for(;;) {
		if(pull->stop) goto stop;

		// Take at least one submission, then keep draining while more are
		// immediately available, up to QUEUE_SIZE per batch.
		async_mutex_lock(pull->mutex);
		while(0 == count || (count < QUEUE_SIZE && pull->count > 0)) {
			size_t const pos = pull->cur;
			while(!pull->filled[pos]) {
				async_cond_wait(pull->cond, pull->mutex);
				if(pull->stop) {
					async_mutex_unlock(pull->mutex);
					goto stop;
				}
				// Reset the throughput clock while the batch is still empty.
				if(!count) time = uv_now(async_loop) / 1000.0;
			}
			assert(pull->filled[pos]);
			// Skip any bubbles in the queue.
			if(pull->queue[pos]) queue[count++] = pull->queue[pos];
			else skipped++;
			pull->queue[pos] = NULL;
			pull->filled[pos] = false;
			pull->cur = (pull->cur + 1) % QUEUE_SIZE;
			pull->count--;
			// Wake readers waiting for free queue slots.
			async_cond_broadcast(pull->cond);
		}
		async_mutex_unlock(pull->mutex);
		assert(count <= QUEUE_SIZE);

		// Commit the batch; retry every 5s until the store succeeds.
		for(;;) {
			int rc = SLNSubmissionStoreBatch(queue, count);
			if(rc >= 0) break;
			alogf("Submission error: %s (%d)\n", sln_strerror(rc), rc);
			async_sleep(1000 * 5);
		}
		for(size_t i = 0; i < count; ++i) {
			SLNSubmissionFree(&queue[i]);
		}

		double const now = uv_now(async_loop) / 1000.0;
		alogf("Pulled %f files per second\n", count / (now - time));
		time = now;
		count = 0;
		skipped = 0;
	}

stop:
	// Release anything batched but not yet stored.
	for(size_t i = 0; i < count; ++i) {
		SLNSubmissionFree(&queue[i]);
	}
	assert_zeroed(queue, count);

	// Check out: announce task completion so SLNPullStop can finish.
	async_mutex_lock(pull->mutex);
	assertf(pull->stop, "Writer ended early");
	assert(pull->tasks > 0);
	pull->tasks--;
	async_cond_broadcast(pull->cond);
	async_mutex_unlock(pull->mutex);
}