// Queues a meta-file (URI -> target) pair onto the sync's current batch.
// Blocks (cooperatively) while the current batch is full, waking when the
// db thread swaps buffers and broadcasts. Takes ownership of copies of
// both strings; the queue frees them after processing.
// Returns 0 on success, UV_EINVAL on bad args, UV_ENOMEM on allocation
// failure, or a negative error from the condition wait (e.g. UV_ECANCELED
// during shutdown).
int EFSSyncMetaFileAvailable(EFSSyncRef const sync, strarg_t const URI, strarg_t const target) {
	if(!sync) return 0;
	if(!URI) return UV_EINVAL;
	if(!target) return UV_EINVAL;
	str_t *URICopy = strdup(URI);
	str_t *targetCopy = strdup(target);
	if(!URICopy || !targetCopy) {
		FREE(&URICopy);
		FREE(&targetCopy);
		return UV_ENOMEM;
	}
	int rc; // FIX: was used without a declaration.
	async_mutex_lock(sync->mutex);
	while(BATCH_MAX == sync->cur->metaFileURICount) {
		rc = async_cond_wait(sync->cond, sync->mutex);
		if(rc < 0) {
			// FIX: previously ignored (TODO) — a cancelled wait would
			// spin forever and leak both copies. Unwind instead.
			async_mutex_unlock(sync->mutex);
			FREE(&URICopy);
			FREE(&targetCopy);
			return rc;
		}
	}
	sync->cur->metaFileURIs[sync->cur->metaFileURICount] = URICopy;
	sync->cur->metaFileTargets[sync->cur->metaFileURICount] = targetCopy;
	sync->cur->metaFileURICount++;
	async_cond_broadcast(sync->cond); // Wake the db thread (it waits for a non-empty queue).
	async_mutex_unlock(sync->mutex);
	return 0;
}
// Reader task: pulls URI lines from the remote connection and imports each
// one into a reserved slot of the shared ring buffer consumed by writer().
// Holds pull->connlock across the read AND the slot reservation so that
// slot order matches wire order. Runs until pull->stop is set.
static void reader(SLNPullRef const pull) {
	HTTPConnectionRef conn = NULL;
	int rc;
	for(;;) {
		if(pull->stop) goto stop;

		str_t URI[URI_MAX];

		async_mutex_lock(pull->connlock);

		rc = HTTPConnectionReadBodyLine(pull->conn, URI, sizeof(URI));
		if(rc < 0) {
			// Read failed: retry reconnecting every 5s until it
			// succeeds or we are told to stop.
			for(;;) {
				if(pull->stop) break;
				if(reconnect(pull) >= 0) break;
				if(pull->stop) break;
				async_sleep(1000 * 5);
			}
			async_mutex_unlock(pull->connlock);
			continue;
		}
		if('#' == URI[0]) { // Comment line.
			async_mutex_unlock(pull->connlock);
			continue;
		}

		// Reserve the next queue slot, blocking while the ring is full.
		// connlock is still held so reservations stay in read order.
		async_mutex_lock(pull->mutex);
		while(pull->count + 1 > QUEUE_SIZE) {
			async_cond_wait(pull->cond, pull->mutex);
			if(pull->stop) {
				async_mutex_unlock(pull->mutex);
				async_mutex_unlock(pull->connlock);
				goto stop;
			}
		}
		size_t pos = (pull->cur + pull->count) % QUEUE_SIZE;
		pull->count += 1;
		async_mutex_unlock(pull->mutex);
		async_mutex_unlock(pull->connlock);

		// Retry the import every 5s until it lands. If we stop here,
		// the reserved slot is left unfilled ("bubble"); writer()
		// skips such slots.
		for(;;) {
			if(import(pull, URI, pos, &conn) >= 0) break;
			if(pull->stop) goto stop;
			async_sleep(1000 * 5);
		}
	}

stop:
	HTTPConnectionFree(&conn);
	// Announce task exit so SLNPullStop's tasks-drained wait can finish.
	async_mutex_lock(pull->mutex);
	assertf(pull->stop, "Reader ended early");
	assert(pull->tasks > 0);
	pull->tasks--;
	async_cond_broadcast(pull->cond);
	async_mutex_unlock(pull->mutex);
}
// Database batching thread: waits for work to accumulate, swaps the
// double-buffered queue, then commits the detached batch with db_work(),
// retrying every 5s on database errors. Runs until sync->stop or a
// cancelled wait.
static void db_thread(EFSSyncRef const sync) {
	int rc;
	struct queues *cur;
	for(;;) {
		if(sync->stop) break;
		async_mutex_lock(sync->mutex);
		// First we wait for anything to enter the queue.
		// Then we wait an additional LATENCY_MAX for
		// the queue to fill completely before processing.
		while(empty(sync)) {
			rc = async_cond_wait(sync->cond, sync->mutex);
			if(UV_ECANCELED == rc) {
				async_mutex_unlock(sync->mutex);
				return; // TODO
			}
		}
		// Batch window: absolute deadline for the timed waits below.
		uint64_t const future = uv_now(loop) + LATENCY_MAX;
		while(!filled(sync)) {
			rc = async_cond_timedwait(sync->cond, sync->mutex, future);
			if(UV_ETIMEDOUT == rc) break; // Commit a partial batch.
			if(UV_ECANCELED == rc) {
				async_mutex_unlock(sync->mutex);
				return; // TODO
			}
		}
		// Double buffering: detach the filled queue and let producers
		// keep appending to the other one while we commit.
		cur = sync->cur;
		sync->cur = (&sync->queues[1] == sync->cur) ?
			&sync->queues[0] : &sync->queues[1];
		async_mutex_unlock(sync->mutex);

		for(;;) {
			rc = db_work(sync, cur);
			if(DB_SUCCESS == rc) break;
			fprintf(stderr, "Sync database error %s\n", db_strerror(rc));
			async_sleep(1000 * 5);
		}
	}
	// TODO: Thread joining
}
// Queues a file URI onto the sync's current batch. Blocks (cooperatively)
// while the current batch is full, waking when the db thread swaps buffers
// and broadcasts. Takes ownership of a copy of URI; the queue frees it
// after processing.
// Returns 0 on success, UV_EINVAL on bad args, UV_ENOMEM on allocation
// failure, or a negative error from the condition wait (e.g. UV_ECANCELED
// during shutdown).
int EFSSyncFileAvailable(EFSSyncRef const sync, strarg_t const URI) {
	if(!sync) return 0;
	if(!URI) return UV_EINVAL;
	str_t *URICopy = strdup(URI);
	if(!URICopy) return UV_ENOMEM;
	int rc; // FIX: was used without a declaration.
	async_mutex_lock(sync->mutex);
	while(BATCH_MAX == sync->cur->fileURICount) {
		rc = async_cond_wait(sync->cond, sync->mutex);
		if(rc < 0) {
			// FIX: previously ignored (TODO) — a cancelled wait would
			// spin forever and leak the copy. Unwind instead.
			async_mutex_unlock(sync->mutex);
			FREE(&URICopy);
			return rc;
		}
	}
	sync->cur->fileURIs[sync->cur->fileURICount++] = URICopy;
	async_cond_broadcast(sync->cond); // Wake the db thread (it waits for a non-empty queue).
	async_mutex_unlock(sync->mutex);
	return 0;
}
// Stops a running pull: raises the stop flag, wakes every blocked task,
// waits for all of them to exit, then tears down the connection and
// drains the submission ring back to its empty state.
// Safe to call with NULL or on an already-stopped pull (no-op).
void SLNPullStop(SLNPullRef const pull) {
	if(!pull || pull->stop) return;

	async_mutex_lock(pull->mutex);
	pull->stop = true;
	async_cond_broadcast(pull->cond);
	// Each reader/writer task decrements pull->tasks and broadcasts on exit.
	while(pull->tasks > 0) async_cond_wait(pull->cond, pull->mutex);
	async_mutex_unlock(pull->mutex);

	HTTPConnectionFree(&pull->conn);

	// No tasks remain, so the ring can be cleared without the lock.
	for(size_t slot = 0; slot < QUEUE_SIZE; slot++) {
		SLNSubmissionFree(&pull->queue[slot]);
		pull->filled[slot] = false;
	}
	pull->count = 0;
	pull->cur = 0;
}
// Generates a preview for URI at `path`, deduplicating concurrent requests:
// at most one gen per path runs at a time, and total concurrent gens are
// capped by the pending-slot table (PENDING_MAX).
static void gen_preview(BlogRef const blog, SLNSessionRef const session, strarg_t const URI, strarg_t const path) {
	// It's okay to accidentally regenerate a preview
	// It's okay to send an error if another thread tried to gen and failed
	// We want to minimize false positives and false negatives
	// In particular, if a million connections request a new file at once,
	// we want to avoid starting gen for each connection before any of them
	// have finished
	// Capping the total number of concurrent gens to PENDING_MAX is not
	// a bad side effect
	bool beat_us_to_it = false;
	size_t slot = SIZE_MAX;
	async_mutex_lock(blog->pending_mutex);
	// Loop body runs once, then again after each broadcast on pending_cond.
	for(;; async_cond_wait(blog->pending_cond, blog->pending_mutex)) {
		// Another thread is generating this path: remember that and
		// wait for it to finish rather than claiming a slot ourselves.
		if(gen_pending(blog, path)) {
			beat_us_to_it = true;
			continue;
		}
		if(beat_us_to_it) break;
		// Try to claim a free pending slot for this path.
		if(gen_available(blog, path, &slot)) break;
	}
	async_mutex_unlock(blog->pending_mutex);
	if(beat_us_to_it) return; // Note: we don't know their return status.
	assert(slot < PENDING_MAX);

	SLNFileInfo src[1];
	int rc = SLNSessionGetFileInfo(session, URI, src);
	if(rc >= 0) {
		// Reset rc so the fallback chain below always starts with
		// BlogConvert; each step runs only if the previous one failed.
		rc = -1;
		rc = rc >= 0 ? rc : BlogConvert(blog, session, path, NULL, URI, src);
		rc = rc >= 0 ? rc : BlogGeneric(blog, session, path, URI, src);
		SLNFileInfoCleanup(src);
	}

	// Release our slot and wake any threads waiting on this path.
	async_mutex_lock(blog->pending_mutex);
	assert(path == blog->pending[slot]);
	blog->pending[slot] = NULL;
	async_cond_broadcast(blog->pending_cond);
	async_mutex_unlock(blog->pending_mutex);
}
// Writer task: drains filled entries from the shared ring (in order),
// skipping unfilled "bubble" slots left by aborted imports, and commits
// them in batches via SLNSubmissionStoreBatch, retrying every 5s on
// failure. Logs throughput per batch. Runs until pull->stop is set.
static void writer(SLNPullRef const pull) {
	SLNSubmissionRef queue[QUEUE_SIZE];
	size_t count = 0;
	size_t skipped = 0;
	double time = uv_now(async_loop) / 1000.0;
	for(;;) {
		if(pull->stop) goto stop;

		// Collect at least one submission; keep draining while more
		// entries are pending and our local batch has room.
		async_mutex_lock(pull->mutex);
		while(0 == count || (count < QUEUE_SIZE && pull->count > 0)) {
			size_t const pos = pull->cur;
			// Wait for the head slot to be filled by a reader.
			while(!pull->filled[pos]) {
				async_cond_wait(pull->cond, pull->mutex);
				if(pull->stop) {
					async_mutex_unlock(pull->mutex);
					goto stop;
				}
				// Restart the throughput clock while idle.
				if(!count) time = uv_now(async_loop) / 1000.0;
			}
			assert(pull->filled[pos]);
			// Skip any bubbles in the queue.
			if(pull->queue[pos]) queue[count++] = pull->queue[pos];
			else skipped++;
			pull->queue[pos] = NULL;
			pull->filled[pos] = false;
			pull->cur = (pull->cur + 1) % QUEUE_SIZE;
			pull->count--;
			// Wake readers blocked on a full ring.
			async_cond_broadcast(pull->cond);
		}
		async_mutex_unlock(pull->mutex);
		assert(count <= QUEUE_SIZE);

		// Commit the batch, retrying every 5s until it succeeds.
		for(;;) {
			int rc = SLNSubmissionStoreBatch(queue, count);
			if(rc >= 0) break;
			alogf("Submission error: %s (%d)\n", sln_strerror(rc), rc);
			async_sleep(1000 * 5);
		}
		for(size_t i = 0; i < count; ++i) {
			SLNSubmissionFree(&queue[i]);
		}

		double const now = uv_now(async_loop) / 1000.0;
		alogf("Pulled %f files per second\n", count / (now - time));
		time = now;
		count = 0;
		skipped = 0;
	}

stop:
	// Free anything collected but not yet stored.
	for(size_t i = 0; i < count; ++i) {
		SLNSubmissionFree(&queue[i]);
	}
	assert_zeroed(queue, count);

	// Announce task exit so SLNPullStop's tasks-drained wait can finish.
	async_mutex_lock(pull->mutex);
	assertf(pull->stop, "Writer ended early");
	assert(pull->tasks > 0);
	pull->tasks--;
	async_cond_broadcast(pull->cond);
	async_mutex_unlock(pull->mutex);
}