// Reader task: pulls URIs from the remote pull connection, claims a slot in
// the shared ring buffer, and hands each URI off to import(). Runs until
// pull->stop is set, then decrements pull->tasks and signals waiters.
// NOTE(review): pull->connlock appears to serialize access to pull->conn;
// pull->mutex guards the queue bookkeeping (cur/count) — confirm against the
// struct's declaration.
static void reader(SLNPullRef const pull) {
	HTTPConnectionRef conn = NULL;
	int rc;
	for(;;) {
		if(pull->stop) goto stop;

		str_t URI[URI_MAX];

		// Hold connlock across the read so only one task uses pull->conn.
		async_mutex_lock(pull->connlock);

		rc = HTTPConnectionReadBodyLine(pull->conn, URI, sizeof(URI));
		if(rc < 0) {
			// Read failed: retry reconnect() every 5s until it succeeds
			// or a stop is requested, then go around and read again.
			for(;;) {
				if(pull->stop) break;
				if(reconnect(pull) >= 0) break;
				if(pull->stop) break;
				async_sleep(1000 * 5);
			}
			async_mutex_unlock(pull->connlock);
			continue;
		}
		if('#' == URI[0]) { // Comment line.
			async_mutex_unlock(pull->connlock);
			continue;
		}

		// Reserve the next free slot; block while the queue is full.
		// connlock stays held so line order on the wire matches slot order.
		async_mutex_lock(pull->mutex);
		while(pull->count + 1 > QUEUE_SIZE) {
			async_cond_wait(pull->cond, pull->mutex);
			if(pull->stop) {
				async_mutex_unlock(pull->mutex);
				async_mutex_unlock(pull->connlock);
				goto stop;
			}
		}
		size_t pos = (pull->cur + pull->count) % QUEUE_SIZE;
		pull->count += 1;
		async_mutex_unlock(pull->mutex);
		async_mutex_unlock(pull->connlock);

		// Fill the reserved slot, retrying every 5s on transient failure.
		for(;;) {
			if(import(pull, URI, pos, &conn) >= 0) break;
			if(pull->stop) goto stop;
			async_sleep(1000 * 5);
		}
	}

stop:
	HTTPConnectionFree(&conn);
	// Report task exit; writer/stop logic waits on pull->tasks.
	async_mutex_lock(pull->mutex);
	assertf(pull->stop, "Reader ended early");
	assert(pull->tasks > 0);
	pull->tasks--;
	async_cond_broadcast(pull->cond);
	async_mutex_unlock(pull->mutex);
}
/* Exec loop sequential state machine */ static void m_loop_state(struct fpi_ssm *ssm) { struct fp_img_dev *dev = ssm->priv; vfs301_dev_t *vdev = dev->priv; switch (ssm->cur_state) { case M_REQUEST_PRINT: vfs301_proto_request_fingerprint(dev->udev, vdev); fpi_ssm_next_state(ssm); break; case M_WAIT_PRINT: /* Wait fingerprint scanning */ async_sleep(200, ssm); break; case M_CHECK_PRINT: if (!vfs301_proto_peek_event(dev->udev, vdev)) fpi_ssm_jump_to_state(ssm, M_WAIT_PRINT); else fpi_ssm_next_state(ssm); break; case M_READ_PRINT_START: fpi_imgdev_report_finger_status(dev, TRUE); vfs301_proto_process_event_start(dev->udev, vdev); fpi_ssm_next_state(ssm); break; case M_READ_PRINT_WAIT: /* Wait fingerprint scanning */ async_sleep(200, ssm); break; case M_READ_PRINT_POLL: { int rv = vfs301_proto_process_event_poll(dev->udev, vdev); assert(rv != VFS301_FAILURE); if (rv == VFS301_ONGOING) fpi_ssm_jump_to_state(ssm, M_READ_PRINT_WAIT); else fpi_ssm_next_state(ssm); } break; case M_SUBMIT_PRINT: if (submit_image(ssm)) { fpi_ssm_mark_completed(ssm); /* NOTE: finger off is expected only after submitting image... */ fpi_imgdev_report_finger_status(dev, FALSE); } else { fpi_ssm_jump_to_state(ssm, M_REQUEST_PRINT); } break; } }
/**
 * Config-script wrapper: suspend the current route and resume the actions
 * following this one after the configured number of seconds, handled by an
 * async timer worker. str2 is unused (two-parameter fixup convention).
 * Returns 0 on success (script must then exit the route), -1 on error.
 */
static int w_async_sleep(sip_msg_t* msg, char* sec, char* str2)
{
	async_param_t *ap;
	int seconds;

	if(msg==NULL)
		return -1;

	if(async_workers<=0) {
		LM_ERR("no async mod timer workers (modparam missing?)\n");
		return -1;
	}

	ap = (async_param_t*)sec;
	if(fixup_get_ivalue(msg, ap->pinterval, &seconds)!=0) {
		LM_ERR("no async sleep time value\n");
		return -1;
	}

	/* Only the plain (type 0) parameter variant is supported here. */
	if(ap->type!=0)
		return -1;

	/* There must be a following action to resume into. */
	if(ap->u.paction==NULL || ap->u.paction->next==NULL) {
		LM_ERR("cannot be executed as last action in a route block\n");
		return -1;
	}

	if(async_sleep(msg, seconds, ap->u.paction->next)<0)
		return -1;

	/* force exit in config */
	return 0;
}
// Database worker loop: waits for queued sync work, lets the queue fill for
// up to LATENCY_MAX ms to batch writes, swaps the double buffer, then
// commits the batch with db_work(), retrying every 5s on failure.
static void db_thread(EFSSyncRef const sync) {
	int rc;
	struct queues *cur;
	for(;;) {
		if(sync->stop) break;
		async_mutex_lock(sync->mutex);
		// First we wait for anything to enter the queue.
		// Then we wait an additional LATENCY_MAX for
		// the queue to fill completely before processing.
		while(empty(sync)) {
			rc = async_cond_wait(sync->cond, sync->mutex);
			if(UV_ECANCELED == rc) {
				async_mutex_unlock(sync->mutex);
				return; // TODO
			}
		}
		uint64_t const future = uv_now(loop) + LATENCY_MAX;
		while(!filled(sync)) {
			rc = async_cond_timedwait(sync->cond, sync->mutex, future);
			// Deadline reached: process whatever we have so far.
			if(UV_ETIMEDOUT == rc) break;
			if(UV_ECANCELED == rc) {
				async_mutex_unlock(sync->mutex);
				return; // TODO
			}
		}
		// Double buffering.
		// Take the current queue for processing and flip producers to the
		// other one, so they never block on our database work below.
		cur = sync->cur;
		sync->cur = (&sync->queues[1] == sync->cur) ? &sync->queues[0] : &sync->queues[1];
		async_mutex_unlock(sync->mutex);
		// Commit outside the lock; retry forever with a 5s backoff.
		for(;;) {
			rc = db_work(sync, cur);
			if(DB_SUCCESS == rc) break;
			fprintf(stderr, "Sync database error %s\n", db_strerror(rc));
			async_sleep(1000 * 5);
		}
	}
	// TODO: Thread joining
}
static int w_async_route(struct sip_msg* msg, char* rt, char* sec) { cfg_action_t *act; int s; str rn; int ri; if(msg==NULL) return -1; if(async_workers<=0) { LM_ERR("no async mod timer workers\n"); return -1; } if(fixup_get_svalue(msg, (gparam_t*)rt, &rn)!=0) { LM_ERR("no async route block name\n"); return -1; } if(fixup_get_ivalue(msg, (gparam_t*)sec, &s)!=0) { LM_ERR("no async interval value\n"); return -1; } ri = route_get(&main_rt, rn.s); if(ri<0) { LM_ERR("unable to find route block [%.*s]\n", rn.len, rn.s); return -1; } act = main_rt.rlist[ri]; if(act==NULL) { LM_ERR("empty action lists in route block [%.*s]\n", rn.len, rn.s); return -1; } if(async_sleep(msg, s, act)<0) return -1; /* force exit in config */ return 0; }
// Block until the TLS layer can make progress on the stream.
// Returns 0 once progress is (probably) possible again, or a negative
// libuv-style error code for any other event value.
static int tls_poll(uv_stream_t *const stream, int const event)
{
	if(TLS_READ_AGAIN == event) {
		// Issue a zero-length read purely to wait for readability.
		uv_buf_t scratch;
		int rc = async_read(stream, 0, &scratch);
		if(UV_ENOBUFS == rc) rc = 0;
		if(rc < 0) alogf("tls_poll read %s\n", uv_strerror(rc));
		return 0;
	}
	if(TLS_WRITE_AGAIN == event) {
		// TODO: libuv provides NO WAY to wait until a stream is
		// writable! Even our zero-length write hack doesn't work.
		// uv_poll can't be used on uv's own stream fds.
		int rc = async_sleep(50);
		// uv_buf_t buf = uv_buf_init(NULL, 0);
		// rc = async_write(stream, &buf, 1);
		if(rc < 0) alogf("tls_poll write %s\n", uv_strerror(rc));
		return 0;
	}
	int rc = -errno; // TODO: Might have problems on Windows?
	if(rc >= 0) rc = UV_EOF; // Most common case, is this guaranteed?
	return rc;
}
/**
 * KEMI variant of async_route(): resolve the route block named rn and
 * schedule its actions to resume after s seconds in an async timer worker.
 * Returns 0 on success (caller must then stop executing the current route),
 * -1 on error.
 */
int ki_async_route(sip_msg_t *msg, str *rn, int s)
{
	cfg_action_t *act;
	int ri;

	/* Validate inputs before dereferencing — the config wrapper
	 * w_async_route() rejects a NULL msg, but this KEMI entry point had
	 * no guard and would crash on rn->s below. */
	if(msg==NULL || rn==NULL || rn->s==NULL) {
		LM_ERR("invalid parameters\n");
		return -1;
	}

	ri = route_get(&main_rt, rn->s);
	if(ri<0) {
		LM_ERR("unable to find route block [%.*s]\n", rn->len, rn->s);
		return -1;
	}

	act = main_rt.rlist[ri];
	if(act==NULL) {
		LM_ERR("empty action lists in route block [%.*s]\n", rn->len, rn->s);
		return -1;
	}

	if(async_sleep(msg, s, act)<0)
		return -1;

	/* force exit in config */
	return 0;
}
// Writer task: drains the shared ring buffer filled by reader()/import(),
// collects submissions into a local batch, and commits them with
// SLNSubmissionStoreBatch(), retrying every 5s on failure. Runs until
// pull->stop is set, then decrements pull->tasks and signals waiters.
static void writer(SLNPullRef const pull) {
	SLNSubmissionRef queue[QUEUE_SIZE];
	size_t count = 0;   // submissions gathered into the local batch
	size_t skipped = 0; // empty slots ("bubbles") consumed this batch
	double time = uv_now(async_loop) / 1000.0; // batch start, seconds
	for(;;) {
		if(pull->stop) goto stop;

		async_mutex_lock(pull->mutex);
		// Gather at least one item; keep gathering while the batch has
		// room and the ring still has claimed slots.
		while(0 == count || (count < QUEUE_SIZE && pull->count > 0)) {
			size_t const pos = pull->cur;
			// Wait for the head slot to actually be filled by import().
			while(!pull->filled[pos]) {
				async_cond_wait(pull->cond, pull->mutex);
				if(pull->stop) {
					async_mutex_unlock(pull->mutex);
					goto stop;
				}
				// Restart the throughput clock while the batch is empty.
				if(!count) time = uv_now(async_loop) / 1000.0;
			}
			assert(pull->filled[pos]);
			// Skip any bubbles in the queue.
			if(pull->queue[pos]) queue[count++] = pull->queue[pos];
			else skipped++;
			pull->queue[pos] = NULL;
			pull->filled[pos] = false;
			pull->cur = (pull->cur + 1) % QUEUE_SIZE;
			pull->count--;
			// Wake readers blocked on a full ring.
			async_cond_broadcast(pull->cond);
		}
		async_mutex_unlock(pull->mutex);
		assert(count <= QUEUE_SIZE);

		// Commit outside the lock; retry forever with a 5s backoff.
		for(;;) {
			int rc = SLNSubmissionStoreBatch(queue, count);
			if(rc >= 0) break;
			alogf("Submission error: %s (%d)\n", sln_strerror(rc), rc);
			async_sleep(1000 * 5);
		}
		for(size_t i = 0; i < count; ++i) {
			SLNSubmissionFree(&queue[i]);
		}

		double const now = uv_now(async_loop) / 1000.0;
		alogf("Pulled %f files per second\n", count / (now - time));
		time = now;
		count = 0;
		skipped = 0;
	}

stop:
	// Release anything gathered but not yet stored.
	for(size_t i = 0; i < count; ++i) {
		SLNSubmissionFree(&queue[i]);
	}
	assert_zeroed(queue, count);
	// Report task exit; stop logic waits on pull->tasks.
	async_mutex_lock(pull->mutex);
	assertf(pull->stop, "Writer ended early");
	assert(pull->tasks > 0);
	pull->tasks--;
	async_cond_broadcast(pull->cond);
	async_mutex_unlock(pull->mutex);
}