static void * swthread(void *aux) { plugins_init2(); hts_mutex_lock(&gconf.state_mutex); gconf.state_plugins_loaded = 1; hts_cond_broadcast(&gconf.state_cond); hts_mutex_unlock(&gconf.state_mutex); upgrade_init(); usage_start(); if(!gconf.disable_upgrades) { for(int i = 0; i < 10; i++) { if(!plugins_upgrade_check()) break; TRACE(TRACE_DEBUG, "plugins", "Failed to update repo, retrying in %d seconds", i + 1); sleep(i + i); } for(int i = 0; i < 10; i++) { if(!upgrade_refresh()) break; sleep(i + 1); TRACE(TRACE_DEBUG, "upgrade", "Failed to check for app upgrade, retrying in %d seconds", i + 1); } } load_site_news(); hts_mutex_lock(&gconf.state_mutex); gconf.swrefresh = 0; while(!gconf.disable_upgrades) { int timeout = 0; while(gconf.swrefresh == 0) { timeout = hts_cond_wait_timeout(&gconf.state_cond, &gconf.state_mutex, 12 * 3600 * 1000); if(timeout) break; } gconf.swrefresh = 0; hts_mutex_unlock(&gconf.state_mutex); if(!timeout) plugins_upgrade_check(); upgrade_refresh(); load_site_news(); hts_mutex_lock(&gconf.state_mutex); } hts_mutex_unlock(&gconf.state_mutex); return NULL; }
static void * timer_thread(void *aux) { int destroy = 0; es_timer_t *et; hts_mutex_lock(&timer_mutex); while(1) { et = LIST_FIRST(&timers); if(et == NULL) break; int64_t now = arch_get_ts(); int64_t delta = et->et_expire - now; if(delta > 0) { int ms = (delta + 999) / 1000; hts_cond_wait_timeout(&timer_cond, &timer_mutex, ms); continue; } LIST_REMOVE(et, et_link); if(et->et_interval) { et->et_expire = now + et->et_interval * 1000LL; LIST_INSERT_SORTED(&timers, et, et_link, estimercmp, es_timer_t); } else { et->et_expire = 0; destroy = 1; } es_resource_retain(&et->super); hts_mutex_unlock(&timer_mutex); es_context_t *ec = et->super.er_ctx; es_context_begin(ec); duk_context *ctx = ec->ec_duk; es_push_root(ctx, et); int rc = duk_pcall(ctx, 0); if(rc) es_dump_err(ctx); duk_pop(ctx); es_resource_release(&et->super); if(destroy) es_resource_destroy(&et->super); es_context_end(ec, 0); hts_mutex_lock(&timer_mutex); } thread_running = 0; hts_mutex_unlock(&timer_mutex); return NULL; }
/**
 * Submit one access unit to the Cell (PS3) hardware video decoder and
 * emit any decoded pictures that are ready.
 *
 * NOTE(review): the previous header claimed "Return 0 if ownership of
 * 'data' has been transfered from caller", but the function is void —
 * that comment was stale.  The function blocks until the decoder has
 * accepted the AU (see the audone wait below), so 'data' is done with
 * when this returns.
 *
 * @param vdd          decoder instance state
 * @param au           access-unit descriptor; packet_addr/packet_size
 *                     are filled in here
 * @param data         AU payload, or NULL to request a full flush of
 *                     all pending pictures
 * @param len          payload length in bytes
 * @param drop_non_ref if set, decoder skips non-reference frames
 * @param vd           video decoder to emit finished pictures to
 */
static void
submit_au(vdec_decoder_t *vdd, struct vdec_au *au, void *data, size_t len,
          int drop_non_ref, video_decoder_t *vd)
{
  vdec_pic_t *vp;

  // Optionally rewrite the AU in place (len may shrink)
  if(data != NULL && vdd->filter_aud)
    len = filter_aud(data, len);

  au->packet_addr = (intptr_t)data;
  au->packet_size = len;

  hts_mutex_lock(&vdd->mtx);
  vdd->submitted_au = 1;
  int r = vdec_decode_au(vdd->handle,
                         drop_non_ref ? VDEC_DECODER_MODE_SKIP_NON_REF :
                         VDEC_DECODER_MODE_NORMAL, au);

  if(r == 0) {
    // Wait for the decoder callback to clear submitted_au.  If nothing
    // happens within 5000 (ms — presumably; matches other
    // hts_cond_wait_timeout call sites) the hardware is considered hung.
    while(vdd->submitted_au) {
      if(hts_cond_wait_timeout(&vdd->audone, &vdd->mtx, 5000)) {
        panic("Cell video decoder lockup");
      }
    }
  }

  if(data == NULL) {
    // When we want to flush out all frames from the decoder
    // we just wait for them by sleeping. Lame but kinda works
    hts_mutex_unlock(&vdd->mtx);
    usleep(100000);
    hts_mutex_lock(&vdd->mtx);
  }

  // Emit all pictures up to flush_to (or everything when flushing).
  // The mutex is dropped around emit_frame() since it may block.
  while((vp = LIST_FIRST(&vdd->pictures)) != NULL) {
    // data == NULL means that we should do a complete flush
    if(vdd->flush_to < vp->order && data != NULL)
      break;
    LIST_REMOVE(vp, link);
    vdd->num_pictures--;
    hts_mutex_unlock(&vdd->mtx);
    emit_frame(vd, vp);
    hts_mutex_lock(&vdd->mtx);
    free(vp);
  }

  // Bound the queue: drop the oldest pictures beyond 16 entries
  while(vdd->num_pictures > 16) {
    vp = LIST_FIRST(&vdd->pictures);
    assert(vp != NULL);
    release_picture(vp);
    vdd->num_pictures--;
  }

  hts_mutex_unlock(&vdd->mtx);
}
/**
 * Block until the component's pending command has completed.
 *
 * Waits on oc_event_cond (250 ms per wait) until oc_cmd_done is set;
 * logs an error and gives up if a wait times out.
 *
 * @param oc  component whose command completion to wait for
 */
void
omx_wait_command(omx_component_t *oc)
{
  hts_mutex_lock(oc->oc_mtx);
  for(;;) {
    if(oc->oc_cmd_done)
      break;
    int timed_out = hts_cond_wait_timeout(&oc->oc_event_cond,
                                          oc->oc_mtx, 250);
    if(timed_out) {
      TRACE(TRACE_ERROR, "OMX", "OMX timeout");
      break;
    }
  }
  hts_mutex_unlock(oc->oc_mtx);
}
/**
 * Pop a buffer header from the component's free list.
 *
 * Must be called with oc_mtx held.  Sleeps on oc_avail_cond until a
 * buffer is available; after 3000 ms without one, logs an error and
 * returns NULL.  On success the buffer is accounted as in-flight.
 *
 * @param oc  component to take a buffer from
 * @return    buffer header, or NULL on timeout
 */
OMX_BUFFERHEADERTYPE *
omx_get_buffer_locked(omx_component_t *oc)
{
  OMX_BUFFERHEADERTYPE *hdr = oc->oc_avail;

  while(hdr == NULL) {
    int timed_out = hts_cond_wait_timeout(oc->oc_avail_cond,
                                          oc->oc_mtx, 3000);
    if(timed_out) {
      TRACE(TRACE_ERROR, "OMX", "Timeout while waiting for buffer");
      return NULL;
    }
    hdr = oc->oc_avail;
  }

  // Unlink from the free list (next entry is chained via pAppPrivate)
  oc->oc_avail = hdr->pAppPrivate;
  oc->oc_avail_bytes -= hdr->nAllocLen;
  oc->oc_inflight_buffers++;
  return hdr;
}
/**
 * Wait on a condition variable with an absolute deadline.
 *
 * @param c         condition variable
 * @param m         mutex held by the caller
 * @param deadline  absolute deadline in microseconds (same epoch as
 *                  arch_get_ts() on Apple; converted straight into a
 *                  timespec for pthread_cond_timedwait elsewhere —
 *                  presumably CLOCK_REALTIME, verify against callers)
 * @return          1 on timeout, 0 if signalled
 */
int
hts_cond_wait_timeout_abs(hts_cond_t *c, hts_mutex_t *m, int64_t deadline)
{
#if defined(__APPLE__)
  // No absolute-deadline wait available here; convert to a relative wait
  int64_t delta = deadline - arch_get_ts();
  if(delta <= 0)
    return 1;
  // Round µs up to ms.  Fixed: plain truncation (delta / 1000) turned
  // any sub-millisecond remainder into a 0 ms wait, making callers
  // busy-spin until the deadline actually passed.
  return hts_cond_wait_timeout(c, m, (delta + 999) / 1000);
#else
  struct timespec ts;
  ts.tv_sec  = deadline / 1000000LL;
  ts.tv_nsec = (deadline % 1000000LL) * 1000;
  return pthread_cond_timedwait(c, m, &ts) == ETIMEDOUT;
#endif
}
static void * omx_clk_thread(void *aux) { omx_clk_cmd_t *cmd; omx_clk_t *clk = aux; int run = 1; hts_mutex_lock(&clk->mp->mp_mutex); while(run) { while((cmd = TAILQ_FIRST(&clk->q)) == NULL) { #if 0 if(hts_cond_wait_timeout(&clk->cond, &clk->mp->mp_mutex, 1000)) { hts_mutex_unlock(&clk->mp->mp_mutex); int64_t ts = omx_get_media_time(clk->c); hts_mutex_lock(&clk->mp->mp_mutex); } #else hts_cond_wait(&clk->cond, &clk->mp->mp_mutex); #endif continue; } TAILQ_REMOVE(&clk->q, cmd, link); hts_mutex_unlock(&clk->mp->mp_mutex); switch(cmd->cmd) { case OMX_CLK_QUIT: run = 0; break; case OMX_CLK_INIT: omx_clk_init(clk, cmd->arg); break; case OMX_CLK_PAUSE: omx_clk_set_speed(clk, 0); break; case OMX_CLK_PLAY: omx_clk_set_speed(clk, 1 << 16); break; case OMX_CLK_BEGIN_SEEK: omx_clk_begin_seek(clk); break; case OMX_CLK_SEEK_AUDIO_DONE: omx_clk_seek_done(clk); break; case OMX_CLK_SEEK_VIDEO_DONE: omx_clk_seek_done(clk); break; } free(cmd); hts_mutex_lock(&clk->mp->mp_mutex); } hts_mutex_unlock(&clk->mp->mp_mutex); return NULL; }