// Resume playback: wake the parked worker thread and spin until it has
// actually left the paused state.
// NOTE(review): ATOMIC_CAS(&x,v,v) with identical old/new values appears to
// be used as an atomic read (returns the previous value) — confirm against
// the macro definition.
// Compiled at -O0 so the spin-wait reads are not optimized away.
void __attribute__((optimize("O0"))) VPOutPluginDummy::resume()
{
    // Only act if the worker is running (work) and currently paused.
    if (ATOMIC_CAS(&work,true,true) && ATOMIC_CAS(&paused,false,false) ){
        m_pause.unlock();                          // let worker_run() proceed
        while (ATOMIC_CAS(&paused,false,false)) {} // spin until worker clears `paused`
    }
}
void VPlayer::stop() { // only run on control mutex locked bool locked=control.try_lock(); if (ATOMIC_CAS(&active,true,true)) { if (vpdecode){ DBG("free decoder"); ATOMIC_CAS(&work,true,false); vpout->resume(); playWorker->join(); paused=true; delete playWorker; playWorker=NULL; delete vpdecode; vpdecode = NULL; } VPEvents::getSingleton()->fire("StateChangeStopped",¤tResource,sizeof(VPResource)); active = false; } if (locked) control.unlock(); }
// Pause the output: park the worker on m_pause and spin until it reports
// that it is actually paused.
// NOTE(review): ATOMIC_CAS(&x,v,v) appears to be used as an atomic read
// (returns the previous value) — confirm against the macro definition.
// Compiled at -O0 so the spin-wait loads are not optimized away.
void __attribute__((optimize("O0"))) VPOutPluginDummy::pause()
{
    // Only act if the worker is running and not already paused.
    if (ATOMIC_CAS(&work,true,true) && !ATOMIC_CAS(&paused,false,false) ){
        m_pause.lock();                             // worker_run() will block on this
        ATOMIC_CAS(&pause_check,false,true);        // ask the worker to park
        while (!ATOMIC_CAS(&paused,false,false)) {} // spin until worker sets `paused`
    }
}
// Resume ALSA output: release the pause mutex and spin until the worker
// thread has cleared `paused`.
// NOTE(review): ATOMIC_CAS(&x,v,v) is used as an atomic read here —
// confirm against the macro definition. Built at -O0 so the spin-wait is
// not optimized away.
void __attribute__((optimize("O0"))) VPOutPluginAlsa::resume()
{
    if (ATOMIC_CAS(&paused,false,false) ){          // only if currently paused
        m_pause.unlock();                           // unblock worker_run()
        while (ATOMIC_CAS(&paused,false,false)) {}  // wait until it leaves the paused state
    }
}
// Despite the name, this performs the pause half of the handshake: grab
// m_pause, raise pause_check, then spin until the worker acknowledges by
// setting `paused`. (Presumably called before repositioning the stream —
// confirm against callers.)
// NOTE(review): ATOMIC_CAS(&x,v,v) is used as an atomic read; built at
// -O0 so the spin-wait is not optimized away.
void __attribute__((optimize("O0"))) VPOutPluginAlsa::rewind()
{
    if (!ATOMIC_CAS(&paused,false,false) ){         // only if not already paused
        m_pause.lock();                             // worker_run() will block on this
        ATOMIC_CAS(&pause_check,false,true);        // ask the worker to park
        while (!ATOMIC_CAS(&paused,false,false)) {} // spin until worker sets `paused`
    }
}
// Tear down the ALSA output plugin: stop and join the worker thread,
// remove the inotify watch, close the PCM device and free the conversion
// buffers and resampler.
VPOutPluginAlsa::~VPOutPluginAlsa()
{
    ATOMIC_CAS(&work,true,false);   // signal the worker loop to exit
    // Make sure decoders have properly ended; then mutex[0] should be locked
    // and mutex[1] unlocked from the decoder, and mutex[1] locked by the
    // output thread — we unlock it here to avoid deadlock.
    owner->mutex[1].unlock();
    resume();                       // wake the worker if it is parked in pause
    inotify_rm_watch( in_fd, in_wd[0] );
    close(in_fd);
    if (worker){
        worker->join();
        DBG("out thread joined");
        delete worker;
    }
    snd_pcm_close(handle);
    ALIGNED_FREE(out_buf);
    ALIGNED_FREE(out_buf_i);
    src_delete(rs);
}
// Decoder loop: pulls float PCM from the Ogg/Vorbis stream, interleaves it
// into `buffer`, and hands full VPBUFFER_FRAMES-sized chunks to the owner
// through the double-buffer / two-mutex handshake.
// Exits when the owner clears `work` (atomic read via no-op CAS) or when
// ov_read_float reports end-of-stream/error (ret <= 0).
void OGGDecoder::reader()
{
    float **pcm;        // per-channel (non-interleaved) planes from libvorbisfile
    long ret=1;         // frames decoded by the last ov_read_float (<=0 => EOF/error)
    int bit;            // current logical bitstream index (unused here)
    size_t j,done=0;    // j: interleave write index; done: frames gathered

    while (ATOMIC_CAS(&owner->work,true,true) && ret > 0 ){
        j=0; ret=1; done=0;
        // Gather one complete buffer of VPBUFFER_FRAMES frames.
        while (done<VPBUFFER_FRAMES && ret > 0){
            ret = ov_read_float( &vf, &pcm, VPBUFFER_FRAMES - done,&bit );
            // Interleave the channel planes into `buffer`.
            for (long i=0;i<ret;i++){
                for (size_t ch=0;ch<bout->chans;ch++){
                    buffer[j]=pcm[ch][i];
                    j++;
                }
            }
            done+=ret;  // NOTE(review): a negative ret (decode error) is added here too
        }
        memcpy(bout->buffer[*bout->cursor], buffer, VPBUFFER_FRAMES*bout->chans*sizeof(float) );
        owner->postProcess(bout->buffer[*bout->cursor]);
        // Swap buffers under mutex[0]; releasing mutex[1] lets the output
        // thread consume the freshly written buffer.
        owner->mutex[0].lock();
        VP_SWAP_BUFFERS(bout);
        owner->mutex[1].unlock();
    }
}
/*
 * Bump-allocate `alloc_size` bytes from `page` with a lock-free CAS loop
 * on the page's `base` offset.
 * Returns a pointer just past the page header at the old offset, or NULL
 * when `page` is NULL or the page does not have enough room left.
 */
static void *easy_mempool_alloc_from_page_(easy_mempool_page_t *page, int32_t page_size, int32_t alloc_size)
{
    void *ret = NULL;
    if (NULL != page) {
        volatile int32_t oldv = 0;
        volatile int32_t newv = 0;
        volatile int32_t cmpv = 0;
        while (1) {
            oldv = page->base;
            newv = oldv + alloc_size;
            cmpv = oldv;
            if (newv > page_size) {
                break;  /* not enough space left on this page */
            }
            /* ATOMIC_CAS returns the previous value; equal to oldv means we won */
            if (oldv == ATOMIC_CAS(&(page->base), cmpv, newv)) {
                ret = (char *)page + sizeof(easy_mempool_page_t) + oldv;
                break;
            }
            /* lost the race — reload and retry */
        }
    }
    return ret;
}
/*
 * Run every queued postponed job. Jobs are popped by CAS-decrementing
 * vm->postponed_job_index so concurrent registration stays consistent.
 * POSTPONED_JOB interrupts are masked for the duration and th->errinfo is
 * saved/restored so job handlers cannot leak in-flight exception state.
 */
void rb_postponed_job_flush(rb_vm_t *vm)
{
    rb_thread_t *th = GET_THREAD();
    unsigned long saved_postponed_job_interrupt_mask = th->interrupt_mask & POSTPONED_JOB_INTERRUPT_MASK;
    VALUE saved_errno = th->errinfo;

    th->errinfo = Qnil;
    /* mask POSTPONED_JOB dispatch */
    th->interrupt_mask |= POSTPONED_JOB_INTERRUPT_MASK;
    {
        TH_PUSH_TAG(th);
        EXEC_TAG();  /* NOTE(review): tag state result is ignored here */
        {
            int index;
            while ((index = vm->postponed_job_index) > 0) {
                /* claim slot index-1; CAS returning `index` means we won the pop */
                if (ATOMIC_CAS(vm->postponed_job_index, index, index-1) == index) {
                    rb_postponed_job_t *pjob = &vm->postponed_job_buffer[index-1];
                    (*pjob->func)(pjob->data);
                }
            }
        }
        TH_POP_TAG();
    }
    /* restore POSTPONED_JOB mask */
    th->interrupt_mask &= ~(saved_postponed_job_interrupt_mask ^ POSTPONED_JOB_INTERRUPT_MASK);
    th->errinfo = saved_errno;
}
/*
 * Pick the next runnable task: atomically claim the first AVAILABLE task
 * (AVAILABLE -> INPROGRESS) and return its id.
 * Returns INVALID when every active task is DONE, or WAIT when nothing is
 * claimable right now but work remains.
 * NOTE(review): here ATOMIC_CAS is used for its success/failure result —
 * confirm the macro returns nonzero on a successful swap.
 */
taskID dag_get_task(CSOUND *csound)
{
    int i;
    int morework = 0;
    int active = csound->dag_num_active;
    volatile enum state *task_status = csound->dag_task_status;
    /* printf("**GetTask from %d\n", csound->dag_num_active); */
    for (i=0; i<active; i++) {
      if (ATOMIC_CAS(&(task_status[i]), AVAILABLE, INPROGRESS)) {
        return (taskID)i;   /* claimed task i */
      }
      /* else if (ATOMIC_READ(task_status[i])==WAITING) printf("**%d waiting\n", i); */
      /* else if (ATOMIC_READ(task_status[i])==INPROGRESS) printf("**%d active\n", i); */
      else if (ATOMIC_READ(task_status[i])==DONE) {
        /* printf("**%d done\n", i); */
        morework++;
      }
    }
    /* dag_print_state(csound); */
    if (morework==active) return (taskID)INVALID;  /* whole DAG executed */
    /* printf("taskstodo=%d\n", morework); */
    return (taskID)WAIT;
}
/*
 * Atomically decrement the page slot's reference count; when it reaches
 * zero also bump the sequence number (retiring this slot generation).
 * ref_cnt and seq_num are packed into one word (`atomic`) so both fields
 * change in a single CAS.
 * Returns the new ref count, or -1 when page_meta is NULL.
 */
static int32_t easy_mempool_dec_ref_cnt_and_inc_seq_num_(easy_mempool_page_meta_t *page_meta)
{
    int32_t ret = -1;
    if (NULL != page_meta) {
        easy_mempool_atomic_t oldv = {0};
        easy_mempool_atomic_t newv = {0};
        easy_mempool_atomic_t cmpv = {0};
        while (1) {
            oldv.atomic = page_meta->atomic;
            newv.atomic = oldv.atomic;
            cmpv.atomic = oldv.atomic;
            newv.ref_cnt -= 1;
            if (0 == newv.ref_cnt) {
                newv.seq_num += 1;  /* last reference gone: retire this generation */
            }
            /* assert(0 != oldv.ref_cnt); */
            /* CAS returns the previous value; equality with oldv means we won */
            if (oldv.atomic == ATOMIC_CAS(&(page_meta->atomic), cmpv.atomic, newv.atomic)) {
                ret = newv.ref_cnt;
                break;
            }
        }
    }
    return ret;
}
// Request an asynchronous seek to position `t`.
// The reader thread consumes `seek_to` and resets it to the SEEK_MAX
// sentinel; a new request is only accepted while no other seek is pending
// (i.e. the current value, read atomically via a no-op CAS, is SEEK_MAX).
void FFMPEGDecoder::setPosition(uint64_t t)
{
    const bool seek_pending = (ATOMIC_CAS(&seek_to,SEEK_MAX,SEEK_MAX) != SEEK_MAX);
    if (!seek_pending) {
        seek_to = t;
    }
}
// Decoder loop (seek-aware variant): pulls float PCM from the Ogg/Vorbis
// stream, honors pending seek requests, interleaves the samples into
// `buffer`, zero-pads a short final chunk, and hands buffers to the owner
// through the double-buffer / two-mutex handshake.
void OGGDecoder::reader()
{
    float **pcm;        // per-channel sample planes from libvorbisfile
    long ret=1;         // frames decoded by last ov_read_float (<=0 => EOF/error)
    int bit;            // logical bitstream index (unused)
    size_t j,done=0;    // j: interleave write index; done: frames gathered

    while (ATOMIC_CAS(&owner->work,true,true) && ret > 0 ){
        j=0; ret=1; done=0;
        // Pending seek? (seek_to != SEEK_MAX means a position was requested.)
        if (ATOMIC_CAS(&seek_to,SEEK_MAX,SEEK_MAX) != SEEK_MAX ){
            if (ov_seekable(&vf))
                ov_pcm_seek(&vf,(ogg_int64_t) seek_to);
            seek_to = SEEK_MAX;  // mark the request consumed
        }
        // Gather up to VPBUFFER_FRAMES frames, interleaving the channels.
        while (done<VPBUFFER_FRAMES && ret > 0){
            ret = ov_read_float( &vf, &pcm, VPBUFFER_FRAMES - done,&bit );
            for (long i=0;i<ret;i++){
                for (size_t ch=0;ch<bout->chans;ch++){
                    buffer[j]=(float)pcm[ch][i];
                    j++;
                }
            }
            done+=ret;
        }
        int samples= done*bout->chans;
        memcpy(bout->currentBuffer(), buffer,samples*sizeof(float) );
        // Zero-pad the tail on a short (final) read.
        for (int i=samples;i<VPBUFFER_FRAMES*bout->chans;i++){
            bout->currentBuffer()[i]=0.0f;
        }
        owner->postProcess();
        owner->mutex[0].lock();
        VP_SWAP_BUFFERS(bout);
        owner->mutex[1].unlock();
    }
}
// Michael-Scott-style lock-free dequeue using a 128-bit (pointer, sequence)
// CAS to avoid ABA. Copies the front value into *pdata (if non-NULL) and
// returns true; returns false when the queue is empty.
bool dequeue(DataT* pdata)
{
    while (true) {
        QueuePointerWrapper<DataT> head = this->head_;
        QueuePointerWrapper<DataT> tail = this->tail_;
        QueuePointerWrapper<DataT> next = head.real.ptr->next;
        QueuePointerWrapper<DataT> pointer;
        pointer.real.ptr = next.real.ptr;
        if (head.real == this->head_.real) {    // snapshot still consistent?
            if (head.real.ptr == tail.real.ptr) // empty
            {
                if (next.real.ptr == NULL)
                    return false;
                // tail is lagging: help advance it, then retry
                pointer.real.seq = tail.real.seq + 1;
                ATOMIC_CAS(&this->tail_.w128bit, tail.w128bit, pointer.w128bit);
            }
            else {
                // Read before CAS
                // Otherwise, another dequeue might free the next node
                if (pdata != NULL)
                    *pdata = next.real.ptr->value;
                pointer.real.seq = head.real.seq + 1;
                if (ATOMIC_CAS(&this->head_.w128bit, head.w128bit, pointer.w128bit)) {
                    // delete safe: we exclusively own the detached node
                    assert(head.real.ptr != NULL);
                    delete head.real.ptr;
                    break;
                }
            }
        }
    }
    return true;
}
/*
 * Drop one reference to the page at `page_pos`; the thread releasing the
 * last reference also detaches the page from its meta slot (CAS to NULL)
 * and returns it to the pool.
 */
static void easy_mempool_deref_page_(easy_mempool_t *pool, int32_t page_pos)
{
    if (NULL != pool && pool->page_num > page_pos) {
        easy_mempool_page_t *tmp_page = pool->page_metas[page_pos].page;
        if (0 == easy_mempool_dec_ref_cnt_and_inc_seq_num_(&(pool->page_metas[page_pos]))) {
            /* last ref released: only the winner of this CAS frees the page */
            if (tmp_page == ATOMIC_CAS(&(pool->page_metas[page_pos].page), tmp_page, NULL)) {
                easy_mempool_free_page_(pool, tmp_page);
            }
        }
    }
}
// Michael-Scott-style lock-free enqueue using a 128-bit (pointer, sequence)
// CAS to avoid ABA: link the new node after the current last node, then
// swing tail_ to it (helping a lagging tail along the way).
void enqueue(const DataT &data)
{
    QueueNode<DataT> *node = new QueueNode<DataT>(); // Allocate new node, TODO: from free list
    node->value = data;                              // copy data into node
    node->next.real.ptr = NULL;
    QueuePointerWrapper<DataT> pointer;
    pointer.real.ptr = node;
    QueuePointerWrapper<DataT> tail = this->tail_;   // read ptr and seq together
    QueuePointerWrapper<DataT> next = tail.real.ptr->next;
    while (true)  // keep trying until enqueue done
    {
        if (tail.real == this->tail_.real)  // tail and next consistent ?
        {
            pointer.real.seq = next.real.seq + 1;  // update seq
            if (next.real.ptr == NULL)  // last one ?
            {
                // try to link node to last one
                if (ATOMIC_CAS(&(tail.real.ptr->next.w128bit), next.w128bit, pointer.w128bit))
                    break;  // done, exit loop
            }
            else
            {
                // tail is lagging: help move this->tail_ to next one
                QueuePointerWrapper<DataT> next_pointer;
                next_pointer.real.ptr = next.real.ptr;
                next_pointer.real.seq = tail.real.seq + 1;
                ATOMIC_CAS(&this->tail_.w128bit, tail.w128bit, next_pointer.w128bit);
            }
        }
        // update tail and next
        tail = this->tail_;
        next = tail.real.ptr->next;
    }
    // try to swing this->tail to the enqueued node;
    // (failure is fine: another thread already helped it forward)
    pointer.real.seq = tail.real.seq + 1;
    ATOMIC_CAS(&this->tail_.w128bit, tail.w128bit, pointer.w128bit);
}
void RWSessionCtx::kill() { if (ST_ALIVE != ATOMIC_CAS(&stat_, ST_ALIVE, ST_KILLING)) { TBSYS_LOG(WARN, "session will not be killed sd=%u stat=%d session_start_time=%ld stmt_start_time=%ld session_timeout=%ld stmt_timeout=%ld", get_session_descriptor(), stat_, get_session_start_time(), get_stmt_start_time(), get_session_timeout(), get_stmt_timeout()); } else { TBSYS_LOG(INFO, "session is being killed sd=%u stat=%d session_start_time=%ld stmt_start_time=%ld session_timeout=%ld stmt_timeout=%ld", get_session_descriptor(), stat_, get_session_start_time(), get_stmt_start_time(), get_session_timeout(), get_stmt_timeout()); alive_flag_ = false; } }
/*
 * Read one value from the ring buffer, or NULL if nothing is readable.
 * Single-reader-at-a-time is enforced with read_sync (CAS 0->1 acts as a
 * try-lock, CAS 1->0 releases it). The reader owns a spare page
 * (rb->reader) that gets spliced into the ring in place of the consumed
 * head page, which then becomes the new spare.
 * NOTE(review): these ATOMIC_CAS calls are used for their boolean
 * success result — confirm against the macro definition.
 */
void *rqueue_read(rqueue_t *rb)
{
    int i;
    void *v = NULL;

    for (i = 0; i < RQUEUE_MAX_RETRIES; i++) {
        if (__builtin_expect(ATOMIC_CAS(rb->read_sync, 0, 1), 1)) {  /* acquire reader lock */
            rqueue_page_t *head = ATOMIC_READ(rb->head);
            rqueue_page_t *commit = ATOMIC_READ(rb->commit);
            rqueue_page_t *tail = ATOMIC_READ(rb->tail);
            rqueue_page_t *next = ATOMIC_READ(head->next);
            rqueue_page_t *old_next = ATOMIC_READ(rb->reader->next);

            if (rb->reader == commit || (head == tail && commit != tail) || ATOMIC_READ(rb->writes) == 0) {
                // nothing to read
                ATOMIC_CAS(rb->read_sync, 1, 0);  /* release lock and give up */
                break;
            }

            /* point our spare page at head's successor (marked as HEAD) */
            if (ATOMIC_CAS(rb->reader->next, old_next, RQUEUE_FLAG_ON(next, RQUEUE_FLAG_HEAD))) {
                rb->reader->prev = head->prev;
                /* splice the spare page into the ring in place of head */
                if (ATOMIC_CAS(head->prev->next, RQUEUE_FLAG_ON(head, RQUEUE_FLAG_HEAD), rb->reader)) {
                    ATOMIC_CAS(rb->head, head, next);
                    next->prev = rb->reader;
                    rb->reader = head;  /* the detached page becomes the new spare */
                    /*
                    rb->reader->next = next;
                    rb->reader->prev = next->prev;
                    */
                    v = ATOMIC_READ(rb->reader->value);
                    ATOMIC_CAS(rb->reader->value, v, NULL);  /* take ownership of the value */
                    ATOMIC_INCREMENT(rb->reads);
                    ATOMIC_CAS(rb->read_sync, 1, 0);  /* release reader lock */
                    break;
                } else {
                    fprintf(stderr, "head swap failed\n");
                }
            } else {
                fprintf(stderr, "reader->next swap failed\n");
            }
            ATOMIC_CAS(rb->read_sync, 1, 0);  /* release reader lock and retry */
        }
    }
    return v;
}
/*
 * Mark task i DONE and notify every task that was watching it: a watcher
 * whose remaining dependencies are all DONE becomes AVAILABLE; otherwise
 * it is re-parked (moveWatch) on the first still-unfinished dependency.
 * The watch list is detached atomically by swapping in the DoNotRead
 * sentinel so late watchers cannot join a list being consumed.
 */
void dag_end_task(CSOUND *csound, taskID i)
{
    watchList *to_notify, *next;
    int canQueue;
    int j, k;
    watchList * volatile *task_watch = csound->dag_task_watch;
    ATOMIC_WRITE(csound->dag_task_status[i], DONE); /* as DONE is zero */
    { /* ATOMIC_SWAP: detach the watch list, sealing the slot with DoNotRead */
      do {
        to_notify = ATOMIC_READ(task_watch[i]);
      } while (!ATOMIC_CAS(&task_watch[i],to_notify,&DoNotRead));
    }
    /* to_notify = ATOMIC_SWAP(task_watch[i], &DoNotRead); */
    next = to_notify;
    while (to_notify) {       /* walk the list of watchers */
      next = to_notify->next;
      j = to_notify->id;      /* watcher task id */
      canQueue = 1;
      for (k=0; k<j; k++) {   /* seek next watch: scan j's dependencies */
        if (csound->dag_task_dep[j][k]==0) continue;  /* j does not depend on k */
        if (ATOMIC_READ(csound->dag_task_status[k]) != DONE) {
          /* dependency k unfinished: try to re-park watcher j on task k */
          if (moveWatch(csound, &task_watch[k], to_notify)) {
            canQueue = 0;
            break;
          }
          else {
            /* moveWatch refused: k finished while we looked (benign race);
               keep scanning the remaining dependencies */
          }
        }
      }
      if (canQueue) {         /* could use monitor here */
        csound->dag_task_status[j] = AVAILABLE;
      }
      to_notify = next;
    }
    /* dag_print_state(csound); */
    return;
}
// Playback worker thread body: runs the decoder until the track ends, then
// either chains to the next resource (continuous playback) or tears down
// the decoder and this worker and exits.
void VPlayer::playWork(VPlayer *self)
{
    self->vpout->wakeup();
    while (1) {
        self->vpdecode->reader();   // blocks until the track finishes or work is cleared
        ATOMIC_CAS(&self->active,true,false);
        DBG("waiting for output writing to finish");
        // The output thread holds mutex[0] while draining; this lock/unlock
        // pair is used purely as a barrier to wait for it.
        self->mutex[0].lock();
        self->mutex[0].unlock();
        DBG("done");
        self->nextResource.setURL("");
        if ( self->work) {
            VPEvents::getSingleton()->fire("StateChangeFinished",&self->currentResource,sizeof(VPResource));
            VPEvents::getSingleton()->fire("GrabNext",&self->nextResource,sizeof(VPResource));
            if (self->nextResource.getURL().size() > 0) {
                // keep asking for the next track until one opens successfully
                while ( self->open(self->nextResource,true) < 0 ) {
                    DBG("drop due to failure to open");
                    VPEvents::getSingleton()->fire("GrabNext",&self->nextResource,sizeof(VPResource));
                }
                DBG("new track arrived");
            } else {
                // playlist exhausted: release decoder and worker, exit thread
                delete self->vpdecode;
                self->vpdecode = NULL;
                delete self->playWorker;
                self->playWorker = NULL;
                break;
            }
        } else {
            // stop requested: release decoder and worker, exit thread
            delete self->vpdecode;
            self->vpdecode = NULL;
            delete self->playWorker;
            self->playWorker = NULL;
            break;
        }
    }
    self->vpout->idle();
    DBG("play worker dying");
}
// Tear down the dummy output plugin: stop and join the worker thread, then
// free the output buffer and resampler.
VPOutPluginDummy::~VPOutPluginDummy()
{
    // make sure decoders are finished before calling
    ATOMIC_CAS(&work,true,false);   // signal the worker loop to exit
    // Make sure decoders have properly ended; then mutex[0] should be locked
    // and mutex[1] unlocked from the decoder, and mutex[1] locked by the
    // output thread — we unlock it here to avoid deadlock.
    owner->mutex[1].unlock();
    resume();                       // wake the worker if it is parked in pause
    if (worker){
        worker->join();
        DBG("out thread joined");
        delete worker;
    }
    free(out_buf);
    src_delete(rs);
}
/*
 * Atomically push watcher `t` onto the watch list *w (CAS retry loop).
 * Returns 0 without linking when the list has been sealed with the
 * DoNotRead sentinel (the watched task already finished); 1 on success.
 */
inline static int moveWatch(CSOUND *csound, watchList * volatile *w, watchList *t)
{
    watchList *local=*w;
    t->next = NULL;
    do {
      local = ATOMIC_READ(*w);
      if (local==&DoNotRead) {
        return 0;              /* list sealed; caller must treat the task as DONE */
      }
      else t->next = local;    /* link in front of the current head */
    } while (!ATOMIC_CAS(w,local,t));
    return 1;
}
/*
 * Return a non-negative 31-bit random value read from /dev/urandom, or
 * mq_errno() on failure. The urandom fd is opened once and cached in a
 * static; a CAS publishes it so a racing initializer closes its spare fd.
 */
int32_t set_rand(void)
{
    /* this fd will not close */
    static volatile int fd = -1;
    int32_t value = 0;
    int tmp = 0;
    if(ATOMIC_GET(fd) == -1) {
        tmp = open("/dev/urandom", O_RDONLY);
        if(tmp == -1) {
            return mq_errno();
        }
        /* CAS returns the previous value: != -1 means another thread won */
        if(ATOMIC_CAS(fd, -1, tmp) != -1) {
            close(tmp);
        }
    }
    /* mask the sign bit so the result is always non-negative */
    return (read(fd, &value, sizeof(value)) == sizeof(value)) ? (value & 0x7FFFFFFF) : mq_errno();
}
void ThreadPoolImpl::pokeIdleThread()
{
    /* Find a bit in the sleeping thread bitmap and poke it awake, do
     * not give up until a thread is awakened or all of them are awake */
    for (int i = 0; i < m_numSleepMapWords; i++)
    {
        uint64_t oldval = m_sleepMap[i];
        while (oldval)
        {
            unsigned long id;
            CTZ64(id, oldval);  // index of the lowest set bit = a sleeping thread

            uint64_t newval = oldval & ~(1LL << id);  // clear that thread's bit
            // CAS returns the previous value; equality means we claimed the bit
            if (ATOMIC_CAS(&m_sleepMap[i], oldval, newval) == oldval)
            {
                m_threads[(i << 6) | id].poke();  // 64 threads per map word
                return;
            }
            oldval = m_sleepMap[i];  // lost the race — reload and retry
        }
    }
}
// Dummy output worker: consumes decoded buffers through the resampler and
// discards the result. Mirrors the ALSA worker's handshake without
// touching real hardware.
// NOTE(review): ATOMIC_CAS(&x,v,v) is used as an atomic read throughout —
// confirm against the macro definition.
void VPOutPluginDummy::worker_run(VPOutPluginDummy *self)
{
    int ret;  // NOTE(review): unused (no device write) — kept for parity with the ALSA worker
    unsigned out_frames=0;
    unsigned chans=self->bin->chans;
    while (ATOMIC_CAS(&self->work,true,true)){          // atomic read of the run flag
        if (ATOMIC_CAS(&self->pause_check,true,true)) { // pause requested?
            ATOMIC_CAS(&self->paused,false,true);       // acknowledge: we are parked
            self->m_pause.lock();    // blocks here until resume() releases it
            self->m_pause.unlock();
            ATOMIC_CAS(&self->paused,true,false);
            ATOMIC_CAS(&self->pause_check,true,false);
            if (!ATOMIC_CAS(&self->work,false,false)) { // stopped while paused?
                break;
            }
        }
        self->rd.end_of_input = 0;
        self->rd.data_out = self->out_buf;
        self->rd.input_frames = VPBUFFER_FRAMES;
        self->rd.output_frames = self->out_frames;
        self->rd.output_frames_gen = 1;
        out_frames=0;
        // the decoder releases mutex[1] when a fresh buffer is ready
        self->owner->mutex[1].lock();
        self->rd.data_in = self->bin->buffer[1-*self->bin->cursor];
        while (self->rd.output_frames_gen) {  // drain the resampler
            src_process(self->rs,&self->rd);
            self->rd.input_frames -= self->rd.input_frames_used;
            self->rd.data_in += self->rd.input_frames_used*chans;
            out_frames+=self->rd.output_frames_gen;
        }
        self->owner->mutex[0].unlock();  // buffer consumed; decoder may refill
    }
}
/*
 * Reserve slot `expected_index` in the VM's postponed-job buffer and fill
 * it in. Returns PJRR_FULL when the buffer has no room, PJRR_INTERRUPTED
 * when another thread changed the index concurrently (caller retries),
 * and PJRR_SUCESS once the job is queued and the interrupt flag raised.
 */
static enum postponed_job_register_result
postponed_job_register(rb_thread_t *th, rb_vm_t *vm,
                       unsigned int flags, rb_postponed_job_func_t func, void *data,
                       int max, int expected_index)
{
    rb_postponed_job_t *pjob;

    if (expected_index >= max) return PJRR_FULL; /* failed */

    /* Claim the slot by CAS-bumping the index; a mismatched return value
     * means another thread raced us and the caller must retry. */
    if (ATOMIC_CAS(vm->postponed_job_index, expected_index, expected_index+1) != expected_index) {
        return PJRR_INTERRUPTED;
    }

    pjob = &vm->postponed_job_buffer[expected_index];
    pjob->flags = flags;
    pjob->th = th;
    pjob->func = func;
    pjob->data = data;

    RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(th);

    return PJRR_SUCESS;
}
/*
 * Return the pool's current allocation page (pinned with one reference)
 * if it has at least `ensure_size` bytes free; otherwise try to rotate
 * cur_page_pos to an unused meta slot. On success *page_pos receives the
 * slot index and the caller holds the reference. Returns NULL when no
 * usable page could be obtained.
 */
static easy_mempool_page_t *easy_mempool_get_cur_page_(easy_mempool_t *pool, int32_t ensure_size, int32_t *page_pos)
{
    easy_mempool_page_t *ret = NULL;
    if (NULL != pool) {
        volatile int32_t oldv = -1;
        volatile int32_t newv = -1;
        volatile int32_t cmpv = -1;
        easy_mempool_page_t *cur_page = NULL;
        /* retry until cur_page_pos is stable across one full attempt */
        while (oldv != pool->cur_page_pos) {
            oldv = pool->cur_page_pos;
            newv = oldv;
            cmpv = oldv;
            ATOMIC_INC(&(pool->page_metas[oldv].ref_cnt));  /* pin the slot */
            if (NULL == pool->page_metas[oldv].page) {
                /* slot empty: allocate a page and try to install it */
                easy_mempool_page_t *tmp_page = easy_mempool_alloc_page_(pool);
                if (NULL != tmp_page) {
                    if (NULL != ATOMIC_CAS(&(pool->page_metas[oldv].page), NULL, tmp_page)) {
                        /* someone else installed first — drop ours */
                        easy_mempool_free_page_(pool, tmp_page);
                    }
                }
            }
            if (NULL == (cur_page = pool->page_metas[oldv].page)) {
                easy_mempool_deref_page_(pool, oldv);  /* unpin; allocation failed */
                break;
            }
            if ((pool->page_size - cur_page->base) < ensure_size) {
                /* current page too full: unpin it and look for a fresh slot */
                int32_t base = cur_page->base;
                easy_mempool_deref_page_(pool, oldv);
                if (0 == base) {
                    break;  /* even an empty page cannot satisfy the request */
                }
                int32_t counter = 0;
                while (++counter < pool->page_num) {
                    newv = (newv + 1) % pool->page_num;
                    /* claim a slot with ref_cnt 0 (CAS 0 -> 1 pins it) */
                    if (0 == ATOMIC_CAS(&(pool->page_metas[newv].ref_cnt), 0, 1)) {
                        if (oldv == ATOMIC_CAS(&(pool->cur_page_pos), cmpv, newv)) {
                            /* rotation won: release the old current slot —
                             * presumably its standing "current page" ref */
                            easy_mempool_deref_page_(pool, oldv);
                        } else {
                            easy_mempool_deref_page_(pool, newv);  /* lost the rotation race */
                        }
                        break;
                    }
                }
            } else {
                *page_pos = oldv;  /* caller keeps the reference taken above */
                ret = cur_page;
                break;
            }
        }
    }
    return ret;
}
/*
 * Write `value` into the ring buffer. Multiple writers may race: each
 * claims a tail page by CAS, temporarily flips a HEAD-adjacent next
 * pointer to the UPDATE flag while bookkeeping happens, swaps the value
 * in, then restores the flags. Returns 0 on success, -1 when no page
 * could be claimed within RQUEUE_MAX_RETRIES, -2 when the ring is full
 * (blocking mode). The last active writer also advances rb->commit.
 */
int rqueue_write(rqueue_t *rb, void *value) {
    int retries = 0;
    int did_update = 0;
    int did_move_head = 0;

    rqueue_page_t *temp_page = NULL;
    rqueue_page_t *next_page = NULL;
    rqueue_page_t *tail = NULL;
    rqueue_page_t *head = NULL;
    rqueue_page_t *commit;

    ATOMIC_INCREMENT(rb->num_writers);   /* register as an active writer */

    /* claim a tail page to write into */
    do {
        temp_page = ATOMIC_READ(rb->tail);
        commit = ATOMIC_READ(rb->commit);
        next_page = RQUEUE_FLAG_OFF(ATOMIC_READ(temp_page->next), RQUEUE_FLAG_ALL);
        head = ATOMIC_READ(rb->head);
        if (rb->mode == RQUEUE_MODE_BLOCKING) {
            if (temp_page == commit && next_page == head) {
                if (ATOMIC_READ(rb->writes) - ATOMIC_READ(rb->reads) != 0) {
                    /* ring full: no buffer space */
                    if (ATOMIC_READ(rb->num_writers) == 1)
                        ATOMIC_CAS(rb->commit, ATOMIC_READ(rb->commit), ATOMIC_READ(rb->tail));
                    ATOMIC_DECREMENT(rb->num_writers);
                    return -2;
                }
            } else if (next_page == head) {
                if (ATOMIC_READ(rb->num_writers) == 1) {
                    /* sole writer: safe to reuse the page just before head */
                    tail = temp_page;
                    break;
                } else {
                    if (ATOMIC_READ(rb->num_writers) == 1)
                        ATOMIC_CAS(rb->commit, ATOMIC_READ(rb->commit), ATOMIC_READ(rb->tail));
                    ATOMIC_DECREMENT(rb->num_writers);
                    return -2;
                }
            }
        }
        /* try to advance rb->tail; returns the previous tail on success */
        tail = ATOMIC_CAS_RETURN(rb->tail, temp_page, next_page);
    } while (tail != temp_page && !(RQUEUE_CHECK_FLAG(ATOMIC_READ(tail->next), RQUEUE_FLAG_UPDATE)) && retries++ < RQUEUE_MAX_RETRIES);

    if (!tail) {
        /* never managed to claim a page */
        if (ATOMIC_READ(rb->num_writers) == 1)
            ATOMIC_CAS(rb->commit, ATOMIC_READ(rb->commit), ATOMIC_READ(rb->tail));
        ATOMIC_DECREMENT(rb->num_writers);
        return -1;
    }

    rqueue_page_t *nextp = RQUEUE_FLAG_OFF(ATOMIC_READ(tail->next), RQUEUE_FLAG_ALL);

    /* if our page points at the head page, flip HEAD -> UPDATE while we work */
    if (ATOMIC_CAS(tail->next, RQUEUE_FLAG_ON(nextp, RQUEUE_FLAG_HEAD), RQUEUE_FLAG_ON(nextp, RQUEUE_FLAG_UPDATE))) {
        did_update = 1;
        if (rb->mode == RQUEUE_MODE_OVERWRITE) {
            /* we need to advance the head if in overwrite mode ...otherwise we must stop */
            rqueue_page_t *nextpp = RQUEUE_FLAG_OFF(ATOMIC_READ(nextp->next), RQUEUE_FLAG_ALL);
            if (ATOMIC_CAS(nextp->next, nextpp, RQUEUE_FLAG_ON(nextpp, RQUEUE_FLAG_HEAD))) {
                if (ATOMIC_READ(rb->tail) != next_page) {
                    /* tail moved meanwhile: undo the HEAD mark */
                    ATOMIC_CAS(nextp->next, RQUEUE_FLAG_ON(nextpp, RQUEUE_FLAG_HEAD), nextpp);
                } else {
                    ATOMIC_CAS(rb->head, head, nextpp);
                    did_move_head = 1;
                }
            }
        }
    }

    /* publish the value, releasing any value being overwritten */
    void *old_value = ATOMIC_READ(tail->value);
    ATOMIC_CAS(tail->value, old_value, value);
    if (old_value && rb->free_value_cb)
        rb->free_value_cb(old_value);

    if (did_update) {
        /* restore the next-pointer flags now that the write is complete */
        ATOMIC_CAS(tail->next, RQUEUE_FLAG_ON(nextp, RQUEUE_FLAG_UPDATE),
                   did_move_head
                   ? RQUEUE_FLAG_OFF(nextp, RQUEUE_FLAG_ALL)
                   : RQUEUE_FLAG_ON(nextp, RQUEUE_FLAG_HEAD));
    }

    ATOMIC_INCREMENT(rb->writes);
    /* last writer standing commits the tail */
    if (ATOMIC_READ(rb->num_writers) == 1)
        ATOMIC_CAS(rb->commit, ATOMIC_READ(rb->commit), tail);
    ATOMIC_DECREMENT(rb->num_writers);
    return 0;
}
// ALSA output worker: waits for decoded buffers, resamples them, converts
// the float samples to interleaved integers and writes them to the PCM
// device. Pausing is cooperative via pause_check/paused and m_pause.
void VPOutPluginAlsa::worker_run(VPOutPluginAlsa *self)
{
    int ret;
    unsigned out_frames=0;
    unsigned chans=self->bin->chans;
    int *out_buf_i=self->out_buf_i;     // integer buffer handed to ALSA
    float *out_buf=self->out_buf;       // resampler output
    int multiplier = self->multiplier;  // float -> int scale factor
    float *buffer[2];
    buffer[0] = self->bin->buffer[0];
    buffer[1] = self->bin->buffer[1];
    // NOTE(review): rfds/tv are prepared but never used below (a select()
    // call appears to have been removed); kept as-is.
    fd_set rfds;
    struct timeval tv;
    tv.tv_sec = 1;
    tv.tv_usec = 0;
    FD_ZERO(&rfds);
    FD_SET(self->in_fd, &rfds);
    // pollfd pfd = { self->in_fd, POLLIN, 5 };
    while (ATOMIC_CAS(&self->work,true,true)){          // atomic read of the run flag
        if (ATOMIC_CAS(&self->pause_check,true,true)) { // pause requested?
            ATOMIC_CAS(&self->paused,false,true);       // acknowledge pause
            snd_pcm_pause(self->handle,true);
            self->m_pause.lock();    // blocks here until resume() releases it
            self->m_pause.unlock();
            snd_pcm_pause(self->handle,false);
            ATOMIC_CAS(&self->paused,true,false);
            ATOMIC_CAS(&self->pause_check,true,false);
            if (!ATOMIC_CAS(&self->work,false,false)) { // stopped while paused?
                break;
            }
        }
        self->rd.end_of_input = 0;
        self->rd.data_out = self->out_buf;
        self->rd.output_frames = self->out_frames;
        self->rd.output_frames_gen = 1;
        out_frames=0;
        // the decoder releases mutex[1] when a fresh buffer is ready
        self->owner->mutex[1].lock();
        self->rd.input_frames = VPBUFFER_FRAMES ;
        self->rd.data_in = self->bin->nextBuffer();
        while (self->rd.output_frames_gen) {  // drain the resampler
            src_process(self->rs,&self->rd);
            self->rd.input_frames -= self->rd.input_frames_used;
            self->rd.data_in += self->rd.input_frames_used*chans;
            out_frames+=self->rd.output_frames_gen;
        }
        // float [-1,1] -> clipped, scaled integer samples
        for (int i=0;i<out_frames*chans;i++){
            out_buf_i[i]=CLIP(out_buf[i])*multiplier;
        }
        ret = snd_pcm_writei(self->handle, out_buf_i, out_frames);
        self->owner->mutex[0].unlock();  // buffer consumed; decoder may refill
        if (ret == -EPIPE || ret == -EINTR || ret == -ESTRPIPE){
            // underrun / interrupted / suspended: try to recover the PCM
            DBG("trying to recover");
            if ( snd_pcm_recover(self->handle, ret, 0) < 0 ) {
                DBG("recover failed for "<<ret);
            }
        } else if (ret < 0 && ret != -EAGAIN){
            DBG("write error "<<ret);
        }
    }
}
// FFmpeg decoder loop: reads packets, decodes audio frames, converts the
// supported sample formats to interleaved float in the owner's
// double-buffer, and spills overflow samples into `remainder` so they
// lead the next buffer. Honors asynchronous seek requests via seek_to.
void FFMPEGDecoder::reader()
{
    int frameFinished=0,packetFinished=0;
    int plane_size;
    int vpbuffer_write=0;                               // write index into current output buffer
    int vpbuffer_samples=(VPBUFFER_FRAMES)*bout->chans; // samples per output buffer
    int remainder_write=0;                              // samples carried over from the last frame
    int remainder_read=0;
    av_init_packet(&packet);
    AVFrame *frame=avcodec_alloc_frame();
    current_in_seconds=0;
    while (ATOMIC_CAS(&owner->work,true,true) && packetFinished >=0) {
        vpbuffer_write=0;
        // First flush any samples carried over from the previous iteration.
        if (remainder_write>0) {
            remainder_read = 0;
            // Emit as many full buffers as the remainder holds.
            for (int nn=0;nn<remainder_write / vpbuffer_samples; nn++) {
                vpbuffer_write=0;
                while (vpbuffer_write < vpbuffer_samples){
                    bout->buffer[*bout->cursor][vpbuffer_write]=remainder[remainder_read];
                    remainder_read++;
                    vpbuffer_write++;
                }
                owner->postProcess();
                owner->mutex[0].lock();
                VP_SWAP_BUFFERS(bout);
                owner->mutex[1].unlock();
            }
            // The partial leftover starts the next buffer.
            vpbuffer_write=0;
            while (remainder_read < remainder_write) {
                bout->buffer[*bout->cursor][vpbuffer_write]=remainder[remainder_read];
                vpbuffer_write++;
                remainder_read++;
            }
        }
        remainder_write=0;
        // Decode packets until the output buffer is full or input ends.
        while(vpbuffer_write < vpbuffer_samples && packetFinished>=0 ) {
            packetFinished = av_read_frame(container,&packet);
            if (packetFinished < 0){
                break;  // end of stream or read error
            }
            // Pending seek request? (seek_to != SEEK_MAX)
            if (ATOMIC_CAS(&seek_to,SEEK_MAX,SEEK_MAX) != SEEK_MAX ){
                int ret=av_seek_frame(container,audio_stream_id,seek_to *audio_st->time_base.den / audio_st->time_base.num ,AVSEEK_FLAG_ANY);
                if (ret<0) {
                    DBG("seek failed");
                } else {
                    current_in_seconds=seek_to;
                    avcodec_flush_buffers(ctx);  // drop stale decoder state
                }
                seek_to = SEEK_MAX;  // mark the request consumed
            }
            if(packet.stream_index==audio_stream_id){
                avcodec_decode_audio4(ctx,frame,&frameFinished,&packet);
                av_samples_get_buffer_size(&plane_size, ctx->channels, frame->nb_samples, ctx->sample_fmt, 1);
                if(frameFinished){
                    current_in_seconds = ( audio_st->time_base.num * frame->pkt_pts )/ audio_st->time_base.den ;
                    // Convert to interleaved float; overflow goes to `remainder`.
                    switch (sfmt){
                    case AV_SAMPLE_FMT_S16P:  // planar signed 16-bit
                        for (int nb=0;nb<plane_size/sizeof(uint16_t);nb++){
                            for (int ch = 0; ch < ctx->channels; ch++) {
                                if (vpbuffer_write< vpbuffer_samples){
                                    bout->currentBuffer()[vpbuffer_write]= ((short *) frame->extended_data[ch])[nb] * SHORTTOFL;
                                    vpbuffer_write++;
                                } else {
                                    remainder[remainder_write] = ((short *) frame->extended_data[ch])[nb] * SHORTTOFL;
                                    remainder_write++;
                                }
                            }
                        }
                        break;
                    case AV_SAMPLE_FMT_FLTP:  // planar float
                        for (int nb=0;nb<plane_size/sizeof(float);nb++){
                            for (int ch = 0; ch < ctx->channels; ch++) {
                                if (vpbuffer_write< vpbuffer_samples){
                                    bout->currentBuffer()[vpbuffer_write]= ((float *) frame->extended_data[ch])[nb];
                                    vpbuffer_write++;
                                } else {
                                    remainder[remainder_write] = ((float *) frame->extended_data[ch])[nb];
                                    remainder_write++;
                                }
                            }
                        }
                        break;
                    case AV_SAMPLE_FMT_S16:   // interleaved signed 16-bit
                        for (int nb=0;nb<plane_size/sizeof(short);nb++){
                            if (vpbuffer_write< vpbuffer_samples){
                                bout->currentBuffer()[vpbuffer_write]= ((short *) frame->extended_data[0])[nb] * SHORTTOFL ;
                                vpbuffer_write++;
                            } else {
                                remainder[remainder_write] = ((short *) frame->extended_data[0])[nb] * SHORTTOFL;
                                remainder_write++;
                            }
                        }
                        break;
                    case AV_SAMPLE_FMT_FLT:   // interleaved float
                        for (int nb=0;nb<plane_size/sizeof(float);nb++){
                            if (vpbuffer_write< vpbuffer_samples){
                                bout->currentBuffer()[vpbuffer_write]= ((float *) frame->extended_data[0])[nb] ;
                                vpbuffer_write++;
                            } else {
                                remainder[remainder_write] = ((float *) frame->extended_data[0])[nb];
                                remainder_write++;
                            }
                        }
                        break;
                    case AV_SAMPLE_FMT_U8P:   // planar unsigned 8-bit
                        // NOTE(review): reads plane [0] for every ch — looks
                        // like it should read extended_data[ch]; confirm.
                        for (int nb=0;nb<plane_size/sizeof(uint8_t);nb++){
                            for (int ch = 0; ch < ctx->channels; ch++) {
                                if (vpbuffer_write< vpbuffer_samples){
                                    bout->currentBuffer()[vpbuffer_write]= ( ( ((uint8_t *) frame->extended_data[0])[nb] - 127) * 32768 )/ 127 ;
                                    vpbuffer_write++;
                                } else {
                                    remainder[remainder_write] = ( ( ((uint8_t *) frame->extended_data[0])[nb] - 127) * 32768 )/ 127 ;
                                    remainder_write++;
                                }
                            }
                        }
                        break;
                    case AV_SAMPLE_FMT_U8:    // interleaved unsigned 8-bit
                        for (int nb=0;nb<plane_size/sizeof(uint8_t);nb++){
                            if (vpbuffer_write< vpbuffer_samples){
                                bout->currentBuffer()[vpbuffer_write]= ( ( ((uint8_t *) frame->extended_data[0])[nb] - 127) * 32768 )/ 127 ;
                                vpbuffer_write++;
                            } else {
                                remainder[remainder_write] = ( ( ((uint8_t *) frame->extended_data[0])[nb] - 127) * 32768 )/ 127 ;
                                remainder_write++;
                            }
                        }
                        break;
                    default:
                        WARN("PCM type not supported");
                    }
                } else {
                    DBG("frame failed");
                }
            }
        }
        av_free_packet(&packet);
        owner->postProcess();
        // Hand the filled buffer to the output thread.
        owner->mutex[0].lock();
        VP_SWAP_BUFFERS(bout);
        owner->mutex[1].unlock();
        last_read=time(NULL);
    }
    avcodec_free_frame(&frame);
}