/* Read up to 'length' bytes from circular buffer 'cb' into 'out'.
 * When 'block' is true and no data has been read yet, the caller sleeps
 * (interruptibly) until a writer produces data or EOF is signalled.
 * Returns the number of bytes actually copied (0 on EOF). */
size_t charbuffer_read(struct charbuffer *cb, unsigned char *out, size_t length, bool block)
{
	size_t i = 0;
	/* LOCKLESS buffers skip the mutex entirely
	 * (NOTE(review): presumably single-producer/single-consumer — confirm). */
	if(!(cb->flags & CHARBUFFER_LOCKLESS))
		mutex_acquire(&cb->lock);
	while(i < length) {
		if(cb->count > 0) {
			/* Consume one byte; tail grows monotonically and is reduced
			 * modulo the capacity on every access. */
			*out++ = cb->buffer[cb->tail++ % cb->cap];
			cb->count--;
			i++;
		} else if(i == 0 && block) {
			/* Nothing read yet: consume a pending EOF marker, if any. */
			if(atomic_exchange(&cb->eof, 0)) {
				if(!(cb->flags & CHARBUFFER_LOCKLESS))
					mutex_release(&cb->lock);
				return 0;
			}
			/* no data - block */
			tm_blocklist_wakeall(&cb->writers);
			/* __release_lock drops cb->lock once we are queued, closing the
			 * lost-wakeup window between the emptiness check and sleeping. */
			int r = tm_thread_block_confirm(&cb->readers, THREADSTATE_INTERRUPTIBLE,
					__release_lock, cb);
			if(r != 0)
				return i; /* interrupted; lock was already released for us */
			if(!(cb->flags & CHARBUFFER_LOCKLESS))
				mutex_acquire(&cb->lock);
		} else {
			/* Partial read satisfied, or non-blocking mode: stop here. */
			break;
		}
	}
	/* We freed space — wake any writers waiting for room. */
	tm_blocklist_wakeall(&cb->writers);
	if(!(cb->flags & CHARBUFFER_LOCKLESS))
		mutex_release(&cb->lock);
	return i;
}
// Actual queue push. static void qpush(intq* self, qitem* n) { qitem* prev; atomic_store(&n->next, 0); prev = atomic_exchange(&self->head, n); atomic_store(&prev->next, n); }
/* log_init_lock assumed */
/* Open (once) the datagram socket to the statsd daemon and cache its fd in
 * statsdLoggerWrite.sock.  A negative cached value records the last connect
 * error so later calls fail fast.  Returns 0 on success or -errno. */
static int statsdOpen() {
  int i, ret = 0;

  i = atomic_load(&statsdLoggerWrite.sock);
  if (i < 0) {
    int flags = SOCK_DGRAM;
#ifdef SOCK_CLOEXEC
    flags |= SOCK_CLOEXEC;
#endif
#ifdef SOCK_NONBLOCK
    flags |= SOCK_NONBLOCK;
#endif
    int sock = TEMP_FAILURE_RETRY(socket(PF_UNIX, flags, 0));
    if (sock < 0) {
      ret = -errno;
    } else {
      struct sockaddr_un un;
      memset(&un, 0, sizeof(struct sockaddr_un));
      un.sun_family = AF_UNIX;
      /* fixed literal path always fits in sun_path, so strcpy is safe */
      strcpy(un.sun_path, "/dev/socket/statsdw");

      if (TEMP_FAILURE_RETRY(
              connect(sock, (struct sockaddr*)&un,
                      sizeof(struct sockaddr_un))) < 0) {
        ret = -errno;
        switch (ret) {
          /* cache only the "daemon not there yet" class of failures */
          case -ENOTCONN:
          case -ECONNREFUSED:
          case -ENOENT:
            i = atomic_exchange(&statsdLoggerWrite.sock, ret);
          /* FALLTHRU */
          default:
            break;
        }
        close(sock);
      } else {
        /* publish our fd; if we displaced a live fd installed by a racing
         * thread, close the displaced one, then report success */
        ret = atomic_exchange(&statsdLoggerWrite.sock, sock);
        if ((ret >= 0) && (ret != sock)) {
          close(ret);
        }
        ret = 0;
      }
    }
  }
  return ret;
}
int FileDescriptorTable::close(int fd) { //No need to lock the mutex when deleting if(fd<0 || fd>=MAX_OPEN_FILES) return -EBADF; intrusive_ref_ptr<FileBase> toClose; toClose=atomic_exchange(files+fd,intrusive_ref_ptr<FileBase>()); if(!toClose) return -EBADF; //File entry was not open return 0; }
void *child(void *p)
{
	/* Swap in SWAP_VALUE and keep what was there before. */
	magic_thread_local_value_child =
	    atomic_exchange(&magic_global_value, SWAP_VALUE);
	vanish(); /* bypass thr_exit */
	for(;;)
		continue; /* placate compiler portably */
}
/* Hand out the next lease id from a shared atomic counter, resetting the
 * counter to 1 when it reaches UINT32_MAX.
 * NOTE(review): 'lease_id > UINT32_MAX - 1' is only true when
 * lease_id == UINT32_MAX, and atomic_exchange returns the counter's
 * *previous* value — so the wrapping call still returns UINT32_MAX (or a
 * racing value) rather than 1.  Confirm that is the intended behaviour. */
uint32_t LeaseFactory::new_lease_id()
{
  uint32_t lease_id = atomic_inc(&lease_id_factory_);
  if (lease_id > UINT32_MAX - 1) {
    lease_id = atomic_exchange(&lease_id_factory_, 1);
  }
  return lease_id;
}
// Device-side atomic assignment for types whose size matches neither int nor
// unsigned long long (SFINAE-selected overload; the template header for T is
// not visible in this chunk).  The exchanged-out old value is discarded.
__inline__ __device__
void atomic_assign(
  volatile T * const dest ,
  typename Kokkos::Impl::enable_if<
      sizeof(T) != sizeof(int) &&
      sizeof(T) != sizeof(unsigned long long int)
    , const T & >::type val )
{
  (void) atomic_exchange(dest,val);
}
int ObStat::set_value(const int32_t index, int64_t value)
{
  // Guard against out-of-range statistic slots first.
  if (index < 0 || index >= MAX_STATICS_PER_TABLE)
  {
    return OB_SIZE_OVERFLOW;
  }
  // Atomically overwrite the slot; the displaced value is discarded.
  atomic_exchange(reinterpret_cast<volatile uint64_t*>(&(values_[index])), value);
  return OB_SUCCESS;
}
void enqueue(T* c) {
  // clear the next pointer
  (*get_next_ptr(c)) = NULL;
  // atomically,
  // swap(tail, c)
  // tail->next = c;
  // NOTE(review): this relies on the project's atomic_exchange(a, b) being a
  // two-way swap (b receives the old value of a); after the call 'prev' holds
  // the previous tail while tail points at 'c' — confirm against the
  // atomic_exchange overload actually in scope here.
  T* prev = c;
  atomic_exchange(tail, prev);
  // link the former tail to the newly appended node
  (*get_next_ptr(prev)) = c;
  // compiler barrier only: forbid reordering of the link publication
  asm volatile ("" : : : "memory");
}
void EpollDaemon::stop(){ if(atomic_exchange(g_running, false, ATOMIC_ACQ_REL) == false){ return; } LOG_POSEIDON(Logger::SP_MAJOR | Logger::LV_INFO, "Stopping epoll daemon..."); if(g_thread.joinable()){ g_thread.join(); } g_epoll->clear(); g_servers.clear(); }
/* Test entry point: spawns 'child', then races it to swap SWAP_VALUE into
 * the shared global, recording the previous value per thread.
 * misbehave() tunes the Landslide race-exploration harness. */
int main() {
  thr_init(PAGE_SIZE);
  misbehave(BGND_BRWN >> FGND_CYAN); // for landslide

  (void) thr_create(child, (void *)0);
  magic_thread_local_value_parent =
      atomic_exchange(&magic_global_value, SWAP_VALUE);

  vanish(); /* bypass thr_exit */
  while(1) continue; /* placate compiler portably */
}
/* Allocator stress worker: repeatedly steals or publishes allocations in a
 * small shared slot array, exercising cross-thread malloc/free. */
static void *stress(void *arg)
{
	struct foo *foo = arg;
	unsigned seed = (unsigned)pthread_self();
	int iter, slot;
	size_t len;
	void *blk;

	for (iter = 0; iter < LOOPS; iter++) {
		slot = rng(&seed) % SH_COUNT;
		len = rng(&seed) % MAX_SZ;
		/* Steal whatever currently occupies the slot and free it. */
		blk = atomic_exchange(&foo[slot].mem, 0);
		free(blk);
		if (!blk) {
			/* Slot was empty: publish a fresh allocation, freeing
			 * anything another thread raced in meanwhile. */
			blk = malloc(len);
			blk = atomic_exchange(&foo[slot].mem, blk);
			free(blk);
		}
	}
	return (void *)iter;
}
void EpollDaemon::start(){ if(atomic_exchange(g_running, true, ATOMIC_ACQ_REL) != false){ LOG_POSEIDON_FATAL("Only one daemon is allowed at the same time."); std::abort(); } LOG_POSEIDON(Logger::SP_MAJOR | Logger::LV_INFO, "Starting epoll daemon..."); MainConfig::get(g_max_timeout, "epoll_max_timeout"); LOG_POSEIDON_DEBUG("Max timeout = ", g_max_timeout); MainConfig::get(g_tcp_request_timeout, "epoll_tcp_request_timeout"); LOG_POSEIDON_DEBUG("Tcp request timeout = ", g_tcp_request_timeout); Thread(&thread_proc, " N").swap(g_thread); }
/* Queue one audio block for playback: reorder channels if configured, then
 * copy the block into the circular buffer, sleeping in slices while the
 * render callback drains it.  While paused, a full buffer drops the block
 * instead of waiting.  Also reports any underrun recorded by the renderer. */
void ca_Play(audio_output_t * p_aout, block_t * p_block)
{
    struct aout_sys_common *p_sys = (struct aout_sys_common *) p_aout->sys;

    /* Do the channel reordering */
    if (p_sys->chans_to_reorder)
        aout_ChannelReorder(p_block->p_buffer, p_block->i_buffer,
                            p_sys->chans_to_reorder, p_sys->chan_table,
                            VLC_CODEC_FL32);

    /* move data to buffer */
    while (!TPCircularBufferProduceBytes(&p_sys->circular_buffer,
                                         p_block->p_buffer, p_block->i_buffer))
    {
        if (atomic_load_explicit(&p_sys->b_paused, memory_order_relaxed))
        {
            msg_Warn(p_aout, "dropping block because the circular buffer is "
                     "full and paused");
            break;
        }

        /* Try to play what we can */
        int32_t i_avalaible_bytes;
        TPCircularBufferHead(&p_sys->circular_buffer, &i_avalaible_bytes);
        assert(i_avalaible_bytes >= 0);

        /* Enough room may have appeared since the failed produce: retry. */
        if (unlikely((size_t) i_avalaible_bytes >= p_block->i_buffer))
            continue;

        /* Partial write of whatever fits right now. */
        bool ret = TPCircularBufferProduceBytes(&p_sys->circular_buffer,
                                                p_block->p_buffer,
                                                i_avalaible_bytes);
        assert(ret == true);
        p_block->p_buffer += i_avalaible_bytes;
        p_block->i_buffer -= i_avalaible_bytes;

        /* Wait for the render buffer to play the remaining data */
        const mtime_t i_frame_us =
            FramesToUs(p_sys, BytesToFrames(p_sys, p_block->i_buffer));
        /* sleep only half the estimate so we react to drain progress */
        msleep(i_frame_us / 2);
    }

    /* Fetch-and-clear the underrun byte count recorded by the renderer. */
    unsigned i_underrun_size = atomic_exchange(&p_sys->i_underrun_size, 0);
    if (i_underrun_size > 0)
        msg_Warn(p_aout, "underrun of %u bytes", i_underrun_size);

    block_Release(p_block);
}
/* Generic emulation of an atomic read-modify-write on a (possibly remote)
 * integer, serialized under a native lock keyed to the target's node.
 *   op    - ARMCI_FETCH_AND_ADD[_LONG] or ARMCI_SWAP[_LONG]
 *   ploc  - local operand; receives the old remote value
 *   prem  - remote location operated on
 *   extra - addend for the fetch-and-add operations
 *   proc  - target process id */
void armci_generic_rmw(int op, void *ploc, void *prem, int extra, int proc)
{
#if defined(CLUSTER) && !defined(SGIALTIX)
    /* spread contention: hash the target's node-relative rank onto a lock */
    int lock = (proc-armci_clus_info[armci_clus_id(proc)].master)%NUM_LOCKS;
#else
    int lock = 0;
#endif

    ARMCI_PR_DBG("enter",0);
    NATIVE_LOCK(lock,proc);
    switch (op) {
      case ARMCI_FETCH_AND_ADD:
        /* fetch remote int into *ploc, write back *ploc + extra */
        armci_get(prem,ploc,sizeof(int),proc);
        _a_temp = *(int*)ploc + extra;
        armci_put(&_a_temp,prem,sizeof(int),proc);
        break;
      case ARMCI_FETCH_AND_ADD_LONG:
        armci_get(prem,ploc,sizeof(long),proc);
        _a_ltemp = *(long*)ploc + extra;
        armci_put(&_a_ltemp,prem,sizeof(long),proc);
        break;
      case ARMCI_SWAP:
#if (defined(__i386__) || defined(__x86_64__))
        /* same SMP node (or server context): use the native exchange */
        if(SERVER_CONTEXT || armci_nclus==1){
          atomic_exchange(ploc, prem, sizeof(int));
        }
        else
#endif
        {
          /* emulate the swap with a get/put pair under the lock */
          armci_get(prem,&_a_temp,sizeof(int),proc);
          armci_put(ploc,prem,sizeof(int),proc);
          *(int*)ploc = _a_temp;
        }
        break;
      case ARMCI_SWAP_LONG:
        armci_get(prem,&_a_ltemp,sizeof(long),proc);
        armci_put(ploc,prem,sizeof(long),proc);
        *(long*)ploc = _a_ltemp;
        break;
      default: armci_die("rmw: operation not supported",op);
    }
    /*TODO memfence here*/
    NATIVE_UNLOCK(lock,proc);
    ARMCI_PR_DBG("exit",0);
}
/* Consume any pending restart request and rebuild the output and/or the
 * filter pipeline accordingly.  Returns AOUT_DEC_SUCCESS, AOUT_DEC_CHANGED
 * (the output was recreated) or AOUT_DEC_FAILED (no usable mixer format). */
static int aout_CheckReady (audio_output_t *aout)
{
    aout_owner_t *owner = aout_owner (aout);

    int status = AOUT_DEC_SUCCESS;
    /* atomically fetch and clear the restart request bits */
    int restart = atomic_exchange (&owner->restart, 0);
    if (unlikely(restart))
    {
        if (owner->mixer_format.i_format)
            aout_FiltersDelete (aout, owner->filters);

        if (restart & AOUT_RESTART_OUTPUT)
        {   /* Reinitializes the output */
            msg_Dbg (aout, "restarting output...");
            if (owner->mixer_format.i_format)
                aout_OutputDelete (aout);
            owner->mixer_format = owner->input_format;
            if (aout_OutputNew (aout, &owner->mixer_format))
                owner->mixer_format.i_format = 0; /* marks output unusable */
            aout_volume_SetFormat (owner->volume,
                                   owner->mixer_format.i_format);
            status = AOUT_DEC_CHANGED;
        }

        msg_Dbg (aout, "restarting filters...");
        owner->sync.end = VLC_TS_INVALID;
        owner->sync.resamp_type = AOUT_RESAMPLING_NONE;

        if (owner->mixer_format.i_format)
        {
            owner->filters = aout_FiltersNew (aout, &owner->input_format,
                                              &owner->mixer_format,
                                              &owner->request_vout);
            if (owner->filters == NULL)
            {   /* filter chain failed: tear the output down as well */
                aout_OutputDelete (aout);
                owner->mixer_format.i_format = 0;
            }
        }
        /* TODO: This would be a good time to call clean up any video output
         * left over by an audio visualization:
        input_resource_TerminatVout(MAGIC HERE);
        */
    }
    return (owner->mixer_format.i_format) ? status : AOUT_DEC_FAILED;
}
int mm_allocate_dma_buffer(struct dma_region *d) { if (!atomic_exchange(&dma_virtual_init, true)) { valloc_create(&dma_virtual, MEMMAP_VIRTDMA_START, MEMMAP_VIRTDMA_END, mm_page_size(0), 0); } d->p.address = mm_physical_allocate(d->p.size, false); if (d->p.address == 0) return -1; struct valloc_region reg; int npages = (d->p.size - 1) / mm_page_size(0) + 1; valloc_allocate(&dma_virtual, ®, npages); for (int i = 0; i < npages; i++) mm_virtual_map(reg.start + i * mm_page_size(0), d->p.address + i * mm_page_size(0), PAGE_PRESENT | PAGE_WRITE, mm_page_size(0)); d->v = reg.start; return 0; }
/* Acquire mutex 'm', blocking (or, for the idle thread, rescheduling) until
 * the lock bit is won.  file/line identify the call site for diagnostics. */
void __mutex_acquire(struct mutex *m, char *file, int line)
{
	assert(m->magic == MUTEX_MAGIC);
	/* locking becomes a no-op while the kernel debugger is active */
	if(unlikely(kernel_state_flags & KSF_DEBUGGING))
		return;
	if(unlikely(current_thread && current_thread->interrupt_level))
		panic(PANIC_NOSYNC,
				"cannot lock a mutex within interrupt context (%s:%d)",
				file, line);
	/* during shutdown, skip locking entirely */
	if(unlikely(kernel_state_flags & KSF_SHUTDOWN))
		return;

	/* wait until we can set bit 0. once this is done, we have the lock */
#if MUTEX_DEBUG
	int timeout = 8000;
#endif
	if(unlikely(current_thread && __current_cpu->preempt_disable > 0))
		panic(0, "tried to lock schedulable mutex with preempt off");
	if(likely(current_thread != NULL))
		current_thread->held_locks++;
	/* atomic_exchange returns the previous value: true means the lock is
	 * still held by someone else */
	while(atomic_exchange(&m->lock, true)) {
		if(likely(current_thread != NULL)) {
			/* are we re-locking ourselves? */
			if(m->owner == current_thread)
				panic(0, "tried to relock mutex (%s:%d)", file, line);
			/* we can use __current_cpu here, because we're testing if we're the idle
			 * thread, and the idle thread never migrates. */
			if(current_thread != __current_cpu->idle_thread) {
				tm_thread_block_confirm(&m->blocklist,
						THREADSTATE_UNINTERRUPTIBLE, __confirm, m);
			} else {
				tm_schedule();
			}
		}
#if MUTEX_DEBUG
		if(--timeout == 0) {
			panic(0, "mutex timeout from %s:%d (owned by %d: %s:%d)\n",
					file, line, m->pid, m->owner_file, m->owner_line);
		}
#endif
	}
	/* record ownership for relock detection and debugging */
	m->owner = current_thread;
	m->owner_file = file;
	m->owner_line = line;
}
/* Append work item 'w' to the ready queue.  From the indices used here,
 * m_queue[0..) holds the ready slots and m_queue[2*N+1] is the push-end
 * counter.  Aborts if the claimed slot is out of range or was not holding
 * END_TOKEN (i.e. the queue invariant was violated). */
KOKKOS_INLINE_FUNCTION
void push_work( const std::int32_t w ) const noexcept
{
  const std::int32_t N = m_graph.numRows();

  std::int32_t volatile * const ready_queue = & m_queue[0] ;
  std::int32_t volatile * const end_hint    = & m_queue[2*N+1] ;

  // Push work to end of queue: claim a slot index, then publish 'w' there.
  const std::int32_t j = atomic_fetch_add( end_hint , 1 );

  if ( ( N <= j ) ||
       ( END_TOKEN != atomic_exchange(ready_queue+j,w) ) ) {
    // ERROR: past the end of queue or did not replace END_TOKEN
    Kokkos::abort("WorkGraphPolicy push_work error");
  }

  /* make the published slot visible to consumers */
  memory_fence();
}
/* Consume any pending restart request and rebuild the output and/or the
 * filter pipeline.  Caller must hold the output lock (asserted).
 * Returns 0 when a usable mixer format is available, -1 otherwise. */
static int aout_CheckReady (audio_output_t *aout)
{
    aout_owner_t *owner = aout_owner (aout);

    aout_assert_locked (aout);

    /* atomically fetch and clear the restart request bits */
    int restart = atomic_exchange (&owner->restart, 0);
    if (unlikely(restart))
    {
        /* keep a copy: the request_vout is reused after the teardown */
        const aout_request_vout_t request_vout = owner->request_vout;

        if (owner->mixer_format.i_format)
            aout_FiltersDelete (aout);

        if (restart & AOUT_RESTART_OUTPUT)
        {   /* Reinitializes the output */
            msg_Dbg (aout, "restarting output...");
            if (owner->mixer_format.i_format)
                aout_OutputDelete (aout);
            owner->mixer_format = owner->input_format;
            if (aout_OutputNew (aout, &owner->mixer_format))
                owner->mixer_format.i_format = 0; /* marks output unusable */
            aout_volume_SetFormat (owner->volume,
                                   owner->mixer_format.i_format);
        }

        msg_Dbg (aout, "restarting filters...");
        owner->sync.end = VLC_TS_INVALID;
        owner->sync.resamp_type = AOUT_RESAMPLING_NONE;

        if (owner->mixer_format.i_format
         && aout_FiltersNew (aout, &owner->input_format,
                             &owner->mixer_format, &request_vout))
        {   /* filter creation failed: tear the output down as well */
            aout_OutputDelete (aout);
            owner->mixer_format.i_format = 0;
        }
    }
    return (owner->mixer_format.i_format) ? 0 : -1;
}
// Queue buffer 'p' back to the V4L2 output device (QBUF) and, when streaming
// is already underway, arm an asynchronous dequeue (DQBUF) that releases the
// driver's reference once the buffer completes.
friend void push(sink& s, buffer p)
{
	v4l2_buffer b = {0};
	b.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	b.memory = V4L2_MEMORY_MMAP;
	// buffer index is recovered from the pointer offset into the mmap'd array
	b.index = p.get() - s.p->buffers;
	if(ioctl(s.p->fd.native_handle(), VIDIOC_QBUF, &b))
		throw std::system_error(errno, std::system_category());
	// the driver now owns one reference to the queued buffer
	intrusive_ptr_add_ref(p.get());
	// exchange returns the previous flag: true means streaming had already
	// started, so a completed buffer can be dequeued asynchronously.
	// NOTE(review): the lambda captures 's' by reference — confirm the sink
	// outlives the pending async_read_some.
	if(atomic_exchange(&s.p->streaming, true)) {
		auto dqbuf = std::make_unique<v4l2_buffer>();
		memset(dqbuf.get(), 0, sizeof(*dqbuf.get()));
		dqbuf->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
		dqbuf->memory = V4L2_MEMORY_MMAP;
		auto dqp = dqbuf.get();
		s.p->fd.async_read_some(
			utils::make_ioctl_read_buffer<VIDIOC_DQBUF>(dqp),
			[&s, buffer = utils::move_on_copy(std::move(dqbuf))](std::error_code const& ec, std::size_t)
			{
				// drop the driver's reference for the dequeued buffer
				if(!ec)
					intrusive_ptr_release(s.p->buffers + unwrap(buffer)->index);
			});
	}
}
/* Lock-free (MPSC-style) enqueue: copy 'data' into a freshly allocated node
 * and append it to the queue.
 * Returns QUEUE_SUCCESS or QUEUE_OUT_OF_MEMORY.
 * Fix: the original exchanged/stored 'new_tail->next' (always NULL at this
 * point) where the node pointer 'new_tail' itself is required, which set the
 * tail to NULL and never linked the new node — losing every pushed item. */
QueueResult queue_push(queue_p q, void *data)
{
    assert(q);

    /* create the new tail; the item payload lives inline after the node */
    node *new_tail = malloc(sizeof(node) + q->item_size);
    if (!new_tail) {
        return QUEUE_OUT_OF_MEMORY;
    }
    atomic_init(&new_tail->next, 0);
    memcpy(new_tail + 1, data, q->item_size);

    /* publish the node as the new tail; the returned pointer is the
     * previous tail (NULL when the queue was empty) */
    node *old_tail = (node *) atomic_exchange(&q->tail, new_tail);

    /* link the predecessor — or the head, for a previously empty queue —
     * to the new node */
    if (old_tail) {
        atomic_store(&old_tail->next, new_tail);
    } else {
        atomic_store(&q->head, new_tail);
    }
    return QUEUE_SUCCESS;
}
void tm_thread_do_exit(void) { assert(current_thread->held_locks == 0); assert(current_thread->blocklist == 0); struct async_call *thread_cleanup_call = async_call_create(¤t_thread->cleanup_call, 0, tm_thread_destroy, (unsigned long)current_thread, 0); struct ticker *ticker = (void *)atomic_exchange(¤t_thread->alarm_ticker, NULL); if(ticker) { if(ticker_delete(ticker, ¤t_thread->alarm_timeout) != -ENOENT) tm_thread_put(current_thread); } linkedlist_remove(¤t_process->threadlist, ¤t_thread->pnode); tm_thread_remove_kerfs_entries(current_thread); atomic_fetch_sub_explicit(&running_threads, 1, memory_order_relaxed); if(atomic_fetch_sub(¤t_process->thread_count, 1) == 1) { atomic_fetch_sub_explicit(&running_processes, 1, memory_order_relaxed); tm_process_remove_kerfs_entries(current_process); tm_process_exit(current_thread->exit_code); } cpu_disable_preemption(); assert(!current_thread->blocklist); tqueue_remove(current_thread->cpu->active_queue, ¤t_thread->activenode); atomic_fetch_sub_explicit(¤t_thread->cpu->numtasks, 1, memory_order_relaxed); tm_thread_raise_flag(current_thread, THREAD_SCHEDULE); current_thread->state = THREADSTATE_DEAD; workqueue_insert(&__current_cpu->work, thread_cleanup_call); cpu_interrupt_set(0); /* don't schedule away until we get back to the syscall handler! */ cpu_enable_preemption(); }
/**
 * Blocking write to a stream
 *
 * If the stream is full, the task is suspended until the consumer
 * reads items from the stream, freeing space for more items.
 *
 * @param sd    stream descriptor
 * @param item  data item (a pointer) to write
 * @pre         current task is single writer
 * @pre         item != NULL
 */
void LpelStreamWrite( lpel_stream_desc_t *sd, void *item)
{
  lpel_task_t *self = sd->task;
  int poll_wakeup = 0;

  /* check if opened for writing */
  assert( sd->mode == 'w' );
  assert( item != NULL );

  /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
  if (sd->mon && MON_CB(stream_writeprepare)) {
    MON_CB(stream_writeprepare)(sd->mon, item);
  }
#endif

  /* quasi P(e_sem): claim one empty slot; the fetched value 0 means the
   * buffer is full and we must block until the consumer frees space */
  if ( atomic_fetch_sub( &sd->stream->e_sem, 1)== 0) {
    /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
    if (sd->mon && MON_CB(stream_blockon)) {
      MON_CB(stream_blockon)(sd->mon);
    }
#endif
    /* wait on stream: */
    LpelTaskBlockStream( self);
  }

  /* writing to the buffer and checking if consumer polls must be atomic */
  PRODLOCK_LOCK( &sd->stream->prod_lock);
  {
    /* there must be space now in buffer */
    assert( LpelBufferIsSpace( &sd->stream->buffer) );
    /* put item into buffer */
    LpelBufferPut( &sd->stream->buffer, item);

    if ( sd->stream->is_poll) {
      /* get consumer's poll token; a token of 1 means we are the first
       * producer to make data available to the polling consumer */
      poll_wakeup = atomic_exchange( &sd->stream->cons_sd->task->poll_token, 0);
      sd->stream->is_poll = 0;
    }
  }
  PRODLOCK_UNLOCK( &sd->stream->prod_lock);

  /* quasi V(n_sem): announce one more item; a previous value < 0 means the
   * consumer is blocked waiting for data */
  if ( atomic_fetch_add( &sd->stream->n_sem, 1) < 0) {
    /* n_sem was -1 */
    lpel_task_t *cons = sd->stream->cons_sd->task;
    /* wakeup consumer: make ready */
    LpelTaskUnblock( self, cons);

    /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
    if (sd->mon && MON_CB(stream_wakeup)) {
      MON_CB(stream_wakeup)(sd->mon);
    }
#endif
  } else {
    /* we are the sole producer task waking the polling consumer up */
    if (poll_wakeup) {
      lpel_task_t *cons = sd->stream->cons_sd->task;
      cons->wakeup_sd = sd->stream->cons_sd;

      LpelTaskUnblock( self, cons);

      /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
      if (sd->mon && MON_CB(stream_wakeup)) {
        MON_CB(stream_wakeup)(sd->mon);
      }
#endif
    }
  }

  /* MONITORING CALLBACK */
#ifdef USE_TASK_EVENT_LOGGING
  if (sd->mon && MON_CB(stream_writefinish)) {
    MON_CB(stream_writefinish)(sd->mon);
  }
#endif
}
/**
 * Poll a set of streams
 *
 * This is a blocking function called by a consumer which wants to wait
 * for arrival of data on any of a specified set of streams.
 * The consumer task is suspended while there is no new data on all streams.
 *
 * @param set     a stream descriptor set the task wants to poll
 * @pre           set must not be empty (*set != NULL)
 *
 * @post          The first element when iterating through the set after
 *                LpelStreamPoll() will be the one after the one which
 *                caused the task to wakeup,
 *                i.e., the first stream where data arrived.
 */
lpel_stream_desc_t *LpelStreamPoll( lpel_streamset_t *set)
{
  lpel_task_t *self;
  lpel_stream_iter_t *iter;
  int do_ctx_switch = 1;
  int cnt = 0;

  assert( *set != NULL);

  /* get 'self', i.e. the task calling LpelStreamPoll() */
  self = (*set)->task;

  iter = LpelStreamIterCreate( set);

  /* fast path: if any stream already holds data, return it immediately */
  while( LpelStreamIterHasNext( iter)) {
    lpel_stream_desc_t *sd = LpelStreamIterNext( iter);
    lpel_stream_t *s = sd->stream;
    if ( LpelBufferTop( &s->buffer) != NULL) {
      LpelStreamIterDestroy(iter);
      *set = sd;
      return sd;
    }
  }

  /* place a poll token */
  atomic_store( &self->poll_token, 1);

  /* for each stream in the set */
  LpelStreamIterReset(iter, set);
  while( LpelStreamIterHasNext( iter)) {
    lpel_stream_desc_t *sd = LpelStreamIterNext( iter);
    lpel_stream_t *s = sd->stream;
    /* lock stream (prod-side) */
    PRODLOCK_LOCK( &s->prod_lock);
    { /* CS BEGIN */
      /* check if there is something in the buffer */
      if ( LpelBufferTop( &s->buffer) != NULL) {
        /* yes, we can stop iterating through streams.
         * determine, if we have been woken up by another producer:
         */
        int tok = atomic_exchange( &self->poll_token, 0);
        if (tok) {
          /* we have not been woken yet, no need for ctx switch */
          do_ctx_switch = 0;
          self->wakeup_sd = sd;
        }
        /* unlock stream */
        PRODLOCK_UNLOCK( &s->prod_lock);
        /* exit loop */
        break;

      } else {
        /* nothing in the buffer, register stream as activator */
        s->is_poll = 1;
        cnt++;
        //sd->event_flags |= STDESC_WAITON;
        /* TODO marking all streams does potentially flood the log-files
           - is it desired to have anyway?
        MarkDirty( sd);
        */
      }
    } /* CS END */
    /* unlock stream */
    PRODLOCK_UNLOCK( &s->prod_lock);
  } /* end for each stream */

  /* context switch */
  if (do_ctx_switch) {
    /* set task as blocked */
    LpelTaskBlockStream( self);
  }
  /* by now, a producer must have consumed the token to wake us */
  assert( atomic_load( &self->poll_token) == 0);

  /* unregister activators
   * - would only be necessary, if the consumer task closes the stream
   *   while the producer is in an is_poll state,
   *   as this could result in a SEGFAULT when the producer
   *   is trying to dereference sd->stream->cons_sd
   * - a consumer closes the stream if it reads
   *   a terminate record or a sync record, and between reading the record
   *   and closing the stream the consumer issues no LpelStreamPoll()
   *   and no entity writes a record on the stream after these records.
   * UPDATE: with static/dynamc collectors in S-Net, this is possible!
   */
  LpelStreamIterReset(iter, set);
  while( LpelStreamIterHasNext( iter)) {
    lpel_stream_t *s = (LpelStreamIterNext(iter))->stream;
    PRODLOCK_LOCK( &s->prod_lock);
    s->is_poll = 0;
    PRODLOCK_UNLOCK( &s->prod_lock);
    /* only 'cnt' streams were registered; stop once all are cleared */
    if (--cnt == 0) break;
  }

  LpelStreamIterDestroy(iter);

  /* 'rotate' set to stream descriptor for non-empty buffer */
  *set = self->wakeup_sd;

  return self->wakeup_sd;
}
/* Direct-rendering frame allocator: back the AVFrame's planes directly with
 * the picture's pixel memory, taking one picture reference per plane (the
 * initial caller reference is dropped before returning).
 * Returns 0 on success, -1 when direct rendering must be disabled. */
static int lavc_dr_GetFrame(struct AVCodecContext *ctx, AVFrame *frame,
                            picture_t *pic)
{
    decoder_t *dec = (decoder_t *)ctx->opaque;
    decoder_sys_t *sys = dec->p_sys;

    /* palette formats cannot be direct-rendered */
    if (ctx->pix_fmt == PIX_FMT_PAL8)
        goto error;

    int width = frame->width;
    int height = frame->height;
    int aligns[AV_NUM_DATA_POINTERS];

    avcodec_align_dimensions2(ctx, &width, &height, aligns);

    /* Check that the picture is suitable for libavcodec */
    assert(pic->p[0].i_pitch >= width * pic->p[0].i_pixel_pitch);
    assert(pic->p[0].i_lines >= height);

    for (int i = 0; i < pic->i_planes; i++)
    {
        if (pic->p[i].i_pitch % aligns[i])
        {
            /* warn only once: exchange returns the previous flag value */
            if (!atomic_exchange(&sys->b_dr_failure, true))
                msg_Warn(dec, "plane %d: pitch not aligned (%d%%%d): %s",
                         i, pic->p[i].i_pitch, aligns[i],
                         "disabling direct rendering");
            goto error;
        }
        if (((uintptr_t)pic->p[i].p_pixels) % aligns[i])
        {
            if (!atomic_exchange(&sys->b_dr_failure, true))
                msg_Warn(dec, "plane %d not aligned: %s",
                         i, "disabling direct rendering");
            goto error;
        }
    }

    /* Allocate buffer references and initialize planes */
    assert(pic->i_planes < PICTURE_PLANE_MAX);
    static_assert(PICTURE_PLANE_MAX <= AV_NUM_DATA_POINTERS, "Oops!");

    for (int i = 0; i < pic->i_planes; i++)
    {
        uint8_t *data = pic->p[i].p_pixels;
        int size = pic->p[i].i_pitch * pic->p[i].i_lines;

        frame->data[i] = data;
        frame->linesize[i] = pic->p[i].i_pitch;
        frame->buf[i] = av_buffer_create(data, size, lavc_ReleaseFrame,
                                         pic, 0);
        if (unlikely(frame->buf[i] == NULL))
        {
            /* roll back the references taken for the earlier planes */
            while (i > 0)
                av_buffer_unref(&frame->buf[--i]);
            goto error;
        }
        picture_Hold(pic);
    }

    frame->opaque = pic;
    /* The loop above held one reference to the picture for each plane. */
    picture_Release(pic);
    return 0;
error:
    picture_Release(pic);
    return -1;
}
aout_OutputUnlock (aout); return ret; drop: owner->sync.discontinuity = true; block_Release (block); lost: atomic_fetch_add(&owner->buffers_lost, 1); goto out; } void aout_DecGetResetStats(audio_output_t *aout, unsigned *restrict lost, unsigned *restrict played) { aout_owner_t *owner = aout_owner (aout); *lost = atomic_exchange(&owner->buffers_lost, 0); *played = atomic_exchange(&owner->buffers_played, 0); } void aout_DecChangePause (audio_output_t *aout, bool paused, mtime_t date) { aout_owner_t *owner = aout_owner (aout); aout_OutputLock (aout); if (owner->sync.end != VLC_TS_INVALID) { if (paused) owner->sync.end -= date; else owner->sync.end += date; }
bool EventThreadGetAndResetHasMoved( event_thread_t *p_event )
{
    /* Fetch the "window has moved" flag and clear it in one atomic step. */
    const bool b_moved = atomic_exchange( &p_event->has_moved, false );
    return b_moved;
}
/* Fetch and reset the lost-buffer counter in one atomic step.
 * Fix: dropped the "(atomic_uint)0" cast — casting to an atomic-qualified
 * type is not valid C11, and the plain-0 form matches the sibling
 * definition of this function elsewhere in the file. */
int aout_DecGetResetLost (audio_output_t *aout)
{
    aout_owner_t *owner = aout_owner (aout);
    return atomic_exchange(&owner->buffers_lost, 0);
}
int aout_DecGetResetLost (audio_output_t *aout)
{
    aout_owner_t *owner = aout_owner (aout);

    /* Read the lost-buffer counter and clear it in one atomic step. */
    const int lost = atomic_exchange (&owner->buffers_lost, 0);
    return lost;
}