/* Removes the given effect slot IDs from the context's active list.
 * A replacement array is built without the removed slots, swapped in
 * atomically, and the old array is freed only after the mixer is no
 * longer mid-cycle (an odd MixCount means a mix is in progress). */
static void RemoveActiveEffectSlots(const ALuint *slotids, ALsizei count, ALCcontext *context)
{
    struct ALeffectslotArray *curarray = ATOMIC_LOAD(&context->ActiveAuxSlots, almemory_order_acquire);
    struct ALeffectslotArray *newarray = NULL;
    ALCdevice *device = context->Device;
    ALsizei i, j;

    /* Don't shrink the allocated array size since we don't know how many (if
     * any) of the effect slots to remove are in the array. */
    newarray = al_calloc(DEF_ALIGN, FAM_SIZE(struct ALeffectslotArray, slot, curarray->count));
    newarray->count = 0;
    for(i = 0;i < curarray->count;i++)
    {
        /* Insert this slot into the new array only if it's not one to remove. */
        ALeffectslot *slot = curarray->slot[i];
        for(j = count;j != 0;)
        {
            if(slot->id == slotids[--j])
                goto skip_ins;
        }
        newarray->slot[newarray->count++] = slot;
    skip_ins: ;
    }

    /* TODO: Could reallocate newarray now that we know it's needed size. */

    curarray = ATOMIC_EXCHANGE_PTR(&context->ActiveAuxSlots, newarray, almemory_order_acq_rel);
    /* Wait for any in-progress mix to finish before freeing the old array the
     * mixer thread may still be reading. */
    while((ATOMIC_LOAD(&device->MixCount, almemory_order_acquire)&1))
        althrd_yield();
    al_free(curarray);
}
/* Tears down an effect slot: releases any unapplied property update,
 * drains the freelist of property containers, and deletes the slot's
 * current effect state. */
void DeinitEffectSlot(ALeffectslot *slot)
{
    struct ALeffectslotProps *entry;
    size_t freed = 0;

    /* Release a pending (never-applied) update container, if one exists. */
    entry = ATOMIC_LOAD(&slot->Update);
    if(entry)
    {
        ALeffectState *pending = ATOMIC_LOAD(&entry->State, almemory_order_relaxed);
        if(pending != slot->Params.EffectState)
            DELETE_OBJ(pending);
        TRACE("Freed unapplied AuxiliaryEffectSlot update %p\n", entry);
        al_free(entry);
    }

    /* Walk and free the whole freelist of property containers. */
    for(entry = ATOMIC_LOAD(&slot->FreeList, almemory_order_relaxed);entry != NULL;)
    {
        struct ALeffectslotProps *after = ATOMIC_LOAD(&entry->next, almemory_order_relaxed);
        ALeffectState *st = ATOMIC_LOAD(&entry->State, almemory_order_relaxed);
        DELETE_OBJ(st);
        al_free(entry);
        entry = after;
        freed++;
    }
    TRACE("Freed "SZFMT" AuxiliaryEffectSlot property object%s\n", freed, (freed==1)?"":"s");

    DELETE_OBJ(slot->Params.EffectState);
}
/*
 * Called upon thread deletion.
 *
 * Folds the exiting thread's statistics into the global totals and
 * updates the global retry max/min with CAS loops, then frees the
 * per-thread stats record.
 */
static void mod_stats_on_thread_exit(TXPARAMS void *arg)
{
  mod_stats_data_t *stats = (mod_stats_data_t *)stm_get_specific(TXARGS mod_stats_key);
  unsigned long cur;

  assert(stats != NULL);

  /* Accumulate simple counters atomically. */
  ATOMIC_FETCH_ADD_FULL(&mod_stats_global.commits, stats->commits);
  ATOMIC_FETCH_ADD_FULL(&mod_stats_global.retries_cnt, stats->retries_cnt);
  ATOMIC_FETCH_ADD_FULL(&mod_stats_global.retries_acc, stats->retries_acc);

  /* CAS loop: raise the global maximum if ours is larger. */
  for (;;) {
    cur = ATOMIC_LOAD(&mod_stats_global.retries_max);
    if (stats->retries_max <= cur)
      break;
    if (ATOMIC_CAS_FULL(&mod_stats_global.retries_max, cur, stats->retries_max) != 0)
      break;
  }

  /* CAS loop: lower the global minimum if ours is smaller. */
  for (;;) {
    cur = ATOMIC_LOAD(&mod_stats_global.retries_min);
    if (stats->retries_min >= cur)
      break;
    if (ATOMIC_CAS_FULL(&mod_stats_global.retries_min, cur, stats->retries_min) != 0)
      break;
  }

  free(stats);
}
/* Publishes the listener's current properties to the mixer thread.
 * Pops a property container from the freelist (lock-free) or allocates
 * a fresh one, copies in the current listener/context state, and
 * atomically installs it as the pending update. */
void UpdateListenerProps(ALCcontext *context)
{
    ALlistener *listener = context->Listener;
    struct ALlistenerProps *props;

    /* Get an unused proprty container, or allocate a new one as needed. */
    props = ATOMIC_LOAD(&listener->FreeList, almemory_order_acquire);
    if(!props)
        props = al_calloc(16, sizeof(*props));
    else
    {
        /* Lock-free pop from the freelist head; retry if another thread
         * changed the head under us. */
        struct ALlistenerProps *next;
        do {
            next = ATOMIC_LOAD(&props->next, almemory_order_relaxed);
        } while(ATOMIC_COMPARE_EXCHANGE_WEAK(struct ALlistenerProps*,
                &listener->FreeList, &props, next, almemory_order_seq_cst,
                almemory_order_acquire) == 0);
    }

    /* Copy in current property values. */
    ATOMIC_STORE(&props->Position[0], listener->Position[0], almemory_order_relaxed);
    ATOMIC_STORE(&props->Position[1], listener->Position[1], almemory_order_relaxed);
    ATOMIC_STORE(&props->Position[2], listener->Position[2], almemory_order_relaxed);

    ATOMIC_STORE(&props->Velocity[0], listener->Velocity[0], almemory_order_relaxed);
    ATOMIC_STORE(&props->Velocity[1], listener->Velocity[1], almemory_order_relaxed);
    ATOMIC_STORE(&props->Velocity[2], listener->Velocity[2], almemory_order_relaxed);

    ATOMIC_STORE(&props->Forward[0], listener->Forward[0], almemory_order_relaxed);
    ATOMIC_STORE(&props->Forward[1], listener->Forward[1], almemory_order_relaxed);
    ATOMIC_STORE(&props->Forward[2], listener->Forward[2], almemory_order_relaxed);
    ATOMIC_STORE(&props->Up[0], listener->Up[0], almemory_order_relaxed);
    ATOMIC_STORE(&props->Up[1], listener->Up[1], almemory_order_relaxed);
    ATOMIC_STORE(&props->Up[2], listener->Up[2], almemory_order_relaxed);

    ATOMIC_STORE(&props->Gain, listener->Gain, almemory_order_relaxed);
    ATOMIC_STORE(&props->MetersPerUnit, listener->MetersPerUnit, almemory_order_relaxed);

    /* Context-wide values that the mixer reads alongside listener state. */
    ATOMIC_STORE(&props->DopplerFactor, context->DopplerFactor, almemory_order_relaxed);
    ATOMIC_STORE(&props->DopplerVelocity, context->DopplerVelocity, almemory_order_relaxed);
    ATOMIC_STORE(&props->SpeedOfSound, context->SpeedOfSound, almemory_order_relaxed);

    ATOMIC_STORE(&props->SourceDistanceModel, context->SourceDistanceModel, almemory_order_relaxed);
    ATOMIC_STORE(&props->DistanceModel, context->DistanceModel, almemory_order_relaxed);

    /* Set the new container for updating internal parameters. */
    props = ATOMIC_EXCHANGE(struct ALlistenerProps*, &listener->Update, props, almemory_order_acq_rel);
    if(props)
    {
        /* If there was an unused update container, put it back in the
         * freelist. */
        ATOMIC_REPLACE_HEAD(struct ALlistenerProps*, &listener->FreeList, props);
    }
}
/* Adds the given effect slot IDs to the head of the context's active
 * list.  A new array is built (new slots first, then existing ones),
 * duplicates are removed keeping the first instance, the array is
 * swapped in atomically, and the old array is freed once the mixer is
 * not mid-cycle (odd MixCount == mixing in progress). */
static void AddActiveEffectSlots(const ALuint *slotids, ALsizei count, ALCcontext *context)
{
    struct ALeffectslotArray *curarray = ATOMIC_LOAD(&context->ActiveAuxSlots, almemory_order_acquire);
    struct ALeffectslotArray *newarray = NULL;
    ALsizei newcount = curarray->count + count;
    ALCdevice *device = context->Device;
    ALsizei i, j;

    /* Insert the new effect slots into the head of the array, followed by the
     * existing ones. */
    newarray = al_calloc(DEF_ALIGN, FAM_SIZE(struct ALeffectslotArray, slot, newcount));
    newarray->count = newcount;
    for(i = 0;i < count;i++)
        newarray->slot[i] = LookupEffectSlot(context, slotids[i]);
    for(j = 0;i < newcount;)
        newarray->slot[i++] = curarray->slot[j++];

    /* Remove any duplicates (first instance of each will be kept). */
    for(i = 1;i < newcount;i++)
    {
        for(j = i;j != 0;)
        {
            if(UNLIKELY(newarray->slot[i] == newarray->slot[--j]))
            {
                /* Shift the tail down over the duplicate and re-test the
                 * element now at index i. */
                newcount--;
                for(j = i;j < newcount;j++)
                    newarray->slot[j] = newarray->slot[j+1];
                i--;
                break;
            }
        }
    }

    /* Reallocate newarray if the new size ended up smaller from duplicate
     * removal. */
    if(UNLIKELY(newcount < newarray->count))
    {
        struct ALeffectslotArray *tmpnewarray = al_calloc(DEF_ALIGN,
            FAM_SIZE(struct ALeffectslotArray, slot, newcount));
        memcpy(tmpnewarray, newarray, FAM_SIZE(struct ALeffectslotArray, slot, newcount));
        al_free(newarray);
        newarray = tmpnewarray;
        newarray->count = newcount;
    }

    curarray = ATOMIC_EXCHANGE_PTR(&context->ActiveAuxSlots, newarray, almemory_order_acq_rel);
    /* Wait for any in-progress mix to finish before freeing the old array the
     * mixer thread may still be reading. */
    while((ATOMIC_LOAD(&device->MixCount, almemory_order_acquire)&1))
        althrd_yield();
    al_free(curarray);
}
/* Mixer thread for the null (no-output) backend: paces mixing against
 * wall-clock time so samples are consumed at the device's configured
 * frequency, then discards the mixed data.  Returns non-zero on a
 * clock failure. */
static int ALCnullBackend_mixerProc(void *ptr)
{
    ALCnullBackend *self = (ALCnullBackend*)ptr;
    ALCdevice *device = STATIC_CAST(ALCbackend, self)->mDevice;
    struct timespec now, start;
    ALuint64 avail, done;
    /* Sleep roughly half an update period when we're ahead of schedule. */
    const long restTime = (long)((ALuint64)device->UpdateSize * 1000000000 /
                                 device->Frequency / 2);

    SetRTPriority();
    althrd_setname(althrd_current(), MIXER_THREAD_NAME);

    done = 0;
    if(altimespec_get(&start, AL_TIME_UTC) != AL_TIME_UTC)
    {
        ERR("Failed to get starting time\n");
        return 1;
    }
    while(!ATOMIC_LOAD(&self->killNow, almemory_order_acquire) &&
          ATOMIC_LOAD(&device->Connected, almemory_order_acquire))
    {
        if(altimespec_get(&now, AL_TIME_UTC) != AL_TIME_UTC)
        {
            ERR("Failed to get current time\n");
            return 1;
        }

        /* Number of samples that should have been consumed by now. */
        avail = (now.tv_sec - start.tv_sec) * device->Frequency;
        avail += (ALint64)(now.tv_nsec - start.tv_nsec) * device->Frequency / 1000000000;
        if(avail < done)
        {
            /* Oops, time skipped backwards. Reset the number of samples done
             * with one update available since we (likely) just came back from
             * sleeping. */
            done = avail - device->UpdateSize;
        }

        if(avail-done < device->UpdateSize)
            al_nssleep(restTime);
        else while(avail-done >= device->UpdateSize)
        {
            /* Mix into a null buffer; output is discarded. */
            ALCnullBackend_lock(self);
            aluMixData(device, NULL, device->UpdateSize);
            ALCnullBackend_unlock(self);
            done += device->UpdateSize;
        }
    }

    return 0;
}
/* Publishes the effect slot's current properties to the mixer.  Pops a
 * property container from the slot's freelist (lock-free) or allocates
 * one, fills it in, and atomically installs it as the pending update.
 * When `withstate` is set, the slot's effect state object is handed to
 * the container; any stale state found there is deleted at the end. */
void UpdateEffectSlotProps(ALeffectslot *slot, ALboolean withstate)
{
    struct ALeffectslotProps *props;
    ALeffectState *oldstate;

    /* Get an unused property container, or allocate a new one as needed. */
    props = ATOMIC_LOAD(&slot->FreeList, almemory_order_acquire);
    if(!props)
        props = al_calloc(16, sizeof(*props));
    else
    {
        /* Lock-free pop from the freelist head; retry on contention. */
        struct ALeffectslotProps *next;
        do {
            next = ATOMIC_LOAD(&props->next, almemory_order_relaxed);
        } while(ATOMIC_COMPARE_EXCHANGE_WEAK(struct ALeffectslotProps*,
                &slot->FreeList, &props, next, almemory_order_seq_cst,
                almemory_order_consume) == 0);
    }

    /* Copy in current property values. */
    ATOMIC_STORE(&props->Gain, slot->Gain, almemory_order_relaxed);
    ATOMIC_STORE(&props->AuxSendAuto, slot->AuxSendAuto, almemory_order_relaxed);

    ATOMIC_STORE(&props->Type, slot->Effect.Type, almemory_order_relaxed);
    memcpy(&props->Props, &slot->Effect.Props, sizeof(props->Props));
    /* Swap out any stale effect state object there may be in the container, to
     * delete it. */
    ATOMIC_STORE(&props->UpdateState, withstate, almemory_order_relaxed);
    oldstate = ATOMIC_EXCHANGE(ALeffectState*, &props->State,
        withstate ? slot->Effect.State : NULL, almemory_order_relaxed);

    /* Set the new container for updating internal parameters. */
    props = ATOMIC_EXCHANGE(struct ALeffectslotProps*, &slot->Update, props,
                            almemory_order_acq_rel);
    if(props)
    {
        /* If there was an unused update container, put it back in the
         * freelist. */
        struct ALeffectslotProps *first = ATOMIC_LOAD(&slot->FreeList);
        do {
            ATOMIC_STORE(&props->next, first, almemory_order_relaxed);
        } while(ATOMIC_COMPARE_EXCHANGE_WEAK(struct ALeffectslotProps*,
                &slot->FreeList, &first, props) == 0);
    }

    DELETE_OBJ(oldstate);
}
/* Records an AL error on the context and notifies interested parties:
 * formats the message, logs it, optionally breaks into a debugger when
 * TrapALError is set, stores the error code if no error is already
 * pending, and fires the error event callback when enabled. */
void alSetError(ALCcontext *context, ALenum errorCode, const char *msg, ...)
{
    ALenum curerr = AL_NO_ERROR;
    char message[1024] = { 0 };
    va_list args;
    int msglen;

    va_start(args, msg);
    msglen = vsnprintf(message, sizeof(message), msg, args);
    va_end(args);

    if(msglen < 0 || (size_t)msglen >= sizeof(message))
    {
        /* Formatting failed or was truncated: ensure termination and
         * recompute the usable length. */
        message[sizeof(message)-1] = 0;
        msglen = (int)strlen(message);
    }
    if(msglen > 0)
        msg = message;
    else
    {
        msg = "<internal error constructing message>";
        msglen = (int)strlen(msg);
    }

    /* Log the final message text (msg, not the raw buffer) so the fallback
     * string is reported consistently with what the event callback below
     * receives. */
    WARN("Error generated on context %p, code 0x%04x, \"%s\"\n",
         context, errorCode, msg);
    if(TrapALError)
    {
#ifdef _WIN32
        /* DebugBreak will cause an exception if there is no debugger */
        if(IsDebuggerPresent())
            DebugBreak();
#elif defined(SIGTRAP)
        raise(SIGTRAP);
#endif
    }

    /* Only record the code if no error is already pending. */
    ATOMIC_COMPARE_EXCHANGE_STRONG_SEQ(&context->LastError, &curerr, errorCode);
    if((ATOMIC_LOAD(&context->EnabledEvts, almemory_order_relaxed)&EventType_Error))
    {
        ALbitfieldSOFT enabledevts;
        almtx_lock(&context->EventCbLock);
        /* Re-check under the lock in case error events were just disabled. */
        enabledevts = ATOMIC_LOAD(&context->EnabledEvts, almemory_order_relaxed);
        if((enabledevts&EventType_Error) && context->EventCb)
            (*context->EventCb)(AL_EVENT_TYPE_ERROR_SOFT, 0, errorCode, msglen, msg,
                                context->EventParam);
        almtx_unlock(&context->EventCbLock);
    }
}
// Pops `length` bytes from the ring buffer into `output` (or discards
// them when output is NULL).  This is the consumer side of a lock-free
// single-producer/single-consumer FIFO: only readPtr is written here,
// writePtr is only loaded.  Returns `length` on success or 0 when not
// enough contiguous data is available — reads never span the wrap
// point of the backing storage.
size_t RingFIFORead(struct RingFIFO* buffer, void* output, size_t length) {
	void* data = buffer->readPtr;
	void* end;
	ATOMIC_LOAD(end, buffer->writePtr);

	// Wrap around if we can't fit enough in here
	if ((intptr_t) data - (intptr_t) buffer->data + length >= buffer->capacity) {
		if (end == data) {
			// Oops! If we wrap now, it'll appear full
			return 0;
		}
		data = buffer->data;
	}

	size_t remaining;
	if (data > end) {
		// Writer has wrapped past us; readable bytes run to the end of
		// the backing storage.
		uintptr_t bufferEnd = (uintptr_t) buffer->data + buffer->capacity;
		remaining = bufferEnd - (uintptr_t) data;
	} else {
		remaining = (intptr_t) end - (intptr_t) data;
	}
	// If the pointers touch, it's empty
	if (remaining < length) {
		return 0;
	}
	if (output) {
		memcpy(output, data, length);
	}
	// Publish the new read position only after the copy completes.
	ATOMIC_STORE(buffer->readPtr, (void*) ((intptr_t) data + length));
	return length;
}
/*
 * Validate read set (check if all read addresses are still valid now).
 *
 * Returns 1 when every read-set entry is either still at the version
 * we originally read, or locked by this very transaction (its lock
 * points into our own write set).  Returns 0 on the first entry that
 * fails, meaning a conflicting update occurred.
 */
static inline int stm_validate(stm_tx_t *tx)
{
  r_entry_t *r;
  int i;
  stm_word_t l;

  PRINT_DEBUG("==> stm_validate(%p[%lu-%lu])\n", tx, (unsigned long)tx->start, (unsigned long)tx->end);

  /* Validate reads */
  r = tx->r_set.entries;
  for (i = tx->r_set.nb_entries; i > 0; i--, r++) {
    /* Read lock */
    l = ATOMIC_LOAD(r->lock);
    /* Unlocked and still the same version? */
    if (LOCK_GET_OWNED(l)) {
      /* Do we own the lock? */
      w_entry_t *w = (w_entry_t *)LOCK_GET_ADDR(l);
      /* Simply check if address falls inside our write set (avoids non-faulting load) */
      if (!(tx->w_set.entries <= w && w < tx->w_set.entries + tx->w_set.nb_entries)) {
        /* Locked by another transaction: cannot validate */
        return 0;
      }
      /* We own the lock: OK */
    } else {
      if (LOCK_GET_TIMESTAMP(l) != r->version) {
        /* Other version: cannot validate */
        return 0;
      }
      /* Same version: OK */
    }
  }
  return 1;
}
/* Sets the context's global distance attenuation model.  Rejects any
 * value outside the defined model enums; the listener properties are
 * republished unless per-source models are active or updates are
 * deferred. */
AL_API ALvoid AL_APIENTRY alDistanceModel(ALenum value)
{
    ALCcontext *context = GetContextRef();
    if(!context) return;

    switch(value)
    {
        case AL_INVERSE_DISTANCE:
        case AL_INVERSE_DISTANCE_CLAMPED:
        case AL_LINEAR_DISTANCE:
        case AL_LINEAR_DISTANCE_CLAMPED:
        case AL_EXPONENT_DISTANCE:
        case AL_EXPONENT_DISTANCE_CLAMPED:
        case AL_NONE:
            break;
        default:
            SET_ERROR_AND_GOTO(context, AL_INVALID_VALUE, done);
    }

    WriteLock(&context->PropLock);
    context->DistanceModel = value;
    /* Per-source distance models override the context model, so only
     * republish when the context model is actually in effect. */
    if(!context->SourceDistanceModel &&
       !ATOMIC_LOAD(&context->DeferUpdates, almemory_order_acquire))
        UpdateListenerProps(context);
    WriteUnlock(&context->PropLock);

done:
    ALCcontext_DecRef(context);
}
static int ALCwinmmCapture_captureProc(void *arg) { ALCwinmmCapture *self = arg; WAVEHDR *WaveHdr; MSG msg; althrd_setname(althrd_current(), RECORD_THREAD_NAME); while(GetMessage(&msg, NULL, 0, 0)) { if(msg.message != WIM_DATA) continue; /* Don't wait for other buffers to finish before quitting. We're * closing so we don't need them. */ if(ATOMIC_LOAD(&self->killNow, almemory_order_acquire)) break; WaveHdr = ((WAVEHDR*)msg.lParam); ll_ringbuffer_write(self->Ring, WaveHdr->lpData, WaveHdr->dwBytesRecorded / self->Format.nBlockAlign ); // Send buffer back to capture more data waveInAddBuffer(self->InHdl, WaveHdr, sizeof(WAVEHDR)); IncrementRef(&self->WaveBuffersCommitted); } return 0; }
/* Sets a three-integer listener property.  Position and velocity are
 * forwarded to the float variant; no other 3-integer listener property
 * exists, so anything else is an invalid enum. */
AL_API void AL_APIENTRY alListener3i(ALenum param, ALint value1, ALint value2, ALint value3)
{
    ALCcontext *context;

    if(param == AL_POSITION || param == AL_VELOCITY)
    {
        alListener3f(param, (ALfloat)value1, (ALfloat)value2, (ALfloat)value3);
        return;
    }

    context = GetContextRef();
    if(!context) return;

    WriteLock(&context->PropLock);
    /* Every remaining param is invalid for this entry point. */
    SET_ERROR_AND_GOTO(context, AL_INVALID_ENUM, done);

    if(!ATOMIC_LOAD(&context->DeferUpdates, almemory_order_acquire))
        UpdateListenerProps(context);

done:
    WriteUnlock(&context->PropLock);
    ALCcontext_DecRef(context);
}
FORCE_ALIGN static int ALCwinmmPlayback_mixerProc(void *arg) { ALCwinmmPlayback *self = arg; ALCdevice *device = STATIC_CAST(ALCbackend, self)->mDevice; WAVEHDR *WaveHdr; MSG msg; SetRTPriority(); althrd_setname(althrd_current(), MIXER_THREAD_NAME); while(GetMessage(&msg, NULL, 0, 0)) { if(msg.message != WOM_DONE) continue; if(ATOMIC_LOAD(&self->killNow, almemory_order_acquire)) { if(ReadRef(&self->WaveBuffersCommitted) == 0) break; continue; } WaveHdr = ((WAVEHDR*)msg.lParam); ALCwinmmPlayback_lock(self); aluMixData(device, WaveHdr->lpData, WaveHdr->dwBufferLength / self->Format.nBlockAlign); ALCwinmmPlayback_unlock(self); // Send buffer back to play more data waveOutWrite(self->OutHdl, WaveHdr, sizeof(WAVEHDR)); IncrementRef(&self->WaveBuffersCommitted); } return 0; }
/* Marks the device disconnected and stops every playing source on all
 * of its contexts, detaching voices from their sources. */
ALvoid aluHandleDisconnect(ALCdevice *device)
{
    ALCcontext *ctx;

    device->Connected = ALC_FALSE;

    for(ctx = ATOMIC_LOAD(&device->ContextList);ctx != NULL;ctx = ctx->next)
    {
        ALvoice *voice = ctx->Voices;
        ALvoice *voice_end = voice + ctx->VoiceCount;

        for(;voice != voice_end;voice++)
        {
            ALsource *source = voice->Source;
            voice->Source = NULL;

            if(source && source->state == AL_PLAYING)
            {
                /* Force the source to a stopped state at its start. */
                source->state = AL_STOPPED;
                ATOMIC_STORE(&source->current_buffer, NULL);
                source->position = 0;
                source->position_fraction = 0;
            }
        }
        ctx->VoiceCount = 0;
    }
}
/* Sets a single-float listener property (gain or meters-per-unit).
 * Both require a finite, non-negative value; listener properties are
 * republished unless updates are deferred. */
AL_API ALvoid AL_APIENTRY alListenerf(ALenum param, ALfloat value)
{
    ALCcontext *context = GetContextRef();
    if(!context) return;

    WriteLock(&context->PropLock);
    switch(param)
    {
        case AL_GAIN:
        case AL_METERS_PER_UNIT:
            /* Shared validation: finite and non-negative. */
            if(!(value >= 0.0f && isfinite(value)))
                SET_ERROR_AND_GOTO(context, AL_INVALID_VALUE, done);
            if(param == AL_GAIN)
                context->Listener->Gain = value;
            else
                context->Listener->MetersPerUnit = value;
            break;

        default:
            SET_ERROR_AND_GOTO(context, AL_INVALID_ENUM, done);
    }
    if(!ATOMIC_LOAD(&context->DeferUpdates, almemory_order_acquire))
        UpdateListenerProps(context);

done:
    WriteUnlock(&context->PropLock);
    ALCcontext_DecRef(context);
}
// Pushes `length` bytes from `value` into the ring buffer (or reserves
// the space when value is NULL).  This is the producer side of a
// lock-free single-producer/single-consumer FIFO: only writePtr is
// written here, readPtr is only loaded.  Returns `length` on success or
// 0 when there isn't room — writes never span the wrap point, and the
// write pointer is never allowed to catch up to the read pointer so
// the full and empty states stay distinguishable.
size_t RingFIFOWrite(struct RingFIFO* buffer, const void* value, size_t length) {
	void* data = buffer->writePtr;
	void* end;
	ATOMIC_LOAD(end, buffer->readPtr);

	// Wrap around if we can't fit enough in here
	if ((intptr_t) data - (intptr_t) buffer->data + length >= buffer->capacity) {
		if (end == buffer->data) {
			// Oops! If we wrap now, it'll appear empty
			return 0;
		}
		data = buffer->data;
	}

	size_t remaining;
	if (data >= end) {
		// Reader is at or behind us; free space runs to the end of the
		// backing storage.
		uintptr_t bufferEnd = (uintptr_t) buffer->data + buffer->capacity;
		remaining = bufferEnd - (uintptr_t) data;
	} else {
		remaining = (uintptr_t) end - (uintptr_t) data;
	}
	// Note that we can't hit the end pointer
	if (remaining <= length) {
		return 0;
	}
	if (value) {
		memcpy(data, value, length);
	}
	// Publish the new write position only after the copy completes.
	ATOMIC_STORE(buffer->writePtr, (void*) ((intptr_t) data + length));
	return length;
}
/* Publishes the effect slot's current properties to the mixer.  Pops a
 * property container from the context's freelist (lock-free) or
 * allocates one, fills it in (taking a reference on the slot's effect
 * state), and atomically installs it as the slot's pending update.  If
 * an older update was never applied, its state reference is dropped
 * and the container is returned to the freelist. */
void UpdateEffectSlotProps(ALeffectslot *slot, ALCcontext *context)
{
    struct ALeffectslotProps *props;
    ALeffectState *oldstate;

    /* Get an unused property container, or allocate a new one as needed. */
    props = ATOMIC_LOAD(&context->FreeEffectslotProps, almemory_order_relaxed);
    if(!props)
        props = al_calloc(16, sizeof(*props));
    else
    {
        /* Lock-free pop from the freelist head; retry on contention. */
        struct ALeffectslotProps *next;
        do {
            next = ATOMIC_LOAD(&props->next, almemory_order_relaxed);
        } while(ATOMIC_COMPARE_EXCHANGE_PTR_WEAK(&context->FreeEffectslotProps,
                &props, next, almemory_order_seq_cst,
                almemory_order_acquire) == 0);
    }

    /* Copy in current property values. */
    props->Gain = slot->Gain;
    props->AuxSendAuto = slot->AuxSendAuto;

    props->Type = slot->Effect.Type;
    props->Props = slot->Effect.Props;
    /* Swap out any stale effect state object there may be in the container, to
     * delete it. */
    ALeffectState_IncRef(slot->Effect.State);
    oldstate = props->State;
    props->State = slot->Effect.State;

    /* Set the new container for updating internal parameters. */
    props = ATOMIC_EXCHANGE_PTR(&slot->Update, props, almemory_order_acq_rel);
    if(props)
    {
        /* If there was an unused update container, put it back in the
         * freelist. */
        if(props->State)
            ALeffectState_DecRef(props->State);
        props->State = NULL;
        ATOMIC_REPLACE_HEAD(struct ALeffectslotProps*, &context->FreeEffectslotProps, props);
    }

    if(oldstate)
        ALeffectState_DecRef(oldstate);
}
/* Consumer worker: receives file descriptors from the channel and
 * processes them until the pool shuts down or the thread is asked to
 * shrink away.  Returns TP_PLUGIN_EXIT when leaving the pool. */
static int template_consumer_process(tp_vtable_child_arg_t *arg)
{
    tp_vtable_global_arg_t *g = arg->global;
    template_producer_t *consumer = (template_producer_t *)g->producer_arg;
    tp_chan_t *chan = consumer->chan;
    tp_pool_t *pool = g->self_pool;
    uint64_t id = 0;
    void *val = NULL;
    int rv = 0;
    struct template_conf *conf = (struct template_conf *)g->user_data;
    int min_nt = conf->min_nthreads;
    int time_out = MAX_TIME;

    for (;;) {
        val = (void *)(intptr_t)-1;
        printf("recv fd before\n");
        /* NOTE(review): the computed time_out is never passed here (-1 waits
         * forever), which makes the ETIMEDOUT shrink path below look
         * unreachable — confirm whether time_out was meant to be used. */
        rv = tp_chan_recv_timedwait(chan, &val, -1);
        if (rv == TP_SHUTDOWN) {
            /* Cast to match the format: id is uint64_t, %ld expects long. */
            printf("id(%lu) shutdown\n", (unsigned long)id);
            return TP_PLUGIN_EXIT;
        }
        printf("recv fd after\n");

        /* When shrinking the pool, a worker beyond the minimum thread count
         * may remove itself on timeout and exit this process loop. */
        if (ATOMIC_LOAD(&g->c_ref_count) + ATOMIC_LOAD(&g->p_ref_count) > min_nt &&
            rv == ETIMEDOUT) {
            printf("delete %s\n", __func__);
            tp_pool_thread_deln(pool, 1);
            return TP_PLUGIN_EXIT;
        }

        engine_msg_process(arg, (intptr_t)val);
        close((intptr_t)val);

        /* Use the long timeout for the first wait, the short one after. */
        if (time_out == MAX_TIME) {
            time_out = MIN_TIME;
        }
    }

    /* Not reached: the loop above only exits via return. */
    printf("%s %ldbye bye\n", __func__, (long)pthread_self());
    return 0;
}
/* Lock-free push of a pre-linked chain of effect slots [start..last]
 * onto the head of the context's active list.  On CAS failure `root`
 * is reloaded with the current head and the link is retried. */
static void AddEffectSlotList(ALCcontext *context, ALeffectslot *start, ALeffectslot *last)
{
    ALeffectslot *root = ATOMIC_LOAD(&context->ActiveAuxSlotList);
    do {
        ATOMIC_STORE(&last->next, root, almemory_order_relaxed);
    } while(!ATOMIC_COMPARE_EXCHANGE_WEAK(ALeffectslot*, &context->ActiveAuxSlotList, &root, start));
}
/* Returns the requested context property coerced to a boolean
 * (AL_TRUE for any non-zero value; AL_DISTANCE_MODEL is true only for
 * the default AL_INVERSE_DISTANCE_CLAMPED). */
AL_API ALboolean AL_APIENTRY alGetBoolean(ALenum pname)
{
    ALCcontext *context;
    ALboolean value = AL_FALSE;

    context = GetContextRef();
    if(!context) return AL_FALSE;

    switch(pname)
    {
        case AL_DOPPLER_FACTOR:
            value = (context->DopplerFactor != 0.0f) ? AL_TRUE : AL_FALSE;
            break;
        case AL_DOPPLER_VELOCITY:
            value = (context->DopplerVelocity != 0.0f) ? AL_TRUE : AL_FALSE;
            break;
        case AL_DISTANCE_MODEL:
            value = (context->DistanceModel == AL_INVERSE_DISTANCE_CLAMPED) ? AL_TRUE : AL_FALSE;
            break;
        case AL_SPEED_OF_SOUND:
            value = (context->SpeedOfSound != 0.0f) ? AL_TRUE : AL_FALSE;
            break;
        case AL_DEFERRED_UPDATES_SOFT:
            value = ATOMIC_LOAD(&context->DeferUpdates, almemory_order_acquire) ? AL_TRUE : AL_FALSE;
            break;
        case AL_GAIN_LIMIT_SOFT:
            value = (GAIN_MIX_MAX/context->GainBoost != 0.0f) ? AL_TRUE : AL_FALSE;
            break;
        case AL_NUM_RESAMPLERS_SOFT:
            /* Always non-0. */
            value = AL_TRUE;
            break;
        case AL_DEFAULT_RESAMPLER_SOFT:
            value = ResamplerDefault ? AL_TRUE : AL_FALSE;
            break;
        default:
            SET_ERROR_AND_GOTO(context, AL_INVALID_ENUM, done);
    }

done:
    ALCcontext_DecRef(context);
    return value;
}
static INLINE void abi_exit(void) { TX_GET; char * statistics; abi_exit_thread(tx); /* Ensure thread safety */ reload: if (ATOMIC_LOAD_ACQ(&global_abi.status) == ABI_INITIALIZED) { if (ATOMIC_CAS_FULL(&global_abi.status, ABI_INITIALIZED, ABI_FINALIZING) == 0) goto reload; } else { return; } if ((statistics = getenv("ITM_STATISTICS")) != NULL) { FILE * f; int i = 0; stats_t * ts; if (statistics[0] == '-') f = stdout; else if ((f = fopen("itm.log", "w")) == NULL) { fprintf(stderr, "can't open itm.log for writing\n"); goto finishing; } fprintf(f, "STATS REPORT\n"); fprintf(f, "THREAD TOTALS\n"); while (1) { do { ts = (stats_t *)ATOMIC_LOAD(&thread_stats); if (ts == NULL) goto no_more_stat; } while(ATOMIC_CAS_FULL(&thread_stats, ts, ts->next) == 0); /* Skip stats if not a transactional thread */ if (ts->nb_commits == 0) continue; fprintf(f, "Thread %-4i : %12s %12s %12s %12s\n", i, "Min", "Mean", "Max", "Total"); fprintf(f, " Transactions : %12lu\n", ts->nb_commits); fprintf(f, " %-25s: %12lu %12.2f %12lu %12lu\n", "Retries", ts->nb_retries_min, ts->nb_retries_avg, ts->nb_retries_max, ts->nb_aborts); fprintf(f,"\n"); /* Free the thread stats structure */ free(ts); i++; } no_more_stat: if (f != stdout) { fclose(f); } } finishing: stm_exit(); ATOMIC_STORE(&global_abi.status, ABI_NOT_INITIALIZED); }
/* Returns the requested context property as a 64-bit integer (float
 * properties are truncated by the cast). */
AL_API ALint64SOFT AL_APIENTRY alGetInteger64SOFT(ALenum pname)
{
    ALint64SOFT result = 0;
    ALCcontext *context = GetContextRef();
    if(!context) return 0;

    switch(pname)
    {
        case AL_DOPPLER_FACTOR:
            result = (ALint64SOFT)context->DopplerFactor;
            break;
        case AL_DOPPLER_VELOCITY:
            result = (ALint64SOFT)context->DopplerVelocity;
            break;
        case AL_DISTANCE_MODEL:
            result = (ALint64SOFT)context->DistanceModel;
            break;
        case AL_SPEED_OF_SOUND:
            result = (ALint64SOFT)context->SpeedOfSound;
            break;
        case AL_DEFERRED_UPDATES_SOFT:
            result = ATOMIC_LOAD(&context->DeferUpdates, almemory_order_acquire) ?
                     (ALint64SOFT)AL_TRUE : 0;
            break;
        case AL_GAIN_LIMIT_SOFT:
            result = (ALint64SOFT)(GAIN_MIX_MAX/context->GainBoost);
            break;
        case AL_NUM_RESAMPLERS_SOFT:
            result = (ALint64SOFT)(ResamplerMax + 1);
            break;
        case AL_DEFAULT_RESAMPLER_SOFT:
            result = (ALint64SOFT)ResamplerDefault;
            break;
        default:
            SET_ERROR_AND_GOTO(context, AL_INVALID_ENUM, done);
    }

done:
    ALCcontext_DecRef(context);
    return result;
}
/* Sets an integer property on an auxiliary effect slot: either loads a
 * (possibly NULL) effect into it, or toggles automatic auxiliary send
 * adjustment. */
AL_API ALvoid AL_APIENTRY alAuxiliaryEffectSloti(ALuint effectslot, ALenum param, ALint value)
{
    ALCdevice *device;
    ALCcontext *context;
    ALeffectslot *slot;
    ALeffect *effect = NULL;
    ALenum err;

    context = GetContextRef();
    if(!context) return;

    WriteLock(&context->PropLock);
    LockEffectSlotsRead(context);
    if((slot=LookupEffectSlot(context, effectslot)) == NULL)
        SET_ERROR_AND_GOTO(context, AL_INVALID_NAME, done);
    switch(param)
    {
    case AL_EFFECTSLOT_EFFECT:
        device = context->Device;

        /* value 0 clears the effect; otherwise it must name a valid
         * effect object.  The effect lock must be released before any
         * error jump. */
        LockEffectsRead(device);
        effect = (value ? LookupEffect(device, value) : NULL);
        if(!(value == 0 || effect != NULL))
        {
            UnlockEffectsRead(device);
            SET_ERROR_AND_GOTO(context, AL_INVALID_VALUE, done);
        }
        err = InitializeEffect(device, slot, effect);
        UnlockEffectsRead(device);

        if(err != AL_NO_ERROR)
            SET_ERROR_AND_GOTO(context, err, done);
        break;

    case AL_EFFECTSLOT_AUXILIARY_SEND_AUTO:
        if(!(value == AL_TRUE || value == AL_FALSE))
            SET_ERROR_AND_GOTO(context, AL_INVALID_VALUE, done);
        slot->AuxSendAuto = value;
        UpdateEffectSlotProps(slot);
        /* Sources feeding this slot need their properties refreshed,
         * unless updates are deferred. */
        if(!ATOMIC_LOAD(&context->DeferUpdates, almemory_order_acquire))
            UpdateAllSourceProps(context);
        break;

    default:
        SET_ERROR_AND_GOTO(context, AL_INVALID_ENUM, done);
    }

done:
    UnlockEffectSlotsRead(context);
    WriteUnlock(&context->PropLock);
    ALCcontext_DecRef(context);
}
/* Returns the calling thread's transaction descriptor, lazily
 * initializing the thread — and, if necessary, the whole process —
 * on first use. */
static INLINE stm_tx_t *abi_init_thread(void)
{
  stm_tx_t *tx = tls_get_tx();

  if (tx != NULL)
    return tx;

  /* First use on this thread: ensure process-wide init has happened. */
  if (ATOMIC_LOAD(&global_abi.status) != ABI_INITIALIZED)
    _ITM_initializeProcess();

  return stm_init_thread();
}
/* Sets a float-vector listener property.  Scalar and 3-float params
 * are forwarded to their dedicated entry points; AL_ORIENTATION takes
 * six finite floats (AT vector then UP vector). */
AL_API ALvoid AL_APIENTRY alListenerfv(ALenum param, const ALfloat *values)
{
    ALCcontext *context;

    if(values)
    {
        switch(param)
        {
            case AL_GAIN:
            case AL_METERS_PER_UNIT:
                alListenerf(param, values[0]);
                return;

            case AL_POSITION:
            case AL_VELOCITY:
                alListener3f(param, values[0], values[1], values[2]);
                return;
        }
    }

    context = GetContextRef();
    if(!context) return;

    WriteLock(&context->PropLock);
    if(!values)
        SET_ERROR_AND_GOTO(context, AL_INVALID_VALUE, done);
    switch(param)
    {
        case AL_ORIENTATION:
        {
            ALsizei i;
            /* All six components must be finite. */
            for(i = 0;i < 6;i++)
            {
                if(!isfinite(values[i]))
                    SET_ERROR_AND_GOTO(context, AL_INVALID_VALUE, done);
            }
            /* AT then UP */
            context->Listener->Forward[0] = values[0];
            context->Listener->Forward[1] = values[1];
            context->Listener->Forward[2] = values[2];
            context->Listener->Up[0] = values[3];
            context->Listener->Up[1] = values[4];
            context->Listener->Up[2] = values[5];
            break;
        }

        default:
            SET_ERROR_AND_GOTO(context, AL_INVALID_ENUM, done);
    }
    if(!ATOMIC_LOAD(&context->DeferUpdates, almemory_order_acquire))
        UpdateListenerProps(context);

done:
    WriteUnlock(&context->PropLock);
    ALCcontext_DecRef(context);
}
static void mod_order_on_precommit(void *arg) { stm_word_t my_ts, current_ts; my_ts = (stm_word_t)stm_get_specific(mod_order_key); /* Wait its turn... */ do { current_ts = ATOMIC_LOAD(&mod_order_ts_commit); /* Check that we are not killed to keep the liveness, the transaction will * abort before to commit. Note that if the kill feature is not present, the * transaction must abort if it is not its turn to guarantee progress. */ if (stm_killed()) return; } while (current_ts != my_ts); }
// Counts how many of the four possible left/right neighbor edges of
// this node are present (non-zero counters), returning the tallies
// through the out parameters.
void KmerNode::getNeighborCounts(unsigned *l_count, unsigned *r_count) // const
{
    unsigned left_count = 0, right_count = 0;
#ifdef VELOUR_TBB // XXX: only needed for directly constructed version that parallel tip-clips
    // Snapshot each side as a single word through the union so a
    // concurrent update can't tear the four per-edge counters mid-read.
    union { counter_t count[4]; four_counter_t side; } local_left, local_right;
    local_left.side = ATOMIC_LOAD(this->left_side);
    local_right.side = ATOMIC_LOAD(this->right_side);
    for (int i = 0 ; i < 4 ; i ++) {
        left_count += (local_left.count[i] != 0);
        right_count += (local_right.count[i] != 0);
    }
#else
    for (int i = 0 ; i < 4 ; i ++) {
        left_count += (this->left_count[i] != 0);
        right_count += (this->right_count[i] != 0);
    }
#endif
    *l_count = left_count;
    *r_count = right_count;
}
/* Republishes properties for every active effect slot whose PropsClean
 * flag was cleared (test-and-set returns the previous value, so a
 * clear flag means an update is pending). */
void UpdateAllEffectSlotProps(ALCcontext *context)
{
    struct ALeffectslotArray *active;
    ALsizei idx;

    LockEffectSlotList(context);
    active = ATOMIC_LOAD(&context->ActiveAuxSlots, almemory_order_acquire);
    for(idx = 0;idx < active->count;idx++)
    {
        ALeffectslot *slot = active->slot[idx];
        if(ATOMIC_FLAG_TEST_AND_SET(&slot->PropsClean, almemory_order_acq_rel) == 0)
            UpdateEffectSlotProps(slot, context);
    }
    UnlockEffectSlotList(context);
}
/* Returns the calling thread's transaction descriptor, lazily
 * initializing the thread (and, if needed, the whole process) on
 * first use. */
static INLINE stm_tx_t *abi_init_thread(void)
{
  stm_tx_t *tx = tls_get_tx();

  if (tx == NULL) {
    /* Make sure that the main initilization is done */
    if (ATOMIC_LOAD(&global_abi.status) != ABI_INITIALIZED)
      _ITM_initializeProcess();
    //t->thread_id = (int)ATOMIC_FETCH_INC_FULL(&global_abi.thread_counter);
    tx = stm_init_thread();
#ifdef STACK_CHECK
    /* NOTE(review): `t` is not declared in this scope — this only
     * compiles with STACK_CHECK disabled; confirm the intended
     * variable (likely tx or a thread record). */
    get_stack_attr(&t->stack_addr_low, &t->stack_addr_high);
#endif /* STACK_CHECK */
  }
  return tx;
}