/*
 * Reset a conn_info to its pristine, disconnected state.
 * Zeroes identity/address/DSN data, (re)initializes the protocol reader,
 * empties every intrusive queue/list head, and clears the I/O statistics.
 * NOTE(review): assumes `info` points to valid storage; nothing is
 * allocated or freed here, so it is safe to call on reused objects.
 */
void conn_info_init(struct conn_info *info) {
    info->refcount = 0;
    info->authenticated = false;
    memset(&info->addr, 0, sizeof(info->addr));
    memset(info->dsn, 0, sizeof(info->dsn));
    reader_init(&info->reader);
    info->last_active = -1; /* -1 = never active yet */
    info->current_buf = NULL;
    /* All command/buffer queues start out empty. */
    STAILQ_INIT(&info->cmd_queue);
    STAILQ_INIT(&info->ready_queue);
    STAILQ_INIT(&info->waiting_queue);
    STAILQ_INIT(&info->buf_times);
    TAILQ_INIT(&info->data);
    TAILQ_INIT(&info->local_data);
    /* Counters are atomic — presumably read from another thread; confirm. */
    ATOMIC_SET(info->send_bytes, 0);
    ATOMIC_SET(info->recv_bytes, 0);
    ATOMIC_SET(info->completed_commands, 0);
    info->status = DISCONNECTED;
}
/*
 * Lazily decide (once) whether OpenGL drawing runs in a separate T2 thread.
 * The decision is read from settings the first time this is called and then
 * cached in the atomic g_use_t2_thread flag.
 */
void T1_ensure_t2_is_initialized(void){
  if (ATOMIC_GET(g_use_t2_thread) != Use_T2_Thread::UNINITIALIZED)
    return; // already decided

  bool use_separate_thread = SETTINGS_read_bool("opengl_draw_in_separate_process", true);
  ATOMIC_SET(g_use_t2_thread, use_separate_thread ? Use_T2_Thread::YES : Use_T2_Thread::NO);
}
/*
 * Update the per-type and per-function memory allocation statistics.
 *
 * @flags:  encodes both the memory-type and the function-type index
 *          (decoded via mstat_tf_idx()/mstat_ff_idx()).
 * @status: whether this call records a successful alloc, a failed alloc,
 *          or a free.
 * @sz:     number of bytes involved.
 *
 * BUG FIX: the one-time initialization used to be guarded by
 * `if(!update_time)`, but `update_time` was never written anywhere
 * (the periodic-dump code that assigned it is commented out below),
 * so every single call wiped all counters before updating them.
 * A dedicated init flag is used instead.
 */
void rtw_mstat_update(const enum mstat_f flags, const MSTAT_STATUS status, u32 sz)
{
	static u8 stat_initialized = 0;
	static u32 update_time = 0; /* retained for the commented-out periodic dump below */
	int peak, alloc;
	int i;

	/* one-time initialization of all counters */
	if (!stat_initialized) {
		stat_initialized = 1;
		for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++) {
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_err_cnt), 0);
		}
		for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++) {
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_err_cnt), 0);
		}
	}

	switch (status) {
	case MSTAT_ALLOC_SUCCESS:
		/* Track current usage and raise the recorded peak if exceeded.
		 * NOTE(review): the read-check-set of `peak` is not atomic as a
		 * whole, so concurrent updaters may record a slightly stale
		 * peak — presumably acceptable for debug statistics. */
		ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
		alloc = ATOMIC_ADD_RETURN(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
		peak = ATOMIC_READ(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak));
		if (peak < alloc)
			ATOMIC_SET(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak), alloc);

		ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
		alloc = ATOMIC_ADD_RETURN(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
		peak = ATOMIC_READ(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak));
		if (peak < alloc)
			ATOMIC_SET(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak), alloc);
		break;

	case MSTAT_ALLOC_FAIL:
		ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_err_cnt));
		ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_err_cnt));
		break;

	case MSTAT_FREE:
		ATOMIC_DEC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
		ATOMIC_SUB(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
		ATOMIC_DEC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
		ATOMIC_SUB(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
		break;

	default:
		break;
	}

	//if (rtw_get_passing_time_ms(update_time) > 5000) {
	//	rtw_mstat_dump();
	//	update_time=rtw_get_current_time();
	//}
	(void)update_time; /* silence unused warning while the dump is disabled */
}
static void rtw_dev_shutdown(struct device *dev) { struct usb_interface *usb_intf = container_of(dev, struct usb_interface, dev); struct dvobj_priv *dvobj = NULL; _adapter *adapter = NULL; int i; DBG_871X("%s\n", __func__); if(usb_intf) { dvobj = usb_get_intfdata(usb_intf); if (dvobj) { for (i = 0; i<dvobj->iface_nums; i++) { adapter = dvobj->padapters[i]; if (adapter) { adapter->bSurpriseRemoved = _TRUE; } } ATOMIC_SET(&dvobj->continual_io_error, MAX_CONTINUAL_IO_ERR+1); } } }
/**
 * Put is protected by the writer mutex. This means that the tail mutex could
 * actually increase while this is happening. That's ok. Increasing the tail
 * just means there is _more_ room in the ring. We only modify writer_head.
 *
 * Single-producer side of a 1x1 ring buffer: stores `data` at the current
 * write position and publishes the advanced head. Returns false (without
 * side effects) when the ring is full.
 */
bool parcRingBuffer1x1_Put(PARCRingBuffer1x1 *ring, void *data)
{
    // Our speculative operation

    // The consumer modifies reader_tail, so make sure that's an atomic read.
    // only the prodcuer modifies writer_head, so there's only us
    uint32_t writer_head = ring->writer_head;
    uint32_t reader_tail = ATOMIC_FETCH(&ring->reader_tail);

    // ring_mask wrap: capacity must be a power of two for this to be valid.
    uint32_t writer_next = (writer_head + 1) & ring->ring_mask;

    // ring is full (head would catch up with tail)
    if (writer_next == reader_tail) {
        return false;
    }

    assertNull(ring->buffer[writer_head], "Ring index %u is not null!", writer_head);

    // Store the payload BEFORE publishing the new head, so the consumer
    // never observes an advanced head with an unwritten slot.
    ring->buffer[writer_head] = data;

    // we're using this just for atomic write to the integer
    // NOTE(review): ATOMIC_SET here takes (addr, old, new) — presumably a
    // compare-and-swap style macro; confirm against the platform header.
    ATOMIC_SET(&ring->writer_head, writer_head, writer_next);

    return true;
}
/*
 * Single-consumer side of the 1x1 ring buffer: pops the oldest entry into
 * *outputDataPtr and advances the tail. Returns false when the ring is empty.
 */
bool parcRingBuffer1x1_Get(PARCRingBuffer1x1 *ring, void **outputDataPtr)
{
    // do our speculative operation.
    // The producer modifies writer_head, so make sure that's an atomic read.
    // only the consumer modifies reader_tail, so there's only us
    uint32_t writer_head = ATOMIC_FETCH(&ring->writer_head);

    // native type assignment is atomic
    uint32_t reader_tail = ring->reader_tail;

    uint32_t reader_next = (reader_tail + 1) & ring->ring_mask;

    // ring is empty
    if (writer_head == reader_tail) {
        return false;
    }

    // now try to commit it
    // NOTE(review): tail is published before the slot is read/cleared; this
    // is safe only because there is exactly one producer and one consumer.
    ATOMIC_SET(&ring->reader_tail, reader_tail, reader_next);

    *outputDataPtr = ring->buffer[reader_tail];

    // for sanity's sake — Put() asserts the slot is NULL before reuse.
    ring->buffer[reader_tail] = NULL;

    return true;
}
/*
 * Prepare the per-context mbuf subsystem: compute where the mbuf header
 * lives inside each buffer chunk, start with an empty free queue, and
 * zero the free-buffer statistic.
 */
void mbuf_init(struct context *ctx)
{
    /* The struct mbuf bookkeeping sits at the tail of each bufsize chunk. */
    ctx->mbuf_offset = config.bufsize - sizeof(struct mbuf);

    TAILQ_INIT(&ctx->free_mbufq);

    ATOMIC_SET(ctx->mstats.free_buffers, 0);
}
/*
 * Called by the Faust GUI when a zone (widget) value changes.
 * Filters out echoes of automation/round-trip updates and only forwards
 * genuine user edits from the Faust GUI into the plugin's stored value.
 */
static void faust_gui_zone_callback(float val, void* arg){
  MyUI::Controller *controller = (MyUI::Controller*)arg;
  float min = controller->min_value;
  float max = controller->max_value;
  SoundPlugin *plugin = controller->plugin;
  int effect_num = controller->effect_num;
  Data *data = GET_DATA_FROM_PLUGIN(plugin);

  // Ignore changes within ~1% of the automated value: presumably the GUI
  // reflecting automation back at us, not a user gesture. (approx.)
  if (fabs(val - data->automation_values[effect_num]) < fabs((max-min)/100.0)) // approx.
    return;

  //printf("   Callback called %f. controller: %p\n val/auto: %f %f", val, controller, val, data->automation_values[effect_num]);

  // Ignore values that already match storage — another round-trip case.
  float stored_value;
  stored_value = PLUGIN_get_effect_value(plugin, effect_num, VALUE_FROM_STORAGE);
  if (val==stored_value)
    return;

  // We are now pretty certain that this update was caused by a user interaction in the faust gui, and not a roundtrip from radium.

  // Write-through must happen under the player lock (RT thread reads it).
  PLAYER_lock();{
    PLUGIN_set_native_effect_value(plugin, -1, effect_num, val, PLUGIN_STORED_TYPE, PLUGIN_STORE_VALUE, FX_single);
  }PLAYER_unlock();

  // Ask the GUI thread to refresh the patch widget.
  volatile struct Patch *patch = plugin->patch;
  ATOMIC_SET(patch->widget_needs_to_be_updated, true);
}
/*
 * Toggle the global edit-mode flag, show the new state in the status bar,
 * and request a redraw of the current window.
 */
void switchEditOnOff(void){
  struct Tracker_Windows *window = getWindowFromNum(-1);

  // Flip the atomic flag.
  ATOMIC_SET(root->editonoff, !ATOMIC_GET(root->editonoff));

  char message[1000];
  sprintf(message, "Edit %s", ATOMIC_GET(root->editonoff) ? "On" : "Off");
  GFX_SetStatusBar(message);

  window->must_redraw = true;
}
/*
 * Update the virtual/physical memory debug statistics and periodically
 * dump them (at most every 5 seconds).
 *
 * NOTE(review): the counters are re-zeroed on every call until the first
 * dump assigns `update_time` (and again if rtw_get_current_time() ever
 * returns 0) — verify whether that reset-until-first-dump behaviour is
 * intended.
 */
void rtw_update_mem_stat(u8 flag, u32 sz)
{
	static u32 update_time = 0;
	int peak, alloc;

	/* One-time(ish) zeroing of all counters — see note above. */
	if(!update_time) {
		ATOMIC_SET(&rtw_dbg_mem_stat.vir_alloc,0);
		ATOMIC_SET(&rtw_dbg_mem_stat.vir_peak,0);
		ATOMIC_SET(&rtw_dbg_mem_stat.vir_alloc_err,0);
		ATOMIC_SET(&rtw_dbg_mem_stat.phy_alloc,0);
		ATOMIC_SET(&rtw_dbg_mem_stat.phy_peak,0);
		ATOMIC_SET(&rtw_dbg_mem_stat.phy_alloc_err,0);
	}

	switch(flag) {
		case MEM_STAT_VIR_ALLOC_SUCCESS:
			/* Raise the recorded peak if current usage exceeds it.
			 * The read-check-set is not atomic as a whole; racy peaks
			 * are presumably tolerated for debug stats. */
			alloc = ATOMIC_ADD_RETURN(&rtw_dbg_mem_stat.vir_alloc, sz);
			peak=ATOMIC_READ(&rtw_dbg_mem_stat.vir_peak);
			if (peak<alloc)
				ATOMIC_SET(&rtw_dbg_mem_stat.vir_peak, alloc);
			break;

		case MEM_STAT_VIR_ALLOC_FAIL:
			ATOMIC_INC(&rtw_dbg_mem_stat.vir_alloc_err);
			break;

		case MEM_STAT_VIR_FREE:
			alloc = ATOMIC_SUB_RETURN(&rtw_dbg_mem_stat.vir_alloc, sz);
			break;

		case MEM_STAT_PHY_ALLOC_SUCCESS:
			alloc = ATOMIC_ADD_RETURN(&rtw_dbg_mem_stat.phy_alloc, sz);
			peak=ATOMIC_READ(&rtw_dbg_mem_stat.phy_peak);
			if (peak<alloc)
				ATOMIC_SET(&rtw_dbg_mem_stat.phy_peak, alloc);
			break;

		case MEM_STAT_PHY_ALLOC_FAIL:
			ATOMIC_INC(&rtw_dbg_mem_stat.phy_alloc_err);
			break;

		case MEM_STAT_PHY_FREE:
			/* last case: no break needed */
			alloc = ATOMIC_SUB_RETURN(&rtw_dbg_mem_stat.phy_alloc, sz);
	};

	/* Dump at most once per 5 seconds. */
	if (rtw_get_passing_time_ms(update_time) > 5000) {
		rtw_dump_mem_stat();
		update_time=rtw_get_current_time();
	}
}
static void transfer_atomic_must_redraws(struct Tracker_Windows *window) { bool a_must_redraw = ATOMIC_GET(atomic_must_redraw); if (a_must_redraw){ ATOMIC_SET(atomic_must_redraw, false); window->must_redraw = true; } bool a_must_redraw_editor = ATOMIC_GET(atomic_must_redraw_editor); if (a_must_redraw_editor){ ATOMIC_SET(atomic_must_redraw_editor, false); window->must_redraw_editor = true; } bool a_must_calculate = ATOMIC_GET(atomic_must_calculate_coordinates); if (a_must_calculate){ ATOMIC_SET(atomic_must_calculate_coordinates, false); window->must_calculate_coordinates = true; } }
/*
 * Periodic GUI refresh: consume pending cross-thread redraw requests and
 * run the (ordered) redraw cascade: coordinates -> full redraw -> editor
 * redraw. The order of the flag checks below is significant: earlier
 * stages set the flags consumed by later stages.
 */
void EditorWidget::updateEditor(){
  if(ATOMIC_GET(is_starting_up)==true)
    return;

  // Instrument widget refresh requested from another thread?
  {
    struct Patch *patch = ATOMIC_GET(atomic_must_redraw_instrument);
    if (patch!=NULL){
      ATOMIC_SET(atomic_must_redraw_instrument, NULL);
      GFX_update_instrument_widget(patch);//GFX_update_instrument_patch_gui(patch);
    }
  }

  // Pull the atomic redraw flags into window->must_* fields.
  transfer_atomic_must_redraws(window);

#if 0 //!defined(RELEASE)
  {
    int queue_size = GFX_get_op_queue_size(this->window);
    if (queue_size > 0 || this->window->must_calculate_coordinates==true || this->window->must_redraw==true || this->window->must_redraw_editor)
      printf("..Updating. Queue: %d. Update coordinates: %d. Redraw editor: %d. Redraw: %d\n", queue_size, this->window->must_calculate_coordinates, this->window->must_redraw_editor, this->window->must_redraw );
  }
#endif

  // Pending GFX operations force a redraw.
  if (GFX_get_op_queue_size(this->window)>0)
    this->window->must_redraw = true;

  // Coordinate recalculation implies a full redraw.
  if (this->window->must_calculate_coordinates==true){
    this->window->must_redraw = true;
    this->window->must_calculate_coordinates=false;
  }

  if (this->window->must_redraw) {
    UpdateTrackerWindowCoordinates(window);
    UpdateWBlockCoordinates(this->window, this->window->wblock);
    GFX_UpdateUpperLeft(window, window->wblock);
    UpdateAllPianoRollHeaders(window, window->wblock);
    SEQUENCER_update();
    update();
    // A full redraw always triggers an editor redraw afterwards.
    this->window->must_redraw_editor=true;
    this->window->must_redraw=false;
  }

  if (this->window->must_redraw_editor==true){
    GL_create(this->window, this->window->wblock);
    if (!is_playing())
      SEQUENCER_update();
    this->window->must_redraw_editor=false;
  }
}
static void setit(struct WBlocks *wblock, int realline){ if (!ATOMIC_GET(root->play_cursor_onoff)){ // Set current realline in main thread (main thread picks up till_curr_realline and sets curr_realline afterwards) //printf(" Setting till_curr_realline to %d\n", realline); ATOMIC_SET(wblock->till_curr_realline, realline); // Set current realline in opengl thread //printf("PEQ: set realline %d\n",realline); GE_set_curr_realline(realline); } PC_Pause_set_pos(wblock->l.num, realline); }
// Constructor: builds the UI (guarded by _initing so setup-time signal
// callbacks are ignored), clears the cross-thread async message slot,
// and arms the 100 ms polling timer used by timerEvent().
Soundfilesaver_widget(QWidget *parent=NULL)
  : RememberGeometryQDialog(parent)
  , currently_saving_plugin(NULL)
  , msgBox(NULL)
{
  _initing = true;

  msgBox = new MyQMessageBox;

  setupUi(this);

  _initing = false;

  _timer.parent = this;
  // No message pending from the saving thread yet.
  ATOMIC_SET(_timer.async_message, NULL);
  _timer.setInterval(100);
}
// Poll for (a) the user pressing a button in the progress message box and
// (b) an async message posted by the soundfile-saving thread. Consumes the
// message, stops the save machinery, and either aborts or saves the next
// file. The message string's ownership is transferred to us (V_free below).
void timerEvent(QTimerEvent * e){
  printf("clicked: %p\n", parent->msgBox->clickedButton());

  if (parent->msgBox->clickedButton()!=NULL){
    SOUNDFILESAVER_request_stop();

    // Reset clickedButton() — Qt offers no way to clear it, so recreate the box.
    delete parent->msgBox;
    parent->msgBox = new MyQMessageBox;
  }

  const char *message = ATOMIC_GET(async_message);
  if(message != NULL){
    // Consume the message so it is handled exactly once.
    ATOMIC_SET(async_message, NULL);

    MIXER_request_stop_saving_soundfile(); // This is very messy. The code would be far simpler if jack_set_freewheel could be called from any thread.

    //usleep(1000*100); // Wait a bit for saving to stop;

    ScrollEditorToRealLine_CurrPos(root->song->tracker_windows, root->song->tracker_windows->wblock->bot_realline);
    root->song->tracker_windows->must_redraw = true;

#if 0
    MyQMessageBox msgBox;
    msgBox->setText(QString(message));
    //msgBox->setInformativeText(message);
    msgBox->setStandardButtons(QMessageBox::Ok);

    safeExec(msgBox);
#endif

    bool was_cancelled = !strcmp(message, "Cancelled");

    V_free((void*)message);

    stop();

    parent->clean_prev();

    if (was_cancelled)
      PlayStop(); // Sometimes it continues playing after pressing "cancel".
    else
      parent->save_next();
  }
}
/*
 * Copy the contents of `wtrack` into `towtrack` (both belonging to
 * `wblock`'s block): display settings, track attributes, MIDI data,
 * and the full range of notes/stops/fx. Always returns true.
 */
static bool paste_track(
	struct WBlocks *wblock,
	struct WTracks *wtrack,
	struct WTracks *towtrack
){
	struct Tracks *totrack = towtrack->track;
	struct Tracks *track = wtrack->track;
	Place *p1,p2;

	/* WTrack display properties. */
	towtrack->notelength=wtrack->notelength;
	towtrack->fxwidth=wtrack->fxwidth;

	/* Track attributes. */
	totrack->onoff=track->onoff;
	totrack->pan=track->pan;
	totrack->volume=track->volume;
	totrack->panonoff=track->panonoff;
	totrack->volumeonoff=track->volumeonoff;
	/* midi_channel may be read by the player thread, hence atomic. */
	ATOMIC_SET(totrack->midi_channel, ATOMIC_GET(track->midi_channel));

	if(track->midi_instrumentdata!=NULL){
		totrack->midi_instrumentdata=MIDI_CopyInstrumentData(track);
	}

	totrack->trackname=talloc_strdup(track->trackname);

	/* Clear destination content before copying the full range. */
	totrack->notes=NULL;
	totrack->stops=NULL;
	VECTOR_clean(&totrack->fxs);

	/* Copy the whole block: from first position to last. */
	p1=PlaceGetFirstPos();
	PlaceSetLastPos(wblock->block,&p2);

	CopyRange_notes(&totrack->notes,track->notes,p1,&p2);
	CopyRange_stops(&totrack->stops,track->stops,p1,&p2);

	/* FX only make sense when the destination has a patch assigned. */
	if (totrack->patch != NULL)
		CopyRange_fxs(&totrack->fxs,&track->fxs,p1,&p2);

	LegalizeFXlines(wblock->block,totrack);
	LegalizeNotes(wblock->block,totrack);

	return true;
}
void stopMALdataflow(void) { int i; ATOMIC_SET(exiting, 1, exitingLock, "q_dequeue"); if (todo) { for (i = 0; i < THREADS; i++) MT_sema_up(&todo->s, "stopMALdataflow"); MT_lock_set(&dataflowLock, "stopMALdataflow"); for (i = 0; i < THREADS; i++) { if (workers[i].flag != IDLE && workers[i].flag != JOINING) { workers[i].flag = JOINING; MT_lock_unset(&dataflowLock, "stopMALdataflow"); MT_join_thread(workers[i].id); MT_lock_set(&dataflowLock, "stopMALdataflow"); } workers[i].flag = IDLE; } MT_lock_unset(&dataflowLock, "stopMALdataflow"); } }
/*
 * Resume playing after a pause: rebuild the whole player event queue from
 * the start of the block, fast-forward it up to the recorded pause time
 * (executing each event so player state is reconstructed), then restart
 * the pause player. No-op build when NOPAUSEPLAY is defined.
 */
void PC_StopPause(void){
#ifdef NOPAUSEPLAY
	return;
#else
	Place place;
	STime pausetime;
	struct PEventQueue *peq;

	if( ! is_playing()) return;

	pausetime=pc->pausetime;

	PlaceSetFirstPos(&place);

	/* Rebuild all event queues from the top of the block.
	 * The init order here mirrors normal play start. */
	InitPEQclock();
	InitPEQ_LPB(pc->block,place);
	InitPEQ_Signature(pc->block,place);
	InitPEQ_Beat(pc->block,place);
	InitPEQrealline(pc->block,&place);
	InitPEQline(pc->block,&place);
	InitPEQblock(pc->block,&place);
	InitAllPEQnotes(pc->block,&place);

	/* Replay every event scheduled at or before the pause time.
	 * Each TreatMe may reschedule, so re-read pc->peq every iteration. */
	peq=pc->peq;
	while(peq!=NULL && pausetime>=peq->l.time){
		PC_RemoveFirst();
		(*peq->TreatMe)(pausetime,peq,0);
		peq=pc->peq;
	}

	ATOMIC_SET(root->setfirstpos, false);

	StopPausePlayer();
#endif
}
/* Balance the lists so that we can fit an object with the given size into
 * the cache.
 *
 * Cheap when nothing to do: the needs_balance flag is checked before taking
 * the cache lock. Evicts from MRU/MFU into their ghost lists first (driven
 * by the adaptive target `p`), then trims the ghost lists themselves. */
static inline void arc_balance(arc_t *cache)
{
    /* Fast path: skip the lock entirely when no balance was requested. */
    if (!ATOMIC_READ(cache->needs_balance))
        return;

    MUTEX_LOCK(&cache->lock);
    /* First move objects from MRU/MFU to their respective ghost lists. */
    while (cache->mru.size + cache->mfu.size > cache->c) {
        if (cache->mru.size > cache->p) {
            /* MRU over its target share: demote its LRU entry. */
            arc_object_t *obj = arc_state_lru(&cache->mru);
            arc_move(cache, obj, &cache->mrug);
        } else if (cache->mfu.size > cache->c - cache->p) {
            arc_object_t *obj = arc_state_lru(&cache->mfu);
            arc_move(cache, obj, &cache->mfug);
        } else {
            break;
        }
    }

    /* Then start removing objects from the ghost lists. */
    while (cache->mrug.size + cache->mfug.size > cache->c) {
        if (cache->mfug.size > cache->p) {
            /* NULL target state == drop the object entirely. */
            arc_object_t *obj = arc_state_lru(&cache->mfug);
            arc_move(cache, obj, NULL);
        } else if (cache->mrug.size > cache->c - cache->p) {
            arc_object_t *obj = arc_state_lru(&cache->mrug);
            arc_move(cache, obj, NULL);
        } else {
            break;
        }
    }

    ATOMIC_SET(cache->needs_balance, 0);
    MUTEX_UNLOCK(&cache->lock);
}
/* Atomically switch the cache's operating mode; safe to call from any
 * thread, no lock required. */
void arc_set_mode(arc_t *cache, arc_mode_t mode)
{
    ATOMIC_SET(cache->mode, mode);
}
/*
 * One-time program initialization: error/memory handlers, the global Root
 * and PlayerClass objects with their defaults, MIDI, scheduler, patches,
 * the audio mixer, player queues, clock, instruments, and finally a fresh
 * song. Returns false on any fatal failure (caller is expected to abort).
 * NOTE: the initialization order below is significant.
 */
bool InitProgram(void){
	//	GC_INIT();

	bool ret;

#if !defined(FOR_MACOSX)
	RADIUM_ensure_bin_packages_gc_is_used();
#endif

	printf("Initializing...\n");

	printf("...Error handler\n");
	Error_init();

	printf("...Memory handler\n");
	init_memory();

	root=tralloc(sizeof(struct Root));
	if(root==NULL){
		fprintf(stderr,"Not enough memory\n");
		return false;
	}

	/* Editor defaults. */
	root->keyoct=36;
	root->quantitize_options = Quantitize_get_default_options();
	root->grid_numerator=1;
	root->grid_denominator=1;
	root->min_standardvel=MAX_VELOCITY*40/100;
	root->standardvel=MAX_VELOCITY*80/100;
	/* These flags are read from other threads, hence atomic stores. */
	ATOMIC_SET(root->editonoff, true);
	ATOMIC_SET(root->play_cursor_onoff, false);
	ATOMIC_SET(root->editor_follows_play_cursor_onoff, true);

	root->song=talloc(sizeof(struct Song));
	pc=tralloc(sizeof(PlayerClass));

	if(root->song==NULL || pc==NULL){
		fprintf(stderr,"Not enough memory\n");
		return false;
	}

	pc->pfreq = 48000; // Default value. Should be overridden in MIXER_start().

	/*
	if( ( ! InitPEQmempool(1000) ) || ( ! Input_Init(4000) ) ){ // 1000 and 4000 are hardcoded values. Not good.
		return false;
	}
	*/

	printf("...Midi\n");
	MIDI_input_init();

	SCHEDULER_init();

	PATCH_init();

	printf("...Sound\n");
	if(MIXER_start()==false){
		fprintf(stderr,"Could not open Sound\n");
		return false;
	}

	printf("...Player 1/2\n");
	if( ( ! InitPEQmempool() ) ){ // 1000 and 4000 are hardcoded values. Not good.
		return false;
	}

	printf("...Clock handler\n");
	if( ! InitClock() )
		return false;

	printf("...Player 2/2\n");
	PEQ_GetType_Init();

	printf("...Instrument\n");
	if( OpenInstruments()==false ){
		return false;
	}

	printf("...Kebang\n");
	ret=NewSong();

#if !USE_OPENGL
	printf("...Blitting\n");
	if(ret==true){
		Blt_blt(root->song->tracker_windows);
	}
#endif

	printf("Initialization finished.\n");

	return ret;
}
// Constructors. qtractorAudioBuffer::qtractorAudioBuffer ( qtractorAudioBufferThread *pSyncThread, unsigned short iChannels ) { m_pSyncThread = pSyncThread; m_iChannels = iChannels; m_pFile = NULL; m_pRingBuffer = NULL; m_iThreshold = 0; m_iBufferSize = 0; m_syncFlags = 0; m_iReadOffset = 0; m_iWriteOffset = 0; m_iFileLength = 0; m_bIntegral = false; m_iOffset = 0; m_iLength = 0; m_iLoopStart = 0; m_iLoopEnd = 0; m_iSeekOffset = 0; ATOMIC_SET(&m_seekPending, 0); m_ppFrames = NULL; m_ppBuffer = NULL; m_bTimeStretch = false; m_fTimeStretch = 1.0f; m_bPitchShift = false; m_fPitchShift = 1.0f; m_pTimeStretcher = NULL; m_fGain = 1.0f; m_fPanning = 0.0f; m_pfGains = NULL; m_fNextGain = 0.0f; m_iRampGain = 0; #ifdef CONFIG_LIBSAMPLERATE m_bResample = false; m_fResampleRatio = 1.0f; m_iInputPending = 0; m_ppInBuffer = NULL; m_ppOutBuffer = NULL; m_ppSrcState = NULL; #endif m_pPeakFile = NULL; // Time-stretch mode local options. m_bWsolaTimeStretch = g_bDefaultWsolaTimeStretch; m_bWsolaQuickSeek = g_bDefaultWsolaQuickSeek; }
/*
 * Player->Main-thread callback: for every window that follows playback,
 * select the currently playing block and scroll the editor to the realline
 * the player has reached — rate-limited to `scrolls_per_second`.
 * (This snippet appears truncated; the loop/function close is not visible.)
 */
void P2MUpdateSongPosCallBack(void){
	struct Tracker_Windows *window=root->song->tracker_windows;
	bool setfirstpos=ATOMIC_GET(root->setfirstpos);
	NInt curr_block=ATOMIC_GET(root->curr_blocknum);
	struct WBlocks *wblock;
	int till_curr_realline;

	/* Lazily read the scroll rate setting once. */
	if(scrolls_per_second==-1)
		scrolls_per_second = SETTINGS_read_int("scrolls_per_second", default_scrolls_per_second);

	if(pc->playtype==PLAYSONG)
		BS_SelectPlaylistPos(root->curr_playlist);

	while(window!=NULL){
		if(window->playalong==true){
			DO_GFX({
				/* NOTE(review): `curr_blocknum` here vs. the local
				 * `curr_block` above — verify this resolves to the
				 * intended value (looks like a naming slip). */
				wblock=ListFindElement1(&window->wblocks->l,curr_blocknum);

				till_curr_realline=ATOMIC_GET(wblock->till_curr_realline);

				/* Switch the window to the now-playing block. */
				if(window->curr_block!=curr_block){
					if(setfirstpos){
						wblock->curr_realline=0;
						SetWBlock_Top_And_Bot_Realline(window,wblock);
					}
					SelectWBlock(
						window,
						wblock
					);
				}

				if(setfirstpos){	// The player routine (PEQblock.c) sets this one.
					ATOMIC_SET(wblock->till_curr_realline, 0);
					till_curr_realline=0;
				}

				//fprintf(stderr,"tilline: %d\n",till_curr_realline);

#if 0
				if(wblock->curr_realline != till_curr_realline)
					ScrollEditorToRealLine(window,wblock,till_curr_realline);
#else
				{
					bool do_scrolling = false;

					if(wblock != last_wblock)
						do_scrolling = true;

					/* therealtime going backwards means playback restarted. */
					else if (last_time > ATOMIC_GET(pc->therealtime))
						do_scrolling = true;

					else if(till_curr_realline < wblock->curr_realline)
						do_scrolling = true;

					else if(till_curr_realline > wblock->curr_realline){
						/* Scroll forward only when enough musical time has
						 * passed — this rate-limits the scrolling. */
						STime from_time = (STime) ((double)Place2STime(wblock->block, &wblock->reallines[wblock->curr_realline]->l.p) / wblock->block->reltempo);
						STime to_time   = (STime) ((double)Place2STime(wblock->block, &wblock->reallines[till_curr_realline]->l.p) / wblock->block->reltempo);

						STime time = to_time - from_time;

						STime time_necessary_to_scroll = pc->pfreq / scrolls_per_second;

						if(time>=time_necessary_to_scroll)
							do_scrolling = true;
					}

					if(do_scrolling==true) {
						ScrollEditorToRealLine(window,wblock,till_curr_realline);
						last_time = ATOMIC_GET(pc->therealtime);
						last_wblock = wblock;
					}
				}
#endif
			});
		}
/*
* Set the continual_io_error of this @param dvobjprive to 0
* Called after a successful I/O to clear the accumulated error streak.
*/
void rtw_reset_continual_io_error(struct dvobj_priv *dvobj)
{
	ATOMIC_SET(&dvobj->continual_io_error, 0);
}
/*
 * RT scheduler callback fired once per realline of the playing block.
 * args: [0]=seqblock, [1]=wblock, [2]=current realline, [3]=generation
 * counter (used to invalidate stale events when reallines change).
 * Publishes the current realline (setit), then either reschedules itself
 * for the next realline or returns DONT_RESCHEDULE at block/range end.
 */
static int64_t RT_scheduled_realline(struct SeqTrack *seqtrack, int64_t time, union SuperType *args){
  const struct SeqBlock *seqblock = args[0].const_pointer;
  struct WBlocks *wblock = args[1].pointer;
  int realline = args[2].int32_num;
  int64_t counter = args[3].int_num;

  //printf("%d. counter: %d / seqblock->counter: %d. Num reallines: %d\n", realline, (int)counter, (int)seqblock->curr_scheduled_realline_counter, wblock->num_reallines);

  // Stale event: the realline layout changed while playing and a newer
  // chain of events has been scheduled — drop this one.
  if (seqblock->curr_scheduled_realline_counter > counter){
    // I.e. this event have been replaced because the number of reallines was changed while playing.
    //printf("   stop1: %d / %d\n", (int)counter, (int)seqblock->curr_scheduled_realline_counter);
    return DONT_RESCHEDULE;
  }

  const int num_reallines = wblock->num_reallines;
  /*
  if (num_reallines != wblock->num_reallines){ // Happens when changing LZ value.
    realline = get_curr_realline_for_seqtrack(seqtrack);
    num_reallines = wblock->num_reallines;
    args[3].int32_num = num_reallines;
  }
  */

#ifdef WITH_PD
  bool inserted_pd_realline = false;
  int64_t org_time = time;
  const Place *org_pos = NULL;
  if (realline < num_reallines) // number of reallines can change while playing.
    org_pos = &wblock->reallines[realline]->l.p;
#endif

  // Do thing with the current realline
  setit(wblock, realline);

  // Schedule next realline
  //

  const int next_realline = realline+1;

  if(pc->playtype==PLAYRANGE){

    // This never happens. Instead playtype is PLAYBLOCK, and pc->is_playing_range is true;
    R_ASSERT(false);

    /*
    if(next_realline>=wblock->rangey2){
      next_realline=wblock->rangey1;
    }

    // sanity checks to avoid crash. May happen if editing next_reallines while playing.
    if (next_realline>=wblock->num_next_reallines) // If outside range, first try to set next_realline to rangey1
      next_realline = wblock->rangey1;

    if (next_realline>=wblock->num_next_reallines) // that didn't work, set next_realline to 0
      next_realline = 0;
    */

  } else if (pc->playtype==PLAYBLOCK && pc->is_playing_range == true){

    // End of range reached: request the player to stop.
    if (next_realline>=num_reallines || p_Greater_Than(wblock->reallines[next_realline]->l.p, wblock->rangey2)){
      ATOMIC_SET(pc->player_state, PLAYER_STATE_STOPPING);
      //PC_ReturnElements();
      return DONT_RESCHEDULE;
    }

  }else if(next_realline>=num_reallines) {
    return DONT_RESCHEDULE;
  }

#ifdef WITH_PD
  if (org_pos != NULL)
    if(inserted_pd_realline==false)
      RT_PD_set_realline(org_time, time, org_pos);
#endif

  {
    // Reschedule ourselves for the next realline.
    args[2].int32_num = next_realline;
    Place next_place = wblock->reallines[next_realline]->l.p;
    //printf("  next_place: %d + %d/%d\n", next_place.line, next_place.counter, next_place.dividor);
    return get_seqblock_place_time(seqblock, next_place);
  }
}
/*
 * The audio-thread player entry point, called once per audio block with
 * `reltime` = elapsed time for this block. Drives the player state machine
 * (STOPPING -> STOPPED, STARTING_TO_PLAY -> PLAYING, PLAYING -> STOPPING
 * when all schedulers are finished), runs each seqtrack's scheduler scaled
 * by the block's relative tempo, and advances the song position.
 */
void PlayerTask(double reltime){

  if (ATOMIC_GET(is_starting_up))
    return;

  pc->reltime = reltime;

  Player_State player_state = ATOMIC_GET(pc->player_state);

  if (player_state==PLAYER_STATE_PROGRAM_NOT_READY){

    //printf("player: program not ready\n");
    return;

  } else if (player_state==PLAYER_STATE_ENDING) {

    return;

  } else if (player_state==PLAYER_STATE_STOPPING) {

    //PC_ReturnElements();

    g_time_was_stopped = true;

    SCHEDULER_reset_all_timing();

    // Clearing is spread over several blocks to avoid CPU spikes.
    if (SCHEDULER_clear_all()) {

      ATOMIC_SET(pc->player_state, PLAYER_STATE_STOPPED); // Finished. SCHEDULER_clear() cleared everything.
      //RT_BACKUP_reset_timer(); // Don't want to take backup right after stopping to play. It's quite annoying. (we handle this directly in Qt_AutoBackups instead)
      player_state = PLAYER_STATE_STOPPED;

    } else

      return; // Must run SCHEDULER_clear() at least one more time. We don't want clear too much at once since it could cause CPU spikes.

    //} else if (player_state==PLAYER_STATE_STOPPED) {
    //  return;
  }

  R_ASSERT(player_state==PLAYER_STATE_STARTING_TO_PLAY || player_state==PLAYER_STATE_PLAYING || player_state==PLAYER_STATE_STOPPED);

  // Re-initialize timing the first block after a stop.
  if (player_state != PLAYER_STATE_STOPPED)
    if(g_time_was_stopped){
      OS_InitMidiTiming();
      OS_InitAudioTiming();
      g_time_was_stopped = false;
    }

  bool is_finished = true;

  ALL_SEQTRACKS_FOR_EACH(){

    // Scale elapsed time by the playing block's relative tempo.
    double reltempo = 1.0;

    struct SeqBlock *curr_seqblock = seqtrack->curr_seqblock;

    struct Blocks *block = curr_seqblock==NULL ? NULL : curr_seqblock->block;

    if(block!=NULL)
      reltempo = ATOMIC_DOUBLE_GET(block->reltempo);

    double seqreltime = (double)reltime * reltempo;

    //if(reltempo!=1.0)
    //  printf("Curr_seqblock: %p. seqrelteim: %f\n", curr_seqblock,seqreltime);

    pc->is_treating_editor_events = true;
    {
      if (SCHEDULER_called_per_block(seqtrack, seqreltime) > 0)
        is_finished = false;
    }
    pc->is_treating_editor_events = false;

    if (player_state != PLAYER_STATE_STOPPED){
      if (curr_seqblock != NULL) {

        // Only the seqtrack that "owns" playback updates the block's
        // player time (block play vs. the currently selected song track).
        bool set_player_time = false;

        if (pc->playtype==PLAYBLOCK && seqtrack==&root->song->block_seqtrack)
          set_player_time = true;

        else if (pc->playtype==PLAYSONG && seqtrack==root->song->seqtracks.elements[root->song->curr_seqtracknum])
          set_player_time = true;

        if (set_player_time)
          ATOMIC_DOUBLE_SET(block->player_time, seqtrack->start_time - curr_seqblock->time);

        //else ATOMIC_DOUBLE_SET(block->player_time, -100); // Not necessary (-100 is set in scheduler_seqtrack.c when switching block), and we also need to check if we are playing block, etc.
      }
    }

  }END_ALL_SEQTRACKS_FOR_EACH;

  if (player_state==PLAYER_STATE_STOPPED)
    return;

  pc->absabstime += RADIUM_BLOCKSIZE;

  if(pc->playtype==PLAYSONG) {
    double song_abstime = ATOMIC_DOUBLE_GET(pc->song_abstime);
    double new_song_abstime = song_abstime + reltime;
    ATOMIC_DOUBLE_SET(pc->song_abstime, new_song_abstime);
  }

#ifdef WITH_PD
  RT_PD_set_absolute_time(ATOMIC_DOUBLE_GET(pc->song_abstime));
#endif

  if (player_state == PLAYER_STATE_STARTING_TO_PLAY)
    ATOMIC_SET(pc->player_state, PLAYER_STATE_PLAYING);

  //printf("num_scheduled: %d. state: %d\n",num_scheduled_events,player_state);
  if(player_state == PLAYER_STATE_PLAYING && is_finished)
    ATOMIC_SET(pc->player_state, PLAYER_STATE_STOPPING);

  // Loop handling: request stop when passing the loop end (song mode only).
  if(pc->playtype==PLAYSONG){
    if (SEQUENCER_is_looping()){
      if (ATOMIC_DOUBLE_GET(pc->song_abstime) >= SEQUENCER_get_loop_end()){
        ATOMIC_SET(pc->player_state, PLAYER_STATE_STOPPING);
      }
    }
  }
}
/* Move the object to the given state. If the state transition requires,
 * fetch, evict or destroy the object.
 *
 * Return values: 0 on success; for the fetch path, the backend's rc (1/-1)
 * on fetch failure, or 1 when the fetched object is too large to cache. */
static inline int arc_move(arc_t *cache, arc_object_t *obj, arc_state_t *state)
{
    // In the first conditional we check If the object is being locked,
    // which means someone is fetching its value and we don't what
    // don't mess up with it. Whoever is fetching will also take care of moving it
    // to one of the lists (or dropping it)
    // NOTE: while the object is being fetched it doesn't belong
    //       to any list, so there is no point in going ahead
    //       also arc_balance() should never go through this object
    //       (since in none of the lists) so it won't be affected.
    //       The only call which would silently fail is arc_remove()
    //       but if the object is being fetched and need to be removed
    //       will be determined by who is fetching the object or by the
    //       next call to arc_balance() (which would anyway happen if
    //       the object will be put into the cache by the fetcher)
    //
    // In the second conditional instead we handle a specific corner case which
    // happens when concurring threads access an item which has been just fetched
    // but also dropped (so its state is NULL).
    // If a thread entering arc_lookup() manages to get the object out of the hashtable
    // before it's being deleted it will try putting the object to the mfu list without checking first
    // if it was already in a list or not (new objects should be first moved to the
    // mru list and not the mfu one)
    if (UNLIKELY(obj->locked || (state == &cache->mfu && ATOMIC_READ(obj->state) == NULL)))
        return 0;

    MUTEX_LOCK(&cache->lock);

    arc_state_t *obj_state = ATOMIC_READ(obj->state);

    if (LIKELY(obj_state != NULL)) {

        if (LIKELY(obj_state == state)) {
            // short path for recurring keys
            // (those in the mfu list being hit again)
            if (LIKELY(state->head.next != &obj->head))
                arc_list_move_to_head(&obj->head, &state->head);
            MUTEX_UNLOCK(&cache->lock);
            return 0;
        }

        // if the state is not NULL
        // (and the object is not going to be being removed)
        // move the ^ (p) marker
        if (LIKELY(state != NULL)) {
            if (obj_state == &cache->mrug) {
                // Ghost hit on the recency side: grow the MRU target p.
                size_t csize = cache->mrug.size
                             ? (cache->mfug.size / cache->mrug.size)
                             : cache->mfug.size / 2;
                cache->p = MIN(cache->c, cache->p + MAX(csize, 1));
            } else if (obj_state == &cache->mfug) {
                // Ghost hit on the frequency side: shrink p.
                size_t csize = cache->mfug.size
                             ? (cache->mrug.size / cache->mfug.size)
                             : cache->mrug.size / 2;
                cache->p = MAX(0, cache->p - MAX(csize, 1));
            }
        }

        // Detach from the current list before attaching to the new one.
        ATOMIC_DECREASE(obj_state->size, obj->size);
        arc_list_remove(&obj->head);
        ATOMIC_DECREMENT(obj_state->count);
        ATOMIC_SET(obj->state, NULL);
    }

    if (state == NULL) {
        // Target NULL == drop the object entirely.
        if (ht_delete_if_equals(cache->hash, (void *)obj->key, obj->klen, obj, sizeof(arc_object_t)) == 0)
            release_ref(cache->refcnt, obj->node);
    } else if (state == &cache->mrug || state == &cache->mfug) {
        // Moving to a ghost list: no value kept, just the key's history.
        obj->async = 0;
        arc_list_prepend(&obj->head, &state->head);
        ATOMIC_INCREMENT(state->count);
        ATOMIC_SET(obj->state, state);
        ATOMIC_INCREASE(state->size, obj->size);
    } else if (obj_state == NULL) {
        // New (or ghost-resurrected) object: its value must be fetched.
        obj->locked = 1;

        // unlock the cache while the backend is fetching the data
        // (the object has been locked while being fetched so nobody
        // will change its state)
        MUTEX_UNLOCK(&cache->lock);

        size_t size = 0;
        int rc = cache->ops->fetch(obj->ptr, &size, cache->ops->priv);
        switch (rc) {
            case 1:
            case -1:
            {
                // Fetch failed: drop the object.
                // NOTE(review): obj->locked stays 1 on this path — presumably
                // irrelevant since the object is being deleted; confirm.
                if (ht_delete_if_equals(cache->hash, (void *)obj->key, obj->klen, obj, sizeof(arc_object_t)) == 0)
                    release_ref(cache->refcnt, obj->node);
                return rc;
            }
            default:
            {
                if (size >= cache->c) {
                    // the (single) object doesn't fit in the cache, let's return it
                    // to the getter without (re)adding it to the cache
                    if (ht_delete_if_equals(cache->hash, (void *)obj->key, obj->klen, obj, sizeof(arc_object_t)) == 0)
                        release_ref(cache->refcnt, obj->node);
                    return 1;
                }
                // Re-acquire the lock to link the fetched object in.
                MUTEX_LOCK(&cache->lock);
                obj->size = ARC_OBJ_BASE_SIZE(obj) + cache->cos + size;
                arc_list_prepend(&obj->head, &state->head);
                ATOMIC_INCREMENT(state->count);
                ATOMIC_SET(obj->state, state);
                ATOMIC_INCREASE(state->size, obj->size);
                ATOMIC_INCREMENT(cache->needs_balance);
                break;
            }
        }
        // since this object is going to be put back into the cache,
        // we need to unmark it so that it won't be ignored next time
        // it's going to be moved to another list
        obj->locked = 0;
    } else {
        // Ordinary promotion between resident lists (e.g. mru -> mfu).
        arc_list_prepend(&obj->head, &state->head);
        ATOMIC_INCREMENT(state->count);
        ATOMIC_SET(obj->state, state);
        ATOMIC_INCREASE(state->size, obj->size);
    }

    MUTEX_UNLOCK(&cache->lock);
    return 0;
}
/**
 * Abort execution, possibly dumping a stack frame.
 *
 * Re-entrancy safe: the `seen_fatal` flag ensures the (expensive, possibly
 * fragile) lock/stack dumping runs only for the first fatal assertion; a
 * second failure during crash handling skips straight to crash_abort().
 */
static G_GNUC_COLD G_GNUC_NORETURN void
assertion_abort(void)
{
	static volatile sig_atomic_t seen_fatal;

#define STACK_OFF	2	/* 2 extra calls: assertion_failure(), then here */

	/*
	 * We're going to stop the execution.
	 *
	 * If this is the first fatal assertion we're dealing with (and not a
	 * second one happening in the crash-handling code), log the current
	 * stack trace to give a first clue about the code path leading to
	 * the failure.
	 */

	if (!ATOMIC_GET(&seen_fatal)) {
		ATOMIC_SET(&seen_fatal, TRUE);

		/*
		 * If the thread holds any locks, dump them.
		 */

		thread_lock_dump_self_if_any(STDERR_FILENO);
		if (log_stdout_is_distinct())
			thread_lock_dump_self_if_any(STDOUT_FILENO);

		/*
		 * Dump stacktrace.
		 */

		stacktrace_where_cautious_print_offset(STDERR_FILENO, STACK_OFF);
		if (log_stdout_is_distinct())
			stacktrace_where_cautious_print_offset(STDOUT_FILENO, STACK_OFF);

		/*
		 * Before calling abort(), which will generate a SIGABRT and invoke
		 * the crash handler we need to save the current stack frame in case
		 * signal delivery happens on a dedicated stack where it will no
		 * longer be possible to get the frame of the assertion failure.
		 */

		crash_save_current_stackframe(STACK_OFF);
	}

	/*
	 * We used to call abort() here.
	 *
	 * However, assertion handling is already coupled to crash handling and
	 * therefore it buys us little to call abort() to raise a SIGABRT signal
	 * which will then be trapped by the crash handler anyway.
	 *
	 * Furthermore, there is a bug in the linux kernel that causes a hang in
	 * the fork() system call used by the crash handler to exec() a debugger,
	 * and this may be due to signal delivery.
	 *
	 * Calling crash_abort() will ensure synchronous crash handling.
	 *		--RAM, 2011-10-24
	 */

	crash_abort();
#undef STACK_OFF
}