static PyObject* _is_playing(PyObject *self, PyObject *args) {
    play_id_t play_id;
    play_item_t* list_item = play_list_head.next_item;
    int found = 0;

    dbg1("_is_playing call\n");

    if (!PyArg_ParseTuple(args, "K", &play_id)) {
        return NULL;
    }

    dbg1("looking for play ID %llu\n", play_id);

    /* walk the list and find the matching play ID */
    grab_mutex(play_list_head.mutex);
    while (list_item != NULL) {
        if (list_item->play_id == play_id) {
            #if DEBUG > 0
            fprintf(DBG_OUT, DBG_PRE"found play ID in list item at %p\n", list_item);
            #endif

            found = 1;
        }
        list_item = list_item->next_item;
    }
    release_mutex(play_list_head.mutex);

    if (found) {
        Py_RETURN_TRUE;
    } else {
        Py_RETURN_FALSE;
    }
}
int Mono_Unix_UnixSignal_uninstall (void* info)
{
#if defined(HAVE_SIGNAL)
    signal_info* h;
    int r = -1;

    if (acquire_mutex (&signals_mutex) == -1)
        return -1;

    h = info;

    if (h == NULL || h < signals || h > &signals [NUM_SIGNALS])
        errno = EINVAL;
    else {
        /* last UnixSignal -- we can unregister */
        if (h->have_handler && count_handlers (h->signum) == 1) {
            mph_sighandler_t p = signal (h->signum, h->handler);
            if (p != SIG_ERR)
                r = 0;
            h->handler = NULL;
            h->have_handler = 0;
        }
        h->signum = 0;
    }

    release_mutex (&signals_mutex);

    return r;
#else
    g_error ("signal() is not supported by this platform");
    return 0;
#endif
}
/*
 * memory_tracker_unlock_mutex()
 *
 * Unlocks the memory tracker mutex with a platform specific call
 *
 * Returns:
 *    0: Success
 *   <0: Failure, either the mutex was not initialized
 *       or the call to unlock the mutex failed
 */
static int memory_tracker_unlock_mutex()
{
    int ret = -1;

    if (g_b_mem_tracker_inited) {

#if defined(LINUX) || defined(__uClinux__)
        ret = pthread_mutex_unlock(&memtrack.mutex);
#elif defined(WIN32) || defined(_WIN32_WCE)
        ret = !release_mutex(memtrack.mutex);
#elif defined(VXWORKS)
        ret = sem_give(memtrack.mutex);
#elif defined(NDS_NITRO)
        os_unlock_mutex(&memtrack.mutex);
        ret = 0;
#endif

        if (ret) {
            memtrack_log("memory_tracker_unlock_mutex: mutex unlock failed\n");
        }
    }

    return ret;
}
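/*
 * Hypothetical counterpart, for illustration only: a lock helper following the
 * same platform dispatch as memory_tracker_unlock_mutex() above. Only
 * pthread_mutex_lock() is a standard call used with its real signature;
 * grab_mutex_win32(), sem_take() and os_lock_mutex() are assumed wrapper names,
 * not taken from this module.
 */
static int memory_tracker_lock_mutex_sketch()
{
    int ret = -1;

    if (g_b_mem_tracker_inited) {
#if defined(LINUX) || defined(__uClinux__)
        ret = pthread_mutex_lock(&memtrack.mutex);   /* 0 on success */
#elif defined(WIN32) || defined(_WIN32_WCE)
        ret = !grab_mutex_win32(memtrack.mutex);     /* assumed wrapper, e.g. around WaitForSingleObject */
#elif defined(VXWORKS)
        ret = sem_take(memtrack.mutex);              /* assumed counterpart of sem_give */
#elif defined(NDS_NITRO)
        os_lock_mutex(&memtrack.mutex);              /* assumed counterpart of os_unlock_mutex */
        ret = 0;
#endif
        if (ret) {
            memtrack_log("memory_tracker_lock_mutex: mutex lock failed\n");
        }
    }

    return ret;
}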
void __inline unlock_fs(struct fs *fs, int fsop)
{
    if (fs->ops->reentrant & fsop) return;

    if (fs->ops->unlockfs) {
        fs->ops->unlockfs(fs);
    } else {
        release_mutex(&fs->exclusive);
    }
}
/**
 * Release allocated block.
 * input: index  1..NALLOC-1
 *        addr   ptr to allocated block
 */
void release_mem(uint16 index, byte *addr)
{
    // put released block at head of list for its size
    claim_mutex(&mutex_mem);
    ChainedBlock_p block = (ChainedBlock_p)addr;
    block->next = procmemlist[index];
    procmemlist[index] = block;
    release_mutex(&mutex_mem);
}
void TexturePlane::updateTextureAspectRatio()
{
    get_mutex(&tex_param_mutex);
    if (texAspect_needs_update) {
        texAspect_needs_update = FALSE;
        textureAspect = new_textureAspect;
        texAmp = textureAspect*texWL/2.0;
    }
    release_mutex(&tex_param_mutex);
}
def locked_eval(Value fn, Value arg) {
    release_mutex();
    lock_mutex();
    lua_pushvalue(L,fn);
    lua_pushvalue(L,arg);
    lua_pcall(L,1,1,0);
    lua_pushvalue(L,-1);
    return 1;
}
void TexturePlane::updateTextureAmplitude()
{
    get_mutex(&tex_param_mutex);
    if (texAmp_needs_update) {
        texAmp_needs_update = FALSE;
        texAmp = new_texAmp;
        textureAspect = 2.0*texAmp/texWL;
    }
    release_mutex(&tex_param_mutex);
}
/// sleep and use no processing time.
// @param millisec sleep period
// @param lock if true, release the Lua mutex while sleeping and re-acquire it afterwards
// @function sleep
def sleep(Int millisec, Boolean lock) {
    if (lock) {
        release_mutex();
    }
    Sleep(millisec);
    if (lock) {
        lock_mutex();
    }
    return 0;
}
void TexturePlane::updateTextureSize()
{
    get_mutex(&tex_param_mutex);
    if (texSize_needs_update) {
        texSize_needs_update = FALSE;
        texWL = new_Size;
        texWN = 1.0/texWL;
        texAmp = textureAspect*texWL/2.0;
    }
    release_mutex(&tex_param_mutex);
}
void TexturePlane::updateTextureWavelength()
{
    get_mutex(&tex_param_mutex);
    if (texWN_needs_update) {
        texWN_needs_update = FALSE;
        texWN = new_texWN;
        texWL = 1.0/texWN;
        textureAspect = 2.0*texAmp/texWL;
    }
    release_mutex(&tex_param_mutex);
}
/*
 * Processing if the priority of a waiting task changes
 */
void mtx_chg_pri(RAW_TASK_OBJ *tcb, RAW_U8 oldpri)
{
    RAW_MUTEX *mtxcb;
    RAW_TASK_OBJ *mtxtsk;

    mtxcb = (RAW_MUTEX *)(tcb->block_obj);

    /* mutex recursion can never go deeper than a certain level;
       exceeding it indicates a design fault */
    if (mutex_recursion_levels > CONFIG_RAW_MUTEX_RECURSION_LEVELS) {
        port_system_error_process(RAW_MUTEX_RECURSION_LEVELS_EXCEEDED, 0, 0, 0, 0, 0, 0);
        return;
    }

    mutex_recursion_levels++;

    /* update the maximum mutex recursion level for debugging,
       mainly to help find mutex design faults */
    if (mutex_recursion_levels > mutex_recursion_max_levels) {
        mutex_recursion_max_levels = mutex_recursion_levels;
    }

    if (mtxcb->common_block_obj.object_type == RAW_MUTEX_OBJ_TYPE) {
        if (mtxcb->policy == RAW_MUTEX_INHERIT_POLICY) {
            mtxtsk = mtxcb->mtxtsk;

            if (mtxtsk->priority > tcb->priority) {
                /* Since the highest priority among the lock-waiting tasks became
                   higher, raise the lock-holding task's priority as well */
                change_internal_task_priority(mtxtsk, tcb->priority);
            }
            /* the highest priority task blocked on this mutex may have lowered
               its priority, so recompute the mutex holder's priority */
            else if (mtxtsk->priority == oldpri) {
                release_mutex(mtxtsk, 0);
            }
            else {
                /* tcb->priority <= mtxtsk->priority and mtxtsk->priority != oldpri */
            }
        }
    }

    mutex_recursion_levels--;
}
static PyObject* _stop_all(PyObject *self, PyObject *args) {
    play_item_t* list_item = play_list_head.next_item;

    dbg1("_stop_all call\n");

    /* walk the list and set all audio to stop */
    grab_mutex(play_list_head.mutex);
    while (list_item != NULL) {
        dbg1("stopping ID %llu in list item at %p\n", list_item->play_id, list_item);

        grab_mutex(list_item->mutex);
        list_item->stop_flag = SA_STOP;
        release_mutex(list_item->mutex);

        list_item = list_item->next_item;
    }
    release_mutex(play_list_head.mutex);

    Py_RETURN_NONE;
}
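/*
 * Rough sketch, for orientation only, of the list node walked by _is_playing
 * and _stop_all above (and freed via destroy_audio_blob below). The real
 * play_item_t is defined elsewhere in the module; the field names come from
 * the accesses in this code, while the exact types and the mutex
 * representation are assumptions.
 */
typedef struct play_item_s {
    play_id_t play_id;               /* identifier matched by _is_playing            */
    int stop_flag;                   /* set to SA_STOP by _stop_all                  */
    void *mutex;                     /* per-item mutex guarding stop_flag            */
    struct play_item_s *next_item;   /* singly linked list rooted at play_list_head  */
} play_item_t;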
/*
 * Unlock mutex
 */
SYSCALL ER _tk_unl_mtx( ID mtxid )
{
    MTXCB *mtxcb;
    TCB *tcb;
    ER ercd = E_OK;

    CHECK_MTXID(mtxid);
    CHECK_INTSK();

    mtxcb = get_mtxcb(mtxid);

    BEGIN_CRITICAL_SECTION;
    if ( mtxcb->mtxid == 0 ) {
        ercd = E_NOEXS;
        goto error_exit;
    }
    if ( mtxcb->mtxtsk != ctxtsk ) {
        ercd = E_ILUSE;  /* This mutex is not locked by the calling task */
        goto error_exit;
    }

    /* Delete the mutex from the list, and adjust its own task priority if necessary. */
    release_mutex(ctxtsk, mtxcb);

    if ( mtx_waited(mtxcb) ) {
        tcb = (TCB*)mtxcb->wait_queue.next;

        /* Release wait */
        wait_release_ok(tcb);

        /* Change mutex get task */
        mtxcb->mtxtsk = tcb;
        mtxcb->mtxlist = tcb->mtxlist;
        tcb->mtxlist = mtxcb;

        if ( (mtxcb->mtxatr & TA_CEILING) == TA_CEILING ) {
            if ( tcb->priority > mtxcb->ceilpri ) {
                /* Raise the priority of the task that got the lock
                   to the highest priority limit */
                change_task_priority(tcb, mtxcb->ceilpri);
            }
        }
    } else {
        /* No wait task */
        mtxcb->mtxtsk = NULL;
    }

error_exit:
    END_CRITICAL_SECTION;

    return ercd;
}
void* playback_thread(void* thread_param) {
    audio_blob_t* audio_blob = (audio_blob_t*)thread_param;
    void* audio_ptr;
    int play_samples;
    int samples_left = (audio_blob->len_bytes - audio_blob->used_bytes) / audio_blob->frame_size;
    int buffer_samples = audio_blob->buffer_size / audio_blob->frame_size;
    int result;
    int stop_flag = 0;

    dbg1("playback thread started with audio blob at %p\n", thread_param);

    while (samples_left > 0 && !stop_flag) {
        grab_mutex(audio_blob->play_list_item->mutex);
        stop_flag = audio_blob->play_list_item->stop_flag;
        release_mutex(audio_blob->play_list_item->mutex);

        dbg2("loop iteration with stop flag: %d\n", stop_flag);

        if (samples_left < audio_blob->buffer_size) {
            play_samples = samples_left;
        } else {
            play_samples = buffer_samples;
        }
        audio_ptr = audio_blob->buffer_obj.buf + (size_t)(audio_blob->used_bytes);
        result = snd_pcm_writei(audio_blob->handle, audio_ptr, play_samples);
        if (result < 0) {
            dbg2("snd_pcm_writei error code: %d\n", result);

            result = snd_pcm_recover(audio_blob->handle, result, 0);
            if (result < 0) {
                dbg2("unrecoverable error - code: %d\n", result);

                /* unrecoverable error */
                break;
            }
        } else {
            audio_blob->used_bytes += result * audio_blob->frame_size;
        }
        samples_left = (audio_blob->len_bytes - audio_blob->used_bytes) / audio_blob->frame_size;
    }

    dbg2("done buffering audio - cleaning up\n");
    snd_pcm_drain(audio_blob->handle);
    snd_pcm_close(audio_blob->handle);
    destroy_audio_blob(audio_blob);

    dbg1("playback thread done");

    pthread_exit(0);
}
static void create_semaphore_serial(semaphore_t *sem)
{
    if (!(sem))
        return;
    get_mutex(&crit_sec_create_sem);
    if (!(sem->is_init))
        create_semaphore(sem);
    release_mutex(&crit_sec_create_sem);
}
void destroy_audio_blob(audio_blob_t* audio_blob) {
    PyGILState_STATE gstate;

    dbg1("destroying audio blob at %p\n", audio_blob);

    /* release the buffer view so Python can decrement its reference count */
    gstate = PyGILState_Ensure();
    PyBuffer_Release(&audio_blob->buffer_obj);
    PyGILState_Release(gstate);

    grab_mutex(audio_blob->list_mutex);
    delete_list_item(audio_blob->play_list_item);
    release_mutex(audio_blob->list_mutex);
    PyMem_Free(audio_blob);
}
/// call a Lua function.
// This ensures that only one Lua function can be entered at any time, controlled
// by a mutex. If in 'GUI mode' then the Lua function is furthermore called
// from the GUI state.
// @param L the state
// @param ref a reference to the function
// @param idx a stack index: if greater than zero, pass value to function
// @param text a string: if not NULL, pass this string to the function
// @param discard if 1, then remove the reference after calling
// @function call_lua
BOOL call_lua(lua_State *L, Ref ref, int idx, const char *text, int discard) {
    BOOL res;
    lock_mutex();
    if (s_use_mutex) {
        res = call_lua_direct(L, ref, idx, text, discard);
    } else {
        LuaCallParms *parms = (LuaCallParms*)malloc(sizeof(LuaCallParms));
        parms->L = L;
        parms->ref = ref;
        parms->idx = idx;
        parms->text = text;
        parms->discard = discard;
        PostMessage(hMessageWin, MY_INTERNAL_LUA_MESSAGE, 0, (LPARAM)parms);
        res = FALSE; // for now
    }
    release_mutex();
    return res;
}
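/*
 * Hypothetical fragment, for illustration only: what the hidden message
 * window's window procedure would presumably do when it services
 * MY_INTERNAL_LUA_MESSAGE posted above. Only the LuaCallParms field names and
 * call_lua_direct() come from the code above; the window-procedure shape and
 * return values are assumptions.
 */
static LRESULT CALLBACK message_wndproc_sketch(HWND hwnd, UINT msg, WPARAM wParam, LPARAM lParam)
{
    if (msg == MY_INTERNAL_LUA_MESSAGE) {
        LuaCallParms *p = (LuaCallParms*)lParam;
        call_lua_direct(p->L, p->ref, p->idx, p->text, p->discard);  /* runs on the GUI thread */
        free(p);   /* allocated with malloc() in call_lua() */
        return 0;
    }
    return DefWindowProc(hwnd, msg, wParam, lParam);
}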
RAW_OS_ERROR raw_mutex_delete(RAW_MUTEX *mutex_ptr)
{
    LIST *block_list_head;

    RAW_SR_ALLOC();

#if (RAW_MUTEX_FUNCTION_CHECK > 0)
    if (mutex_ptr == 0) {
        return RAW_NULL_OBJECT;
    }
#endif

    RAW_CRITICAL_ENTER();

    if (mutex_ptr->common_block_obj.object_type != RAW_MUTEX_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    block_list_head = &mutex_ptr->common_block_obj.block_list;

    mutex_ptr->common_block_obj.object_type = RAW_OBJ_TYPE_NONE;

    if (mutex_ptr->mtxtsk) {
        release_mutex(mutex_ptr->mtxtsk, mutex_ptr);
    }

    /* All tasks blocked on this mutex are woken up */
    while (!is_list_empty(block_list_head)) {
        delete_pend_obj(raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list));
    }

    RAW_CRITICAL_EXIT();

    TRACE_MUTEX_DELETE(raw_task_active, mutex_ptr);

    raw_sched();

    return RAW_SUCCESS;
}
/*
 * Processing if the task blocked on a mutex times out or is aborted or deleted
 */
void mutex_state_change(RAW_TASK_OBJ *tcb)
{
    RAW_MUTEX *mtxcb;
    RAW_TASK_OBJ *mtxtsk;

    mtxcb = (RAW_MUTEX *)(tcb->block_obj);

    if (mtxcb->common_block_obj.object_type == RAW_MUTEX_OBJ_TYPE) {
        if (mtxcb->policy == RAW_MUTEX_INHERIT_POLICY) {
            mtxtsk = mtxcb->mtxtsk;

            /* the highest priority task blocked on this mutex may have lowered
               its priority, so recompute the mutex holder's priority */
            if (mtxtsk->priority == tcb->priority) {
                release_mutex(mtxtsk, 0);
            }
        }
    }
}
static int serial_read(struct dev *dev, void *buffer, size_t count, blkno_t blkno, int flags) {
    struct serial_port *sp = (struct serial_port *) dev->privdata;
    unsigned int n;
    unsigned char *bufp;

    if (wait_for_object(&sp->rx_lock, sp->cfg.rx_timeout) < 0) return -ETIMEOUT;

    bufp = (unsigned char *) buffer;
    for (n = 0; n < count; n++) {
        // Wait until rx queue is not empty
        if (wait_for_object(&sp->rx_sem, n == 0 ? sp->cfg.rx_timeout : 0) < 0) break;

        // Remove next char from receive queue
        cli();
        *bufp++ = fifo_get(&sp->rxq);
        sti();

        //kprintf("serial: read %02X\n", bufp[-1]);
    }

    release_mutex(&sp->rx_lock);
    return n;
}
static HANDLE
acquire_mutex (const char *mutexname)
{
  HANDLE mutex;
  DWORD res;

  mutex = CreateMutexA (NULL, FALSE, mutexname);
  if (!mutex)
    return 0;

  res = WaitForSingleObject (mutex, INFINITE);
  switch (res)
    {
    case WAIT_ABANDONED:
      release_mutex (mutex);
      return 0;
    case WAIT_FAILED:
    case WAIT_TIMEOUT:
      return 0;
    }

  return mutex;
}
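/*
 * The matching release_mutex() used here and in get_session_address_dbus_launch
 * below is not part of this excerpt. A minimal sketch of what it presumably
 * does with a handle returned by acquire_mutex() -- an assumption for
 * illustration, not the actual implementation:
 */
static void
release_mutex_sketch (HANDLE mutex)
{
  ReleaseMutex (mutex);   /* give up ownership taken by WaitForSingleObject */
  CloseHandle (mutex);    /* drop the handle created by CreateMutexA */
}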
static mutex_t *create_mutex_serial(mutex_t *mutex)
{
    if (!(mutex))
        return NULL;

    /* critical section to avoid the remote possibility of a race on the same
       *mutex -- see the get_mutex comments above. Module clients must
       initialize the present module with threads_wrapper_init(). */
    get_mutex(&crit_sec_create_mutex);
    if (!(mutex->is_init))
        create_mutex(mutex);
    release_mutex(&crit_sec_create_mutex);

    return mutex;
}
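/*
 * Design note, illustration only: on POSIX-only targets the same
 * serialize-the-lazy-initialization idea can be expressed with pthread_once,
 * which removes the need for a dedicated guard mutex. This sketch is not part
 * of the module above; the names are made up for the example.
 */
#include <pthread.h>

static pthread_mutex_t example_mutex;
static pthread_once_t  example_mutex_once = PTHREAD_ONCE_INIT;

static void example_mutex_init(void)
{
    pthread_mutex_init(&example_mutex, NULL);
}

static void example_mutex_use(void)
{
    pthread_once(&example_mutex_once, example_mutex_init);  /* first caller initializes */
    pthread_mutex_lock(&example_mutex);
    /* ... critical section ... */
    pthread_mutex_unlock(&example_mutex);
}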
void* Mono_Unix_UnixSignal_install (int sig)
{
    int i;
    signal_info* h = NULL;
    int have_handler = 0;
    void* handler = NULL;

    if (acquire_mutex (&signals_mutex) == -1)
        return NULL;

#if defined (SIGRTMIN) && defined (SIGRTMAX)
    /*The runtime uses some rt signals for itself so it's important to not override them.*/
    if (sig >= SIGRTMIN && sig <= SIGRTMAX && count_handlers (sig) == 0) {
        struct sigaction sinfo;
        sigaction (sig, NULL, &sinfo);
        if (sinfo.sa_handler != SIG_DFL || (void*)sinfo.sa_sigaction != (void*)SIG_DFL) {
            pthread_mutex_unlock (&signals_mutex);
            errno = EADDRINUSE;
            return NULL;
        }
    }
#endif /*defined (SIGRTMIN) && defined (SIGRTMAX)*/

    for (i = 0; i < NUM_SIGNALS; ++i) {
        if (h == NULL && signals [i].signum == 0) {
            h = &signals [i];
            h->handler = signal (sig, default_handler);
            if (h->handler == SIG_ERR) {
                h->handler = NULL;
                h = NULL;
                break;
            }
            else {
                h->have_handler = 1;
            }
        }
        if (!have_handler && signals [i].signum == sig &&
                signals [i].handler != default_handler) {
            have_handler = 1;
            handler = signals [i].handler;
        }
        if (h && have_handler)
            break;
    }

    if (h && have_handler) {
        h->have_handler = 1;
        h->handler = handler;
    }

    if (h) {
        mph_int_set (&h->count, h->count, 0);
        mph_int_set (&h->signum, h->signum, sig);
        mph_int_set (&h->pipecnt, h->pipecnt, 0);
    }

    release_mutex (&signals_mutex);

    return h;
}
/**
 * @fn        THRET accept_thread(void *arg)
 * @brief     The accepting thread for TCP connection.
 * @param[in] arg The argument of accepting thread: CONN_BCAP_SERVER.
 */
static THRET THTYPE accept_thread(void *arg)
{
#if !defined(THRET)
    THRET ret = (THRET)NULL;
#endif
    int client;
    HRESULT hr;
    volatile struct CONN_BCAP_SERVER *child;
    struct CONN_BCAP_SERVER *bcap_param = (struct CONN_BCAP_SERVER *) arg;
    MUTEX mutex;

    /* Initializes mutex */
    bcap_param->relation_mutex = &mutex;
    hr = initialize_mutex(&mutex);
    if (FAILED(hr)) goto exit_proc;

    while (1) {
        hr = wait_event(&bcap_param->term_main_evt, 300);
        if (SUCCEEDED(hr)) {
            break;
        }

        if (bcap_param->num_child < BCAP_CLIENT_MAX) {
            hr = tcp_accept(bcap_param->device.sock, &client);
            if (SUCCEEDED(hr)) {
                /* Sets no delay option */
                tcp_set_nodelay(client, 1);

                /* Sets keep alive option */
                tcp_set_keepalive(client, KEEPALIVE_ENABLE, KEEPALIVE_IDLE,
                    KEEPALIVE_INTERVAL, KEEPALIVE_COUNT);

                /* Adds child */
                change_relation(bcap_param, ADD_CHILD, &client);
            }
        }

        /* Deletes child */
        change_relation(bcap_param, DELETE_CHILD, NULL);
    }

exit_proc:
    /* Ends all child threads */
    child = bcap_param->node1;
    while (child != NULL) {
        set_event((EVENT *) &child->term_main_evt);
        exit_thread(child->main_thread);
        child = bcap_param->node1;
    }

    /* Deletes child */
    change_relation(bcap_param, DELETE_CHILD, NULL);

    /* Releases mutex */
    release_mutex(&mutex);

#if !defined(THRET)
    return ret;
#endif
}
/*
 * - create a thread to create and connect a socket
 * - orphan the thread if it does not return within the timeout period
 * - return socket connection success/fail
 * - limit the number of connection threads orphaned at any given time, so that
 *   if one is orphaned, return connection fail
 *
 * The algorithm assumes that if there are orphaned thread(s) then there are
 * network problems, probably because we are not online. A more general solution
 * would take into account that different callers may/probably would have
 * different dests, and any problems could be on the dest end.
 */
RC_TYPE tcp_initialize_async(TCP_SOCKET *p_self, CB_EXIT_COND p_exit_func, void *p_cb_data)
{
#ifndef _WIN32
    pthread_t c_thread = 0;
#else
    unsigned long c_thread = 0;
#endif
    CONNECT_THREAD_DATA *p_thread_data;
    CONNECT_THREAD_DATA *p_thread_return = NULL;
    int sleep_time = 0;
    RC_TYPE ret_rc;
    TCP_SOCKET *p_this_self;
    int is_exit_request = 0;
    int is_t_data_mutex_released = 0;
    int is_got_test_timer_mutex;

    DBG_PRINTF((LOG_INFO, "I:" MODULE_TAG "Entered tcp_initialize_async...\n"));

    if (!(p_self)) {
        DBG_PRINTF((LOG_CRIT, "C:" MODULE_TAG "p_self null in tcp_initialize_async...\n"));
        return RC_INVALID_POINTER;
    }

    get_mutex(&connect_in_progress_mutex);
    if (!(is_connect_in_progress)) {
        /* no orphaned thread, not in timer loop */
        get_mutex(&timer_loop_mutex);
    }
    else {
        /* orphaned thread, or still in timer loop -- don't let still-in-loop
           prevent us from making an attempt */
        if ((get_mutex_try(&timer_loop_mutex) == 0)) {
            /* we've orphaned a connection thread because of timeout --
               just return connection failure */
            DBG_PRINTF((LOG_INFO, "I:" MODULE_TAG "connection in progress in tcp_initialize_async...\n"));
            release_mutex(&timer_loop_mutex);
            release_mutex(&connect_in_progress_mutex);
            return RC_IP_CONNECT_FAILED;
        }
    }
    is_connect_in_progress += 1;
    release_mutex(&connect_in_progress_mutex);

    /* if the client requests exit we may abandon the connect thread, so clone the
       client's p_self data -- we'll deallocate this if the connect thread exits
       normally -- so our dangling thread does not have p_self data pulled out from
       under it when/if our client quits */
    /* see the function to see just what is/isn't cloned */
    tcp_clone(&p_this_self, p_self);
    p_self = p_this_self;

    is_got_test_timer_mutex = (get_mutex_try(&test_timer_loop_mutex) == 0);

    p_thread_data = create_connect_data(p_self);
    get_mutex(&p_thread_data->t_data_mutex);
    create_connect_thread(&c_thread, p_thread_data);

    /* loop/sleep 'til thread returns or program exit is requested */
    while (!(p_thread_data->is_thread_exit) && !((is_exit_request = p_exit_func(p_cb_data)))) {
        sleep_time += sleep_lightly_ms(1000, p_exit_func, p_cb_data);
        if (!(sleep_time < p_self->super.timeout)) {
            DBG_PRINTF((LOG_INFO, "I:" MODULE_TAG "tcp_initialize_async timed out...\n"));
            break;
        }
    }

    if (p_thread_data->is_thread_exit) {
        release_mutex(&p_thread_data->t_data_mutex);
        is_t_data_mutex_released = 1;
    }
    else {
        DBG_PRINTF((LOG_INFO, "I:" MODULE_TAG "connect thread has not exited...attempting to shutdown socket in tcp_initialize_async...\n"));
        if ((kill_thread(c_thread) == 0)) {
            c_thread = 0;
            DBG_PRINTF((LOG_WARNING, "W:" MODULE_TAG "killed connect thread in tcp_initialize_async...\n"));
        }
    }

    if (c_thread) {
        DBG_PRINTF((LOG_INFO, "I:" MODULE_TAG "tcp_initialize_async joining connect thread...\n"));
        exit_thread(c_thread, (void **) &p_thread_return);
    }

    DBG_PRINTF((LOG_INFO, "I:" MODULE_TAG "tcp_initialize_async returning...\n"));

    if (is_exit_request)
        ret_rc = global_is_online;
    else {
        global_is_online = p_thread_data->rc;
        ret_rc = global_is_online;
    }

    if (is_got_test_timer_mutex)
        release_mutex(&test_timer_loop_mutex);
    release_mutex(&timer_loop_mutex);

    /* if program not quitting, and thread exited, free associated data */
    if (!(p_thread_data->is_thread_exit)) {
        p_thread_data->is_parent_exit = 1;
        release_mutex(&p_thread_data->t_data_mutex);
    }
    else {
        if (!(is_t_data_mutex_released))
            release_mutex(&p_thread_data->t_data_mutex);
        wait_sem(&p_thread_data->t_data_sem);
        DBG_PRINTF((LOG_INFO, "I:" MODULE_TAG "destroying connect data in tcp_initialize_async...\n"));
        destroy_connect_data(&p_thread_data);
    }

    return ret_rc;
}
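/*
 * For context: p_exit_func is only ever invoked above as p_exit_func(p_cb_data)
 * and treated as a boolean. A hypothetical caller-supplied CB_EXIT_COND
 * callback compatible with that usage might look like this -- the names and
 * the flag-based design are illustrative assumptions, not from the library.
 */
static int example_exit_requested(void *p_cb_data)
{
    volatile int *p_quit_flag = (volatile int *)p_cb_data;  /* assumed caller-owned shutdown flag */
    return *p_quit_flag != 0;  /* non-zero makes the wait loop abort early */
}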
void wait_mutex()
{
    /* block until the mutex is free, then immediately release it */
    lock_mutex();
    release_mutex();
}
// A UnixSignal object is being constructed
void* Mono_Unix_UnixSignal_install (int sig)
{
#if defined(HAVE_SIGNAL)
    int i;
    signal_info* h = NULL;        // signals[] slot to install to
    int have_handler = 0;         // Candidates for signal_info handler fields
    void* handler = NULL;

    if (acquire_mutex (&signals_mutex) == -1)
        return NULL;

#if defined (SIGRTMIN) && defined (SIGRTMAX)
    /*The runtime uses some rt signals for itself so it's important to not override them.*/
    if (sig >= SIGRTMIN && sig <= SIGRTMAX && count_handlers (sig) == 0) {
        struct sigaction sinfo;
        sigaction (sig, NULL, &sinfo);
        if (sinfo.sa_handler != SIG_DFL || (void*)sinfo.sa_sigaction != (void*)SIG_DFL) {
            pthread_mutex_unlock (&signals_mutex);
            errno = EADDRINUSE;
            return NULL; // This is an rt signal with an existing handler. Bail out.
        }
    }
#endif /*defined (SIGRTMIN) && defined (SIGRTMAX)*/

    // Scan through signals list looking for (1) an unused spot (2) a usable value for handler
    for (i = 0; i < NUM_SIGNALS; ++i) {
        int just_installed = 0;
        // We're still looking for a signal_info spot, and this one is available:
        if (h == NULL && mph_int_get (&signals [i].signum) == 0) {
            h = &signals [i];
            h->handler = signal (sig, default_handler);
            if (h->handler == SIG_ERR) {
                h->handler = NULL;
                h = NULL;
                break;
            }
            else {
                just_installed = 1;
            }
        }
        // Check if this slot has a "usable" (not installed by this file) handler-to-restore-later:
        // (On the first signal to be installed, signals [i] will be == h when this happens.)
        if (!have_handler && (just_installed || mph_int_get (&signals [i].signum) == sig) &&
                signals [i].handler != default_handler) {
            have_handler = 1;
            handler = signals [i].handler;
        }
        if (h && have_handler) // We have everything we need
            break;
    }

    if (h) {
        // If we reached here without have_handler, this means that default_handler
        // was set as the signal handler before the first UnixSignal object was installed.
        g_assert (have_handler);

        // Overwrite the tentative handler we set a moment ago with a known-usable one
        h->handler = handler;
        h->have_handler = 1;

        mph_int_set (&h->count, 0);
        mph_int_set (&h->pipecnt, 0);
        mph_int_set (&h->signum, sig);
    }

    release_mutex (&signals_mutex);

    return h;
#else
    g_error ("signal() is not supported by this platform");
    return 0;
#endif
}
static gchar *
get_session_address_dbus_launch (GError **error)
{
  HANDLE autolaunch_mutex, init_mutex;
  char *address = NULL;
  wchar_t gio_path[MAX_PATH + 1 + 200];

  autolaunch_mutex = acquire_mutex (DBUS_AUTOLAUNCH_MUTEX);

  init_mutex = acquire_mutex (UNIQUE_DBUS_INIT_MUTEX);

  if (is_mutex_owned (DBUS_DAEMON_MUTEX))
    address = read_shm (DBUS_DAEMON_ADDRESS_INFO);

  release_mutex (init_mutex);

  if (address == NULL)
    {
      gio_path[MAX_PATH] = 0;
      if (GetModuleFileNameW (_g_io_win32_get_module (), gio_path, MAX_PATH))
        {
          PROCESS_INFORMATION pi = { 0 };
          STARTUPINFOW si = { 0 };
          BOOL res;
          wchar_t gio_path_short[MAX_PATH];
          wchar_t rundll_path[MAX_PATH * 2];
          wchar_t args[MAX_PATH * 4];

          GetShortPathNameW (gio_path, gio_path_short, MAX_PATH);

          GetWindowsDirectoryW (rundll_path, MAX_PATH);
          wcscat (rundll_path, L"\\rundll32.exe");
          if (GetFileAttributesW (rundll_path) == INVALID_FILE_ATTRIBUTES)
            {
              GetSystemDirectoryW (rundll_path, MAX_PATH);
              wcscat (rundll_path, L"\\rundll32.exe");
            }

          wcscpy (args, L"\"");
          wcscat (args, rundll_path);
          wcscat (args, L"\" ");
          wcscat (args, gio_path_short);
#if defined(_WIN64) || defined(_M_X64) || defined(_M_AMD64)
          wcscat (args, L",g_win32_run_session_bus");
#elif defined (_MSC_VER)
          wcscat (args, L",_g_win32_run_session_bus@16");
#else
          wcscat (args, L",g_win32_run_session_bus@16");
#endif

          res = CreateProcessW (rundll_path, args,
                                0, 0, FALSE,
                                NORMAL_PRIORITY_CLASS | CREATE_NO_WINDOW | DETACHED_PROCESS,
                                0, NULL /* TODO: Should be root */,
                                &si, &pi);
          if (res)
            address = read_shm (DBUS_DAEMON_ADDRESS_INFO);
        }
    }

  release_mutex (autolaunch_mutex);

  if (address == NULL)
    g_set_error (error,
                 G_IO_ERROR,
                 G_IO_ERROR_FAILED,
                 _("Session dbus not running, and autolaunch failed"));

  return address;
}
/*
 ************************************************************************************************************************
 * Release a mutex
 *
 * Description: This function is called to release a mutex.
 *
 * Arguments  : mutex_ptr is the address of the mutex object to be released
 *
 * Returns    : RAW_SUCCESS: raw os return success
 * Note(s)    : Any task pended on this semaphore will be woken up and will return RAW_B_DEL.
 *
 ************************************************************************************************************************
 */
RAW_OS_ERROR raw_mutex_put(RAW_MUTEX *mutex_ptr)
{
    LIST *block_list_head;
    RAW_TASK_OBJ *tcb;

    RAW_SR_ALLOC();

#if (RAW_MUTEX_FUNCTION_CHECK > 0)
    if (mutex_ptr == 0) {
        return RAW_NULL_OBJECT;
    }

    if (raw_int_nesting) {
        return RAW_NOT_CALLED_BY_ISR;
    }
#endif

    RAW_CRITICAL_ENTER();

    if (mutex_ptr->common_block_obj.object_type != RAW_MUTEX_OBJ_TYPE) {
        RAW_CRITICAL_EXIT();
        return RAW_ERROR_OBJECT_TYPE;
    }

    /* The mutex must be released by the task that owns it */
    if (raw_task_active != mutex_ptr->mtxtsk) {
        RAW_CRITICAL_EXIT();
        return RAW_MUTEX_NOT_RELEASE_BY_OCCYPY;
    }

    mutex_ptr->owner_nested--;

    if (mutex_ptr->owner_nested) {
        RAW_CRITICAL_EXIT();
        return RAW_MUTEX_OWNER_NESTED;
    }

    release_mutex(raw_task_active, mutex_ptr);

    block_list_head = &mutex_ptr->common_block_obj.block_list;

    /* if no task is blocked on this list, just return */
    if (is_list_empty(block_list_head)) {
        /* No wait task */
        mutex_ptr->mtxtsk = 0;
        RAW_CRITICAL_EXIT();

        TRACE_MUTEX_RELEASE_SUCCESS(raw_task_active, mutex_ptr);
        return RAW_SUCCESS;
    }

    /* there must be a task blocked on this mutex object */
    tcb = raw_list_entry(block_list_head->next, RAW_TASK_OBJ, task_list);

    /* Wake up the task that will now own the mutex, which is the highest priority task on the list */
    raw_wake_object(tcb);

    /* Change mutex get task */
    mutex_ptr->mtxtsk = tcb;
    mutex_ptr->mtxlist = tcb->mtxlist;
    tcb->mtxlist = mutex_ptr;
    mutex_ptr->owner_nested = 1u;

    if (mutex_ptr->policy == RAW_MUTEX_CEILING_POLICY) {
        if (tcb->priority > mutex_ptr->ceiling_prio) {
            /* Raise the priority of the task that got the lock
               to the highest priority limit */
            change_internal_task_priority(tcb, mutex_ptr->ceiling_prio);
        }
    }

    TRACE_MUTEX_WAKE_TASK(raw_task_active, tcb);

    RAW_CRITICAL_EXIT();

    raw_sched();

    return RAW_SUCCESS;
}