int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
    pthread_rwlock_t prwlock;
    int ret;

    if (rwlock == NULL)
        return(EINVAL);

    prwlock = *rwlock;

    /* check for static initialization */
    if (prwlock == NULL) {
        if ((ret = init_static(rwlock)) != 0)
            return(ret);

        prwlock = *rwlock;
    }

    /* grab the monitor lock */
    if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
        return(ret);

    if (prwlock->state != 0)
        ret = EBUSY;
    else
        /* indicate we are locked for writing */
        prwlock->state = -1;

    /* see the comment on this in pthread_rwlock_rdlock */
    _pthread_mutex_unlock(&prwlock->lock);

    return (ret);
}
/*! initialize */
void mrbc_init(uint8_t *ptr, unsigned int size)
{
    mrbc_init_alloc(ptr, size);
    init_static();
    hal_init();

    // TODO: function calls are daisy-chained as c_XXX => mrbc_XXX.
    //       This may be unnecessary complexity; needs refactoring.
    mrbc_define_method(0, mrbc_class_object, "sleep",           c_sleep);
    mrbc_define_method(0, mrbc_class_object, "sleep_ms",        c_sleep_ms);
    mrbc_define_method(0, mrbc_class_object, "relinquish",      c_relinquish);
    mrbc_define_method(0, mrbc_class_object, "change_priority", c_change_priority);
    mrbc_define_method(0, mrbc_class_object, "suspend_task",    c_suspend_task);
    mrbc_define_method(0, mrbc_class_object, "resume_task",     c_resume_task);
    mrbc_define_method(0, mrbc_class_object, "get_tcb",         c_get_tcb);

    mrbc_class *c_mutex;
    c_mutex = mrbc_define_class(0, "Mutex", mrbc_class_object);
    mrbc_define_method(0, c_mutex, "new",      c_mutex_new);
    mrbc_define_method(0, c_mutex, "lock",     c_mutex_lock);
    mrbc_define_method(0, c_mutex, "unlock",   c_mutex_unlock);
    mrbc_define_method(0, c_mutex, "try_lock", c_mutex_trylock);

    mrbc_class *c_vm;
    c_vm = mrbc_define_class(0, "VM", mrbc_class_object);
    mrbc_define_method(0, c_vm, "tick", c_vm_tick);
}
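/*
 * Usage sketch (not part of the original source): how a host program might
 * hand mrbc_init() its static memory pool before creating tasks.  Assumes
 * the usual mruby/c task API (mrbc_create_task, mrbc_run); the bytecode
 * array sample_mrb is a hypothetical name for output of the mrbc compiler.
 */
#include <stdint.h>
#include "mrubyc.h"

#define MEMORY_SIZE (1024 * 40)
static uint8_t memory_pool[MEMORY_SIZE];

extern const uint8_t sample_mrb[];   /* e.g. generated with `mrbc -B sample_mrb sample.rb` */

int host_start(void)
{
    /* give the allocator its pool and register the built-in classes above */
    mrbc_init(memory_pool, MEMORY_SIZE);

    /* create a task from precompiled bytecode and run the scheduler */
    if (mrbc_create_task(sample_mrb, NULL) == NULL)
        return -1;
    return mrbc_run();
}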
void Game::loadRessources()
{
    init_static();
    Character::init_static();
    mapSprite.SetImage(mapImage);
    mapSprite.Resize(sf::Vector2f(1024*2, 768*2));
}
HelpButton::HelpButton(wxWindow *parent, wxString const& page, wxPoint position, wxSize size)
: wxButton(parent, wxID_HELP, "", position, size)
{
    Bind(wxEVT_COMMAND_BUTTON_CLICKED, std::bind(&HelpButton::OpenPage, page));
    init_static();
    if (pages->find(page) == pages->end())
        throw agi::InternalError("Invalid help page", nullptr);
}
void HelpButton::OpenPage(wxString const& pageID)
{
    init_static();
    wxString page = (*pages)[pageID];
    wxString section;
    page = page.BeforeFirst('#', &section);
    wxLaunchDefaultBrowser(wxString::Format("http://docs.aegisub.org/3.1/%s/#%s", page, section));
}
static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
    pthread_rwlock_t prwlock;
    int ret;

    if (rwlock == NULL)
        return(EINVAL);

    prwlock = *rwlock;

    /* check for static initialization */
    if (prwlock == NULL) {
        if ((ret = init_static(rwlock)) != 0)
            return(ret);

        prwlock = *rwlock;
    }

    /* grab the monitor lock */
    if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
        return(ret);

    while (prwlock->state != 0) {
        prwlock->blocked_writers++;

        if (abstime != NULL) {
            ret = pthread_cond_timedwait(&prwlock->write_signal,
                &prwlock->lock, abstime);
        } else {
            ret = pthread_cond_wait(&prwlock->write_signal,
                &prwlock->lock);
        }
        if (ret != 0) {
            prwlock->blocked_writers--;
            pthread_mutex_unlock(&prwlock->lock);
            return(ret);
        }

        prwlock->blocked_writers--;
    }

    /* indicate we are locked for writing */
    prwlock->state = -1;

    /* see the comment on this in pthread_rwlock_rdlock */
    pthread_mutex_unlock(&prwlock->lock);

    return (ret);
}
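/*
 * Usage sketch (not part of the original source): how a caller would reach
 * the abstime branch above through a timed write-lock wrapper such as the
 * standard pthread_rwlock_timedwrlock().  Per POSIX the deadline is an
 * absolute CLOCK_REALTIME timestamp.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_rwlock_t data_lock = PTHREAD_RWLOCK_INITIALIZER;

/* try to become the writer, but give up after roughly two seconds */
int write_with_timeout(void)
{
    struct timespec deadline;

    clock_gettime(CLOCK_REALTIME, &deadline);   /* absolute, not relative */
    deadline.tv_sec += 2;

    int ret = pthread_rwlock_timedwrlock(&data_lock, &deadline);
    if (ret == ETIMEDOUT) {
        fprintf(stderr, "writer timed out\n");
        return ret;
    }
    if (ret != 0)
        return ret;

    /* ... update shared state ... */
    pthread_rwlock_unlock(&data_lock);
    return 0;
}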
void tgen_init()
{
    static bool initialized = false;
    if (initialized)
        return;
    initialized = true;

    init_noise();
    rebase_data();
    init_function_map();
    init_static();
    entry_point();

    // save memory state
    mem.save_heap(saved_heap);
}
int main(void)
{
    struct VM *vm;

    init_static();

    vm = vm_open();
    if( vm == NULL ){
        printf("VM open Error\n");
        return -1;
    }

    int ret = loca_mrb_array(vm, ary);
    if( ret != NO_ERROR ){
        printf("MRB Load Error (%04x_%04x)\n", ret>>16, ret&0xffff);
        return -1;
    }
int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
    int ret = 0;

    if (mutex == NULL)
        ret = EINVAL;

    /*
     * If the mutex is statically initialized, perform the dynamic
     * initialization:
     */
    else if ((*mutex != NULL) || (ret = init_static(mutex)) == 0)
        ret = mutex_trylock_common(mutex);

    return (ret);
}
int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
    pthread_rwlock_t prwlock;
    int ret;

    if (rwlock == NULL)
        return(EINVAL);

    prwlock = *rwlock;

    /* check for static initialization */
    if (prwlock == NULL) {
        if ((ret = init_static(rwlock)) != 0)
            return(ret);

        prwlock = *rwlock;
    }

    /* grab the monitor lock */
    if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
        return(ret);

    while (prwlock->state != 0) {
        prwlock->blocked_writers++;

        ret = _pthread_cond_wait(&prwlock->write_signal, &prwlock->lock);
        if (ret != 0) {
            prwlock->blocked_writers--;
            _pthread_mutex_unlock(&prwlock->lock);
            return(ret);
        }

        prwlock->blocked_writers--;
    }

    /* indicate we are locked for writing */
    prwlock->state = -1;

    /* see the comment on this in pthread_rwlock_rdlock */
    _pthread_mutex_unlock(&prwlock->lock);

    return (ret);
}
int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
    pthread_rwlock_t prwlock;
    struct pthread *curthread;
    int ret;

    if (rwlock == NULL)
        return(EINVAL);

    prwlock = *rwlock;

    /* check for static initialization */
    if (prwlock == NULL) {
        if ((ret = init_static(rwlock)) != 0)
            return(ret);

        prwlock = *rwlock;
    }

    /* grab the monitor lock */
    if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
        return(ret);

    curthread = _get_curthread();
    if (prwlock->state == MAX_READ_LOCKS)
        ret = EAGAIN;    /* too many read locks acquired */
    else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
        /* see comment for pthread_rwlock_rdlock() */
        curthread->rdlock_count++;
        prwlock->state++;
    }
    /* give writers priority over readers */
    else if (prwlock->blocked_writers || prwlock->state < 0)
        ret = EBUSY;
    else {
        prwlock->state++;    /* indicate we are locked for reading */
        curthread->rdlock_count++;
    }

    /* see the comment on this in pthread_rwlock_rdlock */
    _pthread_mutex_unlock(&prwlock->lock);

    return (ret);
}
int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
    int ret = 0;

    if (_thread_initial == NULL)
        _thread_init();

    if (mutex == NULL)
        ret = EINVAL;

    /*
     * If the mutex is statically initialized, perform the dynamic
     * initialization. Note: _thread_mutex_lock() in libc requires
     * pthread_mutex_lock() to perform the mutex init when *mutex
     * is NULL.
     */
    else if ((*mutex != NULL) || ((ret = init_static(mutex)) == 0))
        ret = mutex_lock_common(mutex);

    return (ret);
}
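/*
 * Usage sketch (not part of the original source): in this implementation a
 * statically initialized pthread_mutex_t is a NULL pointer until first use,
 * so the init_static() branch above runs on the first lock attempt.  The
 * caller just uses the portable PTHREAD_MUTEX_INITIALIZER pattern.
 */
#include <pthread.h>

static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

void bump_counter(void)
{
    /* the very first lock of counter_lock performs the dynamic init */
    pthread_mutex_lock(&counter_lock);
    counter++;
    pthread_mutex_unlock(&counter_lock);
}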
void mrubyc(uint8_t *mrbbuf)
{
    struct VM *vm;

    mrbc_init_alloc(memory_pool, MEMORY_SIZE);
    init_static();

    vm = mrbc_vm_open(NULL);
    if( vm == 0 ) {
        fprintf(stderr, "Error: Can't open VM.\n");
        return;
    }

    if( mrbc_load_mrb(vm, mrbbuf) != 0 ) {
        fprintf(stderr, "Error: Illegal bytecode.\n");
        return;
    }

    mrbc_vm_begin(vm);
    mrbc_vm_run(vm);
    mrbc_vm_end(vm);
    mrbc_vm_close(vm);
}
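/*
 * Usage sketch (not part of the original source): a hypothetical caller that
 * reads an .mrb file into memory and hands it to the mrubyc() helper above.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

void mrubyc(uint8_t *mrbbuf);   /* defined above */

int run_bytecode_file(const char *path)
{
    FILE *fp = fopen(path, "rb");
    if (fp == NULL)
        return -1;

    fseek(fp, 0, SEEK_END);
    long len = ftell(fp);
    rewind(fp);

    uint8_t *buf = malloc(len);
    if (buf == NULL || fread(buf, 1, len, fp) != (size_t)len) {
        fclose(fp);
        free(buf);
        return -1;
    }
    fclose(fp);

    mrubyc(buf);    /* open a VM, load the bytecode, run it, and clean up */
    free(buf);
    return 0;
}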
int
_pthread_mutex_trylock(pthread_mutex_t * mutex)
{
    struct pthread *curthread = _get_curthread();
    int ret = 0;

    if (mutex == NULL)
        ret = EINVAL;

    /*
     * If the mutex is statically initialized, perform the dynamic
     * initialization:
     */
    else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
        /*
         * Defer signals to protect the scheduling queues from
         * access by the signal handler:
         */
        _thread_kern_sig_defer();

        /* Lock the mutex structure: */
        _SPINLOCK(&(*mutex)->lock);

        /*
         * If the mutex was statically allocated, properly
         * initialize the tail queue.
         */
        if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
            TAILQ_INIT(&(*mutex)->m_queue);
            _MUTEX_INIT_LINK(*mutex);
            (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
        }

        /* Process according to mutex type: */
        switch ((*mutex)->m_protocol) {
        /* Default POSIX mutex: */
        case PTHREAD_PRIO_NONE:
            /* Check if this mutex is not locked: */
            if ((*mutex)->m_owner == NULL) {
                /* Lock the mutex for the running thread: */
                (*mutex)->m_owner = curthread;

                /* Add to the list of owned mutexes: */
                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                TAILQ_INSERT_TAIL(&curthread->mutexq,
                    (*mutex), m_qe);
            } else if ((*mutex)->m_owner == curthread)
                ret = mutex_self_trylock(*mutex);
            else
                /* Return a busy error: */
                ret = EBUSY;
            break;

        /* POSIX priority inheritance mutex: */
        case PTHREAD_PRIO_INHERIT:
            /* Check if this mutex is not locked: */
            if ((*mutex)->m_owner == NULL) {
                /* Lock the mutex for the running thread: */
                (*mutex)->m_owner = curthread;

                /* Track number of priority mutexes owned: */
                curthread->priority_mutex_count++;

                /*
                 * The mutex takes on the attributes of the
                 * running thread when there are no waiters.
                 */
                (*mutex)->m_prio = curthread->active_priority;
                (*mutex)->m_saved_prio =
                    curthread->inherited_priority;

                /* Add to the list of owned mutexes: */
                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                TAILQ_INSERT_TAIL(&curthread->mutexq,
                    (*mutex), m_qe);
            } else if ((*mutex)->m_owner == curthread)
                ret = mutex_self_trylock(*mutex);
            else
                /* Return a busy error: */
                ret = EBUSY;
            break;

        /* POSIX priority protection mutex: */
        case PTHREAD_PRIO_PROTECT:
            /* Check for a priority ceiling violation: */
            if (curthread->active_priority > (*mutex)->m_prio)
                ret = EINVAL;

            /* Check if this mutex is not locked: */
            else if ((*mutex)->m_owner == NULL) {
                /* Lock the mutex for the running thread: */
                (*mutex)->m_owner = curthread;

                /* Track number of priority mutexes owned: */
                curthread->priority_mutex_count++;

                /*
                 * The running thread inherits the ceiling
                 * priority of the mutex and executes at that
                 * priority.
                 */
                curthread->active_priority = (*mutex)->m_prio;
                (*mutex)->m_saved_prio =
                    curthread->inherited_priority;
                curthread->inherited_priority =
                    (*mutex)->m_prio;

                /* Add to the list of owned mutexes: */
                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                TAILQ_INSERT_TAIL(&curthread->mutexq,
                    (*mutex), m_qe);
            } else if ((*mutex)->m_owner == curthread)
                ret = mutex_self_trylock(*mutex);
            else
                /* Return a busy error: */
                ret = EBUSY;
            break;

        /* Trap invalid mutex types: */
        default:
            /* Return an invalid argument error: */
            ret = EINVAL;
            break;
        }

        /* Unlock the mutex structure: */
        _SPINUNLOCK(&(*mutex)->lock);

        /*
         * Undefer and handle pending signals, yielding if
         * necessary:
         */
        _thread_kern_sig_undefer();
    }

    /* Return the completion status: */
    return (ret);
}
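/*
 * Usage sketch (not part of the original source): the PTHREAD_PRIO_PROTECT
 * branch above rejects the lock with EINVAL when the caller's priority is
 * above the ceiling.  Such a mutex is built with the standard POSIX
 * attribute calls shown here.
 */
#include <pthread.h>

int make_ceiling_mutex(pthread_mutex_t *m, int ceiling)
{
    pthread_mutexattr_t attr;
    int ret;

    if ((ret = pthread_mutexattr_init(&attr)) != 0)
        return ret;

    if ((ret = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT)) == 0 &&
        (ret = pthread_mutexattr_setprioceiling(&attr, ceiling)) == 0)
        ret = pthread_mutex_init(m, &attr);

    pthread_mutexattr_destroy(&attr);
    return ret;
}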
int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
    pthread_rwlock_t prwlock;
    struct pthread *curthread;
    int ret;

    if (rwlock == NULL)
        return(EINVAL);

    prwlock = *rwlock;

    /* check for static initialization */
    if (prwlock == NULL) {
        if ((ret = init_static(rwlock)) != 0)
            return(ret);

        prwlock = *rwlock;
    }

    /* grab the monitor lock */
    if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
        return(ret);

    /* check lock count */
    if (prwlock->state == MAX_READ_LOCKS) {
        _pthread_mutex_unlock(&prwlock->lock);
        return (EAGAIN);
    }

    curthread = _get_curthread();
    if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
        /*
         * To avoid having to track all the rdlocks held by
         * a thread or all of the threads that hold a rdlock,
         * we keep a simple count of all the rdlocks held by
         * a thread.  If a thread holds any rdlocks it is
         * possible that it is attempting to take a recursive
         * rdlock.  If there are blocked writers and precedence
         * is given to them, then that would result in the thread
         * deadlocking.  So allowing a thread to take the rdlock
         * when it already has one or more rdlocks avoids the
         * deadlock.  I hope the reader can follow that logic ;-)
         */
        ;    /* nothing needed */
    } else {
        /* give writers priority over readers */
        while (prwlock->blocked_writers || prwlock->state < 0) {
            ret = _pthread_cond_wait(&prwlock->read_signal, &prwlock->lock);
            if (ret != 0) {
                /* can't do a whole lot if this fails */
                _pthread_mutex_unlock(&prwlock->lock);
                return(ret);
            }
        }
    }

    curthread->rdlock_count++;
    prwlock->state++;    /* indicate we are locked for reading */

    /*
     * Something is really wrong if this call fails.  Returning
     * error won't do because we've already obtained the read
     * lock.  Decrementing 'state' is no good because we probably
     * don't have the monitor lock.
     */
    _pthread_mutex_unlock(&prwlock->lock);

    return (ret);
}
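/*
 * Usage sketch (not part of the original source): the rdlock_count
 * bookkeeping above lets a thread that already holds a read lock acquire
 * another one even while writers are queued, which is exactly the nested
 * pattern below (plain POSIX rwlock usage; table/lookup are made-up names).
 */
#include <pthread.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static int table[16];

/* takes its own read lock; safe to call with table_lock already read-held */
static int lookup(int key)
{
    pthread_rwlock_rdlock(&table_lock);
    int v = table[key & 15];
    pthread_rwlock_unlock(&table_lock);
    return v;
}

int lookup_pair(int a, int b)
{
    int sum;

    /* outer read lock; the nested rdlock inside lookup() would deadlock
     * behind a blocked writer if recursive read locks were not allowed */
    pthread_rwlock_rdlock(&table_lock);
    sum = lookup(a) + lookup(b);
    pthread_rwlock_unlock(&table_lock);
    return sum;
}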
/**
 * OpenVPN's main init-run-cleanup loop.
 * @ingroup eventloop
 *
 * This function contains the two outer OpenVPN loops.  Its structure is
 * as follows:
 *  - Once-per-process initialization.
 *  - Outer loop, run at startup and then once per \c SIGHUP:
 *    - Level 1 initialization
 *    - Inner loop, run at startup and then once per \c SIGUSR1:
 *      - Call event loop function depending on client or server mode:
 *        - \c tunnel_point_to_point()
 *        - \c tunnel_server()
 *    - Level 1 cleanup
 *  - Once-per-process cleanup.
 *
 * @param argc - Commandline argument count.
 * @param argv - Commandline argument values.
 */
int
main (int argc, char *argv[])
{
    struct context c;

#if PEDANTIC
    fprintf (stderr, "Sorry, I was built with --enable-pedantic and I am incapable of doing any real work!\n");
    return 1;
#endif

    CLEAR (c);

    /* signify first time for components which can
       only be initialized once per program instantiation. */
    c.first_time = true;

    /* initialize program-wide statics */
    if (init_static ())
    {
        /*
         * This loop is initially executed on startup and then
         * once per SIGHUP.
         */
        do
        {
            /* enter pre-initialization mode with regard to signal handling */
            pre_init_signal_catch ();

            /* zero context struct but leave first_time member alone */
            context_clear_all_except_first_time (&c);

            /* static signal info object */
            CLEAR (siginfo_static);
            c.sig = &siginfo_static;

            /* initialize garbage collector scoped to context object */
            gc_init (&c.gc);

            /* initialize environmental variable store */
            c.es = env_set_create (NULL);
#ifdef WIN32
            env_set_add_win32 (c.es);
#endif

#ifdef ENABLE_MANAGEMENT
            /* initialize management subsystem */
            init_management (&c);
#endif

            /* initialize options to default state */
            init_options (&c.options, true);

            /* parse command line options, and read configuration file */
            parse_argv (&c.options, argc, argv, M_USAGE, OPT_P_DEFAULT, NULL, c.es);

#ifdef ENABLE_PLUGIN
            /* plugins may contribute options configuration */
            init_verb_mute (&c, IVM_LEVEL_1);
            init_plugins (&c);
            open_plugins (&c, true, OPENVPN_PLUGIN_INIT_PRE_CONFIG_PARSE);
#endif

            /* init verbosity and mute levels */
            init_verb_mute (&c, IVM_LEVEL_1);

            /* set dev options */
            init_options_dev (&c.options);

            /* openssl print info? */
            if (print_openssl_info (&c.options))
                break;

            /* --genkey mode? */
            if (do_genkey (&c.options))
                break;

            /* tun/tap persist command? */
            if (do_persist_tuntap (&c.options))
                break;

            /* sanity check on options */
            options_postprocess (&c.options);

            /* show all option settings */
            show_settings (&c.options);

            /* print version number */
            msg (M_INFO, "%s", title_string);

            /* misc stuff */
            pre_setup (&c.options);

            /* test crypto? */
            if (do_test_crypto (&c.options))
                break;

#ifdef ENABLE_MANAGEMENT
            /* open management subsystem */
            if (!open_management (&c))
                break;
#endif

            /* set certain options as environmental variables */
            setenv_settings (c.es, &c.options);

            /* finish context init */
            context_init_1 (&c);

            do
            {
                /* run tunnel depending on mode */
                switch (c.options.mode)
                {
                case MODE_POINT_TO_POINT:
                    tunnel_point_to_point (&c);
                    break;
#if P2MP_SERVER
                case MODE_SERVER:
                    tunnel_server (&c);
                    break;
#endif
                default:
                    ASSERT (0);
                }

                /* indicates first iteration -- has program-wide scope */
                c.first_time = false;

                /* any signals received? */
                if (IS_SIG (&c))
                    print_signal (c.sig, NULL, M_INFO);

                /* pass restart status to management subsystem */
                signal_restart_status (c.sig);
            } while (c.sig->signal_received == SIGUSR1);

            uninit_options (&c.options);
            gc_reset (&c.gc);
        } while (c.sig->signal_received == SIGHUP);
    }

    context_gc_free (&c);

    env_set_destroy (c.es);

#ifdef ENABLE_MANAGEMENT
    /* close management interface */
    close_management ();
#endif

    /* uninitialize program-wide statics */
    uninit_static ();

    openvpn_exit (OPENVPN_EXIT_STATUS_GOOD);    /* exit point */
    return 0;                                   /* NOTREACHED */
}
int
_pthread_mutex_lock(pthread_mutex_t * mutex)
{
    struct pthread *curthread = _get_curthread();
    int ret = 0;

    if (_thread_initial == NULL)
        _thread_init();

    if (mutex == NULL)
        return (EINVAL);

    /*
     * If the mutex is statically initialized, perform the dynamic
     * initialization:
     */
    if ((*mutex == NULL) &&
        ((ret = init_static(mutex)) != 0))
        return (ret);

    /* Reset the interrupted flag: */
    curthread->interrupted = 0;

    /*
     * Enter a loop waiting to become the mutex owner.  We need a
     * loop in case the waiting thread is interrupted by a signal
     * to execute a signal handler.  It is not (currently) possible
     * to remain in the waiting queue while running a handler.
     * Instead, the thread is interrupted and backed out of the
     * waiting queue prior to executing the signal handler.
     */
    do {
        /*
         * Defer signals to protect the scheduling queues from
         * access by the signal handler:
         */
        _thread_kern_sig_defer();

        /* Lock the mutex structure: */
        _SPINLOCK(&(*mutex)->lock);

        /*
         * If the mutex was statically allocated, properly
         * initialize the tail queue.
         */
        if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
            TAILQ_INIT(&(*mutex)->m_queue);
            (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
            _MUTEX_INIT_LINK(*mutex);
        }

        /* Process according to mutex type: */
        switch ((*mutex)->m_protocol) {
        /* Default POSIX mutex: */
        case PTHREAD_PRIO_NONE:
            if ((*mutex)->m_owner == NULL) {
                /* Lock the mutex for this thread: */
                (*mutex)->m_owner = curthread;

                /* Add to the list of owned mutexes: */
                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                TAILQ_INSERT_TAIL(&curthread->mutexq,
                    (*mutex), m_qe);
            } else if ((*mutex)->m_owner == curthread)
                ret = mutex_self_lock(*mutex);
            else {
                /*
                 * Join the queue of threads waiting to lock
                 * the mutex:
                 */
                mutex_queue_enq(*mutex, curthread);

                /*
                 * Keep a pointer to the mutex this thread
                 * is waiting on:
                 */
                curthread->data.mutex = *mutex;

                /*
                 * Unlock the mutex structure and schedule the
                 * next thread:
                 */
                _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
                    &(*mutex)->lock, __FILE__, __LINE__);

                /* Lock the mutex structure again: */
                _SPINLOCK(&(*mutex)->lock);
            }
            break;

        /* POSIX priority inheritance mutex: */
        case PTHREAD_PRIO_INHERIT:
            /* Check if this mutex is not locked: */
            if ((*mutex)->m_owner == NULL) {
                /* Lock the mutex for this thread: */
                (*mutex)->m_owner = curthread;

                /* Track number of priority mutexes owned: */
                curthread->priority_mutex_count++;

                /*
                 * The mutex takes on attributes of the
                 * running thread when there are no waiters.
                 */
                (*mutex)->m_prio = curthread->active_priority;
                (*mutex)->m_saved_prio =
                    curthread->inherited_priority;
                curthread->inherited_priority =
                    (*mutex)->m_prio;

                /* Add to the list of owned mutexes: */
                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                TAILQ_INSERT_TAIL(&curthread->mutexq,
                    (*mutex), m_qe);
            } else if ((*mutex)->m_owner == curthread)
                ret = mutex_self_lock(*mutex);
            else {
                /*
                 * Join the queue of threads waiting to lock
                 * the mutex:
                 */
                mutex_queue_enq(*mutex, curthread);

                /*
                 * Keep a pointer to the mutex this thread
                 * is waiting on:
                 */
                curthread->data.mutex = *mutex;

                if (curthread->active_priority > (*mutex)->m_prio)
                    /* Adjust priorities: */
                    mutex_priority_adjust(*mutex);

                /*
                 * Unlock the mutex structure and schedule the
                 * next thread:
                 */
                _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
                    &(*mutex)->lock, __FILE__, __LINE__);

                /* Lock the mutex structure again: */
                _SPINLOCK(&(*mutex)->lock);
            }
            break;

        /* POSIX priority protection mutex: */
        case PTHREAD_PRIO_PROTECT:
            /* Check for a priority ceiling violation: */
            if (curthread->active_priority > (*mutex)->m_prio)
                ret = EINVAL;

            /* Check if this mutex is not locked: */
            else if ((*mutex)->m_owner == NULL) {
                /*
                 * Lock the mutex for the running
                 * thread:
                 */
                (*mutex)->m_owner = curthread;

                /* Track number of priority mutexes owned: */
                curthread->priority_mutex_count++;

                /*
                 * The running thread inherits the ceiling
                 * priority of the mutex and executes at that
                 * priority:
                 */
                curthread->active_priority = (*mutex)->m_prio;
                (*mutex)->m_saved_prio =
                    curthread->inherited_priority;
                curthread->inherited_priority =
                    (*mutex)->m_prio;

                /* Add to the list of owned mutexes: */
                _MUTEX_ASSERT_NOT_OWNED(*mutex);
                TAILQ_INSERT_TAIL(&curthread->mutexq,
                    (*mutex), m_qe);
            } else if ((*mutex)->m_owner == curthread)
                ret = mutex_self_lock(*mutex);
            else {
                /*
                 * Join the queue of threads waiting to lock
                 * the mutex:
                 */
                mutex_queue_enq(*mutex, curthread);

                /*
                 * Keep a pointer to the mutex this thread
                 * is waiting on:
                 */
                curthread->data.mutex = *mutex;

                /* Clear any previous error: */
                errno = 0;

                /*
                 * Unlock the mutex structure and schedule the
                 * next thread:
                 */
                _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
                    &(*mutex)->lock, __FILE__, __LINE__);

                /* Lock the mutex structure again: */
                _SPINLOCK(&(*mutex)->lock);

                /*
                 * The thread's priority may have changed while
                 * waiting for the mutex causing a ceiling
                 * violation.
                 */
                ret = errno;
                errno = 0;
            }
            break;

        /* Trap invalid mutex types: */
        default:
            /* Return an invalid argument error: */
            ret = EINVAL;
            break;
        }

        /*
         * Check to see if this thread was interrupted and
         * is still in the mutex queue of waiting threads:
         */
        if (curthread->interrupted != 0)
            mutex_queue_remove(*mutex, curthread);

        /* Unlock the mutex structure: */
        _SPINUNLOCK(&(*mutex)->lock);

        /*
         * Undefer and handle pending signals, yielding if
         * necessary:
         */
        _thread_kern_sig_undefer();
    } while (((*mutex)->m_owner != curthread) && (ret == 0) &&
        (curthread->interrupted == 0));

    if (curthread->interrupted != 0 &&
        curthread->continuation != NULL)
        curthread->continuation((void *) curthread);

    /* Return the completion status: */
    return (ret);
}