/**
 * Look up the mutex client object registered at address @a mutex, or
 * register and initialize a new one if none exists yet.
 *
 * Returns NULL (after recording a "Not a mutex" error) when the address
 * range [mutex, mutex + size) overlaps a client object of another type.
 */
static struct mutex_info* mutex_get_or_allocate(const Addr mutex, const SizeT size, const MutexT mutex_type)
{
  struct mutex_info* p;

  /* The lookup below relies on the mutex member being at offset zero, so a
   * NULL result from clientobj_get() leaves p NULL as well.
   * NOTE(review): taking a member address through a null pointer is
   * technically UB in ISO C; it only works because the offset is zero. */
  tl_assert(offsetof(DrdClientobj, mutex) == 0);
  p = &clientobj_get(mutex, ClientMutex)->mutex;
  if (p)
  {
    /* Already registered: type and extent must match the earlier record. */
    tl_assert(p->mutex_type == mutex_type);
    tl_assert(p->a2 - p->a1 == size);
    return p;
  }
  /* No mutex here, but some other client object occupies the range. */
  if (clientobj_present(mutex, mutex + size))
  {
    GenericErrInfo GEI;
    VG_(maybe_record_error)(VG_(get_running_tid)(),
                            GenericErr,
                            VG_(get_IP)(VG_(get_running_tid)()),
                            "Not a mutex",
                            &GEI);
    return 0;
  }
  /* Fresh address range: register and initialize a new mutex object. */
  p = &clientobj_add(mutex, mutex + size, ClientMutex)->mutex;
  mutex_initialize(p, mutex, size, mutex_type);
  return p;
}
/** Initialize physical memory management.
 *
 * On the bootstrap CPU, set up the zone list and the memory-availability
 * synchronization primitives, let the architecture register low memory,
 * then mark all boot-time regions (kernel image, stack, init tasks,
 * boot allocations and frame 0) as unavailable before registering
 * high memory.
 */
void frame_init(void)
{
	/* Global state is set up only once, by the first (bootstrap) CPU. */
	if (config.cpu_active == 1) {
		zones.count = 0;
		irq_spinlock_initialize(&zones.lock, "frame.zones.lock");
		mutex_initialize(&mem_avail_mtx, MUTEX_ACTIVE);
		condvar_initialize(&mem_avail_cv);
	}

	/* Tell the architecture to create some memory */
	frame_low_arch_init();

	if (config.cpu_active == 1) {
		/* Reserve the kernel image and the kernel stack. */
		frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)),
		    SIZE2FRAMES(config.kernel_size));
		frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)),
		    SIZE2FRAMES(config.stack_size));

		/* Reserve memory occupied by the init tasks. */
		for (size_t i = 0; i < init.cnt; i++)
			frame_mark_unavailable(ADDR2PFN(init.tasks[i].paddr),
			    SIZE2FRAMES(init.tasks[i].size));

		/* Reserve early boot allocations, if any were made. */
		if (ballocs.size)
			frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)),
			    SIZE2FRAMES(ballocs.size));

		/*
		 * Blacklist first frame, as allocating NULL would
		 * fail in some places
		 */
		frame_mark_unavailable(0, 1);
	}

	frame_high_arch_init();
}
/** Initialize a phone structure.
 *
 * The phone starts out free (unconnected), owned by @a caller,
 * with no callee and no active calls.
 *
 * @param phone  Phone structure to be initialized.
 * @param caller Owning task.
 *
 */
void ipc_phone_init(phone_t *phone, task_t *caller)
{
	phone->caller = caller;
	phone->callee = NULL;
	phone->state = IPC_PHONE_FREE;
	atomic_set(&phone->active_calls, 0);
	mutex_initialize(&phone->lock, MUTEX_PASSIVE);
}
/** Initialize sysinfo subsystem
 *
 * Set up the sysinfo lock and create the SLAB cache
 * from which sysinfo items are allocated.
 *
 */
void sysinfo_init(void)
{
	mutex_initialize(&sysinfo_lock, MUTEX_ACTIVE);

	sysinfo_item_slab = slab_cache_create("sysinfo_item_t",
	    sizeof(sysinfo_item_t), 0, sysinfo_item_constructor,
	    sysinfo_item_destructor, SLAB_CACHE_MAGDEFERRED);
}
task_sync_data_t *allocate_task_sync_data(void) { task_sync_data_t *s=alloc_pages_addr(SYNC_OBJS_PAGES,0); if( s ) { memset(s,0,SYNC_OBJS_PAGES*PAGE_SIZE); atomic_set(&s->use_count,1); mutex_initialize(&s->mutex); } return s; }
/*
 * (Re)initialize logging: reset the writer mutex, close any currently
 * open log file, and open the configured log file again if a path has
 * been set.
 */
void log_open()
{
	mutex_initialize(&l_writeMutex);

	/* Both configurations close first, so do it unconditionally. */
	_log_closefile();

	/* Re-open only when a log file path has been configured. */
	if (l_logFilePath != NULL)
		_log_openfile(l_logFilePath);
}
/*
 * Allocate a new ThreadGroup object: an open (non-enclosed) group holding
 * an empty, untrusted thread array, guarded by a freshly created mutex.
 */
static VALUE
thgroup_s_alloc(VALUE self, SEL sel)
{
    rb_thread_group_t *group =
	(rb_thread_group_t *)xmalloc(sizeof(rb_thread_group_t));

    group->enclosed = false;

    /* Store the thread list through the GC write barrier, then untrust it. */
    GC_WB(&group->threads, rb_ary_new());
    OBJ_UNTRUST(group->threads);

    /* Create and initialize the mutex protecting the group. */
    VALUE lock = mutex_s_alloc(rb_cMutex, 0);
    mutex_initialize(lock, 0);
    GC_WB(&group->mutex, lock);

    return Data_Wrap_Struct(self, NULL, NULL, group);
}
/** Initialize DDI.
 *
 * Set up the physical-area lock and create the B+tree
 * that tracks registered physical areas.
 *
 */
void ddi_init(void)
{
	mutex_initialize(&parea_lock, MUTEX_PASSIVE);
	btree_create(&parea_btree);
}