コード例 #1
0
ファイル: erl_bif_ddll.c プロジェクト: aronisstav/otp
/*
 * Allocate a fresh DE_Handle and try to load the dynamic driver found
 * at `path` under the expected driver name `name`.
 *
 * On success *dhp is set to the initialized handle and ERL_DE_NO_ERROR
 * is returned.  On failure the handle is freed again, *dhp is set to
 * NULL, and the ERL_DE_* error code from do_load_driver_entry() is
 * returned.  Caller must hold the driver list rwlock (write mode).
 */
static int load_driver_entry(DE_Handle **dhp, char *path, char *name)
{
    DE_Handle *handle = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sizeof(DE_Handle));
    int result;

    assert_drv_list_rwlocked();

    /* Start from a fully "empty" handle; do_load_driver_entry() fills
     * in the real state on success. */
    handle->handle = NULL;
    handle->procs = NULL;
    erts_atomic32_init_nob(&handle->port_count, 0);
    erts_refc_init(&(handle->refc), (erts_aint_t) 0);
    handle->status = -1;                /* not loaded yet */
    handle->reload_full_path = NULL;
    handle->reload_driver_name = NULL;
    handle->reload_flags = 0;
    handle->full_path = NULL;
    handle->flags = 0;

    result = do_load_driver_entry(handle, path, name);
    if (result != ERL_DE_NO_ERROR) {
        /* Loading failed; hand back no handle at all. */
        erts_free(ERTS_ALC_T_DDLL_HANDLE, (void *) handle);
        handle = NULL;
    }

    *dhp = handle;
    return result;
}
コード例 #2
0
ファイル: erl_process_lock.c プロジェクト: PiotrSikora/otp
/*
 * Initialize the per-process lock structure of `p`.
 *
 * The process is created with *all* of its locks held by the creating
 * thread, so every lock below is initialized directly into the locked
 * state (and registered as held with the lock checker / lock counter
 * where those are enabled).
 */
void
erts_proc_lock_init(Process *p)
{
#if ERTS_PROC_LOCK_OWN_IMPL
    /* We always start with all locks locked */
#if ERTS_PROC_LOCK_ATOMIC_IMPL
    /* Lock state lives in a single atomic word of flag bits. */
    erts_smp_atomic32_init_nob(&p->lock.flags,
			       (erts_aint32_t) ERTS_PROC_LOCKS_ALL);
#else
    p->lock.flags = ERTS_PROC_LOCKS_ALL;
#endif
    p->lock.queues = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
    /* Record with the lock checker that all process locks are held. */
    erts_proc_lc_trylock(p, ERTS_PROC_LOCKS_ALL, 1);
#endif
#elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL
    /* Raw-mutex implementation: one mutex per lock class
     * (main/link/msgq/status); each is created, immediately locked,
     * and registered with the lock checker as held. */
    erts_mtx_init_x(&p->lock.main, "proc_main", p->id);
    ethr_mutex_lock(&p->lock.main.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_trylock(1, &p->lock.main.lc);
#endif
    erts_mtx_init_x(&p->lock.link, "proc_link", p->id);
    ethr_mutex_lock(&p->lock.link.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_trylock(1, &p->lock.link.lc);
#endif
    erts_mtx_init_x(&p->lock.msgq, "proc_msgq", p->id);
    ethr_mutex_lock(&p->lock.msgq.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_trylock(1, &p->lock.msgq.lc);
#endif
    erts_mtx_init_x(&p->lock.status, "proc_status", p->id);
    ethr_mutex_lock(&p->lock.status.mtx);
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_trylock(1, &p->lock.status.lc);
#endif
#endif
    /* The creating thread starts out with one reference to the lock
     * structure. */
    erts_atomic32_init_nob(&p->lock.refc, 1);
#ifdef ERTS_PROC_LOCK_DEBUG
    {
	/* Debug bookkeeping: mark every individual lock bit as
	 * locked exactly once. */
	int i;
	for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++)
	    erts_smp_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1);
    }
#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
    /* Lock counting: initialize counters and record the initial
     * "acquired all locks" event. */
    erts_lcnt_proc_lock_init(p);
    erts_lcnt_proc_lock(&(p->lock), ERTS_PROC_LOCKS_ALL);
    erts_lcnt_proc_lock_post_x(&(p->lock), ERTS_PROC_LOCKS_ALL, __FILE__, __LINE__);
#endif
}
コード例 #3
0
ファイル: erl_thr_queue.c プロジェクト: 0x00evil/otp
/*
 * Initialize the thread queue `q` from the settings in `qi`.
 *
 * Without thread support the queue degenerates to a plain singly
 * linked list (first/last).  With thread support the queue starts out
 * containing only the embedded marker element, which both head and
 * tail initially point at; the marker is how enqueue/dequeue sides
 * detect an "empty" queue without locking.
 */
void
erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi)
{
#ifndef USE_THREADS
    /* Single-threaded build: simple linked list, no atomics needed. */
    q->init = *qi;
    if (!q->init.notify)
	q->init.notify = noop_callback;
    q->first = NULL;
    q->last = NULL;
    q->q.blk = NULL;
#else
    /* Tail (enqueue) end: starts pointing at the embedded marker. */
    erts_atomic_init_nob(&q->tail.data.marker.next, ERTS_AINT_NULL);
    q->tail.data.marker.data.ptr = NULL;
    erts_atomic_init_nob(&q->tail.data.last,
			 (erts_aint_t) &q->tail.data.marker);
    /* Unreferenced-element reference counters (two generations,
     * selected by um_refc_ix). */
    erts_atomic_init_nob(&q->tail.data.um_refc[0], 0);
    erts_atomic_init_nob(&q->tail.data.um_refc[1], 0);
    erts_atomic32_init_nob(&q->tail.data.um_refc_ix, 0);
    q->tail.data.live = qi->live.objects;
    q->tail.data.arg = qi->arg;
    q->tail.data.notify = qi->notify;
    if (!q->tail.data.notify)
	q->tail.data.notify = noop_callback;	/* never leave notify NULL */

    /* Head (dequeue) end: also starts at the marker. */
    erts_atomic_init_nob(&q->head.head, (erts_aint_t) &q->tail.data.marker);
    q->head.live = qi->live.objects;
    q->head.first = &q->tail.data.marker;
    q->head.unref_end = &q->tail.data.marker;
    q->head.clean_reached_head_count = 0;
    q->head.deq_fini.automatic = qi->auto_finalize_dequeue;
    q->head.deq_fini.start = NULL;
    q->head.deq_fini.end = NULL;
#ifdef ERTS_SMP
    /* Thread-progress state used to decide when dequeued elements may
     * safely be reclaimed. */
    q->head.next.thr_progress = erts_thr_progress_current();
    q->head.next.thr_progress_reached = 1;
#endif
    q->head.next.um_refc_ix = 1;
    q->head.next.unref_end = &q->tail.data.marker;
    q->head.used_marker = 1;	/* marker is currently in the queue */
    q->head.arg = qi->arg;
    q->head.notify = q->tail.data.notify;
    q->q.finalizing = 0;
    q->q.live = qi->live.queue;
    q->q.blk = NULL;
#endif
}
コード例 #4
0
ファイル: erl_thr_progress.c プロジェクト: Bufias/otp
/*
 * Initialize the thread-progress subsystem for `managed` managed
 * threads (including `no_schedulers` schedulers) and `unmanaged`
 * unmanaged threads.
 *
 * All state is carved out of one permanent, cache-line-aligned
 * allocation: the internal data struct, the callback arrays, the
 * per-managed-thread progress array, and the managed/unmanaged wakeup
 * request data (ERTS_THR_PRGR_WAKEUP_DATA_SIZE slots each).  The
 * layout of `ptr` advances below must stay in sync with the size
 * computation above it.
 */
void
erts_thr_progress_init(int no_schedulers, int managed, int unmanaged)
{
    int i, j, um_low, um_high;
    char *ptr;
    size_t cb_sz, intrnl_sz, thr_arr_sz, m_wakeup_size, um_wakeup_size,
	tot_size;

    /* --- Compute sizes of each sub-region, each cache-line aligned. --- */
    intrnl_sz = sizeof(ErtsThrPrgrInternalData);
    intrnl_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(intrnl_sz);

    cb_sz = sizeof(ErtsThrPrgrCallbacks)*(managed+unmanaged);
    cb_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(cb_sz);

    thr_arr_sz = sizeof(ErtsThrPrgrArray)*managed;
    ASSERT(thr_arr_sz == ERTS_ALC_CACHE_LINE_ALIGN_SIZE(thr_arr_sz));

    /* Managed wakeup data: a header plus one int per managed thread
     * (the struct already contains the first int). */
    m_wakeup_size = sizeof(ErtsThrPrgrManagedWakeupData);
    m_wakeup_size += (managed - 1)*sizeof(int);
    m_wakeup_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(m_wakeup_size);

    /* Unmanaged wakeups use a two-level bitmap: `um_low` low-level
     * words cover all unmanaged threads, `um_high` high-level words
     * cover the low-level words. */
    um_low = (unmanaged - 1)/ERTS_THR_PRGR_BM_BITS + 1;
    um_high = (um_low - 1)/ERTS_THR_PRGR_BM_BITS + 1;

    um_wakeup_size = sizeof(ErtsThrPrgrUnmanagedWakeupData);
    um_wakeup_size += (um_high + um_low)*sizeof(erts_atomic32_t);
    um_wakeup_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(um_wakeup_size);

    tot_size = intrnl_sz;
    tot_size += cb_sz;
    tot_size += thr_arr_sz;
    tot_size += m_wakeup_size*ERTS_THR_PRGR_WAKEUP_DATA_SIZE;
    tot_size += um_wakeup_size*ERTS_THR_PRGR_WAKEUP_DATA_SIZE;

    ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_THR_PRGR_IDATA,
					     tot_size);

    /* --- Region 1: internal data struct. --- */
    intrnl = (ErtsThrPrgrInternalData *) ptr;
    ptr += intrnl_sz;

    erts_atomic32_init_nob(&intrnl->misc.data.lflgs,
			   ERTS_THR_PRGR_LFLG_NO_LEADER);
    erts_atomic32_init_nob(&intrnl->misc.data.block_count,
			   (ERTS_THR_PRGR_BC_FLG_NOT_BLOCKING
			    | (erts_aint32_t) managed));
    erts_atomic_init_nob(&intrnl->misc.data.blocker_event, ERTS_AINT_NULL);
    erts_atomic32_init_nob(&intrnl->misc.data.pref_wakeup_used, 0);
    erts_atomic32_init_nob(&intrnl->misc.data.managed_count, 0);
    /* Non-scheduler managed threads get ids starting after the
     * scheduler ids. */
    erts_atomic32_init_nob(&intrnl->misc.data.managed_id, no_schedulers);
    erts_atomic32_init_nob(&intrnl->misc.data.unmanaged_id, -1);
    intrnl->misc.data.chk_next_ix = 0;
    intrnl->misc.data.umrefc_ix.waiting = -1;
    erts_atomic32_init_nob(&intrnl->misc.data.umrefc_ix.current, 0);

    erts_atomic_init_nob(&intrnl->umrefc[0].refc, (erts_aint_t) 0);
    erts_atomic_init_nob(&intrnl->umrefc[1].refc, (erts_aint_t) 0);

    /* --- Region 2: per-managed-thread progress values. --- */
    intrnl->thr = (ErtsThrPrgrArray *) ptr;
    ptr += thr_arr_sz;
    for (i = 0; i < managed; i++)
	init_nob(&intrnl->thr[i].data.current, 0);

    /* --- Region 3: callback arrays; unmanaged callbacks directly
     * follow the managed ones in the same region. --- */
    intrnl->managed.callbacks = (ErtsThrPrgrCallbacks *) ptr;
    intrnl->unmanaged.callbacks = &intrnl->managed.callbacks[managed];
    ptr += cb_sz;

    intrnl->managed.no = managed;
    for (i = 0; i < managed; i++) {
	intrnl->managed.callbacks[i].arg = NULL;
	intrnl->managed.callbacks[i].wakeup = NULL;
    }

    intrnl->unmanaged.no = unmanaged;
    for (i = 0; i < unmanaged; i++) {
	intrnl->unmanaged.callbacks[i].arg = NULL;
	intrnl->unmanaged.callbacks[i].wakeup = NULL;
    }

    /* --- Region 4: managed wakeup request slots. --- */
    for (i = 0; i < ERTS_THR_PRGR_WAKEUP_DATA_SIZE; i++) {
	intrnl->managed.data[i] = (ErtsThrPrgrManagedWakeupData *) ptr;
	erts_atomic32_init_nob(&intrnl->managed.data[i]->len, 0);
	ptr += m_wakeup_size;
    }

    /* --- Region 5: unmanaged wakeup bitmaps (high words first,
     * then low words, within each slot). --- */
    for (i = 0; i < ERTS_THR_PRGR_WAKEUP_DATA_SIZE; i++) {
	erts_atomic32_t *bm;
	intrnl->unmanaged.data[i] = (ErtsThrPrgrUnmanagedWakeupData *) ptr;
	erts_atomic32_init_nob(&intrnl->unmanaged.data[i]->len, 0);
	bm = (erts_atomic32_t *) (ptr + sizeof(ErtsThrPrgrUnmanagedWakeupData));
	intrnl->unmanaged.data[i]->high = bm;
	intrnl->unmanaged.data[i]->high_sz = um_high;
	for (j = 0; j < um_high; j++)
	    erts_atomic32_init_nob(&intrnl->unmanaged.data[i]->high[j], 0);
	intrnl->unmanaged.data[i]->low
	    = &intrnl->unmanaged.data[i]->high[um_high];
	intrnl->unmanaged.data[i]->low_sz = um_low;
	for (j = 0; j < um_low; j++)
	    erts_atomic32_init_nob(&intrnl->unmanaged.data[i]->low[j], 0);
	ptr += um_wakeup_size;
    }
    /* Make all of the above visible to other threads before anyone
     * starts using the subsystem. */
    ERTS_THR_MEMORY_BARRIER;
}
コード例 #5
0
ファイル: erl_bif_ddll.c プロジェクト: aronisstav/otp
/*
 * Open the shared object at `path`, run its driver-init function and,
 * if the resulting driver entry is version compatible and carries the
 * expected driver name `name`, register it with the emulator.
 *
 * Returns ERL_DE_NO_ERROR on success.  On any failure the dynamic
 * library is closed again and the corresponding ERL_DE_* error code
 * is returned.  Caller must hold the driver list rwlock (write mode).
 */
static int do_load_driver_entry(DE_Handle *dh, char *path, char *name)
{
    ErlDrvEntry *dp;
    void *init_handle;
    int res;

    assert_drv_list_rwlocked();

    res = erts_sys_ddll_open(path, &(dh->handle), NULL);
    if (res != ERL_DE_NO_ERROR)
	return res;		/* nothing opened; nothing to clean up */

    res = erts_sys_ddll_load_driver_init(dh->handle, &init_handle);
    if (res != ERL_DE_NO_ERROR) {
	res = ERL_DE_LOAD_ERROR_NO_INIT;
	goto error;
    }

    dp = erts_sys_ddll_call_init(init_handle);
    if (dp == NULL) {
	res = ERL_DE_LOAD_ERROR_FAILED_INIT;
	goto error;
    }

    if (dp->extended_marker != ERL_DRV_EXTENDED_MARKER) {
	/* Old (pre-extended) driver; needs to be recompiled... */
	res = ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
	goto error;
    }
    if (dp->major_version < ERL_DRV_MIN_REQUIRED_MAJOR_VERSION_ON_LOAD
	|| ERL_DRV_EXTENDED_MAJOR_VERSION < dp->major_version
	|| (ERL_DRV_EXTENDED_MAJOR_VERSION == dp->major_version
	    && ERL_DRV_EXTENDED_MINOR_VERSION < dp->minor_version)) {
	/* Incompatible driver version */
	res = ERL_DE_LOAD_ERROR_INCORRECT_VERSION;
	goto error;
    }

    if (strcmp(name, dp->driver_name) != 0) {
	res = ERL_DE_LOAD_ERROR_BAD_NAME;
	goto error;
    }

    /* Accepted: fill in the handle state for a freshly loaded driver. */
    erts_atomic_init_nob(&(dh->refc), (erts_aint_t) 0);
    erts_atomic32_init_nob(&dh->port_count, 0);
    dh->full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(path) + 1);
    sys_strcpy(dh->full_path, path);
    dh->flags = 0;
    dh->status = ERL_DE_OK;

    if (erts_add_driver_entry(dp, dh, 1) != 0 /* io.c */) {
	/*
	 * The init in the driver struct did not return 0 
	 */
	erts_free(ERTS_ALC_T_DDLL_HANDLE, dh->full_path);
	dh->full_path = NULL;
	res = ERL_DE_LOAD_ERROR_FAILED_INIT;
	goto error;
    }
    return ERL_DE_NO_ERROR;

error:
    erts_sys_ddll_close(dh->handle);
    return res;
}
コード例 #6
0
/*
 * Create a scheduler/thread-specific pre-allocated block pool.
 *
 * `blk_sz` is the size of one block, `pa_size` the requested total
 * number of pre-allocated blocks.  When `name` is NULL the pool is
 * the schedulers-only variant (one chunk per scheduler); otherwise it
 * serves `nthreads` threads, identified via a TSD key.  The whole
 * pool (data header + one chunk per thread) lives in a single
 * permanent, cache-line-aligned allocation.
 */
erts_sspa_data_t *
erts_sspa_create(size_t blk_sz, int pa_size, int nthreads, const char* name)
{
    erts_sspa_data_t *data;
    size_t tot_size;
    size_t chunk_mem_size;
    char *p;
    char *chunk_start;
    int cix;
    int no_blocks = pa_size;
    int no_blocks_per_chunk;
    size_t aligned_blk_sz;

#if !defined(ERTS_STRUCTURE_ALIGNED_ALLOC)
    /* Force 64-bit alignment... */
    aligned_blk_sz = ((blk_sz - 1) / 8) * 8 + 8;
#else
    /* Alignment of structure is enough... */
    aligned_blk_sz = blk_sz;
#endif

    if (!name) { /* schedulers only variant */
        ASSERT(!nthreads);
        nthreads = erts_no_schedulers;
    }
    else {
        ASSERT(nthreads > 0);
    }

    /* Distribute the requested blocks over the threads, padding with
     * ~25% extra per thread so each chunk has headroom. */
    if (nthreads == 1)
	no_blocks_per_chunk = no_blocks;
    else {
	int extra = (no_blocks - 1)/4 + 1;
	if (extra == 0)
	    extra = 1;
	no_blocks_per_chunk = no_blocks;
	no_blocks_per_chunk += extra * nthreads;
	no_blocks_per_chunk /= nthreads;
    }
    no_blocks = no_blocks_per_chunk * nthreads;
    /* One chunk = aligned header + its blocks, cache-line aligned. */
    chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_chunk_header_t));
    chunk_mem_size += aligned_blk_sz * no_blocks_per_chunk;
    chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(chunk_mem_size);
    tot_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
    tot_size += chunk_mem_size * nthreads;

    p = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_PRE_ALLOC_DATA, tot_size);
    data = (erts_sspa_data_t *) p;
    p += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
    chunk_start = p;

    data->chunks_mem_size = chunk_mem_size;
    data->start = chunk_start;
    data->end = chunk_start + chunk_mem_size * nthreads;
    data->nthreads = nthreads;

    if (name) { /* thread variant */
        erts_tsd_key_create(&data->tsd_key, (char*)name);
        erts_atomic_init_nob(&data->id_generator, 0);
    }

    /* Initialize all chunks */
    for (cix = 0; cix < nthreads; cix++) {
	erts_sspa_chunk_t *chnk = erts_sspa_cix2chunk(data, cix);
	erts_sspa_chunk_header_t *chdr = &chnk->aligned.header;
	erts_sspa_blk_t *blk;
	int i;

	/* Remote-free queue (tail end): starts containing only the
	 * embedded marker element. */
	erts_atomic_init_nob(&chdr->tail.data.last, (erts_aint_t) &chdr->tail.data.marker);
	erts_atomic_init_nob(&chdr->tail.data.marker.next_atmc, ERTS_AINT_NULL);
	erts_atomic_init_nob(&chdr->tail.data.um_refc[0], 0);
	erts_atomic_init_nob(&chdr->tail.data.um_refc[1], 0);
	erts_atomic32_init_nob(&chdr->tail.data.um_refc_ix, 0);

	chdr->head.no_thr_progress_check = 0;
	chdr->head.used_marker = 1;
	chdr->head.first = &chdr->tail.data.marker;
	chdr->head.unref_end = &chdr->tail.data.marker;
	chdr->head.next.thr_progress = erts_thr_progress_current();
	chdr->head.next.thr_progress_reached = 1;
	chdr->head.next.um_refc_ix = 1;
	chdr->head.next.unref_end = &chdr->tail.data.marker;

	/* Thread the chunk's blocks into the local free list. */
	p = &chnk->data[0];
	chdr->local.first = (erts_sspa_blk_t *) p;
	blk = (erts_sspa_blk_t *) p;
	for (i = 0; i < no_blocks_per_chunk; i++) {
	    blk = (erts_sspa_blk_t *) p;
	    p += aligned_blk_sz;
	    blk->next_ptr = (erts_sspa_blk_t *) p;
	}

	blk->next_ptr = NULL;	/* last block terminates the list */
	chdr->local.last = blk;
	chdr->local.cnt = no_blocks_per_chunk;
	chdr->local.lim = no_blocks_per_chunk / 3;

	ERTS_SSPA_DBG_CHK_LCL(chdr);
    }

    return data;
}
コード例 #7
0
/*
 * Create a scheduler-specific pre-allocated block pool (older,
 * schedulers-only variant).
 *
 * `blk_sz` is the size of one block, `pa_size` the requested total
 * number of pre-allocated blocks.  The pool (data header + one chunk
 * per scheduler) lives in a single permanent, cache-line-aligned
 * allocation.
 *
 * NOTE(review): unlike the newer variant, `blk_sz` is used without
 * explicit 64-bit alignment here — presumably callers pass an already
 * suitably aligned size; confirm against call sites.
 */
erts_sspa_data_t *
erts_sspa_create(size_t blk_sz, int pa_size)
{
    erts_sspa_data_t *data;
    size_t tot_size;
    size_t chunk_mem_size;
    char *p;
    char *chunk_start;
    int cix;
    int no_blocks = pa_size;
    int no_blocks_per_chunk;

    /* Distribute the requested blocks over the schedulers, padding
     * with ~25% extra per scheduler so each chunk has headroom. */
    if (erts_no_schedulers == 1)
	no_blocks_per_chunk = no_blocks;
    else {
	int extra = (no_blocks - 1)/4 + 1;
	if (extra == 0)
	    extra = 1;
	no_blocks_per_chunk = no_blocks;
	no_blocks_per_chunk += extra*erts_no_schedulers;
	no_blocks_per_chunk /= erts_no_schedulers;
    }
    no_blocks = no_blocks_per_chunk * erts_no_schedulers;
    /* One chunk = aligned header + its blocks, cache-line aligned. */
    chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_chunk_header_t));
    chunk_mem_size += blk_sz * no_blocks_per_chunk;
    chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(chunk_mem_size);
    tot_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
    tot_size += chunk_mem_size*erts_no_schedulers;

    p = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_PRE_ALLOC_DATA, tot_size);
    data = (erts_sspa_data_t *) p;
    p += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t));
    chunk_start = p;

    data->chunks_mem_size = chunk_mem_size;
    data->start = chunk_start;
    data->end = chunk_start + chunk_mem_size*erts_no_schedulers;

    /* Initialize all chunks */
    for (cix = 0; cix < erts_no_schedulers; cix++) {
	erts_sspa_chunk_t *chnk = erts_sspa_cix2chunk(data, cix);
	erts_sspa_chunk_header_t *chdr = &chnk->aligned.header;
	erts_sspa_blk_t *blk;
	int i;

	/* Remote-free queue (tail end): starts containing only the
	 * embedded marker element. */
	erts_atomic_init_nob(&chdr->tail.data.last, (erts_aint_t) &chdr->tail.data.marker);
	erts_atomic_init_nob(&chdr->tail.data.marker.next_atmc, ERTS_AINT_NULL);
	erts_atomic_init_nob(&chdr->tail.data.um_refc[0], 0);
	erts_atomic_init_nob(&chdr->tail.data.um_refc[1], 0);
	erts_atomic32_init_nob(&chdr->tail.data.um_refc_ix, 0);

	chdr->head.no_thr_progress_check = 0;
	chdr->head.used_marker = 1;
	chdr->head.first = &chdr->tail.data.marker;
	chdr->head.unref_end = &chdr->tail.data.marker;
	chdr->head.next.thr_progress = erts_thr_progress_current();
	chdr->head.next.thr_progress_reached = 1;
	chdr->head.next.um_refc_ix = 1;
	chdr->head.next.unref_end = &chdr->tail.data.marker;

	/* Thread the chunk's blocks into the local free list. */
	p = &chnk->data[0];
	chdr->local.first = (erts_sspa_blk_t *) p;
	blk = (erts_sspa_blk_t *) p;
	for (i = 0; i < no_blocks_per_chunk; i++) {
	    blk = (erts_sspa_blk_t *) p;
	    p += blk_sz;
	    blk->next_ptr = (erts_sspa_blk_t *) p;
	}

	blk->next_ptr = NULL;	/* last block terminates the list */
	chdr->local.last = blk;
	chdr->local.cnt = no_blocks_per_chunk;
	chdr->local.lim = no_blocks_per_chunk / 3;

	ERTS_SSPA_DBG_CHK_LCL(chdr);
    }

    return data;
}
コード例 #8
0
ファイル: sys.c プロジェクト: JeromeDeBretagne/otp
/*
 * Early system initialization, run before most of the emulator is up:
 * sets up thread-library hooks, lock counting/checking, system time
 * support, break/crash-dump flags, and guarantees that file
 * descriptors 0-2 are open (plus one spare fd reserved for crash-dump
 * writing).
 */
void
erts_sys_pre_init(void)
{
    erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;

    erts_printf_add_cr_to_stdout = 1;
    erts_printf_add_cr_to_stderr = 1;

    /* Hooks run around thread creation. */
    eid.thread_create_child_func = thr_create_prepare_child;
    /* Before creation in parent */
    eid.thread_create_prepare_func = thr_create_prepare;
    /* After creation in parent */
    /* BUGFIX: this assignment previously ended with a comma, silently
     * forming a comma expression with the next statement.  It happened
     * to behave identically, but it was an accident waiting to bite. */
    eid.thread_create_parent_func = thr_create_cleanup;

#ifdef ERTS_ENABLE_LOCK_COUNT
    erts_lcnt_pre_thr_init();
#endif

    erts_thr_init(&eid);

#ifdef ERTS_ENABLE_LOCK_COUNT
    erts_lcnt_post_thr_init();
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_init();
#endif

    erts_init_sys_time_sup();

    erts_atomic32_init_nob(&erts_break_requested, 0);
    erts_atomic32_init_nob(&have_prepared_crash_dump, 0);

    erts_atomic_init_nob(&sys_misc_mem_sz, 0);

    {
      /*
       * Unfortunately we depend on fd 0,1,2 in the old shell code.
       * So if for some reason we do not have those open when we start
       * we have to open them here. Not doing this can cause the emulator
       * to deadlock when reaping the fd_driver ports :(
       */
      int fd;
      /* Make sure fd 0 is open */
      if ((fd = open("/dev/null", O_RDONLY)) != 0)
	close(fd);
      /* Make sure fds 1 and 2 are open */
      while (fd < 3) {
	fd = open("/dev/null", O_WRONLY);
      }
      close(fd);
    }

    /* We need a file descriptor to close in the crashdump creation.
     * We close this one to be sure we can get a fd for our real file ...
     * so, we create one here ... a stone to carry all the way home.
     */

    crashdump_companion_cube_fd = open("/dev/null", O_RDONLY);

    /* don't lose it, there will be cake */
}