Example #1
static void *
xrealloc(void *ptr, size_t size)
{
	if ((ptr = realloc(ptr, size)) == NULL)
		NOMEM();
	return (ptr);
}
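
Every example on this page calls a NOMEM() macro that its codebase defines elsewhere. As a rough sketch only, a hypothetical minimal definition for building the snippets standalone might look like this (the real macros differ per project: the BSD-style x* wrappers abort, the JIT arena allocator throws, and HAL's variant logs the formatted message and returns -ENOMEM from the enclosing function; the empty-__VA_ARGS__ form relies on a common compiler extension, standardized in C23):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for NOMEM(); not any project's actual macro.
 * Accepts an optional printf-style message, reports it, and aborts. */
#define NOMEM(...)                                             \
	do {                                                   \
		fprintf(stderr, "out of memory " __VA_ARGS__); \
		fputc('\n', stderr);                           \
		abort();                                       \
	} while (0)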
Example #2
//------------------------------------------------------------------------
// ArenaAllocator::allocateMemory:
//    Allocates memory using an `ArenaAllocator`.
//
// Arguments:
//    size - The number of bytes to allocate.
//
// Return Value:
//    A pointer to the allocated memory.
//
// Note:
//    This is the DEBUG-only version of `allocateMemory`; the release
//    version of this method is defined in the corresponding header file.
//    This version of the method has some abilities that the release
//    version does not: it may inject faults into the allocator, and it
//    seeds all allocations with a specified pattern to help catch
//    use-before-init problems.
void* ArenaAllocator::allocateMemory(size_t size)
{
    assert(size != 0 && (size & (sizeof(int) - 1)) == 0);

    // Ensure that we always allocate in pointer sized increments.
    size = (size_t)roundUp(size, sizeof(size_t));

    if (JitConfig.ShouldInjectFault() != 0)
    {
        // Force the underlying memory allocator (either the OS or the CLR host)
        // to allocate the memory. Any fault injection will kick in.
        void* p = ClrAllocInProcessHeap(0, S_SIZE_T(1));
        if (p != nullptr)
        {
            ClrFreeInProcessHeap(0, p);
        }
        else 
        {
            NOMEM();  // Throw!
        }
    }

    void* block = m_nextFreeByte;
    m_nextFreeByte += size;

    if (m_nextFreeByte > m_lastFreeByte)
    {
        block = allocateNewPage(size);
    }

    memset(block, UninitializedWord<char>(), size);
    return block;
}
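
The fast path above is a bump-pointer arena: an allocation normally just advances a cursor, and only overflowing the current page falls through to allocateNewPage (Example #8). A minimal self-contained sketch of the same idea, with the overflow check done before the bump so the sketch stays well-defined; all names are illustrative and page reclamation is omitted:

#include <stdint.h>
#include <stdlib.h>

/* Illustrative bump-pointer arena, not the JIT's implementation. */
typedef struct {
    uint8_t *next;  /* first free byte in the current page  */
    uint8_t *last;  /* one past the end of the current page */
} arena_t;          /* initialize with: arena_t a = {0, 0}; */

static void *arena_grow(arena_t *a, size_t size)
{
    /* Slow path: install a fresh page big enough for the request.
     * (A real arena links its pages so they can be freed later.) */
    size_t page = size > 4096 ? size : 4096;
    uint8_t *p = malloc(page);
    if (p == NULL)
        abort();                     /* where the real code calls NOMEM() */
    a->next = p + size;
    a->last = p + page;
    return p;
}

static void *arena_alloc(arena_t *a, size_t size)
{
    /* Keep every allocation pointer-aligned, as in the code above. */
    size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);

    if ((size_t)(a->last - a->next) < size)
        return arena_grow(a, size);  /* page overflow */

    void *block = a->next;           /* fast path: bump the cursor */
    a->next += size;
    return block;
}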
Example #3
static char *
xstrdup(const char *str)
{
	char *nstr;

	if ((nstr = strdup(str)) == NULL)
		NOMEM();
	return (nstr);
}
Example #4
static void *
xmalloc(size_t len)
{
	void   *p;

	if ((p = malloc(len)) == NULL)
		NOMEM();
	return (p);
}
Example #5
void    *           norls_allocator::nraAlloc(size_t sz)
{
    void    *   block;

    assert(sz != 0 && (sz & (sizeof(int) - 1)) == 0);
#ifdef _WIN64
    // Ensure that we always allocate in pointer-sized increments.
    /* TODO-Cleanup:
     * This is wasteful.  We should add alignment requirements to the allocations so we don't waste space in
     * the heap.
     */
    sz = (unsigned)roundUp(sz, sizeof(size_t));
#endif

#ifdef DEBUG
    if (nraShouldInjectFault)
    {
        // Force the underlying memory allocator (either the OS or the CLR host)
        // to allocate the memory. Any fault injection will kick in.
        void * p = DbgNew(1); 
        if (p) 
        {
            DbgDelete(p);
        }
        else 
        {
            NOMEM();  // Throw!
        }
    }
#endif    

    block = nraFreeNext;
    nraFreeNext += sz;

    if  ((size_t)block == blockStop) debugStop("Block at %p allocated", block);

    if  (nraFreeNext > nraFreeLast)
        block = nraAllocNewPage(sz);

#ifdef DEBUG
    memset(block, UninitializedWord<char>(), sz);
#endif

    return  block;
}
Example #6
int hal_inst_create(const char *name, const int comp_id, const int size,
		    void **inst_data)
{
    CHECK_HALDATA();
    CHECK_STR(name);

    {
	hal_inst_t *inst  __attribute__((cleanup(halpr_autorelease_mutex)));
	hal_comp_t *comp;
	void *m = NULL;
	rtapi_mutex_get(&(hal_data->mutex));

	// comp must exist
	if ((comp = halpr_find_comp_by_id(comp_id)) == 0) {
	    HALERR("comp %d not found", comp_id);
	    return -ENOENT;
	}

	// inst may not exist
	if ((inst = halpr_find_inst_by_name(name)) != NULL) {
	    HALERR("instance '%s' already exists", name);
	    return -EEXIST;
	}

	if (size > 0) {
	    m = shmalloc_up(size);
	    if (m == NULL)
		NOMEM("instance %s: can't allocate %d bytes", name, size);
	    memset(m, 0, size);
	}

	// allocate instance descriptor
	if ((inst = alloc_inst_struct()) == NULL)
	    NOMEM("instance '%s'", name);

	inst->comp_id = comp->comp_id;
	inst->inst_id = rtapi_next_handle();
	inst->inst_data_ptr = SHMOFF(m);

	inst->inst_size = size;
	rtapi_snprintf(inst->name, sizeof(inst->name), "%s", name);


	HALDBG("%s: creating instance '%s' size %d at ^ %d/%p base=%p",
#ifdef RTAPI
	       "rtapi",
#else
	       "ulapi",
#endif
	       name, size, inst->inst_data_ptr, m,  hal_shmem_base);

	// if not NULL, pass pointer to blob
	if (inst_data)
	    *(inst_data) = m;

	// make it visible
	inst->next_ptr = hal_data->inst_list_ptr;
	hal_data->inst_list_ptr = SHMOFF(inst);

	return inst->inst_id;
    }
}
Example #7
void    *   norls_allocator::nraAllocNewPage(size_t sz)
{
    norls_pagdesc * newPage;
    size_t          sizPage;

    size_t          realSize = sz + sizeof(norls_pagdesc);
    if (realSize < sz) 
        NOMEM();   // Integer overflow

    /* Do we have a page that's now full? */

    if  (nraPageLast)
    {
        /* Undo the "+=" done in nraAlloc() */

        nraFreeNext -= sz;

        /* Save the actual used size of the page */

        nraPageLast->nrpUsedSize = nraFreeNext - nraPageLast->nrpContents;
    }

    /* Make sure we grab enough to satisfy the allocation request */

    sizPage = nraPageSize;

    if  (sizPage < realSize)
    {
        /* The allocation doesn't fit in a default-sized page */

#ifdef  DEBUG
//      if  (nraPageLast) printf("NOTE: wasted %u bytes in last page\n", nraPageLast->nrpPageSize - nraPageLast->nrpUsedSize);
#endif

        sizPage = realSize;
    }

    /* Round to the nearest multiple of OS page size */

    if (!nraDirectAlloc())
    {
        sizPage +=  (DEFAULT_PAGE_SIZE - 1);
        sizPage &= ~(DEFAULT_PAGE_SIZE - 1);
    }

    /* Allocate the new page */

    newPage = (norls_pagdesc *)nraVirtualAlloc(0, sizPage, MEM_COMMIT, PAGE_READWRITE);
    if  (!newPage)
        NOMEM();

#ifdef DEBUG
    newPage->nrpSelfPtr = newPage;
#endif

    /* Append the new page to the end of the list */

    newPage->nrpNextPage = 0;
    newPage->nrpPageSize = sizPage;
    newPage->nrpPrevPage = nraPageLast;
    newPage->nrpUsedSize = 0;  // nrpUsedSize is meaningless until a new page is allocated.
                               // Instead of letting it contain garbage (which would only
                               // confuse us), set it to zero.

    if  (nraPageLast)
        nraPageLast->nrpNextPage = newPage;
    else
        nraPageList              = newPage;
    nraPageLast = newPage;

    /* Set up the 'next' and 'last' pointers */

    nraFreeNext = newPage->nrpContents + sz;
    nraFreeLast = newPage->nrpPageSize + (BYTE *)newPage;

    assert(nraFreeNext <= nraFreeLast);

    return  newPage->nrpContents;
}
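
The += / &= ~ pair above ("Round to the nearest multiple of OS page size") is the standard trick for rounding up to a multiple of a power of two; it works only because DEFAULT_PAGE_SIZE is a power of two, so the mask ~(DEFAULT_PAGE_SIZE - 1) clears exactly the low bits. A tiny self-check (the function name is illustrative):

#include <assert.h>
#include <stddef.h>

/* Round n up to the next multiple of align; align must be a power of two. */
static size_t round_up_pow2(size_t n, size_t align)
{
    return (n + (align - 1)) & ~(align - 1);
}

int main(void)
{
    assert(round_up_pow2(1,    4096) == 4096);
    assert(round_up_pow2(4096, 4096) == 4096);
    assert(round_up_pow2(4097, 4096) == 8192);
    return 0;
}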
Example #8
//------------------------------------------------------------------------
// ArenaAllocator::allocateNewPage:
//    Allocates a new arena page.
//
// Arguments:
//    size - The number of bytes that were requested by the allocation
//           that triggered this request to allocate a new arena page.
//
// Return Value:
//    A pointer to the first usable byte of the newly allocated page.
void* ArenaAllocator::allocateNewPage(size_t size)
{
    size_t pageSize = sizeof(PageDescriptor) + size;

    // Check for integer overflow
    if (pageSize < size)
    {
        NOMEM();
    }

    // If the current page is now full, update a few statistics
    if (m_lastPage != nullptr)
    {
        // Undo the "+=" done in allocateMemory()
        m_nextFreeByte -= size;

        // Save the actual used size of the page
        m_lastPage->m_usedBytes = m_nextFreeByte - m_lastPage->m_contents;
    }

    // Round up to a default-sized page if necessary
    if (pageSize <= s_defaultPageSize)
    {
        pageSize = s_defaultPageSize;
    }

    // Round to the nearest multiple of OS page size if necessary
    if (!bypassHostAllocator())
    {
        pageSize = roundUp(pageSize, DEFAULT_PAGE_SIZE);
    }

    // Allocate the new page
    PageDescriptor* newPage = (PageDescriptor*)allocateHostMemory(pageSize);
    if (newPage == nullptr)
    {
        NOMEM();
    }

    // Append the new page to the end of the list
    newPage->m_next = nullptr;
    newPage->m_pageBytes = pageSize;
    newPage->m_previous = m_lastPage;
    newPage->m_usedBytes = 0;  // m_usedBytes is meaningless until a new page is allocated.
                               // Instead of letting it contain garbage (which would only
                               // confuse us), set it to zero.

    if (m_lastPage != nullptr)
    {
        m_lastPage->m_next = newPage;
    }
    else
    {
        m_firstPage = newPage;
    }

    m_lastPage = newPage;

    // Adjust the next/last free byte pointers
    m_nextFreeByte = newPage->m_contents + size;
    m_lastFreeByte = (BYTE*)newPage + pageSize;
    assert((m_lastFreeByte - m_nextFreeByte) >= 0);

    return newPage->m_contents;
}
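
Both this version and the older one in Example #7 detect integer overflow with a comparison after the addition: unsigned arithmetic wraps, so a + b overflowed exactly when the sum is smaller than either operand. The same test in isolation (the function name is illustrative):

#include <stdbool.h>
#include <stddef.h>

/* a + b wraps modulo (SIZE_MAX + 1), so the sum overflowed iff the
 * result is smaller than an operand - the test behind
 * `if (pageSize < size) NOMEM();` above. */
static bool add_would_overflow(size_t a, size_t b)
{
    return a + b < a;
}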
Example #9
int hal_ring_new(const char *name, int size, int sp_size, int mode)
{
    hal_ring_t *rbdesc;
    int *prev, next, cmp, retval;
    int ring_id;
    ringheader_t *rhptr;

    CHECK_HALDATA();
    CHECK_STRLEN(name, HAL_NAME_LEN);
    CHECK_LOCK(HAL_LOCK_LOAD);

    {
	hal_ring_t *ptr __attribute__((cleanup(halpr_autorelease_mutex)));

	rtapi_mutex_get(&(hal_data->mutex));

	// make sure no such ring name already exists
	if ((ptr = halpr_find_ring_by_name(name)) != 0) {
	    HALERR("ring '%s' already exists", name);
	    return -EEXIST;
	}
	// allocate a new ring id - needed since we don't track ring shm
	// segments in RTAPI
	if ((ring_id = next_ring_id()) < 0) {
	    HALERR("cant allocate new ring id for '%s'", name);
	    return -ENOMEM;
	}

	// allocate a new ring descriptor
	if ((rbdesc = alloc_ring_struct()) == 0)
	    NOMEM("ring '%s'", name);

	rbdesc->handle = rtapi_next_handle();
	rbdesc->flags = mode;
	rbdesc->ring_id = ring_id;

	// make total allocation fit ringheader, ringbuffer and scratchpad
	rbdesc->total_size = ring_memsize( rbdesc->flags, size, sp_size);

	if (rbdesc->flags & ALLOC_HALMEM) {
	    void *ringmem = shmalloc_up(rbdesc->total_size);
	    if (ringmem == NULL)
		NOMEM("ring '%s' size %d - insufficient HAL memory for ring",
		      name, rbdesc->total_size);

	    rbdesc->ring_offset = SHMOFF(ringmem);
	    rhptr = ringmem;
	} else {
	    // allocate shared memory segment for ring and init
	    rbdesc->ring_shmkey = OS_KEY((RTAPI_RING_SHM_KEY + ring_id), rtapi_instance);

	    int shmid;

	    // allocate an RTAPI shm segment owned by HAL_LIB_xxx
	    if ((shmid = rtapi_shmem_new(rbdesc->ring_shmkey, lib_module_id,
					 rbdesc->total_size)) < 0)
		NOMEM("rtapi_shmem_new(0x%8.8x,%d) failed: %d",
		       rbdesc->ring_shmkey, lib_module_id,
		       rbdesc->total_size);

	    // map the segment now so we can fill in the ringheader details
	    if ((retval = rtapi_shmem_getptr(shmid,
					     (void **)&rhptr, 0)) < 0)
		NOMEM("rtapi_shmem_getptr for %d failed %d",
		       shmid, retval);
	}

	HALDBG("created ring '%s' in %s, total_size=%d",
	       name, (rbdesc->flags & ALLOC_HALMEM) ? "halmem" : "shm",
	       rbdesc->total_size);

	ringheader_init(rhptr, rbdesc->flags, size, sp_size);
	rhptr->refcount = 0; // on hal_ring_attach: increase; on hal_ring_detach: decrease
	rtapi_snprintf(rbdesc->name, sizeof(rbdesc->name), "%s", name);
	rbdesc->next_ptr = 0;

	// search list for 'name' and insert new structure
	prev = &(hal_data->ring_list_ptr);
	next = *prev;
	while (1) {
	    if (next == 0) {
		/* reached end of list, insert here */
		rbdesc->next_ptr = next;
		*prev = SHMOFF(rbdesc);
		return 0;
	    }
	    ptr = SHMPTR(next);
	    cmp = strcmp(ptr->name, rbdesc->name);
	    if (cmp > 0) {
		/* found the right place for it, insert here */
		rbdesc->next_ptr = next;
		*prev = SHMOFF(rbdesc);
		return 0;
	    }
	    /* didn't find it yet, look at next one */
	    prev = &(ptr->next_ptr);
	    next = *prev;
	}
	// automatic unlock by scope exit
    }
}
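
HAL keeps its descriptors in a shared-memory segment that each process may map at a different address, which is why links such as next_ptr and ring_offset hold byte offsets (SHMOFF/SHMPTR) instead of raw pointers, with offset 0 doubling as the end-of-list marker. A hypothetical minimal version of the scheme (none of these definitions are HAL's actual ones):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Each process sets shm_base after mapping the shared segment;
 * nodes then refer to one another by offset from that base. */
static uint8_t *shm_base;

#define SHMOFF(ptr) ((int)((uint8_t *)(ptr) - shm_base))
#define SHMPTR(off) ((void *)(shm_base + (off)))

typedef struct {
    int  next_ptr;   /* offset of the next node; 0 = end of list */
    char name[32];
} node_t;

/* Walk a list whose head is itself stored as an offset,
 * mirroring the insertion loop above. */
static node_t *find_by_name(int head_off, const char *name)
{
    for (int off = head_off; off != 0; ) {
        node_t *n = SHMPTR(off);
        if (strcmp(n->name, name) == 0)
            return n;
        off = n->next_ptr;
    }
    return NULL;
}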
Example #10
static int hal_export_xfunctfv(const hal_export_xfunct_args_t *xf, const char *fmt, va_list ap)
{
    int *prev, next, cmp, sz;
    hal_funct_t *nf, *fptr;
    char name[HAL_NAME_LEN + 1];

    CHECK_HALDATA();
    CHECK_LOCK(HAL_LOCK_LOAD);

    sz = rtapi_vsnprintf(name, sizeof(name), fmt, ap);
    if (sz == -1 || sz > HAL_NAME_LEN) {
        HALERR("length %d invalid for name starting '%s'", sz, name);
        return -ENOMEM;
    }

    HALDBG("exporting function '%s' type %d", name, xf->type);
    {
	hal_comp_t *comp  __attribute__((cleanup(halpr_autorelease_mutex)));

	/* get mutex before accessing shared data */
	rtapi_mutex_get(&(hal_data->mutex));

	comp = halpr_find_owning_comp(xf->owner_id);
	if (comp == 0) {
	    /* bad comp_id */
	    HALERR("funct '%s': owning component %d not found",
		   name, xf->owner_id);
	    return -EINVAL;
	}

	if (comp->type == TYPE_USER) {
	    /* not a realtime component */
	    HALERR("funct '%s': component %s/%d is not realtime (%d)",
		   name, comp->name, comp->comp_id, comp->type);
	    return -EINVAL;
	}

	bool legacy = (halpr_find_inst_by_id(xf->owner_id) == NULL);

	// instances may export functs post hal_ready
	if (legacy && (comp->state > COMP_INITIALIZING)) {
	    HALERR("funct '%s': called after hal_ready", name);
	    return -EINVAL;
	}
	/* allocate a new function structure */
	nf = alloc_funct_struct();
	if (nf == 0)
	    NOMEM("function '%s'", name);

	/* initialize the structure */
	nf->uses_fp = xf->uses_fp;
	nf->owner_id = xf->owner_id;
	nf->reentrant = xf->reentrant;
	nf->users = 0;
	nf->handle = rtapi_next_handle();
	nf->arg = xf->arg;
	nf->type = xf->type;
	nf->funct.l = xf->funct.l; // a bit of a cheat really
	rtapi_snprintf(nf->name, sizeof(nf->name), "%s", name);
	/* search list for 'name' and insert new structure */
	prev = &(hal_data->funct_list_ptr);
	next = *prev;
	while (1) {
	    if (next == 0) {
		/* reached end of list, insert here */
		nf->next_ptr = next;
		*prev = SHMOFF(nf);
		/* break out of loop and init the new function */
		break;
	    }
	    fptr = SHMPTR(next);
	    cmp = strcmp(fptr->name, nf->name);
	    if (cmp > 0) {
		/* found the right place for it, insert here */
		nf->next_ptr = next;
		*prev = SHMOFF(nf);
		/* break out of loop and init the new function */
		break;
	    }
	    if (cmp == 0) {
		/* name already in list, can't insert */
		free_funct_struct(nf);
		HALERR("duplicate function '%s'", name);
		return -EINVAL;
	    }
	    /* didn't find it yet, look at next one */
	    prev = &(fptr->next_ptr);
	    next = *prev;
	}
	// at this point we have a new function and can
	// yield the mutex by scope exit

    }
    /* init time logging variables; the mutex has already been released
       by the cleanup handler on scope exit above */
    nf->runtime = 0;
    nf->maxtime = 0;
    nf->maxtime_increased = 0;

    switch (xf->type) {
    case FS_LEGACY_THREADFUNC:
    case FS_XTHREADFUNC:
	/* create a pin with the function's runtime in it */
	if (hal_pin_s32_newf(HAL_OUT, &(nf->runtime), xf->owner_id, "%s.time",name)) {
	    HALERR("failed to create pin '%s.time'", name);
	    return -EINVAL;
	}
	*(nf->runtime) = 0;

	/* note that failure to successfully create the following params
	   does not cause the "export_funct()" call to fail - they are
	   for debugging and testing use only */
	/* create a parameter with the function's maximum runtime in it */
	nf->maxtime = 0;
	hal_param_s32_newf(HAL_RW,  &(nf->maxtime), xf->owner_id, "%s.tmax", name);

	/* create a parameter with the function's maximum runtime in it */
	nf->maxtime_increased = 0;
	hal_param_bit_newf(HAL_RO, &(nf->maxtime_increased), xf->owner_id,
			    "%s.tmax-inc", name);
	break;
    case FS_USERLAND: // no timing pins/params
	;
    }

    return 0;
}
Example #11
int hal_add_funct_to_thread(const char *funct_name,
			    const char *thread_name, int position)
{
    hal_funct_t *funct;
    hal_list_t *list_root, *list_entry;
    int n;
    hal_funct_entry_t *funct_entry;

    CHECK_HALDATA();
    CHECK_LOCK(HAL_LOCK_CONFIG);
    CHECK_STR(funct_name);
    CHECK_STR(thread_name);

    HALDBG("adding function '%s' to thread '%s'", funct_name, thread_name);
    {
	hal_thread_t *thread __attribute__((cleanup(halpr_autorelease_mutex)));

	/* get mutex before accessing data structures */
	rtapi_mutex_get(&(hal_data->mutex));

	/* make sure position is valid */
	if (position == 0) {
	    /* zero is not allowed */
	    HALERR("bad position: 0");
	    return -EINVAL;
	}

	/* search function list for the function */
	funct = halpr_find_funct_by_name(funct_name);
	if (funct == 0) {
	    HALERR("function '%s' not found", funct_name);
	    return -EINVAL;
	}
	// type-check the functions which go onto threads
	switch (funct->type) {
	case FS_LEGACY_THREADFUNC:
	case FS_XTHREADFUNC:
	    break;
	default:
	    HALERR("cant add type %d function '%s' "
		   "to a thread", funct->type, funct_name);
	    return -EINVAL;
	}
	/* found the function, is it available? */
	if ((funct->users > 0) && (funct->reentrant == 0)) {
	    HALERR("function '%s' may only be added "
		   "to one thread", funct_name);
	    return -EINVAL;
	}
	/* search thread list for thread_name */
	thread = halpr_find_thread_by_name(thread_name);
	if (thread == 0) {
	    /* thread not found */
	    HALERR("thread '%s' not found", thread_name);
	    return -EINVAL;
	}
#if 0
	/* ok, we have thread and function, are they compatible? */
	if ((funct->uses_fp) && (!thread->uses_fp)) {
	    HALERR("function '%s' needs FP", funct_name);
	    return -EINVAL;
	}
#endif
	/* find insertion point */
	list_root = &(thread->funct_list);
	list_entry = list_root;
	n = 0;
	if (position > 0) {
	    /* insertion is relative to start of list */
	    while (++n < position) {
		/* move further into list */
		list_entry = list_next(list_entry);
		if (list_entry == list_root) {
		    /* reached end of list */
		    HALERR("position '%d' is too high", position);
		    return -EINVAL;
		}
	    }
	} else {
	    /* insertion is relative to end of list */
	    while (--n > position) {
		/* move further into list */
		list_entry = list_prev(list_entry);
		if (list_entry == list_root) {
		    /* reached end of list */
		    HALERR("position '%d' is too low", position);
		    return -EINVAL;
		}
	    }
	    /* want to insert before list_entry, so back up one more step */
	    list_entry = list_prev(list_entry);
	}
	/* allocate a funct entry structure */
	funct_entry = alloc_funct_entry_struct();
	if (funct_entry == 0)
	    NOMEM("thread->function link");

	/* init struct contents */
	funct_entry->funct_ptr = SHMOFF(funct);
	funct_entry->arg = funct->arg;
	funct_entry->funct.l = funct->funct.l;
	funct_entry->type = funct->type;
	/* add the entry to the list */
	list_add_after((hal_list_t *) funct_entry, list_entry);
	/* update the function usage count */
	funct->users++;
    }
    return 0;
}
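
The position argument counts from the front of the list when positive and from the back when negative (zero is rejected above, and the circular list makes an out-of-range position detectable when the walk returns to list_root). With a thread whose list currently holds functs A, B, C, hypothetical calls would land as follows ("new" and "servo-thread" are made-up names):

hal_add_funct_to_thread("new", "servo-thread",  1);  /* [new, A, B, C] */
hal_add_funct_to_thread("new", "servo-thread",  2);  /* [A, new, B, C] */
hal_add_funct_to_thread("new", "servo-thread", -1);  /* [A, B, C, new] */
hal_add_funct_to_thread("new", "servo-thread", -2);  /* [A, B, new, C] */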