Ejemplo n.º 1
0
/*
 * alloc_sem() - allocate semaphores
 * @num - no. of semaphores to create
 *
 * Allocate and initialize semaphores in a shared memory area, so that
 * the semaphore can be used accross processes.
 *
 * RETURNS:
 * Array of initialized semaphores.
 */
sem_t *alloc_sem(int num)
{
	sem_t *sem;
	void *sem_mem;
	int i, ret;

	sem_mem = mmap(NULL, get_page_size(),
		       PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, 0, 0);
	if (sem_mem == MAP_FAILED) {
		tst_resm(TBROK | TERRNO, "allocation of semaphore page failed");
		goto err_exit;
	}

	sem = sem_mem;

	for (i = 0; i < num; i++) {
		ret = sem_init(&sem[i], 1, 0);
		if (ret == -1) {
			tst_resm(TBROK | TERRNO, "semaphore initialization "
				 "failed");
			goto err_free_mem;
		}
	}

	return sem;

err_free_mem:
	ret = munmap(sem_mem, get_page_size());
	if (ret == -1)
		tst_resm(TWARN | TERRNO, "error freeing semaphore memory");
err_exit:
	return NULL;
}
Ejemplo n.º 2
0
/*
 * alloc_shared_pages_on_node() - allocate shared pages on a NUMA node
 * @pages: array to store the allocated pages
 * @num: the no. of pages to allocate
 * @node: the node in which the pages should be allocated
 *
 * RETURNS:
 * 0 on success, -1 on allocation failure
 */
int alloc_shared_pages_on_node(void **pages, unsigned int num, int node)
{
#if HAVE_NUMA_H
	char *shared;
	unsigned int i;
	size_t total_size = num * get_page_size();

	/* POSIX requires fd == -1 for MAP_ANONYMOUS mappings. */
	shared = mmap(NULL, total_size,
		      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
		      -1, 0);
	if (shared == MAP_FAILED) {
		tst_resm(TBROK | TERRNO, "allocation of shared pages failed");
		return -1;
	}

	/* Bind the whole mapping to the requested node. */
	numa_tonode_memory(shared, total_size, node);

	for (i = 0; i < num; i++) {
		char *page;

		pages[i] = shared;
		shared += get_page_size();

		/* Touch the page to force allocation */
		page = pages[i];
		page[0] = i;
	}

	return 0;
#else
	return -1;
#endif
}
Ejemplo n.º 3
0
/* Kernel heap allocator. Small sizes come from power-of-two slab
   caches; anything larger than the biggest cache goes straight to the
   kernel virtual-memory allocator. Returns NULL on failure. */
void *kmalloc(unsigned sz) {
  /* We need to add a small header to the allocation to track which
     cache (if any) it came from. It must be a multiple of the pointer
     size in order that the address after it (which we will be returning)
     has natural alignment. */
  sz += sizeof(uintptr_t);

  uintptr_t *ptr;

  unsigned l2 = log2_roundup(sz);
  if (l2 < MIN_CACHESZ_LOG2) l2 = MIN_CACHESZ_LOG2;

  /* l2 >= MIN_CACHESZ_LOG2 always holds after the clamp above. */
  if (l2 <= MAX_CACHESZ_LOG2) {
    ptr = (uintptr_t*)slab_cache_alloc(&caches[l2-MIN_CACHESZ_LOG2]);
  } else {
    /* Get the size as the smallest power of 2 >= sz */
    unsigned sz_p2 = 1U << l2;
    if (sz_p2 < get_page_size()) {
      sz_p2 = get_page_size();
      l2 = log2_roundup(sz_p2);
    }

    ptr = (uintptr_t*)vmspace_alloc(&kernel_vmspace, sz_p2, 1);
  }

  /* Propagate allocation failure instead of dereferencing NULL. */
  if (!ptr)
    return NULL;

  /* Stash a canary plus the size class so the free path can recover
     which cache (if any) this block belongs to. */
  ptr[0] = (KMALLOC_CANARY << 8) | l2;
  return &ptr[1];
}
Ejemplo n.º 4
0
/* Map the counter's buffer into memory: N_PAGES of data plus one
 * extra page. Returns the mapped address, or propagates failure
 * through fail() and @err. */
static void *
map_buffer (counter_t *counter, GError **err)
{
    void *mapped;
    int data_bytes = N_PAGES * get_page_size();

    mapped = mmap (NULL, data_bytes + get_page_size(),
                   PROT_READ | PROT_WRITE, MAP_SHARED,
                   counter->fd, 0);

    if (mapped == MAP_FAILED)
      return fail (err, "mmap");

    return mapped;
}
Ejemplo n.º 5
0
/* Drain pending records from one perf event ring; each record is
 * handed to print_bpf_output(). On any terminating status the global
 * stop flag is raised so the caller's loop exits. */
static void
perf_event_read(struct event_ring_info *ring, void **buf, size_t *buf_len)
{
	size_t mmap_size = MMAP_PAGE_CNT * get_page_size();
	enum bpf_perf_event_ret ret;

	ret = bpf_perf_event_read_simple(ring->mem, mmap_size,
					 get_page_size(), buf, buf_len,
					 print_bpf_output, ring);
	if (ret == LIBBPF_PERF_EVENT_CONT)
		return;

	fprintf(stderr, "perf read loop failed with %d\n", ret);
	stop = true;
}
Ejemplo n.º 6
0
/*
 * realloc() - resize a block previously returned by this allocator.
 *
 * Walks the global page list to find the page owning @addr and
 * dispatches to the LARGE or small/tiny resize helper.
 *
 * NOTE(review): the returns at the realloc_large()/realloc_small_tiny()
 * and in-loop return_enomem() call sites happen while MALLOC_LOCK() is
 * held - presumably those helpers release the lock themselves; verify,
 * otherwise this deadlocks on the next allocator call.
 */
void		*realloc(void *addr, size_t len)
{
	t_page_list	*lst;
	int			item;

	/* realloc(NULL, len) behaves like malloc(len). */
	if (addr == NULL)
		return (malloc(len));
	MALLOC_LOCK();
	lst = g_pages;
	while (lst)
	{
		if (lst->page.type == LARGE && addr == lst->page.addr)
			return (realloc_large(lst, addr, len));
		else if (lst->page.type != LARGE && addr >= lst->page.addr
				&& addr <= lst->page.addr + get_page_size(lst->page.type))
		{
			/* Index of the block containing addr within this page. */
			item = (addr - lst->page.addr) / get_block_size(lst->page.type);
			/* Only resize when addr sits exactly on a block boundary. */
			if (lst->page.addr + get_block_size(lst->page.type) * item == addr)
				return (realloc_small_tiny(lst, addr, len));
			return (return_enomem(NULL));
		}
		lst = lst->next;
	}
	MALLOC_UNLOCK();
	return (return_enomem(NULL));
}
Ejemplo n.º 7
0
/*
 * alloc_pages_on_nodes() - allocate pages on specified NUMA nodes
 * @pages: array in which the page pointers will be stored
 * @num: no. of pages to allocate
 * @nodes: array of NUMA nodes
 *
 * A page will be allocated in each node specified by @nodes, and the
 * page pointers will be stored in @pages array.
 *
 * RETURNS:
 * 0 on success, -1 on allocation failure.
 */
int alloc_pages_on_nodes(void **pages, unsigned int num, int *nodes)
{
	/* unsigned to match @num and avoid a signed/unsigned comparison. */
	unsigned int i;
#if HAVE_NUMA_ALLOC_ONNODE
	size_t onepage = get_page_size();
#endif

	for (i = 0; i < num; i++) {
		pages[i] = NULL;
	}

	for (i = 0; i < num; i++) {
		char *page;

#if HAVE_NUMA_ALLOC_ONNODE
		pages[i] = numa_alloc_onnode(onepage, nodes[i]);
#endif
		if (pages[i] == NULL) {
			tst_resm(TBROK, "allocation of page on node "
				 "%d failed", nodes[i]);
			break;
		}

		/* Touch the page, to force allocation. */
		page = pages[i];
		page[0] = i;
	}

	if (i == num)
		return 0;

	/* Partial failure: release whatever was already allocated
	 * (free_pages() skips NULL entries). */
	free_pages(pages, num);

	return -1;
}
Ejemplo n.º 8
0
/*
 * get_proc_mem() - fill @buf with memory statistics of task @tid of
 * process @pid, read from /proc/<pid>/task/<tid>/stat and .../statm.
 *
 * @buf is zeroed first; on any read/parse failure it is left zeroed
 * (or partially filled). Page-denominated counters are scaled to bytes.
 */
void get_proc_mem (dbgov_proc_mem * buf, pid_t pid, pid_t tid)
{
  char buffer[BUFSIZ], *p;
  const size_t pagesize = get_page_size ();
  memset (buf, 0, sizeof (dbgov_proc_mem));

  int res = try_file_to_buffer (buffer, "/proc/%d/task/%d/stat", pid, tid);
  if (res == TRY_FILE_TO_BUFFER_OK_IOSTAT)
    {
      p = proc_stat_after_cmd (buffer);
      if (!p)
	return;

      /* Skip 20 fields past the command name to reach vsize. */
      p = skip_multiple_token (p, 20);

      buf->vsize = strtoull (p, &p, 0);
      buf->rss = strtoull (p, &p, 0);
      buf->rss_rlim = strtoull (p, &p, 0);

      /* Reuse the outer res; the original shadowed it with an inner
	 declaration, which invited confusion. */
      res = try_file_to_buffer (buffer, "/proc/%d/task/%d/statm", pid, tid);
      if (res == TRY_FILE_TO_BUFFER_OK_IOSTAT)
	{
	  buf->size = strtoull (buffer, &p, 0);
	  buf->resident = strtoull (p, &p, 0);
	  buf->share = strtoull (p, &p, 0);
	}
      buf->size *= pagesize;
      buf->resident *= pagesize;
      buf->share *= pagesize;
      buf->rss *= pagesize;
    }
}
Ejemplo n.º 9
0
/*
 * free() - release a block previously returned by malloc()/realloc().
 *
 * Scans the global page list to locate the page owning @addr and
 * delegates to case_large() / case_else().
 *
 * NOTE(review): the early returns after case_large()/case_else() leave
 * MALLOC_LOCK() held - presumably those helpers release the lock
 * themselves; verify against their implementations.
 */
void			free(void *addr)
{
	t_page_list	*prv;
	t_page_list	*lst;

	/* free(NULL) is a no-op. */
	if (!addr)
		return ;
	MALLOC_LOCK();
	prv = NULL;
	lst = g_pages;
	while (lst)
	{
		/* LARGE pages: handled (and possibly unlinked) by case_large(). */
		if (lst->page.type == LARGE && case_large(addr, prv, lst))
			return ;
		else if (lst->page.type != LARGE && addr >= lst->page.addr
				&& addr <= lst->page.addr + get_page_size(lst->page.type))
		{
			case_else(addr, lst);
			return ;
		}
		prv = lst;
		lst = lst->next;
	}
	MALLOC_UNLOCK();
}
Ejemplo n.º 10
0
int main(int argc, char *argv[])
{
	DCL_THREADGBL_ACCESS;

	GTM_THREADGBL_INIT;
	set_blocksig();
	gtm_imagetype_init(DSE_IMAGE);
	gtm_wcswidth_fnptr = gtm_wcswidth;
	gtm_env_init();	/* read in all environment variables */
	licensed = TRUE;
	TREF(transform) = TRUE;
	op_open_ptr = op_open;
	patch_curr_blk = get_dir_root();
	err_init(util_base_ch);
	GTM_ICU_INIT_IF_NEEDED;	/* Note: should be invoked after err_init (since it may error out) and before CLI parsing */
	sig_init(generic_signal_handler, dse_ctrlc_handler, suspsigs_handler);
	atexit(util_exit_handler);
	SET_LATCH_GLOBAL(&defer_latch, LOCK_AVAILABLE);
	get_page_size();
	stp_init(STP_INITSIZE);
	rts_stringpool = stringpool;
	getjobname();
	INVOKE_INIT_SECSHR_ADDRS;
	getzdir();
	prealloc_gt_timers();
	initialize_pattern_table();
	gvinit();
	region_init(FALSE);
	INIT_GBL_ROOT(); /* Needed for GVT initialization */
	getjobnum();
	util_out_print("!/File  !_!AD", TRUE, DB_LEN_STR(gv_cur_region));
	util_out_print("Region!_!AD!/", TRUE, REG_LEN_STR(gv_cur_region));
	cli_lex_setup(argc, argv);
	CREATE_DUMMY_GBLDIR(gd_header, original_header, gv_cur_region, gd_map, gd_map_top);
	gtm_chk_dist(argv[0]);
#	ifdef DEBUG
	if ((gtm_white_box_test_case_enabled && (WBTEST_SEMTOOLONG_STACK_TRACE == gtm_white_box_test_case_number) ))
	{
		sgmnt_addrs     * csa;
		node_local_ptr_t cnl;
		csa = &FILE_INFO(gv_cur_region)->s_addrs;
		cnl = csa->nl;
		cnl->wbox_test_seq_num  = 1; /*Signal the first step and wait here*/
		while (2 != cnl->wbox_test_seq_num) /*Wait for another process to get hold of the semaphore and signal next step*/
			LONG_SLEEP(10);
	}
#	endif
	if (argc < 2)
                display_prompt();
	io_init(TRUE);
	while (1)
	{
		if (!dse_process(argc))
			break;
		display_prompt();
	}
	dse_exit();
	REVERT;
}
Ejemplo n.º 11
0
/*
 * free_shared_pages() - unmap a shared-page region
 * @pages: array of page pointers; pages[0] is the start of the mapping
 * @num: the no. of pages to free
 */
void free_shared_pages(void **pages, unsigned int num)
{
	/* The pages form one contiguous mapping, so a single munmap()
	 * of the whole region starting at pages[0] releases them all. */
	if (munmap(pages[0], num * get_page_size()) == -1)
		tst_resm(TWARN | TERRNO, "unmapping of shared pages failed");
}
Ejemplo n.º 12
0
/* check if the current directory allows exec mappings */
static int check_current_dir_for_exec(void)
{
    char file_name[] = "anonmap.XXXXXX";
    void *mapped = MAP_FAILED;
    int fd;

    fd = mkstemps( file_name, 0 );
    if (fd == -1) return 0;
    if (grow_file( fd, 1 ))
    {
        /* try mapping one page of the temp file with PROT_EXEC */
        mapped = mmap( NULL, get_page_size(), PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0 );
        if (mapped != MAP_FAILED) munmap( mapped, get_page_size() );
    }
    close( fd );
    unlink( file_name );
    return (mapped != MAP_FAILED);
}
Ejemplo n.º 13
0
/* Release a thread's stack/TLS region: walk it page by page, freeing
   each backing physical page and then removing its virtual mapping. */
static void free_stack_and_tls(uintptr_t stack) {
  unsigned page_size = get_page_size();
  unsigned flags;

  for (unsigned offset = 0; offset < THREAD_STACK_SZ; offset += page_size) {
    free_page(get_mapping(stack + offset, &flags));
    unmap(stack + offset, 1);
  }
}
Ejemplo n.º 14
0
/* Exercise the xbitmap API: set/clear/isset/isclear at small and at
   large (far-apart) indices, plus first_set() ordering. The "// CHECK:"
   comments are expected-output annotations consumed by the test
   harness; the kprintf format strings must match them exactly. */
static int test() {
  xbitmap_t xb;
  uintptr_t loc = 0x20000000;
  /* Backing storage grows in page_size chunks via the alloc callback. */
  xbitmap_init(&xb, get_page_size(), &alloc, &free, (void*)&loc);

  // CHECK: isset(0) = 0
  kprintf("isset(0) = %d\n", xbitmap_isset(&xb, 0));
  // CHECK: isclear(0) = 1
  kprintf("isclear(0) = %d\n", xbitmap_isclear(&xb, 0));

  xbitmap_set(&xb, 0);
  // CHECK: isset(0) = 1
  kprintf("isset(0) = %d\n", xbitmap_isset(&xb, 0));
  // CHECK: isclear(0) = 0
  kprintf("isclear(0) = %d\n", xbitmap_isclear(&xb, 0));

  xbitmap_clear(&xb, 0);
  // CHECK: isset(0) = 0
  kprintf("isset(0) = %d\n", xbitmap_isset(&xb, 0));

  /* first_set() must always report the lowest set bit. */
  xbitmap_set(&xb, 5);
  xbitmap_set(&xb, 7);
  xbitmap_set(&xb, 12);
  xbitmap_set(&xb, 20);
  // CHECK: first_set() = 5
  kprintf("first_set() = %d\n", xbitmap_first_set(&xb));
  xbitmap_clear(&xb, 5);
  // CHECK: first_set() = 7
  kprintf("first_set() = %d\n", xbitmap_first_set(&xb));

  /* A large index (0x8456) must not disturb neighbouring bits. */
  // CHECK: isset(0x8456) = 0
  kprintf("isset(0x8456) = %d\n", xbitmap_isset(&xb, 0x8456));
  xbitmap_set(&xb, 0x8456);
  // CHECK: isset(0x8456) = 1
  kprintf("isset(0x8456) = %d\n", xbitmap_isset(&xb, 0x8456));
  // CHECK: isset(0x8455) = 0
  kprintf("isset(0x8455) = %d\n", xbitmap_isset(&xb, 0x8455));
  // CHECK: isset(12) = 1
  kprintf("isset(12) = %d\n", xbitmap_isset(&xb, 12));

  xbitmap_clear(&xb, 7);
  xbitmap_clear(&xb, 12);
  xbitmap_clear(&xb, 20);

  // CHECK: isset(6) = 0
  kprintf("isset(6) = %d\n", xbitmap_isset(&xb, 6));
  
  /* With the low bits cleared, the only remaining set bit is 0x8456. */
  // CHECK: first_set() = 0x8456
  kprintf("first_set() = 0x%x\n", xbitmap_first_set(&xb));
  xbitmap_clear(&xb, 0x8456);
  
  /* Empty bitmap: first_set() reports -1. */
  // CHECK: first_set() = -1
  kprintf("first_set() = %d\n", xbitmap_first_set(&xb));
  
  return 0;
}
Ejemplo n.º 15
0
/* Reserve a kernel virtual region for a thread's stack/TLS and back
   every page of it with a freshly allocated, writable frame.
   Returns the base address of the region. */
static uintptr_t alloc_stack_and_tls() {
  unsigned page_size = get_page_size();
  uintptr_t base = vmspace_alloc(&kernel_vmspace, THREAD_STACK_SZ, 0);

  for (unsigned offset = 0; offset < THREAD_STACK_SZ; offset += page_size)
    map(base + offset, alloc_page(PAGE_REQ_NONE), 1, PAGE_WRITE);

  return base;
}
Ejemplo n.º 16
0
/*
 * create_queue() - allocate an empty queue whose element block holds
 * one page worth of Elem entries.
 *
 * RETURNS: the new queue, or NULL if either allocation fails.
 */
Queue* create_queue()
{
	int size = get_page_size()/sizeof(Elem);
	Queue* queue = (Queue*) malloc(sizeof(Queue));

	/* The original dereferenced the malloc results unchecked. */
	if (queue == NULL)
		return NULL;
	queue->block = (Elem*) malloc(size*sizeof(Elem));
	if (queue->block == NULL)
	{
		free(queue);
		return NULL;
	}
	queue->head = 0;
	queue->tail = 0;
	queue->size = size;
	return queue;
}
Ejemplo n.º 17
0
Archivo: os.cpp Proyecto: ardeujho/self
// Allocate `size` bytes aligned to the idealized page size. `size` is
// rounded up to a multiple of idealized_page_size in place; on failure
// it is reset to 0 and NULL is returned.
char* OS::allocate_idealized_page_aligned(int32 &size, const char *name,
                                          caddr_t desiredAddress, 
                                          bool mustAllocate) {
  size= roundTo(size, idealized_page_size);
  assert(idealized_page_size % get_page_size() == 0, "page size mismatch");
  char* mem = allocate_heap_aligned(desiredAddress, size, idealized_page_size,
                                    name, mustAllocate);
  if (mem == NULL) size= 0;
  return mem;
}
Ejemplo n.º 18
0
/*
 * ibv_fork_init() - enable fork() support for registered memory.
 *
 * Probes that the kernel supports madvise(MADV_DONTFORK/MADV_DOFORK)
 * on a scratch page, then creates the root node of the tree (mm_root)
 * that tracks madvise'd ranges. Idempotent: returns 0 immediately if
 * already initialized.
 *
 * RETURNS: 0 on success, a positive errno-style code on failure.
 */
int ibv_fork_init(void)
{
	void *tmp, *tmp_aligned;
	int ret;
	unsigned long size;

	/* Already initialized. */
	if (mm_root)
		return 0;

	/* NOTE(review): presumably too_late is set once it is no longer
	 * safe to enable fork support - confirm where it is raised. */
	if (too_late)
		return EINVAL;

	page_size = sysconf(_SC_PAGESIZE);
	if (page_size < 0)
		return errno;

	/* Page-aligned scratch buffer used only for the madvise probe. */
	if (posix_memalign(&tmp, page_size, page_size))
		return ENOMEM;

	if (getenv("RDMAV_HUGEPAGES_SAFE"))
		huge_page_enabled = 1;
	else
		huge_page_enabled = 0;

	if (huge_page_enabled) {
		/* Align down to the underlying page boundary; the mask works
		 * because page sizes are powers of two. */
		size = get_page_size(tmp);
		tmp_aligned = (void *)((uintptr_t) tmp & ~(size - 1));
	} else {
		size = page_size;
		tmp_aligned = tmp;
	}

	/* Both advice values must be accepted for fork support to work. */
	ret = madvise(tmp_aligned, size, MADV_DONTFORK) ||
	      madvise(tmp_aligned, size, MADV_DOFORK);

	free(tmp);

	if (ret)
		return ENOSYS;

	mm_root = malloc(sizeof *mm_root);
	if (!mm_root)
		return ENOMEM;

	/* Single black node covering the entire address space. */
	mm_root->parent = NULL;
	mm_root->left   = NULL;
	mm_root->right  = NULL;
	mm_root->color  = IBV_BLACK;
	mm_root->start  = 0;
	mm_root->end    = UINTPTR_MAX;
	mm_root->refcnt = 0;

	return 0;
}
Ejemplo n.º 19
0
/* Tear down a counter: unmap its buffer (N_PAGES of data plus one
 * extra page), stop watching its fd, close it, and release the
 * struct itself. */
static void
counter_free (counter_t *counter)
{
    d_print ("munmap\n");

    munmap (counter->mmap_page, (N_PAGES + 1) * get_page_size());
    fd_remove_watch (counter->fd);
    close (counter->fd);
    g_free (counter);
}
Ejemplo n.º 20
0
/* appfs_mem_getpageinfo() - query page information for appfs memory.
 *
 * With MEM_FLAG_IS_QUERY set in pageinfo->o_flags, the page containing
 * pageinfo->addr is looked up and its number, size and type flags are
 * written back. Otherwise the size and address of page pageinfo->num
 * (for the given o_flags) are returned.
 *
 * Returns 0 on success, or SYSFS_SET_RETURN(EINVAL) for an invalid page.
 */
int appfs_mem_getpageinfo(const devfs_handle_t * handle, void * ctl){
	DECLARE_APPFS_CONFIG();
	u32 size = 0;
	mem_pageinfo_t * pageinfo = ctl;

	if( pageinfo->o_flags & MEM_FLAG_IS_QUERY ){
		u32 type;
		/* Find which page contains the requested address. */
		pageinfo->num = get_page(config, pageinfo->addr, 0, &type);
		pageinfo->size = get_page_size(config, pageinfo->num, MEM_FLAG_IS_RAM);
		pageinfo->o_flags = type;
		/* type == 0 means the address maps to no known page. */
		if( type == 0 ){ return SYSFS_SET_RETURN(EINVAL); }
		return 0;
	}

	size = get_page_size(config, pageinfo->num, pageinfo->o_flags);
	/* Zero size means the page number/flags combination is invalid. */
	if (size == 0 ){ return SYSFS_SET_RETURN(EINVAL); }
	pageinfo->addr = get_page_addr(config, pageinfo->num, pageinfo->o_flags);
	pageinfo->size = size;

	return 0;
}
Ejemplo n.º 21
0
/* Initialize the flash driver and allocate the internal page buffer
 * used by program() for unaligned writes.
 *
 * Returns 0 on success, -1 if flash_init() fails.
 *
 * NOTE(review): get_page_size() is called and _page_buf is allocated
 * even when flash_init() failed - confirm that is safe on an
 * uninitialized _flash handle.
 */
int FlashIAP::init()
{
    int ret = 0;
    _mutex->lock();
    if (flash_init(&_flash)) {
        ret = -1;
    }
    uint32_t page_size = get_page_size();
    _page_buf = new uint8_t[page_size];

    _mutex->unlock();
    return ret;
}
Ejemplo n.º 22
0
/* len is the total size (in ints) */
/* Returns non-zero when every page of the [addr, addr+len) range in the
 * target thread's address space is writable; probing is done via
 * write_thread_long() on one int per page plus the final int. */
static int check_process_write_access( struct thread *thread, long *addr, data_size_t len )
{
    /* number of ints per page */
    int page = get_page_size() / sizeof(int);

    for (;;)
    {
        /* probe the first int of each page */
        if (write_thread_long( thread, addr, 0, 0 ) == -1) return 0;
        if (len <= page) break;
        addr += page;
        len -= page;
    }
    /* probe the very last int of the buffer */
    return (write_thread_long( thread, addr + len - 1, 0, 0 ) != -1);
}
Ejemplo n.º 23
0
        // Map [offset, offset + length) of the underlying handle into
        // memory, honoring the region's access mode. Because mmap needs
        // a page-aligned file offset, the mapping is extended downwards
        // to the containing page boundary and the extra leading bytes
        // are hidden behind buffer_/extoff_. Throws ipc_error on
        // mapping failure or when a requested fixed address cannot be
        // honored.
        void mapped_region::move(offset_t offset, std::size_t length, void *buffer)
        {
            // Release any previous mapping first.
            if(buffer_ != 0) {
                close();
            }

            // Translate the access mode into mmap protection/sharing flags.
            int prots = 0;
            int flags = 0;
            switch(acmode_) {
            case ReadOnly:
                prots |= PROT_READ;
                flags |= MAP_SHARED;
                break;
            case ReadWrite:
                prots |= (PROT_WRITE | PROT_READ);
                flags |= MAP_SHARED;
                break;
            case CopyOnWrite:
                prots |= (PROT_WRITE | PROT_READ);
                flags |= MAP_PRIVATE;
                break;
            default:
                throw ipc_error("unknown mapping mode", 0);
                break;
            }

            // mapped region must be page aligned
            size_t      page_size   = get_page_size();
            offset_t    extra_offset= offset - (offset / page_size) * page_size;
            // If the caller requested a fixed address, shift it down by
            // the same alignment slack so the user-visible address is
            // exactly what was requested.
            if(buffer) {
                buffer = static_cast<char* >(buffer) - extra_offset;
            }

            void* base = mmap(buffer, extra_offset + length, prots, flags,
                              handle_, offset - extra_offset);
            if(base == MAP_FAILED) {
                int errcode = sys::err::get();
                close();
                throw ipc_error("mmap failed", errcode);
            }

            buffer_ = static_cast<char* >(base) + extra_offset;
            extoff_ = extra_offset;
            offset_ = offset;
            length_ = length;

            // mmap treats a non-fixed address as a hint; if it could not
            // honor the requested location, undo the mapping and fail.
            if(buffer && (base != buffer)) {
                close();
                throw ipc_error("can't mapping to specified address", 0);
            }
        }
Ejemplo n.º 24
0
/* Read @size bytes into @buf from the "misc" partition, starting at
 * page @page_offset. Only eMMC boot targets are supported; NAND is
 * rejected. Returns 0 on success, -1 on any error. */
static int read_misc(unsigned page_offset, void *buf, unsigned size)
{
	const char *ptn_name = "misc";
	uint32_t pagesize = get_page_size();
	unsigned offset;

	if (size == 0 || buf == NULL)
		return -1;

	/* Convert the page offset into a byte offset. */
	offset = page_offset * pagesize;

	if (target_is_emmc_boot())
	{
		int index;
		unsigned long long ptn;
		unsigned long long ptn_size;

		index = partition_get_index(ptn_name);
		if (index == INVALID_PTN)
		{
			dprintf(CRITICAL, "No '%s' partition found\n", ptn_name);
			return -1;
		}

		ptn = partition_get_offset(index);
		ptn_size = partition_get_size(index);

		/* Select the LUN containing the misc partition before reading. */
		mmc_set_lun(partition_get_lun(index));

		/* Reject reads that would run past the partition end. */
		if (ptn_size < offset + size)
		{
			dprintf(CRITICAL, "Read request out of '%s' boundaries\n",
					ptn_name);
			return -1;
		}

		if (mmc_read(ptn + offset, (unsigned int *)buf, size))
		{
			dprintf(CRITICAL, "Reading MMC failed\n");
			return -1;
		}
	}
	else
	{
		dprintf(CRITICAL, "Misc partition not supported for NAND targets.\n");
		return -1;
	}

	return 0;
}
Ejemplo n.º 25
0
/* Program `size` bytes from `buffer` into flash at `addr`.
 *
 * `addr` must be page-aligned and [addr, addr + size) must lie inside
 * the flash. Data is written chunk by chunk, never crossing a sector
 * boundary; unaligned leftovers go through the internal page buffer
 * (padded with 0xFF). Returns 0 on success, -1 on bad arguments or a
 * programming failure.
 */
int FlashIAP::program(const void *buffer, uint32_t addr, uint32_t size)
{
    uint32_t page_size = get_page_size();
    uint32_t flash_size = flash_get_size(&_flash);
    uint32_t flash_start_addr = flash_get_start_address(&_flash);
    uint32_t chunk, prog_size;
    const uint8_t *buf = (uint8_t *) buffer;
    const uint8_t *prog_buf;

    // addr should be aligned to page size
    if (!is_aligned(addr, page_size) || (!buffer) ||
        ((addr + size) > (flash_start_addr + flash_size))) {
        return -1;
    }

    int ret = 0;
    _mutex->lock();
    while (size) {
        uint32_t current_sector_size = flash_get_sector_size(&_flash, addr);
        bool unaligned_src = (((size_t) buf / sizeof(uint32_t) * sizeof(uint32_t)) != (size_t) buf);
        // Never program across the current sector's end.
        chunk = std::min(current_sector_size - (addr % current_sector_size), size);
        // Need to use the internal page buffer in any of these two cases:
        // 1. Size is not page aligned
        // 2. Source buffer is not aligned to uint32_t. This is not supported by many targets (although
        //    the pointer they accept is of uint8_t).
        if (unaligned_src || (chunk < page_size)) {
            chunk = std::min(chunk, page_size);
            memcpy(_page_buf, buf, chunk);
            // Pad the tail of a partial page with the erased-flash value.
            if (chunk < page_size) {
                memset(_page_buf + chunk, 0xFF, page_size - chunk);
            }
            prog_buf = _page_buf;
            prog_size = page_size;
        } else {
            // Fast path: program whole pages straight from the caller's buffer.
            chunk = chunk / page_size * page_size;
            prog_buf = buf;
            prog_size = chunk;
        }
        if (flash_program_page(&_flash, addr, prog_buf, prog_size)) {
            ret = -1;
            break;
        }
        size -= chunk;
        addr += chunk;
        buf += chunk;
    }
    _mutex->unlock();

    return ret;
}
Ejemplo n.º 26
0
/*
 * free_pages() - free an array of pages
 * @pages: array of page pointers to be freed (NULL entries are skipped)
 * @num: no. of pages in the array
 */
void free_pages(void **pages, unsigned int num)
{

#if HAVE_NUMA_H
	/* unsigned to match @num and avoid a signed/unsigned comparison. */
	unsigned int i;
	size_t onepage = get_page_size();

	for (i = 0; i < num; i++) {
		if (pages[i] != NULL) {
			numa_free(pages[i], onepage);
		}
	}
#endif
}
Ejemplo n.º 27
0
/*
 * free_sem() - destroy and unmap an array of semaphores
 * @sem - array of semaphores allocated by alloc_sem()
 * @num - no. of semaphores in the array
 */
void free_sem(sem_t * sem, int num)
{
	int i;

	/* Destroy every semaphore before releasing the shared page. */
	for (i = 0; i < num; i++) {
		if (sem_destroy(&sem[i]) == -1)
			tst_resm(TWARN | TERRNO, "error destroying semaphore");
	}

	if (munmap(sem, get_page_size()) == -1)
		tst_resm(TWARN | TERRNO, "error freeing semaphore memory");
}
Ejemplo n.º 28
0
Archivo: mem.c Proyecto: rickbutton/os
static int free_memory() {
  if ((mboot.flags & MBOOT_MMAP) == 0)
    panic("Bootloader did not provide memory map info!");

  range_t ranges[32], ranges_cpy[32];

  uint32_t i = mboot.mmap_addr;
  unsigned n = 0;
  uint64_t extent = 0;
  while (i < mboot.mmap_addr+mboot.mmap_length) {
    if (n >= 128) break;

    multiboot_mmap_entry_t *entry = (multiboot_mmap_entry_t*)i;

    if (MBOOT_IS_MMAP_TYPE_RAM(entry->type)) {
      ranges[n].start = entry->base_addr;
      ranges[n++].extent = entry->length;

      if (entry->base_addr + entry->length > extent)
        extent = entry->base_addr + entry->length;
    }
    //kprintf("e: sz %x addr %x len %x ty %x\n", entry->size, (uint32_t)entry->base_addr, (uint32_t)entry->length, entry->type);

    i += entry->size + 4;
  }

  extern int __start, __end;
  uintptr_t end = (((uintptr_t)&__end) & ~get_page_mask()) + get_page_size();

  for (i = 0; i < n; ++i)
    remove_range(&ranges[i], (uintptr_t)&__start, end);

  for (i = 0; i < n; ++i) {
    //kprintf("r: %x ext %x\n", (uint32_t)ranges[i].start, (uint32_t)ranges[i].extent);
  }

  /* Copy the ranges to a backup, as init_physical_memory mutates them and 
     init_cow_refcnts needs to run after init_physical_memory */
  for (i = 0; i < n; ++i)
    ranges_cpy[i] = ranges[i];

  init_physical_memory_early(ranges, n, extent);
  init_virtual_memory(ranges, n);
  init_physical_memory();
  init_cow_refcnts(ranges, n);

  return 0;
}
Ejemplo n.º 29
0
int get_ffbm(char *ffbm, unsigned size)
{
	const char *ffbm_cmd = "ffbm-";
	uint32_t page_size = get_page_size();
	char *ffbm_page_buffer = NULL;
	int retval = 0;
	if (size < FFBM_MODE_BUF_SIZE || size >= page_size)
	{
		dprintf(CRITICAL, "Invalid size argument passed to get_ffbm\n");
		retval = -1;
		goto cleanup;
	}
	ffbm_page_buffer = (char*)malloc(page_size);
	if (!ffbm_page_buffer)
	{
		dprintf(CRITICAL, "Failed to alloc buffer for ffbm cookie\n");
		retval = -1;
		goto cleanup;
	}
	if (read_misc(0, ffbm_page_buffer, page_size))
	{
		dprintf(CRITICAL, "Error reading MISC partition\n");
		retval = -1;
		goto cleanup;
	}
	ffbm_page_buffer[size] = '\0';
	if (strncmp(ffbm_cmd, ffbm_page_buffer, strlen(ffbm_cmd)))
	{
		retval = 0;
		goto cleanup;
	}
	else
	{
		if (strlcpy(ffbm, ffbm_page_buffer, size) <
				FFBM_MODE_BUF_SIZE -1)
		{
			dprintf(CRITICAL, "Invalid string in misc partition\n");
			retval = -1;
		}
		else
			retval = 1;
	}
cleanup:
	if(ffbm_page_buffer)
		free(ffbm_page_buffer);
	return retval;
}
Ejemplo n.º 30
0
/* Remove the MTS mappings covered by TLB entry @index: one page per
 * valid EntryLo half (lo0 covers the even page, lo1 the odd page). */
void mips_cp0_unmap_tlb_to_mts (cpu_mips_t * cpu, int index)
{
    tlb_entry_t *tlb = &cpu->cp0.tlb[index];
    m_uint32_t psize = get_page_size (tlb->mask);
    m_va_t even_addr = tlb->hi & mips_cp0_get_vpn2_mask (cpu);
    m_va_t odd_addr = even_addr + psize;

    if (tlb->lo0 & MIPS_TLB_V_MASK)
        cpu->mts_unmap (cpu, even_addr, psize, MTS_ACC_T, index);

    if (tlb->lo1 & MIPS_TLB_V_MASK)
        cpu->mts_unmap (cpu, odd_addr, psize, MTS_ACC_T, index);
}