Example #1
int spin_check(spin_t *s)
{
	/* Check whether a timeout has taken place. Return TRUE if the caller
	 * should continue spinning, and FALSE if a timeout has occurred. The
	 * implementation assumes that it is okay to spin a little bit too long
	 * (up to a full clock tick extra).
	 */
	u64_t cur_tsc, tsc_delta;
	clock_t now, micro_delta;

	switch (s->s_state) {
	case STATE_INIT:
		s->s_state = STATE_BASE_TS;
		break;

	case STATE_BASE_TS:
		s->s_state = STATE_TS;
		read_tsc_64(&s->s_base_tsc);
		break;

	case STATE_TS:
		read_tsc_64(&cur_tsc);

		tsc_delta = sub64(cur_tsc, s->s_base_tsc);

		micro_delta = tsc_64_to_micros(tsc_delta);

		if (micro_delta >= s->s_usecs) {
			s->s_timeout = TRUE;
			return FALSE;
		}

		if (micro_delta >= TSC_SPIN) {
			s->s_usecs -= micro_delta;
			getticks(&s->s_base_uptime);
			s->s_state = STATE_UPTIME;
		}

		break;

	case STATE_UPTIME:
		getticks(&now);

		/* We assume that sys_hz() caches its return value. */
		micro_delta = ((now - s->s_base_uptime) * 1000 / sys_hz()) *
			1000;

		if (micro_delta >= s->s_usecs) {
			s->s_timeout = TRUE;
			return FALSE;
		}

		break;

	default:
		panic("spin_check: invalid state %d", s->s_state);
	}

	return TRUE;
}
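spin_check() is meant to drive a polling loop: the caller keeps spinning as long as it returns TRUE and gives up once it returns FALSE (at which point s_timeout is set). A minimal usage sketch follows; the spin_init() initializer is assumed to reset s_state to STATE_INIT and store the microsecond budget in s_usecs, and my_dev_ready() is a purely hypothetical readiness test.

int wait_for_device(void)
{
	/* Sketch: poll a (hypothetical) device for up to 50000 microseconds. */
	spin_t spin;

	spin_init(&spin, 50000);	/* assumed initializer, see lead-in */

	do {
		if (my_dev_ready())	/* hypothetical readiness check */
			return TRUE;	/* device came up within the budget */
	} while (spin_check(&spin));

	return FALSE;			/* spin_check() signalled a timeout */
}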
Example #2
PUBLIC void busy_delay_ms(int ms)
{
	u64_t cycles = ms_2_cpu_time(ms), tsc0, tsc, tsc1;
	read_tsc_64(&tsc0);
	tsc1 = tsc0 + cycles;
	do { read_tsc_64(&tsc); } while(tsc < tsc1);
	return;
}
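The conversion helper ms_2_cpu_time() is not shown above. A plausible sketch, assuming a per-CPU cycles-per-millisecond calibration value such as the tsc_per_ms[] table from Example #14 and a native 64-bit u64_t:

u64_t ms_2_cpu_time(int ms)
{
	/* Sketch only: milliseconds times calibrated TSC cycles per
	 * millisecond; the table name and indexing are assumptions.
	 */
	return tsc_per_ms[cpuid] * (u64_t)ms;
}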
Example #3
/* returns 0 on exit */
int main(int argc, char *argv[]) {
    /* argument 1 is the number of iterations to run */
    /* argument 2 is the argument to nice() */

    /* PI = 4 * (1/1 - 1/3 + 1/5 - 1/7 + 1/9 - 1/11 ...) */
    int i;
    double sum = 0;
    int denom, numer;
    time_t current;
    time_t start = time(NULL);
    int seconds = 0;
    pid_t process_id = getpid();
    int retval, nice_val, iters;
    u64_t s,e,diff;
    double elapsed;
    unsigned long max;

    if (argc < 3) {
        printf("usage: cpu iterations tickets\n");
        return 1;
    }

    iters = atoi(argv[1]);
    nice_val = atoi(argv[2]);
    retval = nice(nice_val);
    if (retval == -1) {
        printf("Error calling nice()\n");
        return 1;
    }
    printf("Process %d at nice %d\n", process_id, nice_val);

    read_tsc_64(&s);
    for (i = 1; i<iters; ++i) {
        denom = 2 * i - 1;
        if (i % 2)
            numer = -1;
        else
            numer = 1;
        sum += ((double)numer / (double)denom);
        current = time(NULL);
        if (current - start > seconds) {
            seconds = current - start;
            printf("Process %d has been running for %d seconds.\n", process_id, seconds);
            /*if (seconds == 10)
                break;*/
        }
    }
    read_tsc_64(&e);
    diff = sub64(e, s);
    max = -1;
    elapsed = (double)diff.hi + (double)(diff.lo/100000) / (double)(max/100000);
    printf("CPU process %d (nice %d) calculated pi as %f at %f time units\n", process_id, nice_val, -4 * sum, elapsed);
    return 0;
}
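The elapsed-time expression above mixes diff.hi and diff.lo into ad-hoc "time units" that are hard to compare across runs. Where the tsc_64_to_micros() helper used in Examples #1 and #16 is available, the cycle delta can be reported in microseconds instead; a sketch replacing the two statements before the return:

    /* Sketch: convert the measured cycle delta straight to microseconds. */
    elapsed = (double)tsc_64_to_micros(diff);
    printf("CPU process %d (nice %d) calculated pi as %f in %f microseconds\n",
           process_id, nice_val, -4 * sum, elapsed);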
Example #4
/*===========================================================================*
 *				idle					     * 
 *===========================================================================*/
PRIVATE void idle()
{
	/* This function is called whenever there is no work to do.
	 * Halt the CPU, and measure how many timestamp counter ticks are
	 * spent not doing anything. This allows test setups to measure
	 * the CPU utilization of certain workloads with high precision.
	 */
#ifdef CONFIG_IDLE_TSC
	u64_t idle_start;

	read_tsc_64(&idle_start);
	idle_active = 1;
#endif

	halt_cpu();

#ifdef CONFIG_IDLE_TSC
	if (idle_active) {
		IDLE_STOP;
		printf("Kernel: idle active after resuming CPU\n");
	}

	idle_tsc = add64(idle_tsc, sub64(idle_stop, idle_start));
#endif
}
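The IDLE_STOP macro is not defined in this fragment. Judging from how idle_start, idle_stop and idle_active are used above, it presumably records the end-of-idle TSC sample and clears the flag, roughly like the following sketch (not the actual definition):

/* Sketch of what IDLE_STOP presumably does: record the end-of-idle TSC
 * sample and mark the idle period as accounted for. Normally the interrupt
 * path that wakes the CPU would run this, so the printf() branch above
 * only fires when that path was skipped.
 */
#define IDLE_STOP			\
	if (idle_active) {		\
		read_tsc_64(&idle_stop);\
		idle_active = 0;	\
	}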
Example #5
void cycles_accounting_init(void)
{
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));

	make_zero64(get_cpu_var(cpu, cpu_last_tsc));
	make_zero64(get_cpu_var(cpu, cpu_last_idle));
}
Example #6
void init_scheduling(void)
{
	u64_t r;
	balance_timeout = BALANCE_TIMEOUT * sys_hz();
	init_timer(&sched_timer);
	set_timer(&sched_timer, balance_timeout, balance_queues, 0);
	read_tsc_64(&r);
	srandom((unsigned)r);
}
Example #7
double getidle(void)
{
  u64_t stop, idle2;
  u64_t idelta, tdelta;
  double ifp, tfp, rfp;
  int r;

  if (!running) {
	if ((r = sys_getidletsc(&idle)) != OK)
		return -1.0;

	running = 1;

	read_tsc_64(&start);

	return 0.0;
  }
  else {
	read_tsc_64(&stop);

	running = 0;

	if ((r = sys_getidletsc(&idle2)) != OK)
		return -1.0;

	idelta = sub64(idle2, idle);
	tdelta = sub64(stop, start);

	if (cmp64(idelta, tdelta) >= 0)
		return 100.0;

	ifp = make_double(idelta);
	tfp = make_double(tdelta);

	rfp = ifp / tfp * 100.0;

	if (rfp < 0.0) rfp = 0.0;
	else if (rfp > 100.0) rfp = 100.0;

	return rfp;
  }

}
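getidle() is stateful: the first call arms a measurement (and returns 0.0), the next call returns the percentage of CPU cycles spent idle over the interval in between. A minimal usage sketch, with run_workload() as a hypothetical stand-in for the code under test:

void measure_idle_time(void)
{
	double idle_pct;

	getidle();		/* first call: record start TSC and idle TSC */
	run_workload();		/* hypothetical workload under test */
	idle_pct = getidle();	/* second call: idle percentage, or -1.0 on error */

	if (idle_pct >= 0.0)
		printf("system was %.1f%% idle during the workload\n", idle_pct);
}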
Example #8
void init_scheduling(void)
{
	/*Lottery Scheduling*/
	u64_t r;

	balance_timeout = BALANCE_TIMEOUT * sys_hz();
	init_timer(&sched_timer);
	set_timer(&sched_timer, balance_timeout, balance_queues, 0);

	/*Lottery Scheduling*/
	read_tsc_64(&r);
	srandom((unsigned)r);

}
Example #9
PRIVATE int do_lottery() {
    struct schedproc *rmp;
    int rv, proc_nr;
    int total_tickets = 0;
    u64_t tsc;
    int winner;

    /* count the total number of tickets in all processes */
    /* we really should have a global to keep track of this total */
    /* rather than computing it every time */
    for (proc_nr = 0, rmp = schedproc; proc_nr < NR_PROCS; ++proc_nr, ++rmp)
        if (rmp->priority == HOLDING_Q && rmp->flags == (IN_USE | USER_PROCESS)) /* winnable? */
            total_tickets += rmp->tickets;
    
    if (!total_tickets) /* there were no winnable processes */
        return OK;
    
    /* generate a "random" winning ticket */
    /* lower bits of time stamp counter are random enough */
    /*   and much faster than random() */
    read_tsc_64(&tsc);
    winner = tsc.lo % total_tickets + 1;

    /* now find the process with the winning ticket */
    for (proc_nr = 0, rmp = schedproc; proc_nr < NR_PROCS; ++proc_nr, ++rmp) {
        if (rmp->priority == HOLDING_Q && rmp->flags == (IN_USE | USER_PROCESS)) /* winnable? */
            winner -= rmp->tickets;
        if (winner <= 0)
            break;
    }

    printf("Process %d won with %d(%d) of %d tickets\n", proc_nr, rmp->tickets, rmp->blocking, total_tickets);
    /* schedule new winning process */
    rmp->priority = WINNING_Q;
    rmp->time_slice = USER_QUANTUM;
    /*if (rmp->blocking)
        rmp->time_slice = USER_QUANTUM / (rmp->blocking + 1); */
    rmp->blocking = 0;

    if ((rv = schedule_process(rmp)) != OK)
        return rv;
    return OK;
}
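The winner search above walks the process table, subtracting each winnable process's tickets from the winning ticket number until it drops to zero or below. The same walk, isolated on a plain array so the arithmetic is easy to follow (illustration only, not scheduler code):

int pick_winner(const int *tickets, int nprocs, int winning_ticket)
{
	/* Illustration: with tickets {5, 0, 3, 2} and winning_ticket 7,
	 * the walk goes 7 -> 2 -> 2 -> -1, so index 2 wins.
	 */
	int i;

	for (i = 0; i < nprocs; i++) {
		winning_ticket -= tickets[i];
		if (winning_ticket <= 0)
			break;	/* this entry holds the winning ticket */
	}
	return i;
}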
Example #10
PRIVATE int calib_cpu_handler(irq_hook_t * UNUSED(hook))
{
	u64_t tsc;

	probe_ticks++;
	read_tsc_64(&tsc);


	if (probe_ticks == 1) {
		tsc0 = tsc;
	}
	else if (probe_ticks == PROBE_TICKS) {
		tsc1 = tsc;
	}

	/* just in case we are in an SMP single cpu fallback mode */
	BKL_UNLOCK();
	return 1;
}
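calib_cpu_handler() only records the TSC at the first and at the PROBE_TICKS-th clock interrupt; turning those samples into a frequency estimate happens elsewhere (estimate_cpu_freq(), called from init_local_timer() in Example #14). The arithmetic is simply cycles per clock tick times ticks per second; a sketch, assuming system_hz holds the clock frequency and u64_t supports native arithmetic:

u64_t estimated_cpu_freq(void)
{
	/* (PROBE_TICKS - 1) full clock ticks elapsed between the two samples. */
	u64_t cycles_per_tick = (tsc1 - tsc0) / (PROBE_TICKS - 1);

	return cycles_per_tick * system_hz;	/* TSC cycles per second */
}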
Example #11
void context_stop(struct proc * p)
{
	u64_t tsc;
	u64_t tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);

	read_tsc_64(&tsc);
	assert(tsc >= *__tsc_ctr_switch);
	tsc_delta = tsc - *__tsc_ctr_switch;
	p->p_cycles += tsc_delta;

	if(kbill_ipc) {
		kbill_ipc->p_kipc_cycles =
			add64(kbill_ipc->p_kipc_cycles, tsc_delta);
		kbill_ipc = NULL;
	}

	if(kbill_kcall) {
		kbill_kcall->p_kcall_cycles =
			add64(kbill_kcall->p_kcall_cycles, tsc_delta);
		kbill_kcall = NULL;
	}

	/*
	 * deduct the just consumed cpu cycles from the cpu time left for this
	 * process during its current quantum. Skip IDLE and other pseudo kernel
	 * tasks
	 */
	if (p->p_endpoint >= 0) {
#if DEBUG_RACE
		p->p_cpu_time_left = 0;
#else
		if (tsc_delta < p->p_cpu_time_left) {
			p->p_cpu_time_left -= tsc_delta;
		} else p->p_cpu_time_left = 0;
#endif
	}

	*__tsc_ctr_switch = tsc;
}
Example #12
PUBLIC short cpu_load(void)
{
	u64_t current_tsc, *current_idle;
	u64_t tsc_delta, idle_delta, busy;
	struct proc *idle;
	short load;
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	u64_t *last_tsc, *last_idle;

	last_tsc = get_cpu_var_ptr(cpu, cpu_last_tsc);
	last_idle = get_cpu_var_ptr(cpu, cpu_last_idle);

	idle = get_cpu_var_ptr(cpu, idle_proc);
	read_tsc_64(&current_tsc);
	current_idle = &idle->p_cycles; /* ptr to idle proc */

	/* calculate load since last cpu_load invocation */
	if (!is_zero64(*last_tsc)) {
		tsc_delta = sub64(current_tsc, *last_tsc);
		idle_delta = sub64(*current_idle, *last_idle);

		busy = sub64(tsc_delta, idle_delta);
		busy = mul64(busy, make64(100, 0));
		load = ex64lo(div64(busy, tsc_delta));

		if (load > 100)
			load = 100;
	} else
		load = 0;
	
	*last_tsc = current_tsc;
	*last_idle = *current_idle;
	return load;
}
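In other words, the reported load is the busy fraction of the sampling interval scaled to a percentage: load = 100 * (tsc_delta - idle_delta) / tsc_delta. For example, if 2,000,000 cycles elapsed since the previous call and the idle process accumulated 1,500,000 of them, the busy delta is 500,000 and the function returns 100 * 500,000 / 2,000,000 = 25.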
Example #13
PUBLIC void context_stop(struct proc * p)
{
	u64_t tsc, tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;

	/*
	 * This function is called only if we switch from kernel to user or idle
	 * or back. Therefore this is a perfect location to place the big kernel
	 * lock which will hopefully disappear soon.
	 *
	 * If we stop accounting for KERNEL we must unlock the BKL. If we
	 * account for IDLE we must not hold the lock.
	 */
	if (p == proc_addr(KERNEL)) {
		u64_t tmp;

		read_tsc_64(&tsc);
		tmp = sub64(tsc, *__tsc_ctr_switch);
		kernel_ticks[cpu] = add64(kernel_ticks[cpu], tmp);
		p->p_cycles = add64(p->p_cycles, tmp);
		BKL_UNLOCK();
	} else {
		u64_t bkl_tsc;
		atomic_t succ;
		
		read_tsc_64(&bkl_tsc);
		/* this only gives a good estimate */
		succ = big_kernel_lock.val;
		
		BKL_LOCK();
		
		read_tsc_64(&tsc);

		bkl_ticks[cpu] = add64(bkl_ticks[cpu], sub64(tsc, bkl_tsc));
		bkl_tries[cpu]++;
		bkl_succ[cpu] += !(!(succ == 0));

		p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));

#ifdef CONFIG_SMP
		/*
		 * Since at the time we got a scheduling IPI we might have been
		 * waiting for BKL already, we may miss it due to a similar IPI to
		 * the cpu which is already waiting for us to handle its own. This
		 * results in a live-lock of these two cpus.
		 *
		 * Therefore we always check if there is one pending and if so,
		 * we handle it straight away so the other cpu can continue and
		 * we do not deadlock.
		 */
		smp_sched_handler();
#endif
	}
#else
	read_tsc_64(&tsc);
	p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
#endif
	
	tsc_delta = sub64(tsc, *__tsc_ctr_switch);

	if(kbill_ipc) {
		kbill_ipc->p_kipc_cycles =
			add64(kbill_ipc->p_kipc_cycles, tsc_delta);
		kbill_ipc = NULL;
	}

	if(kbill_kcall) {
		kbill_kcall->p_kcall_cycles =
			add64(kbill_kcall->p_kcall_cycles, tsc_delta);
		kbill_kcall = NULL;
	}

	/*
	 * deduct the just consumed cpu cycles from the cpu time left for this
	 * process during its current quantum. Skip IDLE and other pseudo kernel
	 * tasks
	 */
	if (p->p_endpoint >= 0) {
#if DEBUG_RACE
		make_zero64(p->p_cpu_time_left);
#else
		/* if (tsc_delta < p->p_cpu_time_left) in 64bit */
		if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
				(ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
				 ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
			p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
		else {
			make_zero64(p->p_cpu_time_left);
		}
#endif
	}

	*__tsc_ctr_switch = tsc;
}
Example #14
PUBLIC int init_local_timer(unsigned freq)
{
#ifdef USE_APIC
	/* if we know the address, lapic is enabled and we should use it */
	if (lapic_addr) {
		unsigned cpu = cpuid;
		tsc_per_ms[cpu] = div64u(cpu_get_freq(cpu), 1000);
		lapic_set_timer_one_shot(1000000/system_hz);
	} else
	{
		BOOT_VERBOSE(printf("Initiating legacy i8253 timer\n"));
#else
	{
#endif
		init_8253A_timer(freq);
		estimate_cpu_freq();
		/* always only 1 cpu in the system */
		tsc_per_ms[0] = div64u(cpu_get_freq(0), 1000);
	}

	return 0;
}

PUBLIC void stop_local_timer(void)
{
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_stop_timer();
		apic_eoi();
	} else
#endif
	{
		stop_8253A_timer();
	}
}

PUBLIC void restart_local_timer(void)
{
#ifdef USE_APIC
	if (lapic_addr) {
		lapic_restart_timer();
	}
#endif
}

PUBLIC int register_local_timer_handler(const irq_handler_t handler)
{
#ifdef USE_APIC
	if (lapic_addr) {
		/* Using APIC, it is configured in apic_idt_init() */
		BOOT_VERBOSE(printf("Using LAPIC timer as tick source\n"));
	} else
#endif
	{
		/* Using PIC, Initialize the CLOCK's interrupt hook. */
		pic_timer_hook.proc_nr_e = NONE;
		pic_timer_hook.irq = CLOCK_IRQ;

		put_irq_handler(&pic_timer_hook, CLOCK_IRQ, handler);
	}

	return 0;
}

PUBLIC void cycles_accounting_init(void)
{
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;
#endif

	read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));

	make_zero64(get_cpu_var(cpu, cpu_last_tsc));
	make_zero64(get_cpu_var(cpu, cpu_last_idle));
}
Example #15
PUBLIC void context_stop(struct proc * p)
{
	u64_t tsc, tsc_delta;
	u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
#ifdef CONFIG_SMP
	unsigned cpu = cpuid;

	/*
	 * This function is called only if we switch from kernel to user or idle
	 * or back. Therefore this is a perfect location to place the big kernel
	 * lock which will hopefully disappear soon.
	 *
	 * If we stop accounting for KERNEL we must unlock the BKL. If we
	 * account for IDLE we must not hold the lock.
	 */
	if (p == proc_addr(KERNEL)) {
		u64_t tmp;

		read_tsc_64(&tsc);
		tmp = sub64(tsc, *__tsc_ctr_switch);
		kernel_ticks[cpu] = add64(kernel_ticks[cpu], tmp);
		p->p_cycles = add64(p->p_cycles, tmp);
		BKL_UNLOCK();
	} else {
		u64_t bkl_tsc;
		atomic_t succ;
		
		read_tsc_64(&bkl_tsc);
		/* this only gives a good estimate */
		succ = big_kernel_lock.val;
		
		BKL_LOCK();
		
		read_tsc_64(&tsc);

		bkl_ticks[cpu] = add64(bkl_ticks[cpu], sub64(tsc, bkl_tsc));
		bkl_tries[cpu]++;
		bkl_succ[cpu] += !(!(succ == 0));

		p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
	}
#else
	read_tsc_64(&tsc);
	p->p_cycles = add64(p->p_cycles, sub64(tsc, *__tsc_ctr_switch));
#endif
	
	tsc_delta = sub64(tsc, *__tsc_ctr_switch);

	if(kbill_ipc) {
		kbill_ipc->p_kipc_cycles =
			add64(kbill_ipc->p_kipc_cycles, tsc_delta);
		kbill_ipc = NULL;
	}

	if(kbill_kcall) {
		kbill_kcall->p_kcall_cycles =
			add64(kbill_kcall->p_kcall_cycles, tsc_delta);
		kbill_kcall = NULL;
	}

	/*
	 * deduct the just consumed cpu cycles from the cpu time left for this
	 * process during its current quantum. Skip IDLE and other pseudo kernel
	 * tasks
	 */
	if (p->p_endpoint >= 0) {
#if DEBUG_RACE
		make_zero64(p->p_cpu_time_left);
#else
		/* if (tsc_delta < p->p_cpu_time_left) in 64bit */
		if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
				(ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
				 ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
			p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
		else {
			make_zero64(p->p_cpu_time_left);
		}
#endif
	}

	*__tsc_ctr_switch = tsc;
}
Example #16
/*===========================================================================*
 *				    main				     *
 *===========================================================================*/
int main(int argc, char **argv)
{
	endpoint_t ep_self, ep_child;
	size_t size = BUF_SIZE;
	int i, r, pid;
	int status;
	u64_t start, end, diff;
	double micros;
	char nr_pages_str[10], is_map_str[2], is_write_str[2];
	int nr_pages, is_map, is_write;

	/* SEF local startup. */
	env_setargs(argc, argv);
	sef_local_startup();

	/* Parse the command line. */
	r = env_get_param("pages", nr_pages_str, sizeof(nr_pages_str));
	errno = 0;
	nr_pages = atoi(nr_pages_str);
	if (r != OK || errno || nr_pages <=0) {
		exit_usage();
	}
	if(nr_pages > TEST_PAGE_NUM) {
		printf("REQUESTOR: too many pages. Max allowed: %d\n",
			TEST_PAGE_NUM);
		exit_usage();
	}
	r = env_get_param("map", is_map_str, sizeof(is_map_str));
	errno = 0;
	is_map = atoi(is_map_str);
	if (r != OK || errno || (is_map!=0 && is_map!=1)) {
		exit_usage();
	}
	r = env_get_param("write", is_write_str, sizeof(is_write_str));
	errno = 0;
	is_write = atoi(is_write_str);
	if (r != OK || errno || (is_write!=0 && is_write!=1)) {
		exit_usage();
	}
	printf("REQUESTOR: Running tests with pages=%d map=%d write=%d...\n",
		nr_pages, is_map, is_write);

	/* Prepare work. */
	buf = (char*) CLICK_CEIL(buf_buf);
	fid_get = open(FIFO_GRANTOR, O_RDONLY);
	fid_send = open(FIFO_REQUESTOR, O_WRONLY);
	if(fid_get < 0 || fid_send < 0) {
		printf("REQUESTOR: can't open fifo files.\n");
		return 1;
	}

	/* Send our endpoint to the granter, so that it can create the
	 * grant.
	 */
	ep_self = getprocnr();
	write(fid_send, &ep_self, sizeof(ep_self));
	dprint("REQUESTOR: sending my endpoint: %d\n", ep_self);

	/* Get the granter's endpoint and gid. */
	read(fid_get, &ep_granter, sizeof(ep_granter));
	read(fid_get, &gid, sizeof(gid));
	dprint("REQUESTOR: getting granter's endpoint %d and gid %d\n",
		ep_granter, gid);

	FIFO_WAIT(fid_get);
	diff = make64(0, 0);

	if(is_map) {
		/* Test safemap. */
		for(i=0;i<NR_TEST_ITERATIONS;i++) {
			read_tsc_64(&start);
			r = sys_safemap(ep_granter, gid, 0, (long)buf,
				nr_pages*CLICK_SIZE, D, 1);
			if(r != OK) {
				printf("REQUESTOR: safemap error: %d\n", r);
				return 1;
			}
			read_write_buff(buf, nr_pages*CLICK_SIZE, is_write);
			read_tsc_64(&end);
			diff = add64(diff, (sub64(end, start)));
			r = sys_safeunmap(D, (long)buf);
			if(r != OK) {
				printf("REQUESTOR: safeunmap error: %d\n", r);
				return 1;
			}
		}
		micros = ((double)tsc_64_to_micros(diff))
			/ (NR_TEST_ITERATIONS*nr_pages);
		REPORT_TEST("REQUESTOR", "SAFEMAP", micros);
	}
	else {
		/* Test safecopy. */
		for(i=0;i<NR_TEST_ITERATIONS;i++) {
			read_tsc_64(&start);
			r = sys_safecopyfrom(ep_granter, gid, 0, (long)buf,
				nr_pages*CLICK_SIZE, D);
			if(r != OK) {
				printf("REQUESTOR: safecopy error: %d\n", r);
				return 1;
			}
			read_write_buff(buf, nr_pages*CLICK_SIZE, is_write);
			read_tsc_64(&end);
			diff = add64(diff, (sub64(end, start)));
		}
		micros = ((double)tsc_64_to_micros(diff))
			/ (NR_TEST_ITERATIONS*nr_pages);
		REPORT_TEST("REQUESTOR", "SAFECOPY", micros);
	}

	FIFO_NOTIFY(fid_send);

	return 0;
}
Example #17
static struct block *block_alloc(size_t size) 
{
	struct block *block;
	u8_t *dataptr, *p, *ptr;
	unsigned page_index, page_index_max;
	size_t sizerem, totalsize;
	u64_t tsc;

	LOG(("block_alloc; size=0x%x\n", size));
	assert(size > 0);
	
	/* round size up to machine word size */
	sizerem = size % sizeof(long);
	if (sizerem)
		size += sizeof(long) - sizerem;

	/* initialize address range */
	if (!ptr_min && !ptr_max) {
		/* keep a safe distance from areas that are in use:
		 * - 4MB from the break (should not change if traditional
	 *   malloc is not used so a small margin is sufficient)
		 * - 256MB from the stack (big margin because memory beyond
		 *   this may be allocated by mmap when the address space 
		 *   starts to fill up)
		 */
		ptr_min = page_round_up_ptr((u8_t *) sbrk(0) + 0x400000);
		ptr_max = page_round_down_ptr((u8_t *) &size - 0x10000000);
	}
	assert(ptr_min);
	assert(ptr_max);
	assert(ptr_min < ptr_max);

	/* select address at random */
	read_tsc_64(&tsc);
	totalsize = block_get_totalsize(size);
	page_index_max = (ptr_max - ptr_min - totalsize) / PAGE_SIZE;
	page_index = (page_index_max > 0) ? (tsc.lo % page_index_max) : 0;
	ptr = ptr_min + page_index * PAGE_SIZE;
	
	/* allocate block */
	block = (struct block *) mmap(
		ptr, 				/* addr */
		totalsize,			/* len */ 
		PROT_READ|PROT_WRITE, 		/* prot */
		MAP_PREALLOC, 			/* flags */
		-1, 				/* fd */
		0);				/* offset */
	if (block == MAP_FAILED) {
		/* mmap call failed */
		abort();
	}

	/* block may not be at the requested location if that is in use */
	if (ptr_min > (u8_t *) block)
		ptr_min = (u8_t *) block;

	if (ptr_max < (u8_t *) block)
		ptr_max = (u8_t *) block;

	/* initialize block, including fillers */
	block->size = size;
	block->magic = block_compute_magic(block);
	dataptr = block_get_dataptr(block);
	for (p = (u8_t *) (block + 1); p < dataptr; p++)
		*p = ((unsigned long) p & 0xff);
		
	LOG(("block_alloc; block=0x%x\n", block));
	return block;
}