Example #1
File: vcore.c  Project: windyuuy/akaros
void __attribute__((constructor)) vcore_lib_init(void)
{
    uintptr_t mmap_block;

    /* Note this is racy, but okay.  The first time through, we are _S.
     * Also, this is the "lowest" level constructor for now, so we don't need
     * to call any other init functions after our run_once() call. This may
     * change in the future. */
    init_once_racy(return);

    /* Need to alloc vcore0's transition stuff here (technically, just the TLS)
     * so that schedulers can use vcore0's transition TLS before it comes up in
     * vcore_entry() */
    if (allocate_vcore_stack(0) || allocate_transition_tls(0))
        goto vcore_lib_init_fail;

    /* Initialize our VCPD event queues' ucqs, two pages per ucq, 4 per vcore */
    mmap_block = (uintptr_t)mmap(0, PGSIZE * 4 * max_vcores(),
                                 PROT_WRITE | PROT_READ,
                                 MAP_POPULATE | MAP_ANONYMOUS, -1, 0);
    /* Yeah, this doesn't fit in the error-handling scheme, but this whole
     * system doesn't really handle failure, and needs a rewrite involving less
     * mmaps/munmaps. */
    assert(mmap_block);
    /* Note we may end up doing vcore 0's elsewhere, for _Ss, or else have a
     * separate ev_q for that. */
    for (int i = 0; i < max_vcores(); i++) {
        /* four pages total for both ucqs from the big block (2 pages each) */
        ucq_init_raw(&vcpd_of(i)->ev_mbox_public.ev_msgs,
                     mmap_block + (4 * i    ) * PGSIZE,
                     mmap_block + (4 * i + 1) * PGSIZE);
        ucq_init_raw(&vcpd_of(i)->ev_mbox_private.ev_msgs,
                     mmap_block + (4 * i + 2) * PGSIZE,
                     mmap_block + (4 * i + 3) * PGSIZE);
        /* Set the lowest level entry point for each vcore. */
        vcpd_of(i)->vcore_entry = (uintptr_t)__kernel_vcore_entry;
    }
    atomic_init(&vc_req_being_handled, 0);
    assert(!in_vcore_context());
    vcore_libc_init();
    return;
vcore_lib_init_fail:
    assert(0);
}
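The loop above carves the bulk mmap block into four pages per vcore: pages 4i and 4i+1 back the public ucq, pages 4i+2 and 4i+3 the private one. A minimal sketch of that offset arithmetic, using a hypothetical helper name:

static uintptr_t ucq_pg(uintptr_t mmap_block, int vcoreid, int pg)
{
    /* Address of page 'pg' (0..3) within vcore 'vcoreid's 4-page slice. */
    return mmap_block + (4 * vcoreid + pg) * PGSIZE;
}

With this, the public ucq init would read ucq_init_raw(&vcpd_of(i)->ev_mbox_public.ev_msgs, ucq_pg(mmap_block, i, 0), ucq_pg(mmap_block, i, 1)).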
Example #2
File: ucq.c  Project: brho/akaros
/* Inits a ucq, where you don't have to bother with the memory allocation.  This
 * would be appropriate for one or two UCQs, though if you're allocating in
 * bulk, use the raw version. */
void ucq_init(struct ucq *ucq)
{
	uintptr_t two_pages = (uintptr_t)mmap(0, PGSIZE * 2,
	                                      PROT_WRITE | PROT_READ,
	                                      MAP_POPULATE | MAP_ANONYMOUS |
	                                      MAP_PRIVATE, -1, 0);

	assert(two_pages);
	ucq_init_raw(ucq, two_pages, two_pages + PGSIZE);
}
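A hedged usage sketch: per the comment above, ucq_init() suits a one-off queue, since it mmaps its own two pages (my_ucq and make_one_ucq are illustrative names, not from the source):

static void make_one_ucq(void)
{
	static struct ucq my_ucq;	/* standalone queue; use ucq_init_raw() for bulk */

	ucq_init(&my_ucq);	/* does the two-page mmap internally */
}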
Example #3
int main(int argc, char** argv)
{
	/* this program should only be started from the kernel for tests */
	printf("[user] Attempting to read ucq messages from test_ucq().  "
	       "Don't call this manually.\n");
	/* Map into a known, extremely ghetto location.  The kernel knows to look
	 * here. */
	struct ucq *ucq = mmap((void*)USTACKTOP, PGSIZE, PROT_WRITE | PROT_READ,
	                       MAP_POPULATE, -1, 0);
	assert((uintptr_t)ucq == USTACKTOP);
	/* Now init it */
	uintptr_t two_pages = (uintptr_t)mmap(0, PGSIZE * 2, PROT_WRITE | PROT_READ,
	                                      MAP_POPULATE | MAP_ANONYMOUS, -1, 0);
	assert(two_pages);
	ucq_init_raw(ucq, two_pages, two_pages + PGSIZE);
	printf("[user] UCQ %08p initialized\n", ucq);
	/* try to get a simple message */
	struct event_msg msg;
	/* 1: Spin til we can get a message (0 on success breaks) */
	while (get_ucq_msg(ucq, &msg))
		cpu_relax();
	printf("[user] Got simple message type %d(7) with A2 %08p(0xdeadbeef)\n",
	       msg.ev_type, msg.ev_arg2);
	/* 2: get a bunch */
	for (int i = 0; i < 5000; i++) {
		while (get_ucq_msg(ucq, &msg))
			cpu_relax();
		assert(msg.ev_type == i);
	}
	printf("[user] #2 Received a bunch!  Last one was %d(4999), "
	       "extra pages %d(6, if #3 is 1000 and was blasted already)\n",
	       msg.ev_type, atomic_read(&ucq->nr_extra_pgs));
	/* 3: test chaining */
	while (atomic_read(&ucq->nr_extra_pgs) < 2)
		cpu_relax();
	printf("[user] #3 There's now a couple pages (%d), trying to receive...\n",
	       atomic_read(&ucq->nr_extra_pgs));
	/* this assumes 1000 is enough for a couple pages */
	for (int i = 0; i < 1000; i++) {
		while (get_ucq_msg(ucq, &msg))
			cpu_relax();
		assert(msg.ev_type == i);
	}
	printf("[user] Done, extra pages: %d(0)\n", atomic_read(&ucq->nr_extra_pgs));
	int extra = 0;
	while (!get_ucq_msg(ucq, &msg)) {
		printf("[user] got %d extra messages in the ucq, type %d\n", ++extra,
		       msg.ev_type);
	}
	printf("[user] Spinning...\n");
	while (1);

	return 0;
}
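The test repeats one receive idiom: spin with cpu_relax() until get_ucq_msg() returns 0 on success. A minimal sketch that factors it out (the helper name is hypothetical):

static void ucq_recv_spin(struct ucq *ucq, struct event_msg *msg)
{
	/* get_ucq_msg() returns nonzero while the queue is empty. */
	while (get_ucq_msg(ucq, msg))
		cpu_relax();
}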
Example #4
File: vcore.c  Project: 7perl/akaros
void vcore_init(void)
{
	uintptr_t mmap_block;
	/* Note this is racy, but okay.  The first time through, we are _S */
	init_once_racy(return);

	/* Need to alloc vcore0's transition stuff here (technically, just the TLS)
	 * so that schedulers can use vcore0's transition TLS before it comes up in
	 * vcore_entry() */
	if (allocate_transition_stack(0) || allocate_transition_tls(0))
		goto vcore_init_fail;

	/* Initialize our VCPD event queues' ucqs, two pages per ucq, 4 per vcore */
	mmap_block = (uintptr_t)mmap(0, PGSIZE * 4 * max_vcores(),
	                             PROT_WRITE | PROT_READ,
	                             MAP_POPULATE | MAP_ANONYMOUS, -1, 0);
	/* Yeah, this doesn't fit in the error-handling scheme, but this whole
	 * system doesn't really handle failure, and needs a rewrite involving less
	 * mmaps/munmaps. */
	assert(mmap_block);
	/* Note we may end up doing vcore 0's elsewhere, for _Ss, or else have a
	 * separate ev_q for that. */
	for (int i = 0; i < max_vcores(); i++) {
		/* four pages total for both ucqs from the big block (2 pages each) */
		ucq_init_raw(&vcpd_of(i)->ev_mbox_public.ev_msgs,
		             mmap_block + (4 * i    ) * PGSIZE,
		             mmap_block + (4 * i + 1) * PGSIZE);
		ucq_init_raw(&vcpd_of(i)->ev_mbox_private.ev_msgs,
		             mmap_block + (4 * i + 2) * PGSIZE,
		             mmap_block + (4 * i + 3) * PGSIZE);
	}
	atomic_init(&vc_req_being_handled, 0);
	assert(!in_vcore_context());
	/* no longer need to enable notifs on vcore 0, it is set like that by
	 * default (so you drop into vcore context immediately on transitioning to
	 * _M) */
	vc_initialized = TRUE;
	return;
vcore_init_fail:
	assert(0);
}
Example #5
File: vcore.c  Project: brho/akaros
/* Helper: prepares a vcore for use.  Takes a block of pages for the UCQs.
 *
 * Vcores need certain things, such as a stack and TLS.  These are determined by
 * userspace.  Every vcore needs these set up before we drop into vcore context
 * on that vcore.  This means we need to prep before asking the kernel for those
 * vcores.
 *
 * We could have this function do its own mmap, at the expense of O(n) syscalls
 * when we prepare the extra vcores. */
static void __prep_vcore(int vcoreid, uintptr_t mmap_block)
{
	struct preempt_data *vcpd = vcpd_of(vcoreid);
	int ret;

	ret = allocate_vcore_stack(vcoreid);
	assert(!ret);
	ret = allocate_transition_tls(vcoreid);
	assert(!ret);

	vcpd->ev_mbox_public.type = EV_MBOX_UCQ;
	ucq_init_raw(&vcpd->ev_mbox_public.ucq,
	             mmap_block + 0 * PGSIZE,
	             mmap_block + 1 * PGSIZE);
	vcpd->ev_mbox_private.type = EV_MBOX_UCQ;
	ucq_init_raw(&vcpd->ev_mbox_private.ucq,
	             mmap_block + 2 * PGSIZE,
	             mmap_block + 3 * PGSIZE);

	/* Set the lowest level entry point for each vcore. */
	vcpd->vcore_entry = (uintptr_t)__kernel_vcore_entry;
}
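Per the header comment, the caller supplies the block of UCQ pages. A hedged sketch of such a caller, assuming the same four-pages-per-vcore layout as the earlier examples (prep_all_vcores is a hypothetical name); one bulk mmap avoids the O(n) syscalls the comment mentions:

static void prep_all_vcores(void)
{
	uintptr_t block = (uintptr_t)mmap(0, PGSIZE * 4 * max_vcores(),
	                                  PROT_WRITE | PROT_READ,
	                                  MAP_POPULATE | MAP_ANONYMOUS, -1, 0);

	assert(block);
	for (int i = 0; i < max_vcores(); i++)
		__prep_vcore(i, block + 4 * i * PGSIZE);
}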