Example #1
void
start(void)
{
	int i;

	for (i = 0; i < RUNCOUNT; i++) {
		#ifndef __EXERCISE_8__
		/* EXERCISE 6: use a system call to print characters *****************/
		sys_print(PRINTCHAR);
		#endif

		#ifdef __EXERCISE_8__
		/* EXERCISE 8: use a lock to prevent race conditions *****************/
		// spinlock until we obtain write lock
		while (atomic_swap(&spinlock, 1) != 0)
			continue;
		// write
		*cursorpos++ = PRINTCHAR;

		// release write lock
		atomic_swap(&spinlock, 0);
		#endif

		sys_yield();
	}

	// Done; exit (loop in case the system call returns).
	while (1)
		sys_exit(0);
}
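The exercise code above treats atomic_swap as an atomic exchange that returns the previous value: a return of 0 means the lock was free and now belongs to us. A standalone sketch of the same test-and-set pattern using C11 <stdatomic.h> (the spinlock_t/spin_acquire/spin_release names are illustrative, not part of the exercise code):

#include <stdatomic.h>

typedef atomic_int spinlock_t;

static void spin_acquire(spinlock_t *l)
{
	/* atomic_exchange returns the old value, like atomic_swap above:
	 * 0 means the lock was free and we now hold it. */
	while (atomic_exchange(l, 1) != 0)
		continue;
}

static void spin_release(spinlock_t *l)
{
	atomic_store(l, 0);	/* hand the lock back */
}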
Example #2
void
start(void)
{
	int i;

	proc_priority(PRIORITYCHECK);
	proc_share(PRIORITYCHECK);
	sys_yield();

	for (i = 0; i < RUNCOUNT; i++) {
#ifdef USE_SYSTEM_SYNC
		// use a safe system call to print a character and avoid a race condition
		proc_print(PRINTCHAR);
#else
		// use atomic_swap to get a lock
		while (atomic_swap(&lock, 1) != 0) {
			continue;
		}
		// Write characters to the console, yielding after each one.
		*cursorpos++ = PRINTCHAR;

		// use atomic_swap to release lock
		atomic_swap(&lock, 0);
#endif
		sys_yield();
	}

	// Yield forever.
	// while (1)
	// 	sys_yield();
	sys_exit(0);
}
Example #3
void
start(void)
{
	int i;

	for (i = 0; i < RUNCOUNT; i++) {
		// Write characters to the console, yielding after each one.
		
		#ifndef __EXERCISE_8__
			// (exercise 6 code)
			sys_print(PRINTCHAR);

		#else
			// (exercise 8 code)
			// Implemented spinlock to remove race condition of printing for each process.
			while (atomic_swap(&spin_lock, 1) != 0)
				continue;

			*cursorpos++ = PRINTCHAR;
			atomic_swap(&spin_lock, 0);
		#endif

		sys_yield();
	}
	sys_exit(0);
	// // Yield forever.
	// while (1)
	// 	sys_yield();
}
Example #4
void
start(void)
{

	sys_priority(PRIORITY);
	sys_share(SHARE);
	sys_yield();

	int i;

	for (i = 0; i < RUNCOUNT; i++) {
		// Write characters to the console, yielding after each one.
		//*cursorpos++ = PRINTCHAR;
		//call system call instead
		#ifndef __EXERCISE_8__
		sys_print(PRINTCHAR);
		#endif

		#ifdef __EXERCISE_8__
		while (atomic_swap(&spin_lock, 1) != 0) { // spin until we acquire the lock
			continue;
		}
		*cursorpos++ = PRINTCHAR;
		atomic_swap(&spin_lock, 0); // release the lock
		#endif

		sys_yield();
	}

	// Yield forever.
	while (1)
		//sys_yield();
		sys_exit(0);
}
Example #5
void
start(void)
{
	int i;
	sys_share(SHARE);
	sys_priority(PRIORITY);
	#ifdef __EXERCISE_4A__ 
	sys_yield();//for 4a
	#endif
	for (i = 0; i < RUNCOUNT; i++) {
		// Write characters to the console, yielding after each one.
		//cursorpos++ = PRINTCHAR;
		//atomic_swap(uint32_t *addr, uint32_t val);
		#ifdef __EXERCISE_8__  // exercise 8 sync method
			sys_atomic_print(PRINTCHAR);
		#else // exercise 6 sync method
		while (atomic_swap(&lock, 1) != 0)
		{
			// a return value of 0 means we got the lock
			continue;
		}
		*cursorpos++ = PRINTCHAR;
		// release the lock
		atomic_swap(&lock, 0);
		#endif
		sys_yield();
	}

	// Done; exit.
	sys_exit(0);
}
Example #6
void
start(void)
{
	int i;
	
	sys_set_priority(__PRIORITY__);
	sys_set_share(__SHARE__);
	sys_set_lottery_tickets(__LOTTERY_TICKETS__);

	for (i = 0; i < RUNCOUNT; i++) {
		// Write characters to the console, yielding after each one.

		#ifdef __PRINT_METHOD_LOCK__

		while(atomic_swap(&lock, 1) != 0) 
			continue;
		*cursorpos++ = PRINTCHAR;
		atomic_swap(&lock, 0);
		
		#else
		
		sys_atomic_print(PRINTCHAR);

		#endif

		sys_yield();
	}

	// Yield forever.
	//while (1)
	//	sys_yield();
	sys_exit(0);
}
Example #7
void
start(void)
{
	int i;

	for (i = 0; i < RUNCOUNT; i++) {
		// Write characters to the console, yielding after each one.
		//*cursorpos++ = PRINTCHAR;

		// Atomic syscall version
		#ifdef __EXERCISE_6__
		sys_print(PRINTCHAR);
		#endif

		// Atomic lock version
		#ifdef __EXERCISE_8__
		while (atomic_swap(&lock, 1) != 0)
			continue;

		*cursorpos++ = PRINTCHAR;
		atomic_swap(&lock, 0);
		#endif

		sys_yield();
	}

	/*// Yield forever.
	while (1)
		sys_yield();*/

	sys_exit(0);
}
Example #8
/* Returns -1 with errno set on error, or 0 on success.  This does not return
 * the number of cores actually granted (though some parts of the kernel do
 * internally).
 *
 * This tries to get "more vcores", based on the number we currently have.
 * We'll probably need smarter 2LSs in the future that just directly set
 * amt_wanted.  What happens is we can have a bunch of 2LS vcore contexts
 * trying to get "another vcore", which currently means more than num_vcores().
 * If you have someone ask for two more, and then someone else ask for one more,
 * how many you ultimately ask for depends on if the kernel heard you and
 * adjusted num_vcores in between the two calls.  Or maybe your amt_wanted
 * already was num_vcores + 5, so neither call is telling the kernel anything
 * new.  It comes down to "one more than I have" vs "one more than I've already
 * asked for".
 *
 * So for now, this will keep the older behavior (one more than I have).  It
 * will try to accumulate any concurrent requests, and adjust amt_wanted up.
 * Interleaving, repetitive calls (everyone asking for one more) may get
 * ignored.
 *
 * Note this doesn't block or anything (even though the minimum number
 * requested is 1), since the kernel won't block the call.
 *
 * There are a few concurrency concerns.  We have _max_vcores_ever_wanted,
 * initialization of new vcore stacks/TLSs, making sure we don't ask for too
 * many (minor point), and most importantly not asking the kernel for too much
 * or otherwise miscommunicating our desires to the kernel.  Remember, the
 * kernel wants just one answer from the process about what it wants, and it is
 * up to the process to figure that out.
 *
 * So we basically have one thread do the submitting/prepping/bookkeeping, and
 * other threads that come in just update the number wanted and make sure someone
 * is sorting things out.  This will perform a bit better too, since only one
 * vcore makes syscalls (which hammer the proc_lock).  This essentially has
 * cores submit work, and one core does the work (like Eric's old delta
 * functions).
 *
 * There's a slight semantic change: this will return 0 (success) for the
 * non-submitters, and 0 if we submitted.  -1 only if the submitter had some
 * non-kernel failure.
 *
 * Also, beware that this (like the old version) doesn't protect against races on
 * num_vcores().  num_vcores() is how many you have now or very soon (accounting
 * for messages in flight that will take your cores), not how many you told the
 * kernel you want. */
int vcore_request(long nr_new_vcores)
{
	long nr_to_prep_now, nr_vcores_wanted;

	assert(vc_initialized);
	/* Early sanity checks */
	if ((nr_new_vcores < 0) || (nr_new_vcores + num_vcores() > max_vcores()))
		return -1;	/* consider ERRNO */
	/* Post our desires (ROS atomic_add() conflicts with glibc) */
	atomic_fetch_and_add(&nr_new_vcores_wanted, nr_new_vcores);
try_handle_it:
	cmb();	/* inc before swap.  the atomic is a CPU mb() */
	if (atomic_swap(&vc_req_being_handled, 1)) {
		/* We got a 1 back, so someone else is already working on it */
		return 0;
	}
	/* So now we're the ones supposed to handle things.  This does things in the
	 * "increment based on the number we have", vs "increment on the number we
	 * said we want".
	 *
	 * Figure out how many we have, though this is racy.  Yields/preempts/grants
	 * will change this over time, and we may end up asking for less than we
	 * had. */
	nr_vcores_wanted = num_vcores();
	/* Pull all of the vcores wanted into our local variable, where we'll deal
	 * with prepping/requesting that many vcores.  Keep doing this til we think
	 * no more are wanted. */
	while ((nr_to_prep_now = atomic_swap(&nr_new_vcores_wanted, 0))) {
		nr_vcores_wanted += nr_to_prep_now;
		/* Don't bother prepping or asking for more than we can ever get */
		nr_vcores_wanted = MIN(nr_vcores_wanted, max_vcores());
		/* Make sure all we might ask for are prepped */
		for (long i = _max_vcores_ever_wanted; i < nr_vcores_wanted; i++) {
			if (allocate_transition_stack(i) || allocate_transition_tls(i)) {
				atomic_set(&vc_req_being_handled, 0);	/* unlock and bail out*/
				return -1;
			}
			_max_vcores_ever_wanted++;	/* done in the loop to handle failures*/
		}
	}
	cmb();	/* force a reread of num_vcores() */
	/* Update amt_wanted if we now want *more* than what the kernel already
	 * knows.  See notes in the func doc. */
	if (nr_vcores_wanted > __procdata.res_req[RES_CORES].amt_wanted)
		__procdata.res_req[RES_CORES].amt_wanted = nr_vcores_wanted;
	/* If num_vcores isn't what we want, we can poke the ksched.  Due to some
	 * races with yield, our desires may be old.  Not a big deal; any vcores
	 * that pop up will just end up yielding (or get preempt messages.)  */
	if (nr_vcores_wanted > num_vcores())
		sys_poke_ksched(0, RES_CORES);	/* 0 -> poke for ourselves */
	/* Unlock, (which lets someone else work), and check to see if more work
	 * needs to be done.  If so, we'll make sure it gets handled. */
	atomic_set(&vc_req_being_handled, 0);	/* unlock, to allow others to try */
	wrmb();
	/* check for any that might have come in while we were out */
	if (atomic_read(&nr_new_vcores_wanted))
		goto try_handle_it;
	return 0;
}
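A minimal caller-side sketch, using only the vcore_request() signature shown above; the helper name and the "one more vcore when work piles up" policy are illustrative assumptions, not Akaros code:

#include <stdio.h>

/* Hypothetical 2LS helper: ask the kernel for one more vcore when there are
 * more runnable uthreads than vcores.  Only vcore_request() comes from the
 * function above; everything else here is made up for the sketch. */
static void maybe_request_vcore(int nr_runnable, int nr_vcores_now)
{
	if (nr_runnable <= nr_vcores_now)
		return;
	/* Non-blocking: 0 means we submitted or someone else is already
	 * handling requests; -1 means the submitter hit a non-kernel failure. */
	if (vcore_request(1) < 0)
		fprintf(stderr, "vcore_request failed\n");
}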
Example #9
static client_stats_t *
shared_memory_init(void)
{
    bool is_NT = is_windows_NT();
    int num;
    int pos;
    /* We do not want to rely on the registry.
     * Instead, a piece of shared memory with the key base name holds the
     * total number of stats instances.
     */
    shared_map_count =
        CreateFileMappingW(INVALID_HANDLE_VALUE, NULL,
                           PAGE_READWRITE, 0, sizeof(client_stats_t),
                           is_NT ? CLIENT_SHMEM_KEY_NT_L : CLIENT_SHMEM_KEY_L);
    DR_ASSERT(shared_map_count != NULL);
    shared_view_count =
        MapViewOfFile(shared_map_count, FILE_MAP_READ|FILE_MAP_WRITE, 0, 0, 0);
    DR_ASSERT(shared_view_count != NULL);
    shared_count = (int *) shared_view_count;
    /* ASSUMPTION: memory is initialized to 0!
     * otherwise our protocol won't work
     * it's hard to build a protocol to initialize it to 0 -- if you want
     * to add one, feel free, but make sure it's correct
     */
    do {
        pos = (int) atomic_swap(shared_count, (uint) -1);
        /* if get -1 back, someone else is looking at it */
    } while (pos == -1);
    /* now increment it */
    atomic_swap(shared_count, pos+1);

    num = 0;
    while (1) {
        _snwprintf(shared_keyname, KEYNAME_MAXLEN, L"%s.%03d",
                   is_NT ? CLIENT_SHMEM_KEY_NT_L : CLIENT_SHMEM_KEY_L, num);
        shared_map = CreateFileMappingW(INVALID_HANDLE_VALUE, NULL,
                                        PAGE_READWRITE, 0,
                                        sizeof(client_stats_t),
                                        shared_keyname);
        if (shared_map != NULL && GetLastError() == ERROR_ALREADY_EXISTS) {
            dr_close_file(shared_map);
            shared_map = NULL;
        }
        if (shared_map != NULL)
            break;
        num++;
    }
    dr_log(NULL, LOG_ALL, 1, "Shared memory key is: \"%S\"\n", shared_keyname);
#ifdef SHOW_RESULTS
    dr_fprintf(STDERR, "Shared memory key is: \"%S\"\n", shared_keyname);
#endif
    shared_view = MapViewOfFile(shared_map, FILE_MAP_READ|FILE_MAP_WRITE, 0, 0, 0);
    DR_ASSERT(shared_view != NULL);
    return (client_stats_t *) shared_view;
}
Example #10
static struct chan *consopen(struct chan *c, int omode)
{
    c->aux = NULL;
    c = devopen(c, omode, consdir, ARRAY_SIZE(consdir), devgen);
    switch ((uint32_t) c->qid.path) {
    case Qconsctl:
        kref_get(&kbd.ctl, 1);
        break;

    case Qkprint:
        if (atomic_swap(&kprintinuse, 1) != 0) {
            c->flag &= ~COPEN;
            error(EADDRINUSE, "kprintinuse lock failed");
        }
        if (kprintoq == NULL) {
            kprintoq = qopen(8 * 1024, Qcoalesce, 0, 0);
            if (kprintoq == NULL) {
                c->flag &= ~COPEN;
                error(ENOMEM, "Can't allocate kprintoq");
            }
            qdropoverflow(kprintoq, 1);
        } else
            qreopen(kprintoq);
        c->iounit = qiomaxatomic;
        break;
    }
    return c;
}
Example #11
static void _append_to(tslist_t *list, tslist_elem_t *head,
                       tslist_elem_t *tail)
{
    /* Atomically publish the new tail; the swap hands back the previous tail,
     * so concurrent appenders each get a distinct predecessor to link from. */
    tslist_elem_t *prev = (tslist_elem_t *)atomic_swap((uint64_t*)&list->tail, (uint64_t)tail);
    atomic_wmb();
    /* Link the old tail to the newly appended chain [head .. tail]. */
    prev->next = head;
}
Example #12
static void
shared_memory_exit(void)
{
    int pos;
    stats->exited = true;
    /* close down statistics */
    UnmapViewOfFile(shared_view);
    dr_close_file(shared_map);
    /* decrement count, then unmap */
    do {
        pos = atomic_swap(shared_count, (uint) -1);
        /* if get -1 back, someone else is looking at it */
    } while (pos == -1);
    /* now write back the decremented count */
    atomic_swap(shared_count, pos-1);
    UnmapViewOfFile(shared_view_count);
    CloseHandle(shared_map_count);
}
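The swap-to--1 trick used in Example #9 and Example #12 is a tiny claim/release protocol on a shared integer. A standalone sketch, assuming the same atomic_swap(addr, value) helper used above (the function names are illustrative):

/* Claim exclusive use of a shared counter by swapping in a sentinel (-1).
 * A reader that gets -1 back knows another thread is mid-update and retries. */
static int counter_claim(int *counter)
{
    int val;
    do {
        val = (int) atomic_swap(counter, (uint) -1);
        /* -1 back means someone else currently holds the counter */
    } while (val == -1);
    return val;     /* caller now owns the counter, which holds -1 */
}

static void counter_release(int *counter, int new_val)
{
    /* publish the updated value, releasing ownership */
    atomic_swap(counter, (uint) new_val);
}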
Example #13
uintptr_t mcall_clear_ipi(void)
{
	// only clear SSIP if no other events are pending
	if (HLS()->device_response_queue_head == NULL) {
		clear_csr(mip, MIP_SSIP);
		mb();
	}

	return atomic_swap(&HLS()->ipi_pending, 0);
}
Example #14
void
start(void)
{
        int i;

	sys_priority(PRIORITY);
	sys_proportion(PROPORTION);

	for (i = 0; i < RUNCOUNT; i++) {
		// Write characters to the console, yielding after each one.
#if CURRENT_PART == 1
		*cursorpos++ = PRINTCHAR;
#endif

#if CURRENT_PART == 2
		// first solution for synchronization
#ifndef EXTRA
		while (atomic_swap(&lock, 1))
			continue;
		*cursorpos++ = PRINTCHAR;
		atomic_swap(&lock, 0);
#endif

		// second solution for synchronization
#ifdef EXTRA
		sys_print((uint16_t)PRINTCHAR);
#endif
#endif

		sys_yield();
	}

	sys_exit(0);
	/*
	// Yield forever.
	while (1)
		sys_yield();
	*/
}
Example #15
uintptr_t mcall_send_ipi(uintptr_t recipient)
{
	//if (recipient >= num_harts)
	//return -1;

	if (atomic_swap(&OTHER_HLS(recipient)->ipi_pending, 1) == 0) {
		mb();
		write_csr(send_ipi, recipient);
	}

	return 0;
}
Example #16
static inline void free_kmalloc_pages( struct page_descriptor *page, unsigned long order, int dma )
{
	if ( !dma && order < MAX_CACHE_ORDER )
	{
		page = ( struct page_descriptor * )atomic_swap( ( int * )( kmalloc_cache + order ), ( int )page );
		if ( !page )
		{
			return;
		}
	}
	atomic_sub( &g_sSysBase.ex_nKernelMemPages, ( 1 << order ) );

	free_pages( ( unsigned long )page, ( 1 << order ) );
}
Example #17
void
start(void)
{
	sys_priority(PRIORITY);
	sys_share(SHARE);

	int i;

	for (i = 0; i < RUNCOUNT; i++) {

		#ifndef __EXERCISE_8__

		// Get lock
		while (atomic_swap(&lock, 1) != 0)
			continue;

		// Print char
		*cursorpos++ = PRINTCHAR;

		// Release lock
		atomic_swap(&lock, 0);

		// Yield after printing
		sys_yield();

		#else

		// Make a system call to print the character
		sys_printchar(PRINTCHAR);

		#endif
	}

	// Done; exit (loop in case the system call returns).
	while (1)
		sys_exit(0);
}
Example #18
/* Helper, extracts a message from a ceq[idx], returning TRUE if there was a
 * message.  Note that there might have been nothing in the message (coal == 0).
 * still, that counts; it's more about idx_posted.  A concurrent reader could
 * have swapped out the coal contents (imagine two consumers, each gets past the
 * idx_posted check).  If having an "empty" coal is a problem, then higher level
 * software can ask for another event.
 *
 * Implied in all of that is that idx_posted is also racy.  The consumer blindly
 * sets it to false.  So long as it extracts coal after doing so, we're fine. */
static bool extract_ceq_msg(struct ceq *ceq, int32_t idx, struct event_msg *msg)
{
	struct ceq_event *ceq_ev = &ceq->events[idx];
	if (!ceq_ev->idx_posted)
		return FALSE;
	/* Once we clear this flag, any new coalesces will trigger another ring
	 * event, so we don't need to worry about missing anything.  It is possible
	 * that this CEQ event will get those new coalesces as part of this message,
	 * and future messages will have nothing.  That's fine. */
	ceq_ev->idx_posted = FALSE;
	cmb();	/* order the read after the flag write.  swap provides cpu_mb */
	/* We extract the existing coals and reset the collection to 0; now the
	 * collected events are in our msg. */
	msg->ev_arg2 = atomic_swap(&ceq_ev->coalesce, 0);
	/* if the user wants access to user_data, they can peek in the event array
	 * via ceq->events[msg->ev_type].user_data. */
	msg->ev_type = idx;
	msg->ev_arg3 = (void*)ceq_ev->blob_data;
	ceq_ev->blob_data = 0;	/* racy, but there are no blob guarantees */
	return TRUE;
}
Example #19
int pthread_cond_wait(pthread_cond_t *c, pthread_mutex_t *m)
{
  int old_waiter = c->next_waiter;
  int my_waiter = c->next_waiter;
  
  //allocate a slot
  while (atomic_swap (& (c->in_use[my_waiter]), SLOT_IN_USE) == SLOT_IN_USE)
  {
    my_waiter = (my_waiter + 1) % MAX_PTHREADS;
    assert (old_waiter != my_waiter);  // do not want to wrap around
  }
  c->waiters[my_waiter] = WAITER_WAITING;
  c->next_waiter = (my_waiter+1) % MAX_PTHREADS;  // race on next_waiter but ok, because it is advisory

  pthread_mutex_unlock(m);

  volatile int* poll = &c->waiters[my_waiter];
  while(*poll);
  c->in_use[my_waiter] = SLOT_FREE;
  pthread_mutex_lock(m);

  return 0;
}
Example #20
File: poke.c  Project: brho/akaros
/* This is the 'post (work) and poke' style of sync.  We make sure the poke
 * tracker's function runs.  Once this returns, the func either has run or is
 * currently running (in case someone else is running now).  We won't wait or
 * spin or anything, and it is safe to call this recursively (deeper in the
 * call-graph).
 *
 * It's up to the caller to somehow post its work.  We'll also pass arg to the
 * func, ONLY IF the caller is the one to execute it - so there's no guarantee
 * the func(specific_arg) combo will actually run.  It's more for info
 * purposes/optimizations/etc.  If no one uses it, I'll get rid of it. */
void poke(struct poke_tracker *tracker, void *arg)
{
	atomic_set(&tracker->need_to_run, TRUE);
	/* will need to repeatedly do it if someone keeps posting work */
	do {
		 * want a wrmb() btw posting work/need_to_run and in_progress.
		 * the swap provides the HW mb. just need a cmb, which we do in
		 * the loop to cover the iterations (even though i can't imagine
		 * the compiler reordering the check it needed to do for the
		 * branch).. */
		cmb();
		/* poke / make sure someone does it.  if we get a TRUE (1) back,
		 * someone is already running and will deal with the posted
		 * work.  (probably on their next loop).  if we got a 0 back, we
		 * won the race and have the 'lock'. */
		if (atomic_swap(&tracker->run_in_progress, TRUE))
			return;
		/* if we're here, then we're the one who needs to run the func.
		 * */
		/* clear the 'need to run', since we're running it now.  new
		 * users will set it again.  this write needs to be wmb()'d
		 * after in_progress.  the swap provided the HW mb(). */
		cmb();
		/* no internal HW mb */
		atomic_set(&tracker->need_to_run, FALSE);
		/* run the actual function.  the poke sync makes sure only one
		 * caller is in that func at a time. */
		assert(tracker->func);
		tracker->func(arg);
		/* ensure the in_prog write comes after the run_again. */
		wmb();
		/* no internal HW mb */
		atomic_set(&tracker->run_in_progress, FALSE);
		/* in_prog write must come before run_again read */
		wrmb();
	} while (atomic_read(&tracker->need_to_run));
}
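An illustrative "post work, then poke" caller, assuming only the poke() signature above; the work list, its deliberately simplified (non-atomic) push and drain, and the tracker setup are assumptions for the sketch, not code from poke.c:

#include <stddef.h>

struct work {
	struct work *next;
	void (*fn)(void *data);
	void *data;
};

static struct work *posted_work;	/* posted by submitters, drained below */
static struct poke_tracker work_poker;	/* assume its func was set to drain_work */

static void drain_work(void *arg)
{
	/* The poke sync guarantees at most one caller is in here at a time, so
	 * draining needs no lock against other drainers.  (A real version would
	 * pop the list atomically; this sketch glosses over that.) */
	for (struct work *w = posted_work; w; w = w->next)
		w->fn(w->data);
	posted_work = NULL;
}

static void submit_work(struct work *w)
{
	/* Post the work first... */
	w->next = posted_work;
	posted_work = w;
	/* ...then make sure drain_work() runs, without blocking or spinning. */
	poke(&work_poker, NULL);
}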
Example #21
File: ucq.c  Project: brho/akaros
/* Consumer side, returns TRUE on success and fills *msg with the ev_msg.  If
 * the ucq appears empty, it will return FALSE.  Messages may have arrived after
 * we started getting that we do not receive. */
bool get_ucq_msg(struct ucq *ucq, struct event_msg *msg)
{
	uintptr_t my_idx;
	struct ucq_page *old_page, *other_page;
	struct msg_container *my_msg;
	struct spin_pdr_lock *ucq_lock = (struct spin_pdr_lock*)(&ucq->u_lock);

	do {
loop_top:
		cmb();
		my_idx = atomic_read(&ucq->cons_idx);
		/* The ucq is empty if the consumer and producer are on the same
		 * 'next' slot. */
		if (my_idx == atomic_read(&ucq->prod_idx))
			return FALSE;
		/* Is the slot we want good?  If not, we're going to need to try
		 * and move on to the next page.  If it is, we bypass all of
		 * this and try to CAS on us getting my_idx. */
		if (slot_is_good(my_idx))
			goto claim_slot;
		/* Slot is bad, let's try and fix it */
		spin_pdr_lock(ucq_lock);
		/* Reread the idx, in case someone else fixed things up while we
		 * were waiting/fighting for the lock */
		my_idx = atomic_read(&ucq->cons_idx);
		if (slot_is_good(my_idx)) {
			/* Someone else fixed it already, let's just try to get
			 * out */
			spin_pdr_unlock(ucq_lock);
			/* Make sure this new slot has a producer (ucq isn't
			 * empty) */
			if (my_idx == atomic_read(&ucq->prod_idx))
				return FALSE;
			goto claim_slot;
		}
		/* At this point, the slot is bad, and all other possible
		 * consumers are spinning on the lock.  Time to fix things up:
		 * Set the counter to the next page, and free the old one. */
		/* First, we need to wait and make sure the kernel has posted
		 * the next page.  Worst case, we know that the kernel is
		 * working on it, since prod_idx != cons_idx */
		old_page = (struct ucq_page*)PTE_ADDR(my_idx);
		while (!old_page->header.cons_next_pg)
			cpu_relax();
		/* Now set the counter to the next page */
		assert(!PGOFF(old_page->header.cons_next_pg));
		atomic_set(&ucq->cons_idx, old_page->header.cons_next_pg);
		/* Side note: at this point, any *new* consumers coming in will
		 * grab slots based off the new counter index (cons_idx) */
		/* Now free up the old page.  Need to make sure all other
		 * consumers are done.  We spin til enough are done, like an
		 * inverted refcnt. */
		while (atomic_read(&old_page->header.nr_cons) < NR_MSG_PER_PAGE)
		{
			/* spinning on userspace here, specifically, another
			 * vcore and we don't know who it is.  This will spin a
			 * bit, then make sure they aren't preempted */
			cpu_relax_any();
		}
		/* Now the page is done.  0 its metadata and give it up. */
		old_page->header.cons_next_pg = 0;
		atomic_set(&old_page->header.nr_cons, 0);
		/* We want to "free" the page.  We'll try and set it as the
		 * spare.  If there is already a spare, we'll free that one. */
		other_page = (struct ucq_page*)atomic_swap(&ucq->spare_pg,
		                                           (long)old_page);
		assert(!PGOFF(other_page));
		if (other_page) {
			munmap(other_page, PGSIZE);
			atomic_dec(&ucq->nr_extra_pgs);
		}
		/* All fixed up, unlock.  Other consumers may lock and check to
		 * make sure things are done. */
		spin_pdr_unlock(ucq_lock);
		/* Now that everything is fixed, try again from the top */
		goto loop_top;
claim_slot:
		cmb();	/* so we can goto claim_slot */
		/* If we're still here, my_idx is good, and we'll try to claim
		 * it.  If we fail, we need to repeat the whole process. */
	} while (!atomic_cas(&ucq->cons_idx, my_idx, my_idx + 1));
	assert(slot_is_good(my_idx));
	/* Now we have a good slot that we can consume */
	my_msg = slot2msg(my_idx);
	/* linux would put an rmb_depends() here */
	/* Wait til the msg is ready (kernel sets this flag) */
	while (!my_msg->ready)
		cpu_relax();
	rmb();	/* order the ready read before the contents */
	/* Copy out */
	*msg = my_msg->ev_msg;
	/* Unset this for the next usage of the container */
	my_msg->ready = FALSE;
	wmb();	/* post the ready write before incrementing */
	/* Increment nr_cons, showing we're done */
	atomic_inc(&((struct ucq_page*)PTE_ADDR(my_idx))->header.nr_cons);
	return TRUE;
}
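A small consumer-loop sketch using only get_ucq_msg()'s signature above; the handler callback is a placeholder supplied by the caller:

/* Drain a ucq, handing each message to the caller's handler.  Messages that
 * arrive after the queue looks empty are left for the next pass (see the
 * comment above get_ucq_msg()). */
static void drain_ucq(struct ucq *ucq, void (*handler)(struct event_msg *msg))
{
	struct event_msg msg;

	while (get_ucq_msg(ucq, &msg))
		handler(&msg);
}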
Example #22
/*
 * Ugh, this is ugly, but we want the default case to run
 * straight through, which is why we have the ugly goto's
 */
void *kmalloc( size_t size, int priority )
{
	unsigned long flags;
	unsigned long type;
	int order, dma;
	struct block_header *p;
	struct page_descriptor *page, **pg;
	struct size_descriptor *bucket = sizes;

	if ( CURRENT_THREAD != NULL && CURRENT_THREAD->tr_nNumLockedCacheBlocks > 0 && ( priority & MEMF_NOBLOCK ) == 0 )
	{
		//printk( "Error: kmalloc() attempt to alloc memory while holding %d cache blocks locked. Could may lead to deadlock\n", CURRENT_THREAD->tr_nNumLockedCacheBlocks );
		//trace_stack( 0, NULL );
	}
	/* Get order */
	order = 0;
	{
		unsigned int realsize = size + sizeof( struct block_header );

		// kmalloc() is inefficient for allocations >= 128K
		//if ( realsize > BLOCKSIZE( 12 ) )
		//{
		//	printk( "Warning: kmalloc() of oversized block (%d bytes). Could cause fragmentation problems\n", size );
		//	trace_stack( 0, NULL );
		//}

		for ( ;; )
		{
			int ordersize = BLOCKSIZE( order );

			if ( realsize <= ordersize )
				break;
			order++;
			bucket++;
			if ( ordersize )
				continue;
			printk( "kmalloc of too large a block (%d bytes).\n", ( int )size );
			return NULL;
		}
	}

	dma = 0;
	type = MF_USED;
	pg = &bucket->firstfree;

#ifndef __ATHEOS__
	if ( priority & GFP_DMA )
	{
		dma = 1;
		type = MF_DMA;
		pg = &bucket->dmafree;
	}
#endif

/* Sanity check... */

	flags = spinlock_disable( &g_sMemSpinLock );
	page = *pg;
	if ( !page )
		goto no_bucket_page;

	p = page->firstfree;

	if ( p->bh_flags != MF_FREE )
		goto not_free_on_freelist;

      found_it:
	page->firstfree = p->bh_next;

	page->nfree--;
	if ( !page->nfree )
		*pg = page->next;

	spinunlock_enable( &g_sMemSpinLock, flags );
	bucket->nmallocs++;
	bucket->nbytesmalloced += size;
	p->bh_flags = type;	/* As of now this block is officially in use */
	p->bh_length = size;

	memset( p +1, 0, size );

	atomic_add( &g_sSysBase.ex_nKernelMemSize, size );
	return ( p +1 );	/* Pointer arithmetic: increments past header */


      no_bucket_page:
	/*
	 * If we didn't find a page already allocated for this
	 * bucket size, we need to get one..
	 *
	 * This can be done with ints on: it is private to this invocation
	 */
	spinunlock_enable( &g_sMemSpinLock, flags );

	{
		int i, sz;

		/* sz is the size of the blocks we're dealing with */
		sz = BLOCKSIZE( order );

		page = get_kmalloc_pages( priority, bucket->gfporder, dma );
		if ( !page )
			goto no_free_page;
	      found_cached_page:
		bucket->npages++;

		page->order = order;
		/* Loop for all but last block: */
		i = ( page->nfree = bucket->nblocks ) - 1;
		p = BH( page + 1 );

		while ( i > 0 )
		{
			i--;
			p->bh_flags = MF_FREE;
			p->bh_next = BH( ( ( long )p )+sz );
			p = p->bh_next;
		}
		/* Last block: */
		p->bh_flags = MF_FREE;
		p->bh_next = NULL;

		p = BH( page + 1 );
	}

	/*
	 * Now we're going to muck with the "global" freelist
	 * for this size: this should be uninterruptible
	 */
	flags = spinlock_disable( &g_sMemSpinLock );
	page->next = *pg;
	*pg = page;
	goto found_it;


      no_free_page:
	/*
	 * No free pages, check the kmalloc cache of
	 * pages to see if maybe we have something available
	 */
	if ( !dma && order < MAX_CACHE_ORDER )
	{
		page = ( struct page_descriptor * )atomic_swap( ( int * )( kmalloc_cache + order ), ( int )page );
		if ( page )
		{
			goto found_cached_page;
		}
	}
	return NULL;


      not_free_on_freelist:
	spinunlock_enable( &g_sMemSpinLock, flags );
	printk( "Problem: block on freelist at %08lx isn't free.\n", ( long )p );
	printk( "%p\n%p\n%p\n", __builtin_return_address( 0 ), __builtin_return_address( 1 ), __builtin_return_address( 2 ) );
	return NULL;
}
Example #23
int pthread_mutex_trylock(pthread_mutex_t* m)
{
  return atomic_swap(&m->lock,1) == 0 ? 0 : EBUSY;
}
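Typical caller-side use of trylock (standard pthreads, not specific to this port): do the shared update only if the mutex is currently free, otherwise report EBUSY and move on. The helper and its arguments are illustrative:

#include <errno.h>
#include <pthread.h>

/* Returns 0 if we ran the update, EBUSY if another thread held the mutex. */
static int try_update(pthread_mutex_t *m, void (*update)(void))
{
	if (pthread_mutex_trylock(m) != 0)
		return EBUSY;	/* the lock word was already 1; skip the update */
	update();
	pthread_mutex_unlock(m);
	return 0;
}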
Example #24
int pthread_once(pthread_once_t* once_control, void (*init_routine)(void))
{
  if(atomic_swap(once_control,1) == 0)
    init_routine();
  return 0;
}