/*
 * semaphore_P(semaphore_t sem)
 *	P on the semaphore.
 */
void semaphore_P(semaphore_t sem) {
	/*
	 * Acquire (P / wait): spin-lock the semaphore, and while the count
	 * is zero, park the calling thread on the wait queue, release the
	 * lock, and block.  On wakeup, re-take the lock and re-check.
	 *
	 * NOTE(review): the lock is released *before* minithread_stop(), so
	 * a concurrent V() could call minithread_start() on this thread
	 * before it has actually stopped — confirm minithread_stop()/
	 * minithread_start() tolerate that ordering (lost-wakeup hazard
	 * otherwise).
	 */
	//Loop until we succeed
	semaphore_spinlock(&(sem->lock));
	while (sem->count == 0) {
		queue_append(sem->thread_queue, minithread_self());
		atomic_clear(&(sem->lock));
		minithread_stop();
		semaphore_spinlock(&(sem->lock));
	}

	//Got the semaphore. Decrement and break
	sem->count--;
	atomic_clear(&(sem->lock));
}
/*
 * Copy the currently pending host-event bits into *out and clear exactly
 * the bits that were reported.  Events that arrive between the snapshot
 * and the clear survive for the next query, because only the snapshot's
 * bits are cleared.  Returns the number of bytes written (4).
 */
static int host_get_next_event(uint8_t *out)
{
	const uint32_t pending = events;

	memcpy(out, &pending, sizeof(pending));
	atomic_clear(&events, pending);

	return sizeof(pending);
}
/*
 *	Atomically release the specified test-and-set lock and
 *	block the calling thread. This is a convenience function that
 *  does not add much (if any) power, but makes for more readable
 *  code and simplifies the possible system states, making it
 *  easier to reason about application correctness.
 */
int minithread_unlock_and_stop(tas_lock_t *lock) {
	/*
	 * Make "release the lock, then block" one indivisible step by
	 * disabling interrupts around both operations: nothing can run
	 * between the release and the stop on this processor.
	 */
	interrupt_level_t prev_level = set_interrupt_level(DISABLED);

	atomic_clear(lock);
	minithread_stop();

	/* Execution resumes here once another thread restarts us. */
	set_interrupt_level(prev_level);

	return 0;
}
/**
 * Clear one or more host event bits from copy B.
 *
 * @param mask          Event bits to clear (use EC_HOST_EVENT_MASK()).
 *                      Write 1 to a bit to clear it.
 */
static void host_clear_events_b(uint32_t mask)
{
	/* Log only when at least one of the requested bits is actually set. */
	const int will_change = (events_copy_b & mask) != 0;

	if (will_change)
		CPRINTS("event clear B 0x%08x", mask);

	atomic_clear(&events_copy_b, mask);
}
Example #5
0
void tcpc_alert_clear(int port)
{
	int alert_bit;

	/*
	 * The TCPM has acknowledged all Alert bits, so the Alert# line
	 * must go inactive: drop this port's flag from the shared
	 * interrupt-status word, then notify the EC.
	 */
	if (port)
		alert_bit = PD_STATUS_TCPC_ALERT_1;
	else
		alert_bit = PD_STATUS_TCPC_ALERT_0;

	atomic_clear(&ec_int_status, alert_bit);
	pd_send_ec_int();
}
/*
 * semaphore_V(semaphore_t sem)
 *	V on the semaphore.
 * If a thread is waiting to P the semaphore, 
 * wake it up.
 */
void semaphore_V(semaphore_t sem) {
	minithread_t waiting_thread;

	/* Serialize against P() with the semaphore's test-and-set lock. */
	semaphore_spinlock(&(sem->lock));
	sem->count++;
	/* If any thread is parked in P(), dequeue the oldest and restart it;
	 * it will re-check the count under the lock when it resumes. */
	if ( queue_length(sem->thread_queue) ) {
		queue_dequeue(sem->thread_queue, &waiting_thread);
		minithread_start(waiting_thread);
	}
	atomic_clear(&(sem->lock));
}
/*
 * Return the id of the lowest-numbered sensor with a pending DPTF event,
 * clearing its bit, or -1 when no events are pending.
 */
int dptf_query_next_sensor_event(void)
{
	int id;

	for (id = 0; id < TEMP_SENSOR_COUNT; id++) {
		const int mask = 1 << id;

		if (dptf_seen & mask) {	/* atomic? */
			atomic_clear(&dptf_seen, mask);
			return id;
		}
	}

	return -1;
}
Example #8
0
/*
 * Implementation of two phase barrier
 */
void sesc_barrier_init(sbarrier_t * barr)
{
  /* Arrival counter starts at zero; its storage is platform-dependent. */
#ifdef SESCAPI_NATIVE_IRIX
  barr->count = atomic_alloc_variable(atomic_reservoir, NULL);
  atomic_clear(barr->count);
#else /* !SESCAPI_NATIVE_IRIX */
  barr->count = 0;
#ifdef NOSPIN_DOSUSPEND
  /* Next free slot in the suspended-waiters table. */
  barr->waitingPos = 0;
#endif /* NOSPIN_DOSUSPEND */
#endif /* SESCAPI_NATIVE_IRIX */
  /* Global sense flag for sense-reversal; waiters spin until it flips. */
  barr->gsense = 0;
}
Example #9
0
/*
 * minithread_unlock_and_stop(tas_lock_t* lock)
 *	Atomically release the specified test-and-set lock and
 *	block the calling thread.
 */
void minithread_unlock_and_stop(tas_lock_t* lock)
{
	/*
	 * Disable interrupts so that releasing the lock and blocking the
	 * thread happen as one indivisible step.
	 */
	set_interrupt_level(DISABLED);

	atomic_clear(lock);
	minithread_stop();

	/*
	 * No re-enable needed here: minithread_stop() performs a context
	 * switch, and minithread_switch() re-enables interrupts itself.
	 */
}
/*
 * Enable or disable DPTF threshold `idx` for `sensor_id`.  Enabling arms
 * the threshold at `temp` (Kelvin) and clears any stale "seen" event for
 * the sensor; disabling marks the slot unused (-1).
 */
void dptf_set_temp_threshold(int sensor_id, int temp, int idx, int enable)
{
	CPRINTS("DPTF sensor %d, threshold %d C, index %d, %sabled",
		sensor_id, K_TO_C(temp), idx, enable ? "en" : "dis");

	if (!enable) {
		dptf_threshold[sensor_id][idx].temp = -1;
		return;
	}

	/* Don't re-initialize the condition if the slot was already armed */
	if (dptf_threshold[sensor_id][idx].temp == -1)
		cond_init(&dptf_threshold[sensor_id][idx].over, 0);
	dptf_threshold[sensor_id][idx].temp = temp;
	atomic_clear(&dptf_seen, (1 << sensor_id));
}
Example #11
0
/*
 * Initialize the GAP tester service: bring up the Bluetooth stack,
 * reset the settings word to the defaults, and hook connection
 * callbacks.  Returns a BTP status code.
 */
uint8_t tester_init_gap(void)
{
	int err = bt_enable(NULL);

	if (err < 0) {
		return BTP_STATUS_FAILED;
	}

	/* Start from a clean settings word, then set the defaults. */
	atomic_clear(&current_settings);
	atomic_set_bit(&current_settings, GAP_SETTINGS_POWERED);
	atomic_set_bit(&current_settings, GAP_SETTINGS_CONNECTABLE);
	atomic_set_bit(&current_settings, GAP_SETTINGS_BONDABLE);
	atomic_set_bit(&current_settings, GAP_SETTINGS_LE);

	bt_conn_cb_register(&conn_callbacks);

	return BTP_STATUS_SUCCESS;
}
Example #12
0
void host_clear_events(uint32_t mask)
{
	/* Only print if something's about to change */
	if (events & mask)
		CPRINTS("event clear 0x%08x", mask);

	atomic_clear(&events, mask);

#ifdef CONFIG_LPC
	lpc_set_host_event_state(events);
#else
	*(uint32_t *)host_get_memmap(EC_MEMMAP_HOST_EVENTS) = events;
#ifdef CONFIG_MKBP_EVENT
	mkbp_send_event(EC_MKBP_EVENT_HOST_EVENT);
#endif
#endif  /* !CONFIG_LPC */
}
Example #13
0
File: gap.c Project: hudkmr/zephyr
/*
 * bt_enable() completion callback: on success, reset the settings word
 * to the defaults and register connection callbacks; in every case,
 * report the outcome over BTP.
 */
static void tester_init_gap_cb(int err)
{
	uint8_t status = err ? BTP_STATUS_FAILED : BTP_STATUS_SUCCESS;

	if (!err) {
		/* Clean slate, then advertise the default capabilities. */
		atomic_clear(&current_settings);
		atomic_set_bit(&current_settings, GAP_SETTINGS_POWERED);
		atomic_set_bit(&current_settings, GAP_SETTINGS_CONNECTABLE);
		atomic_set_bit(&current_settings, GAP_SETTINGS_BONDABLE);
		atomic_set_bit(&current_settings, GAP_SETTINGS_LE);

		bt_conn_cb_register(&conn_callbacks);
	}

	tester_rsp(BTP_SERVICE_ID_CORE, CORE_REGISTER_SERVICE, BTP_INDEX_NONE,
		   status);
}
Example #14
0
/* Host command handler: report the EC interrupt status to the host. */
static int ec_status_host_cmd(struct host_cmd_handler_args *args)
{
	struct ec_response_pd_status *r = args->response;

	/*
	 * ec_int_status is used to store state for HOST_EVENT,
	 * TCPC 0 Alert, and TCPC 1 Alert bits.
	 *
	 * The snapshot into r->status must happen before the clear below,
	 * so the host sees the HOST_EVENT bit it is acknowledging.
	 */
	r->status = ec_int_status;
	args->response_size = sizeof(*r);

	/*
	 * If the source of the EC int line was HOST_EVENT, it has
	 * been acknowledged so can always clear HOST_EVENT bit
	 * from the ec_int_status variable
	 */
	atomic_clear(&ec_int_status, PD_STATUS_HOST_EVENT);

	return EC_RES_SUCCESS;
}
Example #15
0
static int entropy_nrf5_init(struct device *device)
{
	/* Driver init hook; always returns 0 (success). */

	/* Enable the RNG interrupt to be generated on the VALRDY event,
	 * but do not enable this interrupt in NVIC to be serviced.
	 * When the interrupt enters the Pending state it will set internal
	 * event (SEVONPEND is activated by kernel) and wake up the core
	 * if it was suspended by WFE. And that's enough. */
	nrf_rng_int_enable(NRF_RNG_INT_VALRDY_MASK);
	NVIC_ClearPendingIRQ(RNG_IRQn);

	/* Enable or disable bias correction */
	if (IS_ENABLED(CONFIG_ENTROPY_NRF5_BIAS_CORRECTION)) {
		nrf_rng_error_correction_enable();
	} else {
		nrf_rng_error_correction_disable();
	}

	/* Initialize the user count with zero */
	atomic_clear(&DEV_DATA(device)->user_count);

	return 0;
}
/**
 * Clear a single pending event bit.
 *
 * @param event_type    Bit index (0-31) of the event to clear.
 */
static void clear_event(uint8_t event_type)
{
	/*
	 * Shift an unsigned literal: with a plain int, `1 << 31` is
	 * undefined behavior (left shift into the sign bit); `1U << n`
	 * produces the same bit pattern without the UB.
	 */
	atomic_clear(&events, 1U << event_type);
}
Example #17
0
/*
 * Sense-reversing barrier: each arriver flips its local sense; the last
 * of num_proc arrivers resets the counter and publishes the new global
 * sense, releasing the spinners.
 */
void sesc_barrier(sbarrier_t *barr, int num_proc)
{
#ifdef NOSPIN_DOSUSPEND
  int pos;
#endif
  int lsense;
  int i=1000;  /* spin budget between yields */

  /* Local sense for this episode is the opposite of the global sense. */
  lsense = !barr->gsense;
#ifdef SESCAPI_NATIVE_IRIX
  if((atomic_fetch_and_increment(barr->count)) == num_proc-1) {
    atomic_clear(barr->count);
#else
  if(sesc_fetch_op(FetchIncOp, &barr->count, 0) == num_proc-1) {    /*count = 0;*/
    barr->count = 0;
#endif
    /* Last arriver: flip the global sense to release all waiters. */
    barr->gsense = lsense;
  } else {

#ifdef NOSPIN_DOSUSPEND
    /* IF Joe: I left this in for you!   */
    /* ELSE: DO NOT ACTIVATE THIS CODE   */
    /* Leaving this code in for updating */
    pos = sesc_fetch_op(FetchIncOp,&barr->waitingPos,0);
    if( pos < MAXLOCKWAITING ) {
      barr->waiting[pos]=sesc_self();
      sesc_suspend(sesc_self());
    }
#endif
    /* Spin until the global sense matches ours, yielding every 1000
     * iterations so other threads can make progress. */
    while(lsense != barr->gsense){
      i--;
      if( i < 0 ) {
	sesc_yield(-1);
	i=1000;
      }
    };
    
  }
  sesc_memfence((int)barr);
}

/*
 * Implementation of semaphore
 */
/* Seed the semaphore's counter; must run before any P/V operations. */
void sesc_sema_init(ssema_t *sema, int initValue)
{
  sema->count = initValue;
}

/* P (down/wait): spin until the count is positive, then decrement it. */
void sesc_psema(ssema_t *sema)
{
  int i=0;  /* starts at 0, so a blocked caller yields on its first pass */
  /* DOWN, wait() */
  while(sema->count <= 0) {
    i--;
    if( i < 0 ) {
      sesc_yield(-1);
      i=1000;  /* spin budget until the next yield */
    }
  };

  /* NOTE(review): the check above and the decrement below are not one
   * atomic step — two waiters can both pass the check and drive the
   * count negative.  Confirm callers tolerate this weak-semaphore
   * behavior. */
  sesc_fetch_op(FetchDecOp, (int *)&(sema->count), 0);

  sesc_memfence((int)sema);
}
Example #18
0
void atomic_test(void)
{
	/*
	 * Exercise every atomic_* primitive.  Each atomic operation is
	 * expected to return the value the target held *before* the
	 * operation; the target is then checked for the post-condition.
	 */
	int i;

	atomic_t target, orig;
	atomic_val_t value;
	atomic_val_t oldvalue;

	target = 4;
	value = 5;
	oldvalue = 6;

	/* atomic_cas(): fails (returns 0) on mismatch, swaps on match */
	zassert_true((atomic_cas(&target, oldvalue, value) == 0), "atomic_cas");
	target = 6;
	zassert_true((atomic_cas(&target, oldvalue, value) == 1), "atomic_cas");
	zassert_true((target == value), "atomic_cas");

	/* atomic_add() */
	target = 1;
	value = 2;
	zassert_true((atomic_add(&target, value) == 1), "atomic_add");
	zassert_true((target == 3), "atomic_add");

	/* atomic_sub() */
	target = 10;
	value = 2;
	zassert_true((atomic_sub(&target, value) == 10), "atomic_sub");
	zassert_true((target == 8), "atomic_sub");

	/* atomic_inc() */
	target = 5;
	zassert_true((atomic_inc(&target) == 5), "atomic_inc");
	zassert_true((target == 6), "atomic_inc");

	/* atomic_dec() */
	target = 2;
	zassert_true((atomic_dec(&target) == 2), "atomic_dec");
	zassert_true((target == 1), "atomic_dec");

	/* atomic_get() */
	target = 50;
	zassert_true((atomic_get(&target) == 50), "atomic_get");

	/* atomic_set() */
	target = 42;
	value = 77;
	zassert_true((atomic_set(&target, value) == 42), "atomic_set");
	zassert_true((target == value), "atomic_set");

	/* atomic_clear(): sets target to 0, returns the old value */
	target = 100;
	zassert_true((atomic_clear(&target) == 100), "atomic_clear");
	zassert_true((target == 0), "atomic_clear");

	/* atomic_or() */
	target = 0xFF00;
	value  = 0x0F0F;
	zassert_true((atomic_or(&target, value) == 0xFF00), "atomic_or");
	zassert_true((target == 0xFF0F), "atomic_or");

	/* atomic_xor() */
	target = 0xFF00;
	value  = 0x0F0F;
	zassert_true((atomic_xor(&target, value) == 0xFF00), "atomic_xor");
	zassert_true((target == 0xF00F), "atomic_xor");

	/* atomic_and() */
	target = 0xFF00;
	value  = 0x0F0F;
	zassert_true((atomic_and(&target, value) == 0xFF00), "atomic_and");
	zassert_true((target == 0x0F00), "atomic_and");


	/* atomic_nand(): ~(0xFF00 & 0x0F0F) == 0xFFFFF0FF on 32-bit */
	target = 0xFF00;
	value  = 0x0F0F;
	zassert_true((atomic_nand(&target, value) == 0xFF00), "atomic_nand");
	zassert_true((target == 0xFFFFF0FF), "atomic_nand");

	/* atomic_test_bit(): checked for every bit position */
	for (i = 0; i < 32; i++) {
		target = 0x0F0F0F0F;
		zassert_true(!!(atomic_test_bit(&target, i) == !!(target & (1 << i))),
			    "atomic_test_bit");
	}

	/* atomic_test_and_clear_bit() */
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		zassert_true(!!(atomic_test_and_clear_bit(&target, i)) == !!(orig & (1 << i)),
			    "atomic_test_and_clear_bit");
		zassert_true(target == (orig & ~(1 << i)), "atomic_test_and_clear_bit");
	}

	/* atomic_test_and_set_bit() */
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		zassert_true(!!(atomic_test_and_set_bit(&target, i)) == !!(orig & (1 << i)),
			    "atomic_test_and_set_bit");
		zassert_true(target == (orig | (1 << i)), "atomic_test_and_set_bit");
	}

	/* atomic_clear_bit() */
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		atomic_clear_bit(&target, i);
		zassert_true(target == (orig & ~(1 << i)), "atomic_clear_bit");
	}

	/* atomic_set_bit() */
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		atomic_set_bit(&target, i);
		zassert_true(target == (orig | (1 << i)), "atomic_set_bit");
	}

}
Example #19
0
void main(void)
{
	/*
	 * Self-test for the atomic operation primitives.  Every atomic op
	 * returns the pre-operation value of the target.
	 * NOTE(review): CHECK_OUTPUT/CHECK_TRUTH presumably bump `failed`
	 * on mismatch — confirm against the macro definitions.
	 */
	int failed, rv, i;

	atomic_t target, orig;
	atomic_val_t value;
	atomic_val_t oldvalue;

	failed = 0;
	TC_START("Test atomic operation primitives");

	TC_PRINT("Test atomic_cas()\n");
	target = 4;
	value = 5;
	oldvalue = 6;

	CHECK_OUTPUT(atomic_cas(&target, oldvalue, value), 0);
	target = 6;
	CHECK_OUTPUT(atomic_cas(&target, oldvalue, value), 1);
	CHECK_OUTPUT(target, value);

	TC_PRINT("Test atomic_add()\n");
	target = 1;
	value = 2;
	CHECK_OUTPUT(atomic_add(&target, value), 1);
	CHECK_OUTPUT(target, 3);

	TC_PRINT("Test atomic_sub()\n");
	target = 10;
	value = 2;
	CHECK_OUTPUT(atomic_sub(&target, value), 10);
	CHECK_OUTPUT(target, 8);

	TC_PRINT("Test atomic_inc()\n");
	target = 5;
	CHECK_OUTPUT(atomic_inc(&target), 5);
	CHECK_OUTPUT(target, 6);

	TC_PRINT("Test atomic_dec()\n");
	target = 2;
	CHECK_OUTPUT(atomic_dec(&target), 2);
	CHECK_OUTPUT(target, 1);

	TC_PRINT("Test atomic_get()\n");
	target = 50;
	CHECK_OUTPUT(atomic_get(&target), 50);

	TC_PRINT("Test atomic_set()\n");
	target = 42;
	value = 77;
	CHECK_OUTPUT(atomic_set(&target, value), 42);
	CHECK_OUTPUT(target, value);

	/* atomic_clear() zeroes the target and returns the old value */
	TC_PRINT("Test atomic_clear()\n");
	target = 100;
	CHECK_OUTPUT(atomic_clear(&target), 100);
	CHECK_OUTPUT(target, 0);

	TC_PRINT("Test atomic_or()\n");
	target = 0xFF00;
	value  = 0x0F0F;
	CHECK_OUTPUT(atomic_or(&target, value), 0xFF00);
	CHECK_OUTPUT(target, 0xFF0F);

	TC_PRINT("Test atomic_xor()\n");
	target = 0xFF00;
	value  = 0x0F0F;
	CHECK_OUTPUT(atomic_xor(&target, value), 0xFF00);
	CHECK_OUTPUT(target, 0xF00F);

	TC_PRINT("Test atomic_and()\n");
	target = 0xFF00;
	value  = 0x0F0F;
	CHECK_OUTPUT(atomic_and(&target, value), 0xFF00);
	CHECK_OUTPUT(target, 0x0F00);

	/* ~(0xFF00 & 0x0F0F) == 0xFFFFF0FF on 32-bit */
	TC_PRINT("Test atomic_nand()\n");
	target = 0xFF00;
	value  = 0x0F0F;
	CHECK_OUTPUT(atomic_nand(&target, value), 0xFF00);
	CHECK_OUTPUT(target, 0xFFFFF0FF);

	TC_PRINT("Test atomic_test_bit()\n");
	for (i = 0; i < 32; i++) {
		target = 0x0F0F0F0F;
		CHECK_TRUTH(atomic_test_bit(&target, i),
			    (target & (1 << i)));
	}

	TC_PRINT("Test atomic_test_and_clear_bit()\n");
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		CHECK_TRUTH(atomic_test_and_clear_bit(&target, i),
			    (orig & (1 << i)));
		CHECK_OUTPUT(target, orig & ~(1 << i));
	}

	TC_PRINT("Test atomic_test_and_set_bit()\n");
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		CHECK_TRUTH(atomic_test_and_set_bit(&target, i),
			    (orig & (1 << i)));
		CHECK_OUTPUT(target, orig | (1 << i));
	}

	TC_PRINT("Test atomic_clear_bit()\n");
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		atomic_clear_bit(&target, i);
		CHECK_OUTPUT(target, orig & ~(1 << i));
	}

	TC_PRINT("Test atomic_set_bit()\n");
	for (i = 0; i < 32; i++) {
		orig = 0x0F0F0F0F;
		target = orig;
		atomic_set_bit(&target, i);
		CHECK_OUTPUT(target, orig | (1 << i));
	}

	/* Summarize and report the overall pass/fail verdict. */
	if (failed) {
		TC_PRINT("%d tests failed\n", failed);
		rv = TC_FAIL;
	} else {
		rv = TC_PASS;
	}

	TC_END_RESULT(rv);
	TC_END_REPORT(rv);
}