int mb__system_property_update(prop_info* pi, const char* value, unsigned int len) {
  if (len >= PROP_VALUE_MAX) {
    return -1;
  }

  prop_area* pa = mb__system_property_area__;

  if (!pa) {
    return -1;
  }

  uint32_t serial = atomic_load_explicit(&pi->serial, memory_order_relaxed);
  serial |= 1;
  atomic_store_explicit(&pi->serial, serial, memory_order_relaxed);
  // The strlcpy call here also races.  Again pretend it
  // used memory_order_relaxed atomics, and use the analogous
  // counterintuitive fence.
  atomic_thread_fence(memory_order_release);
  strlcpy(pi->value, value, len + 1);

  atomic_store_explicit(&pi->serial, (len << 24) | ((serial + 1) & 0xffffff), memory_order_release);
  __futex_wake(&pi->serial, INT32_MAX);

  atomic_store_explicit(pa->serial(), atomic_load_explicit(pa->serial(), memory_order_relaxed) + 1,
                        memory_order_release);
  __futex_wake(pa->serial(), INT32_MAX);

  return 0;
}
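The dirty bit set on pi->serial and the release fence above only pay off on the reader side. Below is a minimal reader-side sketch (an illustration assumed for this listing, not taken from the code base) that pairs an acquire fence with the writer's release fence:

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

#define SERIAL_DIRTY(serial)     ((serial) & 1)
#define SERIAL_VALUE_LEN(serial) ((serial) >> 24)

/* Hypothetical reader: spins until the dirty bit is clear, copies the value,
 * then re-checks the serial to make sure the copy was not torn by a
 * concurrent update. */
static uint32_t read_prop_value(const prop_info* pi, char* out /* PROP_VALUE_MAX bytes */) {
    for (;;) {
        uint32_t serial = atomic_load_explicit(&pi->serial, memory_order_acquire);
        while (SERIAL_DIRTY(serial)) {
            // Writer is mid-update; reload (a real implementation could futex_wait here).
            serial = atomic_load_explicit(&pi->serial, memory_order_acquire);
        }
        uint32_t len = SERIAL_VALUE_LEN(serial);
        memcpy(out, pi->value, len + 1);
        // This copy races with the writer's strlcpy; the acquire fence mirrors the
        // writer's "counterintuitive" release fence before its copy.
        atomic_thread_fence(memory_order_acquire);
        if (serial == atomic_load_explicit(&pi->serial, memory_order_relaxed)) {
            return len;  // serial unchanged across the copy: the value is consistent
        }
    }
}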
Example #2
#include <stdatomic.h>
#include <stdlib.h>

atomic_int v;
int count;

int
main ()
{
    v = 0;
    count = 0;

    if (atomic_load_explicit (&v, memory_order_relaxed) != count++)
        abort ();
    else
        v++;

    if (atomic_load_explicit (&v, memory_order_acquire) != count++)
        abort ();
    else
        v++;

    if (atomic_load_explicit (&v, memory_order_consume) != count++)
        abort ();
    else
        v++;

    if (atomic_load_explicit (&v, memory_order_seq_cst) != count++)
        abort ();
    else
        v++;

    if (atomic_load (&v) != count)
        abort ();

    return 0;
}
bool prop_area::foreach_property(prop_bt* const trie,
                                 void (*propfn)(const prop_info* pi, void* cookie), void* cookie) {
  if (!trie) return false;

  uint_least32_t left_offset = atomic_load_explicit(&trie->left, memory_order_relaxed);
  if (left_offset != 0) {
    const bool ok = foreach_property(to_prop_bt(&trie->left), propfn, cookie);
    if (!ok) return false;
  }
  uint_least32_t prop_offset = atomic_load_explicit(&trie->prop, memory_order_relaxed);
  if (prop_offset != 0) {
    prop_info* info = to_prop_info(&trie->prop);
    if (!info) return false;
    propfn(info, cookie);
  }
  uint_least32_t children_offset = atomic_load_explicit(&trie->children, memory_order_relaxed);
  if (children_offset != 0) {
    const bool ok = foreach_property(to_prop_bt(&trie->children), propfn, cookie);
    if (!ok) return false;
  }
  uint_least32_t right_offset = atomic_load_explicit(&trie->right, memory_order_relaxed);
  if (right_offset != 0) {
    const bool ok = foreach_property(to_prop_bt(&trie->right), propfn, cookie);
    if (!ok) return false;
  }

  return true;
}
Example #4
void* p1(void *) {
  int r1 = atomic_load_explicit( &y, memory_order_relaxed );
  if( r1 == 1 ) {
    int r2 = atomic_load_explicit( &x, memory_order_relaxed );
    assert( r2 == 1);
  }
  return NULL;
}
const prop_info* prop_area::find_property(prop_bt* const trie, const char* name, uint32_t namelen,
                                          const char* value, uint32_t valuelen,
                                          bool alloc_if_needed) {
  if (!trie) return nullptr;

  const char* remaining_name = name;
  prop_bt* current = trie;
  while (true) {
    const char* sep = strchr(remaining_name, '.');
    const bool want_subtree = (sep != nullptr);
    const uint32_t substr_size = (want_subtree) ? sep - remaining_name : strlen(remaining_name);

    if (!substr_size) {
      return nullptr;
    }

    prop_bt* root = nullptr;
    uint_least32_t children_offset = atomic_load_explicit(&current->children, memory_order_relaxed);
    if (children_offset != 0) {
      root = to_prop_bt(&current->children);
    } else if (alloc_if_needed) {
      uint_least32_t new_offset;
      root = new_prop_bt(remaining_name, substr_size, &new_offset);
      if (root) {
        atomic_store_explicit(&current->children, new_offset, memory_order_release);
      }
    }

    if (!root) {
      return nullptr;
    }

    current = find_prop_bt(root, remaining_name, substr_size, alloc_if_needed);
    if (!current) {
      return nullptr;
    }

    if (!want_subtree) break;

    remaining_name = sep + 1;
  }

  uint_least32_t prop_offset = atomic_load_explicit(&current->prop, memory_order_relaxed);
  if (prop_offset != 0) {
    return to_prop_info(&current->prop);
  } else if (alloc_if_needed) {
    uint_least32_t new_offset;
    prop_info* new_info = new_prop_info(name, namelen, value, valuelen, &new_offset);
    if (new_info) {
      atomic_store_explicit(&current->prop, new_offset, memory_order_release);
    }

    return new_info;
  } else {
    return nullptr;
  }
}
Example #6
/**
 * Check whether the session has reached its idle timeout:
 * - ARP inspection is ON and the DHCP lease has expired.
 * - ARP inspection is OFF and there has been no activity for longer than the default DHCP lease time.
 * @param[in] sess Session.
 * @param[in] now Current time.
 * @return true if the session is considered idle.
 */
static inline bool overlord_sess_is_idle_timeout(struct zsession *sess, uint64_t now)
{
    if (atomic_load_explicit(&zinst()->arp.mode, memory_order_acquire)) {
        return now > atomic_load_explicit(&sess->dhcp_lease_end, memory_order_acquire);
    } else {
        return (now - zcfg()->dhcp_default_lease_time) >
               atomic_load_explicit(&sess->last_activity, memory_order_acquire);
    }
}
Example #7
void SIGTERM_handler(int sig)
{
    if( atomic_load_explicit( &guide, memory_order_relaxed) == 1)
    {
       atomic_signal_fence(memory_order_acquire);
       int d = atomic_load_explicit( &data, memory_order_relaxed);
       assert(d == 100);               // Condition fulfilled!
    // ...
    }
    _Exit(0);
}
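For the acquire signal fence above to guarantee the assert, the thread that receives the signal must publish `data` before `guide` with a matching release signal fence. A minimal producer-side sketch (illustrative; the globals are assumed to be atomic ints provided by the surrounding harness):

#include <stdatomic.h>

extern atomic_int data;   // assumed harness globals
extern atomic_int guide;

static void publish_then_expect_signal(void)
{
    atomic_store_explicit(&data, 100, memory_order_relaxed);
    // Orders the two relaxed stores with respect to a signal handler
    // running in this same thread; pairs with the acquire fence above.
    atomic_signal_fence(memory_order_release);
    atomic_store_explicit(&guide, 1, memory_order_relaxed);
}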
Example #8
static int __pthread_rwlock_timedrdlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {

  if (atomic_load_explicit(&rwlock->writer_tid, memory_order_relaxed) == __get_thread()->tid) {
    return EDEADLK;
  }

  while (true) {
    int result = __pthread_rwlock_tryrdlock(rwlock);
    if (result == 0 || result == EAGAIN) {
      return result;
    }
    result = check_timespec(abs_timeout_or_null);
    if (result != 0) {
      return result;
    }

    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__can_acquire_read_lock(old_state, rwlock->writer_nonrecursive_preferred)) {
      continue;
    }

    rwlock->pending_lock.lock();
    rwlock->pending_reader_count++;

    // We rely on the fact that all atomic exchange operations on the same object (here it is
    // rwlock->state) always appear to occur in a single total order. If the pending flag is added
    // before unlocking, the unlocking thread will wake up the waiter. Otherwise, we will see that
    // the state is unlocked and will not wait.
    old_state = atomic_fetch_or_explicit(&rwlock->state, STATE_HAVE_PENDING_READERS_FLAG,
                                         memory_order_relaxed);

    int old_serial = rwlock->pending_reader_wakeup_serial;
    rwlock->pending_lock.unlock();

    int futex_result = 0;
    if (!__can_acquire_read_lock(old_state, rwlock->writer_nonrecursive_preferred)) {
      futex_result = __futex_wait_ex(&rwlock->pending_reader_wakeup_serial, rwlock->pshared,
                                  old_serial, true, abs_timeout_or_null);
    }

    rwlock->pending_lock.lock();
    rwlock->pending_reader_count--;
    if (rwlock->pending_reader_count == 0) {
      atomic_fetch_and_explicit(&rwlock->state, ~STATE_HAVE_PENDING_READERS_FLAG,
                                memory_order_relaxed);
    }
    rwlock->pending_lock.unlock();

    if (futex_result == -ETIMEDOUT) {
      return ETIMEDOUT;
    }
  }
}
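The comment about the single total order of atomic operations on rwlock->state carries the whole argument for why a wake-up cannot be lost. Here is a standalone sketch of that pattern with illustrative names (not bionic's actual fields):

#include <stdatomic.h>
#include <stdbool.h>

#define LOCKED_BIT  0x1u
#define PENDING_BIT 0x2u

static _Atomic unsigned lock_state;

/* Waiter: registers the pending flag and decides from the value returned by the
 * fetch_or.  Because fetch_or and exchange on lock_state hit a single total
 * modification order, either this RMW comes after the unlock (we see the lock
 * free and skip sleeping) or it comes before it (the unlocker is then
 * guaranteed to see PENDING_BIT and issue a wake). */
static bool waiter_should_sleep(void) {
    unsigned old = atomic_fetch_or_explicit(&lock_state, PENDING_BIT,
                                            memory_order_relaxed);
    return (old & LOCKED_BIT) != 0;
}

/* Unlocker: clears the state with an exchange and wakes only if a waiter had
 * already registered itself. */
static bool unlocker_must_wake(void) {
    unsigned old = atomic_exchange_explicit(&lock_state, 0u, memory_order_release);
    return (old & PENDING_BIT) != 0;
}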
Example #9
static void b(void *obj)
{
	int r1, r2;
	r1 = atomic_load_explicit(&x, memory_order_relaxed);
	atomic_thread_fence(memory_order_acquire);
	r2 = atomic_load_explicit(&x, memory_order_relaxed);

	printf("FENCES: r1 = %d, r2 = %d\n", r1, r2);
	if (r1 == 2)
		MODEL_ASSERT(r2 != 1);
}
Example #10
void* ponyint_mpmcq_pop_bailout_immediate(mpmcq_t* q)
{
    mpmcq_node_t* head = atomic_load_explicit(&q->head, memory_order_relaxed);
    mpmcq_node_t* tail = atomic_load_explicit(&q->tail, memory_order_relaxed);

    // If we believe the queue is empty, bail out immediately without taking a
    // ticket to avoid unnecessary contention.
    if(head == tail)
        return NULL;

    return ponyint_mpmcq_pop(q);
}
Example #11
int pthread_mutex_unlock(pthread_mutex_t* mutex_interface) {
#if !defined(__LP64__)
    // Some apps depend on being able to pass NULL as a mutex and get EINVAL
    // back. Don't need to worry about it for LP64 since the ABI is brand new,
    // but keep compatibility for LP32. http://b/19995172.
    if (mutex_interface == NULL) {
        return EINVAL;
    }
#endif

    pthread_mutex_internal_t* mutex = __get_internal_mutex(mutex_interface);

    uint16_t old_state = atomic_load_explicit(&mutex->state, memory_order_relaxed);
    uint16_t mtype  = (old_state & MUTEX_TYPE_MASK);
    uint16_t shared = (old_state & MUTEX_SHARED_MASK);

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        __pthread_normal_mutex_unlock(mutex, shared);
        return 0;
    }

    // Do we already own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if ( tid != atomic_load_explicit(&mutex->owner_tid, memory_order_relaxed) ) {
        return EPERM;
    }

    // If the counter is > 0, we can simply decrement it atomically.
    // Since other threads can mutate the lower state bits (and only the
    // lower state bits), a relaxed atomic decrement of the counter bits is enough.
    if (!MUTEX_COUNTER_BITS_IS_ZERO(old_state)) {
        // We still own the mutex, so a release fence is not needed.
        atomic_fetch_sub_explicit(&mutex->state, MUTEX_COUNTER_BITS_ONE, memory_order_relaxed);
        return 0;
    }

    // The counter is 0, so we are going to unlock the mutex by resetting its
    // state to unlocked. We need to perform an atomic_exchange in order to read
    // the current state, which will be locked_contended if there are waiters
    // to wake.
    // A release fence is required to make previous stores visible to the next
    // lock owner thread.
    atomic_store_explicit(&mutex->owner_tid, 0, memory_order_relaxed);
    const uint16_t unlocked = mtype | shared | MUTEX_STATE_BITS_UNLOCKED;
    old_state = atomic_exchange_explicit(&mutex->state, unlocked, memory_order_release);
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(old_state)) {
        __futex_wake_ex(&mutex->state, shared, 1);
    }

    return 0;
}
Example #12
void* p1(void *) {
  int r1 = atomic_load_explicit( &s1, memory_order_relaxed );
  if( r1 == 1 ) {
    int r2 = atomic_load_explicit( &m1, memory_order_relaxed );
    assert( r2 == 1);
  }
  int r3 = atomic_load_explicit( &s2, memory_order_relaxed );
  if( r3 == 1 ) {
    int r4 = atomic_load_explicit( &m2, memory_order_relaxed );
    assert( r4 == 1);
  }
  return NULL;
}
Example #13
static size_t messageq_size_debug(messageq_t* q)
{
  pony_msg_t* tail = q->tail;
  size_t count = 0;

  while(atomic_load_explicit(&tail->next, memory_order_relaxed) != NULL)
  {
    count++;
    tail = atomic_load_explicit(&tail->next, memory_order_relaxed);
  }

  return count;
}
Example #14
/**
 * Calculate the average speed over the speed meter backlog.
 * @param[in] speed Speed meter.
 * @return Calculated speed.
 */
uint64_t spdm_calc(const struct speed_meter *speed)
{
    uint64_t aux = 0;
    uint64_t curr_time = zclock(false);

    for (size_t i = 0; i < SPEED_METER_BACKLOG; i++) {
        uint64_t diff = USEC2SEC(curr_time - atomic_load_explicit(&speed->backlog[i].timestamp, memory_order_acquire));
        if (diff <= SPEED_METER_BACKLOG) {
            aux += atomic_load_explicit(&speed->backlog[i].speed, memory_order_acquire);
        }
    }

    return aux / SPEED_METER_BACKLOG;
}
Example #15
void input_stats_Compute(struct input_stats *stats, input_stats_t *st)
{
    /* Input */
    vlc_mutex_lock(&stats->input_bitrate.lock);
    st->i_read_packets = stats->input_bitrate.updates;
    st->i_read_bytes = stats->input_bitrate.value;
    st->f_input_bitrate = stats_GetRate(&stats->input_bitrate);
    vlc_mutex_unlock(&stats->input_bitrate.lock);

    vlc_mutex_lock(&stats->demux_bitrate.lock);
    st->i_demux_read_bytes = stats->demux_bitrate.value;
    st->f_demux_bitrate = stats_GetRate(&stats->demux_bitrate);
    vlc_mutex_unlock(&stats->demux_bitrate.lock);
    st->i_demux_corrupted = atomic_load_explicit(&stats->demux_corrupted,
                                                 memory_order_relaxed);
    st->i_demux_discontinuity = atomic_load_explicit(
                    &stats->demux_discontinuity, memory_order_relaxed);

    /* Aout */
    st->i_decoded_audio = atomic_load_explicit(&stats->decoded_audio,
                                               memory_order_relaxed);
    st->i_played_abuffers = atomic_load_explicit(&stats->played_abuffers,
                                                 memory_order_relaxed);
    st->i_lost_abuffers = atomic_load_explicit(&stats->lost_abuffers,
                                               memory_order_relaxed);

    /* Vouts */
    st->i_decoded_video = atomic_load_explicit(&stats->decoded_video,
                                               memory_order_relaxed);
    st->i_displayed_pictures = atomic_load_explicit(&stats->displayed_pictures,
                                                    memory_order_relaxed);
    st->i_lost_pictures = atomic_load_explicit(&stats->lost_pictures,
                                               memory_order_relaxed);
}
Example #16
prop_bt *prop_area::find_prop_bt(prop_bt *const bt, const char *name,
                                 uint8_t namelen, bool alloc_if_needed)
{

    prop_bt* current = bt;
    while (true) {
        if (!current) {
            return NULL;
        }

        const int ret = cmp_prop_name(name, namelen, current->name, current->namelen);
        if (ret == 0) {
            return current;
        }

        if (ret < 0) {
            uint_least32_t left_offset = atomic_load_explicit(&current->left, memory_order_relaxed);
            if (left_offset != 0) {
                current = to_prop_bt(&current->left);
            } else {
                if (!alloc_if_needed) {
                   return NULL;
                }

                uint_least32_t new_offset;
                prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset);
                if (new_bt) {
                    atomic_store_explicit(&current->left, new_offset, memory_order_release);
                }
                return new_bt;
            }
        } else {
            uint_least32_t right_offset = atomic_load_explicit(&current->right, memory_order_relaxed);
            if (right_offset != 0) {
                current = to_prop_bt(&current->right);
            } else {
                if (!alloc_if_needed) {
                   return NULL;
                }

                uint_least32_t new_offset;
                prop_bt* new_bt = new_prop_bt(name, namelen, &new_offset);
                if (new_bt) {
                    atomic_store_explicit(&current->right, new_offset, memory_order_release);
                }
                return new_bt;
            }
        }
    }
}
Example #17
static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {

  if (__predict_false(__get_thread()->tid == atomic_load_explicit(&rwlock->writer_thread_id,
                                                                  memory_order_relaxed))) {
    return EDEADLK;
  }

  while (true) {
    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__predict_true(old_state == 0)) {
      if (atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, -1,
                                                memory_order_acquire, memory_order_relaxed)) {
        // writer_thread_id is protected by the rwlock and can only be modified by the thread
        // that owns the write lock. Other threads may read it for EDEADLK error checking, so a
        // relaxed atomic operation is sufficient.
        atomic_store_explicit(&rwlock->writer_thread_id, __get_thread()->tid, memory_order_relaxed);
        return 0;
      }
    } else {
      timespec ts;
      timespec* rel_timeout = NULL;

      if (abs_timeout_or_null != NULL) {
        rel_timeout = &ts;
        if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, CLOCK_REALTIME)) {
          return ETIMEDOUT;
        }
      }

      // To avoid losing wake-ups, the pending_writers increment should be observed before
      // futex_wait by all threads. A seq_cst fence is used here instead of a seq_cst operation,
      // because only a seq_cst fence can ensure sequential consistency with the non-atomic
      // operations in futex_wait.
      atomic_fetch_add_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);

      atomic_thread_fence(memory_order_seq_cst);

      int ret = __futex_wait_ex(&rwlock->state, rwlock->process_shared(), old_state,
                                rel_timeout);

      atomic_fetch_sub_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);

      if (ret == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
    }
  }
}
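The seq_cst fence comment describes a store-buffering (Dekker-style) pattern: each side publishes its own flag and then reads the other's. A minimal sketch with illustrative names, not the real unlock path:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int pending_writers;  // illustrative stand-ins for the rwlock fields
static _Atomic int state;

/* Waiter: announce itself, then re-check the lock word before sleeping. */
static bool waiter_may_sleep(int observed_state) {
    atomic_fetch_add_explicit(&pending_writers, 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);
    return atomic_load_explicit(&state, memory_order_relaxed) == observed_state;
}

/* Unlocker: release the lock word, then check for registered waiters.
 * With seq_cst fences on both sides, at least one of the two final loads sees
 * the other thread's store, so either the waiter skips sleeping or the
 * unlocker sees pending_writers > 0 and issues a wake. */
static bool unlocker_must_wake(void) {
    atomic_store_explicit(&state, 0, memory_order_release);
    atomic_thread_fence(memory_order_seq_cst);
    return atomic_load_explicit(&pending_writers, memory_order_relaxed) > 0;
}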
Example #18
static void *Worker( void *arg ) {
    TYPE id = (size_t)arg;
	uint64_t entry;
#ifdef FAST
	unsigned int cnt = 0, oid = id;
#endif // FAST

    for ( int r = 0; r < RUNS; r += 1 ) {
        entry = 0;
        while ( atomic_load(&stop) == 0 ) {
            atomic_store(&states[id*PADRATIO], LOCKED);
            while (1) {
                int lturn = atomic_load(&turn);
                if (!validate_left(id, lturn)) {
                    atomic_store(&states[id*PADRATIO], WAITING);
                    while (1) {
                        if (validate_left(id, lturn) && lturn == atomic_load_explicit(&turn, memory_order_acquire)) break;
                        Pause();
                        lturn = atomic_load_explicit(&turn, memory_order_acquire);
                    }
                    atomic_store(&states[id*PADRATIO], LOCKED);
                    continue;
                }
                while (lturn == atomic_load_explicit(&turn, memory_order_acquire)) {
                    if (validate_right(id, lturn)) break;
                    Pause();
                }
                if (lturn == atomic_load_explicit(&turn, memory_order_acquire)) break;
            }
			CriticalSection( id );						// critical section
			int lturn = (atomic_load_explicit(&turn, memory_order_relaxed)+1) % N;
			atomic_store_explicit(&turn, lturn, memory_order_relaxed);
			atomic_store_explicit(&states[id*PADRATIO], UNLOCKED, memory_order_release); // exit protocol
#ifdef FAST
			id = startpoint( cnt );						// different starting point each experiment
			cnt = cycleUp( cnt, NoStartPoints );
#endif // FAST
			entry += 1;
		} // while
#ifdef FAST
		id = oid;
#endif // FAST
		entries[r][id] = entry;
        atomic_fetch_add( &Arrived, 1 );
        while ( atomic_load(&stop) != 0 ) Pause();
        atomic_fetch_add( &Arrived, -1 );
	} // for
	return NULL;
} // Worker
Example #19
static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {

  if (atomic_load_explicit(&rwlock->writer_tid, memory_order_relaxed) == __get_thread()->tid) {
    return EDEADLK;
  }
  while (true) {
    int result = __pthread_rwlock_trywrlock(rwlock);
    if (result == 0) {
      return result;
    }
    result = check_timespec(abs_timeout_or_null);
    if (result != 0) {
      return result;
    }

    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__can_acquire_write_lock(old_state)) {
      continue;
    }

    rwlock->pending_lock.lock();
    rwlock->pending_writer_count++;

    old_state = atomic_fetch_or_explicit(&rwlock->state, STATE_HAVE_PENDING_WRITERS_FLAG,
                                         memory_order_relaxed);

    int old_serial = rwlock->pending_writer_wakeup_serial;
    rwlock->pending_lock.unlock();

    int futex_result = 0;
    if (!__can_acquire_write_lock(old_state)) {
      futex_result = __futex_wait_ex(&rwlock->pending_writer_wakeup_serial, rwlock->pshared,
                                  old_serial, true, abs_timeout_or_null);
    }

    rwlock->pending_lock.lock();
    rwlock->pending_writer_count--;
    if (rwlock->pending_writer_count == 0) {
      atomic_fetch_and_explicit(&rwlock->state, ~STATE_HAVE_PENDING_WRITERS_FLAG,
                                memory_order_relaxed);
    }
    rwlock->pending_lock.unlock();

    if (futex_result == -ETIMEDOUT) {
      return ETIMEDOUT;
    }
  }
}
Example #20
void resize(Deque *q) {
	Array *a = (Array *) atomic_load_explicit(&q->array, memory_order_seq_cst);
	size_t size=atomic_load_explicit(&a->size, memory_order_seq_cst);
	size_t new_size=size << 1;
	Array *new_a = (Array *) calloc(1, new_size * sizeof(atomic_int) + sizeof(Array));
	size_t top=atomic_load_explicit(&q->top, memory_order_seq_cst);
	size_t bottom=atomic_load_explicit(&q->bottom, memory_order_seq_cst);
	atomic_store_explicit(&new_a->size, new_size, memory_order_seq_cst);
	size_t i;
	for(i=top; i < bottom; i++) {
		atomic_store_explicit(&new_a->buffer[i % new_size], atomic_load_explicit(&a->buffer[i % size], memory_order_seq_cst), memory_order_seq_cst);
	}
	atomic_store_explicit(&q->array, (long unsigned int) new_a, memory_order_seq_cst);
	printf("resize\n");
}
Example #21
void enqueue(queue_t *q, unsigned int val)
{
	std::string str1("enqueue"); //ANNOTATION
	function_call(str1, INVOCATION, val); //ANNOTATION
	
	int success = 0;
	unsigned int node;
	pointer tail;
	pointer next;
	pointer tmp;

	node = new_node();
	store_32(&q->nodes[node].value, val);
	tmp = atomic_load_explicit(&q->nodes[node].next, memory_order_seq_cst);
	set_ptr(&tmp, 0); // NULL
	atomic_store_explicit(&q->nodes[node].next, tmp, memory_order_seq_cst);

	while (!success) {
		tail = atomic_load_explicit(&q->tail, memory_order_seq_cst);
		next = atomic_load_explicit(&q->nodes[get_ptr(tail)].next, memory_order_seq_cst);
		if (tail == atomic_load_explicit(&q->tail, memory_order_seq_cst)) {

			/* Check for uninitialized 'next' */
			MODEL_ASSERT(get_ptr(next) != POISON_IDX);

			if (get_ptr(next) == 0) { // == NULL
				pointer value = MAKE_POINTER(node, get_count(next) + 1);
				success = atomic_compare_exchange_strong_explicit(&q->nodes[get_ptr(tail)].next,
						&next, value, memory_order_seq_cst, memory_order_seq_cst);
			}
			if (!success) {
				unsigned int ptr = get_ptr(atomic_load_explicit(&q->nodes[get_ptr(tail)].next, memory_order_seq_cst));
				pointer value = MAKE_POINTER(ptr,
						get_count(tail) + 1);
				atomic_compare_exchange_strong_explicit(&q->tail,
						&tail, value,
						memory_order_seq_cst, memory_order_seq_cst);
				thrd_yield();
			}
		}
	}
	atomic_compare_exchange_strong_explicit(&q->tail,
			&tail,
			MAKE_POINTER(node, get_count(tail) + 1),
			memory_order_seq_cst, memory_order_seq_cst);

	function_call(str1, RESPONSE); //ANNOTATION
}
Example #22
/**
 * Process upstream p2p bandwidth limits.
 *
 * @param[in] sess Session.
 * @param[in] packet_len Packet length.
 * @param[in] iph IP header.
 * @param[in] l4 L4 header data.
 * @param[in] flow_dir Flow direction.
 * @return Zero on pass.
 */
static int packet_process_p2p_ipv4(struct zsession *sess, size_t packet_len, struct ip *iph, struct l4_data *l4,
                                   enum flow_dir flow_dir)
{
    if (PROTO_MAX == l4->proto) {
        return 0;
    }

    uint16_t port = ntohs((DIR_UP == flow_dir) ? *l4->dst_port : *l4->src_port);

    pthread_rwlock_rdlock(&sess->lock_client);

    // p2p policy enabled, port >= 1024, and not whitelisted
    if (sess->client->p2p_policy && (port >= 1024) && !utarray_find(&zcfg()->p2p_ports_whitelist, &port, uint16_cmp)) {
        uint64_t speed = spdm_calc(&sess->client->speed[flow_dir]);
        // 1/4 of bw limit
        uint64_t throttle_speed = token_bucket_get_max(&sess->client->band[flow_dir]) / 4;

        uint64_t diff = zclock(false) - sess->client->last_p2p_throttle;
        if ((speed > throttle_speed) || (diff < P2P_THROTTLE_TIME)) {
            unsigned upstream_id = IPTOS_DSCP(iph->ip_tos) >> 2;
            struct token_bucket *bucket = &zinst()->upstreams[upstream_id].band[flow_dir];
            if (0 != token_bucket_update(bucket, packet_len)) {
                pthread_rwlock_unlock(&sess->lock_client);
                return -1;
            }

            struct speed_meter *spd = &zinst()->upstreams[upstream_id].speed[flow_dir];
            spdm_update(spd, packet_len);

            diff = zclock(false) - atomic_load_explicit(&sess->client->last_p2p_throttle, memory_order_acquire);
            if (diff > P2P_THROTTLE_TIME) {
                atomic_store_explicit(&sess->client->last_p2p_throttle, zclock(false), memory_order_release);
            }
        }
    }

    // Release the client lock taken above.
    pthread_rwlock_unlock(&sess->lock_client);

    return 0;
}
Example #23
int __system_property_add(const char *name, unsigned int namelen,
            const char *value, unsigned int valuelen)
{
    prop_area *pa = __system_property_area__;
    const prop_info *pi;

    if (namelen >= PROP_NAME_MAX)
        return -1;
    if (valuelen >= PROP_VALUE_MAX)
        return -1;
    if (namelen < 1)
        return -1;

    pi = find_property(root_node(), name, namelen, value, valuelen, true);
    if (!pi)
        return -1;

    // There is only a single mutator, but we want to make sure that
    // updates are visible to a reader waiting for the update.
    atomic_store_explicit(
        &pa->serial,
        atomic_load_explicit(&pa->serial, memory_order_relaxed) + 1,
        memory_order_release);
    __futex_wake(&pa->serial, INT32_MAX);
    return 0;
}
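The comment about "a reader waiting for the update" refers to consumers that block on pa->serial. A sketch of such a reader, assuming a __futex_wait(addr, expected, timeout) helper symmetric to the __futex_wake used above:

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical waiting reader: blocks until the global serial moves past a
 * previously observed value; the writer's release store plus futex wake above
 * makes the new property visible once this returns. */
static uint32_t wait_for_property_update(prop_area* pa, uint32_t old_serial) {
    uint32_t serial = atomic_load_explicit(&pa->serial, memory_order_acquire);
    while (serial == old_serial) {
        __futex_wait(&pa->serial, old_serial, NULL);  // returns immediately if already changed
        serial = atomic_load_explicit(&pa->serial, memory_order_acquire);
    }
    return serial;
}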
Example #24
void * ccsynch_delegate_or_lock(void* lock,
                                unsigned int messageSize) {
    CCSynchLock *l = (CCSynchLock*)lock;
    CCSynchLockNode *nextNode;
    CCSynchLockNode *curNode;
    ccsynchlock_initLocalIfNeeded();
    nextNode = ccsynchNextLocalNode;
    atomic_store_explicit(&nextNode->next, (uintptr_t)NULL, memory_order_relaxed);
    atomic_store_explicit(&nextNode->wait, 1, memory_order_relaxed);
    nextNode->completed = false;
    curNode = (CCSynchLockNode *)atomic_exchange_explicit(&l->tailPtr.value, (uintptr_t)nextNode, memory_order_release);
    curNode->messageSize = messageSize;

    curNode->requestFunction = NULL; //Forces helper to stop if it sees this

    atomic_store_explicit(&curNode->next, (uintptr_t)nextNode, memory_order_release);

    ccsynchNextLocalNode = curNode;
    if (atomic_load_explicit(&curNode->wait, memory_order_acquire) == 1){
        //Someone else has the lock; delegate
        return curNode->tempBuffer;
    }else{
        //Yay, we got the lock
        return NULL;
    }

}
Example #25
static void
_ui_saved(void *data, int status)
{
	bin_t *bin = data;
	prog_t *handle = (void *)bin - offsetof(prog_t, bin);

	//printf("_ui_saved: %i\n", status);
	if(handle->save_state == SAVE_STATE_NSM)
	{
		synthpod_nsm_saved(bin->nsm, status);
	}
	else if(handle->save_state == SAVE_STATE_JACK)
	{
		jack_session_event_t *ev = handle->session_event;
		if(ev)
		{
			if(status != 0)
				ev->flags |= JackSessionSaveError;
			jack_session_reply(handle->client, ev);
			jack_session_event_free(ev);
		}
		handle->session_event = NULL;
	}
	handle->save_state = SAVE_STATE_INTERNAL;

	if(atomic_load_explicit(&handle->kill, memory_order_relaxed))
	{
		elm_exit();
	}
}
Example #26
void _queueNode(stpcProxy* proxy, stpcNode* newNode) {
	sequencedPtr oldTail;
	sequencedPtr newTail;
    stpcNode *tailNode, *next;
    bool rc, rc2;
	long attempts = 0;

		
    newNode->count = GUARD_BIT + 2 * REFERENCE;
	
	/*
	 * monkey through the trees queuing trick
	 */
	
	newTail.ptr = newNode;
	newTail.sequence = 0;
	oldTail.ival = atomic_load_explicit(&proxy->tail.ival, memory_order_consume);
	do {
		attempts++;
	}
	while (!atomic_compare_exchange_strong_explicit(&proxy->tail.ival, &oldTail.ival, newTail.ival, memory_order_acq_rel, memory_order_acquire));
    
	atomic_store_explicit(&oldTail.ptr->next, newNode, memory_order_relaxed);
	// update old node's reference count by number of acquired references, clear guard bit, and drop ref acquired from tail pointer
	_dropProxyNodeReference(proxy, oldTail.ptr, (oldTail.sequence - GUARD_BIT));
		    
    stats_t *stats = stpcGetLocalStats(proxy);
    stats->tries++;                         // _addNode invocations
    stats->attempts += attempts;            // tail enqueue attempts
}
Example #27
void mrqd_delegate(void* lock,
                   void (*funPtr)(unsigned int, void *), 
                   unsigned int messageSize,
                   void * messageAddress) {
    MRQDLock *l = (MRQDLock*)lock;
    while(atomic_load_explicit(&l->writeBarrier.value, memory_order_seq_cst) > 0){
        thread_yield();
    }
    while(true) {
        if(tatas_try_lock(&l->mutexLock)) {
            qdq_open(&l->queue);
            rgri_wait_all_readers_gone(&l->readIndicator);
            funPtr(messageSize, messageAddress);
            qdq_flush(&l->queue);
            tatas_unlock(&l->mutexLock);
            return;
        } else if(qdq_enqueue(&l->queue,
                              funPtr,
                              messageSize,
                              messageAddress)){
            return;
        }
        thread_yield();
    }
}
Example #28
void mrqd_delegate_wait(void* lock,
                      void (*funPtr)(unsigned int, void *), 
                      unsigned int messageSize,
                      void * messageAddress) {
    volatile atomic_int waitVar = ATOMIC_VAR_INIT(1);
    unsigned int metaDataSize = sizeof(volatile atomic_int *) + 
        sizeof(void (*)(unsigned int, void *));
    char * buff = mrqd_delegate_or_lock(lock,
                                        metaDataSize + messageSize);
    if(buff==NULL){
        funPtr(messageSize, messageAddress);
        mrqd_delegate_unlock(lock);
    }else{
        volatile atomic_int ** waitVarPtrAddress = (volatile atomic_int **)buff;
        *waitVarPtrAddress = &waitVar;
        void (**funPtrAdress)(unsigned int, void *) = (void (**)(unsigned int, void *))&buff[sizeof(volatile atomic_int *)];
        *funPtrAdress = funPtr;
        unsigned int metaDataSize = sizeof(volatile atomic_int *) + 
            sizeof(void (*)(unsigned int, void *));
        char * msgBuffer = (char *)messageAddress;
        for(unsigned int i = metaDataSize; i < (messageSize + metaDataSize); i++){
            buff[i] = msgBuffer[i - metaDataSize];
        }
        mrqd_close_delegate_buffer((void *)buff, mrqd_executeAndWaitCS);
        while(atomic_load_explicit(&waitVar, memory_order_acquire)){
            thread_yield();
        }
    }
}
Example #29
int pthread_rwlock_destroy(pthread_rwlock_t *rwlock) {
  assert(atomic_load_explicit(&rwlock->__state, memory_order_relaxed) ==
             CLOUDABI_LOCK_UNLOCKED &&
         "Attempted to destroy locked rwlock");
  assert(rwlock->__write_recursion <= 0 && "Recursion counter invalid");
  return 0;
}
Example #30
int mb__system_property_add(const char *name, unsigned int namelen,
            const char *value, unsigned int valuelen)
{
    if (namelen >= PROP_NAME_MAX)
        return -1;
    if (valuelen >= PROP_VALUE_MAX)
        return -1;
    if (namelen < 1)
        return -1;

    if (!mb__system_property_area__) {
        return -1;
    }

    prop_area* pa = get_prop_area_for_name(name);

    if (!pa) {
        LOGE("Access denied adding property \"%s\"", name);
        return -1;
    }

    bool ret = pa->add(name, namelen, value, valuelen);
    if (!ret)
        return -1;

    // There is only a single mutator, but we want to make sure that
    // updates are visible to a reader waiting for the update.
    atomic_store_explicit(
        mb__system_property_area__->serial(),
        atomic_load_explicit(mb__system_property_area__->serial(), memory_order_relaxed) + 1,
        memory_order_release);
    __futex_wake(mb__system_property_area__->serial(), INT32_MAX);
    return 0;
}