Example no. 1
0
int odp_timer_cancel_tmo(odp_timer_t timer, odp_timer_tmo_t tmo)
{
	int id;
	uint64_t tick_idx;
	timeout_t *cancel_tmo;
	tick_t *tick;

	/* get id */
	id = timer - 1;

	/* get tmo_buf to cancel */
	cancel_tmo = (timeout_t *)odp_buffer_addr(tmo);
	tick_idx = cancel_tmo->tick;
	tick = &odp_timer.timer[id].tick[tick_idx];

	odp_spinlock_lock(&tick->lock);
	/* search and delete tmo from tick list */
	if (find_and_del_tmo(&tick->list, tmo) != 0) {
		odp_spinlock_unlock(&tick->lock);
		ODP_DBG("Couldn't find the tmo (%d) in tick list\n", (int)tmo);
		return -1;
	}
	odp_spinlock_unlock(&tick->lock);

	return 0;
}
Example no. 2
0
int ofp_timer_cancel(odp_timer_t tim)
{
	odp_event_t timeout_event = ODP_EVENT_INVALID;
	odp_timeout_t tmo;
	uint32_t t = (uint32_t)tim;
	struct ofp_timer_internal *bufdata;
	struct ofp_timer_internal *prev = NULL;

	if (tim == ODP_TIMER_INVALID)
		return 0;

	if (t & 0x80000000) {
		/* long timeout */
		odp_spinlock_lock(&shm->lock);
		bufdata = shm->long_table[t & TIMER_LONG_MASK];

		while (bufdata) {
			struct ofp_timer_internal *next = bufdata->next;
			if (bufdata->id == t) {
				if (prev == NULL)
					shm->long_table[t & TIMER_LONG_MASK] = next;
				else
					prev->next = next;
				odp_buffer_free(bufdata->buf);
				odp_spinlock_unlock(&shm->lock);
				return 0;
			}
			prev = bufdata;
			bufdata = next;
		}
		odp_spinlock_unlock(&shm->lock);
		return -1;
	} else {
		if (odp_timer_cancel(tim, &timeout_event) < 0) {
			OFP_WARN("Timeout already expired or inactive");
			return 0;
		}

		if (timeout_event != ODP_EVENT_INVALID) {
			tmo = odp_timeout_from_event(timeout_event);
			bufdata = odp_timeout_user_ptr(tmo);
			odp_buffer_free(bufdata->buf);
			odp_timeout_free(tmo);
		} else {
			OFP_WARN("Lost timeout buffer at timer cancel");
			return -1;
		}

		if (odp_timer_free(tim) != ODP_EVENT_INVALID) {
			OFP_ERR("odp_timer_free failed");
			return -1;
		}
	}

	return 0;
}
Example no. 3
0
static void timer_add(struct odp_hisi_timer *tim,
		      unsigned tim_core, int local_is_locked)
{
	unsigned core_id = odp_core_id();
	unsigned lvl;
	struct odp_hisi_timer *prev[MAX_SKIPLIST_DEPTH + 1];

	if ((tim_core != core_id) || !local_is_locked)
		odp_spinlock_lock(&priv_timer[tim_core].list_lock);

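	/* Find the predecessor at each skiplist level for the new expiry time */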
	timer_get_prev_entries(tim->expire, tim_core, prev);

	const unsigned tim_level = timer_get_skiplist_level(
		priv_timer[tim_core].curr_skiplist_depth);

	if (tim_level == priv_timer[tim_core].curr_skiplist_depth)
		priv_timer[tim_core].curr_skiplist_depth++;

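	/* Link the new timer in at its level and every level below it */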
	lvl = tim_level;
	while (lvl > 0) {
		tim->sl_next[lvl] = prev[lvl]->sl_next[lvl];
		prev[lvl]->sl_next[lvl] = tim;
		lvl--;
	}

	tim->sl_next[0] = prev[0]->sl_next[0];
	prev[0]->sl_next[0] = tim;

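	/* Cache the earliest pending expiry in the list head */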
	priv_timer[tim_core].pending_head.expire =
		priv_timer[tim_core].pending_head.sl_next[0]->expire;

	if ((tim_core != core_id) || !local_is_locked)
		odp_spinlock_unlock(&priv_timer[tim_core].list_lock);
}
Example no. 4
0
static void timer_del(struct odp_hisi_timer	 *tim,
		      union odp_hisi_timer_status prev_status,
		      int			  local_is_locked)
{
	unsigned core_id = odp_core_id();
	unsigned prev_owner = prev_status.owner;
	int i;
	struct odp_hisi_timer *prev[MAX_SKIPLIST_DEPTH + 1];

	if ((prev_owner != core_id) || !local_is_locked)
		odp_spinlock_lock(&priv_timer[prev_owner].list_lock);

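	/* If this is the earliest timer, move the cached head expiry forward */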
	if (tim == priv_timer[prev_owner].pending_head.sl_next[0])
		priv_timer[prev_owner].pending_head.expire =
			((!tim->sl_next[0]) ? 0 : tim->sl_next[0]->expire);

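	/* Unlink the timer from every skiplist level it is linked on */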
	timer_get_prev_entries_for_node(tim, prev_owner, prev);
	for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--)
		if (prev[i]->sl_next[i] == tim)
			prev[i]->sl_next[i] = tim->sl_next[i];

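	/* Reduce the skiplist depth while the topmost levels are empty */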
	for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--)
		if (!priv_timer[prev_owner].pending_head.sl_next[i])
			priv_timer[prev_owner].curr_skiplist_depth--;
		else
			break;

	if ((prev_owner != core_id) || !local_is_locked)
		odp_spinlock_unlock(&priv_timer[prev_owner].list_lock);
}
Example no. 5
0
int odp_thread_init_local(odp_thread_type_t type)
{
	int id;
	int cpu;

	odp_spinlock_lock(&thread_globals->lock);
	id = alloc_id(type);
	odp_spinlock_unlock(&thread_globals->lock);

	if (id < 0) {
		ODP_ERR("Too many threads\n");
		return -1;
	}

	cpu = sched_getcpu();

	if (cpu < 0) {
		ODP_ERR("getcpu failed\n");
		return -1;
	}

	thread_globals->thr[id].thr  = id;
	thread_globals->thr[id].cpu  = cpu;
	thread_globals->thr[id].type = type;

	this_thread = &thread_globals->thr[id];
	return 0;
}
Example no. 6
0
odp_shm_t odp_shm_lookup(const char *name)
{
	uint32_t i;
	odp_shm_t hdl;

	odp_spinlock_lock(&odp_shm_tbl->lock);
	if (find_block(name, &i) == 0) {
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		return ODP_SHM_INVALID;
	}

	hdl = odp_shm_tbl->block[i].hdl;
	odp_spinlock_unlock(&odp_shm_tbl->lock);

	return hdl;
}
Example no. 7
0
odp_pktio_t odp_pktio_lookup(const char *dev)
{
	odp_pktio_t id = ODP_PKTIO_INVALID;
	pktio_entry_t *entry;
	int i;

	odp_spinlock_lock(&pktio_tbl->lock);

	for (i = 1; i <= ODP_CONFIG_PKTIO_ENTRIES; ++i) {
		entry = get_pktio_entry(_odp_cast_scalar(odp_pktio_t, i));
		if (!entry || is_free(entry))
			continue;

		lock_entry(entry);

		if (!is_free(entry) &&
		    strncmp(entry->s.name, dev, IF_NAMESIZE) == 0)
			id = _odp_cast_scalar(odp_pktio_t, i);

		unlock_entry(entry);

		if (id != ODP_PKTIO_INVALID)
			break;
	}

	odp_spinlock_unlock(&pktio_tbl->lock);

	return id;
}
Example no. 8
0
static void add_tmo(tick_t *tick, timeout_t *tmo)
{
	odp_spinlock_lock(&tick->lock);

	tmo->next  = tick->list;
	tick->list = tmo;

	odp_spinlock_unlock(&tick->lock);
}
Example no. 9
0
static odp_queue_t pri_set(int id, int prio)
{
	odp_spinlock_lock(&sched->mask_lock);
	sched->pri_mask[prio] |= 1 << id;
	sched->pri_count[prio][id]++;
	odp_spinlock_unlock(&sched->mask_lock);

	return sched->pri_queue[prio][id];
}
Example no. 10
0
void odp_spinlock_recursive_unlock(odp_spinlock_recursive_t *rlock)
{
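	/* Drop one nesting level; release the lock only when the count reaches zero */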
	rlock->cnt--;

	if (rlock->cnt > 0)
		return;

	rlock->owner = NO_OWNER;
	odp_spinlock_unlock(&rlock->lock);
}
Example no. 11
0
static void pri_clr(int id, int prio)
{
	odp_spinlock_lock(&sched->mask_lock);

	/* Clear mask bit when the last queue is removed */
	sched->pri_count[prio][id]--;

	if (sched->pri_count[prio][id] == 0)
		sched->pri_mask[prio] &= (uint8_t)(~(1 << id));

	odp_spinlock_unlock(&sched->mask_lock);
}
Example no. 12
0
static void timer_notify(union sigval sigval ODP_UNUSED)
{
	odp_timer_pool *tp = _odp_timer_pool_global;
	uint64_t prev_tick = odp_atomic_fetch_inc_u64(&tp->cur_tick);
	/* Attempt to acquire the lock, check if the old value was clear */
	if (odp_spinlock_trylock(&tp->itimer_running)) {
		/* Scan timer array, looking for timers to expire */
		(void)_odp_timer_pool_expire(tp, prev_tick);
		odp_spinlock_unlock(&tp->itimer_running);
	}
	/* Else skip scan of timers. cur_tick was updated and next itimer
	 * invocation will process older expiration ticks as well */
}
Example no. 13
0
int odp_thread_term_local(void)
{
	int num;
	int id = this_thread->thr;

	odp_spinlock_lock(&thread_globals->lock);
	num = free_id(id);
	odp_spinlock_unlock(&thread_globals->lock);

	if (num < 0) {
		ODP_ERR("failed to free thread id %i", id);
		return -1;
	}

	return num; /* return the number of threads left */
}
Example no. 14
0
static timeout_t *rem_tmo(tick_t *tick)
{
	timeout_t *tmo;

	odp_spinlock_lock(&tick->lock);

	tmo = tick->list;

	if (tmo)
		tick->list = tmo->next;

	odp_spinlock_unlock(&tick->lock);

	if (tmo)
		tmo->next = NULL;

	return tmo;
}
Example no. 15
0
odp_pktio_t odp_pktio_open(const char *dev, odp_pool_t pool)
{
	odp_pktio_t id;

	id = odp_pktio_lookup(dev);
	if (id != ODP_PKTIO_INVALID) {
		/* interface is already open */
		__odp_errno = EEXIST;
		PRINT("odp_pktio_lookup fail.\n");
		return ODP_PKTIO_INVALID;
	}

	odp_spinlock_lock(&pktio_tbl->lock);
	id = setup_pktio_entry(dev, pool);
	odp_spinlock_unlock(&pktio_tbl->lock);

	return id;
}
Example no. 16
0
static void one_sec(void *arg)
{
	struct ofp_timer_internal *bufdata;
	(void)arg;

	odp_spinlock_lock(&shm->lock);
	shm->sec_counter = (shm->sec_counter + 1) & TIMER_LONG_MASK;
	bufdata = shm->long_table[shm->sec_counter];
	shm->long_table[shm->sec_counter] = NULL;
	odp_spinlock_unlock(&shm->lock);

	while (bufdata) {
		struct ofp_timer_internal *next = bufdata->next;
		bufdata->callback(&bufdata->arg);
		odp_buffer_free(bufdata->buf);
		bufdata = next;
	}

	/* Start one second timeout */
	shm->timer_1s = ofp_timer_start(1000000UL, one_sec, NULL, 0);
}
Example no. 17
0
int odp_shm_free(odp_shm_t shm)
{
	uint32_t i;
	int ret;
	odp_shm_block_t *block;
	char name[ODP_SHM_NAME_LEN + 8];

	if (shm == ODP_SHM_INVALID) {
		ODP_DBG("odp_shm_free: Invalid handle\n");
		return -1;
	}

	i = from_handle(shm);

	if (i >= ODP_CONFIG_SHM_BLOCKS) {
		ODP_DBG("odp_shm_free: Bad handle\n");
		return -1;
	}

	odp_spinlock_lock(&odp_shm_tbl->lock);

	block = &odp_shm_tbl->block[i];

	if (block->addr == NULL) {
		ODP_DBG("odp_shm_free: Free block\n");
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		return 0;
	}

	/* right now, for this type of memory, we do not actually free anything */
	if (block->flags & ODP_SHM_MONOPOLIZE_CNTNUS_PHY) {
		int pid = getpid();

		snprintf(name, sizeof(name), "%s_%d", block->name, pid);
		odp_mm_district_unreserve(name);
		memset(block, 0, sizeof(odp_shm_block_t));
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		return 0;
	}

	if (block->flags & ODP_SHM_SHARE_CNTNUS_PHY) {
		odp_mm_district_unreserve(block->name);
		memset(block, 0, sizeof(odp_shm_block_t));
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		return 0;
	}

	ret = munmap(block->addr_orig, block->alloc_size);
	if (0 != ret) {
		ODP_DBG("odp_shm_free: munmap failed: %s, id %u, addr %p\n",
			strerror(errno), i, block->addr_orig);
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		return -1;
	}

	if (block->flags & ODP_SHM_PROC) {
		ret = shm_unlink(block->name);
		if (0 != ret) {
			ODP_DBG("odp_shm_free: shm_unlink failed\n");
			odp_spinlock_unlock(&odp_shm_tbl->lock);
			return -1;
		}
	}

	memset(block, 0, sizeof(odp_shm_block_t));
	odp_spinlock_unlock(&odp_shm_tbl->lock);
	return 0;
}
Example no. 18
0
odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
			  uint32_t flags)
{
	uint32_t i;
	odp_shm_block_t *block;
	void *addr;
	int   fd = -1;
	int   map_flag = MAP_SHARED;

	/* If already exists: O_EXCL: error, O_TRUNC: truncate to zero */
	int oflag = O_RDWR | O_CREAT | O_TRUNC;
	uint64_t alloc_size;
	uint64_t page_sz;

#ifdef MAP_HUGETLB
	uint64_t huge_sz;
	int need_huge_page = 0;
	uint64_t alloc_hp_size;
#endif

	const struct odp_mm_district *zone = NULL;
	char memdistrict_name[ODP_SHM_NAME_LEN + 8];

	page_sz = odp_sys_page_size();
	alloc_size = size + align;

#ifdef MAP_HUGETLB
	huge_sz = odp_sys_huge_page_size();
	need_huge_page = (huge_sz && alloc_size > page_sz);

	/* munmap for huge pages requires sizes round up by page */
	alloc_hp_size = (size + align + (huge_sz - 1)) & (-huge_sz);
#endif

	if (flags & ODP_SHM_PROC) {
		/* Creates a file to /dev/shm */
		fd = shm_open(name, oflag,
			      S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
		if (fd == -1) {
			ODP_DBG("%s: shm_open failed.\n", name);
			return ODP_SHM_INVALID;
		}
	} else if (flags & ODP_SHM_MONOPOLIZE_CNTNUS_PHY) {
		int pid = getpid();

		snprintf(memdistrict_name, sizeof(memdistrict_name),
			 "%s_%d", name, pid);
		zone = odp_mm_district_reserve(memdistrict_name, name,
					       alloc_size, 0,
					       ODP_MEMZONE_2MB |
					       ODP_MEMZONE_SIZE_HINT_ONLY);
		if (zone == NULL) {
			ODP_DBG("odp_mm_district_reseve %s failed.\n", name);
			return ODP_SHM_INVALID;
		}
	} else if (flags & ODP_SHM_SHARE_CNTNUS_PHY) {
		zone = odp_mm_district_reserve(name, name,
					       alloc_size, 0,
					       ODP_MEMZONE_2MB |
					       ODP_MEMZONE_SIZE_HINT_ONLY);
		if (zone == NULL) {
			ODP_DBG("odp_mm_district_reseve %s failed.\n", name);
			return ODP_SHM_INVALID;
		}
	} else {
		map_flag |= MAP_ANONYMOUS;
	}

	odp_spinlock_lock(&odp_shm_tbl->lock);

	if (find_block(name, NULL)) {
		/* Found a block with the same name */
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		ODP_DBG("name %s already used.\n", name);
		return ODP_SHM_INVALID;
	}

	for (i = 0; i < ODP_CONFIG_SHM_BLOCKS; i++)
		if (odp_shm_tbl->block[i].addr == NULL)
			/* Found free block */
			break;

	if (i > ODP_CONFIG_SHM_BLOCKS - 1) {
		/* Table full */
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		ODP_DBG("%s: no more blocks.\n", name);
		return ODP_SHM_INVALID;
	}

	block = &odp_shm_tbl->block[i];

	block->hdl = to_handle(i);
	addr = MAP_FAILED;

#ifdef MAP_HUGETLB

	/* Try first huge pages */
	if (need_huge_page) {
		if ((flags & ODP_SHM_PROC) &&
		    (ftruncate(fd, alloc_hp_size) == -1)) {
			odp_spinlock_unlock(&odp_shm_tbl->lock);
			ODP_DBG("%s: ftruncate huge pages failed.\n", name);
			return ODP_SHM_INVALID;
		}

		addr = mmap(NULL, alloc_hp_size, PROT_READ | PROT_WRITE,
			    map_flag | MAP_HUGETLB, fd, 0);
		if (addr == MAP_FAILED) {
			ODP_DBG(" %s: No huge pages, fall back to normal pages,"
				"check: /proc/sys/vm/nr_hugepages.\n",
				name);
		} else {
			block->alloc_size = alloc_hp_size;
			block->huge = 1;
			block->page_sz = huge_sz;
		}
	}
#endif

	if (flags & ODP_SHM_MONOPOLIZE_CNTNUS_PHY ||
	    flags & ODP_SHM_SHARE_CNTNUS_PHY)
		addr = zone->addr;

	/* Use normal pages for small or failed huge page allocations */
	if (addr == MAP_FAILED) {
		if ((flags & ODP_SHM_PROC) &&
		    (ftruncate(fd, alloc_size) == -1)) {
			odp_spinlock_unlock(&odp_shm_tbl->lock);
			ODP_ERR("%s: ftruncate failed.\n", name);
			return ODP_SHM_INVALID;
		}

		addr = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
			    map_flag, fd, 0);
		if (addr == MAP_FAILED) {
			odp_spinlock_unlock(&odp_shm_tbl->lock);
			ODP_DBG("%s mmap failed.\n", name);
			return ODP_SHM_INVALID;
		}

		block->alloc_size = alloc_size;
		block->huge = 0;
		block->page_sz = page_sz;
	}

	if (flags & ODP_SHM_MONOPOLIZE_CNTNUS_PHY ||
	    flags & ODP_SHM_SHARE_CNTNUS_PHY) {
		block->alloc_size = alloc_size;
		block->huge = 1;
		block->page_sz = ODP_MEMZONE_2MB;
		block->addr_orig = addr;

		/* move to correct alignment */
		addr = ODP_ALIGN_ROUNDUP_PTR(zone->addr, align);

		strncpy(block->name, name, ODP_SHM_NAME_LEN - 1);
		block->name[ODP_SHM_NAME_LEN - 1] = 0;
		block->size  = size;
		block->align = align;
		block->flags = flags;
		block->fd = -1;
		block->addr = addr;
	} else {
		block->addr_orig = addr;

		/* move to correct alignment */
		addr = ODP_ALIGN_ROUNDUP_PTR(addr, align);

		strncpy(block->name, name, ODP_SHM_NAME_LEN - 1);
		block->name[ODP_SHM_NAME_LEN - 1] = 0;
		block->size  = size;
		block->align = align;
		block->flags = flags;
		block->fd = fd;
		block->addr = addr;
	}

	odp_spinlock_unlock(&odp_shm_tbl->lock);

	return block->hdl;
}
Example no. 19
0
static int schedule_common_(void *arg)
{
	thread_args_t *args = (thread_args_t *)arg;
	odp_schedule_sync_t sync;
	test_globals_t *globals;
	queue_context *qctx;
	buf_contents *bctx, *bctx_cpy;
	odp_pool_t pool;
	int locked;
	int num;
	odp_event_t ev;
	odp_buffer_t buf, buf_cpy;
	odp_queue_t from;

	globals = args->globals;
	sync = args->sync;

	pool = odp_pool_lookup(MSG_POOL_NAME);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);

	if (args->num_workers > 1)
		odp_barrier_wait(&globals->barrier);

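	/* Keep scheduling until the shared buffer count drains to zero */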
	while (1) {
		from = ODP_QUEUE_INVALID;
		num = 0;

		odp_ticketlock_lock(&globals->lock);
		if (globals->buf_count == 0) {
			odp_ticketlock_unlock(&globals->lock);
			break;
		}
		odp_ticketlock_unlock(&globals->lock);

		if (args->enable_schd_multi) {
			odp_event_t events[BURST_BUF_SIZE],
				ev_cpy[BURST_BUF_SIZE];
			odp_buffer_t buf_cpy[BURST_BUF_SIZE];
			int j;

			num = odp_schedule_multi(&from, ODP_SCHED_NO_WAIT,
						 events, BURST_BUF_SIZE);
			CU_ASSERT(num >= 0);
			CU_ASSERT(num <= BURST_BUF_SIZE);
			if (num == 0)
				continue;

			if (sync == ODP_SCHED_SYNC_ORDERED) {
				int ndx;
				int ndx_max;
				int rc;

				ndx_max = odp_queue_lock_count(from);
				CU_ASSERT_FATAL(ndx_max >= 0);

				qctx = odp_queue_context(from);

				for (j = 0; j < num; j++) {
					bctx = odp_buffer_addr(
						odp_buffer_from_event
						(events[j]));

					buf_cpy[j] = odp_buffer_alloc(pool);
					CU_ASSERT_FATAL(buf_cpy[j] !=
							ODP_BUFFER_INVALID);
					bctx_cpy = odp_buffer_addr(buf_cpy[j]);
					memcpy(bctx_cpy, bctx,
					       sizeof(buf_contents));
					bctx_cpy->output_sequence =
						bctx_cpy->sequence;
					ev_cpy[j] =
						odp_buffer_to_event(buf_cpy[j]);
				}

				rc = odp_queue_enq_multi(qctx->pq_handle,
							 ev_cpy, num);
				CU_ASSERT(rc == num);

				bctx = odp_buffer_addr(
					odp_buffer_from_event(events[0]));
				for (ndx = 0; ndx < ndx_max; ndx++) {
					odp_schedule_order_lock(ndx);
					CU_ASSERT(bctx->sequence ==
						  qctx->lock_sequence[ndx]);
					qctx->lock_sequence[ndx] += num;
					odp_schedule_order_unlock(ndx);
				}
			}

			for (j = 0; j < num; j++)
				odp_event_free(events[j]);
		} else {
			ev  = odp_schedule(&from, ODP_SCHED_NO_WAIT);
			if (ev == ODP_EVENT_INVALID)
				continue;

			buf = odp_buffer_from_event(ev);
			num = 1;
			if (sync == ODP_SCHED_SYNC_ORDERED) {
				int ndx;
				int ndx_max;
				int rc;

				ndx_max = odp_queue_lock_count(from);
				CU_ASSERT_FATAL(ndx_max >= 0);

				qctx = odp_queue_context(from);
				bctx = odp_buffer_addr(buf);
				buf_cpy = odp_buffer_alloc(pool);
				CU_ASSERT_FATAL(buf_cpy != ODP_BUFFER_INVALID);
				bctx_cpy = odp_buffer_addr(buf_cpy);
				memcpy(bctx_cpy, bctx, sizeof(buf_contents));
				bctx_cpy->output_sequence = bctx_cpy->sequence;

				rc = odp_queue_enq(qctx->pq_handle,
						   odp_buffer_to_event
						   (buf_cpy));
				CU_ASSERT(rc == 0);

				for (ndx = 0; ndx < ndx_max; ndx++) {
					odp_schedule_order_lock(ndx);
					CU_ASSERT(bctx->sequence ==
						  qctx->lock_sequence[ndx]);
					qctx->lock_sequence[ndx] += num;
					odp_schedule_order_unlock(ndx);
				}
			}

			odp_buffer_free(buf);
		}

		if (args->enable_excl_atomic) {
			locked = odp_spinlock_trylock(&globals->atomic_lock);
			CU_ASSERT(locked != 0);
			CU_ASSERT(from != ODP_QUEUE_INVALID);
			if (locked) {
				int cnt;
				odp_time_t time = ODP_TIME_NULL;
				/* Do some work here to keep the thread busy */
				for (cnt = 0; cnt < 1000; cnt++)
					time = odp_time_sum(time,
							    odp_time_local());

				odp_spinlock_unlock(&globals->atomic_lock);
			}
		}

		if (sync == ODP_SCHED_SYNC_ATOMIC)
			odp_schedule_release_atomic();

		if (sync == ODP_SCHED_SYNC_ORDERED)
			odp_schedule_release_ordered();

		odp_ticketlock_lock(&globals->lock);

		globals->buf_count -= num;

		if (globals->buf_count < 0) {
			odp_ticketlock_unlock(&globals->lock);
			CU_FAIL_FATAL("Buffer counting failed");
		}

		odp_ticketlock_unlock(&globals->lock);
	}

	if (args->num_workers > 1)
		odp_barrier_wait(&globals->barrier);

	if (sync == ODP_SCHED_SYNC_ORDERED)
		locked = odp_ticketlock_trylock(&globals->lock);
	else
		locked = 0;

	if (locked && globals->buf_count_cpy > 0) {
		odp_event_t ev;
		odp_queue_t pq;
		uint64_t seq;
		uint64_t bcount = 0;
		int i, j;
		char name[32];
		uint64_t num_bufs = args->num_bufs;
		uint64_t buf_count = globals->buf_count_cpy;

		for (i = 0; i < args->num_prio; i++) {
			for (j = 0; j < args->num_queues; j++) {
				snprintf(name, sizeof(name),
					 "plain_%d_%d_o", i, j);
				pq = odp_queue_lookup(name);
				CU_ASSERT_FATAL(pq != ODP_QUEUE_INVALID);

				seq = 0;
				while (1) {
					ev = odp_queue_deq(pq);

					if (ev == ODP_EVENT_INVALID) {
						CU_ASSERT(seq == num_bufs);
						break;
					}

					bctx = odp_buffer_addr(
						odp_buffer_from_event(ev));

					CU_ASSERT(bctx->sequence == seq);
					seq++;
					bcount++;
					odp_event_free(ev);
				}
			}
		}
		CU_ASSERT(bcount == buf_count);
		globals->buf_count_cpy = 0;
	}

	if (locked)
		odp_ticketlock_unlock(&globals->lock);

	/* Clear scheduler atomic / ordered context between tests */
	num = exit_schedule_loop();

	CU_ASSERT(num == 0);

	if (num)
		printf("\nDROPPED %i events\n\n", num);

	return 0;
}
Example no. 20
0
void odp_hisi_timer_manage(void)
{
	union odp_hisi_timer_status status;
	struct odp_hisi_timer *tim, *next_tim;
	unsigned core_id = odp_core_id();
	struct odp_hisi_timer *prev[MAX_SKIPLIST_DEPTH + 1];
	uint64_t cur_time;
	int i, ret;

	assert(core_id < ODP_MAX_CORE);

	__TIMER_STAT_ADD(manage, 1);

	if (!priv_timer[core_id].pending_head.sl_next[0])
		return;

	cur_time = odp_get_tsc_cycles();

	odp_spinlock_lock(&priv_timer[core_id].list_lock);

	if ((!priv_timer[core_id].pending_head.sl_next[0]) ||
	    (priv_timer[core_id].pending_head.sl_next[0]->expire > cur_time))
		goto done;

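	/* Detach all timers that have expired by cur_time from the pending skiplist */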
	tim = priv_timer[core_id].pending_head.sl_next[0];

	timer_get_prev_entries(cur_time, core_id, prev);
	for (i = priv_timer[core_id].curr_skiplist_depth - 1; i >= 0; i--) {
		priv_timer[core_id].pending_head.sl_next[i] =
			prev[i]->sl_next[i];
		if (!prev[i]->sl_next[i])
			priv_timer[core_id].curr_skiplist_depth--;

		prev[i]->sl_next[i] = NULL;
	}

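	/* Run each expired callback with the list lock released */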
	for (; tim; tim = next_tim) {
		next_tim = tim->sl_next[0];

		ret = timer_set_running_state(tim);

		if (ret < 0)
			continue;

		odp_spinlock_unlock(&priv_timer[core_id].list_lock);

		priv_timer[core_id].updated = 0;

		tim->f(tim, tim->arg);

		odp_spinlock_lock(&priv_timer[core_id].list_lock);
		__TIMER_STAT_ADD(pending, -1);

		if (priv_timer[core_id].updated == 1)
			continue;

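		/* One-shot timers stop; periodic timers are re-armed */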
		if (tim->period == 0) {
			status.state = ODP_HISI_TIMER_STOP;
			status.owner = ODP_HISI_TIMER_NO_OWNER;
			odp_mb_full();
			tim->status.u32 = status.u32;
		} else {
			status.state = ODP_HISI_TIMER_PENDING;
			__TIMER_STAT_ADD(pending, 1);
			status.owner = (int16_t)core_id;
			odp_mb_full();
			tim->status.u32 = status.u32;
			__odp_hisi_timer_reset(tim, cur_time + tim->period,
					       tim->period, core_id, tim->f,
					       tim->arg, 1);
		}
	}

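	/* Refresh the cached earliest expiry for the remaining pending timers */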
	priv_timer[core_id].pending_head.expire =
		(!priv_timer[core_id].pending_head.sl_next[0]) ? 0 :
		priv_timer[core_id].pending_head.sl_next[0]->expire;
done:

	odp_spinlock_unlock(&priv_timer[core_id].list_lock);
}
Example no. 21
0
void
ofp_sbunlock(struct sockbuf *sb)
{
	odp_spinlock_unlock(&sb->sb_sx);
}
Example no. 22
0
odp_timer_t ofp_timer_start(uint64_t tmo_us, ofp_timer_callback callback,
		       void *arg, int arglen)
{
	uint64_t tick;
	uint64_t period;
	uint64_t period_ns;
	struct ofp_timer_internal *bufdata;
	odp_buffer_t buf;
	odp_timer_set_t t;
	odp_timeout_t tmo;

	/* Init shm if not done yet. */
	if ((shm == NULL) && ofp_timer_lookup_shared_memory()) {
		OFP_ERR("ofp_timer_lookup_shared_memory failed");
		return ODP_TIMER_INVALID;
	}

	/* Alloc user buffer */
	buf = odp_buffer_alloc(shm->buf_pool);
	if (buf == ODP_BUFFER_INVALID) {
		OFP_ERR("odp_buffer_alloc failed");
		return ODP_TIMER_INVALID;
	}

	bufdata = (struct ofp_timer_internal *)odp_buffer_addr(buf);
	bufdata->callback = callback;
	bufdata->buf = buf;
	bufdata->t_ev = ODP_EVENT_INVALID;
	bufdata->next = NULL;
	bufdata->id = 0;
	if (arg && arglen)
		memcpy(bufdata->arg, arg, arglen);

	if (tmo_us >= OFP_TIMER_MAX_US) {
		/* Long 1 s resolution timeout */
		uint64_t sec = tmo_us/1000000UL;
		if (sec > TIMER_NUM_LONG_SLOTS) {
			OFP_ERR("Timeout too long = %"PRIu64"s", sec);
		}

		odp_spinlock_lock(&shm->lock);
		int ix = (shm->sec_counter + sec) & TIMER_LONG_MASK;
		bufdata->id = ((shm->id++) << TIMER_LONG_SHIFT) | ix | 0x80000000;
		bufdata->next = shm->long_table[ix];
		shm->long_table[ix] = bufdata;
		odp_spinlock_unlock(&shm->lock);

		return (odp_timer_t) bufdata->id;
	} else {
		/* Short 10 ms resolution timeout */
		odp_timer_t timer;

		/* Alloc timeout event */
		tmo = odp_timeout_alloc(shm->pool);
		if (tmo == ODP_TIMEOUT_INVALID) {
			odp_buffer_free(buf);
			OFP_ERR("odp_timeout_alloc failed");
			return ODP_TIMER_INVALID;
		}
		bufdata->t_ev = odp_timeout_to_event(tmo);

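		/* Convert the timeout to ticks and compute the absolute expiry tick */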
		period_ns = tmo_us*ODP_TIME_USEC_IN_NS;
		period    = odp_timer_ns_to_tick(shm->socket_timer_pool, period_ns);
		tick      = odp_timer_current_tick(shm->socket_timer_pool);
		tick     += period;

		timer = odp_timer_alloc(shm->socket_timer_pool,
					shm->queue, bufdata);
		if (timer == ODP_TIMER_INVALID) {
			odp_timeout_free(tmo);
			odp_buffer_free(buf);
			OFP_ERR("odp_timer_alloc failed");
			return ODP_TIMER_INVALID;
		}

		t = odp_timer_set_abs(timer, tick, &bufdata->t_ev);

		if (t != ODP_TIMER_SUCCESS) {
			odp_timeout_free(tmo);
			odp_buffer_free(buf);
			OFP_ERR("odp_timer_set_abs failed");
			return ODP_TIMER_INVALID;
		}

		return timer;
	}
	return ODP_TIMER_INVALID;
}
Example no. 23
0
void unlock_entry(pktio_entry_t *entry)
{
	odp_spinlock_unlock(&entry->s.lock);
}
Example no. 24
0
static void unlock_entry_classifier(pktio_entry_t *entry)
{
	odp_spinlock_unlock(&entry->s.cls.lock);
	odp_spinlock_unlock(&entry->s.lock);
}