Example #1
static void timer_add(struct odp_hisi_timer *tim,
		      unsigned tim_core, int local_is_locked)
{
	unsigned core_id = odp_core_id();
	unsigned lvl;
	struct odp_hisi_timer *prev[MAX_SKIPLIST_DEPTH + 1];

	if ((tim_core != core_id) || !local_is_locked)
		odp_spinlock_lock(&priv_timer[tim_core].list_lock);

	timer_get_prev_entries(tim->expire, tim_core, prev);

	const unsigned tim_level = timer_get_skiplist_level(
		priv_timer[tim_core].curr_skiplist_depth);

	if (tim_level == priv_timer[tim_core].curr_skiplist_depth)
		priv_timer[tim_core].curr_skiplist_depth++;

	lvl = tim_level;
	while (lvl > 0) {
		tim->sl_next[lvl] = prev[lvl]->sl_next[lvl];
		prev[lvl]->sl_next[lvl] = tim;
		lvl--;
	}

	tim->sl_next[0] = prev[0]->sl_next[0];
	prev[0]->sl_next[0] = tim;

	priv_timer[tim_core].pending_head.expire =
		priv_timer[tim_core].pending_head.sl_next[0]->expire;

	if ((tim_core != core_id) || !local_is_locked)
		odp_spinlock_unlock(&priv_timer[tim_core].list_lock);
}
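One helper that Example #1 depends on, timer_get_skiplist_level(), is not shown. Below is a minimal sketch of one common level-picking policy (promote with probability 1/4, capped at the current depth), using rand() from <stdlib.h> purely for illustration; it is an assumed implementation, not the verified one.

/* Assumed sketch of timer_get_skiplist_level(): advance one level
 * with probability 1/4, so roughly 1 node in 4 reaches level 1,
 * 1 in 16 reaches level 2, and so on. Capping at curr_depth lets
 * the caller grow the list by at most one level per insert. */
static unsigned timer_get_skiplist_level(unsigned curr_depth)
{
	unsigned level = 0;
	unsigned r = (unsigned)rand();	/* any PRNG works here */

	while ((r & 3) == 0 && level < MAX_SKIPLIST_DEPTH &&
	       level < curr_depth) {
		level++;
		r >>= 2;
	}

	return level;
}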
Example #2
int odp_timer_cancel_tmo(odp_timer_t timer, odp_timer_tmo_t tmo)
{
	int id;
	uint64_t tick_idx;
	timeout_t *cancel_tmo;
	tick_t *tick;

	/* get id */
	id = timer - 1;

	/* get tmo_buf to cancel */
	cancel_tmo = (timeout_t *)odp_buffer_addr(tmo);
	tick_idx = cancel_tmo->tick;
	tick = &odp_timer.timer[id].tick[tick_idx];

	odp_spinlock_lock(&tick->lock);
	/* search and delete tmo from tick list */
	if (find_and_del_tmo(&tick->list, tmo) != 0) {
		odp_spinlock_unlock(&tick->lock);
		ODP_DBG("Couldn't find the tmo (%d) in tick list\n", (int)tmo);
		return -1;
	}
	odp_spinlock_unlock(&tick->lock);

	return 0;
}
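The cancel path relies on find_and_del_tmo() returning 0 once the timeout has been unlinked. A plausible sketch under that convention follows; the tmo_buf field name is an assumption made for illustration, not taken from the source.

/* Hedged sketch of find_and_del_tmo(): walk the singly linked tick
 * list and unlink the entry whose buffer handle matches. Returns 0
 * on success, -1 when the timeout is not on the list (the
 * convention the caller above checks). */
static int find_and_del_tmo(timeout_t **list, odp_timer_tmo_t handle)
{
	timeout_t *cur, *prev = NULL;

	for (cur = *list; cur != NULL; prev = cur, cur = cur->next) {
		if (cur->tmo_buf == handle) {	/* field name assumed */
			if (prev == NULL)
				*list = cur->next;
			else
				prev->next = cur->next;
			cur->next = NULL;
			return 0;
		}
	}

	return -1;
}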
Example #3
static void timer_del(struct odp_hisi_timer *tim,
		      union odp_hisi_timer_status prev_status,
		      int local_is_locked)
{
	unsigned core_id = odp_core_id();
	unsigned prev_owner = prev_status.owner;
	int i;
	struct odp_hisi_timer *prev[MAX_SKIPLIST_DEPTH + 1];

	if ((prev_owner != core_id) || !local_is_locked)
		odp_spinlock_lock(&priv_timer[prev_owner].list_lock);

	if (tim == priv_timer[prev_owner].pending_head.sl_next[0])
		priv_timer[prev_owner].pending_head.expire =
			((!tim->sl_next[0]) ? 0 : tim->sl_next[0]->expire);

	timer_get_prev_entries_for_node(tim, prev_owner, prev);
	for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--)
		if (prev[i]->sl_next[i] == tim)
			prev[i]->sl_next[i] = tim->sl_next[i];

	for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--)
		if (!priv_timer[prev_owner].pending_head.sl_next[i])
			priv_timer[prev_owner].curr_skiplist_depth--;
		else
			break;

	if ((prev_owner != core_id) || !local_is_locked)
		odp_spinlock_unlock(&priv_timer[prev_owner].list_lock);
}
Example #4
int odp_thread_init_local(odp_thread_type_t type)
{
	int id;
	int cpu;

	odp_spinlock_lock(&thread_globals->lock);
	id = alloc_id(type);
	odp_spinlock_unlock(&thread_globals->lock);

	if (id < 0) {
		ODP_ERR("Too many threads\n");
		return -1;
	}

	cpu = sched_getcpu();

	if (cpu < 0) {
		ODP_ERR("getcpu failed\n");
		/* release the id reserved above */
		odp_spinlock_lock(&thread_globals->lock);
		free_id(id);
		odp_spinlock_unlock(&thread_globals->lock);
		return -1;
	}

	thread_globals->thr[id].thr  = id;
	thread_globals->thr[id].cpu  = cpu;
	thread_globals->thr[id].type = type;

	this_thread = &thread_globals->thr[id];
	return 0;
}
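alloc_id() and free_id() (the latter appears in Example #11) are not shown. Here is a hedged sketch assuming a per-slot 'used' flag and a 'num' counter, both invented names; the real allocator may also keep per-type accounting.

/* Assumed sketch of the id allocator: a linear scan over the thread
 * table, protected by thread_globals->lock at the call sites. */
static int alloc_id(odp_thread_type_t type)
{
	int id;

	(void)type;	/* per-type accounting omitted in this sketch */

	for (id = 0; id < ODP_THREAD_COUNT_MAX; id++) {
		if (!thread_globals->thr[id].used) {
			thread_globals->thr[id].used = 1;
			thread_globals->num++;
			return id;
		}
	}

	return -1;	/* table full */
}

static int free_id(int id)
{
	if (id < 0 || id >= ODP_THREAD_COUNT_MAX ||
	    !thread_globals->thr[id].used)
		return -1;

	thread_globals->thr[id].used = 0;
	return --thread_globals->num;	/* threads remaining */
}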
Example #5
odp_pktio_t odp_pktio_lookup(const char *dev)
{
	odp_pktio_t id = ODP_PKTIO_INVALID;
	pktio_entry_t *entry;
	int i;

	odp_spinlock_lock(&pktio_tbl->lock);

	for (i = 1; i <= ODP_CONFIG_PKTIO_ENTRIES; ++i) {
		entry = get_pktio_entry(_odp_cast_scalar(odp_pktio_t, i));
		if (!entry || is_free(entry))
			continue;

		lock_entry(entry);

		if (!is_free(entry) &&
		    strncmp(entry->s.name, dev, IF_NAMESIZE) == 0)
			id = _odp_cast_scalar(odp_pktio_t, i);

		unlock_entry(entry);

		if (id != ODP_PKTIO_INVALID)
			break;
	}

	odp_spinlock_unlock(&pktio_tbl->lock);

	return id;
}
Example #6
int ofp_timer_cancel(odp_timer_t tim)
{
	odp_event_t timeout_event = ODP_EVENT_INVALID;
	odp_timeout_t tmo;
	uint32_t t = (uint32_t)tim;
	struct ofp_timer_internal *bufdata;
	struct ofp_timer_internal *prev = NULL;

	if (tim == ODP_TIMER_INVALID)
		return 0;

	if (t & 0x80000000) {
		/* long timeout */
		odp_spinlock_lock(&shm->lock);
		bufdata = shm->long_table[t & TIMER_LONG_MASK];

		while (bufdata) {
			struct ofp_timer_internal *next = bufdata->next;
			if (bufdata->id == t) {
				if (prev == NULL)
					shm->long_table[t & TIMER_LONG_MASK] = next;
				else
					prev->next = next;
				odp_buffer_free(bufdata->buf);
				odp_spinlock_unlock(&shm->lock);
				return 0;
			}
			prev = bufdata;
			bufdata = next;
		}
		odp_spinlock_unlock(&shm->lock);
		return -1;
	} else {
		if (odp_timer_cancel(tim, &timeout_event) < 0) {
			OFP_WARN("Timeout already expired or inactive");
			return 0;
		}

		if (timeout_event != ODP_EVENT_INVALID) {
			tmo = odp_timeout_from_event(timeout_event);
			bufdata = odp_timeout_user_ptr(tmo);
			odp_buffer_free(bufdata->buf);
			odp_timeout_free(tmo);
		} else {
			OFP_WARN("Lost timeout buffer at timer cancel");
			return -1;
		}

		/* odp_timer_free() returns any event still owned by the
		 * timer; after a successful cancel the expected result
		 * is ODP_EVENT_INVALID. */
		if (odp_timer_free(tim) != ODP_EVENT_INVALID) {
			OFP_ERR("odp_timer_free failed");
			return -1;
		}
	}

	return 0;
}
Example #7
static void add_tmo(tick_t *tick, timeout_t *tmo)
{
	odp_spinlock_lock(&tick->lock);

	tmo->next  = tick->list;
	tick->list = tmo;

	odp_spinlock_unlock(&tick->lock);
}
Example #8
static odp_queue_t pri_set(int id, int prio)
{
	odp_spinlock_lock(&sched->mask_lock);
	sched->pri_mask[prio] |= 1 << id;
	sched->pri_count[prio][id]++;
	odp_spinlock_unlock(&sched->mask_lock);

	return sched->pri_queue[prio][id];
}
Example #9
static void pri_clr(int id, int prio)
{
	odp_spinlock_lock(&sched->mask_lock);

	/* Clear mask bit when last queue is removed */
	sched->pri_count[prio][id]--;

	if (sched->pri_count[prio][id] == 0)
		sched->pri_mask[prio] &= (uint8_t)(~(1 << id));

	odp_spinlock_unlock(&sched->mask_lock);
}
Example #10
void odp_spinlock_recursive_lock(odp_spinlock_recursive_t *rlock)
{
	int thr = odp_thread_id();

	if (rlock->owner == thr) {
		rlock->cnt++;
		return;
	}

	odp_spinlock_lock(&rlock->lock);
	rlock->owner = thr;
	rlock->cnt   = 1;
}
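The matching unlock must release the raw spinlock only when the outermost recursion level exits. A sketch consistent with the lock above, assuming -1 marks "no owner":

/* Hedged sketch of the counterpart unlock: decrement the nesting
 * count; only the outermost unlock clears the owner and releases
 * the underlying spinlock. */
void odp_spinlock_recursive_unlock(odp_spinlock_recursive_t *rlock)
{
	rlock->cnt--;

	if (rlock->cnt > 0)
		return;

	rlock->owner = -1;	/* "no owner" sentinel (assumed) */
	odp_spinlock_unlock(&rlock->lock);
}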
Example #11
int odp_thread_term_local(void)
{
	int num;
	int id = this_thread->thr;

	odp_spinlock_lock(&thread_globals->lock);
	num = free_id(id);
	odp_spinlock_unlock(&thread_globals->lock);

	if (num < 0) {
		ODP_ERR("failed to free thread id %i", id);
		return -1;
	}

	return num; /* return a number of threads left */
}
Example #12
odp_shm_t odp_shm_lookup(const char *name)
{
	uint32_t i;
	odp_shm_t hdl;

	odp_spinlock_lock(&odp_shm_tbl->lock);
	if (find_block(name, &i) == 0) {
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		return ODP_SHM_INVALID;
	}

	hdl = odp_shm_tbl->block[i].hdl;
	odp_spinlock_unlock(&odp_shm_tbl->lock);

	return hdl;
}
Example #13
static timeout_t *rem_tmo(tick_t *tick)
{
	timeout_t *tmo;

	odp_spinlock_lock(&tick->lock);

	tmo = tick->list;

	if (tmo)
		tick->list = tmo->next;

	odp_spinlock_unlock(&tick->lock);

	if (tmo)
		tmo->next = NULL;

	return tmo;
}
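Together with add_tmo() from Example #7, rem_tmo() forms a spinlock-protected LIFO. An illustrative drain loop that never holds tick->lock across user code; handle_tmo() is an assumed consumer, not part of the source.

/* Hedged usage sketch: pop timeouts one at a time so tick->lock is
 * held only inside rem_tmo(), never while running the consumer. */
static void drain_tick(tick_t *tick)
{
	timeout_t *tmo;

	while ((tmo = rem_tmo(tick)) != NULL)
		handle_tmo(tmo);	/* assumed callback */
}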
Example #14
odp_pktio_t odp_pktio_open(const char *dev, odp_pool_t pool)
{
	odp_pktio_t id;

	id = odp_pktio_lookup(dev);
	if (id != ODP_PKTIO_INVALID) {
		/* interface is already open */
		__odp_errno = EEXIST;
		PRINT("interface already open.\n");
		return ODP_PKTIO_INVALID;
	}

	odp_spinlock_lock(&pktio_tbl->lock);
	id = setup_pktio_entry(dev, pool);
	odp_spinlock_unlock(&pktio_tbl->lock);

	return id;
}
Example #15
static void one_sec(void *arg)
{
	struct ofp_timer_internal *bufdata;
	(void)arg;

	odp_spinlock_lock(&shm->lock);
	shm->sec_counter = (shm->sec_counter + 1) & TIMER_LONG_MASK;
	bufdata = shm->long_table[shm->sec_counter];
	shm->long_table[shm->sec_counter] = NULL;
	odp_spinlock_unlock(&shm->lock);

	while (bufdata) {
		struct ofp_timer_internal *next = bufdata->next;
		bufdata->callback(&bufdata->arg);
		odp_buffer_free(bufdata->buf);
		bufdata = next;
	}

	/* Start one second timeout */
	shm->timer_1s = ofp_timer_start(1000000UL, one_sec, NULL, 0);
}
Example #16
int
ofp_sblock(struct sockbuf *sb, int flags)
{
	KASSERT((flags & SBL_VALID) == flags,
	    ("ofp_sblock: flags invalid (0x%x)", flags));

	if (flags & SBL_WAIT) {
		if ((sb->sb_flags & SB_NOINTR) ||
		    (flags & SBL_NOINTR)) {
			odp_spinlock_lock(&sb->sb_sx);
			return (0);
		}
		/* Interruptible waiting (sx_xlock_sig() in the BSD
		 * original) is not supported here; report the lock as
		 * acquired. */
		return (0);
	} else {
		if (!odp_spinlock_trylock(&sb->sb_sx))
			return (OFP_EWOULDBLOCK);
		return (0);
	}
}
Example #17
int odp_shm_free(odp_shm_t shm)
{
	uint32_t i;
	int ret;
	odp_shm_block_t *block;
	char name[ODP_SHM_NAME_LEN + 8];

	if (shm == ODP_SHM_INVALID) {
		ODP_DBG("odp_shm_free: Invalid handle\n");
		return -1;
	}

	i = from_handle(shm);

	if (i >= ODP_CONFIG_SHM_BLOCKS) {
		ODP_DBG("odp_shm_free: Bad handle\n");
		return -1;
	}

	odp_spinlock_lock(&odp_shm_tbl->lock);

	block = &odp_shm_tbl->block[i];

	if (block->addr == NULL) {
		/* block is already free */
		ODP_DBG("odp_shm_free: block already free\n");
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		return 0;
	}

	/* District-backed memory is released by unreserving the
	 * district rather than by munmap() */
	if (block->flags & ODP_SHM_MONOPOLIZE_CNTNUS_PHY) {
		int pid = getpid();

		snprintf(name, sizeof(name), "%s_%d", block->name, pid);
		odp_mm_district_unreserve(name);
		memset(block, 0, sizeof(odp_shm_block_t));
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		return 0;
	}

	if (block->flags & ODP_SHM_SHARE_CNTNUS_PHY) {
		/* shared districts are registered under the plain block
		 * name; the composed 'name' applies only to the
		 * monopolized case above */
		odp_mm_district_unreserve(block->name);
		memset(block, 0, sizeof(odp_shm_block_t));
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		return 0;
	}

	ret = munmap(block->addr_orig, block->alloc_size);
	if (0 != ret) {
		ODP_DBG("odp_shm_free: munmap failed: %s, id %u, addr %p\n",
			strerror(errno), i, block->addr_orig);
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		return -1;
	}

	if (block->flags & ODP_SHM_PROC) {
		ret = shm_unlink(block->name);
		if (0 != ret) {
			ODP_DBG("odp_shm_free: shm_unlink failed\n");
			odp_spinlock_unlock(&odp_shm_tbl->lock);
			return -1;
		}
	}

	memset(block, 0, sizeof(odp_shm_block_t));
	odp_spinlock_unlock(&odp_shm_tbl->lock);
	return 0;
}
Example #18
odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
			  uint32_t flags)
{
	uint32_t i;
	odp_shm_block_t *block;
	void *addr;
	int   fd = -1;
	int   map_flag = MAP_SHARED;

	/* If already exists: O_EXCL: error, O_TRUNC: truncate to zero */
	int oflag = O_RDWR | O_CREAT | O_TRUNC;
	uint64_t alloc_size;
	uint64_t page_sz;

#ifdef MAP_HUGETLB
	uint64_t huge_sz;
	int need_huge_page = 0;
	uint64_t alloc_hp_size;
#endif

	const struct odp_mm_district *zone = NULL;
	char memdistrict_name[ODP_SHM_NAME_LEN + 8];

	page_sz = odp_sys_page_size();
	alloc_size = size + align;

#ifdef MAP_HUGETLB
	huge_sz = odp_sys_huge_page_size();
	need_huge_page = (huge_sz && alloc_size > page_sz);

	/* munmap of huge pages requires the size to be rounded up
	 * to the huge page size */
	alloc_hp_size = (size + align + (huge_sz - 1)) & (-huge_sz);
#endif

	if (flags & ODP_SHM_PROC) {
		/* Creates a file to /dev/shm */
		fd = shm_open(name, oflag,
			      S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
		if (fd == -1) {
			ODP_DBG("%s: shm_open failed.\n", name);
			return ODP_SHM_INVALID;
		}
	} else if (flags & ODP_SHM_MONOPOLIZE_CNTNUS_PHY) {
		int pid = getpid();

		snprintf(memdistrict_name, sizeof(memdistrict_name),
			 "%s_%d", name, pid);
		zone = odp_mm_district_reserve(memdistrict_name, name,
					       alloc_size, 0,
					       ODP_MEMZONE_2MB |
					       ODP_MEMZONE_SIZE_HINT_ONLY);
		if (zone == NULL) {
			ODP_DBG("odp_mm_district_reseve %s failed.\n", name);
			return ODP_SHM_INVALID;
		}
	} else if (flags & ODP_SHM_SHARE_CNTNUS_PHY) {
		zone = odp_mm_district_reserve(name, name,
					       alloc_size, 0,
					       ODP_MEMZONE_2MB |
					       ODP_MEMZONE_SIZE_HINT_ONLY);
		if (zone == NULL) {
			ODP_DBG("odp_mm_district_reseve %s failed.\n", name);
			return ODP_SHM_INVALID;
		}
	} else {
		map_flag |= MAP_ANONYMOUS;
	}

	odp_spinlock_lock(&odp_shm_tbl->lock);

	if (find_block(name, NULL)) {
		/* Found a block with the same name */
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		ODP_DBG("name %s already used.\n", name);
		return ODP_SHM_INVALID;
	}

	for (i = 0; i < ODP_CONFIG_SHM_BLOCKS; i++)
		if (odp_shm_tbl->block[i].addr == NULL)
			/* Found free block */
			break;

	if (i >= ODP_CONFIG_SHM_BLOCKS) {
		/* Table full */
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		ODP_DBG("%s: no more blocks.\n", name);
		return ODP_SHM_INVALID;
	}

	block = &odp_shm_tbl->block[i];

	block->hdl = to_handle(i);
	addr = MAP_FAILED;

#ifdef MAP_HUGETLB

	/* Try huge pages first */
	if (need_huge_page) {
		if ((flags & ODP_SHM_PROC) &&
		    (ftruncate(fd, alloc_hp_size) == -1)) {
			odp_spinlock_unlock(&odp_shm_tbl->lock);
			ODP_DBG("%s: ftruncate huge pages failed.\n", name);
			return ODP_SHM_INVALID;
		}

		addr = mmap(NULL, alloc_hp_size, PROT_READ | PROT_WRITE,
			    map_flag | MAP_HUGETLB, fd, 0);
		if (addr == MAP_FAILED) {
			ODP_DBG("%s: no huge pages, falling back to normal "
				"pages; check /proc/sys/vm/nr_hugepages.\n",
				name);
		} else {
			block->alloc_size = alloc_hp_size;
			block->huge = 1;
			block->page_sz = huge_sz;
		}
	}
#endif

	if (flags & ODP_SHM_MONOPOLIZE_CNTNUS_PHY ||
	    flags & ODP_SHM_SHARE_CNTNUS_PHY)
		addr = zone->addr;

	/* Use normal pages for small or failed huge page allocations */
	if (addr == MAP_FAILED) {
		if ((flags & ODP_SHM_PROC) &&
		    (ftruncate(fd, alloc_size) == -1)) {
			odp_spinlock_unlock(&odp_shm_tbl->lock);
			ODP_ERR("%s: ftruncate failed.\n", name);
			return ODP_SHM_INVALID;
		}

		addr = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
			    map_flag, fd, 0);
		if (addr == MAP_FAILED) {
			odp_spinlock_unlock(&odp_shm_tbl->lock);
			ODP_DBG("%s mmap failed.\n", name);
			return ODP_SHM_INVALID;
		}

		block->alloc_size = alloc_size;
		block->huge = 0;
		block->page_sz = page_sz;
	}

	if (flags & ODP_SHM_MONOPOLIZE_CNTNUS_PHY ||
	    flags & ODP_SHM_SHARE_CNTNUS_PHY) {
		block->alloc_size = alloc_size;
		block->huge = 1;
		block->page_sz = ODP_MEMZONE_2MB;
		block->addr_orig = addr;

		/* move to correct alignment */
		addr = ODP_ALIGN_ROUNDUP_PTR(zone->addr, align);

		strncpy(block->name, name, ODP_SHM_NAME_LEN - 1);
		block->name[ODP_SHM_NAME_LEN - 1] = 0;
		block->size  = size;
		block->align = align;
		block->flags = flags;
		block->fd = -1;
		block->addr = addr;
	} else {
		block->addr_orig = addr;

		/* move to correct alignment */
		addr = ODP_ALIGN_ROUNDUP_PTR(addr, align);

		strncpy(block->name, name, ODP_SHM_NAME_LEN - 1);
		block->name[ODP_SHM_NAME_LEN - 1] = 0;
		block->size  = size;
		block->align = align;
		block->flags = flags;
		block->fd = fd;
		block->addr = addr;
	}

	odp_spinlock_unlock(&odp_shm_tbl->lock);

	return block->hdl;
}
Example #19
void odp_hisi_timer_manage(void)
{
	union odp_hisi_timer_status status;
	struct odp_hisi_timer *tim, *next_tim;
	unsigned core_id = odp_core_id();
	struct odp_hisi_timer *prev[MAX_SKIPLIST_DEPTH + 1];
	uint64_t cur_time;
	int i, ret;

	assert(core_id < ODP_MAX_CORE);

	__TIMER_STAT_ADD(manage, 1);

	if (!priv_timer[core_id].pending_head.sl_next[0])
		return;

	cur_time = odp_get_tsc_cycles();

	odp_spinlock_lock(&priv_timer[core_id].list_lock);

	if ((!priv_timer[core_id].pending_head.sl_next[0]) ||
	    (priv_timer[core_id].pending_head.sl_next[0]->expire > cur_time))
		goto done;

	tim = priv_timer[core_id].pending_head.sl_next[0];

	timer_get_prev_entries(cur_time, core_id, prev);
	for (i = priv_timer[core_id].curr_skiplist_depth - 1; i >= 0; i--) {
		priv_timer[core_id].pending_head.sl_next[i] =
			prev[i]->sl_next[i];
		if (!prev[i]->sl_next[i])
			priv_timer[core_id].curr_skiplist_depth--;

		prev[i]->sl_next[i] = NULL;
	}

	for (; tim; tim = next_tim) {
		next_tim = tim->sl_next[0];

		ret = timer_set_running_state(tim);

		if (ret < 0)
			continue;

		odp_spinlock_unlock(&priv_timer[core_id].list_lock);

		priv_timer[core_id].updated = 0;

		tim->f(tim, tim->arg);

		odp_spinlock_lock(&priv_timer[core_id].list_lock);
		__TIMER_STAT_ADD(pending, -1);

		if (priv_timer[core_id].updated == 1)
			continue;

		if (tim->period == 0) {
			status.state = ODP_HISI_TIMER_STOP;
			status.owner = ODP_HISI_TIMER_NO_OWNER;
			odp_mb_full();
			tim->status.u32 = status.u32;
		} else {
			status.state = ODP_HISI_TIMER_PENDING;
			__TIMER_STAT_ADD(pending, 1);
			status.owner = (int16_t)core_id;
			odp_mb_full();
			tim->status.u32 = status.u32;
			__odp_hisi_timer_reset(tim, cur_time + tim->period,
					       tim->period, core_id, tim->f,
					       tim->arg, 1);
		}
	}

	priv_timer[core_id].pending_head.expire =
		(!priv_timer[core_id].pending_head.sl_next[0]) ? 0 :
		priv_timer[core_id].pending_head.sl_next[0]->expire;
done:
	odp_spinlock_unlock(&priv_timer[core_id].list_lock);
}
Example #20
odp_timer_t ofp_timer_start(uint64_t tmo_us, ofp_timer_callback callback,
			    void *arg, int arglen)
{
	uint64_t tick;
	uint64_t period;
	uint64_t period_ns;
	struct ofp_timer_internal *bufdata;
	odp_buffer_t buf;
	odp_timer_set_t t;
	odp_timeout_t tmo;

	/* Init shm if not done yet. */
	if ((shm == NULL) && ofp_timer_lookup_shared_memory()) {
		OFP_ERR("ofp_timer_lookup_shared_memory failed");
		return ODP_TIMER_INVALID;
	}

	/* Alloc user buffer */
	buf = odp_buffer_alloc(shm->buf_pool);
	if (buf == ODP_BUFFER_INVALID) {
		OFP_ERR("odp_buffer_alloc failed");
		return ODP_TIMER_INVALID;
	}

	bufdata = (struct ofp_timer_internal *)odp_buffer_addr(buf);
	bufdata->callback = callback;
	bufdata->buf = buf;
	bufdata->t_ev = ODP_EVENT_INVALID;
	bufdata->next = NULL;
	bufdata->id = 0;
	if (arg && arglen)
		memcpy(bufdata->arg, arg, arglen);

	if (tmo_us >= OFP_TIMER_MAX_US) {
		/* Long timeout, 1 s resolution */
		uint64_t sec = tmo_us/1000000UL;

		if (sec > TIMER_NUM_LONG_SLOTS) {
			/* would wrap around the long-timer wheel */
			OFP_ERR("Timeout too long = %"PRIu64"s", sec);
			odp_buffer_free(buf);
			return ODP_TIMER_INVALID;
		}

		odp_spinlock_lock(&shm->lock);
		int ix = (shm->sec_counter + sec) & TIMER_LONG_MASK;
		bufdata->id = ((shm->id++)<<TIMER_LONG_SHIFT) | ix | 0x80000000;
		bufdata->next = shm->long_table[ix];
		shm->long_table[ix] = bufdata;
		odp_spinlock_unlock(&shm->lock);

		return (odp_timer_t) bufdata->id;
	} else {
		/* Short 10 ms resolution timeout */
		odp_timer_t timer;

		/* Alloc timeout event */
		tmo = odp_timeout_alloc(shm->pool);
		if (tmo == ODP_TIMEOUT_INVALID) {
			odp_buffer_free(buf);
			OFP_ERR("odp_timeout_alloc failed");
			return ODP_TIMER_INVALID;
		}
		bufdata->t_ev = odp_timeout_to_event(tmo);

		period_ns = tmo_us*ODP_TIME_USEC_IN_NS;
		period    = odp_timer_ns_to_tick(shm->socket_timer_pool, period_ns);
		tick      = odp_timer_current_tick(shm->socket_timer_pool);
		tick     += period;

		timer = odp_timer_alloc(shm->socket_timer_pool,
					shm->queue, bufdata);
		if (timer == ODP_TIMER_INVALID) {
			odp_timeout_free(tmo);
			odp_buffer_free(buf);
			OFP_ERR("odp_timer_alloc failed");
			return ODP_TIMER_INVALID;
		}

		t = odp_timer_set_abs(timer, tick, &bufdata->t_ev);

		if (t != ODP_TIMER_SUCCESS) {
			odp_timeout_free(tmo);
			odp_buffer_free(buf);
			OFP_ERR("odp_timer_set_abs failed");
			return ODP_TIMER_INVALID;
		}

		return timer;
	}
}
Example #21
void lock_entry(pktio_entry_t *entry)
{
	odp_spinlock_lock(&entry->s.lock);
}
Example #22
static void lock_entry_classifier(pktio_entry_t *entry)
{
	odp_spinlock_lock(&entry->s.lock);
	odp_spinlock_lock(&entry->s.cls.lock);
}
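Examples #21 and #22 show only acquisition. Below is a sketch of the assumed counterpart, releasing in reverse order so the entry/classifier lock hierarchy is never inverted; it is not taken verbatim from the source.

/* Hedged sketch: unlock in the reverse order of
 * lock_entry_classifier() above (classifier lock first, then the
 * entry lock) to keep the locking hierarchy consistent. */
static void unlock_entry_classifier(pktio_entry_t *entry)
{
	odp_spinlock_unlock(&entry->s.cls.lock);
	odp_spinlock_unlock(&entry->s.lock);
}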