RJ_API int rj_m_conn_send(rj_net_m_conn_h handle, int conn_id, int net_ch_id, rj_ndp_pk_t *p_ndp, char *p_data)
{
    rj_net_multi_conn_t *p_m_conn = (rj_net_multi_conn_t *)(handle);
    assert (NULL != p_m_conn);
    assert ((NULL != p_ndp) && (NULL != p_data));

    if ((NULL != p_m_conn) && (NULL != p_ndp) && (NULL != p_data))
    {
        sys_mutex_lock(p_m_conn->p_sys_mutex);

        rj_net_conn_h net_conn = find_conn(p_m_conn->conn_list, conn_id);
        if (NULL != net_conn)
        {
            int ret = rj_conn_send(net_conn, net_ch_id, p_ndp, p_data);
            sys_mutex_unlock(p_m_conn->p_sys_mutex);
            return ret;
        }
        else
        {
            // Channel info not found.
            // The caller learns of a channel error through the send/receive
            // interfaces and then calls rj_m_conn_stop_conn to close the channel.
            // Reaching this point means the caller closed the channel and is
            // still sending on it; that is a usage error on the caller's side.
            assert(false);
        }

        sys_mutex_unlock(p_m_conn->p_sys_mutex);
    }

    assert(false);
    return RN_TCP_PARAM_ERR;
}
Example #2
static void notify_opened(qd_container_t *container, qd_connection_t *conn, void *context)
{
    const qd_node_type_t *nt;

    //
    // Note the locking structure in this function.  Generally this would be unsafe, but since
    // this particular list is only ever appended to and never has items inserted or deleted,
    // this usage is safe in this case.
    //
    sys_mutex_lock(container->lock);
    qdc_node_type_t *nt_item = DEQ_HEAD(container->node_type_list);
    sys_mutex_unlock(container->lock);

    while (nt_item) {
        nt = nt_item->ntype;
        if (qd_connection_inbound(conn)) {
            if (nt->inbound_conn_opened_handler)
                nt->inbound_conn_opened_handler(nt->type_context, conn, context);
        } else {
            if (nt->outbound_conn_opened_handler)
                nt->outbound_conn_opened_handler(nt->type_context, conn, context);
        }

        sys_mutex_lock(container->lock);
        nt_item = DEQ_NEXT(nt_item);
        sys_mutex_unlock(container->lock);
    }
}
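The lock-hop traversal above is worth a closer look: the lock is held only to read or advance the list pointer, never across the handler call. A minimal sketch of the same pattern, assuming a hypothetical append-only singly linked list (node_t, list_head, and list_lock are illustrative names, not from this codebase):

/* Sketch of the lock-hop traversal pattern used in notify_opened.
 * Safe only because nodes are appended, never removed or freed. */
typedef struct node {
    struct node *next;
    void (*handler)(void *context);
    void *context;
} node_t;

static node_t      *list_head;   /* append-only list */
static sys_mutex_t *list_lock;

static void visit_all(void)
{
    sys_mutex_lock(list_lock);
    node_t *item = list_head;         /* read the head under the lock */
    sys_mutex_unlock(list_lock);

    while (item) {
        item->handler(item->context); /* call out without holding the lock */

        sys_mutex_lock(list_lock);
        item = item->next;            /* advance under the lock */
        sys_mutex_unlock(list_lock);
    }
}

Dropping the lock around the handler call avoids holding it across arbitrary callback code; a pointer read under the lock stays valid afterwards only because the list never shrinks.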
Example #3
static char* test_two_reverse(void *context)
{
    while(fire_head());
    fire_mask = 0;

    nx_timer_schedule(timers[0], 4);
    nx_timer_schedule(timers[1], 2);

    sys_mutex_lock(lock);
    nx_timer_visit_LH(time++);
    nx_timer_visit_LH(time++);
    sys_mutex_unlock(lock);
    int count = fire_head();
    if (count < 1) return "First failed to fire";
    if (count > 1) return "Second fired prematurely";
    if (fire_mask != 2) return "Incorrect fire mask 2";

    sys_mutex_lock(lock);
    nx_timer_visit_LH(time++);
    nx_timer_visit_LH(time++);
    sys_mutex_unlock(lock);
    if (fire_head() < 1) return "Second failed to fire";
    if (fire_mask != 3)  return "Incorrect fire mask 3";

    return 0;
}
Example #4
static int writable_handler(qd_container_t *container, pn_connection_t *conn, qd_connection_t* qd_conn)
{
    const qd_node_type_t *nt;
    int                   event_count = 0;

    //
    // Note the locking structure in this function.  Generally this would be unsafe, but since
    // this particular list is only ever appended to and never has items inserted or deleted,
    // this usage is safe in this case.
    //
    sys_mutex_lock(container->lock);
    qdc_node_type_t *nt_item = DEQ_HEAD(container->node_type_list);
    sys_mutex_unlock(container->lock);

    while (nt_item) {
        nt = nt_item->ntype;
        if (nt->writable_handler)
            event_count += nt->writable_handler(nt->type_context, qd_conn, 0);

        sys_mutex_lock(container->lock);
        nt_item = DEQ_NEXT(nt_item);
        sys_mutex_unlock(container->lock);
    }

    return event_count;
}
Example #5
/**
 * Remove (the only) strong reference.
 *
 * If these were real strong/weak pointers, we would also call the
 * destructor for the referenced object here, but the referent is
 * destroyed by an lwip thread callback instead (see pollmgr_refptr_get).
 */
void
pollmgr_refptr_unref(struct pollmgr_refptr *rp)
{
    sys_mutex_lock(&rp->lock);

    LWIP_ASSERT1(rp->strong == 1);
    --rp->strong;

    if (rp->strong > 0) {
        sys_mutex_unlock(&rp->lock);
    }
    else {
        size_t weak;

        /* void *ptr = rp->ptr; */
        rp->ptr = NULL;

        /* delete ptr; // see doc comment */

        weak = rp->weak;
        sys_mutex_unlock(&rp->lock);
        if (weak == 0) {
            pollmgr_refptr_delete(rp);
        }
    }
}
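The fields touched by this function and by pollmgr_refptr_get below imply a layout roughly like the following sketch (illustrative, not the verbatim definition from the sources):

/* Sketch of the refptr layout implied by the surrounding code (assumed). */
struct pollmgr_refptr {
    sys_mutex_t lock;               /* guards the counts and the pointer */
    struct pollmgr_handler *ptr;    /* referent; NULL once the strong ref is dropped */
    size_t strong;                  /* 0 or 1: the poll manager's reference */
    size_t weak;                    /* references handed out to channels */
};

Whichever side observes both counts at zero calls pollmgr_refptr_delete, as both this function and pollmgr_refptr_get do.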
Example #6
static char* test_two_duplicate(void *context)
{
    while(fire_head());
    fire_mask = 0;

    nx_timer_schedule(timers[0], 2);
    nx_timer_schedule(timers[1], 2);

    sys_mutex_lock(lock);
    nx_timer_visit_LH(time++);
    nx_timer_visit_LH(time++);
    sys_mutex_unlock(lock);
    int count = fire_head();
    if (count != 2) return "Expected two firings";
    fire_head();
    if (fire_mask != 3) return "Incorrect fire mask 3";

    sys_mutex_lock(lock);
    nx_timer_visit_LH(time++);
    nx_timer_visit_LH(time++);
    sys_mutex_unlock(lock);
    if (fire_head() > 0) return "Spurious timer fires";

    return 0;
}
Example #7
/**
 * New Outgoing Link Handler
 */
static int router_outgoing_link_handler(void* context, dx_link_t *link)
{
    dx_router_t *router  = (dx_router_t*) context;
    pn_link_t   *pn_link = dx_link_pn(link);
    const char  *r_tgt   = pn_terminus_get_address(pn_link_remote_target(pn_link));

    sys_mutex_lock(router->lock);
    dx_router_link_t *rlink = new_dx_router_link_t();
    rlink->link = link;
    DEQ_INIT(rlink->out_fifo);
    dx_link_set_context(link, rlink);

    dx_field_iterator_t *iter = dx_field_iterator_string(r_tgt, ITER_VIEW_NO_HOST);
    int result = hash_insert(router->out_hash, iter, rlink);
    dx_field_iterator_free(iter);

    if (result == 0) {
        pn_terminus_copy(pn_link_source(pn_link), pn_link_remote_source(pn_link));
        pn_terminus_copy(pn_link_target(pn_link), pn_link_remote_target(pn_link));
        pn_link_open(pn_link);
        sys_mutex_unlock(router->lock);
        dx_log(module, LOG_TRACE, "Registered new local address: %s", r_tgt);
        return 0;
    }

    dx_log(module, LOG_TRACE, "Address '%s' not registered as it already exists", r_tgt);
    pn_link_close(pn_link);
    sys_mutex_unlock(router->lock);
    return 0;
}
Example #8
static char* test_single(void *context)
{
    while(fire_head());
    fire_mask = 0;

    nx_timer_schedule(timers[0], 2);
    if (fire_head() > 0) return "Premature firing 1";

    sys_mutex_lock(lock);
    nx_timer_visit_LH(time++);
    sys_mutex_unlock(lock);
    if (fire_head() > 0) return "Premature firing 2";

    sys_mutex_lock(lock);
    nx_timer_visit_LH(time++);
    sys_mutex_unlock(lock);
    if (fire_head() < 1) return "Failed to fire";

    sys_mutex_lock(lock);
    nx_timer_visit_LH(time++);
    nx_timer_visit_LH(time++);
    nx_timer_visit_LH(time++);
    sys_mutex_unlock(lock);
    if (fire_head() != 0) return "Spurious fires";

    if (fire_mask != 1)  return "Incorrect fire mask";
    if (timers[0]->state != TIMER_IDLE) return "Expected idle timer state";

    return 0;
}
RJ_API int rj_m_conn_recv(rj_net_m_conn_h handle, int *p_conn_id, rj_net_r_h *p_recv_data)
{
    rj_net_multi_conn_t *p_m_conn = (rj_net_multi_conn_t *)(handle);
    assert (NULL != p_m_conn);
    assert (NULL != p_conn_id);
    assert (NULL != p_recv_data);

    if ((NULL != p_m_conn) && (NULL != p_conn_id) && (NULL != p_recv_data))
    {
        sys_mutex_lock(p_m_conn->p_sys_mutex);

        // If there are no connections, return immediately.
        if(rj_list_size(p_m_conn->conn_list) <= 0)
        {
            sys_mutex_unlock(p_m_conn->p_sys_mutex);
            return RN_TCP_OTHER;
        }

        // If the current receive position is empty, start from the first connection.
        if (rj_list_end(p_m_conn->conn_list) == p_m_conn->curr_conn)
        {
            assert (NULL != p_m_conn->conn_list);
            p_m_conn->curr_conn = rj_list_begin(p_m_conn->conn_list);
        }

        assert (rj_list_end(p_m_conn->conn_list) != p_m_conn->curr_conn);

        // Having picked the channel to receive from, fetch its data.
        rj_net_conn_h net_conn = (rj_net_conn_h)rj_iter_data(p_m_conn->curr_conn);
        if (NULL != net_conn)
        {
            // Advance the receive position to the next connection.
            p_m_conn->curr_conn = rj_iter_add(p_m_conn->curr_conn);
            
            // Get this connection's id.
            *p_conn_id = rj_conn_id(net_conn);

            // Receive data from this channel.
            // Note: the result may indicate that the channel has been disconnected;
            // the caller must then call rj_m_conn_stop_conn to close it.
            int ret = rj_conn_recv(net_conn, p_recv_data);
            sys_mutex_unlock(p_m_conn->p_sys_mutex);
            return ret;
        }
        else
        {
            // Internal logic error.
            assert(false);
        }

        sys_mutex_unlock(p_m_conn->p_sys_mutex);
    }

    assert(false);
    return RN_TCP_PARAM_ERR;
}
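The comments above spell out a contract: rj_m_conn_recv may return an error indicating the channel has dropped, and the caller is then expected to close it with rj_m_conn_stop_conn. A hedged sketch of that caller-side loop, assuming an already-created handle; treating 0 as the success code is an assumption, since the rj_conn_recv convention is not shown in this listing:

/* Sketch of the caller-side protocol described in the comments above.
 * The success value (0) is assumed, not confirmed by this listing. */
static void recv_loop(rj_net_m_conn_h handle)
{
    for (;;)
    {
        int conn_id = -1;
        rj_net_r_h recv_data = NULL;

        int ret = rj_m_conn_recv(handle, &conn_id, &recv_data);
        if (RN_TCP_OTHER == ret)
            break;                      /* no connections left */

        if (0 != ret)
        {
            /* The channel reported an error: close it, exactly as the
             * comments in rj_m_conn_recv prescribe. */
            rj_m_conn_stop_conn(handle, conn_id);
            continue;
        }

        /* ... process recv_data ... */
    }
}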
Example #10
/**
 * Try to get the pointer from the implicitly weak reference we've got
 * from a channel.
 *
 * If we detect that the object is still strongly referenced, but no
 * longer registered with the poll manager, we abort the strengthening
 * conversion here because an lwip thread callback is already scheduled
 * to destruct the object.
 */
struct pollmgr_handler *
pollmgr_refptr_get(struct pollmgr_refptr *rp)
{
    struct pollmgr_handler *handler;
    size_t weak;

    sys_mutex_lock(&rp->lock);

    LWIP_ASSERT1(rp->weak > 0);
    weak = --rp->weak;

    handler = rp->ptr;
    if (handler == NULL) {
        LWIP_ASSERT1(rp->strong == 0);
        sys_mutex_unlock(&rp->lock);
        if (weak == 0) {
            pollmgr_refptr_delete(rp);
        }
        return NULL;
    }

    LWIP_ASSERT1(rp->strong == 1);

    /*
     * Here we would do:
     *
     *   ++rp->strong;
     *
     * and then, after channel handler is done, we would decrement it
     * back.
     *
     * Instead we check that the object is still registered with poll
     * manager. If it is, there's no race with lwip thread trying to
     * drop its strong reference, as lwip thread callback to destruct
     * the object is always scheduled by its poll manager callback.
     *
     * Conversely, if we detect that the object is no longer registered
     * with the poll manager, we abort immediately, since the channel
     * handler can't do anything useful anyway and would have to return
     * right away.
     *
     * Since channel handler would always find rp->strong as it had
     * left it, just elide extra strong reference creation to avoid
     * the whole back-and-forth.
     */

    if (handler->slot < 0) { /* no longer polling */
        sys_mutex_unlock(&rp->lock);
        return NULL;
    }

    sys_mutex_unlock(&rp->lock);
    return handler;
}
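A channel handler would then use it along these lines (a sketch; the surrounding channel machinery is assumed):

/* Sketch: consuming the weak reference from a channel callback (assumed context). */
static void channel_callback(struct pollmgr_refptr *rp)
{
    struct pollmgr_handler *handler = pollmgr_refptr_get(rp);
    if (handler == NULL) {
        /* Referent already destroyed, or teardown is scheduled on the
         * lwip thread; nothing useful can be done. */
        return;
    }

    /* handler is safe to use here: it is still registered with the poll
     * manager, so the lwip thread cannot destroy it underneath us. */
}

Note that pollmgr_refptr_get consumes one weak count per call, so each reference sent through a channel is good for exactly one lookup.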
Example #11
static char* test_separated(void *context)
{
    int count;

    while(fire_head());
    fire_mask = 0;

    nx_timer_schedule(timers[0], 2);
    nx_timer_schedule(timers[1], 4);

    sys_mutex_lock(lock);
    nx_timer_visit_LH(time++);
    nx_timer_visit_LH(time++);
    sys_mutex_unlock(lock);
    count = fire_head();
    if (count < 1) return "First failed to fire";
    if (count > 1) return "Second fired prematurely";
    if (fire_mask != 1) return "Incorrect fire mask 1";

    nx_timer_schedule(timers[2], 2);
    nx_timer_schedule(timers[3], 4);

    sys_mutex_lock(lock);
    nx_timer_visit_LH(time++);
    nx_timer_visit_LH(time++);
    sys_mutex_unlock(lock);
    count = fire_head();
    fire_head();
    if (count < 1) return "Second failed to fire";
    if (count < 2) return "Third failed to fire";
    if (fire_mask != 7)  return "Incorrect fire mask 7";

    sys_mutex_lock(lock);
    nx_timer_visit_LH(time++);
    nx_timer_visit_LH(time++);
    sys_mutex_unlock(lock);
    count = fire_head();
    if (count < 1) return "Fourth failed to fire";
    if (fire_mask != 15) return "Incorrect fire mask 15";

    sys_mutex_lock(lock);
    nx_timer_visit_LH(time++);
    nx_timer_visit_LH(time++);
    nx_timer_visit_LH(time++);
    nx_timer_visit_LH(time++);
    nx_timer_visit_LH(time++);
    nx_timer_visit_LH(time++);
    sys_mutex_unlock(lock);
    count = fire_head();
    if (count > 0) return "Spurious fire";

    return 0;
}
RJ_API int rj_m_conn_push_ch(rj_net_m_conn_h handle, int conn_id, rn_tcp_h ws_tcp)
{
    rj_net_multi_conn_t *p_m_conn = (rj_net_multi_conn_t *)(handle);
    assert (NULL != p_m_conn);

    if (NULL != p_m_conn)
    {
        sys_mutex_lock(p_m_conn->p_sys_mutex);

        rj_net_conn_h net_conn = find_conn(p_m_conn->conn_list, conn_id);
        if (NULL != net_conn)
        {
            // Add the channel to an existing connection.

            int ret = rj_conn_push_ch(net_conn, ws_tcp);

            sys_mutex_unlock(p_m_conn->p_sys_mutex);
            return ret;
        }
        else
        {
            // Create a new connection for this channel.

            rj_net_conn_h new_conn = rj_conn_create(p_m_conn, p_m_conn->sub_send_buf_len, p_m_conn->send_buf_len, p_m_conn->sub_recv_buf_len, p_m_conn->recv_buf_len);
            assert (NULL != new_conn);

            // Set the connection id.
            rj_conn_set_id(new_conn, conn_id);

            // Insert the channel into the new connection.
            if(0 != rj_conn_push_ch(new_conn, ws_tcp))
            {
                // Failed to insert the tcp channel.
                rj_conn_destroy(new_conn);
                new_conn = NULL;

                sys_mutex_unlock(p_m_conn->p_sys_mutex);
                return 1;
            }

            // Append the new connection to the tail of the list.
            rj_list_push_back(p_m_conn->conn_list, new_conn);

            sys_mutex_unlock(p_m_conn->p_sys_mutex);
            return (NULL != new_conn) ? 0 : 1;
        }

        sys_mutex_unlock(p_m_conn->p_sys_mutex);
    }

    return 1;
}
Example #13
/** \brief  Free TX buffers that are complete
 *
 *  \param[in] k64f_enet  Pointer to driver data structure
 */
static void k64f_tx_reclaim(struct k64f_enetdata *k64f_enet)
{
  uint8_t i;
  volatile enet_bd_struct_t * bdPtr = (enet_bd_struct_t *)k64f_enet->tx_desc_start_addr;

  /* Get exclusive access */
  sys_mutex_lock(&k64f_enet->TXLockMutex);

  // Traverse all descriptors, looking for the ones modified by the uDMA
  i = k64f_enet->tx_consume_index;
  while(i != k64f_enet->tx_produce_index && !(bdPtr[i].control & kEnetTxBdReady)) {
      if (k64f_enet->txb_aligned[i]) {
        free(k64f_enet->txb_aligned[i]);
        k64f_enet->txb_aligned[i] = NULL;
      } else if (k64f_enet->txb[i]) {
        pbuf_free(k64f_enet->txb[i]);
        k64f_enet->txb[i] = NULL;
      }
      osSemaphoreRelease(k64f_enet->xTXDCountSem.id);
      bdPtr[i].controlExtend2 &= ~TX_DESC_UPDATED_MASK;
      i = (i + 1) % ENET_TX_RING_LEN;
  }
  k64f_enet->tx_consume_index = i;

  /* Restore access */
  sys_mutex_unlock(&k64f_enet->TXLockMutex);
}
Example #14
uint64_t qdr_identifier(qdr_core_t* core)
{
    sys_mutex_lock(core->id_lock);
    uint64_t id = core->next_identifier++;
    sys_mutex_unlock(core->id_lock);
    return id;
}
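On platforms with C11 atomics the same unique-counter idiom can be written without a mutex; a sketch for comparison (not how qpid-dispatch implements it):

#include <stdatomic.h>
#include <stdint.h>

/* Lock-free counterpart of qdr_identifier's counter (illustrative only). */
static _Atomic uint64_t next_identifier = 1;

static uint64_t next_id(void)
{
    /* Relaxed ordering suffices: the only invariant is uniqueness. */
    return atomic_fetch_add_explicit(&next_identifier, 1, memory_order_relaxed);
}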
Example #15
void qdr_action_enqueue(qdr_core_t *core, qdr_action_t *action)
{
    sys_mutex_lock(core->action_lock);
    DEQ_INSERT_TAIL(core->action_list, action);
    sys_cond_signal(core->action_cond);
    sys_mutex_unlock(core->action_lock);
}
Example #16
void report_mem_leak(void)
{
    MEM_LEAK *leak_info;
    char *info;

    sys_mutex_lock(&mem_mutex);
    printf("ptr_start = %p\n", ptr_start);
    info = (char *)zalloc(name_length);
    if (info) {
        for (leak_info = ptr_start; leak_info != NULL; leak_info = leak_info->next) {
            printf("%p\n", leak_info);
            snprintf(info, name_length, "address : %p\n", leak_info->mem_info.address);
            printf("%s\n", info);
            snprintf(info, name_length, "size    : %d bytes\n", leak_info->mem_info.size);
            printf("%s\n", info);
            snprintf(info, name_length, "file    : %s\n", leak_info->mem_info.file_name);
            printf("%s\n", info);
            snprintf(info, name_length, "line    : %d\n", leak_info->mem_info.line);
            printf("%s\n", info);
        }
        clear();
        free(info);
    }
    sys_mutex_unlock(&mem_mutex);
    sys_mutex_free(&mem_mutex);
}
Example #17
static void task1(void *pdata)
{
	int timeout;
	INT8U err;
	char *p;

    while(1)
    {
        dprintf("task1,%s\r\n",pdata);
		//dprintf("sys_jiffies:%d\r\n",sys_jiffies());
		//dprintf("sys_now:%dms\r\n",sys_now());
		//p = OSQPend(mbox, 1, &err);
		timeout = sys_arch_mbox_fetch(&sys_mbox, (void **)&p, 0);
		sys_mutex_lock(&sys_mutex);
		if(timeout != -1)
			dprintf("task1 received mbox: %s\r\n",p);
		sys_mutex_unlock(&sys_mutex);
		//else
			//OSTaskResume(9);
#if 0
		if(err != OS_NO_ERR)
		{
			dprintf("task1 OSQPend err:%d\r\n",err);
			OSTaskResume(9);
		}
		else
			dprintf("task1 received mbox: %s\r\n",p);
#endif
    }
}
Example #18
int
sys_threadpool_add_task(sys_threadpool *pool,
	void(*task_proc)(void *),
	void *args,
	uint64_t *id)
{
	sys_task task;
	
	if (!pool)
		return SYS_INVALID_ARGS;

	if (pool->shutdown)
		return SYS_TP_SHUTTING_DOWN;

	sys_mutex_lock(pool->task_mutex);

	task.id = pool->next_task_id++;
	task.proc = task_proc;
	task.args = args;
	task.worker = NULL;

	rt_queue_push(&pool->task_queue, &task);

	sys_cond_var_signal(pool->task_notify);

	sys_mutex_unlock(pool->task_mutex);

	if (id)
		*id = task.id;

	return SYS_OK;
}
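Usage is then straightforward; a sketch assuming an already-created pool (the creation function is not shown in this listing):

#include <stdio.h>

/* Sketch: enqueueing work on the pool defined above. */
static void hello_task(void *args)
{
    printf("hello from task: %s\n", (const char *)args);
}

static int submit_example(sys_threadpool *pool)
{
    uint64_t id;
    int rc = sys_threadpool_add_task(pool, hello_task, "payload", &id);
    if (rc != SYS_OK)
        return rc;  /* SYS_INVALID_ARGS or SYS_TP_SHUTTING_DOWN, per above */
    return SYS_OK;
}
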
/* Free TX buffers that are complete */
STATIC void lpc_tx_reclaim_st(lpc_enetdata_t *lpc_enetif, u32_t cidx)
{
#if NO_SYS == 0
    /* Get exclusive access */
    sys_mutex_lock(&lpc_enetif->tx_lock_mutex);
#endif

    while (cidx != lpc_enetif->lpc_last_tx_idx) {
        if (lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx] != NULL) {
            LWIP_DEBUGF(EMAC_DEBUG | LWIP_DBG_TRACE,
                        ("lpc_tx_reclaim_st: Freeing packet %p (index %d)\n",
                         lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx],
                         lpc_enetif->lpc_last_tx_idx));
            pbuf_free(lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx]);
            lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx] = NULL;
        }

#if NO_SYS == 0
        xSemaphoreGive(lpc_enetif->xtx_count_sem);
#endif
        lpc_enetif->lpc_last_tx_idx++;
        if (lpc_enetif->lpc_last_tx_idx >= LPC_NUM_BUFF_TXDESCS) {
            lpc_enetif->lpc_last_tx_idx = 0;
        }
    }

#if NO_SYS == 0
    /* Restore access */
    sys_mutex_unlock(&lpc_enetif->tx_lock_mutex);
#endif
}
Example #20
static void push_event(action_t action, const char *type, void *object) {
    if (!event_lock) return;    /* Unit tests don't call qd_entity_cache_initialize */
    sys_mutex_lock(event_lock);
    entity_event_t *event = entity_event(action, type, object);
    DEQ_INSERT_TAIL(event_list, event);
    sys_mutex_unlock(event_lock);
}
RJ_API void rj_m_conn_stop_conn(rj_net_m_conn_h handle, int conn_id)
{
    rj_net_multi_conn_t *p_m_conn = (rj_net_multi_conn_t *)(handle);
    assert (NULL != p_m_conn);

    if (NULL != p_m_conn)
    {
        sys_mutex_lock(p_m_conn->p_sys_mutex);

        // Find the connection and shut it down.
        rj_net_conn_h net_conn = find_conn(p_m_conn->conn_list, conn_id);
        if (NULL != net_conn)
        {
            rj_list_remove(p_m_conn->conn_list, (void*)net_conn);
            
            rj_conn_stop(net_conn);
            rj_conn_destroy(net_conn);
        }

        // Simple approach: reset the current receive position to the list end;
        // the next receive will start again from the head of the connection list.
        p_m_conn->curr_conn = rj_list_end(p_m_conn->conn_list);

        sys_mutex_unlock(p_m_conn->p_sys_mutex);
    }
}
Example #22
File: alloc.c Project: ncdc/qpid
static void dx_alloc_init(dx_alloc_type_desc_t *desc)
{
    sys_mutex_lock(init_lock);

    desc->total_size = desc->type_size;
    if (desc->additional_size)
        desc->total_size += *desc->additional_size;

    //dx_log("ALLOC", LOG_TRACE, "Initialized Allocator - type=%s type-size=%d total-size=%d",
    //       desc->type_name, desc->type_size, desc->total_size);

    if (!desc->global_pool) {
        if (desc->config == 0)
            desc->config = desc->total_size > 256 ?
                &dx_alloc_default_config_big : &dx_alloc_default_config_small;

        assert (desc->config->local_free_list_max >= desc->config->transfer_batch_size);

        desc->global_pool = NEW(dx_alloc_pool_t);
        DEQ_INIT(desc->global_pool->free_list);
        desc->lock = sys_mutex();
        desc->stats = NEW(dx_alloc_stats_t);
        memset(desc->stats, 0, sizeof(dx_alloc_stats_t));
    }

    item_t *type_item = NEW(item_t);
    DEQ_ITEM_INIT(type_item);
    type_item->desc = desc;
    DEQ_INSERT_TAIL(type_list, type_item);

    sys_mutex_unlock(init_lock);
}
Example #23
qd_log_source_t *qd_log_source(const char *module)
{
    sys_mutex_lock(log_source_lock);
    qd_log_source_t* src = qd_log_source_lh(module);
    sys_mutex_unlock(log_source_lock);
    return src;
}
Example #24
void qd_message_free(qd_message_t *in_msg)
{
    if (!in_msg) return;
    uint32_t rc;
    qd_message_pvt_t     *msg     = (qd_message_pvt_t*) in_msg;

    qd_buffer_list_free_buffers(&msg->ma_to_override);
    qd_buffer_list_free_buffers(&msg->ma_trace);
    qd_buffer_list_free_buffers(&msg->ma_ingress);

    qd_message_content_t *content = msg->content;

    sys_mutex_lock(content->lock);
    rc = --content->ref_count;
    sys_mutex_unlock(content->lock);

    if (rc == 0) {
        if (content->parsed_message_annotations)
            qd_parse_free(content->parsed_message_annotations);

        qd_buffer_t *buf = DEQ_HEAD(content->buffers);
        while (buf) {
            DEQ_REMOVE_HEAD(content->buffers);
            qd_buffer_free(buf);
            buf = DEQ_HEAD(content->buffers);
        }

        sys_mutex_free(content->lock);
        free_qd_message_content_t(content);
    }

    free_qd_message_t((qd_message_t*) msg);
}
Example #25
/** \brief  Free TX buffers that are complete
 *
 *  \param[in] lpc_enetif  Pointer to driver data structure
 *  \param[in] cidx  EMAC current descriptor consumer index
 */
static void lpc_tx_reclaim_st(struct lpc_enetdata *lpc_enetif, u32_t cidx)
{
#if NO_SYS == 0
	/* Get exclusive access */
	sys_mutex_lock(&lpc_enetif->TXLockMutex);
#endif

	while (cidx != lpc_enetif->lpc_last_tx_idx) {
		if (lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx] != NULL) {
			LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
				("lpc_tx_reclaim_st: Freeing packet %p (index %d)\r\n",
				lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx],
				lpc_enetif->lpc_last_tx_idx));
			pbuf_free(lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx]);
		 	lpc_enetif->txb[lpc_enetif->lpc_last_tx_idx] = NULL;
		}

#if NO_SYS == 0
		osSemaphoreRelease(lpc_enetif->xTXDCountSem.id);
#endif
		lpc_enetif->lpc_last_tx_idx++;
		if (lpc_enetif->lpc_last_tx_idx >= LPC_NUM_BUFF_TXDESCS)
			lpc_enetif->lpc_last_tx_idx = 0;
	}

#if NO_SYS == 0
	/* Restore access */
	sys_mutex_unlock(&lpc_enetif->TXLockMutex);
#endif
}
Example #26
/**
 * Outgoing Link Writable Handler
 */
static int router_writable_link_handler(void* context, dx_link_t *link)
{
    dx_router_t      *router = (dx_router_t*) context;
    int               grant_delivery = 0;
    pn_delivery_t    *delivery;
    dx_router_link_t *rlink = (dx_router_link_t*) dx_link_get_context(link);
    pn_link_t        *pn_link = dx_link_pn(link);
    uint64_t          tag;

    sys_mutex_lock(router->lock);
    if (DEQ_SIZE(rlink->out_fifo) > 0) {
        grant_delivery = 1;
        tag = router->dtag++;
    }
    sys_mutex_unlock(router->lock);

    if (grant_delivery) {
        pn_delivery(pn_link, pn_dtag((char*) &tag, 8));
        delivery = pn_link_current(pn_link);
        if (delivery) {
            router_tx_handler(context, link, delivery);
            return 1;
        }
    }

    return 0;
}
Example #27
void qd_log_impl(qd_log_source_t *source, qd_log_level_t level, const char *file, int line, const char *fmt, ...)
{
    if (!qd_log_enabled(source, level)) return;

    qd_log_entry_t *entry = new_qd_log_entry_t();
    DEQ_ITEM_INIT(entry);
    entry->module = source->module;
    entry->level  = level;
    entry->file   = file ? strdup(file) : 0;
    entry->line   = line;
    time(&entry->time);
    va_list ap;
    va_start(ap, fmt);
    vsnprintf(entry->text, TEXT_MAX, fmt, ap);
    va_end(ap);

    write_log(source, entry);

    // Bounded buffer of log entries, keep most recent.
    sys_mutex_lock(log_lock);
    DEQ_INSERT_TAIL(entries, entry);
    if (DEQ_SIZE(entries) > LIST_MAX)
        qd_log_entry_free_lh(DEQ_HEAD(entries));
    sys_mutex_unlock(log_lock);
}
Example #28
static void
call_synced_function(struct threadsync_data *call_data, snmp_threadsync_called_fn fn)
{
  sys_mutex_lock(&call_data->threadsync_node->instance->sem_usage_mutex);
  call_data->threadsync_node->instance->sync_fn(fn, call_data);
  sys_sem_wait(&call_data->threadsync_node->instance->sem);
  sys_mutex_unlock(&call_data->threadsync_node->instance->sem_usage_mutex);
}
Example #29
void sys_unlock_tcpip_core(void)
{
    lwip_core_lock_count--;
    if (lwip_core_lock_count == 0) {
        lwip_core_lock_holder_thread = 0;
    }
    sys_mutex_unlock(&lock_tcpip_core);
}
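This unlock implies a matching lock side that records the holder and a nesting count. A sketch of what it presumably looks like, assuming the underlying mutex tolerates the port's nesting scheme; sys_current_thread_id() is a hypothetical helper, not a real lwIP call:

/* Sketch of the lock side implied by sys_unlock_tcpip_core (assumed, not
 * taken from the port). sys_current_thread_id() is hypothetical. */
void sys_lock_tcpip_core(void)
{
    sys_mutex_lock(&lock_tcpip_core);
    if (lwip_core_lock_count == 0) {
        lwip_core_lock_holder_thread = sys_current_thread_id();
    }
    lwip_core_lock_count++;
}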
Example #30
qd_log_source_t *qd_log_source_reset(const char *module)
{
    sys_mutex_lock(log_source_lock);
    qd_log_source_t* src = qd_log_source_lh(module);
    qd_log_source_defaults(src);
    sys_mutex_unlock(log_source_lock);
    return src;
}