/* Smoke test for UQueue FIFO semantics: elements come back in push order,
 * emptiness is reported correctly, and the queue is reusable after draining.
 * Aborts on the first failed expectation.
 */
static void expect(int condition)
{
	if (!condition)
		abort();
}

int main(void)
{
	UQueue *q = queue_new();

	/* Push 1 then 2; FIFO order means they pop back as 1 then 2. */
	expect(queue_push(q, UINT_TO_POINTER(1)));
	expect(queue_push(q, UINT_TO_POINTER(2)));
	expect(!queue_is_empty(q));
	expect(POINTER_TO_UINT(queue_pop(q)) == 1);
	expect(POINTER_TO_UINT(queue_pop(q)) == 2);
	expect(queue_is_empty(q));

	/* The queue must remain usable after being fully drained. */
	expect(queue_push(q, UINT_TO_POINTER(3)));
	expect(POINTER_TO_UINT(queue_pop(q)) == 3);

	queue_free(q);

	return 0;
}
/* Completion callback invoked by the network stack after a send attempt.
 *
 * @param context   network context the packet was sent on (unused here)
 * @param status    0 on success, non-zero on failure (only success is logged)
 * @param token     user token; appears to carry the sent byte count encoded
 *                  as a pointer — TODO confirm against the matching send call
 * @param user_data opaque user data (unused here)
 */
static inline void pkt_sent(struct net_context *context, int status,
			    void *token, void *user_data)
{
	if (!status) {
		/* POINTER_TO_UINT() yields an unsigned integer; printing it
		 * with %d is a format/argument type mismatch (undefined
		 * behavior per C11 7.21.6.1). Cast explicitly so the argument
		 * matches the conversion specifier.
		 */
		printk("Sent %d bytes", (int)POINTER_TO_UINT(token));
	}
}
/* Gate off the peripheral clock selected by @sub_system.
 *
 * The subsystem value encodes both the bus (APB1/APB2) and the enable-bit
 * mask; values above STM32F10X_CLOCK_APB2_BASE belong to APB2.
 *
 * @param dev        RCC clock-control device instance
 * @param sub_system encoded bus/bit selector (see STM32F10X_CLOCK_* macros)
 * @return 0 always
 */
static inline int stm32f10x_clock_control_off(struct device *dev,
					      clock_control_subsys_t sub_system)
{
	struct stm32f10x_rcc_data *rcc_data = dev->driver_data;
	volatile struct stm32f10x_rcc *regs =
		(struct stm32f10x_rcc *)(rcc_data->base);
	u32_t bits = POINTER_TO_UINT(sub_system);

	if (bits > STM32F10X_CLOCK_APB2_BASE) {
		/* APB2 peripheral: strip the bus tag, then clear its
		 * enable bit in APB2ENR.
		 */
		bits &= ~(STM32F10X_CLOCK_APB2_BASE);
		regs->apb2enr &= ~bits;
	} else {
		regs->apb1enr &= ~bits;
	}

	return 0;
}
/* Dispatch a low-level sc_event to the registered sEventData handler.
 *
 * Looks up the event's opaque data (used as the map key when the event was
 * registered — presumably an event id; verify against the registration path)
 * in the singleton's event map and forwards it to the owning command.
 *
 * Returns SC_RESULT_OK on successful dispatch, or
 * SC_RESULT_ERROR_INVALID_STATE if the event is no longer registered.
 */
sc_result sctpEventManager::_eventsCallback(const sc_event *event, sc_addr arg)
{
    // Hold the events mutex for the whole callback so the map cannot be
    // mutated (e.g. by event destruction) while we dispatch.
    QMutexLocker locker(&sctpEventManager::msInstance->mEventsMutex);

    Q_ASSERT(event != 0);

    // The event's user data doubles as the key into the registered-events map.
    tScEventsMap::iterator it = sctpEventManager::msInstance->mEvents.find(
        POINTER_TO_UINT(sc_event_get_data(event)));
    if (it == sctpEventManager::msInstance->mEvents.end())
        return SC_RESULT_ERROR_INVALID_STATE;

    sEventData *evt = it->second;
    // Sanity: the stored record must be complete and must correspond to
    // exactly this sc_event instance.
    Q_ASSERT(evt && evt->cmd);
    Q_ASSERT(event == evt->event);

    evt->cmd->processEventEmit(evt->id, sc_event_get_element(event), arg);

    return SC_RESULT_OK;
}
/* Report the bus clock rate feeding the peripheral selected by @sub_system.
 *
 * Subsystem values above STM32F10X_CLOCK_APB2_BASE live on APB2; everything
 * else is on APB1. The matching APB prescaler is applied to the AHB clock.
 *
 * @param clock      clock-control device (unused)
 * @param sub_system encoded bus/bit selector (see STM32F10X_CLOCK_* macros)
 * @param rate       out: resulting APB clock frequency in Hz
 * @return 0 always
 */
static int stm32f10x_clock_control_get_subsys_rate(struct device *clock,
						   clock_control_subsys_t sub_system,
						   u32_t *rate)
{
	u32_t bus_id = POINTER_TO_UINT(sub_system);
	u32_t ahb_clk;
	u32_t presc;

	ARG_UNUSED(clock);

	/* SYSCLK is assumed to equal SYS_CLOCK_HW_CYCLES_PER_SEC. */
	ahb_clk = get_ahb_clock(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);

	if (bus_id > STM32F10X_CLOCK_APB2_BASE) {
		presc = CONFIG_CLOCK_STM32F10X_CONN_LINE_APB2_PRESCALER;
	} else {
		presc = CONFIG_CLOCK_STM32F10X_CONN_LINE_APB1_PRESCALER;
	}

	*rate = get_apb_clock(ahb_clk, presc);

	return 0;
}
/* Transmit a network packet on the SAM GMAC controller.
 *
 * Walks the packet's fragment chain, programming one TX DMA descriptor per
 * fragment on queue 0, then kicks off transmission via GMAC_NCR. Blocks on
 * the per-queue descriptor semaphore when the ring is full.
 *
 * @param iface network interface to send on
 * @param pkt   packet to transmit; must have at least one fragment
 * @return 0 on success, -EIO if tx_error_handler() flushed the queue
 *         while this call was in progress
 */
static int eth_tx(struct net_if *iface, struct net_pkt *pkt)
{
	struct device *const dev = net_if_get_device(iface);
	const struct eth_sam_dev_cfg *const cfg = DEV_CFG(dev);
	struct eth_sam_dev_data *const dev_data = DEV_DATA(dev);
	Gmac *gmac = cfg->regs;
	/* Single-queue operation: all frames go out on queue 0. */
	struct gmac_queue *queue = &dev_data->queue_list[0];
	struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list;
	struct gmac_desc *tx_desc;
	struct net_buf *frag;
	u8_t *frag_data;
	u16_t frag_len;
	/* Snapshot the error counter so we can detect a concurrent
	 * tx_error_handler() flush at each critical section below.
	 */
	u32_t err_tx_flushed_count_at_entry = queue->err_tx_flushed_count;
	unsigned int key;

	__ASSERT(pkt, "buf pointer is NULL");
	__ASSERT(pkt->frags, "Frame data missing");

	SYS_LOG_DBG("ETH tx");

	/* First fragment is special - it contains link layer (Ethernet
	 * in our case) header. Modify the data pointer to account for more data
	 * in the beginning of the buffer.
	 */
	net_buf_push(pkt->frags, net_pkt_ll_reserve(pkt));

	frag = pkt->frags;
	while (frag) {
		frag_data = frag->data;
		frag_len = frag->len;
		/* Assure cache coherency before DMA read operation */
		DCACHE_CLEAN(frag_data, frag_len);

		/* Wait until a TX descriptor is free (released by the TX
		 * completion path).
		 */
		k_sem_take(&queue->tx_desc_sem, K_FOREVER);

		/* The following section becomes critical and requires IRQ lock
		 * / unlock protection only due to the possibility of executing
		 * tx_error_handler() function.
		 */
		key = irq_lock();

		/* Check if tx_error_handler() function was executed */
		if (queue->err_tx_flushed_count !=
		    err_tx_flushed_count_at_entry) {
			irq_unlock(key);
			return -EIO;
		}

		tx_desc = &tx_desc_list->buf[tx_desc_list->head];

		/* Update buffer descriptor address word */
		tx_desc->w0 = (u32_t)frag_data;
		/* Guarantee that address word is written before the status
		 * word to avoid race condition.
		 */
		__DMB(); /* data memory barrier */
		/* Update buffer descriptor status word (clear used bit):
		 * length, LASTBUFFER on the final fragment, WRAP on the
		 * ring's last descriptor.
		 */
		tx_desc->w1 =
			(frag_len & GMAC_TXW1_LEN) |
			(!frag->frags ? GMAC_TXW1_LASTBUFFER : 0) |
			(tx_desc_list->head == tx_desc_list->len - 1 ?
			 GMAC_TXW1_WRAP : 0);

		/* Update descriptor position */
		MODULO_INC(tx_desc_list->head, tx_desc_list->len);

		__ASSERT(tx_desc_list->head != tx_desc_list->tail,
			 "tx_desc_list overflow");

		irq_unlock(key);

		/* Continue with the rest of fragments (only data) */
		frag = frag->frags;
	}

	key = irq_lock();

	/* Check if tx_error_handler() function was executed */
	if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) {
		irq_unlock(key);
		return -EIO;
	}

	/* Ensure the descriptor following the last one is marked as used */
	tx_desc = &tx_desc_list->buf[tx_desc_list->head];
	tx_desc->w1 |= GMAC_TXW1_USED;

	/* Account for a sent frame */
	ring_buf_put(&queue->tx_frames, POINTER_TO_UINT(pkt));

	irq_unlock(key);

	/* Start transmission */
	gmac->GMAC_NCR |= GMAC_NCR_TSTART;

	return 0;
}
static int event_find_refnum_cb(void *l, void *r) { u_int32_t refnum = POINTER_TO_UINT(l); struct event_entry *event = (struct event_entry *) r; return (refnum - event->refnum); }
/* Hash a table key by reusing its pointer-encoded value directly. */
static uint32_t fd_hash(const void *p)
{
	uint32_t h = POINTER_TO_UINT(p);

	return h;
}
/* Key-equality callback: two keys match when their pointer-encoded
 * values coincide. Returns 1 on match, 0 otherwise.
 */
static int fd_equal(const void *a, const void *b)
{
	return (POINTER_TO_UINT(a) == POINTER_TO_UINT(b)) ? 1 : 0;
}