static void psock_insert_segment(FAR struct tcp_wrbuffer_s *wrb,
                                 FAR sq_queue_t *q)
{
  sq_entry_t *entry = (sq_entry_t *)wrb;
  sq_entry_t *insert = NULL;
  sq_entry_t *itr;

  for (itr = sq_peek(q); itr; itr = sq_next(itr))
    {
      FAR struct tcp_wrbuffer_s *wrb0 = (FAR struct tcp_wrbuffer_s *)itr;
      if (WRB_SEQNO(wrb0) < WRB_SEQNO(wrb))
        {
          insert = itr;
        }
      else
        {
          break;
        }
    }

  if (insert)
    {
      sq_addafter(insert, entry, q);
    }
  else
    {
      sq_addfirst(entry, q);
    }
}
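/* The same ordered-insert idiom recurs below (send_insert_seqment,
 * mq_dosend, wd_start).  This is a minimal, self-contained sketch of the
 * pattern; the sq_* definitions are simplified stand-ins assumed from
 * context, not the real NuttX <queue.h> implementation. */

#include <stdio.h>
#include <stddef.h>

typedef struct sq_entry_s { struct sq_entry_s *flink; } sq_entry_t;
typedef struct { sq_entry_t *head; sq_entry_t *tail; } sq_queue_t;

#define sq_init(q) ((void)((q)->head = (q)->tail = NULL))
#define sq_peek(q) ((q)->head)
#define sq_next(p) ((p)->flink)

static void sq_addfirst(sq_entry_t *node, sq_queue_t *q)
{
  node->flink = q->head;
  if (q->head == NULL) { q->tail = node; }
  q->head = node;
}

static void sq_addafter(sq_entry_t *prev, sq_entry_t *node, sq_queue_t *q)
{
  node->flink = prev->flink;
  prev->flink = node;
  if (q->tail == prev) { q->tail = node; }
}

/* The element embeds its link as the first member so casts between the
 * element type and sq_entry_t are valid -- the same trick the real code
 * relies on. */

struct seg_s { sq_entry_t node; unsigned int seqno; };

static void insert_sorted(struct seg_s *seg, sq_queue_t *q)
{
  sq_entry_t *insert = NULL;  /* Last entry with a smaller seqno */
  sq_entry_t *itr;

  for (itr = sq_peek(q); itr; itr = sq_next(itr))
    {
      if (((struct seg_s *)itr)->seqno < seg->seqno) { insert = itr; }
      else { break; }
    }

  if (insert) { sq_addafter(insert, &seg->node, q); }  /* Mid-list or tail */
  else { sq_addfirst(&seg->node, q); }                 /* New smallest: head */
}

int main(void)
{
  struct seg_s segs[3] = { { {NULL}, 300 }, { {NULL}, 100 }, { {NULL}, 200 } };
  sq_queue_t q;
  sq_entry_t *itr;
  int i;

  sq_init(&q);
  for (i = 0; i < 3; i++) { insert_sorted(&segs[i], &q); }

  for (itr = sq_peek(&q); itr; itr = sq_next(itr))
    {
      printf("%u\n", ((struct seg_s *)itr)->seqno);  /* 100 200 300 */
    }

  return 0;
}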
perf_counter_t perf_alloc(enum perf_counter_type type, const char *name)
{
  perf_counter_t ctr = NULL;

  switch (type) {
  case PC_COUNT:
    ctr = (perf_counter_t)calloc(sizeof(struct perf_ctr_count), 1);
    break;

  case PC_ELAPSED:
    ctr = (perf_counter_t)calloc(sizeof(struct perf_ctr_elapsed), 1);
    break;

  case PC_INTERVAL:
    ctr = (perf_counter_t)calloc(sizeof(struct perf_ctr_interval), 1);
    break;

  default:
    break;
  }

  if (ctr != NULL) {
    ctr->type = type;
    ctr->name = name;
    sq_addfirst(&ctr->link, &perf_counters);
  }

  return ctr;
}
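/* Usage sketch for the allocator above, assuming the rest of the PX4
 * perf_counter API from the same module (perf_count(), perf_begin(),
 * perf_end()); the counter names here are illustrative. */

static perf_counter_t _loop_perf;
static perf_counter_t _loop_interval;

void task_init(void)
{
  _loop_perf     = perf_alloc(PC_ELAPSED, "task_loop");
  _loop_interval = perf_alloc(PC_INTERVAL, "task_interval");
}

void task_loop_once(void)
{
  perf_count(_loop_interval);  /* PC_INTERVAL: time between calls */
  perf_begin(_loop_perf);      /* PC_ELAPSED: time spent in the body */

  /* ... do the real work here ... */

  perf_end(_loop_perf);
}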
static void hrt_call_enter(struct hrt_call *entry)
{
  struct hrt_call *call, *next;

  call = (struct hrt_call *)sq_peek(&callout_queue);

  if ((call == NULL) || (entry->deadline < call->deadline)) {
    sq_addfirst(&entry->link, &callout_queue);
    //lldbg("call enter at head, reschedule\n");
    /* we changed the next deadline, reschedule the timer event */
    hrt_call_reschedule();
  } else {
    do {
      next = (struct hrt_call *)sq_next(&call->link);

      if ((next == NULL) || (entry->deadline < next->deadline)) {
        //lldbg("call enter after head\n");
        sq_addafter(&call->link, &entry->link, &callout_queue);
        break;
      }
    } while ((call = next) != NULL);
  }

  //lldbg("scheduled\n");
}
static void send_insert_seqment(FAR struct uip_wrbuffer_s *segment,
                                FAR sq_queue_t *q)
{
  sq_entry_t *entry = (sq_entry_t *)segment;
  sq_entry_t *insert = NULL;
  sq_entry_t *itr;

  for (itr = sq_peek(q); itr; itr = sq_next(itr))
    {
      FAR struct uip_wrbuffer_s *segment0 = (FAR struct uip_wrbuffer_s *)itr;
      if (segment0->wb_seqno < segment->wb_seqno)
        {
          insert = itr;
        }
      else
        {
          break;
        }
    }

  if (insert)
    {
      sq_addafter(insert, entry, q);
    }
  else
    {
      sq_addfirst(entry, q);
    }
}
int lc823450_dmastart(DMA_HANDLE handle, dma_callback_t callback, void *arg)
{
  struct lc823450_dmach_s *dmach = (struct lc823450_dmach_s *)handle;
  irqstate_t flags;

  DEBUGASSERT(dmach != NULL);

  /* Select the physical channel */

  flags = spin_lock_irqsave();

  sq_addfirst(&dmach->q_ent, &g_dma.phydmach[dmach->chn].req_q);
  dmach->callback = callback;
  dmach->arg = arg;

  /* Kick the DMAC if it is not already active */

  if (!g_dma.phydmach[dmach->chn].inprogress)
    {
      phydmastart(&g_dma.phydmach[dmach->chn]);
    }

  spin_unlock_irqrestore(flags);
  return OK;
}
void uip_tcpreadaheadinit(void)
{
  int i;

  sq_init(&g_freebuffers);
  for (i = 0; i < CONFIG_NET_NTCP_READAHEAD_BUFFERS; i++)
    {
      sq_addfirst(&g_buffers[i].rh_node, &g_freebuffers);
    }
}
void uip_tcpwrbuffer_init(void)
{
  int i;

  sq_init(&g_wrbuffer.freebuffers);
  for (i = 0; i < CONFIG_NET_NTCP_WRITE_BUFFERS; i++)
    {
      sq_addfirst(&g_wrbuffer.buffers[i].wb_node, &g_wrbuffer.freebuffers);
    }

  sem_init(&g_wrbuffer.sem, 0, CONFIG_NET_NTCP_WRITE_BUFFERS);
}
void tcp_wrbuffer_initialize(void)
{
  int i;

  sq_init(&g_wrbuffer.freebuffers);
  for (i = 0; i < CONFIG_NET_TCP_NWRBCHAINS; i++)
    {
      sq_addfirst(&g_wrbuffer.buffers[i].wb_node, &g_wrbuffer.freebuffers);
    }

  sem_init(&g_wrbuffer.sem, 0, CONFIG_NET_TCP_NWRBCHAINS);
}
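/* Minimal sketch of the allocation side that pairs with the semaphore
 * initialized above, assuming the same g_wrbuffer globals.  The count
 * mirrors the free-list length, so after a successful sem_wait() the
 * sq_remfirst() cannot come back empty.  (The real tcp_wrbuffer_alloc()
 * does more, e.g. attaching an I/O buffer chain, and list access would
 * need the module's usual locking.) */

FAR struct tcp_wrbuffer_s *tcp_wrbuffer_alloc(void)
{
  FAR struct tcp_wrbuffer_s *wrb;

  /* Block until at least one write buffer chain is free */

  while (sem_wait(&g_wrbuffer.sem) < 0)
    {
      DEBUGASSERT(errno == EINTR);
    }

  wrb = (FAR struct tcp_wrbuffer_s *)sq_remfirst(&g_wrbuffer.freebuffers);
  DEBUGASSERT(wrb != NULL);
  return wrb;
}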
FAR struct igmp_group_s *igmp_grpalloc(FAR struct net_driver_s *dev,
                                       FAR const in_addr_t *addr)
{
  FAR struct igmp_group_s *group;
  net_lock_t flags;

  nllvdbg("addr: %08x dev: %p\n", *addr, dev);
  if (up_interrupt_context())
    {
#if CONFIG_PREALLOC_IGMPGROUPS > 0
      grplldbg("Use a pre-allocated group entry\n");
      group = igmp_grpprealloc();
#else
      grplldbg("Cannot allocate from interrupt handler\n");
      group = NULL;
#endif
    }
  else
    {
      grplldbg("Allocate from the heap\n");
      group = igmp_grpheapalloc();
    }

  grplldbg("group: %p\n", group);

  /* Check if we successfully allocated a group structure */

  if (group)
    {
      /* Initialize the non-zero elements of the group structure */

      net_ipv4addr_copy(group->grpaddr, *addr);
      sem_init(&group->sem, 0, 0);

      /* Initialize the group timer (but don't start it yet) */

      group->wdog = wd_create();
      DEBUGASSERT(group->wdog);

      /* Interrupts must be disabled in order to modify the group list */

      flags = net_lock();

      /* Add the group structure to the list in the device structure */

      sq_addfirst((FAR sq_entry_t *)group, &dev->grplist);
      net_unlock(flags);
    }

  return group;
}
static inline void destroy_work_item(work_q_item_t *item)
{
  sem_destroy(&item->wait_sem); /* Destroy the item lock */

  /* Return the item to the free item queue for later reuse */

  lock_queue(&g_free_q);
  sq_addfirst(&item->link, &(g_free_q.q));

  /* Update the queue size and potentially the maximum queue size */

  if (++g_free_q.size > g_free_q.max_size) {
    g_free_q.max_size = g_free_q.size;
  }

  unlock_queue(&g_free_q);
}
void uip_grpinit(void)
{
  FAR struct igmp_group_s *group;
  int i;

  grplldbg("Initializing\n");

#if CONFIG_PREALLOC_IGMPGROUPS > 0
  for (i = 0; i < CONFIG_PREALLOC_IGMPGROUPS; i++)
    {
      group = &g_preallocgrps[i];
      sq_addfirst((FAR sq_entry_t *)group, &g_freelist);
    }
#endif
}
static work_q_item_t *create_work_item(void)
{
  work_q_item_t *item;

  /* Try to reuse an item from the free item queue */

  lock_queue(&g_free_q);

  if ((item = (work_q_item_t *)sq_remfirst(&(g_free_q.q)))) {
    g_free_q.size--;
  }

  unlock_queue(&g_free_q);

  /* If there weren't any free items, then obtain memory for new ones */

  if (item == NULL) {
    item = (work_q_item_t *)malloc(k_work_item_allocation_chunk_size * sizeof(work_q_item_t));

    if (item) {
      item->first = 1;
      lock_queue(&g_free_q);

      for (size_t i = 1; i < k_work_item_allocation_chunk_size; i++) {
        (item + i)->first = 0;
        sq_addfirst(&(item + i)->link, &(g_free_q.q));
      }

      /* Update the queue size and potentially the maximum queue size */

      g_free_q.size += k_work_item_allocation_chunk_size - 1;

      if (g_free_q.size > g_free_q.max_size) {
        g_free_q.max_size = g_free_q.size;
      }

      unlock_queue(&g_free_q);
    }
  }

  /* If we got one, then initialize the item's wait semaphore */

  if (item) {
    px4_sem_init(&item->wait_sem, 1, 0); /* Caller will wait on this... initially locked */
  }

  /* Return the item pointer, or NULL if everything failed */

  return item;
}
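/* The item->first flag above records which element heads a malloc()ed
 * chunk: only that element may ever be passed to free(), since the rest
 * of the chunk lives inside the same allocation.  A hypothetical cleanup
 * routine (not in the original source) illustrating the constraint; it
 * is only valid once every item has been returned to the free queue. */

static void free_work_items(void)
{
  sq_queue_t heads;
  work_q_item_t *item;

  sq_init(&heads);
  lock_queue(&g_free_q);

  /* Unlink every item first; no chunk head may be freed while other
   * members of its chunk are still linked into the queue. */

  while ((item = (work_q_item_t *)sq_remfirst(&(g_free_q.q))) != NULL) {
    g_free_q.size--;

    if (item->first) {
      sq_addfirst(&item->link, &heads);
    }
  }

  unlock_queue(&g_free_q);

  /* Now release each chunk through its head element */

  while ((item = (work_q_item_t *)sq_remfirst(&heads)) != NULL) {
    free(item);
  }
}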
static inline void recvfrom_readahead(struct recvfrom_s *pstate)
{
  FAR struct uip_conn *conn = (FAR struct uip_conn *)pstate->rf_sock->s_conn;
  FAR struct uip_readahead_s *readahead;
  size_t recvlen;

  /* Check if there is any TCP data already buffered in a read-ahead
   * buffer.
   */

  do
    {
      /* Get the read-ahead buffer at the head of the list (if any) */

      readahead = (struct uip_readahead_s *)sq_remfirst(&conn->readahead);
      if (readahead)
        {
          /* We have a new buffer... transfer that buffered data into
           * the user buffer.
           *
           * First, get the length of the data to transfer.
           */

          if (readahead->rh_nbytes > pstate->rf_buflen)
            {
              recvlen = pstate->rf_buflen;
            }
          else
            {
              recvlen = readahead->rh_nbytes;
            }

          if (recvlen > 0)
            {
              /* Copy the read-ahead data into the user buffer */

              memcpy(pstate->rf_buffer, readahead->rh_buffer, recvlen);
              nllvdbg("Received %d bytes (of %d)\n", recvlen, readahead->rh_nbytes);

              /* Update the accumulated size of the data read */

              pstate->rf_recvlen += recvlen;
              pstate->rf_buffer  += recvlen;
              pstate->rf_buflen  -= recvlen;
            }

          /* If the read-ahead buffer is now empty, then release it.  If not,
           * then we have to move the remaining data down and return the
           * buffer to the front of the list.
           */

          if (recvlen < readahead->rh_nbytes)
            {
              readahead->rh_nbytes -= recvlen;

              /* The source and destination regions may overlap, so
               * memmove() (not memcpy()) is required here.
               */

              memmove(readahead->rh_buffer, &readahead->rh_buffer[recvlen],
                      readahead->rh_nbytes);
              sq_addfirst(&readahead->rh_node, &conn->readahead);
            }
          else
            {
              uip_tcpreadaheadrelease(readahead);
            }
        }
    }
  while (readahead && pstate->rf_buflen > 0);
}
int uip_backlogcreate(FAR struct uip_conn *conn, int nblg)
{
  FAR struct uip_backlog_s *bls = NULL;
  FAR struct uip_blcontainer_s *blc;
  uip_lock_t flags;
  int size;
  int offset;
  int i;

  nllvdbg("conn=%p nblg=%d\n", conn, nblg);

#ifdef CONFIG_DEBUG
  if (!conn)
    {
      return -EINVAL;
    }
#endif

  /* Then allocate the backlog as requested */

  if (nblg > 0)
    {
      /* Align the list of backlog structures to 32-bit boundaries.  This
       * may be excessive on 16- or 24-bit address machines and insufficient
       * on 64-bit address machines -- REVISIT
       */

      offset = (sizeof(struct uip_backlog_s) + 3) & ~3;

      /* Then determine the full size of the allocation, including the
       * uip_backlog_s, a pre-allocated array of struct uip_blcontainer_s,
       * and the alignment padding
       */

      size = offset + nblg * sizeof(struct uip_blcontainer_s);

      /* Then allocate that much */

      bls = (FAR struct uip_backlog_s *)kzalloc(size);
      if (!bls)
        {
          nlldbg("Failed to allocate backlog\n");
          return -ENOMEM;
        }

      /* Then add all of the pre-allocated containers to the free list */

      blc = (FAR struct uip_blcontainer_s *)(((FAR uint8_t *)bls) + offset);
      for (i = 0; i < nblg; i++, blc++)
        {
          sq_addfirst(&blc->bc_node, &bls->bl_free);
        }
    }

  /* Destroy any existing backlog (shouldn't be any) */

  flags = uip_lock();
  uip_backlogdestroy(conn);

  /* Now install the backlog tear-off in the connection.  NOTE that bls may
   * actually be NULL if nblg is <= 0; in that case, we are disabling backlog
   * support.  Since interrupts are disabled, destroying the old backlog and
   * replacing it with the new one is an atomic operation
   */

  conn->backlog = bls;
  uip_unlock(flags);
  return OK;
}
int mq_dosend(mqd_t mqdes, FAR struct mqueue_msg_s *mqmsg,
              FAR const char *msg, size_t msglen, int prio)
{
  FAR struct tcb_s *btcb;
  FAR struct mqueue_inode_s *msgq;
  FAR struct mqueue_msg_s *next;
  FAR struct mqueue_msg_s *prev;
  irqstate_t saved_state;

  /* Get a pointer to the message queue */

  sched_lock();
  msgq = mqdes->msgq;

  /* Construct the message header info */

  mqmsg->priority = prio;
  mqmsg->msglen   = msglen;

  /* Copy the message data into the message */

  memcpy((FAR void *)mqmsg->mail, (FAR const void *)msg, msglen);

  /* Insert the new message in the message queue */

  saved_state = irqsave();

  /* Search the message list to find the location to insert the new
   * message.  Each list is maintained in ascending priority order.
   */

  for (prev = NULL, next = (FAR struct mqueue_msg_s *)msgq->msglist.head;
       next && prio <= next->priority;
       prev = next, next = next->next);

  /* Add the message at the right place */

  if (prev)
    {
      sq_addafter((FAR sq_entry_t *)prev, (FAR sq_entry_t *)mqmsg,
                  &msgq->msglist);
    }
  else
    {
      sq_addfirst((FAR sq_entry_t *)mqmsg, &msgq->msglist);
    }

  /* Increment the count of messages in the queue */

  msgq->nmsgs++;
  irqrestore(saved_state);

  /* Check if we need to notify any tasks that are attached to the
   * message queue
   */

#ifndef CONFIG_DISABLE_SIGNALS
  if (msgq->ntmqdes)
    {
      struct sigevent event;
      pid_t pid;

      /* Remove the message notification data from the message queue. */

      memcpy(&event, &msgq->ntevent, sizeof(struct sigevent));
      pid = msgq->ntpid;

      /* Detach the notification */

      memset(&msgq->ntevent, 0, sizeof(struct sigevent));
      msgq->ntpid   = INVALID_PROCESS_ID;
      msgq->ntmqdes = NULL;

      /* Notify the client via a signal? */

      if (event.sigev_notify == SIGEV_SIGNAL)
        {
          /* Yes... Queue the signal -- What if this returns an error? */

#ifdef CONFIG_CAN_PASS_STRUCTS
          DEBUGVERIFY(sig_mqnotempty(pid, event.sigev_signo,
                                     event.sigev_value));
#else
          DEBUGVERIFY(sig_mqnotempty(pid, event.sigev_signo,
                                     event.sigev_value.sival_ptr));
#endif
        }

#ifdef CONFIG_SIG_EVTHREAD
      /* Notify the client via a function call */

      else if (event.sigev_notify == SIGEV_THREAD)
        {
          DEBUGVERIFY(sig_notification(pid, &event));
        }
#endif
    }
#endif

  /* Check if any tasks are waiting for the MQ not empty event. */

  saved_state = irqsave();
  if (msgq->nwaitnotempty > 0)
    {
      /* Find the highest priority task that is waiting for this queue
       * to be non-empty in the g_waitingformqnotempty list.  sched_lock()
       * should give us sufficient protection since interrupts should
       * never cause a change in this list.
       */

      for (btcb = (FAR struct tcb_s *)g_waitingformqnotempty.head;
           btcb && btcb->msgwaitq != msgq;
           btcb = btcb->flink);

      /* If one was found, unblock it */

      ASSERT(btcb);

      btcb->msgwaitq = NULL;
      msgq->nwaitnotempty--;
      up_unblock_task(btcb);
    }

  irqrestore(saved_state);
  sched_unlock();
  return OK;
}
int wd_start(WDOG_ID wdog, int delay, wdentry_t wdentry, int argc, ...)
{
  va_list ap;
  FAR wdog_t *curr;
  FAR wdog_t *prev;
  FAR wdog_t *next;
  int32_t now;
  irqstate_t saved_state;
  int i;

  /* Verify the wdog */

  if (!wdog || argc > CONFIG_MAX_WDOGPARMS || delay < 0)
    {
      *get_errno_ptr() = EINVAL;
      return ERROR;
    }

  /* Check if the watchdog has been started.  If so, stop it.
   * NOTE: There is a race condition here... the caller may receive
   * the watchdog between the time that wd_start is called and
   * the critical section is established.
   */

  saved_state = irqsave();
  if (wdog->active)
    {
      wd_cancel(wdog);
    }

  /* Save the data in the watchdog structure */

  wdog->func = wdentry;         /* Function to execute when delay expires */
  up_getpicbase(&wdog->picbase);
  wdog->argc = argc;

  va_start(ap, argc);
  for (i = 0; i < argc; i++)
    {
      wdog->parm[i] = va_arg(ap, uint32_t);
    }

#ifdef CONFIG_DEBUG
  for (; i < CONFIG_MAX_WDOGPARMS; i++)
    {
      wdog->parm[i] = 0;
    }
#endif

  va_end(ap);

  /* Calculate delay+1, forcing the delay into a range that we can handle */

  if (delay <= 0)
    {
      delay = 1;
    }
  else if (++delay <= 0)
    {
      delay--;
    }

  /* Do the easy case first -- when the watchdog timer queue is empty. */

  if (g_wdactivelist.head == NULL)
    {
      sq_addlast((FAR sq_entry_t *)wdog, &g_wdactivelist);
    }

  /* There are other active watchdogs in the timer queue */

  else
    {
      now = 0;
      prev = curr = (FAR wdog_t *)g_wdactivelist.head;

      /* Advance to positive time */

      while ((now += curr->lag) < 0 && curr->next)
        {
          prev = curr;
          curr = curr->next;
        }

      /* Advance past shorter delays */

      while (now <= delay && curr->next)
        {
          prev = curr;
          curr = curr->next;
          now += curr->lag;
        }

      /* Check if the new wdog must be inserted before the curr. */

      if (delay < now)
        {
          /* The relative delay time is less than or equal to the current
           * delay time, so decrement the current delay time by the new
           * relative delay time.
           */

          delay -= (now - curr->lag);
          curr->lag -= delay;

          /* Insert the new watchdog in the list */

          if (curr == (FAR wdog_t *)g_wdactivelist.head)
            {
              sq_addfirst((FAR sq_entry_t *)wdog, &g_wdactivelist);
            }
          else
            {
              sq_addafter((FAR sq_entry_t *)prev, (FAR sq_entry_t *)wdog,
                          &g_wdactivelist);
            }
        }

      /* The new watchdog delay time is greater than the curr delay time,
       * so the new wdog must be inserted after the curr.  This only occurs
       * if the wdog is to be added to the end of the list.
       */

      else
        {
          delay -= now;
          if (!curr->next)
            {
              sq_addlast((FAR sq_entry_t *)wdog, &g_wdactivelist);
            }
          else
            {
              next = curr->next;
              next->lag -= delay;
              sq_addafter((FAR sq_entry_t *)curr, (FAR sq_entry_t *)wdog,
                          &g_wdactivelist);
            }
        }
    }

  /* Put the lag into the watchdog structure and mark it as active. */

  wdog->lag = delay;
  wdog->active = true;

  irqrestore(saved_state);
  return OK;
}
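/* Standalone illustration of the lag (delta) encoding that wd_start()
 * maintains in g_wdactivelist; the values are made up.  Absolute expiry
 * times are prefix sums of the stored lags, which is why the insertion
 * code accumulates `now` while walking the list and shrinks the
 * successor's lag when a new watchdog is spliced in. */

#include <stdio.h>

int main(void)
{
  int lag[] = {5, 3, 5};  /* Watchdogs with absolute delays 5, 8 and 13 */
  int now = 0;
  int i;

  for (i = 0; i < 3; i++)
    {
      now += lag[i];
      printf("watchdog %d fires at t=%d\n", i, now);  /* t=5, 8, 13 */
    }

  /* Inserting delay=10 lands between 8 and 13: its stored lag becomes
   * 10 - 8 = 2 and the successor's lag shrinks from 5 to 13 - 10 = 3. */

  return 0;
}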
void uip_tcpreadaheadrelease(struct uip_readahead_s *buf)
{
  sq_addfirst(&buf->rh_node, &g_freebuffers);
}