static struct mbuf *
_mbuf_get(void)
{
    struct mbuf *mbuf;
    uint8_t *buf;

    if (!STAILQ_EMPTY(&free_mbufq)) {
        ASSERT(nfree_mbufq > 0);

        mbuf = STAILQ_FIRST(&free_mbufq);
        nfree_mbufq--;
        STAILQ_REMOVE_HEAD(&free_mbufq, next);

        ASSERT(mbuf->magic == MBUF_MAGIC);
        goto done;
    }

    buf = nc_alloc(mbuf_chunk_size);
    if (buf == NULL) {
        return NULL;
    }

#if 1 //shenzheng 2015-3-23 common
#ifdef NC_DEBUG_LOG
    ntotal_mbuf++;
#endif
#endif //shenzheng 2015-3-23 common

    /*
     * mbuf header is at the tail end of the mbuf. This enables us to catch
     * buffer overruns early by asserting on the magic value during get or
     * put operations.
     *
     *   <------------- mbuf_chunk_size ------------->
     *   +-------------------------------------------+
     *   |       mbuf data          |  mbuf header   |
     *   |     (mbuf_offset)        | (struct mbuf)  |
     *   +-------------------------------------------+
     *   ^           ^        ^    ^^
     *   |           |        |    ||
     *   \           |        |    |\
     *   mbuf->start \        |    | mbuf->end (one byte past valid bound)
     *                mbuf->pos    \
     *                        \     mbuf
     *                         mbuf->last (one byte past valid byte)
     */
    mbuf = (struct mbuf *)(buf + mbuf_offset);
    mbuf->magic = MBUF_MAGIC;

done:
    STAILQ_NEXT(mbuf, next) = NULL;
    return mbuf;
}
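/*
 * A minimal, self-contained sketch of the tail-header guard idiom that
 * _mbuf_get() above relies on. The names here (guard_buf, guard_get,
 * guard_put, CHUNK_SIZE, GUARD_MAGIC) are illustrative, not part of the
 * original allocator; the point is only that placing the header at the tail
 * of the chunk turns a data-area overrun into a corrupted magic value that
 * the very next get/put catches.
 */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define CHUNK_SIZE  512
#define GUARD_MAGIC 0xdeadbeefU

struct guard_buf {
    uint32_t magic;     /* trampled first by any overrun of the data area */
};

static const size_t guard_offset = CHUNK_SIZE - sizeof(struct guard_buf);

static struct guard_buf *
guard_get(void)
{
    uint8_t *chunk = malloc(CHUNK_SIZE);
    struct guard_buf *g;

    if (chunk == NULL)
        return NULL;
    g = (struct guard_buf *)(chunk + guard_offset);  /* header at the tail */
    g->magic = GUARD_MAGIC;
    return g;
}

static void
guard_put(struct guard_buf *g)
{
    /* Any write past the data area has destroyed the magic by now. */
    assert(g->magic == GUARD_MAGIC);
    free((uint8_t *)g - guard_offset);               /* free the chunk base */
}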
Elf_Scn *
elf_newscn(Elf *e)
{
    int ec;
    void *ehdr;
    Elf_Scn *scn;

    if (e == NULL || e->e_kind != ELF_K_ELF) {
        LIBELF_SET_ERROR(ARGUMENT, 0);
        return (NULL);
    }

    if ((ec = e->e_class) != ELFCLASS32 && ec != ELFCLASS64) {
        LIBELF_SET_ERROR(CLASS, 0);
        return (NULL);
    }

    if ((ehdr = _libelf_ehdr(e, ec, 0)) == NULL)
        return (NULL);

    /*
     * The application may be asking for a new section descriptor
     * on an ELF object opened with ELF_C_RDWR or ELF_C_READ. We
     * need to bring in the existing section information before
     * appending a new one to the list.
     *
     * Per the ELF(3) API, an application is allowed to open a
     * file using ELF_C_READ, mess with its internal structure and
     * use elf_update(...,ELF_C_NULL) to compute its new layout.
     */
    if (e->e_cmd != ELF_C_WRITE &&
        (e->e_flags & LIBELF_F_SHDRS_LOADED) == 0 &&
        _libelf_load_section_headers(e, ehdr) == 0)
        return (NULL);

    if (STAILQ_EMPTY(&e->e_u.e_elf.e_scn)) {
        assert(e->e_u.e_elf.e_nscn == 0);
        if ((scn = _libelf_allocate_scn(e, (size_t) SHN_UNDEF)) == NULL)
            return (NULL);
        e->e_u.e_elf.e_nscn++;
    }

    assert(e->e_u.e_elf.e_nscn > 0);

    if ((scn = _libelf_allocate_scn(e, e->e_u.e_elf.e_nscn)) == NULL)
        return (NULL);

    e->e_u.e_elf.e_nscn++;

    (void) elf_flagscn(scn, ELF_C_SET, ELF_F_DIRTY);

    return (scn);
}
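/*
 * A short usage sketch for elf_newscn() under the workflow the comment above
 * describes: open an object read-write, append a section, and let
 * elf_update(e, ELF_C_NULL) compute the new layout in memory. This is an
 * illustrative sketch, not code from the library: error handling is trimmed
 * to a single label, the payload is made up, and a real caller would also
 * add a section name to .shstrtab.
 */
#include <fcntl.h>
#include <gelf.h>
#include <libelf.h>
#include <unistd.h>

static int
append_section(const char *path)
{
    int fd;
    Elf *e = NULL;
    Elf_Scn *scn;
    Elf_Data *data;
    GElf_Shdr shdr;
    static char payload[] = "example";

    if (elf_version(EV_CURRENT) == EV_NONE)
        return (-1);
    if ((fd = open(path, O_RDWR)) < 0)
        return (-1);
    if ((e = elf_begin(fd, ELF_C_RDWR, NULL)) == NULL)
        goto fail;

    if ((scn = elf_newscn(e)) == NULL)      /* loads existing shdrs first */
        goto fail;
    if ((data = elf_newdata(scn)) == NULL)
        goto fail;
    data->d_buf = payload;
    data->d_size = sizeof(payload);

    if (gelf_getshdr(scn, &shdr) == NULL)
        goto fail;
    shdr.sh_type = SHT_PROGBITS;
    if (gelf_update_shdr(scn, &shdr) == 0)
        goto fail;

    if (elf_update(e, ELF_C_NULL) < 0)      /* compute layout, no write-back */
        goto fail;

    elf_end(e);
    close(fd);
    return (0);

fail:
    if (e != NULL)
        elf_end(e);
    close(fd);
    return (-1);
}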
/*
 * A device calls mbaqueue() when it wants to get on the adapter queue.
 * Called at splbio(). If the adapter is inactive, start it.
 */
void
mbaqueue(struct mba_device *md)
{
    struct mba_softc * const sc = md->md_mba;
    bool was_empty = STAILQ_EMPTY(&sc->sc_xfers);

    STAILQ_INSERT_TAIL(&sc->sc_xfers, md, md_link);

    if (was_empty)
        mbastart(sc);
}
static void
free_dellist(struct dl_head *dl)
{
    struct deletion_list *dl_entry;

    while (!STAILQ_EMPTY(dl)) {
        dl_entry = STAILQ_FIRST(dl);
        STAILQ_REMOVE_HEAD(dl, next);
        free(dl_entry);
    }
}
void
flowadv_add_entry(struct flowadv_fcentry *fce)
{
    lck_mtx_lock_spin(&fadv_lock);
    STAILQ_INSERT_HEAD(&fadv_list, fce, fce_link);
    VERIFY(!STAILQ_EMPTY(&fadv_list));

    if (!fadv_active && fadv_thread != THREAD_NULL)
        wakeup_one((caddr_t)&fadv_list);

    lck_mtx_unlock(&fadv_lock);
}
void
obj_free(Obj_Entry *obj)
{
    Objlist_Entry *elm;

    if (obj->tls_done)
        free_tls_offset(obj);
    while (obj->needed != NULL) {
        Needed_Entry *needed = obj->needed;
        obj->needed = needed->next;
        free(needed);
    }
    while (!STAILQ_EMPTY(&obj->names)) {
        Name_Entry *entry = STAILQ_FIRST(&obj->names);
        STAILQ_REMOVE_HEAD(&obj->names, link);
        free(entry);
    }
    while (!STAILQ_EMPTY(&obj->dldags)) {
        elm = STAILQ_FIRST(&obj->dldags);
        STAILQ_REMOVE_HEAD(&obj->dldags, link);
        free(elm);
    }
    while (!STAILQ_EMPTY(&obj->dagmembers)) {
        elm = STAILQ_FIRST(&obj->dagmembers);
        STAILQ_REMOVE_HEAD(&obj->dagmembers, link);
        free(elm);
    }
    if (obj->vertab)
        free(obj->vertab);
    if (obj->origin_path)
        free(obj->origin_path);
    if (obj->z_origin)
        free(obj->rpath);
    if (obj->priv)
        free(obj->priv);
    if (obj->path)
        free(obj->path);
    if (obj->phdr_alloc)
        free((void *)obj->phdr);
    free(obj);
}
void server_eof(struct connection *server, const char *reason)
{
    LOG(WARN, "server eof");

    struct command *c;
    while (!STAILQ_EMPTY(&server->info->ready_queue)) {
        c = STAILQ_FIRST(&server->info->ready_queue);
        STAILQ_REMOVE_HEAD(&server->info->ready_queue, ready_next);
        STAILQ_NEXT(c, ready_next) = NULL;
        if (c->stale) {
            cmd_free(c);
        } else {
            cmd_mark_fail(c, reason);
        }
    }

    // remove unprocessed data
    struct mbuf *b = TAILQ_LAST(&server->info->data, mhdr);
    if (b != NULL && b->pos < b->last) {
        b->pos = b->last;
    }

    while (!STAILQ_EMPTY(&server->info->waiting_queue)) {
        c = STAILQ_FIRST(&server->info->waiting_queue);
        STAILQ_REMOVE_HEAD(&server->info->waiting_queue, waiting_next);
        STAILQ_NEXT(c, waiting_next) = NULL;
        mbuf_range_clear(server->ctx, c->rep_buf);
        if (c->stale) {
            cmd_free(c);
        } else {
            cmd_mark_fail(c, reason);
        }
    }

    event_deregister(&server->ctx->loop, server);

    // drop all unsent requests
    cmd_iov_free(&server->info->iov);
    conn_free(server);
    slot_create_job(SLOT_UPDATE);
}
struct call *
call_get(struct conn *conn)
{
    struct call *call;
    uint32_t i;

    if (!STAILQ_EMPTY(&free_callq)) {
        ASSERT(nfree_callq > 0);

        call = STAILQ_FIRST(&free_callq);
        nfree_callq--;
        STAILQ_REMOVE_HEAD(&free_callq, call_tqe);
    } else {
        call = mcp_alloc(sizeof(*call));
        if (call == NULL) {
            return NULL;
        }
    }

    STAILQ_NEXT(call, call_tqe) = NULL;
    call->id = ++id;
    call->conn = conn;

    /* keyname, expiry and keylen are initialized later */

    call->req.send = 0;
    call->req.sent = 0;
    call->req.issue_start = 0.0;
    call->req.send_start = 0.0;
    call->req.send_stop = 0.0;
    for (i = 0; i < REQ_IOV_LEN; i++) {
        call->req.iov[i].iov_base = NULL;
        call->req.iov[i].iov_len = 0;
    }
    call->req.noreply = 0;
    call->req.sending = 0;

    call->rsp.recv_start = 0.0;
    call->rsp.rcvd = 0;
    call->rsp.rcurr = conn->buf;
    call->rsp.rsize = sizeof(conn->buf);
    call->rsp.pcurr = call->rsp.rcurr;
    call->rsp.start = NULL;
    call->rsp.end = NULL;
    call->rsp.type = 0;
    call->rsp.vlen = 0;
    call->rsp.parsed_line = 0;
    call->rsp.parsed_vlen = 0;

    log_debug(LOG_VVERB, "get call %p id %"PRIu64"", call, call->id);

    return call;
}
struct mbuf *mbuf_queue_get(struct context *ctx, struct mhdr *q)
{
    struct mbuf *buf = NULL;

    if (!STAILQ_EMPTY(q))
        buf = STAILQ_LAST(q, mbuf, next);

    if (buf == NULL || mbuf_full(buf)) {
        buf = mbuf_get(ctx);
        STAILQ_INSERT_TAIL(q, buf, next);
    }
    return buf;
}
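/*
 * Typical use of mbuf_queue_get(): append a byte range to a queue, spilling
 * into a fresh mbuf whenever the tail buffer fills up. This is an
 * illustrative sketch, not code from the project above; mbuf_write_size() is
 * a hypothetical helper standing in for "bytes left between buf->last and
 * buf->end".
 */
#include <string.h>

static void
mbuf_queue_copy(struct context *ctx, struct mhdr *q,
                const uint8_t *data, size_t len)
{
    while (len > 0) {
        struct mbuf *buf = mbuf_queue_get(ctx, q);  /* tail, or a new mbuf */
        size_t n = mbuf_write_size(buf);            /* hypothetical: buf->end - buf->last */

        if (n > len)
            n = len;
        memcpy(buf->last, data, n);
        buf->last += n;
        data += n;
        len -= n;
    }
}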
static struct mbuf *
_mbuf_get(void)
{
    struct mbuf *mbuf;
    uint8_t *buf;

    //loga("_mbuf_get, nfree_mbufq = %d", nfree_mbufq);

    if (!STAILQ_EMPTY(&free_mbufq)) {
        ASSERT(nfree_mbufq > 0);

        mbuf = STAILQ_FIRST(&free_mbufq);
        nfree_mbufq--;
        STAILQ_REMOVE_HEAD(&free_mbufq, next);

        ASSERT(mbuf->magic == MBUF_MAGIC);
        goto done;
    }

    buf = dn_alloc(mbuf_chunk_size);
    if (buf == NULL) {
        return NULL;
    }

    mbuf_alloc_count++;

    /*
     * mbuf header is at the tail end of the mbuf. This enables us to catch
     * buffer overruns early by asserting on the magic value during get or
     * put operations.
     *
     *   <------------- mbuf_chunk_size ------------------------->
     *   +-------------------------------------------------------+
     *   |         mbuf data            |      mbuf header       |
     *   |       (mbuf_offset)          |     (struct mbuf)      |
     *   +-------------------------------------------------------+
     *   ^           ^        ^        ^                        ^^
     *   |           |        |        |                        ||
     *   \           |        |        \                        |\
     *   mbuf->start \        |         mbuf->end               | mbuf->end_extra (one byte past valid bound)
     *                mbuf->pos \                               mbuf
     *                           mbuf->last (one byte past valid byte)
     */
    mbuf = (struct mbuf *)(buf + mbuf_offset);
    mbuf->magic = MBUF_MAGIC;
    mbuf->chunk_size = mbuf_chunk_size;

done:
    STAILQ_NEXT(mbuf, next) = NULL;
    return mbuf;
}
static void
sfb_fclists_clean(struct sfb *sp)
{
    int i;

    /* Move all the flow control entries to the flowadv list */
    for (i = 0; i < SFB_BINS; ++i) {
        struct sfb_fcl *fcl = SFB_FC_LIST(sp, i);
        if (!STAILQ_EMPTY(&fcl->fclist))
            sfb_fclist_append(sp, fcl);
    }
}
static char
l2e_state(const struct l2t_entry *e)
{
    switch (e->state) {
    case L2T_STATE_VALID:      return 'V';  /* valid, fast-path entry */
    case L2T_STATE_STALE:      return 'S';  /* needs revalidation, but usable */
    case L2T_STATE_SYNC_WRITE: return 'W';
    case L2T_STATE_RESOLVING:
        return STAILQ_EMPTY(&e->wr_list) ? 'R' : 'A';
    case L2T_STATE_SWITCHING:  return 'X';
    default:                   return 'U';
    }
}
struct conn *
conn_get(struct context *ctx)
{
    struct conn *conn;

    if (!STAILQ_EMPTY(&free_connq)) {
        ASSERT(nfree_connq > 0);

        conn = STAILQ_FIRST(&free_connq);
        nfree_connq--;
        STAILQ_REMOVE_HEAD(&free_connq, conn_tqe);
    } else {
        conn = mcp_alloc(sizeof(*conn));
        if (conn == NULL) {
            return NULL;
        }
    }

    STAILQ_NEXT(conn, conn_tqe) = NULL;
    conn->id = ++id;
    conn->ctx = ctx;

    conn->ncall_sendq = 0;
    STAILQ_INIT(&conn->call_sendq);
    conn->ncall_recvq = 0;
    STAILQ_INIT(&conn->call_recvq);

    conn->watchdog = NULL;
    conn->connect_start = 0.0;

    conn->sd = -1;
    /* conn->call_gen is initialized later */
    conn->ncall_created = 0;
    conn->ncall_create_failed = 0;
    conn->ncall_completed = 0;

    conn->err = 0;
    conn->recv_active = 0;
    conn->recv_ready = 0;
    conn->send_active = 0;
    conn->send_ready = 0;

    conn->connecting = 0;
    conn->connected = 0;
    conn->eof = 0;

    log_debug(LOG_VVERB, "get conn %p id %"PRIu64"", conn, conn->id);

    return conn;
}
void
mpc_url_deinit(void)
{
    mpc_url_t *mpc_url;

    while (!STAILQ_EMPTY(&mpc_url_free_queue)) {
        mpc_url = STAILQ_FIRST(&mpc_url_free_queue);
        mpc_url_remove(&mpc_url_free_queue, mpc_url);
        mpc_url_free(mpc_url);
        mpc_url_nfree--;
    }

    while (!STAILQ_EMPTY(&mpc_url_task_queue)) {
        mpc_url = STAILQ_FIRST(&mpc_url_task_queue);
        mpc_url_remove(&mpc_url_task_queue, mpc_url);
        mpc_url_free(mpc_url);
        mpc_url_ntask--;
    }

    ASSERT(mpc_url_nfree == 0);
    // ASSERT(mpc_url_ntask == 0);
}
void
mbuf_deinit(void)
{
    while (!STAILQ_EMPTY(&free_mbufq)) {
        struct mbuf *mbuf = STAILQ_FIRST(&free_mbufq);
        mbuf_remove(&free_mbufq, mbuf);
        mbuf_free(mbuf);
        nfree_mbufq--;
#if 1 //shenzheng 2015-3-23 common
#ifdef NC_DEBUG_LOG
        ntotal_mbuf--;
#endif
#endif //shenzheng 2015-3-23 common
    }

#if 1 //shenzheng 2015-5-13 proxy administer
    while (!STAILQ_EMPTY(&free_mbufq_proxy_adm)) {
        struct mbuf *mbuf = STAILQ_FIRST(&free_mbufq_proxy_adm);
        mbuf_remove(&free_mbufq_proxy_adm, mbuf);
        mbuf_free(mbuf);
        nfree_mbufq_proxy_adm--;
#ifdef NC_DEBUG_LOG
        ntotal_mbuf_proxy_adm--;
#endif
    }
#endif //shenzheng 2015-5-13 proxy administer

    ASSERT(nfree_mbufq == 0);
#if 1 //shenzheng 2015-3-23 common
#ifdef NC_DEBUG_LOG
    ASSERT(ntotal_mbuf == 0);
#endif
#endif //shenzheng 2015-3-23 common
}
rstatus_t
conn_close(struct conn *conn)
{
    rstatus_t status;
    struct mbuf *mbuf, *nbuf;   /* current and next mbuf */

    if (conn->fd < 0) {
        conn_put(conn);
        return NC_OK;
    }

    if (!STAILQ_EMPTY(&conn->recv_queue)) {
        log_warn("close conn %d discard data in recv_queue", conn->fd);
        for (mbuf = STAILQ_FIRST(&conn->recv_queue); mbuf != NULL;
             mbuf = nbuf) {
            nbuf = STAILQ_NEXT(mbuf, next);
            mbuf_remove(&conn->recv_queue, mbuf);
            mbuf_put(mbuf);
        }
    }

    if (!STAILQ_EMPTY(&conn->send_queue)) {
        log_warn("close conn %d discard data in send_queue", conn->fd);
        for (mbuf = STAILQ_FIRST(&conn->send_queue); mbuf != NULL;
             mbuf = nbuf) {
            nbuf = STAILQ_NEXT(mbuf, next);
            mbuf_remove(&conn->send_queue, mbuf);
            mbuf_put(mbuf);
        }
    }

    status = close(conn->fd);
    if (status < 0) {
        log_error("close c %d failed, ignored: %s", conn->fd, strerror(errno));
    }
    conn->fd = -1;

    conn_put(conn);

    return NC_OK;
}
static void
deps_free(struct deps_head *dh)
{
    struct deps_entry *e = NULL;

    while (!STAILQ_EMPTY(dh)) {
        e = STAILQ_FIRST(dh);
        STAILQ_REMOVE_HEAD(dh, next);
        free(e->name);
        free(e->version);
        free(e->origin);
        free(e);
    }
}
/*
 * Post-split copy handler invoked when the request is a multi vector -
 * 'get' or 'gets' request and has already been split into two requests.
 */
rstatus_t
memcache_post_splitcopy(struct msg *r)
{
    struct mbuf *mbuf;
    struct string crlf = string(CRLF);

    ASSERT(r->request);
    ASSERT(!STAILQ_EMPTY(&r->mhdr));

    mbuf = STAILQ_LAST(&r->mhdr, mbuf, next);
    mbuf_copy(mbuf, crlf.data, crlf.len);

    return FC_OK;
}
int
t4_l2t_send_slow(struct adapter *sc, struct wrqe *wr, struct l2t_entry *e)
{
again:
    switch (e->state) {
    case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */
        if (resolve_entry(sc, e) != EWOULDBLOCK)
            goto again;       /* entry updated, re-examine state */

        /* Fall through */

    case L2T_STATE_VALID:     /* fast-path, send the packet on */
        t4_wrq_tx(sc, wr);
        return (0);

    case L2T_STATE_RESOLVING:
    case L2T_STATE_SYNC_WRITE:
        mtx_lock(&e->lock);
        if (e->state != L2T_STATE_SYNC_WRITE &&
            e->state != L2T_STATE_RESOLVING) {
            /* state changed by the time we got here */
            mtx_unlock(&e->lock);
            goto again;
        }
        arpq_enqueue(e, wr);
        mtx_unlock(&e->lock);

        if (resolve_entry(sc, e) == EWOULDBLOCK)
            break;

        mtx_lock(&e->lock);
        if (e->state == L2T_STATE_VALID && !STAILQ_EMPTY(&e->wr_list))
            send_pending(sc, e);
        if (e->state == L2T_STATE_FAILED)
            resolution_failed(e);
        mtx_unlock(&e->lock);
        break;

    case L2T_STATE_FAILED:
        resolution_failed_for_wr(wr);
        return (EHOSTUNREACH);
    }

    return (0);
}
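/*
 * The RESOLVING/SYNC_WRITE arm above follows a common pattern: an unlocked
 * read of e->state picks the switch arm, so the state must be re-checked
 * under the lock before the work request is queued, and the whole decision
 * is retried if it changed in between. A generic, self-contained sketch of
 * that pattern, with illustrative names (obj, submit_now, enqueue_work),
 * not taken from the driver:
 */
#include <pthread.h>

enum obj_state { READY, BUSY };

struct obj {
    pthread_mutex_t lock;
    enum obj_state state;   /* peeked at without the lock on the fast path */
};

static void submit_now(struct obj *o)   { (void)o; /* fast path: send directly */ }
static void enqueue_work(struct obj *o) { (void)o; /* slow path: queue; called locked */ }

static void
submit(struct obj *o)
{
again:
    switch (o->state) {             /* unlocked peek; may be stale */
    case READY:
        submit_now(o);
        break;
    case BUSY:
        pthread_mutex_lock(&o->lock);
        if (o->state != BUSY) {     /* state changed before we got the lock */
            pthread_mutex_unlock(&o->lock);
            goto again;             /* re-run the whole decision */
        }
        enqueue_work(o);            /* safe: still BUSY, and we hold the lock */
        pthread_mutex_unlock(&o->lock);
        break;
    }
}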
unsigned int
mbuf_pool_compact(struct mbuf_pool *pool)
{
    unsigned int count = pool->nfree_mbuf_blockq;

    while (!STAILQ_EMPTY(&pool->free_mbuf_blockq)) {
        struct mbuf_block *mbuf_block = STAILQ_FIRST(&pool->free_mbuf_blockq);
        mbuf_block_remove(&pool->free_mbuf_blockq, mbuf_block);
        mbuf_block_free(pool, mbuf_block);
        pool->nfree_mbuf_blockq--;
    }
    assert(pool->nfree_mbuf_blockq == 0);
    return count;
}
/*
 * Free a content_types object.
 */
void
_free_content_types(lxw_content_types *content_types)
{
    lxw_tuple *default_type;
    lxw_tuple *override;

    if (!content_types)
        return;

    while (!STAILQ_EMPTY(content_types->default_types)) {
        default_type = STAILQ_FIRST(content_types->default_types);
        STAILQ_REMOVE_HEAD(content_types->default_types, list_pointers);
        free(default_type->key);
        free(default_type->value);
        free(default_type);
    }

    while (!STAILQ_EMPTY(content_types->overrides)) {
        override = STAILQ_FIRST(content_types->overrides);
        STAILQ_REMOVE_HEAD(content_types->overrides, list_pointers);
        free(override->key);
        free(override->value);
        free(override);
    }
}
int server_write(struct connection *server)
{
    struct conn_info *info = server->info;

    if (!STAILQ_EMPTY(&info->ready_queue)) {
        server_make_iov(info);
    }
    if (info->iov.len <= 0) {
        cmd_iov_reset(&info->iov);
        return CORVUS_OK;
    }

    int status = conn_write(server, 0);

    if (status == CORVUS_ERR) {
        LOG(ERROR, "server_write: server %d fail to write iov", server->fd);
        return CORVUS_ERR;
    }
    if (status == CORVUS_AGAIN)
        return CORVUS_OK;

    ATOMIC_INC(info->send_bytes, status);

    if (info->iov.cursor >= info->iov.len) {
        cmd_iov_free(&info->iov);
    }

    if (!STAILQ_EMPTY(&info->ready_queue) || info->iov.cursor < info->iov.len) {
        if (conn_register(server) == CORVUS_ERR) {
            LOG(ERROR, "server_write: fail to reregister server %d", server->fd);
            return CORVUS_ERR;
        }
    }

    info->last_active = time(NULL);
    return CORVUS_OK;
}
void
ble_hci_sched_command_complete(void)
{
    if (ble_hci_sched_cur_entry == NULL) {
        /* XXX: Increment stat. */
        return;
    }

    ble_hci_sched_entry_free(ble_hci_sched_cur_entry);
    ble_hci_sched_cur_entry = NULL;

    if (!STAILQ_EMPTY(&ble_hci_sched_list)) {
        ble_hs_kick_hci();
    }
}
static inline entry_s *
get_entry_s(int row, int col, double value)
{
    int i;
    entry_s *entry;

    if (STAILQ_EMPTY(&free_entries_s)) {
        /*
         * Refill the free list in a batch: allocate 20 entries, push 19 of
         * them (i == 19..1) onto the free list, and hand the last one
         * (i == 0) to the caller.
         */
        for (i = 19; i >= 0; i--) {
            entry = (entry_s *)malloc(sizeof(entry_s));
            if (!entry)
                return NULL;
            if (i)
                STAILQ_INSERT_TAIL(&free_entries_s, entry, hook);
        }
    } else {
        entry = STAILQ_FIRST(&free_entries_s);
        STAILQ_REMOVE_HEAD(&free_entries_s, hook);
    }

    return entry;
}
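/*
 * The matching release side of the free-list idiom above would push the
 * entry back instead of calling free(). A hypothetical sketch, not part of
 * the original source:
 */
static inline void
put_entry_s(entry_s *entry)
{
    /* Recycle rather than free: the next get_entry_s() pops it in O(1). */
    STAILQ_INSERT_TAIL(&free_entries_s, entry, hook);
}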
static void
ntb_complete_rxc(void *arg, int pending)
{
    struct ntb_transport_qp *qp = arg;
    struct ntb_queue_entry *entry;
    struct mbuf *m;
    unsigned len;

    CTR0(KTR_NTB, "RX: rx_completion_task");

    mtx_lock_spin(&qp->ntb_rx_q_lock);

    while (!STAILQ_EMPTY(&qp->rx_post_q)) {
        entry = STAILQ_FIRST(&qp->rx_post_q);
        if ((entry->flags & IF_NTB_DESC_DONE_FLAG) == 0)
            break;

        entry->x_hdr->flags = 0;
        iowrite32(entry->index, &qp->rx_info->entry);

        STAILQ_REMOVE_HEAD(&qp->rx_post_q, entry);

        len = entry->len;
        m = entry->buf;

        /*
         * Re-initialize queue_entry for reuse; rx_handler takes
         * ownership of the mbuf.
         */
        entry->buf = NULL;
        entry->len = transport_mtu;
        entry->cb_data = qp->transport->ifp;

        STAILQ_INSERT_TAIL(&qp->rx_pend_q, entry, entry);

        mtx_unlock_spin(&qp->ntb_rx_q_lock);

        CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m);
        if (qp->rx_handler != NULL && qp->client_ready)
            qp->rx_handler(qp, qp->cb_data, m, len);
        else
            m_freem(m);

        mtx_lock_spin(&qp->ntb_rx_q_lock);
    }

    mtx_unlock_spin(&qp->ntb_rx_q_lock);
}
void
thread_free(struct thread *td)
{
    struct task *task;

    task = td->td_task;
    STAILQ_REMOVE(&task->t_threads, td, struct thread, td_link);
    cpu_thread_free(td);
    pool_free(td);
    if (STAILQ_EMPTY(&task->t_threads))
        task_free(task);
}
struct conn_info *conn_info_create(struct context *ctx)
{
    struct conn_info *info;

    if (!STAILQ_EMPTY(&ctx->free_conn_infoq)) {
        info = STAILQ_FIRST(&ctx->free_conn_infoq);
        STAILQ_REMOVE_HEAD(&ctx->free_conn_infoq, next);
        ctx->mstats.free_conn_info--;
    } else {
        info = malloc(sizeof(struct conn_info));
        if (info == NULL) {
            return NULL;
        }
        // init iov here
        memset(&info->iov, 0, sizeof(info->iov));
    }

    conn_info_init(info);
    ctx->mstats.conn_info++;
    return info;
}
DWORD SCALL LogFileFinalize(VOID)
{
    // Write all pending log entries
    InterlockedExchange(&logStatus, LOG_STATUS_SHUTDOWN);
    QueueWrite(NULL);
    InterlockedExchange(&logStatus, LOG_STATUS_INACTIVE);
    ASSERT(STAILQ_EMPTY(&logQueue));

    CloseHandle(logHandle);
    logHandle = INVALID_HANDLE_VALUE;

    DeleteCriticalSection(&logLock);
    return ERROR_SUCCESS;
}
struct svf_entry *
nicvf_bsvf_pop(void)
{
    struct svf_entry *entry;

    assert(!STAILQ_EMPTY(&head));

    entry = STAILQ_FIRST(&head);

    assert(entry != NULL);
    assert(entry->vf != NULL);

    STAILQ_REMOVE_HEAD(&head, next);

    return entry;
}
static jresult_t
start_next_transfer(td243fc_rev2_softc_t *sc, jint_t ep_n)
{
    td243fc_rev2_ep_t *ep = &sc->ep[ep_n];

    KASSERT(ep_n > 1, ("No queue for ep0\n"));

    if (STAILQ_EMPTY(&ep->pipe->req_queue)) {
        DBG_X(DSLAVE_DCD, ("DCD: start_next_transfer: queue is empty\n"));
        return 0;
    }

    /* Start the next request on queue */
    return dcd_send_io_request((void *)sc, ep->pipe,
        STAILQ_FIRST(&ep->pipe->req_queue));
}