elf_strtab_head *
elf_strtab_create()
{
    elf_strtab_head *strtab = yasm_xmalloc(sizeof(elf_strtab_head));
    elf_strtab_entry *entry = yasm_xmalloc(sizeof(elf_strtab_entry));

    STAILQ_INIT(strtab);
    entry->index = 0;
    entry->str = yasm__xstrdup("");

    STAILQ_INSERT_TAIL(strtab, entry, qlink);
    return strtab;
}
struct instruction *
seq_alloc()
{
	struct instruction *new_instr;

	new_instr = (struct instruction *)malloc(sizeof(struct instruction));
	if (new_instr == NULL)
		stop("Unable to malloc instruction object", EX_SOFTWARE);
	memset(new_instr, 0, sizeof(*new_instr));
	STAILQ_INSERT_TAIL(&seq_program, new_instr, links);
	new_instr->srcline = yylineno;
	return new_instr;
}
static void
ida_data_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_softc *ida;
	struct ida_qcb *qcb;
	bus_dmasync_op_t op;
	int i;

	qcb = arg;
	ida = qcb->ida;
	if (!dumping)
		mtx_assert(&ida->lock, MA_OWNED);
	if (error) {
		qcb->error = error;
		ida_done(ida, qcb);
		return;
	}

	hwqcb = qcb->hwqcb;
	hwqcb->hdr.size = htole16((sizeof(struct ida_req) +
	    sizeof(struct ida_sgb) * IDA_NSEG) >> 2);

	for (i = 0; i < nsegments; i++) {
		hwqcb->seg[i].addr = htole32(segs[i].ds_addr);
		hwqcb->seg[i].length = htole32(segs[i].ds_len);
	}
	hwqcb->req.sgcount = nsegments;

	if (qcb->flags & DMA_DATA_TRANSFER) {
		switch (qcb->flags & DMA_DATA_TRANSFER) {
		case DMA_DATA_TRANSFER:
			op = BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE;
			break;
		case DMA_DATA_IN:
			op = BUS_DMASYNC_PREREAD;
			break;
		default:
			KASSERT((qcb->flags & DMA_DATA_TRANSFER) ==
			    DMA_DATA_OUT, ("bad DMA data flags"));
			op = BUS_DMASYNC_PREWRITE;
			break;
		}
		bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);
	}
	bus_dmamap_sync(ida->hwqcb_dmat, ida->hwqcb_dmamap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
	ida_start(ida);
	ida->flags &= ~IDA_QFROZEN;
}
/* Setup secondary channel */
int rte_vmbus_subchan_open(struct vmbus_channel *primary,
			   struct vmbus_channel **new_chan)
{
	struct vmbus_channel *chan;
	int err;

	err = vmbus_uio_get_subchan(primary, &chan);
	if (err)
		return err;

	STAILQ_INSERT_TAIL(&primary->subchannel_list, chan, next);
	*new_chan = chan;
	return 0;
}
Elf_Data *
elf_rawdata(Elf_Scn *s, Elf_Data *d)
{
	Elf *e;
	uint32_t sh_type;
	int elf_class;
	uint64_t sh_align, sh_offset, sh_size;

	if (s == NULL || (e = s->s_elf) == NULL || e->e_kind != ELF_K_ELF ||
	    e->e_rawfile == NULL) {
		LIBELF_SET_ERROR(ARGUMENT, 0);
		return (NULL);
	}

	if (d == NULL && (d = STAILQ_FIRST(&s->s_rawdata)) != NULL)
		return (d);

	if (d != NULL)
		return (STAILQ_NEXT(d, d_next));

	elf_class = e->e_class;

	assert(elf_class == ELFCLASS32 || elf_class == ELFCLASS64);

	if (elf_class == ELFCLASS32) {
		sh_type   = s->s_shdr.s_shdr32.sh_type;
		sh_offset = (uint64_t) s->s_shdr.s_shdr32.sh_offset;
		sh_size   = (uint64_t) s->s_shdr.s_shdr32.sh_size;
		sh_align  = (uint64_t) s->s_shdr.s_shdr32.sh_addralign;
	} else {
		sh_type   = s->s_shdr.s_shdr64.sh_type;
		sh_offset = s->s_shdr.s_shdr64.sh_offset;
		sh_size   = s->s_shdr.s_shdr64.sh_size;
		sh_align  = s->s_shdr.s_shdr64.sh_addralign;
	}

	if ((d = _libelf_allocate_data(s)) == NULL)
		return (NULL);

	d->d_buf     = sh_type == SHT_NOBITS ? NULL : e->e_rawfile + sh_offset;
	d->d_off     = 0;
	d->d_align   = sh_align;
	d->d_size    = sh_size;
	d->d_type    = ELF_T_BYTE;
	d->d_version = e->e_version;

	STAILQ_INSERT_TAIL(&s->s_rawdata, d, d_next);

	return (d);
}
/*
 * Initialize the MI portions of a struct pcpu.
 */
void
pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	bzero(pcpu, size);
	KASSERT(cpuid >= 0 && cpuid < MAXCPU,
	    ("pcpu_init: invalid cpuid %d", cpuid));
	pcpu->pc_cpuid = cpuid;
	cpuid_to_pcpu[cpuid] = pcpu;
	STAILQ_INSERT_TAIL(&cpuhead, pcpu, pc_allcpu);
	cpu_pcpu_init(pcpu, cpuid, size);
	pcpu->pc_rm_queue.rmq_next = &pcpu->pc_rm_queue;
	pcpu->pc_rm_queue.rmq_prev = &pcpu->pc_rm_queue;
}
Dwarf_Unsigned
dwarf_add_frame_fde_b(Dwarf_P_Debug dbg, Dwarf_P_Fde fde, Dwarf_P_Die die,
    Dwarf_Unsigned cie, Dwarf_Addr virt_addr, Dwarf_Unsigned code_len,
    Dwarf_Unsigned symbol_index, Dwarf_Unsigned end_symbol_index,
    Dwarf_Addr offset_from_end_sym, Dwarf_Error *error)
{
	Dwarf_P_Cie ciep;
	int i;

	/*
	 * XXX SGI libdwarf needs the DIE arg because later it will insert a
	 * DW_AT_MIPS_fde attribute, which points to the offset of the
	 * corresponding FDE, into this DIE. Do we need this?
	 */
	(void) die;

	if (dbg == NULL || fde == NULL || fde->fde_dbg != dbg) {
		DWARF_SET_ERROR(dbg, error, DW_DLE_ARGUMENT);
		return (DW_DLV_NOCOUNT);
	}

	ciep = STAILQ_FIRST(&dbg->dbgp_cielist);
	for (i = 0; (Dwarf_Unsigned) i < cie; i++) {
		ciep = STAILQ_NEXT(ciep, cie_next);
		if (ciep == NULL)
			break;
	}
	if (ciep == NULL) {
		DWARF_SET_ERROR(dbg, error, DW_DLE_ARGUMENT);
		return (DW_DLV_NOCOUNT);
	}

	if (end_symbol_index > 0 &&
	    (dbg->dbgp_flags & DW_DLC_SYMBOLIC_RELOCATIONS) == 0) {
		DWARF_SET_ERROR(dbg, error, DW_DLE_ARGUMENT);
		return (DW_DLV_NOCOUNT);
	}

	fde->fde_cie = ciep;
	fde->fde_initloc = virt_addr;
	fde->fde_adrange = code_len;
	fde->fde_symndx = symbol_index;
	fde->fde_esymndx = end_symbol_index;
	fde->fde_eoff = offset_from_end_sym;

	STAILQ_INSERT_TAIL(&dbg->dbgp_fdelist, fde, fde_next);

	return (dbg->dbgp_fdelen++);
}
/*
 * Add an entry to a pattern list
 */
static void
add_pattern(struct pattern_list *list, const char *pattern)
{
	struct pattern *entry;
	size_t len;

	debug("adding pattern '%s'\n", pattern);
	len = strlen(pattern);
	if ((entry = malloc(sizeof *entry + len + 1)) == NULL) {
		errno = ENOMEM;
		error("malloc()");
	}
	memcpy(entry->pattern, pattern, len + 1);
	STAILQ_INSERT_TAIL(list, entry, link);
}
void
conn_put(struct conn *c)
{
	log_debug(LOG_VVERB, "put conn %p c %d", c, c->sd);

	if (c->rsize > RSIZE_HIGHWAT) {
		conn_free(c);
		return;
	}

	pthread_mutex_lock(&free_connq_mutex);
	nfree_connq++;
	STAILQ_INSERT_TAIL(&free_connq, c, c_tqe);
	pthread_mutex_unlock(&free_connq_mutex);
}
static inline entry_s *
get_entry_s(int row, int col, double value)
{
	int i;
	entry_s *entry;

	if (STAILQ_EMPTY(&free_entries_s)) {
		/*
		 * Refill: allocate a batch of 20 entries, park 19 of them on
		 * the free list, and keep the last one (i == 0) for the caller.
		 */
		for (i = 19; i >= 0; i--) {
			entry = (entry_s *)malloc(sizeof(entry_s));
			if (!entry)
				return NULL;
			if (i)
				STAILQ_INSERT_TAIL(&free_entries_s, entry, hook);
		}
	} else {
		entry = STAILQ_FIRST(&free_entries_s);
		STAILQ_REMOVE_HEAD(&free_entries_s, hook);
	}
	return entry;
}
void
mpc_url_task_insert(mpc_url_t *mpc_url)
{
	pthread_mutex_lock(&mutex_task);
	STAILQ_INSERT_TAIL(&mpc_url_task_queue, mpc_url, next);
	mpc_url_ntask++;
	/*
	mpc_log_debug(0, "insert task url(%d), total %d, host: \"%V\" uri: \"%V\"",
		      mpc_url->url_id, mpc_url_ntask,
		      &mpc_url->host, &mpc_url->uri);
	*/
	pthread_mutex_unlock(&mutex_task);
}
void
lazfs_workq_run(lazfs_workq_t *workq, lazfs_workq_job_t *job)
{
	int err;

	assert(workq != NULL);
	assert(job != NULL);

	LOCK(workq->lock);
	STAILQ_INSERT_TAIL(&workq->jobs, job, link);
	err = pthread_cond_signal(&workq->cond);
	UNLOCK(workq->lock);
	assert(err == 0);
}
void
soaio_enqueue(struct task *task)
{

	mtx_lock(&soaio_jobs_lock);
	MPASS(task->ta_pending == 0);
	task->ta_pending++;
	STAILQ_INSERT_TAIL(&soaio_jobs, task, ta_link);
	soaio_queued++;
	if (soaio_queued <= soaio_idle)
		wakeup_one(&soaio_idle);
	else if (soaio_num_procs < soaio_max_procs)
		taskqueue_enqueue(taskqueue_thread, &soaio_kproc_task);
	mtx_unlock(&soaio_jobs_lock);
}
int
register_notifier(notifier client)
{
	struct notify_elem *newclient;

	if (!client)
		return -1;

	newclient = (struct notify_elem *)calloc(1, sizeof(struct notify_elem));
	if (!newclient)
		return -1;	/* allocation failed; avoid NULL dereference */

	newclient->client = client;
	STAILQ_INSERT_TAIL(&clients, newclient, next);
	return 0;
}
/**
 * comp_ac_add - Add a Mailbox to an Account
 */
int comp_ac_add(struct Account *a, struct Mailbox *m)
{
  if (!a || !m)
    return -1;

  if (m->magic != MUTT_COMPRESSED)
    return -1;

  m->account = a;

  struct MailboxNode *np = mutt_mem_calloc(1, sizeof(*np));
  np->mailbox = m;
  STAILQ_INSERT_TAIL(&a->mailboxes, np, entries);
  return 0;
}
int main(int argc, char **argv)
{
	int server_socket;
	struct sockaddr_storage their_addr; /* connector's address information */
	socklen_t sin_size;
	int new_fd;
	struct request *req;
	pthread_attr_t attr;
	pthread_t thread_id;
	int i;

	server_opts(argc, argv);

	server_socket = server_setup_socket();
	if (server_socket < 0) {
		exit(-1);
	}

	/* Initialize vars */
	STAILQ_INIT(&requestq);
	pthread_cond_init(&req_cond, NULL);

	/* Start the detached worker threads */
	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	for (i = 0; i < workers; i++) {
		pthread_create(&thread_id, &attr, server_worker, NULL);
	}

	/* Accept connections */
	while (1) {
		sin_size = sizeof(their_addr);
		new_fd = accept(server_socket, (struct sockaddr *)&their_addr,
		    &sin_size);
		if (new_fd == -1) {
			perror("accept");
			continue;
		}

		req = calloc(1, sizeof(struct request));
		if (!req) {
			perror("calloc");
			close(new_fd);	/* drop the connection instead of dereferencing NULL */
			continue;
		}
		req->fd = new_fd;

		pthread_mutex_lock(&req_mutex);
		STAILQ_INSERT_TAIL(&requestq, req, link);
		pthread_mutex_unlock(&req_mutex);
		pthread_cond_signal(&req_cond);
	}

	return 0;
}
static void
ntb_complete_rxc(void *arg, int pending)
{
	struct ntb_transport_qp *qp = arg;
	struct ntb_queue_entry *entry;
	struct mbuf *m;
	unsigned len;

	CTR0(KTR_NTB, "RX: rx_completion_task");

	mtx_lock_spin(&qp->ntb_rx_q_lock);

	while (!STAILQ_EMPTY(&qp->rx_post_q)) {
		entry = STAILQ_FIRST(&qp->rx_post_q);
		if ((entry->flags & IF_NTB_DESC_DONE_FLAG) == 0)
			break;

		entry->x_hdr->flags = 0;
		iowrite32(entry->index, &qp->rx_info->entry);

		STAILQ_REMOVE_HEAD(&qp->rx_post_q, entry);

		len = entry->len;
		m = entry->buf;

		/*
		 * Re-initialize queue_entry for reuse; rx_handler takes
		 * ownership of the mbuf.
		 */
		entry->buf = NULL;
		entry->len = transport_mtu;
		entry->cb_data = qp->transport->ifp;

		STAILQ_INSERT_TAIL(&qp->rx_pend_q, entry, entry);

		mtx_unlock_spin(&qp->ntb_rx_q_lock);

		CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m);
		if (qp->rx_handler != NULL && qp->client_ready)
			qp->rx_handler(qp, qp->cb_data, m, len);
		else
			m_freem(m);

		mtx_lock_spin(&qp->ntb_rx_q_lock);
	}

	mtx_unlock_spin(&qp->ntb_rx_q_lock);
}
/* Allocate a PCI device structure */
static struct pcifront_device *
alloc_pdev(struct xenbus_device *xdev)
{
	struct pcifront_device *pdev = NULL;
	int err, unit;

	err = sscanf(xdev->nodename, "device/pci/%d", &unit);
	if (err != 1) {
		if (err == 0)
			err = -EINVAL;
		/* pdev is still NULL here, so report the error against xdev */
		xenbus_dev_fatal(xdev, err,
		    "Error scanning pci device instance number");
		goto out;
	}

	pdev = (struct pcifront_device *)malloc(sizeof(struct pcifront_device),
	    M_DEVBUF, M_NOWAIT);
	if (pdev == NULL) {
		err = -ENOMEM;
		xenbus_dev_fatal(xdev, err,
		    "Error allocating pcifront_device struct");
		goto out;
	}
	pdev->unit = unit;
	pdev->xdev = xdev;
	pdev->ref_cnt = 1;

	pdev->sh_info = (struct xen_pci_sharedinfo *)malloc(PAGE_SIZE, M_DEVBUF,
	    M_NOWAIT);
	if (pdev->sh_info == NULL) {
		free(pdev, M_DEVBUF);
		pdev = NULL;
		err = -ENOMEM;
		xenbus_dev_fatal(xdev, err, "Error allocating sh_info struct");
		goto out;
	}
	pdev->sh_info->flags = 0;

	xdev->data = pdev;

	mtx_init(&pdev->sh_info_lock, "info_lock", "pci shared dev info lock",
	    MTX_DEF);

	pdev->evtchn = INVALID_EVTCHN;
	pdev->gnt_ref = INVALID_GRANT_REF;

	STAILQ_INSERT_TAIL(&pdev_list, pdev, next);

	DPRINTF("Allocated pdev @ 0x%p (unit=%d)\n", pdev, unit);

 out:
	return pdev;
}
int server_enqueue(struct connection *server, struct command *cmd)
{
    if (server == NULL) {
        mbuf_range_clear(cmd->ctx, cmd->rep_buf);
        cmd_mark_fail(cmd, rep_server_err);
        return SERVER_NULL;
    }

    if (conn_register(server) == CORVUS_ERR) {
        return SERVER_REGISTER_ERROR;
    }

    server->info->last_active = time(NULL);
    mbuf_range_clear(cmd->ctx, cmd->rep_buf);
    cmd->server = server;
    STAILQ_INSERT_TAIL(&server->info->ready_queue, cmd, ready_next);
    return CORVUS_OK;
}
/*
 * Common entry point for threads.  This just calls the real start
 * routine, and then signals the thread's death, after having
 * removed the thread from the list.
 */
static void *
thread_start(void *data)
{
	struct threads *tds;
	struct thread *td;

	td = data;
	tds = td->threads;
	td->start(td->data);
	threads_lock(tds);
	LIST_REMOVE(td, runlist);
	STAILQ_INSERT_TAIL(&tds->threads_dead, td, deadlist);
	pthread_cond_signal(&tds->thread_exited);
	threads_unlock(tds);
	return (NULL);
}
void	*new_image(t_mlx_context *mlx_context, int width, int height)
{
	t_image	*ni;

	ni = malloc(sizeof(t_image));
	bzero(ni, sizeof(t_image));
	ni->size.width = width;
	ni->size.height = height;
	ni->data = malloc(width * height * sizeof(int));
	bzero(ni->data, width * height * sizeof(int));
	glGenTextures(1, &ni->ref);
	STAILQ_INSERT_TAIL(&mlx_context->i_head, ni, next);
	return (ni);
}
int member_list_nodes(clv_clnode_head_t *cn_head)
{
	int node_count, i;
	cman_handle_t cman_handle;
	cman_node_t *cman_nodes, local_node;

	if ((cman_handle = cman_init(0)) == 0) {
		return -1;
	}
	if ((node_count = cman_get_node_count(cman_handle)) < 0) {
		return -1;
	}

	cman_nodes = calloc((size_t) node_count, sizeof(cman_node_t));
	cman_get_nodes(cman_handle, node_count, &node_count, cman_nodes);

	local_node.cn_name[0] = '\0'; /* init cn_name for cman_get_node */
	if (cman_get_node(cman_handle, CMAN_NODEID_US, &local_node) != 0) {
		return -1;
	}

	for (i = 0; i < node_count; i++) {
		clv_clnode_t *n;

		n = malloc(sizeof(clv_clnode_t));
		STAILQ_INSERT_TAIL(cn_head, n, next);

		n->id = (uint32_t) cman_nodes[i].cn_nodeid;
		n->host = strdup(cman_nodes[i].cn_name);
		n->status = 0;

		if (cman_nodes[i].cn_member != 0) {
			n->status |= CLUSTER_NODE_ONLINE;
		}
		if (cman_nodes[i].cn_nodeid == local_node.cn_nodeid) {
			n->status |= CLUSTER_NODE_LOCAL;
		}
	}

	free(cman_nodes);
	cman_finish(cman_handle);
	return 0;
}
int
ida_command(struct ida_softc *ida, int command, void *data, int datasize,
	int drive, u_int64_t pblkno, int flags)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_qcb *qcb;
	bus_dmasync_op_t op;
	int error;

	crit_enter();
	qcb = ida_get_qcb(ida);
	crit_exit();

	if (qcb == NULL) {
		kprintf("ida_command: out of QCBs");
		return (EAGAIN);
	}

	hwqcb = qcb->hwqcb;
	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));

	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
	    data, datasize, ida_setup_dmamap, hwqcb, 0);
	op = qcb->flags & DMA_DATA_IN ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);

	hwqcb->hdr.drive = drive;
	hwqcb->req.blkno = pblkno;
	hwqcb->req.bcount = howmany(datasize, DEV_BSIZE);
	hwqcb->req.command = command;

	KKASSERT(pblkno < 0x100000000ULL);

	qcb->flags = flags | IDA_COMMAND;

	crit_enter();
	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
	ida_start(ida);
	error = ida_wait(ida, qcb);
	crit_exit();

	/* XXX should have status returned here? */
	/* XXX have "status pointer" area in QCB? */

	return (error);
}
int
mport_pkgmeta_get_assetlist(mportInstance *mport, mportPackageMeta *pkg,
    mportAssetList **alist_p)
{
	mportAssetList *alist;
	sqlite3_stmt *stmt;
	int ret;
	mportAssetListEntry *e;

	if ((alist = mport_assetlist_new()) == NULL)
		RETURN_ERROR(MPORT_ERR_FATAL, "Out of memory.");

	*alist_p = alist;

	if (mport_db_prepare(mport->db, &stmt,
	    "SELECT type, data FROM assets WHERE pkg=%Q", pkg->name) != MPORT_OK)
		RETURN_CURRENT_ERROR;

	while (1) {
		ret = sqlite3_step(stmt);

		if (ret == SQLITE_DONE)
			break;

		if (ret != SQLITE_ROW) {
			sqlite3_finalize(stmt);
			RETURN_ERROR(MPORT_ERR_FATAL, sqlite3_errmsg(mport->db));
		}

		e = (mportAssetListEntry *)malloc(sizeof(mportAssetListEntry));

		if (e == NULL) {
			sqlite3_finalize(stmt);
			RETURN_ERROR(MPORT_ERR_FATAL, "Out of memory.");
		}

		e->type = sqlite3_column_int(stmt, 0);
		e->data = strdup((const char *)sqlite3_column_text(stmt, 1));

		if (e->data == NULL) {
			sqlite3_finalize(stmt);
			RETURN_ERROR(MPORT_ERR_FATAL, "Out of memory.");
		}

		STAILQ_INSERT_TAIL(alist, e, next);
	}

	sqlite3_finalize(stmt);

	return MPORT_OK;
}
/* Add a fixup request to the queue. */
void
fixups_put(struct fixups *f, struct coll *coll, const char *name)
{
	struct fixup *fixup;
	int dosignal;

	dosignal = 0;
	fixup = fixup_new(coll, name);
	fixups_lock(f);
	assert(!f->closed);
	STAILQ_INSERT_TAIL(&f->fixupq, fixup, f_link);
	if (f->size++ == 0)
		dosignal = 1;
	fixups_unlock(f);
	if (dosignal)
		pthread_cond_signal(&f->cond);
}
static int
fwdev_allocbuf(struct firewire_comm *fc, struct fw_xferq *q,
	struct fw_bufspec *b)
{
	int i;

	if (q->flag & (FWXFERQ_RUNNING | FWXFERQ_EXTBUF))
		return (EBUSY);

	q->bulkxfer = (struct fw_bulkxfer *) malloc(
		sizeof(struct fw_bulkxfer) * b->nchunk, M_FW, M_WAITOK);
	if (q->bulkxfer == NULL)
		return (ENOMEM);

	b->psize = roundup2(b->psize, sizeof(uint32_t));
	q->buf = fwdma_malloc_multiseg(fc, sizeof(uint32_t),
			b->psize, b->nchunk * b->npacket, BUS_DMA_WAITOK);

	if (q->buf == NULL) {
		free(q->bulkxfer, M_FW);
		q->bulkxfer = NULL;
		return (ENOMEM);
	}
	q->bnchunk = b->nchunk;
	q->bnpacket = b->npacket;
	q->psize = (b->psize + 3) & ~3;
	q->queued = 0;

	STAILQ_INIT(&q->stvalid);
	STAILQ_INIT(&q->stfree);
	STAILQ_INIT(&q->stdma);
	q->stproc = NULL;

	for (i = 0; i < q->bnchunk; i++) {
		q->bulkxfer[i].poffset = i * q->bnpacket;
		q->bulkxfer[i].mbuf = NULL;
		STAILQ_INSERT_TAIL(&q->stfree, &q->bulkxfer[i], link);
	}

	q->flag &= ~FWXFERQ_MODEMASK;
	q->flag |= FWXFERQ_STREAM;
	q->flag |= FWXFERQ_EXTBUF;

	return (0);
}
static void
ida_construct_qcb(struct ida_softc *ida)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_qcb *qcb;
	bus_dmasync_op_t op;
	struct buf *bp;
	struct bio *bio;

	bio = bioq_first(&ida->bio_queue);
	if (bio == NULL)
		return;				/* no more buffers */

	qcb = ida_get_qcb(ida);
	if (qcb == NULL)
		return;				/* out of resources */

	bioq_remove(&ida->bio_queue, bio);
	qcb->bio = bio;
	qcb->flags = 0;

	hwqcb = qcb->hwqcb;
	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));

	bp = bio->bio_buf;
	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
	    (void *)bp->b_data, bp->b_bcount, ida_setup_dmamap, hwqcb, 0);
	op = qcb->flags & DMA_DATA_IN ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);

	{
		struct idad_softc *drv;

		drv = (struct idad_softc *)bio->bio_driver_info;
		hwqcb->hdr.drive = drv->drive;
	}

	hwqcb->req.blkno = bio->bio_offset >> DEV_BSHIFT;
	hwqcb->req.bcount = howmany(bp->b_bcount, DEV_BSIZE);
	hwqcb->req.command = (bp->b_cmd == BUF_CMD_READ) ? CMD_READ : CMD_WRITE;

	KKASSERT(bio->bio_offset < 0x100000000ULL * DEV_BSIZE);

	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
}
int
rte_log_add_in_history(const char *buf, size_t size)
{
	struct log_history *hist_buf = NULL;
	static const unsigned hist_buf_size = LOG_ELT_SIZE - sizeof(*hist_buf);
	void *obj;

	if (history_enabled == 0)
		return 0;

	rte_spinlock_lock(&log_list_lock);

	/* get a buffer for adding in history */
	if (log_history_size > RTE_LOG_HISTORY) {
		hist_buf = STAILQ_FIRST(&log_history);
		STAILQ_REMOVE_HEAD(&log_history, next);
	} else {
		if (rte_mempool_mc_get(log_history_mp, &obj) < 0)
			obj = NULL;
		hist_buf = obj;
	}

	/* no buffer */
	if (hist_buf == NULL) {
		rte_spinlock_unlock(&log_list_lock);
		return -ENOBUFS;
	}

	/* not enough room for the message, return the buffer to the mempool */
	if (size >= hist_buf_size) {
		rte_mempool_mp_put(log_history_mp, hist_buf);
		rte_spinlock_unlock(&log_list_lock);
		return -ENOBUFS;
	}

	/* add in history */
	memcpy(hist_buf->buf, buf, size);
	hist_buf->buf[size] = hist_buf->buf[hist_buf_size - 1] = '\0';
	hist_buf->size = size;
	STAILQ_INSERT_TAIL(&log_history, hist_buf, next);
	log_history_size++;
	rte_spinlock_unlock(&log_list_lock);

	return 0;
}
static int
issue_call(struct context *ctx, void *arg)
{
	struct conn *conn = arg;
	struct call *call;

	ASSERT(!issue_call_done(ctx, conn));

	call = call_get(conn);
	if (call == NULL) {
		conn->ncall_create_failed++;
		ctx->stats.ncall_create_failed++;
		goto done;
	}

	call_make_req(ctx, call);

	/*
	 * Enqueue call into sendq so that it can be sent later on
	 * an out event
	 */
	STAILQ_INSERT_TAIL(&conn->call_sendq, call, call_tqe);
	conn->ncall_sendq++;

	conn->ncall_created++;

	ecb_signal(ctx, EVENT_CALL_ISSUE_START, call);

done:
	if (issue_call_done(ctx, conn)) {
		log_debug(LOG_DEBUG, "issued %"PRIu32" %"PRIu32" of %"PRIu32" "
			  "calls on c %"PRIu64"", conn->ncall_create_failed,
			  conn->ncall_created, ctx->opt.num_calls, conn->id);
		if (conn->ncall_completed == conn->ncall_created) {
			ecb_signal(ctx, EVENT_CONN_DESTROYED, conn);
		}
		return -1;
	}

	log_debug(LOG_VERB, "issued %"PRIu32" %"PRIu32" of %"PRIu32" "
		  "calls on c %"PRIu64"", conn->ncall_create_failed,
		  conn->ncall_created, ctx->opt.num_calls, conn->id);

	return 0;
}
/*
 * Encap packet & send
 */
static int
lgue_encap(struct lgue_softc *sc, struct mbuf *m)
{
	struct ifnet *ifp;
	struct lgue_queue_entry *entry;

	ifp = &sc->lgue_arpcom.ac_if;
	entry = kmalloc(sizeof(struct lgue_queue_entry), M_USBDEV, M_NOWAIT);
	if (entry == NULL) {
		if_printf(ifp, "no memory for internal queue entry\n");
		return (ENOBUFS);
	}
	entry->entry_mbuf = m;

	/* Put packet into internal queue tail */
	STAILQ_INSERT_TAIL(&sc->lgue_tx_queue, entry, entry_next);
	return (0);
}