Example 1
static int
check_patch(patch_t **start_patch, unsigned int start_instr,
	    unsigned int *skip_addr, int *func_vals)
{
	patch_t *cur_patch;

	cur_patch = *start_patch;

	while (cur_patch != NULL && start_instr == cur_patch->begin) {
		if (func_vals[cur_patch->patch_func] == 0) {
			int skip;

			/* Start rejecting code */
			*skip_addr = start_instr + cur_patch->skip_instr;
			for (skip = cur_patch->skip_patch;
			     skip > 0 && cur_patch != NULL;
			     skip--)
				cur_patch = STAILQ_NEXT(cur_patch, links);
		} else {
			/* Accepted this patch.  Advance to the next
			 * one and wait for our instruction pointer to
			 * hit this point.
			 */
			cur_patch = STAILQ_NEXT(cur_patch, links);
		}
	}

	*start_patch = cur_patch;
	if (start_instr < *skip_addr)
		/* Still skipping */
		return (0);

	return (1);
}
Example 2
void ESP8266WiFiClass::_scanDone(void* result, int status)
{
    if (status != OK)
    {
        ESP8266WiFiClass::_scanCount = 0;
        ESP8266WiFiClass::_scanResult = 0;
    }
    else
    {
      
        int i = 0;
        bss_info_head_t* head = reinterpret_cast<bss_info_head_t*>(result);

        // Count the entries in the scan result list; the loop body is
        // intentionally empty.
        for (bss_info* it = STAILQ_FIRST(head); it; it = STAILQ_NEXT(it, next), ++i);
        ESP8266WiFiClass::_scanCount = i;
        if (i == 0)
        {
            ESP8266WiFiClass::_scanResult = 0;
        }
        else
        {
            bss_info* copied_info = new bss_info[i];
            i = 0;
            for (bss_info* it = STAILQ_FIRST(head); it; it = STAILQ_NEXT(it, next), ++i)
            {
                memcpy(copied_info + i, it, sizeof(bss_info));
            }

            ESP8266WiFiClass::_scanResult = copied_info;
        }

    }
    esp_schedule();   
}
Example 3
static void
fwip_stop(struct fwip_softc *fwip)
{
	struct firewire_comm *fc;
	struct fw_xferq *xferq;
	struct ifnet *ifp = fwip->fw_softc.fwip_ifp;
	struct fw_xfer *xfer, *next;
	int i;

	fc = fwip->fd.fc;

	if (fwip->dma_ch >= 0) {
		xferq = fc->ir[fwip->dma_ch];

		if (xferq->flag & FWXFERQ_RUNNING)
			fc->irx_disable(fc, fwip->dma_ch);
		xferq->flag &= 
			~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM |
			FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK);
		xferq->hand = NULL;

		for (i = 0; i < xferq->bnchunk; i++)
			m_freem(xferq->bulkxfer[i].mbuf);
		free(xferq->bulkxfer, M_FWIP);

		fw_bindremove(fc, &fwip->fwb);
		for (xfer = STAILQ_FIRST(&fwip->fwb.xferlist); xfer != NULL;
					xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}

		for (xfer = STAILQ_FIRST(&fwip->xferlist); xfer != NULL;
					xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}
		STAILQ_INIT(&fwip->xferlist);

		xferq->bulkxfer = NULL;
		fwip->dma_ch = -1;
	}

#if defined(__FreeBSD__)
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
#else
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#endif
}
Example 4
mpc_url_t *
mpc_url_task_get(void)
{
    mpc_url_t  *mpc_url;

    pthread_mutex_lock(&mutex_task);

    if (!STAILQ_EMPTY(&mpc_url_task_queue)) {
        mpc_url = STAILQ_FIRST(&mpc_url_task_queue);
        mpc_url_ntask--;
        STAILQ_REMOVE_HEAD(&mpc_url_task_queue, next);
        ASSERT(mpc_url->magic == MPC_URL_MAGIC);
        STAILQ_NEXT(mpc_url, next) = NULL;

        /*
        mpc_log_debug(0, "get task url(%d), total %d, host: \"%V\" uri: \"%V\"",
                      mpc_url->url_id, mpc_url_ntask,
                      &mpc_url->host, &mpc_url->uri);
                      */
        pthread_mutex_unlock(&mutex_task);

        return mpc_url;
    }

    pthread_mutex_unlock(&mutex_task);

    return NULL;
}
Example 5
mpc_url_t *
mpc_url_get(void)
{
    mpc_url_t  *mpc_url;
    uint8_t    *buf;

    pthread_mutex_lock(&mutex_free);

    if (!STAILQ_EMPTY(&mpc_url_free_queue)) {
        mpc_url = STAILQ_FIRST(&mpc_url_free_queue);
        mpc_url_nfree--;
        STAILQ_REMOVE_HEAD(&mpc_url_free_queue, next);
        ASSERT(mpc_url->magic == MPC_URL_MAGIC);
        goto done;
    }

    buf = (uint8_t *)mpc_calloc(sizeof(mpc_url_t) + MPC_URL_BUF_SIZE, 1);
    if (buf == NULL) {
        pthread_mutex_unlock(&mutex_free);
        return NULL;
    }

    mpc_url = (mpc_url_t *)(buf + MPC_URL_BUF_SIZE);
    mpc_url->buf = buf;
    mpc_url->buf_size = MPC_URL_BUF_SIZE;

    SET_MAGIC(mpc_url, MPC_URL_MAGIC);
done:
    STAILQ_NEXT(mpc_url, next) = NULL;

    pthread_mutex_unlock(&mutex_free);

    return mpc_url;
}
Example 6
static inline void
_klog_write_get(struct request *req, struct response *rsp, char *buf, int len)
{
    struct response *nr = rsp;
    int suffix_len;
    uint32_t i;
    struct bstring *key;

    for (i = 0; i < array_nelem(req->keys); ++i) {
        key = array_get(req->keys, i);

        if (nr->type != RSP_END && bstring_compare(key, &nr->key) == 0) {
            /* key was found, rsp at nr */
            suffix_len = cc_scnprintf(buf + len, KLOG_MAX_LEN - len, KLOG_GET_FMT,
                                      req_strings[req->type].len, req_strings[req->type].data,
                                      key->len, key->data, rsp->type, _get_val_rsp_len(nr, key));
            nr = STAILQ_NEXT(nr, next);
        } else {
            /* key not found */
            suffix_len = cc_scnprintf(buf + len, KLOG_MAX_LEN - len, KLOG_GET_FMT,
                                      req_strings[req->type].len, req_strings[req->type].data,
                                      key->len, key->data, RSP_UNKNOWN, 0);
        }

        ASSERT(len + suffix_len <= KLOG_MAX_LEN);

        if (log_write(klogger, buf, len + suffix_len)) {
            INCR(klog_metrics, klog_logged);
        } else {
            INCR(klog_metrics, klog_discard);
        }
    }

    ASSERT(nr->type == RSP_END);
}
Example 7
static struct mbuf *
_mbuf_get_proxy_adm(void)
{
    struct mbuf *mbuf;
    uint8_t *buf;

    if (!STAILQ_EMPTY(&free_mbufq_proxy_adm)) {
        ASSERT(nfree_mbufq_proxy_adm > 0);

        mbuf = STAILQ_FIRST(&free_mbufq_proxy_adm);
        nfree_mbufq_proxy_adm--;
        STAILQ_REMOVE_HEAD(&free_mbufq_proxy_adm, next);

        ASSERT(mbuf->magic == MBUF_MAGIC);
        goto done;
    }

    buf = nc_alloc(mbuf_chunk_size);
    if (buf == NULL) {
        return NULL;
    }

#if 1 //shenzheng 2015-7-9 proxy administer
#ifdef NC_DEBUG_LOG
	ntotal_mbuf_proxy_adm ++;
#endif
#endif //shenzheng 2015-7-9 proxy administer

    mbuf = (struct mbuf *)(buf + mbuf_offset);
    mbuf->magic = MBUF_MAGIC;

done:
    STAILQ_NEXT(mbuf, next) = NULL;
    return mbuf;
}
Example 8
Elf_Data *
elf_rawdata(Elf_Scn *s, Elf_Data *ed)
{
	Elf *e;
	int elf_class;
	uint32_t sh_type;
	struct _Libelf_Data *d;
	uint64_t sh_align, sh_offset, sh_size;

	if (s == NULL || (e = s->s_elf) == NULL || e->e_rawfile == NULL) {
		LIBELF_SET_ERROR(ARGUMENT, 0);
		return (NULL);
	}

	assert(e->e_kind == ELF_K_ELF);

	d = (struct _Libelf_Data *) ed;

	if (d == NULL && (d = STAILQ_FIRST(&s->s_rawdata)) != NULL)
		return (&d->d_data);

	if (d != NULL)
		return (&STAILQ_NEXT(d, d_next)->d_data);

	elf_class = e->e_class;

	assert(elf_class == ELFCLASS32 || elf_class == ELFCLASS64);

	if (elf_class == ELFCLASS32) {
		sh_type   = s->s_shdr.s_shdr32.sh_type;
		sh_offset = (uint64_t) s->s_shdr.s_shdr32.sh_offset;
		sh_size   = (uint64_t) s->s_shdr.s_shdr32.sh_size;
		sh_align  = (uint64_t) s->s_shdr.s_shdr32.sh_addralign;
	} else {
		sh_type   = s->s_shdr.s_shdr64.sh_type;
		sh_offset = s->s_shdr.s_shdr64.sh_offset;
		sh_size   = s->s_shdr.s_shdr64.sh_size;
		sh_align  = s->s_shdr.s_shdr64.sh_addralign;
	}

	if (sh_type == SHT_NULL) {
		LIBELF_SET_ERROR(SECTION, 0);
		return (NULL);
	}

	if ((d = _libelf_allocate_data(s)) == NULL)
		return (NULL);

	d->d_data.d_buf = (sh_type == SHT_NOBITS || sh_size == 0) ? NULL :
	    e->e_rawfile + sh_offset;
	d->d_data.d_off     = 0;
	d->d_data.d_align   = sh_align;
	d->d_data.d_size    = sh_size;
	d->d_data.d_type    = ELF_T_BYTE;
	d->d_data.d_version = e->e_version;

	STAILQ_INSERT_TAIL(&s->s_rawdata, d, d_next);

	return (&d->d_data);
}
Example 9
void mbuf_recycle(struct context *ctx, struct mbuf *mbuf)
{
    ctx->stats.buffers--;
    STAILQ_NEXT(mbuf, next) = NULL;
    STAILQ_INSERT_HEAD(&ctx->free_mbufq, mbuf, next);
    ctx->nfree_mbufq++;
}
Example 10
void
yasm_vps_delete(yasm_valparamhead *headp)
{
    yasm_valparam *cur, *next;

    cur = STAILQ_FIRST(headp);
    while (cur) {
        next = STAILQ_NEXT(cur, link);
        if (cur->val)
            yasm_xfree(cur->val);
        switch (cur->type) {
            case YASM_PARAM_ID:
                yasm_xfree(cur->param.id);
                break;
            case YASM_PARAM_STRING:
                yasm_xfree(cur->param.str);
                break;
            case YASM_PARAM_EXPR:
                yasm_expr_destroy(cur->param.e);
                break;
        }
        yasm_xfree(cur);
        cur = next;
    }
    STAILQ_INIT(headp);
}
Example 11
static void
_process_get(struct response *rsp, struct request *req)
{
    struct bstring *key;
    struct response *r = rsp;
    uint32_t i;

    INCR(process_metrics, get);
    /* use chained responses, move to the next response if key is found. */
    for (i = 0; i < array_nelem(req->keys); ++i) {
        INCR(process_metrics, get_key);
        key = array_get(req->keys, i);
        if (_get_key(r, key)) {
            req->nfound++;
            r->cas = false;
            r = STAILQ_NEXT(r, next);
            if (r == NULL) {
                INCR(process_metrics, get_ex);
                log_warn("get response incomplete due to lack of rsp objects");
                return;
            }
            INCR(process_metrics, get_key_hit);
        } else {
            INCR(process_metrics, get_key_miss);
        }
    }
    r->type = RSP_END;

    log_verb("get req %p processed, %d out of %d keys found", req, req->nfound, i);
}
Example 12
// Lua: table = wifi.ap.getclient()
static int wifi_ap_listclient( lua_State* L )
{
  if (wifi_get_opmode() == STATION_MODE)
  {
    return luaL_error( L, "Can't list client in STATION_MODE mode" );
  }

  char temp[64];

  lua_newtable(L);

  struct station_info * station = wifi_softap_get_station_info();
  struct station_info * next_station;
  while (station != NULL)
  {
    c_sprintf(temp, IPSTR, IP2STR(&station->ip));
    lua_pushstring(L, temp);

    c_sprintf(temp, MACSTR, MAC2STR(station->bssid));
    lua_setfield(L, -2, temp);

    next_station = STAILQ_NEXT(station, next);
    c_free(station);
    station = next_station;
  }

  return 1;
}
Example 13
static void
back_patch(void)
{
	struct instruction *cur_instr;

	for (cur_instr = STAILQ_FIRST(&seq_program);
	     cur_instr != NULL;
	     cur_instr = STAILQ_NEXT(cur_instr, links)) {
		if (cur_instr->patch_label != NULL) {
			struct ins_format3 *f3_instr;
			u_int address;

			if (cur_instr->patch_label->type != LABEL) {
				char buf[255];

				snprintf(buf, sizeof(buf),
					 "Undefined label %s",
					 cur_instr->patch_label->name);
				stop(buf, EX_DATAERR);
				/* NOTREACHED */
			}
			f3_instr = &cur_instr->format.format3;
			address = f3_instr->address;
			address += cur_instr->patch_label->info.linfo->address;
			f3_instr->address = address;
		}
	}
}
Example 14
static int nr_turn_permission_find(nr_turn_client_ctx *ctx, nr_transport_addr *addr,
                                   nr_turn_permission **permp)
{
  nr_turn_permission *perm;
  int _status;

  perm = STAILQ_FIRST(&ctx->permissions);
  while (perm) {
    if (!nr_transport_addr_cmp(&perm->addr, addr,
                               NR_TRANSPORT_ADDR_CMP_MODE_ADDR))
      break;

    perm = STAILQ_NEXT(perm, entry);
  }

  if (!perm) {
    ABORT(R_NOT_FOUND);
  }
  if (perm->stun->last_error_code == 403) {
    ABORT(R_NOT_PERMITTED);
  }
  *permp = perm;

  _status=0;
abort:
  return(_status);
}
Example 15
void
request_reset(struct request *req)
{
    ASSERT(req != NULL && req->keys != NULL);

    STAILQ_NEXT(req, next) = NULL;
    req->free = false;

    req->rstate = REQ_PARSING;
    req->type = REQ_UNKNOWN;

    req->keys->nelem = 0;
    bstring_init(&req->vstr);
    req->nfound = 0;

    req->flag = 0;
    req->expiry = 0;
    req->vlen = 0;
    req->delta = 0;
    req->vcas = 0;

    req->noreply = 0;
    req->val = 0;
    req->serror = 0;
    req->cerror = 0;
}
Example 16
void linker_table(void){
    nf_buff_t *buff1 = NULL;
    nf_bp_t *buffer_pool = NULL;
    struct block_frame *bf, *tvar;
    int ret = 0, i = 0, n;

    buffer_pool = (nf_bp_t *)calloc(1, sizeof(nf_bp_t));
    printf("sizeof(nf_bp_t) = %zu\n", sizeof(nf_bp_t));

    /* first buff */
    buff1 = (nf_buff_t *)malloc(sizeof(nf_buff_t));
    printf("sizeof(nf_buff_t) = %zu\n\n", sizeof(nf_buff_t));
    buff1->len = 1;
    buff1->off = 1;
    buff1->last = "first buff";
    buff1->pool = buffer_pool;
    STAILQ_INIT(&buff1->head);

    for (n = 0; n < 3; n++) {
        ret = try_expand_buffer(buff1, NF_PAGE_SIZE);
    }

    /* expanded STAILQ_FOREACH_SAFE pattern: fetch the next element before
     * visiting the current one, so the body could safely unlink/free it */
    for ((bf) = STAILQ_FIRST((&buff1->head)); (bf) && ((tvar) = STAILQ_NEXT((bf), field), 1); (bf) = (tvar)) {
        printf("buff %d\n", ++i);
    }

    printf("ret = %d\n", ret);
}
Example 17
int
dwarf_linesrc(Dwarf_Line ln, char **ret_linesrc, Dwarf_Error *error)
{
	Dwarf_LineInfo li;
	Dwarf_LineFile lf;
	int i;

	if (ln == NULL || ret_linesrc == NULL) {
		DWARF_SET_ERROR(NULL, error, DW_DLE_ARGUMENT);
		return (DW_DLV_ERROR);
	}

	li = ln->ln_li;
	assert(li != NULL);

	for (i = 1, lf = STAILQ_FIRST(&li->li_lflist);
	     (Dwarf_Unsigned) i < ln->ln_fileno && lf != NULL;
	     i++, lf = STAILQ_NEXT(lf, lf_next))
		;

	if (lf == NULL) {
		DWARF_SET_ERROR(NULL, error, DW_DLE_LINE_FILE_NUM_BAD);
		return (DW_DLV_ERROR);
	}

	if (lf->lf_fullpath) {
		*ret_linesrc = (char *) lf->lf_fullpath;
		return (DW_DLV_OK);
	}

	*ret_linesrc = lf->lf_fname;

	return (DW_DLV_OK);
}
Example 18
int nr_turn_client_process_response(nr_turn_client_ctx *ctx,
                                    UCHAR *msg, int len,
                                    nr_transport_addr *turn_server_addr)
{
  int r, _status;
  nr_turn_stun_ctx *sc1;

  switch (ctx->state) {
    case NR_TURN_CLIENT_STATE_ALLOCATING:
    case NR_TURN_CLIENT_STATE_ALLOCATED:
      break;
    default:
      ABORT(R_FAILED);
  }

  sc1 = STAILQ_FIRST(&ctx->stun_ctxs);
  while (sc1) {
    r = nr_stun_client_process_response(sc1->stun, msg, len, turn_server_addr);
    if (!r)
      break;
    if (r==R_RETRY)  /* Likely a 401 and we will retry */
      break;
    if (r != R_REJECTED)
      ABORT(r);
    sc1 = STAILQ_NEXT(sc1, entry);
  }
  if (!sc1)
    ABORT(R_REJECTED);

  _status=0;
abort:
  return(_status);
}
Example 19
int nr_turn_client_cancel(nr_turn_client_ctx *ctx)
{
  nr_turn_stun_ctx *stun = 0;

  if (ctx->state == NR_TURN_CLIENT_STATE_CANCELLED ||
      ctx->state == NR_TURN_CLIENT_STATE_FAILED)
    return(0);

  if (ctx->label)
    r_log(NR_LOG_TURN, LOG_INFO, "TURN(%s): cancelling", ctx->label);

  /* Cancel the STUN client ctxs */
  stun = STAILQ_FIRST(&ctx->stun_ctxs);
  while (stun) {
    nr_stun_client_cancel(stun->stun);
    stun = STAILQ_NEXT(stun, entry);
  }

  /* Cancel the timers, if not already cancelled */
  NR_async_timer_cancel(ctx->connected_timer_handle);
  NR_async_timer_cancel(ctx->refresh_timer_handle);

  ctx->state = NR_TURN_CLIENT_STATE_CANCELLED;

  return(0);
}
Example 20
int
dwarf_get_abbrev_entry(Dwarf_Abbrev abbrev, Dwarf_Signed ndx,
    Dwarf_Half *attr_num, Dwarf_Signed *form, Dwarf_Off *offset,
    Dwarf_Error *error)
{
	Dwarf_AttrDef ad;
	int i;

	if (abbrev == NULL || attr_num == NULL || form == NULL ||
	    offset == NULL) {
		DWARF_SET_ERROR(NULL, error, DW_DLE_ARGUMENT);
		return (DW_DLV_ERROR);
	}

	if (ndx < 0 || (uint64_t) ndx >= abbrev->ab_atnum) {
		DWARF_SET_ERROR(NULL, error, DW_DLE_NO_ENTRY);
		return (DW_DLV_NO_ENTRY);
	}

	ad = STAILQ_FIRST(&abbrev->ab_attrdef);
	for (i = 0; i < ndx && ad != NULL; i++)
		ad = STAILQ_NEXT(ad, ad_next);

	assert(ad != NULL);

	*attr_num = ad->ad_attrib;
	*form = ad->ad_form;
	*offset = ad->ad_offset;

	return (DW_DLV_OK);
}
Example 21
static void
khttpd_log_abort(struct khttpd_log *log)
{
	struct thread *td;
	struct mbuf *pkt, *m;
	int fd;

	td = curthread;

	mtx_lock(&khttpd_log_lock);

	pkt = mbufq_flush(&log->queue);
	if (pkt != NULL) {
		if (log->draining) {
			log->draining = FALSE;
			wakeup(log);
		}

		TAILQ_REMOVE(&khttpd_busy_logs, log, link);

		while ((m = pkt) != NULL) {
			pkt = STAILQ_NEXT(pkt, m_stailqpkt);
			m_freem(m);
		}
	}

	fd = log->fd;
	log->fd = -1;

	mtx_unlock(&khttpd_log_lock);

	kern_close(td, fd);
}
Example 22
struct resource *
pcib_host_res_alloc(struct pcib_host_resources *hr, device_t dev, int type,
    int *rid, u_long start, u_long end, u_long count, u_int flags)
{
	struct resource_list_entry *rle;
	struct resource *r;
	u_long new_start, new_end;

	if (flags & RF_PREFETCHABLE)
		KASSERT(type == SYS_RES_MEMORY,
		    ("only memory is prefetchable"));

	rle = resource_list_find(&hr->hr_rl, type, 0);
	if (rle == NULL) {
		/*
		 * No decoding ranges for this resource type, just pass
		 * the request up to the parent.
		 */
		return (bus_generic_alloc_resource(hr->hr_pcib, dev, type, rid,
		    start, end, count, flags));
	}

restart:
	/* Try to allocate from each decoded range. */
	for (; rle != NULL; rle = STAILQ_NEXT(rle, link)) {
		if (rle->type != type)
			continue;
		if (((flags & RF_PREFETCHABLE) != 0) !=
		    ((rle->flags & RLE_PREFETCH) != 0))
			continue;
		new_start = ulmax(start, rle->start);
		new_end = ulmin(end, rle->end);
		if (new_start > new_end ||
		    new_start + count - 1 > new_end ||
		    new_start + count < new_start)
			continue;
		r = bus_generic_alloc_resource(hr->hr_pcib, dev, type, rid,
		    new_start, new_end, count, flags);
		if (r != NULL) {
			if (bootverbose)
				device_printf(hr->hr_pcib,
			    "allocated type %d (%#lx-%#lx) for rid %x of %s\n",
				    type, rman_get_start(r), rman_get_end(r),
				    *rid, pcib_child_name(dev));
			return (r);
		}
	}

	/*
	 * If we failed to find a prefetch range for a memory
	 * resource, try again without prefetch.
	 */
	if (flags & RF_PREFETCHABLE) {
		flags &= ~RF_PREFETCHABLE;
		rle = resource_list_find(&hr->hr_rl, type, 0);
		goto restart;
	}
	return (NULL);
}
Example 23
void
elf_strtab_entry_set_str(elf_strtab_entry *entry, const char *str)
{
    elf_strtab_entry *last;
    if (entry->str)
        yasm_xfree(entry->str);
    entry->str = yasm__xstrdup(str);

    /* Update all following indices since string length probably changes */
    last = entry;
    entry = STAILQ_NEXT(last, qlink);
    while (entry) {
        entry->index = last->index + (unsigned long)strlen(last->str) + 1;
        last = entry;
        entry = STAILQ_NEXT(last, qlink);
    }
}
Example 24
/*
 * Remove mbuf from the mhdr Q
 */
void
mbuf_remove(struct mhdr *mhdr, struct mbuf *mbuf)
{
    log_debug(LOG_VVERB, "remove mbuf %p len %d", mbuf, mbuf->last - mbuf->pos);

    STAILQ_REMOVE(mhdr, mbuf, mbuf, next);
    STAILQ_NEXT(mbuf, next) = NULL;
}
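A minimal, hypothetical call-site sketch follows. The owning msg structure and its mhdr member are assumptions made purely for illustration; mbuf_put() is the free-pool routine shown in Example 30. The point is why mbuf_remove() clears STAILQ_NEXT: mbuf_put() asserts that the buffer is fully detached before it is recycled.

static void
msg_release_first_mbuf(struct msg *msg)
{
    struct mbuf *m;

    m = STAILQ_FIRST(&msg->mhdr);   /* head of the message's mbuf queue */
    if (m == NULL) {
        return;
    }

    mbuf_remove(&msg->mhdr, m);     /* unlinks and sets STAILQ_NEXT(m, next) = NULL */
    mbuf_put(m);                    /* ok: the ASSERT in mbuf_put() now holds */
}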
Example 25
/*
 * Remove mbuf_block from the mhdr Q
 */
static void
mbuf_block_remove(struct mhdr *mhdr, struct mbuf_block *mbuf_block)
{
	//log_debug(LOG_VVERB, "remove mbuf_block %p", mbuf_block);

	STAILQ_REMOVE(mhdr, mbuf_block, struct mbuf_block, next);
	STAILQ_NEXT(mbuf_block, next) = NULL;
}
Example 26
void
conn_cq_push(struct conn_q *cq, struct conn *c)
{
    STAILQ_NEXT(c, c_tqe) = NULL;

    pthread_mutex_lock(&cq->lock);
    STAILQ_INSERT_TAIL(&cq->hdr, c, c_tqe);
    pthread_mutex_unlock(&cq->lock);
}
Example 27
/**
 * mutt_list_compare - Compare two string lists
 * @param ah First string list
 * @param bh Second string list
 * @retval true Lists are identical
 *
 * To be identical, the lists must both be the same length and contain the same
 * strings.  Two empty lists are identical.
 */
bool mutt_list_compare(const struct ListHead *ah, const struct ListHead *bh)
{
  struct ListNode *a = STAILQ_FIRST(ah);
  struct ListNode *b = STAILQ_FIRST(bh);

  while (a && b)
  {
    if (mutt_str_strcmp(a->data, b->data) != 0)
      return false;

    a = STAILQ_NEXT(a, entries);
    b = STAILQ_NEXT(b, entries);
  }
  if (a || b)
    return false;

  return true;
}
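To make the doc comment concrete, here is a small usage sketch. It assumes only the ListHead/ListNode types and STAILQ macros visible above; the nodes are wired up by hand for illustration only (real callers would normally build lists through NeoMutt's list helpers).

static void list_compare_demo(void)
{
  struct ListHead a = STAILQ_HEAD_INITIALIZER(a);
  struct ListHead b = STAILQ_HEAD_INITIALIZER(b);
  struct ListNode n1 = { .data = (char *) "apple" };
  struct ListNode n2 = { .data = (char *) "apple" };

  STAILQ_INSERT_TAIL(&a, &n1, entries);
  STAILQ_INSERT_TAIL(&b, &n2, entries);

  /* true: both lists hold one node and the strings match;
   * two empty lists would also compare as identical */
  bool same = mutt_list_compare(&a, &b);
  (void) same;
}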
Example 28
static struct mbuf_block *
_mbuf_block_get(struct mbuf_pool *pool)
{
	struct mbuf_block *mbuf_block;
	char *buf;

	if (!STAILQ_EMPTY(&pool->free_mbuf_blockq)) {
		assert(pool->nfree_mbuf_blockq > 0);

		mbuf_block = STAILQ_FIRST(&pool->free_mbuf_blockq);
		pool->nfree_mbuf_blockq--;
		STAILQ_REMOVE_HEAD(&pool->free_mbuf_blockq, next);

		assert(mbuf_block->magic == MBUF_BLOCK_MAGIC);
		goto done;
	}

	buf = (char *) malloc(pool->mbuf_block_chunk_size);
	if (OXT_UNLIKELY(buf == NULL)) {
		return NULL;
	}

	/*
	 * mbuf_block header is at the tail end of the mbuf_block. This enables us to catch
	 * buffer overrun early by asserting on the magic value during get or
	 * put operations
	 *
	 *   <------------- mbuf_block_chunk_size ------------------->
	 *   +-------------------------------------------------------+
	 *   |       mbuf_block data          |  mbuf_block header   |
	 *   |     (mbuf_block_offset)        | (struct mbuf_block)  |
	 *   +-------------------------------------------------------+
	 *   ^                                ^^
	 *   |                                ||
	 *   \                                |\
	 * block->start                       | block->end (one byte past valid bound)
	 *                                    \
	 *                                    block
	 *
	 */
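	/*
	 * The pointer arithmetic below assumes mbuf_block_offset ==
	 * mbuf_block_chunk_size - sizeof(struct mbuf_block) (not shown in
	 * this snippet), which is what places the header at the tail of the
	 * chunk as pictured above.
	 */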
	mbuf_block = (struct mbuf_block *)(buf + pool->mbuf_block_offset);
	mbuf_block->magic = MBUF_BLOCK_MAGIC;
	mbuf_block->pool  = pool;
	mbuf_block->refcount = 1;

done:
	STAILQ_NEXT(mbuf_block, next) = NULL;
	#ifdef MBUF_ENABLE_DEBUGGING
		TAILQ_INSERT_HEAD(&pool->active_mbuf_blockq, mbuf_block, active_q);
	#endif
	#ifdef MBUF_ENABLE_BACKTRACES
		mbuf_block->backtrace = strdup(oxt::thread::current_backtrace().c_str());
	#endif
	pool->nactive_mbuf_blockq++;
	return mbuf_block;
}
Example 29
gdp_event_t *
gdp_event_next(gdp_gcl_t *gcl, EP_TIME_SPEC *timeout)
{
	gdp_event_t *gev;
	EP_TIME_SPEC *abs_to = NULL;
	EP_TIME_SPEC tv;

	if (timeout != NULL)
	{
		ep_time_deltanow(timeout, &tv);
		abs_to = &tv;
	}

	ep_thr_mutex_lock(&ActiveListMutex);
	for (;;)
	{
		int err;

		while ((gev = STAILQ_FIRST(&ActiveList)) == NULL)
		{
			// wait until we have at least one thing to try
			err = ep_thr_cond_wait(&ActiveListSig, &ActiveListMutex, abs_to);
			if (err == ETIMEDOUT)
				goto fail0;
		}
		while (gev != NULL)
		{
			// if this isn't the GCL we want, keep searching the list
			if (gcl == NULL || gev->gcl == gcl)
				break;

			// not the event we want
			gev = STAILQ_NEXT(gev, queue);
		}

		if (gev != NULL)
		{
			// found a match!
			break;
		}

		// if there is no match, wait until something is added and try again
		err = ep_thr_cond_wait(&ActiveListSig, &ActiveListMutex, abs_to);
		if (err == ETIMEDOUT)
			break;
	}

	if (gev != NULL)
		STAILQ_REMOVE(&ActiveList, gev, gdp_event, queue);
fail0:
	ep_thr_mutex_unlock(&ActiveListMutex);

	// the caller must call gdp_event_free(gev)
	return gev;
}
Example 30
void
mbuf_put(struct mbuf *mbuf)
{
    log_debug(LOG_VVERB, "put mbuf %p len %d", mbuf, mbuf->last - mbuf->pos);

    ASSERT(STAILQ_NEXT(mbuf, next) == NULL);
    ASSERT(mbuf->magic == MBUF_MAGIC);

    nfree_mbufq++;
    STAILQ_INSERT_HEAD(&free_mbufq, mbuf, next);
}