Ejemplo n.º 1
0
void strings_test() {
	ustr("hello");
	ustr("world");
	ustr("world");
	data *d;
	RB_FOREACH(d, tree, &head) puts(d->s);
}
Ejemplo n.º 2
0
void
environ_copy(struct environ *srcenv, struct environ *dstenv)
{
	struct environ_entry	*envent;

	RB_FOREACH(envent, environ, srcenv)
		environ_set(dstenv, envent->name, envent->value);
}
Ejemplo n.º 3
0
void
cmd_list_windows_server(struct cmd *self, struct cmd_ctx *ctx)
{
	struct session	*s;

	RB_FOREACH(s, sessions, &sessions)
		cmd_list_windows_session(self, s, ctx, 1);
}
Ejemplo n.º 4
0
static void
cmd_list_windows_server(struct cmd *self, struct cmdq_item *item)
{
	struct session	*s;

	RB_FOREACH(s, sessions, &sessions)
		cmd_list_windows_session(self, s, item, 1);
}
Ejemplo n.º 5
0
/* Copy the lang_str instance */
lang_str_t *lang_str_copy ( const lang_str_t *ls )
{
  lang_str_t *ret = lang_str_create();
  lang_str_ele_t *e;
  RB_FOREACH(e, ls, link)
    lang_str_add(ret, e->str, e->lang, 0);
  return ret;
}
Ejemplo n.º 6
0
/* Invoke "it" with "arg" on every revision in this file's tree. */
static void
revtree_foreach(VALUE self, void (*it)(struct rcsrev *, VALUE), VALUE arg)
{
	struct rcsrev *rev;

	RB_FOREACH(rev, rcsrevtree, rb_rcsfile_revs(self)) {
		(*it)(rev, arg);
	}
}
Ejemplo n.º 7
0
void
cmd_list_panes_server(struct cmd *self, struct cmd_q *cmdq)
{
	struct session	*s;

	RB_FOREACH(s, sessions, &sessions)
		cmd_list_panes_session(self, s, cmdq, 2);
}
Ejemplo n.º 8
0
void
cmd_list_panes_session(
    struct cmd *self, struct session *s, struct cmd_q *cmdq, int type)
{
	struct winlink	*wl;

	RB_FOREACH(wl, winlinks, &s->windows)
		cmd_list_panes_window(self, s, wl, cmdq, type);
}
Ejemplo n.º 9
0
static void
cmd_list_panes_session(struct cmd *self, struct session *s,
    struct cmdq_item *item, int type)
{
	struct winlink	*wl;

	RB_FOREACH(wl, winlinks, &s->windows)
		cmd_list_panes_window(self, s, wl, item, type);
}
Ejemplo n.º 10
0
static void
epggrab_ota_requeue ( void )
{
  epggrab_ota_mux_t *om;

  /*
   * enqueue all muxes, but ommit the delayed ones (active+pending)
   */
  RB_FOREACH(om, &epggrab_ota_all, om_global_link)
    epggrab_ota_queue_one(om);
}
Ejemplo n.º 11
0
/*
 * Call "f" with "arg" for every node in the tree, holding the
 * node lock across the whole walk.
 */
void
ieee80211_iterate_nodes(struct ieee80211com *ic, ieee80211_iter_func *f,
    void *arg)
{
	struct ieee80211_node *node;

	IEEE80211_NODE_LOCK(ic);
	RB_FOREACH(node, ieee80211_tree, &ic->ic_tree) {
		(*f)(arg, node);
	}
	IEEE80211_NODE_UNLOCK(ic);
}
Ejemplo n.º 12
0
static VALUE
hash_from_tokmap(struct rcstokmap *map)
{
	VALUE hash;
	struct rcstokpair *p;

	hash = rb_hash_new();
	RB_FOREACH(p, rcstokmap, map)
		rb_hash_aset(hash, str_from_tok(p->first),
			     str_from_tok(p->second));
	return hash;
}
Ejemplo n.º 13
0
/*
 * Call "f" with "arg" for every node in the tree, blocking network
 * interrupts (splnet) for the duration of the walk.
 */
void
ieee80211_iterate_nodes(struct ieee80211com *ic, ieee80211_iter_func *f,
    void *arg)
{
	struct ieee80211_node *node;
	int spl;

	spl = splnet();
	RB_FOREACH(node, ieee80211_tree, &ic->ic_tree) {
		(*f)(arg, node);
	}
	splx(spl);
}
Ejemplo n.º 14
0
/*
 * Demonstration of the basic RB-tree operations: iterate, insert,
 * look up.  NOTE(review): when the RB_FOREACH loop runs to
 * completion, "ip" ends up NULL, so the RB_INSERT below inserts a
 * null element — presumably placeholder/example code rather than
 * something meant to execute as-is; confirm before reuse.
 */
static void
rb_ex(void)
{
	struct item *ip, key, *exists;

	/* Walk every item in the map (body intentionally empty). */
	RB_FOREACH(ip, item_map, &map) {
	}

	/* RB_INSERT returns the colliding element, or NULL if inserted. */
	exists = RB_INSERT(item_map, &map, ip);

	// key.key = y;
	/* Exact-match lookup by key (RB_NFIND would find the next >=). */
	ip = RB_FIND(item_map, &map, &key);
	// RB_NFIND ...
}
Ejemplo n.º 15
0
/*
 * Complete a scan of potential channels.  In hostap mode, pick a
 * channel to create a BSS on; otherwise select the best matching
 * candidate (highest RSSI) from the scan tree and start
 * authentication, or rescan / create an IBSS when nothing
 * suitable was found.
 */
void
ieee80211_end_scan(struct ifnet *ifp)
{
	/* NOTE(review): the direct cast assumes the ifnet is the first
	 * member of struct ieee80211com — confirm against the header. */
	struct ieee80211com *ic = (void *)ifp;
	struct ieee80211_node *ni, *nextbs, *selbs;

	if (ifp->if_flags & IFF_DEBUG)
		printf("%s: end %s scan\n", ifp->if_xname,
			(ic->ic_flags & IEEE80211_F_ASCAN) ?
				"active" : "passive");

	/* After a full scan cycle, fall back to passive scanning. */
	if (ic->ic_scan_count)
		ic->ic_flags &= ~IEEE80211_F_ASCAN;

	/* Start the candidate walk from the lowest-keyed node. */
	ni = RB_MIN(ieee80211_tree, &ic->ic_tree);

#ifndef IEEE80211_STA_ONLY
	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
		/* XXX off stack? */
		u_char occupied[howmany(IEEE80211_CHAN_MAX, NBBY)];
		int i, fail;

		/*
		 * The passive scan to look for existing AP's completed,
		 * select a channel to camp on.  Identify the channels
		 * that already have one or more AP's and try to locate
		 * an unnoccupied one.  If that fails, pick a random
		 * channel from the active set.
		 */
		memset(occupied, 0, sizeof(occupied));
		RB_FOREACH(ni, ieee80211_tree, &ic->ic_tree)
			setbit(occupied, ieee80211_chan2ieee(ic, ni->ni_chan));
		for (i = 0; i < IEEE80211_CHAN_MAX; i++)
			if (isset(ic->ic_chan_active, i) && isclr(occupied, i))
				break;
		if (i == IEEE80211_CHAN_MAX) {
			fail = arc4random() & 3;	/* random 0-3 */
			for (i = 0; i < IEEE80211_CHAN_MAX; i++)
				if (isset(ic->ic_chan_active, i) && fail-- == 0)
					break;
		}
		ieee80211_create_ibss(ic, &ic->ic_channels[i]);
		goto wakeup;
	}
#endif
	if (ni == NULL) {
		/* Scan tree is empty: no candidates at all. */
		DPRINTF(("no scan candidate\n"));
 notfound:

#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode == IEEE80211_M_IBSS &&
		    (ic->ic_flags & IEEE80211_F_IBSSON) &&
		    ic->ic_des_esslen != 0) {
			ieee80211_create_ibss(ic, ic->ic_ibss_chan);
			goto wakeup;
		}
#endif
		/*
		 * Scan the next mode if nothing has been found. This
		 * is necessary if the device supports different
		 * incompatible modes in the same channel range, like
		 * like 11b and "pure" 11G mode. This will loop
		 * forever except for user-initiated scans.
		 */
		if (ieee80211_next_mode(ifp) == IEEE80211_MODE_AUTO) {
			if (ic->ic_scan_lock & IEEE80211_SCAN_REQUEST &&
			    ic->ic_scan_lock & IEEE80211_SCAN_RESUME) {
				ic->ic_scan_lock = IEEE80211_SCAN_LOCKED;
				/* Return from an user-initiated scan */
				wakeup(&ic->ic_scan_lock);
			} else if (ic->ic_scan_lock & IEEE80211_SCAN_REQUEST)
				goto wakeup;
			ic->ic_scan_count++;
		}

		/*
		 * Reset the list of channels to scan and start again.
		 */
		ieee80211_next_scan(ifp);
		return;
	}
	selbs = NULL;

	/* Pick the matching candidate with the best signal strength. */
	for (; ni != NULL; ni = nextbs) {
		/* Fetch the successor first: ni may be freed below. */
		nextbs = RB_NEXT(ieee80211_tree, &ic->ic_tree, ni);
		if (ni->ni_fails) {
			/*
			 * The configuration of the access points may change
			 * during my scan.  So delete the entry for the AP
			 * and retry to associate if there is another beacon.
			 */
			if (ni->ni_fails++ > 2)
				ieee80211_free_node(ic, ni);
			continue;
		}
		if (ieee80211_match_bss(ic, ni) == 0) {
			if (selbs == NULL)
				selbs = ni;
			else if (ni->ni_rssi > selbs->ni_rssi)
				selbs = ni;
		}
	}
	if (selbs == NULL)
		goto notfound;
	/* Adopt the selected BSS as our own. */
	(*ic->ic_node_copy)(ic, ic->ic_bss, selbs);
	ni = ic->ic_bss;

	/*
	 * Set the erp state (mostly the slot time) to deal with
	 * the auto-select case; this should be redundant if the
	 * mode is locked.
	 */
	ic->ic_curmode = ieee80211_chan2mode(ic, ni->ni_chan);
	ieee80211_reset_erp(ic);

	if (ic->ic_flags & IEEE80211_F_RSNON)
		ieee80211_choose_rsnparams(ic);
	else if (ic->ic_flags & IEEE80211_F_WEPON)
		ni->ni_rsncipher = IEEE80211_CIPHER_USEGROUP;

	ieee80211_node_newstate(selbs, IEEE80211_STA_BSS);
#ifndef IEEE80211_STA_ONLY
	if (ic->ic_opmode == IEEE80211_M_IBSS) {
		ieee80211_fix_rate(ic, ni, IEEE80211_F_DOFRATE |
		    IEEE80211_F_DONEGO | IEEE80211_F_DODEL);
		if (ni->ni_rates.rs_nrates == 0)
			goto notfound;
		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
	} else
#endif
		ieee80211_new_state(ic, IEEE80211_S_AUTH, -1);

 wakeup:
	if (ic->ic_scan_lock & IEEE80211_SCAN_REQUEST) {
		/* Return from an user-initiated scan */
		wakeup(&ic->ic_scan_lock);
	}

	ic->ic_scan_lock = IEEE80211_SCAN_UNLOCKED;
}
Ejemplo n.º 16
0
/*
 * Sticky-address lookup: find the source-tracking node cached for
 * (af, saddr, rule, type) and, if the stored redirection address is
 * still covered by the rule's address pool, copy it into naddr.
 * Returns 0 on success; -1 when no entry exists or the cached
 * address has gone stale (the stale node is torn down on the way
 * out).
 */
int
pf_map_addr_sticky(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr,
    struct pf_addr *naddr, struct pf_src_node **sns, struct pf_pool *rpool,
    enum pf_sn_types type)
{
	struct pf_addr		*raddr, *rmask, *cached;
	struct pf_state		*s;
	struct pf_src_node	 k;
	int			 valid;

	/* Build the search key for the source-tracking tree. */
	k.af = af;
	k.type = type;
	PF_ACPY(&k.addr, saddr, af);
	k.rule.ptr = r;
	pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
	sns[type] = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
	if (sns[type] == NULL)
		return (-1);

	/* check if the cached entry is still valid */
	cached = &(sns[type])->raddr;
	valid = 0;
	if (PF_AZERO(cached, af)) {
		/* An all-zero cached address is always acceptable. */
		valid = 1;
	} else if (rpool->addr.type == PF_ADDR_DYNIFTL) {
		if (pfr_kentry_byaddr(rpool->addr.p.dyn->pfid_kt, cached,
		    af, 0))
			valid = 1;
	} else if (rpool->addr.type == PF_ADDR_TABLE) {
		if (pfr_kentry_byaddr(rpool->addr.p.tbl, cached, af, 0))
			valid = 1;
	} else if (rpool->addr.type != PF_ADDR_NOROUTE) {
		/* Plain address/mask pool: match against it directly. */
		raddr = &rpool->addr.v.a.addr;
		rmask = &rpool->addr.v.a.mask;
		valid = pf_match_addr(0, raddr, rmask, cached, af);
	}
	if (!valid) {
		if (pf_status.debug >= LOG_DEBUG) {
			log(LOG_DEBUG, "pf: pf_map_addr: "
			    "stale src tracking (%u) ", type);
			pf_print_host(&k.addr, 0, af);
			addlog(" to ");
			pf_print_host(cached, 0, af);
			addlog("\n");
		}
		/* Detach the stale node from every state referencing it. */
		if (sns[type]->states != 0) {
			/* XXX expensive */
			RB_FOREACH(s, pf_state_tree_id,
			   &tree_id)
				pf_state_rm_src_node(s,
				    sns[type]);
		}
		/* Force immediate expiry and remove the node. */
		sns[type]->expire = 1;
		pf_remove_src_node(sns[type]);
		sns[type] = NULL;
		return (-1);
	}
	/* Valid cache hit: hand the cached address back (unless zero). */
	if (!PF_AZERO(cached, af))
		PF_ACPY(naddr, cached, af);
	if (pf_status.debug >= LOG_DEBUG) {
		log(LOG_DEBUG, "pf: pf_map_addr: "
		    "src tracking (%u) maps ", type);
		pf_print_host(&k.addr, 0, af);
		addlog(" to ");
		pf_print_host(naddr, 0, af);
		addlog("\n");
	}
	return (0);
}
Ejemplo n.º 17
0
/*
 * Print the VM map of process "pid" (pid == 0 means the kernel map)
 * read through libkvm, in whichever output formats are enabled, and
 * accumulate per-entry totals into "sum".
 * NOTE(review): A()/S()/P()/D()/KDEREF() appear to be the kbit
 * helper macros (kernel address, size, printable pointer,
 * dereferenced local copy, kvm read) — confirm against their
 * definitions elsewhere in this file.
 */
void
process_map(kvm_t *kd, pid_t pid, struct kinfo_proc *proc, struct sum *sum)
{
	struct kbit kbit[3], *vmspace, *vm_map;
	struct vm_map_entry *vm_map_entry;
	size_t total = 0;
	char *thing;
	uid_t uid;
	int vmmap_flags;

	/* Non-root users may only look at their own processes. */
	if ((uid = getuid())) {
		if (pid == 0) {
			warnx("kernel map is restricted");
			return;
		}
		if (uid != proc->p_uid) {
			warnx("other users' process maps are restricted");
			return;
		}
	}

	vmspace = &kbit[0];
	vm_map = &kbit[1];

	A(vmspace) = 0;
	A(vm_map) = 0;

	/* For a real process, pull its vmspace out of the kernel. */
	if (pid > 0) {
		A(vmspace) = (u_long)proc->p_vmspace;
		S(vmspace) = sizeof(struct vmspace);
		KDEREF(kd, vmspace);
		thing = "proc->p_vmspace.vm_map";
	} else {
		A(vmspace) = 0;
		S(vmspace) = 0;
		thing = "kernel_map";
	}

	/* Optional debug dump of the raw vmspace structure. */
	if (pid > 0 && (debug & PRINT_VMSPACE)) {
		printf("proc->p_vmspace %p = {", P(vmspace));
		printf(" vm_refcnt = %d,", D(vmspace, vmspace)->vm_refcnt);
		printf(" vm_shm = %p,\n", D(vmspace, vmspace)->vm_shm);
		printf("    vm_rssize = %d,", D(vmspace, vmspace)->vm_rssize);
#if 0
		printf(" vm_swrss = %d,", D(vmspace, vmspace)->vm_swrss);
#endif
		printf(" vm_tsize = %d,", D(vmspace, vmspace)->vm_tsize);
		printf(" vm_dsize = %d,\n", D(vmspace, vmspace)->vm_dsize);
		printf("    vm_ssize = %d,", D(vmspace, vmspace)->vm_ssize);
		printf(" vm_taddr = %p,", D(vmspace, vmspace)->vm_taddr);
		printf(" vm_daddr = %p,\n", D(vmspace, vmspace)->vm_daddr);
		printf("    vm_maxsaddr = %p,",
		    D(vmspace, vmspace)->vm_maxsaddr);
		printf(" vm_minsaddr = %p }\n",
		    D(vmspace, vmspace)->vm_minsaddr);
	}

	/* The vm_map lives inside the vmspace for a process, or at
	 * kernel_map_addr for the kernel. */
	S(vm_map) = sizeof(struct vm_map);
	if (pid > 0) {
		A(vm_map) = A(vmspace);
		memcpy(D(vm_map, vm_map), &D(vmspace, vmspace)->vm_map,
		    S(vm_map));
	} else {
		A(vm_map) = kernel_map_addr;
		KDEREF(kd, vm_map);
	}
	/* Optional debug dump of the raw vm_map structure. */
	if (debug & PRINT_VM_MAP) {
		printf("%s %p = {", thing, P(vm_map));

		printf(" pmap = %p,\n", D(vm_map, vm_map)->pmap);
		printf("    lock = <struct lock>\n");
		printf("    size = %lx,", D(vm_map, vm_map)->size);
		printf(" ref_count = %d,", D(vm_map, vm_map)->ref_count);
		printf(" ref_lock = <struct simplelock>,\n");
		printf("    min_offset-max_offset = 0x%lx-0x%lx\n",
		    D(vm_map, vm_map)->min_offset,
		    D(vm_map, vm_map)->max_offset);
		printf("    b_start-b_end = 0x%lx-0x%lx\n",
		    D(vm_map, vm_map)->b_start,
		    D(vm_map, vm_map)->b_end);
		printf("    s_start-s_end = 0x%lx-0x%lx\n",
		    D(vm_map, vm_map)->s_start,
		    D(vm_map, vm_map)->s_end);
		vmmap_flags = D(vm_map, vm_map)->flags;
		printf("    flags = %x <%s%s%s%s%s%s >,\n",
		    vmmap_flags,
		    vmmap_flags & VM_MAP_PAGEABLE ? " PAGEABLE" : "",
		    vmmap_flags & VM_MAP_INTRSAFE ? " INTRSAFE" : "",
		    vmmap_flags & VM_MAP_WIREFUTURE ? " WIREFUTURE" : "",
		    vmmap_flags & VM_MAP_BUSY ? " BUSY" : "",
		    vmmap_flags & VM_MAP_WANTLOCK ? " WANTLOCK" : "",
#if VM_MAP_TOPDOWN > 0
		    vmmap_flags & VM_MAP_TOPDOWN ? " TOPDOWN" :
#endif
		    "");
		printf("    timestamp = %u }\n", D(vm_map, vm_map)->timestamp);
	}
	/* ddb-style summary line for the map. */
	if (print_ddb) {
		printf("MAP %p: [0x%lx->0x%lx]\n", P(vm_map),
		    D(vm_map, vm_map)->min_offset,
		    D(vm_map, vm_map)->max_offset);
		printf("\tsz=%ld, ref=%d, version=%d, flags=0x%x\n",
		    D(vm_map, vm_map)->size,
		    D(vm_map, vm_map)->ref_count,
		    D(vm_map, vm_map)->timestamp,
		    D(vm_map, vm_map)->flags);
		printf("\tpmap=%p(resident=<unknown>)\n",
		    D(vm_map, vm_map)->pmap);
	}

	/* headers */
#ifdef DISABLED_HEADERS
	if (print_map)
		printf("%-*s %-*s rwx RWX CPY NCP I W A\n",
		    (int)sizeof(long) * 2 + 2, "Start",
		    (int)sizeof(long) * 2 + 2, "End");
	if (print_maps)
		printf("%-*s %-*s rwxp %-*s Dev   Inode      File\n",
		    (int)sizeof(long) * 2 + 0, "Start",
		    (int)sizeof(long) * 2 + 0, "End",
		    (int)sizeof(long) * 2 + 0, "Offset");
	if (print_solaris)
		printf("%-*s %*s Protection        File\n",
		    (int)sizeof(long) * 2 + 0, "Start",
		    (int)sizeof(int) * 2 - 1,  "Size ");
#endif
	if (print_all)
		printf("%-*s %-*s %*s %-*s rwxpc  RWX  I/W/A Dev  %*s - File\n",
		    (int)sizeof(long) * 2, "Start",
		    (int)sizeof(long) * 2, "End",
		    (int)sizeof(int)  * 2, "Size ",
		    (int)sizeof(long) * 2, "Offset",
		    (int)sizeof(int)  * 2, "Inode");

	/* these are the "sub entries" */
	/* Load the whole entry tree locally, walk it, then free it. */
	RB_ROOT(&D(vm_map, vm_map)->addr) =
	    load_vm_map_entries(kd, RB_ROOT(&D(vm_map, vm_map)->addr), NULL);
	RB_FOREACH(vm_map_entry, uvm_map_addr, &D(vm_map, vm_map)->addr)
		total += dump_vm_map_entry(kd, vmspace, vm_map_entry, sum);
	unload_vm_map_entries(RB_ROOT(&D(vm_map, vm_map)->addr));

	if (print_solaris)
		printf("%-*s %8luK\n",
		    (int)sizeof(void *) * 2 - 2, " total",
		    (unsigned long)total);
	if (print_all)
		printf("%-*s %9luk\n",
		    (int)sizeof(void *) * 4 - 1, " total",
		    (unsigned long)total);
}