int
mib_fetch_route(void)
{
	u_char *rtab, *next;
	size_t len;
	struct sroute *r, *r1;
	struct rt_msghdr *rtm;
	struct sockaddr *addrs[RTAX_MAX];

	if (route_tick != 0 && route_tick + ROUTE_UPDATE_INTERVAL > this_tick)
		return (0);

	/*
	 * Remove all routes
	 */
	r = RB_MIN(sroutes, &sroutes);
	while (r != NULL) {
		r1 = RB_NEXT(sroutes, &sroutes, r);
		RB_REMOVE(sroutes, &sroutes, r);
		free(r);
		r = r1;
	}
	route_total = 0;

	if ((rtab = mib_fetch_rtab(AF_INET, NET_RT_DUMP, 0, &len)) == NULL)
		return (-1);

	for (next = rtab; next < rtab + len; next += rtm->rtm_msglen) {
		rtm = (struct rt_msghdr *)(void *)next;
		if (rtm->rtm_type != RTM_GET ||
		    !(rtm->rtm_flags & RTF_UP))
			continue;
		mib_extract_addrs(rtm->rtm_addrs, (u_char *)(rtm + 1), addrs);

		mib_sroute_process(rtm, addrs[RTAX_GATEWAY], addrs[RTAX_DST],
		    addrs[RTAX_NETMASK]);
	}

#if 0
	u_int n = 0;
	r = RB_MIN(sroutes, &sroutes);
	while (r != NULL) {
		printf("%u: ", n++);
		sroute_print(r);
		printf("\n");
		r = RB_NEXT(sroutes, &sroutes, r);
	}
#endif
	free(rtab);
	route_tick = get_ticks();

	return (0);
}
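
Most of the examples collected here share one idiom: walk a <sys/tree.h> red-black tree in order with RB_MIN()/RB_NEXT(), saving the successor before the current node is removed or freed. The following stand-alone sketch is not taken from any of the examples; the itemtree/item names are made up for illustration, and it assumes a BSD-style <sys/tree.h> is available.

#include <sys/tree.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	RB_ENTRY(item)	entry;
	int		key;
};

static int
item_cmp(struct item *a, struct item *b)
{
	/* Return <0, 0 or >0, as required by RB_GENERATE(). */
	return (a->key < b->key ? -1 : a->key > b->key);
}

RB_HEAD(itemtree, item) items = RB_INITIALIZER(&items);
RB_GENERATE(itemtree, item, entry, item_cmp)

int
main(void)
{
	struct item *it, *next;
	int i;

	for (i = 0; i < 10; i++) {
		if ((it = calloc(1, sizeof(*it))) == NULL)
			return (1);
		it->key = i;
		RB_INSERT(itemtree, &items, it);
	}

	/* Grab the successor with RB_NEXT() *before* removing the node. */
	for (it = RB_MIN(itemtree, &items); it != NULL; it = next) {
		next = RB_NEXT(itemtree, &items, it);
		printf("%d\n", it->key);
		RB_REMOVE(itemtree, &items, it);
		free(it);
	}
	return (0);
}
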
/*
 * Timeout inactive nodes.
 */
void Voodoo80211Device::
ieee80211_clean_nodes(struct ieee80211com *ic)
{
	struct ieee80211_node *ni, *next_ni;
	u_int gen = ic->ic_scangen++;		/* NB: ok 'cuz single-threaded*/
	int s;
    
	s = splnet();
	for (ni = RB_MIN(ieee80211_tree, &ic->ic_tree);
         ni != NULL; ni = next_ni) {
		next_ni = RB_NEXT(ieee80211_tree, &ic->ic_tree, ni);
		if (ic->ic_nnodes < ic->ic_max_nnodes)
			break;
		if (ni->ni_scangen == gen)	/* previously handled */
			continue;
		ni->ni_scangen = gen;
		if (ni->ni_refcnt > 0)
			continue;
		DPRINTF(("station %s purged from LRU cache\n",
                 ether_sprintf(ni->ni_macaddr)));
		/*
		 * No deauthenticate frame is sent in this port;
		 * just free the node.
		 */
		ieee80211_free_node(ic, ni);
		ic->ic_stats.is_node_timeout++;
	}
	splx(s);
}
Example #3
static int
kqueue_gc(void)
{
    int rv;
    struct kqueue *n1, *n2;

    /* Free any kqueue descriptor that is no longer needed */
    /* Sadly O(N), however needed in the case that a descriptor is
       closed and kevent(2) will never again be called on it. */
    for (n1 = RB_MIN(kqt, &kqtree); n1 != NULL; n1 = n2) {
        n2 = RB_NEXT(kqt, &kqtree, n1);

        if (n1->kq_ref == 0) {
            kqueue_free(n1);
        } else {
            rv = kqueue_validate(n1);
            if (rv == 0) 
                kqueue_free(n1);
            else if (rv < 0) 
                return (-1);
        }
    }

    return (0);
}
Example #4
uint32_t Flush_FlowTree(FlowSource_t *fs) {
struct FlowNode *node, *nxt;
uint32_t n = NumFlows;

	// Dump all incomplete flows to the file
	for (node = RB_MIN(FlowTree, FlowTree); node != NULL; node = nxt) {
		StorePcapFlow(fs, node);
		nxt = RB_NEXT(FlowTree, FlowTree, node);
#ifdef DEVEL
if ( node->left || node->right ) {
	assert(node->proto == 17);
	 node->left = node->right = NULL;
}
#endif
		Remove_Node(node);
	}

#ifdef DEVEL
	if ( NumFlows != 0 )
		LogError("### Flush_FlowTree() remaining flows: %u\n", NumFlows);
#endif

	UDP_list.list	= NULL;
	UDP_list.tail	= NULL;
	UDP_list.size	= 0;

	return n;

} // End of Flush_FlowTree
Example #5
/*
 * Dispatches signal {signum} to all active uv_signal_t watchers in all loops.
 * Returns 1 if the signal was dispatched to any watcher, or 0 if there were
 * no active signal watchers observing this signal.
 */
int uv__signal_dispatch(int signum) {
  uv_signal_t lookup;
  uv_signal_t* handle;
  int dispatched = 0;

  EnterCriticalSection(&uv__signal_lock);

  lookup.signum = signum;
  lookup.loop = NULL;

  for (handle = RB_NFIND(uv_signal_tree_s, &uv__signal_tree, &lookup);
       handle != NULL && handle->signum == signum;
       handle = RB_NEXT(uv_signal_tree_s, &uv__signal_tree, handle)) {
    unsigned long previous = InterlockedExchange(&handle->pending_signum, signum);

    if (!previous) {
      POST_COMPLETION_FOR_REQ(handle->loop, &handle->signal_req);
    }

    dispatched = 1;
  }

  LeaveCriticalSection(&uv__signal_lock);

  return dispatched;
}
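
uv__signal_dispatch() above pairs RB_NFIND() with RB_NEXT() to visit every watcher registered for one signal number: RB_NFIND() returns the smallest element not less than the probe, and the loop walks forward until the key changes. Below is a minimal sketch of the same range-scan pattern, reusing the hypothetical itemtree/item types from the sketch near the top of this page (it is not part of libuv):

static void
items_in_range(struct itemtree *head, int lo, int hi)
{
	struct item probe, *it;

	/* First element with key >= lo, then walk forward until key > hi. */
	probe.key = lo;
	for (it = RB_NFIND(itemtree, head, &probe);
	    it != NULL && it->key <= hi;
	    it = RB_NEXT(itemtree, head, it))
		printf("%d\n", it->key);
}
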
Example #6
/*
 * Timeout inactive nodes.  Note that we cannot hold the node
 * lock while sending a frame as this would lead to a LOR.
 * Instead we use a generation number to mark nodes that we've
 * scanned and drop the lock and restart a scan if we have to
 * time out a node.  Since we are single-threaded by virtue of
 * controlling the inactivity timer we can be sure this will
 * process each node only once.
 */
void
ieee80211_clean_nodes(struct ieee80211com *ic)
{
	struct ieee80211_node *ni, *next_ni;
	u_int gen = ic->ic_scangen++;		/* NB: ok 'cuz single-threaded*/

	IEEE80211_NODE_LOCK(ic);
	for (ni = RB_MIN(ieee80211_tree, &ic->ic_tree);
	    ni != NULL; ni = next_ni) {
		next_ni = RB_NEXT(ieee80211_tree, &ic->ic_tree, ni);
		if (ic->ic_nnodes <= ic->ic_max_nnodes)
			break;
		if (ni->ni_scangen == gen)	/* previously handled */
			continue;
		ni->ni_scangen = gen;
		if (ni->ni_refcnt > 0)
			continue;
		IEEE80211_DPRINTF(("station %s purged from LRU cache\n",
		    ether_sprintf(ni->ni_macaddr)));
		/*
		 * Send a deauthenticate frame.
		 */
		if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
			IEEE80211_NODE_UNLOCK(ic);
			IEEE80211_SEND_MGMT(ic, ni,
			    IEEE80211_FC0_SUBTYPE_DEAUTH,
			    IEEE80211_REASON_AUTH_EXPIRE);
			IEEE80211_NODE_LOCK(ic);
			ieee80211_node_leave(ic, ni);
		} else
			ieee80211_free_node(ic, ni);
		ic->ic_stats.is_node_timeout++;
	}
	IEEE80211_NODE_UNLOCK(ic);
}
Example #7
/*
 * Free tags tree.
 */
void
unloadtags(void)
{
	struct ctag *var, *nxt;
	
	for (var = RB_MIN(tagtree, &tags); var != NULL; var = nxt) {
		nxt = RB_NEXT(tagtree, &tags, var);
		RB_REMOVE(tagtree, &tags, var);
		/* line parsed with fparseln needs to be freed */
		free(var->tag);
		free(var);
	}
}
Example #8
/* Free 32-bit file number generation structures. */
void
msdosfs_fileno_free(struct mount *mp)
{
	struct msdosfsmount *pmp = VFSTOMSDOSFS(mp);
	struct msdosfs_fileno *mf, *next;

	for (mf = RB_MIN(msdosfs_filenotree, &pmp->pm_filenos); mf != NULL;
	    mf = next) {
		next = RB_NEXT(msdosfs_filenotree, &pmp->pm_filenos, mf);
		RB_REMOVE(msdosfs_filenotree, &pmp->pm_filenos, mf);
		free(mf, M_MSDOSFSFILENO);
	}
}
Example #9
void Dispose_FlowTree(void) {
struct FlowNode *node, *nxt;

	// Free all nodes remaining in the flow tree
	for (node = RB_MIN(FlowTree, FlowTree); node != NULL; node = nxt) {
		nxt = RB_NEXT(FlowTree, FlowTree, node);
		RB_REMOVE(FlowTree, FlowTree, node);
		if ( node->data ) 
			free(node->data);
	}
	free(FlowElementCache);
	FlowElementCache 	 = NULL;
	FlowNode_FreeList 	 = NULL;
	CacheOverflow = 0;

} // End of Dispose_FlowTree
Example #10
/* Free a tree. */
void
format_free(struct format_tree *ft)
{
	struct format_entry	*fe, *fe_next;

	fe_next = RB_MIN(format_tree, ft);
	while (fe_next != NULL) {
		fe = fe_next;
		fe_next = RB_NEXT(format_tree, ft, fe);

		RB_REMOVE(format_tree, ft, fe);
		free(fe->value);
		free(fe->key);
		free(fe);
	}

	free(ft);
}
Example #11
void drm_drawable_free_all(struct drm_device *dev)
{
	struct bsd_drm_drawable_info *info, *next;

	DRM_SPINLOCK(&dev->drw_lock);
	for (info = RB_MIN(drawable_tree, &dev->drw_head);
	    info != NULL ; info = next) {
		next = RB_NEXT(drawable_tree, &dev->drw_head, info);
		RB_REMOVE(drawable_tree, &dev->drw_head,
		    (struct bsd_drm_drawable_info *)info);
		DRM_SPINUNLOCK(&dev->drw_lock);
		free_unr(dev->drw_unrhdr, info->handle);
		free(info->info.rects, DRM_MEM_DRAWABLE);
		free(info, DRM_MEM_DRAWABLE);
		DRM_SPINLOCK(&dev->drw_lock);
	}
	DRM_SPINUNLOCK(&dev->drw_lock);
}
Example #12
void
ieee80211_inact_timeout(void *arg)
{
	struct ieee80211com *ic = arg;
	struct ieee80211_node *ni, *next_ni;
	int s;

	s = splnet();
	for (ni = RB_MIN(ieee80211_tree, &ic->ic_tree);
	    ni != NULL; ni = next_ni) {
		next_ni = RB_NEXT(ieee80211_tree, &ic->ic_tree, ni);
		if (ni->ni_refcnt > 0)
			continue;
		if (ni->ni_inact < IEEE80211_INACT_MAX)
			ni->ni_inact++;
	}
	splx(s);

	timeout_add_sec(&ic->ic_inact_timeout, IEEE80211_INACT_WAIT);
}
Example #13
static int
ramstat_iter(void **iter, char **name, struct stat_value *val)
{
	struct ramstat_entry *np;

	log_trace(TRACE_STAT, "ramstat: iter");
	if (RB_EMPTY(&stats))
		return 0;

	if (*iter == NULL)
		np = RB_ROOT(&stats);
	else
		np = RB_NEXT(stats_tree, &stats, *iter);

	*iter = np;
	if (np == NULL)
		return 0;

	*name = np->key;
	*val  = np->value;
	return 1;
}
int
cmd_set_option_exec(struct cmd *self, struct cmd_ctx *ctx)
{
	struct cmd_target_data		*data = self->data;
	const struct set_option_entry	*table;
	struct session			*s;
	struct winlink			*wl;
	struct client			*c;
	struct options			*oo;
	const struct set_option_entry   *entry, *opt;
	struct jobs			*jobs;
	struct job			*job, *nextjob;
	u_int				 i;
	int				 try_again;

	if (cmd_check_flag(data->chflags, 's')) {
		oo = &global_options;
		table = set_option_table;
	} else if (cmd_check_flag(data->chflags, 'w')) {
		table = set_window_option_table;
		if (cmd_check_flag(data->chflags, 'g'))
			oo = &global_w_options;
		else {
			wl = cmd_find_window(ctx, data->target, NULL);
			if (wl == NULL)
				return (-1);
			oo = &wl->window->options;
		}
	} else {
		table = set_session_option_table;
		if (cmd_check_flag(data->chflags, 'g'))
			oo = &global_s_options;
		else {
			s = cmd_find_session(ctx, data->target);
			if (s == NULL)
				return (-1);
			oo = &s->options;
		}
	}

	if (*data->arg == '\0') {
		ctx->error(ctx, "invalid option");
		return (-1);
	}

	entry = NULL;
	for (opt = table; opt->name != NULL; opt++) {
		if (strncmp(opt->name, data->arg, strlen(data->arg)) != 0)
			continue;
		if (entry != NULL) {
			ctx->error(ctx, "ambiguous option: %s", data->arg);
			return (-1);
		}
		entry = opt;

		/* Bail now if an exact match. */
		if (strcmp(entry->name, data->arg) == 0)
			break;
	}
	if (entry == NULL) {
		ctx->error(ctx, "unknown option: %s", data->arg);
		return (-1);
	}

	if (cmd_check_flag(data->chflags, 'u')) {
		if (cmd_check_flag(data->chflags, 'g')) {
			ctx->error(ctx,
			    "can't unset global option: %s", entry->name);
			return (-1);
		}
		if (data->arg2 != NULL) {
			ctx->error(ctx,
			    "value passed to unset option: %s", entry->name);
			return (-1);
		}

		options_remove(oo, entry->name);
		ctx->info(ctx, "unset option: %s", entry->name);
	} else {
		switch (entry->type) {
		case SET_OPTION_STRING:
			cmd_set_option_string(ctx, oo, entry,
			    data->arg2, cmd_check_flag(data->chflags, 'a'));
			break;
		case SET_OPTION_NUMBER:
			cmd_set_option_number(ctx, oo, entry, data->arg2);
			break;
		case SET_OPTION_KEYS:
			cmd_set_option_keys(ctx, oo, entry, data->arg2);
			break;
		case SET_OPTION_COLOUR:
			cmd_set_option_colour(ctx, oo, entry, data->arg2);
			break;
		case SET_OPTION_ATTRIBUTES:
			cmd_set_option_attributes(ctx, oo, entry, data->arg2);
			break;
		case SET_OPTION_FLAG:
			cmd_set_option_flag(ctx, oo, entry, data->arg2);
			break;
		case SET_OPTION_CHOICE:
			cmd_set_option_choice(ctx, oo, entry, data->arg2);
			break;
		}
	}

	recalculate_sizes();
	for (i = 0; i < ARRAY_LENGTH(&clients); i++) {
		c = ARRAY_ITEM(&clients, i);
		if (c != NULL && c->session != NULL)
			server_redraw_client(c);
	}

	/*
	 * Special-case: kill all persistent jobs if status-left, status-right
	 * or set-titles-string have changed. Persistent jobs are only used by
	 * the status line at the moment so this works XXX.
	 */
	if (strcmp(entry->name, "status-left") == 0 ||
	    strcmp(entry->name, "status-right") == 0 ||
	    strcmp(entry->name, "status") == 0 ||
	    strcmp(entry->name, "set-titles-string") == 0 ||
	    strcmp(entry->name, "window-status-format") == 0) {
		for (i = 0; i < ARRAY_LENGTH(&clients); i++) {
			c = ARRAY_ITEM(&clients, i);
			if (c == NULL || c->session == NULL)
				continue;

			jobs = &c->status_jobs;
			do {
				try_again = 0;
				job = RB_ROOT(jobs);
				while (job != NULL) {
					nextjob = RB_NEXT(jobs, jobs, job);
					if (job->flags & JOB_PERSIST) {
						job_remove(jobs, job);
						try_again = 1;
						break;
					}
					job = nextjob;
				}
			} while (try_again);
			server_redraw_client(c);
		}
	}

	return (0);
}
int
ieee80211_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ieee80211com *ic = (void *)ifp;
	struct ifreq *ifr = (struct ifreq *)data;
	int i, error = 0;
	struct ieee80211_nwid nwid;
	struct ieee80211_wpapsk *psk;
	struct ieee80211_wmmparams *wmm;
	struct ieee80211_power *power;
	struct ieee80211_bssid *bssid;
	struct ieee80211chanreq *chanreq;
	struct ieee80211_channel *chan;
	struct ieee80211_txpower *txpower;
	static const u_int8_t empty_macaddr[IEEE80211_ADDR_LEN] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	struct ieee80211_nodereq *nr, nrbuf;
	struct ieee80211_nodereq_all *na;
	struct ieee80211_node *ni;
	u_int32_t flags;

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, &ic->ic_ac, cmd, data);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCS80211NWID:
		if ((error = suser(curproc, 0)) != 0)
			break;
		if ((error = copyin(ifr->ifr_data, &nwid, sizeof(nwid))) != 0)
			break;
		if (nwid.i_len > IEEE80211_NWID_LEN) {
			error = EINVAL;
			break;
		}
		memset(ic->ic_des_essid, 0, IEEE80211_NWID_LEN);
		ic->ic_des_esslen = nwid.i_len;
		memcpy(ic->ic_des_essid, nwid.i_nwid, nwid.i_len);
		error = ENETRESET;
		break;
	case SIOCG80211NWID:
		memset(&nwid, 0, sizeof(nwid));
		switch (ic->ic_state) {
		case IEEE80211_S_INIT:
		case IEEE80211_S_SCAN:
			nwid.i_len = ic->ic_des_esslen;
			memcpy(nwid.i_nwid, ic->ic_des_essid, nwid.i_len);
			break;
		default:
			nwid.i_len = ic->ic_bss->ni_esslen;
			memcpy(nwid.i_nwid, ic->ic_bss->ni_essid, nwid.i_len);
			break;
		}
		error = copyout(&nwid, ifr->ifr_data, sizeof(nwid));
		break;
	case SIOCS80211NWKEY:
		if ((error = suser(curproc, 0)) != 0)
			break;
		error = ieee80211_ioctl_setnwkeys(ic, (void *)data);
		break;
	case SIOCG80211NWKEY:
		error = ieee80211_ioctl_getnwkeys(ic, (void *)data);
		break;
	case SIOCS80211WMMPARMS:
		if ((error = suser(curproc, 0)) != 0)
			break;
		if (!(ic->ic_flags & IEEE80211_C_QOS)) {
			error = ENODEV;
			break;
		}
		wmm = (struct ieee80211_wmmparams *)data;
		if (wmm->i_enabled)
			ic->ic_flags |= IEEE80211_F_QOS;
		else
			ic->ic_flags &= ~IEEE80211_F_QOS;
		error = ENETRESET;
		break;
	case SIOCG80211WMMPARMS:
		wmm = (struct ieee80211_wmmparams *)data;
		wmm->i_enabled = (ic->ic_flags & IEEE80211_F_QOS) ? 1 : 0;
		break;
	case SIOCS80211WPAPARMS:
		if ((error = suser(curproc, 0)) != 0)
			break;
		error = ieee80211_ioctl_setwpaparms(ic, (void *)data);
		break;
	case SIOCG80211WPAPARMS:
		error = ieee80211_ioctl_getwpaparms(ic, (void *)data);
		break;
	case SIOCS80211WPAPSK:
		if ((error = suser(curproc, 0)) != 0)
			break;
		psk = (struct ieee80211_wpapsk *)data;
		if (psk->i_enabled) {
			ic->ic_flags |= IEEE80211_F_PSK;
			memcpy(ic->ic_psk, psk->i_psk, sizeof(ic->ic_psk));
		} else {
			ic->ic_flags &= ~IEEE80211_F_PSK;
			memset(ic->ic_psk, 0, sizeof(ic->ic_psk));
		}
		error = ENETRESET;
		break;
	case SIOCG80211WPAPSK:
		psk = (struct ieee80211_wpapsk *)data;
		if (ic->ic_flags & IEEE80211_F_PSK) {
			psk->i_enabled = 1;
			/* do not show any keys to non-root user */
			if (suser(curproc, 0) != 0) {
				psk->i_enabled = 2;
				memset(psk->i_psk, 0, sizeof(psk->i_psk));
				break;	/* return ok but w/o key */
			}
			memcpy(psk->i_psk, ic->ic_psk, sizeof(psk->i_psk));
		} else
			psk->i_enabled = 0;
		break;
	case SIOCS80211POWER:
		if ((error = suser(curproc, 0)) != 0)
			break;
		power = (struct ieee80211_power *)data;
		ic->ic_lintval = power->i_maxsleep;
		if (power->i_enabled != 0) {
			if ((ic->ic_caps & IEEE80211_C_PMGT) == 0)
				error = EINVAL;
			else if ((ic->ic_flags & IEEE80211_F_PMGTON) == 0) {
				ic->ic_flags |= IEEE80211_F_PMGTON;
				error = ENETRESET;
			}
		} else {
			if (ic->ic_flags & IEEE80211_F_PMGTON) {
				ic->ic_flags &= ~IEEE80211_F_PMGTON;
				error = ENETRESET;
			}
		}
		break;
	case SIOCG80211POWER:
		power = (struct ieee80211_power *)data;
		power->i_enabled = (ic->ic_flags & IEEE80211_F_PMGTON) ? 1 : 0;
		power->i_maxsleep = ic->ic_lintval;
		break;
	case SIOCS80211BSSID:
		if ((error = suser(curproc, 0)) != 0)
			break;
		bssid = (struct ieee80211_bssid *)data;
		if (IEEE80211_ADDR_EQ(bssid->i_bssid, empty_macaddr))
			ic->ic_flags &= ~IEEE80211_F_DESBSSID;
		else {
			ic->ic_flags |= IEEE80211_F_DESBSSID;
			IEEE80211_ADDR_COPY(ic->ic_des_bssid, bssid->i_bssid);
		}
		if (ic->ic_opmode == IEEE80211_M_HOSTAP)
			break;
		switch (ic->ic_state) {
		case IEEE80211_S_INIT:
		case IEEE80211_S_SCAN:
			error = ENETRESET;
			break;
		default:
			if ((ic->ic_flags & IEEE80211_F_DESBSSID) &&
			    !IEEE80211_ADDR_EQ(ic->ic_des_bssid,
			    ic->ic_bss->ni_bssid))
				error = ENETRESET;
			break;
		}
		break;
	case SIOCG80211BSSID:
		bssid = (struct ieee80211_bssid *)data;
		switch (ic->ic_state) {
		case IEEE80211_S_INIT:
		case IEEE80211_S_SCAN:
			if (ic->ic_opmode == IEEE80211_M_HOSTAP)
				IEEE80211_ADDR_COPY(bssid->i_bssid,
				    ic->ic_myaddr);
			else if (ic->ic_flags & IEEE80211_F_DESBSSID)
				IEEE80211_ADDR_COPY(bssid->i_bssid,
				    ic->ic_des_bssid);
			else
				memset(bssid->i_bssid, 0, IEEE80211_ADDR_LEN);
			break;
		default:
			IEEE80211_ADDR_COPY(bssid->i_bssid,
			    ic->ic_bss->ni_bssid);
			break;
		}
		break;
	case SIOCS80211CHANNEL:
		if ((error = suser(curproc, 0)) != 0)
			break;
		chanreq = (struct ieee80211chanreq *)data;
		if (chanreq->i_channel == IEEE80211_CHAN_ANY)
			ic->ic_des_chan = IEEE80211_CHAN_ANYC;
		else if (chanreq->i_channel > IEEE80211_CHAN_MAX ||
		    isclr(ic->ic_chan_active, chanreq->i_channel)) {
			error = EINVAL;
			break;
		} else
			ic->ic_ibss_chan = ic->ic_des_chan =
			    &ic->ic_channels[chanreq->i_channel];
		switch (ic->ic_state) {
		case IEEE80211_S_INIT:
		case IEEE80211_S_SCAN:
			error = ENETRESET;
			break;
		default:
			if (ic->ic_opmode == IEEE80211_M_STA) {
				if (ic->ic_des_chan != IEEE80211_CHAN_ANYC &&
				    ic->ic_bss->ni_chan != ic->ic_des_chan)
					error = ENETRESET;
			} else {
				if (ic->ic_bss->ni_chan != ic->ic_ibss_chan)
					error = ENETRESET;
			}
			break;
		}
		break;
	case SIOCG80211CHANNEL:
		chanreq = (struct ieee80211chanreq *)data;
		switch (ic->ic_state) {
		case IEEE80211_S_INIT:
		case IEEE80211_S_SCAN:
			if (ic->ic_opmode == IEEE80211_M_STA)
				chan = ic->ic_des_chan;
			else
				chan = ic->ic_ibss_chan;
			break;
		default:
			chan = ic->ic_bss->ni_chan;
			break;
		}
		chanreq->i_channel = ieee80211_chan2ieee(ic, chan);
		break;
#if 0
	case SIOCG80211ZSTATS:
#endif
	case SIOCG80211STATS:
		ifr = (struct ifreq *)data;
		copyout(&ic->ic_stats, ifr->ifr_data, sizeof (ic->ic_stats));
#if 0
		if (cmd == SIOCG80211ZSTATS)
			memset(&ic->ic_stats, 0, sizeof(ic->ic_stats));
#endif
		break;
	case SIOCS80211TXPOWER:
		if ((error = suser(curproc, 0)) != 0)
			break;
		txpower = (struct ieee80211_txpower *)data;
		if ((ic->ic_caps & IEEE80211_C_TXPMGT) == 0) {
			error = EINVAL;
			break;
		}
		if (IEEE80211_TXPOWER_MIN > txpower->i_val ||
		    txpower->i_val > IEEE80211_TXPOWER_MAX) {
			error = EINVAL;
			break;
		}
		ic->ic_txpower = txpower->i_val;
		error = ENETRESET;
		break;
	case SIOCG80211TXPOWER:
		txpower = (struct ieee80211_txpower *)data;
		if ((ic->ic_caps & IEEE80211_C_TXPMGT) == 0)
			error = EINVAL;
		else
			txpower->i_val = ic->ic_txpower;
		break;
	case SIOCSIFMTU:
		ifr = (struct ifreq *)data;
		if (!(IEEE80211_MTU_MIN <= ifr->ifr_mtu &&
		    ifr->ifr_mtu <= IEEE80211_MTU_MAX))
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCS80211SCAN:
		if ((error = suser(curproc, 0)) != 0)
			break;
		if (ic->ic_opmode == IEEE80211_M_HOSTAP)
			break;
		if ((ifp->if_flags & IFF_UP) == 0) {
			error = ENETDOWN;
			break;
		}
		if ((ic->ic_scan_lock & IEEE80211_SCAN_REQUEST) == 0) {
			if (ic->ic_scan_lock & IEEE80211_SCAN_LOCKED)
				ic->ic_scan_lock |= IEEE80211_SCAN_RESUME;
			ic->ic_scan_lock |= IEEE80211_SCAN_REQUEST;
			if (ic->ic_state != IEEE80211_S_SCAN)
				ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
		}
		/* Let the userspace process wait for completion */
		error = tsleep(&ic->ic_scan_lock, PCATCH, "80211scan",
		    hz * IEEE80211_SCAN_TIMEOUT);
		break;
	case SIOCG80211NODE:
		nr = (struct ieee80211_nodereq *)data;
		ni = ieee80211_find_node(ic, nr->nr_macaddr);
		if (ni == NULL) {
			error = ENOENT;
			break;
		}
		ieee80211_node2req(ic, ni, nr);
		break;
	case SIOCS80211NODE:
		if ((error = suser(curproc, 0)) != 0)
			break;
		if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
			error = EINVAL;
			break;
		}
		nr = (struct ieee80211_nodereq *)data;

		ni = ieee80211_find_node(ic, nr->nr_macaddr);
		if (ni == NULL)
			ni = ieee80211_alloc_node(ic, nr->nr_macaddr);
		if (ni == NULL) {
			error = ENOENT;
			break;
		}

		if (nr->nr_flags & IEEE80211_NODEREQ_COPY)
			ieee80211_req2node(ic, nr, ni);
		break;
	case SIOCS80211DELNODE:
		if ((error = suser(curproc, 0)) != 0)
			break;
		nr = (struct ieee80211_nodereq *)data;
		ni = ieee80211_find_node(ic, nr->nr_macaddr);
		if (ni == NULL)
			error = ENOENT;
		else if (ni == ic->ic_bss)
			error = EPERM;
		else {
			if (ni->ni_state == IEEE80211_STA_COLLECT)
				break;

			/* Disassociate station. */
			if (ni->ni_state == IEEE80211_STA_ASSOC)
				IEEE80211_SEND_MGMT(ic, ni,
				    IEEE80211_FC0_SUBTYPE_DISASSOC,
				    IEEE80211_REASON_ASSOC_LEAVE);

			/* Deauth station. */
			if (ni->ni_state >= IEEE80211_STA_AUTH)
				IEEE80211_SEND_MGMT(ic, ni,
				    IEEE80211_FC0_SUBTYPE_DEAUTH,
				    IEEE80211_REASON_AUTH_LEAVE);

			ieee80211_release_node(ic, ni);
		}
		break;
	case SIOCG80211ALLNODES:
		na = (struct ieee80211_nodereq_all *)data;
		na->na_nodes = i = 0;
		ni = RB_MIN(ieee80211_tree, &ic->ic_tree);
		while (ni && na->na_size >=
		    i + sizeof(struct ieee80211_nodereq)) {
			ieee80211_node2req(ic, ni, &nrbuf);
			error = copyout(&nrbuf, (caddr_t)na->na_node + i,
			    sizeof(struct ieee80211_nodereq));
			if (error)
				break;
			i += sizeof(struct ieee80211_nodereq);
			na->na_nodes++;
			ni = RB_NEXT(ieee80211_tree, &ic->ic_tree, ni);
		}
		break;
	case SIOCG80211FLAGS:
		flags = ic->ic_flags;
		if (ic->ic_opmode != IEEE80211_M_HOSTAP)
			flags &= ~IEEE80211_F_HOSTAPMASK;
		ifr->ifr_flags = flags >> IEEE80211_F_USERSHIFT;
		break;
	case SIOCS80211FLAGS:
		if ((error = suser(curproc, 0)) != 0)
			break;
		flags = (u_int32_t)ifr->ifr_flags << IEEE80211_F_USERSHIFT;
		if (ic->ic_opmode != IEEE80211_M_HOSTAP &&
		    (flags & IEEE80211_F_HOSTAPMASK)) {
			error = EINVAL;
			break;
		}
		ic->ic_flags = (ic->ic_flags & ~IEEE80211_F_USERMASK) | flags;
		error = ENETRESET;
		break;
	default:
		error = ENOTTY;
		break;
	}
	return error;
}
Example #16
int
cmd_set_option_exec(struct cmd *self, struct cmd_ctx *ctx)
{
	struct cmd_target_data			*data = self->data;
	const struct options_table_entry	*table, *oe, *oe_loop;
	struct session				*s;
	struct winlink				*wl;
	struct client				*c;
	struct options				*oo;
	struct jobs				*jobs;
	struct job				*job, *nextjob;
	u_int					 i;
	int					 try_again;

	/* Work out the options tree and table to use. */
	if (cmd_check_flag(data->chflags, 's')) {
		oo = &global_options;
		table = server_options_table;
	} else if (cmd_check_flag(data->chflags, 'w')) {
		table = window_options_table;
		if (cmd_check_flag(data->chflags, 'g'))
			oo = &global_w_options;
		else {
			wl = cmd_find_window(ctx, data->target, NULL);
			if (wl == NULL)
				return (-1);
			oo = &wl->window->options;
		}
	} else {
		table = session_options_table;
		if (cmd_check_flag(data->chflags, 'g'))
			oo = &global_s_options;
		else {
			s = cmd_find_session(ctx, data->target);
			if (s == NULL)
				return (-1);
			oo = &s->options;
		}
	}

	/* Find the option table entry. */
	oe = NULL;
	for (oe_loop = table; oe_loop->name != NULL; oe_loop++) {
		if (strncmp(oe_loop->name, data->arg, strlen(data->arg)) != 0)
			continue;
		if (oe != NULL) {
			ctx->error(ctx, "ambiguous option: %s", data->arg);
			return (-1);
		}
		oe = oe_loop;

		/* Bail now if an exact match. */
		if (strcmp(oe->name, data->arg) == 0)
			break;
	}
	if (oe == NULL) {
		ctx->error(ctx, "unknown option: %s", data->arg);
		return (-1);
	}

	/* Unset or set the option. */
	if (cmd_check_flag(data->chflags, 'u')) {
		if (cmd_set_option_unset(self, ctx, oe, oo) != 0)
			return (-1);
	} else {
		if (cmd_set_option_set(self, ctx, oe, oo) != 0)
			return (-1);
	}

	/* Update sizes and redraw. May not need it but meh. */
	recalculate_sizes();
	for (i = 0; i < ARRAY_LENGTH(&clients); i++) {
		c = ARRAY_ITEM(&clients, i);
		if (c != NULL && c->session != NULL)
			server_redraw_client(c);
	}

	/*
	 * Special-case: kill all persistent jobs if status-left, status-right
	 * or set-titles-string have changed. Persistent jobs are only used by
	 * the status line at the moment so this works XXX.
	 */
	if (strcmp(oe->name, "status-left") == 0 ||
	    strcmp(oe->name, "status-right") == 0 ||
	    strcmp(oe->name, "status") == 0 ||
	    strcmp(oe->name, "set-titles-string") == 0 ||
	    strcmp(oe->name, "window-status-format") == 0) {
		for (i = 0; i < ARRAY_LENGTH(&clients); i++) {
			c = ARRAY_ITEM(&clients, i);
			if (c == NULL || c->session == NULL)
				continue;

			jobs = &c->status_jobs;
			do {
				try_again = 0;
				job = RB_ROOT(jobs);
				while (job != NULL) {
					nextjob = RB_NEXT(jobs, jobs, job);
					if (job->flags & JOB_PERSIST) {
						job_remove(jobs, job);
						try_again = 1;
						break;
					}
					job = nextjob;
				}
			} while (try_again);
			server_redraw_client(c);
		}
	}

	return (0);
}
Example #17
tud_t *tud_next(tud_t *tud)
	{ return RB_NEXT(tudtree_s, &Head, tud); }
static int __init rb_test_init(void)
{
        struct fileInfo *filp = NULL;
        struct scan_cache_record *scan_cache_record_ptr = NULL;
        int ret = 0;
        printk("RB test init\n");
        init_scan_cache();

        filp = kzalloc(sizeof(struct fileInfo), GFP_KERNEL);
        if (filp) {
                filp->dev_id = 100;
                filp->inode_number = 1001;
                filp->inode_generation = 12345;
                ret = insert_scan_cache_record(filp);
                if (ret != 0)
                        printk("Fail to insert : %lu\n", filp->inode_number);

                kfree(filp);
                filp = NULL;
        }


        filp = kzalloc(sizeof(struct fileInfo), GFP_KERNEL);
        if (filp) {
                filp->dev_id = 100;
                filp->inode_number = 1009;
                filp->inode_generation = 12346;
                ret = insert_scan_cache_record(filp);
                if (ret != 0)
                        printk("Fail to insert : %lu\n", filp->inode_number);
                kfree(filp);
                filp = NULL;
        }


        filp = kzalloc(sizeof(struct fileInfo), GFP_KERNEL);
        if (filp) {
                filp->dev_id = 100;
                filp->inode_number = 1003;
                filp->inode_generation = 12347;
                ret = insert_scan_cache_record(filp);
                if (ret != 0)
                        printk("Fail to insert : %lu\n", filp->inode_number);

                kfree(filp);
                filp = NULL;
        }


        filp = kzalloc(sizeof(struct fileInfo), GFP_KERNEL);
        if (filp) {
                filp->dev_id = 100;
                filp->inode_number = 1002;
                filp->inode_generation = 12348;
                ret = insert_scan_cache_record(filp);
                if (ret != 0)
                        printk("Fail to insert : %lu\n", filp->inode_number);

                kfree(filp);
                filp = NULL;
        }


        filp = kzalloc(sizeof(struct fileInfo), GFP_KERNEL);
        if (filp) {
                filp->dev_id = 100;
                filp->inode_number = 1006;
                filp->inode_generation = 12349;
                ret = insert_scan_cache_record(filp);
                if (ret != 0)
                        printk("Fail to insert : %lu\n", filp->inode_number);

                kfree(filp);
                filp = NULL;
        }




        filp = kzalloc(sizeof(struct fileInfo), GFP_KERNEL);
        if (filp) {
                filp->dev_id = 99;
                filp->inode_number = 1006;
                filp->inode_generation = 12351;
                ret = insert_scan_cache_record(filp);
                if (ret != 0)
                        printk("Fail to insert : %lu\n", filp->inode_number);

                kfree(filp);
                filp = NULL;
        }


        filp = kzalloc(sizeof(struct fileInfo), GFP_KERNEL);
        if (filp) {
                filp->dev_id = 98;
                filp->inode_number = 1006;
                filp->inode_generation = 12352;
                ret = insert_scan_cache_record(filp);
                if (ret != 0)
                        printk("Fail to insert : %lu\n", filp->inode_number);

                kfree(filp);
                filp = NULL;
        }


        //Duplicate entry, which should get updated since the (dev_id, inode_number) key is the same
        filp = kzalloc(sizeof(struct fileInfo), GFP_KERNEL);
        if (filp) {
                filp->dev_id = 100;
                filp->inode_number = 1006;
                filp->inode_generation = 12350;
                ret = insert_scan_cache_record(filp);
                if (ret != 0)
                        printk("Fail to insert : %lu\n", filp->inode_number);

                kfree(filp);
                filp = NULL;
        }

        //First Element:
        scan_cache_record_ptr = RB_FIRST(&scan_cache_rb_tree,
                                      struct scan_cache_record, cache_rb_node);
        if (scan_cache_record_ptr) {
                printk("First: %lu %lu %u\n", scan_cache_record_ptr->dev_id,
                       scan_cache_record_ptr->inode_number,
                       scan_cache_record_ptr->inode_generation);
        }


        //Last Element:
        scan_cache_record_ptr = RB_LAST(&scan_cache_rb_tree,
                                      struct scan_cache_record, cache_rb_node);
        if (scan_cache_record_ptr) {
                printk("Last: %lu %lu %u\n", scan_cache_record_ptr->dev_id,
                       scan_cache_record_ptr->inode_number,
                       scan_cache_record_ptr->inode_generation);
        }


        //Iterating
        scan_cache_record_ptr = RB_FIRST(&scan_cache_rb_tree,
                                      struct scan_cache_record, cache_rb_node);
        if (scan_cache_record_ptr) {
                printk("First: %lu %lu %u\n", scan_cache_record_ptr->dev_id,
                       scan_cache_record_ptr->inode_number,
                       scan_cache_record_ptr->inode_generation);
        }

        while (scan_cache_record_ptr) {
                scan_cache_record_ptr = RB_NEXT(scan_cache_record_ptr, cache_rb_node);
                if (scan_cache_record_ptr) {
                        printk("Next: %lu %lu %u\n", scan_cache_record_ptr->dev_id,
                        scan_cache_record_ptr->inode_number,
                        scan_cache_record_ptr->inode_generation);
                }
        }
        // End of iteration

        //Find and Delete
        filp = kzalloc(sizeof(struct fileInfo), GFP_KERNEL);
        if (filp) {
                bool ret;
                filp->dev_id = 98;
                filp->inode_number = 1006;
                filp->inode_generation = 12352;
                ret = find_remove_scan_cache_record(filp);
                if (ret == false)
                        printk("Fail to find and delete : %lu %lu %lu\n",
                               filp->dev_id,
                               filp->inode_number,
                               filp->inode_generation);
                kfree(filp);
                filp = NULL;
        }
#if 1
        printk("------ After Deleting Node: Iterating------------\n");
        //Iterating
        scan_cache_record_ptr = RB_FIRST(&scan_cache_rb_tree,
                                      struct scan_cache_record, cache_rb_node);
        if (scan_cache_record_ptr) {
                printk("First: %lu %lu %u\n", scan_cache_record_ptr->dev_id,
                       scan_cache_record_ptr->inode_number,
                       scan_cache_record_ptr->inode_generation);
        }

        while (scan_cache_record_ptr) {
                scan_cache_record_ptr = RB_NEXT(scan_cache_record_ptr, cache_rb_node);
                if (scan_cache_record_ptr) {
                        printk("Next: %lu %lu %u\n", scan_cache_record_ptr->dev_id,
                        scan_cache_record_ptr->inode_number,
                        scan_cache_record_ptr->inode_generation);
                }
        }
        // End of iteration
#endif


        return 0;    // Non-zero return means that the module couldn't be loaded.
}
Example #19
ypresp_key_val *
ypproc_next_2_svc(ypreq_key *arg, struct svc_req *req)
{
	struct userent			 ukey;
	struct userent			*ue;
	struct groupent			 gkey;
	struct groupent			*ge;
	char				*line;
	static struct ypresp_key_val	 res;
	char				 key[YPMAXRECORD+1];

	if (yp_valid_domain(arg->domain, (struct ypresp_val *)&res) == -1)
		return (&res);

	if (strcmp(arg->map, "passwd.byname") == 0 ||
	    strcmp(arg->map, "master.passwd.byname") == 0) {
		bzero(key, sizeof(key));
		(void)strncpy(key, arg->key.keydat_val,
		    arg->key.keydat_len);
		ukey.ue_line = key;
		if ((ue = RB_FIND(user_name_tree, env->sc_user_names,
		    &ukey)) == NULL) {
			/*
			 * canacar's trick:
			 * the user might have been deleted in between calls
			 * to next since the tree may be modified by a reload.
			 * next should still return the next user in
			 * lexicographical order, hence insert the search key
			 * and look up the next field, then remove it again.
			 */
			RB_INSERT(user_name_tree, env->sc_user_names, &ukey);
			if ((ue = RB_NEXT(user_name_tree, &env->sc_user_names,
			    &ukey)) == NULL) {
				RB_REMOVE(user_name_tree, env->sc_user_names,
				    &ukey);
				res.stat = YP_NOKEY;
				return (&res);
			}
			RB_REMOVE(user_name_tree, env->sc_user_names, &ukey);
		}
		line = ue->ue_line + (strlen(ue->ue_line) + 1);
		line = line + (strlen(line) + 1);
		yp_make_keyval(&res, line, line);
		return (&res);


	} else if (strcmp(arg->map, "group.byname") == 0) {
		bzero(key, sizeof(key));
		(void)strncpy(key, arg->key.keydat_val,
		    arg->key.keydat_len);
		
		gkey.ge_line = key;
		if ((ge = RB_FIND(group_name_tree, env->sc_group_names,
		    &gkey)) == NULL) {
			/*
			 * canacar's trick reloaded.
			 */
			RB_INSERT(group_name_tree, env->sc_group_names, &gkey);
			if ((ge = RB_NEXT(group_name_tree, &env->sc_group_names,
			    &gkey)) == NULL) {
				RB_REMOVE(group_name_tree, env->sc_group_names,
				    &gkey);
				res.stat = YP_NOKEY;
				return (&res);
			}
			RB_REMOVE(group_name_tree, env->sc_group_names, &gkey);
		}

		line = ge->ge_line + (strlen(ge->ge_line) + 1);
		line = line + (strlen(line) + 1);
		yp_make_keyval(&res, line, line);
		return (&res);
	} else {
		log_debug("unknown map %s", arg->map);
		res.stat = YP_NOMAP;
		return (&res);
	}
}
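
The "canacar's trick" used above is a general one: when the exact key may have been deleted between calls, temporarily inserting a stack-allocated probe node, reading its in-order successor with RB_NEXT(), and removing the probe again still yields the next entry in lexicographical order. Here is a hedged sketch of the trick, again using the hypothetical itemtree/item types from the first sketch rather than anything from ypldap:

static struct item *
item_successor(struct itemtree *head, int key)
{
	struct item probe, *found, *next;

	probe.key = key;
	if ((found = RB_FIND(itemtree, head, &probe)) != NULL)
		return (RB_NEXT(itemtree, head, found));

	/*
	 * The exact key is gone (e.g. the tree was reloaded in between
	 * calls): insert a temporary probe node, read its successor,
	 * then take the probe back out.
	 */
	RB_INSERT(itemtree, head, &probe);
	next = RB_NEXT(itemtree, head, &probe);
	RB_REMOVE(itemtree, head, &probe);
	return (next);
}
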
Example #20
struct options_entry *
options_next(struct options_entry *o)
{
	return (RB_NEXT(options_tree, &oo->tree, o));
}
Example #21
/*
 * Helper function for tmpfs_readdir.  Returns as much directory entries
 * as can fit in the uio space.  The read starts at uio->uio_offset.
 * The function returns 0 on success, -1 if there was not enough space
 * in the uio structure to hold the directory entry or an appropriate
 * error code if another error happens.
 *
 * Caller must hold the node locked (shared ok)
 */
int
tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, off_t *cntp)
{
	int error;
	off_t startcookie;
	struct tmpfs_dirent *de;

	TMPFS_VALIDATE_DIR(node);

	/*
	 * Locate the first directory entry we have to return.  We have cached
	 * the last readdir in the node, so use those values if appropriate.
	 * Otherwise do a linear scan to find the requested entry.
	 */
	startcookie = uio->uio_offset;
	KKASSERT(startcookie != TMPFS_DIRCOOKIE_DOT);
	KKASSERT(startcookie != TMPFS_DIRCOOKIE_DOTDOT);

	if (startcookie == TMPFS_DIRCOOKIE_EOF)
		return 0;

	de = tmpfs_dir_lookupbycookie(node, startcookie);
	if (de == NULL)
		return EINVAL;

	/*
	 * Read as much entries as possible; i.e., until we reach the end of
	 * the directory or we exhaust uio space.
	 */
	do {
		ino_t d_ino;
		uint8_t d_type;

		/* Create a dirent structure representing the current
		 * tmpfs_node and fill it. */
		d_ino = de->td_node->tn_id;
		switch (de->td_node->tn_type) {
		case VBLK:
			d_type = DT_BLK;
			break;

		case VCHR:
			d_type = DT_CHR;
			break;

		case VDIR:
			d_type = DT_DIR;
			break;

		case VFIFO:
			d_type = DT_FIFO;
			break;

		case VLNK:
			d_type = DT_LNK;
			break;

		case VREG:
			d_type = DT_REG;
			break;

		case VSOCK:
			d_type = DT_SOCK;
			break;

		default:
			panic("tmpfs_dir_getdents: type %p %d",
			    de->td_node, (int)de->td_node->tn_type);
		}
		KKASSERT(de->td_namelen < 256); /* 255 + 1 */

		if (vop_write_dirent(&error, uio, d_ino, d_type,
		    de->td_namelen, de->td_name)) {
			error = -1;
			break;
		}

		(*cntp)++;
		de = RB_NEXT(tmpfs_dirtree_cookie,
			     node->tn_dir.tn_cookietree, de);
	} while (error == 0 && uio->uio_resid > 0 && de != NULL);

	/* Update the offset and cache. */
	if (de == NULL) {
		uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
	} else {
		uio->uio_offset = tmpfs_dircookie(de);
	}

	return error;
}
Example #22
/*
 * Timeout inactive nodes.
 *
 * If called because of a cache timeout, which happens only in hostap and ibss
 * modes, clean all inactive cached or authenticated nodes but don't de-auth
 * any associated nodes.
 *
 * Else, this function is called because a new node must be allocated but the
 * node cache is full. In this case, return as soon as a free slot was made
 * available. If acting as hostap, clean cached nodes regardless of their
 * recent activity and also allow de-authing of authenticated nodes older
 * than one cache wait interval, and de-authing of inactive associated nodes.
 */
void
ieee80211_clean_nodes(struct ieee80211com *ic, int cache_timeout)
{
	struct ieee80211_node *ni, *next_ni;
	u_int gen = ic->ic_scangen++;		/* NB: ok 'cuz single-threaded*/
	int s;
#ifndef IEEE80211_STA_ONLY
	int nnodes = 0;
	struct ifnet *ifp = &ic->ic_if;
#endif

	s = splnet();
	for (ni = RB_MIN(ieee80211_tree, &ic->ic_tree);
	    ni != NULL; ni = next_ni) {
		next_ni = RB_NEXT(ieee80211_tree, &ic->ic_tree, ni);
		if (!cache_timeout && ic->ic_nnodes < ic->ic_max_nnodes)
			break;
		if (ni->ni_scangen == gen)	/* previously handled */
			continue;
#ifndef IEEE80211_STA_ONLY
		nnodes++;
#endif
		ni->ni_scangen = gen;
		if (ni->ni_refcnt > 0)
			continue;
#ifndef IEEE80211_STA_ONLY
		if ((ic->ic_opmode == IEEE80211_M_HOSTAP ||
		    ic->ic_opmode == IEEE80211_M_IBSS) &&
		    ic->ic_state == IEEE80211_S_RUN) {
			if (cache_timeout) {
				if (ni->ni_state != IEEE80211_STA_COLLECT &&
				    (ni->ni_state == IEEE80211_STA_ASSOC ||
				    ni->ni_inact < IEEE80211_INACT_MAX))
					continue;
			} else {
				if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
				    ((ni->ni_state == IEEE80211_STA_ASSOC &&
				    ni->ni_inact < IEEE80211_INACT_MAX) ||
				    (ni->ni_state == IEEE80211_STA_AUTH &&
				     ni->ni_inact == 0)))
				    	continue;

				if (ic->ic_opmode == IEEE80211_M_IBSS &&
				    ni->ni_state != IEEE80211_STA_COLLECT &&
				    ni->ni_state != IEEE80211_STA_CACHE &&
				    ni->ni_inact < IEEE80211_INACT_MAX)
					continue;
			}
		}
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: station %s purged from node cache\n",
			    ifp->if_xname, ether_sprintf(ni->ni_macaddr));
#endif
		/*
		 * If we're hostap and the node is authenticated, send
		 * a deauthentication frame. The node will be freed when
		 * the driver calls ieee80211_release_node().
		 */
#ifndef IEEE80211_STA_ONLY
		nnodes--;
		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
		    ni->ni_state >= IEEE80211_STA_AUTH &&
		    ni->ni_state != IEEE80211_STA_COLLECT) {
			splx(s);
			IEEE80211_SEND_MGMT(ic, ni,
			    IEEE80211_FC0_SUBTYPE_DEAUTH,
			    IEEE80211_REASON_AUTH_EXPIRE);
			s = splnet();
			ieee80211_node_leave(ic, ni);
		} else
#endif
			ieee80211_free_node(ic, ni);
		ic->ic_stats.is_node_timeout++;
	}

#ifndef IEEE80211_STA_ONLY
	/* 
	 * During a cache timeout we iterate over all nodes.
	 * Check for node leaks by comparing the actual number of cached
	 * nodes with the ic_nnodes count, which is maintained while adding
	 * and removing nodes from the cache.
	 */
	if ((ifp->if_flags & IFF_DEBUG) && cache_timeout &&
	    nnodes != ic->ic_nnodes)
		printf("%s: number of cached nodes is %d, expected %d,"
		    "possible nodes leak\n", ifp->if_xname, nnodes,
		    ic->ic_nnodes);
#endif
	splx(s);
}
void
benchmark_tree (void *push, void *pub, void *router, unsigned int N_KEYS,
		unsigned int *keys, struct dbkey_rb_t dbkey_rb,int N_THREADS
		)
{

  /* clean the cache */
  system ("./script.sh");

  float stat;
  int64_t diff = zclock_time ();
  unsigned int iter;
  int stop;
  unsigned int counter = 0;
  int more_requested = 0;


  iter = 0;
  while (iter < N_KEYS)
    {
      dbkey_t *dbkey;
      size_t vallen;

      int64_t diff2 = zclock_time ();
      while (1)
	{
	  if (zclock_time () - diff2 > 1)
	    {
	      zframe_t *frame = zframe_recv_nowait (router);
	      if (frame != NULL)
		{
		  zframe_destroy (&frame);
		  frame = zframe_recv_nowait (router);

		  if (zframe_size (frame) == strlen ("m"))
		    {
		      zframe_destroy (&frame);
		      break;
		    }
		}
	      diff2 = zclock_time ();
	    }

	  dbkey = (dbkey_t *) malloc (sizeof (dbkey_t));
	  dbkey->key = keys[iter];


	  RB_INSERT (dbkey_rb_t, &dbkey_rb, dbkey);



	  if (iter == N_KEYS - 1)
	    {
	      iter++;
	      break;
	    }
	  else
	    {

	      iter++;
	    }
	}

      dbkey_t *tr_iter = RB_MIN (dbkey_rb_t, &dbkey_rb);

      while (tr_iter)
	{

	  zframe_t *frame = zframe_new (&(tr_iter->key), 4);
	  zframe_send (&frame, push, 0);


	  dbkey_t *temp = tr_iter;
	  tr_iter = RB_NEXT (dbkey_rb_t, &dbkey_rb, tr_iter);

	  RB_REMOVE (dbkey_rb_t, &dbkey_rb, temp);
	  free (temp);
	}


    }

  stop = 1;
  zframe_t *frame = zframe_new (&stop, 4);
  zframe_send (&frame, pub, 0);

  iter = 0;
  while (iter < N_THREADS)
    {
      unsigned int temp;
      zmsg_t *msg = zmsg_recv (router);
      zframe_t *frame = zmsg_unwrap (msg);
      zframe_destroy (&frame);
      frame=zmsg_first (msg);
      if (zframe_size (frame) == strlen ("m"))
	{
	}
      else
	{
	  memcpy (&temp, zframe_data (frame), 4);
	  counter = counter + temp;
	  iter++;
	}
      zmsg_destroy (&msg);
    }

  printf ("\nkeys processed:%u", counter);

  diff = zclock_time () - diff;

  stat = ((float) counter * 1000) / (float) diff;
  printf ("\nrandom read with an rb_tree:  %f keys per sec\n", stat);

}
Example #24
cur_t *cur_next(cur_t *cur)
    { return RB_NEXT(curtree_s, &Head, cur); }
Example #25
/*
 * Complete a scan of potential channels.
 */
void
ieee80211_end_scan(struct ifnet *ifp)
{
	struct ieee80211com *ic = (void *)ifp;
	struct ieee80211_node *ni, *nextbs, *selbs;

	if (ifp->if_flags & IFF_DEBUG)
		printf("%s: end %s scan\n", ifp->if_xname,
			(ic->ic_flags & IEEE80211_F_ASCAN) ?
				"active" : "passive");

	if (ic->ic_scan_count)
		ic->ic_flags &= ~IEEE80211_F_ASCAN;

	ni = RB_MIN(ieee80211_tree, &ic->ic_tree);

#ifndef IEEE80211_STA_ONLY
	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
		/* XXX off stack? */
		u_char occupied[howmany(IEEE80211_CHAN_MAX, NBBY)];
		int i, fail;

		/*
		 * The passive scan to look for existing AP's completed,
		 * select a channel to camp on.  Identify the channels
		 * that already have one or more AP's and try to locate
		 * an unoccupied one.  If that fails, pick a random
		 * channel from the active set.
		 */
		memset(occupied, 0, sizeof(occupied));
		RB_FOREACH(ni, ieee80211_tree, &ic->ic_tree)
			setbit(occupied, ieee80211_chan2ieee(ic, ni->ni_chan));
		for (i = 0; i < IEEE80211_CHAN_MAX; i++)
			if (isset(ic->ic_chan_active, i) && isclr(occupied, i))
				break;
		if (i == IEEE80211_CHAN_MAX) {
			fail = arc4random() & 3;	/* random 0-3 */
			for (i = 0; i < IEEE80211_CHAN_MAX; i++)
				if (isset(ic->ic_chan_active, i) && fail-- == 0)
					break;
		}
		ieee80211_create_ibss(ic, &ic->ic_channels[i]);
		goto wakeup;
	}
#endif
	if (ni == NULL) {
		DPRINTF(("no scan candidate\n"));
 notfound:

#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode == IEEE80211_M_IBSS &&
		    (ic->ic_flags & IEEE80211_F_IBSSON) &&
		    ic->ic_des_esslen != 0) {
			ieee80211_create_ibss(ic, ic->ic_ibss_chan);
			goto wakeup;
		}
#endif
		/*
		 * Scan the next mode if nothing has been found. This
		 * is necessary if the device supports different
		 * incompatible modes in the same channel range, like
		 * 11b and "pure" 11G mode. This will loop
		 * forever except for user-initiated scans.
		 */
		if (ieee80211_next_mode(ifp) == IEEE80211_MODE_AUTO) {
			if (ic->ic_scan_lock & IEEE80211_SCAN_REQUEST &&
			    ic->ic_scan_lock & IEEE80211_SCAN_RESUME) {
				ic->ic_scan_lock = IEEE80211_SCAN_LOCKED;
				/* Return from an user-initiated scan */
				wakeup(&ic->ic_scan_lock);
			} else if (ic->ic_scan_lock & IEEE80211_SCAN_REQUEST)
				goto wakeup;
			ic->ic_scan_count++;
		}

		/*
		 * Reset the list of channels to scan and start again.
		 */
		ieee80211_next_scan(ifp);
		return;
	}
	selbs = NULL;

	for (; ni != NULL; ni = nextbs) {
		nextbs = RB_NEXT(ieee80211_tree, &ic->ic_tree, ni);
		if (ni->ni_fails) {
			/*
			 * The configuration of the access points may change
			 * during my scan.  So delete the entry for the AP
			 * and retry to associate if there is another beacon.
			 */
			if (ni->ni_fails++ > 2)
				ieee80211_free_node(ic, ni);
			continue;
		}
		if (ieee80211_match_bss(ic, ni) == 0) {
			if (selbs == NULL)
				selbs = ni;
			else if (ni->ni_rssi > selbs->ni_rssi)
				selbs = ni;
		}
	}
	if (selbs == NULL)
		goto notfound;
	(*ic->ic_node_copy)(ic, ic->ic_bss, selbs);
	ni = ic->ic_bss;

	/*
	 * Set the erp state (mostly the slot time) to deal with
	 * the auto-select case; this should be redundant if the
	 * mode is locked.
	 */
	ic->ic_curmode = ieee80211_chan2mode(ic, ni->ni_chan);
	ieee80211_reset_erp(ic);

	if (ic->ic_flags & IEEE80211_F_RSNON)
		ieee80211_choose_rsnparams(ic);
	else if (ic->ic_flags & IEEE80211_F_WEPON)
		ni->ni_rsncipher = IEEE80211_CIPHER_USEGROUP;

	ieee80211_node_newstate(selbs, IEEE80211_STA_BSS);
#ifndef IEEE80211_STA_ONLY
	if (ic->ic_opmode == IEEE80211_M_IBSS) {
		ieee80211_fix_rate(ic, ni, IEEE80211_F_DOFRATE |
		    IEEE80211_F_DONEGO | IEEE80211_F_DODEL);
		if (ni->ni_rates.rs_nrates == 0)
			goto notfound;
		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
	} else
#endif
		ieee80211_new_state(ic, IEEE80211_S_AUTH, -1);

 wakeup:
	if (ic->ic_scan_lock & IEEE80211_SCAN_REQUEST) {
		/* Return from an user-initiated scan */
		wakeup(&ic->ic_scan_lock);
	}

	ic->ic_scan_lock = IEEE80211_SCAN_UNLOCKED;
}
/*
 * Complete a scan of potential channels.
 */
void Voodoo80211Device::
ieee80211_end_scan(struct ieee80211com *ic)
{
	struct ieee80211_node *ni, *nextbs, *selbs;
    
    /* TODO
	if (ifp->if_flags & IFF_DEBUG)
		printf("%s: end %s scan\n", ifp->if_xname,
               (ic->ic_flags & IEEE80211_F_ASCAN) ?
               "active" : "passive");
     */
    
	if (ic->ic_scan_count)
		ic->ic_flags &= ~IEEE80211_F_ASCAN;
    
	ni = RB_MIN(ieee80211_tree, &ic->ic_tree);
    
	if (ni == NULL) {
		DPRINTF(("no scan candidate\n"));
    notfound:
        
		/*
		 * Scan the next mode if nothing has been found. This
		 * is necessary if the device supports different
		 * incompatible modes in the same channel range, like
		 * 11b and "pure" 11G mode. This will loop
		 * forever except for user-initiated scans.
		 */
		if (ieee80211_next_mode(ic) == IEEE80211_MODE_AUTO) {
			if (ic->ic_scan_lock & IEEE80211_SCAN_REQUEST &&
			    ic->ic_scan_lock & IEEE80211_SCAN_RESUME) {
				ic->ic_scan_lock = IEEE80211_SCAN_LOCKED;
				/* Return from an user-initiated scan */
				wakeupOn(&ic->ic_scan_lock);
				// XXX: pvaibhav: do this here?
				fInterface->postMessage(APPLE80211_M_SCAN_DONE);
			} else if (ic->ic_scan_lock & IEEE80211_SCAN_REQUEST)
				goto wakeup;
			ic->ic_scan_count++;
		}
        
		/*
		 * Reset the list of channels to scan and start again.
		 */
		ieee80211_next_scan(ic);
		return;
	}
	selbs = NULL;
    
	for (; ni != NULL; ni = nextbs) {
		nextbs = RB_NEXT(ieee80211_tree, &ic->ic_tree, ni);
		if (ni->ni_fails) {
			/*
			 * The configuration of the access points may change
			 * during my scan.  So delete the entry for the AP
			 * and retry to associate if there is another beacon.
			 */
			if (ni->ni_fails++ > 2)
				ieee80211_free_node(ic, ni);
			continue;
		}
		if (ieee80211_match_bss(ic, ni) == 0) {
			if (selbs == NULL)
				selbs = ni;
			else if (ni->ni_rssi > selbs->ni_rssi)
				selbs = ni;
		}
	}
	if (selbs == NULL)
		goto notfound;
	ieee80211_node_copy(ic, ic->ic_bss, selbs);
	ni = ic->ic_bss;
    
	/*
	 * Set the erp state (mostly the slot time) to deal with
	 * the auto-select case; this should be redundant if the
	 * mode is locked.
	 */
	ic->ic_curmode = ieee80211_chan2mode(ic, ni->ni_chan);
	ieee80211_reset_erp(ic);
    
	if (ic->ic_flags & IEEE80211_F_RSNON)
		ieee80211_choose_rsnparams(ic);
	else if (ic->ic_flags & IEEE80211_F_WEPON)
		ni->ni_rsncipher = IEEE80211_CIPHER_USEGROUP;
    
	ieee80211_node_newstate(selbs, IEEE80211_STA_BSS);
	ieee80211_newstate(ic, IEEE80211_S_AUTH, -1);
    
wakeup:
	if (ic->ic_scan_lock & IEEE80211_SCAN_REQUEST) {
		/* Return from an user-initiated scan */
		wakeupOn(&ic->ic_scan_lock);
		// XXX: pvaibhav: do this here?
		fInterface->postMessage(APPLE80211_M_SCAN_DONE);
	}
    
	ic->ic_scan_lock = IEEE80211_SCAN_UNLOCKED;
}
Example #27
static int
tmpfs_readdir(struct vop_readdir_args *v)
{
	struct vnode *vp = v->a_vp;
	struct uio *uio = v->a_uio;
	int *eofflag = v->a_eofflag;
	off_t **cookies = v->a_cookies;
	int *ncookies = v->a_ncookies;
	struct tmpfs_mount *tmp;
	int error;
	off_t startoff;
	off_t cnt = 0;
	struct tmpfs_node *node;

	/* This operation only makes sense on directory nodes. */
	if (vp->v_type != VDIR) {
		return ENOTDIR;
	}

	tmp = VFS_TO_TMPFS(vp->v_mount);
	node = VP_TO_TMPFS_DIR(vp);
	startoff = uio->uio_offset;

	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT) {
		error = tmpfs_dir_getdotdent(node, uio);
		if (error != 0) {
			TMPFS_NODE_LOCK_SH(node);
			goto outok;
		}
		cnt++;
	}

	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT) {
		/* may lock parent, cannot hold node lock */
		error = tmpfs_dir_getdotdotdent(tmp, node, uio);
		if (error != 0) {
			TMPFS_NODE_LOCK_SH(node);
			goto outok;
		}
		cnt++;
	}

	TMPFS_NODE_LOCK_SH(node);
	error = tmpfs_dir_getdents(node, uio, &cnt);

outok:
	KKASSERT(error >= -1);

	if (error == -1)
		error = 0;

	if (eofflag != NULL)
		*eofflag =
		    (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);

	/* Update NFS-related variables. */
	if (error == 0 && cookies != NULL && ncookies != NULL) {
		off_t i;
		off_t off = startoff;
		struct tmpfs_dirent *de = NULL;

		*ncookies = cnt;
		*cookies = kmalloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK);

		for (i = 0; i < cnt; i++) {
			KKASSERT(off != TMPFS_DIRCOOKIE_EOF);
			if (off == TMPFS_DIRCOOKIE_DOT) {
				off = TMPFS_DIRCOOKIE_DOTDOT;
			} else {
				if (off == TMPFS_DIRCOOKIE_DOTDOT) {
					de = RB_MIN(tmpfs_dirtree_cookie,
						&node->tn_dir.tn_cookietree);
				} else if (de != NULL) {
					de = RB_NEXT(tmpfs_dirtree_cookie,
					       &node->tn_dir.tn_cookietree, de);
				} else {
					de = tmpfs_dir_lookupbycookie(node,
								      off);
					KKASSERT(de != NULL);
					de = RB_NEXT(tmpfs_dirtree_cookie,
					       &node->tn_dir.tn_cookietree, de);
				}
				if (de == NULL)
					off = TMPFS_DIRCOOKIE_EOF;
				else
					off = tmpfs_dircookie(de);
			}
			(*cookies)[i] = off;
		}
		KKASSERT(uio->uio_offset == off);
	}
	TMPFS_NODE_UNLOCK(node);

	if ((node->tn_status & TMPFS_NODE_ACCESSED) == 0) {
		TMPFS_NODE_LOCK(node);
		node->tn_status |= TMPFS_NODE_ACCESSED;
		TMPFS_NODE_UNLOCK(node);
	}
	return error;
}
Example #28
static udata_t *udata_next(udata_t *udata)
    { return RB_NEXT(udatatree_s, &Head, udata); }