Example #1
int
mib_fetch_route(void)
{
	u_char *rtab, *next;
	size_t len;
	struct sroute *r, *r1;
	struct rt_msghdr *rtm;
	struct sockaddr *addrs[RTAX_MAX];

	if (route_tick != 0 && route_tick + ROUTE_UPDATE_INTERVAL > this_tick)
		return (0);

	/*
	 * Remove all routes
	 */
	r = RB_MIN(sroutes, &sroutes);
	while (r != NULL) {
		r1 = RB_NEXT(sroutes, &sroutes, r);
		RB_REMOVE(sroutes, &sroutes, r);
		free(r);
		r = r1;
	}
	route_total = 0;

	if ((rtab = mib_fetch_rtab(AF_INET, NET_RT_DUMP, 0, &len)) == NULL)
		return (-1);

	for (next = rtab; next < rtab + len; next += rtm->rtm_msglen) {
		rtm = (struct rt_msghdr *)(void *)next;
		if (rtm->rtm_type != RTM_GET ||
		    !(rtm->rtm_flags & RTF_UP))
			continue;
		mib_extract_addrs(rtm->rtm_addrs, (u_char *)(rtm + 1), addrs);

		mib_sroute_process(rtm, addrs[RTAX_GATEWAY], addrs[RTAX_DST],
		    addrs[RTAX_NETMASK]);
	}

#if 0
	u_int n = 0;
	r = RB_MIN(sroutes, &sroutes);
	while (r != NULL) {
		printf("%u: ", n++);
		sroute_print(r);
		printf("\n");
		r = RB_NEXT(sroutes, &sroutes, r);
	}
#endif
	free(rtab);
	route_tick = get_ticks();

	return (0);
}
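The example above relies on mib_extract_addrs() to unpack the sockaddr array that the kernel lays out directly behind each rt_msghdr, one address per bit set in rtm_addrs. The real helper lives elsewhere in bsnmpd and may differ; a minimal sketch of the idea, assuming FreeBSD's SA_SIZE() rounding macro:

#include <sys/socket.h>
#include <net/route.h>	/* RTAX_MAX, SA_SIZE() */

/* Illustrative sketch only, not bsnmpd's actual implementation. */
static void
extract_addrs(int addrs, u_char *info, struct sockaddr **out)
{
	int i;

	for (i = 0; i < RTAX_MAX; i++) {
		if (addrs & (1 << i)) {
			out[i] = (struct sockaddr *)(void *)info;
			info += SA_SIZE(out[i]);	/* advance by aligned length */
		} else
			out[i] = NULL;
	}
}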
Example #2
uint32_t Flush_FlowTree(FlowSource_t *fs) {
struct FlowNode *node, *nxt;
uint32_t n = NumFlows;

	// Dump all incomplete flows to the file
	for (node = RB_MIN(FlowTree, FlowTree); node != NULL; node = nxt) {
		StorePcapFlow(fs, node);
		nxt = RB_NEXT(FlowTree, FlowTree, node);
#ifdef DEVEL
		if ( node->left || node->right ) {
			assert(node->proto == 17);
			node->left = node->right = NULL;
		}
#endif
		Remove_Node(node);
	}

#ifdef DEVEL
	if ( NumFlows != 0 )
		LogError("### Flush_FlowTree() remaining flows: %u\n", NumFlows);
#endif

	UDP_list.list	= NULL;
	UDP_list.tail	= NULL;
	UDP_list.size	= 0;

	return n;

} // End of Flush_FlowTree
Example #3
/*
 * Timeout inactive nodes.
 */
void Voodoo80211Device::
ieee80211_clean_nodes(struct ieee80211com *ic)
{
	struct ieee80211_node *ni, *next_ni;
	u_int gen = ic->ic_scangen++;		/* NB: ok 'cuz single-threaded*/
	int s;

	s = splnet();
	for (ni = RB_MIN(ieee80211_tree, &ic->ic_tree);
	    ni != NULL; ni = next_ni) {
		next_ni = RB_NEXT(ieee80211_tree, &ic->ic_tree, ni);
		if (ic->ic_nnodes < ic->ic_max_nnodes)
			break;
		if (ni->ni_scangen == gen)	/* previously handled */
			continue;
		ni->ni_scangen = gen;
		if (ni->ni_refcnt > 0)
			continue;
		DPRINTF(("station %s purged from LRU cache\n",
		    ether_sprintf(ni->ni_macaddr)));
		/*
		 * Unlike the original net80211 code (see Example #4),
		 * no deauthenticate frame is sent here; the node is
		 * simply freed.
		 */
		ieee80211_free_node(ic, ni);
		ic->ic_stats.is_node_timeout++;
	}
	splx(s);
}
Example #4
/*
 * Timeout inactive nodes.  Note that we cannot hold the node
 * lock while sending a frame as this would lead to a LOR.
 * Instead we use a generation number to mark nodes that we've
 * scanned and drop the lock and restart a scan if we have to
 * time out a node.  Since we are single-threaded by virtue of
 * controlling the inactivity timer we can be sure this will
 * process each node only once.
 */
void
ieee80211_clean_nodes(struct ieee80211com *ic)
{
	struct ieee80211_node *ni, *next_ni;
	u_int gen = ic->ic_scangen++;		/* NB: ok 'cuz single-threaded*/

	IEEE80211_NODE_LOCK(ic);
	for (ni = RB_MIN(ieee80211_tree, &ic->ic_tree);
	    ni != NULL; ni = next_ni) {
		next_ni = RB_NEXT(ieee80211_tree, &ic->ic_tree, ni);
		if (ic->ic_nnodes <= ic->ic_max_nnodes)
			break;
		if (ni->ni_scangen == gen)	/* previously handled */
			continue;
		ni->ni_scangen = gen;
		if (ni->ni_refcnt > 0)
			continue;
		IEEE80211_DPRINTF(("station %s purged from LRU cache\n",
		    ether_sprintf(ni->ni_macaddr)));
		/*
		 * Send a deauthenticate frame.
		 */
		if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
			IEEE80211_NODE_UNLOCK(ic);
			IEEE80211_SEND_MGMT(ic, ni,
			    IEEE80211_FC0_SUBTYPE_DEAUTH,
			    IEEE80211_REASON_AUTH_EXPIRE);
			IEEE80211_NODE_LOCK(ic);
			ieee80211_node_leave(ic, ni);
		} else
			ieee80211_free_node(ic, ni);
		ic->ic_stats.is_node_timeout++;
	}
	IEEE80211_NODE_UNLOCK(ic);
}
Example #5
static void
unload_cb(ErlNifEnv *env, void *priv_data)
{
	struct atom_node *an;

	enif_rwlock_rwlock(gbl->atom_lock);

	/* when we unload, we want to tell all of the active caches to die,
	   then join() their bg_threads to wait until they're completely gone */
	while ((an = RB_MIN(atom_tree, &(gbl->atom_head)))) {
		struct cache *c = an->cache;
		enif_rwlock_rwunlock(gbl->atom_lock);

		enif_mutex_lock(c->ctrl_lock);
		c->flags |= FL_DYING;
		enif_mutex_unlock(c->ctrl_lock);
		enif_cond_broadcast(c->check_cond);

		enif_thread_join(c->bg_thread, NULL);

		enif_rwlock_rwlock(gbl->atom_lock);
	}

	enif_rwlock_rwunlock(gbl->atom_lock);
	enif_rwlock_destroy(gbl->atom_lock);
	enif_clear_env(gbl->atom_env);
	enif_free(gbl);

	gbl = NULL;
}
Example #6
void
cfg_default_done(__unused struct cmd_q *cmdq)
{
	if (--cfg_references != 0)
		return;
	cfg_finished = 1;

	if (!RB_EMPTY(&sessions))
		cfg_show_causes(RB_MIN(sessions, &sessions));

	cmdq_free(cfg_cmd_q);
	cfg_cmd_q = NULL;

	if (cfg_client != NULL) {
		/*
		 * The client command queue starts with client_exit set to 1 so
		 * only continue if not empty (that is, we have been delayed
		 * during configuration parsing for long enough that the
		 * MSG_COMMAND has arrived), else the client will exit before
		 * the MSG_COMMAND which might tell it not to.
		 */
		if (!TAILQ_EMPTY(&cfg_client->cmdq->queue))
			cmdq_continue(cfg_client->cmdq);
		server_client_unref(cfg_client);
		cfg_client = NULL;
	}
}
Example #7
static int
kqueue_gc(void)
{
    int rv;
    struct kqueue *n1, *n2;

    /* Free any kqueue descriptor that is no longer needed */
    /* Sadly O(N), however needed in the case that a descriptor is
       closed and kevent(2) will never again be called on it. */
    for (n1 = RB_MIN(kqt, &kqtree); n1 != NULL; n1 = n2) {
        n2 = RB_NEXT(kqt, &kqtree, n1);

        if (n1->kq_ref == 0) {
            kqueue_free(n1);
        } else {
            rv = kqueue_validate(n1);
            if (rv == 0)
                kqueue_free(n1);
            else if (rv < 0)
                return (-1);
        }
    }

    return (0);
}
Example #8
/*
 * return boolean whether or not the last ctfile_list contained
 * filename.
 */
int
ct_file_on_server(struct ct_global_state *state, char *filename)
{
	struct ctfile_list_tree	 results;
	struct ctfile_list_file	*file = NULL;
	char			*filelist[2];
	int			 exists = 0;

	RB_INIT(&results);
	filelist[0] = filename;
	filelist[1] = NULL;
	ctfile_list_complete(&state->ctfile_list_files, CT_MATCH_GLOB,
	    filelist, NULL, &results);

	/* Check to see if we already have a secrets file on the server */
	if (RB_MIN(ctfile_list_tree, &results) != NULL) {
		exists = 1;
	}
	while ((file = RB_ROOT(&results)) != NULL) {
		RB_REMOVE(ctfile_list_tree, &results, file);
		e_free(&file);
	}

	return (exists);
}
Example #9
/*
 * Helper function for tmpfs_readdir.  Creates a '..' entry for the given
 * directory and returns it in the uio space.  The function returns 0
 * on success, -1 if there was not enough space in the uio structure to
 * hold the directory entry or an appropriate error code if another
 * error happens.
 */
int
tmpfs_dir_getdotdotdent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
			struct uio *uio)
{
	int error;
	ino_t d_ino;

	TMPFS_VALIDATE_DIR(node);
	KKASSERT(uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT);

	if (node->tn_dir.tn_parent) {
		TMPFS_NODE_LOCK(node);
		if (node->tn_dir.tn_parent)
			d_ino = node->tn_dir.tn_parent->tn_id;
		else
			d_ino = tmp->tm_root->tn_id;
		TMPFS_NODE_UNLOCK(node);
	} else {
		d_ino = tmp->tm_root->tn_id;
	}

	if (vop_write_dirent(&error, uio, d_ino, DT_DIR, 2, ".."))
		return -1;
	if (error == 0) {
		struct tmpfs_dirent *de;
		de = RB_MIN(tmpfs_dirtree_cookie, &node->tn_dir.tn_cookietree);
		if (de == NULL)
			uio->uio_offset = TMPFS_DIRCOOKIE_EOF;
		else
			uio->uio_offset = tmpfs_dircookie(de);
	}
	return error;
}
Example #10
void uv__run_timers(uv_loop_t* loop) {
  uv_timer_t* handle;

  while ((handle = RB_MIN(uv__timers, &loop->timer_handles))) {
    if (handle->timeout > loop->time) break;

    uv_timer_stop(handle);
    uv_timer_again(handle);
    handle->timer_cb(handle, 0);
  }
}
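RB_MIN only yields the earliest deadline because the timer tree is ordered on the timeout field. The sketch below shows the general shape of such a declaration with BSD tree.h; the struct layout and names are illustrative assumptions, not libuv's actual internals:

#include <stdint.h>
#include <sys/tree.h>

struct timer {
	RB_ENTRY(timer) entry;
	uint64_t timeout;	/* absolute deadline */
	uint64_t id;		/* insertion order, breaks ties */
};

static int
timer_cmp(struct timer *a, struct timer *b)
{
	if (a->timeout != b->timeout)
		return (a->timeout < b->timeout ? -1 : 1);
	/* Distinct handles must never compare equal. */
	return (a->id < b->id ? -1 : a->id > b->id ? 1 : 0);
}

RB_HEAD(timer_tree, timer);
RB_GENERATE(timer_tree, timer, entry, timer_cmp)

With this ordering, re-querying RB_MIN on every iteration of the drain loop above is deliberate: the callback (via uv_timer_again or its own uv_timer_start calls) may re-insert the handle, so a cached RB_NEXT pointer could go stale.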
Example #11
/* Free 32-bit file number generation structures. */
void
msdosfs_fileno_free(struct mount *mp)
{
	struct msdosfsmount *pmp = VFSTOMSDOSFS(mp);
	struct msdosfs_fileno *mf, *next;

	for (mf = RB_MIN(msdosfs_filenotree, &pmp->pm_filenos); mf != NULL;
	    mf = next) {
		next = RB_NEXT(msdosfs_filenotree, &pmp->pm_filenos, mf);
		RB_REMOVE(msdosfs_filenotree, &pmp->pm_filenos, mf);
		free(mf, M_MSDOSFSFILENO);
	}
}
Example #12
unsigned int uv__next_timeout(uv_loop_t* loop) {
  uv_timer_t* handle;

  handle = RB_MIN(uv__timers, &loop->timer_handles);

  if (handle == NULL)
    return (unsigned int) -1; /* block indefinitely */

  if (handle->timeout <= loop->time)
    return 0;

  return handle->timeout - loop->time;
}
Example #13
File: tags.c Project: hackalog/mg
/*
 * Free tags tree.
 */
void
unloadtags(void)
{
	struct ctag *var, *nxt;

	for (var = RB_MIN(tagtree, &tags); var != NULL; var = nxt) {
		nxt = RB_NEXT(tagtree, &tags, var);
		RB_REMOVE(tagtree, &tags, var);
		/* line parsed with fparseln needs to be freed */
		free(var->tag);
		free(var);
	}
}
Example #14
int uv__next_timeout(const uv_loop_t* loop) {
  const uv_timer_t* handle;

  /* RB_MIN expects a non-const tree root. That's okay, it doesn't modify it. */
  handle = RB_MIN(uv__timers, (struct uv__timers*) &loop->timer_handles);

  if (handle == NULL)
    return -1; /* block indefinitely */

  if (handle->timeout <= loop->time)
    return 0;

  return handle->timeout - loop->time;
}
Example #15
void
ieee80211_free_allnodes(struct ieee80211com *ic)
{
	struct ieee80211_node *ni;

	IEEE80211_DPRINTF(("%s\n", __func__));
	IEEE80211_NODE_LOCK_BH(ic);
	while ((ni = RB_MIN(ieee80211_tree, &ic->ic_tree)) != NULL)
		ieee80211_free_node(ic, ni);
	IEEE80211_NODE_UNLOCK_BH(ic);

	if (ic->ic_bss != NULL)
		ieee80211_node_cleanup(ic, ic->ic_bss);	/* for station mode */
}
Example #16
int
main(int argc, char **argv)
{
	struct node *tmp, *ins;
	int i, max, min;

	RB_INIT(&root);

	for (i = 0; i < ITER; i++) {
		tmp = malloc(sizeof(struct node));
		if (tmp == NULL) err(1, "malloc");
		do {
			tmp->key = arc4random_uniform(MAX-MIN);
			tmp->key += MIN;
		} while (RB_FIND(tree, &root, tmp) != NULL);
		if (i == 0)
			max = min = tmp->key;
		else {
			if (tmp->key > max)
				max = tmp->key;
			if (tmp->key < min)
				min = tmp->key;
		}
		if (RB_INSERT(tree, &root, tmp) != NULL)
			errx(1, "RB_INSERT failed");
	}

	ins = RB_MIN(tree, &root);
	if (ins->key != min)
		errx(1, "min does not match");
	tmp = ins;
	ins = RB_MAX(tree, &root);
	if (ins->key != max)
		errx(1, "max does not match");

	if (RB_REMOVE(tree, &root, tmp) != tmp)
		errx(1, "RB_REMOVE failed");

	for (i = 0; i < ITER - 1; i++) {
		tmp = RB_ROOT(&root);
		if (tmp == NULL)
			errx(1, "RB_ROOT error");
		if (RB_REMOVE(tree, &root, tmp) != tmp)
			errx(1, "RB_REMOVE error");
		free(tmp);
	}

	exit(0);
}
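The test excerpt assumes tree plumbing declared above it. A self-contained sketch of those missing pieces, with assumed names and illustrative constants (the real regress test may differ); RB_GENERATE must appear before main() so the RB_FIND/RB_INSERT/RB_MIN calls expand against defined functions:

#include <sys/tree.h>
#include <err.h>
#include <stdlib.h>

#define ITER	1000	/* illustrative */
#define MIN	1
#define MAX	100000

struct node {
	RB_ENTRY(node) node_link;
	int key;
};

static int
node_cmp(struct node *a, struct node *b)
{
	return (a->key < b->key ? -1 : a->key > b->key ? 1 : 0);
}

RB_HEAD(tree, node) root;
RB_GENERATE(tree, node, node_link, node_cmp)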
Example #17
void
ieee80211_free_allnodes(struct ieee80211com *ic)
{
	struct ieee80211_node *ni;
	int s;

	DPRINTF(("freeing all nodes\n"));
	s = splnet();
	while ((ni = RB_MIN(ieee80211_tree, &ic->ic_tree)) != NULL)
		ieee80211_free_node(ic, ni);
	splx(s);

	if (ic->ic_bss != NULL)
		ieee80211_node_cleanup(ic, ic->ic_bss);	/* for station mode */
}
Example #18
void Dispose_FlowTree(void) {
struct FlowNode *node, *nxt;

	// Free all nodes remaining in the tree
	for (node = RB_MIN(FlowTree, FlowTree); node != NULL; node = nxt) {
		nxt = RB_NEXT(FlowTree, FlowTree, node);
		RB_REMOVE(FlowTree, FlowTree, node);
		if ( node->data ) 
			free(node->data);
	}
	free(FlowElementCache);
	FlowElementCache 	 = NULL;
	FlowNode_FreeList 	 = NULL;
	CacheOverflow = 0;

} // End of Dispose_FlowTree
Example #19
void drm_drawable_free_all(struct drm_device *dev)
{
	struct bsd_drm_drawable_info *info, *next;

	DRM_SPINLOCK(&dev->drw_lock);
	for (info = RB_MIN(drawable_tree, &dev->drw_head);
	    info != NULL ; info = next) {
		next = RB_NEXT(drawable_tree, &dev->drw_head, info);
		RB_REMOVE(drawable_tree, &dev->drw_head,
		    (struct bsd_drm_drawable_info *)info);
		DRM_SPINUNLOCK(&dev->drw_lock);
		free_unr(dev->drw_unrhdr, info->handle);
		free(info->info.rects, DRM_MEM_DRAWABLE);
		free(info, DRM_MEM_DRAWABLE);
		DRM_SPINLOCK(&dev->drw_lock);
	}
	DRM_SPINUNLOCK(&dev->drw_lock);
}
Example #20
/* Free a tree. */
void
format_free(struct format_tree *ft)
{
	struct format_entry	*fe, *fe_next;

	fe_next = RB_MIN(format_tree, ft);
	while (fe_next != NULL) {
		fe = fe_next;
		fe_next = RB_NEXT(format_tree, ft, fe);

		RB_REMOVE(format_tree, ft, fe);
		free(fe->value);
		free(fe->key);
		free(fe);
	}

	free(ft);
}
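The take-next-before-remove dance above is common enough that OpenBSD's tree.h wraps it in RB_FOREACH_SAFE. Where that macro is available, the same loop can be written more compactly; a behavior-preserving sketch:

	/* Equivalent loop using RB_FOREACH_SAFE, where tree.h provides it. */
	struct format_entry	*fe, *fe1;

	RB_FOREACH_SAFE(fe, format_tree, ft, fe1) {
		RB_REMOVE(format_tree, ft, fe);
		free(fe->value);
		free(fe->key);
		free(fe);
	}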
Example #21
static void handle_pane_key(__unused struct tmate_session *_session,
			    struct tmate_unpacker *uk)
{
	struct session *s;
	struct window_pane *wp;

	int pane_id = unpack_int(uk);
	key_code key = unpack_int(uk);

	s = RB_MIN(sessions, &sessions);
	if (!s)
		return;

	wp = find_window_pane(s, pane_id);
	if (!wp)
		return;

	window_pane_key(wp, NULL, s, key, NULL);
}
Example #22
void
ieee80211_inact_timeout(void *arg)
{
	struct ieee80211com *ic = arg;
	struct ieee80211_node *ni, *next_ni;
	int s;

	s = splnet();
	for (ni = RB_MIN(ieee80211_tree, &ic->ic_tree);
	    ni != NULL; ni = next_ni) {
		next_ni = RB_NEXT(ieee80211_tree, &ic->ic_tree, ni);
		if (ni->ni_refcnt > 0)
			continue;
		if (ni->ni_inact < IEEE80211_INACT_MAX)
			ni->ni_inact++;
	}
	splx(s);

	timeout_add_sec(&ic->ic_inact_timeout, IEEE80211_INACT_WAIT);
}
Example #23
static int has_expired_keys_in_layer(void *cb_context_, void *entry,
                                     const size_t sizeof_entry)
{
    (void) sizeof_entry;
    HasExpiredKeysInLayerCBContext *cb_context = cb_context_;
    HttpHandlerContext * const context = cb_context->context;
    const time_t now = context->now;
    Layer * const layer = entry;
    PanDB * const pan_db = &layer->pan_db;
    Expirables * const expirables = &pan_db->expirables;
    if (expirables == NULL) {
        return 0;
    }
    const Expirable * const expirable = RB_MIN(Expirables_, expirables);
    if (expirable != NULL && now >= expirable->ts) {
        cb_context->has_expired_keys = 1;
        return 1;
    }
    return 0;
}
Example #24
/*
 * Get a tdio with minimum virtual deadline and virtual eligible
 * time smaller than the current virtual time.
 * If there is no such tdio, update the current virtual time to
 * the minimum ve in the queue. (And there must be one eligible then)
 */
struct bfq_thread_io *
wf2q_get_next_thread_io(struct wf2q_t *wf2q)
{
	struct bfq_thread_io *tdio;
	struct wf2q_augtree_t *tree = &wf2q->wf2q_augtree;
	if (!(tdio = wf2q_augtree_get_eligible_with_min_vd(tree, wf2q->wf2q_virtual_time))) {
		tdio = RB_MIN(wf2q_augtree_t, tree);
		if (!tdio)
			return NULL;
		wf2q->wf2q_virtual_time = tdio->ve;
		tdio = wf2q_augtree_get_eligible_with_min_vd(tree, wf2q->wf2q_virtual_time);
	}
	if (!tdio) {
		kprintf("!!!wf2q: wf2q_tdio_count=%d\n", wf2q->wf2q_tdio_count);
		wf2q_tree_dump(RB_ROOT(tree), 0);
		KKASSERT(0);
	}
	RB_REMOVE(wf2q_augtree_t, tree, tdio);
	wf2q->wf2q_tdio_count--;
	return tdio;
}
Example #25
static void tmate_client_pane_key(struct tmate_unpacker *uk)
{
	struct session *s;
	struct window *w;
	struct window_pane *wp;

	int key = unpack_int(uk);

	s = RB_MIN(sessions, &sessions);
	if (!s)
		return;

	w = s->curw->window;
	if (!w)
		return;

	wp = w->active;
	if (!wp)
		return;

	window_pane_key(wp, s, key);
}
Example #26
static void handle_legacy_pane_key(__unused struct tmate_session *_session,
				   struct tmate_unpacker *uk)
{
	struct session *s;
	struct window *w;
	struct window_pane *wp;

	int key = unpack_int(uk);

	s = RB_MIN(sessions, &sessions);
	if (!s)
		return;

	w = s->curw->window;
	if (!w)
		return;

	wp = w->active;
	if (!wp)
		return;

	window_pane_key(wp, NULL, s, key, NULL);
}
Example #27
void
pfi_destroy(void)
{
	struct pfi_kif *p;
	ifnet_t *ifp;
	int s;
	int bound;

	pfil_remove_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, if_pfil);
	pfil_remove_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, if_pfil);

	bound = curlwp_bind();
	s = pserialize_read_enter();
	IFNET_READER_FOREACH(ifp) {
		struct psref psref;
		psref_acquire(&psref, &ifp->if_psref, ifnet_psref_class);
		pserialize_read_exit(s);

		pfi_detach_ifnet(ifp);
		pfi_destroy_groups(ifp);

		s = pserialize_read_enter();
		psref_release(&psref, &ifp->if_psref, ifnet_psref_class);
	}
	pserialize_read_exit(s);
	curlwp_bindx(bound);

	while ((p = RB_MIN(pfi_ifhead, &pfi_ifs))) {
		RB_REMOVE(pfi_ifhead, &pfi_ifs, p);
		free(p, PFI_MTYPE);
	}

	pool_destroy(&pfi_addr_pl);

	free(pfi_buffer, PFI_MTYPE);
}
Example #28
int
cmd_source_file_exec(struct cmd *self, struct cmd_ctx *ctx)
{
	struct args		*args = self->args;
	struct causelist	 causes;
	char			*cause;
	struct window_pane	*wp;
	int			 retval;
	u_int			 i;

	ARRAY_INIT(&causes);

	retval = load_cfg(args->argv[0], ctx, &causes);
	if (ARRAY_EMPTY(&causes))
		return (retval);

	if (retval == 1 && !RB_EMPTY(&sessions) && ctx->cmdclient != NULL) {
		wp = RB_MIN(sessions, &sessions)->curw->window->active;
		window_pane_set_mode(wp, &window_copy_mode);
		window_copy_init_for_output(wp);
		for (i = 0; i < ARRAY_LENGTH(&causes); i++) {
			cause = ARRAY_ITEM(&causes, i);
			window_copy_add(wp, "%s", cause);
			xfree(cause);
		}
	} else {
		for (i = 0; i < ARRAY_LENGTH(&causes); i++) {
			cause = ARRAY_ITEM(&causes, i);
			ctx->print(ctx, "%s", cause);
			xfree(cause);
		}
	}
	ARRAY_FREE(&causes);

	return (retval);
}
Example #29
static int
tmpfs_readdir(struct vop_readdir_args *v)
{
	struct vnode *vp = v->a_vp;
	struct uio *uio = v->a_uio;
	int *eofflag = v->a_eofflag;
	off_t **cookies = v->a_cookies;
	int *ncookies = v->a_ncookies;
	struct tmpfs_mount *tmp;
	int error;
	off_t startoff;
	off_t cnt = 0;
	struct tmpfs_node *node;

	/* This operation only makes sense on directory nodes. */
	if (vp->v_type != VDIR) {
		return ENOTDIR;
	}

	tmp = VFS_TO_TMPFS(vp->v_mount);
	node = VP_TO_TMPFS_DIR(vp);
	startoff = uio->uio_offset;

	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOT) {
		error = tmpfs_dir_getdotdent(node, uio);
		if (error != 0) {
			TMPFS_NODE_LOCK_SH(node);
			goto outok;
		}
		cnt++;
	}

	if (uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT) {
		/* may lock parent, cannot hold node lock */
		error = tmpfs_dir_getdotdotdent(tmp, node, uio);
		if (error != 0) {
			TMPFS_NODE_LOCK_SH(node);
			goto outok;
		}
		cnt++;
	}

	TMPFS_NODE_LOCK_SH(node);
	error = tmpfs_dir_getdents(node, uio, &cnt);

outok:
	KKASSERT(error >= -1);

	if (error == -1)
		error = 0;

	if (eofflag != NULL)
		*eofflag =
		    (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);

	/* Update NFS-related variables. */
	if (error == 0 && cookies != NULL && ncookies != NULL) {
		off_t i;
		off_t off = startoff;
		struct tmpfs_dirent *de = NULL;

		*ncookies = cnt;
		*cookies = kmalloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK);

		for (i = 0; i < cnt; i++) {
			KKASSERT(off != TMPFS_DIRCOOKIE_EOF);
			if (off == TMPFS_DIRCOOKIE_DOT) {
				off = TMPFS_DIRCOOKIE_DOTDOT;
			} else {
				if (off == TMPFS_DIRCOOKIE_DOTDOT) {
					de = RB_MIN(tmpfs_dirtree_cookie,
						&node->tn_dir.tn_cookietree);
				} else if (de != NULL) {
					de = RB_NEXT(tmpfs_dirtree_cookie,
					       &node->tn_dir.tn_cookietree, de);
				} else {
					de = tmpfs_dir_lookupbycookie(node,
								      off);
					KKASSERT(de != NULL);
					de = RB_NEXT(tmpfs_dirtree_cookie,
					       &node->tn_dir.tn_cookietree, de);
				}
				if (de == NULL)
					off = TMPFS_DIRCOOKIE_EOF;
				else
					off = tmpfs_dircookie(de);
			}
			(*cookies)[i] = off;
		}
		KKASSERT(uio->uio_offset == off);
	}
	TMPFS_NODE_UNLOCK(node);

	if ((node->tn_status & TMPFS_NODE_ACCESSED) == 0) {
		TMPFS_NODE_LOCK(node);
		node->tn_status |= TMPFS_NODE_ACCESSED;
		TMPFS_NODE_UNLOCK(node);
	}
	return error;
}
Example #30
int
ieee80211_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ieee80211com *ic = (void *)ifp;
	struct ifreq *ifr = (struct ifreq *)data;
	int i, error = 0;
	struct ieee80211_nwid nwid;
	struct ieee80211_wpapsk *psk;
	struct ieee80211_wmmparams *wmm;
	struct ieee80211_power *power;
	struct ieee80211_bssid *bssid;
	struct ieee80211chanreq *chanreq;
	struct ieee80211_channel *chan;
	struct ieee80211_txpower *txpower;
	static const u_int8_t empty_macaddr[IEEE80211_ADDR_LEN] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	struct ieee80211_nodereq *nr, nrbuf;
	struct ieee80211_nodereq_all *na;
	struct ieee80211_node *ni;
	u_int32_t flags;

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, &ic->ic_ac, cmd, data);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCS80211NWID:
		if ((error = suser(curproc, 0)) != 0)
			break;
		if ((error = copyin(ifr->ifr_data, &nwid, sizeof(nwid))) != 0)
			break;
		if (nwid.i_len > IEEE80211_NWID_LEN) {
			error = EINVAL;
			break;
		}
		memset(ic->ic_des_essid, 0, IEEE80211_NWID_LEN);
		ic->ic_des_esslen = nwid.i_len;
		memcpy(ic->ic_des_essid, nwid.i_nwid, nwid.i_len);
		error = ENETRESET;
		break;
	case SIOCG80211NWID:
		memset(&nwid, 0, sizeof(nwid));
		switch (ic->ic_state) {
		case IEEE80211_S_INIT:
		case IEEE80211_S_SCAN:
			nwid.i_len = ic->ic_des_esslen;
			memcpy(nwid.i_nwid, ic->ic_des_essid, nwid.i_len);
			break;
		default:
			nwid.i_len = ic->ic_bss->ni_esslen;
			memcpy(nwid.i_nwid, ic->ic_bss->ni_essid, nwid.i_len);
			break;
		}
		error = copyout(&nwid, ifr->ifr_data, sizeof(nwid));
		break;
	case SIOCS80211NWKEY:
		if ((error = suser(curproc, 0)) != 0)
			break;
		error = ieee80211_ioctl_setnwkeys(ic, (void *)data);
		break;
	case SIOCG80211NWKEY:
		error = ieee80211_ioctl_getnwkeys(ic, (void *)data);
		break;
	case SIOCS80211WMMPARMS:
		if ((error = suser(curproc, 0)) != 0)
			break;
		if (!(ic->ic_flags & IEEE80211_C_QOS)) {
			error = ENODEV;
			break;
		}
		wmm = (struct ieee80211_wmmparams *)data;
		if (wmm->i_enabled)
			ic->ic_flags |= IEEE80211_F_QOS;
		else
			ic->ic_flags &= ~IEEE80211_F_QOS;
		error = ENETRESET;
		break;
	case SIOCG80211WMMPARMS:
		wmm = (struct ieee80211_wmmparams *)data;
		wmm->i_enabled = (ic->ic_flags & IEEE80211_F_QOS) ? 1 : 0;
		break;
	case SIOCS80211WPAPARMS:
		if ((error = suser(curproc, 0)) != 0)
			break;
		error = ieee80211_ioctl_setwpaparms(ic, (void *)data);
		break;
	case SIOCG80211WPAPARMS:
		error = ieee80211_ioctl_getwpaparms(ic, (void *)data);
		break;
	case SIOCS80211WPAPSK:
		if ((error = suser(curproc, 0)) != 0)
			break;
		psk = (struct ieee80211_wpapsk *)data;
		if (psk->i_enabled) {
			ic->ic_flags |= IEEE80211_F_PSK;
			memcpy(ic->ic_psk, psk->i_psk, sizeof(ic->ic_psk));
		} else {
			ic->ic_flags &= ~IEEE80211_F_PSK;
			memset(ic->ic_psk, 0, sizeof(ic->ic_psk));
		}
		error = ENETRESET;
		break;
	case SIOCG80211WPAPSK:
		psk = (struct ieee80211_wpapsk *)data;
		if (ic->ic_flags & IEEE80211_F_PSK) {
			psk->i_enabled = 1;
			/* do not show any keys to non-root user */
			if (suser(curproc, 0) != 0) {
				psk->i_enabled = 2;
				memset(psk->i_psk, 0, sizeof(psk->i_psk));
				break;	/* return ok but w/o key */
			}
			memcpy(psk->i_psk, ic->ic_psk, sizeof(psk->i_psk));
		} else
			psk->i_enabled = 0;
		break;
	case SIOCS80211POWER:
		if ((error = suser(curproc, 0)) != 0)
			break;
		power = (struct ieee80211_power *)data;
		ic->ic_lintval = power->i_maxsleep;
		if (power->i_enabled != 0) {
			if ((ic->ic_caps & IEEE80211_C_PMGT) == 0)
				error = EINVAL;
			else if ((ic->ic_flags & IEEE80211_F_PMGTON) == 0) {
				ic->ic_flags |= IEEE80211_F_PMGTON;
				error = ENETRESET;
			}
		} else {
			if (ic->ic_flags & IEEE80211_F_PMGTON) {
				ic->ic_flags &= ~IEEE80211_F_PMGTON;
				error = ENETRESET;
			}
		}
		break;
	case SIOCG80211POWER:
		power = (struct ieee80211_power *)data;
		power->i_enabled = (ic->ic_flags & IEEE80211_F_PMGTON) ? 1 : 0;
		power->i_maxsleep = ic->ic_lintval;
		break;
	case SIOCS80211BSSID:
		if ((error = suser(curproc, 0)) != 0)
			break;
		bssid = (struct ieee80211_bssid *)data;
		if (IEEE80211_ADDR_EQ(bssid->i_bssid, empty_macaddr))
			ic->ic_flags &= ~IEEE80211_F_DESBSSID;
		else {
			ic->ic_flags |= IEEE80211_F_DESBSSID;
			IEEE80211_ADDR_COPY(ic->ic_des_bssid, bssid->i_bssid);
		}
		if (ic->ic_opmode == IEEE80211_M_HOSTAP)
			break;
		switch (ic->ic_state) {
		case IEEE80211_S_INIT:
		case IEEE80211_S_SCAN:
			error = ENETRESET;
			break;
		default:
			if ((ic->ic_flags & IEEE80211_F_DESBSSID) &&
			    !IEEE80211_ADDR_EQ(ic->ic_des_bssid,
			    ic->ic_bss->ni_bssid))
				error = ENETRESET;
			break;
		}
		break;
	case SIOCG80211BSSID:
		bssid = (struct ieee80211_bssid *)data;
		switch (ic->ic_state) {
		case IEEE80211_S_INIT:
		case IEEE80211_S_SCAN:
			if (ic->ic_opmode == IEEE80211_M_HOSTAP)
				IEEE80211_ADDR_COPY(bssid->i_bssid,
				    ic->ic_myaddr);
			else if (ic->ic_flags & IEEE80211_F_DESBSSID)
				IEEE80211_ADDR_COPY(bssid->i_bssid,
				    ic->ic_des_bssid);
			else
				memset(bssid->i_bssid, 0, IEEE80211_ADDR_LEN);
			break;
		default:
			IEEE80211_ADDR_COPY(bssid->i_bssid,
			    ic->ic_bss->ni_bssid);
			break;
		}
		break;
	case SIOCS80211CHANNEL:
		if ((error = suser(curproc, 0)) != 0)
			break;
		chanreq = (struct ieee80211chanreq *)data;
		if (chanreq->i_channel == IEEE80211_CHAN_ANY)
			ic->ic_des_chan = IEEE80211_CHAN_ANYC;
		else if (chanreq->i_channel > IEEE80211_CHAN_MAX ||
		    isclr(ic->ic_chan_active, chanreq->i_channel)) {
			error = EINVAL;
			break;
		} else
			ic->ic_ibss_chan = ic->ic_des_chan =
			    &ic->ic_channels[chanreq->i_channel];
		switch (ic->ic_state) {
		case IEEE80211_S_INIT:
		case IEEE80211_S_SCAN:
			error = ENETRESET;
			break;
		default:
			if (ic->ic_opmode == IEEE80211_M_STA) {
				if (ic->ic_des_chan != IEEE80211_CHAN_ANYC &&
				    ic->ic_bss->ni_chan != ic->ic_des_chan)
					error = ENETRESET;
			} else {
				if (ic->ic_bss->ni_chan != ic->ic_ibss_chan)
					error = ENETRESET;
			}
			break;
		}
		break;
	case SIOCG80211CHANNEL:
		chanreq = (struct ieee80211chanreq *)data;
		switch (ic->ic_state) {
		case IEEE80211_S_INIT:
		case IEEE80211_S_SCAN:
			if (ic->ic_opmode == IEEE80211_M_STA)
				chan = ic->ic_des_chan;
			else
				chan = ic->ic_ibss_chan;
			break;
		default:
			chan = ic->ic_bss->ni_chan;
			break;
		}
		chanreq->i_channel = ieee80211_chan2ieee(ic, chan);
		break;
#if 0
	case SIOCG80211ZSTATS:
#endif
	case SIOCG80211STATS:
		ifr = (struct ifreq *)data;
		copyout(&ic->ic_stats, ifr->ifr_data, sizeof (ic->ic_stats));
#if 0
		if (cmd == SIOCG80211ZSTATS)
			memset(&ic->ic_stats, 0, sizeof(ic->ic_stats));
#endif
		break;
	case SIOCS80211TXPOWER:
		if ((error = suser(curproc, 0)) != 0)
			break;
		txpower = (struct ieee80211_txpower *)data;
		if ((ic->ic_caps & IEEE80211_C_TXPMGT) == 0) {
			error = EINVAL;
			break;
		}
		if (IEEE80211_TXPOWER_MIN > txpower->i_val ||
		    txpower->i_val > IEEE80211_TXPOWER_MAX) {
			error = EINVAL;
			break;
		}
		ic->ic_txpower = txpower->i_val;
		error = ENETRESET;
		break;
	case SIOCG80211TXPOWER:
		txpower = (struct ieee80211_txpower *)data;
		if ((ic->ic_caps & IEEE80211_C_TXPMGT) == 0)
			error = EINVAL;
		else
			txpower->i_val = ic->ic_txpower;
		break;
	case SIOCSIFMTU:
		ifr = (struct ifreq *)data;
		if (!(IEEE80211_MTU_MIN <= ifr->ifr_mtu &&
		    ifr->ifr_mtu <= IEEE80211_MTU_MAX))
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCS80211SCAN:
		if ((error = suser(curproc, 0)) != 0)
			break;
		if (ic->ic_opmode == IEEE80211_M_HOSTAP)
			break;
		if ((ifp->if_flags & IFF_UP) == 0) {
			error = ENETDOWN;
			break;
		}
		if ((ic->ic_scan_lock & IEEE80211_SCAN_REQUEST) == 0) {
			if (ic->ic_scan_lock & IEEE80211_SCAN_LOCKED)
				ic->ic_scan_lock |= IEEE80211_SCAN_RESUME;
			ic->ic_scan_lock |= IEEE80211_SCAN_REQUEST;
			if (ic->ic_state != IEEE80211_S_SCAN)
				ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
		}
		/* Let the userspace process wait for completion */
		error = tsleep(&ic->ic_scan_lock, PCATCH, "80211scan",
		    hz * IEEE80211_SCAN_TIMEOUT);
		break;
	case SIOCG80211NODE:
		nr = (struct ieee80211_nodereq *)data;
		ni = ieee80211_find_node(ic, nr->nr_macaddr);
		if (ni == NULL) {
			error = ENOENT;
			break;
		}
		ieee80211_node2req(ic, ni, nr);
		break;
	case SIOCS80211NODE:
		if ((error = suser(curproc, 0)) != 0)
			break;
		if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
			error = EINVAL;
			break;
		}
		nr = (struct ieee80211_nodereq *)data;

		ni = ieee80211_find_node(ic, nr->nr_macaddr);
		if (ni == NULL)
			ni = ieee80211_alloc_node(ic, nr->nr_macaddr);
		if (ni == NULL) {
			error = ENOENT;
			break;
		}

		if (nr->nr_flags & IEEE80211_NODEREQ_COPY)
			ieee80211_req2node(ic, nr, ni);
		break;
	case SIOCS80211DELNODE:
		if ((error = suser(curproc, 0)) != 0)
			break;
		nr = (struct ieee80211_nodereq *)data;
		ni = ieee80211_find_node(ic, nr->nr_macaddr);
		if (ni == NULL)
			error = ENOENT;
		else if (ni == ic->ic_bss)
			error = EPERM;
		else {
			if (ni->ni_state == IEEE80211_STA_COLLECT)
				break;

			/* Disassociate station. */
			if (ni->ni_state == IEEE80211_STA_ASSOC)
				IEEE80211_SEND_MGMT(ic, ni,
				    IEEE80211_FC0_SUBTYPE_DISASSOC,
				    IEEE80211_REASON_ASSOC_LEAVE);

			/* Deauth station. */
			if (ni->ni_state >= IEEE80211_STA_AUTH)
				IEEE80211_SEND_MGMT(ic, ni,
				    IEEE80211_FC0_SUBTYPE_DEAUTH,
				    IEEE80211_REASON_AUTH_LEAVE);

			ieee80211_release_node(ic, ni);
		}
		break;
	case SIOCG80211ALLNODES:
		na = (struct ieee80211_nodereq_all *)data;
		na->na_nodes = i = 0;
		ni = RB_MIN(ieee80211_tree, &ic->ic_tree);
		while (ni && na->na_size >=
		    i + sizeof(struct ieee80211_nodereq)) {
			ieee80211_node2req(ic, ni, &nrbuf);
			error = copyout(&nrbuf, (caddr_t)na->na_node + i,
			    sizeof(struct ieee80211_nodereq));
			if (error)
				break;
			i += sizeof(struct ieee80211_nodereq);
			na->na_nodes++;
			ni = RB_NEXT(ieee80211_tree, &ic->ic_tree, ni);
		}
		break;
	case SIOCG80211FLAGS:
		flags = ic->ic_flags;
		if (ic->ic_opmode != IEEE80211_M_HOSTAP)
			flags &= ~IEEE80211_F_HOSTAPMASK;
		ifr->ifr_flags = flags >> IEEE80211_F_USERSHIFT;
		break;
	case SIOCS80211FLAGS:
		if ((error = suser(curproc, 0)) != 0)
			break;
		flags = (u_int32_t)ifr->ifr_flags << IEEE80211_F_USERSHIFT;
		if (ic->ic_opmode != IEEE80211_M_HOSTAP &&
		    (flags & IEEE80211_F_HOSTAPMASK)) {
			error = EINVAL;
			break;
		}
		ic->ic_flags = (ic->ic_flags & ~IEEE80211_F_USERMASK) | flags;
		error = ENETRESET;
		break;
	default:
		error = ENOTTY;
		break;
	}
	return error;
}
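The SIOCG80211ALLNODES branch above walks the node tree with RB_MIN/RB_NEXT and copies out one ieee80211_nodereq per node, as long as the caller's buffer has room. A sketch of the matching userland call, roughly what ifconfig(8) does; nr_macaddr, na_node, na_size and na_nodes follow the excerpt, while na_ifname and the buffer size are assumptions:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net80211/ieee80211_ioctl.h>
#include <stdio.h>
#include <string.h>

/* Sketch only: error handling trimmed, buffer size illustrative. */
static int
dump_nodes(int sock, const char *ifname)
{
	struct ieee80211_nodereq nodes[64];
	struct ieee80211_nodereq_all na;
	int i;

	memset(&na, 0, sizeof(na));
	memset(nodes, 0, sizeof(nodes));
	strlcpy(na.na_ifname, ifname, sizeof(na.na_ifname));
	na.na_node = nodes;
	na.na_size = sizeof(nodes);

	if (ioctl(sock, SIOCG80211ALLNODES, &na) == -1)
		return (-1);

	for (i = 0; i < na.na_nodes; i++)
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		    nodes[i].nr_macaddr[0], nodes[i].nr_macaddr[1],
		    nodes[i].nr_macaddr[2], nodes[i].nr_macaddr[3],
		    nodes[i].nr_macaddr[4], nodes[i].nr_macaddr[5]);
	return (0);
}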