Example #1
int
main(void)
{
    struct lgtd_lifx_gateway gw;
    uint8_t bulb_addr[LGTD_LIFX_ADDR_LENGTH] = { 5, 4, 3, 2, 1, 0 };
    struct lgtd_lifx_bulb *bulb = lgtd_lifx_bulb_open(&gw, bulb_addr);

    bulb->state.power = LGTD_LIFX_POWER_ON;
    LGTD_STATS_ADD_AND_UPDATE_PROCTITLE(bulbs_powered_on, 1);

    lgtd_lifx_bulb_close(bulb);

    if (!RB_EMPTY(&lgtd_lifx_bulbs_table)) {
        errx(1, "The bulbs table should be empty!");
    }

    if (LGTD_STATS_GET(bulbs) != 0) {
        errx(1, "The bulbs counter is %d (expected 0)", LGTD_STATS_GET(bulbs));
    }

    if (LGTD_STATS_GET(bulbs_powered_on) != 0) {
        errx(
            1, "The powered on bulbs counter is %d (expected 0)",
            LGTD_STATS_GET(bulbs_powered_on)
        );
    }

    return 0;
}
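All of these snippets lean on the RB_EMPTY macro from the BSD <sys/tree.h> red-black tree implementation. For reference, here is a minimal, self-contained sketch of the machinery RB_EMPTY assumes, on a tree.h that provides RB_GENERATE_STATIC (FreeBSD, OpenBSD and libbsd do); the node type, comparator and tree name are hypothetical, and each example below declares its own equivalents.

#include <stdlib.h>
#include <sys/tree.h>

struct node {
	RB_ENTRY(node)	entry;	/* left/right/parent links used by the macros */
	int		key;
};

static int
node_cmp(struct node *a, struct node *b)
{
	return (a->key < b->key ? -1 : a->key > b->key);
}

RB_HEAD(node_tree, node);
RB_GENERATE_STATIC(node_tree, node, entry, node_cmp)

int
main(void)
{
	struct node_tree	 head = RB_INITIALIZER(&head);
	struct node		*n;

	/* RB_EMPTY(&head) is simply RB_ROOT(&head) == NULL. */
	if (!RB_EMPTY(&head))
		return (1);

	if ((n = calloc(1, sizeof(*n))) == NULL)
		return (1);
	n->key = 42;
	RB_INSERT(node_tree, &head, n);
	if (RB_EMPTY(&head))
		return (1);

	RB_REMOVE(node_tree, &head, n);
	free(n);
	return (RB_EMPTY(&head) ? 0 : 1);
}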
Example #2
/* ARGSUSED */
static int
nwfs_sync(struct mount *mp, int waitfor)
{
	struct vnode *vp;
	int error, allerror = 0;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	     vp != NULL;
	     vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		if (vn_islocked(vp) || RB_EMPTY(&vp->v_rbdirty_tree) ||
		    (waitfor & MNT_LAZY))
			continue;
		if (vget(vp, LK_EXCLUSIVE))
			goto loop;
		/* XXX vp may not be retained */
		error = VOP_FSYNC(vp, waitfor, 0);
		if (error)
			allerror = error;
		vput(vp);
	}
	return (allerror);
}
Example #3
void
ieee80211_free_node(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	if (ni == ic->ic_bss)
		panic("freeing bss node");

	DPRINTF(("%s\n", ether_sprintf(ni->ni_macaddr)));
#ifndef IEEE80211_STA_ONLY
	timeout_del(&ni->ni_eapol_to);
	timeout_del(&ni->ni_sa_query_to);
	IEEE80211_AID_CLR(ni->ni_associd, ic->ic_aid_bitmap);
#endif
	RB_REMOVE(ieee80211_tree, &ic->ic_tree, ni);
	ic->ic_nnodes--;
#ifndef IEEE80211_STA_ONLY
	if (!IF_IS_EMPTY(&ni->ni_savedq)) {
		IF_PURGE(&ni->ni_savedq);
		if (ic->ic_set_tim != NULL)
			(*ic->ic_set_tim)(ic, ni->ni_associd, 0);
	}
#endif
	if (RB_EMPTY(&ic->ic_tree))
		ic->ic_inact_timer = 0;
	(*ic->ic_node_free)(ic, ni);
	/* TBD indicate to drivers that a new node can be allocated */
}
Example #4
void
ieee80211_setup_node(struct ieee80211com *ic,
	struct ieee80211_node *ni, const u_int8_t *macaddr)
{
	int s;

	DPRINTF(("%s\n", ether_sprintf((u_int8_t *)macaddr)));
	IEEE80211_ADDR_COPY(ni->ni_macaddr, macaddr);
	ieee80211_node_newstate(ni, IEEE80211_STA_CACHE);

	ni->ni_ic = ic;	/* back-pointer */
#ifndef IEEE80211_STA_ONLY
	IFQ_SET_MAXLEN(&ni->ni_savedq, IEEE80211_PS_MAX_QUEUE);
	timeout_set(&ni->ni_eapol_to, ieee80211_eapol_timeout, ni);
	timeout_set(&ni->ni_sa_query_to, ieee80211_sa_query_timeout, ni);
#endif

	/*
	 * Note we don't enable the inactive timer when acting
	 * as a station.  Nodes created in this mode represent
	 * AP's identified while scanning.  If we time them out
	 * then several things happen: we can't return the data
	 * to users to show the list of AP's we encountered, and
	 * more importantly, we'll incorrectly deauthenticate
	 * ourself because the inactivity timer will kick us off.
	 */
	s = splnet();
	if (ic->ic_opmode != IEEE80211_M_STA &&
	    RB_EMPTY(&ic->ic_tree))
		ic->ic_inact_timer = IEEE80211_INACT_WAIT;
	RB_INSERT(ieee80211_tree, &ic->ic_tree, ni);
	splx(s);
}
Example #5
enum cmd_retval
cmd_unbind_key_mode_table(struct cmd *self, struct cmd_q *cmdq, key_code key)
{
	struct args			*args = self->args;
	const char			*tablename;
	const struct mode_key_table	*mtab;
	struct mode_key_binding		*mbind, mtmp;

	tablename = args_get(args, 't');
	if ((mtab = mode_key_findtable(tablename)) == NULL) {
		cmdq_error(cmdq, "unknown key table: %s", tablename);
		return (CMD_RETURN_ERROR);
	}

	if (key == KEYC_UNKNOWN) {
		while (!RB_EMPTY(mtab->tree)) {
			mbind = RB_ROOT(mtab->tree);
			RB_REMOVE(mode_key_tree, mtab->tree, mbind);
			free(mbind);
		}
		return (CMD_RETURN_NORMAL);
	}

	mtmp.key = key;
	mtmp.mode = !!args_has(args, 'c');
	if ((mbind = RB_FIND(mode_key_tree, mtab->tree, &mtmp)) != NULL) {
		RB_REMOVE(mode_key_tree, mtab->tree, mbind);
		free(mbind);
	}
	return (CMD_RETURN_NORMAL);
}
Example #6
int
cmd_unbind_key_table(struct cmd *self, struct cmd_ctx *ctx, int key)
{
	struct args			*args = self->args;
	const char			*tablename;
	const struct mode_key_table	*mtab;
	struct mode_key_binding		*mbind, mtmp;

	tablename = args_get(args, 't');
	if ((mtab = mode_key_findtable(tablename)) == NULL) {
		ctx->error(ctx, "unknown key table: %s", tablename);
		return (-1);
	}

	if (key == KEYC_NONE) {
		while (!RB_EMPTY(mtab->tree)) {
			mbind = RB_ROOT(mtab->tree);
			RB_REMOVE(mode_key_tree, mtab->tree, mbind);
			xfree(mbind);
		}
		return (0);
	}

	mtmp.key = key;
	mtmp.mode = !!args_has(args, 'c');
	if ((mbind = RB_FIND(mode_key_tree, mtab->tree, &mtmp)) != NULL) {
		RB_REMOVE(mode_key_tree, mtab->tree, mbind);
		xfree(mbind);
	}
	return (0);
}
Example #7
void
pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
{
	struct pf_anchor	*parent;
	int			 i;

	while (ruleset != NULL) {
		if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL ||
		    !RB_EMPTY(&ruleset->anchor->children) ||
		    ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
		    ruleset->topen)
			return;
		for (i = 0; i < PF_RULESET_MAX; ++i)
			if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
			    !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
			    ruleset->rules[i].inactive.open)
				return;
		RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor);
		if ((parent = ruleset->anchor->parent) != NULL)
			RB_REMOVE(pf_anchor_node, &parent->children,
			    ruleset->anchor);
		rs_free(ruleset->anchor);
		if (parent == NULL)
			return;
		ruleset = &parent->ruleset;
	}
}
Example #8
int
cmd_unbind_key_exec(struct cmd *self, unused struct cmd_ctx *ctx)
{
	struct args		*args = self->args;
	struct key_binding	*bd;
	int			 key;

	if (!args_has(args, 'a')) {
		key = key_string_lookup_string(args->argv[0]);
		if (key == KEYC_NONE) {
			ctx->error(ctx, "unknown key: %s", args->argv[0]);
			return (-1);
		}
	} else
		key = KEYC_NONE;

	if (args_has(args, 't'))
		return (cmd_unbind_key_table(self, ctx, key));

	if (key == KEYC_NONE) {
		while (!RB_EMPTY(&key_bindings)) {
			bd = RB_ROOT(&key_bindings);
			key_bindings_remove(bd->key);
		}
		return (0);
	}

	if (!args_has(args, 'n'))
		key |= KEYC_PREFIX;
	key_bindings_remove(key);
	return (0);
}
Example #9
int ext4_block_cache_shake(struct ext4_blockdev *bdev)
{
	int r = EOK;
	struct ext4_buf *buf;
	if (bdev->bc->dont_shake)
		return EOK;

	bdev->bc->dont_shake = true;

	while (!RB_EMPTY(&bdev->bc->lru_root) &&
		ext4_bcache_is_full(bdev->bc)) {

		buf = ext4_buf_lowest_lru(bdev->bc);
		ext4_assert(buf);
		if (ext4_bcache_test_flag(buf, BC_DIRTY)) {
			r = ext4_block_flush_buf(bdev, buf);
			if (r != EOK)
				break;

		}

		ext4_bcache_drop_buf(bdev->bc, buf);
	}
	bdev->bc->dont_shake = false;
	return r;
}
Example #10
int
ct_match_rb_is_empty(struct ct_match *match)
{
	if (match->cm_mode != CT_MATCH_RB)
		CABORTX("match mode %d is not rb", match->cm_mode);
	return (RB_EMPTY(match->cm_rb_head));
}
Example #11
void
cfg_default_done(__unused struct cmd_q *cmdq)
{
	if (--cfg_references != 0)
		return;
	cfg_finished = 1;

	if (!RB_EMPTY(&sessions))
		cfg_show_causes(RB_MIN(sessions, &sessions));

	cmdq_free(cfg_cmd_q);
	cfg_cmd_q = NULL;

	if (cfg_client != NULL) {
		/*
		 * The client command queue starts with client_exit set to 1 so
		 * only continue if not empty (that is, we have been delayed
		 * during configuration parsing for long enough that the
		 * MSG_COMMAND has arrived), else the client will exit before
		 * the MSG_COMMAND which might tell it not to.
		 */
		if (!TAILQ_EMPTY(&cfg_client->cmdq->queue))
			cmdq_continue(cfg_client->cmdq);
		server_client_unref(cfg_client);
		cfg_client = NULL;
	}
}
Example #12
static VALUE
rb_revtree_empty_p(VALUE self)
{
	if (RB_EMPTY(rb_rcsfile_revs(self)))
		return Qtrue;
	else
		return Qfalse;
}
Example #13
void
hammer_destroy_dedup_cache(hammer_mount_t hmp)
{
	hammer_dedup_cache_t dcp;

	while ((dcp = TAILQ_FIRST(&hmp->dedup_lru_list)) != NULL) {
		RB_REMOVE(hammer_dedup_crc_rb_tree, &hmp->rb_dedup_crc_root, dcp);
		RB_REMOVE(hammer_dedup_off_rb_tree, &hmp->rb_dedup_off_root, dcp);
		TAILQ_REMOVE(&hmp->dedup_lru_list, dcp, lru_entry);
		--hmp->dedup_cache_count;
		kfree(dcp, hmp->m_misc);
	}

	KKASSERT(RB_EMPTY(&hmp->rb_dedup_crc_root));
	KKASSERT(RB_EMPTY(&hmp->rb_dedup_off_root));
	KKASSERT(TAILQ_EMPTY(&hmp->dedup_lru_list));

	KKASSERT(hmp->dedup_cache_count == 0);
}
Example #14
/* Destroy a job tree. */
void
job_tree_free(struct jobs *jobs)
{
	struct job	*job;

	while (!RB_EMPTY(jobs)) {
		job = RB_ROOT(jobs);
		RB_REMOVE(jobs, jobs, job);
		job_free(job);
	}
}
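The RB_EMPTY / RB_ROOT / RB_REMOVE loop above is the standard teardown idiom in these examples (environ_free and options_free below have the same shape). On a tree.h that provides RB_FOREACH_SAFE (FreeBSD, and recent OpenBSD), the same drain can be written as a removal-safe traversal; a sketch against the jobs tree above, assuming its generated tree name is jobs as the RB_REMOVE call suggests:

/* Drain a job tree with RB_FOREACH_SAFE instead of repeated RB_ROOT. */
void
job_tree_free_safe(struct jobs *jobs)
{
	struct job	*job, *tmp;

	RB_FOREACH_SAFE(job, jobs, jobs, tmp) {
		RB_REMOVE(jobs, jobs, job);
		job_free(job);
	}
}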
Example #15
static int
ffs_rawread_sync(struct vnode *vp)
{
	int error;

	/*
	 * Check for dirty mmap, pending writes and dirty buffers
	 */
	lwkt_gettoken(&vp->v_token);
	if (bio_track_active(&vp->v_track_write) ||
	    !RB_EMPTY(&vp->v_rbdirty_tree) ||
	    (vp->v_flag & VOBJDIRTY) != 0) {
		/* Attempt to msync mmap() regions to clean dirty mmap */ 
		if ((vp->v_flag & VOBJDIRTY) != 0) {
			struct vm_object *obj;
			if ((obj = vp->v_object) != NULL)
				vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
		}

		/* Wait for pending writes to complete */
		error = bio_track_wait(&vp->v_track_write, 0, 0);
		if (error != 0) {
			goto done;
		}
		/* Flush dirty buffers */
		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
			if ((error = VOP_FSYNC(vp, MNT_WAIT, 0)) != 0) {
				goto done;
			}
			if (bio_track_active(&vp->v_track_write) ||
			    !RB_EMPTY(&vp->v_rbdirty_tree))
				panic("ffs_rawread_sync: dirty bufs");
		}
	} else {
		error = 0;
	}
done:
	lwkt_reltoken(&vp->v_token);
	return error;
}
Example #16
/* Free an environment. */
void
environ_free(struct environ *env)
{
	struct environ_entry	*envent;

	while (!RB_EMPTY(env)) {
		envent = RB_ROOT(env);
		RB_REMOVE(environ, env, envent);
		free(envent->name);
		free(envent->value);
		free(envent);
	}
}
Example #17
void Voodoo80211Device::
ieee80211_free_node(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	if (ni == ic->ic_bss)
		panic("freeing bss node");
    
	DPRINTF(("%s\n", ether_sprintf(ni->ni_macaddr)));
	RB_REMOVE(ieee80211_tree, &ic->ic_tree, ni);
	ic->ic_nnodes--;
	if (RB_EMPTY(&ic->ic_tree))
		ic->ic_inact_timer = 0;
	ieee80211_node_free(ic, ni);
	/* TBD indicate to drivers that a new node can be allocated */
}
Example #18
void
options_free(struct options *oo)
{
	struct options_entry	*o;

	while (!RB_EMPTY(&oo->tree)) {
		o = RB_ROOT(&oo->tree);
		RB_REMOVE(options_tree, &oo->tree, o);
		free(o->name);
		if (o->type == OPTIONS_STRING)
			free(o->str);
		free(o);
	}
}
Example #19
/*
 * Removes the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to recheck conditions.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	ctx = vn_get_syncer(vp);

	lwkt_gettoken(&ctx->sc_token);

	if ((vp->v_flag & VONWORKLST) && RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
	}

	lwkt_reltoken(&ctx->sc_token);
}
Example #20
/*
 * Removes the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to recheck conditions.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if ((vp->v_flag & (VISDIRTY | VONWORKLST | VOBJDIRTY)) == VONWORKLST &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
	}

	lwkt_reltoken(&ctx->sc_token);
}
Example #21
/* ========================================================================= */
void AFE_Manager( void )
{   
   DelayCmd dc;
   if( afe.ext_op_delay != 0 ) {
      if( afe.ext_op_delay > 0 ) {
         if( --afe.ext_op_delay == 0 ) {
            AFE_SwitchExtAmplifier( true );
            afe.ext_op_on = KAL_TRUE;
         }
      }
      else { /* afe.ext_op_delay < 0 */
         if( ++afe.ext_op_delay == 0 ) {
            AFE_SwitchExtAmplifier( false );
            afe.ext_op_on = KAL_FALSE;
         }
      }
   }
   
   /*
    * RB_EMPTY/RB_GET here appear to be ring-buffer queue macros draining
    * the delayed register-write queue, not the <sys/tree.h> red-black
    * tree macros used in the other examples.
    */
   if( !RB_EMPTY( afe.regq ) ) {
      RB_GET( afe.regq, dc );
      *dc.addr = dc.val;
   }

   if( afe.refresh )
   {
      afe.refresh = KAL_FALSE;
      L1Audio_LSetEvent(afe.aud_id, NULL);
   }
   /*
   if(afe.v8k_off_request)
   {
      *MD2GSYS_CG_SET2 = PDN_CON2_VAFE;
      *AFE_VMCU_CON  &= ~0x0001;
      afe.v8k_off_request = KAL_FALSE;
		L1Audio_Msg_AFE_Switch( L1AUDIO_Str_onoff(0), AFE_Switch_Name(0) );
   }
   
   if(afe.aClk_off_request)
   {
      *MD2GSYS_CG_SET2 = PDN_CON2_AAFE;
      *AFE_AMCU_CON0  &= ~0x0001;
      afe.aClk_off_request = KAL_FALSE;
   }
   */
}
Example #22
int
ct_rb_match(struct ct_match_tree *head, char *file)
{
	struct ct_match_node	*n, nfind;

	if (RB_EMPTY(head))
		return (1); /* no pattern means nothing matches */

	nfind.cmn_string = file;
	n = RB_FIND(ct_match_tree, head, &nfind);
	if (n == NULL)
		return (1);
	RB_REMOVE(ct_match_tree, head, n);
	e_free(&n->cmn_string);
	e_free(&n);

	return (0);
}
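Note the consume-on-match semantics: a hit returns 0 and also removes and frees the matched pattern node, so each pattern matches at most once. A hypothetical caller sketch, where process_file() is invented for illustration:

/* Process only the files that still match a remaining pattern. */
void
ct_rb_match_files(struct ct_match_tree *head, char **files, int nfiles)
{
	int	i;

	for (i = 0; i < nfiles; i++) {
		if (ct_rb_match(head, files[i]) == 0)
			process_file(files[i]);
	}
}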
Example #23
static void
ieee80211_free_node(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	if (ni == ic->ic_bss)
		panic("freeing bss node");

	IEEE80211_DPRINTF(("%s %s\n", __func__, ether_sprintf(ni->ni_macaddr)));
	IEEE80211_AID_CLR(ni->ni_associd, ic->ic_aid_bitmap);
	RB_REMOVE(ieee80211_tree, &ic->ic_tree, ni);
	ic->ic_nnodes--;
	if (!IF_IS_EMPTY(&ni->ni_savedq)) {
		IF_PURGE(&ni->ni_savedq);
		if (ic->ic_set_tim)
			(*ic->ic_set_tim)(ic, ni->ni_associd, 0);
	}
	if (RB_EMPTY(&ic->ic_tree))
		ic->ic_inact_timer = 0;
	(*ic->ic_node_free)(ic, ni);
	/* TBD indicate to drivers that a new node can be allocated */
}
Example #24
enum cmd_retval
cmd_unbind_key_exec(struct cmd *self, struct cmd_q *cmdq)
{
	struct args		*args = self->args;
	struct key_binding	*bd;
	int			 key;

	if (!args_has(args, 'a')) {
		if (args->argc != 1) {
			cmdq_error(cmdq, "missing key");
			return (CMD_RETURN_ERROR);
		}
		key = key_string_lookup_string(args->argv[0]);
		if (key == KEYC_NONE) {
			cmdq_error(cmdq, "unknown key: %s", args->argv[0]);
			return (CMD_RETURN_ERROR);
		}
	} else {
		if (args->argc != 0) {
			cmdq_error(cmdq, "key given with -a");
			return (CMD_RETURN_ERROR);
		}
		key = KEYC_NONE;
	}

	if (args_has(args, 't'))
		return (cmd_unbind_key_table(self, cmdq, key));

	if (key == KEYC_NONE) {
		while (!RB_EMPTY(&key_bindings)) {
			bd = RB_ROOT(&key_bindings);
			key_bindings_remove(bd->key);
		}
		return (CMD_RETURN_NORMAL);
	}

	if (!args_has(args, 'n'))
		key |= KEYC_PREFIX;
	key_bindings_remove(key);
	return (CMD_RETURN_NORMAL);
}
Example #25
/* Unlock socket and remove it if it has no references. */
static void
net2_udpsock_unlock(struct net2_udpsocket *sock)
{
	int		do_rm;
	int		want_err;

	do_rm = RB_EMPTY(&sock->conns) &&
	    sock->refcnt == 0; /* XXX and no acceptor fn */
	net2_mutex_unlock(sock->guard);

	if (do_rm) {
		want_err = net2_workq_want(sock->workq, 0);
		assert(want_err == 0 || want_err == EDEADLK);
		net2_workq_io_destroy(sock->ev);
		net2_mutex_free(sock->guard);
		if (want_err == 0)
			net2_workq_unwant(sock->workq);
		net2_workq_release(sock->workq);
		net2_free(sock);
	}
}
Example #26
static int
ramstat_iter(void **iter, char **name, struct stat_value *val)
{
	struct ramstat_entry *np;

	log_trace(TRACE_STAT, "ramstat: iter");
	if (RB_EMPTY(&stats))
		return 0;

	if (*iter == NULL)
		np = RB_ROOT(&stats);
	else
		np = RB_NEXT(stats_tree, &stats, *iter);

	*iter = np;
	if (np == NULL)
		return 0;

	*name = np->key;
	*val  = np->value;
	return 1;
}
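The opaque cursor lives in *iter: RB_ROOT starts the walk, RB_NEXT advances it, and the function returns 0 once the tree is empty or exhausted. A hypothetical driver loop, reusing log_trace from the snippet above:

/* Walk every counter in the stats tree, logging each key. */
static void
ramstat_dump_all(void)
{
	void			*iter = NULL;
	char			*name;
	struct stat_value	 val;

	while (ramstat_iter(&iter, &name, &val))
		log_trace(TRACE_STAT, "ramstat: %s", name);
}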
Example #27
int
cmd_source_file_exec(struct cmd *self, struct cmd_ctx *ctx)
{
	struct args		*args = self->args;
	struct causelist	 causes;
	char			*cause;
	struct window_pane	*wp;
	int			 retval;
	u_int			 i;

	ARRAY_INIT(&causes);

	retval = load_cfg(args->argv[0], ctx, &causes);
	if (ARRAY_EMPTY(&causes))
		return (retval);

	if (retval == 1 && !RB_EMPTY(&sessions) && ctx->cmdclient != NULL) {
		wp = RB_MIN(sessions, &sessions)->curw->window->active;
		window_pane_set_mode(wp, &window_copy_mode);
		window_copy_init_for_output(wp);
		for (i = 0; i < ARRAY_LENGTH(&causes); i++) {
			cause = ARRAY_ITEM(&causes, i);
			window_copy_add(wp, "%s", cause);
			xfree(cause);
		}
	} else {
		for (i = 0; i < ARRAY_LENGTH(&causes); i++) {
			cause = ARRAY_ITEM(&causes, i);
			ctx->print(ctx, "%s", cause);
			xfree(cause);
		}
	}
	ARRAY_FREE(&causes);

	return (retval);
}
Example #28
/*
 * hpfs_fsync(struct vnode *a_vp, int a_waitfor)
 */
static int
hpfs_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp = ap->a_vp;

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
#ifdef DIAGNOSTIC
loop:
#endif
	vfsync(vp, ap->a_waitfor, 0, NULL, NULL);
#ifdef DIAGNOSTIC
	if (ap->a_waitfor == MNT_WAIT && !RB_EMPTY(&vp->v_rbdirty_tree)) {
		vprint("hpfs_fsync: dirty", vp);
		goto loop;
	}
#endif

	/*
	 * Write out the on-disc version of the vnode.
	 */
	return hpfs_update(VTOHP(vp));
}
Example #29
static void
ieee80211_setup_node(struct ieee80211com *ic,
	struct ieee80211_node *ni, u_int8_t *macaddr)
{
	IEEE80211_DPRINTF(("%s %s\n", __func__, ether_sprintf(macaddr)));
	IEEE80211_ADDR_COPY(ni->ni_macaddr, macaddr);
	ieee80211_node_newstate(ni, IEEE80211_STA_CACHE);
	IEEE80211_NODE_LOCK_BH(ic);

	/* 
	 * Note we don't enable the inactive timer when acting
	 * as a station.  Nodes created in this mode represent
	 * AP's identified while scanning.  If we time them out
	 * then several things happen: we can't return the data
	 * to users to show the list of AP's we encountered, and
	 * more importantly, we'll incorrectly deauthenticate
	 * ourself because the inactivity timer will kick us off.
	 */
	if (ic->ic_opmode != IEEE80211_M_STA &&
	    RB_EMPTY(&ic->ic_tree))
		ic->ic_inact_timer = IEEE80211_INACT_WAIT;
	RB_INSERT(ieee80211_tree, &ic->ic_tree, ni);
	IEEE80211_NODE_UNLOCK_BH(ic);
}
Example #30
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred)
{
	struct vnode *ovp = vp;
	ufs_daddr_t lastblock;
	struct inode *oip;
	ufs_daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	ufs_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	struct fs *fs;
	struct buf *bp;
	int offset, size, level;
	long count, nblocks, blocksreleased = 0;
	int i;
	int aflags, error, allerror;
	off_t osize;

	oip = VTOI(ovp);
	fs = oip->i_fs;
	if (length < 0)
		return (EINVAL);
	if (length > fs->fs_maxfilesize)
		return (EFBIG);
	if (ovp->v_type == VLNK &&
	    (oip->i_size < ovp->v_mount->mnt_maxsymlinklen || oip->i_din.di_blocks == 0)) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif /* DIAGNOSTIC */
		bzero((char *)&oip->i_shortlink, (uint)oip->i_size);
		oip->i_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(ovp, 1));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(ovp, 0));
	}
	if (fs->fs_ronly)
		panic("ffs_truncate: read-only filesystem");
#ifdef QUOTA
	error = ufs_getinoquota(oip);
	if (error)
		return (error);
#endif
	ovp->v_lasta = ovp->v_clen = ovp->v_cstart = ovp->v_lastw = 0;
	if (DOINGSOFTDEP(ovp)) {
		if (length > 0 || softdep_slowdown(ovp)) {
			/*
			 * If a file is only partially truncated, then
			 * we have to clean up the data structures
			 * describing the allocation past the truncation
			 * point. Finding and deallocating those structures
			 * is a lot of work. Since partial truncation occurs
			 * rarely, we solve the problem by syncing the file
			 * so that it will have no data structures left.
			 */
			if ((error = VOP_FSYNC(ovp, MNT_WAIT, 0)) != 0)
				return (error);
		} else {
#ifdef QUOTA
			(void) ufs_chkdq(oip, -oip->i_blocks, NOCRED, 0);
#endif
			softdep_setup_freeblocks(oip, length);
			vinvalbuf(ovp, 0, 0, 0);
			nvnode_pager_setsize(ovp, 0, fs->fs_bsize, 0);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (ffs_update(ovp, 0));
		}
	}
	osize = oip->i_size;

	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 *
	 * nvextendbuf() only breads the old buffer.  The blocksize
	 * of the new buffer must be specified so it knows how large
	 * to make the VM object.
	 */
	if (osize < length) {
		nvextendbuf(vp, osize, length,
			    blkoffsize(fs, oip, osize),	/* oblksize */
			    blkoffresize(fs, length),	/* nblksize */
			    blkoff(fs, osize),
			    blkoff(fs, length),
			    0);

		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		/* BALLOC will reallocate the fragment at the old EOF */
		error = VOP_BALLOC(ovp, length - 1, 1, cred, aflags, &bp);
		if (error)
			return (error);
		oip->i_size = length;
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(ovp, 1));
	}

	/*
	 * Shorten the size of the file.
	 *
	 * NOTE: The block size specified in nvtruncbuf() is the blocksize
	 *	 of the buffer containing length prior to any reallocation
	 *	 of the block.
	 */
	allerror = nvtruncbuf(ovp, length, blkoffsize(fs, oip, length),
			      blkoff(fs, length), 0);
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		error = VOP_BALLOC(ovp, length - 1, 1, cred, aflags, &bp);
		if (error)
			return (error);

		/*
		 * When we are doing soft updates and the UFS_BALLOC
		 * above fills in a direct block hole with a full sized
		 * block that will be truncated down to a fragment below,
		 * we must flush out the block dependency with an FSYNC
		 * so that we do not get a soft updates inconsistency
		 * when we create the fragment below.
		 *
		 * nvtruncbuf() may have re-dirtied the underlying block
		 * as part of its truncation zeroing code.  To avoid a
		 * 'locking against myself' panic in the second fsync we
		 * can simply undirty the bp since the redirtying was
		 * related to areas of the buffer that we are going to
		 * throw away anyway, and we will b*write() the remainder
		 * anyway down below.
		 */
		if (DOINGSOFTDEP(ovp) && lbn < NDADDR &&
		    fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize) {
			bundirty(bp);
			error = VOP_FSYNC(ovp, MNT_WAIT, 0);
			if (error) {
				bdwrite(bp);
				return (error);
			}
		}
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
#if 0
		/* remove - nvtruncbuf deals with this */
		if (ovp->v_type != VDIR)
			bzero((char *)bp->b_data + offset,
			    (uint)(size - offset));
#endif
		/* Kirk's code has reallocbuf(bp, size, 1) here */
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);

	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks);
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	error = ffs_update(ovp, 1);
	if (error && allerror == 0)
		allerror = error;
	
	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks);
	bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
	oip->i_size = osize;

	if (error && allerror == 0)
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ffs_blkfree(oip, bn, fs->fs_bsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ffs_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != oip->i_ib[level])
			panic("ffs_truncate1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("ffs_truncate2");
	if (length == 0 && !RB_EMPTY(&ovp->v_rbdirty_tree))
		panic("ffs_truncate3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	oip->i_blocks -= blocksreleased;

	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	(void) ufs_chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}