Example #1
/*
 * A buffer is written to the snapshotted block device. Copy to
 * backing store if needed.
 */
static int
fss_copy_on_write(void *v, struct buf *bp, bool data_valid)
{
	int error;
	u_int32_t cl, ch, c;
	struct fss_softc *sc = v;

	mutex_enter(&sc->sc_slock);
	if (!FSS_ISVALID(sc)) {
		mutex_exit(&sc->sc_slock);
		return 0;
	}

	cl = FSS_BTOCL(sc, dbtob(bp->b_blkno));
	ch = FSS_BTOCL(sc, dbtob(bp->b_blkno)+bp->b_bcount-1);
	error = 0;
	if (curlwp == uvm.pagedaemon_lwp) {
		for (c = cl; c <= ch; c++)
			if (isclr(sc->sc_copied, c)) {
				error = ENOMEM;
				break;
			}
	}
	mutex_exit(&sc->sc_slock);

	if (error == 0)
		for (c = cl; c <= ch; c++) {
			error = fss_read_cluster(sc, c);
			if (error)
				break;
		}

	return error;
}
Example #2
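/*
 * Check that the given disk blocks fall inside the data area of
 * their cylinder group and warn if any of them is already marked
 * in use in the cg free-block bitmap.
 */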
int chkuse( daddr_t blkno, int cnt ) {
	int cg;
	daddr_t fsbn, bn;
	fsbn = dbtofsb( fs, blkno );
	if ( (unsigned) ( fsbn + cnt ) > fs->fs_size ) {
		printf( "block %ld out of range of file system\n", (long) blkno );
		return ( 1 );
	}
	cg = dtog( fs, fsbn );
	if ( fsbn < cgdmin( fs, cg ) ) {
		if ( cg == 0 || ( fsbn + cnt ) > cgsblock( fs, cg ) ) {
			printf( "block %ld in non-data area: cannot attach\n", (long) blkno );
			return ( 1 );
		}
	} else {
		if ( ( fsbn + cnt ) > cgbase( fs, cg + 1 ) ) {
			printf( "block %ld in non-data area: cannot attach\n", (long) blkno );
			return ( 1 );
		}
	}
	if ( cgread1( &disk, cg ) != 1 ) {
		fprintf( stderr, "cg %d: could not be read\n", cg );
		errs++;
		return ( 1 );
	}
	if ( !cg_chkmagic( &acg ) ) {
		fprintf( stderr, "cg %d: bad magic number\n", cg );
		errs++;
		return ( 1 );
	}
	bn = dtogd( fs, fsbn );
	if ( isclr( cg_blksfree( &acg ), bn ) ) printf( "Warning: sector %ld is in use\n", (long) blkno );
	return ( 0 );
}
Example #3
/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *	  inode in the specified cylinder group.
 */
static daddr_t
ext2fs_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode)
{
	struct m_ext2fs *fs;
	char *ibp;
	struct buf *bp;
	int error, start, len, loc, map, i;

	ipref--; /* to avoid a lot of (ipref -1) */
	if (ipref == -1)
		ipref = 0;
	fs = ip->i_e2fs;
	if (fs->e2fs_gd[cg].ext2bgd_nifree == 0)
		return (0);
	error = bread(ip->i_devvp, fsbtodb(fs,
		fs->e2fs_gd[cg].ext2bgd_i_bitmap),
		(int)fs->e2fs_bsize, NOCRED, B_MODIFY, &bp);
	if (error) {
		brelse(bp, 0);
		return (0);
	}
	ibp = (char *)bp->b_data;
	if (ipref) {
		ipref %= fs->e2fs.e2fs_ipg;
		if (isclr(ibp, ipref))
			goto gotit;
	}
	start = ipref / NBBY;
	len = howmany(fs->e2fs.e2fs_ipg - ipref, NBBY);
	loc = skpc(0xff, len, &ibp[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &ibp[0]);
		if (loc == 0) {
			printf("cg = %d, ipref = %lld, fs = %s\n",
				cg, (long long)ipref, fs->e2fs_fsmnt);
			panic("ext2fs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = ibp[i] ^ 0xff;
	if (map == 0) {
		printf("fs = %s\n", fs->e2fs_fsmnt);
		panic("ext2fs_nodealloccg: block not in map");
	}
	ipref = i * NBBY + ffs(map) - 1;
gotit:
	setbit(ibp, ipref);
	fs->e2fs.e2fs_ficount--;
	fs->e2fs_gd[cg].ext2bgd_nifree--;
	fs->e2fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		fs->e2fs_gd[cg].ext2bgd_ndirs++;
	}
	bdwrite(bp);
	return (cg * fs->e2fs.e2fs_ipg + ipref + 1);
}
Example #4
int
spec_open_clone(struct vop_open_args *ap)
{
	struct vnode *cvp, *vp = ap->a_vp;
	struct cloneinfo *cip;
	int error, i;

	DNPRINTF("cloning vnode\n");

	if (minor(vp->v_rdev) >= (1 << CLONE_SHIFT))
		return (ENXIO);

	for (i = 1; i < sizeof(vp->v_specbitmap) * NBBY; i++)
		if (isclr(vp->v_specbitmap, i)) {
			setbit(vp->v_specbitmap, i);
			break;
		}

	if (i == sizeof(vp->v_specbitmap) * NBBY)
		return (EBUSY); /* too many open instances */

	error = cdevvp(makedev(major(vp->v_rdev),
	    (i << CLONE_SHIFT) | minor(vp->v_rdev)), &cvp);
	if (error) {
		clrbit(vp->v_specbitmap, i);
		return (error); /* out of vnodes */
	}

	VOP_UNLOCK(vp, 0, ap->a_p);

	error = cdevsw[major(vp->v_rdev)].d_open(cvp->v_rdev, ap->a_mode,
	    S_IFCHR, ap->a_p);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p);

	if (error) {
		vput(cvp);
		clrbit(vp->v_specbitmap, i);
		return (error); /* device open failed */
	}

	cvp->v_flag |= VCLONE;

	cip = malloc(sizeof(struct cloneinfo), M_TEMP, M_WAITOK);
	cip->ci_data = vp->v_data;
	cip->ci_vp = cvp;

	cvp->v_specparent = vp;
	vp->v_flag |= VCLONED;
	vp->v_data = cip;

	DNPRINTF("clone of vnode %p is vnode %p\n", vp, cvp);

	return (0); /* device cloned */
}
Example #5
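/*
 * Return a pointer to the on-disk inode for "ino", reading a whole
 * block of inodes at a time and caching it between calls.  For UFS2
 * the cylinder group is also cached so unallocated inodes can be
 * detected and returned zeroed.  A negative fd flushes the cache.
 */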
static union dinode *
get_inode(int fd, struct fs *super, ino_t ino)
{
	static caddr_t ipbuf;
	static struct cg *cgp;
	static ino_t last;
	static int cg;
	struct ufs2_dinode *di2;

	if (fd < 0) {		/* flush cache */
		if (ipbuf) {
			free(ipbuf);
			ipbuf = NULL;
			if (super != NULL && super->fs_magic == FS_UFS2_MAGIC) {
				free(cgp);
				cgp = NULL;
			}
		}
		return 0;
	}

	if (!ipbuf || ino < last || ino >= last + INOCNT(super)) {
		if (super->fs_magic == FS_UFS2_MAGIC &&
		    (!cgp || cg != ino_to_cg(super, ino))) {
			cg = ino_to_cg(super, ino);
			if (!cgp && !(cgp = malloc(super->fs_cgsize)))
				errx(1, "allocate cg");
			if (pread(fd, cgp, super->fs_cgsize,
			    (off_t)cgtod(super, cg) << super->fs_fshift)
			    != super->fs_cgsize)
				err(1, "read cg");
			if (!cg_chkmagic(cgp))
				errx(1, "cg has bad magic");
		}
		if (!ipbuf && !(ipbuf = malloc(INOSZ(super))))
			err(1, "allocate inodes");
		last = (ino / INOCNT(super)) * INOCNT(super);
		if (lseek(fd, (off_t)ino_to_fsba(super, last)
		    << super->fs_fshift, SEEK_SET) < 0 ||
		    read(fd, ipbuf, INOSZ(super)) != INOSZ(super)) {
			err(1, "read inodes");
		}
	}

	if (super->fs_magic == FS_UFS1_MAGIC)
		return ((union dinode *)
		    &((struct ufs1_dinode *)ipbuf)[ino % INOCNT(super)]);
	di2 = &((struct ufs2_dinode *)ipbuf)[ino % INOCNT(super)];
	/* If the inode is unused, it might be unallocated too, so zero it. */
	if (isclr(cg_inosused(cgp), ino % super->fs_ipg))
		memset(di2, 0, sizeof(*di2));
	return ((union dinode *)di2);
}
Example #6
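/*
 * Print every allocated slot run in a page: a run starts at a bit set
 * in page->ptr and extends over the following bits that are set in
 * page->index but clear in page->ptr.
 */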
void printpage(struct page *page, FILE *fp) {
	struct slot slot = { 0 };
	unsigned end;

	while (slot.base < SLOTCOUNT) {
		while (isclr(page->ptr, slot.base)) {
			if (++slot.base >= SLOTCOUNT)
				return;
		}

		for (end = slot.base + 1; end < SLOTCOUNT && isset(page->index, end) && isclr(page->ptr, end); end++)
			;

		slot.count = end - slot.base;

		fprintf(fp, "(%u:%u)\n", slot.base, slot.count);
		printslot(page, &slot, fp);

		slot.base += slot.count;
	}
} /* printpage() */
Example #7
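/*
 * Find the next run of clear bits in page->index at or after *next,
 * report it through *slot and advance *next past the run.
 */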
static void scan(struct page *page, struct slot *slot, unsigned *next) {
	unsigned base, end;

	for (base = *next; base < SLOTCOUNT && isset(page->index, base); base++)
		;

	for (end = base; end < SLOTCOUNT && isclr(page->index, end); end++)
		;

	*next = end;

	slot->base  = base;
	slot->count = end - base;
} /* scan() */
Example #8
/*
 * Free an inode.
 *
 */
int
ext2_vfree(struct vnode *pvp, ino_t ino, int mode)
{
    struct m_ext2fs *fs;
    struct inode *pip;
    struct buf *bp;
    struct ext2mount *ump;
    int error, cg;
    char * ibp;

    pip = VTOI(pvp);
    fs = pip->i_e2fs;
    ump = pip->i_ump;
    if ((u_int)ino > fs->e2fs_ipg * fs->e2fs_gcount)
        panic("ext2_vfree: range: devvp = %p, ino = %ju, fs = %s",
              pip->i_devvp, (uintmax_t)ino, fs->e2fs_fsmnt);

    cg = ino_to_cg(fs, ino);
    error = bread(pip->i_devvp,
                  fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_i_bitmap),
                  (int)fs->e2fs_bsize, NOCRED, &bp);
    if (error) {
        brelse(bp);
        return (0);
    }
    ibp = (char *)bp->b_data;
    ino = (ino - 1) % fs->e2fs->e2fs_ipg;
    if (isclr(ibp, ino)) {
        printf("ino = %llu, fs = %s\n",
               (unsigned long long)ino, fs->e2fs_fsmnt);
        if (fs->e2fs_ronly == 0)
            panic("ext2_vfree: freeing free inode");
    }
    clrbit(ibp, ino);
    EXT2_LOCK(ump);
    fs->e2fs->e2fs_ficount++;
    fs->e2fs_gd[cg].ext2bgd_nifree++;
    if ((mode & IFMT) == IFDIR) {
        fs->e2fs_gd[cg].ext2bgd_ndirs--;
        fs->e2fs_total_dir--;
    }
    fs->e2fs_fmod = 1;
    EXT2_UNLOCK(ump);
    bdwrite(bp);
    return (0);
}
Example #9
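/*
 * Mark the current channel as radar-disturbed and return the next
 * active channel that is unused or whose disturbance is older than
 * RADAR_CHANNEL_REUSE_LIMIT.  If every channel is blocked, arm the
 * reanimation timer and return NULL.
 */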
struct ieee80211_channel *
radar_handle_interference(struct ieee80211com *ic)
{
    struct ieee80211_channel *c;

    int index = 0;
    int chan;
    u_int32_t firstTimeout = jiffies;

    /* Mark current channel as having radar on it */
    chan = ic->ic_ibss_chan - ic->ic_channels;

    printk ("%s: Marking channel %d as radar disturbed, time=%lu.\n", __func__, chan, jiffies);

    if (ic->ic_ibss_chan != NULL)
	ic->ic_channelList[chan] = jiffies;

    /* Find next appropriate channel */ 
    while (index < IEEE80211_CHAN_MAX) {
		if (!isclr(ic->ic_chan_active, index) &&
		    (ic->ic_channelList[index] == 0 || 
		    (jiffies - ic->ic_channelList[index]) > RADAR_CHANNEL_REUSE_LIMIT)) {
			printk ("%s: Hopping to channel %d (%u).\n", 
				ic->ic_dev->name, index, ic->ic_channelList[index]);
			break;
		}
		/* channel blocked */
		if((ic->ic_channelList[index] != 0) &&
		    (ic->ic_channelList[index] < firstTimeout)){
			firstTimeout = ic->ic_channelList[index];
		}
		index ++;
    }
    if (index == IEEE80211_CHAN_MAX) {
		/* No channels are available */
		printk("%s: all channels blocked, first timeout=%i\n", 
		       ic->ic_dev->name, firstTimeout + RADAR_CHANNEL_REUSE_LIMIT);
		ic->ic_radar_reanimate.expires = firstTimeout + RADAR_CHANNEL_REUSE_LIMIT;
		add_timer(&ic->ic_radar_reanimate);
		return NULL;
    }
    c = &ic->ic_channels[index];

    return c;
}
Example #10
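/*
 * Allocate the first free inode in the currently loaded cylinder
 * group.  On UFS2, initialize the next block of on-disk inodes first
 * if the chosen inode lies beyond the initialized area, then mark the
 * inode used and update the free-inode counters.
 */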
ino_t
cgialloc(struct uufsd *disk)
{
	struct ufs2_dinode *dp2;
	u_int8_t *inosused;
	struct cg *cgp;
	struct fs *fs;
	ino_t ino;
	int i;

	fs = &disk->d_fs;
	cgp = &disk->d_cg;
	inosused = cg_inosused(cgp);
	for (ino = 0; ino < fs->fs_ipg; ino++)
		if (isclr(inosused, ino))
			goto gotit;
	return (0);
gotit:
	if (fs->fs_magic == FS_UFS2_MAGIC &&
	    ino + INOPB(fs) > cgp->cg_initediblk &&
	    cgp->cg_initediblk < cgp->cg_niblk) {
		char block[MAXBSIZE];
		bzero(block, (int)fs->fs_bsize);
		dp2 = (struct ufs2_dinode *)&block;
		for (i = 0; i < INOPB(fs); i++) {
			dp2->di_gen = arc4random() / 2 + 1;
			dp2++;
		}
		if (bwrite(disk, ino_to_fsba(fs,
		    cgp->cg_cgx * fs->fs_ipg + cgp->cg_initediblk),
		    block, fs->fs_bsize))
			return (0);
		cgp->cg_initediblk += INOPB(fs);
	}

	setbit(inosused, ino);
	cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nifree--;
	fs->fs_fmod = 1;

	return (ino + (cgp->cg_cgx * fs->fs_ipg));
}
Example #11
/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
int
ext2fs_vfree(struct vnode *pvp, ino_t ino, int mode)
{
	struct m_ext2fs *fs;
	char *ibp;
	struct inode *pip;
	struct buf *bp;
	int error, cg;

	pip = VTOI(pvp);
	fs = pip->i_e2fs;
	if ((u_int)ino > fs->e2fs.e2fs_icount || (u_int)ino < EXT2_FIRSTINO)
		panic("ifree: range: dev = 0x%llx, ino = %llu, fs = %s",
		    (unsigned long long)pip->i_dev, (unsigned long long)ino,
		    fs->e2fs_fsmnt);
	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp,
		fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_i_bitmap),
		(int)fs->e2fs_bsize, NOCRED, B_MODIFY, &bp);
	if (error) {
		brelse(bp, 0);
		return (0);
	}
	ibp = (char *)bp->b_data;
	ino = (ino - 1) % fs->e2fs.e2fs_ipg;
	if (isclr(ibp, ino)) {
		printf("dev = 0x%llx, ino = %llu, fs = %s\n",
		    (unsigned long long)pip->i_dev,
		    (unsigned long long)ino, fs->e2fs_fsmnt);
		if (fs->e2fs_ronly == 0)
			panic("ifree: freeing free inode");
	}
	clrbit(ibp, ino);
	fs->e2fs.e2fs_ficount++;
	fs->e2fs_gd[cg].ext2bgd_nifree++;
	if ((mode & IFMT) == IFDIR) {
		fs->e2fs_gd[cg].ext2bgd_ndirs--;
	}
	fs->e2fs_fmod = 1;
	bdwrite(bp);
	return (0);
}
Example #12
/*
 * Free a block or fragment.
 *
 */
void
ext2_blkfree(struct inode *ip, e4fs_daddr_t bno, long size)
{
    struct m_ext2fs *fs;
    struct buf *bp;
    struct ext2mount *ump;
    int cg, error;
    char *bbp;

    fs = ip->i_e2fs;
    ump = ip->i_ump;
    cg = dtog(fs, bno);
    if ((u_int)bno >= fs->e2fs->e2fs_bcount) {
        printf("bad block %lld, ino %llu\n", (long long)bno,
               (unsigned long long)ip->i_number);
        ext2_fserr(fs, ip->i_uid, "bad block");
        return;
    }
    error = bread(ip->i_devvp,
                  fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
                  (int)fs->e2fs_bsize, NOCRED, &bp);
    if (error) {
        brelse(bp);
        return;
    }
    bbp = (char *)bp->b_data;
    bno = dtogd(fs, bno);
    if (isclr(bbp, bno)) {
        printf("block = %lld, fs = %s\n",
               (long long)bno, fs->e2fs_fsmnt);
        panic("ext2_blkfree: freeing free block");
    }
    clrbit(bbp, bno);
    EXT2_LOCK(ump);
    ext2_clusteracct(fs, bbp, cg, bno, 1);
    fs->e2fs->e2fs_fbcount++;
    fs->e2fs_gd[cg].ext2bgd_nbfree++;
    fs->e2fs_fmod = 1;
    EXT2_UNLOCK(ump);
    bdwrite(bp);
}
Example #13
/*
 * Free a block.
 *
 * The specified block is placed back in the
 * free map.
 */
void
ext2fs_blkfree(struct inode *ip, daddr_t bno)
{
	struct m_ext2fs *fs;
	char *bbp;
	struct buf *bp;
	int error, cg;

	fs = ip->i_e2fs;
	cg = dtog(fs, bno);
	if ((u_int)bno >= fs->e2fs.e2fs_bcount) {
		printf("bad block %lld, ino %llu\n", (long long)bno,
		    (unsigned long long)ip->i_number);
		ext2fs_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp,
		fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
		(int)fs->e2fs_bsize, NOCRED, B_MODIFY, &bp);
	if (error) {
		brelse(bp, 0);
		return;
	}
	bbp = (char *)bp->b_data;
	bno = dtogd(fs, bno);
	if (isclr(bbp, bno)) {
		printf("dev = 0x%llx, block = %lld, fs = %s\n",
		    (unsigned long long)ip->i_dev, (long long)bno,
		    fs->e2fs_fsmnt);
		panic("blkfree: freeing free block");
	}
	clrbit(bbp, bno);
	fs->e2fs.e2fs_fbcount++;
	fs->e2fs_gd[cg].ext2bgd_nbfree++;

	fs->e2fs_fmod = 1;
	bdwrite(bp);
}
Example #14
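/*
 * Dump the hardware key cache: print the MAC address, key type and raw
 * key material of every valid entry.  When hardware MIC is enabled,
 * TKIP entries mark their paired MIC slot (64 entries higher) in the
 * ismic bitmap so that slot is labelled "MIC" instead of a key type.
 */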
static void
ath_hal_dumpkeycache(FILE *fd, int nkeys)
{
	static const char *keytypenames[] = {
		"WEP-40", 	/* AR_KEYTABLE_TYPE_40 */
		"WEP-104",	/* AR_KEYTABLE_TYPE_104 */
		"#2",
		"WEP-128",	/* AR_KEYTABLE_TYPE_128 */
		"TKIP",		/* AR_KEYTABLE_TYPE_TKIP */
		"AES-OCB",	/* AR_KEYTABLE_TYPE_AES */
		"AES-CCM",	/* AR_KEYTABLE_TYPE_CCM */
		"CLR",		/* AR_KEYTABLE_TYPE_CLR */
	};
	int micEnabled = SREV(state.revs.ah_macVersion, state.revs.ah_macRev) < SREV(4,8) ? 0 :
	       OS_REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_CRPT_MIC_ENABLE;
	u_int8_t mac[IEEE80211_ADDR_LEN];
	u_int8_t ismic[128/NBBY];
	int entry;
	int first = 1;

	memset(ismic, 0, sizeof(ismic));
	for (entry = 0; entry < nkeys; entry++) {
		u_int32_t macLo, macHi, type;
		u_int32_t key0, key1, key2, key3, key4;

		macHi = OS_REG_READ(ah, AR_KEYTABLE_MAC1(entry));
		if ((macHi & AR_KEYTABLE_VALID) == 0 && isclr(ismic, entry))
			continue;
		macLo = OS_REG_READ(ah, AR_KEYTABLE_MAC0(entry));
		macHi <<= 1;
		if (macLo & (1<<31))
			macHi |= 1;
		macLo <<= 1;
		mac[4] = macHi & 0xff;
		mac[5] = macHi >> 8;
		mac[0] = macLo & 0xff;
		mac[1] = macLo >> 8;
		mac[2] = macLo >> 16;
		mac[3] = macLo >> 24;
		type = OS_REG_READ(ah, AR_KEYTABLE_TYPE(entry));
		if ((type & 7) == AR_KEYTABLE_TYPE_TKIP && micEnabled)
			setbit(ismic, entry+64);
		key0 = OS_REG_READ(ah, AR_KEYTABLE_KEY0(entry));
		key1 = OS_REG_READ(ah, AR_KEYTABLE_KEY1(entry));
		key2 = OS_REG_READ(ah, AR_KEYTABLE_KEY2(entry));
		key3 = OS_REG_READ(ah, AR_KEYTABLE_KEY3(entry));
		key4 = OS_REG_READ(ah, AR_KEYTABLE_KEY4(entry));
		if (first) {
			fprintf(fd, "\n");
			first = 0;
		}
		fprintf(fd, "KEY[%03u] MAC %s %-7s %02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x\n"
			, entry
			, ether_sprintf(mac)
			, isset(ismic, entry) ? "MIC" : keytypenames[type & 7]
			, (key0 >>  0) & 0xff
			, (key0 >>  8) & 0xff
			, (key0 >> 16) & 0xff
			, (key0 >> 24) & 0xff
			, (key1 >>  0) & 0xff
			, (key1 >>  8) & 0xff
			, (key2 >>  0) & 0xff
			, (key2 >>  8) & 0xff
			, (key2 >> 16) & 0xff
			, (key2 >> 24) & 0xff
			, (key3 >>  0) & 0xff
			, (key3 >>  8) & 0xff
			, (key4 >>  0) & 0xff
			, (key4 >>  8) & 0xff
			, (key4 >> 16) & 0xff
			, (key4 >> 24) & 0xff
		);
	}
}
Example #15
/*
 * Set the current phy mode and recalculate the active channel
 * set based on the available channels for this mode.  Also
 * select a new default/current channel if the current one is
 * inappropriate for this mode.
 */
int
ieee80211_setmode(struct ieee80211com *ic, enum ieee80211_phymode mode)
{
#define	N(a)	(sizeof(a) / sizeof(a[0]))
	static const u_int chanflags[] = {
		0,			/* IEEE80211_MODE_AUTO */
		IEEE80211_CHAN_A,	/* IEEE80211_MODE_11A */
		IEEE80211_CHAN_B,	/* IEEE80211_MODE_11B */
		IEEE80211_CHAN_PUREG,	/* IEEE80211_MODE_11G */
		IEEE80211_CHAN_FHSS,	/* IEEE80211_MODE_FH */
		IEEE80211_CHAN_T,	/* IEEE80211_MODE_TURBO	*/
	};
	struct ieee80211_channel *c;
	u_int modeflags;
	int i;

	/* validate new mode */
	if ((ic->ic_modecaps & (1<<mode)) == 0) {
		IEEE80211_DPRINTF(("%s: mode %u not supported (caps 0x%x)\n",
			__func__, mode, ic->ic_modecaps));
		return EINVAL;
	}

	/*
	 * Verify at least one channel is present in the available
	 * channel list before committing to the new mode.
	 */
	IASSERT(mode < N(chanflags), ("Unexpected mode %u\n", mode));
	modeflags = chanflags[mode];
	for (i = 0; i <= IEEE80211_CHAN_MAX; i++) {
		c = &ic->ic_channels[i];
		if (mode == IEEE80211_MODE_AUTO) {
			/* ignore turbo channels for autoselect */
			if ((c->ic_flags &~ IEEE80211_CHAN_TURBO) != 0)
				break;
		} else {
			if ((c->ic_flags & modeflags) == modeflags)
				break;
		}
	}
	if (i > IEEE80211_CHAN_MAX) {
		IEEE80211_DPRINTF(("%s: no channels found for mode %u\n",
			__func__, mode));
		return EINVAL;
	}

	/*
	 * Calculate the active channel set.
	 */
	memset(ic->ic_chan_active, 0, sizeof(ic->ic_chan_active));
	for (i = 0; i <= IEEE80211_CHAN_MAX; i++) {
		c = &ic->ic_channels[i];
		if (mode == IEEE80211_MODE_AUTO) {
			/* take anything but pure turbo channels */
			if ((c->ic_flags &~ IEEE80211_CHAN_TURBO) != 0)
				setbit(ic->ic_chan_active, i);
		} else {
			if ((c->ic_flags & modeflags) == modeflags)
				setbit(ic->ic_chan_active, i);
		}
	}
	/*
	 * If no current/default channel is setup or the current
	 * channel is wrong for the mode then pick the first
	 * available channel from the active list.  This is likely
	 * not the right one.
	 */
	if (ic->ic_ibss_chan == NULL ||
	    isclr(ic->ic_chan_active, ieee80211_chan2ieee(ic, ic->ic_ibss_chan))) {
		for (i = 0; i <= IEEE80211_CHAN_MAX; i++)
			if (isset(ic->ic_chan_active, i)) {
				ic->ic_ibss_chan = &ic->ic_channels[i];
				break;
			}
		IASSERT(ic->ic_ibss_chan != NULL &&
		    isset(ic->ic_chan_active,
			ieee80211_chan2ieee(ic, ic->ic_ibss_chan)),
		    ("Bad IBSS channel %u\n",
		     ieee80211_chan2ieee(ic, ic->ic_ibss_chan)));
	}

	/*
	 * Set/reset state flags that influence beacon contents, etc.
	 *
	 * XXX what if we have stations already associated???
	 * XXX probably not right for autoselect?
	 */
	if (ic->ic_caps & IEEE80211_C_SHPREAMBLE)
		ic->ic_flags |= IEEE80211_F_SHPREAMBLE;
	if (mode == IEEE80211_MODE_11G) {
		if (ic->ic_caps & IEEE80211_C_SHSLOT)
			ic->ic_flags |= IEEE80211_F_SHSLOT;
	} else
		ic->ic_flags &= ~IEEE80211_F_SHSLOT;

	ic->ic_curmode = mode;
	return 0;
#undef N
}
Example #16
/*
 * Set the current phy mode and recalculate the active channel
 * set based on the available channels for this mode.  Also
 * select a new default/current channel if the current one is
 * inappropriate for this mode.
 */
int
ieee80211_setmode(struct ieee80211com *ic, enum ieee80211_phymode mode)
{
#define	N(a)	(sizeof(a) / sizeof(a[0]))
	static const u_int chanflags[] = {
		0,			/* IEEE80211_MODE_AUTO */
		IEEE80211_CHAN_A,	/* IEEE80211_MODE_11A */
		IEEE80211_CHAN_B,	/* IEEE80211_MODE_11B */
		IEEE80211_CHAN_PUREG,	/* IEEE80211_MODE_11G */
		IEEE80211_CHAN_FHSS,	/* IEEE80211_MODE_FH */
		IEEE80211_CHAN_T,	/* IEEE80211_MODE_TURBO_A */
		IEEE80211_CHAN_108G,	/* IEEE80211_MODE_TURBO_G */
	};
	struct ieee80211_channel *c;
	u_int modeflags;
	int i;

	/* validate new mode */
	if ((ic->ic_modecaps & (1<<mode)) == 0) {
		IEEE80211_DPRINTF(ic, IEEE80211_MSG_ANY,
			"%s: mode %u not supported (caps 0x%x)\n",
			__func__, mode, ic->ic_modecaps);
		return EINVAL;
	}

	/*
	 * Verify at least one channel is present in the available
	 * channel list before committing to the new mode.
	 */
	IASSERT(mode < N(chanflags), ("Unexpected mode %u", mode));
	modeflags = chanflags[mode];
	for (i = 0; i <= IEEE80211_CHAN_MAX; i++) {
		c = &ic->ic_channels[i];
		if (c->ic_flags == 0)
			continue;
		if (mode == IEEE80211_MODE_AUTO) {
			/* ignore turbo channels for autoselect */
			if ((c->ic_flags & IEEE80211_CHAN_TURBO) == 0)
				break;
		} else {
			if ((c->ic_flags & modeflags) == modeflags)
				break;
		}
	}
	if (i > IEEE80211_CHAN_MAX) {
		IEEE80211_DPRINTF(ic, IEEE80211_MSG_ANY,
			"%s: no channels found for mode %u\n", __func__, mode);
		return EINVAL;
	}

	/*
	 * Calculate the active channel set.
	 */
	memset(ic->ic_chan_active, 0, sizeof(ic->ic_chan_active));
	for (i = 0; i <= IEEE80211_CHAN_MAX; i++) {
		c = &ic->ic_channels[i];
		if (c->ic_flags == 0)
			continue;
		if (mode == IEEE80211_MODE_AUTO) {
			/* take anything but pure turbo channels */
			if ((c->ic_flags & IEEE80211_CHAN_TURBO) == 0)
				setbit(ic->ic_chan_active, i);
		} else {
			if ((c->ic_flags & modeflags) == modeflags)
				setbit(ic->ic_chan_active, i);
		}
	}
	/*
	 * If no current/default channel is setup or the current
	 * channel is wrong for the mode then pick the first
	 * available channel from the active list.  This is likely
	 * not the right one.
	 */
	if (ic->ic_ibss_chan == NULL ||
	    isclr(ic->ic_chan_active, ieee80211_chan2ieee(ic, ic->ic_ibss_chan))) {
		for (i = 0; i <= IEEE80211_CHAN_MAX; i++)
			if (isset(ic->ic_chan_active, i)) {
				ic->ic_ibss_chan = &ic->ic_channels[i];
				break;
			}
		IASSERT(ic->ic_ibss_chan != NULL &&
		    isset(ic->ic_chan_active,
			ieee80211_chan2ieee(ic, ic->ic_ibss_chan)),
		    ("Bad IBSS channel %u",
		     ieee80211_chan2ieee(ic, ic->ic_ibss_chan)));
	}
	/*
	 * If the desired channel is set but no longer valid then reset it.
	 */
	if (ic->ic_des_chan != IEEE80211_CHAN_ANYC &&
	    isclr(ic->ic_chan_active, ieee80211_chan2ieee(ic, ic->ic_des_chan)))
		ic->ic_des_chan = IEEE80211_CHAN_ANYC;

	/*
	 * Do mode-specific rate setup.
	 */
	if (mode == IEEE80211_MODE_11G) {
		/*
		 * Use a mixed 11b/11g rate set.
		 */
		ieee80211_set11gbasicrates(&ic->ic_sup_rates[mode],
			IEEE80211_MODE_11G);
	} else if (mode == IEEE80211_MODE_11B) {
		/*
		 * Force pure 11b rate set.
		 */
		ieee80211_set11gbasicrates(&ic->ic_sup_rates[mode],
			IEEE80211_MODE_11B);
	}
	/*
	 * Setup an initial rate set according to the
	 * current/default channel selected above.  This
	 * will be changed when scanning but must exist
	 * now so drivers have a consistent state of ic_ibss_chan.
	 */
	if (ic->ic_bss)		/* NB: can be called before lateattach */
		ic->ic_bss->ni_rates = ic->ic_sup_rates[mode];

	ic->ic_curmode = mode;
	ieee80211_reset_erp(ic);	/* reset ERP state */
	ieee80211_wme_initparams(ic);	/* reset WME state */

	return 0;
#undef N
}
Example #17
/*
 * Complete a scan of potential channels.
 */
void
ieee80211_end_scan(struct ifnet *ifp)
{
	struct ieee80211com *ic = (void *)ifp;
	struct ieee80211_node *ni, *nextbs, *selbs;

	if (ifp->if_flags & IFF_DEBUG)
		printf("%s: end %s scan\n", ifp->if_xname,
			(ic->ic_flags & IEEE80211_F_ASCAN) ?
				"active" : "passive");

	if (ic->ic_scan_count)
		ic->ic_flags &= ~IEEE80211_F_ASCAN;

	ni = RB_MIN(ieee80211_tree, &ic->ic_tree);

#ifndef IEEE80211_STA_ONLY
	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
		/* XXX off stack? */
		u_char occupied[howmany(IEEE80211_CHAN_MAX, NBBY)];
		int i, fail;

		/*
		 * The passive scan to look for existing AP's completed,
		 * select a channel to camp on.  Identify the channels
		 * that already have one or more AP's and try to locate
		 * an unoccupied one.  If that fails, pick a random
		 * channel from the active set.
		 */
		memset(occupied, 0, sizeof(occupied));
		RB_FOREACH(ni, ieee80211_tree, &ic->ic_tree)
			setbit(occupied, ieee80211_chan2ieee(ic, ni->ni_chan));
		for (i = 0; i < IEEE80211_CHAN_MAX; i++)
			if (isset(ic->ic_chan_active, i) && isclr(occupied, i))
				break;
		if (i == IEEE80211_CHAN_MAX) {
			fail = arc4random() & 3;	/* random 0-3 */
			for (i = 0; i < IEEE80211_CHAN_MAX; i++)
				if (isset(ic->ic_chan_active, i) && fail-- == 0)
					break;
		}
		ieee80211_create_ibss(ic, &ic->ic_channels[i]);
		goto wakeup;
	}
#endif
	if (ni == NULL) {
		DPRINTF(("no scan candidate\n"));
 notfound:

#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode == IEEE80211_M_IBSS &&
		    (ic->ic_flags & IEEE80211_F_IBSSON) &&
		    ic->ic_des_esslen != 0) {
			ieee80211_create_ibss(ic, ic->ic_ibss_chan);
			goto wakeup;
		}
#endif
		/*
		 * Scan the next mode if nothing has been found. This
		 * is necessary if the device supports different
		 * incompatible modes in the same channel range, like
		 * 11b and "pure" 11G mode. This will loop
		 * forever except for user-initiated scans.
		 */
		if (ieee80211_next_mode(ifp) == IEEE80211_MODE_AUTO) {
			if (ic->ic_scan_lock & IEEE80211_SCAN_REQUEST &&
			    ic->ic_scan_lock & IEEE80211_SCAN_RESUME) {
				ic->ic_scan_lock = IEEE80211_SCAN_LOCKED;
				/* Return from a user-initiated scan */
				wakeup(&ic->ic_scan_lock);
			} else if (ic->ic_scan_lock & IEEE80211_SCAN_REQUEST)
				goto wakeup;
			ic->ic_scan_count++;
		}

		/*
		 * Reset the list of channels to scan and start again.
		 */
		ieee80211_next_scan(ifp);
		return;
	}
	selbs = NULL;

	for (; ni != NULL; ni = nextbs) {
		nextbs = RB_NEXT(ieee80211_tree, &ic->ic_tree, ni);
		if (ni->ni_fails) {
			/*
			 * The configuration of the access points may change
			 * during my scan.  So delete the entry for the AP
			 * and retry to associate if there is another beacon.
			 */
			if (ni->ni_fails++ > 2)
				ieee80211_free_node(ic, ni);
			continue;
		}
		if (ieee80211_match_bss(ic, ni) == 0) {
			if (selbs == NULL)
				selbs = ni;
			else if (ni->ni_rssi > selbs->ni_rssi)
				selbs = ni;
		}
	}
	if (selbs == NULL)
		goto notfound;
	(*ic->ic_node_copy)(ic, ic->ic_bss, selbs);
	ni = ic->ic_bss;

	/*
	 * Set the erp state (mostly the slot time) to deal with
	 * the auto-select case; this should be redundant if the
	 * mode is locked.
	 */
	ic->ic_curmode = ieee80211_chan2mode(ic, ni->ni_chan);
	ieee80211_reset_erp(ic);

	if (ic->ic_flags & IEEE80211_F_RSNON)
		ieee80211_choose_rsnparams(ic);
	else if (ic->ic_flags & IEEE80211_F_WEPON)
		ni->ni_rsncipher = IEEE80211_CIPHER_USEGROUP;

	ieee80211_node_newstate(selbs, IEEE80211_STA_BSS);
#ifndef IEEE80211_STA_ONLY
	if (ic->ic_opmode == IEEE80211_M_IBSS) {
		ieee80211_fix_rate(ic, ni, IEEE80211_F_DOFRATE |
		    IEEE80211_F_DONEGO | IEEE80211_F_DODEL);
		if (ni->ni_rates.rs_nrates == 0)
			goto notfound;
		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
	} else
#endif
		ieee80211_new_state(ic, IEEE80211_S_AUTH, -1);

 wakeup:
	if (ic->ic_scan_lock & IEEE80211_SCAN_REQUEST) {
		/* Return from a user-initiated scan */
		wakeup(&ic->ic_scan_lock);
	}

	ic->ic_scan_lock = IEEE80211_SCAN_UNLOCKED;
}
Example #18
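/*
 * Allocate a block in the given cylinder group.
 *
 * Use the preferred block if it is free, otherwise scan the bitmap for
 * a fully free byte (eight contiguous free blocks) and finally fall
 * back to ext2fs_mapsearch() for a single free block.
 */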
static int32_t
ext2fs_alloccg(struct inode *ip, int cg, int32_t bpref, int size)
{
	struct m_ext2fs *fs;
	char *bbp;
	struct buf *bp;
	int error, bno, start, end, loc;

	fs = ip->i_e2fs;
	if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0)
		return (0);
	error = bread(ip->i_devvp, fsbtodb(fs,
		fs->e2fs_gd[cg].ext2bgd_b_bitmap),
		(int)fs->e2fs_bsize, &bp);
	if (error || fs->e2fs_gd[cg].ext2bgd_nbfree == 0) {
		brelse(bp);
		return (0);
	}
	bbp = (char *)bp->b_data;

	if (dtog(fs, bpref) != cg)
		bpref = 0;
	if (bpref != 0) {
		bpref = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (isclr(bbp, bpref)) {
			bno = bpref;
			goto gotit;
		}
	}
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 * first try to get 8 contiguous blocks, then fall back to a single
	 * block.
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = 0;
	end = howmany(fs->e2fs.e2fs_fpg, NBBY) - start;
	for (loc = start; loc < end; loc++) {
		if (bbp[loc] == 0) {
			bno = loc * NBBY;
			goto gotit;
		}
	}
	for (loc = 0; loc < start; loc++) {
		if (bbp[loc] == 0) {
			bno = loc * NBBY;
			goto gotit;
		}
	}

	bno = ext2fs_mapsearch(fs, bbp, bpref);
	if (bno < 0)
		return (0);
gotit:
#ifdef DIAGNOSTIC
	if (isset(bbp, (long)bno)) {
		printf("ext2fs_alloccgblk: cg=%d bno=%d fs=%s\n",
			cg, bno, fs->e2fs_fsmnt);
		panic("ext2fs_alloccg: dup alloc");
	}
#endif
	setbit(bbp, (long)bno);
	fs->e2fs.e2fs_fbcount--;
	fs->e2fs_gd[cg].ext2bgd_nbfree--;
	fs->e2fs_fmod = 1;
	bdwrite(bp);
	return (cg * fs->e2fs.e2fs_fpg + fs->e2fs.e2fs_first_dblock + bno);
}
Example #19
/*
 * The kernel thread (one for every active snapshot).
 *
 * After wakeup it cleans the cache and runs the I/O requests.
 */
static void
fss_bs_thread(void *arg)
{
	bool thread_idle, is_valid;
	int error, i, todo, len, crotor, is_read;
	long off;
	char *addr;
	u_int32_t c, cl, ch, *indirp;
	struct buf *bp, *nbp;
	struct fss_softc *sc;
	struct fss_cache *scp, *scl;

	sc = arg;
	scl = sc->sc_cache+sc->sc_cache_size;
	crotor = 0;
	thread_idle = false;

	mutex_enter(&sc->sc_slock);

	for (;;) {
		if (thread_idle)
			cv_wait(&sc->sc_work_cv, &sc->sc_slock);
		thread_idle = true;
		if ((sc->sc_flags & FSS_BS_THREAD) == 0) {
			mutex_exit(&sc->sc_slock);
			kthread_exit(0);
		}

		/*
		 * Process I/O requests (persistent)
		 */

		if (sc->sc_flags & FSS_PERSISTENT) {
			if ((bp = bufq_get(sc->sc_bufq)) == NULL)
				continue;
			is_valid = FSS_ISVALID(sc);
			is_read = (bp->b_flags & B_READ);
			thread_idle = false;
			mutex_exit(&sc->sc_slock);

			if (is_valid) {
				disk_busy(sc->sc_dkdev);
				error = fss_bs_io(sc, FSS_READ, 0,
				    dbtob(bp->b_blkno), bp->b_bcount,
				    bp->b_data);
				disk_unbusy(sc->sc_dkdev,
				    (error ? 0 : bp->b_bcount), is_read);
			} else
				error = ENXIO;

			bp->b_error = error;
			bp->b_resid = (error ? bp->b_bcount : 0);
			biodone(bp);

			mutex_enter(&sc->sc_slock);
			continue;
		}

		/*
		 * Clean the cache
		 */
		for (i = 0; i < sc->sc_cache_size; i++) {
			crotor = (crotor + 1) % sc->sc_cache_size;
			scp = sc->sc_cache + crotor;
			if (scp->fc_type != FSS_CACHE_VALID)
				continue;
			mutex_exit(&sc->sc_slock);

			thread_idle = false;
			indirp = fss_bs_indir(sc, scp->fc_cluster);
			if (indirp != NULL) {
				error = fss_bs_io(sc, FSS_WRITE, sc->sc_clnext,
				    0, FSS_CLSIZE(sc), scp->fc_data);
			} else
				error = EIO;

			mutex_enter(&sc->sc_slock);
			if (error == 0) {
				*indirp = sc->sc_clnext++;
				sc->sc_indir_dirty = 1;
			} else
				fss_error(sc, "write error on backing store");

			scp->fc_type = FSS_CACHE_FREE;
			cv_broadcast(&sc->sc_cache_cv);
			break;
		}

		/*
		 * Process I/O requests
		 */
		if ((bp = bufq_get(sc->sc_bufq)) == NULL)
			continue;
		is_valid = FSS_ISVALID(sc);
		is_read = (bp->b_flags & B_READ);
		thread_idle = false;

		if (!is_valid) {
			mutex_exit(&sc->sc_slock);

			bp->b_error = ENXIO;
			bp->b_resid = bp->b_bcount;
			biodone(bp);

			mutex_enter(&sc->sc_slock);
			continue;
		}

		disk_busy(sc->sc_dkdev);

		/*
		 * First read from the snapshotted block device unless
		 * this request is completely covered by backing store.
		 */

		cl = FSS_BTOCL(sc, dbtob(bp->b_blkno));
		off = FSS_CLOFF(sc, dbtob(bp->b_blkno));
		ch = FSS_BTOCL(sc, dbtob(bp->b_blkno)+bp->b_bcount-1);
		error = 0;
		bp->b_resid = 0;
		bp->b_error = 0;
		for (c = cl; c <= ch; c++) {
			if (isset(sc->sc_copied, c))
				continue;
			mutex_exit(&sc->sc_slock);

			/* Not on backing store, read from device. */
			nbp = getiobuf(NULL, true);
			nbp->b_flags = B_READ;
			nbp->b_resid = nbp->b_bcount = bp->b_bcount;
			nbp->b_bufsize = bp->b_bcount;
			nbp->b_data = bp->b_data;
			nbp->b_blkno = bp->b_blkno;
			nbp->b_lblkno = 0;
			nbp->b_dev = sc->sc_bdev;
			SET(nbp->b_cflags, BC_BUSY);	/* mark buffer busy */

			bdev_strategy(nbp);

			error = biowait(nbp);
			if (error != 0) {
				bp->b_resid = bp->b_bcount;
				bp->b_error = nbp->b_error;
				disk_unbusy(sc->sc_dkdev, 0, is_read);
				biodone(bp);
			}
			putiobuf(nbp);

			mutex_enter(&sc->sc_slock);
			break;
		}
		if (error)
			continue;

		/*
		 * Replace those parts that have been saved to backing store.
		 */

		addr = bp->b_data;
		todo = bp->b_bcount;
		for (c = cl; c <= ch; c++, off = 0, todo -= len, addr += len) {
			len = FSS_CLSIZE(sc)-off;
			if (len > todo)
				len = todo;
			if (isclr(sc->sc_copied, c))
				continue;
			mutex_exit(&sc->sc_slock);

			indirp = fss_bs_indir(sc, c);
			if (indirp == NULL || *indirp == 0) {
				/*
				 * Not on backing store. Either in cache
				 * or hole in the snapshotted block device.
				 */

				mutex_enter(&sc->sc_slock);
				for (scp = sc->sc_cache; scp < scl; scp++)
					if (scp->fc_type == FSS_CACHE_VALID &&
					    scp->fc_cluster == c)
						break;
				if (scp < scl)
					memcpy(addr, (char *)scp->fc_data+off,
					    len);
				else
					memset(addr, 0, len);
				continue;
			}

			/*
			 * Read from backing store.
			 */
			error =
			    fss_bs_io(sc, FSS_READ, *indirp, off, len, addr);

			mutex_enter(&sc->sc_slock);
			if (error) {
				bp->b_resid = bp->b_bcount;
				bp->b_error = error;
				break;
			}
		}
		mutex_exit(&sc->sc_slock);

		disk_unbusy(sc->sc_dkdev, (error ? 0 : bp->b_bcount), is_read);
		biodone(bp);

		mutex_enter(&sc->sc_slock);
	}
}
Example #20
/*
 * Set the current phy mode and recalculate the active channel
 * set based on the available channels for this mode.  Also
 * select a new default/current channel if the current one is
 * inappropriate for this mode.
 */
int
ieee80211_setmode(struct ieee80211com *ic, enum ieee80211_phymode mode)
{
#define	N(a)	(sizeof(a) / sizeof(a[0]))
	struct ifnet *ifp = &ic->ic_if;
	static const u_int chanflags[] = {
		0,			/* IEEE80211_MODE_AUTO */
		IEEE80211_CHAN_A,	/* IEEE80211_MODE_11A */
		IEEE80211_CHAN_B,	/* IEEE80211_MODE_11B */
		IEEE80211_CHAN_PUREG,	/* IEEE80211_MODE_11G */
		IEEE80211_CHAN_T,	/* IEEE80211_MODE_TURBO	*/
	};
	const struct ieee80211_channel *c;
	u_int modeflags;
	int i;

	/* validate new mode */
	if ((ic->ic_modecaps & (1<<mode)) == 0) {
		DPRINTF(("mode %u not supported (caps 0x%x)\n",
		    mode, ic->ic_modecaps));
		return EINVAL;
	}

	/*
	 * Verify at least one channel is present in the available
	 * channel list before committing to the new mode.
	 */
	if (mode >= N(chanflags))
		panic("Unexpected mode %u", mode);
	modeflags = chanflags[mode];
	for (i = 0; i <= IEEE80211_CHAN_MAX; i++) {
		c = &ic->ic_channels[i];
		if (mode == IEEE80211_MODE_AUTO) {
			/* ignore turbo channels for autoselect */
			if ((c->ic_flags &~ IEEE80211_CHAN_TURBO) != 0)
				break;
		} else {
			if ((c->ic_flags & modeflags) == modeflags)
				break;
		}
	}
	if (i > IEEE80211_CHAN_MAX) {
		DPRINTF(("no channels found for mode %u\n", mode));
		return EINVAL;
	}

	/*
	 * Calculate the active channel set.
	 */
	memset(ic->ic_chan_active, 0, sizeof(ic->ic_chan_active));
	for (i = 0; i <= IEEE80211_CHAN_MAX; i++) {
		c = &ic->ic_channels[i];
		if (mode == IEEE80211_MODE_AUTO) {
			/* take anything but pure turbo channels */
			if ((c->ic_flags &~ IEEE80211_CHAN_TURBO) != 0)
				setbit(ic->ic_chan_active, i);
		} else {
			if ((c->ic_flags & modeflags) == modeflags)
				setbit(ic->ic_chan_active, i);
		}
	}
	/*
	 * If no current/default channel is setup or the current
	 * channel is wrong for the mode then pick the first
	 * available channel from the active list.  This is likely
	 * not the right one.
	 */
	if (ic->ic_ibss_chan == NULL || isclr(ic->ic_chan_active,
	    ieee80211_chan2ieee(ic, ic->ic_ibss_chan))) {
		for (i = 0; i <= IEEE80211_CHAN_MAX; i++)
			if (isset(ic->ic_chan_active, i)) {
				ic->ic_ibss_chan = &ic->ic_channels[i];
				break;
			}
		if ((ic->ic_ibss_chan == NULL) || isclr(ic->ic_chan_active,
		    ieee80211_chan2ieee(ic, ic->ic_ibss_chan)))
			panic("Bad IBSS channel %u",
			    ieee80211_chan2ieee(ic, ic->ic_ibss_chan));
	}

	/*
	 * Reset the scan state for the new mode. This avoids scanning
	 * of invalid channels, ie. 5GHz channels in 11b mode.
	 */
	ieee80211_reset_scan(ifp);

	ic->ic_curmode = mode;
	ieee80211_reset_erp(ic);	/* reset ERP state */

	return 0;
#undef N
}
Example #21
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ext2_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
    struct m_ext2fs *fs;
    struct buf *bp;
    struct ext2mount *ump;
    daddr_t bno, runstart, runlen;
    int bit, loc, end, error, start;
    char *bbp;
    /* XXX ondisk32 */
    fs = ip->i_e2fs;
    ump = ip->i_ump;
    if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0)
        return (0);
    EXT2_UNLOCK(ump);
    error = bread(ip->i_devvp, fsbtodb(fs,
                                       fs->e2fs_gd[cg].ext2bgd_b_bitmap),
                  (int)fs->e2fs_bsize, NOCRED, &bp);
    if (error) {
        brelse(bp);
        EXT2_LOCK(ump);
        return (0);
    }
    if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0) {
        /*
         * Another thread allocated the last block in this
         * group while we were waiting for the buffer.
         */
        brelse(bp);
        EXT2_LOCK(ump);
        return (0);
    }
    bbp = (char *)bp->b_data;

    if (dtog(fs, bpref) != cg)
        bpref = 0;
    if (bpref != 0) {
        bpref = dtogd(fs, bpref);
        /*
         * if the requested block is available, use it
         */
        if (isclr(bbp, bpref)) {
            bno = bpref;
            goto gotit;
        }
    }
    /*
     * no blocks in the requested cylinder, so take next
     * available one in this cylinder group.
     * first try to get 8 contiguous blocks, then fall back to a single
     * block.
     */
    if (bpref)
        start = dtogd(fs, bpref) / NBBY;
    else
        start = 0;
    end = howmany(fs->e2fs->e2fs_fpg, NBBY) - start;
retry:
    runlen = 0;
    runstart = 0;
    for (loc = start; loc < end; loc++) {
        if (bbp[loc] == (char)0xff) {
            runlen = 0;
            continue;
        }

        /* Start of a run, find the number of high clear bits. */
        if (runlen == 0) {
            bit = fls(bbp[loc]);
            runlen = NBBY - bit;
            runstart = loc * NBBY + bit;
        } else if (bbp[loc] == 0) {
            /* Continue a run. */
            runlen += NBBY;
        } else {
            /*
             * Finish the current run.  If it isn't long
             * enough, start a new one.
             */
            bit = ffs(bbp[loc]) - 1;
            runlen += bit;
            if (runlen >= 8) {
                bno = runstart;
                goto gotit;
            }

            /* Run was too short, start a new one. */
            bit = fls(bbp[loc]);
            runlen = NBBY - bit;
            runstart = loc * NBBY + bit;
        }

        /* If the current run is long enough, use it. */
        if (runlen >= 8) {
            bno = runstart;
            goto gotit;
        }
    }
    if (start != 0) {
        end = start;
        start = 0;
        goto retry;
    }

    bno = ext2_mapsearch(fs, bbp, bpref);
    if (bno < 0) {
        brelse(bp);
        EXT2_LOCK(ump);
        return (0);
    }
gotit:
#ifdef INVARIANTS
    if (isset(bbp, bno)) {
        printf("ext2fs_alloccgblk: cg=%d bno=%jd fs=%s\n",
               cg, (intmax_t)bno, fs->e2fs_fsmnt);
        panic("ext2fs_alloccg: dup alloc");
    }
#endif
    setbit(bbp, bno);
    EXT2_LOCK(ump);
    ext2_clusteracct(fs, bbp, cg, bno, -1);
    fs->e2fs->e2fs_fbcount--;
    fs->e2fs_gd[cg].ext2bgd_nbfree--;
    fs->e2fs_fmod = 1;
    EXT2_UNLOCK(ump);
    bdwrite(bp);
    return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);
}
Example #22
int
ieee80211_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ieee80211com *ic = (void *)ifp;
	struct ifreq *ifr = (struct ifreq *)data;
	int i, error = 0;
	struct ieee80211_nwid nwid;
	struct ieee80211_wpapsk *psk;
	struct ieee80211_wmmparams *wmm;
	struct ieee80211_power *power;
	struct ieee80211_bssid *bssid;
	struct ieee80211chanreq *chanreq;
	struct ieee80211_channel *chan;
	struct ieee80211_txpower *txpower;
	static const u_int8_t empty_macaddr[IEEE80211_ADDR_LEN] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	struct ieee80211_nodereq *nr, nrbuf;
	struct ieee80211_nodereq_all *na;
	struct ieee80211_node *ni;
	u_int32_t flags;

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, &ic->ic_ac, cmd, data);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCS80211NWID:
		if ((error = suser(curproc, 0)) != 0)
			break;
		if ((error = copyin(ifr->ifr_data, &nwid, sizeof(nwid))) != 0)
			break;
		if (nwid.i_len > IEEE80211_NWID_LEN) {
			error = EINVAL;
			break;
		}
		memset(ic->ic_des_essid, 0, IEEE80211_NWID_LEN);
		ic->ic_des_esslen = nwid.i_len;
		memcpy(ic->ic_des_essid, nwid.i_nwid, nwid.i_len);
		error = ENETRESET;
		break;
	case SIOCG80211NWID:
		memset(&nwid, 0, sizeof(nwid));
		switch (ic->ic_state) {
		case IEEE80211_S_INIT:
		case IEEE80211_S_SCAN:
			nwid.i_len = ic->ic_des_esslen;
			memcpy(nwid.i_nwid, ic->ic_des_essid, nwid.i_len);
			break;
		default:
			nwid.i_len = ic->ic_bss->ni_esslen;
			memcpy(nwid.i_nwid, ic->ic_bss->ni_essid, nwid.i_len);
			break;
		}
		error = copyout(&nwid, ifr->ifr_data, sizeof(nwid));
		break;
	case SIOCS80211NWKEY:
		if ((error = suser(curproc, 0)) != 0)
			break;
		error = ieee80211_ioctl_setnwkeys(ic, (void *)data);
		break;
	case SIOCG80211NWKEY:
		error = ieee80211_ioctl_getnwkeys(ic, (void *)data);
		break;
	case SIOCS80211WMMPARMS:
		if ((error = suser(curproc, 0)) != 0)
			break;
		if (!(ic->ic_flags & IEEE80211_C_QOS)) {
			error = ENODEV;
			break;
		}
		wmm = (struct ieee80211_wmmparams *)data;
		if (wmm->i_enabled)
			ic->ic_flags |= IEEE80211_F_QOS;
		else
			ic->ic_flags &= ~IEEE80211_F_QOS;
		error = ENETRESET;
		break;
	case SIOCG80211WMMPARMS:
		wmm = (struct ieee80211_wmmparams *)data;
		wmm->i_enabled = (ic->ic_flags & IEEE80211_F_QOS) ? 1 : 0;
		break;
	case SIOCS80211WPAPARMS:
		if ((error = suser(curproc, 0)) != 0)
			break;
		error = ieee80211_ioctl_setwpaparms(ic, (void *)data);
		break;
	case SIOCG80211WPAPARMS:
		error = ieee80211_ioctl_getwpaparms(ic, (void *)data);
		break;
	case SIOCS80211WPAPSK:
		if ((error = suser(curproc, 0)) != 0)
			break;
		psk = (struct ieee80211_wpapsk *)data;
		if (psk->i_enabled) {
			ic->ic_flags |= IEEE80211_F_PSK;
			memcpy(ic->ic_psk, psk->i_psk, sizeof(ic->ic_psk));
		} else {
			ic->ic_flags &= ~IEEE80211_F_PSK;
			memset(ic->ic_psk, 0, sizeof(ic->ic_psk));
		}
		error = ENETRESET;
		break;
	case SIOCG80211WPAPSK:
		psk = (struct ieee80211_wpapsk *)data;
		if (ic->ic_flags & IEEE80211_F_PSK) {
			psk->i_enabled = 1;
			/* do not show any keys to non-root user */
			if (suser(curproc, 0) != 0) {
				psk->i_enabled = 2;
				memset(psk->i_psk, 0, sizeof(psk->i_psk));
				break;	/* return ok but w/o key */
			}
			memcpy(psk->i_psk, ic->ic_psk, sizeof(psk->i_psk));
		} else
			psk->i_enabled = 0;
		break;
	case SIOCS80211POWER:
		if ((error = suser(curproc, 0)) != 0)
			break;
		power = (struct ieee80211_power *)data;
		ic->ic_lintval = power->i_maxsleep;
		if (power->i_enabled != 0) {
			if ((ic->ic_caps & IEEE80211_C_PMGT) == 0)
				error = EINVAL;
			else if ((ic->ic_flags & IEEE80211_F_PMGTON) == 0) {
				ic->ic_flags |= IEEE80211_F_PMGTON;
				error = ENETRESET;
			}
		} else {
			if (ic->ic_flags & IEEE80211_F_PMGTON) {
				ic->ic_flags &= ~IEEE80211_F_PMGTON;
				error = ENETRESET;
			}
		}
		break;
	case SIOCG80211POWER:
		power = (struct ieee80211_power *)data;
		power->i_enabled = (ic->ic_flags & IEEE80211_F_PMGTON) ? 1 : 0;
		power->i_maxsleep = ic->ic_lintval;
		break;
	case SIOCS80211BSSID:
		if ((error = suser(curproc, 0)) != 0)
			break;
		bssid = (struct ieee80211_bssid *)data;
		if (IEEE80211_ADDR_EQ(bssid->i_bssid, empty_macaddr))
			ic->ic_flags &= ~IEEE80211_F_DESBSSID;
		else {
			ic->ic_flags |= IEEE80211_F_DESBSSID;
			IEEE80211_ADDR_COPY(ic->ic_des_bssid, bssid->i_bssid);
		}
		if (ic->ic_opmode == IEEE80211_M_HOSTAP)
			break;
		switch (ic->ic_state) {
		case IEEE80211_S_INIT:
		case IEEE80211_S_SCAN:
			error = ENETRESET;
			break;
		default:
			if ((ic->ic_flags & IEEE80211_F_DESBSSID) &&
			    !IEEE80211_ADDR_EQ(ic->ic_des_bssid,
			    ic->ic_bss->ni_bssid))
				error = ENETRESET;
			break;
		}
		break;
	case SIOCG80211BSSID:
		bssid = (struct ieee80211_bssid *)data;
		switch (ic->ic_state) {
		case IEEE80211_S_INIT:
		case IEEE80211_S_SCAN:
			if (ic->ic_opmode == IEEE80211_M_HOSTAP)
				IEEE80211_ADDR_COPY(bssid->i_bssid,
				    ic->ic_myaddr);
			else if (ic->ic_flags & IEEE80211_F_DESBSSID)
				IEEE80211_ADDR_COPY(bssid->i_bssid,
				    ic->ic_des_bssid);
			else
				memset(bssid->i_bssid, 0, IEEE80211_ADDR_LEN);
			break;
		default:
			IEEE80211_ADDR_COPY(bssid->i_bssid,
			    ic->ic_bss->ni_bssid);
			break;
		}
		break;
	case SIOCS80211CHANNEL:
		if ((error = suser(curproc, 0)) != 0)
			break;
		chanreq = (struct ieee80211chanreq *)data;
		if (chanreq->i_channel == IEEE80211_CHAN_ANY)
			ic->ic_des_chan = IEEE80211_CHAN_ANYC;
		else if (chanreq->i_channel > IEEE80211_CHAN_MAX ||
		    isclr(ic->ic_chan_active, chanreq->i_channel)) {
			error = EINVAL;
			break;
		} else
			ic->ic_ibss_chan = ic->ic_des_chan =
			    &ic->ic_channels[chanreq->i_channel];
		switch (ic->ic_state) {
		case IEEE80211_S_INIT:
		case IEEE80211_S_SCAN:
			error = ENETRESET;
			break;
		default:
			if (ic->ic_opmode == IEEE80211_M_STA) {
				if (ic->ic_des_chan != IEEE80211_CHAN_ANYC &&
				    ic->ic_bss->ni_chan != ic->ic_des_chan)
					error = ENETRESET;
			} else {
				if (ic->ic_bss->ni_chan != ic->ic_ibss_chan)
					error = ENETRESET;
			}
			break;
		}
		break;
	case SIOCG80211CHANNEL:
		chanreq = (struct ieee80211chanreq *)data;
		switch (ic->ic_state) {
		case IEEE80211_S_INIT:
		case IEEE80211_S_SCAN:
			if (ic->ic_opmode == IEEE80211_M_STA)
				chan = ic->ic_des_chan;
			else
				chan = ic->ic_ibss_chan;
			break;
		default:
			chan = ic->ic_bss->ni_chan;
			break;
		}
		chanreq->i_channel = ieee80211_chan2ieee(ic, chan);
		break;
#if 0
	case SIOCG80211ZSTATS:
#endif
	case SIOCG80211STATS:
		ifr = (struct ifreq *)data;
		copyout(&ic->ic_stats, ifr->ifr_data, sizeof (ic->ic_stats));
#if 0
		if (cmd == SIOCG80211ZSTATS)
			memset(&ic->ic_stats, 0, sizeof(ic->ic_stats));
#endif
		break;
	case SIOCS80211TXPOWER:
		if ((error = suser(curproc, 0)) != 0)
			break;
		txpower = (struct ieee80211_txpower *)data;
		if ((ic->ic_caps & IEEE80211_C_TXPMGT) == 0) {
			error = EINVAL;
			break;
		}
		if (IEEE80211_TXPOWER_MIN > txpower->i_val ||
		    txpower->i_val > IEEE80211_TXPOWER_MAX) {
			error = EINVAL;
			break;
		}
		ic->ic_txpower = txpower->i_val;
		error = ENETRESET;
		break;
	case SIOCG80211TXPOWER:
		txpower = (struct ieee80211_txpower *)data;
		if ((ic->ic_caps & IEEE80211_C_TXPMGT) == 0)
			error = EINVAL;
		else
			txpower->i_val = ic->ic_txpower;
		break;
	case SIOCSIFMTU:
		ifr = (struct ifreq *)data;
		if (!(IEEE80211_MTU_MIN <= ifr->ifr_mtu &&
		    ifr->ifr_mtu <= IEEE80211_MTU_MAX))
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCS80211SCAN:
		if ((error = suser(curproc, 0)) != 0)
			break;
		if (ic->ic_opmode == IEEE80211_M_HOSTAP)
			break;
		if ((ifp->if_flags & IFF_UP) == 0) {
			error = ENETDOWN;
			break;
		}
		if ((ic->ic_scan_lock & IEEE80211_SCAN_REQUEST) == 0) {
			if (ic->ic_scan_lock & IEEE80211_SCAN_LOCKED)
				ic->ic_scan_lock |= IEEE80211_SCAN_RESUME;
			ic->ic_scan_lock |= IEEE80211_SCAN_REQUEST;
			if (ic->ic_state != IEEE80211_S_SCAN)
				ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
		}
		/* Let the userspace process wait for completion */
		error = tsleep(&ic->ic_scan_lock, PCATCH, "80211scan",
		    hz * IEEE80211_SCAN_TIMEOUT);
		break;
	case SIOCG80211NODE:
		nr = (struct ieee80211_nodereq *)data;
		ni = ieee80211_find_node(ic, nr->nr_macaddr);
		if (ni == NULL) {
			error = ENOENT;
			break;
		}
		ieee80211_node2req(ic, ni, nr);
		break;
	case SIOCS80211NODE:
		if ((error = suser(curproc, 0)) != 0)
			break;
		if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
			error = EINVAL;
			break;
		}
		nr = (struct ieee80211_nodereq *)data;

		ni = ieee80211_find_node(ic, nr->nr_macaddr);
		if (ni == NULL)
			ni = ieee80211_alloc_node(ic, nr->nr_macaddr);
		if (ni == NULL) {
			error = ENOENT;
			break;
		}

		if (nr->nr_flags & IEEE80211_NODEREQ_COPY)
			ieee80211_req2node(ic, nr, ni);
		break;
	case SIOCS80211DELNODE:
		if ((error = suser(curproc, 0)) != 0)
			break;
		nr = (struct ieee80211_nodereq *)data;
		ni = ieee80211_find_node(ic, nr->nr_macaddr);
		if (ni == NULL)
			error = ENOENT;
		else if (ni == ic->ic_bss)
			error = EPERM;
		else {
			if (ni->ni_state == IEEE80211_STA_COLLECT)
				break;

			/* Disassociate station. */
			if (ni->ni_state == IEEE80211_STA_ASSOC)
				IEEE80211_SEND_MGMT(ic, ni,
				    IEEE80211_FC0_SUBTYPE_DISASSOC,
				    IEEE80211_REASON_ASSOC_LEAVE);

			/* Deauth station. */
			if (ni->ni_state >= IEEE80211_STA_AUTH)
				IEEE80211_SEND_MGMT(ic, ni,
				    IEEE80211_FC0_SUBTYPE_DEAUTH,
				    IEEE80211_REASON_AUTH_LEAVE);

			ieee80211_release_node(ic, ni);
		}
		break;
	case SIOCG80211ALLNODES:
		na = (struct ieee80211_nodereq_all *)data;
		na->na_nodes = i = 0;
		ni = RB_MIN(ieee80211_tree, &ic->ic_tree);
		while (ni && na->na_size >=
		    i + sizeof(struct ieee80211_nodereq)) {
			ieee80211_node2req(ic, ni, &nrbuf);
			error = copyout(&nrbuf, (caddr_t)na->na_node + i,
			    sizeof(struct ieee80211_nodereq));
			if (error)
				break;
			i += sizeof(struct ieee80211_nodereq);
			na->na_nodes++;
			ni = RB_NEXT(ieee80211_tree, &ic->ic_tree, ni);
		}
		break;
	case SIOCG80211FLAGS:
		flags = ic->ic_flags;
		if (ic->ic_opmode != IEEE80211_M_HOSTAP)
			flags &= ~IEEE80211_F_HOSTAPMASK;
		ifr->ifr_flags = flags >> IEEE80211_F_USERSHIFT;
		break;
	case SIOCS80211FLAGS:
		if ((error = suser(curproc, 0)) != 0)
			break;
		flags = (u_int32_t)ifr->ifr_flags << IEEE80211_F_USERSHIFT;
		if (ic->ic_opmode != IEEE80211_M_HOSTAP &&
		    (flags & IEEE80211_F_HOSTAPMASK)) {
			error = EINVAL;
			break;
		}
		ic->ic_flags = (ic->ic_flags & ~IEEE80211_F_USERMASK) | flags;
		error = ENETRESET;
		break;
	default:
		error = ENOTTY;
		break;
	}
	return error;
}
Example #23
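/*
 * Check a scanned node against the current configuration (channel,
 * capabilities, rates, ESSID, BSSID and RSN parameters).  Returns 0
 * if the BSS is acceptable, otherwise a bitmask of mismatch reasons.
 */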
int
ieee80211_match_bss(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	u_int8_t rate;
	int fail;

	fail = 0;
	if (isclr(ic->ic_chan_active, ieee80211_chan2ieee(ic, ni->ni_chan)))
		fail |= 0x01;
	if (ic->ic_des_chan != IEEE80211_CHAN_ANYC &&
	    ni->ni_chan != ic->ic_des_chan)
		fail |= 0x01;
#ifndef IEEE80211_STA_ONLY
	if (ic->ic_opmode == IEEE80211_M_IBSS) {
		if ((ni->ni_capinfo & IEEE80211_CAPINFO_IBSS) == 0)
			fail |= 0x02;
	} else
#endif
	{
		if ((ni->ni_capinfo & IEEE80211_CAPINFO_ESS) == 0)
			fail |= 0x02;
	}
	if (ic->ic_flags & (IEEE80211_F_WEPON | IEEE80211_F_RSNON)) {
		if ((ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY) == 0)
			fail |= 0x04;
	} else {
		if (ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY)
			fail |= 0x04;
	}

	rate = ieee80211_fix_rate(ic, ni, IEEE80211_F_DONEGO);
	if (rate & IEEE80211_RATE_BASIC)
		fail |= 0x08;
	if (ic->ic_des_esslen != 0 &&
	    (ni->ni_esslen != ic->ic_des_esslen ||
	     memcmp(ni->ni_essid, ic->ic_des_essid, ic->ic_des_esslen) != 0))
		fail |= 0x10;
	if ((ic->ic_flags & IEEE80211_F_DESBSSID) &&
	    !IEEE80211_ADDR_EQ(ic->ic_des_bssid, ni->ni_bssid))
		fail |= 0x20;

	if (ic->ic_flags & IEEE80211_F_RSNON) {
		/*
		 * If at least one RSN IE field from the AP's RSN IE fails
		 * to overlap with any value the STA supports, the STA shall
		 * decline to associate with that AP.
		 */
		if ((ni->ni_rsnprotos & ic->ic_rsnprotos) == 0)
			fail |= 0x40;
		if ((ni->ni_rsnakms & ic->ic_rsnakms) == 0)
			fail |= 0x40;
		if ((ni->ni_rsnakms & ic->ic_rsnakms &
		     ~(IEEE80211_AKM_PSK | IEEE80211_AKM_SHA256_PSK)) == 0) {
			/* AP only supports PSK AKMPs */
			if (!(ic->ic_flags & IEEE80211_F_PSK))
				fail |= 0x40;
		}
		if (ni->ni_rsngroupcipher != IEEE80211_CIPHER_WEP40 &&
		    ni->ni_rsngroupcipher != IEEE80211_CIPHER_TKIP &&
		    ni->ni_rsngroupcipher != IEEE80211_CIPHER_CCMP &&
		    ni->ni_rsngroupcipher != IEEE80211_CIPHER_WEP104)
			fail |= 0x40;
		if ((ni->ni_rsnciphers & ic->ic_rsnciphers) == 0)
			fail |= 0x40;

		/* we only support BIP as the IGTK cipher */
		if ((ni->ni_rsncaps & IEEE80211_RSNCAP_MFPC) &&
		    ni->ni_rsngroupmgmtcipher != IEEE80211_CIPHER_BIP)
			fail |= 0x40;

		/* we do not support MFP but AP requires it */
		if (!(ic->ic_caps & IEEE80211_C_MFP) &&
		    (ni->ni_rsncaps & IEEE80211_RSNCAP_MFPR))
			fail |= 0x40;

		/* we require MFP but AP does not support it */
		if ((ic->ic_caps & IEEE80211_C_MFP) &&
		    (ic->ic_flags & IEEE80211_F_MFPR) &&
		    !(ni->ni_rsncaps & IEEE80211_RSNCAP_MFPC))
			fail |= 0x40;
	}

#ifdef IEEE80211_DEBUG
	if (ic->ic_if.if_flags & IFF_DEBUG) {
		printf(" %c %s", fail ? '-' : '+',
		    ether_sprintf(ni->ni_macaddr));
		printf(" %s%c", ether_sprintf(ni->ni_bssid),
		    fail & 0x20 ? '!' : ' ');
		printf(" %3d%c", ieee80211_chan2ieee(ic, ni->ni_chan),
			fail & 0x01 ? '!' : ' ');
		printf(" %+4d", ni->ni_rssi);
		printf(" %2dM%c", (rate & IEEE80211_RATE_VAL) / 2,
		    fail & 0x08 ? '!' : ' ');
		printf(" %4s%c",
		    (ni->ni_capinfo & IEEE80211_CAPINFO_ESS) ? "ess" :
		    (ni->ni_capinfo & IEEE80211_CAPINFO_IBSS) ? "ibss" :
		    "????",
		    fail & 0x02 ? '!' : ' ');
		printf(" %7s%c ",
		    (ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY) ?
		    "privacy" : "no",
		    fail & 0x04 ? '!' : ' ');
		printf(" %3s%c ",
		    (ic->ic_flags & IEEE80211_F_RSNON) ?
		    "rsn" : "no",
		    fail & 0x40 ? '!' : ' ');
		ieee80211_print_essid(ni->ni_essid, ni->ni_esslen);
		printf("%s\n", fail & 0x10 ? "!" : "");
	}
#endif
	return fail;
}
Example #24
/*
 * Determine whether a cluster can be allocated.
 */
static daddr_t
ext2_clusteralloc(struct inode *ip, int cg, daddr_t bpref, int len)
{
    struct m_ext2fs *fs;
    struct ext2mount *ump;
    struct buf *bp;
    char *bbp;
    int bit, error, got, i, loc, run;
    int32_t *lp;
    daddr_t bno;

    fs = ip->i_e2fs;
    ump = ip->i_ump;

    if (fs->e2fs_maxcluster[cg] < len)
        return (0);

    EXT2_UNLOCK(ump);
    error = bread(ip->i_devvp,
                  fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
                  (int)fs->e2fs_bsize, NOCRED, &bp);
    if (error)
        goto fail_lock;

    bbp = (char *)bp->b_data;
    EXT2_LOCK(ump);
    /*
     * Check to see if a cluster of the needed size (or bigger) is
     * available in this cylinder group.
     */
    lp = &fs->e2fs_clustersum[cg].cs_sum[len];
    for (i = len; i <= fs->e2fs_contigsumsize; i++)
        if (*lp++ > 0)
            break;
    if (i > fs->e2fs_contigsumsize) {
        /*
         * Update the cluster summary information to reflect
         * the true maximum-sized cluster so that future cluster
         * allocation requests can avoid reading the bitmap only
         * to find no cluster.
         */
        lp = &fs->e2fs_clustersum[cg].cs_sum[len - 1];
        for (i = len - 1; i > 0; i--)
            if (*lp-- > 0)
                break;
        fs->e2fs_maxcluster[cg] = i;
        goto fail;
    }
    EXT2_UNLOCK(ump);

    /* Search the bitmap to find a big enough cluster like in FFS. */
    if (dtog(fs, bpref) != cg)
        bpref = 0;
    if (bpref != 0)
        bpref = dtogd(fs, bpref);
    loc = bpref / NBBY;
    bit = 1 << (bpref % NBBY);
    for (run = 0, got = bpref; got < fs->e2fs->e2fs_fpg; got++) {
        if ((bbp[loc] & bit) != 0)
            run = 0;
        else {
            run++;
            if (run == len)
                break;
        }
        if ((got & (NBBY - 1)) != (NBBY - 1))
            bit <<= 1;
        else {
            loc++;
            bit = 1;
        }
    }

    if (got >= fs->e2fs->e2fs_fpg)
        goto fail_lock;

    /* Allocate the cluster that we found. */
    for (i = 1; i < len; i++)
        if (!isclr(bbp, got - run + i))
            panic("ext2_clusteralloc: map mismatch");

    bno = got - run + 1;
    if (bno >= fs->e2fs->e2fs_fpg)
        panic("ext2_clusteralloc: allocated out of group");

    EXT2_LOCK(ump);
    for (i = 0; i < len; i += fs->e2fs_fpb) {
        setbit(bbp, bno + i);
        ext2_clusteracct(fs, bbp, cg, bno + i, -1);
        fs->e2fs->e2fs_fbcount--;
        fs->e2fs_gd[cg].ext2bgd_nbfree--;
    }
    fs->e2fs_fmod = 1;
    EXT2_UNLOCK(ump);

    bdwrite(bp);
    return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);

fail_lock:
    EXT2_LOCK(ump);
fail:
    brelse(bp);
    return (0);
}
Example #25
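/*
 * Check a scanned node against the current configuration (channel,
 * capabilities, rates, ESSID and BSSID).  Returns 0 if the BSS is
 * acceptable, otherwise a bitmask of mismatch reasons.
 */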
int
ieee80211_match_bss(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	u_int8_t rate;
	int fail;

	fail = 0;
	if (isclr(ic->ic_chan_active, ieee80211_chan2ieee(ic, ni->ni_chan)))
		fail |= 0x01;
	if (ic->ic_des_chan != IEEE80211_CHAN_ANYC &&
	    ni->ni_chan != ic->ic_des_chan)
		fail |= 0x01;
	if (ic->ic_opmode == IEEE80211_M_IBSS) {
		if ((ni->ni_capinfo & IEEE80211_CAPINFO_IBSS) == 0)
			fail |= 0x02;
	} else {
		if ((ni->ni_capinfo & IEEE80211_CAPINFO_ESS) == 0)
			fail |= 0x02;
	}
	if (ic->ic_flags & IEEE80211_F_WEPON) {
		if ((ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY) == 0)
			fail |= 0x04;
	} else {
		/* XXX does this mean privacy is supported or required? */
		if (ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY)
			fail |= 0x04;
	}
	rate = ieee80211_fix_rate(ic, ni, IEEE80211_F_DONEGO);
	if (rate & IEEE80211_RATE_BASIC)
		fail |= 0x08;
	if (ic->ic_des_esslen != 0 &&
	    (ni->ni_esslen != ic->ic_des_esslen ||
	     memcmp(ni->ni_essid, ic->ic_des_essid, ic->ic_des_esslen) != 0))
		fail |= 0x10;
	if ((ic->ic_flags & IEEE80211_F_DESBSSID) &&
	    !IEEE80211_ADDR_EQ(ic->ic_des_bssid, ni->ni_bssid))
		fail |= 0x20;
#ifdef IEEE80211_DEBUG
	if (ic->ic_if.if_flags & IFF_DEBUG) {
		printf(" %c %s", fail ? '-' : '+',
		    ether_sprintf(ni->ni_macaddr));
		printf(" %s%c", ether_sprintf(ni->ni_bssid),
		    fail & 0x20 ? '!' : ' ');
		printf(" %3d%c", ieee80211_chan2ieee(ic, ni->ni_chan),
			fail & 0x01 ? '!' : ' ');
		printf(" %+4d", ni->ni_rssi);
		printf(" %2dM%c", (rate & IEEE80211_RATE_VAL) / 2,
		    fail & 0x08 ? '!' : ' ');
		printf(" %4s%c",
		    (ni->ni_capinfo & IEEE80211_CAPINFO_ESS) ? "ess" :
		    (ni->ni_capinfo & IEEE80211_CAPINFO_IBSS) ? "ibss" :
		    "????",
		    fail & 0x02 ? '!' : ' ');
		printf(" %3s%c ",
		    (ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY) ?
		    "wep" : "no",
		    fail & 0x04 ? '!' : ' ');
		ieee80211_print_essid(ni->ni_essid, ni->ni_esslen);
		printf("%s\n", fail & 0x10 ? "!" : "");
	}
#endif
	return fail;
}
Example #26
/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
static daddr_t
ext2_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode)
{
    struct m_ext2fs *fs;
    struct buf *bp;
    struct ext2mount *ump;
    int error, start, len;
    char *ibp, *loc;
    ipref--; /* to avoid a lot of (ipref -1) */
    if (ipref == -1)
        ipref = 0;
    fs = ip->i_e2fs;
    ump = ip->i_ump;
    if (fs->e2fs_gd[cg].ext2bgd_nifree == 0)
        return (0);
    EXT2_UNLOCK(ump);
    error = bread(ip->i_devvp, fsbtodb(fs,
                                       fs->e2fs_gd[cg].ext2bgd_i_bitmap),
                  (int)fs->e2fs_bsize, NOCRED, &bp);
    if (error) {
        brelse(bp);
        EXT2_LOCK(ump);
        return (0);
    }
    if (fs->e2fs_gd[cg].ext2bgd_nifree == 0) {
        /*
         * Another thread allocated the last i-node in this
         * group while we were waiting for the buffer.
         */
        brelse(bp);
        EXT2_LOCK(ump);
        return (0);
    }
    ibp = (char *)bp->b_data;
    if (ipref) {
        ipref %= fs->e2fs->e2fs_ipg;
        if (isclr(ibp, ipref))
            goto gotit;
    }
    start = ipref / NBBY;
    len = howmany(fs->e2fs->e2fs_ipg - ipref, NBBY);
    loc = memcchr(&ibp[start], 0xff, len);
    if (loc == NULL) {
        len = start + 1;
        start = 0;
        loc = memcchr(&ibp[start], 0xff, len);
        if (loc == NULL) {
            printf("cg = %d, ipref = %lld, fs = %s\n",
                   cg, (long long)ipref, fs->e2fs_fsmnt);
            panic("ext2fs_nodealloccg: map corrupted");
            /* NOTREACHED */
        }
    }
    ipref = (loc - ibp) * NBBY + ffs(~*loc) - 1;
gotit:
    setbit(ibp, ipref);
    EXT2_LOCK(ump);
    fs->e2fs_gd[cg].ext2bgd_nifree--;
    fs->e2fs->e2fs_ficount--;
    fs->e2fs_fmod = 1;
    if ((mode & IFMT) == IFDIR) {
        fs->e2fs_gd[cg].ext2bgd_ndirs++;
        fs->e2fs_total_dir++;
    }
    EXT2_UNLOCK(ump);
    bdwrite(bp);
    return (cg * fs->e2fs->e2fs_ipg + ipref + 1);
}
Example #27
/*
 * check_arena -- (internal) perform a consistency check on an arena
 */
static int
check_arena(struct btt *bttp, struct arena *arenap)
{
	LOG(3, "bttp %p arenap %p", bttp, arenap);

	int consistent = 1;

	off_t map_entry_off = arenap->mapoff;
	int bitmapsize = howmany(arenap->internal_nlba, 8);
	char *bitmap = Malloc(bitmapsize);
	if (bitmap == NULL) {
		LOG(1, "!Malloc for bitmap");
		return -1;
	}
	memset(bitmap, '\0', bitmapsize);

	/*
	 * Go through every post-map LBA mentioned in the map and make sure
	 * there are no duplicates.  bitmap is used to track which LBAs have
	 * been seen so far.
	 */
	uint32_t *mapp = NULL;
	int mlen;
	int next_index = 0;
	int remaining = 0;
	for (int i = 0; i < arenap->external_nlba; i++) {
		uint32_t entry;

		if (remaining == 0) {
			/* request a mapping of remaining map area */
			mlen = (*bttp->ns_cbp->nsmap)(bttp->ns,
				0, (void **)&mapp,
				(arenap->external_nlba - i) * sizeof (uint32_t),
				map_entry_off);

			if (mlen < 0)
				return -1;

			remaining = mlen;
			next_index = 0;
		}
		entry = le32toh(mapp[next_index]);

		/* for debug, dump non-zero map entries at log level 11 */
		if ((entry & BTT_MAP_ENTRY_ZERO) == 0)
			LOG(11, "map[%d]: %u%s%s", i,
				entry & BTT_MAP_ENTRY_LBA_MASK,
				(entry & BTT_MAP_ENTRY_ERROR) ? " ERROR" : "",
				(entry & BTT_MAP_ENTRY_ZERO) ? " ZERO" : "");

		entry &= BTT_MAP_ENTRY_LBA_MASK;

		if (isset(bitmap, entry)) {
			LOG(1, "map[%d] duplicate entry: %u", i, entry);
			consistent = 0;
		} else
			setbit(bitmap, entry);

		map_entry_off += sizeof (uint32_t);
		next_index++;
		remaining -= sizeof (uint32_t);
	}

	/*
	 * Go through the free blocks in the flog, adding them to bitmap
	 * and checking for duplications.  It is sufficient to read the
	 * run-time flog here, avoiding more calls to nsread.
	 */
	for (int i = 0; i < bttp->nfree; i++) {
		uint32_t entry = arenap->flogs[i].flog.old_map;
		entry &= BTT_MAP_ENTRY_LBA_MASK;

		if (isset(bitmap, entry)) {
			LOG(1, "flog[%d] duplicate entry: %u", i, entry);
			consistent = 0;
		} else
			setbit(bitmap, entry);
	}

	/*
	 * Make sure every possible post-map LBA was accounted for
	 * in the two loops above.
	 */
	for (int i = 0; i < arenap->internal_nlba; i++)
		if (isclr(bitmap, i)) {
			LOG(1, "unreferenced lba: %u", i);
			consistent = 0;
		}


	Free(bitmap);

	return consistent;
}