TStringID TDynamicTemplateMatcher::AddTemplate(const std::string &templatestr) {
	std::vector<size_t> powerof2(1, 1);	// group sizes 1, 2, 4, ... per merge level
	CheckOnce.addstring(templatestr, 0);
	size_t currentpos = 0;
	templates.push_back(templatestr);
	template_groups[0].push_back(templates.size() - 1);
	Matchers[0].push_back(TStaticTemplateMatcher());
	Matchers[0].back().AddTemplate(templatestr);
	// Binary-counter merge: whenever a level ends up holding more than one
	// static matcher, rebuild all templates of that level into a single
	// matcher one level up, so level i holds groups of 2^i templates.
	while (Matchers[currentpos].size() > 1) {
		TStaticTemplateMatcher tmp;
		if (template_groups.size() - 1 == currentpos) {
			template_groups.push_back(std::vector<size_t>());
			Matchers.push_back(std::vector<TStaticTemplateMatcher>());
		}
		if (powerof2.size() - 1 == currentpos)
			powerof2.push_back(powerof2.back() * 2);
		while (Matchers[currentpos].size() > 0)
			Matchers[currentpos].pop_back();
		while (!template_groups[currentpos].empty()) {
			template_groups[currentpos + 1].push_back(template_groups[currentpos].back());
			tmp.AddTemplate(templates[template_groups[currentpos].back()]);
			template_groups[currentpos].pop_back();
		}
		Matchers[currentpos + 1].push_back(std::move(tmp));
		currentpos++;
	}
	return templates.size() - 1;
}
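Note: in the first example above, powerof2 is just the name of a local
std::vector that tracks the doubling group sizes. In all of the remaining
examples, powerof2() is the classic macro from the BSD <sys/param.h> header,
which tests whether a value is zero or a power of two. A minimal equivalent
definition, for reference:

#define powerof2(x)	((((x) - 1) & (x)) == 0)

Under this definition powerof2(0) evaluates to true, which is why several of
the callers below also reject a size of zero explicitly.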
Example #2
int
wapbl_getdisksize(struct vnode *vp, uint64_t *numsecp, unsigned int *secsizep)
{
	struct partinfo dpart;
	unsigned int secsize;
	uint64_t numsec;
	int error;

	error = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, NOCRED);
	if (error == 0) {
		secsize = dpart.disklab->d_secsize;
		numsec  = dpart.part->p_size;
	}

	if (error == 0 &&
	    (secsize == 0 || secsize > MAXBSIZE || !powerof2(secsize) ||
	     numsec == 0)) {
#ifdef DIAGNOSTIC
		printf("%s: vnode %p returns invalid disksize values"
		    " (secsize = %u, numsec = %llu)\n", __func__, vp,
		    secsize, numsec);
#endif
		error = EINVAL;
	}
	if (error == 0) {
		*secsizep = secsize;
		*numsecp  = numsec;
	}

	return error;
}
Example #3
static struct buf_ring *
buf_ring_alloc_(int count, struct malloc_type *type, int flags, struct mtx *lock, int brflags, int id, int nqs)
{
	struct buf_ring *br;
	int alloc_count;

	KASSERT(powerof2(count), ("buf ring must be size power of 2"));
	alloc_count = (brflags & BR_FLAGS_ALIGNED) ? (count * ALIGN_SCALE) : count; 

	br = malloc(sizeof(struct buf_ring) + alloc_count*sizeof(caddr_t),
	    type, flags|M_ZERO);
	if (br == NULL)
		return (NULL);
	br->br_flags = brflags;
#ifdef DEBUG_BUFRING
	br->br_lock = lock;
#endif	
	br->br_prod_size = br->br_cons_size = count;
	br->br_prod_mask = br->br_cons_mask = count-1;
	br->br_prod_head = br->br_cons_head = 0;
	br->br_prod_tail = br->br_cons_tail = 0;
	br->br_id = id;
	br->br_nqs = nqs;

	return (br);
}
Example #4
/*---------------------------------------------------------------------------*/
struct hg_atomic_queue *
hg_atomic_queue_alloc(unsigned int count)
{
    struct hg_atomic_queue *hg_atomic_queue = NULL;

    if (!powerof2(count)) {
        HG_UTIL_LOG_ERROR("atomic queue size must be power of 2");
        goto done;
    }

    hg_atomic_queue = malloc(sizeof(struct hg_atomic_queue) +
        count * HG_ATOMIC_QUEUE_ELT_SIZE);
    if (!hg_atomic_queue) {
        HG_UTIL_LOG_ERROR("Could not allocate atomic queue");
        goto done;
    }

    hg_atomic_queue->prod_size = hg_atomic_queue->cons_size = count;
    hg_atomic_queue->prod_mask = hg_atomic_queue->cons_mask = count - 1;
    hg_atomic_init32(&hg_atomic_queue->prod_head, 0);
    hg_atomic_init32(&hg_atomic_queue->cons_head, 0);
    hg_atomic_init32(&hg_atomic_queue->prod_tail, 0);
    hg_atomic_init32(&hg_atomic_queue->cons_tail, 0);

done:
    return hg_atomic_queue;
}
Example #5
/*
 * This routine precomputes a lookup table for divisors 1..lim16
 * - table size is stored in item #0 to check for buffer overruns
 */
void
fastu16div16gentab(struct divu16 *duptr, uint32_t lim16)
{
    uint32_t magic = lim16;
    uint32_t info = 0;
    uint32_t div;
    uint32_t val;
    uint32_t shift;

    /* store array size into the first item to avoid buffer overruns */
    duptr->magic = magic;
    duptr->info = info;
    duptr++;
    for (div = 2 ; div <= lim16 ; div++) {
        duptr++;
        lzero32(div, val);
        val -= 16;
        shift = 15 - val;
        if (!powerof2(div)) {
            uint32_t val32;
            uint32_t res32;
            uint32_t rem;
            uint32_t lim;
            uint32_t e;

            lim = UINT32_C(1) << shift;
            val32 = lim;
            val32 <<= 16;
            magic = val32 / div;
            /* eliminated rem = val32 % div */
            res32 = magic;
            res32 *= div;
            val32 -= res32;
            e = div - val32;
            if (e < lim) {
                info = shift;
            } else {
                rem = val32;
                magic <<= 1;
                rem <<= 1;
                info = shift | FASTU32DIVADDBIT;
                if (rem >= div || rem < val32) {
                    magic++;
                }
            }
            magic++;
        } else {
            info = shift;
            magic = 0;
            info |= FASTU32DIVSHIFTBIT;
        }
        duptr->magic = magic;
        duptr->info = info;
    }

    return;
}
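The non-power-of-2 branch above precomputes a Granlund-Montgomery style
"magic number" so that later divisions become a multiply and a shift. A
standalone sketch of the underlying identity (not from the original source;
one sample divisor, verified exhaustively over all 16-bit numerators):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t d = 7;                       /* sample divisor */
    uint32_t s = 3;                       /* smallest s with 2^s >= d */
    uint64_t magic = ((UINT64_C(1) << (16 + s)) / d) + 1;

    /* x / d == (x * magic) >> (16 + s) for every 16-bit x */
    for (uint32_t x = 0; x <= UINT16_MAX; x++)
        assert((uint32_t)((x * magic) >> (16 + s)) == x / d);
    return 0;
}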
Example #6
/* initialize()
 *
 * function used to initialize some data structures of the program
 */
void initialize() {
   //TUNABLE_ULONG_FETCH("net.pf.states_hashsize", &pf_hashsize);
   if (pf_hashsize == 0 || !powerof2(pf_hashsize))
      pf_hashsize = 32768;
   pf_hashmask = pf_hashsize - 1;

   pf_hashseed = arc4random();

   pfnt_hash = (struct pf_nattrack_hash *)calloc(pf_hashsize, sizeof(struct pf_nattrack_hash));
}
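Like most of the hash tables in this listing, initialize() forces a
power-of-2 size so that pf_hashmask = pf_hashsize - 1 can replace a modulo
when bucketing. A minimal illustration of that identity (not from the
original source):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t size = 32768;             /* power of two, as enforced above */
    uint32_t mask = size - 1;
    uint32_t h = 0xdeadbeefu;          /* arbitrary hash value */

    assert((h & mask) == (h % size));  /* holds only when size is 2^k */
    return 0;
}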
Example #7
void
syncache_init(void)
{
	int i;

	tcp_syncache.cache_count = 0;
	tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
	tcp_syncache.cache_limit =
	    tcp_syncache.hashsize * tcp_syncache.bucket_limit;
	tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
	tcp_syncache.hash_secret = arc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
	    &tcp_syncache.hashsize);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
	    &tcp_syncache.cache_limit);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
	    &tcp_syncache.bucket_limit);
	if (!powerof2(tcp_syncache.hashsize)) {
		printf("WARNING: syncache hash size is not a power of 2.\n");
		tcp_syncache.hashsize = 512;	/* safe default */
	}
	tcp_syncache.hashmask = tcp_syncache.hashsize - 1;

	/* Allocate the hash table. */
	MALLOC(tcp_syncache.hashbase, struct syncache_head *,
	    tcp_syncache.hashsize * sizeof(struct syncache_head),
	    M_SYNCACHE, M_WAITOK);

	/* Initialize the hash buckets. */
	for (i = 0; i < tcp_syncache.hashsize; i++) {
		TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket);
		tcp_syncache.hashbase[i].sch_length = 0;
	}

	/* Initialize the timer queues. */
	for (i = 0; i <= SYNCACHE_MAXREXMTS; i++) {
		TAILQ_INIT(&tcp_syncache.timerq[i]);
		callout_init(&tcp_syncache.tt_timerq[i],
			debug_mpsafenet ? CALLOUT_MPSAFE : 0);
	}

	/*
	 * Allocate the syncache entries.  Allow the zone to allocate one
	 * more entry than cache limit, so a new entry can bump out an
	 * older one.
	 */
	tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcp_syncache.zone, tcp_syncache.cache_limit);
	tcp_syncache.cache_limit -= 1;
}
Example #8
/*
 * This routine precomputes a lookup table for divisors 1..lim64
 * - table size is stored in item #0 to check for buffer overruns
 */
void
fastu64divgentab(struct divu64 *duptr, uint32_t lim64)
{
    uint64_t magic = lim64;
    uint64_t info = 0;
    uint64_t div;
    uint32_t val;
    uint32_t shift;
    
    /* store array size into the first item to avoid buffer overruns */
    duptr->magic = magic;
    duptr->info = info;
    duptr++;
    /* bound fixed to <= so the table covers divisors up to lim64,
     * matching the comment and the 16-bit generator above */
    for (div = 2 ; div <= lim64 ; div++) {
        duptr++;
        shift = 63;
        lzero64(div, val);
        shift -= val;
        if (!powerof2(div)) {
            uint64_t val64;
            uint64_t lim;
            uint64_t val;
            uint64_t rem;
            uint64_t e;

            lim = UINT64_C(1) << shift;
            /* val64 was read uninitialized; set it up as in the 16-bit
             * generator above (numerator = 2^(32 + shift)) */
            val64 = lim;
            val64 <<= 32;
            magic = val64 / div;
            rem = val64 % div;
            e = div - rem;
            if (e < lim) {
                info = shift;
            } else {
                val = rem;
                magic <<= 1;
                val <<= 1;
                info = shift | FASTU32DIVADDBIT;	/* was |=, which kept a stale value */
                if (val >= div || val < rem) {
                    magic++;
                }
            }
            magic++;
        } else {
            info = shift;
            magic = 0;
            info |= FASTU32DIVSHIFTBIT;
        }
        duptr->magic = magic;
        duptr->info = info;
    }

    return;
}
Example #9
int debug_posix_memalign(void** memptr, size_t alignment, size_t size) {
  if (DebugCallsDisabled()) {
    return g_dispatch->posix_memalign(memptr, alignment, size);
  }

  if (!powerof2(alignment)) {
    return EINVAL;
  }
  int saved_errno = errno;
  *memptr = debug_memalign(alignment, size);
  errno = saved_errno;
  return (*memptr != nullptr) ? 0 : ENOMEM;
}
Example #10
int main()
{
  volatile int x;
  x = powerof2(16);   // 1
  x = powerof2(32);   // 1
  x = powerof2(24);   // 0
  x = powerof2(8);    // 1
  x = powerof2(10);   // 0
  x = powerof2(12);   // 0
  x = powerof2(128);  // 1
}
extern "C" int chk_posix_memalign(void** memptr, size_t alignment, size_t size) {
  if (DebugCallsDisabled()) {
    return g_malloc_dispatch->posix_memalign(memptr, alignment, size);
  }

  if (!powerof2(alignment)) {
    return EINVAL;
  }
  int saved_errno = errno;
  *memptr = chk_memalign(alignment, size);
  errno = saved_errno;
  return (*memptr != NULL) ? 0 : ENOMEM;
}
Example #12
/*
 * Tcp initialization
 */
void
tcp_init()
{
	int hashsize = TCBHASHSIZE;
	
	tcp_ccgen = 1;
	tcp_cleartaocache();

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	LIST_INIT(&tcb);
	tcbinfo.listhead = &tcb;
	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		printf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
	tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
					&tcbinfo.porthashmask);
	tcbinfo.ipi_zone = zinit("tcpcb", sizeof(struct inp_tp), maxsockets,
				 ZONE_INTERRUPT, 0);

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
	    &tcp_reass_maxseg);

#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	syncache_init();
}
Example #13
static inline size_t
p2roundup(size_t n)
{

	if (!powerof2(n)) {
		n--;
		n |= n >> 1;
		n |= n >> 2;
		n |= n >> 4;
		n |= n >> 8;
		n |= n >> 16;
#if SIZE_T_MAX > 0xffffffffU
		n |= n >> 32;
#endif
		n++;
	}

	return (n);
}
Example #14
static unsigned int
roundup_nearest_power_of_2(unsigned int n)
{
	unsigned int shift;

	if (powerof2(n))
		return (n);

	shift = 0;
	while (n != 1) {
		n >>= 1;
		shift++;
	}

	/* shift counted floor(log2(n)); add one to round up rather than down */
	return (1 << (shift + 1));
}
Example #15
static __inline int
k6_mrmake(struct mem_range_desc *desc, u_int32_t *mtrr) {
	u_int32_t len = 0, wc, uc;
	register int bit;

	if (desc->mr_base &~ 0xfffe0000)
		return EINVAL;
	if (desc->mr_len < 131072 || !powerof2(desc->mr_len))
		return EINVAL;
	if (desc->mr_flags &~ (MDF_WRITECOMBINE|MDF_UNCACHEABLE))
		return EOPNOTSUPP;

	for (bit = ffs(desc->mr_len >> 17) - 1; bit < 15; bit++)
		len |= 1 << (14 - bit); 
	wc = (desc->mr_flags & MDF_WRITECOMBINE) ? 1 : 0;
	uc = (desc->mr_flags & MDF_UNCACHEABLE) ? 1 : 0;

	*mtrr = k6_reg_make(desc->mr_base, len, wc, uc);
	return 0;
}
Example #16
static void *
memalign_check (size_t alignment, size_t bytes, const void *caller)
{
  void *mem;

  if (alignment <= MALLOC_ALIGNMENT)
    return malloc_check (bytes, NULL);

  if (alignment < MINSIZE)
    alignment = MINSIZE;

  /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
     power of 2 and will cause overflow in the check below.  */
  if (alignment > SIZE_MAX / 2 + 1)
    {
      __set_errno (EINVAL);
      return 0;
    }

  /* Check for overflow.  */
  if (bytes > SIZE_MAX - alignment - MINSIZE)
    {
      __set_errno (ENOMEM);
      return 0;
    }

  /* Make sure alignment is power of 2.  */
  if (!powerof2 (alignment))
    {
      size_t a = MALLOC_ALIGNMENT * 2;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }

  (void) mutex_lock (&main_arena.mutex);
  mem = (top_check () >= 0) ? _int_memalign (&main_arena, alignment, bytes + 1) :
        NULL;
  (void) mutex_unlock (&main_arena.mutex);
  return mem2mem_check (mem, bytes);
}
Example #17
static __inline int
k6_mrmake(struct mem_range_desc *desc, u_int32_t *mtrr)
{
	u_int32_t len = 0, wc, uc;
	int bit;

	if (desc->mr_base &~ 0xfffe0000)
		return (EINVAL);
	if (desc->mr_len < 131072 || !powerof2(desc->mr_len))
		return (EINVAL);
	if (desc->mr_flags &~ (MDF_WRITECOMBINE|MDF_UNCACHEABLE|MDF_FORCE))
		return (EOPNOTSUPP);

	for (bit = ffs(desc->mr_len >> 17) - 1; bit < 15; bit++)
		len |= 1 << bit; 
	wc = (desc->mr_flags & MDF_WRITECOMBINE) ? 1 : 0;
	uc = (desc->mr_flags & MDF_UNCACHEABLE) ? 1 : 0;

	*mtrr = K6_REG_MAKE(desc->mr_base, len, wc, uc);
	return (0);
}
Example #18
struct buf_ring *
buf_ring_alloc(int count, struct malloc_type *type, int flags, struct mtx *lock)
{
    struct buf_ring *br;

    KASSERT(powerof2(count), ("buf ring must be size power of 2"));

    br = malloc(sizeof(struct buf_ring) + count*sizeof(caddr_t),
                type, flags|M_ZERO);
    if (br == NULL)
        return (NULL);
#ifdef DEBUG_BUFRING
    br->br_lock = lock;
#endif
    br->br_prod_size = br->br_cons_size = count;
    br->br_prod_mask = br->br_cons_mask = count-1;
    br->br_prod_head = br->br_cons_head = 0;
    br->br_prod_tail = br->br_cons_tail = 0;

    return (br);
}
extern "C" void* chk_memalign(size_t alignment, size_t bytes) {
    if (DebugCallsDisabled()) {
        return g_malloc_dispatch->memalign(alignment, bytes);
    }

    if (alignment <= MALLOC_ALIGNMENT) {
        return chk_malloc(bytes);
    }

    // Make the alignment a power of two.
    if (!powerof2(alignment)) {
        alignment = BIONIC_ROUND_UP_POWER_OF_2(alignment);
    }

    // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes
    // we will align by at least MALLOC_ALIGNMENT bytes
    // and at most alignment-MALLOC_ALIGNMENT bytes
    size_t size = (alignment-MALLOC_ALIGNMENT) + bytes;
    if (size < bytes) { // Overflow.
        return NULL;
    }

    void* base = g_malloc_dispatch->malloc(sizeof(hdr_t) + size + sizeof(ftr_t));
    if (base != NULL) {
        // Check that the actual pointer that will be returned is aligned
        // properly.
        uintptr_t ptr = reinterpret_cast<uintptr_t>(user(reinterpret_cast<hdr_t*>(base)));
        if ((ptr % alignment) != 0) {
            // Align the pointer.
            ptr += ((-ptr) % alignment);
        }

        hdr_t* hdr = meta(reinterpret_cast<void*>(ptr));
        hdr->base = base;
        hdr->bt_depth = GET_BACKTRACE(hdr->bt, MAX_BACKTRACE_DEPTH);
        add(hdr, bytes);
        return user(hdr);
    }
    return base;
}
Example #20
/* User optimization.  */
void
__aio_init (const struct aioinit *init)
{
  /* Get the mutex.  */
  pthread_mutex_lock (&__aio_requests_mutex);

  /* Only allow writing new values if the table is not yet allocated.  */
  if (pool == NULL)
    {
      optim.aio_threads = init->aio_threads < 1 ? 1 : init->aio_threads;
      assert (powerof2 (ENTRIES_PER_ROW));
      optim.aio_num = (init->aio_num < ENTRIES_PER_ROW
		       ? ENTRIES_PER_ROW
		       : init->aio_num & ~(ENTRIES_PER_ROW - 1));
    }

  if (init->aio_idle_time != 0)
    optim.aio_idle_time = init->aio_idle_time;

  /* Release the mutex.  */
  pthread_mutex_unlock (&__aio_requests_mutex);
}
Example #21
TEST(UNISTD_TEST, pathconf_fpathconf) {
  TemporaryFile tf;
  long rc = 0L;
  // As a file system's block size is always power of 2, the configure values
  // for ALLOC and XFER should be power of 2 as well.
  rc = pathconf(tf.filename, _PC_ALLOC_SIZE_MIN);
  ASSERT_TRUE(rc > 0 && powerof2(rc));
  rc = pathconf(tf.filename, _PC_REC_MIN_XFER_SIZE);
  ASSERT_TRUE(rc > 0 && powerof2(rc));
  rc = pathconf(tf.filename, _PC_REC_XFER_ALIGN);
  ASSERT_TRUE(rc > 0 && powerof2(rc));

  rc = fpathconf(tf.fd, _PC_ALLOC_SIZE_MIN);
  ASSERT_TRUE(rc > 0 && powerof2(rc));
  rc = fpathconf(tf.fd, _PC_REC_MIN_XFER_SIZE);
  ASSERT_TRUE(rc > 0 && powerof2(rc));
  rc = fpathconf(tf.fd, _PC_REC_XFER_ALIGN);
  ASSERT_TRUE(rc > 0 && powerof2(rc));
}
Example #22
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = kmalloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_INTWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_WAITOK | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}
Example #23
static int
resize_dynamic_table(struct ip_fw_chain *chain, int nbuckets)
{
	int i, k, nbuckets_old;
	ipfw_dyn_rule *q;
	struct ipfw_dyn_bucket *dyn_v, *dyn_v_old;

	/* Check if given number is power of 2 and less than 64k */
	if ((nbuckets > 65536) || (!powerof2(nbuckets)))
		return 1;

	CTR3(KTR_NET, "%s: resize dynamic hash: %d -> %d", __func__,
	    V_curr_dyn_buckets, nbuckets);

	/* Allocate and initialize new hash */
	dyn_v = malloc(nbuckets * sizeof(ipfw_dyn_rule), M_IPFW,
	    M_WAITOK | M_ZERO);

	for (i = 0 ; i < nbuckets; i++)
		IPFW_BUCK_LOCK_INIT(&dyn_v[i]);

	/*
	 * Call upper half lock, as get_map() do to ease
	 * read-only access to dynamic rules hash from sysctl
	 */
	IPFW_UH_WLOCK(chain);

	/*
	 * Acquire chain write lock to permit hash access
	 * for main traffic path without additional locks
	 */
	IPFW_WLOCK(chain);

	/* Save old values */
	nbuckets_old = V_curr_dyn_buckets;
	dyn_v_old = V_ipfw_dyn_v;

	/* Skip relinking if array is not set up */
	if (V_ipfw_dyn_v == NULL)
		V_curr_dyn_buckets = 0;

	/* Re-link all dynamic states */
	for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
		while (V_ipfw_dyn_v[i].head != NULL) {
			/* Remove from current chain */
			q = V_ipfw_dyn_v[i].head;
			V_ipfw_dyn_v[i].head = q->next;

			/* Get new hash value */
			k = hash_packet(&q->id, nbuckets);
			q->bucket = k;
			/* Add to the new head */
			q->next = dyn_v[k].head;
			dyn_v[k].head = q;
		}
	}

	/* Update current pointers/buckets values */
	V_curr_dyn_buckets = nbuckets;
	V_ipfw_dyn_v = dyn_v;

	IPFW_WUNLOCK(chain);

	IPFW_UH_WUNLOCK(chain);

	/* Start periodic callout on initial creation */
	if (dyn_v_old == NULL) {
		callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, curvnet, 0);
		return (0);
	}

	/* Destroy all mutexes */
	for (i = 0 ; i < nbuckets_old ; i++)
		IPFW_BUCK_LOCK_DESTROY(&dyn_v_old[i]);

	/* Free old hash */
	free(dyn_v_old, M_IPFW);

	return 0;
}
Example #24
static void
print_arg (argument *a, bfd_vma memaddr, struct disassemble_info *info)
{
  LONGLONG longdisp, mask;
  int sign_flag = 0;
  int relative = 0;
  bfd_vma number;
  int op_index = 0;
  char string[200];
  PTR stream = info->stream;
  fprintf_ftype func = info->fprintf_func;

  switch (a->type)
    {
    case arg_copr:
      func (stream, "%s", getcopregname (a->cr, CRX_C_REGTYPE));
      break;

    case arg_copsr:
      func (stream, "%s", getcopregname (a->cr, CRX_CS_REGTYPE));
      break;

    case arg_r:
      if (IS_INSN_MNEMONIC ("mtpr") || IS_INSN_MNEMONIC ("mfpr"))
	func (stream, "%s", getprocregname (a->r));
      else
	func (stream, "%s", getregname (a->r));
      break;

    case arg_ic:
      if (IS_INSN_MNEMONIC ("excp"))
	func (stream, "%s", gettrapstring (a->constant));

      else if (IS_INSN_MNEMONIC ("cinv"))
	func (stream, "%s", getcinvstring (a->constant));

      else if (INST_HAS_REG_LIST)
        {
	  REG_ARG_TYPE reg_arg_type = IS_INSN_TYPE (COP_REG_INS) ? 
				 COP_ARG : IS_INSN_TYPE (COPS_REG_INS) ? 
				 COPS_ARG : (instruction->flags & USER_REG) ?
				 USER_REG_ARG : REG_ARG;

          if ((reg_arg_type == COP_ARG) || (reg_arg_type == COPS_ARG))
	    {
		/*  Check for proper argument number.  */
		if (processing_argument_number == 2)
		  {
		    getregliststring (a->constant, string, reg_arg_type);
		    func (stream, "%s", string);
		  }
		else
		  func (stream, "$0x%lx", a->constant);
	    }
	  else
            {
              getregliststring (a->constant, string, reg_arg_type);
              func (stream, "%s", string);
            }
        }
      else
	func (stream, "$0x%lx", a->constant);
      break;

    case arg_idxr:
      func (stream, "0x%lx(%s,%s,%d)", a->constant, getregname (a->r),
	    getregname (a->i_r), powerof2 (a->scale));
      break;

    case arg_rbase:
      func (stream, "(%s)", getregname (a->r));
      break;

    case arg_cr:
      func (stream, "0x%lx(%s)", a->constant, getregname (a->r));

      if (IS_INSN_TYPE (LD_STOR_INS_INC))
	func (stream, "+");
      break;

    case arg_c:
      /* Removed the *2 part because implicit zeros are no longer required.
	 Have to fix this, as it needs a bit of extension in terms of branch
	 instructions.  Have to add support for cmp and branch instructions.  */
      if (IS_INSN_TYPE (BRANCH_INS) || IS_INSN_MNEMONIC ("bal")
	  || IS_INSN_TYPE (CMPBR_INS) || IS_INSN_TYPE (DCR_BRANCH_INS)
	  || IS_INSN_TYPE (COP_BRANCH_INS))
        {
	  relative = 1;
          longdisp = a->constant;
          longdisp <<= 1;

          switch (a->size)
            {
            case 8:
	    case 16:
	    case 24:
	    case 32:
	      mask = ((LONGLONG)1 << a->size) - 1;
              if (longdisp & ((LONGLONG)1 << a->size))
                {
                  sign_flag = 1;
                  longdisp = ~(longdisp) + 1;
                }
              a->constant = (unsigned long int) (longdisp & mask);
              break;
            default:
	      func (stream,
		    "Wrong offset used in branch/bal instruction");
              break;
            }

        }
      /* For branch Neq instruction it is 2*offset + 2.  */
      else if (IS_INSN_TYPE (BRANCH_NEQ_INS))
	a->constant = 2 * a->constant + 2;
      else if (IS_INSN_TYPE (LD_STOR_INS_INC)
	  || IS_INSN_TYPE (LD_STOR_INS)
	  || IS_INSN_TYPE (STOR_IMM_INS)
	  || IS_INSN_TYPE (CSTBIT_INS))
        {
          op_index = instruction->flags & REVERSE_MATCH ? 0 : 1;
          if (instruction->operands[op_index].op_type == abs16)
	    a->constant |= 0xFFFF0000;
        }
      func (stream, "%s", "0x");
      number = (relative ? memaddr : 0)
	       + (sign_flag ? -a->constant : a->constant);
      (*info->print_address_func) (number, info);
      break;
    default:
      break;
    }
}
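Note that the CRX disassembler above uses powerof2 with different semantics
from the rest of this listing: in the binutils sources it is a small helper
that raises 2 to the given power, so the arg_idxr case prints a scale factor
of 1, 2, 4, or 8. A sketch of such a helper (hypothetical, shown only to
make the call site readable):

static int
powerof2 (int x)
{
  return 1 << x;	/* 2 raised to the power x */
}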
Example #25
int
hn_rndis_query_rsscaps(struct hn_softc *sc, int *rxr_cnt0)
{
	struct ndis_rss_caps in, caps;
	size_t caps_len;
	int error, indsz, rxr_cnt, hash_fnidx;
	uint32_t hash_func = 0, hash_types = 0;

	*rxr_cnt0 = 0;

	if (sc->hn_ndis_ver < HN_NDIS_VERSION_6_20)
		return (EOPNOTSUPP);

	memset(&in, 0, sizeof(in));
	in.ndis_hdr.ndis_type = NDIS_OBJTYPE_RSS_CAPS;
	in.ndis_hdr.ndis_rev = NDIS_RSS_CAPS_REV_2;
	in.ndis_hdr.ndis_size = NDIS_RSS_CAPS_SIZE;

	caps_len = NDIS_RSS_CAPS_SIZE;
	error = hn_rndis_query2(sc, OID_GEN_RECEIVE_SCALE_CAPABILITIES,
	    &in, NDIS_RSS_CAPS_SIZE, &caps, &caps_len, NDIS_RSS_CAPS_SIZE_6_0);
	if (error)
		return (error);

	/*
	 * Preliminary verification.
	 */
	if (caps.ndis_hdr.ndis_type != NDIS_OBJTYPE_RSS_CAPS) {
		if_printf(sc->hn_ifp, "invalid NDIS objtype 0x%02x\n",
		    caps.ndis_hdr.ndis_type);
		return (EINVAL);
	}
	if (caps.ndis_hdr.ndis_rev < NDIS_RSS_CAPS_REV_1) {
		if_printf(sc->hn_ifp, "invalid NDIS objrev 0x%02x\n",
		    caps.ndis_hdr.ndis_rev);
		return (EINVAL);
	}
	if (caps.ndis_hdr.ndis_size > caps_len) {
		if_printf(sc->hn_ifp, "invalid NDIS objsize %u, "
		    "data size %zu\n", caps.ndis_hdr.ndis_size, caps_len);
		return (EINVAL);
	} else if (caps.ndis_hdr.ndis_size < NDIS_RSS_CAPS_SIZE_6_0) {
		if_printf(sc->hn_ifp, "invalid NDIS objsize %u\n",
		    caps.ndis_hdr.ndis_size);
		return (EINVAL);
	}

	/*
	 * Save information for later RSS configuration.
	 */
	if (caps.ndis_nrxr == 0) {
		if_printf(sc->hn_ifp, "0 RX rings!?\n");
		return (EINVAL);
	}
	if (bootverbose)
		if_printf(sc->hn_ifp, "%u RX rings\n", caps.ndis_nrxr);
	rxr_cnt = caps.ndis_nrxr;

	if (caps.ndis_hdr.ndis_size == NDIS_RSS_CAPS_SIZE &&
	    caps.ndis_hdr.ndis_rev >= NDIS_RSS_CAPS_REV_2) {
		if (caps.ndis_nind > NDIS_HASH_INDCNT) {
			if_printf(sc->hn_ifp,
			    "too many RSS indirect table entries %u\n",
			    caps.ndis_nind);
			return (EOPNOTSUPP);
		}
		if (!powerof2(caps.ndis_nind)) {
			if_printf(sc->hn_ifp, "RSS indirect table size is not "
			    "power-of-2 %u\n", caps.ndis_nind);
		}

		if (bootverbose) {
			if_printf(sc->hn_ifp, "RSS indirect table size %u\n",
			    caps.ndis_nind);
		}
		indsz = caps.ndis_nind;
	} else {
		indsz = NDIS_HASH_INDCNT;
	}
	if (indsz < rxr_cnt) {
		if_printf(sc->hn_ifp, "# of RX rings (%d) > "
		    "RSS indirect table size %d\n", rxr_cnt, indsz);
		rxr_cnt = indsz;
	}

	/*
	 * NOTE:
	 * Toeplitz is at the lowest bit, and it is preferred; so ffs(),
	 * instead of fls(), is used here.
	 */
	hash_fnidx = ffs(caps.ndis_caps & NDIS_RSS_CAP_HASHFUNC_MASK);
	if (hash_fnidx == 0) {
		if_printf(sc->hn_ifp, "no hash functions, caps 0x%08x\n",
		    caps.ndis_caps);
		return (EOPNOTSUPP);
	}
	hash_func = 1 << (hash_fnidx - 1); /* ffs is 1-based */

	if (caps.ndis_caps & NDIS_RSS_CAP_IPV4)
		hash_types |= NDIS_HASH_IPV4 | NDIS_HASH_TCP_IPV4;
	if (caps.ndis_caps & NDIS_RSS_CAP_IPV6)
		hash_types |= NDIS_HASH_IPV6 | NDIS_HASH_TCP_IPV6;
	if (caps.ndis_caps & NDIS_RSS_CAP_IPV6_EX)
		hash_types |= NDIS_HASH_IPV6_EX | NDIS_HASH_TCP_IPV6_EX;
	if (hash_types == 0) {
		if_printf(sc->hn_ifp, "no hash types, caps 0x%08x\n",
		    caps.ndis_caps);
		return (EOPNOTSUPP);
	}
	if (bootverbose)
		if_printf(sc->hn_ifp, "RSS caps %#x\n", caps.ndis_caps);

	/* Commit! */
	sc->hn_rss_ind_size = indsz;
	sc->hn_rss_hcap = hash_func | hash_types;
	if (sc->hn_caps & HN_CAP_UDPHASH) {
		/* UDP 4-tuple hash is unconditionally enabled. */
		sc->hn_rss_hcap |= NDIS_HASH_UDP_IPV4_X;
	}
	*rxr_cnt0 = rxr_cnt;
	return (0);
}
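The ffs()-based selection above keeps only the lowest set capability bit
(preferring Toeplitz, per the comment). A standalone sketch of the same bit
trick, not from the original driver:

#include <assert.h>
#include <strings.h>

int main(void)
{
    unsigned caps = 0x6;                      /* two capability bits set */
    unsigned lowest = 1u << (ffs(caps) - 1);  /* ffs() is 1-based */

    assert(lowest == (caps & -caps));         /* two's-complement identity */
    return 0;
}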
Example #26
/*
 * Read in the super block and its summary info, convert to host byte order.
 */
static int
readsb(int listerr)
{
	daddr_t super = bflag ? bflag : SBOFF / dev_bsize;

	if (bread(fsreadfd, (char *)sblk.b_un.b_fs, super, (long)SBSIZE) != 0)
		return 0;
	sblk.b_bno = super;
	sblk.b_size = SBSIZE;

	/* Copy the superblock in memory */
	e2fs_sbload(sblk.b_un.b_fs, &sblock.e2fs);
	
	/*
	 * run a few consistency checks of the super block
	 */
	if (sblock.e2fs.e2fs_magic != E2FS_MAGIC) {
		badsb(listerr, "MAGIC NUMBER WRONG");
		return 0;
	}
	if (sblock.e2fs.e2fs_log_bsize > 2) {
		badsb(listerr, "BAD LOG_BSIZE");
		return 0;
	}
	if (sblock.e2fs.e2fs_rev > E2FS_REV0 &&
	    (!powerof2(sblock.e2fs.e2fs_inode_size) ||
	     sblock.e2fs.e2fs_inode_size < EXT2_REV0_DINODE_SIZE ||
	     sblock.e2fs.e2fs_inode_size >
	      (1024 << sblock.e2fs.e2fs_log_bsize))) {
		badsb(listerr, "BAD INODE_SIZE");
		return 0;
	}

	/* compute the dynamic fields of the in-memory sb */
	/* compute dynamic sb infos */
	sblock.e2fs_ncg =
	    howmany(sblock.e2fs.e2fs_bcount - sblock.e2fs.e2fs_first_dblock,
	    sblock.e2fs.e2fs_bpg);
	/* XXX assume hw bsize = 512 */
	sblock.e2fs_fsbtodb = sblock.e2fs.e2fs_log_bsize + 1;
	sblock.e2fs_bsize = 1024 << sblock.e2fs.e2fs_log_bsize;
	sblock.e2fs_bshift = LOG_MINBSIZE + sblock.e2fs.e2fs_log_bsize;
	sblock.e2fs_qbmask = sblock.e2fs_bsize - 1;
	sblock.e2fs_bmask = ~sblock.e2fs_qbmask;
	sblock.e2fs_ngdb = howmany(sblock.e2fs_ncg,
	    sblock.e2fs_bsize / sizeof(struct ext2_gd));
	sblock.e2fs_ipb = sblock.e2fs_bsize / EXT2_DINODE_SIZE(&sblock);
	sblock.e2fs_itpg = howmany(sblock.e2fs.e2fs_ipg, sblock.e2fs_ipb);

	/*
	 * Compute block size that the filesystem is based on,
	 * according to fsbtodb, and adjust superblock block number
	 * so we can tell if this is an alternate later.
	 */
	super *= dev_bsize;
	dev_bsize = sblock.e2fs_bsize / EXT2_FSBTODB(&sblock, 1);
	sblk.b_bno = super / dev_bsize;

	if (sblock.e2fs_ncg == 1) {
		/* no alternate superblock; assume it's okay */
		havesb = 1;
		return 1;
	}
	getblk(&asblk, 1 * sblock.e2fs.e2fs_bpg + sblock.e2fs.e2fs_first_dblock,
		(long)SBSIZE);
	if (asblk.b_errs)
		return 0;
	if (bflag) {
		havesb = 1;
		return 1;
	}

	/*
	 * Set all possible fields that could differ, then do check
	 * of whole super block against an alternate super block.
	 * When an alternate super-block is specified this check is skipped.
	 */
	asblk.b_un.b_fs->e2fs_rbcount = sblk.b_un.b_fs->e2fs_rbcount;
	asblk.b_un.b_fs->e2fs_fbcount = sblk.b_un.b_fs->e2fs_fbcount;
	asblk.b_un.b_fs->e2fs_ficount = sblk.b_un.b_fs->e2fs_ficount;
	asblk.b_un.b_fs->e2fs_mtime = sblk.b_un.b_fs->e2fs_mtime;
	asblk.b_un.b_fs->e2fs_wtime = sblk.b_un.b_fs->e2fs_wtime;
	asblk.b_un.b_fs->e2fs_mnt_count = sblk.b_un.b_fs->e2fs_mnt_count;
	asblk.b_un.b_fs->e2fs_max_mnt_count =
	    sblk.b_un.b_fs->e2fs_max_mnt_count;
	asblk.b_un.b_fs->e2fs_state = sblk.b_un.b_fs->e2fs_state;
	asblk.b_un.b_fs->e2fs_beh = sblk.b_un.b_fs->e2fs_beh;
	asblk.b_un.b_fs->e2fs_lastfsck = sblk.b_un.b_fs->e2fs_lastfsck;
	asblk.b_un.b_fs->e2fs_fsckintv = sblk.b_un.b_fs->e2fs_fsckintv;
	asblk.b_un.b_fs->e2fs_ruid = sblk.b_un.b_fs->e2fs_ruid;
	asblk.b_un.b_fs->e2fs_rgid = sblk.b_un.b_fs->e2fs_rgid;
	asblk.b_un.b_fs->e2fs_block_group_nr =
	    sblk.b_un.b_fs->e2fs_block_group_nr;
	asblk.b_un.b_fs->e2fs_features_rocompat &= ~EXT2F_ROCOMPAT_LARGEFILE;
	asblk.b_un.b_fs->e2fs_features_rocompat |=
	    sblk.b_un.b_fs->e2fs_features_rocompat & EXT2F_ROCOMPAT_LARGEFILE;
	if (sblock.e2fs.e2fs_rev > E2FS_REV0 &&
	    ((sblock.e2fs.e2fs_features_incompat & ~EXT2F_INCOMPAT_SUPP_FSCK) ||
	    (sblock.e2fs.e2fs_features_rocompat & ~EXT2F_ROCOMPAT_SUPP_FSCK))) {
		if (debug) {
			printf("compat 0x%08x, incompat 0x%08x, compat_ro "
			    "0x%08x\n",
			    sblock.e2fs.e2fs_features_compat,
			    sblock.e2fs.e2fs_features_incompat,
			    sblock.e2fs.e2fs_features_rocompat);

			if ((sblock.e2fs.e2fs_features_rocompat & ~EXT2F_ROCOMPAT_SUPP_FSCK)) {
				char buf[512];

				snprintb(buf, sizeof(buf), EXT2F_ROCOMPAT_BITS,
					sblock.e2fs.e2fs_features_rocompat & ~EXT2F_ROCOMPAT_SUPP_FSCK);
				printf("unsupported rocompat features: %s\n", buf);
			}
			if ((sblock.e2fs.e2fs_features_incompat & ~EXT2F_INCOMPAT_SUPP_FSCK)) {
				char buf[512];

				snprintb(buf, sizeof(buf), EXT2F_INCOMPAT_BITS,
					sblock.e2fs.e2fs_features_incompat & ~EXT2F_INCOMPAT_SUPP_FSCK);
				printf("unsupported incompat features: %s\n", buf);
			}
		}
		badsb(listerr, "INCOMPATIBLE FEATURE BITS IN SUPER BLOCK");
		return 0;
	}
	if (memcmp(sblk.b_un.b_fs, asblk.b_un.b_fs, SBSIZE)) {
		if (debug) {
			u_int32_t *nlp, *olp, *endlp;

			printf("superblock mismatches\n");
			nlp = (u_int32_t *)asblk.b_un.b_fs;
			olp = (u_int32_t *)sblk.b_un.b_fs;
			endlp = olp + (SBSIZE / sizeof(*olp));
			for ( ; olp < endlp; olp++, nlp++) {
				if (*olp == *nlp)
					continue;
				printf("offset %ld, original %ld, "
				    "alternate %ld\n",
				    (long)(olp - (u_int32_t *)sblk.b_un.b_fs),
				    (long)fs2h32(*olp),
				    (long)fs2h32(*nlp));
			}
		}
		badsb(listerr,
		    "VALUES IN SUPER BLOCK DISAGREE WITH "
		    "THOSE IN FIRST ALTERNATE");
		return 0;
	}
	havesb = 1;
	return 1;
}
Example #27
void* debug_memalign(size_t alignment, size_t bytes) {
  if (DebugCallsDisabled()) {
    return g_dispatch->memalign(alignment, bytes);
  }
  ScopedDisableDebugCalls disable;

  if (bytes == 0) {
    bytes = 1;
  }

  void* pointer;
  if (g_debug->need_header()) {
    if (bytes > Header::max_size()) {
      errno = ENOMEM;
      return nullptr;
    }

    // Make the alignment a power of two.
    if (!powerof2(alignment)) {
      alignment = BIONIC_ROUND_UP_POWER_OF_2(alignment);
    }
    // Force the alignment to at least MINIMUM_ALIGNMENT_BYTES to guarantee
    // that the header is aligned properly.
    if (alignment < MINIMUM_ALIGNMENT_BYTES) {
      alignment = MINIMUM_ALIGNMENT_BYTES;
    }

    // We don't have any idea what the natural alignment of
    // the underlying native allocator is, so we always need to
    // over allocate.
    size_t real_size = alignment + bytes + g_debug->extra_bytes();
    if (real_size < bytes) {
      // Overflow.
      errno = ENOMEM;
      return nullptr;
    }

    pointer = g_dispatch->malloc(real_size);
    if (pointer == nullptr) {
      return nullptr;
    }

    uintptr_t value = reinterpret_cast<uintptr_t>(pointer) + g_debug->pointer_offset();
    // Now align the pointer.
    value += (-value % alignment);

    Header* header = g_debug->GetHeader(reinterpret_cast<void*>(value));
    pointer = InitHeader(header, pointer, bytes);
  } else {
    size_t real_size = bytes + g_debug->extra_bytes();
    if (real_size < bytes) {
      // Overflow.
      errno = ENOMEM;
      return nullptr;
    }
    pointer = g_dispatch->memalign(alignment, real_size);
  }

  if (pointer != nullptr && g_debug->config().options & FILL_ON_ALLOC) {
    size_t bytes = internal_malloc_usable_size(pointer);
    size_t fill_bytes = g_debug->config().fill_on_alloc_bytes;
    bytes = (bytes < fill_bytes) ? bytes : fill_bytes;
    memset(pointer, g_debug->config().fill_alloc_value, bytes);
  }

  return pointer;
}
Example #28
static int
create_volume(int ac, char **av)
{
	struct mfi_config_data *config;
	struct mfi_array *ar;
	struct mfi_ld_config *ld;
	struct config_id_state state;
	size_t config_size;
	char *p, *cfg_arrays, *cfg_volumes;
	int error, fd, i, raid_type;
	int narrays, nvolumes, arrays_per_volume;
	struct array_info *arrays;
	long stripe_size;
#ifdef DEBUG
	int dump;
#endif
	int ch, verbose;

	/*
	 * Backwards compat.  Map 'create volume' to 'create' and
	 * 'create spare' to 'add'.
	 */
	if (ac > 1) {
		if (strcmp(av[1], "volume") == 0) {
			av++;
			ac--;
		} else if (strcmp(av[1], "spare") == 0) {
			av++;
			ac--;
			return (add_spare(ac, av));
		}
	}

	if (ac < 2) {
		warnx("create volume: volume type required");
		return (EINVAL);
	}

	bzero(&state, sizeof(state));
	config = NULL;
	arrays = NULL;
	narrays = 0;
	error = 0;

	fd = mfi_open(mfi_unit);
	if (fd < 0) {
		error = errno;
		warn("mfi_open");
		return (error);
	}

	if (!mfi_reconfig_supported()) {
		warnx("The current mfi(4) driver does not support "
		    "configuration changes.");
		error = EOPNOTSUPP;
		goto error;
	}

	/* Lookup the RAID type first. */
	raid_type = -1;
	for (i = 0; raid_type_table[i].name != NULL; i++)
		if (strcasecmp(raid_type_table[i].name, av[1]) == 0) {
			raid_type = raid_type_table[i].raid_type;
			break;
		}

	if (raid_type == -1) {
		warnx("Unknown or unsupported volume type %s", av[1]);
		error = EINVAL;
		goto error;
	}

	/* Parse any options. */
	optind = 2;
#ifdef DEBUG
	dump = 0;
#endif
	verbose = 0;
	stripe_size = 64 * 1024;

	while ((ch = getopt(ac, av, "ds:v")) != -1) {
		switch (ch) {
#ifdef DEBUG
		case 'd':
			dump = 1;
			break;
#endif
		case 's':
			stripe_size = dehumanize(optarg);
			if ((stripe_size < 512) || (!powerof2(stripe_size)))
				stripe_size = 64 * 1024;
			break;
		case 'v':
			verbose = 1;
			break;
		case '?':
		default:
			error = EINVAL;
			goto error;
		}
	}
	ac -= optind;
	av += optind;

	/* Parse all the arrays. */
	narrays = ac;
	if (narrays == 0) {
		warnx("At least one drive list is required");
		error = EINVAL;
		goto error;
	}
	switch (raid_type) {
	case RT_RAID0:
	case RT_RAID1:
	case RT_RAID5:
	case RT_RAID6:
	case RT_CONCAT:
		if (narrays != 1) {
			warnx("Only one drive list can be specified");
			error = EINVAL;
			goto error;
		}
		break;
	case RT_RAID10:
	case RT_RAID50:
	case RT_RAID60:
		if (narrays < 2) {	/* was < 1, contradicting the warning below */
			warnx("RAID10, RAID50, and RAID60 require at least "
			    "two drive lists");
			error = EINVAL;
			goto error;
		}
		if (narrays > MFI_MAX_SPAN_DEPTH) {
			warnx("Volume spans more than %d arrays",
			    MFI_MAX_SPAN_DEPTH);
			error = EINVAL;
			goto error;
		}
		break;
	}
	arrays = calloc(narrays, sizeof(*arrays));
	if (arrays == NULL) {
		warnx("malloc failed");
		error = ENOMEM;
		goto error;
	}
	for (i = 0; i < narrays; i++) {
		error = parse_array(fd, raid_type, av[i], &arrays[i]);
		if (error)
			goto error;
	}

	switch (raid_type) {
	case RT_RAID10:
	case RT_RAID50:
	case RT_RAID60:
		for (i = 1; i < narrays; i++) {
			if (arrays[i].drive_count != arrays[0].drive_count) {
				warnx("All arrays must contain the same "
				    "number of drives");
				error = EINVAL;
				goto error;
			}
		}
		break;
	}

	/*
	 * Fetch the current config and build sorted lists of existing
	 * array and volume identifiers.
	 */
	if (mfi_config_read(fd, &config) < 0) {
		error = errno;
		warn("Failed to read configuration");
		goto error;
	}
	p = (char *)config->array;
	state.array_ref = 0xffff;
	state.target_id = 0xff;
	state.array_count = config->array_count;
	if (config->array_count > 0) {
		state.arrays = calloc(config->array_count, sizeof(int));
		if (state.arrays == NULL) {
			warnx("malloc failed");
			error = ENOMEM;
			goto error;
		}
		for (i = 0; i < config->array_count; i++) {
			ar = (struct mfi_array *)p;
			state.arrays[i] = ar->array_ref;
			p += config->array_size;
		}
		qsort(state.arrays, config->array_count, sizeof(int),
		    compare_int);
	} else
		state.arrays = NULL;
	state.log_drv_count = config->log_drv_count;
	if (config->log_drv_count) {
		state.volumes = calloc(config->log_drv_count, sizeof(int));
		if (state.volumes == NULL) {
			warnx("malloc failed");
			error = ENOMEM;
			goto error;
		}
		for (i = 0; i < config->log_drv_count; i++) {
			ld = (struct mfi_ld_config *)p;
			state.volumes[i] = ld->properties.ld.v.target_id;
			p += config->log_drv_size;
		}
		qsort(state.volumes, config->log_drv_count, sizeof(int),
		    compare_int);
	} else
		state.volumes = NULL;
	free(config);

	/* Determine the size of the configuration we will build. */
	switch (raid_type) {
	case RT_RAID0:
	case RT_RAID1:
	case RT_RAID5:
	case RT_RAID6:
	case RT_CONCAT:
	case RT_JBOD:
		/* Each volume spans a single array. */
		nvolumes = narrays;
		break;
	case RT_RAID10:
	case RT_RAID50:
	case RT_RAID60:
		/* A single volume spans multiple arrays. */
		nvolumes = 1;
		break;
	default:
		/* Pacify gcc. */
		abort();
	}

	config_size = sizeof(struct mfi_config_data) +
	    sizeof(struct mfi_ld_config) * nvolumes + MFI_ARRAY_SIZE * narrays;
	config = calloc(1, config_size);
	if (config == NULL) {
		warnx("malloc failed");
		error = ENOMEM;
		goto error;
	}
	config->size = config_size;
	config->array_count = narrays;
	config->array_size = MFI_ARRAY_SIZE;	/* XXX: Firmware hardcode */
	config->log_drv_count = nvolumes;
	config->log_drv_size = sizeof(struct mfi_ld_config);
	config->spares_count = 0;
	config->spares_size = 40;		/* XXX: Firmware hardcode */
	cfg_arrays = (char *)config->array;
	cfg_volumes = cfg_arrays + config->array_size * narrays;

	/* Build the arrays. */
	for (i = 0; i < narrays; i++) {
		build_array(fd, cfg_arrays, &arrays[i], &state, verbose);
		cfg_arrays += config->array_size;
	}

	/* Now build the volume(s). */
	arrays_per_volume = narrays / nvolumes;
	for (i = 0; i < nvolumes; i++) {
		build_volume(cfg_volumes, arrays_per_volume,
		    &arrays[i * arrays_per_volume], raid_type, stripe_size,
		    &state, verbose);
		cfg_volumes += config->log_drv_size;
	}

#ifdef DEBUG
	if (dump)
		dump_config(fd, config);
#endif

	/* Send the new config to the controller. */
	if (mfi_dcmd_command(fd, MFI_DCMD_CFG_ADD, config, config_size,
	    NULL, 0, NULL) < 0) {
		error = errno;
		warn("Failed to add volume");
		/* FALLTHROUGH */
	}

error:
	/* Clean up. */
	free(config);
	free(state.volumes);
	free(state.arrays);
	for (i = 0; i < narrays; i++)
		free(arrays[i].drives);
	free(arrays);
	close(fd);

	return (error);
}
Example #29
/*
 * Tcp initialization
 */
void
tcp_init()
{
	int hashsize = TCBHASHSIZE;
	vm_size_t       str_size;
	int i;
	
	tcp_ccgen = 1;
	tcp_cleartaocache();

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	read_random(&tcp_now, sizeof(tcp_now));
	tcp_now  = tcp_now & 0x7fffffffffffffff; /* Starts tcp internal 500ms clock at a random value */


	LIST_INIT(&tcb);
	tcbinfo.listhead = &tcb;
#ifndef __APPLE__
	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
#endif
	if (!powerof2(hashsize)) {
		printf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	tcbinfo.hashsize = hashsize;
	tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
	tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
					&tcbinfo.porthashmask);
#ifdef __APPLE__
	str_size = (vm_size_t) sizeof(struct inp_tp);
	tcbinfo.ipi_zone = (void *) zinit(str_size, 120000*str_size, 8192, "tcpcb");
#else
	tcbinfo.ipi_zone = zinit("tcpcb", sizeof(struct inp_tp), maxsockets,
				 ZONE_INTERRUPT, 0);
#endif

	tcp_reass_maxseg = nmbclusters / 16;
#ifndef __APPLE__
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
	    &tcp_reass_maxseg);
#endif

#if INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR
	tcbinfo.last_pcb = 0;
	dummy_tcb.t_state = TCP_NSTATES;
	dummy_tcb.t_flags = 0;
	tcbinfo.dummy_cb = (caddr_t) &dummy_tcb;
	in_pcb_nat_init(&tcbinfo, AF_INET, IPPROTO_TCP, SOCK_STREAM);

	delack_bitmask = _MALLOC((4 * hashsize)/32, M_PCB, M_WAITOK);
	if (delack_bitmask == 0) 
	     panic("Delack Memory");

	for (i=0; i < (tcbinfo.hashsize / 32); i++)
	         delack_bitmask[i] = 0;

	for (i=0; i < N_TIME_WAIT_SLOTS; i++) {
	     LIST_INIT(&time_wait_slots[i]);
	}
}
Example #30
void
mke2fs(const char *fsys, int fi, int fo)
{
	struct timeval tv;
	int64_t minfssize;
	uint bcount, fbcount, ficount;
	uint blocks_gd, blocks_per_cg, inodes_per_cg, iblocks_per_cg;
	uint minblocks_per_cg, blocks_lastcg;
	uint ncg, cylno, sboff;
	uuid_t uuid;
	uint32_t uustat;
	int i, len, col, delta, fld_width, max_cols;
	struct winsize winsize;

	gettimeofday(&tv, NULL);
	fsi = fi;
	fso = fo;

	/*
	 * collect and verify the block and fragment sizes
	 */
	if (!powerof2(bsize)) {
		errx(EXIT_FAILURE,
		    "block size must be a power of 2, not %u\n",
		    bsize);
	}
	if (!powerof2(fsize)) {
		errx(EXIT_FAILURE,
		    "fragment size must be a power of 2, not %u\n",
		    fsize);
	}
	if (fsize < sectorsize) {
		errx(EXIT_FAILURE,
		    "fragment size %u is too small, minimum is %u\n",
		    fsize, sectorsize);
	}
	if (bsize < MINBSIZE) {
		errx(EXIT_FAILURE,
		    "block size %u is too small, minimum is %u\n",
		    bsize, MINBSIZE);
	}
	if (bsize > EXT2_MAXBSIZE) {
		errx(EXIT_FAILURE,
		    "block size %u is too large, maximum is %u\n",
		    bsize, EXT2_MAXBSIZE);
	}
	if (bsize != fsize) {
		/*
		 * There is no fragment support on current ext2fs (yet?),
		 * but some kernel code refers fsize or fpg as bsize or bpg
		 * and Linux seems to set the same values to them.
		 */
		errx(EXIT_FAILURE,
		    "block size (%u) can't be different from "
		    "fragment size (%u)\n",
		    bsize, fsize);
	}

	/* variable inodesize is REV1 feature */
	if (Oflag == 0 && inodesize != EXT2_REV0_DINODE_SIZE) {
		errx(EXIT_FAILURE, "GOOD_OLD_REV file system format"
		    " doesn't support %d byte inode\n", inodesize);
	}

	sblock.e2fs.e2fs_log_bsize = ilog2(bsize) - LOG_MINBSIZE;
	/* Umm, why not e2fs_log_fsize? */
	sblock.e2fs.e2fs_fsize = ilog2(fsize) - LOG_MINBSIZE;

	sblock.e2fs_bsize = bsize;
	sblock.e2fs_bshift = sblock.e2fs.e2fs_log_bsize + LOG_MINBSIZE;
	sblock.e2fs_qbmask = sblock.e2fs_bsize - 1;
	sblock.e2fs_bmask = ~sblock.e2fs_qbmask;
	sblock.e2fs_fsbtodb = ilog2(sblock.e2fs_bsize) - ilog2(sectorsize);
	sblock.e2fs_ipb = sblock.e2fs_bsize / inodesize;

	/*
	 * Ext2fs preserves BBSIZE (1024 bytes) space at the top for
	 * bootloader (though it is not enough at all for our bootloader).
	 * If bsize == BBSIZE we have to preserve one block.
	 * If bsize > BBSIZE, the first block already contains BBSIZE space
	 * before superblock because superblock is allocated at SBOFF and
	 * bsize is a power of two (i.e. 2048 bytes or more).
	 */
	sblock.e2fs.e2fs_first_dblock = (sblock.e2fs_bsize > BBSIZE) ? 0 : 1;
	minfssize = fsbtodb(&sblock,
	    sblock.e2fs.e2fs_first_dblock +
	    NBLOCK_SUPERBLOCK +
	    1 /* at least one group descriptor */ +
	    NBLOCK_BLOCK_BITMAP	+
	    NBLOCK_INODE_BITMAP +
	    1 /* at least one inode table block */ +
	    1 /* at least one data block for rootdir */ +
	    1 /* at least one data block for data */
	    );			/* XXX and more? */

	if (fssize < minfssize)
		errx(EXIT_FAILURE, "Filesystem size %" PRId64
		    " < minimum size of %" PRId64 "\n", fssize, minfssize);

	bcount = dbtofsb(&sblock, fssize);

	/*
	 * While many people claim that ext2fs is a (bad) clone of ufs/ffs,
	 * it isn't actual ffs so maybe we should call it "block group"
	 * as their native name rather than ffs derived "cylinder group."
	 * But we'll use the latter here since other kernel sources use it.
	 * (I also agree "cylinder" based allocation is obsolete though)
	 */

	/* maybe "simple is the best" */
	blocks_per_cg = sblock.e2fs_bsize * NBBY;

	ncg = howmany(bcount - sblock.e2fs.e2fs_first_dblock, blocks_per_cg);
	blocks_gd = howmany(sizeof(struct ext2_gd) * ncg, bsize);

	/* check range of inode number */
	if (num_inodes < EXT2_FIRSTINO)
		num_inodes = EXT2_FIRSTINO;	/* needs reserved inodes + 1 */
	if (num_inodes > UINT16_MAX * ncg)
		num_inodes = UINT16_MAX * ncg;	/* ext2bgd_nifree is uint16_t */

	inodes_per_cg = num_inodes / ncg;
	iblocks_per_cg = howmany(inodesize * inodes_per_cg, bsize);

	/* Check that the last cylinder group has enough space for inodes */
	minblocks_per_cg =
	    NBLOCK_BLOCK_BITMAP +
	    NBLOCK_INODE_BITMAP +
	    iblocks_per_cg +
	    1;	/* at least one data block */
	if (Oflag == 0 || cg_has_sb(ncg - 1) != 0)
		minblocks_per_cg += NBLOCK_SUPERBLOCK + blocks_gd;

	blocks_lastcg = bcount - sblock.e2fs.e2fs_first_dblock -
	    blocks_per_cg * (ncg - 1);
	if (blocks_lastcg < minblocks_per_cg) {
		/*
		 * Since we make all the cylinder groups the same size, the
		 * last will only be small if there are more than one
		 * cylinder groups. If the last one is too small to store
		 * filesystem data, just kill it.
		 *
		 * XXX: Does fsck_ext2fs(8) properly handle this case?
		 */
		bcount -= blocks_lastcg;
		ncg--;
		blocks_lastcg = blocks_per_cg;
		blocks_gd = howmany(sizeof(struct ext2_gd) * ncg, bsize);
		inodes_per_cg = num_inodes / ncg;
	}
	/* roundup inodes_per_cg to make it use whole inode table blocks */
	inodes_per_cg = roundup(inodes_per_cg, sblock.e2fs_ipb);
	num_inodes = inodes_per_cg * ncg;
	iblocks_per_cg = inodes_per_cg / sblock.e2fs_ipb;

	/* XXX: probably we should check these adjusted values again */

	sblock.e2fs.e2fs_bcount = bcount;
	sblock.e2fs.e2fs_icount = num_inodes;

	sblock.e2fs_ncg = ncg;
	sblock.e2fs_ngdb = blocks_gd;
	sblock.e2fs_itpg = iblocks_per_cg;

	sblock.e2fs.e2fs_rbcount = sblock.e2fs.e2fs_bcount * minfree / 100;
	/* e2fs_fbcount will be accounted later */
	/* e2fs_ficount will be accounted later */

	sblock.e2fs.e2fs_bpg = blocks_per_cg;
	sblock.e2fs.e2fs_fpg = blocks_per_cg;

	sblock.e2fs.e2fs_ipg = inodes_per_cg;

	sblock.e2fs.e2fs_mtime = 0;
	sblock.e2fs.e2fs_wtime = tv.tv_sec;
	sblock.e2fs.e2fs_mnt_count = 0;
	/* XXX: should add some entropy to avoid checking all fs at once? */
	sblock.e2fs.e2fs_max_mnt_count = EXT2_DEF_MAX_MNT_COUNT;

	sblock.e2fs.e2fs_magic = E2FS_MAGIC;
	sblock.e2fs.e2fs_state = E2FS_ISCLEAN;
	sblock.e2fs.e2fs_beh = E2FS_BEH_DEFAULT;
	sblock.e2fs.e2fs_minrev = 0;
	sblock.e2fs.e2fs_lastfsck = tv.tv_sec;
	sblock.e2fs.e2fs_fsckintv = EXT2_DEF_FSCKINTV;

	/*
	 * Maybe we can use E2FS_OS_FREEBSD here and it would be more proper,
	 * but the purpose of this newfs_ext2fs(8) command is to provide
	 * a filesystem which can be recognized by firmware on some
	 * Linux based appliances that can load bootstrap files only from
	 * (their native) ext2fs, and anyway we will (and should) try to
	 * act like them as much as possible.
	 *
	 * Anyway, I hope that all newer such boxes will keep their support
	 * for the "GOOD_OLD_REV" ext2fs.
	 */
	sblock.e2fs.e2fs_creator = E2FS_OS_LINUX;

	if (Oflag == 0) {
		sblock.e2fs.e2fs_rev = E2FS_REV0;
		sblock.e2fs.e2fs_features_compat   = 0;
		sblock.e2fs.e2fs_features_incompat = 0;
		sblock.e2fs.e2fs_features_rocompat = 0;
	} else {
		sblock.e2fs.e2fs_rev = E2FS_REV1;
		/*
		 * e2fsprogs say "REV1" is "dynamic" so
		 * it isn't quite a version and maybe it means
		 * "extended from REV0 so check compat features."
		 *
		 * XXX: We don't have any native tool to activate
		 *      the EXT2F_COMPAT_RESIZE feature and
		 *      fsck_ext2fs(8) might not fix structures for it.
		 */
		sblock.e2fs.e2fs_features_compat   = EXT2F_COMPAT_RESIZE;
		sblock.e2fs.e2fs_features_incompat = EXT2F_INCOMPAT_FTYPE;
		sblock.e2fs.e2fs_features_rocompat =
		    EXT2F_ROCOMPAT_SPARSESUPER | EXT2F_ROCOMPAT_LARGEFILE;
	}

	sblock.e2fs.e2fs_ruid = geteuid();
	sblock.e2fs.e2fs_rgid = getegid();

	sblock.e2fs.e2fs_first_ino = EXT2_FIRSTINO;
	sblock.e2fs.e2fs_inode_size = inodesize;

	/* e2fs_block_group_nr is set on writing superblock to each group */

	uuid_create(&uuid, &uustat);
	if (uustat != uuid_s_ok)
		errx(EXIT_FAILURE, "Failed to generate uuid\n");
	uuid_enc_le(sblock.e2fs.e2fs_uuid, &uuid);
	if (volname != NULL) {
		if (strlen(volname) > sizeof(sblock.e2fs.e2fs_vname))
			errx(EXIT_FAILURE, "Volume name is too long");
		strlcpy(sblock.e2fs.e2fs_vname, volname,
		    sizeof(sblock.e2fs.e2fs_vname));
	}

	sblock.e2fs.e2fs_fsmnt[0] = '\0';
	sblock.e2fs_fsmnt[0] = '\0';

	sblock.e2fs.e2fs_algo = 0;		/* XXX unsupported? */
	sblock.e2fs.e2fs_prealloc = 0;		/* XXX unsupported? */
	sblock.e2fs.e2fs_dir_prealloc = 0;	/* XXX unsupported? */

	/* calculate blocks for reserved group descriptors for resize */
	sblock.e2fs.e2fs_reserved_ngdb = 0;
	if (sblock.e2fs.e2fs_rev > E2FS_REV0 &&
	    (sblock.e2fs.e2fs_features_compat & EXT2F_COMPAT_RESIZE) != 0) {
		uint64_t target_blocks;
		uint target_ncg, target_ngdb, reserved_ngdb;

		/* reserve descriptors for size as 1024 times as current */
		target_blocks =
		    (sblock.e2fs.e2fs_bcount - sblock.e2fs.e2fs_first_dblock)
		    * 1024ULL;
		/* number of blocks must be in uint32_t */
		if (target_blocks > UINT32_MAX)
			target_blocks = UINT32_MAX;
		target_ncg = howmany(target_blocks, sblock.e2fs.e2fs_bpg);
		target_ngdb = howmany(sizeof(struct ext2_gd) * target_ncg,
		    sblock.e2fs_bsize);
		/*
		 * Reserved group descriptor blocks are preserved as
		 * the second level double indirect reference blocks in
		 * the EXT2_RESIZEINO inode, so the maximum number of
		 * the blocks is NINDIR(fs).
		 * (see also descriptions in init_resizeino() function)
		 *
		 * We check a number including current e2fs_ngdb here
		 * because they will be moved into reserved gdb on
		 * possible future size shrink, though e2fsprogs don't
		 * seem to care about it.
		 */
		if (target_ngdb > NINDIR(&sblock))
			target_ngdb = NINDIR(&sblock);

		reserved_ngdb = target_ngdb - sblock.e2fs_ngdb;

		/* make sure reserved_ngdb fits in the last cg */
		if (reserved_ngdb >= blocks_lastcg - cgoverhead(ncg - 1))
			reserved_ngdb = blocks_lastcg - cgoverhead(ncg - 1);
		if (reserved_ngdb == 0) {
			/* if no space for reserved gdb, disable the feature */
			sblock.e2fs.e2fs_features_compat &=
			    ~EXT2F_COMPAT_RESIZE;
		}
		sblock.e2fs.e2fs_reserved_ngdb = reserved_ngdb;
	}

	/*
	 * Initialize group descriptors
	 */
	gd = malloc(sblock.e2fs_ngdb * bsize);
	if (gd == NULL)
		errx(EXIT_FAILURE, "Can't allocate descriptors buffer");
	memset(gd, 0, sblock.e2fs_ngdb * bsize);

	fbcount = 0;
	ficount = 0;
	for (cylno = 0; cylno < ncg; cylno++) {
		uint boffset;

		boffset = cgbase(&sblock, cylno);
		if (sblock.e2fs.e2fs_rev == E2FS_REV0 ||
		    (sblock.e2fs.e2fs_features_rocompat &
		     EXT2F_ROCOMPAT_SPARSESUPER) == 0 ||
		    cg_has_sb(cylno)) {
			boffset += NBLOCK_SUPERBLOCK + sblock.e2fs_ngdb;
			if (sblock.e2fs.e2fs_rev > E2FS_REV0 &&
			    (sblock.e2fs.e2fs_features_compat &
			     EXT2F_COMPAT_RESIZE) != 0)
				boffset += sblock.e2fs.e2fs_reserved_ngdb;
		}
		gd[cylno].ext2bgd_b_bitmap = boffset;
		boffset += NBLOCK_BLOCK_BITMAP;
		gd[cylno].ext2bgd_i_bitmap = boffset;
		boffset += NBLOCK_INODE_BITMAP;
		gd[cylno].ext2bgd_i_tables = boffset;
		if (cylno == (ncg - 1))
			gd[cylno].ext2bgd_nbfree =
			    blocks_lastcg - cgoverhead(cylno);
		else
			gd[cylno].ext2bgd_nbfree =
			    sblock.e2fs.e2fs_bpg - cgoverhead(cylno);
		fbcount += gd[cylno].ext2bgd_nbfree;
		gd[cylno].ext2bgd_nifree = sblock.e2fs.e2fs_ipg;
		if (cylno == 0) {
			/* take reserved inodes off nifree */
			gd[cylno].ext2bgd_nifree -= EXT2_RESERVED_INODES;
		}
		ficount += gd[cylno].ext2bgd_nifree;
		gd[cylno].ext2bgd_ndirs = 0;
	}
	sblock.e2fs.e2fs_fbcount = fbcount;
	sblock.e2fs.e2fs_ficount = ficount;

	/*
	 * Dump out summary information about file system.
	 */
	if (verbosity > 0) {
		printf("%s: %u.%1uMB (%" PRId64 " sectors) "
		    "block size %u, fragment size %u\n",
		    fsys,
		    (uint)(((uint64_t)bcount * bsize) / (1024 * 1024)),
		    (uint)((uint64_t)bcount * bsize -
		    rounddown((uint64_t)bcount * bsize, 1024 * 1024))
		    / 1024 / 100,
		    fssize, bsize, fsize);
		printf("\tusing %u block groups of %u.0MB, %u blks, "
		    "%u inodes.\n",
		    ncg, bsize * sblock.e2fs.e2fs_bpg / (1024 * 1024),
		    sblock.e2fs.e2fs_bpg, sblock.e2fs.e2fs_ipg);
	}

	/*
	 * allocate space for superblock and group descriptors
	 */
	iobufsize = (NBLOCK_SUPERBLOCK + sblock.e2fs_ngdb) * sblock.e2fs_bsize;
	iobuf = mmap(0, iobufsize, PROT_READ|PROT_WRITE,
	    MAP_ANON|MAP_PRIVATE, -1, 0);
	if (iobuf == NULL)
		errx(EXIT_FAILURE, "Cannot allocate I/O buffer\n");
	memset(iobuf, 0, iobufsize);

	/*
	 * We now start writing to the filesystem
	 */

	if (!Nflag) {
		static const uint pbsize[] = { 1024, 2048, 4096, 0 };
		uint pblock, epblock;
		/*
		 * Validate the given file system size.
		 * Verify that its last block can actually be accessed.
		 * Convert to file system fragment sized units.
		 */
		if (fssize <= 0)
			errx(EXIT_FAILURE, "Preposterous size %" PRId64 "\n",
			    fssize);
		wtfs(fssize - 1, sectorsize, iobuf);

		/*
		 * Ensure there is nothing that looks like a filesystem
		 * superblock anywhere other than where ours will be.
		 * If fsck_ext2fs finds the wrong one all hell breaks loose!
		 *
		 * XXX: needs to check how fsck_ext2fs programs even
		 *      on other OSes determine alternate superblocks
		 */
		for (i = 0; pbsize[i] != 0; i++) {
			epblock = (uint64_t)bcount * bsize / pbsize[i];
			for (pblock = ((pbsize[i] == SBSIZE) ? 1 : 0);
			    pblock < epblock;
			    pblock += pbsize[i] * NBBY /* bpg */)
				zap_old_sblock((daddr_t)pblock *
				    pbsize[i] / sectorsize);
		}
	}

	if (verbosity >= 3)
		printf("super-block backups (for fsck_ext2fs -b #) at:\n");
	/* If we are printing more than one line of numbers, line up columns */
	fld_width = verbosity < 4 ? 1 : snprintf(NULL, 0, "%" PRIu64,
	    (uint64_t)cgbase(&sblock, ncg - 1));
	/* Get terminal width */
	if (ioctl(fileno(stdout), TIOCGWINSZ, &winsize) == 0)
		max_cols = winsize.ws_col;
	else
		max_cols = 80;
	if (Nflag && verbosity == 3)
		/* Leave space to add " ..." after one row of numbers */
		max_cols -= 4;
#define BASE 0x10000	/* For some fixed-point maths */
	col = 0;
	delta = verbosity > 2 ? 0 : max_cols * BASE / ncg;
	for (cylno = 0; cylno < ncg; cylno++) {
		fflush(stdout);
		initcg(cylno);
		if (verbosity < 2)
			continue;
		/* the first one is a master, not backup */
		if (cylno == 0)
			continue;
		/* skip if this cylinder doesn't have a backup */
		if (sblock.e2fs.e2fs_rev > E2FS_REV0 &&
		    (sblock.e2fs.e2fs_features_rocompat &
		     EXT2F_ROCOMPAT_SPARSESUPER) != 0 &&
		    cg_has_sb(cylno) == 0)
			continue;

		if (delta > 0) {
			if (Nflag)
				/* No point doing dots for -N */
				break;
			/* Print dots scaled to end near RH margin */
			for (col += delta; col > BASE; col -= BASE)
				printf(".");
			continue;
		}
		/* Print superblock numbers */
		len = printf(" %*" PRIu64 "," + !col, fld_width,
		    (uint64_t)cgbase(&sblock, cylno));
		col += len;
		if (col + len < max_cols)
			/* Next number fits */
			continue;
		/* Next number won't fit, need a newline */
		if (verbosity <= 3) {
			/* Print dots for subsequent cylinder groups */
			delta = sblock.e2fs_ncg - cylno - 1;
			if (delta != 0) {
				if (Nflag) {
					printf(" ...");
					break;
				}
				delta = max_cols * BASE / delta;
			}
		}
		col = 0;
		printf("\n");
	}
#undef BASE
	if (col > 0)
		printf("\n");
	if (Nflag)
		return;

	/*
	 * Now construct the initial file system,
	 */
	if (fsinit(&tv) == 0)
		errx(EXIT_FAILURE, "Error making filesystem");
	/*
	 * Write out the superblock and group descriptors
	 */
	sblock.e2fs.e2fs_block_group_nr = 0;
	sboff = 0;
	if (cgbase(&sblock, 0) == 0) {
		/*
		 * If the first block contains the boot block sectors,
		 * (i.e. in case of sblock.e2fs.e2fs_bsize > BBSIZE)
		 * we have to preserve data in it.
		 */
		sboff = SBOFF;
	}
	e2fs_sbsave(&sblock.e2fs, (struct ext2fs *)(iobuf + sboff));
	e2fs_cgsave(gd, (struct ext2_gd *)(iobuf + sblock.e2fs_bsize),
	   sizeof(struct ext2_gd) * sblock.e2fs_ncg);
	wtfs(fsbtodb(&sblock, cgbase(&sblock, 0)) + sboff / sectorsize,
	    iobufsize - sboff, iobuf + sboff);

	munmap(iobuf, iobufsize);
}
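mke2fs() above can store exact log-based superblock fields only because the
block size was validated with powerof2() first. A minimal check of the
e2fs_log_bsize round-trip (sketch; this ilog2() is a hypothetical stand-in
for the system macro, and LOG_MINBSIZE is 10 for the 1024-byte minimum):

#include <assert.h>

static int
ilog2(unsigned x)	/* hypothetical stand-in */
{
	int n = -1;

	while (x != 0) {
		x >>= 1;
		n++;
	}
	return n;
}

int main(void)
{
	unsigned bsize = 4096;			/* power of two, as enforced above */
	int log_bsize = ilog2(bsize) - 10;	/* LOG_MINBSIZE */

	assert(bsize == 1024u << log_bsize);	/* exact round-trip */
	assert((bsize & (bsize - 1)) == 0);	/* i.e. powerof2(bsize) */
	return 0;
}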