Example #1
/* Checks that 'mp' is a valid multipath action.  Returns 0 if it is,
 * otherwise an OFPERR_* error code. */
enum ofperr
multipath_check(const struct nx_action_multipath *mp, const struct flow *flow)
{
    uint32_t n_links = ntohs(mp->max_link) + 1;
    size_t min_n_bits = log_2_ceil(n_links);
    struct mf_subfield dst;
    enum ofperr error;

    nxm_decode(&dst, mp->dst, mp->ofs_nbits);
    error = mf_check_dst(&dst, flow);
    if (error) {
        return error;
    }

    if (!flow_hash_fields_valid(ntohs(mp->fields))) {
        VLOG_WARN_RL(&rl, "unsupported fields %"PRIu16, ntohs(mp->fields));
    } else if (mp->algorithm != htons(NX_MP_ALG_MODULO_N)
               && mp->algorithm != htons(NX_MP_ALG_HASH_THRESHOLD)
               && mp->algorithm != htons(NX_MP_ALG_HRW)
               && mp->algorithm != htons(NX_MP_ALG_ITER_HASH)) {
        VLOG_WARN_RL(&rl, "unsupported algorithm %"PRIu16,
                     ntohs(mp->algorithm));
    } else if (dst.n_bits < min_n_bits) {
        VLOG_WARN_RL(&rl, "multipath action requires at least %zu bits for "
                     "%"PRIu32" links", min_n_bits, n_links);
    } else {
        return 0;
    }

    return OFPERR_OFPBAC_BAD_ARGUMENT;
}
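
Both multipath examples size the destination field with log_2_ceil(), so it helps to pin down what that helper is expected to return. The stand-in below is a sketch only (the real helper lives in the project's utility code, and the name sketch_log_2_ceil is invented here): it yields the smallest bit count able to hold every link index 0..max_link, which is exactly the bound that the dst.n_bits < min_n_bits check enforces.

#include <stdint.h>

/* Illustrative stand-in for log_2_ceil(): the smallest 'bits' such that
 * n <= (1 << bits), i.e. ceil(log2(n)).  For n_links = max_link + 1 this is
 * the minimum destination width: 16 links fit in 4 bits, 17 need 5. */
static inline unsigned int
sketch_log_2_ceil(uint64_t n)
{
    unsigned int bits = 0;

    while (n > ((uint64_t) 1 << bits)) {
        bits++;
    }
    return bits;
}
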
Example #2
/* Converts 'nam' into 'mp'.  Returns 0 if successful, otherwise an
 * OFPERR_*. */
enum ofperr
multipath_from_openflow(const struct nx_action_multipath *nam,
                        struct ofpact_multipath *mp)
{
    uint32_t n_links = ntohs(nam->max_link) + 1;
    size_t min_n_bits = log_2_ceil(n_links);

    ofpact_init_MULTIPATH(mp);
    mp->fields = ntohs(nam->fields);
    mp->basis = ntohs(nam->basis);
    mp->algorithm = ntohs(nam->algorithm);
    mp->max_link = ntohs(nam->max_link);
    mp->arg = ntohl(nam->arg);
    mp->dst.field = mf_from_nxm_header(ntohl(nam->dst));
    mp->dst.ofs = nxm_decode_ofs(nam->ofs_nbits);
    mp->dst.n_bits = nxm_decode_n_bits(nam->ofs_nbits);

    if (!flow_hash_fields_valid(mp->fields)) {
        VLOG_WARN_RL(&rl, "unsupported fields %d", (int) mp->fields);
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    } else if (mp->algorithm != NX_MP_ALG_MODULO_N
               && mp->algorithm != NX_MP_ALG_HASH_THRESHOLD
               && mp->algorithm != NX_MP_ALG_HRW
               && mp->algorithm != NX_MP_ALG_ITER_HASH) {
        VLOG_WARN_RL(&rl, "unsupported algorithm %d", (int) mp->algorithm);
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    } else if (mp->dst.n_bits < min_n_bits) {
        VLOG_WARN_RL(&rl, "multipath action requires at least %zu bits for "
                     "%"PRIu32" links", min_n_bits, n_links);
        return OFPERR_OFPBAC_BAD_ARGUMENT;
    }

    return multipath_check(mp, NULL);
}
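
A quick worked example of the bound enforced above, using nothing beyond what the snippet itself shows (n_links = max_link + 1 and min_n_bits = log_2_ceil(n_links)). The standalone program below is not OVS code; it just prints the minimum destination width that multipath_from_openflow() would demand for a few sample max_link values.

#include <stdio.h>

int
main(void)
{
    const unsigned int max_links[] = { 0, 1, 15, 16, 255 };
    size_t i;

    for (i = 0; i < sizeof max_links / sizeof max_links[0]; i++) {
        unsigned int n_links = max_links[i] + 1;
        unsigned int bits = 0;

        while (n_links > (1u << bits)) {
            bits++;                  /* ceil(log2(n_links)) */
        }
        /* e.g. max_link=15 -> 16 links -> a 4-bit destination such as
         * reg0[0..3] passes the dst.n_bits check; max_link=16 needs 5 bits. */
        printf("max_link=%u -> n_links=%u -> min_n_bits=%u\n",
               max_links[i], n_links, bits);
    }
    return 0;
}
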
Example #3
//
// Allocate a chunk of blocks that is at least min and at most max
// blocks in size. This API is used by the nursery allocator, which
// prefers contiguous memory but doesn't require it.  When
// memory is fragmented we might have lots of chunks that are
// less than a full megablock, so allowing the nursery allocator to
// use these reduces fragmentation considerably.  e.g. on a GHC build
// with +RTS -H, I saw fragmentation go from 17MB down to 3MB on a
// single compile.
//
// Further to this: in #7257 there is a program that creates serious
// fragmentation such that the heap is full of tiny <4 block chains.
// The nursery allocator therefore has to use single blocks to avoid
// fragmentation, but we make sure to prefer large blocks when any
// are available.
//
bdescr *
allocLargeChunk (W_ min, W_ max)
{
    bdescr *bd;
    StgWord ln, lnmax;

    if (min >= BLOCKS_PER_MBLOCK) {
        return allocGroup(max);
    }

    ln = log_2_ceil(min);
    lnmax = log_2_ceil(max); // tops out at MAX_FREE_LIST

    while (ln < lnmax && free_list[ln] == NULL) {
        ln++;
    }
    if (ln == lnmax) {
        return allocGroup(max);
    }
    bd = free_list[ln];

    if (bd->blocks <= max)              // small enough: take the whole group
    {
        dbl_link_remove(bd, &free_list[ln]);
        initGroup(bd);
    }
    else   // block too big...
    {
        bd = split_free_block(bd, max, ln);
        ASSERT(bd->blocks == max);
        initGroup(bd);
    }

    n_alloc_blocks += bd->blocks;
    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

    IF_DEBUG(sanity, memset(bd->start, 0xaa, bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}
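
The GHC examples use log_2_ceil() the same way, but as a free-list bucket index rather than a field width. Assuming bucket i only ever holds free groups of at least 2^i blocks, which is the property the first-fit scan above relies on, the illustrative snippet below (not GHC code) shows the bucket window that allocLargeChunk() walks and when it gives up and falls back to allocGroup(max).

#include <stdio.h>

/* Illustrative stand-in for log_2_ceil(), as sketched after Example #1. */
static unsigned int
sketch_log_2_ceil(unsigned int n)
{
    unsigned int bits = 0;

    while (n > (1u << bits)) {
        bits++;
    }
    return bits;
}

int
main(void)
{
    /* A request for "at least 4, at most 128 blocks" starts at bucket
     * log_2_ceil(4) = 2 and stops before log_2_ceil(128) = 7, so buckets
     * 2..6 are scanned; under the bucket assumption above any group found
     * there holds at least 4 blocks, anything over 128 blocks is trimmed
     * by split_free_block(), and an empty window means falling back to
     * allocGroup(128). */
    unsigned int min = 4, max = 128;
    unsigned int ln = sketch_log_2_ceil(min), lnmax = sketch_log_2_ceil(max);

    printf("scan buckets %u..%u, else allocGroup(%u)\n", ln, lnmax - 1, max);
    return 0;
}
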
Example #4
bdescr *
allocGroup (W_ n)
{
    bdescr *bd, *rem;
    StgWord ln;

    if (n == 0) barf("allocGroup: requested zero blocks");
    
    if (n >= BLOCKS_PER_MBLOCK)
    {
        StgWord mblocks;

        mblocks = BLOCKS_TO_MBLOCKS(n);

        // n_alloc_blocks doesn't count the extra blocks we get in a
        // megablock group.
        n_alloc_blocks += mblocks * BLOCKS_PER_MBLOCK;
        if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

        bd = alloc_mega_group(mblocks);
        // only the bdescrs of the first MB are required to be initialised
        initGroup(bd);
        goto finish;
    }
    
    n_alloc_blocks += n;
    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

    ln = log_2_ceil(n);

    while (ln < MAX_FREE_LIST && free_list[ln] == NULL) {
        ln++;
    }

    if (ln == MAX_FREE_LIST) {
#if 0  /* useful for debugging fragmentation */
        if ((W_)mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W
             - (W_)((n_alloc_blocks - n) * BLOCK_SIZE_W) > (2*1024*1024)/sizeof(W_)) {
            debugBelch("Fragmentation, wanted %d blocks, %ld MB free\n", n, ((mblocks_allocated * BLOCKS_PER_MBLOCK) - n_alloc_blocks) / BLOCKS_PER_MBLOCK);
            RtsFlags.DebugFlags.block_alloc = 1;
            checkFreeListSanity();
        }
#endif

        bd = alloc_mega_group(1);
        bd->blocks = n;
        initGroup(bd);		         // we know the group will fit
        rem = bd + n;
        rem->blocks = BLOCKS_PER_MBLOCK-n;
        initGroup(rem); // init the slop
        n_alloc_blocks += rem->blocks;
        freeGroup(rem);      	         // add the slop on to the free list
        goto finish;
    }

    bd = free_list[ln];

    if (bd->blocks == n)	        // exactly the right size!
    {
        dbl_link_remove(bd, &free_list[ln]);
        initGroup(bd);
    }
    else if (bd->blocks >  n)            // block too big...
    {
        bd = split_free_block(bd, n, ln);
        ASSERT(bd->blocks == n);
        initGroup(bd);
    }
    else
    {
        barf("allocGroup: free list corrupted");
    }

finish:
    IF_DEBUG(sanity, memset(bd->start, 0xaa, bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}
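
To make allocGroup()'s small-request path concrete, here is a toy, self-contained model. It is not GHC code: the bucket contents are made up, each bucket holds at most one group, and freed remainders are filed by floor(log2(size)) on the assumption that this mirrors the real free lists. Allocation starts scanning at ceil(log2(n)), so the first non-empty bucket is guaranteed to hold a group of at least n blocks, which is the invariant behind the "free list corrupted" barf above.

#include <stdio.h>

#define TOY_MAX_FREE_LIST 8

/* toy_free_list[i] holds at most one free group (its size in blocks),
 * 0 meaning the bucket is empty. */
static unsigned int toy_free_list[TOY_MAX_FREE_LIST] = { 0, 0, 0, 12, 0, 40, 0, 0 };

static unsigned int
toy_alloc(unsigned int n)                 /* n > 0 and smaller than a megablock */
{
    unsigned int ln = 0;

    while (n > (1u << ln)) {
        ln++;                             /* ln = ceil(log2(n)), as in allocGroup() */
    }
    while (ln < TOY_MAX_FREE_LIST && toy_free_list[ln] == 0) {
        ln++;                             /* first non-empty bucket at or above ln */
    }
    if (ln == TOY_MAX_FREE_LIST) {
        return 0;                         /* real code: carve a fresh megablock */
    }
    if (toy_free_list[ln] == n) {         /* exactly the right size */
        toy_free_list[ln] = 0;
    } else {                              /* too big: split, re-file the remainder */
        unsigned int rest = toy_free_list[ln] - n;
        unsigned int rln = 0;

        toy_free_list[ln] = 0;
        while ((2u << rln) <= rest) {
            rln++;                        /* rln = floor(log2(rest)) */
        }
        toy_free_list[rln] = rest;
    }
    return n;
}

int
main(void)
{
    /* Splits the 12-block group: 5 blocks out, the 7-block remainder is
     * filed in bucket floor(log2(7)) = 2. */
    printf("allocated %u blocks\n", toy_alloc(5));
    /* A second request for 7 starts at bucket ceil(log2(7)) = 3, so it
     * skips the 7-block remainder in bucket 2 and splits the 40-block
     * group instead; that is the price of only scanning buckets that are
     * guaranteed to fit. */
    printf("allocated %u blocks\n", toy_alloc(7));
    return 0;
}
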