Example #1
/* Execute a group entry of type INDIRECT. */
static void
execute_indirect(struct group_entry *entry, struct packet *pkt) {

    if (entry->desc->buckets_num > 0) {
        struct ofl_bucket *bucket = entry->desc->buckets[0];
        struct packet *p = packet_clone(pkt);

        if (VLOG_IS_DBG_ENABLED(LOG_MODULE)) {
            char *b = ofl_structs_bucket_to_string(bucket, entry->dp->exp);
            VLOG_DBG_RL(LOG_MODULE, &rl, "Writing bucket: %s.", b);
            free(b);
        }

        action_set_write_actions(p->action_set, bucket->actions_num, bucket->actions);

        entry->stats->byte_count += p->buffer->size;
        entry->stats->packet_count++;
        entry->stats->counters[0]->byte_count += p->buffer->size;
        entry->stats->counters[0]->packet_count++;

        action_set_execute(p->action_set, p);
        packet_destroy(p);
    } else {
        VLOG_DBG_RL(LOG_MODULE, &rl, "No bucket in group.");
    }
}
Example #2
/* Execute a group entry of type FAILFAST. */
static void
execute_ff(struct group_entry *entry, struct packet *pkt) {
    size_t b  = select_from_ff_group(entry);

    if (b != -1) {
        struct ofl_bucket *bucket = entry->desc->buckets[b];
        struct packet *p = packet_clone(pkt);

        if (VLOG_IS_DBG_ENABLED(LOG_MODULE)) {
            char *b = ofl_structs_bucket_to_string(bucket, entry->dp->exp);
            VLOG_DBG_RL(LOG_MODULE, &rl, "Writing bucket: %s.", b);
            free(b);
        }

        action_set_write_actions(p->action_set, bucket->actions_num, bucket->actions);

        entry->stats->byte_count += p->buffer->size;
        entry->stats->packet_count++;
        entry->stats->counters[b]->byte_count += p->buffer->size;
        entry->stats->counters[b]->packet_count++;
        /* The cookie field is set to 0xffffffffffffffff because the packet
           cannot be associated with any particular flow. */
        action_set_execute(p->action_set, p, 0xffffffffffffffff);
        packet_destroy(p);
    } else {
        VLOG_DBG_RL(LOG_MODULE, &rl, "No bucket in group.");
    }
}
Example #3
static uint32_t
nx_entry_ok(const void *p, unsigned int match_len)
{
    unsigned int payload_len;
    ovs_be32 header_be;
    uint32_t header;

    if (match_len < 4) {
        if (match_len) {
            VLOG_DBG_RL(&rl, "nx_match ends with partial (%u-byte) nxm_header",
                        match_len);
        }
        return 0;
    }
    memcpy(&header_be, p, 4);
    header = ntohl(header_be);

    payload_len = NXM_LENGTH(header);
    if (!payload_len) {
        VLOG_DBG_RL(&rl, "nxm_entry %08"PRIx32" has invalid payload "
                    "length 0", header);
        return 0;
    }
    if (match_len < payload_len + 4) {
        VLOG_DBG_RL(&rl, "%"PRIu32"-byte nxm_entry but only "
                    "%u bytes left in nx_match", payload_len + 4, match_len);
        return 0;
    }

    return header;
}
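
The checks above assume the NXM/OXM TLV format: every entry starts with a 4-byte header whose low byte (NXM_LENGTH) gives the payload length. Below is a minimal stand-alone sketch of that layout; the bit positions reflect my reading of the OVS nx-match headers and are not taken from this file.

#include <inttypes.h>
#include <stdio.h>

/* Illustrative only: pack vendor/field/hasmask/length the way the NXM header
 * appears to be laid out (16 + 7 + 1 + 8 bits). */
static uint32_t
example_nxm_header(uint16_t vendor, uint8_t field, int hasmask, uint8_t len)
{
    return ((uint32_t) vendor << 16) | ((uint32_t) (field & 0x7f) << 9)
           | ((uint32_t) (hasmask ? 1 : 0) << 8) | len;
}

int
main(void)
{
    /* The header advertises a 4-byte payload, so nx_entry_ok() needs at least
     * 4 (header) + 4 (payload) = 8 bytes left in the nx_match to accept it. */
    uint32_t header = example_nxm_header(0x0000, 3, 0, 4);

    printf("header=%#010"PRIx32" payload_len=%"PRIu32"\n", header, header & 0xff);
    return 0;
}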
Example #4
void
action_set_write_actions(struct action_set *set,
                         size_t actions_num,
                         struct ofl_action_header **actions) {
    size_t i;
    VLOG_DBG_RL(LOG_MODULE, &rl, "Writing to action set.");
    for (i=0; i<actions_num; i++) {
        action_set_write_action(set, actions[i]);
    }
    if (VLOG_IS_DBG_ENABLED(LOG_MODULE)) {
        char *str = action_set_to_string(set);

        VLOG_DBG_RL(LOG_MODULE, &rl, "%s", str);
        free(str);
    }
}
Example #5
/* Free the specified entry which keeps state for one bundle ID. */
static void
bundle_table_entry_destroy(struct bundle_table_entry *entry) {
    struct bundle_message *bundle_msg, *bundle_msg_next;

    LIST_FOR_EACH_SAFE (bundle_msg, bundle_msg_next, struct bundle_message, node, &entry->bundle_message_list) {
        VLOG_DBG_RL(LOG_MODULE, &rl, "Free message with type %u and length %u\n",
               bundle_msg->message->type,
               ntohs(bundle_msg->message->length));
        list_remove(&bundle_msg->node);
        bundle_message_free(bundle_msg);
    }
    list_remove(&entry->node);
    VLOG_DBG_RL(LOG_MODULE, &rl, "Destroyed bundle table entry for bundle ID %u.\n", entry->bundle_id);
    free(entry);
}
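
The _SAFE iterator is needed above because each node is unlinked and freed inside the loop body; the plain iterator would read the node's link after the node had already been freed. A generic, self-contained sketch of the same save-the-next-pointer idea, using a hypothetical node type instead of the switch's list macros:

#include <stdlib.h>

struct node {
    struct node *next;
    /* payload ... */
};

/* Free a whole chain without touching memory after it has been released. */
static void
destroy_all(struct node *head)
{
    struct node *n, *next;

    for (n = head; n != NULL; n = next) {
        next = n->next;   /* read the link before freeing 'n' */
        free(n);
    }
}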
Example #6
/* Helper function for nl_dump_next(). */
static int
nl_dump_recv(struct nl_dump *dump)
{
    struct nlmsghdr *nlmsghdr;
    int retval;

    retval = nl_sock_recv__(dump->sock, &dump->buffer, true);
    if (retval) {
        return retval == EINTR ? EAGAIN : retval;
    }

    nlmsghdr = nl_msg_nlmsghdr(&dump->buffer);
    if (dump->seq != nlmsghdr->nlmsg_seq) {
        VLOG_DBG_RL(&rl, "ignoring seq %#"PRIx32" != expected %#"PRIx32,
                    nlmsghdr->nlmsg_seq, dump->seq);
        return EAGAIN;
    }

    if (nl_msg_nlmsgerr(&dump->buffer, &retval)) {
        VLOG_INFO_RL(&rl, "netlink dump request error (%s)",
                     ovs_strerror(retval));
        return retval && retval != EAGAIN ? retval : EPROTO;
    }

    return 0;
}
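
nl_dump_recv() is only the receive half of a dump; a caller normally drives the whole sequence through nl_dump_start()/nl_dump_next()/nl_dump_done(). A hedged sketch of that loop follows; the exact signatures are assumed from the OVS netlink-socket API of this era and may differ by version.

/* Sketch only: 'sock' is an open struct nl_sock and 'request' is a prepared
 * dump request in an ofpbuf; both are assumed to exist in the caller. */
struct nl_dump dump;
struct ofpbuf reply;
int error;

nl_dump_start(&dump, sock, &request);
while (nl_dump_next(&dump, &reply)) {
    /* parse one reply message from 'reply' */
}
error = nl_dump_done(&dump);   /* 0 on success, positive errno on failure */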
Example #7
File: hmap.c  Project: David-B55/ovs
static void
resize(struct hmap *hmap, size_t new_mask, const char *where)
{
    struct hmap tmp;
    size_t i;

    ovs_assert(is_pow2(new_mask + 1));

    hmap_init(&tmp);
    if (new_mask) {
        tmp.buckets = xmalloc(sizeof *tmp.buckets * (new_mask + 1));
        tmp.mask = new_mask;
        for (i = 0; i <= tmp.mask; i++) {
            tmp.buckets[i] = NULL;
        }
    }
    for (i = 0; i <= hmap->mask; i++) {
        struct hmap_node *node, *next;
        int count = 0;
        for (node = hmap->buckets[i]; node; node = next) {
            next = node->next;
            hmap_insert_fast(&tmp, node, node->hash);
            count++;
        }
        if (count > 5) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
            COVERAGE_INC(hmap_pathological);
            VLOG_DBG_RL(&rl, "%s: %d nodes in bucket (%"PRIuSIZE" nodes, %"PRIuSIZE" buckets)",
                        where, count, hmap->n, hmap->mask + 1);
        }
    }
    hmap_swap(hmap, &tmp);
    hmap_destroy(&tmp);
}
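
resize() itself is internal; users of the hash map insert through the public API and the table is rebuilt into more buckets as it grows. A small usage sketch, where HMAP_INITIALIZER, hmap_insert and hash_int are assumed from the OVS hmap/hash headers:

/* Hypothetical element embedding an hmap_node. */
struct my_elem {
    struct hmap_node node;
    int key;
};

struct hmap map = HMAP_INITIALIZER(&map);
struct my_elem *e = xmalloc(sizeof *e);

e->key = 42;
/* Insertion hashes the key; once the element count exceeds the mask, the
 * buckets are redistributed by resize(). */
hmap_insert(&map, &e->node, hash_int(e->key, 0));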
Example #8
/* Sends the 'request' member of the 'n' transactions in 'transactions' on
 * 'sock', in order, and receives responses to all of them.  Fills in the
 * 'error' member of each transaction with 0 if it was successful, otherwise
 * with a positive errno value.  If 'reply' is nonnull, then it will be filled
 * with the reply if the message receives a detailed reply.  In other cases,
 * i.e. where the request failed or had no reply beyond an indication of
 * success, 'reply' will be cleared if it is nonnull.
 *
 * The caller is responsible for destroying each request and reply, and the
 * transactions array itself.
 *
 * Before sending each message, this function will finalize nlmsg_len in each
 * 'request' to match the ofpbuf's size,  set nlmsg_pid to 'sock''s pid, and
 * initialize nlmsg_seq.
 *
 * Bare Netlink is an unreliable transport protocol.  This function layers
 * reliable delivery and reply semantics on top of bare Netlink.  See
 * nl_sock_transact() for some caveats.
 */
void
nl_sock_transact_multiple(struct nl_sock *sock,
                          struct nl_transaction **transactions, size_t n)
{
    int max_batch_count;
    int error;

    if (!n) {
        return;
    }

    error = nl_sock_cow__(sock);
    if (error) {
        nl_sock_record_errors__(transactions, n, error);
        return;
    }

    /* In theory, every request could have a 64 kB reply.  But the default and
     * maximum socket rcvbuf size with typical Dom0 memory sizes both tend to
     * be a bit below 128 kB, so that would only allow a single message in a
     * "batch".  So we assume that replies average (at most) 4 kB, which allows
     * a good deal of batching.
     *
     * In practice, most of the requests that we batch either have no reply at
     * all or a brief reply. */
    max_batch_count = MAX(sock->rcvbuf / 4096, 1);
    max_batch_count = MIN(max_batch_count, max_iovs);

    while (n > 0) {
        size_t count, bytes;
        size_t done;

        /* Batch up to 'max_batch_count' transactions.  But cap it at about a
         * page of requests total because big skbuffs are expensive to
         * allocate in the kernel.  */
#if defined(PAGESIZE)
        enum { MAX_BATCH_BYTES = MAX(1, PAGESIZE - 512) };
#else
        enum { MAX_BATCH_BYTES = 4096 - 512 };
#endif
        bytes = transactions[0]->request->size;
        for (count = 1; count < n && count < max_batch_count; count++) {
            if (bytes + transactions[count]->request->size > MAX_BATCH_BYTES) {
                break;
            }
            bytes += transactions[count]->request->size;
        }

        error = nl_sock_transact_multiple__(sock, transactions, count, &done);
        transactions += done;
        n -= done;

        if (error == ENOBUFS) {
            VLOG_DBG_RL(&rl, "receive buffer overflow, resending request");
        } else if (error) {
            VLOG_ERR_RL(&rl, "transaction error (%s)", strerror(error));
            nl_sock_record_errors__(transactions, n, error);
        }
    }
}
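
A sketch of what a caller of nl_sock_transact_multiple() might look like, using only the nl_transaction fields named in the comment above (request, reply, error); the surrounding setup is illustrative.

/* 'request1', 'request2' and 'reply1' are assumed to be prepared ofpbufs. */
struct nl_transaction txn1 = { .request = request1, .reply = reply1 };
struct nl_transaction txn2 = { .request = request2, .reply = NULL };
struct nl_transaction *txns[] = { &txn1, &txn2 };

nl_sock_transact_multiple(sock, txns, 2);
if (txn1.error) {
    /* request1 failed; txn1.error is a positive errno value */
} else if (reply1->size) {
    /* request1 received a detailed reply, left in 'reply1' */
}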
Example #9
static enum ofperr
oxm_pull_match__(struct ofpbuf *b, bool strict, struct match *match)
{
    struct ofp11_match_header *omh = ofpbuf_data(b);
    uint8_t *p;
    uint16_t match_len;

    if (ofpbuf_size(b) < sizeof *omh) {
        return OFPERR_OFPBMC_BAD_LEN;
    }

    match_len = ntohs(omh->length);
    if (match_len < sizeof *omh) {
        return OFPERR_OFPBMC_BAD_LEN;
    }

    if (omh->type != htons(OFPMT_OXM)) {
        return OFPERR_OFPBMC_BAD_TYPE;
    }

    p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
    if (!p) {
        VLOG_DBG_RL(&rl, "oxm length %u, rounded up to a "
                    "multiple of 8, is longer than space in message (max "
                    "length %"PRIu32")", match_len, ofpbuf_size(b));
        return OFPERR_OFPBMC_BAD_LEN;
    }

    return nx_pull_raw(p + sizeof *omh, match_len - sizeof *omh,
                       strict, match, NULL, NULL);
}
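
A worked example of the padding arithmetic above: the OXM match body is padded to an 8-byte boundary in the message, so the pull length is ROUND_UP(match_len, 8) rather than match_len itself. A tiny stand-alone check (the ROUND_UP definition here is a local illustration, not the one from the tree):

#include <stdio.h>

#define EXAMPLE_ROUND_UP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int
main(void)
{
    unsigned int match_len = 14;   /* hypothetical ofp11_match_header.length */

    /* 14 bytes of match data occupy 16 bytes on the wire; the last 2 are padding. */
    printf("pulled bytes = %u\n", EXAMPLE_ROUND_UP(match_len, 8));
    return 0;
}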
Example #10
void
group_entry_execute(struct group_entry *entry,
                          struct packet *packet) {

    VLOG_DBG_RL(LOG_MODULE, &rl, "Executing group %u.", entry->stats->group_id);

    /* NOTE: Packet is copied for all buckets now (even if there is only one).
     * This allows execution of the original packet onward. It is not clear
     * whether that is allowed or not according to the spec. though. */

    switch (entry->desc->type) {
        case (OFPGT_ALL): {
            execute_all(entry, packet);
            break;
        }
        case (OFPGT_SELECT): {
            execute_select(entry, packet);
            break;
        }
        case (OFPGT_INDIRECT): {
            execute_indirect(entry, packet);
            break;
        }
        case (OFPGT_FF): {
            execute_ff(entry, packet);
            break;
        }
        default: {
            VLOG_WARN_RL(LOG_MODULE, &rl, "Trying to execute unknown group type (%u) in group (%u).", entry->desc->type, entry->stats->group_id);
        }
    }
}
Example #11
/* Executes a group entry of type ALL. */
static void
execute_all(struct group_entry *entry, struct packet *pkt) {
    size_t i;

    /* TODO Zoltan: Currently packets are always cloned. However it should
     * be possible to see if cloning is necessary, or not, based on bucket actions. */
    for (i=0; i<entry->desc->buckets_num; i++) {
        struct ofl_bucket *bucket = entry->desc->buckets[i];
        struct packet *p = packet_clone(pkt);

        if (VLOG_IS_DBG_ENABLED(LOG_MODULE)) {
            char *b = ofl_structs_bucket_to_string(bucket, entry->dp->exp);
            VLOG_DBG_RL(LOG_MODULE, &rl, "Writing bucket: %s.", b);
            free(b);
        }

        action_set_write_actions(p->action_set, bucket->actions_num, bucket->actions);

        entry->stats->byte_count += p->buffer->size;
        entry->stats->packet_count++;
        entry->stats->counters[i]->byte_count += p->buffer->size;
        entry->stats->counters[i]->packet_count++;

        /* The cookie field is set to 0xffffffffffffffff because the packet
           cannot be associated with any particular flow. */
        action_set_execute(p->action_set, p, 0xffffffffffffffff);

        packet_destroy(p);
    }
}
Example #12
File: mac-learning.c  Project: InCNTRE/OFTT
/* Attempts to make 'ml' learn from the fact that a frame from 'src_mac' was
 * just observed arriving from 'src_port' on the given 'vlan'.
 *
 * Returns nonzero if we actually learned something from this, zero if it just
 * confirms what we already knew.  The nonzero return value is the tag of flows
 * that now need revalidation.
 *
 * The 'vlan' parameter is used to maintain separate per-VLAN learning tables.
 * Specify 0 if this behavior is undesirable.
 *
 * 'lock_type' specifies whether the entry should be locked or existing locks
 * are checked. */
tag_type
mac_learning_learn(struct mac_learning *ml,
                   const uint8_t src_mac[ETH_ADDR_LEN], uint16_t vlan,
                   uint16_t src_port, enum grat_arp_lock_type lock_type)
{
    struct mac_entry *e;
    struct list *bucket;

    if (!is_learning_vlan(ml, vlan)) {
        return 0;
    }

    if (eth_addr_is_multicast(src_mac)) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 30);
        VLOG_DBG_RL(&rl, "multicast packet source "ETH_ADDR_FMT,
                    ETH_ADDR_ARGS(src_mac));
        return 0;
    }

    bucket = mac_table_bucket(ml, src_mac, vlan);
    e = search_bucket(bucket, src_mac, vlan);
    if (!e) {
        if (!list_is_empty(&ml->free)) {
            e = mac_entry_from_lru_node(ml->free.next);
        } else {
            e = mac_entry_from_lru_node(ml->lrus.next);
            list_remove(&e->hash_node);
        }
        memcpy(e->mac, src_mac, ETH_ADDR_LEN);
        list_push_front(bucket, &e->hash_node);
        e->port = -1;
        e->vlan = vlan;
        e->tag = make_unknown_mac_tag(ml, src_mac, vlan);
        e->grat_arp_lock = TIME_MIN;
    }

    if (lock_type != GRAT_ARP_LOCK_CHECK || time_now() >= e->grat_arp_lock) {
        /* Make the entry most-recently-used. */
        list_remove(&e->lru_node);
        list_push_back(&ml->lrus, &e->lru_node);
        e->expires = time_now() + MAC_ENTRY_IDLE_TIME;
        if (lock_type == GRAT_ARP_LOCK_SET) {
            e->grat_arp_lock = time_now() + MAC_GRAT_ARP_LOCK_TIME;
        }

        /* Did we learn something? */
        if (e->port != src_port) {
            tag_type old_tag = e->tag;
            e->port = src_port;
            e->tag = tag_create_random();
            COVERAGE_INC(mac_learning_learned);
            return old_tag;
        }
    }

    return 0;
}
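
A sketch of how a caller might use the return value, per the comment at the top of this example: a nonzero tag means the MAC moved and flows carrying that tag need revalidation. GRAT_ARP_LOCK_NONE and the revalidation helper are assumptions for illustration, not taken from this file.

/* 'ml', 'src_mac', 'vlan' and 'in_port' come from the received frame. */
tag_type tag = mac_learning_learn(ml, src_mac, vlan, in_port,
                                  GRAT_ARP_LOCK_NONE);
if (tag) {
    revalidate_flows_with_tag(tag);   /* hypothetical helper */
}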
Example #13
/* Create the table entry for the specified bundle ID. */
static struct bundle_table_entry *
bundle_table_entry_create(uint32_t bundle_id, uint16_t flags) {
    struct bundle_table_entry *entry;

    entry = xmalloc(sizeof(struct bundle_table_entry));
    list_init(&entry->node);
    list_init(&entry->bundle_message_list);
    entry->bundle_id = bundle_id;
    entry->flags = flags;
    entry->closed = false;
    VLOG_DBG_RL(LOG_MODULE, &rl, "Created bundle table entry for bundle ID %u.\n", bundle_id);

    return entry;
}
Example #14
static enum ofperr
nx_pull_match__(struct ofpbuf *b, unsigned int match_len, bool strict,
                struct match *match,
                ovs_be64 *cookie, ovs_be64 *cookie_mask)
{
    uint8_t *p = NULL;

    if (match_len) {
        p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
        if (!p) {
            VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a "
                        "multiple of 8, is longer than space in message (max "
                        "length %"PRIu32")", match_len, ofpbuf_size(b));
            return OFPERR_OFPBMC_BAD_LEN;
        }
    }

    return nx_pull_raw(p, match_len, strict, match, cookie, cookie_mask);
}
Example #15
/* Create a structure to retain an appended message. */
static struct bundle_message *
bundle_message_create(struct ofl_msg_bundle_add_msg *add_msg) {
    struct bundle_message *msg;
    size_t message_length;

    msg = xmalloc(sizeof(struct bundle_message));
    list_init(&msg->node);

    message_length = ntohs(add_msg->message->length);
    msg->message = xmalloc(message_length);
    memcpy(msg->message, add_msg->message, message_length);

    VLOG_DBG_RL(LOG_MODULE, &rl,
           "Created %zu byte bundle message entry for bundle ID %u.\n",
           message_length,
           add_msg->bundle_id);

    return msg;
}
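
A sketch of the append path that ties this example to Examples #13 and #19: look up the table entry for the bundle ID (creating it on first use) and queue the copied message on its list. The table's list field name and the flags field on the add message are assumptions here, not confirmed by this file.

struct bundle_table_entry *entry = bundle_table_entry_find(table, add_msg->bundle_id);
struct bundle_message *bmsg;

if (entry == NULL) {
    entry = bundle_table_entry_create(add_msg->bundle_id, add_msg->flags);
    list_push_back(&table->entry_list, &entry->node);   /* field name assumed */
}
bmsg = bundle_message_create(add_msg);
list_push_back(&entry->bundle_message_list, &bmsg->node);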
Example #16
/* Processes 'msg', which should be an OpenFlow message received on 'rconn', according
 * to the learning switch state in 'sw'.  The most likely result of processing
 * is that flow-setup and packet-out OpenFlow messages will be sent out on
 * 'rconn'.  */
static void
lswitch_process_packet(struct lswitch *sw, const struct ofpbuf *msg)
{
    enum ofptype type;
    struct ofpbuf b;

    b = *msg;
    if (ofptype_pull(&type, &b)) {
        return;
    }

    if (sw->state == S_FEATURES_REPLY
        && type != OFPTYPE_ECHO_REQUEST
        && type != OFPTYPE_FEATURES_REPLY) {
        return;
    }

    if (type == OFPTYPE_ECHO_REQUEST) {
        process_echo_request(sw, msg->data);
    } else if (type == OFPTYPE_FEATURES_REPLY) {
        if (sw->state == S_FEATURES_REPLY) {
            if (!process_switch_features(sw, msg->data)) {
                sw->state = S_SWITCHING;
            } else {
                rconn_disconnect(sw->rconn);
            }
        }
    } else if (type == OFPTYPE_PACKET_IN) {
        process_packet_in(sw, msg->data);
    } else if (type == OFPTYPE_FLOW_REMOVED) {
        /* Nothing to do. */
    } else if (VLOG_IS_DBG_ENABLED()) {
        char *s = ofp_to_string(msg->data, msg->size, 2);
        VLOG_DBG_RL(&rl, "%016llx: OpenFlow packet ignored: %s",
                    sw->datapath_id, s);
        free(s);
    }
}
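
A hypothetical polling loop around lswitch_process_packet(); rconn_run() and rconn_recv() are assumed from the OVS rconn API and may differ between versions.

for (;;) {
    struct ofpbuf *msg;

    rconn_run(sw->rconn);
    msg = rconn_recv(sw->rconn);
    if (!msg) {
        break;                       /* nothing more to process right now */
    }
    lswitch_process_packet(sw, msg);
    ofpbuf_delete(msg);
}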
Example #17
static enum ofperr
nx_pull_raw(const uint8_t *p, unsigned int match_len, bool strict,
            struct match *match, ovs_be64 *cookie, ovs_be64 *cookie_mask)
{
    uint32_t header;

    ovs_assert((cookie != NULL) == (cookie_mask != NULL));

    match_init_catchall(match);
    if (cookie) {
        *cookie = *cookie_mask = htonll(0);
    }
    if (!match_len) {
        return 0;
    }

    for (;
         (header = nx_entry_ok(p, match_len)) != 0;
         p += 4 + NXM_LENGTH(header), match_len -= 4 + NXM_LENGTH(header)) {
        const struct mf_field *mf;
        enum ofperr error;

        mf = mf_from_nxm_header(header);
        if (!mf) {
            if (strict) {
                error = OFPERR_OFPBMC_BAD_FIELD;
            } else {
                continue;
            }
        } else if (!mf_are_prereqs_ok(mf, &match->flow)) {
            error = OFPERR_OFPBMC_BAD_PREREQ;
        } else if (!mf_is_all_wild(mf, &match->wc)) {
            error = OFPERR_OFPBMC_DUP_FIELD;
        } else {
            unsigned int width = mf->n_bytes;
            union mf_value value;

            memcpy(&value, p + 4, width);
            if (!mf_is_value_valid(mf, &value)) {
                error = OFPERR_OFPBMC_BAD_VALUE;
            } else if (!NXM_HASMASK(header)) {
                error = 0;
                mf_set_value(mf, &value, match);
            } else {
                union mf_value mask;

                memcpy(&mask, p + 4 + width, width);
                if (!mf_is_mask_valid(mf, &mask)) {
                    error = OFPERR_OFPBMC_BAD_MASK;
                } else {
                    error = check_mask_consistency(p, mf);
                    if (!error) {
                        mf_set(mf, &value, &mask, match);
                    }
                }
            }
        }

        /* Check if the match is for a cookie rather than a classifier rule. */
        if ((header == NXM_NX_COOKIE || header == NXM_NX_COOKIE_W) && cookie) {
            if (*cookie_mask) {
                error = OFPERR_OFPBMC_DUP_FIELD;
            } else {
                unsigned int width = sizeof *cookie;

                memcpy(cookie, p + 4, width);
                if (NXM_HASMASK(header)) {
                    memcpy(cookie_mask, p + 4 + width, width);
                } else {
                    *cookie_mask = OVS_BE64_MAX;
                }
                error = 0;
            }
        }

        if (error) {
            VLOG_DBG_RL(&rl, "bad nxm_entry %#08"PRIx32" (vendor=%"PRIu32", "
                        "field=%"PRIu32", hasmask=%"PRIu32", len=%"PRIu32"), "
                        "(%s)", header,
                        NXM_VENDOR(header), NXM_FIELD(header),
                        NXM_HASMASK(header), NXM_LENGTH(header),
                        ofperr_to_string(error));
            return error;
        }
    }

    return match_len ? OFPERR_OFPBMC_BAD_LEN : 0;
}
Example #18
/* Tries to receive an OpenFlow message from datapath 'dp_idx' on 'sock'.  If
 * successful, stores the received message into '*msgp' and returns 0.  The
 * caller is responsible for destroying the message with ofpbuf_delete().  On
 * failure, returns a positive errno value and stores a null pointer into
 * '*msgp'.
 *
 * Only Netlink messages with embedded OpenFlow messages are accepted.  Other
 * Netlink messages provoke errors.
 *
 * If 'wait' is true, dpif_recv_openflow waits for a message to be ready;
 * otherwise, returns EAGAIN if the 'sock' receive buffer is empty. */
int
dpif_recv_openflow(struct dpif *dp, int dp_idx, struct ofpbuf **bufferp,
                   bool wait)
{
    struct nlattr *attrs[ARRAY_SIZE(openflow_policy)];
    struct ofpbuf *buffer;
    struct ofp_header *oh;
    uint16_t ofp_len;

    buffer = *bufferp = NULL;
    do {
        int retval;

        do {
            ofpbuf_delete(buffer);
            retval = nl_sock_recv(dp->sock, &buffer, wait);
        } while (retval == ENOBUFS
                 || (!retval
                     && (nl_msg_nlmsghdr(buffer)->nlmsg_type == NLMSG_DONE
                         || nl_msg_nlmsgerr(buffer, NULL))));
        if (retval) {
            if (retval != EAGAIN) {
                VLOG_WARN_RL(&rl, "dpif_recv_openflow: %s", strerror(retval));
            }
            return retval;
        }

        if (nl_msg_genlmsghdr(buffer) == NULL) {
            VLOG_DBG_RL(&rl, "received packet too short for Generic Netlink");
            goto error;
        }
        if (nl_msg_nlmsghdr(buffer)->nlmsg_type != openflow_family) {
            VLOG_DBG_RL(&rl,
                        "received type (%"PRIu16") != openflow family (%d)",
                        nl_msg_nlmsghdr(buffer)->nlmsg_type, openflow_family);
            goto error;
        }

        if (!nl_policy_parse(buffer, NLMSG_HDRLEN + GENL_HDRLEN,
                             openflow_policy, attrs,
                             ARRAY_SIZE(openflow_policy))) {
            goto error;
        }
    } while (nl_attr_get_u32(attrs[DP_GENL_A_DP_IDX]) != dp_idx);

    oh = buffer->data = (void *) nl_attr_get(attrs[DP_GENL_A_OPENFLOW]);
    buffer->size = nl_attr_get_size(attrs[DP_GENL_A_OPENFLOW]);
    ofp_len = ntohs(oh->length);
    if (ofp_len != buffer->size) {
        VLOG_WARN_RL(&rl,
                     "ofp_header.length %"PRIu16" != attribute length %zu\n",
                     ofp_len, buffer->size);
        buffer->size = MIN(ofp_len, buffer->size);
    }
    *bufferp = buffer;
    return 0;

error:
    ofpbuf_delete(buffer);
    return EPROTO;
}
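
A hypothetical non-blocking caller: drain pending OpenFlow messages from the datapath, stopping on EAGAIN. The handler and error handling are illustrative only.

struct ofpbuf *msg;
int error;

while (!(error = dpif_recv_openflow(dp, dp_idx, &msg, false))) {
    handle_openflow_message(msg);    /* hypothetical handler */
    ofpbuf_delete(msg);
}
if (error != EAGAIN) {
    /* a real error occurred; 'error' is a positive errno value */
}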
Example #19
/* Commit operation. */
static ofl_err
bundle_commit(struct datapath *dp,
              struct bundle_table *table,
              uint32_t bundle_id,
              uint16_t flags,
              const struct sender *sender) {
    struct bundle_table_entry *entry;
    struct bundle_message *bundle_msg;
    struct ofl_msg_header *msg;
    uint32_t xid;
    ofl_err error;
    ofl_err last_error = ofl_error(OFPET_BUNDLE_FAILED, OFPBFC_BAD_ID);

    /* Find and process commit operation for bundle ID */
    entry = bundle_table_entry_find(table, bundle_id);
    if (entry != NULL) {
        /* Ensure flags are consistent with flags specified previously */
        if (entry->flags != flags) {
            last_error = ofl_error(OFPET_BUNDLE_FAILED, OFPBFC_BAD_FLAGS);
        } else {
            /* Save state in case failure occurs */
            last_error = 0;

            dp_save_state(dp);

            /* Commit all messages in bundle, stopping at first error */
            LIST_FOR_EACH (bundle_msg, struct bundle_message, node, &entry->bundle_message_list) {
                VLOG_DBG_RL(LOG_MODULE, &rl, "Commit of message with type %u and length %u\n",
                       bundle_msg->message->type,
                       ntohs(bundle_msg->message->length));

                error = ofl_msg_unpack((uint8_t *)bundle_msg->message,
                                       ntohs(bundle_msg->message->length),
                                       &msg, &xid, dp->exp);

                if (!error) {
                    /* This prototype only properly supports bundling of
                     * messages that do not generate replies (other than
                     * error replies).  TODO: keep replies in a holding
                     * area and only release them to the controller when
                     * the commit succeeds. */
                    error = handle_control_msg(dp, msg, sender);

                    if (error) {
                        ofl_msg_free(msg, dp->exp);
                    }
                }

                if (error) {
                    last_error = error;
                    break;
                }
            }

            /* Restore state if failures occurred */
            if (last_error) {
                dp_restore_state(dp);
            } else {
                /* TODO free memory used to save state without restoring
                 * (not required currently as variables used to save/restore
                 * state are re-used) */
            }

            /* We need to generate the error ourselves. The spec says that
             * the error needs to refer to the offending message in the bundle.
             * If we just returned the error code, the error message would refer
             * to the commit message. */
            if (last_error) {
                struct sender orig_sender = {.remote = sender->remote,
                                             .conn_id = sender->conn_id,
                                             .xid = xid};

                struct ofl_msg_error orig_err =
                            {{.type = OFPT_ERROR},
                             .type = ofl_error_type(last_error),
                             .code = ofl_error_code(last_error),
                             .data_length = ntohs(bundle_msg->message->length),
                             .data        = (uint8_t *)bundle_msg->message};
                dp_send_message(dp, (struct ofl_msg_header *)&orig_err, &orig_sender);
                /* Trigger the second error message. */
                last_error = ofl_error(OFPET_BUNDLE_FAILED, OFPBFC_MSG_FAILED);
            }
        }

        /* Whether or not commit succeeded: free entry for bundle ID */
        bundle_table_entry_destroy(entry);
    }

    return last_error;
}
Example #20
void
pipeline_process_packet(struct pipeline *pl, struct packet *pkt) {
    struct flow_table *table, *next_table;

    if (VLOG_IS_DBG_ENABLED(LOG_MODULE)) {
        char *pkt_str = packet_to_string(pkt);
        VLOG_DBG_RL(LOG_MODULE, &rl, "processing packet: %s", pkt_str);
        free(pkt_str);
    }
 
    if (!packet_handle_std_is_ttl_valid(pkt->handle_std)) {
        if ((pl->dp->config.flags & OFPC_INVALID_TTL_TO_CONTROLLER) != 0) {
            VLOG_DBG_RL(LOG_MODULE, &rl, "Packet has invalid TTL, sending to controller.");
            
            send_packet_to_controller(pl, pkt, 0/*table_id*/, OFPR_INVALID_TTL);
        } else {
            VLOG_DBG_RL(LOG_MODULE, &rl, "Packet has invalid TTL, dropping.");
        }
        packet_destroy(pkt);
        return;
    }

    next_table = pl->tables[0];
    while (next_table != NULL) {
        struct flow_entry *entry;

        VLOG_DBG_RL(LOG_MODULE, &rl, "trying table %u.", next_table->stats->table_id);

        pkt->table_id = next_table->stats->table_id;
        table         = next_table;
        next_table    = NULL;
         
        entry = flow_table_lookup(table, pkt);
        if (entry != NULL) {
            if (VLOG_IS_DBG_ENABLED(LOG_MODULE)) {
                char *m = ofl_structs_flow_stats_to_string(entry->stats, pkt->dp->exp);
                VLOG_DBG_RL(LOG_MODULE, &rl, "found matching entry: %s.", m);
                free(m);
            }

            execute_entry(pl, entry, &next_table, &pkt);
            /* Packet could be destroyed by a meter instruction */
            if (!pkt)
                return;
            
            if (next_table == NULL) {
                /* The cookie field is set to 0xffffffffffffffff because the
                   packet cannot be associated with any particular flow. */
                action_set_execute(pkt->action_set, pkt, 0xffffffffffffffff);
                packet_destroy(pkt);
                return;
            }

        } else {
            /* OpenFlow 1.3 default behavior on a table miss. */
            VLOG_DBG_RL(LOG_MODULE, &rl, "No matching entry found. Dropping packet.");
            packet_destroy(pkt);
            return;
        }
    }
    VLOG_WARN_RL(LOG_MODULE, &rl, "Reached outside of pipeline processing cycle.");
}
Example #21
static int
nl_sock_transact_multiple__(struct nl_sock *sock,
                            struct nl_transaction **transactions, size_t n,
                            size_t *done)
{
    uint64_t tmp_reply_stub[1024 / 8];
    struct nl_transaction tmp_txn;
    struct ofpbuf tmp_reply;

    uint32_t base_seq;
    struct iovec iovs[MAX_IOVS];
    struct msghdr msg;
    int error;
    int i;

    base_seq = nl_sock_allocate_seq(sock, n);
    *done = 0;
    for (i = 0; i < n; i++) {
        struct nl_transaction *txn = transactions[i];
        struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(txn->request);

        nlmsg->nlmsg_len = txn->request->size;
        nlmsg->nlmsg_seq = base_seq + i;
        nlmsg->nlmsg_pid = sock->pid;

        iovs[i].iov_base = txn->request->data;
        iovs[i].iov_len = txn->request->size;
    }

    memset(&msg, 0, sizeof msg);
    msg.msg_iov = iovs;
    msg.msg_iovlen = n;
    do {
        error = sendmsg(sock->fd, &msg, 0) < 0 ? errno : 0;
    } while (error == EINTR);

    for (i = 0; i < n; i++) {
        struct nl_transaction *txn = transactions[i];

        log_nlmsg(__func__, error, txn->request->data, txn->request->size,
                  sock->protocol);
    }
    if (!error) {
        COVERAGE_ADD(netlink_sent, n);
    }

    if (error) {
        return error;
    }

    ofpbuf_use_stub(&tmp_reply, tmp_reply_stub, sizeof tmp_reply_stub);
    tmp_txn.request = NULL;
    tmp_txn.reply = &tmp_reply;
    tmp_txn.error = 0;
    while (n > 0) {
        struct nl_transaction *buf_txn, *txn;
        uint32_t seq;

        /* Find a transaction whose buffer we can use for receiving a reply.
         * If no such transaction is left, use tmp_txn. */
        buf_txn = &tmp_txn;
        for (i = 0; i < n; i++) {
            if (transactions[i]->reply) {
                buf_txn = transactions[i];
                break;
            }
        }

        /* Receive a reply. */
        error = nl_sock_recv__(sock, buf_txn->reply, false);
        if (error) {
            if (error == EAGAIN) {
                nl_sock_record_errors__(transactions, n, 0);
                *done += n;
                error = 0;
            }
            break;
        }

        /* Match the reply up with a transaction. */
        seq = nl_msg_nlmsghdr(buf_txn->reply)->nlmsg_seq;
        if (seq < base_seq || seq >= base_seq + n) {
            VLOG_DBG_RL(&rl, "ignoring unexpected seq %#"PRIx32, seq);
            continue;
        }
        i = seq - base_seq;
        txn = transactions[i];

        /* Fill in the results for 'txn'. */
        if (nl_msg_nlmsgerr(buf_txn->reply, &txn->error)) {
            if (txn->reply) {
                ofpbuf_clear(txn->reply);
            }
            if (txn->error) {
                VLOG_DBG_RL(&rl, "received NAK error=%d (%s)",
                            txn->error, ovs_strerror(txn->error));
            }
        } else {
            txn->error = 0;
            if (txn->reply && txn != buf_txn) {
                /* Swap buffers. */
                struct ofpbuf *reply = buf_txn->reply;
                buf_txn->reply = txn->reply;
                txn->reply = reply;
            }
        }

        /* Fill in the results for transactions before 'txn'.  (We have to do
         * this after the results for 'txn' itself because of the buffer swap
         * above.) */
        nl_sock_record_errors__(transactions, i, 0);

        /* Advance. */
        *done += i + 1;
        transactions += i + 1;
        n -= i + 1;
        base_seq += i + 1;
    }
    ofpbuf_uninit(&tmp_reply);

    return error;
}
Example #22
void
meter_entry_apply(struct meter_entry *entry, struct packet **pkt) {
    size_t b;
    bool drop = false;

    entry->stats->packet_in_count++;
    entry->stats->byte_in_count += (*pkt)->buffer->size;

    b = choose_band(entry, *pkt);
    if (b != -1) {
        struct ofl_meter_band_header *band_header = (struct ofl_meter_band_header *) entry->config->bands[b];
        switch (band_header->type) {
            case OFPMBT_DROP: {
                drop = true;
                break;
            }
            case OFPMBT_DSCP_REMARK: {
                packet_handle_std_validate((*pkt)->handle_std);
                if ((*pkt)->handle_std->valid) {
                    struct ofl_meter_band_dscp_remark *band_header = (struct ofl_meter_band_dscp_remark *) entry->config->bands[b];
                    /* Nothing prevents this band from being used on non-IP packets, so filter them out. Jean II */
                    if ((*pkt)->handle_std->proto->ipv4 != NULL) {
                        /* Fetch the DSCP from the IPv4 header. */
                        struct ip_header *ipv4 = (*pkt)->handle_std->proto->ipv4;
                        uint8_t old_drop = ipv4->ip_tos & 0x1C;
                        /* The spec says that we need to increase the drop precedence
                           of the packet. We need a valid DSCP out of the process, so
                           we can only modify the DSCP if the drop precedence is low
                           (tos bits xxx010xx) or medium (tos bits xxx100xx). Jean II */
                        if (((old_drop == 0x8) && (band_header->prec_level <= 2)) || ((old_drop == 0x10) && (band_header->prec_level <= 1))) {
                            uint8_t new_drop = old_drop + (band_header->prec_level << 3);
                            uint8_t new_tos = new_drop | (ipv4->ip_tos & 0xE3);
                            uint16_t old_val = htons((ipv4->ip_ihl_ver << 8) + ipv4->ip_tos);
                            uint16_t new_val = htons((ipv4->ip_ihl_ver << 8) + new_tos);
                            ipv4->ip_csum = recalc_csum16(ipv4->ip_csum, old_val, new_val);
                            ipv4->ip_tos = new_tos;
                        }
                    } else if ((*pkt)->handle_std->proto->ipv6 != NULL) {
                        struct ipv6_header *ipv6 = (*pkt)->handle_std->proto->ipv6;
                        uint32_t ipv6_ver_tc_fl = ntohl(ipv6->ipv6_ver_tc_fl);
                        uint32_t old_drop = ipv6_ver_tc_fl & 0x1C00000;
                        if (((old_drop == 0x800000) && (band_header->prec_level <= 2)) || ((old_drop == 0x1000000) && (band_header->prec_level <= 1))) {
                            uint32_t prec_level = band_header->prec_level << 23;
                            uint32_t new_drop = old_drop + prec_level;
                            ipv6->ipv6_ver_tc_fl = htonl(new_drop | (ipv6_ver_tc_fl & 0xFE3FFFFF));
                        }
                    }
                    (*pkt)->handle_std->valid = false;
                }
                break;
            }
            case OFPMBT_EXPERIMENTER: {
                break;
            }
        }
        entry->stats->band_stats[b]->byte_band_count += (*pkt)->buffer->size;
        entry->stats->band_stats[b]->packet_band_count++;
        if (drop) {
            VLOG_DBG_RL(LOG_MODULE, &rl, "Dropping packet: rate %d", band_header->rate);
            packet_destroy(*pkt);
            *pkt = NULL;
        }
    }
}
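
A worked example of the IPv4 remark arithmetic above, with illustrative values: a tos byte of 0x28 (DSCP AF11) has "low" drop precedence, and a band with prec_level = 1 bumps it to AF12.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint8_t ip_tos = 0x28, prec_level = 1;            /* DSCP AF11, remark by one level */
    uint8_t old_drop = ip_tos & 0x1C;                 /* 0x08: "low" drop precedence     */
    uint8_t new_drop = old_drop + (prec_level << 3);  /* 0x10: "medium" drop precedence  */
    uint8_t new_tos  = new_drop | (ip_tos & 0xE3);    /* 0x30: DSCP AF12                 */

    /* In the real code the IPv4 checksum is then patched incrementally with
     * recalc_csum16() using the old and new 16-bit words. */
    printf("old_tos=%#x new_tos=%#x\n", ip_tos, new_tos);
    return 0;
}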