Example #1
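/* Relay callback for OpenFlow messages received from the remote half of 'r'.
 * If the message is a Nicira NXT_STATUS_REQUEST, runs each matching status
 * category callback to accumulate a text reply, sends an NXT_STATUS_REPLY
 * back on the remote rconn, and returns true to mark the request as handled.
 * Returns false for any other message. */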
static bool
switch_status_remote_packet_cb(struct relay *r, void *ss_)
{
    struct switch_status *ss = ss_;
    struct rconn *rc = r->halves[HALF_REMOTE].rconn;
    struct ofpbuf *msg = r->halves[HALF_REMOTE].rxbuf;
    struct switch_status_category *c;
    struct nicira_header *request;
    struct nicira_header *reply;
    struct status_reply sr;
    struct ofpbuf *b;
    int retval;

    if (msg->size < sizeof(struct nicira_header)) {
        return false;
    }
    request = msg->data;
    if (request->header.type != OFPT_EXPERIMENTER
        || request->vendor != htonl(NX_VENDOR_ID)
        || request->subtype != htonl(NXT_STATUS_REQUEST)) {
        return false;
    }

    sr.request.string = (void *) (request + 1);
    sr.request.length = msg->size - sizeof *request;
    ds_init(&sr.output);
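    /* Run every category for which the shorter of the category name and the
     * request string is a prefix of the other, so an empty request selects
     * every category. */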
    for (c = ss->categories; c < &ss->categories[ss->n_categories]; c++) {
        if (!memcmp(c->name, sr.request.string,
                    MIN(strlen(c->name), sr.request.length))) {
            sr.category = c;
            c->cb(&sr, c->aux);
        }
    }
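    /* Wrap the accumulated text in a Nicira status reply that echoes the
     * request's xid and send it back to the remote side. */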
    reply = make_openflow_xid(sizeof *reply + sr.output.length,
                              OFPT_EXPERIMENTER, request->header.xid, &b);
    reply->vendor = htonl(NX_VENDOR_ID);
    reply->subtype = htonl(NXT_STATUS_REPLY);
    memcpy(reply + 1, sr.output.string, sr.output.length);
    retval = rconn_send(rc, b, NULL);
    if (retval && retval != EAGAIN) {
        VLOG_WARN(LOG_MODULE, "send failed (%s)", strerror(retval));
    }
    ds_destroy(&sr.output);
    return true;
}
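
The interesting part of the handler above is the loose prefix match that selects which status categories run: a category fires whenever the shorter of its name and the request string is a prefix of the other, so an empty request selects every category. The following self-contained sketch reproduces just that matching logic with simplified stand-in types (struct category, dispatch(), and print_cb() are illustrative names, not part of the original code):

/* Minimal sketch of the category matching above.  The types are simplified
 * stand-ins, not the real switch_status definitions. */
#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct category {
    const char *name;
    void (*cb)(const char *name);
};

static void
print_cb(const char *name)
{
    printf("reporting category \"%s\"\n", name);
}

static const struct category categories[] = {
    { "config", print_cb },
    { "port", print_cb },
    { "switch", print_cb },
};

static void
dispatch(const char *request, size_t length)
{
    const struct category *c;

    for (c = categories;
         c < &categories[sizeof categories / sizeof *categories]; c++) {
        /* Same test as the handler: match when the shorter of the category
         * name and the request is a prefix of the other. */
        if (!memcmp(c->name, request, MIN(strlen(c->name), length))) {
            c->cb(c->name);
        }
    }
}

int
main(void)
{
    dispatch("", 0);        /* Empty request: all three categories run. */
    dispatch("port", 4);    /* Exact name: only "port" runs. */
    dispatch("s", 1);       /* Leading fragment: only "switch" runs. */
    return 0;
}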
Example #2
/* Takes care of necessary 'sw' activity, except for receiving packets (which
 * the caller must do). */
void
lswitch_run(struct lswitch *sw, struct rconn *rconn)
{
    long long int now = time_msec();

    if (sw->ml) {
        mac_learning_run(sw->ml, NULL);
    }

    /* If we're waiting for more replies, keep waiting for up to 10 s. */
    if (sw->last_reply != LLONG_MIN) {
        if (now - sw->last_reply > 10000) {
            VLOG_ERR_RL(&rl, "%012llx: No more flow stat replies last 10 s",
                        sw->datapath_id);
            sw->last_reply = LLONG_MIN;
            sw->last_query = LLONG_MIN;
            schedule_query(sw, 0);
        } else {
            return;
        }
    }

    /* If we're waiting for any reply at all, keep waiting for up to 10 s. */
    if (sw->last_query != LLONG_MIN) {
        if (now - sw->last_query > 10000) {
            VLOG_ERR_RL(&rl, "%012llx: No flow stat replies in last 10 s",
                        sw->datapath_id);
            sw->last_query = LLONG_MIN;
            schedule_query(sw, 0);
        } else {
            return;
        }
    }

    /* If it's time to send another query, do so. */
    if (sw->next_query != LLONG_MIN && now >= sw->next_query) {
        sw->next_query = LLONG_MIN;
        if (!rconn_is_connected(rconn)) {
            schedule_query(sw, 1000);
        } else {
            struct ofp_stats_request *osr;
            struct ofp_flow_stats_request *ofsr;
            struct ofpbuf *b;
            int error;

            VLOG_DBG("%012llx: Sending flow stats request to implement STP",
                     sw->datapath_id);

            sw->last_query = now;
            sw->query_xid = random_uint32();
            sw->n_flows = 0;
            sw->n_no_recv = 0;
            sw->n_no_send = 0;
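            /* Build an OpenFlow 1.0 flow stats request that matches every
             * flow: all match fields wildcarded, all tables (0xff), any
             * output port. */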
            osr = make_openflow_xid(sizeof *osr + sizeof *ofsr,
                                    OFPT_STATS_REQUEST, sw->query_xid, &b);
            osr->type = htons(OFPST_FLOW);
            osr->flags = htons(0);
            ofsr = (struct ofp_flow_stats_request *) osr->body;
            ofsr->match.wildcards = htonl(OFPFW_ALL);
            ofsr->table_id = 0xff;
            ofsr->out_port = htons(OFPP_NONE);

            error = rconn_send(rconn, b, NULL);
            if (error) {
                VLOG_WARN_RL(&rl, "%012llx: sending flow stats request "
                             "failed: %s", sw->datapath_id, strerror(error));
                ofpbuf_delete(b);
                schedule_query(sw, 1000);
            }
        }
    }
}
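
lswitch_run() paces its flow stats queries with three timestamps on 'sw', each using LLONG_MIN as an "idle" sentinel: next_query arms the next request, last_query detects a query that has gone 10 s without any reply, and last_reply detects a reply stream that has stalled for 10 s. The sketch below models that state machine with hypothetical helpers (struct fake_switch, sketch_schedule_query(), and sketch_on_reply() are illustrative stand-ins; the real schedule_query() and flow stats reply handler are not shown in this example):

/* Self-contained model of the query pacing in lswitch_run().  Everything
 * here is an illustrative stand-in, not the real learning-switch API. */
#include <limits.h>
#include <stdio.h>

struct fake_switch {
    long long int last_query;   /* Time of last query, or LLONG_MIN if idle. */
    long long int last_reply;   /* Time of last partial reply, or LLONG_MIN. */
    long long int next_query;   /* Time to send next query, or LLONG_MIN. */
};

/* Assumed equivalent of schedule_query(): arm the next query 'delay' ms
 * after 'now'. */
static void
sketch_schedule_query(struct fake_switch *sw, long long int now,
                      long long int delay)
{
    sw->next_query = now + delay;
}

/* Model of a flow stats reply handler: partial replies only refresh
 * last_reply; the final reply returns the switch to idle and re-arms the
 * query timer. */
static void
sketch_on_reply(struct fake_switch *sw, long long int now, int more_to_come)
{
    if (more_to_come) {
        sw->last_reply = now;
    } else {
        sw->last_query = LLONG_MIN;
        sw->last_reply = LLONG_MIN;
        sketch_schedule_query(sw, now, 10000);
    }
}

int
main(void)
{
    struct fake_switch sw = { LLONG_MIN, LLONG_MIN, LLONG_MIN };

    sketch_schedule_query(&sw, 0, 1000);   /* Query becomes due at t=1000 ms. */
    printf("next query at %lld ms\n", sw.next_query);

    sw.next_query = LLONG_MIN;             /* lswitch_run() would clear this... */
    sw.last_query = 1000;                  /* ...and record when the query went out. */
    sketch_on_reply(&sw, 1500, 1);         /* Partial reply keeps us waiting. */
    sketch_on_reply(&sw, 1600, 0);         /* Final reply re-arms the timer. */
    printf("next query at %lld ms\n", sw.next_query);
    return 0;
}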