static void
test_atomic_flag(void)
{
    atomic_flag flag = ATOMIC_FLAG_INIT;
    ovs_assert(atomic_flag_test_and_set(&flag) == false);
    ovs_assert(atomic_flag_test_and_set(&flag) == true);
    atomic_flag_clear(&flag);
    ovs_assert(atomic_flag_test_and_set(&flag) == false);
}

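/* For illustration: the test-and-set semantics checked above are exactly what
 * make atomic_flag usable as a minimal spinlock.  This is a hedged sketch in
 * standard C11 <stdatomic.h>, not OVS's compatibility wrappers; 'spin_lock'
 * and 'spin_unlock' are hypothetical helper names, not OVS API. */
#include <stdatomic.h>

static atomic_flag spin = ATOMIC_FLAG_INIT;

static void
spin_lock(void)
{
    /* atomic_flag_test_and_set() returns the previous value, so loop until
     * we observe 'false', meaning we were the thread that set it. */
    while (atomic_flag_test_and_set(&spin)) {
        /* busy-wait */
    }
}

static void
spin_unlock(void)
{
    atomic_flag_clear(&spin);
}
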
/* Adds a new cell in the current row of 'table', which must have been added
 * with table_add_row().  Cells are filled in the same order that the columns
 * were added with table_add_column().
 *
 * The caller is responsible for filling in the returned cell, in one of two
 * fashions:
 *
 *   - If the cell should contain an ovsdb_datum, formatted according to the
 *     table style, then fill in the 'json' member with the JSON
 *     representation of the datum and 'type' with its type.
 *
 *   - If the cell should contain a fixed text string, then the caller should
 *     assign that string to the 'text' member.  This is undesirable if the
 *     cell actually contains OVSDB data because 'text' cannot be formatted
 *     according to the table style; it is always output verbatim. */
struct cell *
table_add_cell(struct table *table)
{
    size_t x, y;

    ovs_assert(table->n_rows > 0);
    ovs_assert(table->current_column < table->n_columns);

    x = table->current_column++;
    y = table->n_rows - 1;

    return table_cell__(table, y, x);
}

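/* A minimal usage sketch for the 'text' style of cell described above.  It
 * assumes a 'struct table' already set up with table_add_column(); the
 * helper name 'add_text_row' and the value being a plain string are
 * illustrative assumptions. */
static void
add_text_row(struct table *table, const char *name)
{
    table_add_row(table);
    table_add_cell(table)->text = xstrdup(name);
}
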
void
replication_init(const char *sync_from_, const char *exclude_tables,
                 const struct uuid *server)
{
    free(sync_from);
    sync_from = xstrdup(sync_from_);

    char *err = set_blacklist_tables(exclude_tables, false);
    /* The caller should already have verified that 'exclude_tables' is
     * parseable.  An error here is unexpected. */
    ovs_assert(!err);

    replication_dbs_destroy();

    shash_clear(&local_dbs);

    if (session) {
        jsonrpc_session_close(session);
    }

    session = jsonrpc_session_open(sync_from, true);
    session_seqno = UINT_MAX;

    /* Keep a copy of the local server's UUID. */
    server_uuid = *server;

    state = RPL_S_INIT;
}

static void
resize(struct hmap *hmap, size_t new_mask, const char *where)
{
    struct hmap tmp;
    size_t i;

    ovs_assert(is_pow2(new_mask + 1));

    hmap_init(&tmp);
    if (new_mask) {
        tmp.buckets = xmalloc(sizeof *tmp.buckets * (new_mask + 1));
        tmp.mask = new_mask;
        for (i = 0; i <= tmp.mask; i++) {
            tmp.buckets[i] = NULL;
        }
    }
    for (i = 0; i <= hmap->mask; i++) {
        struct hmap_node *node, *next;
        int count = 0;

        for (node = hmap->buckets[i]; node; node = next) {
            next = node->next;
            hmap_insert_fast(&tmp, node, node->hash);
            count++;
        }
        if (count > 5) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);

            COVERAGE_INC(hmap_pathological);
            VLOG_DBG_RL(&rl, "%s: %d nodes in bucket (%"PRIuSIZE" nodes, "
                        "%"PRIuSIZE" buckets)",
                        where, count, hmap->n, hmap->mask + 1);
        }
    }
    hmap_swap(hmap, &tmp);
    hmap_destroy(&tmp);
}

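/* Why the is_pow2() assertion above matters: when the bucket count is a
 * power of two, 'mask' is that count minus one, so a bucket index can be
 * computed with a single AND instead of a modulo.  A minimal sketch of the
 * idea; the name 'bucket_index' is illustrative, not OVS API. */
#include <stddef.h>
#include <stdint.h>

static inline size_t
bucket_index(uint32_t hash, size_t mask)
{
    /* With n_buckets == mask + 1 a power of 2, this equals
     * hash % n_buckets. */
    return hash & mask;
}
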
static struct netdev_bsd *
netdev_bsd_cast(const struct netdev *netdev)
{
    ovs_assert(is_netdev_bsd_class(netdev_dev_get_class(
                                       netdev_get_dev(netdev))));
    return CONTAINER_OF(netdev, struct netdev_bsd, netdev);
}

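/* CONTAINER_OF recovers a pointer to an enclosing struct from a pointer to
 * one of its members, which is what makes the downcast above safe once the
 * class check has passed.  A hedged sketch of the standard offsetof-based
 * technique; the macro name and the structs are illustrative, not the OVS
 * definitions. */
#include <stddef.h>

#define container_of_example(ptr, type, member) \
    ((type *) ((char *) (ptr) - offsetof(type, member)))

struct base { int x; };
struct derived {
    int extra;
    struct base base;   /* embedded member */
};

/* Given 'b' pointing at a derived object's 'base' member, recover the whole
 * object:
 *     struct derived *d = container_of_example(b, struct derived, base);
 */
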
static void
emit_ct(struct action_context *ctx, bool recirc_next, bool commit)
{
    struct ofpact_conntrack *ct = ofpact_put_CT(ctx->ofpacts);
    ct->flags |= commit ? NX_CT_F_COMMIT : 0;

    /* If "recirc" is set, we automatically go to the next table. */
    if (recirc_next) {
        if (ctx->cur_ltable < ctx->n_tables) {
            ct->recirc_table = ctx->first_ptable + ctx->cur_ltable + 1;
        } else {
            action_error(ctx,
                         "\"ct_next\" action not allowed in last table.");
            return;
        }
    } else {
        ct->recirc_table = NX_CT_RECIRC_NONE;
    }

    ct->zone_src.field = mf_from_id(MFF_LOG_CT_ZONE);
    ct->zone_src.ofs = 0;
    ct->zone_src.n_bits = 16;

    /* We do not support ALGs yet. */
    ct->alg = 0;

    /* CT only works with IP, so set up a prerequisite. */
    struct expr *expr;
    char *error;

    expr = expr_parse_string("ip", ctx->symtab, &error);
    ovs_assert(!error);
    ctx->prereqs = expr_combine(EXPR_T_AND, ctx->prereqs, expr);
}

void
dpdk_set_lcore_id(unsigned cpu)
{
    /* NON_PMD_CORE_ID is reserved for use by non-PMD threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;
}

/* Decodes information about a queue from the OFPT_QUEUE_GET_CONFIG_REPLY in
 * 'msg' and stores it in '*queue'.  ofputil_decode_queue_get_config_reply()
 * must already have pulled off the main header.
 *
 * This function returns EOF if the last queue has already been decoded, 0 if
 * a queue was successfully decoded into '*queue', or an ofperr if there was
 * a problem decoding 'msg'. */
int
ofputil_pull_queue_get_config_reply(struct ofpbuf *msg,
                                    struct ofputil_queue_config *queue)
{
    enum ofpraw raw;

    if (!msg->header) {
        /* Pull OpenFlow header. */
        raw = ofpraw_pull_assert(msg);

        /* Pull protocol-specific ofp_queue_get_config_reply header (OF1.4
         * doesn't have one at all). */
        if (raw == OFPRAW_OFPT10_QUEUE_GET_CONFIG_REPLY) {
            ofpbuf_pull(msg, sizeof(struct ofp10_queue_get_config_reply));
        } else if (raw == OFPRAW_OFPT11_QUEUE_GET_CONFIG_REPLY) {
            ofpbuf_pull(msg, sizeof(struct ofp11_queue_get_config_reply));
        } else {
            ovs_assert(raw == OFPRAW_OFPST14_QUEUE_DESC_REPLY);
        }
    } else {
        raw = ofpraw_decode_assert(msg->header);
    }

    queue->min_rate = UINT16_MAX;
    queue->max_rate = UINT16_MAX;

    if (!msg->size) {
        return EOF;
    } else if (raw == OFPRAW_OFPST14_QUEUE_DESC_REPLY) {
        return ofputil_pull_queue_get_config_reply14(msg, queue);
    } else {
        return ofputil_pull_queue_get_config_reply10(msg, queue);
    }
}

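/* A minimal sketch of the calling convention documented above: pull queues
 * one at a time until EOF, and treat any other nonzero return as an ofperr.
 * 'handle_queue' is a hypothetical per-queue callback, not OVS API. */
static int
decode_all_queues(struct ofpbuf *msg)
{
    struct ofputil_queue_config queue;
    int retval;

    while (!(retval = ofputil_pull_queue_get_config_reply(msg, &queue))) {
        handle_queue(&queue);           /* hypothetical handler */
    }
    return retval == EOF ? 0 : retval;  /* EOF just means "no more queues". */
}
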
static ssize_t
ssl_recv(struct stream *stream, void *buffer, size_t n)
{
    struct ssl_stream *sslv = ssl_stream_cast(stream);
    int old_state;
    ssize_t ret;

    /* Behavior of zero-byte SSL_read is poorly defined. */
    ovs_assert(n > 0);

    old_state = SSL_get_state(sslv->ssl);
    ret = SSL_read(sslv->ssl, buffer, n);
    if (old_state != SSL_get_state(sslv->ssl)) {
        sslv->tx_want = SSL_NOTHING;
    }
    sslv->rx_want = SSL_NOTHING;

    if (ret > 0) {
        return ret;
    } else {
        int error = SSL_get_error(sslv->ssl, ret);

        if (error == SSL_ERROR_ZERO_RETURN) {
            return 0;
        } else {
            return -interpret_ssl_error("SSL_read", ret, error,
                                        &sslv->rx_want);
        }
    }
}

/* Changes the entry's 'priority' and keeps the vector ordered. */
void
cpvector_change_priority(struct cpvector *cpvec, void *ptr, int priority)
{
    struct pvector *old = cpvec->temp;
    int index;

    if (!old) {
        old = cpvector_get_pvector(cpvec);
    }

    index = pvector_find(old, ptr);

    ovs_assert(index >= 0);
    /* Now at the index of the entry to be updated. */

    /* Check whether the update cannot be done in place, i.e. the new
     * priority would break the ordering with a neighboring entry. */
    if ((priority > old->vector[index].priority && index > 0
         && priority > old->vector[index - 1].priority)
        || (priority < old->vector[index].priority
            && index < old->size - 1
            && priority < old->vector[index + 1].priority)) {
        /* Have to use a temp. */
        if (!cpvec->temp) {
            /* Have to reallocate to reorder. */
            cpvec->temp = pvector_dup(old);
            old = cpvec->temp;
            /* Publish will sort. */
        }
    }
    old->vector[index].priority = priority;
}

/* Reserves 'size' bytes of headroom so that they can be later allocated with
 * dp_packet_push_uninit() without reallocating the dp_packet. */
void
dp_packet_reserve(struct dp_packet *b, size_t size)
{
    ovs_assert(!dp_packet_size(b));
    dp_packet_prealloc_tailroom(b, size);
    dp_packet_set_data(b, (char *) dp_packet_data(b) + size);
}

/* Reserves 'size' bytes of headroom so that they can be later allocated with
 * ofpbuf_push_uninit() without reallocating the ofpbuf. */
void
ofpbuf_reserve(struct ofpbuf *b, size_t size)
{
    ovs_assert(!b->size);
    ofpbuf_prealloc_tailroom(b, size);
    b->data = (char *) b->data + size;
}

/* Reserves 'size' bytes of headroom so that they can be later allocated with
 * ofpbuf_push_uninit() without reallocating the ofpbuf. */
void
ofpbuf_reserve(struct ofpbuf *b, size_t size)
{
    ovs_assert(!ofpbuf_size(b));
    ofpbuf_prealloc_tailroom(b, size);
    ofpbuf_set_data(b, (char *) ofpbuf_data(b) + size);
}

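/* A minimal sketch of the headroom pattern the ofpbuf_reserve() variants
 * above enable: reserve space up front, append the payload, then prepend a
 * header afterwards without any reallocation.  'struct hdr' and the helper
 * name are illustrative assumptions, not OVS types. */
struct hdr {                    /* illustrative header layout */
    uint16_t type;
    uint16_t length;
};

static void
build_with_headroom(struct ofpbuf *b, const void *payload, size_t len)
{
    ofpbuf_init(b, 0);
    ofpbuf_reserve(b, sizeof(struct hdr));  /* headroom for the header */
    ofpbuf_put(b, payload, len);            /* body first... */

    struct hdr *h = ofpbuf_push_uninit(b, sizeof *h);   /* ...header last */
    memset(h, 0, sizeof *h);                /* caller fills in the fields */
}
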
/* Adds 'c' at the head of 'q', which must not be full. */
void
byteq_put(struct byteq *q, uint8_t c)
{
    ovs_assert(!byteq_is_full(q));
    *byteq_head(q) = c;
    q->head++;
}

static struct dp_packet_batch *
prepare_packets(size_t n, bool change, unsigned tid, ovs_be16 *dl_type)
{
    struct dp_packet_batch *pkt_batch = xzalloc(sizeof *pkt_batch);
    struct flow flow;
    size_t i;

    ovs_assert(n <= ARRAY_SIZE(pkt_batch->packets));

    dp_packet_batch_init(pkt_batch);
    for (i = 0; i < n; i++) {
        struct udp_header *udp;
        struct dp_packet *pkt = dp_packet_new(sizeof payload / 2);

        dp_packet_put_hex(pkt, payload, NULL);
        flow_extract(pkt, &flow);

        udp = dp_packet_l4(pkt);
        udp->udp_src = htons(ntohs(udp->udp_src) + tid);

        if (change) {
            udp->udp_dst = htons(ntohs(udp->udp_dst) + i);
        }

        dp_packet_batch_add(pkt_batch, pkt);
        *dl_type = flow.dl_type;
    }

    return pkt_batch;
}

static const char *
lex_parse_mask(const char *p, struct lex_token *token)
{
    struct lex_token mask;

    /* Parse just past the '/' as a second integer.  Handle errors. */
    p = lex_parse_integer__(p + 1, &mask);
    if (mask.type == LEX_T_ERROR) {
        lex_token_swap(&mask, token);
        lex_token_destroy(&mask);
        return p;
    }
    ovs_assert(mask.type == LEX_T_INTEGER);

    /* Now convert the value and mask into a masked integer token.
     * We have a few special cases. */
    token->type = LEX_T_MASKED_INTEGER;
    memset(&token->mask, 0, sizeof token->mask);
    uint32_t prefix_bits = ntohll(mask.value.integer);
    if (token->format == mask.format) {
        /* Same format value and mask is always OK. */
        token->mask = mask.value;
    } else if (token->format == LEX_F_IPV4
               && mask.format == LEX_F_DECIMAL
               && prefix_bits <= 32) {
        /* IPv4 address with decimal mask is a CIDR prefix. */
        token->mask.integer = htonll(ntohl(be32_prefix_mask(prefix_bits)));
    } else if (token->format == LEX_F_IPV6
               && mask.format == LEX_F_DECIMAL
               && prefix_bits <= 128) {
        /* IPv6 address with decimal mask is a CIDR prefix. */
        token->mask.ipv6 = ipv6_create_mask(prefix_bits);
    } else if (token->format == LEX_F_DECIMAL
               && mask.format == LEX_F_HEXADECIMAL
               && token->value.integer == 0) {
        /* Special case for e.g. 0/0x1234. */
        token->format = LEX_F_HEXADECIMAL;
        token->mask = mask.value;
    } else {
        lex_error(token, "Value and mask have incompatible formats.");
        return p;
    }

    /* Check invariant that a 1-bit in the value corresponds to a 1-bit in
     * the mask. */
    for (int i = 0; i < ARRAY_SIZE(token->mask.be32); i++) {
        ovs_be32 v = token->value.be32[i];
        ovs_be32 m = token->mask.be32[i];

        if (v & ~m) {
            lex_error(token, "Value contains unmasked 1-bits.");
            break;
        }
    }

    /* Done! */
    lex_token_destroy(&mask);
    return p;
}

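/* For illustration: the CIDR conversion above turns a decimal prefix length
 * into a mask with that many leading 1-bits.  A minimal sketch of the
 * arithmetic for IPv4 in host byte order, valid for prefix lengths 0..32;
 * the name 'cidr_prefix_mask' is illustrative, not the OVS
 * be32_prefix_mask(). */
#include <stdint.h>

static inline uint32_t
cidr_prefix_mask(unsigned int prefix_bits)
{
    /* Shifting a 32-bit value by 32 is undefined in C, so handle 0
     * separately.  Example: 24 -> 0xffffff00, matching "x.y.z.0/24". */
    return prefix_bits ? UINT32_MAX << (32 - prefix_bits) : 0;
}
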
/* Returns the back element in 'list_'.
 *
 * Undefined behavior if 'list_' is empty. */
struct list *
list_back(const struct list *list_)
{
    struct list *list = CONST_CAST(struct list *, list_);

    ovs_assert(!list_is_empty(list));
    return list->prev;
}

/* Returns the front element in 'list_'.
 *
 * Undefined behavior if 'list_' is empty. */
struct list *
list_front(const struct list *list_)
{
    struct list *list = CONST_CAST(struct list *, list_);

    ovs_assert(!list_is_empty(list));
    return list->next;
}

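/* list_front() and list_back() above are consistent with a circular, doubly
 * linked list whose head acts as a sentinel: the head's 'next' is the first
 * real element and its 'prev' is the last.  A hedged sketch of that layout;
 * the struct below is illustrative, not the OVS definition. */
struct list_node_example {
    struct list_node_example *prev;  /* predecessor; last element for head */
    struct list_node_example *next;  /* successor; first element for head */
};

/* An empty list is a head whose 'prev' and 'next' point at itself, which is
 * why an emptiness check can be a single pointer comparison:
 *     head->next == head
 */
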
/* Initializes 'q' as an empty byteq that uses the 'size' bytes of 'buffer'
 * to store data.  'size' must be a power of 2.
 *
 * The caller must ensure that 'buffer' remains available to the byteq as
 * long as 'q' is in use. */
void
byteq_init(struct byteq *q, uint8_t *buffer, size_t size)
{
    ovs_assert(is_pow2(size));
    q->buffer = buffer;
    q->size = size;
    q->head = q->tail = 0;
}

/* Decodes the NXT_FLOW_MOD_TABLE_ID message at 'oh'.  Returns the message's
 * argument, that is, whether the flow_mod_table_id feature should be
 * enabled. */
bool
ofputil_decode_nx_flow_mod_table_id(const struct ofp_header *oh)
{
    struct ofpbuf b = ofpbuf_const_initializer(oh, ntohs(oh->length));
    enum ofpraw raw = ofpraw_pull_assert(&b);
    ovs_assert(raw == OFPRAW_NXT_FLOW_MOD_TABLE_ID);

    uint8_t *enable = ofpbuf_pull(&b, 8);
    return *enable != 0;
}

/* Reserves 'headroom' bytes at the head and 'tailroom' at the end so that
 * they can be later allocated with dp_packet_push_uninit() or
 * dp_packet_put_uninit() without reallocating the dp_packet. */
void
dp_packet_reserve_with_tailroom(struct dp_packet *b, size_t headroom,
                                size_t tailroom)
{
    ovs_assert(!dp_packet_size(b));
    dp_packet_prealloc_tailroom(b, headroom + tailroom);
    dp_packet_set_data(b, (char *) dp_packet_data(b) + headroom);
}

/* Reserves 'headroom' bytes at the head and 'tailroom' at the end so that
 * they can be later allocated with ofpbuf_push_uninit() or
 * ofpbuf_put_uninit() without reallocating the ofpbuf. */
void
ofpbuf_reserve_with_tailroom(struct ofpbuf *b, size_t headroom,
                             size_t tailroom)
{
    ovs_assert(!ofpbuf_size(b));
    ofpbuf_prealloc_tailroom(b, headroom + tailroom);
    ofpbuf_set_data(b, (char *) ofpbuf_data(b) + headroom);
}

void
match_set_xreg_masked(struct match *match, unsigned int xreg_idx,
                      uint64_t value, uint64_t mask)
{
    ovs_assert(xreg_idx < FLOW_N_XREGS);
    flow_wildcards_set_xreg_mask(&match->wc, xreg_idx, mask);
    flow_set_xreg(&match->flow, xreg_idx, value & mask);
}

static void
odp_set_tunnel_action(const struct nlattr *a, struct flow_tnl *tun_key)
{
    enum odp_key_fitness fitness;

    fitness = odp_tun_key_from_attr(a, tun_key);
    ovs_assert(fitness != ODP_FIT_ERROR);
}

/* A daemon doesn't normally have any use for the file descriptors for stdin,
 * stdout, and stderr after it detaches.  To keep these file descriptors from
 * e.g. holding an SSH session open, by default detaching replaces each of
 * these file descriptors with /dev/null.  But a few daemons expect the user
 * to redirect stdout or stderr to a file, in which case it is desirable to
 * keep these file descriptors.  This function, therefore, disables replacing
 * 'fd' with /dev/null when the daemon detaches. */
void
daemon_save_fd(int fd)
{
    ovs_assert(fd == STDIN_FILENO
               || fd == STDOUT_FILENO
               || fd == STDERR_FILENO);
    save_fds[fd] = true;
}

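/* A minimal usage sketch for the behavior documented above: a daemon whose
 * logs go to a user-redirected stderr calls daemon_save_fd() before
 * detaching so that stderr survives daemonization.  'daemonize()' stands in
 * for whatever detach entry point the program uses and is an assumption
 * here. */
static void
start_daemon(void)
{
    daemon_save_fd(STDERR_FILENO);   /* keep stderr across the detach */
    daemonize();                     /* assumed detach call */
}
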
void
match_set_reg_masked(struct match *match, unsigned int reg_idx,
                     uint32_t value, uint32_t mask)
{
    ovs_assert(reg_idx < FLOW_N_REGS);
    flow_wildcards_set_reg_mask(&match->wc, reg_idx, mask);
    match->flow.regs[reg_idx] = value & mask;
}

static const struct triplet *
ofperr_get_triplet__(enum ofperr error, const struct ofperr_domain *domain)
{
    size_t ofs = error - OFPERR_OFS;

    ovs_assert(ofperr_is_valid(error));
    return &domain->errors[ofs];
}

/* Removes a byte from the tail of 'q' and returns it.  'q' must not be
 * empty. */
uint8_t
byteq_get(struct byteq *q)
{
    uint8_t c;

    ovs_assert(!byteq_is_empty(q));
    c = *byteq_tail(q);
    q->tail++;
    return c;
}

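/* A minimal sketch tying together byteq_init(), byteq_put(), and byteq_get()
 * from above: a fixed, power-of-2-sized ring buffer used in FIFO order.  The
 * buffer size of 64 is an illustrative choice. */
static void
byteq_roundtrip(void)
{
    uint8_t storage[64];            /* must be a power of 2 */
    struct byteq q;

    byteq_init(&q, storage, sizeof storage);
    byteq_put(&q, 'a');
    byteq_put(&q, 'b');
    ovs_assert(byteq_get(&q) == 'a');   /* FIFO: first in, first out */
    ovs_assert(byteq_get(&q) == 'b');
}
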
/* Removes 'size' bytes from the head end of 'b', which must contain at least
 * 'size' bytes of data.  Returns a pointer to the first byte of the removed
 * data. */
void *
ofpbuf_pull(struct ofpbuf *b, size_t size)
{
    void *data = b->data;

    ovs_assert(b->size >= size);
    b->data = (char *) b->data + size;
    b->size -= size;
    return data;
}

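/* A minimal sketch of the parsing pattern ofpbuf_pull() enables: consume a
 * fixed-size header from the front of a buffer, leaving b->data/b->size
 * describing just the payload.  'struct my_header' is an illustrative
 * assumption, not an OVS type. */
struct my_header {              /* illustrative header layout */
    uint8_t type;
    uint8_t flags;
    uint16_t length;
};

static const struct my_header *
parse_header(struct ofpbuf *b)
{
    if (b->size < sizeof(struct my_header)) {
        return NULL;            /* too short to contain the header */
    }
    return ofpbuf_pull(b, sizeof(struct my_header));
}
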
static void
jsonrpc_error(struct jsonrpc *rpc, int error)
{
    ovs_assert(error);
    if (!rpc->status) {
        rpc->status = error;
        jsonrpc_cleanup(rpc);
    }
}
