/* Walk the chain of parent results from `out` back to the original input
 * (hs, in_port), inverting every rule that was applied along the way.  The
 * returned list holds the inverted (hs, port) results that survive
 * intersection with each ancestor. */
struct list_res
res_walk_parents (const struct res *out, const struct hs *hs, int in_port,
                  array_t *out_arr)
{
  struct res *curr_res = (struct res *) out;
  struct list_res currq = {0};

  // set up initial result to start inversion
  struct hs int_hs;
  hs_isect_arr (&int_hs, &out->hs, out_arr);
  list_append (&currq, res_extend (out, &int_hs, out->port, true));

  struct res *cur;
  while (curr_res) {
    if (curr_res->rules.cur) {
      for (int i = curr_res->rules.cur - 1; i >= 0; i--) {
        struct list_res nextq = {0};
        struct res_rule r = curr_res->rules.arr[i];
        while ((cur = currq.head)) {
          list_pop (&currq);
          struct list_res tmp = rule_inv_apply (r.tf_tf, r.tf_rule, cur, false);
          list_concat (&nextq, &tmp);
          res_free (cur);
        } // for each current result from rule inversion
        currq = nextq;
      } // for each rule
    } else return currq;

    // set the (hs, port) which the inverted (hs, port) results must intersect
    struct res *parent = curr_res->parent;
    struct hs *next_hs = hs_create (curr_res->hs.len);
    int next_port;
    if (parent) {
      hs_copy (next_hs, &parent->hs);
      next_port = parent->port;
    } else {
      hs_copy (next_hs, hs);
      next_port = in_port;
    }

    // intersect the results in `currq` with the target (hs, port)
    struct list_res nextq = {0};
    while ((cur = currq.head)) {
      list_pop (&currq);
      struct hs *new_hs = hs_isect_a (&cur->hs, next_hs);
      if (cur->port == next_port && new_hs)
        list_append (&nextq, res_extend (cur, new_hs, next_port, false));
      else res_free (cur);
    }
    currq = nextq;
    curr_res = parent;
  }
  return currq;
}
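/* A minimal caller-side sketch of draining the list that res_walk_parents
 * returns, using the same list_pop/res_free pattern as the code above.
 * `handle_result` is a hypothetical callback standing in for whatever the
 * caller records about each inverted (hs, port) pair; it is not part of the
 * library. */
static void
consume_inverted_results (struct list_res *inv,
                          void (*handle_result) (const struct res *))
{
  struct res *cur;
  while ((cur = inv->head)) {
    list_pop (inv);        /* detach the head result */
    handle_result (cur);   /* e.g. inspect cur->port and cur->hs */
    res_free (cur);        /* the consumer frees results it has popped */
  }
}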
static struct list_res
rule_apply (const struct rule *r, const struct tf *tf, const struct res *in,
            bool append, uint32_t *app, int *napp)
{
  struct list_res res = {0};
  if (!r->out) app_add (r->idx, app, napp);
  if (!r->out || r->out == in->port) return res;

  // apply match, dependencies, and (for rewrite rules) the rewrite
  struct hs hs;
  if (!r->match) hs_copy (&hs, &in->hs);
  else {
    if (!hs_isect_arr (&hs, &in->hs, DATA_ARR (r->match))) return res;
    if (r->deps) deps_diff (&hs, in->port, DEPS (tf, r->deps), tf, app, *napp);
    if (!hs_compact_m (&hs, r->mask ? DATA_ARR (r->mask) : NULL)) {
      hs_destroy (&hs);
      return res;
    }
    if (r->mask) hs_rewrite (&hs, DATA_ARR (r->mask), DATA_ARR (r->rewrite));
  }

  // resolve the rule's output to a port list: a positive `out` is a single
  // port, a negative `out` names a port group in the tf
  bool used_hs = false;
  uint32_t n, x;
  const uint32_t *a;
  if (r->out > 0) { n = 1; x = r->out; a = &x; }
  else {
    const struct ports *p = PORTS (tf, r->out);
    n = p->n;
    a = p->arr;
  }

  // fan the result out to every output port; the first result takes ownership
  // of `hs`, later ones get a copy via res_extend
  for (int i = 0; i < n; i++) {
    if (a[i] == in->port) continue;

    struct res *tmp;
    if (used_hs) tmp = res_extend (in, &hs, a[i], append);
    else {
      tmp = res_extend (in, NULL, a[i], append);
      tmp->hs = hs;
      used_hs = true;
    }
    res_rule_add (tmp, tf, r->idx);
    list_append (&res, tmp);
  }

  if (res.head) app_add (r->idx, app, napp);
  if (!used_hs) hs_destroy (&hs);
  return res;
}
struct list_res
rule_inv_apply (const struct tf *tf, const struct rule *r, const struct res *in,
                bool append)
{
  /* Given a rule `r` in a tf `tf`, apply the inverse of `r` to the input
     (headerspace, port) pair `in`. */
  struct list_res res = {0};

  // prune cases where the rule's outport doesn't include the current port
  if (r->out > 0 && r->out != in->port) return res;
  if (r->out < 0 && !port_match (in->port, r->out, tf)) return res;
  if (!r->out) return res;

  // set up the inverse match and rewrite arrays
  array_t *inv_rw = 0, *inv_mat = 0;
  if (r->mask) { // rewrite rule
    inv_mat = rule_set_inv_mat (r, in->hs.len);
    inv_rw = rule_set_inv_rw (r, in->hs.len);
  } else { // forwarding and topology rules
    if (r->match) inv_mat = array_copy (DATA_ARR (r->match), in->hs.len);
  }

  struct hs hs;
  if (!r->match) hs_copy (&hs, &in->hs); // topology rule
  else { // forwarding and rewrite rules
    if (!hs_isect_arr (&hs, &in->hs, inv_mat)) {
      if (inv_rw) array_free (inv_rw);
      array_free (inv_mat);
      return res;
    }
    if (r->mask) hs_rewrite (&hs, DATA_ARR (r->mask), inv_rw);
  }

  // there is a new hs result corresponding to each rule inport
  bool used_hs = port_append_res (&res, r, tf, in, r->in, append, &hs, true);

  if (inv_rw) array_free (inv_rw);
  if (inv_mat) array_free (inv_mat);
  if (!used_hs) hs_destroy (&hs);
  return res;
}
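/* Illustrative only: a toy model of how a rewrite rule's inverse behaves.  The
 * real code works on the library's packed array_t headerspace encoding through
 * rule_set_inv_mat / rule_set_inv_rw (not shown here); the '0'/'1'/'x' strings
 * and the convention "mask[i] == '1' means bit i is rewritten" are assumptions
 * made purely for this sketch. */
#include <stddef.h>

/* Fill `pre` with the ternary pre-image of `post` under the rule and return 1,
 * or return 0 if `post` could not have been produced by the rule. */
static int
toy_rule_inverse (const char *mask, const char *rewrite,
                  const char *post, size_t len, char *pre)
{
  for (size_t i = 0; i < len; i++) {
    if (mask[i] == '1') {                 /* bit i is forced by the rewrite */
      if (post[i] != rewrite[i] && post[i] != 'x')
        return 0;                         /* output disagrees with the forced value */
      pre[i] = 'x';                       /* pre-rewrite value is unknown: wildcard it */
    } else {
      pre[i] = post[i];                   /* untouched bits pass through unchanged */
    }
  }
  pre[len] = '\0';
  return 1;
}
/* For example, with mask "0011", rewrite "xx10", and post "0110", the sketch
 * yields pre "01xx": the two rewritten bits are wildcarded and are later
 * narrowed by the parent-headerspace intersection in res_walk_parents. */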
static struct list_res
rule_apply (const struct rule *r, const struct tf *tf, const struct res *in,
            bool append, uint32_t *app, int *napp)
{
  struct list_res res = {0};
  if (!r->out) app_add (r->idx, app, napp);
  if (!r->out || r->out == in->port) return res;

  struct hs hs;
  if (!r->match) hs_copy (&hs, &in->hs);
  else {
    if (!hs_isect_arr (&hs, &in->hs, DATA_ARR (r->match))) return res;
    if (r->deps) deps_diff (&hs, in->port, DEPS (tf, r->deps), tf, app, *napp);
    if (!hs_compact_m (&hs, r->mask ? DATA_ARR (r->mask) : NULL)) {
      hs_destroy (&hs);
      return res;
    }
    if (r->mask) hs_rewrite (&hs, DATA_ARR (r->mask), DATA_ARR (r->rewrite));
  }

  bool used_hs = port_append_res (&res, r, tf, in, r->out, append, &hs, false);

  if (res.head) app_add (r->idx, app, napp);
  if (!used_hs) hs_destroy (&hs);
  return res;
}
/* Per-switch reachability worker: repeatedly drains this switch's queue,
 * applies the switch's network transfer function and the topology transfer
 * function, records results that reach the target, and hands the remaining
 * results to the worker that owns the next-hop switch. */
static void *
reach_thread (void *vdata)
{
  struct tdata *data = vdata;
  int sw = data->sw;
  struct list_res *res = &data->res;
  const uint32_t *out = g_out;
  int nout = g_nout;
  int ntfs = data_file->ntfs - 1;
  //int count = 0, loops = 0;

  while (true) {
    struct list_res queue = {0};

    pthread_mutex_lock (&wait_lock);
    //fprintf (stderr, "%d %d\n", sw, queues[sw].n);
    while (!queues[sw].head) {
      waiters |= 1 << sw;
      if (waiters + 1 == 1 << ntfs) {
        for (int i = 0; i < ntfs; i++) {
          if (i == sw) continue;
          pthread_cond_broadcast (&conds[i]);
        }
        pthread_mutex_unlock (&wait_lock);
        return NULL;
      }
      pthread_cond_wait (&conds[sw], &wait_lock);
      if (waiters + 1 == 1 << ntfs) {
        pthread_mutex_unlock (&wait_lock);
        return NULL;
      }
      assert (waiters | (1 << sw));
    }
    queue = queues[sw];
    memset (&queues[sw], 0, sizeof queues[sw]);
    pthread_mutex_unlock (&wait_lock);

    struct res *cur;
    struct hs hs_isect_res;
    while ((cur = queue.head)) {
      list_pop (&queue);
      bool new_res = false;
      struct list_res nextqs[ntfs];
      memset (nextqs, 0, sizeof nextqs);

      struct list_res ntf_res = ntf_apply (cur, sw);
      struct res *ntf_cur = ntf_res.head;
      while (ntf_cur) {
        struct res *ntf_next = ntf_cur->next;

        if (!g_find_loop && (!out || int_find (ntf_cur->port, out, nout))
            && hs_isect_arr (&hs_isect_res, &ntf_cur->hs, g_out_arr)) {
          int count = 0;
          if (g_hop_count > 0) {
            for (const struct res *r = cur; r != NULL; r = r->parent, count++);
          }
          if (count == 0 || count == g_hop_count - 1) {
            list_append (res, ntf_cur);
            ref_add (ntf_cur, cur);
            if (out) { ntf_cur = ntf_next; continue; }
          }
        }

        struct list_res ttf_res = tf_apply (tf_get (0), ntf_cur, true);
        struct res *ttf_cur = ttf_res.head;
        while (ttf_cur) {
          struct res *ttf_next = ttf_cur->next;

          if (is_loop (ttf_cur->port, cur)) {
            if (!g_find_loop) { res_free (ttf_cur); ttf_cur = ttf_next; }
            else {
              list_append (res, ttf_cur);
              ref_add (ttf_cur, cur);
              ttf_cur = ttf_next;
            }
            //loops++;
            continue;
          }

          ref_add (ttf_cur, cur);
          if (!g_find_loop && ((out && int_find (ttf_cur->port, out, nout))
              && hs_isect_arr (&hs_isect_res, &ttf_cur->hs, g_out_arr))) {
            list_append (res, ttf_cur);
          } else {
            int new_sw = ntf_get_sw (ttf_cur->port);
            list_append (&nextqs[new_sw], ttf_cur);
            //count++;
            new_res = true;
          }
          ttf_cur = ttf_next;
        }

        if (out) res_free (ntf_cur);
        ntf_cur = ntf_next;
      }
      res_free_mt (cur, true);

      if (!new_res) continue;
      pthread_mutex_lock (&wait_lock);
      unsigned int wake = 0;
      for (int i = 0; i < ntfs; i++) {
        if (!nextqs[i].head) continue;
        list_concat (&queues[i], &nextqs[i]);
        pthread_cond_broadcast (&conds[i]);
        wake |= 1 << i;
      }
      waiters &= ~wake;
      pthread_mutex_unlock (&wait_lock);
    }
  }
}
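/* A standalone, illustrative sketch (not the code above) of the idle-detection
 * pattern reach_thread uses for termination: each worker owns one bit of a
 * `waiters` mask, a worker with an empty queue sets its bit and sleeps on its
 * own condition variable, and the last worker to go idle (when
 * waiters + 1 == 1 << NWORKERS) wakes the others so every thread can exit.
 * The per-switch result queues are reduced here to a `has_work[]` flag per
 * worker, and the bounded `rounds` counter exists only so the toy terminates. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NWORKERS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t conds[NWORKERS];
static unsigned waiters;                      /* bit i set => worker i is idle */
static int has_work[NWORKERS] = {1, 0, 0, 0}; /* toy stand-in for queues[] */
static int rounds = 16;                       /* how many times work is forwarded */

static void *
worker (void *arg)
{
  int id = (int) (intptr_t) arg;
  while (1) {
    pthread_mutex_lock (&lock);
    while (!has_work[id]) {
      waiters |= 1u << id;
      if (waiters + 1 == 1u << NWORKERS) {    /* everyone is idle: shut down */
        for (int i = 0; i < NWORKERS; i++)
          if (i != id) pthread_cond_broadcast (&conds[i]);
        pthread_mutex_unlock (&lock);
        return NULL;
      }
      pthread_cond_wait (&conds[id], &lock);
      if (waiters + 1 == 1u << NWORKERS) {    /* woken only to shut down */
        pthread_mutex_unlock (&lock);
        return NULL;
      }
    }
    has_work[id] = 0;                         /* "drain" this worker's queue */
    pthread_mutex_unlock (&lock);

    /* Pretend that processing produced work for the next worker; clearing its
     * waiter bit and signalling its condition variable mirrors the hand-off
     * at the bottom of reach_thread. */
    pthread_mutex_lock (&lock);
    if (rounds-- > 0) {
      int next = (id + 1) % NWORKERS;
      has_work[next] = 1;
      waiters &= ~(1u << next);
      pthread_cond_broadcast (&conds[next]);
    }
    pthread_mutex_unlock (&lock);
  }
}

int
main (void)
{
  pthread_t tid[NWORKERS];
  for (int i = 0; i < NWORKERS; i++) pthread_cond_init (&conds[i], NULL);
  for (int i = 0; i < NWORKERS; i++)
    pthread_create (&tid[i], NULL, worker, (void *) (intptr_t) i);
  for (int i = 0; i < NWORKERS; i++) pthread_join (tid[i], NULL);
  puts ("all workers idle");
  return 0;
}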