/*
 * Build a HashSet of field-name strings from a Ruby value.
 *
 * rfields may be:
 *   - Qnil          -> returns NULL (meaning "all fields")
 *   - a Ruby Array  -> each element is stringified and added
 *   - the string "*"-> returns NULL (meaning "all fields")
 *   - any other obj -> stringified and split on '|' into field names
 *
 * Returns a newly allocated HashSet owning strdup'd names (freed with
 * free() via hs_new_str), or NULL for the "all fields" cases.
 * Caller owns the returned set.
 */
static HashSet *
frt_get_fields(VALUE rfields)
{
    VALUE rval;
    HashSet *fields;
    char *s, *p, *str;

    if (rfields == Qnil) return NULL;

    fields = hs_new_str(&free);
    if (TYPE(rfields) == T_ARRAY) {
        int i;
        for (i = 0; i < RARRAY(rfields)->len; i++) {
            rval = rb_obj_as_string(RARRAY(rfields)->ptr[i]);
            hs_add(fields, estrdup(RSTRING(rval)->ptr));
        }
    } else {
        rval = rb_obj_as_string(rfields);
        if (strcmp("*", RSTRING(rval)->ptr) == 0) {
            /* "*" means every field: represented as NULL */
            hs_destroy(fields);
            fields = NULL;
        } else {
            /* split "a|b|c" on '|'; work on a private copy so we can
             * poke '\0' terminators into it */
            s = str = estrdup(RSTRING(rval)->ptr);
            /* FIX: strchr returns a pointer; compare against NULL, not
             * the char constant '\0' (same value, wrong type). */
            while ((p = strchr(s, '|')) != NULL) {
                *p = '\0';
                hs_add(fields, estrdup(s));
                s = p + 1;
            }
            hs_add(fields, estrdup(s));
            free(str);
        }
    }
    return fields;
}
/*
 * call-seq:
 *   query_parser.tokenized_fields = fields -> self
 *
 * Set the list of tokenized_fields. These tokenized_fields are tokenized in
 * the queries. If this is set to Qnil then all fields will be tokenized.
 */
static VALUE frt_qp_set_tkz_fields(VALUE self, VALUE rfields)
{
    GET_QP;
    /* Drop the previous set before computing the replacement (matches the
     * original behavior if frt_get_fields raises a Ruby exception). */
    if (qp->tokenized_fields != NULL) {
        hs_destroy(qp->tokenized_fields);
    }
    qp->tokenized_fields = frt_get_fields(rfields);
    return self;
}
/* Apply rule `r` of transfer function `tf` to the input result `in` (a
 * headerspace located at a port).  Returns a list of new results, one per
 * output port the surviving headerspace is forwarded to.  `app`/`napp`
 * accumulate indexes of rules considered "applied".
 * NOTE(review): near-duplicate of the later rule_apply that delegates its
 * output loop to port_append_res — confirm which version is the live one. */
static struct list_res
rule_apply (const struct rule *r, const struct tf *tf, const struct res *in,
            bool append, uint32_t *app, int *napp)
{
  struct list_res res = {0};

  /* A rule with no output still counts as applied, but emits nothing. */
  if (!r->out) app_add (r->idx, app, napp);
  /* No output, or the output would loop straight back to the input port. */
  if (!r->out || r->out == in->port) return res;

  struct hs hs;
  if (!r->match) hs_copy (&hs, &in->hs); /* no match: pass hs through */
  else {
    /* Intersect the incoming headerspace with the rule's match array. */
    if (!hs_isect_arr (&hs, &in->hs, DATA_ARR (r->match))) return res;
    /* Subtract headerspace already claimed by higher-priority rules. */
    if (r->deps) deps_diff (&hs, in->port, DEPS (tf, r->deps), tf, app, *napp);
    if (!hs_compact_m (&hs, r->mask ? DATA_ARR (r->mask) : NULL)) {
      hs_destroy (&hs);
      return res; /* nothing survived */
    }
    /* Rewrite rule: transform the surviving headerspace. */
    if (r->mask) hs_rewrite (&hs, DATA_ARR (r->mask), DATA_ARR (r->rewrite));
  }

  /* Resolve the output port set: a positive id is a single port; otherwise
   * it indexes an indirect port list stored in the tf. */
  bool used_hs = false;
  uint32_t n, x;
  const uint32_t *a;
  if (r->out > 0) { n = 1; x = r->out; a = &x; }
  else {
    const struct ports *p = PORTS (tf, r->out);
    n = p->n;
    a = p->arr;
  }

  for (int i = 0; i < n; i++) {
    if (a[i] == in->port) continue; /* never forward back out the in-port */
    struct res *tmp;
    /* Ownership of `hs` moves into the FIRST emitted result (struct copy,
     * no hs_copy); subsequent results get a deep copy via res_extend. */
    if (used_hs) tmp = res_extend (in, &hs, a[i], append);
    else {
      tmp = res_extend (in, NULL, a[i], append);
      tmp->hs = hs;
      used_hs = true;
    }
    res_rule_add (tmp, tf, r->idx);
    list_append (&res, tmp);
  }

  if (res.head) app_add (r->idx, app, napp);
  if (!used_hs) hs_destroy (&hs); /* no result took ownership: clean up */
  return res;
}
void hs_minus (struct hs *a, const struct hs *b) { assert (a->len == b->len); struct hs tmp; hs_copy (&tmp, b); hs_cmpl (&tmp); hs_isect (a, &tmp); hs_destroy (&tmp); hs_compact (a); }
/* Release a result node and walk up the parent chain, dropping one
 * reference per level.  A node that is still referenced is merely
 * unlinked from its list and kept alive.  (Iterative form of the
 * recursive parent walk.) */
void
res_free (struct res *res)
{
  while (res) {
    if (res->refs) {
      /* Children still point here: detach from any result list only. */
      res->next = NULL;
      return;
    }
    hs_destroy (&res->hs);
    pthread_mutex_destroy (&res->lock);
    struct res *up = res->parent;
    free (res);
    if (!up) return;
    up->refs--; /* this child is gone */
    res = up;   /* continue freeing upward while refs hit zero */
  }
}
/* Destroy an Index and release everything it owns: mutex, reader, writer,
 * store, analyzer, query parser, and key set, then the Index itself.
 * NOTE(review): the teardown order (reader closed before the writer, store
 * deref'd after both) appears deliberate — preserve it. */
void index_destroy(Index *self)
{
    mutex_destroy(&self->mutex);
    INDEX_CLOSE_READER(self);
    if (self->iw) iw_close(self->iw);      /* writer is optional */
    store_deref(self->store);
    a_deref(self->analyzer);
    if (self->qp) qp_destroy(self->qp);    /* query parser is lazy-created */
    if (self->key) hs_destroy(self->key);  /* key set is optional */
    free(self);
}
static bool port_append_res (struct list_res *res, const struct rule *r, const struct tf *tf, const struct res *in, int32_t ports, bool append, const struct hs *hs, bool inv_remove_deps) { /* Create new result containing headerspace `hs` for each port in `ports`. */ bool used_hs = false; struct hs *new_hs; uint32_t n, x; const uint32_t *a; if (ports > 0) { n = 1; x = ports; a = &x; } else { const struct ports *p = PORTS (tf, ports); n = p->n; a = p->arr; } for (int i = 0; i < n; i++) { if (a[i] == in->port) continue; if (inv_remove_deps) { /* For inversion, also remove dependencies for each input port of the inverted rule. */ new_hs = hs_create (hs->len); hs_copy (new_hs, hs); if (r->deps) deps_diff_inv (new_hs, a[i], DEPS (tf, r->deps), tf); if (!hs_compact_m (new_hs, r->mask ? DATA_ARR (r->mask) : NULL)) { hs_destroy(new_hs); continue; } } else new_hs = (struct hs*) hs; // now *new_hs has the latest hs at this port struct res *tmp; if (! inv_remove_deps) { if (used_hs) tmp = res_extend (in, hs, a[i], append); else { tmp = res_extend (in, NULL, a[i], append); tmp->hs = *hs; used_hs = true; } } else { tmp = res_extend (in, NULL, a[i], append); tmp->hs = *new_hs; } res_rule_add (tmp, tf, r->idx, r); list_append (res, tmp); } return used_hs; }
/* Apply rule `r` of transfer function `tf` to the input result `in` (a
 * headerspace at a port); forwarding to output ports is delegated to
 * port_append_res.  Returns the list of new results; `app`/`napp`
 * accumulate indexes of rules considered "applied". */
static struct list_res
rule_apply (const struct rule *r, const struct tf *tf, const struct res *in,
            bool append, uint32_t *app, int *napp)
{
  struct list_res res = {0};

  /* A rule with no output still counts as applied, but emits nothing. */
  if (!r->out) app_add (r->idx, app, napp);
  /* No output, or the output would loop straight back to the input port. */
  if (!r->out || r->out == in->port) return res;

  struct hs hs;
  if (!r->match) hs_copy (&hs, &in->hs); /* no match: pass hs through */
  else {
    /* Intersect the incoming headerspace with the rule's match array. */
    if (!hs_isect_arr (&hs, &in->hs, DATA_ARR (r->match))) return res;
    /* Subtract headerspace claimed by higher-priority rules. */
    if (r->deps) deps_diff (&hs, in->port, DEPS (tf, r->deps), tf, app, *napp);
    if (!hs_compact_m (&hs, r->mask ? DATA_ARR (r->mask) : NULL)) {
      hs_destroy (&hs);
      return res; /* nothing survived */
    }
    /* Rewrite rule: transform the surviving headerspace. */
    if (r->mask) hs_rewrite (&hs, DATA_ARR (r->mask), DATA_ARR (r->rewrite));
  }

  /* port_append_res reports whether it took ownership of `hs` (it moves
   * the struct into the first result it emits). */
  bool used_hs = port_append_res (&res, r, tf, in, r->out, append, &hs, false);

  if (res.head) app_add (r->idx, app, napp);
  if (!used_hs) hs_destroy (&hs); /* no result took ownership: clean up */
  return res;
}
/* Close a CompoundWriter: write the compound file (a vint entry count, a
 * directory of [u64 data offset, name] records, then each file's data),
 * back-patch the directory offsets, and free the writer.
 * Takes ownership of `cw` — it is freed on return. */
void cw_close(CompoundWriter *cw)
{
    OutStream *os = NULL;
    int i;

    if (cw->ids->size <= 0) {
        /* NOTE(review): assumes RAISE does not return (longjmp/abort);
         * if it can return, execution falls through to the writes below
         * with zero entries — confirm RAISE's semantics. */
        RAISE(STATE_ERROR, "Tried to merge compound file with no entries");
    }

    os = cw->store->new_output(cw->store, cw->name);

    os_write_vint(os, ary_size(cw->file_entries));

    /* Write the directory with all offsets at 0.
     * Remember the positions of directory entries so that we can adjust the
     * offsets later */
    for (i = 0; i < ary_size(cw->file_entries); i++) {
        cw->file_entries[i].dir_offset = os_pos(os);
        os_write_u64(os, 0); /* placeholder, patched below */
        os_write_string(os, cw->file_entries[i].name);
    }

    /* Open the files and copy their data into the stream. Remember the
     * locations of each file's data section. */
    for (i = 0; i < ary_size(cw->file_entries); i++) {
        cw->file_entries[i].data_offset = os_pos(os);
        cw_copy_file(cw, &cw->file_entries[i], os);
    }

    /* Write the data offsets into the directory of the compound stream */
    for (i = 0; i < ary_size(cw->file_entries); i++) {
        os_seek(os, cw->file_entries[i].dir_offset);
        os_write_u64(os, cw->file_entries[i].data_offset);
    }

    /* os is always non-NULL by this point; guard kept from original */
    if (os) {
        os_close(os);
    }

    hs_destroy(cw->ids);
    ary_free(cw->file_entries);
    free(cw);
}
/*
 * call-seq:
 *   query_parser.fields = fields -> self
 *
 * Set the list of fields. These fields are expanded for searches on "*".
 */
static VALUE frt_qp_set_fields(VALUE self, VALUE rfields)
{
    GET_QP;
    HashSet *new_fields = frt_get_fields(rfields);

    /* def_fields must follow all_fields when it aliased the old set or was
     * never set; remember that before the old set is destroyed. */
    int retarget_def = (qp->def_fields == qp->all_fields)
                       || (qp->def_fields == NULL);

    if (new_fields == NULL) {
        /* nil / "*" yields NULL; keep an (empty) concrete set instead. */
        new_fields = hs_new_str(&free);
    }

    hs_destroy(qp->all_fields);
    qp->all_fields = new_fields;
    if (retarget_def) {
        qp->def_fields = new_fields;
    }
    return self;
}
struct list_res rule_inv_apply (const struct tf *tf, const struct rule *r, const struct res *in, bool append) { /* Given a rule `r` in a tf `tf`, apply the inverse of `r` on the input (headerspace,port) `in`. */ struct list_res res = {0}; // prune cases where rule outport doesn't include the current port if (r->out > 0 && r->out != in->port) return res; if (r->out < 0 && !port_match(in->port, r->out, tf)) return res; if (!r->out) return res; // set up inverse match and rewrite arrays array_t *inv_rw=0, *inv_mat=0; if (r->mask) { // rewrite rule inv_mat = rule_set_inv_mat (r, in->hs.len); inv_rw = rule_set_inv_rw (r, in->hs.len); } else { // fwding and topology rules if (r->match) inv_mat = array_copy (DATA_ARR (r->match), in->hs.len); } struct hs hs; if (!r->match) hs_copy (&hs, &in->hs); // topology rule else { // fwding and rewrite rules if (!hs_isect_arr (&hs, &in->hs, inv_mat)) return res; if (r->mask) hs_rewrite (&hs, DATA_ARR (r->mask), inv_rw); } // there is a new hs result corresponding to each rule inport bool used_hs = port_append_res (&res, r, tf, in, r->in, append, &hs, true); if (inv_rw) array_free (inv_rw); if (inv_mat) array_free (inv_mat); if (!used_hs) hs_destroy (&hs); return res; }
/* Thread-safe variant of res_free.  Frees `res` once its refcount is zero,
 * then walks up the parent chain dropping one reference per level.
 * `lock` is false when the caller already holds res->lock (as the recursive
 * call below does for the parent); in both branches the lock is released
 * before returning or destroying the node. */
void res_free_mt (struct res *res, bool lock)
{
  if (lock) pthread_mutex_lock (&res->lock);
  if (res->refs) {
    /* Still referenced by children: just unlink from any result list. */
    res->next = NULL;
    pthread_mutex_unlock (&res->lock);
    return;
  }
  pthread_mutex_unlock (&res->lock);
  /* NOTE(review): refs was zero under the lock but the node is destroyed
   * after unlocking — assumes no thread can acquire a new reference to a
   * zero-ref node; confirm against the reference-taking paths. */
  hs_destroy (&res->hs);
  pthread_mutex_destroy (&res->lock);
  struct res *parent = res->parent;
  free (res);
  if (parent) {
    /* Take the parent's lock across the decrement and pass lock=false so
     * the recursive call does not try to re-lock it. */
    pthread_mutex_lock (&parent->lock);
    parent->refs--;
    res_free_mt (parent, false);
  }
}
/* Collapse each (element, diff-list) pair of `hs` into plain elements:
 * hs->list becomes the union over i of (elems[i] minus its diff[i]). */
void hs_comp_diff (struct hs *hs)
{
  struct hs_vec *v = &hs->list, new_list = {0};
  for (int i = 0; i < v->used; i++) {
    /* tmp wraps the i-th element; tmp2 borrows its diff vector. */
    struct hs tmp = {hs->len}, tmp2 = {hs->len};
    vec_append (&tmp.list, v->elems[i], false);
    v->elems[i] = NULL; /* ownership moved into tmp */
    tmp2.list = v->diff[i]; /* borrowed; presumably released by
                             * vec_destroy(v) below — confirm */
    hs_minus (&tmp, &tmp2); /* tmp := elems[i] \ diff[i] */

    if (!new_list.used) new_list = tmp.list; /* first result: steal the vec */
    else {
      /* Move each surviving element into new_list, then drop tmp's shell. */
      for (int j = 0; j < tmp.list.used; j++) {
        vec_append (&new_list, tmp.list.elems[j], false);
        tmp.list.elems[j] = NULL; /* moved into new_list */
      }
      hs_destroy (&tmp);
    }
  }
  vec_destroy (v);
  hs->list = new_list; /* replace the old vector wholesale */
}
/* Release a heap-allocated headerspace: destroy its contents, then the
 * container itself.  Counterpart to hs_create. */
void
hs_free (struct hs *hs)
{
  hs_destroy (hs); /* free internal storage */
  free (hs);       /* free the struct itself */
}
/* Tear down an upTree node: release the values stored in its hash set,
 * then the set itself, then the node. */
void
ut_destroy (struct upTree *ut)
{
  /* values must go before the set that indexes them */
  hs_free_values (ut->hs);
  hs_destroy (ut->hs);
  free (ut);
}