/* Allocate a ccpnode with 32-byte alignment and initialize every field
 * to its empty/sentinel state. */
ccpnode *ccpnode_alloc() {
  ccpnode *c = aligned_alloc(32, sizeof(ccpnode));
  assert(c);

  c->cidx         = -1;
  c->parent       = NULL;
  c->children     = NULL;
  c->num_children = 0;
  c->pidx_begin   = -1;
  c->lt           = NULL;
  clone_init(&c->diff_parent);
  clone_init(&c->generator);
  clone_init(&c->clone);
  
  return c;
}
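A minimal usage sketch follows; releasing the node with plain free() matches
aligned_alloc and assumes no field owns additional heap memory, which holds
for the empty state initialized above:

/* Sketch: allocate two nodes, link one to the other, and release both. */
void ccpnode_alloc_example(void) {
  ccpnode *parent = ccpnode_alloc();
  ccpnode *child  = ccpnode_alloc();
  child->parent = parent;
  free(child);   /* memory from aligned_alloc is compatible with free() */
  free(parent);
}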
void test_clone_iterator() {
  for(int iter = 0; iter < 1000; ++iter) {
    /* construct a random clone, but store the list of predicates it consists of */
    clone cl;
    clone_init(&cl);
  
    size_t num = 10;
    pred preds[num];
    for(int i = 0; i < num; ++i) {
      random_pred(preds+i);
      clone_insert_pred(&cl, preds+i);
    }

    /* use the clone iterator to extract the predicates */
    pred *plist;
    size_t sz;
    clone_get_predicates(&cl, &plist, &sz);

    /* check that the set of predicates returned by the clone iterator
     * coincides with the original list of predicates */
    int flag = 1;
    for(int i = 0; i < num; ++i) {
      int j;
      for(j = 0; j < sz; ++j) {
        if(pred_eq(preds+i, plist+j)) break;
      }
      if(j == sz) { flag = 0; break; }
    }
    if(flag) {
      for(int j = 0; j < sz; ++j) {
        int i;
        for(i = 0; i < num; ++i) {
          if(pred_eq(preds+i, plist+j)) break;
        }
        if(i == num) { flag = 0; break; }
      }
    }

    if(!flag) {
      printf("\nError. Clone iterator gives:\n");
      clone_print_verbosely(stdout, &cl);
      printf("\nBut original predicates were:\n");
      for(pred *p = preds; p < preds + num; ++p) {
        char str[pred_print_extensional_size()];
        pred_print_extensional(str, p);
        printf("%s\n", str);
      }
      free(plist);
      return;
    }

    free(plist);
  }
}
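The test also pins down the iterator's memory contract: clone_get_predicates
hands back a heap-allocated array that the caller must free. A minimal sketch
of that contract, reusing only functions that appear above:

/* Sketch: enumerate the predicates stored in a clone and release
 * the array allocated by clone_get_predicates. */
void print_clone_predicates(clone *cl) {
  pred *plist;
  size_t sz;
  clone_get_predicates(cl, &plist, &sz);
  for(size_t j = 0; j < sz; ++j) {
    char str[pred_print_extensional_size()];
    pred_print_extensional(str, plist + j);
    printf("%s\n", str);
  }
  free(plist);
}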
void script_closure_pred_equivalence_classes() {
  closure_operator *clop = clop_alloc_straightforward();

  pred *ess_preds;
  size_t ess_sz;
  get_essential_predicates(2, &ess_preds, &ess_sz);

  /* We use a hash table to store a mapping between clones (equivalence
   * classes) and predicates that generate those clones (closure-equivalent
   * predicates). */
  hashtable *ht = hashtable_alloc(512, clone_hash, (int (*)(const void *, const void *))clone_eq);

  /* construct the closure of all essential predicates */
  for(pred *p = ess_preds; p < ess_preds + ess_sz; ++p) {
    clone *closure = aligned_alloc(32, sizeof(clone));
    assert(closure);
    closure_one_pred(clop, p, closure);
    /* lookup equivalence class corresponding to `p` */
    clone *equiv_preds = hashtable_lookup(ht, closure);
    if(equiv_preds == NULL) {
      equiv_preds = malloc(sizeof(clone));
      assert(equiv_preds);
      clone_init(equiv_preds);
      hashtable_insert(ht, closure, equiv_preds);
    } else {
      free(closure);
    }
    clone_insert_pred(equiv_preds, p);
  }

  /* print the equivalence classes */
  int idx = 1;
  for(hashtable_iterator it = hashtable_iterator_begin(ht); !hashtable_iterator_end(&it); hashtable_iterator_next(&it)) {
    hash_elem *elem = hashtable_iterator_deref(&it);
    printf("====== class %u ====================================\n", idx);
    for(clone_iterator itc = clone_iterator_begin((clone *)elem->value); !clone_iterator_end((clone *)elem->value, &itc); clone_iterator_next(&itc)) {
      pred p = clone_iterator_deref(&itc);
      printf("%s\t%s\n",
             pred_print_fingerprint(&p),
             pred_print_extensional_ex(&p));
    }
    printf("\n");

    free(elem->key);
    free(elem->value);
    ++idx;
  }
  
  hashtable_free(ht);
  free(ess_preds);
  clop_free(clop);
}
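One caveat in the listing above: clone_eq is handed to hashtable_alloc through
a function-pointer cast, and calling a function through a pointer whose
parameter types differ from the function's own is undefined behavior in C. A
cast-free alternative (assuming clone_eq returns nonzero on equality) is a
thin adapter with the exact generic signature the hashtable expects:

/* Adapter with the generic comparison signature; avoids the cast. */
static int clone_eq_voidp(const void *a, const void *b) {
  return clone_eq((const clone *)a, (const clone *)b);
}

hashtable *ht = hashtable_alloc(512, clone_hash, clone_eq_voidp);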
Example #4
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 *
 * This function may be called concurrently. If we allocate from the mempool
 * concurrently, there is a possibility of deadlock. For example, if we have
 * a mempool of 256 pages and two processes, each wanting 256 pages, allocate
 * from the mempool concurrently, it may deadlock in a situation where both
 * processes have allocated 128 pages and the mempool is exhausted.
 *
 * In order to avoid this scenario we allocate the pages under a mutex.
 *
 * In order not to degrade performance with excessive locking, we try
 * non-blocking allocations without a mutex first; on failure, we fall back
 * to blocking allocations with a mutex.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->cc;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
	unsigned i, len, remaining_size;
	struct page *page;
	struct bio_vec *bvec;

retry:
	if (unlikely(gfp_mask & __GFP_WAIT))
		mutex_lock(&cc->bio_alloc_lock);

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		goto return_clone;

	clone_init(io, clone);

	remaining_size = size;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			crypt_free_buffer_pages(cc, clone);
			bio_put(clone);
			gfp_mask |= __GFP_WAIT;
			goto retry;
		}

		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;

		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
		bvec->bv_page = page;
		bvec->bv_len = len;
		bvec->bv_offset = 0;

		clone->bi_iter.bi_size += len;

		remaining_size -= len;
	}

return_clone:
	if (unlikely(gfp_mask & __GFP_WAIT))
		mutex_unlock(&cc->bio_alloc_lock);

	return clone;
}
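The pattern is not kernel-specific: attempt a non-blocking allocation first,
and only fall back to blocking allocation serialized by a mutex when the fast
path fails. A rough userspace analogue with pthreads (the pool_* API here is
hypothetical, declared only to make the shape of the pattern clear):

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical pool API: pool_try_take() returns NULL immediately when
 * the pool is empty; pool_take_blocking() always succeeds by waiting;
 * pool_put() returns an element to the pool. */
void *pool_try_take(void);
void *pool_take_blocking(void);
void pool_put(void *elem);

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Take n elements without the mutual-starvation deadlock: try the
 * non-blocking fast path first; on failure, return everything taken
 * so far and retry with blocking takes serialized by the mutex. */
static void take_n(void **out, int n) {
	bool blocking = false;
retry:
	if (blocking)
		pthread_mutex_lock(&pool_lock);
	for (int i = 0; i < n; i++) {
		out[i] = blocking ? pool_take_blocking() : pool_try_take();
		if (!out[i]) {          /* only reachable on the fast path */
			while (i-- > 0)
				pool_put(out[i]);
			blocking = true;
			goto retry;
		}
	}
	if (blocking)
		pthread_mutex_unlock(&pool_lock);
}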
Example #5
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
                                      unsigned *out_of_pages)
{
    struct crypt_config *cc = io->cc;
    struct bio *clone;
    unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
    unsigned i, len;
    struct page *page;

    clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
    if (!clone)
        return NULL;

    clone_init(io, clone);
    *out_of_pages = 0;

    for (i = 0; i < nr_iovecs; i++) {
        page = mempool_alloc(cc->page_pool, gfp_mask);
        if (!page) {
            *out_of_pages = 1;
            break;
        }

        /*
         * If additional pages cannot be allocated without waiting,
         * return a partially-allocated bio.  The caller will then try
         * to allocate more bios while submitting this partial bio.
         */
        gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

        len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

        if (!bio_add_page(clone, page, len, 0)) {
            mempool_free(page, cc->page_pool);
            break;
        }

        size -= len;
    }

    if (!clone->bi_size) {
        bio_put(clone);
        return NULL;
    }

    return clone;
}
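This variant shifts the deadlock avoidance to the caller: instead of blocking,
it returns whatever fits and reports exhaustion through *out_of_pages. A
hypothetical caller sketch in the same style; submit_partial_bio() is a
placeholder, and the congestion_wait() back-off is an assumption modeled on
dm-crypt callers of this kernel era:

static void crypt_write_all(struct dm_crypt_io *io, unsigned remaining)
{
    unsigned out_of_pages;

    while (remaining) {
        struct bio *clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
        if (!clone)
            break;                    /* hard allocation failure */
        remaining -= clone->bi_size;  /* may be a partial bio */
        submit_partial_bio(io, clone);
        if (out_of_pages)
            congestion_wait(BLK_RW_ASYNC, HZ/100);
    }
}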
void clop_straightforward_closure_clone_ex(void *internals, const clone *base, const clone *suppl, struct clone *closure) {
  clone recruit;
  clone_init(&recruit);
  
  for(clone_iterator it1 = clone_iterator_begin(suppl); !clone_iterator_end(suppl, &it1); clone_iterator_next(&it1)) {
    pred p1 = clone_iterator_deref(&it1);
    
    /* apply ops of arity 1 to the supplement predicates */
    op_permut(&p1, &recruit);
    op_proj(&p1, &recruit);
    op_ident(&p1, &recruit);

    /* apply ops of arity 2:
     * the first predicate is taken from the supplement,
     * while the second is taken from both the supplement and the base set */
    for(clone_iterator it2 = clone_iterator_begin(suppl); !clone_iterator_end(suppl, &it2); clone_iterator_next(&it2)) {
      pred p2 = clone_iterator_deref(&it2);
      op_conj(&p1, &p2, &recruit);
      op6(&p1, &p2, &recruit);
      op_trans(&p1, &p2, &recruit);
    }
    for(clone_iterator it3 = clone_iterator_begin(base); !clone_iterator_end(base, &it3); clone_iterator_next(&it3)) {
      pred p3 = clone_iterator_deref(&it3);
      op_conj(&p1, &p3, &recruit);
      op6(&p1, &p3, &recruit);
      op_trans(&p1, &p3, &recruit);
    }
  }

  clone new_base;
  clone_union(base, suppl, &new_base);

  clone diff;
  clone_diff(&recruit, &new_base, &diff);
  
  if(!clone_is_empty(&diff)) {
    /* if we've found new predicates, recursively continue computation */
    clop_straightforward_closure_clone_ex(internals, &new_base, &diff, closure);
  } else {
    /* if we haven't found new predicates, the computation is finished */
    clone_copy(&new_base, closure);
  }
}
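A minimal usage sketch: the closure generated by a single predicate falls out
by passing an empty base together with a one-element supplement (passing NULL
for the `internals` argument is an assumption of this sketch; the parameter is
unused above except in the recursive call):

/* Sketch: compute the closure <{p}> of a single predicate. */
void closure_of_one_pred(pred *p, clone *closure) {
  clone base, suppl;
  clone_init(&base);
  clone_init(&suppl);
  clone_insert_pred(&suppl, p);
  clop_straightforward_closure_clone_ex(NULL, &base, &suppl, closure);
}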
void ccplt_construct_step(const closure_operator *clop, ccplt *lt, pred_idx_t pidx) {
  const pred *p = idx_pred(lt->pred_num, pidx);
  size_t cur_step_num_nodes = lt->num_nodes;
  for(ccpnode **cp = lt->nodes; cp < lt->nodes + cur_step_num_nodes; ++cp) {
    ccpnode *current = *cp;
    
    /* if the current ccpnode contains the predicate to be added, skip it */
    if(clone_test_pred(&current->clone, p)) {
      ccpnode_set_child(current, p, current);
      continue;
    }

    /* compute the closure of the union {p} ∪ current */
    clone closure;
    if(current->parent == NULL) {
      /* if the current clone is the top clone, use a direct approach */
      clone clone_p;
      clone_init(&clone_p);
      clone_insert_pred(&clone_p, p);
      closure_clone_ex(clop, &current->clone, &clone_p, &closure);
    } else {
      /* if the current clone has a parent, compute the closure <{p} ∪ current>
       * using the result of the closure of `p` and its parent:
       * <{p}∪current> == <<{p}∪parent> ∪ (current\parent)> ==
       *               == <<{p}∪parent> ∪ (current\parent\<{p}∪parent>)> */
      
      /* parent_p == <{p}∪parent> */
      ccpnode *parent_p = ccpnode_get_child(current->parent, p);
      /* the closure <{p} ∪ parent> should have already been computed */
      assert(parent_p != NULL);
      /* tmp == (current\parent) \ <{p}∪parent> */
      clone tmp;
      clone_diff(&current->diff_parent, &parent_p->clone, &tmp);
      
      closure_clone_ex(clop, &parent_p->clone, &tmp, &closure);
    }

    /* test if we've constructed a new ccpnode */
    ccpnode *child = ccplt_lookup(lt, &closure);
    assert(child != current);
    if(child == NULL) {
      /* if we've constructed a new ccpnode, add it to the ccplt */
      child = ccpnode_alloc(lt);
      child->parent = current;
      clone_diff(&closure, &current->clone, &child->diff_parent);
      clone_copy(&current->generator, &child->generator);
      clone_insert_pred(&child->generator, p);
      clone_copy(&closure, &child->clone);
      ccplt_insert_node(lt, child, pidx+1);
    } else {
      /* If the closure already exists in the lattice, `current` is a candidate
       * for a better parent of `child`: check whether the difference between
       * `child` and `current` is smaller than the current child->diff_parent.
       * We want to select the parent for which this difference is the smallest;
       * this heuristic gives a significant overall computation speed-up.
       *
       * We apply this heuristic for `current < child` only, because on each
       * step the parents must be processed strictly before their children;
       * otherwise the closure of a child would depend on the not-yet-computed
       * closure of its parent */
      if(current < child) {
        size_t diff_card1 = clone_cardinality(&child->diff_parent);
        clone diff2;
        clone_diff(&closure, &current->clone, &diff2);
        size_t diff_card2 = clone_cardinality(&diff2);
        if(diff_card2 < diff_card1) {
          child->parent = current;
          clone_copy(&diff2, &child->diff_parent);
        }
      }
    }

    /* link the current ccpnode and the child ccpnode */
    ccpnode_set_child(current, p, child);
  }
}
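The step function is presumably invoked once per predicate, in index order, so
that on every step parents are processed strictly before their children (the
invariant the heuristic above depends on). A hypothetical driver, where
num_predicates stands in for however the lattice reports its predicate count:

/* Hypothetical driver: insert the predicates one at a time, in order. */
void ccplt_construct(const closure_operator *clop, ccplt *lt,
                     pred_idx_t num_predicates) {
  for(pred_idx_t pidx = 0; pidx < num_predicates; ++pidx) {
    ccplt_construct_step(clop, lt, pidx);
  }
}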