Example #1
//-----------------------------------------------------------------------------------------------------
//Function called by worker thread. Processes items in IN_QUEUE by retrieving them, executing them,
// then adding them to OUT_QUEUE.
//-----------------------------------------------------------------------------------------------------
void* process_rules(void* arg)
{
	rule_t* cur_rule;

	while(1)
	{
		//If something gets stuck in dep_q this could be a problem, but the helper thread
		// should exit if something is stuck

		//Only allow as many threads into in_q as there are items in it
		sem_wait(&jobs_sem);

		//Check exit condition
		if(adding_Rules == 0 && in_q->index == 0 && dep_q->index == 0) break;

		//Get rule, execute, and put in out_q
		if((cur_rule = get_rule(IN_QUEUE, NULL)) == NULL) { fprintf(stderr, "Error, tried to access NULL queue\n"); continue; }
		//printf("EXECUTE(%u): %s\n", (unsigned int)pthread_self(), cur_rule->target);
		pthread_mutex_lock(&exec_mutex);
		fake_exec(cur_rule);
		pthread_mutex_unlock(&exec_mutex);
		add_rule(OUT_QUEUE, cur_rule);
	}

	//printf("Worker(%u) exiting\n", (unsigned int)pthread_self());
	return NULL;
}
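Examples #1 and #19 only show the thread loops; the rule and queue types they share are declared elsewhere in the program. A minimal sketch of what those shared declarations could look like, purely as assumptions (only rule_t, str_node_t, the three queues, the two semaphores, exec_mutex, adding_Rules, and the IN_QUEUE/OUT_QUEUE/DEP_QUEUE selectors appear in the examples; queue_t, MAX_RULES, and the exact field layout are illustrative):

#include <pthread.h>
#include <semaphore.h>

#define MAX_RULES 128                    /* assumed capacity */

enum { IN_QUEUE, OUT_QUEUE, DEP_QUEUE }; /* queue selectors passed to add_rule()/get_rule() */

typedef struct str_node { char *str; struct str_node *next; } str_node_t;
typedef struct rule { char *target; str_node_t *deps; } rule_t;

typedef struct queue {
	rule_t *queue[MAX_RULES];            /* queued rules */
	int     index;                       /* number of rules currently held */
} queue_t;

/* shared state assumed by process_rules() and main_thread_helper() */
queue_t *in_q, *out_q, *dep_q;
sem_t jobs_sem, dep_sem;                 /* count runnable jobs / dep_q updates */
pthread_mutex_t exec_mutex = PTHREAD_MUTEX_INITIALIZER;
volatile int adding_Rules = 1;           /* cleared once the main thread has queued every rule */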
Example #2
void expand_rule(char exp[MAX_EXPANSION_SIZE],
                 int *exp_size, struct rule_set *rules)
{
  static char new_exp[MAX_EXPANSION_SIZE];
  char *rule = NULL;
  int i;
  int exp_pos = 0;
  int rule_pos = 0;
  int rule_len;
 
  for(i=0; i < *exp_size; ++i) {
    if(exp_pos >= MAX_EXPANSION_SIZE) break;

    if(exp[i] > rules->num_rules) 
			new_exp[exp_pos++] = exp[i]; /* Not a rule, just a char */
    else {
      rule = get_rule(exp[i], rules);
      rule_len = rules->rule_size;
      rule_pos = 0;
      
      rule_len--;
      /* copy chars */
      while(rule_pos <= rule_len && exp_pos < MAX_EXPANSION_SIZE)
				new_exp[exp_pos++] = rule[rule_pos++];
    }
  }
  /* 
  printf("Expand: %i -> %i\n", *exp_size, exp_pos);
  */
  *exp_size = exp_pos;
  memcpy(exp, new_exp, exp_pos); 
}
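In Examples #2, #3, and #15, get_rule(i, rules) returns a writable pointer to the i-th rule, and every rule occupies exactly rules->rule_size symbols. A plausible layout under that assumption (the rules field name and the accessor body are illustrative, not taken from the original source):

struct rule_set {
	int   num_rules;   /* number of rules in the set */
	int   rule_size;   /* fixed symbol count per rule */
	char *rules;       /* num_rules * rule_size symbols stored back to back */
};

/* plausible accessor matching the calls above */
char *get_rule(int i, struct rule_set *rules)
{
	return rules->rules + i * rules->rule_size;
}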
Example #3
void test_tree(struct tree *tree, int its)
{
  int i;
  char *rule;

  init_tree(tree);

  tree->pos.x = 0;
  tree->pos.y = 0;

  tree->seed.rule_size = 10;
  tree->seed.num_rules = 1;
  init_rule_set(&tree->seed);

  rule = get_rule(0, &tree->seed);
  strncpy(rule, "(++!0)--!0", 10);  
  chars_to_rule(rule, tree->seed.rule_size, tree->seed.num_rules);

  /* Expand the tree N times*/
  for(i=0; i < its; ++i) { 
    expand_rule(tree->expansion, &tree->exp_size, &tree->seed);
    /*print_syms(tree->expansion, tree->exp_size, tree->seed.num_rules);*/
  }

  gen_branches(tree);
  printf("Generated Branches\n");
  tree->pos.x = 320;
  tree->pos.y = 400;
}
Example #4
void test_attr_file__simple_read(void)
{
	git_attr_file *file;
	git_attr_assignment *assign;
	git_attr_rule *rule;

	cl_git_pass(git_attr_file__new_and_load(&file, cl_fixture("attr/attr0")));

	cl_assert_equal_s(cl_fixture("attr/attr0"), file->key + 2);
	cl_assert(file->rules.length == 1);

	rule = get_rule(0);
	cl_assert(rule != NULL);
	cl_assert_equal_s("*", rule->match.pattern);
	cl_assert(rule->match.length == 1);
	cl_assert((rule->match.flags & GIT_ATTR_FNMATCH_HASWILD) != 0);

	cl_assert(rule->assigns.length == 1);
	assign = get_assign(rule, 0);
	cl_assert(assign != NULL);
	cl_assert_equal_s("binary", assign->name);
	cl_assert(GIT_ATTR_TRUE(assign->value));

	git_attr_file__free(file);
}
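get_rule and get_assign in these libgit2 test snippets are not library API. They are most likely thin test-local helpers over git_vector_get, roughly as follows (the macro form is an assumption; `file` and `R` refer to the locals used in the tests):

/* assumed shape of the test helpers */
#define get_rule(X)      ((git_attr_rule *)git_vector_get(&file->rules, (X)))
#define get_assign(R, X) ((git_attr_assignment *)git_vector_get(&(R)->assigns, (X)))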
Example #5
void test_attr_file__simple_read(void)
{
	git_attr_file *file;
	git_attr_assignment *assign;
	git_attr_rule *rule;

	cl_git_pass(git_attr_file__new(&file));
	cl_git_pass(git_attr_file__from_file(NULL, cl_fixture("attr/attr0"), file));
	cl_assert_strequal(cl_fixture("attr/attr0"), file->path);
	cl_assert(file->rules.length == 1);

	rule = get_rule(0);
	cl_assert(rule != NULL);
	cl_assert_strequal("*", rule->match.pattern);
	cl_assert(rule->match.length == 1);
	cl_assert(rule->match.flags == 0);

	cl_assert(rule->assigns.length == 1);
	assign = get_assign(rule, 0);
	cl_assert(assign != NULL);
	cl_assert_strequal("binary", assign->name);
	cl_assert(assign->value == GIT_ATTR_TRUE);
	cl_assert(!assign->is_allocated);

	git_attr_file__free(file);
}
Example #6
int main(void){

	read = fopen("Shinjuku_SSL_day.txt", "r");
	
	//Error handling
	if (read == NULL){
		printf("Cannot open the file.\n");
		return -1;
	}

	//Initial read: load the train types, destinations, and cautions
	get_rule();
	get_goto();
	get_caution();

	//Home screen
	printf("What would you like to do?\n");
	printf("1) Get the times until you are home, starting from the current time (not implemented)\n");
	printf("2) Get the times until you are home, starting from a specified time (runs unconditionally)\n");

	printf("At what hour do you plan to leave? >> ");
	scanf("%d", &input_num.hour);
	printf("At what minute do you plan to leave? >> ");
	scanf("%d", &input_num.min);

	printf("%s",all_caution[2]);

	//Look up the departure times in the timetable.
	get_timetable();

	return 0;
}
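Example #6 relies on globals declared elsewhere in the program. A minimal sketch of those declarations, purely as assumptions so the snippet reads standalone (the array sizes and struct shape are guesses; only the names read, input_num, and all_caution appear above):

#include <stdio.h>

FILE *read;                               /* timetable file handle */
struct { int hour; int min; } input_num;  /* departure time entered by the user */
char all_caution[8][256];                 /* caution strings filled in by get_caution() */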
Example #7
static void parse_precedence_after_advance(precedence precedence)
{
	// Get the rule for the previous token.
	ParseFn prefix_rule = get_rule(parser.previous.type)->prefix;
	if (prefix_rule == NULL)
	{
		error("Expected expression.");
		return;
	}

	prefix_rule();

	while (precedence <= get_rule(parser.current.type)->precedence)
	{
		advance();
		ParseFn infix_rule = get_rule(parser.previous.type)->infix;
		infix_rule();
	}
}
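get_rule in Examples #7, #10, and #20 is the usual Pratt-parser lookup: one ParseRule per token type, holding a prefix handler, an infix handler, and a precedence. A minimal sketch of the assumed table (the entries, TOKEN_NUMBER, number, and the PREC_* names are illustrative; ParseFn, ParseRule, token_type, precedence, binary, and binary_right come from the examples):

typedef struct
{
	ParseFn    prefix;       /* parses a token that starts an expression */
	ParseFn    infix;        /* parses a token between two operands */
	precedence precedence;   /* binding power of the infix form */
} ParseRule;

/* illustrative excerpt; the real table has one entry per token_type */
static ParseRule rules[] = {
	[TOKEN_NUMBER] = { number, NULL,         PREC_NONE  },
	[TOKEN_PLUS]   = { NULL,   binary,       PREC_TERM  },
	[TOKEN_POW]    = { NULL,   binary_right, PREC_POWER },
};

static ParseRule *get_rule(token_type type)
{
	return &rules[type];
}

Because binary() (Example #20) re-parses the right-hand side at rule->precedence + 1 while binary_right() (Example #10) uses rule->precedence unchanged, operators handled by binary() group left-to-right and TOKEN_POW groups right-to-left.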
Example #8
void test_attr_file__check_attr_examples(void)
{
	git_attr_file *file;
	git_attr_rule *rule;
	git_attr_assignment *assign;

	cl_git_pass(git_attr_file__new(&file));
	cl_git_pass(git_attr_file__from_file(NULL, cl_fixture("attr/attr3"), file));
	cl_assert_strequal(cl_fixture("attr/attr3"), file->path);
	cl_assert(file->rules.length == 3);

	rule = get_rule(0);
	cl_assert_strequal("*.java", rule->match.pattern);
	cl_assert(rule->assigns.length == 3);
	assign = git_attr_rule__lookup_assignment(rule, "diff");
	cl_assert_strequal("diff", assign->name);
	cl_assert_strequal("java", assign->value);
	assign = git_attr_rule__lookup_assignment(rule, "crlf");
	cl_assert_strequal("crlf", assign->name);
	cl_assert(GIT_ATTR_FALSE == assign->value);
	assign = git_attr_rule__lookup_assignment(rule, "myAttr");
	cl_assert_strequal("myAttr", assign->name);
	cl_assert(GIT_ATTR_TRUE == assign->value);
	assign = git_attr_rule__lookup_assignment(rule, "missing");
	cl_assert(assign == NULL);

	rule = get_rule(1);
	cl_assert_strequal("NoMyAttr.java", rule->match.pattern);
	cl_assert(rule->assigns.length == 1);
	assign = get_assign(rule, 0);
	cl_assert_strequal("myAttr", assign->name);
	cl_assert(assign->value == NULL);

	rule = get_rule(2);
	cl_assert_strequal("README", rule->match.pattern);
	cl_assert(rule->assigns.length == 1);
	assign = get_assign(rule, 0);
	cl_assert_strequal("caveat", assign->name);
	cl_assert_strequal("unspecified", assign->value);

	git_attr_file__free(file);
}
Example #9
static void check_one_assign(
	git_attr_file *file,
	int rule_idx,
	int assign_idx,
	const char *pattern,
	const char *name,
	enum attr_expect_t expected,
	const char *expected_str)
{
	git_attr_rule *rule = get_rule(rule_idx);
	git_attr_assignment *assign = get_assign(rule, assign_idx);

	cl_assert_equal_s(pattern, rule->match.pattern);
	cl_assert(rule->assigns.length == 1);
	cl_assert_equal_s(name, assign->name);
	cl_assert(assign->name_hash == git_attr_file__name_hash(assign->name));

	attr_check_expected(expected, expected_str, assign->value);
}
Example #10
// Right associative
static void binary_right()
{
	// Remember the operator.
	token_type operator_type = parser.previous.type;

	// Compile the right operand.
	ParseRule *rule = get_rule(operator_type);
	parse_precedence((precedence)(rule->precedence));
	// Right hand side now loaded.

	// Emit the operator instruction.
	switch (operator_type)
	{
		case TOKEN_POW:
			emit_byte(OP_POW);
			break;
		default:
			printf("Invalid binary-right operator %d\n", operator_type);
			return; // Unreachable.
	}
}
Example #11
/*
 * Parse the input stream and return an action stack.
 * See Wikipedia again.
 */
static obj_t *parse(instream_t *in)
{
    AUTO_ROOT(actions, NIL);
    AUTO_ROOT(yylval, NIL);
    AUTO_ROOT(tmp, make_fixnum(TOK_EOF));
    AUTO_ROOT(stack, NIL);
    stack_push(&stack, tmp);
    tmp = make_fixnum(sym_index(start_symbol));
    stack_push(&stack, tmp);
    int tok = yylex(&yylval, in);
    while (true) {
	int sym = fixnum_value(stack_pop(&stack));
	assert(0 <= sym && sym < symbols_size);
	uint_fast8_t rule = get_rule(symbols[sym], tok);
	if (rule != NO_RULE) {
	    const production_t *pp = &grammar[rule];
	    int j;
	    for (j = strlen(pp->p_rhs); --j >= 0; ) {
		tmp = make_fixnum(sym_index(pp->p_rhs[j]));
		stack_push(&stack, tmp);
	    }
	    if (pp->p_action)
		stack_push(&actions, *pp->p_action);
	} else {
	    if (sym == TOK_EOF)
		break;
	    /* XXX raise an exception here. */
	    assert(sym == tok && "syntax error");
	    if (!is_null(yylval))
		stack_push(&actions, yylval);
	    if (!stack_is_empty(actions) &&
		fixnum_value(stack_top(stack)) == TOK_EOF)
		break;
	    yylval = NIL;
	    tok = yylex(&yylval, in);
	}
    }
    POP_FUNCTION_ROOTS();
    return actions;
}
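get_rule(symbols[sym], tok) in Example #11 plays the role of the LL(1) parse-table lookup the comment's Wikipedia reference alludes to: for a nonterminal and the current lookahead token it yields the index of the production in grammar[] to expand, or NO_RULE. A minimal sketch under that assumption (N_NONTERMINALS, N_TOKENS, symbol_t, and nonterminal_index are illustrative stand-ins; only get_rule, NO_RULE, grammar, and production_t appear in the example):

#include <stdint.h>

#define NO_RULE ((uint_fast8_t)0xff)

/* parse_table[nonterminal][token] holds an index into grammar[],
 * or NO_RULE when no production applies (terminal match or syntax error) */
static uint_fast8_t parse_table[N_NONTERMINALS][N_TOKENS];

static uint_fast8_t get_rule(symbol_t sym, int tok)   /* symbol_t: whatever symbols[] holds */
{
	int nt = nonterminal_index(sym);
	if (nt < 0)
		return NO_RULE;   /* terminals are never expanded */
	return parse_table[nt][tok];
}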
Example #12
static void check_one_assign(
	git_attr_file *file,
	int rule_idx,
	int assign_idx,
	const char *pattern,
	const char *name,
	const char *value,
	int is_allocated)
{
	git_attr_rule *rule = get_rule(rule_idx);
	git_attr_assignment *assign = get_assign(rule, assign_idx);

	cl_assert_strequal(pattern, rule->match.pattern);
	cl_assert(rule->assigns.length == 1);
	cl_assert_strequal(name, assign->name);
	cl_assert(assign->name_hash == git_attr_file__name_hash(assign->name));
	cl_assert(assign->is_allocated == is_allocated);
	if (is_allocated)
		cl_assert_strequal(value, assign->value);
	else
		cl_assert(assign->value == value);
}
Example #13
int check_expression(Resources *res, TToken **last_token, index_t *last_index) {
    args_assert(res != NULL, INTERNAL_ERROR);

    TToken *input_token = NULL;
    TToken *top_token = NULL;
    TToken *tmp = NULL;
    index_t top_index = ZERO_INDEX;
    index_t input_index = ZERO_INDEX;
    TStack stack;
    int iRet = RETURN_OK;
    int return_type;

    init_stack(&stack);

    new_item(&res->struct_buff, top_index, top_token);
    top_token->token_type = END_OF_EXPR;
    push(&res->struct_buff, &stack, top_index); // $ on top of the stack

    if ((*last_token) != NULL) 
        input_index = *last_index;
    else 
        input_index = get_token(res->source, &res->string_buff, &res->struct_buff);
    
    catch_internal_error(
        dereference_structure(&res->struct_buff, input_index, (void **)&input_token),
        INTERNAL_ERROR,
        "Failed to dereference structure buffer."
    );

    if (input_token->token_type == ERRORT) {
        iRet = LEXICAL_ERROR;
        goto EXIT;
    }
    
        
    catch_internal_error(
        dereference_structure(&res->struct_buff, top_index, (void **)&top_token),
        INTERNAL_ERROR,
        "Failed to dereference structure buffer."
    );

    do {
#if DEBUG
         print_stack(&res->struct_buff, &stack);
#endif
         debug_print("%s %d\n", "TOP", top_token->token_type);
         debug_print("%s %d\n", "INPUT", input_token->token_type);
        
        if (top_token->token_type == IDENTIFIER 
            && input_token->token_type == OPENING_BRACKET) {
            debug_print("%s\n", "FUNCTION CALL IN EXPR");
            
            index_t last_id = top_token->token_index;
            catch_undefined_error(is_func_declared(res, last_id),
                                 SEMANTIC_ERROR, "Function declaration check failed.", 1
            );
            
            dereference_structure(&res->struct_buff, input_index, (void **)last_token);

            if ((iRet = generate_function_call(res, last_id)) != 0) goto EXIT;
            return_type = get_return_type(res, top_token->token_index);
            catch_internal_error(return_type, SYNTAX_ERROR, "Failed to get function return type.");

            // Reduction of function call
            if((iRet = reduce(&res->struct_buff, &stack, return_type)) != RETURN_OK)
                goto EXIT;

            top_index = stack.top;
            catch_syntax_error(
                get_first_token(&res->struct_buff, &stack, &top_index),
                INTERNAL_ERROR,
                "Failed to get first token", 1
            );
            input_index = get_token(res->source, &res->string_buff, &res->struct_buff);
            catch_internal_error(
                dereference_structure(&res->struct_buff, input_index, (void **)&input_token),
                INTERNAL_ERROR,
                "Failed to dereference structure buffer."
            );

            if (input_token->token_type == ERRORT) {
                iRet = LEXICAL_ERROR;
                goto EXIT;
            }

            catch_internal_error(
                dereference_structure(&res->struct_buff, top_index, (void **)&top_token),
                INTERNAL_ERROR,
                "Failed to dereference structure buffer."
            );
            if (type_filter(top_token->token_type) == END_OF_EXPR &&
                type_filter(input_token->token_type) == END_OF_EXPR)
                break;

        }

        switch(precedence_table[type_filter(top_token->token_type)]
                               [type_filter(input_token->token_type)]) {
            case H:
                debug_print("%s\n", "CASE H");
                top_index = input_index;
                push(&res->struct_buff, &stack, top_index);
                input_index = get_token(res->source, &res->string_buff, &res->struct_buff);
                catch_internal_error(
                    dereference_structure(&res->struct_buff, input_index, (void **)&input_token),
                    INTERNAL_ERROR,
                    "Failed to dereference structure buffer."
                );
                
                if (input_token->token_type == ERRORT) {
                    iRet = LEXICAL_ERROR;
                    goto EXIT;
                }

                catch_internal_error(
                    dereference_structure(&res->struct_buff, top_index, (void **)&top_token),
                    INTERNAL_ERROR,
                    "Failed to dereference structure buffer."
                );

                break;

            case S:
                debug_print("%s\n", "CASE S");
                new_item(&res->struct_buff, top_index, top_token);
                catch_internal_error(
                    dereference_structure(&res->struct_buff, stack.top, (void **)&tmp),
                    INTERNAL_ERROR,
                    "Failed to dereference structure buffer."
                );

                top_token->token_type = SHIFT;

                if (tmp->token_type == RVALUE) {
                    index_t rvalue_index = stack.top;
                    pop(&res->struct_buff, &stack);
                    push(&res->struct_buff, &stack, top_index);
                    push(&res->struct_buff, &stack, rvalue_index);

                } else
                    push(&res->struct_buff, &stack, top_index);
                
                catch_internal_error(
                    dereference_structure(&res->struct_buff, input_index, (void **)&input_token),
                    INTERNAL_ERROR,
                    "Failed to dereference structure buffer."
                );

                top_index = input_index;
                push(&res->struct_buff, &stack, top_index);
                input_index = get_token(res->source, &res->string_buff, &res->struct_buff);
                catch_internal_error(
                    dereference_structure(&res->struct_buff, input_index, (void **)&input_token),
                    INTERNAL_ERROR,
                    "Failed to dereference structure buffer."
                );

                if (input_token->token_type == ERRORT) {
                    iRet = LEXICAL_ERROR;
                    goto EXIT;
                }

                catch_internal_error(
                    dereference_structure(&res->struct_buff, top_index, (void **)&top_token),
                    INTERNAL_ERROR,
                    "Failed to dereference structure buffer."
                );
                break;
            
            case R:
                debug_print("%s\n", "CASE R");
                if ((iRet = get_rule(res, &stack)) != RETURN_OK)
                    goto EXIT;
                
                top_index = stack.top;
                
                catch_syntax_error(
                    get_first_token(&res->struct_buff, &stack, &top_index),
                    INTERNAL_ERROR,
                    "Failed to get first token", 1
                );
                catch_internal_error(
                    dereference_structure(&res->struct_buff, top_index, (void **)&top_token),
                    INTERNAL_ERROR,
                    "Failed to dereference structure buffer."
                );
                break;
 
            case E:
                debug_print("%s\n", "CASE E");
                if (type_filter(top_token->token_type) == END_OF_EXPR && 
                    type_filter(input_token->token_type) == CLOSING_BRACKET) {
                    catch_internal_error(
                         dereference_structure(&res->struct_buff, input_index, (void **)last_token),
                         INTERNAL_ERROR,
                         "Failed to dereference structure buffer."
                    );
                    
                    catch_internal_error(
                         dereference_structure(&res->struct_buff, stack.top, (void **)&top_token),
                         INTERNAL_ERROR,
                         "Failed to dereference structure buffer."
                    );
                    if (top_token->original_type == 0) {      // Empty expression, there was nothing reduced on top
                        debug_print("%s: %d\n", "EMPTY EXPRESSION RETURN", SYNTAX_ERROR);
                        iRet = SYNTAX_ERROR;
                        goto EXIT;
                    }

                    goto FINISH;

                }

                iRet = SYNTAX_ERROR;
                goto EXIT;
            
            default:
                debug_print("%s", "DEFAULT\n");
                iRet = INTERNAL_ERROR;
                goto EXIT;
        }
                  
    } while (type_filter(top_token->token_type) != END_OF_EXPR || type_filter(input_token->token_type) != END_OF_EXPR);
    
    catch_internal_error(
         dereference_structure(&res->struct_buff, input_index, (void **)last_token),
         INTERNAL_ERROR,
         "Failed to dereference structure buffer."
    );
    
    catch_internal_error(
         dereference_structure(&res->struct_buff, stack.top, (void **)&top_token),
         INTERNAL_ERROR,
         "Failed to dereference structure buffer."
    );

FINISH:
    debug_print("%s: %d\n", "TYPE OF EXPRESSION", top_token->original_type);
    // send type of expression back to syntax_analysis
    (*last_token)->original_type = top_token->original_type;
    
    // set type of stack top on runtime stack
    catch_internal_error(new_instruction_int_int(&res->instruction_buffer, 0lu, top_token->original_type, 0, SET_TYPE),
                         INTERNAL_ERROR, "Failed to generate new instruction");
    

EXIT:
    debug_print("%s: %d\n", "RETURN", iRet);
    return iRet;
}
Example #14
void MultiGridRefiner::refine()
{
	assert(m_pMG && "refiner has to be assigned to a multi-grid!");
	if(!m_pMG)
		return;

//	the multi-grid
	MultiGrid& mg = *m_pMG;

//	make sure that the required options are enabled.
	if(!mg.option_is_enabled(GRIDOPT_FULL_INTERCONNECTION))
	{
		LOG("WARNING in MultiGridRefiner::refine(): auto-enabling GRIDOPT_FULL_INTERCONNECTION.\n");
		mg.enable_options(GRIDOPT_FULL_INTERCONNECTION);
	}

//	access position attachments
	Grid::VertexAttachmentAccessor<APosition> aaPos;
	if(mg.has_vertex_attachment(aPosition))
		aaPos.access(mg, aPosition);

//	collect objects for refine
	collect_objects_for_refine();

//	notify derivates that refinement begins
	refinement_step_begins();
	
//	cout << "num marked edges: " << m_selMarks.num<Edge>() << endl;
//	cout << "num marked faces: " << m_selMarks.num<Face>() << endl;

//	we want to add new elements in a new layer.
	bool bHierarchicalInsertionWasEnabled = mg.hierarchical_insertion_enabled();
	if(!bHierarchicalInsertionWasEnabled)
		mg.enable_hierarchical_insertion(true);


//	some buffers
	vector<Vertex*> vVrts;
	vector<Vertex*> vEdgeVrts;
	vector<Edge*>	vEdges;
	vector<Face*>		vFaces;

//	some repeatedly used objects
	EdgeDescriptor ed;
	FaceDescriptor fd;
	VolumeDescriptor vd;

//LOG("creating new vertices\n");
//	create new vertices from marked vertices
	for(VertexIterator iter = m_selMarks.begin<Vertex>();
		iter != m_selMarks.end<Vertex>(); ++iter)
	{
		Vertex* v = *iter;
		if(!mg.get_child_vertex(v))
		{
		//	create a new vertex in the next layer.
			Vertex* nVrt = *mg.create_by_cloning(v, v);

			if(aaPos.valid())
			{
				aaPos[nVrt] = aaPos[v];
			//	change z-coord to visualise the hierarchy
				//aaPos[nVrt].z() += 0.01;
			}
		}
	}

//LOG("creating new edges\n");
//	create new vertices and edges from marked edges
	for(EdgeIterator iter = m_selMarks.begin<Edge>();
		iter != m_selMarks.end<Edge>(); ++iter)
	{
	//	collect_objects_for_refine removed all edges that already were
	//	refined. No need to check that again.
		Edge* e = *iter;
		int rule = get_rule(e);
		switch(rule)
		{
		case RM_COPY:
			{
			//	clone the edge.
				ed.set_vertices(mg.get_child_vertex(e->vertex(0)),
								mg.get_child_vertex(e->vertex(1)));
				Edge* newEdge = *mg.create_by_cloning(e, ed, e);
				set_status(newEdge, SM_COPY);
			}break;

		default:
			{
			//	create two new edges by edge-split
				RegularVertex* nVrt = *mg.create<RegularVertex>(e);
				Vertex* substituteVrts[2];
				substituteVrts[0] = mg.get_child_vertex(e->vertex(0));
				substituteVrts[1] = mg.get_child_vertex(e->vertex(1));

				if(aaPos.valid())
				{
					VecScaleAdd(aaPos[nVrt], 0.5, aaPos[e->vertex(0)],
											0.5, aaPos[e->vertex(1)]);
				//	change z-coord to visualise the hierarchy
					//aaPos[nVrt].z() += 0.01;
				}

			//	split the edge
				e->refine(vEdges, nVrt, substituteVrts);
				assert((vEdges.size() == 2) && "RegularEdge refine produced wrong number of edges.");
				mg.register_element(vEdges[0], e);
				mg.register_element(vEdges[1], e);
				set_status(vEdges[0], SM_REGULAR);
				set_status(vEdges[1], SM_REGULAR);
			}break;
		}
	}

//LOG("creating new faces\n");
//	create new vertices and faces from marked faces
	for(FaceIterator iter = m_selMarks.begin<Face>();
		iter != m_selMarks.end<Face>(); ++iter)
	{
		Face* f = *iter;

		int rule = get_rule(f);
		switch(rule)
		{
		case RM_COPY:
			{
			//	clone the face.
				if(fd.num_vertices() != f->num_vertices())
					fd.set_num_vertices(f->num_vertices());

				for(size_t i = 0; i < fd.num_vertices(); ++i)
					fd.set_vertex(i, mg.get_child_vertex(f->vertex(i)));

				Face* nf = *mg.create_by_cloning(f, fd, f);
				set_status(nf, SM_COPY);
			}break;

		default:
			{
			//	collect child-vertices
				vVrts.clear();
				for(uint j = 0; j < f->num_vertices(); ++j){
					vVrts.push_back(mg.get_child_vertex(f->vertex(j)));
				}

			//	collect the associated edges
				vEdgeVrts.clear();
				//bool bIrregular = false;
				for(uint j = 0; j < f->num_edges(); ++j){
					Vertex* vrt = mg.get_child_vertex(mg.get_edge(f, j));
					vEdgeVrts.push_back(vrt);
					//if(!vrt)
					//	bIrregular = true;
				}
/*
				if(bIrregular){
					assert((get_rule(f) != RM_REFINE) && "Bad refinement-rule set during collect_objects_for_refine!");
		//TODO:	care about anisotropy
					set_rule(f, RM_IRREGULAR);
				}
*/
				Vertex* newVrt;
				if(f->refine(vFaces, &newVrt, &vEdgeVrts.front(), NULL, &vVrts.front())){
				//	if a new vertex was generated, we have to register it
					if(newVrt){
						mg.register_element(newVrt, f);
						if(aaPos.valid()){
							aaPos[newVrt] = CalculateCenter(f, aaPos);
						//	change z-coord to visualise the hierarchy
							//aaPos[newVrt].z() += 0.01;
						}
					}

					int oldRule = get_rule(f);

				//	register the new faces and assign status
					for(size_t j = 0; j < vFaces.size(); ++j){
						mg.register_element(vFaces[j], f);
						switch(oldRule)
						{
						case RM_REFINE:	set_status(vFaces[j], SM_REGULAR); break;
						case RM_IRREGULAR:	set_status(vFaces[j], SM_IRREGULAR); break;
						default:
							assert((oldRule == RM_REFINE) && "rule not yet handled.");//always fails.
							break;
						}
					}
				}
				else{
					LOG("  WARNING in Refine: could not refine face.\n");
				}
			}
		}
	}

//	done - clean up
	if(!bHierarchicalInsertionWasEnabled)
		mg.enable_hierarchical_insertion(false);

	m_selMarks.clear();
	
//	notify derivates that refinement ends
	refinement_step_ends();
}
Example #15
void print_rule_set(struct rule_set *rules)
{
  int i;
  for(i=0; i < rules->num_rules; ++i) 
    print_syms(get_rule(i, rules), rules->rule_size, rules->num_rules);
}
Example #16
void test_attr_file__match_variants(void)
{
	git_attr_file *file;
	git_attr_rule *rule;
	git_attr_assignment *assign;

	cl_git_pass(git_attr_file__new(&file));
	cl_git_pass(git_attr_file__from_file(NULL, cl_fixture("attr/attr1"), file));
	cl_assert_strequal(cl_fixture("attr/attr1"), file->path);
	cl_assert(file->rules.length == 10);

	/* let's do a thorough check of this rule, then just verify
	 * the things that are unique for the later rules
	 */
	rule = get_rule(0);
	cl_assert(rule);
	cl_assert_strequal("pat0", rule->match.pattern);
	cl_assert(rule->match.length == strlen("pat0"));
	cl_assert(rule->match.flags == 0);
	cl_assert(rule->assigns.length == 1);
	assign = get_assign(rule,0);
	cl_assert_strequal("attr0", assign->name);
	cl_assert(assign->name_hash == git_attr_file__name_hash(assign->name));
	cl_assert(assign->value == GIT_ATTR_TRUE);
	cl_assert(!assign->is_allocated);

	rule = get_rule(1);
	cl_assert_strequal("pat1", rule->match.pattern);
	cl_assert(rule->match.length == strlen("pat1"));
	cl_assert(rule->match.flags == GIT_ATTR_FNMATCH_NEGATIVE);

	rule = get_rule(2);
	cl_assert_strequal("pat2", rule->match.pattern);
	cl_assert(rule->match.length == strlen("pat2"));
	cl_assert(rule->match.flags == GIT_ATTR_FNMATCH_DIRECTORY);

	rule = get_rule(3);
	cl_assert_strequal("pat3dir/pat3file", rule->match.pattern);
	cl_assert(rule->match.flags == GIT_ATTR_FNMATCH_FULLPATH);

	rule = get_rule(4);
	cl_assert_strequal("pat4.*", rule->match.pattern);
	cl_assert(rule->match.flags == 0);

	rule = get_rule(5);
	cl_assert_strequal("*.pat5", rule->match.pattern);

	rule = get_rule(7);
	cl_assert_strequal("pat7[a-e]??[xyz]", rule->match.pattern);
	cl_assert(rule->assigns.length == 1);
	assign = get_assign(rule,0);
	cl_assert_strequal("attr7", assign->name);
	cl_assert(assign->value == GIT_ATTR_TRUE);

	rule = get_rule(8);
	cl_assert_strequal("pat8 with spaces", rule->match.pattern);
	cl_assert(rule->match.length == strlen("pat8 with spaces"));
	cl_assert(rule->match.flags == 0);

	rule = get_rule(9);
	cl_assert_strequal("pat9", rule->match.pattern);

	git_attr_file__free(file);
}
Example #17
void test_attr_file__assign_variants(void)
{
	git_attr_file *file;
	git_attr_rule *rule;
	git_attr_assignment *assign;

	cl_git_pass(git_attr_file__new(&file));
	cl_git_pass(git_attr_file__from_file(NULL, cl_fixture("attr/attr2"), file));
	cl_assert_strequal(cl_fixture("attr/attr2"), file->path);
	cl_assert(file->rules.length == 11);

	check_one_assign(file, 0, 0, "pat0", "simple", GIT_ATTR_TRUE, 0);
	check_one_assign(file, 1, 0, "pat1", "neg", GIT_ATTR_FALSE, 0);
	check_one_assign(file, 2, 0, "*", "notundef", GIT_ATTR_TRUE, 0);
	check_one_assign(file, 3, 0, "pat2", "notundef", NULL, 0);
	check_one_assign(file, 4, 0, "pat3", "assigned", "test-value", 1);
	check_one_assign(file, 5, 0, "pat4", "rule-with-more-chars", "value-with-more-chars", 1);
	check_one_assign(file, 6, 0, "pat5", "empty", GIT_ATTR_TRUE, 0);
	check_one_assign(file, 7, 0, "pat6", "negempty", GIT_ATTR_FALSE, 0);

	rule = get_rule(8);
	cl_assert_strequal("pat7", rule->match.pattern);
	cl_assert(rule->assigns.length == 5);
	/* assignments will be sorted by hash value, so we have to do
	 * lookups by search instead of by position
	 */
	assign = git_attr_rule__lookup_assignment(rule, "multiple");
	cl_assert(assign);
	cl_assert_strequal("multiple", assign->name);
	cl_assert(assign->value == GIT_ATTR_TRUE);
	assign = git_attr_rule__lookup_assignment(rule, "single");
	cl_assert(assign);
	cl_assert_strequal("single", assign->name);
	cl_assert(assign->value == GIT_ATTR_FALSE);
	assign = git_attr_rule__lookup_assignment(rule, "values");
	cl_assert(assign);
	cl_assert_strequal("values", assign->name);
	cl_assert_strequal("1", assign->value);
	assign = git_attr_rule__lookup_assignment(rule, "also");
	cl_assert(assign);
	cl_assert_strequal("also", assign->name);
	cl_assert_strequal("a-really-long-value/*", assign->value);
	assign = git_attr_rule__lookup_assignment(rule, "happy");
	cl_assert(assign);
	cl_assert_strequal("happy", assign->name);
	cl_assert_strequal("yes!", assign->value);
	assign = git_attr_rule__lookup_assignment(rule, "other");
	cl_assert(!assign);

	rule = get_rule(9);
	cl_assert_strequal("pat8", rule->match.pattern);
	cl_assert(rule->assigns.length == 2);
	assign = git_attr_rule__lookup_assignment(rule, "again");
	cl_assert(assign);
	cl_assert_strequal("again", assign->name);
	cl_assert(assign->value == GIT_ATTR_TRUE);
	assign = git_attr_rule__lookup_assignment(rule, "another");
	cl_assert(assign);
	cl_assert_strequal("another", assign->name);
	cl_assert_strequal("12321", assign->value);

	check_one_assign(file, 10, 0, "pat9", "at-eof", GIT_ATTR_FALSE, 0);

	git_attr_file__free(file);
}
Example #18
void njd_set_accent_type(NJD * njd)
{
   NJDNode *node;
   NJDNode *top_node = NULL;
   char rule[MAXBUFLEN];
   int add_type = 0;
   int mora_size = 0;

   if (njd == NULL || njd->head == NULL)
      return;

   for (node = njd->head; node != NULL; node = node->next) {
      if (NJDNode_get_string(node) == NULL)
         continue;
      if ((node == njd->head) || (NJDNode_get_chain_flag(node) != 1)) {
         /* store the top node */
         top_node = node;
         mora_size = 0;
      } else if (node->prev != NULL && NJDNode_get_chain_flag(node) == 1) {
         /* get accent change type */
         get_rule(NJDNode_get_chain_rule(node), NJDNode_get_pos(node->prev), rule, &add_type);

         /* change accent type */
         if (strcmp(rule, "*") == 0) {  /* no change */
         } else if (strcmp(rule, "F1") == 0) {  /* for ancillary word */
         } else if (strcmp(rule, "F2") == 0) {
            if (NJDNode_get_acc(top_node) == 0)
               NJDNode_set_acc(top_node, mora_size + add_type);
         } else if (strcmp(rule, "F3") == 0) {
            if (NJDNode_get_acc(top_node) != 0)
               NJDNode_set_acc(top_node, mora_size + add_type);
         } else if (strcmp(rule, "F4") == 0) {
            NJDNode_set_acc(top_node, mora_size + add_type);
         } else if (strcmp(rule, "F5") == 0) {
            NJDNode_set_acc(top_node, 0);
         } else if (strcmp(rule, "C1") == 0) {  /* for noun */
            NJDNode_set_acc(top_node, mora_size + NJDNode_get_acc(node));
         } else if (strcmp(rule, "C2") == 0) {
            NJDNode_set_acc(top_node, mora_size + 1);
         } else if (strcmp(rule, "C3") == 0) {
            NJDNode_set_acc(top_node, mora_size);
         } else if (strcmp(rule, "C4") == 0) {
            NJDNode_set_acc(top_node, 0);
         } else if (strcmp(rule, "C5") == 0) {
         } else if (strcmp(rule, "P1") == 0) {  /* for postfix */
            if (NJDNode_get_acc(node) == 0)
               NJDNode_set_acc(top_node, 0);
            else
               NJDNode_set_acc(top_node, mora_size + NJDNode_get_acc(node));
         } else if (strcmp(rule, "P2") == 0) {
            if (NJDNode_get_acc(node) == 0)
               NJDNode_set_acc(top_node, mora_size + 1);
            else
               NJDNode_set_acc(top_node, mora_size + NJDNode_get_acc(node));
         } else if (strcmp(rule, "P6") == 0) {
            NJDNode_set_acc(top_node, 0);
         } else if (strcmp(rule, "P14") == 0) {
            if (NJDNode_get_acc(node) != 0)
               NJDNode_set_acc(top_node, mora_size + NJDNode_get_acc(node));
         }
      }

      /* change accent type for digit */
      if (node->prev != NULL && NJDNode_get_chain_flag(node) == 1 &&
          strcmp(NJDNode_get_pos_group1(node->prev), NJD_SET_ACCENT_TYPE_KAZU) == 0 &&
          strcmp(NJDNode_get_pos_group1(node), NJD_SET_ACCENT_TYPE_KAZU) == 0) {
         if (strcmp(NJDNode_get_string(node), NJD_SET_ACCENT_TYPE_JYUU) == 0) { /* 10^1 */
            if (NJDNode_get_string(node->prev) != NULL &&
                (strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_SAN) == 0 ||
                 strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_YON) == 0 ||
                 strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_KYUU) == 0 ||
                 strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_NAN) == 0 ||
                 strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_SUU) == 0)) {
               NJDNode_set_acc(node->prev, 1);
            } else {
               NJDNode_set_acc(node->prev, 1);
            }
            if (NJDNode_get_string(node->prev) != NULL &&
                (strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_GO) == 0 ||
                 strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_ROKU) == 0 ||
                 strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_HACHI) == 0)) {
               if (node->next != NULL && NJDNode_get_string(node->next) != NULL
                   && (strcmp(NJDNode_get_string(node->next), NJD_SET_ACCENT_TYPE_ICHI) == 0
                       || strcmp(NJDNode_get_string(node->next), NJD_SET_ACCENT_TYPE_NI) == 0
                       || strcmp(NJDNode_get_string(node->next), NJD_SET_ACCENT_TYPE_SAN) == 0
                       || strcmp(NJDNode_get_string(node->next), NJD_SET_ACCENT_TYPE_YON) == 0
                       || strcmp(NJDNode_get_string(node->next), NJD_SET_ACCENT_TYPE_GO) == 0
                       || strcmp(NJDNode_get_string(node->next), NJD_SET_ACCENT_TYPE_ROKU) == 0
                       || strcmp(NJDNode_get_string(node->next), NJD_SET_ACCENT_TYPE_NANA) == 0
                       || strcmp(NJDNode_get_string(node->next), NJD_SET_ACCENT_TYPE_HACHI) == 0
                       || strcmp(NJDNode_get_string(node->next), NJD_SET_ACCENT_TYPE_KYUU) == 0))
                  NJDNode_set_acc(node->prev, 0);
            }
         } else if (strcmp(NJDNode_get_string(node), NJD_SET_ACCENT_TYPE_HYAKU) == 0) { /* 10^2 */
            if (NJDNode_get_string(node->prev) != NULL
                && strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_NANA) == 0) {
               NJDNode_set_acc(node->prev, 2);
            } else if (NJDNode_get_string(node->prev) != NULL &&
                       (strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_SAN) == 0 ||
                        strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_YON) == 0 ||
                        strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_KYUU) == 0 ||
                        strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_NAN) == 0)) {
               NJDNode_set_acc(node->prev, 1);
            } else {
               NJDNode_set_acc(node->prev,
                               NJDNode_get_mora_size(node->prev) + NJDNode_get_mora_size(node));
            }
         } else if (strcmp(NJDNode_get_string(node), NJD_SET_ACCENT_TYPE_SEN) == 0) {   /* 10^3 */
            NJDNode_set_acc(node->prev, NJDNode_get_mora_size(node->prev) + 1);
         } else if (strcmp(NJDNode_get_string(node), NJD_SET_ACCENT_TYPE_MAN) == 0) {   /* 10^4 */
            NJDNode_set_acc(node->prev, NJDNode_get_mora_size(node->prev) + 1);
         } else if (strcmp(NJDNode_get_string(node), NJD_SET_ACCENT_TYPE_OKU) == 0) {   /* 10^8 */
            if (NJDNode_get_string(node->prev) != NULL &&
                (strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_ICHI) == 0 ||
                 strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_ROKU) == 0 ||
                 strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_NANA) == 0 ||
                 strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_HACHI) == 0 ||
                 strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_IKU) == 0)) {
               NJDNode_set_acc(node->prev, 2);
            } else {
               NJDNode_set_acc(node->prev, 1);
            }
         } else if (strcmp(NJDNode_get_string(node), NJD_SET_ACCENT_TYPE_CHOU) == 0) {  /* 10^12 */
            if (NJDNode_get_string(node->prev) != NULL &&
                (strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_ROKU) == 0 ||
                 strcmp(NJDNode_get_string(node->prev), NJD_SET_ACCENT_TYPE_NANA) == 0)) {
               NJDNode_set_acc(node->prev, 2);
            } else {
               NJDNode_set_acc(node->prev, 1);
            }
         }
      }

      if (strcmp(NJDNode_get_string(node), NJD_SET_ACCENT_TYPE_JYUU) == 0 &&
          NJDNode_get_chain_flag(node) != 1 && node->next != NULL &&
          strcmp(NJDNode_get_pos_group1(node->next), NJD_SET_ACCENT_TYPE_KAZU) == 0) {
         NJDNode_set_acc(node, 0);
      }

      mora_size += NJDNode_get_mora_size(node);
   }
}
Example #19
//--------------------------------------------------------------------------------------------------------
// Function called by the main thread's helper thread. Deals with rules that have been added to the dep_q
// by checking if they are ready to be added to the in_q and be processed.
//--------------------------------------------------------------------------------------------------------
void* main_thread_helper(void* arg)
{
	rule_t* rule;
	str_node_t* dep;
	int i, j;
	int addRule, foundDep;

	while(1)
	{
		//check exit condition
		if(adding_Rules == 0 && dep_q->index == 0) break;

		//Wait for a signal that items have been added to dep_q
		//if(sem_trywait(&dep_sem)) printf("Sem get blocked!\n");
		sem_wait(&dep_sem);
		if(dep_q->index == 0) continue;//Do nothing if nothing is in dep_q

		for(i = 0; i < dep_q->index; i++) //each rule in dep_q
		{
			addRule = true;
			rule = dep_q->queue[i];
			for(dep = rule->deps; dep != NULL; dep = dep->next)
			{
				foundDep = false;
				for(j = 0; j < out_q->index; j++) //each rule in out_q
				{
					//printf("[%s] vs [%s]\n", dep->str, out_q->queue[j]->target);
					if(strcmp(dep->str, out_q->queue[j]->target) == 0) { foundDep = true; break; }
				} //end out_q
				//printf("(%d)\n", foundDep);
				if(foundDep == false) { addRule = false; break; }
			} //end deps
			//printf("Helper (%s) %d\n", rule->target, addRule);
			if(addRule)
			{
				if(add_rule(IN_QUEUE, rule))
				{
					//remove rule from dep queue
					get_rule(DEP_QUEUE, rule);
					printf("Helper: Rule added to in_q (%s) \n", rule->target);
					
					sem_post(&jobs_sem);
				}
			}
			else //rule's dependencies haven't been executed yet
			{
				//do something
				
			}
		} //end dep_q
	}
		/*
		//Since there is only one helper thread that should be removing items from dep_q,
		// don't need to worry about using a mutex when looking through items.
		//The case of items being added to dep_q by the main thread while the helper thread is
		// processing shouldn't be an issue
		for(i = 0; i < dep_q->index; i++)
		{
			//No need to hold the out_q mutex lock because items cant be taken out of it
			int addRule;
			int foundDep = false;
			rule = dep_q->queue[i];
			printf("Helper (%s)\n", rule->target);
			if(rule->deps == NULL) //something is wrong
			//Compare the rules dependencies with the rules in the out_q
			for(dep = rule->deps; dep != NULL; dep = dep->next)
			{
				addRule = false;
				for(j = 0; j < out_q->index; j++)
				{
					if(dep->str ==  out_q->queue[j]->target) { addRule |= true; break; }
					else { addRule &= false; }
				}
			}

			//rule's deps are in out_q
			if(addRule)
			{
				if(add_rule(IN_QUEUE, rule))
				{	
					//remove rule from dep queue
					get_rule(DEP_QUEUE, rule);
					printf("Helper: Rule added to in_q (%s) \n", rule->target);

					//notify workers of new job
					sem_post(&jobs_sem);
				}
			}
			else //rule's deps arent in out_q
			{
				//No more rules will be added to in_q and
				if(!adding_Rules && in_q->index == 0) { fprintf(stderr, "Rule in dep queue that will never be executed\n"); exit(1); }
			}
		}
	}
	*/
	printf("Helper(%u) exiting\n", (unsigned int)pthread_self());
	return NULL;
}
Example #20
static void binary()
{
	// Remember the operator.
	token_type operator_type = parser.previous.type;

	// Compile the right operand.
	ParseRule *rule = get_rule(operator_type);
	parse_precedence((precedence)(rule->precedence + 1));
	// Right hand side now loaded.

	// Emit the operator instruction.
	switch (operator_type)
	{
		case TOKEN_EQUAL:
			emit_byte(OP_EQUAL);
			break;
		case TOKEN_NOT_EQUAL:
			emit_byte(OP_EQUAL);
			emit_byte(OP_NOT);
			break;
		case TOKEN_GREATER:
			emit_byte(OP_GREATER);
			break;
		case TOKEN_LESS:
			emit_byte(OP_LESS);
			break;
		case TOKEN_GREATER_EQUAL:
			emit_byte(OP_LESS);
			emit_byte(OP_NOT);
			break;
		case TOKEN_LESS_EQUAL:
			emit_byte(OP_GREATER);
			emit_byte(OP_NOT);
			break;
		case TOKEN_RIGHT_SHIFT:
			emit_byte(OP_RIGHT_SHIFT);
			break;
		case TOKEN_LEFT_SHIFT:
			emit_byte(OP_LEFT_SHIFT);
			break;
		case TOKEN_RIGHT_SHIFT_LOGIC:
			emit_byte(OP_RIGHT_SHIFT_LOGIC);
			break;
		case TOKEN_BIT_AND:
			emit_byte(OP_BIT_AND);
			break;
		case TOKEN_BIT_OR:
			emit_byte(OP_BIT_OR);
			break;
		case TOKEN_BIT_XOR:
			emit_byte(OP_BIT_XOR);
			break;
		case TOKEN_PLUS:
			emit_byte(OP_ADD);
			break;
		case TOKEN_MINUS:
			emit_byte(OP_SUB);
			break;
		case TOKEN_MULT:
			emit_byte(OP_MULT);
			break;
		case TOKEN_DIV:
			emit_byte(OP_DIV);
			break;
		case TOKEN_POW:
			emit_byte(OP_POW);
			break;
		case TOKEN_AND:
			printf("Not implemented\n");
			break;
		default:
			printf("Invalid binary operator %d\n", operator_type);
			return; // Unreachable.
	}
}
Example #21
void MultiGridRefiner::
select_closure(std::vector<Vertex*>& vVrts)
{
	vector<Edge*> 	vEdges;//	vector used for temporary results
	vector<Face*> 		vFaces;//	vector used for temporary results
	vector<Volume*>		vVolumes;//	vector used for temporary results

//	regard the multi-grid as a grid
	Grid& grid =*static_cast<Grid*>(m_pMG);
	MultiGrid& mg = *m_pMG;
	
//	collect associated faces of refine-edges that will be used for the closure.
//	associated volumes will be collected implicitly later on from refine-faces.
	if(mg.num<Face>() > 0)
	{
	//	store end-iterator so that newly marked edges won't be considered.
		for(EdgeIterator iter = m_selMarks.begin<Edge>();
			iter != m_selMarks.end<Edge>(); ++iter)
		{
		//	as soon as we encounter an edge that is scheduled for COPY, we're done in here
			if(get_rule(*iter) == RM_COPY)
				break;
			
			CollectFaces(vFaces, grid, *iter);
			for(size_t i = 0; i < vFaces.size(); ++i){
				Face* f = vFaces[i];
				if(!m_selMarks.is_selected(f) && (!mg.has_children(f)))
				{
					mark_for_refinement(f);
					
				//	we now have to check all associated edges.
				//	unselected ones will be added as copy-elements
					size_t numRegular = 0;
					CollectEdges(vEdges, grid, f);
					for(size_t j = 0; j < vEdges.size(); ++j){
						Edge* e = vEdges[j];
						if(m_selMarks.is_selected(e)){
							if(get_rule(e) == RM_REFINE)
								++numRegular;
						}
						else{
							if(!mg.has_children(e)){
								set_rule(e, RM_COPY);
								mark_for_refinement(e);
							}
						}
					}
					
				//	set rule
				//	if all associated edges are refined regular,
				//	we'll refine the face regular, too.
					if(numRegular == vEdges.size())
						set_rule(f, RM_REFINE);
					else
						set_rule(f, RM_IRREGULAR);
						
				//	finally we have to make sure that all vertices are selected.
					for(size_t j = 0; j < f->num_vertices(); ++j)
					{
						if(!m_selMarks.is_selected(f->vertex(j))){
							if(get_copy_range() > 0)
								vVrts.push_back(f->vertex(j));
							mark_for_refinement(f->vertex(j));
							set_rule(f->vertex(j), RM_COPY);
						}
					}
				}
			}
		}
	}

//	collect associated volumes of refine-faces that will be used for the closure.
//	we don't have to check associated volumes of refine-edges, since
//	those are associated volumes of refine-faces, too.
	if(mg.num<Volume>() > 0)
	{
	//	store end-iterator so that newly marked faces won't be considered.
		for(FaceIterator iter = m_selMarks.begin<Face>();
			iter != m_selMarks.end<Face>(); ++iter)
		{
		//	as soon as we encounter a face that is scheduled for COPY, we're done in here
			if(get_rule(*iter) == RM_COPY)
				break;

			CollectVolumes(vVolumes, grid, *iter);
			for(size_t i = 0; i < vVolumes.size(); ++i){
				Volume* v = vVolumes[i];
				if(!m_selMarks.is_selected(v) && (!mg.has_children(v)))
				{
					mark_for_refinement(v);
					
				//	we now have to check all associated faces.
				//	unselected ones will be added as copy-elements.
					size_t numRegular = 0;
					CollectFaces(vFaces, grid, v);
					for(size_t j = 0; j < vFaces.size(); ++j){
						Face* f = vFaces[j];
						if(m_selMarks.is_selected(f)){
							if(get_rule(f) == RM_REFINE)
								++numRegular;
						}
						else{
							if(!mg.has_children(f)){
								set_rule(f, RM_COPY);
								mark_for_refinement(f);
							}
						}
					}
					
				//	set rule
				//	if all faces are refined regular, we'll refine the volume regular, too.
					if(numRegular == vFaces.size())
						set_rule(v, RM_REFINE);
					else
						set_rule(v, RM_IRREGULAR);
					
				//	we now have to check all associated edges.
				//	unselected ones will be added as copy-elements.
					CollectEdges(vEdges, grid, v);
					for(size_t j = 0; j < vEdges.size(); ++j){
						Edge* e = vEdges[j];
						if(!m_selMarks.is_selected(e)
							&& (!mg.has_children(e)))
						{
							set_rule(e, RM_COPY);
							mark_for_refinement(e);
						}
					}
					
				//	finally we have to make sure that all vertices are selected.
					for(size_t j = 0; j < v->num_vertices(); ++j)
					{
						if(!m_selMarks.is_selected(v->vertex(j))){
							if(get_copy_range() > 0)
								vVrts.push_back(v->vertex(j));
							mark_for_refinement(v->vertex(j));
							set_rule(v->vertex(j), RM_COPY);
						}
					}
				}
			}
		}
	}
}