/* Insert, immediately before stmt, a cast of new_node back to the type of
 * orig_node, and return the tree holding the converted value. */
static tree cast_to_orig_type(struct visited *visited, gimple stmt, const_tree orig_node, tree new_node)
{
	const_gimple cast_assign;
	gimple_stmt_iterator gsi;
	tree target_type;

	target_type = TREE_TYPE(orig_node);
	gsi = gsi_for_stmt(stmt);
	/* BEFORE_STMT: the conversion must dominate the original statement. */
	cast_assign = build_cast_stmt(visited, target_type, new_node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
	return get_lhs(cast_assign);
}
// Inserts a join in the rep and parse trees: validates the three Obs of the
// equation, then propagates the STRUCTURAL bit from both operands to the join.
void insert_join (Join eqn)
{
    LOG_DEBUG1( "O: inserting join Ob" )

    Ob join_ob = get_join(eqn);
    Assert3(!isDepricated(join_ob), "insert_join: bad join");
    Ob lhs_ob = get_lhs(eqn);
    Assert3(!isDepricated(lhs_ob), "insert_join: bad lhs");
    Ob rhs_ob = get_rhs(eqn);
    Assert3(!isDepricated(rhs_ob), "insert_join: bad rhs");

    // update bool properties: the join is structural only if both parts are
    join_ob(BOOL_PROPERTIES) |=
        STRUCTURAL & lhs_ob(BOOL_PROPERTIES) & rhs_ob(BOOL_PROPERTIES);
}
// Inserts a comp in the rep and parse trees: validates the three Obs of the
// equation, then propagates the STRUCTURAL bit from both operands to the comp.
void insert_comp (Comp eqn)
{
    LOG_DEBUG1( "O: inserting comp Ob" )

    Ob comp_ob = get_comp(eqn);
    Assert3(!isDepricated(comp_ob), "insert_comp: bad comp");
    Ob lhs_ob = get_lhs(eqn);
    Assert3(!isDepricated(lhs_ob), "insert_comp: bad lhs");
    Ob rhs_ob = get_rhs(eqn);
    Assert3(!isDepricated(rhs_ob), "insert_comp: bad rhs");

    // update bool properties: the comp is structural only if both parts are
    comp_ob(BOOL_PROPERTIES) |=
        STRUCTURAL & lhs_ob(BOOL_PROPERTIES) & rhs_ob(BOOL_PROPERTIES);
}
//insertion & removal void insert_app (App eqn) {//inserts an app in rep and parse trees LOG_DEBUG1( "O: inserting app Ob" ) Ob app = get_app(eqn); Assert3(!isDepricated(app), "insert_app: bad app"); Ob lhs = get_lhs(eqn); Assert3(!isDepricated(lhs), "insert_app: bad lhs"); Ob rhs = get_rhs(eqn); Assert3(!isDepricated(rhs), "insert_app: bad rhs"); //update bool properties app(BOOL_PROPERTIES) |= STRUCTURAL & lhs(BOOL_PROPERTIES) & rhs(BOOL_PROPERTIES); }
// Turn accumulated counts into conditional probabilities: every row of
// `counts` is divided by the per-annotation count of this rule's LHS node,
// range-checked, and the normalised table is then swapped into
// `probabilities` (leaving the old probabilities in `counts`).
void URuleTraining::update_probability(const AnnotatedNodeCountMap& nc_map)
{
  const std::vector<double>& lhs_node_counts = nc_map[get_lhs()];

  for (unsigned a = 0; a < counts.size(); ++a) {
    const double denom = lhs_node_counts.at(a);
    for (unsigned b = 0; b < counts[a].size(); ++b) {
      counts[a][b] /= denom;
      // report an obviously out-of-range value before the asserts fire
      if (counts[a][b] > 1 or counts[a][b] < 0) {
        std::cerr << counts[a][b] << std::endl;
      }
      assert(!std::isnan(counts[a][b]));
      // allow epsilon slack for floating-point rounding at the boundaries
      assert(counts[a][b] <= 1.0 + std::numeric_limits<double>::epsilon());
      assert(counts[a][b] >= 0.0 - std::numeric_limits<double>::epsilon());
    }
  }
  counts.swap(probabilities);
}
void URuleTraining::merge(const Merge_map& annotation_sets_to_merge, int split_size, const ProportionsMap& proportions, const AnnotatedLabelsInfo& a, const std::vector<std::map<int,int> >& annot_reorder) { //TODO get these centrally from grammar unsigned lhs_dim = probabilities.size(); unsigned rhs_dim = probabilities[0].size(); unsigned new_lhs_dim = a.get_number_of_annotations(get_lhs()); unsigned new_rhs_dim = a.get_number_of_annotations(get_rhs0()); // std::cout << "new lhs dim is " << new_lhs_dim // << " from sets to merge we have " << annotation_sets_to_merge[get_lhs()].size() // << std::endl; //First calculate what the probabilities are for the merged rules //the iterator that stores annotations for lhs and rhs; Merge_map::const_iterator annotation_sets_iter; //rhs0: calculate the probabities when rhs0 is merged int rhs = get_rhs0(); if ((annotation_sets_iter = annotation_sets_to_merge.find(rhs)) != annotation_sets_to_merge.end() ){ // std::cout << "it's here alrighty RHS...." << rhs << '\n' // << "new rhs dim is " << new_rhs_dim // << " from sets to merge we have " << annotation_sets_to_merge[get_rhs(0)].size() // << std::endl; //for each annotation_set to merge with this label id for (unsigned i = 0; i< annotation_sets_iter->second.size();++i){ // std::cout << "merging set " << annotation_sets_to_merge[rhs0][i] << std::endl; int start = annotation_sets_iter->second[i]; //for each annotation in set //(don't need to look at the first one - we add to the first one in the innermost loop) for (int rhs_a=start+1; rhs_a< (start + split_size); ++rhs_a){ // std::cout << "for each annotion in set rhs0: " << rhs0_a << std::endl; //for each annotion of lhs // std::cout << "Total number of annotations for LHS: " << num_annotations_lhs << std::endl; for(unsigned lhs_a = 0; lhs_a < lhs_dim; ++lhs_a){ probabilities[lhs_a][start] += probabilities[lhs_a][rhs_a]; probabilities[lhs_a][rhs_a] = 0.0;//i.e. 
null } } } } //lhs: calculate the probabities when lhs is merged int lhs = get_lhs(); if ((annotation_sets_iter = annotation_sets_to_merge.find(lhs)) != annotation_sets_to_merge.end() ){ //std::cout << "it's here alrighty LHS...." << lhs << std::endl; //new_lhs_dim = annotation_sets_to_merge[lhs].size(); //for each annotation_set to merge with this label id for (unsigned int i = 0; i< annotation_sets_iter->second.size();i++){ int start = annotation_sets_iter->second[i]; for(unsigned rhs_a = 0; rhs_a< rhs_dim; ++rhs_a) probabilities[start][rhs_a] *= proportions[lhs].at(start); for (int lhs_a = start + 1 ; lhs_a < (start + split_size); ++lhs_a){ //for each annotion of rhs0 for(unsigned rhs_a = 0; rhs_a< rhs_dim; ++rhs_a) { //add to what is already there probabilities[start][rhs_a] += proportions[lhs].at(lhs_a) * probabilities[lhs_a][rhs_a]; probabilities[lhs_a][rhs_a] = 0.0;//i.e. null } } } } // Now create the new probabilities data structure std::vector< std::vector<double> > new_probabilities(new_lhs_dim, std::vector<double>(new_rhs_dim)); const std::map<int,int>& lhs_reorder_map = annot_reorder[lhs]; //for each old annotation of LHS for(unsigned i = 0; i < lhs_dim; ++i){ if(lhs_reorder_map.count(i)) { const std::map<int,int>& rhs_reorder_map = annot_reorder[rhs]; for(unsigned j = 0; j < rhs_dim; ++j){ if(rhs_reorder_map.count(j)) new_probabilities[lhs_reorder_map.find(i)->second][rhs_reorder_map.find(j)->second] = probabilities[i][j]; } } } probabilities = new_probabilities; // std::cout << *this << std::endl; }