/** * checks the current information displayed in the summary and makes the graph match that data. */ void Summary::on_graphChoice_activated(int index) { if(ui->tabWidget->tabText(ui->tabWidget->currentIndex()).contains("Grant",Qt::CaseInsensitive) == true){ if(index == 2) ui->dateSelector->setEnabled(true); else ui->dateSelector->setEnabled(false); } else if(ui->tabWidget->tabText(ui->tabWidget->currentIndex()).contains("Pub",Qt::CaseInsensitive) == true){ if(index == 1 || index == 2) ui->dateSelector->setEnabled(true); else ui->dateSelector->setEnabled(false); } else if(ui->tabWidget->tabText(ui->tabWidget->currentIndex()).contains("Pres",Qt::CaseInsensitive) == true){ if(index == 1) ui->dateSelector->setEnabled(true); else ui->dateSelector->setEnabled(false); } else if(ui->tabWidget->tabText(ui->tabWidget->currentIndex()).contains("Teach",Qt::CaseInsensitive) == true){ if(index == 1 || index == 2 || index == 7) ui->dateSelector->setEnabled(true); else ui->dateSelector->setEnabled(false); } graphs(); }
/** * When the filter button is clicked this function is called which applies the * current date filter to the data displayed. If date is invalid a message box * is produced to inform the client. */ void Summary::on_pushButton_clicked() { int tabindex = ui->tabWidget->currentIndex(); QString currentTabText = ui->tabWidget->tabText(tabindex); ui->lineEdit->setValidator(new QIntValidator(1000, 3000, this)); ui->lineEdit_2->setValidator(new QIntValidator(1000, 3000, this)); int earliest = ui->lineEdit->text().toInt(); int latest = ui->lineEdit_2->text().toInt(); if(earliest > latest) { QMessageBox messageBox; messageBox.critical(0,"Error","Invalid input !"); messageBox.setFixedSize(600,200); } else { qDebug() << ui->tabWidget->tabText(tabindex); if(/*(GrantSummary !=NULL) &&*/ (ui->tabWidget->tabText(tabindex).contains("Grant")) && currentTabText.contains("Grant")) { GrantSummary->getCreateTreeWidget()->clear(); GrantSummary->AddRoot("", "", ""); GrantSummary->fillTreeWidgetForGrant(db, earliest, latest); } if(/*(PublicationSummary != NULL) &&*/ (ui->tabWidget->tabText(tabindex).contains("Publication")) && currentTabText.contains("Publication")) { PublicationSummary->getCreateTreeWidget()->clear(); PublicationSummary->AddRoot("", ""); PublicationSummary->fillTreeWidgetForPublication(db, earliest, latest); } if(/*(PresentationSummary != NULL) && */(ui->tabWidget->tabText(tabindex).contains("Presentation")) && currentTabText.contains("Presentation")) { PresentationSummary->getCreateTreeWidget()->clear(); PresentationSummary->AddRoot("", ""); PresentationSummary->fillTreeWidgetForPresentation(db, earliest, latest); } if(/*(PresentationSummary != NULL) && */(ui->tabWidget->tabText(tabindex).contains("Teaching")) && currentTabText.contains("Teaching")) { TeachingSummary->getCreateTreeWidget()->clear(); int startYear = ui->lineEdit->text().toInt(), endYear = ui->lineEdit_2->text().toInt(), different = endYear-startYear; std::vector<std::vector<std::string>> 
PME_Name(different, std::vector<std::string>()), UME_Name(different, std::vector<std::string>()), CME_Name(different, std::vector<std::string>()), Other_Name(different, std::vector<std::string>()); std::vector<std::vector<double>> PME_Time(different, std::vector<double>()), UME_Time(different, std::vector<double>()), CME_Time(different, std::vector<double>()), Other_Time(different, std::vector<double>()); db.teachingSummary(PME_Name, UME_Name, CME_Name, Other_Name,PME_Time, UME_Time, CME_Time, Other_Time, startYear, endYear ); double totalPME = TeachingSummary->calcTotal(PME_Name, PME_Time); TeachingSummary->fillTreeWidgetForTeaching("PME", totalPME, PME_Name, PME_Time,startYear); double totalUME = TeachingSummary->calcTotal(UME_Name, UME_Time); TeachingSummary->fillTreeWidgetForTeaching("UME", totalUME, UME_Name, UME_Time,startYear); double totalCME = TeachingSummary->calcTotal(CME_Name, CME_Time); TeachingSummary->fillTreeWidgetForTeaching("CME", totalCME, CME_Name, CME_Time,startYear); double totalOTHER = TeachingSummary->calcTotal(Other_Name, Other_Time); TeachingSummary->fillTreeWidgetForTeaching("OTHER", totalOTHER, Other_Name, Other_Time,startYear); } } graphs(); //ui->tabWidget->setCurrentIndex(tabindex); }
/** * Gets called when the name box is selected. Displays a list of names for the * client to choose in order to filter the graphing. * */ void Summary::on_nameBox_activated(int index) { FieldMatchFilter nameMatch(MEMBER_NAME, ""); vector<grantsAndFundingField> nameFields;; vector<vector<string>> nameResults; vector<Filter*> filters; int earliest = ui->lineEdit->text().toInt(); int latest = ui->lineEdit_2->text().toInt(); if(latest == 0){ time_t t = std::time(0); struct tm * now = localtime(&t); earliest = 1970; latest = now->tm_year + 1900; ui->lineEdit->setText(QString::number(earliest)); ui->lineEdit_2->setText(QString::number(latest)); } DateFilter df(earliest,latest); filters.push_back(&df); filters = chosenName(filters, &nameMatch); if(ui->tabWidget->tabText(ui->tabWidget->currentIndex()).contains("Pub",Qt::CaseInsensitive) == true){ nameFields.push_back(STATUS_DATE); db.getPublications(nameResults,filters,nameFields); } else if(ui->tabWidget->tabText(ui->tabWidget->currentIndex()).contains("Grant",Qt::CaseInsensitive) == true){ nameFields.push_back(START_DATE); db.getGrantsAndFunding(nameResults,filters,nameFields); } else if(ui->tabWidget->tabText(ui->tabWidget->currentIndex()).contains("Pres",Qt::CaseInsensitive) == true){ nameFields.push_back(DATES); db.getPresentations(nameResults,filters,nameFields); } else if(ui->tabWidget->tabText(ui->tabWidget->currentIndex()).contains("Teach",Qt::CaseInsensitive) == true){ nameFields.push_back(START_DATE); db.getTeaching(nameResults,filters,nameFields); } processDate(nameResults); graphs(); graphs(); }
//*************************************************************** // Calculates the number of overlapping graphs in one edge and gives them // a color calculated from the choosen colorscheme String SimDrawColorizer::SimDrawColorScheme::getColor(int subGraphBits, int numberOfGraphs) { String color = "#", s; int r = 0x00, g = 0x00, b = 0x00; int numberOfGraphsInEdge = 0; Array<bool> graphs(numberOfGraphs); //Ueberlagerungen von Graphen bei dieser Kante /* Loest den Integerwert SubGraphBits in die einzelnen Bits auf und findet somit heraus, welche Graphen sich in dieser Kante ueberlagern */ for (int i = 0; i < numberOfGraphs; i++) { graphs[i] = 0; if((subGraphBits & (1 << i)) != 0) graphs[i]=1; } /* Bestimmt den Mittelwert der Farben der uebereinanderliegenden Graphen */ for (int i = 0; i < numberOfGraphs; i++) { if (graphs[i] == 1) { r += red[i]; g += green[i]; b += blue[i]; numberOfGraphsInEdge++; } } if (numberOfGraphsInEdge == numberOfGraphs) { r = 0x00; // Kanten werden schwarz eingefaerbt g = 0x00; // wenn sie zu allen Graphen gehoeren b = 0x00; } else { OGDF_ASSERT(numberOfGraphsInEdge > 0); r /= numberOfGraphsInEdge; g /= numberOfGraphsInEdge; b /= numberOfGraphsInEdge; } /* Setzt die einzelnen Farben zu eine Hex Farbcode zusammen */ s.sprintf("%x",r); if (s.length() == 1) color += "0"; color += s; s.sprintf("%x",g); if (s.length() == 1) color += "0"; color += s; s.sprintf("%x",b); if (s.length() == 1) color += "0"; color += s; return color; } // end getColor
// Entry point: distributed (MPI) computation of error-surface envelopes for a
// single search direction in weight space; rank 0 runs the final line search
// and writes the optimum to the output file.
int main(int argc, char ** argv)
{
  // Initialise MPI for this process; rank identifies it within the job.
  utils::mpi_world mpi_world(argc, argv);

  const int mpi_rank = MPI::COMM_WORLD.Get_rank();
  const int mpi_size = MPI::COMM_WORLD.Get_size(); // NOTE(review): unused in this function — presumably consumed inside read_tstset/compute_envelope; confirm.

  try {
    // Parse command-line options into the file-level globals used below.
    options(argc, argv);

    // Clamp the line search to the configured value range.
    cicada::optimize::LineSearch::value_min = value_lower;
    cicada::optimize::LineSearch::value_max = value_upper;

    // --scorer-list: print the available evaluation scorers and exit.
    if (scorer_list) {
      std::cout << cicada::eval::Scorer::lists();
      return 0;
    }

    // Exactly one yield type may be selected; default to sentence yield.
    if (int(yield_sentence) + yield_alignment + yield_span > 1)
      throw std::runtime_error("specify either sentence|alignment|span yield");
    if (int(yield_sentence) + yield_alignment + yield_span == 0)
      yield_sentence = true;

    // Both a weights file and a direction feature name are mandatory.
    if (weights_file.empty() || ! boost::filesystem::exists(weights_file))
      throw std::runtime_error("no weight file? " + weights_file.string());
    if (direction_name.empty())
      throw std::runtime_error("no direction?");

    // read reference set
    scorer_document_type scorers(scorer_name);
    read_refset(refset_files, scorers);

    if (mpi_rank == 0 && debug)
      std::cerr << "# of references: " << scorers.size() << std::endl;

    // read test set
    if (mpi_rank == 0 && debug)
      std::cerr << "reading hypergraphs" << std::endl;

    // One hypergraph slot per reference.
    hypergraph_set_type graphs(scorers.size());
    read_tstset(tstset_files, graphs);

    // Load the current weight vector from the (possibly compressed) file.
    weight_set_type weights;
    {
      utils::compress_istream is(weights_file, 1024 * 1024);
      is >> weights;
    }

    // Search direction: a unit vector along the named feature.
    weight_set_type direction;
    direction[direction_name] = 1.0;

    // Compute the error-surface segments for every hypergraph.
    segment_document_type segments(graphs.size());
    compute_envelope(scorers, graphs, weights, direction, segments);

    // Only rank 0 performs the line search and writes the result.
    if (mpi_rank == 0) {
      line_search_type line_search(debug);

      utils::compress_ostream os(output_file, 1024 * 1024);

      line_search(segments, value_lower, value_upper, OutputIterator(os, weights[direction_name]));
    }
  }
  catch (const std::exception& err) {
    std::cerr << "error: " << err.what() << std::endl;
    return 1;
  }
  return 0;
}
/***************************************************************************** Move nodes to neighbouring communities such that each move improves the given quality function maximally (i.e. greedily) for multiple layers, i.e. for multiplex networks. Each node will be in the same community in each layer, but the method may be different, or the weighting may be different for different layers. Notably, this can be used in the case of negative links, where you would like to weigh the negative links with a negative weight. Parameters: partitions -- The partitions to optimise. layer_weights -- The weights used for the different layers. ******************************************************************************/ double Optimiser::move_nodes(vector<MutableVertexPartition*> partitions, vector<double> layer_weights, int consider_comms) { #ifdef DEBUG cerr << "double Optimiser::move_nodes_multiplex(vector<MutableVertexPartition*> partitions, vector<double> weights)" << endl; #endif // Number of multiplex layers size_t nb_layers = partitions.size(); if (nb_layers == 0) return -1.0; // Get graphs vector<Graph*> graphs(nb_layers, NULL); for (size_t layer = 0; layer < nb_layers; layer++) graphs[layer] = partitions[layer]->get_graph(); // Number of nodes in the graph size_t n = graphs[0]->vcount(); // Number of iterations size_t itr = 0; // Total improvement while moving nodes double total_improv = 0.0; // Improvement for one loop double improv = 2*this->eps*nb_layers; for (size_t layer = 0; layer < nb_layers; layer++) if (graphs[layer]->vcount() != n) throw Exception("Number of nodes are not equal for all graphs."); // Number of moved nodes during one loop size_t nb_moves = 2*n*nb_layers; // Initialize the degree vector // If we want to debug the function, we will calculate some additional values. 
// In particular, the following consistencies could be checked: // (1) - The difference in the quality function after a move should match // the reported difference when calling diff_move. // (2) - The quality function should be exactly the same value after // aggregating/collapsing the graph. // As long as we keep on improving while (improv > this->eps*nb_layers && nb_moves > n*this->delta*nb_layers && itr < this->max_itr) { itr += 1; nb_moves = 0; improv = 0.0; // Establish vertex order vector<size_t> vertex_order = range(n); if (this->random_order) random_shuffle( vertex_order.begin(), vertex_order.end() ); // For each node for(vector<size_t>::iterator it_vertex = vertex_order.begin(); it_vertex != vertex_order.end(); ++it_vertex) { size_t v = *it_vertex; // The actual vertex we will now consider // Only take into account nodes of degree higher than zero map<size_t, double> comm_improvs; size_t v_comm = -1; size_t neigh_comm; set<size_t>* neigh_comms = NULL; Graph* graph = NULL; MutableVertexPartition* partition = NULL; switch (consider_comms) { /****************************ALL COMMS**********************************/ case ALL_COMMS: #ifdef DEBUG cerr << "Consider all communities." << endl; #endif for (size_t layer = 0; layer < nb_layers; layer++) { graph = graphs[layer]; partition = partitions[layer]; for(size_t comm = 0; comm < partition->nb_communities(); comm++) { // What is the current community of the node (this should be the same for all layers) v_comm = partition->membership(v); if (graph->degree(v, IGRAPH_ALL) > 0) { // Make sure to multiply it by the weight per layer comm_improvs[comm] += layer_weights[layer]*partition->diff_move(v, comm); } } } break; /****************************ALL NEIGH COMMS*****************************/ case ALL_NEIGH_COMMS: #ifdef DEBUG cerr << "Consider all neighbour communities." 
<< endl; #endif for (size_t layer = 0; layer < nb_layers; layer++) { graph = graphs[layer]; partition = partitions[layer]; neigh_comms = partition->get_neigh_comms(v, IGRAPH_ALL); for (set<size_t>::iterator neigh_comm_it = neigh_comms->begin(); neigh_comm_it != neigh_comms->end(); ++neigh_comm_it) { neigh_comm = *neigh_comm_it; // Make sure to multiply it by the weight per layer comm_improvs[neigh_comm] += layer_weights[layer]*partition->diff_move(v, neigh_comm); } delete neigh_comms; } break; /****************************RAND COMM***********************************/ case RAND_COMM: neigh_comm = partitions[0]->membership(graphs[0]->get_random_node()); #ifdef DEBUG cerr << "Consider random community " << neigh_comm << "." << endl; #endif for (size_t layer = 0; layer < nb_layers; layer++) { comm_improvs[neigh_comm] += layer_weights[layer]*partitions[layer]->diff_move(v, neigh_comm); } break; /****************************RAND NEIGH COMM*****************************/ case RAND_NEIGH_COMM: // Community membership should be consistent across layers // anyway, so just read it once. // First select a random layer size_t rand_layer = graphs[0]->get_random_int(0, nb_layers - 1); neigh_comm = partitions[0]->membership(graphs[rand_layer]->get_random_neighbour(v, IGRAPH_ALL)); #ifdef DEBUG cerr << "Consider random neighbour community " << neigh_comm << "." 
<< endl; #endif for (size_t layer = 0; layer < nb_layers; layer++) { comm_improvs[neigh_comm] += layer_weights[layer]*partitions[layer]->diff_move(v, neigh_comm); } break; } size_t max_comm = v_comm; double max_improv = 0.0; // Determine the maximum improvement for (map<size_t, double>::iterator improv_it = comm_improvs.begin(); improv_it != comm_improvs.end(); improv_it++) { size_t comm = improv_it->first; double improv = improv_it->second; if (improv > max_improv) max_comm = comm; } for (size_t layer = 0; layer < nb_layers; layer++) { MutableVertexPartition* partition = partitions[layer]; #ifdef DEBUG // If we are debugging, calculate quality function double q1 = partition->quality(); #endif // If we actually plan to move the nove if (max_comm != v_comm) { // Keep track of improvement improv += max_improv; // Actually move the node partition->move_node(v, max_comm); // Keep track of number of moves nb_moves += 1; } #ifdef DEBUG // If we are debugging, calculate quality function // and report difference double q2 = partition->quality(); if (fabs((q2 - q1) - max_improv) > 1e-6) { cerr << "ERROR: Inconsistency while moving nodes, improvement as measured by quality function did not equal the improvement measured by the diff_move function." << endl; //throw Exception("ERROR: Inconsistency while moving nodes, improvement as measured by quality function did not equal the improvement measured by the diff_move function."); } cerr << "Move node " << v << " from " << v_comm << " to " << max_comm << " (diff_move=" << max_improv << ", q2 - q1=" << q2 - q1 << ")" << endl; #endif } } // Keep track of total improvement over multiple loops total_improv += improv; } partitions[0]->renumber_communities(); vector<size_t> membership = partitions[0]->membership(); for (size_t layer = 1; layer < nb_layers; layer++) { partitions[layer]->renumber_communities(membership); } return total_improv; }
/***************************************************************************** Optimize the providede partitions simultaneously. We here use the sum of the difference of the moves as the overall quality function, each partition weighted by the layer weight. *****************************************************************************/ double Optimiser::optimize_partition(vector<MutableVertexPartition*> partitions, vector<double> layer_weights) { #ifdef DEBUG cerr << "vector<MutableVertexPartition*> Optimiser::find_partition(vector<MutableVertexPartition*> partitions)" << endl; #endif // Number of multiplex layers size_t nb_layers = partitions.size(); if (nb_layers == 0) throw Exception("No partitions provided."); // Get graphs for all layers vector<Graph*> graphs(nb_layers, NULL); for (size_t layer = 0; layer < nb_layers; layer++) graphs[layer] = partitions[layer]->get_graph(); // Number of nodes in the graphs. Should be the same across // all graphs, so we only take the first one. size_t n = graphs[0]->vcount(); // Make sure that all graphs contain the exact same number of nodes. // We assume the index of each vertex in the graph points to the // same node (but then in a different layer). for (size_t layer = 0; layer < nb_layers; layer++) if (graphs[layer]->vcount() != n) throw Exception("Number of nodes are not equal for all graphs."); // Initialize the vector of the collapsed graphs for all layers vector<Graph*> collapsed_graphs(nb_layers, NULL); vector<MutableVertexPartition*> collapsed_partitions(nb_layers, NULL); // Do one iteration of optimisation double improv = this->move_nodes(partitions, layer_weights, this->consider_comms); // As long as there remains improvement iterate while (improv > this->eps) { // First collapse graphs (i.e. 
community graph) for (size_t layer = 0; layer < nb_layers; layer++) { // Get graph and partition for current layer Graph* graph = graphs[layer]; MutableVertexPartition* partition = partitions[layer]; // Create collapsed graph Graph* collapsed_graph = graph->collapse_graph(partition); collapsed_graphs[layer] = collapsed_graph; // Create collapsed partition (i.e. default partition of each node in its own community). MutableVertexPartition* collapsed_partition = partitions[layer]->create(collapsed_graph); collapsed_partitions[layer] = collapsed_partition; // Create partition for collapsed graph #ifdef DEBUG cerr << "Calculate partition quality." << endl; double q = partition->quality(); cerr << "Calculate collapsed partition quality." << endl; double q_collapsed = collapsed_partition->quality(); if (fabs(q - q_collapsed) > 1e-6) { cerr << "ERROR: Quality of original partition and collapsed partition are not equal." << endl; } cerr << "partition->quality()=" << q << ", collapsed_partition->quality()=" << q_collapsed << endl; cerr << "graph->total_weight()=" << graph->total_weight() << ", collapsed_graph->total_weight()=" << collapsed_graph->total_weight() << endl; cerr << "graph->ecount()=" << graph->ecount() << ", collapsed_graph->ecount()=" << collapsed_graph->ecount() << endl; cerr << "graph->is_directed()=" << graph->is_directed() << ", collapsed_graph->is_directed()=" << collapsed_graph->is_directed() << endl; #endif } // Optimise partition for all collapsed graphs improv = this->move_nodes(collapsed_partitions, layer_weights, this->consider_comms); // Make sure improvement on coarser scale is reflected on the // scale of the graphs as a whole. for (size_t layer = 0; layer < nb_layers; layer++) { partitions[layer]->from_coarser_partition(collapsed_partitions[layer]); delete collapsed_partitions[layer]; delete collapsed_graphs[layer]; } } // Make sure the resulting communities are called 0,...,r-1 // where r is the number of communities. 
double q = 0.0; partitions[0]->renumber_communities(); vector<size_t> membership = partitions[0]->membership(); // We only renumber the communities for the first graph, // since the communities for the other graphs should just be equal // to the membership of the first graph. for (size_t layer = 1; layer < nb_layers; layer++) { partitions[layer]->renumber_communities(membership); q += partitions[layer]->quality()*layer_weights[layer]; } return q; }
// Entry point: single-process, multi-threaded optimisation over test-set
// hypergraphs from multiple starting weight vectors.
// NOTE(review): this definition is truncated in the visible chunk — the
// enclosing try block is closed later in the file.
int main(int argc, char ** argv)
{
  try {
    // Parse command-line options into the file-level globals used below.
    options(argc, argv);

    // Clamp the line search to the configured value range.
    cicada::optimize::LineSearch::value_min = value_lower;
    cicada::optimize::LineSearch::value_max = value_upper;

    // --scorer-list: print the available evaluation scorers and exit.
    if (scorer_list) {
      std::cout << cicada::eval::Scorer::lists();
      return 0;
    }

    // Exactly one yield type may be selected; default to sentence yield.
    if (int(yield_sentence) + yield_alignment + yield_span > 1)
      throw std::runtime_error("specify either sentence|alignment|span yield");
    if (int(yield_sentence) + yield_alignment + yield_span == 0)
      yield_sentence = true;

    // L1 and L2 regularisation are mutually exclusive, and either requires
    // a positive scaling constant C.
    if (regularize_l1 && regularize_l2)
      throw std::runtime_error("you cannot use both of L1 and L2...");
    if (regularize_l1 || regularize_l2) {
      if (C <= 0.0)
        throw std::runtime_error("the scaling for L1/L2 must be positive");
    }
    if (weight_normalize_l1 && weight_normalize_l2)
      throw std::runtime_error("you cannot use both of L1 and L2 for weight normalization...");

    // Use at least one worker thread.
    threads = utils::bithack::max(threads, 1);

    // read reference set
    scorer_document_type scorers(scorer_name);
    read_refset(refset_files, scorers);

    const size_t scorers_size = scorers.size();

    // Iterative mode with several test sets: replicate the references once
    // per test set so their indices line up with the concatenated graphs.
    if (iterative && tstset_files.size() > 1) {
      scorer_document_type scorers_iterative(scorer_name);
      scorers_iterative.resize(scorers.size() * tstset_files.size());

      for (size_t i = 0; i != tstset_files.size(); ++ i)
        std::copy(scorers.begin(), scorers.end(), scorers_iterative.begin() + scorers.size() * i);

      scorers.swap(scorers_iterative);
    }

    if (debug)
      std::cerr << "# of references: " << scorers.size() << std::endl;

    if (debug)
      std::cerr << "reading hypergraphs" << std::endl;

    // One hypergraph slot per (possibly replicated) reference.
    hypergraph_set_type graphs(scorers.size());
    read_tstset(tstset_files, graphs, scorers_size);

    // collect initial weights
    weight_set_collection_type weights;

    if (! feature_weights_files.empty()) {
      // One starting weight vector per supplied file ("-" means stdin).
      for (path_set_type::const_iterator fiter = feature_weights_files.begin(); fiter != feature_weights_files.end(); ++ fiter) {
        if (*fiter != "-" && ! boost::filesystem::exists(*fiter))
          throw std::runtime_error("no file? " + fiter->string());

        utils::compress_istream is(*fiter);
        weights.push_back(weight_set_type());
        is >> weights.back();
      }

      // Optionally add the average of all starting points as an extra one.
      if (initial_average && weights.size() > 1) {
        weight_set_type weight;

        weight_set_collection_type::const_iterator witer_end = weights.end();
        for (weight_set_collection_type::const_iterator witer = weights.begin(); witer != witer_end; ++ witer)
          weight += *witer;

        weight *= (1.0 / weights.size());

        weights.push_back(weight);
      }

      // Deduplicate the starting points (note: this also sorts them).
      std::set<weight_set_type, std::less<weight_set_type>, std::allocator<weight_set_type> > uniques;
      uniques.insert(weights.begin(), weights.end());

      weights.clear();
      weights.insert(weights.end(), uniques.begin(), uniques.end());
    } else {
      // No files given: start from a single vector with every currently
      // allocated feature weighted 1.0.
      weights.push_back(weight_set_type());

      // all one weight...
      for (feature_type::id_type id = 0; id < feature_type::allocated(); ++ id)
        if (! feature_type(id).empty())
          weights.back()[feature_type(id)] = 1.0;
    }

    // collect lower/upper bounds
    weight_set_type bound_lower;
    weight_set_type bound_upper;

    if (! bound_lower_file.empty()) {
      if (bound_lower_file == "-" || boost::filesystem::exists(bound_lower_file)) {
        typedef cicada::FeatureVector<double> feature_vector_type;

        feature_vector_type bounds;

        utils::compress_istream is(bound_lower_file);
        is >> bounds;

        // Unspecified features default to the global minimum bound.
        bound_lower.allocate(cicada::optimize::LineSearch::value_min);
        for (feature_vector_type::const_iterator biter = bounds.begin(); biter != bounds.end(); ++ biter)
          bound_lower[biter->first] = biter->second;
      } else
        throw std::runtime_error("no lower-bound file?" + bound_lower_file.string());
    }
/**
 * Recreates the graph when the single-year date selection changes.
 *
 * @param index index of the newly selected date entry (unused: graphs()
 *              re-reads the current selection from the UI itself).
 */
void Summary::on_dateSelector_activated(int index)
{
    (void)index; // slot signature is fixed by Qt's auto-connection; silence unused-parameter warning
    graphs();
}
/** * Controls what happens when a new tab is selected in the summary window. * Depending on the tab selected, changes the displayed summary data to the * appropriate type. Also changes the labels and data in the graphing section * to match. */ void Summary::on_tabWidget_currentChanged(int index) { ui->graphChoice->clear(); ui->dateSelector->setEnabled(false); vector<grantsAndFundingField> nameFields; nameFields.push_back(MEMBER_NAME); vector<vector<string>> nameResults; vector<Filter*> filters; int earliest = ui->lineEdit->text().toInt(); int latest = ui->lineEdit_2->text().toInt(); if(latest == 0){ time_t t = std::time(0); struct tm * now = localtime(&t); earliest = 1970; latest = now->tm_year + 1900; ui->lineEdit->setText(QString::number(earliest)); ui->lineEdit_2->setText(QString::number(latest)); } DateFilter df(earliest,latest); filters.push_back(&df); if(ui->tabWidget->tabText(ui->tabWidget->currentIndex()).contains("Pub",Qt::CaseInsensitive) == true){ ui->graphChoice->addItem("Number of Publications by Year"); ui->graphChoice->addItem("Number of Publications by Type"); ui->graphChoice->addItem("Number of each Role by Type for Single Year"); nameFields.push_back(STATUS_DATE); db.getPublications(nameResults,filters,nameFields); processChoices(nameResults); graphs(); } else if(ui->tabWidget->tabText(ui->tabWidget->currentIndex()).contains("Grant",Qt::CaseInsensitive) == true){ ui->graphChoice->addItem("Amount of Grant Funding by Year"); ui->graphChoice->addItem("Number of Grants per Year"); ui->graphChoice->addItem("Roles taken in Grants for Single year"); nameFields.push_back(START_DATE); db.getGrantsAndFunding(nameResults,filters,nameFields); processChoices(nameResults); graphs(); } else if(ui->tabWidget->tabText(ui->tabWidget->currentIndex()).contains("Pres",Qt::CaseInsensitive) == true){ ui->graphChoice->addItem("Number of Presentations by Year"); ui->graphChoice->addItem("Number of Presentations by Type"); nameFields.push_back(DATES); 
db.getPresentations(nameResults,filters,nameFields); processChoices(nameResults); graphs(); } else if(ui->tabWidget->tabText(ui->tabWidget->currentIndex()).contains("Teach",Qt::CaseInsensitive) == true){ ui->graphChoice->addItem("Number of Teaching Programs by Year"); ui->graphChoice->addItem("Number of Teaching Programs by Type"); ui->graphChoice->addItem("Number of Teaching Courses by Type"); ui->graphChoice->addItem("Number of Teaching Courses by Year"); ui->graphChoice->addItem("Number of Hours per Teaching Session by Year"); ui->graphChoice->addItem("Number of Teaching Sessions by Year"); ui->graphChoice->addItem("Total Number of Hours by Year"); ui->graphChoice->addItem("Number of Hours by Member Name"); nameFields.push_back(START_DATE); db.getTeaching(nameResults,filters,nameFields); processChoices(nameResults); graphs(); } }