bool DataBaseEngine::initialize_database()
{
#ifndef USE_MYSQL
    QFile db_file(":/assets/" DB_F_NAME);
    const char fname[] = WORK_DIR "/" DB_F_NAME;

#ifdef CLEAN_DB
    QFile(fname).remove();
#endif

    if (db_file.exists() && !QFile(fname).exists()) {
        QDir().mkdir(WORK_DIR);
        db_file.copy(fname);
        QFile::setPermissions(fname, QFile::WriteOwner | QFile::ReadOwner);
    }

    QSqlDatabase db = QSqlDatabase::addDatabase("QSQLITE", DB_NAME);
    db.setDatabaseName(fname);
#else
    QSqlDatabase db = QSqlDatabase::addDatabase("QMYSQL", DB_NAME);
    db.setUserName("user");
    db.setPassword("");
    db.setDatabaseName("face_recognizer");
#endif

    return db.open();
}
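//-----------------------------------------------------------------------------
// A minimal usage sketch, not part of the original source: it assumes
// initialize_database() is publicly callable and that DB_NAME is the same
// connection-name macro used above. QSqlDatabase::database() retrieves the
// connection that addDatabase() registered, so callers never need the
// QSqlDatabase object created inside initialize_database().
//-----------------------------------------------------------------------------
#include <QSqlDatabase>
#include <QSqlQuery>

static bool open_connection_example(DataBaseEngine &engine)   // hypothetical caller
{
    if (!engine.initialize_database())
        return false;                                 // db.open() failed

    // Look up the named connection and run a trivial query against it.
    QSqlDatabase db = QSqlDatabase::database(DB_NAME);
    QSqlQuery query(db);
    return query.exec("SELECT 1");
}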
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
void DatabaseHandle::Save()
{
    Check_Object(this);

    //
    //--------------------------------------
    // If there were no changes, we are done
    //--------------------------------------
    //
    if (m_readOnly || !m_dirtyFlag)
        return;

    //
    //----------------------------------------------------------------------
    // We will need to adjust pointers as we write this thing out, so make a
    // copy of the database object now
    //----------------------------------------------------------------------
    //
    Database output_db;
    output_db.m_numberOfRecords = m_dataBase->m_numberOfRecords;
    output_db.m_nextRecordID = m_dataBase->m_nextRecordID;

    //
    //----------------------------------------------------
    // Build a table to deal with rethreading the database
    //----------------------------------------------------
    //
    struct OutputRecord
    {
        DWORD m_ID, m_offset, m_nextIDRecord, m_nextNameRecord;
        Record *m_data;
    };
    OutputRecord *new_records = new OutputRecord[m_dataBase->m_numberOfRecords];
    Check_Pointer(new_records);
    DWORD new_id_index[Database::e_DataBlockSize];
    memset(new_id_index, 0, sizeof(new_id_index));

    //
    //---------------------------------------------------------
    // Start filling in the block information from the database
    //---------------------------------------------------------
    //
    DWORD offset = sizeof(output_db);
    OutputRecord *new_record = new_records;
    for (DWORD i=0; i<Database::e_DataBlockSize; ++i)
    {
        Record *old_record = reinterpret_cast<Record*>(m_dataBase->m_idOffsets[i]);
        if (old_record)
        {
            output_db.m_idOffsets[i] = offset;
            new_id_index[i] = new_record - new_records;
            while (old_record)
            {
                old_record = reinterpret_cast<Record*>((DWORD)old_record + m_baseAddress);
                Check_Object(old_record);
                new_record->m_data = old_record;
                new_record->m_ID = old_record->m_ID;
                new_record->m_offset = offset;
                new_record->m_nextIDRecord = 0;
                new_record->m_nextNameRecord = 0;
                offset += sizeof(*old_record) + old_record->m_length + old_record->m_nameLength;
                old_record = reinterpret_cast<Record*>(old_record->m_nextIDRecord);
                if (old_record)
                    new_record->m_nextIDRecord = offset;
                ++new_record;
            }
        }
    }

    //
    //-----------------------------------------
    // Make sure Hash index table is up to date
    //-----------------------------------------
    //
    for (DWORD i=0; i<Database::e_DataBlockSize; ++i)
    {
        Record *old_record = reinterpret_cast<Record*>(m_dataBase->m_nameOffsets[i]);
        if (old_record)
        {
            old_record = reinterpret_cast<Record*>((DWORD)old_record + m_baseAddress);
            Check_Object(old_record);

            //
            //---------------------------------
            // Find this record in our new data
            //---------------------------------
            //
            DWORD index = old_record->m_ID % Database::e_DataBlockSize;
            int j = new_id_index[index];
            OutputRecord *new_record = &new_records[j];
            for (; j<m_dataBase->m_numberOfRecords; ++j, ++new_record)
            {
                if (new_record->m_ID == old_record->m_ID)
                    break;
            }
            Verify(j<m_dataBase->m_numberOfRecords);

            //
            //----------------------
            // Set up the hash chain
            //----------------------
            //
            output_db.m_nameOffsets[i] = new_record->m_offset;
            while (old_record)
            {
                Check_Object(old_record);

                //
                //-----------------------------------------------------------
                // Find the next record, and find where it is in our new data
                //-----------------------------------------------------------
                //
                old_record = reinterpret_cast<Record*>(old_record->m_nextNameRecord);
                Verify(j<m_dataBase->m_numberOfRecords);
                if (old_record)
                {
                    old_record = reinterpret_cast<Record*>((DWORD)old_record + m_baseAddress);
                    Check_Object(old_record);
                    index = old_record->m_ID % Database::e_DataBlockSize;
                    j = new_id_index[index];
                    OutputRecord *next_record = &new_records[j];
                    for (; j<m_dataBase->m_numberOfRecords; ++j, ++next_record)
                    {
                        if (next_record->m_ID == old_record->m_ID)
                            break;
                    }
                    Verify(j<m_dataBase->m_numberOfRecords);
                    new_record->m_nextNameRecord = next_record->m_offset;
                    new_record = next_record;
                }
            }
        }
    }

    //
    //------------------------------------------------
    // This file was read/write, so write it out again
    //------------------------------------------------
    //
    FileStream db_file(m_fileName, FileStream::WriteOnly);
    db_file.WriteBytes(&output_db, sizeof(output_db));

    //
    //----------------------
    // Write out each record
    //----------------------
    //
    new_record = new_records;
    for (DWORD i=0; i<m_dataBase->m_numberOfRecords; ++i, ++new_record)
    {
        Record *old_record = new_record->m_data;

        //
        //------------------------------------------------------------
        // Save the old connection info, then replace it with the data
        // calculated in the new records
        //------------------------------------------------------------
        //
        bool free_block = old_record->m_mustFree;
        DWORD next_id = old_record->m_nextIDRecord;
        DWORD next_name = old_record->m_nextNameRecord;
        old_record->m_mustFree = false;
        old_record->m_nextIDRecord = new_record->m_nextIDRecord;
        old_record->m_nextNameRecord = new_record->m_nextNameRecord;

        //
        //------------------------------------------
        // Write out the info, then restore the data
        //------------------------------------------
        //
        db_file.WriteBytes(
            old_record,
            sizeof(*old_record) + old_record->m_length + old_record->m_nameLength
        );
        old_record->m_mustFree = free_block;
        old_record->m_nextIDRecord = next_id;
        old_record->m_nextNameRecord = next_name;
    }

    //
    //---------
    // Clean up
    //---------
    //
    delete[] new_records;
    m_dirtyFlag = false;
}
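//-----------------------------------------------------------------------------
// Illustration only (not from the original source): a self-contained sketch of
// the offset/pointer rebasing that Save() performs in reverse. On disk, each
// record's m_nextIDRecord holds a file offset; in memory the loader turns an
// offset into a pointer by adding the base address, exactly as Save() does
// with (DWORD)old_record + m_baseAddress. The RecordHdr struct and FindRecord
// helper below are hypothetical stand-ins for the real Record/Database types.
//-----------------------------------------------------------------------------
#include <cstdint>

struct RecordHdr
{
    uint32_t m_ID;
    uint32_t m_nextIDRecord;   // stored as a file offset; 0 terminates the chain
};

// Walk one ID hash chain inside a memory-mapped image of the database file.
// 'base' is where the file is mapped, 'first_offset' the chain head offset.
static const RecordHdr *FindRecord(const uint8_t *base, uint32_t first_offset, uint32_t id)
{
    uint32_t offset = first_offset;
    while (offset != 0)
    {
        // Rebase: file offset -> in-memory pointer.
        const RecordHdr *record = reinterpret_cast<const RecordHdr *>(base + offset);
        if (record->m_ID == id)
            return record;
        offset = record->m_nextIDRecord;
    }
    return nullptr;
}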
int Database::ReadFile(int file_num)
{
    printf("*--------------Database Read-----------------*\n");
    std::string dir = MACHINEPATH;
    dir.append("Database/Corpus/");
    std::string suffix = ".txt";
    char buf[16];    // large enough for any 32-bit file number
    snprintf(buf, sizeof(buf), "%d", file_num);

    std::ifstream database_setup;
    printf("Reading database setup file...\n");
    std::string db_file(dir);
    db_file.append("database_summary_");
    db_file.append(buf);
    db_file.append(suffix);
    database_setup.open(db_file.c_str());
    database_setup >> mfcc_num_coef;
    database_setup >> fft_frame_length;
    fft_frame_length += 2;
    database_setup >> database_frame_num;
    database_setup >> num_tracks;
    database_setup.close();
    printf("Complete\n");

    std::ifstream mfccFile;
    printf("Reading mfcc file...\n");
    std::string mf_file(dir);
    mf_file.append("mfcc_frames_");
    mf_file.append(buf);
    mf_file.append(suffix);
    mfccFile.open(mf_file);
    mfcc_matrix = (double**)malloc(sizeof(double*)*database_frame_num);
    for(int i = 0; i < database_frame_num; i++){
        mfcc_matrix[i] = (double*)malloc(sizeof(double)*mfcc_num_coef);
        for(int j = 0; j < mfcc_num_coef; j++){
            mfccFile >> mfcc_matrix[i][j];
        }
    }
    mfccFile.close();
    printf("Complete\n");

    std::ifstream fftFile;
    printf("Reading fft frames file...\n");
    std::string ft_file(dir);
    ft_file.append("fft_frames_");
    ft_file.append(buf);
    ft_file.append(suffix);
    fftFile.open(ft_file);
    fft_matrix = (double**)malloc(sizeof(double*)*database_frame_num);
    for(int i = 0; i < database_frame_num; i++){
        fft_matrix[i] = (double*)malloc(sizeof(double)*fft_frame_length);
        for(int j = 0; j < fft_frame_length; j++){
            fftFile >> fft_matrix[i][j];
        }
    }
    fftFile.close();
    printf("Complete\n");

    debug_print(("Reading track frame lengths..."));
    std::string track_frame_l_file(MACHINEPATH);
    track_frame_l_file.append("Database/Corpus/track_frame_lengths.txt");
    std::ifstream tr_frm_l_file;
    tr_frm_l_file.open(track_frame_l_file);
    track_frame_length = (int*)malloc(sizeof(int)*num_tracks);
    for(int i = 0; i < num_tracks; i++){
        tr_frm_l_file >> track_frame_length[i];
    }
    tr_frm_l_file.close();
    debug_print(("\tComplete\n"));

    debug_print(("Reading buf lengths..."));
    std::string buf_s_file_p(MACHINEPATH);
    buf_s_file_p.append("Database/Corpus/buffer_sizes.txt");
    std::ifstream buf_s_file;
    buf_s_file.open(buf_s_file_p);
    track_buffer_sizes = (long*)malloc(sizeof(long)*num_tracks);
    for(int i = 0; i < num_tracks; i++){
        buf_s_file >> track_buffer_sizes[i];
    }
    buf_s_file.close();
    debug_print(("\tComplete\n"));

    debug_print(("Number MFCC Coefficients : %d\n", mfcc_num_coef));
    debug_print(("Number values p FFT frame : %d\n", fft_frame_length));
    debug_print(("Total num frames in database : %ld\n\n", database_frame_num));
    printf("*--------------------------------------------*\n");

    gr_size = (int)database_frame_num;
    gr_coef = mfcc_num_coef;
    gr_fft = fft_frame_length;
    return 0;
}
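//-----------------------------------------------------------------------------
// A possible cleanup counterpart to ReadFile(), not present in the original
// source: the matrices are allocated with malloc(), so each row must be freed
// before the row-pointer arrays themselves. free_database_matrices() is a
// hypothetical helper; the caller is assumed to pass the same row count that
// was used during allocation (gr_size after a successful read).
//-----------------------------------------------------------------------------
#include <cstdlib>

static void free_database_matrices(double **mfcc, double **fft, int frame_count)
{
    for (int i = 0; i < frame_count; i++) {
        free(mfcc[i]);   // one row of MFCC coefficients
        free(fft[i]);    // one row of FFT bins
    }
    free(mfcc);
    free(fft);
}

// Example call, e.g. from a Database destructor:
//     free_database_matrices(mfcc_matrix, fft_matrix, gr_size);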
int Database::ReadFileGroup(int file_num, int s_grouping, int type)
{
    grouping = s_grouping;    // grouping must be > 0
    printf("\n*--------------Database Group Read-----------------*\n");
    std::string dir = MACHINEPATH;
    switch(type){
        case 0: dir.append("Database/Corpus/");    break;
        case 1: dir.append("Database/Corpus/DB/"); break;
    }
    std::string suffix = ".txt";
    char buf[16];    // large enough for any 32-bit file number
    snprintf(buf, sizeof(buf), "%d", file_num);

    std::ifstream database_setup;
    printf("Reading database setup file...");
    std::string db_file(dir);
    db_file.append("database_summary_");
    db_file.append(buf);
    db_file.append(suffix);
    database_setup.open(db_file.c_str());
    database_setup >> mfcc_num_coef;
    database_setup >> fft_frame_length;
    fft_frame_length += 2;
    database_setup >> database_frame_num;
    database_setup >> num_tracks;
    database_setup.close();
    printf("\tComplete\n");

    std::ifstream mfccFile;
    printf("Reading mfcc file...");
    std::string mf_file(dir);
    mf_file.append("mfcc_frames_");
    mf_file.append(buf);
    mf_file.append(suffix);
    mfccFile.open(mf_file);
    grouped_size = (long)floor(database_frame_num/grouping);
    int grouped_coef = mfcc_num_coef * grouping;
    gr_size = (int)grouped_size;
    gr_coef = grouped_coef;
    mfcc_matrix = (double**)malloc(sizeof(double*)*grouped_size);
    for(int i = 0; i < grouped_size; i++){
        mfcc_matrix[i] = (double*)malloc(sizeof(double)*grouped_coef);
    }
    for(int i = 0; i < grouped_size; i++){
        for(int j = 0; j < grouped_coef; j++){
            mfccFile >> mfcc_matrix[i][j];
        }
    }
    mfccFile.close();
    printf("\tComplete\n");

    std::ifstream fftFile;
    printf("Reading fft frames file...");
    std::string ft_file(dir);
    ft_file.append("fft_frames_");
    ft_file.append(buf);
    ft_file.append(suffix);
    fftFile.open(ft_file);
    fft_length_grouped = fft_frame_length * grouping;
    printf("fft len group : %d\n", fft_length_grouped);
    fft_matrix = (double**)malloc(sizeof(double*)*grouped_size);
    for(int i = 0; i < grouped_size; i++){
        fft_matrix[i] = (double*)malloc(sizeof(double)*fft_length_grouped);
    }
    for(int i = 0; i < grouped_size; i++){
        for(int j = 0; j < fft_length_grouped; j++){
            fftFile >> fft_matrix[i][j];
            // printf("%f ", fft_matrix[i][j]);
        }
    }
    fftFile.close();
    printf("\tComplete\n");
    gr_size = (int)grouped_size;
    gr_fft = fft_length_grouped;

    debug_print(("Reading track frame lengths..."));
    std::string track_frame_l_file(dir);
    track_frame_l_file.append("track_frame_lengths.txt");
    std::ifstream tr_frm_l_file;
    tr_frm_l_file.open(track_frame_l_file);
    track_frame_length = (int*)malloc(sizeof(int)*num_tracks);
    for(int i = 0; i < num_tracks; i++){
        tr_frm_l_file >> track_frame_length[i];
        printf("Track Length : %d \n", track_frame_length[i]);
    }
    tr_frm_l_file.close();
    debug_print(("\tComplete\n"));

    debug_print(("Reading buf lengths..."));
    std::string buf_s_file_p(dir);
    buf_s_file_p.append("buffer_sizes.txt");
    std::ifstream buf_s_file;
    buf_s_file.open(buf_s_file_p);
    track_buffer_sizes = (long*)malloc(sizeof(long)*num_tracks);
    for(int i = 0; i < num_tracks; i++){
        buf_s_file >> track_buffer_sizes[i];
    }
    buf_s_file.close();
    debug_print(("\tComplete\n"));

    printf("Num Tracks : %d\n", num_tracks);
    debug_print(("Number MFCC Coefficients : %d\n", mfcc_num_coef));
    debug_print(("Number values p FFT frame : %d\n", fft_frame_length));
    debug_print(("Total num frames in database : %ld\n", database_frame_num));
    debug_print(("Grouped MFCC num coefs : %d\n", grouped_coef));
    debug_print(("Num frames when grouped : %ld\n", grouped_size));

    GroupTrackLengths();
    printf("*--------------------------------------------------*\n");
    return 0;
}
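//-----------------------------------------------------------------------------
// Hedged usage sketch (not in the original source): how a caller might load
// corpus file 1, first frame-by-frame and then grouped four frames at a time
// from the plain Corpus directory (type 0). A default-constructible Database
// is assumed; both readers return 0 on completion as shown above.
//-----------------------------------------------------------------------------
#include <cstdio>

static int load_corpus_example()
{
    Database db;                                        // assumed default constructor
    if (db.ReadFile(1) != 0)                            // ungrouped read
        return -1;

    Database grouped_db;
    if (grouped_db.ReadFileGroup(1, /*s_grouping=*/4, /*type=*/0) != 0)
        return -1;

    printf("corpus 1 loaded\n");
    return 0;
}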
int main(int argc, char *argv[])
{
    QCoreApplication app(argc,argv);

    // check input args
    QStringList inputArgs = app.arguments();
    if(inputArgs.size() != 2) {
        badInput();
        return -1;
    }

    // open osmscout map
    osmscout::DatabaseParameter map_param;
    osmscout::Database map(map_param);
    if(!map.Open(inputArgs[1].toStdString())) {
        qDebug() << "ERROR: Failed to open osmscout map";
        return -1;
    }
    osmscout::TypeConfig * typeConfig = map.GetTypeConfig();
    osmscout::TypeSet typeSet;
    setTypesForAdminRegions(typeConfig,typeSet);

//    GeoBoundingBox tempbbox;
//    tempbbox.minLon = -83.3203; tempbbox.maxLon = -82.9688;
//    tempbbox.minLat = 42.1875;  tempbbox.maxLat = 42.3633;
////    tempbbox.minLon -= 1; tempbbox.maxLon += 1;
////    tempbbox.minLat -= 1; tempbbox.maxLat += 1;

    // create search database
    Kompex::SQLiteDatabase * database;
    Kompex::SQLiteStatement * stmt;

    QString db_file_path = app.applicationDirPath()+"/searchdb.sqlite";
    QFile db_file(db_file_path);
    if(db_file.exists()) {
        if(!db_file.remove()) {
            qDebug() << "ERROR: searchdb.sqlite exists and "
                        "could not be deleted";
            return -1;
        }
    }

    try {
        // create the database at the same path that was checked for above
        database = new Kompex::SQLiteDatabase(db_file_path.toStdString().c_str(),
                                              SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE,0);
        stmt = new Kompex::SQLiteStatement(database);

        stmt->SqlStatement("CREATE TABLE name_lookup("
                           "name_id INTEGER PRIMARY KEY NOT NULL,"
                           "name_lookup TEXT NOT NULL UNIQUE);");

        stmt->SqlStatement("CREATE TABLE admin_regions("
                           "id INTEGER PRIMARY KEY NOT NULL,"
                           "node_offsets BLOB,"
                           "way_offsets BLOB,"
                           "area_offsets BLOB"
                           ");");

        stmt->SqlStatement("CREATE TABLE streets("
                           "id INTEGER PRIMARY KEY NOT NULL,"
                           "node_offsets BLOB,"
                           "way_offsets BLOB,"
                           "area_offsets BLOB"
                           ");");

        stmt->SqlStatement("CREATE TABLE pois("
                           "id INTEGER PRIMARY KEY NOT NULL,"
                           "node_offsets BLOB,"
                           "way_offsets BLOB,"
                           "area_offsets BLOB"
                           ");");
    }
    catch(Kompex::SQLiteException &exception) {
        qDebug() << "ERROR: SQLite exception creating database:"
                 << QString::fromStdString(exception.GetString());
        return -1;
    }

    // build a tile list for the dataset

//    // world bbox
//    GeoBoundingBox bbox_world;
//    bbox_world.minLon = -180.0; bbox_world.maxLon = 180.0;
//    bbox_world.minLat = -90.0;  bbox_world.maxLat = 90.0;

    // map data bbox
    GeoBoundingBox bbox_map;
    map.GetBoundingBox(bbox_map.minLat,bbox_map.minLon,
                       bbox_map.maxLat,bbox_map.maxLon);

    // generate tile list
    std::vector<Tile*> list_tiles;
    buildTileList(bbox_map,list_tiles);

#ifdef DEBUG_WITH_OSG
    return displayTiles(bbox_map,list_tiles);
#endif

    // [name_id] [name_key]
    int32_t name_id=1;
    boost::unordered_map<std::string,int32_t> table_names;

    // build database tables
    bool opOk=false;

    // admin_regions
    qDebug() << "INFO: Building admin_regions table...";
    setTypesForAdminRegions(typeConfig,typeSet);
    opOk = buildTable(stmt,name_id,table_names,"admin_regions",
                      list_tiles,map,typeSet,false,true,true);
    if(opOk) {
        qDebug() << "INFO: Finished building admin_regions table";
    }
    else {
        qDebug() << "ERROR: Failed to build admin_regions table";
        return -1;
    }

    // streets
    qDebug() << "INFO: Building streets table...";
    setTypesForStreets(typeConfig,typeSet);
    opOk = buildTable(stmt,name_id,table_names,"streets",
                      list_tiles,map,typeSet,false,false,false);
    if(opOk) {
        qDebug() << "INFO: Finished building streets table";
    }
    else {
        qDebug() << "ERROR: Failed to build streets table";
        return -1;
    }

    // pois
    qDebug() << "INFO: Building pois table...";
    setTypesForPOIs(typeConfig,typeSet);
    opOk = buildTable(stmt,name_id,table_names,"pois",
                      list_tiles,map,typeSet,false,false,false);
    if(opOk) {
        qDebug() << "INFO: Finished building pois table";
    }
    else {
        qDebug() << "ERROR: Failed to build pois table";
        return -1;
    }

    // build name_lookup table
    qDebug() << "INFO: Building name_lookup table...";
    opOk = buildNameLookupTable(stmt,table_names);
    if(opOk) {
        qDebug() << "INFO: Finished building name_lookup table";
    }
    else {
        qDebug() << "ERROR: Failed to build name_lookup table";
        return -1;
    }

    // vacuum to minimize db
    try {
        stmt->SqlStatement("VACUUM;");
    }
    catch(Kompex::SQLiteException &exception) {
        qDebug() << "ERROR: SQLite exception vacuuming:"
                 << QString::fromStdString(exception.GetString());
        return -1;
    }

    // clean up
    for(size_t i=0; i < list_tiles.size(); i++) {
        delete list_tiles[i];
    }
    list_tiles.clear();

    delete stmt;
    delete database;

//    // ### debug
//    std::map<int64_t,std::string> table_count_names;
//    boost::unordered_map<std::string,qint64>::iterator it;
//    for(it = table_names.begin(); it != table_names.end(); ++it) {
//        std::map<int64_t,int64_t>::iterator d_it;
//        d_it = g_table_nameid_count.find(it->second);
//        std::pair<int64_t,std::string> data;
//        data.first = d_it->second;
//        data.second = it->first;
//        table_count_names.insert(data);
//    }
//    std::map<int64_t,std::string>::iterator c_it;
//    for(c_it = table_count_names.begin();
//        c_it != table_count_names.end(); ++c_it) {
//        qDebug() << QString::fromStdString(c_it->second) << ":" << c_it->first;
//    }

//    // debug
//    boost::unordered_map<std::string,qint64>::iterator it;
//    for(it = table_names.begin(); it != table_names.end(); ++it)
//    {
//        qDebug() << it->second << ": " << QString::fromStdString(it->first);
//    }

    return 0;
}
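//-----------------------------------------------------------------------------
// Illustration only: a quick way to inspect the generated searchdb.sqlite.
// The builder above writes it through the Kompex wrapper; this sketch reads it
// back with Qt SQL instead, a deliberate substitution since the result is a
// plain SQLite file either way. The connection name and path argument are
// placeholders.
//-----------------------------------------------------------------------------
#include <QSqlDatabase>
#include <QSqlQuery>
#include <QVariant>
#include <QDebug>

static void dumpNameLookup(const QString &dbPath)
{
    QSqlDatabase db = QSqlDatabase::addDatabase("QSQLITE", "searchdb_inspect");
    db.setDatabaseName(dbPath);
    if (!db.open()) {
        qDebug() << "ERROR: could not open" << dbPath;
        return;
    }

    // name_lookup maps name_id -> the indexed name string (see CREATE TABLE above).
    QSqlQuery query(db);
    query.exec("SELECT name_id, name_lookup FROM name_lookup LIMIT 10");
    while (query.next()) {
        qDebug() << query.value(0).toInt() << ":" << query.value(1).toString();
    }
}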