/*
** COMMAND: test-tag
** %fossil test-tag (+|*|-)TAGNAME ARTIFACT-ID ?VALUE?
**
** Add a tag or anti-tag to the rebuildable tables of the local repository.
** No tag artifact is created so the new tag is erased the next
** time the repository is rebuilt.  This routine is for testing
** use only.
*/
void testtag_cmd(void){
  const char *zTagName;   /* The (+|*|-)TAGNAME argument */
  const char *zTagValue;  /* Optional VALUE argument, or NULL if omitted */
  int artId;              /* Artifact that receives the tag */
  int eType;              /* 0=cancel, 1=one-shot, 2=propagating */

  db_must_be_within_tree();
  if( g.argc!=4 && g.argc!=5 ){
    usage("TAGNAME ARTIFACT-ID ?VALUE?");
  }
  zTagName = g.argv[2];
  /* The first character of the tag name selects the tag type */
  if( zTagName[0]=='+' ){
    eType = 1;
  }else if( zTagName[0]=='*' ){
    eType = 2;
  }else if( zTagName[0]=='-' ){
    eType = 0;
  }else{
    fossil_fatal("tag should begin with '+', '*', or '-'");
    return;
  }
  artId = name_to_rid(g.argv[3]);
  if( artId==0 ){
    fossil_fatal("no such object: %s", g.argv[3]);
  }
  g.markPrivate = content_is_private(artId);
  zTagValue = g.argc==5 ? g.argv[4] : 0;
  db_begin_transaction();
  tag_insert(zTagName, eType, zTagValue, -1, 0.0, artId);
  db_end_transaction(0);
}
/*
** Load a player character by name from the database.
**
** Returns a freshly allocated Character (caller owns it) with its
** player data populated, or NULL if the query could not be prepared.
** The whole load runs inside one transaction.
*/
Character *load_player_by_name(Connection *conn, const char *name)
{
  char buf[400];
  sql_stmt *stmt;

  db_begin_transaction();

  /* snprintf (was sprintf) so an overlong name cannot overflow buf */
  int len = snprintf(buf, sizeof buf,
      "select * from character natural join player where name='%s'",
      escape_sql_str(name));
  if (len < 0 || (size_t)len >= sizeof buf) {
    log_data("could not prepare sql statement");
    db_end_transaction();
    return 0;
  }

  if (sql_query(buf, len, &stmt) != SQL_OK) {
    log_data("could not prepare sql statement");
    /* bug fix: close the transaction opened above before bailing out;
    ** the original leaked an open transaction on this path */
    db_end_transaction();
    return 0;
  }

  Character *ch = new_char();
  ch->pc = new_player(conn);

  /* A row was found: copy its columns into the character */
  if (sql_step(stmt) != SQL_DONE) {
    load_player_columns(conn->account, ch, stmt);
  }

  if (sql_finalize(stmt) != SQL_OK) {
    log_data("unable to finalize statement");
  }

  load_char_objs(ch);
  load_char_affects(ch);

  db_end_transaction();
  return ch;
}
/*
** Implement the "fossil bundle append BUNDLE FILE..." command.  Add
** the named files into the BUNDLE.  Create the BUNDLE if it does not
** already exist.
*/
static void bundle_append_cmd(void){
  Blob content, hash;   /* File payload and its SHA1 checksum */
  int i;
  Stmt q;               /* Reusable INSERT statement, one row per file */

  verify_all_options();
  bundle_attach_file(g.argv[3], "b1", 1);
  db_prepare(&q,
    "INSERT INTO bblob(blobid, uuid, sz, delta, data, notes) "
    "VALUES(NULL, $uuid, $sz, NULL, $data, $filename)");
  db_begin_transaction();
  for(i=4; i<g.argc; i++){
    int sz;
    blob_read_from_file(&content, g.argv[i]);
    sz = blob_size(&content);
    sha1sum_blob(&content, &hash);
    /* Store the payload compressed, but record the uncompressed size */
    blob_compress(&content, &content);
    db_bind_text(&q, "$uuid", blob_str(&hash));
    db_bind_int(&q, "$sz", sz);
    db_bind_blob(&q, "$data", &content);
    db_bind_text(&q, "$filename", g.argv[i]);
    db_step(&q);
    db_reset(&q);
    blob_reset(&content);
    blob_reset(&hash);
  }
  db_end_transaction(0);
  db_finalize(&q);
}
/*
** COMMAND: reconstruct*
**
** Usage: %fossil reconstruct FILENAME DIRECTORY
**
** This command studies the artifacts (files) in DIRECTORY and
** reconstructs the fossil record from them. It places the new
** fossil repository in FILENAME. Subdirectories are read, files
** with leading '.' in the filename are ignored.
**
** See also: deconstruct, rebuild
*/
void reconstruct_cmd(void) {
  char *zPassword;   /* Initial admin password, reported at the end */

  if( g.argc!=4 ){
    usage("FILENAME DIRECTORY");
  }
  if( file_isdir(g.argv[3])!=1 ){
    fossil_print("\"%s\" is not a directory\n\n", g.argv[3]);
    usage("FILENAME DIRECTORY");
  }
  /* Create and open the new repository, then do the whole rebuild
  ** inside a single transaction */
  db_create_repository(g.argv[2]);
  db_open_repository(g.argv[2]);
  db_open_config(0);
  db_begin_transaction();
  db_initial_setup(0, 0, 0, 1);
  fossil_print("Reading files from directory \"%s\"...\n", g.argv[3]);
  recon_read_dir(g.argv[3]);
  fossil_print("\nBuilding the Fossil repository...\n");
  rebuild_db(0, 1, 1);
  reconstruct_private_table();
  /* Skip the verify_before_commit() step on a reconstruct. Most artifacts
  ** will have been changed and verification therefore takes a really, really
  ** long time.
  */
  verify_cancel();
  db_end_transaction(0);
  fossil_print("project-id: %s\n", db_get("project-code", 0));
  fossil_print("server-id: %s\n", db_get("server-code", 0));
  zPassword = db_text(0, "SELECT pw FROM user WHERE login=%Q", g.zLogin);
  fossil_print("admin-user: %s (initial password is \"%s\")\n", g.zLogin, zPassword);
}
// Insert history into DB worker thread void insert_db_history(void *data) { int i; int num_elems = (int)data; // For an unknown reason, the cm160 sometimes sends a value > 12 for month // -> in that case we use the last valid month received. static int last_valid_month = 0; printf("insert %d elems\n", num_elems); printf("insert into db...\n"); clock_t cStartClock = clock(); db_begin_transaction(); for(i=0; i<num_elems; i++) { unsigned char *frame = history[i]; struct record_data rec; decode_frame(frame, &rec); if(rec.month < 0 || rec.month > 12) rec.month = last_valid_month; else last_valid_month = rec.month; db_insert_hist(&rec); printf("\r %.1f%%", min(100, 100*((double)i/num_elems))); fflush(stdout); } db_update_status(); db_end_transaction(); printf("\rinsert into db... 100%%\n"); fflush(stdout); printf("update db in %4.2f seconds\n", (clock() - cStartClock) / (double)CLOCKS_PER_SEC); }
/*
** Clear the recent_flag (and stamp the new sequence number) on every
** message whose id appears in `slices`, a list of comma-separated
** message_idnr strings.  Returns the total number of rows changed, or
** DM_EQUERY on SQL error.  The slices list is freed before returning.
*/
static long long int _update_recent(volatile GList *slices, uint64_t seq)
{
  INIT_QUERY;
  Connection_T c;
  volatile long long int count = 0;

  /* Empty id-list: nothing to update, nothing to free */
  if (! (slices = g_list_first(slices)))
    return count;

  c = db_con_get();
  TRY
    db_begin_transaction(c);
    while (slices) {
      /* Each slice payload is a pre-built "id,id,..." fragment */
      Connection_execute(c,
          "UPDATE %smessages SET recent_flag = 0, seq = %" PRIu64
          " WHERE recent_flag = 1 AND seq < %" PRIu64
          " AND message_idnr IN (%s)",
          DBPFX, seq, seq, (gchar *)slices->data);
      count += Connection_rowsChanged(c);
      if (! g_list_next(slices))
        break;
      slices = g_list_next(slices);
    }
    db_commit_transaction(c);
  CATCH(SQLException)
    LOG_SQLERROR;
    count = DM_EQUERY;
    db_rollback_transaction(c);
  FINALLY
    db_con_close(c);
    g_list_destroy(slices);
  END_TRY;

  return count;
}
/*
** Create a new phantom with the given UUID and return its artifact ID.
**
** A phantom is a blob row with size -1 and NULL content: the artifact
** is known by hash but its content has not been received yet.  The new
** rid is recorded in the phantom table, routed to either the private
** or the unclustered table, and remembered in the in-memory
** missing-content cache.  Returns 0 if the UUID is shunned.
*/
int content_new(const char *zUuid, int isPrivate){
  int rid;
  /* Statements are static so repeat calls reuse the prepared SQL */
  static Stmt s1, s2, s3;

  assert( g.repositoryOpen );
  db_begin_transaction();
  if( uuid_is_shunned(zUuid) ){
    db_end_transaction(0);
    return 0;
  }
  db_static_prepare(&s1,
    "INSERT INTO blob(rcvid,size,uuid,content)"
    "VALUES(0,-1,:uuid,NULL)"
  );
  db_bind_text(&s1, ":uuid", zUuid);
  db_exec(&s1);
  rid = db_last_insert_rowid();
  db_static_prepare(&s2,
    "INSERT INTO phantom VALUES(:rid)"
  );
  db_bind_int(&s2, ":rid", rid);
  db_exec(&s2);
  if( g.markPrivate || isPrivate ){
    /* Private phantoms must never be advertised in clusters */
    db_multi_exec("INSERT INTO private VALUES(%d)", rid);
  }else{
    db_static_prepare(&s3,
      "INSERT INTO unclustered VALUES(:rid)"
    );
    db_bind_int(&s3, ":rid", rid);
    db_exec(&s3);
  }
  bag_insert(&contentCache.missing, rid);
  db_end_transaction(0);
  return rid;
}
/*
** Update an existing Dive row with the supplied values.
**
** Runs inside its own transaction when handle_transactions is set.
** Returns TRUE on success, FALSE (after rollback) on SQL error.
*/
gboolean dive_db_update(gint dive_id,gchar *dive_datetime,gulong dive_duration,gdouble dive_maxdepth,gdouble dive_mintemp,gdouble dive_maxtemp,gchar *dive_notes,gint site_id,gdouble dive_visibility,gdouble dive_weight)
{
  gint rc;
  gchar *sqlcmd,*sqlErrMsg=NULL;
  gboolean rval = TRUE;

  /* Bug fix: dive_datetime is now escaped with %q (was %s).  With %s an
  ** embedded single quote breaks the statement and allows SQL injection;
  ** dive_notes was already using %q. */
  sqlcmd=sqlite3_mprintf(
    "UPDATE Dive SET dive_datetime='%q',dive_duration=%lu,dive_maxdepth=%f,dive_mintemp=%f,dive_maxtemp=%f,"
    "dive_notes='%q',site_id=%d,dive_visibility=%f,dive_weight=%f WHERE dive_id=%d;",
    dive_datetime, dive_duration, dive_maxdepth, dive_mintemp, dive_maxtemp,
    dive_notes,site_id, dive_visibility, dive_weight, dive_id
  );
  if(handle_transactions) db_begin_transaction();
  rc=sqlite3_exec(logbook_db,sqlcmd,NULL,0,&sqlErrMsg);
  if(rc!=SQLITE_OK) {
    g_log(G_LOG_DOMAIN, G_LOG_LEVEL_ERROR,"Error in dive_db_update()\nCode=%d\nQuery='%s'\nError Message='%s'\n",rc,sqlcmd,sqlErrMsg);
    sqlite3_free(sqlErrMsg);
    rval=FALSE;
    if(handle_transactions) db_rollback_transaction();
  }
  else {
    db_not_saved();
    if(handle_transactions) db_commit_transaction();
    preferences_load_template_dive_number();
  }
  sqlite3_free(sqlcmd);
  return rval;
}
/*
** Load a player character by numeric id from the database.
**
** Returns a freshly allocated Character (caller owns it) with its
** player data populated, or NULL if the query could not be prepared.
** The whole load runs inside one transaction.
*/
Character *load_player_by_id(Connection *conn, identifier_t charId)
{
  char buf[400];
  sql_stmt *stmt;

  db_begin_transaction();

  /* snprintf (was sprintf) for defensive bounds on the fixed buffer */
  int len = snprintf(buf, sizeof buf,
      "select * from character join player on playerId=characterId"
      " where characterId=%" PRId64, charId);

  if (sql_query(buf, len, &stmt) != SQL_OK) {
    log_data("could not prepare sql statement");
    /* bug fix: close the transaction opened above before bailing out;
    ** the original leaked an open transaction on this path */
    db_end_transaction();
    return 0;
  }

  Character *ch = new_char();
  ch->pc = new_player(conn);

  /* A row was found: copy its columns into the character */
  if (sql_step(stmt) != SQL_DONE) {
    load_player_columns(conn->account, ch, stmt);
  }

  if (sql_finalize(stmt) != SQL_OK) {
    log_data("unable to finalize statement");
  }

  load_char_objs(ch);
  load_char_affects(ch);

  db_end_transaction();
  return ch;
}
/*
** Make `scriptname` the single active sieve script for the user:
** deactivate all of the user's scripts, then activate the named one,
** both inside one transaction.  Returns TRUE on success, FALSE after
** rollback on SQL error.
*/
int dm_sievescript_activate(uint64_t user_idnr, char *scriptname)
{
  Connection_T c;
  PreparedStatement_T s;
  volatile gboolean t = FALSE;

  assert(scriptname);

  c = db_con_get();
  TRY
    db_begin_transaction(c);
    s = db_stmt_prepare(c,"UPDATE %ssievescripts SET active = 0 WHERE owner_idnr = ? ", DBPFX);
    db_stmt_set_u64(s, 1, user_idnr);
    db_stmt_exec(s);
    /* Reset the connection so a second statement can be prepared */
    db_con_clear(c);
    s = db_stmt_prepare(c,"UPDATE %ssievescripts SET active = 1 WHERE owner_idnr = ? AND name = ?", DBPFX);
    db_stmt_set_u64(s, 1, user_idnr);
    db_stmt_set_str(s, 2, scriptname);
    db_stmt_exec(s);
    db_commit_transaction(c);
    t = TRUE;
  CATCH(SQLException)
    LOG_SQLERROR;
    db_rollback_transaction(c);
  FINALLY
    db_con_close(c);
  END_TRY;

  return t;
}
/* fossil bundle import BUNDLE ?OPTIONS?
**
** Attempt to import the changes contained in BUNDLE.  Make the change
** private so that they do not sync.
**
** OPTIONS:
**   --force     Import even if the project-code does not match
**   --publish   Imported changes are not private
*/
static void bundle_import_cmd(void){
  int forceFlag = find_option("force","f",0)!=0;
  int isPriv = find_option("publish",0,0)==0;
  char *zMissingDeltas;   /* External delta bases absent from this repo */

  verify_all_options();
  if ( g.argc!=4 ) usage("import BUNDLE ?OPTIONS?");
  bundle_attach_file(g.argv[3], "b1", 1);

  /* Only import a bundle that was generated from a repo with the same
  ** project code, unless the --force flag is true */
  if( !forceFlag ){
    if( !db_exists("SELECT 1 FROM config, bconfig"
                   " WHERE config.name='project-code'"
                   " AND bconfig.bcname='project-code'"
                   " AND config.value=bconfig.bcvalue;") ){
      fossil_fatal("project-code in the bundle does not match the "
                   "repository project code. (override with --force).");
    }
  }

  /* If the bundle contains deltas with a basis that is external to the
  ** bundle and those external basis files are missing from the local
  ** repo, then the delta encodings cannot be decoded and the bundle cannot
  ** be extracted. */
  zMissingDeltas = db_text(0,
      "SELECT group_concat(substr(delta,1,10),' ')"
      " FROM bblob"
      " WHERE typeof(delta)='text' AND length(delta)=40"
      " AND NOT EXISTS(SELECT 1 FROM blob WHERE uuid=bblob.delta)");
  if( zMissingDeltas && zMissingDeltas[0] ){
    fossil_fatal("delta basis artifacts not found in repository: %s",
                 zMissingDeltas);
  }

  db_begin_transaction();
  /* bix maps each not-yet-present bundle blob to its (internal) delta
  ** basis; got collects the rids actually inserted */
  db_multi_exec(
    "CREATE TEMP TABLE bix("
    " blobid INTEGER PRIMARY KEY,"
    " delta INTEGER"
    ");"
    "CREATE INDEX bixdelta ON bix(delta);"
    "INSERT INTO bix(blobid,delta)"
    " SELECT blobid,"
    " CASE WHEN typeof(delta)=='integer'"
    " THEN delta ELSE 0 END"
    " FROM bblob"
    " WHERE NOT EXISTS(SELECT 1 FROM blob WHERE uuid=bblob.uuid AND size>=0);"
    "CREATE TEMP TABLE got(rid INTEGER PRIMARY KEY ON CONFLICT IGNORE);"
  );
  manifest_crosslink_begin();
  bundle_import_elements(0, 0, isPriv);
  manifest_crosslink_end(0);
  describe_artifacts_to_stdout("IN got", "Imported content:");
  db_end_transaction(0);
}
/* ** COMMAND: test-detach ?REPOSITORY? ** ** Change the project-code and make other changes in order to prevent ** the repository from ever again pushing or pulling to other ** repositories. Used to create a "test" repository for development ** testing by cloning a working project repository. */ void test_detach_cmd(void){ db_find_and_open_repository(0, 2); db_begin_transaction(); db_multi_exec( "DELETE FROM config WHERE name='last-sync-url';" "UPDATE config SET value=lower(hex(randomblob(20)))" " WHERE name='project-code';" "UPDATE config SET value='detached-' || value" " WHERE name='project-name' AND value NOT GLOB 'detached-*';" ); db_end_transaction(0); }
/*
** COMMAND: test-subtree
**
** Usage: %fossil test-subtree ?OPTIONS?
**
** Show the subset of check-ins that match the supplied options.  This
** command is used to test the subtree_from_options() subroutine in the
** implementation and does not really have any other practical use that
** we know of.
**
** Options:
**   --branch BRANCH      Include only check-ins on BRANCH
**   --from TAG           Start the subtree at TAG
**   --to TAG             End the subtree at TAG
**   --checkin TAG        The subtree is the single check-in TAG
**   --all                Include FILE and TAG artifacts
**   --exclusive          Include FILES exclusively on check-ins
*/
void test_subtree_cmd(void){
  int bAll = find_option("all",0,0)!=0;
  int bExcl = find_option("exclusive",0,0)!=0;

  db_find_and_open_repository(0,0);
  /* Everything happens in a TEMP table inside a transaction that is
  ** rolled back (db_end_transaction(1)), so the repo is untouched */
  db_begin_transaction();
  db_multi_exec("CREATE TEMP TABLE tobundle(rid INTEGER PRIMARY KEY);");
  subtree_from_arguments("tobundle");
  verify_all_options();
  if( bAll ) find_checkin_associates("tobundle",bExcl);
  describe_artifacts_to_stdout("IN tobundle", 0);
  db_end_transaction(1);
}
/*
** Attempt to convert more full-text blobs into delta-blobs for
** storage efficiency.
**
** Pass 1 walks check-in manifests newest-first; pass 2 walks file
** versions grouped by filename, newest-first.  In each pass, a blob
** that is not yet a delta is deltified against the previous entry,
** falling back to the first ("top") entry of its chain.
*/
static void extra_deltification(void){
  Stmt q;
  int idTop, idPrev, idCur;
  int fnidPrev, fnidCur;

  db_begin_transaction();

  /* Pass 1: check-in manifests along the timeline */
  db_prepare(&q,
     "SELECT rid FROM event, blob"
     " WHERE blob.rid=event.objid"
     " AND event.type='ci'"
     " AND NOT EXISTS(SELECT 1 FROM delta WHERE rid=blob.rid)"
     " ORDER BY event.mtime DESC"
  );
  idTop = idPrev = 0;
  while( db_step(&q)==SQLITE_ROW ){
    idCur = db_column_int(&q, 0);
    if( idTop==0 ){
      idTop = idPrev = idCur;
      continue;
    }
    if( content_deltify(idCur, idPrev, 0)==0 && idPrev!=idTop ){
      content_deltify(idCur, idTop, 0);
    }
    idPrev = idCur;
  }
  db_finalize(&q);

  /* Pass 2: successive versions of each file */
  db_prepare(&q,
     "SELECT blob.rid, mlink.fnid FROM blob, mlink, plink"
     " WHERE NOT EXISTS(SELECT 1 FROM delta WHERE rid=blob.rid)"
     " AND mlink.fid=blob.rid"
     " AND mlink.mid=plink.cid"
     " AND plink.cid=mlink.mid"
     " ORDER BY mlink.fnid, plink.mtime DESC"
  );
  fnidPrev = 0;
  while( db_step(&q)==SQLITE_ROW ){
    idCur = db_column_int(&q, 0);
    fnidCur = db_column_int(&q, 1);
    if( fnidPrev!=fnidCur ){
      /* New filename group: restart the delta chain */
      fnidPrev = fnidCur;
      idTop = idPrev = idCur;
      continue;
    }
    if( content_deltify(idCur, idPrev, 0)==0 && idPrev!=idTop ){
      content_deltify(idCur, idTop, 0);
    }
    idPrev = idCur;
  }
  db_finalize(&q);

  db_end_transaction(0);
}
/*
** Rename a user's sieve script from `scriptname` to `newname`.
**
** If a script called `newname` already exists it is deleted first and
** the renamed script inherits its active flag, all inside a single
** transaction.  Returns the commit result on success, DM_EQUERY after
** rollback on SQL error.
*/
int dm_sievescript_rename(uint64_t user_idnr, char *scriptname, char *newname)
{
  int active = 0;
  Connection_T c;
  ResultSet_T r;
  PreparedStatement_T s;
  volatile int t = FALSE;

  assert(scriptname);

  /*
   * According to the draft RFC, a script with the same
   * name as an existing script should *atomically* replace it.
   */
  c = db_con_get();
  TRY
    db_begin_transaction(c);
    s = db_stmt_prepare(c,"SELECT active FROM %ssievescripts WHERE owner_idnr = ? AND name = ?", DBPFX);
    db_stmt_set_u64(s,1, user_idnr);
    db_stmt_set_str(s,2, newname);
    r = db_stmt_query(s);
    if (db_result_next(r)) {
      /* Target name exists: remember its active flag and remove it */
      active = db_result_get_int(r,0);
      db_con_clear(c);
      s = db_stmt_prepare(c, "DELETE FROM %ssievescripts WHERE owner_idnr = ? AND name = ?", DBPFX);
      db_stmt_set_u64(s, 1, user_idnr);
      db_stmt_set_str(s, 2, newname);
      db_stmt_exec(s);
    }
    db_con_clear(c);
    s = db_stmt_prepare(c, "UPDATE %ssievescripts SET name = ?, active = ? WHERE owner_idnr = ? AND name = ?", DBPFX);
    db_stmt_set_str(s, 1, newname);
    db_stmt_set_int(s, 2, active);
    db_stmt_set_u64(s, 3, user_idnr);
    db_stmt_set_str(s, 4, scriptname);
    db_stmt_exec(s);
    t = db_commit_transaction(c);
  CATCH(SQLException)
    LOG_SQLERROR;
    t = DM_EQUERY;
    db_rollback_transaction(c);
  FINALLY
    db_con_close(c);
  END_TRY;

  return t;
}
/*
** COMMAND: scrub*
** %fossil scrub ?OPTIONS? ?REPOSITORY?
**
** The command removes sensitive information (such as passwords) from a
** repository so that the repository can be sent to an untrusted reader.
**
** By default, only passwords are removed.  However, if the --verily option
** is added, then private branches, concealed email addresses, IP
** addresses of correspondents, and similar privacy-sensitive fields
** are also purged.  If the --private option is used, then only private
** branches are removed and all other information is left intact.
**
** This command permanently deletes the scrubbed information. THE EFFECTS
** OF THIS COMMAND ARE IRREVERSIBLE. USE WITH CAUTION!
**
** The user is prompted to confirm the scrub unless the --force option
** is used.
**
** Options:
**   --force     do not prompt for confirmation
**   --private   only private branches are removed from the repository
**   --verily    scrub real thoroughly (see above)
*/
void scrub_cmd(void){
  int bVerily = find_option("verily",0,0)!=0;
  int bForce = find_option("force", "f", 0)!=0;
  int privateOnly = find_option("private",0,0)!=0;
  int bNeedRebuild = 0;   /* True if private content was removed */

  db_find_and_open_repository(OPEN_ANY_SCHEMA, 2);
  /* Reopen without the usual helper attachments before scrubbing */
  db_close(1);
  db_open_repository(g.zRepositoryName);

  /* Interactive confirmation, unless --force */
  if( !bForce ){
    Blob ans;
    char cReply;
    blob_zero(&ans);
    prompt_user(
       "Scrubbing the repository will permanently delete information.\n"
       "Changes cannot be undone. Continue (y/N)? ", &ans);
    cReply = blob_str(&ans)[0];
    if( cReply!='y' && cReply!='Y' ){
      fossil_exit(1);
    }
  }
  db_begin_transaction();
  if( privateOnly || bVerily ){
    bNeedRebuild = db_exists("SELECT 1 FROM private");
    delete_private_content();
  }
  if( !privateOnly ){
    db_multi_exec(
      "UPDATE user SET pw='';"
      "DELETE FROM config WHERE name GLOB 'last-sync-*';"
      "DELETE FROM config WHERE name GLOB 'peer-*';"
      "DELETE FROM config WHERE name GLOB 'login-group-*';"
      "DELETE FROM config WHERE name GLOB 'skin:*';"
      "DELETE FROM config WHERE name GLOB 'subrepo:*';"
    );
    if( bVerily ){
      db_multi_exec(
        "DELETE FROM concealed;"
        "UPDATE rcvfrom SET ipaddr='unknown';"
        "DROP TABLE IF EXISTS accesslog;"
        "UPDATE user SET photo=NULL, info='';"
      );
    }
  }
  if( !bNeedRebuild ){
    db_end_transaction(0);
    /* Reclaim the space freed by the deletions */
    db_multi_exec("VACUUM;");
  }else{
    /* Private content was removed: a rebuild is required */
    rebuild_db(0, 1, 0);
    db_end_transaction(0);
  }
}
/*
** Write the given item to the items table (assigning a new id first if
** the item has none), then update its metadata and search folders.
** All of it runs inside one transaction.
*/
void
db_item_update (itemPtr item)
{
  sqlite3_stmt *stmt;
  gint res;

  debug2 (DEBUG_DB, "update of item \"%s\" (id=%lu)", item->title, item->id);
  debug_start_measurement (DEBUG_DB);

  db_begin_transaction ();

  /* No id yet means the item is new: allocate an id before the UPDATE */
  if (!item->id) {
    db_item_set_id (item);
    debug1(DEBUG_DB, "insert into table \"items\": \"%s\"", item->title);
  }

  /* Update the item... */
  stmt = db_get_statement ("itemUpdateStmt");
  sqlite3_bind_text (stmt, 1, item->title, -1, SQLITE_TRANSIENT);
  sqlite3_bind_int (stmt, 2, item->readStatus?1:0);
  sqlite3_bind_int (stmt, 3, item->updateStatus?1:0);
  sqlite3_bind_int (stmt, 4, item->popupStatus?1:0);
  sqlite3_bind_int (stmt, 5, item->flagStatus?1:0);
  sqlite3_bind_text (stmt, 6, item->source, -1, SQLITE_TRANSIENT);
  sqlite3_bind_text (stmt, 7, item->sourceId, -1, SQLITE_TRANSIENT);
  sqlite3_bind_int (stmt, 8, item->validGuid?1:0);
  sqlite3_bind_text (stmt, 9, item->description, -1, SQLITE_TRANSIENT);
  sqlite3_bind_int (stmt, 10, item->time);
  sqlite3_bind_text (stmt, 11, item->commentFeedId, -1, SQLITE_TRANSIENT);
  sqlite3_bind_int (stmt, 12, item->isComment?1:0);
  sqlite3_bind_int (stmt, 13, item->id);
  sqlite3_bind_int (stmt, 14, item->parentItemId);
  sqlite3_bind_text (stmt, 15, item->nodeId, -1, SQLITE_TRANSIENT);
  sqlite3_bind_text (stmt, 16, item->parentNodeId, -1, SQLITE_TRANSIENT);

  res = sqlite3_step (stmt);

  if (SQLITE_DONE != res)
    g_warning ("item update failed (error code=%d, %s)", res, sqlite3_errmsg (db));

  /* NOTE(review): stmt comes from db_get_statement(); finalizing it here
  ** assumes that helper hands out a fresh statement each call and does
  ** not cache it for reuse -- confirm against db_get_statement's
  ** contract (a cached statement would need sqlite3_reset instead). */
  sqlite3_finalize (stmt);

  db_item_metadata_update (item);
  db_item_search_folders_update (item);

  db_end_transaction ();

  debug_end_measurement (DEBUG_DB, "item update");
}
/*
** Insert a new Dive row and, when a template dive number is configured,
** copy that template's buddies, equipment, types and tanks onto the
** new dive.  Returns the new dive_id, or 0 on failure (transaction is
** rolled back when handle_transactions is set).
*/
gint dive_db_insert(gchar *dive_datetime,gulong dive_duration,gdouble dive_maxdepth,gdouble dive_mintemp,gdouble dive_maxtemp,
  gchar *dive_notes,gint site_id,gdouble dive_visibility, gdouble dive_weight)
{
  gint rc;
  gchar *sqlcmd,*sqlErrMsg=NULL;
  gint new_dive_id=0;

  /* Bug fix: dive_datetime is now escaped with %q (was %s).  With %s an
  ** embedded single quote breaks the statement and allows SQL injection;
  ** dive_notes was already using %q. */
  sqlcmd=sqlite3_mprintf(
    "INSERT INTO Dive (dive_datetime,dive_duration,dive_maxdepth,dive_mintemp,dive_maxtemp,dive_notes,site_id,dive_visibility,dive_weight) VALUES ('%q',%lu,%f,%f,%f,'%q',%d,%f,%f)",
    dive_datetime, dive_duration,dive_maxdepth,dive_mintemp,
    dive_maxtemp,dive_notes,site_id,dive_visibility,dive_weight
  );
  if(handle_transactions) db_begin_transaction();
  rc=sqlite3_exec (logbook_db, sqlcmd, NULL, 0, &sqlErrMsg);
  if(rc!=SQLITE_OK) {
    g_log(G_LOG_DOMAIN, G_LOG_LEVEL_ERROR,"Error in dive_db_insert()\nCode=%d\nQuery='%s'\nError Message='%s'\n",rc,sqlcmd,sqlErrMsg);
    sqlite3_free(sqlErrMsg);
  }
  else {
    new_dive_id=sqlite3_last_insert_rowid(logbook_db);
    db_not_saved();
    /* insert child entities for template */
    if(dive_db_template_dive_number>0) {
      sqlite3_free (sqlcmd);
      sqlcmd =sqlite3_mprintf(
        "INSERT INTO Dive_Buddy (dive_id,buddy_id) SELECT %d,buddy_id FROM Dive_Buddy JOIN Dive ON Dive_Buddy.dive_id=Dive.dive_id WHERE dive_number=%ld;"
        "INSERT INTO Dive_Equipment (dive_id,equipment_id) SELECT %d,equipment_id FROM Dive_Equipment JOIN Dive ON Dive_Equipment.dive_id=Dive.dive_id WHERE dive_number=%ld;"
        "INSERT INTO Dive_Type (dive_id,type_id) SELECT %d,type_id FROM Dive_Type JOIN Dive ON Dive_Type.dive_id=Dive.dive_id WHERE dive_number=%ld;"
        "INSERT INTO Dive_Tank (dive_id,tank_id,dive_tank_avg_depth,dive_tank_O2,dive_tank_He,dive_tank_stime,dive_tank_etime,dive_tank_spressure,dive_tank_epressure) SELECT %d,tank_id,%f,dive_tank_O2,dive_tank_He,%d,%d,dive_tank_spressure,dive_tank_epressure FROM Dive_Tank JOIN Dive ON Dive_Tank.dive_id=Dive.dive_id WHERE dive_number=%ld",
        new_dive_id, dive_db_template_dive_number,
        new_dive_id, dive_db_template_dive_number,
        new_dive_id, dive_db_template_dive_number,
        new_dive_id, NULL_DEPTH, NULL_TIME, NULL_TIME, dive_db_template_dive_number
      );
      rc=sqlite3_exec (logbook_db, sqlcmd, NULL, 0, &sqlErrMsg);
      if(rc != SQLITE_OK) {
        g_log(G_LOG_DOMAIN,G_LOG_LEVEL_ERROR,"Error in dive_db_insert()\nCode=%d\nError Message='%s'\n",rc,sqlErrMsg);
        sqlite3_free(sqlErrMsg);
        new_dive_id=0;
      }
    }
  }
  if(handle_transactions) {
    /* Commit only if the dive (and any template copies) succeeded */
    if(new_dive_id) db_commit_transaction();
    else db_rollback_transaction();
    preferences_load_template_dive_number();
  }
  sqlite3_free(sqlcmd);
  return new_dive_id;
}
/*
** Load a vfile from a record ID.
**
** Populates the vfile table for check-in `vid` from its manifest:
** one row per file, with exe/symlink permission bits and the blob rid
** of each file's content.  A no-op if vfile already has rows for vid.
** Files whose content is shunned or missing are skipped with a warning.
*/
void load_vfile_from_rid(int vid){
  int rid, size;
  Stmt ins, ridq;
  Manifest *p;
  ManifestFile *pFile;

  /* Already loaded: nothing to do */
  if( db_exists("SELECT 1 FROM vfile WHERE vid=%d", vid) ){
    return;
  }

  db_begin_transaction();
  p = manifest_get(vid, CFTYPE_MANIFEST, 0);
  if( p==0 ) {
    /* Not a valid manifest: roll back and bail */
    db_end_transaction(1);
    return;
  }
  db_prepare(&ins,
    "INSERT INTO vfile(vid,isexe,islink,rid,mrid,pathname) "
    " VALUES(:vid,:isexe,:islink,:id,:id,:name)");
  db_prepare(&ridq, "SELECT rid,size FROM blob WHERE uuid=:uuid");
  db_bind_int(&ins, ":vid", vid);
  manifest_file_rewind(p);
  while( (pFile = manifest_file_next(p,0))!=0 ){
    if( pFile->zUuid==0 || uuid_is_shunned(pFile->zUuid) ) continue;
    /* Resolve the file's hash to a blob rid and stored size */
    db_bind_text(&ridq, ":uuid", pFile->zUuid);
    if( db_step(&ridq)==SQLITE_ROW ){
      rid = db_column_int(&ridq, 0);
      size = db_column_int(&ridq, 1);
    }else{
      rid = 0;
      size = 0;
    }
    db_reset(&ridq);
    if( rid==0 || size<0 ){
      /* Phantom or unknown content: skip but keep going */
      fossil_warning("content missing for %s", pFile->zName);
      continue;
    }
    db_bind_int(&ins, ":isexe", ( manifest_file_mperm(pFile)==PERM_EXE ));
    db_bind_int(&ins, ":id", rid);
    db_bind_text(&ins, ":name", pFile->zName);
    db_bind_int(&ins, ":islink", ( manifest_file_mperm(pFile)==PERM_LNK ));
    db_step(&ins);
    db_reset(&ins);
  }
  db_finalize(&ridq);
  db_finalize(&ins);
  manifest_destroy(p);
  db_end_transaction(0);
}
/*
** Change the storage of rid so that it is a delta of srcid.
**
** If rid is already a delta from some other place then no
** conversion occurs and this is a no-op unless force==1.
**
** Never generate a delta that carries a private artifact into a public
** artifact.  Otherwise, when we go to send the public artifact on a
** sync operation, the other end of the sync will never be able to receive
** the source of the delta.  It is OK to delta private->private and
** public->private and public->public.  Just no private->public delta.
**
** If srcid is a delta that depends on rid, then srcid is
** converted to undeltaed text.
**
** If either rid or srcid contain less than 50 bytes, or if the
** resulting delta does not achieve a compression of at least 25%
** the rid is left untouched.
**
** Return 1 if a delta is made and 0 if no delta occurs.
*/
int content_deltify(int rid, int srcid, int force){
  int s;
  Blob data, src, delta;
  Stmt s1, s2;
  int rc = 0;

  if( srcid==rid ) return 0;
  if( !force && findSrcid(rid)>0 ) return 0;
  if( content_is_private(srcid) && !content_is_private(rid) ){
    return 0;
  }
  /* Walk srcid's delta chain; if it passes through rid, break the
  ** cycle by expanding srcid back to full text first */
  s = srcid;
  while( (s = findSrcid(s))>0 ){
    if( s==rid ){
      content_undelta(srcid);
      break;
    }
  }
  content_get(srcid, &src);
  if( blob_size(&src)<50 ){
    blob_reset(&src);
    return 0;
  }
  content_get(rid, &data);
  if( blob_size(&data)<50 ){
    blob_reset(&src);
    blob_reset(&data);
    return 0;
  }
  blob_delta_create(&src, &data, &delta);
  /* Only keep the delta if it saves at least 25% */
  if( blob_size(&delta) <= blob_size(&data)*0.75 ){
    blob_compress(&delta, &delta);
    db_prepare(&s1, "UPDATE blob SET content=:data WHERE rid=%d", rid);
    db_prepare(&s2, "REPLACE INTO delta(rid,srcid)VALUES(%d,%d)", rid, srcid);
    db_bind_blob(&s1, ":data", &delta);
    db_begin_transaction();
    db_exec(&s1);
    db_exec(&s2);
    db_end_transaction(0);
    db_finalize(&s1);
    db_finalize(&s2);
    verify_before_commit(rid);
    rc = 1;
  }
  blob_reset(&src);
  blob_reset(&data);
  blob_reset(&delta);
  return rc;
}
/*
** COMMAND: test-create-clusters
**
** Create clusters for all unclustered artifacts if the number of unclustered
** artifacts exceeds the current clustering threshold.
*/
void test_createcluster_cmd(void){
  /* Open either the explicitly named repository or the default one */
  if( g.argc!=3 ){
    db_find_and_open_repository(0, 0);
    if( g.argc!=2 ){
      usage("?REPOSITORY-FILENAME?");
    }
    db_close(1);
    db_open_repository(g.zRepositoryName);
  }else{
    db_open_repository(g.argv[2]);
  }

  db_begin_transaction();
  create_cluster();
  db_end_transaction(0);
}
/*
** Create the attribute table for the flowline vector map, link it to
** layer 1, create the key index, and grant read access.
**
** On return *f_info holds the field info and *driver the opened DB
** driver (left open inside an un-committed transaction; the caller is
** responsible for committing and closing).  Fatal error on any DB
** failure except index creation, which only warns.
*/
static void create_table(struct Map_info *flowline_vec,
                         struct field_info **f_info, dbDriver ** driver,
                         int write_scalar, int use_sampled_map)
{
    dbString sql;
    char buf[200];
    dbDriver *drvr;
    struct field_info *fi;

    db_init_string(&sql);
    fi = Vect_default_field_info(flowline_vec, 1, NULL, GV_1TABLE);
    *f_info = fi;
    Vect_map_add_dblink(flowline_vec, 1, NULL, fi->table, GV_KEY_COLUMN,
                        fi->database, fi->driver);
    drvr = db_start_driver_open_database(fi->driver,
                                         Vect_subst_var(fi->database,
                                                        flowline_vec));
    if (drvr == NULL) {
        G_fatal_error(_("Unable to open database <%s> by driver <%s>"),
                      Vect_subst_var(fi->database, flowline_vec), fi->driver);
    }
    db_set_error_handler_driver(drvr);

    *driver = drvr;
    /* snprintf (was sprintf): fi->table is derived from the map name and
    ** could otherwise overflow the fixed 200-byte buffer */
    snprintf(buf, sizeof(buf),
             "create table %s (cat integer, velocity double precision",
             fi->table);
    db_set_string(&sql, buf);
    /* Optional columns depending on requested outputs */
    if (write_scalar)
        db_append_string(&sql, ", input double precision");
    if (use_sampled_map)
        db_append_string(&sql, ", sampled double precision");
    db_append_string(&sql, ")");

    db_begin_transaction(drvr);
    /* Create table */
    if (db_execute_immediate(drvr, &sql) != DB_OK) {
        G_fatal_error(_("Unable to create table: %s"), db_get_string(&sql));
    }

    if (db_create_index2(drvr, fi->table, fi->key) != DB_OK)
        G_warning(_("Unable to create index for table <%s>, key <%s>"),
                  fi->table, fi->key);
    /* Grant */
    if (db_grant_on_table
        (drvr, fi->table, DB_PRIV_SELECT, DB_GROUP | DB_PUBLIC) != DB_OK) {
        G_fatal_error(_("Unable to grant privileges on table <%s>"),
                      fi->table);
    }
}
/*
** Repopulate the ticket table
**
** Recreate the TICKET table, then replay every "tkt-UUID" tag found in
** the tag table, rebuilding one ticket entry per plausibly valid UUID.
*/
void ticket_rebuild(void){
  Stmt tagQuery;

  ticket_create_table(1);
  db_begin_transaction();
  db_prepare(&tagQuery, "SELECT tagname FROM tag WHERE tagname GLOB 'tkt-*'");
  while( db_step(&tagQuery)==SQLITE_ROW ){
    /* Strip the leading "tkt-" to get the candidate ticket UUID */
    const char *zUuid = db_column_text(&tagQuery, 0) + 4;
    int nUuid = strlen(zUuid);
    /* Only rebuild entries whose suffix looks like a real hex UUID */
    if( nUuid>=20 && validate16(zUuid, nUuid) ){
      ticket_rebuild_entry(zUuid);
    }
  }
  db_finalize(&tagQuery);
  db_end_transaction(0);
}
/*
** Refresh the cached info counters of mailbox state M from the
** database.  Returns DM_SUCCESS, or DM_EQUERY on SQL error.
*/
int MailboxState_info(T M)
{
  volatile int t = DM_SUCCESS;
  Connection_T c = db_con_get();
  TRY
    db_begin_transaction(c);
    db_getmailbox_info(M, c);
  CATCH(SQLException)
    LOG_SQLERROR;
    t = DM_EQUERY;
  FINALLY
    /* Read-only work: the transaction is always committed, even after
    ** an exception, before the connection goes back to the pool */
    db_commit_transaction(c);
    db_con_close(c);
  END_TRY;
  return t;
}
END_TEST

/* Verify that db_last_insert_rowid() reports a non-zero rowid for a
** row inserted while a transaction is still open. */
START_TEST(test_get_rowid_in_transaction)
{
  db_begin_transaction();
  if ( sql_exec( "insert into "DBNAME"(name,intval) values('testA', '5')") != SQLITE_OK) {
    fail( "could not insert to test table" );
  }
  int id = db_last_insert_rowid( );
  fail_if(id == 0);
  db_end_transaction();
}
/*
** Store a sieve script for the user under `scriptname`, replacing any
** existing script of the same name, and mark it active.  Everything
** runs inside one transaction.  Returns the commit result on success,
** DM_EQUERY after rollback on SQL error.
*/
int dm_sievescript_add(uint64_t user_idnr, char *scriptname, char *script)
{
  Connection_T c;
  ResultSet_T r;
  PreparedStatement_T s;
  volatile int t = FALSE;

  assert(scriptname);

  c = db_con_get();
  TRY
    db_begin_transaction(c);
    s = db_stmt_prepare(c,"SELECT COUNT(*) FROM %ssievescripts WHERE owner_idnr = ? AND name = ?", DBPFX);
    db_stmt_set_u64(s, 1, user_idnr);
    db_stmt_set_str(s, 2, scriptname);
    r = db_stmt_query(s);
    if (db_result_next(r)) {
      /* A script of this name exists: remove it so the INSERT below
      ** acts as an atomic replace */
      db_con_clear(c);
      s = db_stmt_prepare(c,"DELETE FROM %ssievescripts WHERE owner_idnr = ? AND name = ?", DBPFX);
      db_stmt_set_u64(s, 1, user_idnr);
      db_stmt_set_str(s, 2, scriptname);
      db_stmt_exec(s);
    }
    db_con_clear(c);
    s = db_stmt_prepare(c,"INSERT INTO %ssievescripts (owner_idnr, name, script, active) VALUES (?,?,?,1)", DBPFX);
    db_stmt_set_u64(s, 1, user_idnr);
    db_stmt_set_str(s, 2, scriptname);
    db_stmt_set_blob(s, 3, script, strlen(script));
    db_stmt_exec(s);
    t = db_commit_transaction(c);
  CATCH(SQLException)
    LOG_SQLERROR;
    db_rollback_transaction(c);
    t = DM_EQUERY;
  FINALLY
    db_con_close(c);
  END_TRY;

  return t;
}
/*
** WEBPAGE: tktsetup_timeline
**
** Administrative page for configuring how tickets are displayed on
** timelines (title expression, status column, closed expression).
** Setup permission is required; the "setup" button cancels back to
** the main ticket-setup page.
*/
void tktsetup_timeline_page(void){
  login_check_credentials();
  if( !g.perm.Setup ){
    login_needed(0);
    return;
  }
  if( P("setup") ){
    cgi_redirect("tktsetup");
  }
  style_header("Ticket Display On Timelines");
  /* entry_attribute() both renders the form field and persists a
  ** submitted value, hence the enclosing transaction */
  db_begin_transaction();
  cgi_printf("<form action=\"%s/tktsetup_timeline\" method=\"post\"><div>\n",(g.zTop));
  login_insert_csrf_secret();
  cgi_printf("<hr />\n");
  entry_attribute("Ticket Title", 40, "ticket-title-expr", "t", "title", 0);
  cgi_printf("<p>An SQL expression in a query against the TICKET table that will\n"
         "return the title of the ticket for display purposes.</p>\n");
  cgi_printf("<hr />\n");
  entry_attribute("Ticket Status", 40, "ticket-status-column", "s", "status", 0);
  cgi_printf("<p>The name of the column in the TICKET table that contains the ticket\n"
         "status in human-readable form.  Case sensitive.</p>\n");
  cgi_printf("<hr />\n");
  entry_attribute("Ticket Closed", 40, "ticket-closed-expr", "c", "status='Closed'", 0);
  cgi_printf("<p>An SQL expression that evaluates to true in a TICKET table query if\n"
         "the ticket is closed.</p>\n");
  cgi_printf("<hr />\n"
         "<p>\n"
         "<input type=\"submit\" name=\"submit\" value=\"Apply Changes\" />\n"
         "<input type=\"submit\" name=\"setup\" value=\"Cancel\" />\n"
         "</p>\n"
         "</div></form>\n");
  db_end_transaction(0);
  style_footer();
}
int do_migrate(int migrate_limit) { Connection_T c; ResultSet_T r; int id = 0; volatile int count = 0; DbmailMessage *m; qprintf ("Migrate legacy 2.2.x messageblks to mimeparts...\n"); if (!yes_to_all) { qprintf ("\tmigration skipped. Use -y option to perform migration.\n"); return 0; } qprintf ("Preparing to migrate up to %d physmessages.\n", migrate_limit); c = db_con_get(); TRY db_begin_transaction(c); r = db_query(c, "SELECT DISTINCT(physmessage_id) FROM %smessageblks LIMIT %d", DBPFX, migrate_limit); while (db_result_next(r)) { count++; id = db_result_get_u64(r,0); m = dbmail_message_new(NULL); m = dbmail_message_retrieve(m, id); if(! dm_message_store(m)) { if(verbose) qprintf ("%d ",id); db_update("DELETE FROM %smessageblks WHERE physmessage_id = %d", DBPFX, id); } dbmail_message_free(m); } db_commit_transaction(c); CATCH(SQLException) LOG_SQLERROR; db_rollback_transaction(c); return -1; FINALLY db_con_close(c); END_TRY; qprintf ("Migration complete. Migrated %d physmessages.\n", count); return 0; }
/*
** Allocate a new MailboxState from `pool` (creating a private pool if
** none is given) and, when a mailbox id is supplied, load its metadata
** and message list from the database.  Returns NULL-like failure by
** freeing the state and returning nothing useful only on DB error;
** with id==0 an empty state is returned without touching the DB.
*/
T MailboxState_new(Mempool_T pool, uint64_t id)
{
  T M;
  Connection_T c;
  volatile int t = DM_SUCCESS;
  gboolean freepool = FALSE;

  /* No pool supplied: own a private one and remember to free it */
  if (! pool) {
    pool = mempool_open();
    freepool = TRUE;
  }

  M = mempool_pop(pool, sizeof(*M));
  M->pool = pool;
  M->freepool = freepool;

  if (! id) return M;

  M->id = id;
  M->recent_queue = g_tree_new((GCompareFunc)ucmp);
  M->keywords = g_tree_new_full((GCompareDataFunc)_compare_data,NULL,g_free,NULL);

  c = db_con_get();
  TRY
    db_begin_transaction(c); // we need read-committed isolation
    state_load_metadata(M, c);
    state_load_messages(M, c);
  CATCH(SQLException)
    LOG_SQLERROR;
    t = DM_EQUERY;
  FINALLY
    /* Read-only work: always commit before returning the connection */
    db_commit_transaction(c);
    db_con_close(c);
  END_TRY;

  if (t == DM_EQUERY) {
    TRACE(TRACE_ERR, "Error opening mailbox");
    MailboxState_free(&M);
  }

  return M;
}
/*
** COMMAND: rm
** COMMAND: delete*
**
** Usage: %fossil rm FILE1 ?FILE2 ...?
**    or: %fossil delete FILE1 ?FILE2 ...?
**
** Remove one or more files or directories from the repository.
**
** This command does NOT remove the files from disk.  It just marks the
** files as no longer being part of the project.  In other words, future
** changes to the named files will not be versioned.
**
** See also: addremove, add
*/
void delete_cmd(void){
  int i;
  int vid;      /* Current checkout version */
  Stmt loop;

  db_must_be_within_tree();
  vid = db_lget_int("checkout", 0);
  if( vid==0 ){
    fossil_panic("no checkout to remove from");
  }
  db_begin_transaction();
  /* Collect all matching vfile pathnames (exact match or anything
  ** under a named directory) into the temp table sfile */
  db_multi_exec("CREATE TEMP TABLE sfile(x TEXT PRIMARY KEY)");
  for(i=2; i<g.argc; i++){
    Blob treeName;
    char *zTreeName;
    file_tree_name(g.argv[i], &treeName, 1);
    zTreeName = blob_str(&treeName);
    db_multi_exec(
      "INSERT OR IGNORE INTO sfile"
      " SELECT pathname FROM vfile"
      " WHERE (pathname=%Q"
      " OR (pathname>'%q/' AND pathname<'%q0'))"
      " AND NOT deleted",
      zTreeName, zTreeName, zTreeName
    );
    blob_reset(&treeName);
  }
  /* Report each file being removed */
  db_prepare(&loop, "SELECT x FROM sfile");
  while( db_step(&loop)==SQLITE_ROW ){
    fossil_print("DELETED %s\n", db_column_text(&loop, 0));
  }
  db_finalize(&loop);
  /* Mark the files deleted; rows never checked in (rid=0) go away */
  db_multi_exec(
    "UPDATE vfile SET deleted=1 WHERE pathname IN sfile;"
    "DELETE FROM vfile WHERE rid=0 AND deleted;"
  );
  db_end_transaction(0);
}