/*
** Compute an aggregate MD5 checksum over the disk image of every
** file in vid.  The file names are part of the checksum.  The resulting
** checksum is the same as is expected on the R-card of a manifest.
**
** This function operates differently if the Global.aCommitFile
** variable is not NULL.  In that case, the disk image is used for
** each file in aCommitFile[] and the repository image is used for
** all others.
**
** Newly added files that are not contained in the repository are
** omitted from the checksum if they are not in Global.aCommitFile[].
**
** Newly deleted files are included in the checksum if they are not
** part of Global.aCommitFile[].
**
** Renamed files use their new name if they are in Global.aCommitFile[]
** and their original name if they are not in Global.aCommitFile[].
**
** Return the resulting checksum in blob pOut.
*/
void vfile_aggregate_checksum_disk(int vid, Blob *pOut){
  FILE *in;
  Stmt q;
  char zBuf[4096];

  db_must_be_within_tree();
  db_prepare(&q,
      "SELECT %Q || pathname, pathname, origname, is_selected(id), rid"
      " FROM vfile"
      " WHERE (NOT deleted OR NOT is_selected(id)) AND vid=%d"
      " ORDER BY if_selected(id, pathname, origname) /*scan*/",
      g.zLocalRoot, vid
  );
  md5sum_init();
  while( db_step(&q)==SQLITE_ROW ){
    const char *zFullpath = db_column_text(&q, 0);
    const char *zName = db_column_text(&q, 1);
    int isSelected = db_column_int(&q, 3);
    if( isSelected ){
      md5sum_step_text(zName, -1);
      if( file_wd_islink(zFullpath) ){
        /* Instead of file content, use link destination path */
        Blob pathBuf;
        sqlite3_snprintf(sizeof(zBuf), zBuf, " %ld\n",
                         blob_read_link(&pathBuf, zFullpath));
        md5sum_step_text(zBuf, -1);
        md5sum_step_text(blob_str(&pathBuf), -1);
        blob_reset(&pathBuf);
      }else{
        in = fossil_fopen(zFullpath,"rb");
        if( in==0 ){
          md5sum_step_text(" 0\n", -1);
          continue;
        }
        fseek(in, 0L, SEEK_END);
        sqlite3_snprintf(sizeof(zBuf), zBuf, " %ld\n", ftell(in));
        fseek(in, 0L, SEEK_SET);
        md5sum_step_text(zBuf, -1);
        /*printf("%s %s %s",md5sum_current_state(),zName,zBuf); fflush(stdout);*/
        for(;;){
          int n;
          n = fread(zBuf, 1, sizeof(zBuf), in);
          if( n<=0 ) break;
          md5sum_step_text(zBuf, n);
        }
        fclose(in);
      }
    }else{
      int rid = db_column_int(&q, 4);
      const char *zOrigName = db_column_text(&q, 2);
      char zBuf[100];
      Blob file;
      if( zOrigName ) zName = zOrigName;
      if( rid>0 ){
        md5sum_step_text(zName, -1);
        blob_zero(&file);
        content_get(rid, &file);
        sqlite3_snprintf(sizeof(zBuf), zBuf, " %d\n", blob_size(&file));
        md5sum_step_text(zBuf, -1);
        md5sum_step_blob(&file);
        blob_reset(&file);
      }
    }
  }
  db_finalize(&q);
  md5sum_finish(pOut);
}
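/*
** Illustrative sketch only (kept out of the build): one way a caller might
** obtain the R-card value for the current checkout using the routine above.
** The helper name checksum_demo is invented for this example; every call it
** makes appears elsewhere in this file.
*/
#if 0
static void checksum_demo(void){
  int vid;        /* Current checkout version */
  Blob cksum;     /* Aggregate MD5 checksum over the on-disk files */
  db_must_be_within_tree();
  vid = db_lget_int("checkout", 0);
  blob_zero(&cksum);
  vfile_aggregate_checksum_disk(vid, &cksum);
  fossil_print("R %s\n", blob_str(&cksum));  /* Same text as a manifest R-card */
  blob_reset(&cksum);
}
#endif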
/* ** Run a diff between the version zFrom and files on disk. zFrom might ** be NULL which means to simply show the difference between the edited ** files on disk and the check-out on which they are based. */ static void diff_all_against_disk( const char *zFrom, /* Version to difference from */ const char *zDiffCmd, /* Use this diff command. NULL for built-in */ int ignoreEolWs /* Ignore end-of-line whitespace */ ){ int vid; Blob sql; Stmt q; vid = db_lget_int("checkout", 0); vfile_check_signature(vid, 1); blob_zero(&sql); db_begin_transaction(); if( zFrom ){ int rid = name_to_rid(zFrom); if( !is_a_version(rid) ){ fossil_fatal("no such check-in: %s", zFrom); } load_vfile_from_rid(rid); blob_appendf(&sql, "SELECT v2.pathname, v2.deleted, v2.chnged, v2.rid==0, v1.rid" " FROM vfile v1, vfile v2 " " WHERE v1.pathname=v2.pathname AND v1.vid=%d AND v2.vid=%d" " AND (v2.deleted OR v2.chnged OR v1.rid!=v2.rid)" "UNION " "SELECT pathname, 1, 0, 0, 0" " FROM vfile v1" " WHERE v1.vid=%d" " AND NOT EXISTS(SELECT 1 FROM vfile v2" " WHERE v2.vid=%d AND v2.pathname=v1.pathname)" "UNION " "SELECT pathname, 0, 0, 1, 0" " FROM vfile v2" " WHERE v2.vid=%d" " AND NOT EXISTS(SELECT 1 FROM vfile v1" " WHERE v1.vid=%d AND v1.pathname=v2.pathname)" " ORDER BY 1", rid, vid, rid, vid, vid, rid ); }else{ blob_appendf(&sql, "SELECT pathname, deleted, chnged , rid==0, rid" " FROM vfile" " WHERE vid=%d" " AND (deleted OR chnged OR rid==0)" " ORDER BY pathname", vid ); } db_prepare(&q, blob_str(&sql)); while( db_step(&q)==SQLITE_ROW ){ const char *zPathname = db_column_text(&q,0); int isDeleted = db_column_int(&q, 1); int isChnged = db_column_int(&q,2); int isNew = db_column_int(&q,3); char *zFullName = mprintf("%s%s", g.zLocalRoot, zPathname); if( isDeleted ){ printf("DELETED %s\n", zPathname); }else if( access(zFullName, 0) ){ printf("MISSING %s\n", zPathname); }else if( isNew ){ printf("ADDED %s\n", zPathname); }else if( isChnged==3 ){ printf("ADDED_BY_MERGE %s\n", zPathname); }else{ int srcid = db_column_int(&q, 4); Blob content; content_get(srcid, &content); printf("Index: %s\n=======================================" "============================\n", zPathname ); diff_file(&content, zFullName, zPathname, zDiffCmd, ignoreEolWs); blob_reset(&content); } free(zFullName); } db_finalize(&q); db_end_transaction(1); /* ROLLBACK */ }
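/*
** Sketch (not compiled): the two ways diff_all_against_disk() above is
** meant to be driven.  The names zDiffCmd and ignoreEolWs are placeholders
** standing in for whatever option parsing produced; "trunk" is an invented
** example version name.
*/
#if 0
  diff_all_against_disk(0, zDiffCmd, ignoreEolWs);        /* vs. the check-out the edits are based on */
  diff_all_against_disk("trunk", zDiffCmd, ignoreEolWs);  /* vs. an explicit check-in */
#endif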
/*
** Look at every VFILE entry with the given vid and update the
** VFILE.CHNGED field on every file according to whether or not
** the file has changes.  0 means no change.  1 means edited.  2 means
** the file has changed due to a merge.  3 means the file was added
** by a merge.
**
** If VFILE.DELETED is true or if VFILE.RID is zero, then the file was
** either removed from management via "vcs rm" or added via
** "vcs add", respectively, and in both cases we always know that
** the file has changed without having to check the size, mtime,
** or on-disk content.
**
** If the size of the file has changed, then we always know that the file
** changed without having to look at the mtime or on-disk content.
**
** The mtime of the file is only a factor if the mtime-changes setting
** is true (or undefined - it defaults to true) and the useSha1sum flag
** is false.  If the mtime-changes setting is false or if useSha1sum
** is true, then we do not trust the mtime and will examine the on-disk
** content to determine if a file really is the same.
**
** If the mtime is used, it is used only to determine if files are the same.
** If the mtime of a file has changed, we still examine the on-disk content
** to see whether or not the edit was a null-edit.
*/
void vfile_check_signature(int vid, int notFileIsFatal, int useSha1sum){
  int nErr = 0;
  Stmt q;
  Blob fileCksum, origCksum;
  int useMtime = useSha1sum==0 && db_get_boolean("mtime-changes", 1);

  db_begin_transaction();
  db_prepare(&q, "SELECT id, %Q || pathname,"
                 " vfile.mrid, deleted, chnged, uuid, size, mtime"
                 " FROM vfile LEFT JOIN blob ON vfile.mrid=blob.rid"
                 " WHERE vid=%d ", g.zLocalRoot, vid);
  while( db_step(&q)==SQLITE_ROW ){
    int id, rid, isDeleted;
    const char *zName;
    int chnged = 0;
    int oldChnged;
    i64 oldMtime;
    i64 currentMtime;
    i64 origSize;
    i64 currentSize;

    id = db_column_int(&q, 0);
    zName = db_column_text(&q, 1);
    rid = db_column_int(&q, 2);
    isDeleted = db_column_int(&q, 3);
    oldChnged = chnged = db_column_int(&q, 4);
    oldMtime = db_column_int64(&q, 7);
    currentSize = file_wd_size(zName);
    origSize = db_column_int64(&q, 6);
    currentMtime = file_wd_mtime(0);
    if( chnged==0 && (isDeleted || rid==0) ){
      /* "vcs rm" or "vcs add" always change the file */
      chnged = 1;
    }else if( !file_wd_isfile_or_link(0) && currentSize>=0 ){
      if( notFileIsFatal ){
        vcs_warning("not an ordinary file: %s", zName);
        nErr++;
      }
      chnged = 1;
    }
    if( origSize!=currentSize ){
      if( chnged!=1 ){
        /* A file size change is definitive - the file has changed.  No
        ** need to check the mtime or sha1sum */
        chnged = 1;
      }
    }else if( chnged==1 && rid!=0 && !isDeleted ){
      /* File is believed to have changed but it is the same size.
      ** Double check that it really has changed by looking at content.
      */
      assert( origSize==currentSize );
      db_ephemeral_blob(&q, 5, &origCksum);
      if( sha1sum_file(zName, &fileCksum) ){
        blob_zero(&fileCksum);
      }
      if( blob_compare(&fileCksum, &origCksum)==0 ) chnged = 0;
      blob_reset(&origCksum);
      blob_reset(&fileCksum);
    }else if( chnged==0 && (useMtime==0 || currentMtime!=oldMtime) ){
      /* For files that were formerly believed to be unchanged, if their
      ** mtime changes, or unconditionally if --sha1sum is used, check
      ** to see if they have been edited by looking at their SHA1 sum */
      assert( origSize==currentSize );
      db_ephemeral_blob(&q, 5, &origCksum);
      if( sha1sum_file(zName, &fileCksum) ){
        blob_zero(&fileCksum);
      }
      if( blob_compare(&fileCksum, &origCksum) ){
        chnged = 1;
      }
      blob_reset(&origCksum);
      blob_reset(&fileCksum);
    }
    if( currentMtime!=oldMtime || chnged!=oldChnged ){
      db_multi_exec("UPDATE vfile SET mtime=%lld, chnged=%d WHERE id=%d",
                    currentMtime, chnged, id);
    }
  }
  db_finalize(&q);
  if( nErr ) vcs_fatal("abort due to prior errors");
  db_end_transaction(0);
}
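/*
** Reference sketch only: mnemonic labels for the VFILE.CHNGED codes
** described in the header comment above.  Codes 4 and 5 are inferred from
** the integrateFlag?4:2 and integrateFlag?5:3 expressions in merge_cmd()
** later in this file; they are not set by vfile_check_signature() itself.
*/
#if 0
static const char *chnged_label(int chnged){
  switch( chnged ){
    case 0:  return "unchanged";
    case 1:  return "edited";
    case 2:  return "updated by merge";
    case 3:  return "added by merge";
    case 4:  return "updated by integrate merge";  /* assumption */
    case 5:  return "added by integrate merge";    /* assumption */
    default: return "unknown";
  }
}
#endif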
/*
** COMMAND: tag
** Usage: %fossil tag SUBCOMMAND ...
**
** Run various subcommands to control tags and properties
**
**     %fossil tag add ?--raw? ?--propagate? TAGNAME CHECK-IN ?VALUE?
**
**         Add a new tag or property to CHECK-IN.  The tag will
**         be usable instead of a CHECK-IN in commands such as
**         update and merge.  If the --propagate flag is present,
**         the tag value propagates to all descendants of CHECK-IN.
**
**     %fossil tag cancel ?--raw? TAGNAME CHECK-IN
**
**         Remove the tag TAGNAME from CHECK-IN, and also remove
**         the propagation of the tag to any descendants.
**
**     %fossil tag find ?--raw? ?--type TYPE? TAGNAME
**
**         List all objects that use TAGNAME.  TYPE can be "ci" for
**         check-ins or "e" for events.
**
**     %fossil tag list ?--raw? ?CHECK-IN?
**
**         List all tags, or if CHECK-IN is supplied, list
**         all tags and their values for CHECK-IN.
**
** The option --raw allows the manipulation of all types of tags
** used for various internal purposes in fossil.  It also shows
** "cancel" tags for the "find" and "list" subcommands.  You should
** not use this option to make changes unless you are sure what
** you are doing.
**
** If you need to use a tagname that might be confused with
** a hexadecimal baseline or artifact ID, you can explicitly
** disambiguate it by prefixing it with "tag:".  For instance:
**
**     fossil update decaf
**
** will be taken as an artifact or baseline ID and fossil will
** probably complain that no such revision was found.  However,
**
**     fossil update tag:decaf
**
** will assume that "decaf" is a tag/branch name.
**
** The --date-override and --user-override options are only allowed on
** "tag add", as in:
**
**     %fossil tag add --date-override 'YYYY-MMM-DD HH:MM:SS' \\
**                     --user-override user
**
** and exist in order to import history from other SCM systems.
*/
void tag_cmd(void){
  int n;
  int fRaw = find_option("raw","",0)!=0;
  int fPropagate = find_option("propagate","",0)!=0;
  const char *zPrefix = fRaw ? "" : "sym-";

  db_find_and_open_repository(0, 0);
  if( g.argc<3 ){
    goto tag_cmd_usage;
  }
  n = strlen(g.argv[2]);
  if( n==0 ){
    goto tag_cmd_usage;
  }

  if( strncmp(g.argv[2],"add",n)==0 ){
    char *zValue;
    const char *zDateOvrd = find_option("date-override",0,1);
    const char *zUserOvrd = find_option("user-override",0,1);
    if( g.argc!=5 && g.argc!=6 ){
      usage("add ?--raw? ?--propagate? TAGNAME CHECK-IN ?VALUE?");
    }
    zValue = g.argc==6 ? g.argv[5] : 0;
    db_begin_transaction();
    tag_add_artifact(zPrefix, g.argv[3], g.argv[4], zValue,
                     1+fPropagate,zDateOvrd,zUserOvrd);
    db_end_transaction(0);
  }else if( strncmp(g.argv[2],"branch",n)==0 ){
    fossil_fatal("the \"fossil tag branch\" command is discontinued\n"
                 "Use the \"fossil branch new\" command instead.");
  }else if( strncmp(g.argv[2],"cancel",n)==0 ){
    if( g.argc!=5 ){
      usage("cancel ?--raw? TAGNAME CHECK-IN");
    }
    db_begin_transaction();
    tag_add_artifact(zPrefix, g.argv[3], g.argv[4], 0, 0, 0, 0);
    db_end_transaction(0);
  }else if( strncmp(g.argv[2],"find",n)==0 ){
    Stmt q;
    const char *zType = find_option("type","t",1);
    if( zType==0 || zType[0]==0 ) zType = "*";
    if( g.argc!=4 ){
      usage("find ?--raw? 
TAGNAME"); } if( fRaw ){ db_prepare(&q, "SELECT blob.uuid FROM tagxref, blob" " WHERE tagid=(SELECT tagid FROM tag WHERE tagname=%Q)" " AND tagxref.tagtype>0" " AND blob.rid=tagxref.rid", g.argv[3] ); while( db_step(&q)==SQLITE_ROW ){ fossil_print("%s\n", db_column_text(&q, 0)); } db_finalize(&q); }else{ int tagid = db_int(0, "SELECT tagid FROM tag WHERE tagname='sym-%q'", g.argv[3]); if( tagid>0 ){ db_prepare(&q, "%s" " AND event.type GLOB '%q'" " AND blob.rid IN (" " SELECT rid FROM tagxref" " WHERE tagtype>0 AND tagid=%d" ")" " ORDER BY event.mtime DESC", timeline_query_for_tty(), zType, tagid ); print_timeline(&q, 2000, 0); db_finalize(&q); } } }else if( strncmp(g.argv[2],"list",n)==0 ){ Stmt q; if( g.argc==3 ){ db_prepare(&q, "SELECT tagname FROM tag" " WHERE EXISTS(SELECT 1 FROM tagxref" " WHERE tagid=tag.tagid" " AND tagtype>0)" " ORDER BY tagname" ); while( db_step(&q)==SQLITE_ROW ){ const char *zName = db_column_text(&q, 0); if( fRaw ){ fossil_print("%s\n", zName); }else if( strncmp(zName, "sym-", 4)==0 ){ fossil_print("%s\n", &zName[4]); } } db_finalize(&q); }else if( g.argc==4 ){ int rid = name_to_rid(g.argv[3]); db_prepare(&q, "SELECT tagname, value FROM tagxref, tag" " WHERE tagxref.rid=%d AND tagxref.tagid=tag.tagid" " AND tagtype>%d" " ORDER BY tagname", rid, fRaw ? -1 : 0 ); while( db_step(&q)==SQLITE_ROW ){ const char *zName = db_column_text(&q, 0); const char *zValue = db_column_text(&q, 1); if( fRaw==0 ){ if( strncmp(zName, "sym-", 4)!=0 ) continue; zName += 4; } if( zValue && zValue[0] ){ fossil_print("%s=%s\n", zName, zValue); }else{ fossil_print("%s\n", zName); } } db_finalize(&q); }else{ usage("tag list ?CHECK-IN?"); } }else { goto tag_cmd_usage; } /* Cleanup */ return; tag_cmd_usage: usage("add|cancel|find|list ..."); }
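/*
** Sketch (not compiled): the "sym-" prefix handling described in the help
** text above.  The tag name "release-1.0" is invented for the example.
*/
#if 0
  int fRaw = 0;   /* as if --raw were absent */
  char *zStored = mprintf("%s%s", fRaw ? "" : "sym-", "release-1.0");
  /* zStored is now "sym-release-1.0", the form recorded in TAG.TAGNAME;
  ** with --raw the name would be stored verbatim. */
  fossil_free(zStored);
#endif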
/* ** COMMAND: merge ** ** Usage: %fossil merge ?OPTIONS? ?VERSION? ** ** The argument VERSION is a version that should be merged into the ** current checkout. All changes from VERSION back to the nearest ** common ancestor are merged. Except, if either of the --cherrypick or ** --backout options are used only the changes associated with the ** single check-in VERSION are merged. The --backout option causes ** the changes associated with VERSION to be removed from the current ** checkout rather than added. ** ** If the VERSION argument is omitted, then Fossil attempts to find ** a recent fork on the current branch to merge. ** ** Only file content is merged. The result continues to use the ** file and directory names from the current checkout even if those ** names might have been changed in the branch being merged in. ** ** Other options: ** ** --baseline BASELINE Use BASELINE as the "pivot" of the merge instead ** of the nearest common ancestor. This allows ** a sequence of changes in a branch to be merged ** without having to merge the entire branch. ** ** --binary GLOBPATTERN Treat files that match GLOBPATTERN as binary ** and do not try to merge parallel changes. This ** option overrides the "binary-glob" setting. ** ** --case-sensitive BOOL Override the case-sensitive setting. If false, ** files whose names differ only in case are taken ** to be the same file. ** ** -f|--force Force the merge even if it would be a no-op. ** ** --force-missing Force the merge even if there is missing content. ** ** --integrate Merged branch will be closed when committing. ** ** -n|--dry-run If given, display instead of run actions ** ** -v|--verbose Show additional details of the merge */ void merge_cmd(void){ int vid; /* Current version "V" */ int mid; /* Version we are merging from "M" */ int pid; /* The pivot version - most recent common ancestor P */ int verboseFlag; /* True if the -v|--verbose option is present */ int integrateFlag; /* True if the --integrate option is present */ int pickFlag; /* True if the --cherrypick option is present */ int backoutFlag; /* True if the --backout option is present */ int dryRunFlag; /* True if the --dry-run or -n option is present */ int forceFlag; /* True if the --force or -f option is present */ int forceMissingFlag; /* True if the --force-missing option is present */ const char *zBinGlob; /* The value of --binary */ const char *zPivot; /* The value of --baseline */ int debugFlag; /* True if --debug is present */ int nChng; /* Number of file name changes */ int *aChng; /* An array of file name changes */ int i; /* Loop counter */ int nConflict = 0; /* Number of conflicts seen */ int nOverwrite = 0; /* Number of unmanaged files overwritten */ Stmt q; /* Notation: ** ** V The current checkout ** M The version being merged in ** P The "pivot" - the most recent common ancestor of V and M. 
*/ undo_capture_command_line(); verboseFlag = find_option("verbose","v",0)!=0; forceMissingFlag = find_option("force-missing",0,0)!=0; if( !verboseFlag ){ verboseFlag = find_option("detail",0,0)!=0; /* deprecated */ } pickFlag = find_option("cherrypick",0,0)!=0; integrateFlag = find_option("integrate",0,0)!=0; backoutFlag = find_option("backout",0,0)!=0; debugFlag = find_option("debug",0,0)!=0; zBinGlob = find_option("binary",0,1); dryRunFlag = find_option("dry-run","n",0)!=0; if( !dryRunFlag ){ dryRunFlag = find_option("nochange",0,0)!=0; /* deprecated */ } forceFlag = find_option("force","f",0)!=0; zPivot = find_option("baseline",0,1); verify_all_options(); db_must_be_within_tree(); if( zBinGlob==0 ) zBinGlob = db_get("binary-glob",0); vid = db_lget_int("checkout", 0); if( vid==0 ){ fossil_fatal("nothing is checked out"); } /* Find mid, the artifactID of the version to be merged into the current ** check-out */ if( g.argc==3 ){ /* Mid is specified as an argument on the command-line */ mid = name_to_typed_rid(g.argv[2], "ci"); if( mid==0 || !is_a_version(mid) ){ fossil_fatal("not a version: %s", g.argv[2]); } }else if( g.argc==2 ){ /* No version specified on the command-line so pick the most recent ** leaf that is (1) not the version currently checked out and (2) ** has not already been merged into the current checkout and (3) ** the leaf is not closed and (4) the leaf is in the same branch ** as the current checkout. */ Stmt q; if( pickFlag || backoutFlag || integrateFlag){ fossil_fatal("cannot use --backout, --cherrypick or --integrate with a fork merge"); } mid = db_int(0, "SELECT leaf.rid" " FROM leaf, event" " WHERE leaf.rid=event.objid" " AND leaf.rid!=%d" /* Constraint (1) */ " AND leaf.rid NOT IN (SELECT merge FROM vmerge)" /* Constraint (2) */ " AND NOT EXISTS(SELECT 1 FROM tagxref" /* Constraint (3) */ " WHERE rid=leaf.rid" " AND tagid=%d" " AND tagtype>0)" " AND (SELECT value FROM tagxref" /* Constraint (4) */ " WHERE tagid=%d AND rid=%d AND tagtype>0) =" " (SELECT value FROM tagxref" " WHERE tagid=%d AND rid=leaf.rid AND tagtype>0)" " ORDER BY event.mtime DESC LIMIT 1", vid, TAG_CLOSED, TAG_BRANCH, vid, TAG_BRANCH ); if( mid==0 ){ fossil_fatal("no unmerged forks of branch \"%s\"", db_text(0, "SELECT value FROM tagxref" " WHERE tagid=%d AND rid=%d AND tagtype>0", TAG_BRANCH, vid) ); } db_prepare(&q, "SELECT blob.uuid," " datetime(event.mtime%s)," " coalesce(ecomment, comment)," " coalesce(euser, user)" " FROM event, blob" " WHERE event.objid=%d AND blob.rid=%d", timeline_utc(), mid, mid ); if( db_step(&q)==SQLITE_ROW ){ char *zCom = mprintf("Merging fork [%S] at %s by %s: \"%s\"", db_column_text(&q, 0), db_column_text(&q, 1), db_column_text(&q, 3), db_column_text(&q, 2)); comment_print(zCom, db_column_text(&q,2), 0, -1, g.comFmtFlags); fossil_free(zCom); } db_finalize(&q); }else{ usage("?OPTIONS? 
?VERSION?");
    return;
  }

  if( zPivot ){
    pid = name_to_typed_rid(zPivot, "ci");
    if( pid==0 || !is_a_version(pid) ){
      fossil_fatal("not a version: %s", zPivot);
    }
    if( pickFlag ){
      fossil_fatal("incompatible options: --cherrypick & --baseline");
    }
  }else if( pickFlag || backoutFlag ){
    if( integrateFlag ){
      fossil_fatal("incompatible options: --integrate & --cherrypick or --backout");
    }
    pid = db_int(0, "SELECT pid FROM plink WHERE cid=%d AND isprim", mid);
    if( pid<=0 ){
      fossil_fatal("cannot find an ancestor for %s", g.argv[2]);
    }
  }else{
    pivot_set_primary(mid);
    pivot_set_secondary(vid);
    db_prepare(&q, "SELECT merge FROM vmerge WHERE id=0");
    while( db_step(&q)==SQLITE_ROW ){
      pivot_set_secondary(db_column_int(&q,0));
    }
    db_finalize(&q);
    pid = pivot_find();
    if( pid<=0 ){
      fossil_fatal("cannot find a common ancestor between the current "
                   "checkout and %s", g.argv[2]);
    }
  }
  if( backoutFlag ){
    int t = pid;
    pid = mid;
    mid = t;
  }
  if( !is_a_version(pid) ){
    fossil_fatal("not a version: record #%d", pid);
  }
  if( !forceFlag && mid==pid ){
    fossil_print("Merge skipped because it is a no-op. "
                 " Use --force to override.\n");
    return;
  }
  if( integrateFlag && !is_a_leaf(mid)){
    fossil_warning("ignoring --integrate: %s is not a leaf", g.argv[2]);
    integrateFlag = 0;
  }
  if( verboseFlag ){
    print_checkin_description(mid, 12,
                              integrateFlag?"integrate:":"merge-from:");
    print_checkin_description(pid, 12, "baseline:");
  }
  vfile_check_signature(vid, CKSIG_ENOTFILE);
  db_begin_transaction();
  if( !dryRunFlag ) undo_begin();
  if( load_vfile_from_rid(mid) && !forceMissingFlag ){
    fossil_fatal("missing content, unable to merge");
  }
  if( load_vfile_from_rid(pid) && !forceMissingFlag ){
    fossil_fatal("missing content, unable to merge");
  }
  if( debugFlag ){
    char *z;
    z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", pid);
    fossil_print("P=%d %z\n", pid, z);
    z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", mid);
    fossil_print("M=%d %z\n", mid, z);
    z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", vid);
    fossil_print("V=%d %z\n", vid, z);
  }

  /*
  ** The vfile.pathname field is used to match files against each other.  The
  ** FV table contains one row for each unique filename in the current
  ** checkout, the pivot, and the version being merged.
  */
  db_multi_exec(
    "DROP TABLE IF EXISTS fv;"
    "CREATE TEMP TABLE fv("
    " fn TEXT PRIMARY KEY %s,"   /* The filename */
    " idv INTEGER,"              /* VFILE entry for current version */
    " idp INTEGER,"              /* VFILE entry for the pivot */
    " idm INTEGER,"              /* VFILE entry for version merging in */
    " chnged BOOLEAN,"           /* True if current version has been edited */
    " ridv INTEGER,"             /* Record ID for current version */
    " ridp INTEGER,"             /* Record ID for pivot */
    " ridm INTEGER,"             /* Record ID for merge */
    " isexe BOOLEAN,"            /* Execute permission enabled */
    " fnp TEXT %s,"              /* The filename in the pivot */
    " fnm TEXT %s,"              /* The filename in the merged version */
    " islinkv BOOLEAN,"          /* True if current version is a symlink */
    " islinkm BOOLEAN"           /* True if merged version is a symlink */
    ");",
    filename_collation(), filename_collation(), filename_collation()
  );

  /* Add files found in V */
  db_multi_exec(
    "INSERT OR IGNORE"
    " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)"
    " SELECT pathname, pathname, pathname, id, 0, 0, rid, 0, 0, isexe, chnged "
    " FROM vfile WHERE vid=%d",
    vid
  );

  /*
  ** Compute name changes from P->V
  */
  find_filename_changes(pid, vid, 0, &nChng, &aChng, debugFlag ?
"P->V" : 0); if( nChng ){ for(i=0; i<nChng; i++){ char *z; z = db_text(0, "SELECT name FROM filename WHERE fnid=%d", aChng[i*2]); db_multi_exec( "UPDATE fv SET fnp=%Q, fnm=%Q" " WHERE fn=(SELECT name FROM filename WHERE fnid=%d)", z, z, aChng[i*2+1] ); free(z); } fossil_free(aChng); db_multi_exec("UPDATE fv SET fnm=fnp WHERE fnp!=fn"); } /* Add files found in P but not in V */ db_multi_exec( "INSERT OR IGNORE" " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)" " SELECT pathname, pathname, pathname, 0, 0, 0, 0, 0, 0, isexe, 0 " " FROM vfile" " WHERE vid=%d AND pathname %s NOT IN (SELECT fnp FROM fv)", pid, filename_collation() ); /* ** Compute name changes from P->M */ find_filename_changes(pid, mid, 0, &nChng, &aChng, debugFlag ? "P->M" : 0); if( nChng ){ if( nChng>4 ) db_multi_exec("CREATE INDEX fv_fnp ON fv(fnp)"); for(i=0; i<nChng; i++){ db_multi_exec( "UPDATE fv SET fnm=(SELECT name FROM filename WHERE fnid=%d)" " WHERE fnp=(SELECT name FROM filename WHERE fnid=%d)", aChng[i*2+1], aChng[i*2] ); } fossil_free(aChng); } /* Add files found in M but not in P or V. */ db_multi_exec( "INSERT OR IGNORE" " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)" " SELECT pathname, pathname, pathname, 0, 0, 0, 0, 0, 0, isexe, 0 " " FROM vfile" " WHERE vid=%d" " AND pathname %s NOT IN (SELECT fnp FROM fv UNION SELECT fnm FROM fv)", mid, filename_collation() ); /* ** Compute the file version ids for P and M. */ db_multi_exec( "UPDATE fv SET" " idp=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnp=pathname),0)," " ridp=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnp=pathname),0)," " idm=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnm=pathname),0)," " ridm=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnm=pathname),0)," " islinkv=coalesce((SELECT islink FROM vfile" " WHERE vid=%d AND fnm=pathname),0)," " islinkm=coalesce((SELECT islink FROM vfile" " WHERE vid=%d AND fnm=pathname),0)", pid, pid, mid, mid, vid, mid ); if( debugFlag ){ db_prepare(&q, "SELECT rowid, fn, fnp, fnm, chnged, ridv, ridp, ridm, " " isexe, islinkv, islinkm FROM fv" ); while( db_step(&q)==SQLITE_ROW ){ fossil_print("%3d: ridv=%-4d ridp=%-4d ridm=%-4d chnged=%d isexe=%d " " islinkv=%d islinkm=%d\n", db_column_int(&q, 0), db_column_int(&q, 5), db_column_int(&q, 6), db_column_int(&q, 7), db_column_int(&q, 4), db_column_int(&q, 8), db_column_int(&q, 9), db_column_int(&q, 10)); fossil_print(" fn = [%s]\n", db_column_text(&q, 1)); fossil_print(" fnp = [%s]\n", db_column_text(&q, 2)); fossil_print(" fnm = [%s]\n", db_column_text(&q, 3)); } db_finalize(&q); } /* ** Find files in M and V but not in P and report conflicts. ** The file in M will be ignored. It will be treated as if it ** does not exist. 
*/ db_prepare(&q, "SELECT idm FROM fv WHERE idp=0 AND idv>0 AND idm>0" ); while( db_step(&q)==SQLITE_ROW ){ int idm = db_column_int(&q, 0); char *zName = db_text(0, "SELECT pathname FROM vfile WHERE id=%d", idm); fossil_warning("WARNING - no common ancestor: %s", zName); free(zName); db_multi_exec("UPDATE fv SET idm=0 WHERE idm=%d", idm); } db_finalize(&q); /* ** Add to V files that are not in V or P but are in M */ db_prepare(&q, "SELECT idm, rowid, fnm FROM fv AS x" " WHERE idp=0 AND idv=0 AND idm>0" ); while( db_step(&q)==SQLITE_ROW ){ int idm = db_column_int(&q, 0); int rowid = db_column_int(&q, 1); int idv; const char *zName; char *zFullName; db_multi_exec( "INSERT INTO vfile(vid,chnged,deleted,rid,mrid,isexe,islink,pathname)" " SELECT %d,%d,0,rid,mrid,isexe,islink,pathname FROM vfile WHERE id=%d", vid, integrateFlag?5:3, idm ); idv = db_last_insert_rowid(); db_multi_exec("UPDATE fv SET idv=%d WHERE rowid=%d", idv, rowid); zName = db_column_text(&q, 2); zFullName = mprintf("%s%s", g.zLocalRoot, zName); if( file_wd_isfile_or_link(zFullName) ){ fossil_print("ADDED %s (overwrites an unmanaged file)\n", zName); nOverwrite++; }else{ fossil_print("ADDED %s\n", zName); } fossil_free(zFullName); if( !dryRunFlag ){ undo_save(zName); vfile_to_disk(0, idm, 0, 0); } } db_finalize(&q); /* ** Find files that have changed from P->M but not P->V. ** Copy the M content over into V. */ db_prepare(&q, "SELECT idv, ridm, fn, islinkm FROM fv" " WHERE idp>0 AND idv>0 AND idm>0" " AND ridm!=ridp AND ridv=ridp AND NOT chnged" ); while( db_step(&q)==SQLITE_ROW ){ int idv = db_column_int(&q, 0); int ridm = db_column_int(&q, 1); const char *zName = db_column_text(&q, 2); int islinkm = db_column_int(&q, 3); /* Copy content from idm over into idv. Overwrite idv. */ fossil_print("UPDATE %s\n", zName); if( !dryRunFlag ){ undo_save(zName); db_multi_exec( "UPDATE vfile SET mtime=0, mrid=%d, chnged=%d, islink=%d " " WHERE id=%d", ridm, integrateFlag?4:2, islinkm, idv ); vfile_to_disk(0, idv, 0, 0); } } db_finalize(&q); /* ** Do a three-way merge on files that have changes on both P->M and P->V. */ db_prepare(&q, "SELECT ridm, idv, ridp, ridv, %s, fn, isexe, islinkv, islinkm FROM fv" " WHERE idp>0 AND idv>0 AND idm>0" " AND ridm!=ridp AND (ridv!=ridp OR chnged)", glob_expr("fv.fn", zBinGlob) ); while( db_step(&q)==SQLITE_ROW ){ int ridm = db_column_int(&q, 0); int idv = db_column_int(&q, 1); int ridp = db_column_int(&q, 2); int ridv = db_column_int(&q, 3); int isBinary = db_column_int(&q, 4); const char *zName = db_column_text(&q, 5); int isExe = db_column_int(&q, 6); int islinkv = db_column_int(&q, 7); int islinkm = db_column_int(&q, 8); int rc; char *zFullPath; Blob m, p, r; /* Do a 3-way merge of idp->idm into idp->idv. The results go into idv. */ if( verboseFlag ){ fossil_print("MERGE %s (pivot=%d v1=%d v2=%d)\n", zName, ridp, ridm, ridv); }else{ fossil_print("MERGE %s\n", zName); } if( islinkv || islinkm /* || file_wd_islink(zFullPath) */ ){ fossil_print("***** Cannot merge symlink %s\n", zName); nConflict++; }else{ undo_save(zName); zFullPath = mprintf("%s/%s", g.zLocalRoot, zName); content_get(ridp, &p); content_get(ridm, &m); if( isBinary ){ rc = -1; blob_zero(&r); }else{ unsigned mergeFlags = dryRunFlag ? 
MERGE_DRYRUN : 0;
        rc = merge_3way(&p, zFullPath, &m, &r, mergeFlags);
      }
      if( rc>=0 ){
        if( !dryRunFlag ){
          blob_write_to_file(&r, zFullPath);
          file_wd_setexe(zFullPath, isExe);
        }
        db_multi_exec("UPDATE vfile SET mtime=0 WHERE id=%d", idv);
        if( rc>0 ){
          fossil_print("***** %d merge conflicts in %s\n", rc, zName);
          nConflict++;
        }
      }else{
        fossil_print("***** Cannot merge binary file %s\n", zName);
        nConflict++;
      }
      blob_reset(&p);
      blob_reset(&m);
      blob_reset(&r);
    }
    db_multi_exec("INSERT OR IGNORE INTO vmerge(id,merge) VALUES(%d,%d)",
                  idv,ridm);
  }
  db_finalize(&q);

  /*
  ** Drop files that are in P and V but not in M
  */
  db_prepare(&q,
    "SELECT idv, fn, chnged FROM fv"
    " WHERE idp>0 AND idv>0 AND idm=0"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int idv = db_column_int(&q, 0);
    const char *zName = db_column_text(&q, 1);
    int chnged = db_column_int(&q, 2);
    /* Delete the file idv */
    fossil_print("DELETE %s\n", zName);
    if( chnged ){
      fossil_warning("WARNING: local edits lost for %s\n", zName);
      nConflict++;
    }
    undo_save(zName);
    db_multi_exec(
      "UPDATE vfile SET deleted=1 WHERE id=%d", idv
    );
    if( !dryRunFlag ){
      char *zFullPath = mprintf("%s%s", g.zLocalRoot, zName);
      file_delete(zFullPath);
      free(zFullPath);
    }
  }
  db_finalize(&q);

  /*
  ** Rename files that have taken a rename on P->M but which keep the same
  ** name on P->V.  If a file is renamed on P->V only or on both P->V and
  ** P->M then we retain the V name of the file.
  */
  db_prepare(&q,
    "SELECT idv, fnp, fnm FROM fv"
    " WHERE idv>0 AND idp>0 AND idm>0 AND fnp=fn AND fnm!=fnp"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int idv = db_column_int(&q, 0);
    const char *zOldName = db_column_text(&q, 1);
    const char *zNewName = db_column_text(&q, 2);
    fossil_print("RENAME %s -> %s\n", zOldName, zNewName);
    undo_save(zOldName);
    undo_save(zNewName);
    db_multi_exec(
      "UPDATE vfile SET pathname=%Q, origname=coalesce(origname,pathname)"
      " WHERE id=%d AND vid=%d", zNewName, idv, vid
    );
    if( !dryRunFlag ){
      char *zFullOldPath = mprintf("%s%s", g.zLocalRoot, zOldName);
      char *zFullNewPath = mprintf("%s%s", g.zLocalRoot, zNewName);
      if( file_wd_islink(zFullOldPath) ){
        symlink_copy(zFullOldPath, zFullNewPath);
      }else{
        file_copy(zFullOldPath, zFullNewPath);
      }
      file_delete(zFullOldPath);
      free(zFullNewPath);
      free(zFullOldPath);
    }
  }
  db_finalize(&q);

  /* Report on conflicts */
  if( nConflict ){
    fossil_warning("WARNING: %d merge conflicts", nConflict);
  }
  if( nOverwrite ){
    fossil_warning("WARNING: %d unmanaged files were overwritten", nOverwrite);
  }
  if( dryRunFlag ){
    fossil_warning("REMINDER: this was a dry run -"
                   " no files were actually changed.");
  }

  /*
  ** Clean up the mid and pid VFILE entries.  Then commit the changes.
  */
  db_multi_exec("DELETE FROM vfile WHERE vid!=%d", vid);
  if( pickFlag ){
    db_multi_exec("INSERT OR IGNORE INTO vmerge(id,merge) VALUES(-1,%d)",mid);
    /* For a cherry-pick merge, make the default check-in comment the same
    ** as the check-in comment on the check-in that is being merged in. */
    db_multi_exec(
      "REPLACE INTO vvar(name,value)"
      " SELECT 'ci-comment', coalesce(ecomment,comment) FROM event"
      " WHERE type='ci' AND objid=%d",
      mid
    );
  }else if( backoutFlag ){
    db_multi_exec("INSERT OR IGNORE INTO vmerge(id,merge) VALUES(-2,%d)",pid);
  }else if( integrateFlag ){
    db_multi_exec("INSERT OR IGNORE INTO vmerge(id,merge) VALUES(-4,%d)",mid);
  }else{
    db_multi_exec("INSERT OR IGNORE INTO vmerge(id,merge) VALUES(0,%d)", mid);
  }
  undo_finish();
  db_end_transaction(dryRunFlag);
}
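/*
** Reference sketch only: the special vmerge.id values written at the end of
** merge_cmd() above.  These macro names are invented mnemonics; the numeric
** values come directly from the INSERT statements in merge_cmd().
*/
#if 0
#define VMERGE_ID_MERGE       0   /* ordinary merge */
#define VMERGE_ID_CHERRYPICK (-1) /* --cherrypick */
#define VMERGE_ID_BACKOUT    (-2) /* --backout */
#define VMERGE_ID_INTEGRATE  (-4) /* --integrate */
#endif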
/* ** Expects pUser to contain fossil user fields in JSON form: name, ** uid, info, capabilities, password. ** ** At least one of (name, uid) must be included. All others are ** optional and their db fields will not be updated if those fields ** are not included in pUser. ** ** If uid is specified then name may refer to a _new_ name ** for a user, otherwise the name must refer to an existing user. ** If uid=-1 then the name must be specified and a new user is ** created (fails if one already exists). ** ** If uid is not set, this function might modify pUser to contain the ** db-found (or inserted) user ID. ** ** On error g.json's error state is set and one of the FSL_JSON_E_xxx ** values from FossilJsonCodes is returned. ** ** On success the db record for the given user is updated. ** ** Requires either Admin, Setup, or Password access. Non-admin/setup ** users can only change their own information. Non-setup users may ** not modify the 's' permission. Admin users without setup ** permissions may not edit any other user who has the 's' permission. ** */ int json_user_update_from_json( cson_object * pUser ){ #define CSTR(X) cson_string_cstr(cson_value_get_string( cson_object_get(pUser, X ) )) char const * zName = CSTR("name"); char const * zNameNew = zName; char * zNameFree = NULL; char const * zInfo = CSTR("info"); char const * zCap = CSTR("capabilities"); char const * zPW = CSTR("password"); cson_value const * forceLogout = cson_object_get(pUser, "forceLogout"); int gotFields = 0; #undef CSTR cson_int_t uid = cson_value_get_integer( cson_object_get(pUser, "uid") ); char const tgtHasSetup = zCap && (NULL!=strchr(zCap, 's')); char tgtHadSetup = 0; Blob sql = empty_blob; Stmt q = empty_Stmt; #if 0 if(!g.perm.Admin && !g.perm.Setup && !g.perm.Password){ return json_set_err( FSL_JSON_E_DENIED, "Password change requires 'a', 's', " "or 'p' permissions."); } #endif if(uid<=0 && (!zName||!*zName)){ return json_set_err(FSL_JSON_E_MISSING_ARGS, "One of 'uid' or 'name' is required."); }else if(uid>0){ zNameFree = db_text(NULL, "SELECT login FROM user WHERE uid=%d",uid); if(!zNameFree){ return json_set_err(FSL_JSON_E_RESOURCE_NOT_FOUND, "No login found for uid %d.", uid); } zName = zNameFree; }else if(-1==uid){ /* try to create a new user */ if(!g.perm.Admin && !g.perm.Setup){ json_set_err(FSL_JSON_E_DENIED, "Requires 'a' or 's' privileges."); goto error; }else if(!zName || !*zName){ json_set_err(FSL_JSON_E_MISSING_ARGS, "No name specified for new user."); goto error; }else if( db_exists("SELECT 1 FROM user WHERE login=%Q", zName) ){ json_set_err(FSL_JSON_E_RESOURCE_ALREADY_EXISTS, "User %s already exists.", zName); goto error; }else{ Stmt ins = empty_Stmt; db_prepare(&ins, "INSERT INTO user (login) VALUES(%Q)",zName); db_step( &ins ); db_finalize(&ins); uid = db_int(0,"SELECT uid FROM user WHERE login=%Q", zName); assert(uid>0); zNameNew = zName; cson_object_set( pUser, "uid", cson_value_new_integer(uid) ); } }else{ uid = db_int(0,"SELECT uid FROM user WHERE login=%Q", zName); if(uid<=0){ json_set_err(FSL_JSON_E_RESOURCE_NOT_FOUND, "No login found for user [%s].", zName); goto error; } cson_object_set( pUser, "uid", cson_value_new_integer(uid) ); } /* Maintenance note: all error-returns from here on out should go via 'goto error' in order to clean up. */ if(uid != g.userUid){ if(!g.perm.Admin && !g.perm.Setup){ json_set_err(FSL_JSON_E_DENIED, "Changing another user's data requires " "'a' or 's' privileges."); goto error; } } /* check if the target uid currently has setup rights. 
*/ tgtHadSetup = db_int(0,"SELECT 1 FROM user where uid=%d" " AND cap GLOB '*s*'", uid); if((tgtHasSetup || tgtHadSetup) && !g.perm.Setup){ /* Do not allow a non-setup user to set or remove setup privileges. setup.c uses similar logic. */ json_set_err(FSL_JSON_E_DENIED, "Modifying 's' users/privileges requires " "'s' privileges."); goto error; } /* Potential todo: do not allow a setup user to remove 's' from himself, to avoid locking himself out? */ blob_append(&sql, "UPDATE user SET",-1 ); blob_append(&sql, " mtime=cast(strftime('%s') AS INTEGER)", -1); if((uid>0) && zNameNew){ /* Check for name change... */ if(0!=strcmp(zName,zNameNew)){ if( (!g.perm.Admin && !g.perm.Setup) && (zName != zNameNew)){ json_set_err( FSL_JSON_E_DENIED, "Modifying user names requires 'a' or 's' privileges."); goto error; } forceLogout = cson_value_true() /* reminders: 1) does not allocate. 2) we do this because changing a name invalidates any login token because the old name is part of the token hash. */; blob_append_sql(&sql, ", login=%Q", zNameNew); ++gotFields; } } if( zCap && *zCap ){ if(!g.perm.Admin || !g.perm.Setup){ /* we "could" arguably silently ignore cap in this case. */ json_set_err(FSL_JSON_E_DENIED, "Changing capabilities requires 'a' or 's' privileges."); goto error; } blob_append_sql(&sql, ", cap=%Q", zCap); ++gotFields; } if( zPW && *zPW ){ if(!g.perm.Admin && !g.perm.Setup && !g.perm.Password){ json_set_err( FSL_JSON_E_DENIED, "Password change requires 'a', 's', " "or 'p' permissions."); goto error; }else{ #define TRY_LOGIN_GROUP 0 /* login group support is not yet implemented. */ #if !TRY_LOGIN_GROUP char * zPWHash = NULL; ++gotFields; zPWHash = sha1_shared_secret(zPW, zNameNew ? zNameNew : zName, NULL); blob_append_sql(&sql, ", pw=%Q", zPWHash); free(zPWHash); #else ++gotFields; blob_append_sql(&sql, ", pw=coalesce(shared_secret(%Q,%Q," "(SELECT value FROM config WHERE name='project-code')))", zPW, zNameNew ? zNameNew : zName); /* shared_secret() func is undefined? */ #endif } } if( zInfo ){ blob_append_sql(&sql, ", info=%Q", zInfo); ++gotFields; } if((g.perm.Admin || g.perm.Setup) && forceLogout && cson_value_get_bool(forceLogout)){ blob_append(&sql, ", cookie=NULL, cexpire=NULL", -1); ++gotFields; } if(!gotFields){ json_set_err( FSL_JSON_E_MISSING_ARGS, "Required user data are missing."); goto error; } assert(uid>0); #if !TRY_LOGIN_GROUP blob_append_sql(&sql, " WHERE uid=%d", uid); #else /* need name for login group support :/ */ blob_append_sql(&sql, " WHERE login=%Q", zName); #endif #if 0 puts(blob_str(&sql)); cson_output_FILE( cson_object_value(pUser), stdout, NULL ); #endif db_prepare(&q, "%s", blob_sql_text(&sql)); db_exec(&q); db_finalize(&q); #if TRY_LOGIN_GROUP if( zPW || cson_value_get_bool(forceLogout) ){ Blob groupSql = empty_blob; char * zErr = NULL; blob_append_sql(&groupSql, "INSERT INTO user(login)" " SELECT %Q WHERE NOT EXISTS(SELECT 1 FROM user WHERE login=%Q);", zName, zName ); blob_append(&groupSql, blob_str(&sql), blob_size(&sql)); login_group_sql(blob_str(&groupSql), NULL, NULL, &zErr); blob_reset(&groupSql); if( zErr ){ json_set_err( FSL_JSON_E_UNKNOWN, "Repo-group update at least partially failed: %s", zErr); free(zErr); goto error; } } #endif /* TRY_LOGIN_GROUP */ #undef TRY_LOGIN_GROUP free( zNameFree ); blob_reset(&sql); return 0; error: assert(0 != g.json.resultCode); free(zNameFree); blob_reset(&sql); return g.json.resultCode; }
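/*
** For orientation, a pUser object of the shape json_user_update_from_json()
** expects might look like the following (all values invented for the
** example):
**
**   {
**     "uid": -1,
**     "name": "alice",
**     "info": "Alice Example",
**     "capabilities": "ei",
**     "password": "s3cret",
**     "forceLogout": true
**   }
**
** With uid=-1 a new user "alice" is created; with a positive uid the same
** fields update the existing user, as described in the header comment.
*/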
/* ** Insert a tag into the database. */ int tag_insert( const char *zTag, /* Name of the tag (w/o the "+" or "-" prefix */ int tagtype, /* 0:cancel 1:singleton 2:propagated */ const char *zValue, /* Value if the tag is really a property */ int srcId, /* Artifact that contains this tag */ double mtime, /* Timestamp. Use default if <=0.0 */ int rid /* Artifact to which the tag is to attached */ ){ Stmt s; const char *zCol; int tagid = tag_findid(zTag, 1); int rc; if( mtime<=0.0 ){ mtime = db_double(0.0, "SELECT julianday('now')"); } db_prepare(&s, "SELECT 1 FROM tagxref" " WHERE tagid=%d" " AND rid=%d" " AND mtime>=:mtime", tagid, rid ); db_bind_double(&s, ":mtime", mtime); rc = db_step(&s); db_finalize(&s); if( rc==SQLITE_ROW ){ /* Another entry that is more recent already exists. Do nothing */ return tagid; } db_prepare(&s, "REPLACE INTO tagxref(tagid,tagtype,srcId,origid,value,mtime,rid)" " VALUES(%d,%d,%d,%d,%Q,:mtime,%d)", tagid, tagtype, srcId, rid, zValue, rid ); db_bind_double(&s, ":mtime", mtime); db_step(&s); db_finalize(&s); if( tagid==TAG_BRANCH ) leaf_eventually_check(rid); if( tagtype==0 ){ zValue = 0; } zCol = 0; switch( tagid ){ case TAG_BGCOLOR: { zCol = "bgcolor"; break; } case TAG_COMMENT: { zCol = "ecomment"; break; } case TAG_USER: { zCol = "euser"; break; } case TAG_PRIVATE: { db_multi_exec( "INSERT OR IGNORE INTO private(rid) VALUES(%d);", rid ); } } if( zCol ){ db_multi_exec("UPDATE event SET %s=%Q WHERE objid=%d", zCol, zValue, rid); if( tagid==TAG_COMMENT ){ char *zCopy = mprintf("%s", zValue); wiki_extract_links(zCopy, rid, 0, mtime, 1, WIKI_INLINE); free(zCopy); } } if( tagid==TAG_DATE ){ db_multi_exec("UPDATE event " " SET mtime=julianday(%Q)," " omtime=coalesce(omtime,mtime)" " WHERE objid=%d", zValue, rid); } if( tagtype==1 ) tagtype = 0; tag_propagate(rid, tagid, tagtype, rid, zValue, mtime); return tagid; }
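/*
** Sketch (not compiled): two illustrative calls to tag_insert() above.
** The tag names, the value, srcRid, and ckRid are all placeholders invented
** for this example; mtime=0.0 selects the default timestamp per the
** parameter description.
*/
#if 0
  /* srcRid: rid of the artifact carrying the tag; ckRid: the check-in
  ** being tagged.  Both are placeholders. */
  tag_insert("bgcolor", 1, "#c0ffc0", srcRid, 0.0, ckRid);   /* singleton property */
  tag_insert("sym-release-1.0", 2, 0, srcRid, 0.0, ckRid);   /* propagates to descendants */
#endif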
/* ** Generates an artifact Object for the given rid, ** which must refer to a Checkin. ** ** Returned value is NULL or an Object owned by the caller. */ cson_value * json_artifact_for_ci( int rid, char showFiles ){ cson_value * v = NULL; Stmt q = empty_Stmt; static cson_value * eventTypeLabel = NULL; if(!eventTypeLabel){ eventTypeLabel = json_new_string("checkin"); json_gc_add("$EVENT_TYPE_LABEL(commit)", eventTypeLabel); } db_prepare(&q, "SELECT b.uuid, " " cast(strftime('%%s',e.mtime) as int), " " strftime('%%s',e.omtime)," " e.user, " " e.comment" " FROM blob b, event e" " WHERE b.rid=%d" " AND e.objid=%d", rid, rid ); if( db_step(&q)==SQLITE_ROW ){ cson_object * o; cson_value * tmpV = NULL; const char *zUuid = db_column_text(&q, 0); const char *zUser; const char *zComment; char * zEUser, * zEComment; int mtime, omtime; v = cson_value_new_object(); o = cson_value_get_object(v); #define SET(K,V) cson_object_set(o,(K), (V)) SET("type", eventTypeLabel ); SET("uuid",json_new_string(zUuid)); SET("isLeaf", cson_value_new_bool(is_a_leaf(rid))); mtime = db_column_int(&q,1); SET("timestamp",json_new_int(mtime)); omtime = db_column_int(&q,2); if(omtime && (omtime!=mtime)){ SET("originTime",json_new_int(omtime)); } zUser = db_column_text(&q,3); zEUser = db_text(0, "SELECT value FROM tagxref WHERE tagid=%d AND rid=%d", TAG_USER, rid); if(zEUser){ SET("user", json_new_string(zEUser)); if(0!=strcmp(zEUser,zUser)){ SET("originUser",json_new_string(zUser)); } free(zEUser); }else{ SET("user",json_new_string(zUser)); } zComment = db_column_text(&q,4); zEComment = db_text(0, "SELECT value FROM tagxref WHERE tagid=%d AND rid=%d", TAG_COMMENT, rid); if(zEComment){ SET("comment",json_new_string(zEComment)); if(0 != strcmp(zEComment,zComment)){ SET("originComment", json_new_string(zComment)); } free(zEComment); }else{ SET("comment",json_new_string(zComment)); } tmpV = json_parent_uuids_for_ci(rid); if(tmpV){ SET("parents", tmpV); } tmpV = json_tags_for_checkin_rid(rid,0); if(tmpV){ SET("tags",tmpV); } if( showFiles ){ tmpV = json_get_changed_files(rid, 1); if(tmpV){ SET("files",tmpV); } } #undef SET } db_finalize(&q); return v; }
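/*
** For orientation, the object returned by json_artifact_for_ci() carries
** roughly this shape (field values invented; optional fields such as
** originTime, originUser, originComment, and files appear only under the
** conditions coded above):
**
**   {
**     "type": "checkin",
**     "uuid": "...",
**     "isLeaf": true,
**     "timestamp": 1396536700,
**     "user": "alice",
**     "comment": "...",
**     "parents": [ ... ],
**     "tags": [ ... ]
**   }
*/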
/* ** COMMAND: whatis* ** Usage: %fossil whatis NAME ** ** Resolve the symbol NAME into its canonical 40-character SHA1-hash ** artifact name and provide a description of what role that artifact ** plays. */ void whatis_cmd(void){ int rid; const char *zName; int verboseFlag; db_find_and_open_repository(0,0); verboseFlag = find_option("verbose","v",0)!=0; if( g.argc!=3 ) usage("whatis NAME"); zName = g.argv[2]; rid = symbolic_name_to_rid(zName, 0); if( rid<0 ){ fossil_print("Ambiguous artifact name prefix: %s\n", zName); }else if( rid==0 ){ fossil_print("Unknown artifact: %s\n", zName); }else{ Stmt q; db_prepare(&q, "SELECT uuid, size, datetime(mtime%s), ipaddr," " (SELECT group_concat(substr(tagname,5), ', ') FROM tag, tagxref" " WHERE tagname GLOB 'sym-*' AND tag.tagid=tagxref.tagid" " AND tagxref.rid=blob.rid AND tagxref.tagtype>0)" " FROM blob, rcvfrom" " WHERE rid=%d" " AND rcvfrom.rcvid=blob.rcvid", timeline_utc(), rid); if( db_step(&q)==SQLITE_ROW ){ const char *zTagList = db_column_text(&q, 4); if( verboseFlag ){ fossil_print("artifact: %s (%d)\n", db_column_text(&q,0), rid); fossil_print("size: %d bytes\n", db_column_int(&q,1)); fossil_print("received: %s from %s\n", db_column_text(&q, 2), db_column_text(&q, 3)); }else{ fossil_print("artifact: %s\n", db_column_text(&q,0)); fossil_print("size: %d bytes\n", db_column_int(&q,1)); } if( zTagList && zTagList[0] ){ fossil_print("tags: %s\n", zTagList); } } db_finalize(&q); db_prepare(&q, "SELECT type, datetime(mtime%s)," " coalesce(euser,user), coalesce(ecomment,comment)" " FROM event WHERE objid=%d", timeline_utc(), rid); if( db_step(&q)==SQLITE_ROW ){ const char *zType; switch( db_column_text(&q,0)[0] ){ case 'c': zType = "Check-in"; break; case 'w': zType = "Wiki-edit"; break; case 'e': zType = "Event"; break; case 't': zType = "Ticket-change"; break; case 'g': zType = "Tag-change"; break; default: zType = "Unknown"; break; } fossil_print("type: %s by %s on %s\n", zType, db_column_text(&q,2), db_column_text(&q, 1)); fossil_print("comment: "); comment_print(db_column_text(&q,3), 10, 78); } db_finalize(&q); db_prepare(&q, "SELECT filename.name, blob.uuid, datetime(event.mtime%s)," " coalesce(euser,user), coalesce(ecomment,comment)" " FROM mlink, filename, blob, event" " WHERE mlink.fid=%d" " AND filename.fnid=mlink.fnid" " AND event.objid=mlink.mid" " AND blob.rid=mlink.mid" " ORDER BY event.mtime DESC /*sort*/", timeline_utc(), rid); while( db_step(&q)==SQLITE_ROW ){ fossil_print("file: %s\n", db_column_text(&q,0)); fossil_print(" part of [%.10s] by %s on %s\n", db_column_text(&q, 1), db_column_text(&q, 3), db_column_text(&q, 2)); fossil_print(" "); comment_print(db_column_text(&q,4), 10, 78); } db_finalize(&q); } }
/* ** WEBPAGE: leaves ** ** Find leaves of all branches. */ void leaves_page(void){ Blob sql; Stmt q; int showAll = P("all")!=0; int showClosed = P("closed")!=0; login_check_credentials(); if( !g.perm.Read ){ login_needed(); return; } if( !showAll ){ style_submenu_element("All", "All", "leaves?all"); } if( !showClosed ){ style_submenu_element("Closed", "Closed", "leaves?closed"); } if( showClosed || showAll ){ style_submenu_element("Open", "Open", "leaves"); } style_header("Leaves"); login_anonymous_available(); style_sidebox_begin("Nomenclature:", "33%"); cgi_printf("<ol>\n" "<li> A <div class=\"sideboxDescribed\">leaf</div>\n" "is a check-in with no descendants in the same branch.</li>\n" "<li> An <div class=\"sideboxDescribed\">open leaf</div>\n" "is a leaf that does not have a \"closed\" tag\n" "and is thus assumed to still be in use.</li>\n" "<li> A <div class=\"sideboxDescribed\">closed leaf</div>\n" "has a \"closed\" tag and is thus assumed to\n" "be historical and no longer in active use.</li>\n" "</ol>\n"); style_sidebox_end(); if( showAll ){ cgi_printf("<h1>All leaves, both open and closed:</h1>\n"); }else if( showClosed ){ cgi_printf("<h1>Closed leaves:</h1>\n"); }else{ cgi_printf("<h1>Open leaves:</h1>\n"); } blob_zero(&sql); blob_append(&sql, timeline_query_for_www(), -1); blob_appendf(&sql, " AND blob.rid IN leaf"); if( showClosed ){ blob_appendf(&sql," AND %z", leaf_is_closed_sql("blob.rid")); }else if( !showAll ){ blob_appendf(&sql," AND NOT %z", leaf_is_closed_sql("blob.rid")); } db_prepare(&q, "%s ORDER BY event.mtime DESC", blob_str(&sql)); blob_reset(&sql); www_print_timeline(&q, TIMELINE_LEAFONLY, 0, 0, 0); db_finalize(&q); cgi_printf("<br />\n" "<script type=\"text/JavaScript\">\n" "function xin(id){\n" "}\n" "function xout(id){\n" "}\n" "</script>\n"); style_footer(); }
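/*
** For orientation, the WHERE clause assembled above reduces to one of three
** forms (sketch; leaf_is_closed_sql("blob.rid") left symbolic as CLOSED):
**
**   ?all=1:     ... AND blob.rid IN leaf
**   ?closed=1:  ... AND blob.rid IN leaf AND CLOSED
**   default:    ... AND blob.rid IN leaf AND NOT CLOSED
*/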
cson_value * json_artifact_file(cson_object * zParent, int rid){ cson_object * pay = NULL; Stmt q = empty_Stmt; cson_array * checkin_arr = NULL; char contentFormat; i64 contentSize = -1; char * parentUuid; if( ! g.perm.Read ){ json_set_err(FSL_JSON_E_DENIED, "Requires 'o' privileges."); return NULL; } pay = zParent; contentFormat = json_artifact_get_content_format_flag(); if( 0 != contentFormat ){ Blob content = empty_blob; const char *zMime; char const * zFormat = (contentFormat<1) ? "raw" : "html"; content_get(rid, &content); zMime = mimetype_from_content(&content); cson_object_set(zParent, "contentType", json_new_string(zMime ? zMime : "text/plain")); if(!zMime){/* text/plain */ if(0 < blob_size(&content)){ if( 0 < contentFormat ){/*HTML-size it*/ Blob html = empty_blob; wiki_convert(&content, &html, 0); assert( blob_size(&content) < blob_size(&html) ); blob_swap( &html, &content ); assert( blob_size(&content) > blob_size(&html) ); blob_reset( &html ); }/*else as-is*/ } cson_object_set(zParent, "content", cson_value_new_string(blob_str(&content), (unsigned int)blob_size(&content))); }/*else binary: ignore*/ contentSize = blob_size(&content); cson_object_set(zParent, "contentSize", json_new_int(contentSize) ); cson_object_set(zParent, "contentFormat", json_new_string(zFormat) ); blob_reset(&content); } contentSize = db_int64(-1, "SELECT size FROM blob WHERE rid=%d", rid); assert( -1 < contentSize ); cson_object_set(zParent, "size", json_new_int(contentSize) ); parentUuid = db_text(NULL, "SELECT DISTINCT p.uuid " "FROM blob p, blob f, mlink m " "WHERE m.pid=p.rid " "AND m.fid=f.rid " "AND f.rid=%d", rid ); if(parentUuid){ cson_object_set( zParent, "parent", json_new_string(parentUuid) ); fossil_free(parentUuid); } /* Find checkins associated with this file... */ db_prepare(&q, "SELECT filename.name AS name, " " (mlink.pid==0) AS isNew," " (mlink.fid==0) AS isDel," " cast(strftime('%%s',event.mtime) as int) AS timestamp," " coalesce(event.ecomment,event.comment) as comment," " coalesce(event.euser,event.user) as user," #if 0 " a.size AS size," /* same for all checkins. */ #endif " b.uuid as checkin, " #if 0 " mlink.mperm as mperm," #endif " coalesce((SELECT value FROM tagxref" " WHERE tagid=%d AND tagtype>0 AND " " rid=mlink.mid),'trunk') as branch" " FROM mlink, filename, event, blob a, blob b" " WHERE filename.fnid=mlink.fnid" " AND event.objid=mlink.mid" " AND a.rid=mlink.fid" " AND b.rid=mlink.mid" " AND mlink.fid=%d" " ORDER BY filename.name, event.mtime", TAG_BRANCH, rid ); /* TODO: add a "state" flag for the file in each checkin, e.g. "modified", "new", "deleted". */ checkin_arr = cson_new_array(); cson_object_set(pay, "checkins", cson_array_value(checkin_arr)); while( (SQLITE_ROW==db_step(&q) ) ){ cson_object * row = cson_value_get_object(cson_sqlite3_row_to_object(q.pStmt)); /* FIXME: move this isNew/isDel stuff into an SQL CASE statement. */ char const isNew = cson_value_get_bool(cson_object_get(row,"isNew")); char const isDel = cson_value_get_bool(cson_object_get(row,"isDel")); cson_object_set(row, "isNew", NULL); cson_object_set(row, "isDel", NULL); cson_object_set(row, "state", json_new_string(json_artifact_status_to_string(isNew, isDel))); cson_array_append( checkin_arr, cson_object_value(row) ); } db_finalize(&q); return cson_object_value(pay); }
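/*
** For orientation, json_artifact_file() above extends its zParent object
** with roughly the following fields (values invented; "contentType",
** "content", and "contentFormat" appear only when a content format was
** requested, and "parent" only when an earlier version of the file exists):
**
**   {
**     "contentType": "text/plain",
**     "content": "...",
**     "contentSize": 1234,
**     "contentFormat": "raw",
**     "size": 1234,
**     "parent": "...",
**     "checkins": [
**       { "name": "...", "timestamp": 1396536700, "comment": "...",
**         "user": "...", "checkin": "...", "branch": "trunk",
**         "state": "modified" }
**     ]
**   }
*/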
/* ** WEBPAGE: doc ** URL: /doc?name=BASELINE/PATH ** URL: /doc/BASELINE/PATH ** ** BASELINE can be either a baseline uuid prefix or magic words "tip" ** to mean the most recently checked in baseline or "ckout" to mean the ** content of the local checkout, if any. PATH is the relative pathname ** of some file. This method returns the file content. ** ** If PATH matches the patterns *.wiki or *.txt then formatting content ** is added before returning the file. For all other names, the content ** is returned straight without any interpretation or processing. */ void doc_page(void){ const char *zName; /* Argument to the /doc page */ const char *zMime; /* Document MIME type */ int vid = 0; /* Artifact of baseline */ int rid = 0; /* Artifact of file */ int i; /* Loop counter */ Blob filebody; /* Content of the documentation file */ char zBaseline[UUID_SIZE+1]; /* Baseline UUID */ login_check_credentials(); if( !g.perm.Read ){ login_needed(); return; } zName = PD("name", "tip/index.wiki"); for(i=0; zName[i] && zName[i]!='/'; i++){} if( zName[i]==0 || i>UUID_SIZE ){ zName = "index.html"; goto doc_not_found; } memcpy(zBaseline, zName, i); zBaseline[i] = 0; zName += i; while( zName[0]=='/' ){ zName++; } if( !file_is_simple_pathname(zName) ){ int n = strlen(zName); if( n>0 && zName[n-1]=='/' ){ zName = mprintf("%sindex.html", zName); if( !file_is_simple_pathname(zName) ){ goto doc_not_found; } }else{ goto doc_not_found; } } if( fossil_strcmp(zBaseline,"ckout")==0 && db_open_local()==0 ){ sqlite3_snprintf(sizeof(zBaseline), zBaseline, "tip"); } if( fossil_strcmp(zBaseline,"ckout")==0 ){ /* Read from the local checkout */ char *zFullpath; db_must_be_within_tree(); zFullpath = mprintf("%s/%s", g.zLocalRoot, zName); if( !file_isfile(zFullpath) ){ goto doc_not_found; } if( blob_read_from_file(&filebody, zFullpath)<0 ){ goto doc_not_found; } }else{ db_begin_transaction(); if( fossil_strcmp(zBaseline,"tip")==0 ){ vid = db_int(0, "SELECT objid FROM event WHERE type='ci'" " ORDER BY mtime DESC LIMIT 1"); }else{ vid = name_to_typed_rid(zBaseline, "ci"); } /* Create the baseline cache if it does not already exist */ db_multi_exec( "CREATE TABLE IF NOT EXISTS vcache(\n" " vid INTEGER, -- baseline ID\n" " fname TEXT, -- filename\n" " rid INTEGER, -- artifact ID\n" " UNIQUE(vid,fname,rid)\n" ")" ); /* Check to see if the documentation file artifact ID is contained ** in the baseline cache */ rid = db_int(0, "SELECT rid FROM vcache" " WHERE vid=%d AND fname=%Q", vid, zName); if( rid==0 && db_exists("SELECT 1 FROM vcache WHERE vid=%d", vid) ){ goto doc_not_found; } if( rid==0 ){ Stmt s; Manifest *pM; ManifestFile *pFile; /* Add the vid baseline to the cache */ if( db_int(0, "SELECT count(*) FROM vcache")>10000 ){ db_multi_exec("DELETE FROM vcache"); } pM = manifest_get(vid, CFTYPE_MANIFEST); if( pM==0 ){ goto doc_not_found; } db_prepare(&s, "INSERT INTO vcache(vid,fname,rid)" " SELECT %d, :fname, rid FROM blob" " WHERE uuid=:uuid", vid ); manifest_file_rewind(pM); while( (pFile = manifest_file_next(pM,0))!=0 ){ db_bind_text(&s, ":fname", pFile->zName); db_bind_text(&s, ":uuid", pFile->zUuid); db_step(&s); db_reset(&s); } db_finalize(&s); manifest_destroy(pM); /* Try again to find the file */ rid = db_int(0, "SELECT rid FROM vcache" " WHERE vid=%d AND fname=%Q", vid, zName); } if( rid==0 ){ goto doc_not_found; } /* Get the file content */ if( content_get(rid, &filebody)==0 ){ goto doc_not_found; } db_end_transaction(0); } /* The file is now contained in the filebody blob. 
Deliver the ** file to the user */ zMime = P("mimetype"); if( zMime==0 ){ zMime = mimetype_from_name(zName); } Th_Store("doc_name", zName); Th_Store("doc_version", db_text(0, "SELECT '[' || substr(uuid,1,10) || ']'" " FROM blob WHERE rid=%d", vid)); Th_Store("doc_date", db_text(0, "SELECT datetime(mtime) FROM event" " WHERE objid=%d AND type='ci'", vid)); if( fossil_strcmp(zMime, "application/x-fossil-wiki")==0 ){ Blob title, tail; if( wiki_find_title(&filebody, &title, &tail) ){ style_header(blob_str(&title)); wiki_convert(&tail, 0, 0); }else{ style_header("Documentation"); wiki_convert(&filebody, 0, 0); } style_footer(); }else if( fossil_strcmp(zMime, "text/plain")==0 ){ style_header("Documentation"); @ <blockquote><pre>
/* ** COMMAND: revert ** ** Usage: %fossil revert ?-r REVISION? ?FILE ...? ** ** Revert to the current repository version of FILE, or to ** the version associated with baseline REVISION if the -r flag ** appears. ** ** If FILE was part of a rename operation, both the original file ** and the renamed file are reverted. ** ** Revert all files if no file name is provided. ** ** If a file is reverted accidently, it can be restored using ** the "fossil undo" command. ** ** Options: ** -r REVISION revert given FILE(s) back to given REVISION ** ** See also: redo, undo, update */ void revert_cmd(void) { const char *zFile; const char *zRevision; Blob record; int i; int errCode; Stmt q; undo_capture_command_line(); zRevision = find_option("revision", "r", 1); verify_all_options(); if( g.argc<2 ) { usage("?OPTIONS? [FILE] ..."); } if( zRevision && g.argc<3 ) { fossil_fatal("the --revision option does not work for the entire tree"); } db_must_be_within_tree(); db_begin_transaction(); undo_begin(); db_multi_exec("CREATE TEMP TABLE torevert(name UNIQUE);"); if( g.argc>2 ) { for(i=2; i<g.argc; i++) { Blob fname; zFile = mprintf("%/", g.argv[i]); blob_zero(&fname); file_tree_name(zFile, &fname, 0, 1); db_multi_exec( "REPLACE INTO torevert VALUES(%B);" "INSERT OR IGNORE INTO torevert" " SELECT pathname" " FROM vfile" " WHERE origname=%B;", &fname, &fname ); blob_reset(&fname); } } else { int vid; vid = db_lget_int("checkout", 0); vfile_check_signature(vid, 0); db_multi_exec( "DELETE FROM vmerge;" "INSERT OR IGNORE INTO torevert " " SELECT pathname" " FROM vfile " " WHERE chnged OR deleted OR rid=0 OR pathname!=origname;" ); } db_multi_exec( "INSERT OR IGNORE INTO torevert" " SELECT origname" " FROM vfile" " WHERE origname!=pathname AND pathname IN (SELECT name FROM torevert);" ); blob_zero(&record); db_prepare(&q, "SELECT name FROM torevert"); if( zRevision==0 ) { int vid = db_lget_int("checkout", 0); zRevision = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", vid); } while( db_step(&q)==SQLITE_ROW ) { int isExe = 0; int isLink = 0; char *zFull; zFile = db_column_text(&q, 0); zFull = mprintf("%/%/", g.zLocalRoot, zFile); errCode = historical_version_of_file(zRevision, zFile, &record, &isLink, &isExe, 0, 2); if( errCode==2 ) { if( db_int(0, "SELECT rid FROM vfile WHERE pathname=%Q OR origname=%Q", zFile, zFile)==0 ) { fossil_print("UNMANAGE %s\n", zFile); } else { undo_save(zFile); file_delete(zFull); fossil_print("DELETE %s\n", zFile); } db_multi_exec( "UPDATE OR REPLACE vfile" " SET pathname=origname, origname=NULL" " WHERE pathname=%Q AND origname!=pathname;" "DELETE FROM vfile WHERE pathname=%Q", zFile, zFile ); } else { sqlite3_int64 mtime; undo_save(zFile); if( file_wd_size(zFull)>=0 && (isLink || file_wd_islink(0)) ) { file_delete(zFull); } if( isLink ) { symlink_create(blob_str(&record), zFull); } else { blob_write_to_file(&record, zFull); } file_wd_setexe(zFull, isExe); fossil_print("REVERT %s\n", zFile); mtime = file_wd_mtime(zFull); db_multi_exec( "UPDATE vfile" " SET mtime=%lld, chnged=0, deleted=0, isexe=%d, islink=%d,mrid=rid" " WHERE pathname=%Q OR origname=%Q", mtime, isExe, isLink, zFile, zFile ); } blob_reset(&record); free(zFull); } db_finalize(&q); undo_finish(); db_end_transaction(0); }
/* ** COMMAND: update ** ** Usage: %fossil update ?OPTIONS? ?VERSION? ?FILES...? ** ** Change the version of the current checkout to VERSION. Any ** uncommitted changes are retained and applied to the new checkout. ** ** The VERSION argument can be a specific version or tag or branch ** name. If the VERSION argument is omitted, then the leaf of the ** subtree that begins at the current version is used, if there is ** only a single leaf. VERSION can also be "current" to select the ** leaf of the current version or "latest" to select the most recent ** check-in. ** ** If one or more FILES are listed after the VERSION then only the ** named files are candidates to be updated, and any updates to them ** will be treated as edits to the current version. Using a directory ** name for one of the FILES arguments is the same as using every ** subdirectory and file beneath that directory. ** ** If FILES is omitted, all files in the current checkout are subject ** to being updated and the version of the current checkout is changed ** to VERSION. Any uncommitted changes are retained and applied to the ** new checkout. ** ** The -n or --dry-run option causes this command to do a "dry run". ** It prints out what would have happened but does not actually make ** any changes to the current checkout or the repository. ** ** The -v or --verbose option prints status information about ** unchanged files in addition to those file that actually do change. ** ** Options: ** --case-sensitive <BOOL> override case-sensitive setting ** --debug print debug information on stdout ** --latest acceptable in place of VERSION, update to latest version ** --force-missing force update if missing content after sync ** -n|--dry-run If given, display instead of run actions ** -v|--verbose print status information about all files ** -W|--width <num> Width of lines (default is to auto-detect). Must be >20 ** or 0 (= no limit, resulting in a single line per entry). ** ** See also: revert */ void update_cmd(void) { int vid; /* Current version */ int tid=0; /* Target version - version we are changing to */ Stmt q; int latestFlag; /* --latest. Pick the latest version if true */ int dryRunFlag; /* -n or --dry-run. Do a dry run */ int verboseFlag; /* -v or --verbose. Output extra information */ int forceMissingFlag; /* --force-missing. Continue if missing content */ int debugFlag; /* --debug option */ int setmtimeFlag; /* --setmtime. Set mtimes on files */ int nChng; /* Number of file renames */ int *aChng; /* Array of file renames */ int i; /* Loop counter */ int nConflict = 0; /* Number of merge conflicts */ int nOverwrite = 0; /* Number of unmanaged files overwritten */ int nUpdate = 0; /* Number of changes of any kind */ int width; /* Width of printed comment lines */ Stmt mtimeXfer; /* Statement to transfer mtimes */ const char *zWidth; /* Width option string value */ if( !internalUpdate ) { undo_capture_command_line(); url_proxy_options(); } zWidth = find_option("width","W",1); if( zWidth ) { width = atoi(zWidth); if( (width!=0) && (width<=20) ) { fossil_fatal("-W|--width value must be >20 or 0"); } } else { width = -1; } latestFlag = find_option("latest",0, 0)!=0; dryRunFlag = find_option("dry-run","n",0)!=0; if( !dryRunFlag ) { dryRunFlag = find_option("nochange",0,0)!=0; /* deprecated */ } verboseFlag = find_option("verbose","v",0)!=0; forceMissingFlag = find_option("force-missing",0,0)!=0; debugFlag = find_option("debug",0,0)!=0; setmtimeFlag = find_option("setmtime",0,0)!=0; /* We should be done with options.. 
*/ verify_all_options(); db_must_be_within_tree(); vid = db_lget_int("checkout", 0); user_select(); if( !dryRunFlag && !internalUpdate ) { if( autosync_loop(SYNC_PULL + SYNC_VERBOSE*verboseFlag, db_get_int("autosync-tries", 1)) ) { fossil_fatal("Cannot proceed with update"); } } /* Create any empty directories now, as well as after the update, ** so changes in settings are reflected now */ if( !dryRunFlag ) ensure_empty_dirs_created(); if( internalUpdate ) { tid = internalUpdate; } else if( g.argc>=3 ) { if( fossil_strcmp(g.argv[2], "current")==0 ) { /* If VERSION is "current", then use the same algorithm to find the ** target as if VERSION were omitted. */ } else if( fossil_strcmp(g.argv[2], "latest")==0 ) { /* If VERSION is "latest", then use the same algorithm to find the ** target as if VERSION were omitted and the --latest flag is present. */ latestFlag = 1; } else { tid = name_to_typed_rid(g.argv[2],"ci"); if( tid==0 || !is_a_version(tid) ) { fossil_fatal("no such check-in: %s", g.argv[2]); } } } /* If no VERSION is specified on the command-line, then look for a ** descendent of the current version. If there are multiple descendants, ** look for one from the same branch as the current version. If there ** are still multiple descendants, show them all and refuse to update ** until the user selects one. */ if( tid==0 ) { int closeCode = 1; compute_leaves(vid, closeCode); if( !db_exists("SELECT 1 FROM leaves") ) { closeCode = 0; compute_leaves(vid, closeCode); } if( !latestFlag && db_int(0, "SELECT count(*) FROM leaves")>1 ) { db_multi_exec( "DELETE FROM leaves WHERE rid NOT IN" " (SELECT leaves.rid FROM leaves, tagxref" " WHERE leaves.rid=tagxref.rid AND tagxref.tagid=%d" " AND tagxref.value==(SELECT value FROM tagxref" " WHERE tagid=%d AND rid=%d))", TAG_BRANCH, TAG_BRANCH, vid ); if( db_int(0, "SELECT count(*) FROM leaves")>1 ) { compute_leaves(vid, closeCode); db_prepare(&q, "%s " " AND event.objid IN leaves" " ORDER BY event.mtime DESC", timeline_query_for_tty() ); print_timeline(&q, -100, width, 0); db_finalize(&q); fossil_fatal("Multiple descendants"); } } tid = db_int(0, "SELECT rid FROM leaves, event" " WHERE event.objid=leaves.rid" " ORDER BY event.mtime DESC"); if( tid==0 ) tid = vid; } if( tid==0 ) { return; } db_begin_transaction(); vfile_check_signature(vid, CKSIG_ENOTFILE); if( !dryRunFlag && !internalUpdate ) undo_begin(); if( load_vfile_from_rid(tid) && !forceMissingFlag ) { fossil_fatal("missing content, unable to update"); }; /* ** The record.fn field is used to match files against each other. The ** FV table contains one row for each each unique filename in ** in the current checkout, the pivot, and the version being merged. */ db_multi_exec( "DROP TABLE IF EXISTS fv;" "CREATE TEMP TABLE fv(" " fn TEXT %s PRIMARY KEY," /* The filename relative to root */ " idv INTEGER," /* VFILE entry for current version */ " idt INTEGER," /* VFILE entry for target version */ " chnged BOOLEAN," /* True if current version has been edited */ " islinkv BOOLEAN," /* True if current file is a link */ " islinkt BOOLEAN," /* True if target file is a link */ " ridv INTEGER," /* Record ID for current version */ " ridt INTEGER," /* Record ID for target */ " isexe BOOLEAN," /* Does target have execute permission? 
*/ " deleted BOOLEAN DEFAULT 0,"/* File marked by "rm" to become unmanaged */ " fnt TEXT %s" /* Filename of same file on target version */ ");", filename_collation(), filename_collation() ); /* Add files found in the current version */ db_multi_exec( "INSERT OR IGNORE INTO fv(fn,fnt,idv,idt,ridv,ridt,isexe,chnged,deleted)" " SELECT pathname, pathname, id, 0, rid, 0, isexe, chnged, deleted" " FROM vfile WHERE vid=%d", vid ); /* Compute file name changes on V->T. Record name changes in files that ** have changed locally. */ if( vid ) { find_filename_changes(vid, tid, 1, &nChng, &aChng, debugFlag ? "V->T": 0); if( nChng ) { for(i=0; i<nChng; i++) { db_multi_exec( "UPDATE fv" " SET fnt=(SELECT name FROM filename WHERE fnid=%d)" " WHERE fn=(SELECT name FROM filename WHERE fnid=%d) AND chnged", aChng[i*2+1], aChng[i*2] ); } fossil_free(aChng); } } /* Add files found in the target version T but missing from the current ** version V. */ db_multi_exec( "INSERT OR IGNORE INTO fv(fn,fnt,idv,idt,ridv,ridt,isexe,chnged)" " SELECT pathname, pathname, 0, 0, 0, 0, isexe, 0 FROM vfile" " WHERE vid=%d" " AND pathname %s NOT IN (SELECT fnt FROM fv)", tid, filename_collation() ); /* ** Compute the file version ids for T */ db_multi_exec( "UPDATE fv SET" " idt=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnt=pathname),0)," " ridt=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnt=pathname),0)", tid, tid ); /* ** Add islink information */ db_multi_exec( "UPDATE fv SET" " islinkv=coalesce((SELECT islink FROM vfile" " WHERE vid=%d AND fnt=pathname),0)," " islinkt=coalesce((SELECT islink FROM vfile" " WHERE vid=%d AND fnt=pathname),0)", vid, tid ); if( debugFlag ) { db_prepare(&q, "SELECT rowid, fn, fnt, chnged, ridv, ridt, isexe," " islinkv, islinkt FROM fv" ); while( db_step(&q)==SQLITE_ROW ) { fossil_print("%3d: ridv=%-4d ridt=%-4d chnged=%d isexe=%d" " islinkv=%d islinkt=%d\n", db_column_int(&q, 0), db_column_int(&q, 4), db_column_int(&q, 5), db_column_int(&q, 3), db_column_int(&q, 6), db_column_int(&q, 7), db_column_int(&q, 8)); fossil_print(" fnv = [%s]\n", db_column_text(&q, 1)); fossil_print(" fnt = [%s]\n", db_column_text(&q, 2)); } db_finalize(&q); } /* If FILES appear on the command-line, remove from the "fv" table ** every entry that is not named on the command-line or which is not ** in a directory named on the command-line. */ if( g.argc>=4 ) { Blob sql; /* SQL statement to purge unwanted entries */ Blob treename; /* Normalized filename */ int i; /* Loop counter */ const char *zSep; /* Term separator */ blob_zero(&sql); blob_append(&sql, "DELETE FROM fv WHERE ", -1); zSep = ""; for(i=3; i<g.argc; i++) { file_tree_name(g.argv[i], &treename, 0, 1); if( file_wd_isdir(g.argv[i])==1 ) { if( blob_size(&treename) != 1 || blob_str(&treename)[0] != '.' 
) { blob_append_sql(&sql, "%sfn NOT GLOB '%q/*' ", zSep /*safe-for-%s*/, blob_str(&treename)); } else { blob_reset(&sql); break; } } else { blob_append_sql(&sql, "%sfn<>%Q ", zSep /*safe-for-%s*/, blob_str(&treename)); } zSep = "AND "; blob_reset(&treename); } db_multi_exec("%s", blob_sql_text(&sql)); blob_reset(&sql); } /* ** Alter the content of the checkout so that it conforms with the ** target */ db_prepare(&q, "SELECT fn, idv, ridv, idt, ridt, chnged, fnt," " isexe, islinkv, islinkt, deleted FROM fv ORDER BY 1" ); db_prepare(&mtimeXfer, "UPDATE vfile SET mtime=(SELECT mtime FROM vfile WHERE id=:idv)" " WHERE id=:idt" ); assert( g.zLocalRoot!=0 ); assert( strlen(g.zLocalRoot)>0 ); assert( g.zLocalRoot[strlen(g.zLocalRoot)-1]=='/' ); while( db_step(&q)==SQLITE_ROW ) { const char *zName = db_column_text(&q, 0); /* The filename from root */ int idv = db_column_int(&q, 1); /* VFILE entry for current */ int ridv = db_column_int(&q, 2); /* RecordID for current */ int idt = db_column_int(&q, 3); /* VFILE entry for target */ int ridt = db_column_int(&q, 4); /* RecordID for target */ int chnged = db_column_int(&q, 5); /* Current is edited */ const char *zNewName = db_column_text(&q,6);/* New filename */ int isexe = db_column_int(&q, 7); /* EXE perm for new file */ int islinkv = db_column_int(&q, 8); /* Is current file is a link */ int islinkt = db_column_int(&q, 9); /* Is target file is a link */ int deleted = db_column_int(&q, 10); /* Marked for deletion */ char *zFullPath; /* Full pathname of the file */ char *zFullNewPath; /* Full pathname of dest */ char nameChng; /* True if the name changed */ zFullPath = mprintf("%s%s", g.zLocalRoot, zName); zFullNewPath = mprintf("%s%s", g.zLocalRoot, zNewName); nameChng = fossil_strcmp(zName, zNewName); nUpdate++; if( deleted ) { db_multi_exec("UPDATE vfile SET deleted=1 WHERE id=%d", idt); } if( idv>0 && ridv==0 && idt>0 && ridt>0 ) { /* Conflict. This file has been added to the current checkout ** but also exists in the target checkout. Use the current version. */ fossil_print("CONFLICT %s\n", zName); nConflict++; } else if( idt>0 && idv==0 ) { /* File added in the target. */ if( file_wd_isfile_or_link(zFullPath) ) { fossil_print("ADD %s - overwrites an unmanaged file\n", zName); nOverwrite++; } else { fossil_print("ADD %s\n", zName); } if( !dryRunFlag && !internalUpdate ) undo_save(zName); if( !dryRunFlag ) vfile_to_disk(0, idt, 0, 0); } else if( idt>0 && idv>0 && ridt!=ridv && (chnged==0 || deleted) ) { /* The file is unedited. Change it to the target version */ if( deleted ) { fossil_print("UPDATE %s - change to unmanaged file\n", zName); } else { fossil_print("UPDATE %s\n", zName); } if( !dryRunFlag && !internalUpdate ) undo_save(zName); if( !dryRunFlag ) vfile_to_disk(0, idt, 0, 0); } else if( idt>0 && idv>0 && !deleted && file_wd_size(zFullPath)<0 ) { /* The file missing from the local check-out. Restore it to the ** version that appears in the target. */ fossil_print("UPDATE %s\n", zName); if( !dryRunFlag && !internalUpdate ) undo_save(zName); if( !dryRunFlag ) vfile_to_disk(0, idt, 0, 0); } else if( idt==0 && idv>0 ) { if( ridv==0 ) { /* Added in current checkout. Continue to hold the file as ** as an addition */ db_multi_exec("UPDATE vfile SET vid=%d WHERE id=%d", tid, idv); } else if( chnged ) { /* Edited locally but deleted from the target. Do not track the ** file but keep the edited version around. 
*/ fossil_print("CONFLICT %s - edited locally but deleted by update\n", zName); nConflict++; } else { fossil_print("REMOVE %s\n", zName); if( !dryRunFlag && !internalUpdate ) undo_save(zName); if( !dryRunFlag ) file_delete(zFullPath); } } else if( idt>0 && idv>0 && ridt!=ridv && chnged ) { /* Merge the changes in the current tree into the target version */ Blob r, t, v; int rc; if( nameChng ) { fossil_print("MERGE %s -> %s\n", zName, zNewName); } else { fossil_print("MERGE %s\n", zName); } if( islinkv || islinkt /* || file_wd_islink(zFullPath) */ ) { fossil_print("***** Cannot merge symlink %s\n", zNewName); nConflict++; } else { unsigned mergeFlags = dryRunFlag ? MERGE_DRYRUN : 0; if( !dryRunFlag && !internalUpdate ) undo_save(zName); content_get(ridt, &t); content_get(ridv, &v); rc = merge_3way(&v, zFullPath, &t, &r, mergeFlags); if( rc>=0 ) { if( !dryRunFlag ) { blob_write_to_file(&r, zFullNewPath); file_wd_setexe(zFullNewPath, isexe); } if( rc>0 ) { fossil_print("***** %d merge conflicts in %s\n", rc, zNewName); nConflict++; } } else { if( !dryRunFlag ) { blob_write_to_file(&t, zFullNewPath); file_wd_setexe(zFullNewPath, isexe); } fossil_print("***** Cannot merge binary file %s\n", zNewName); nConflict++; } } if( nameChng && !dryRunFlag ) file_delete(zFullPath); blob_reset(&v); blob_reset(&t); blob_reset(&r); } else { nUpdate--; if( chnged ) { if( verboseFlag ) fossil_print("EDITED %s\n", zName); } else { db_bind_int(&mtimeXfer, ":idv", idv); db_bind_int(&mtimeXfer, ":idt", idt); db_step(&mtimeXfer); db_reset(&mtimeXfer); if( verboseFlag ) fossil_print("UNCHANGED %s\n", zName); } } free(zFullPath); free(zFullNewPath); } db_finalize(&q); db_finalize(&mtimeXfer); fossil_print("%.79c\n",'-'); if( nUpdate==0 ) { show_common_info(tid, "checkout:", 1, 0); fossil_print("%-13s None. Already up-to-date\n", "changes:"); } else { show_common_info(tid, "updated-to:", 1, 0); fossil_print("%-13s %d file%s modified.\n", "changes:", nUpdate, nUpdate>1 ? "s" : ""); } /* Report on conflicts */ if( !dryRunFlag ) { Stmt q; int nMerge = 0; db_prepare(&q, "SELECT uuid, id FROM vmerge JOIN blob ON merge=rid" " WHERE id<=0"); while( db_step(&q)==SQLITE_ROW ) { const char *zLabel = "merge"; switch( db_column_int(&q, 1) ) { case -1: zLabel = "cherrypick merge"; break; case -2: zLabel = "backout merge"; break; } fossil_warning("uncommitted %s against %S.", zLabel, db_column_text(&q, 0)); nMerge++; } db_finalize(&q); leaf_ambiguity_warning(tid, tid); if( nConflict ) { if( internalUpdate ) { internalConflictCnt = nConflict; nConflict = 0; } else { fossil_warning("WARNING: %d merge conflicts", nConflict); } } if( nOverwrite ) { fossil_warning("WARNING: %d unmanaged files were overwritten", nOverwrite); } if( nMerge ) { fossil_warning("WARNING: %d uncommitted prior merges", nMerge); } } /* ** Clean up the mid and pid VFILE entries. Then commit the changes. */ if( dryRunFlag ) { db_end_transaction(1); /* With --dry-run, rollback changes */ } else { ensure_empty_dirs_created(); if( g.argc<=3 ) { /* All files updated. Shift the current checkout to the target. */ db_multi_exec("DELETE FROM vfile WHERE vid!=%d", tid); checkout_set_all_exe(tid); manifest_to_disk(tid); db_lset_int("checkout", tid); } else { /* A subset of files have been checked out. Keep the current ** checkout unchanged. */ db_multi_exec("DELETE FROM vfile WHERE vid!=%d", vid); } if( !internalUpdate ) undo_finish(); if( setmtimeFlag ) vfile_check_signature(tid, CKSIG_SETMTIME); db_end_transaction(0); } }
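/*
** A standalone sketch (not Fossil code) of the statement-reuse idiom behind
** the mtimeXfer statement above: prepare one UPDATE with named parameters,
** resolve the parameter indexes once, then bind/step/reset per row.  Uses
** the public SQLite C API; the caller-supplied id arrays are assumptions
** made for illustration.
*/
#include <sqlite3.h>

static void xfer_mtimes(sqlite3 *db, const int *aIdv, const int *aIdt, int n){
  sqlite3_stmt *p;
  int i, iIdv, iIdt;
  sqlite3_prepare_v2(db,
     "UPDATE vfile SET mtime=(SELECT mtime FROM vfile WHERE id=:idv)"
     " WHERE id=:idt", -1, &p, 0);
  iIdv = sqlite3_bind_parameter_index(p, ":idv");
  iIdt = sqlite3_bind_parameter_index(p, ":idt");
  for(i=0; i<n; i++){
    sqlite3_bind_int(p, iIdv, aIdv[i]);
    sqlite3_bind_int(p, iIdt, aIdt[i]);
    sqlite3_step(p);    /* an UPDATE returns SQLITE_DONE */
    sqlite3_reset(p);   /* keep the compiled statement for the next pair */
  }
  sqlite3_finalize(p);
}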
void Response::deserialize(sqlite3* db) { sqlite3_stmt *read; db_prepare(db, read, boost::format("select " "value, " "detect_sipc_acc_score, " "detect_sipc_objects, " "refine_sipc_rscore, " "refine_sipc_tscore, " "refine_sipc_cscore, " "refine_sipc_frames, " "avg_angle_err, " "avg_succ_angle_err, " "avg_trans_err, " "avg_succ_trans_err, " "avg_angle_sq_err, " "avg_succ_angle_sq_err, " "avg_trans_sq_err, " "avg_succ_trans_sq_err, " "succ_rate, " "mislabel_rate, " "none_rate, " "avg_keypoints, " "avg_detect_guesses, " "avg_detect_matches, " "avg_detect_inliers, " "avg_detect_choice_matches, " "avg_detect_choice_inliers, " "detect_tp, " "detect_fp, " "detect_fn, " "detect_tn, " "avg_refine_guesses, " "avg_refine_matches, " "avg_refine_inliers, " "avg_refine_choice_matches, " "avg_refine_choice_inliers, " "train_runtime, " "test_runtime " "from response where id=%d;") % id); db_step(read, SQLITE_ROW); int c = 0; value = sqlite3_column_double(read, c++); detect_sipc.acc_score = sqlite3_column_double(read, c++); detect_sipc.objects = sqlite3_column_int(read, c++); refine_sipc.rscore = sqlite3_column_double(read, c++); refine_sipc.tscore = sqlite3_column_double(read, c++); refine_sipc.cscore = sqlite3_column_double(read, c++); refine_sipc.frames = sqlite3_column_int(read, c++); avg_angle_err = sqlite3_column_double(read, c++); avg_succ_angle_err = sqlite3_column_double(read, c++); avg_trans_err = sqlite3_column_double(read, c++); avg_succ_trans_err = sqlite3_column_double(read, c++); avg_angle_sq_err = sqlite3_column_double(read, c++); avg_succ_angle_sq_err = sqlite3_column_double(read, c++); avg_trans_sq_err = sqlite3_column_double(read, c++); avg_succ_trans_sq_err = sqlite3_column_double(read, c++); succ_rate = sqlite3_column_double(read, c++); mislabel_rate = sqlite3_column_double(read, c++); none_rate = sqlite3_column_double(read, c++); avg_keypoints = sqlite3_column_double(read, c++); avg_detect_guesses = sqlite3_column_double(read, c++); avg_detect_matches = sqlite3_column_double(read, c++); avg_detect_inliers = sqlite3_column_double(read, c++); avg_detect_choice_matches = sqlite3_column_double(read, c++); avg_detect_choice_inliers = sqlite3_column_double(read, c++); detect_tp = sqlite3_column_int(read, c++); detect_fp = sqlite3_column_int(read, c++); detect_fn = sqlite3_column_int(read, c++); detect_tn = sqlite3_column_int(read, c++); avg_refine_guesses = sqlite3_column_double(read, c++); avg_refine_matches = sqlite3_column_double(read, c++); avg_refine_inliers = sqlite3_column_double(read, c++); avg_refine_choice_matches = sqlite3_column_double(read, c++); avg_refine_choice_inliers = sqlite3_column_double(read, c++); train_runtime = sqlite3_column_double(read, c++); test_runtime = sqlite3_column_double(read, c++); sqlite3_finalize(read); }
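/*
** The deserializer above depends on the SELECT column list and the
** sqlite3_column_*() calls staying in the same order; the running index c
** makes that pairing explicit.  A standalone C sketch of the same idiom
** against a trimmed-down "response" table -- the three columns here are an
** assumption for illustration, not the project's actual schema.
*/
#include <sqlite3.h>

typedef struct Metrics Metrics;
struct Metrics {
  double value;
  int frames;
  double succ_rate;
};

static int load_metrics(sqlite3 *db, int id, Metrics *m){
  sqlite3_stmt *p;
  int c = 0, rc;
  sqlite3_prepare_v2(db,
      "SELECT value, frames, succ_rate FROM response WHERE id=?1",
      -1, &p, 0);
  sqlite3_bind_int(p, 1, id);
  rc = sqlite3_step(p);
  if( rc==SQLITE_ROW ){
    /* read order matches the SELECT list; add new columns in both places */
    m->value     = sqlite3_column_double(p, c++);
    m->frames    = sqlite3_column_int(p, c++);
    m->succ_rate = sqlite3_column_double(p, c++);
  }
  sqlite3_finalize(p);
  return rc==SQLITE_ROW ? 0 : -1;
}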
/* ** Convert a symbolic name into a RID. Acceptable forms: ** ** * SHA1 hash ** * SHA1 hash prefix of at least 4 characters ** * Symbolic Name ** * "tag:" + symbolic name ** * Date or date-time ** * "date:" + Date or date-time ** * symbolic-name ":" date-time ** * "tip" ** ** The following additional forms are available in local checkouts: ** ** * "current" ** * "prev" or "previous" ** * "next" ** ** Return the RID of the matching artifact. Or return 0 if the name does not ** match any known object. Or return -1 if the name is ambiguous. ** ** The zType parameter specifies the type of artifact: ci, t, w, e, g. ** If zType is NULL or "" or "*" then any type of artifact will serve. ** zType is "ci" in most use cases since we are usually searching for ** a check-in. */ int symbolic_name_to_rid(const char *zTag, const char *zType){ int vid; int rid = 0; int nTag; int i; if( zType==0 || zType[0]==0 ) zType = "*"; if( zTag==0 || zTag[0]==0 ) return 0; /* special keyword: "tip" */ if( fossil_strcmp(zTag, "tip")==0 && (zType[0]=='*' || zType[0]=='c') ){ rid = db_int(0, "SELECT objid" " FROM event" " WHERE type='ci'" " ORDER BY event.mtime DESC" ); if( rid ) return rid; } /* special keywords: "prev", "previous", "current", and "next" */ if( g.localOpen && (vid=db_lget_int("checkout",0))!=0 ){ if( fossil_strcmp(zTag, "current")==0 ){ rid = vid; }else if( fossil_strcmp(zTag, "prev")==0 || fossil_strcmp(zTag, "previous")==0 ){ rid = db_int(0, "SELECT pid FROM plink WHERE cid=%d AND isprim", vid); }else if( fossil_strcmp(zTag, "next")==0 ){ rid = db_int(0, "SELECT cid FROM plink WHERE pid=%d" " ORDER BY isprim DESC, mtime DESC", vid); } if( rid ) return rid; } /* Date and times */ if( memcmp(zTag, "date:", 5)==0 ){ rid = db_int(0, "SELECT objid FROM event" " WHERE mtime<=julianday(%Q,'utc') AND type GLOB '%q'" " ORDER BY mtime DESC LIMIT 1", &zTag[5], zType); return rid; } if( fossil_isdate(zTag) ){ rid = db_int(0, "SELECT objid FROM event" " WHERE mtime<=julianday(%Q,'utc') AND type GLOB '%q'" " ORDER BY mtime DESC LIMIT 1", zTag, zType); if( rid) return rid; } /* Deprecated date & time formats: "local:" + date-time and ** "utc:" + date-time */ if( memcmp(zTag, "local:", 6)==0 ){ rid = db_int(0, "SELECT objid FROM event" " WHERE mtime<=julianday(%Q) AND type GLOB '%q'" " ORDER BY mtime DESC LIMIT 1", &zTag[6], zType); return rid; } if( memcmp(zTag, "utc:", 4)==0 ){ rid = db_int(0, "SELECT objid FROM event" " WHERE mtime<=julianday('%qz') AND type GLOB '%q'" " ORDER BY mtime DESC LIMIT 1", &zTag[4], zType); return rid; } /* "tag:" + symbolic-name */ if( memcmp(zTag, "tag:", 4)==0 ){ rid = db_int(0, "SELECT event.objid, max(event.mtime)" " FROM tag, tagxref, event" " WHERE tag.tagname='sym-%q' " " AND tagxref.tagid=tag.tagid AND tagxref.tagtype>0 " " AND event.objid=tagxref.rid " " AND event.type GLOB '%q'", &zTag[4], zType ); return rid; } /* root:TAG -> The origin of the branch */ if( memcmp(zTag, "root:", 5)==0 ){ Stmt q; int rc; char *zBr; rid = symbolic_name_to_rid(zTag+5, zType); zBr = db_text("trunk","SELECT value FROM tagxref" " WHERE rid=%d AND tagid=%d" " AND tagtype>0", rid, TAG_BRANCH); db_prepare(&q, "SELECT pid, EXISTS(SELECT 1 FROM tagxref" " WHERE tagid=%d AND tagtype>0" " AND value=%Q AND rid=plink.pid)" " FROM plink" " WHERE cid=:cid AND isprim", TAG_BRANCH, zBr ); fossil_free(zBr); do{ db_reset(&q); db_bind_int(&q, ":cid", rid); rc = db_step(&q); if( rc!=SQLITE_ROW ) break; rid = db_column_int(&q, 0); }while( db_column_int(&q, 1)==1 && rid>0 ); db_finalize(&q); return rid; } /* 
symbolic-name ":" date-time */ nTag = strlen(zTag); for(i=0; i<nTag-10 && zTag[i]!=':'; i++){} if( zTag[i]==':' && fossil_isdate(&zTag[i+1]) ){ char *zDate = mprintf("%s", &zTag[i+1]); char *zTagBase = mprintf("%.*s", i, zTag); int nDate = strlen(zDate); if( sqlite3_strnicmp(&zDate[nDate-3],"utc",3)==0 ){ zDate[nDate-3] = 'z'; zDate[nDate-2] = 0; } rid = db_int(0, "SELECT event.objid, max(event.mtime)" " FROM tag, tagxref, event" " WHERE tag.tagname='sym-%q' " " AND tagxref.tagid=tag.tagid AND tagxref.tagtype>0 " " AND event.objid=tagxref.rid " " AND event.mtime<=julianday(%Q)" " AND event.type GLOB '%q'", zTagBase, zDate, zType ); return rid; } /* SHA1 hash or prefix */ if( nTag>=4 && nTag<=UUID_SIZE && validate16(zTag, nTag) ){ Stmt q; char zUuid[UUID_SIZE+1]; memcpy(zUuid, zTag, nTag+1); canonical16(zUuid, nTag); rid = 0; if( zType[0]=='*' ){ db_prepare(&q, "SELECT rid FROM blob WHERE uuid GLOB '%s*'", zUuid); }else{ db_prepare(&q, "SELECT blob.rid" " FROM blob, event" " WHERE blob.uuid GLOB '%s*'" " AND event.objid=blob.rid" " AND event.type GLOB '%q'", zUuid, zType ); } if( db_step(&q)==SQLITE_ROW ){ rid = db_column_int(&q, 0); if( db_step(&q)==SQLITE_ROW ) rid = -1; } db_finalize(&q); if( rid ) return rid; } /* Symbolic name */ rid = db_int(0, "SELECT event.objid, max(event.mtime)" " FROM tag, tagxref, event" " WHERE tag.tagname='sym-%q' " " AND tagxref.tagid=tag.tagid AND tagxref.tagtype>0 " " AND event.objid=tagxref.rid " " AND event.type GLOB '%q'", zTag, zType ); if( rid>0 ) return rid; /* Undocumented: numeric tags get translated directly into the RID */ if( memcmp(zTag, "rid:", 4)==0 ){ zTag += 4; for(i=0; fossil_isdigit(zTag[i]); i++){} if( zTag[i]==0 ){ if( strcmp(zType,"*")==0 ){ rid = atoi(zTag); }else{ rid = db_int(0, "SELECT event.objid" " FROM event" " WHERE event.objid=%s" " AND event.type GLOB '%q'", zTag, zType); } } } return rid; }
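/*
** A standalone sketch (not Fossil code) of the prefix-resolution step above:
** one GLOB query, the first row is the candidate, and a second row means the
** prefix is ambiguous, reported as -1 just as symbolic_name_to_rid does.
** Uses the public SQLite C API and the blob(rid,uuid) table referenced above;
** binding the prefix and appending '*' in SQL avoids string interpolation,
** though the caller should still restrict the prefix to hex digits.
*/
#include <sqlite3.h>

/* Returns the rid, 0 for no match, or -1 if the prefix is ambiguous. */
static int prefix_to_rid(sqlite3 *db, const char *zPrefix){
  sqlite3_stmt *p;
  int rid = 0;
  sqlite3_prepare_v2(db,
      "SELECT rid FROM blob WHERE uuid GLOB (?1 || '*') LIMIT 2",
      -1, &p, 0);
  sqlite3_bind_text(p, 1, zPrefix, -1, SQLITE_STATIC);
  if( sqlite3_step(p)==SQLITE_ROW ){
    rid = sqlite3_column_int(p, 0);
    if( sqlite3_step(p)==SQLITE_ROW ) rid = -1;  /* second hit: ambiguous */
  }
  sqlite3_finalize(p);
  return rid;
}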
/* ** fossil branch new BRANCH-NAME ?ORIGIN-CHECK-IN? ?-bgcolor COLOR? ** argv0 argv1 argv2 argv3 argv4 */ void branch_new(void){ int rootid; /* RID of the root check-in - what we branch off of */ int brid; /* RID of the branch check-in */ int noSign; /* True if the branch is unsigned */ int i; /* Loop counter */ char *zUuid; /* Artifact ID of origin */ Stmt q; /* Generic query */ const char *zBranch; /* Name of the new branch */ char *zDate; /* Date that branch was created */ char *zComment; /* Check-in comment for the new branch */ const char *zColor; /* Color of the new branch */ Blob branch; /* manifest for the new branch */ Manifest *pParent; /* Parsed parent manifest */ Blob mcksum; /* Self-checksum on the manifest */ const char *zDateOvrd; /* Override date string */ const char *zUserOvrd; /* Override user name */ int isPrivate = 0; /* True if the branch should be private */ noSign = find_option("nosign","",0)!=0; zColor = find_option("bgcolor","c",1); isPrivate = find_option("private",0,0)!=0; zDateOvrd = find_option("date-override",0,1); zUserOvrd = find_option("user-override",0,1); verify_all_options(); if( g.argc<5 ){ usage("new BRANCH-NAME CHECK-IN ?-bgcolor COLOR?"); } db_find_and_open_repository(0, 0); noSign = db_get_int("omitsign", 0)|noSign; /* fossil branch new name */ zBranch = g.argv[3]; if( zBranch==0 || zBranch[0]==0 ){ fossil_panic("branch name cannot be empty"); } if( db_exists( "SELECT 1 FROM tagxref" " WHERE tagtype>0" " AND tagid=(SELECT tagid FROM tag WHERE tagname='sym-%s')", zBranch)!=0 ){ fossil_fatal("branch \"%s\" already exists", zBranch); } user_select(); db_begin_transaction(); rootid = name_to_typed_rid(g.argv[4], "ci"); if( rootid==0 ){ fossil_fatal("unable to locate check-in off of which to branch"); } pParent = manifest_get(rootid, CFTYPE_MANIFEST); if( pParent==0 ){ fossil_fatal("%s is not a valid check-in", g.argv[4]); } /* Create a manifest for the new branch */ blob_zero(&branch); if( pParent->zBaseline ){ blob_appendf(&branch, "B %s\n", pParent->zBaseline); } zComment = mprintf("Create new branch named \"%h\"", zBranch); blob_appendf(&branch, "C %F\n", zComment); zDate = date_in_standard_format(zDateOvrd ? 
zDateOvrd : "now"); blob_appendf(&branch, "D %s\n", zDate); /* Copy all of the content from the parent into the branch */ for(i=0; i<pParent->nFile; ++i){ blob_appendf(&branch, "F %F", pParent->aFile[i].zName); if( pParent->aFile[i].zUuid ){ blob_appendf(&branch, " %s", pParent->aFile[i].zUuid); if( pParent->aFile[i].zPerm && pParent->aFile[i].zPerm[0] ){ blob_appendf(&branch, " %s", pParent->aFile[i].zPerm); } } blob_append(&branch, "\n", 1); } zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rootid); blob_appendf(&branch, "P %s\n", zUuid); if( pParent->zRepoCksum ){ blob_appendf(&branch, "R %s\n", pParent->zRepoCksum); } manifest_destroy(pParent); /* Add the symbolic branch name and the "branch" tag to identify ** this as a new branch */ if( content_is_private(rootid) ) isPrivate = 1; if( isPrivate && zColor==0 ) zColor = "#fec084"; if( zColor!=0 ){ blob_appendf(&branch, "T *bgcolor * %F\n", zColor); } blob_appendf(&branch, "T *branch * %F\n", zBranch); blob_appendf(&branch, "T *sym-%F *\n", zBranch); if( isPrivate ){ blob_appendf(&branch, "T +private *\n"); noSign = 1; } /* Cancel all other symbolic tags */ db_prepare(&q, "SELECT tagname FROM tagxref, tag" " WHERE tagxref.rid=%d AND tagxref.tagid=tag.tagid" " AND tagtype>0 AND tagname GLOB 'sym-*'" " ORDER BY tagname", rootid); while( db_step(&q)==SQLITE_ROW ){ const char *zTag = db_column_text(&q, 0); blob_appendf(&branch, "T -%F *\n", zTag); } db_finalize(&q); blob_appendf(&branch, "U %F\n", zUserOvrd ? zUserOvrd : g.zLogin); md5sum_blob(&branch, &mcksum); blob_appendf(&branch, "Z %b\n", &mcksum); if( !noSign && clearsign(&branch, &branch) ){ Blob ans; blob_zero(&ans); prompt_user("unable to sign manifest. continue (y/N)? ", &ans); if( blob_str(&ans)[0]!='y' ){ db_end_transaction(1); fossil_exit(1); } } brid = content_put_ex(&branch, 0, 0, 0, isPrivate); if( brid==0 ){ fossil_panic("trouble committing manifest: %s", g.zErrMsg); } db_multi_exec("INSERT OR IGNORE INTO unsent VALUES(%d)", brid); if( manifest_crosslink(brid, &branch)==0 ){ fossil_panic("unable to install new manifest"); } assert( blob_is_reset(&branch) ); content_deltify(rootid, brid, 0); zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", brid); fossil_print("New branch: %s\n", zUuid); if( g.argc==3 ){ fossil_print( "\n" "Note: the local check-out has not been updated to the new\n" " branch. To begin working on the new branch, do this:\n" "\n" " %s update %s\n", fossil_nameofexe(), zBranch ); } /* Commit */ db_end_transaction(0); /* Do an autosync push, if requested */ if( !isPrivate ) autosync(AUTOSYNC_PUSH); }
/* ** COMMAND: import ** ** Usage: %fossil import --git ?OPTIONS? NEW-REPOSITORY ** ** Read text generated by the git-fast-export command and use it to ** construct a new Fossil repository named by the NEW-REPOSITORY ** argument. The git-fast-export text is read from standard input. ** ** The git-fast-export file format is currently the only VCS interchange ** format that is understood, though other interchange formats may be added ** in the future. ** ** The --incremental option allows an existing repository to be extended ** with new content. ** ** Options: ** --incremental allow importing into an existing repository ** ** See also: export */ void git_import_cmd(void){ char *zPassword; FILE *pIn; Stmt q; int forceFlag = find_option("force", "f", 0)!=0; int incrFlag = find_option("incremental", "i", 0)!=0; find_option("git",0,0); /* Skip the --git option for now */ verify_all_options(); if( g.argc!=3 && g.argc!=4 ){ usage("REPOSITORY-NAME"); } if( g.argc==4 ){ pIn = fossil_fopen(g.argv[3], "rb"); }else{ pIn = stdin; fossil_binary_mode(pIn); } if( !incrFlag ){ if( forceFlag ) file_delete(g.argv[2]); db_create_repository(g.argv[2]); } db_open_repository(g.argv[2]); db_open_config(0); /* The following temp-tables are used to hold information needed for ** the import. ** ** The XMARK table provides a mapping from fast-import "marks" and symbols ** into artifact ids (UUIDs - the 40-byte hex SHA1 hash of artifacts). ** Given any valid fast-import symbol, the corresponding fossil rid and ** uuid can found by searching against the xmark.tname field. ** ** The XBRANCH table maps commit marks and symbols into the branch those ** commits belong to. If xbranch.tname is a fast-import symbol for a ** checkin then xbranch.brnm is the branch that checkin is part of. ** ** The XTAG table records information about tags that need to be applied ** to various branches after the import finishes. The xtag.tcontent field ** contains the text of an artifact that will add a tag to a check-in. ** The git-fast-export file format might specify the same tag multiple ** times but only the last tag should be used. And we do not know which ** occurrence of the tag is the last until the import finishes. */ db_multi_exec( "CREATE TEMP TABLE xmark(tname TEXT UNIQUE, trid INT, tuuid TEXT);" "CREATE TEMP TABLE xbranch(tname TEXT UNIQUE, brnm TEXT);" "CREATE TEMP TABLE xtag(tname TEXT UNIQUE, tcontent TEXT);" ); db_begin_transaction(); if( !incrFlag ) db_initial_setup(0, 0, 0, 1); git_fast_import(pIn); db_prepare(&q, "SELECT tcontent FROM xtag"); while( db_step(&q)==SQLITE_ROW ){ Blob record; db_ephemeral_blob(&q, 0, &record); fast_insert_content(&record, 0, 0); import_reset(0); } db_finalize(&q); db_end_transaction(0); db_begin_transaction(); fossil_print("Rebuilding repository meta-data...\n"); rebuild_db(0, 1, !incrFlag); verify_cancel(); db_end_transaction(0); fossil_print("Vacuuming..."); fflush(stdout); db_multi_exec("VACUUM"); fossil_print(" ok\n"); if( !incrFlag ){ fossil_print("project-id: %s\n", db_get("project-code", 0)); fossil_print("server-id: %s\n", db_get("server-code", 0)); zPassword = db_text(0, "SELECT pw FROM user WHERE login=%Q", g.zLogin); fossil_print("admin-user: %s (password is \"%s\")\n", g.zLogin, zPassword); } }
/* ** Send "config" cards using the new format for all elements of a group ** that have recently changed. ** ** Output goes into pOut. The groupMask identifies the group(s) to be sent. ** Send only entries whose timestamp is later than or equal to iStart. ** ** Return the number of cards sent. */ int configure_send_group( Blob *pOut, /* Write output here */ int groupMask, /* Mask of groups to be send */ sqlite3_int64 iStart /* Only write values changed since this time */ ) { Stmt q; Blob rec; int ii; int nCard = 0; blob_zero(&rec); if( groupMask & CONFIGSET_SHUN ) { db_prepare(&q, "SELECT mtime, quote(uuid), quote(scom) FROM shun" " WHERE mtime>=%lld", iStart); while( db_step(&q)==SQLITE_ROW ) { blob_appendf(&rec,"%s %s scom %s", db_column_text(&q, 0), db_column_text(&q, 1), db_column_text(&q, 2) ); blob_appendf(pOut, "config /shun %d\n%s\n", blob_size(&rec), blob_str(&rec)); nCard++; blob_reset(&rec); } db_finalize(&q); } if( groupMask & CONFIGSET_USER ) { db_prepare(&q, "SELECT mtime, quote(login), quote(pw), quote(cap)," " quote(info), quote(photo) FROM user" " WHERE mtime>=%lld", iStart); while( db_step(&q)==SQLITE_ROW ) { blob_appendf(&rec,"%s %s pw %s cap %s info %s photo %s", db_column_text(&q, 0), db_column_text(&q, 1), db_column_text(&q, 2), db_column_text(&q, 3), db_column_text(&q, 4), db_column_text(&q, 5) ); blob_appendf(pOut, "config /user %d\n%s\n", blob_size(&rec), blob_str(&rec)); nCard++; blob_reset(&rec); } db_finalize(&q); } if( groupMask & CONFIGSET_TKT ) { db_prepare(&q, "SELECT mtime, quote(title), quote(owner), quote(cols)," " quote(sqlcode) FROM reportfmt" " WHERE mtime>=%lld", iStart); while( db_step(&q)==SQLITE_ROW ) { blob_appendf(&rec,"%s %s owner %s cols %s sqlcode %s", db_column_text(&q, 0), db_column_text(&q, 1), db_column_text(&q, 2), db_column_text(&q, 3), db_column_text(&q, 4) ); blob_appendf(pOut, "config /reportfmt %d\n%s\n", blob_size(&rec), blob_str(&rec)); nCard++; blob_reset(&rec); } db_finalize(&q); } if( groupMask & CONFIGSET_ADDR ) { db_prepare(&q, "SELECT mtime, quote(hash), quote(content) FROM concealed" " WHERE mtime>=%lld", iStart); while( db_step(&q)==SQLITE_ROW ) { blob_appendf(&rec,"%s %s content %s", db_column_text(&q, 0), db_column_text(&q, 1), db_column_text(&q, 2) ); blob_appendf(pOut, "config /concealed %d\n%s\n", blob_size(&rec), blob_str(&rec)); nCard++; blob_reset(&rec); } db_finalize(&q); } db_prepare(&q, "SELECT mtime, quote(name), quote(value) FROM config" " WHERE name=:name AND mtime>=%lld", iStart); for(ii=0; ii<count(aConfig); ii++) { if( (aConfig[ii].groupMask & groupMask)!=0 && aConfig[ii].zName[0]!='@' ) { db_bind_text(&q, ":name", aConfig[ii].zName); while( db_step(&q)==SQLITE_ROW ) { blob_appendf(&rec,"%s %s value %s", db_column_text(&q, 0), db_column_text(&q, 1), db_column_text(&q, 2) ); blob_appendf(pOut, "config /config %d\n%s\n", blob_size(&rec), blob_str(&rec)); nCard++; blob_reset(&rec); } db_reset(&q); } } db_finalize(&q); return nCard; }
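/*
** A standalone sketch (not Fossil code) mirroring the last loop above: let
** the SQL quote() function render each value (text with doubled quotes,
** blobs as X'..', NULLs as NULL) so the emitted card can be re-parsed later,
** and frame each record with a length-prefixed "config" line.  Uses the
** public SQLite C API; the config(name,value,mtime) shape matches the
** queries above, while the output framing here is illustrative only.
*/
#include <stdio.h>
#include <string.h>
#include <sqlite3.h>

static void emit_config_cards(sqlite3 *db, sqlite3_int64 iStart){
  sqlite3_stmt *p;
  sqlite3_prepare_v2(db,
      "SELECT mtime, quote(name), quote(value) FROM config WHERE mtime>=?1",
      -1, &p, 0);
  sqlite3_bind_int64(p, 1, iStart);
  while( sqlite3_step(p)==SQLITE_ROW ){
    char *zRec = sqlite3_mprintf("%s %s value %s",
        (const char*)sqlite3_column_text(p, 0),
        (const char*)sqlite3_column_text(p, 1),
        (const char*)sqlite3_column_text(p, 2));
    printf("config /config %d\n%s\n", (int)strlen(zRec), zRec);
    sqlite3_free(zRec);
  }
  sqlite3_finalize(p);
}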
/*
** Create a temporary table named "leaves" if it does not
** already exist.  Load this table with the RID of all
** check-ins that are leaves which are descended from
** check-in iBase.
**
** A "leaf" is a check-in that has no children in the same branch.
** There is a separate permanent table LEAF that contains all leaves
** in the tree.  This routine is used to compute a subset of that
** table consisting of leaves that are descended from a single checkin.
**
** The closeMode flag determines behavior associated with the "closed"
** tag:
**
**    closeMode==0       Show all leaves regardless of the "closed" tag.
**
**    closeMode==1       Show only leaves without the "closed" tag.
**
**    closeMode==2       Show only leaves with the "closed" tag.
**
** In other words, the default (closeMode==0) pays no attention to the
** "closed" tag and shows every leaf.  To omit closed leaves, use
** closeMode==1.  To show only closed leaves, use closeMode==2.
*/
void compute_leaves(int iBase, int closeMode){

  /* Create the LEAVES table if it does not already exist.  Make sure
  ** it is empty.
  */
  db_multi_exec(
    "CREATE TEMP TABLE IF NOT EXISTS leaves("
    " rid INTEGER PRIMARY KEY"
    ");"
    "DELETE FROM leaves;"
  );
  if( iBase>0 ){
    Bag seen;     /* Descendants seen */
    Bag pending;  /* Unpropagated descendants */
    Stmt q1;      /* Query to find children of a check-in */
    Stmt isBr;    /* Query to check to see if a check-in starts a new branch */
    Stmt ins;     /* INSERT statement for a new record */

    /* Initialize the bags. */
    bag_init(&seen);
    bag_init(&pending);
    bag_insert(&pending, iBase);

    /* This query returns all non-branch-merge children of check-in :rid.
    **
    ** If a child is a merge of a fork within the same branch, it is
    ** returned.  Only merge children in different branches are excluded.
    */
    db_prepare(&q1,
      "SELECT cid FROM plink"
      " WHERE pid=:rid"
      " AND (isprim"
      " OR coalesce((SELECT value FROM tagxref"
      " WHERE tagid=%d AND rid=plink.pid), 'trunk')"
      "=coalesce((SELECT value FROM tagxref"
      " WHERE tagid=%d AND rid=plink.cid), 'trunk'))",
      TAG_BRANCH, TAG_BRANCH
    );

    /* This query returns a single row if check-in :rid is the first
    ** check-in of a new branch.
    */
    db_prepare(&isBr,
      "SELECT 1 FROM tagxref"
      " WHERE rid=:rid AND tagid=%d AND tagtype=2"
      " AND srcid>0",
      TAG_BRANCH
    );

    /* This statement inserts check-in :rid into the LEAVES table. */
    db_prepare(&ins, "INSERT OR IGNORE INTO leaves VALUES(:rid)");

    while( bag_count(&pending) ){
      int rid = bag_first(&pending);
      int cnt = 0;
      bag_remove(&pending, rid);
      db_bind_int(&q1, ":rid", rid);
      while( db_step(&q1)==SQLITE_ROW ){
        int cid = db_column_int(&q1, 0);
        if( bag_insert(&seen, cid) ){
          bag_insert(&pending, cid);
        }
        db_bind_int(&isBr, ":rid", cid);
        if( db_step(&isBr)==SQLITE_DONE ){
          cnt++;
        }
        db_reset(&isBr);
      }
      db_reset(&q1);
      if( cnt==0 && !is_a_leaf(rid) ){
        cnt++;
      }
      if( cnt==0 ){
        db_bind_int(&ins, ":rid", rid);
        db_step(&ins);
        db_reset(&ins);
      }
    }
    db_finalize(&ins);
    db_finalize(&isBr);
    db_finalize(&q1);
    bag_clear(&pending);
    bag_clear(&seen);
  }
  if( closeMode==1 ){
    db_multi_exec(
      "DELETE FROM leaves WHERE rid IN"
      " (SELECT leaves.rid FROM leaves, tagxref"
      " WHERE tagxref.rid=leaves.rid "
      " AND tagxref.tagid=%d"
      " AND tagxref.tagtype>0)",
      TAG_CLOSED
    );
  }else if( closeMode==2 ){
    db_multi_exec(
      "DELETE FROM leaves WHERE rid NOT IN"
      " (SELECT leaves.rid FROM leaves, tagxref"
      " WHERE tagxref.rid=leaves.rid "
      " AND tagxref.tagid=%d"
      " AND tagxref.tagtype>0)",
      TAG_CLOSED
    );
  }
}
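/*
** compute_leaves() above walks plink by hand with a seen bag and a pending
** bag.  On SQLite 3.8.3 or later the same closure can be phrased as a
** recursive common table expression -- a sketch of that alternative, not how
** Fossil does it, and it deliberately ignores the branch comparison and the
** closed-tag filtering performed above.
*/
#include <sqlite3.h>

/* Fill TEMP table "leaves" with every check-in reachable from iBase that
** has no children at all (a simpler leaf test than the branch-aware one
** above).  Requires SQLite >= 3.8.3 for WITH RECURSIVE. */
static void leaves_by_cte(sqlite3 *db, int iBase){
  sqlite3_stmt *p;
  sqlite3_exec(db,
      "CREATE TEMP TABLE IF NOT EXISTS leaves(rid INTEGER PRIMARY KEY);"
      "DELETE FROM leaves;", 0, 0, 0);
  sqlite3_prepare_v2(db,
      "WITH RECURSIVE dx(rid) AS ("
      "  VALUES(?1)"
      "  UNION"
      "  SELECT plink.cid FROM plink, dx WHERE plink.pid=dx.rid"
      ") "
      "INSERT OR IGNORE INTO leaves"
      " SELECT rid FROM dx"
      " WHERE NOT EXISTS(SELECT 1 FROM plink WHERE plink.pid=dx.rid)",
      -1, &p, 0);
  sqlite3_bind_int(p, 1, iBase);
  sqlite3_step(p);
  sqlite3_finalize(p);
}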
/* ** Propagate the tag given by tagid to the children of pid. ** ** This routine assumes that tagid is a tag that should be ** propagated and that the tag is already present in pid. ** ** If tagtype is 2 then the tag is being propagated from an ** ancestor node. If tagtype is 0 it means a propagating tag is ** being blocked. */ static void tag_propagate( int pid, /* Propagate the tag to children of this node */ int tagid, /* Tag to propagate */ int tagType, /* 2 for a propagating tag. 0 for an antitag */ int origId, /* Artifact of tag, when tagType==2 */ const char *zValue, /* Value of the tag. Might be NULL */ double mtime /* Timestamp on the tag */ ){ PQueue queue; /* Queue of check-ins to be tagged */ Stmt s; /* Query the children of :pid to which to propagate */ Stmt ins; /* INSERT INTO tagxref */ Stmt eventupdate; /* UPDATE event */ assert( tagType==0 || tagType==2 ); pqueuex_init(&queue); pqueuex_insert(&queue, pid, 0.0, 0); /* Query for children of :pid to which to propagate the tag. ** Three returns: (1) rid of the child. (2) timestamp of child. ** (3) True to propagate or false to block. */ db_prepare(&s, "SELECT cid, plink.mtime," " coalesce(srcid=0 AND tagxref.mtime<:mtime, %d) AS doit" " FROM plink LEFT JOIN tagxref ON cid=rid AND tagid=%d" " WHERE pid=:pid AND isprim", tagType==2, tagid ); db_bind_double(&s, ":mtime", mtime); if( tagType==2 ){ /* Set the propagated tag marker on checkin :rid */ db_prepare(&ins, "REPLACE INTO tagxref(tagid, tagtype, srcid, origid, value, mtime, rid)" "VALUES(%d,2,0,%d,%Q,:mtime,:rid)", tagid, origId, zValue ); db_bind_double(&ins, ":mtime", mtime); }else{ /* Remove all references to the tag from checkin :rid */ zValue = 0; db_prepare(&ins, "DELETE FROM tagxref WHERE tagid=%d AND rid=:rid", tagid ); } if( tagid==TAG_BGCOLOR ){ db_prepare(&eventupdate, "UPDATE event SET bgcolor=%Q WHERE objid=:rid", zValue ); } while( (pid = pqueuex_extract(&queue, 0))!=0 ){ db_bind_int(&s, ":pid", pid); while( db_step(&s)==SQLITE_ROW ){ int doit = db_column_int(&s, 2); if( doit ){ int cid = db_column_int(&s, 0); double mtime = db_column_double(&s, 1); pqueuex_insert(&queue, cid, mtime, 0); db_bind_int(&ins, ":rid", cid); db_step(&ins); db_reset(&ins); if( tagid==TAG_BGCOLOR ){ db_bind_int(&eventupdate, ":rid", cid); db_step(&eventupdate); db_reset(&eventupdate); } if( tagid==TAG_BRANCH ){ leaf_eventually_check(cid); } } } db_reset(&s); } pqueuex_clear(&queue); db_finalize(&ins); db_finalize(&s); if( tagid==TAG_BGCOLOR ){ db_finalize(&eventupdate); } }
/* ** Impl of /json/dir. 98% of it was taken directly ** from browse.c::page_dir() */ static cson_value * json_page_dir_list(){ cson_object * zPayload = NULL; /* return value */ cson_array * zEntries = NULL; /* accumulated list of entries. */ cson_object * zEntry = NULL; /* a single dir/file entry. */ cson_array * keyStore = NULL; /* garbage collector for shared strings. */ cson_string * zKeyName = NULL; cson_string * zKeySize = NULL; cson_string * zKeyIsDir = NULL; cson_string * zKeyUuid = NULL; cson_string * zKeyTime = NULL; cson_string * zKeyRaw = NULL; char * zD = NULL; char const * zDX = NULL; int nD; char * zUuid = NULL; char const * zCI = NULL; Manifest * pM = NULL; Stmt q = empty_Stmt; int rid = 0; if( !g.perm.Read ){ json_set_err(FSL_JSON_E_DENIED, "Requires 'o' permissions."); return NULL; } zCI = json_find_option_cstr("checkin",NULL,"ci" ); /* If a specific check-in is requested, fetch and parse it. If the ** specific check-in does not exist, clear zCI. zCI==0 will cause all ** files from all check-ins to be displayed. */ if( zCI && *zCI ){ pM = manifest_get_by_name(zCI, &rid); if( pM ){ zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid); }else{ json_set_err(FSL_JSON_E_UNRESOLVED_UUID, "Checkin name [%s] is unresolved.", zCI); return NULL; } } /* Jump through some hoops to find the directory name... */ zDX = json_find_option_cstr("name",NULL,NULL); if(!zDX && !g.isHTTP){ zDX = json_command_arg(g.json.dispatchDepth+1); } if(zDX && (!*zDX || (0==strcmp(zDX,"/")))){ zDX = NULL; } zD = zDX ? fossil_strdup(zDX) : NULL; nD = zD ? strlen(zD)+1 : 0; while( nD>1 && zD[nD-2]=='/' ){ zD[(--nD)-1] = 0; } sqlite3_create_function(g.db, "pathelement", 2, SQLITE_UTF8, 0, pathelementFunc, 0, 0); /* Compute the temporary table "localfiles" containing the names ** of all files and subdirectories in the zD[] directory. ** ** Subdirectory names begin with "/". This causes them to sort ** first and it also gives us an easy way to distinguish files ** from directories in the loop that follows. 
*/ if( zCI ){ Stmt ins; ManifestFile *pFile; ManifestFile *pPrev = 0; int nPrev = 0; int c; db_multi_exec( "CREATE TEMP TABLE json_dir_files(" " n UNIQUE NOT NULL," /* file name */ " fn UNIQUE NOT NULL," /* full file name */ " u DEFAULT NULL," /* file uuid */ " sz DEFAULT -1," /* file size */ " mtime DEFAULT NULL" /* file mtime in unix epoch format */ ");" ); db_prepare(&ins, "INSERT OR IGNORE INTO json_dir_files (n,fn,u,sz,mtime) " "SELECT" " pathelement(:path,0)," " CASE WHEN %Q IS NULL THEN '' ELSE %Q||'/' END ||:abspath," " a.uuid," " a.size," " CAST(strftime('%%s',e.mtime) AS INTEGER) " "FROM" " mlink m, " " event e," " blob a," " blob b " "WHERE" " e.objid=m.mid" " AND a.rid=m.fid"/*FILE artifact*/ " AND b.rid=m.mid"/*CHECKIN artifact*/ " AND a.uuid=:uuid", zD, zD ); manifest_file_rewind(pM); while( (pFile = manifest_file_next(pM,0))!=0 ){ if( nD>0 && ((pFile->zName[nD-1]!='/') || (0!=memcmp(pFile->zName, zD, nD-1))) ){ continue; } /*printf("zD=%s, nD=%d, pFile->zName=%s\n", zD, nD, pFile->zName);*/ if( pPrev && memcmp(&pFile->zName[nD],&pPrev->zName[nD],nPrev)==0 && (pFile->zName[nD+nPrev]==0 || pFile->zName[nD+nPrev]=='/') ){ continue; } db_bind_text( &ins, ":path", &pFile->zName[nD] ); db_bind_text( &ins, ":abspath", &pFile->zName[nD] ); db_bind_text( &ins, ":uuid", pFile->zUuid ); db_step(&ins); db_reset(&ins); pPrev = pFile; for(nPrev=0; (c=pPrev->zName[nD+nPrev]) && c!='/'; nPrev++){} if( c=='/' ) nPrev++; } db_finalize(&ins); }else if( zD && *zD ){ db_multi_exec( "CREATE TEMP VIEW json_dir_files AS" " SELECT DISTINCT(pathelement(name,%d)) AS n," " %Q||'/'||name AS fn," " NULL AS u, NULL AS sz, NULL AS mtime" " FROM filename" " WHERE name GLOB '%q/*'" " GROUP BY n", nD, zD, zD ); }else{ db_multi_exec( "CREATE TEMP VIEW json_dir_files" " AS SELECT DISTINCT(pathelement(name,0)) AS n, NULL AS fn" " FROM filename" ); } if(zCI){ db_prepare( &q, "SELECT" " n as name," " fn as fullname," " u as uuid," " sz as size," " mtime as mtime " "FROM json_dir_files ORDER BY n"); }else{/* UUIDs are all NULL. */ db_prepare( &q, "SELECT n, fn FROM json_dir_files ORDER BY n"); } zKeyName = cson_new_string("name",4); zKeyUuid = cson_new_string("uuid",4); zKeyIsDir = cson_new_string("isDir",5); keyStore = cson_new_array(); cson_array_append( keyStore, cson_string_value(zKeyName) ); cson_array_append( keyStore, cson_string_value(zKeyUuid) ); cson_array_append( keyStore, cson_string_value(zKeyIsDir) ); if( zCI ){ zKeySize = cson_new_string("size",4); cson_array_append( keyStore, cson_string_value(zKeySize) ); zKeyTime = cson_new_string("timestamp",9); cson_array_append( keyStore, cson_string_value(zKeyTime) ); zKeyRaw = cson_new_string("downloadPath",12); cson_array_append( keyStore, cson_string_value(zKeyRaw) ); } zPayload = cson_new_object(); cson_object_set_s( zPayload, zKeyName, json_new_string((zD&&*zD) ? 
zD : "/") ); if( zUuid ){ cson_object_set( zPayload, "checkin", json_new_string(zUuid) ); } while( (SQLITE_ROW==db_step(&q)) ){ cson_value * name = NULL; char const * n = db_column_text(&q,0); char const isDir = ('/'==*n); zEntry = cson_new_object(); if(!zEntries){ zEntries = cson_new_array(); cson_object_set( zPayload, "entries", cson_array_value(zEntries) ); } cson_array_append(zEntries, cson_object_value(zEntry) ); if(isDir){ name = json_new_string( n+1 ); cson_object_set_s(zEntry, zKeyIsDir, cson_value_true() ); } else{ name = json_new_string( n ); } cson_object_set_s(zEntry, zKeyName, name ); if( zCI && !isDir){ /* Don't add the uuid/size for dir entries - that data refers to one of the files in that directory :/. Entries with no --checkin may refer to N versions, and therefore we cannot associate a single size and uuid with them (and fetching all would be overkill for most use cases). */ char const * fullName = db_column_text(&q,1); char const * u = db_column_text(&q,2); sqlite_int64 const sz = db_column_int64(&q,3); sqlite_int64 const ts = db_column_int64(&q,4); cson_object_set_s(zEntry, zKeyUuid, json_new_string( u ) ); cson_object_set_s(zEntry, zKeySize, cson_value_new_integer( (cson_int_t)sz )); cson_object_set_s(zEntry, zKeyTime, cson_value_new_integer( (cson_int_t)ts )); cson_object_set_s(zEntry, zKeyRaw, json_new_string_f("/raw/%T?name=%t", fullName, u)); } } db_finalize(&q); if(pM){ manifest_destroy(pM); } cson_free_array( keyStore ); free( zUuid ); free( zD ); return cson_object_value(zPayload); }
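/*
** json_page_dir_list() above relies on a two-argument pathelement() SQL
** function being registered with sqlite3_create_function() before its
** queries run.  A standalone sketch of wiring up a similar scalar function;
** the name pathpart() and its exact semantics are illustrative, not
** Fossil's implementation.
*/
#include <string.h>
#include <sqlite3.h>

/* pathpart(PATH, N): the N-th '/'-separated element of PATH, or NULL if
** PATH has fewer than N+1 elements. */
static void pathpartFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  const char *z = (const char*)sqlite3_value_text(argv[0]);
  int n = sqlite3_value_int(argv[1]);
  const char *zEnd;
  (void)argc;
  if( z==0 ){ sqlite3_result_null(ctx); return; }
  while( n-- > 0 ){
    z = strchr(z, '/');
    if( z==0 ){ sqlite3_result_null(ctx); return; }
    z++;
  }
  zEnd = strchr(z, '/');
  sqlite3_result_text(ctx, z, zEnd ? (int)(zEnd-z) : -1, SQLITE_TRANSIENT);
}

static int register_pathpart(sqlite3 *db){
  /* usage once registered:  SELECT DISTINCT pathpart(name,0) FROM filename */
  return sqlite3_create_function(db, "pathpart", 2, SQLITE_UTF8, 0,
                                 pathpartFunc, 0, 0);
}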
/*
** COMMAND: user
**
** Usage: %fossil user SUBCOMMAND ...  ?-R|--repository FILE?
**
** Run various subcommands on users of the open repository or of
** the repository identified by the -R or --repository option.
**
**    %fossil user capabilities USERNAME ?STRING?
**
**        Query or set the capabilities for user USERNAME
**
**    %fossil user default ?USERNAME?
**
**        Query or set the default user.  The default user is the
**        user for command-line interaction.
**
**    %fossil user list
**
**        List all users known to the repository
**
**    %fossil user new ?USERNAME? ?CONTACT-INFO? ?PASSWORD?
**
**        Create a new user in the repository.  Users can never be
**        deleted.  They can be denied all access but they must continue
**        to exist in the database.
**
**    %fossil user password USERNAME ?PASSWORD?
**
**        Change the web access password for a user.
*/
void user_cmd(void){
  int n;
  db_find_and_open_repository(1);
  if( g.argc<3 ){
    usage("capabilities|default|list|new|password ...");
  }
  n = strlen(g.argv[2]);
  if( n>=2 && strncmp(g.argv[2],"new",n)==0 ){
    Blob passwd, login, contact;
    char *zPw;

    if( g.argc>=4 ){
      blob_init(&login, g.argv[3], -1);
    }else{
      prompt_user("login: ", &login);
    }
    if( db_exists("SELECT 1 FROM user WHERE login=%B", &login) ){
      fossil_fatal("user %b already exists", &login);
    }
    if( g.argc>=5 ){
      blob_init(&contact, g.argv[4], -1);
    }else{
      prompt_user("contact-info: ", &contact);
    }
    if( g.argc>=6 ){
      blob_init(&passwd, g.argv[5], -1);
    }else{
      prompt_for_password("password: ", &passwd, 1);
    }
    zPw = sha1_shared_secret(blob_str(&passwd), blob_str(&login));
    db_multi_exec(
      "INSERT INTO user(login,pw,cap,info)"
      "VALUES(%B,%Q,'v',%B)",
      &login, zPw, &contact
    );
    free(zPw);
  }else if( n>=2 && strncmp(g.argv[2],"default",n)==0 ){
    user_select();
    if( g.argc==3 ){
      printf("%s\n", g.zLogin);
    }else{
      if( !db_exists("SELECT 1 FROM user WHERE login=%Q", g.argv[3]) ){
        fossil_fatal("no such user: %s", g.argv[3]);
      }
      if( g.localOpen ){
        db_lset("default-user", g.argv[3]);
      }else{
        db_set("default-user", g.argv[3], 0);
      }
    }
  }else if( n>=2 && strncmp(g.argv[2],"list",n)==0 ){
    Stmt q;
    db_prepare(&q, "SELECT login, info FROM user ORDER BY login");
    while( db_step(&q)==SQLITE_ROW ){
      printf("%-12s %s\n", db_column_text(&q, 0), db_column_text(&q, 1));
    }
    db_finalize(&q);
  }else if( n>=2 && strncmp(g.argv[2],"password",2)==0 ){
    char *zPrompt;
    int uid;
    Blob pw;
    if( g.argc!=4 && g.argc!=5 ) usage("password USERNAME ?NEW-PASSWORD?");
    uid = db_int(0, "SELECT uid FROM user WHERE login=%Q", g.argv[3]);
    if( uid==0 ){
      fossil_fatal("no such user: %s", g.argv[3]);
    }
    if( g.argc==5 ){
      blob_init(&pw, g.argv[4], -1);
    }else{
      zPrompt = mprintf("new passwd for %s: ", g.argv[3]);
      prompt_for_password(zPrompt, &pw, 1);
    }
    if( blob_size(&pw)==0 ){
      printf("password unchanged\n");
    }else{
      char *zSecret = sha1_shared_secret(blob_str(&pw), g.argv[3]);
      db_multi_exec("UPDATE user SET pw=%Q WHERE uid=%d", zSecret, uid);
      free(zSecret);
    }
  }else if( n>=2 && strncmp(g.argv[2],"capabilities",2)==0 ){
    int uid;
    if( g.argc!=4 && g.argc!=5 ){
      usage("user capabilities USERNAME ?PERMISSIONS?");
    }
    uid = db_int(0, "SELECT uid FROM user WHERE login=%Q", g.argv[3]);
    if( uid==0 ){
      fossil_fatal("no such user: %s", g.argv[3]);
    }
    if( g.argc==5 ){
      db_multi_exec(
        "UPDATE user SET cap=%Q WHERE uid=%d", g.argv[4], uid
      );
    }
    printf("%s\n", db_text(0, "SELECT cap FROM user WHERE uid=%d", uid));
  }else{
    fossil_panic("user subcommand should be one of: "
                 "capabilities default list new password");
  }
}
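/*
** user_cmd() above dispatches on a prefix of g.argv[2]: n is the length of
** whatever the user typed, so strncmp with n accepts any unambiguous
** abbreviation of at least two characters.  A self-contained sketch of that
** idiom, extended to reject ambiguous prefixes, which the code above does
** not need because its five subcommands already differ in their first two
** characters.
*/
#include <stdio.h>
#include <string.h>

/* Returns the index of the matching command, -1 if none, -2 if ambiguous. */
static int match_subcommand(const char *zArg, const char **azCmd, int nCmd){
  int i, hit = -1;
  size_t n = strlen(zArg);
  for(i=0; i<nCmd; i++){
    if( strncmp(zArg, azCmd[i], n)==0 ){
      if( strcmp(zArg, azCmd[i])==0 ) return i;  /* exact match wins */
      if( hit>=0 ) return -2;                    /* ambiguous prefix */
      hit = i;
    }
  }
  return hit;
}

int main(void){
  static const char *azCmd[] = {
    "capabilities", "default", "list", "new", "password"
  };
  printf("%d\n", match_subcommand("pass", azCmd, 5));  /* prints 4 */
  return 0;
}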
/*
** COMMAND: addremove
**
** Usage: %fossil addremove ?OPTIONS?
**
** Do all necessary "add" and "rm" commands to synchronize the repository
** with the content of the working checkout:
**
**  *  All files in the checkout but not in the repository (that is,
**     all files displayed using the "extra" command) are added as
**     if by the "add" command.
**
**  *  All files in the repository but missing from the checkout (that is,
**     all files that show as MISSING with the "status" command) are
**     removed as if by the "rm" command.
**
** The command does not "commit".  You must run "commit" as a separate step.
**
** Files and directories whose names begin with "." are ignored unless
** the --dotfiles option is used.
**
** The --ignore option overrides the "ignore-glob" setting, as does the
** --case-sensitive option with the "case-sensitive" setting.  See the
** documentation on the "settings" command for further information.
**
** The --test option shows what would happen without actually doing anything.
**
** This command can be used to track third party software.
**
** Options:
**   --case-sensitive <BOOL>  override case-sensitive setting
**   --dotfiles               include files beginning with a dot (".")
**   --ignore <CSG>           ignore files matching patterns from the
**                            comma separated list of glob patterns.
**   --test                   If given, display instead of run actions
**
** See also: add, rm
*/
void addremove_cmd(void){
  Blob path;
  const char *zIgnoreFlag = find_option("ignore",0,1);
  unsigned scanFlags = find_option("dotfiles",0,0)!=0 ? SCAN_ALL : 0;
  int isTest = find_option("test",0,0)!=0;
  int caseSensitive;
  int n;
  Stmt q;
  int vid;
  int nAdd = 0;
  int nDelete = 0;
  Glob *pIgnore;

  capture_case_sensitive_option();
  db_must_be_within_tree();
  caseSensitive = filenames_are_case_sensitive();
  if( zIgnoreFlag==0 ){
    zIgnoreFlag = db_get("ignore-glob", 0);
  }
  vid = db_lget_int("checkout",0);
  if( vid==0 ){
    fossil_panic("no checkout to add to");
  }
  db_begin_transaction();

  /* step 1:
  ** Populate the temp table "sfile" with the names of all unmanaged
  ** files currently in the check-out, except for files that match the
  ** --ignore or ignore-glob patterns and dot-files.  Then add all of
  ** the files in the sfile temp table to the set of managed files.
  */
  db_multi_exec("CREATE TEMP TABLE sfile(x TEXT PRIMARY KEY)");
  n = strlen(g.zLocalRoot);
  blob_init(&path, g.zLocalRoot, n-1);
  /* now we read the complete file structure into a temp table */
  pIgnore = glob_create(zIgnoreFlag);
  vfile_scan(&path, blob_size(&path), scanFlags, pIgnore);
  glob_free(pIgnore);
  nAdd = add_files_in_sfile(vid, caseSensitive);

  /* step 2: search for missing files */
  db_prepare(&q,
    "SELECT pathname, %Q || pathname, deleted FROM vfile"
    " WHERE NOT deleted"
    " ORDER BY 1",
    g.zLocalRoot
  );
  while( db_step(&q)==SQLITE_ROW ){
    const char * zFile;
    const char * zPath;

    zFile = db_column_text(&q, 0);
    zPath = db_column_text(&q, 1);
    if( !file_wd_isfile_or_link(zPath) ){
      if( !isTest ){
        db_multi_exec("UPDATE vfile SET deleted=1 WHERE pathname=%Q", zFile);
      }
      fossil_print("DELETED %s\n", zFile);
      nDelete++;
    }
  }
  db_finalize(&q);

  /* show command summary */
  fossil_print("added %d files, deleted %d files\n", nAdd, nDelete);

  db_end_transaction(isTest);
}
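/*
** A standalone sketch (not Fossil code) of the reconciliation addremove
** performs: once the on-disk names are in sfile(x) and the managed names are
** in vfile(pathname,deleted), the two deltas can be read off with EXCEPT.
** Uses the public SQLite C API; it assumes sfile has already been filtered
** by the ignore globs, as the scan above guarantees.
*/
#include <stdio.h>
#include <sqlite3.h>

static void show_addremove_deltas(sqlite3 *db){
  sqlite3_stmt *p;
  sqlite3_prepare_v2(db,
      "SELECT 'ADDED', x FROM"
      " (SELECT x FROM sfile EXCEPT SELECT pathname FROM vfile)"
      " UNION ALL "
      "SELECT 'DELETED', pathname FROM"
      " (SELECT pathname FROM vfile WHERE NOT deleted"
      "  EXCEPT SELECT x FROM sfile)"
      " ORDER BY 2", -1, &p, 0);
  while( sqlite3_step(p)==SQLITE_ROW ){
    printf("%-8s %s\n",
           (const char*)sqlite3_column_text(p, 0),
           (const char*)sqlite3_column_text(p, 1));
  }
  sqlite3_finalize(p);
}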
static void album_query(bmdb_t *b, void *db) { sqlite3_stmt *stmt; int rc; int album_id = atoi(b->b_query); rc = db_prepare(db, &stmt, "SELECT album.title, artist.title " "FROM album, artist " "WHERE album.id = ?1 " "AND artist.id = album.artist_id " "AND album.ds_id = 1"); if(rc != SQLITE_OK) return; sqlite3_bind_int(stmt, 1, album_id); if(db_step(stmt) == SQLITE_ROW) { rstr_t *album = db_rstr(stmt, 0); rstr_t *artist = db_rstr(stmt, 1); prop_set(b->b_metadata, "title", PROP_SET_RSTRING, album); prop_set(b->b_metadata, "artist_name", PROP_SET_RSTRING, artist); prop_t *p = prop_create_r(b->b_metadata, "album_art"); metadata_bind_albumart(p, artist, album); prop_ref_dec(p); rstr_release(album); rstr_release(artist); } sqlite3_finalize(stmt); rc = db_prepare(db, &stmt, "SELECT url, audioitem.title, track, duration, " "artist.title " "FROM audioitem,item,artist " "WHERE audioitem.item_id = item.id " "AND audioitem.artist_id = artist.id " "AND album_id = ?1 " "AND audioitem.ds_id = 1"); if(rc != SQLITE_OK) return; sqlite3_bind_int(stmt, 1, album_id); rstr_t *ct = rstr_alloc("audio"); while((rc = db_step(stmt)) == SQLITE_ROW) { add_item(b, (const char *)sqlite3_column_text(stmt, 0), NULL, ct, (const char *)sqlite3_column_text(stmt, 1), sqlite3_column_int(stmt, 2), (const char *)sqlite3_column_text(stmt, 4), sqlite3_column_int(stmt, 3)); } rstr_release(ct); sqlite3_finalize(stmt); }
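/*
** A standalone sketch (not part of the code above) of the habit album_query
** uses: bind values into ?N placeholders instead of pasting them into the
** SQL text, so quotes or wildcards inside the value cannot change the
** statement.  The item(url,title) table and the helper name are assumptions
** for illustration.
*/
#include <sqlite3.h>

static int title_for_url(sqlite3 *db, const char *zUrl, char *zOut, int nOut){
  sqlite3_stmt *p;
  int found = 0;
  if( sqlite3_prepare_v2(db, "SELECT title FROM item WHERE url = ?1",
                         -1, &p, 0)!=SQLITE_OK ){
    return 0;
  }
  sqlite3_bind_text(p, 1, zUrl, -1, SQLITE_STATIC);
  if( sqlite3_step(p)==SQLITE_ROW ){
    sqlite3_snprintf(nOut, zOut, "%s",
                     (const char*)sqlite3_column_text(p, 0));
    found = 1;
  }
  sqlite3_finalize(p);
  return found;
}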
/* ** COMMAND: mv ** COMMAND: rename* ** ** Usage: %fossil mv|rename OLDNAME NEWNAME ** or: %fossil mv|rename OLDNAME... DIR ** ** Move or rename one or more files or directories within the repository tree. ** You can either rename a file or directory or move it to another subdirectory. ** ** This command does NOT rename or move the files on disk. This command merely ** records the fact that filenames have changed so that appropriate notations ** can be made at the next commit/checkin. ** ** See also: changes, status */ void mv_cmd(void){ int i; int vid; char *zDest; Blob dest; Stmt q; db_must_be_within_tree(); vid = db_lget_int("checkout", 0); if( vid==0 ){ fossil_panic("no checkout rename files in"); } if( g.argc<4 ){ usage("OLDNAME NEWNAME"); } zDest = g.argv[g.argc-1]; db_begin_transaction(); file_tree_name(zDest, &dest, 1); db_multi_exec( "UPDATE vfile SET origname=pathname WHERE origname IS NULL;" ); db_multi_exec( "CREATE TEMP TABLE mv(f TEXT UNIQUE ON CONFLICT IGNORE, t TEXT);" ); if( file_wd_isdir(zDest)!=1 ){ Blob orig; if( g.argc!=4 ){ usage("OLDNAME NEWNAME"); } file_tree_name(g.argv[2], &orig, 1); db_multi_exec( "INSERT INTO mv VALUES(%B,%B)", &orig, &dest ); }else{ if( blob_eq(&dest, ".") ){ blob_reset(&dest); }else{ blob_append(&dest, "/", 1); } for(i=2; i<g.argc-1; i++){ Blob orig; char *zOrig; int nOrig; file_tree_name(g.argv[i], &orig, 1); zOrig = blob_str(&orig); nOrig = blob_size(&orig); db_prepare(&q, "SELECT pathname FROM vfile" " WHERE vid=%d" " AND (pathname='%q' OR (pathname>'%q/' AND pathname<'%q0'))" " ORDER BY 1", vid, zOrig, zOrig, zOrig ); while( db_step(&q)==SQLITE_ROW ){ const char *zPath = db_column_text(&q, 0); int nPath = db_column_bytes(&q, 0); const char *zTail; if( nPath==nOrig ){ zTail = file_tail(zPath); }else{ zTail = &zPath[nOrig+1]; } db_multi_exec( "INSERT INTO mv VALUES('%q','%q%q')", zPath, blob_str(&dest), zTail ); } db_finalize(&q); } } db_prepare(&q, "SELECT f, t FROM mv ORDER BY f"); while( db_step(&q)==SQLITE_ROW ){ const char *zFrom = db_column_text(&q, 0); const char *zTo = db_column_text(&q, 1); mv_one_file(vid, zFrom, zTo); } db_finalize(&q); db_end_transaction(0); }
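/*
** mv_cmd() above selects everything under a directory with the pair of
** comparisons pathname>'dir/' AND pathname<'dir0'.  That works because '0'
** (0x30) is the very next character after '/' (0x2F) in ASCII, so the two
** bounds bracket exactly the names that begin with "dir/", and an ordinary
** index on pathname can serve the range.  A standalone sketch of the same
** trick with bound parameters; vfile(vid,pathname) is the table used above.
*/
#include <stdio.h>
#include <sqlite3.h>

static void list_dir(sqlite3 *db, int vid, const char *zDir){
  sqlite3_stmt *p;
  sqlite3_prepare_v2(db,
      "SELECT pathname FROM vfile"
      " WHERE vid=?1 AND pathname>(?2||'/') AND pathname<(?2||'0')"
      " ORDER BY pathname", -1, &p, 0);
  sqlite3_bind_int(p, 1, vid);
  sqlite3_bind_text(p, 2, zDir, -1, SQLITE_STATIC);  /* no trailing slash */
  while( sqlite3_step(p)==SQLITE_ROW ){
    printf("%s\n", (const char*)sqlite3_column_text(p, 0));
  }
  sqlite3_finalize(p);
}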
/*
** Impl for /json/report/run
**
** Options/arguments:
**
** report=int (CLI: -report # or -r #) is the report number to run.
**
** limit=int (CLI: -limit # or -n #) -n is for compatibility with other
** commands.
**
** format=a|o Specifies the result format: a=each row is an array, o=each
** row is an object. Default=o.
*/
static cson_value * json_report_run(){
  int nReport;
  Stmt q = empty_Stmt;
  cson_object * pay = NULL;
  cson_array * tktList = NULL;
  char const * zFmt;
  char * zTitle = NULL;
  Blob sql = empty_blob;
  int limit = 0;
  cson_value * colNames = NULL;
  int i;

  if(!g.perm.RdTkt){
    json_set_err(FSL_JSON_E_DENIED, "Requires 'r' privileges.");
    return NULL;
  }
  nReport = json_report_get_number(3);
  if(nReport <=0){
    json_set_err(FSL_JSON_E_MISSING_ARGS,
                 "Missing or invalid 'report' (-r) parameter.");
    goto error;
  }
  zFmt = json_find_option_cstr2("format",NULL,"f",3);
  if(!zFmt) zFmt = "o";
  db_prepare(&q,
             "SELECT sqlcode, "
             " title"
             " FROM reportfmt"
             " WHERE rn=%d", nReport);
  if(SQLITE_ROW != db_step(&q)){
    json_set_err(FSL_JSON_E_INVALID_ARGS,
                 "Report number %d not found.", nReport);
    db_finalize(&q);
    goto error;
  }
  limit = json_find_option_int("limit",NULL,"n",-1);
  /* Copy over report's SQL...*/
  blob_append(&sql, db_column_text(&q,0), -1);
  zTitle = mprintf("%s", db_column_text(&q,1));
  db_finalize(&q);
  db_prepare(&q, "%s", blob_str(&sql));

  /** Build the response... */
  pay = cson_new_object();
  cson_object_set(pay, "report", json_new_int(nReport));
  cson_object_set(pay, "title", json_new_string(zTitle));
  if(limit>0){
    cson_object_set(pay, "limit", json_new_int(limit));
  }
  free(zTitle);
  zTitle = NULL;
  if(g.perm.TktFmt){
    cson_object_set(pay, "sqlcode",
                    cson_value_new_string(blob_str(&sql),
                                          (unsigned int)blob_size(&sql)));
  }
  blob_reset(&sql);
  colNames = cson_sqlite3_column_names(q.pStmt);
  cson_object_set( pay, "columnNames", colNames);
  for( i = 0 ;
       ((limit>0) ? (i < limit) : 1) && (SQLITE_ROW == db_step(&q));
       ++i){
    cson_value * row = ('a'==*zFmt)
      ? cson_sqlite3_row_to_array(q.pStmt)
      : cson_sqlite3_row_to_object2(q.pStmt,
                                    cson_value_get_array(colNames));
    if(row && !tktList){
      tktList = cson_new_array();
    }
    cson_array_append(tktList, row);
  }
  db_finalize(&q);
  cson_object_set(pay, "tickets",
                  tktList ? cson_array_value(tktList) : cson_value_null());
  goto end;

  error:
  assert(0 != g.json.resultCode);
  cson_value_free( cson_object_value(pay) );
  pay = NULL;

  end:
  return pay ? cson_object_value(pay) : NULL;
}
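/*
** Illustration only -- not part of the original sources.  Assuming a
** report number of 1, the default 'o' format, and 'TktFmt' permission,
** the payload assembled by json_report_run() above would have roughly
** this shape (all field values here are hypothetical):
**
**   {
**     "report": 1,
**     "title": "All Tickets",
**     "sqlcode": "SELECT ... FROM ticket ...",
**     "columnNames": ["#", "title", "status"],
**     "tickets": [
**       {"#": "abcd1234", "title": "Example ticket", "status": "Open"}
**     ]
**   }
**
** A "limit" property is added only when a positive limit option was
** supplied, and "tickets" is null when the report returns no rows.
*/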
/* ** Implementation of the /json/status page. ** */ cson_value * json_page_status(){ Stmt q = empty_Stmt; cson_object * oPay; /*cson_object * files;*/ int vid, nErr = 0; cson_object * tmpO; char * zTmp; i64 iMtime; cson_array * aFiles; if(!db_open_local(0)){ json_set_err(FSL_JSON_E_DB_NEEDS_CHECKOUT, NULL); return NULL; } oPay = cson_new_object(); cson_object_set(oPay, "repository", json_new_string(db_repository_filename())); cson_object_set(oPay, "localRoot", json_new_string(g.zLocalRoot)); vid = db_lget_int("checkout", 0); if(!vid){ json_set_err( FSL_JSON_E_UNKNOWN, "Can this even happen?" ); return 0; } /* TODO: dupe show_common_info() state */ tmpO = cson_new_object(); cson_object_set(oPay, "checkout", cson_object_value(tmpO)); zTmp = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", vid); cson_object_set(tmpO, "uuid", json_new_string(zTmp) ); free(zTmp); cson_object_set( tmpO, "tags", json_tags_for_checkin_rid(vid, 0) ); /* FIXME: optimize the datetime/timestamp queries into 1 query. */ zTmp = db_text(0, "SELECT datetime(mtime) || " "' UTC' FROM event WHERE objid=%d", vid); cson_object_set(tmpO, "datetime", json_new_string(zTmp)); free(zTmp); iMtime = db_int64(0, "SELECT CAST(strftime('%%s',mtime) AS INTEGER) " "FROM event WHERE objid=%d", vid); cson_object_set(tmpO, "timestamp", cson_value_new_integer((cson_int_t)iMtime)); #if 0 /* TODO: add parent artifact info */ tmpO = cson_new_object(); cson_object_set( oPay, "parent", cson_object_value(tmpO) ); cson_object_set( tmpO, "uuid", TODO ); cson_object_set( tmpO, "timestamp", TODO ); #endif /* Now get the list of non-pristine files... */ aFiles = cson_new_array(); cson_object_set( oPay, "files", cson_array_value( aFiles ) ); db_prepare(&q, "SELECT pathname, deleted, chnged, rid, coalesce(origname!=pathname,0)" " FROM vfile " " WHERE is_selected(id)" " AND (chnged OR deleted OR rid=0 OR pathname!=origname) ORDER BY 1" ); while( db_step(&q)==SQLITE_ROW ){ const char *zPathname = db_column_text(&q,0); int isDeleted = db_column_int(&q, 1); int isChnged = db_column_int(&q,2); int isNew = db_column_int(&q,3)==0; int isRenamed = db_column_int(&q,4); cson_object * oFile; char const * zStatus = "???"; char * zFullName = mprintf("%s%s", g.zLocalRoot, zPathname); if( isDeleted ){ zStatus = "deleted"; }else if( isNew ){ zStatus = "new" /* maintenance reminder: MUST come BEFORE the isChnged checks. */; }else if( isRenamed ){ zStatus = "renamed"; }else if( !file_wd_isfile_or_link(zFullName) ){ if( file_access(zFullName, F_OK)==0 ){ zStatus = "notAFile"; ++nErr; }else{ zStatus = "missing"; ++nErr; } }else if( 2==isChnged ){ zStatus = "updatedByMerge"; }else if( 3==isChnged ){ zStatus = "addedByMerge"; }else if( 4==isChnged ){ zStatus = "updatedByIntegrate"; }else if( 5==isChnged ){ zStatus = "addedByIntegrate"; }else if( 1==isChnged ){ if( file_contains_merge_marker(zFullName) ){ zStatus = "conflict"; }else{ zStatus = "edited"; } } oFile = cson_new_object(); cson_array_append( aFiles, cson_object_value(oFile) ); /* optimization potential: move these keys into cson_strings to take advantage of refcounting. */ cson_object_set( oFile, "name", json_new_string( zPathname ) ); cson_object_set( oFile, "status", json_new_string( zStatus ) ); free(zFullName); } cson_object_set( oPay, "errorCount", json_new_int( nErr ) ); db_finalize(&q); #if 0 /* TODO: add "merged with" status. 
First need (A) to decide on a structure and (B) to set up some tests for the multi-merge case.*/ db_prepare(&q, "SELECT uuid, id FROM vmerge JOIN blob ON merge=rid" " WHERE id<=0"); while( db_step(&q)==SQLITE_ROW ){ const char *zLabel = "MERGED_WITH"; switch( db_column_int(&q, 1) ){ case -1: zLabel = "CHERRYPICK "; break; case -2: zLabel = "BACKOUT "; break; case -4: zLabel = "INTEGRATE "; break; } blob_append(report, zPrefix, nPrefix); blob_appendf(report, "%s %s\n", zLabel, db_column_text(&q, 0)); } db_finalize(&q); if( nErr ){ fossil_fatal("aborting due to prior errors"); } #endif return cson_object_value( oPay ); }
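/*
** Illustration only -- not part of the original sources.  A hypothetical
** /json/status payload built by json_page_status() above might look like
** the following (all values invented for the example):
**
**   {
**     "repository": "/home/user/fossils/project.fossil",
**     "localRoot": "/home/user/project/",
**     "checkout": {
**       "uuid": "1234567890abcdef...",
**       "tags": ["trunk"],
**       "datetime": "2012-01-01 00:00:00 UTC",
**       "timestamp": 1325376000
**     },
**     "files": [
**       {"name": "src/main.c", "status": "edited"},
**       {"name": "doc/notes.txt", "status": "missing"}
**     ],
**     "errorCount": 1
**   }
**
** The "status" field takes one of the strings assigned in the loop above
** ("new", "edited", "deleted", "renamed", "missing", "conflict",
** "notAFile", or one of the merge/integrate variants).
*/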
/*
** Write all files from vid to disk.  Or if vid==0 and id!=0, write just
** the specific file where VFILE.ID=id.
*/
void vfile_to_disk(
  int vid,               /* vid to write to disk */
  int id,                /* Write this one file, if not zero */
  int verbose,           /* Output progress information */
  int promptFlag         /* Prompt user to confirm overwrites */
){
  Stmt q;
  Blob content;
  int nRepos = strlen(g.zLocalRoot);

  if( vid>0 && id==0 ){
    db_prepare(&q, "SELECT id, %Q || pathname, mrid, isexe, islink"
                   " FROM vfile"
                   " WHERE vid=%d AND mrid>0",
               g.zLocalRoot, vid);
  }else{
    assert( vid==0 && id>0 );
    db_prepare(&q, "SELECT id, %Q || pathname, mrid, isexe, islink"
                   " FROM vfile"
                   " WHERE id=%d AND mrid>0",
               g.zLocalRoot, id);
  }
  while( db_step(&q)==SQLITE_ROW ){
    int id, rid, isExe, isLink;
    const char *zName;

    id = db_column_int(&q, 0);
    zName = db_column_text(&q, 1);
    rid = db_column_int(&q, 2);
    isExe = db_column_int(&q, 3);
    isLink = db_column_int(&q, 4);
    content_get(rid, &content);
    if( file_is_the_same(&content, zName) ){
      blob_reset(&content);
      if( file_wd_setexe(zName, isExe) ){
        db_multi_exec("UPDATE vfile SET mtime=%lld WHERE id=%d",
                      file_wd_mtime(zName), id);
      }
      continue;
    }
    if( promptFlag && file_wd_size(zName)>=0 ){
      Blob ans;
      char *zMsg;
      char cReply;
      zMsg = mprintf("overwrite %s (a=always/y/N)? ", zName);
      prompt_user(zMsg, &ans);
      free(zMsg);
      cReply = blob_str(&ans)[0];
      blob_reset(&ans);
      if( cReply=='a' || cReply=='A' ){
        promptFlag = 0;
        cReply = 'y';
      }
      if( cReply=='n' || cReply=='N' ){
        blob_reset(&content);
        continue;
      }
    }
    if( verbose ) vcs_print("%s\n", &zName[nRepos]);
    if( file_wd_isdir(zName) == 1 ){
      /*TODO(dchest): remove directories? */
      vcs_fatal("%s is a directory, cannot overwrite\n", zName);
    }
    if( file_wd_size(zName)>=0 && (isLink || file_wd_islink(zName)) ){
      file_delete(zName);
    }
    if( isLink ){
      symlink_create(blob_str(&content), zName);
    }else{
      blob_write_to_file(&content, zName);
    }
    file_wd_setexe(zName, isExe);
    blob_reset(&content);
    db_multi_exec("UPDATE vfile SET mtime=%lld WHERE id=%d",
                  file_wd_mtime(zName), id);
  }
  db_finalize(&q);
}
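/*
** Illustration only -- not part of the original sources.  vfile_to_disk()
** above skips files whose on-disk image already matches the wanted content
** via file_is_the_same(), which is defined elsewhere.  Below is a minimal
** sketch under the assumption that it compares size first and bytes
** second, using Blob helpers (blob_read_from_file, blob_compare) as they
** exist in Fossil's blob API; the real helper may differ.
*/
#if 0
static int file_is_the_same_sketch(Blob *pContent, const char *zName){
  i64 iSize = file_wd_size(zName);   /* negative if the file is absent */
  Blob onDisk;
  int rc;
  if( iSize<0 ) return 0;
  if( iSize!=blob_size(pContent) ) return 0;
  blob_read_from_file(&onDisk, zName);
  rc = blob_compare(&onDisk, pContent)==0;
  blob_reset(&onDisk);
  return rc;
}
#endif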
/*
** Scans the specified base directory for any directories within it,
** keeping a count of how many files each of them contains, either
** directly or indirectly.
**
** Subdirectories are scanned recursively.
** Files already named in VFILE are omitted.
**
** Directories whose names begin with "." are omitted unless the SCAN_ALL
** flag is set.
**
** Any directories that match the glob patterns pIgnore* are excluded from
** the scan.  Name matching occurs after the first nPrefix characters are
** elided from the filename.
**
** Returns the total number of files found.
*/
int vfile_dir_scan(
  Blob *pPath,           /* Base directory to be scanned */
  int nPrefix,           /* Number of bytes in base directory name */
  unsigned scanFlags,    /* Zero or more SCAN_xxx flags */
  Glob *pIgnore1,        /* Do not add directories that match this GLOB */
  Glob *pIgnore2,        /* Omit directories matching this GLOB too */
  Glob *pIgnore3         /* Omit directories matching this GLOB too */
){
  int result = 0;
  DIR *d;
  int origSize;
  struct dirent *pEntry;
  int skipAll = 0;
  static Stmt ins;
  static Stmt upd;
  static int depth = 0;
  void *zNative;

  origSize = blob_size(pPath);
  if( pIgnore1 || pIgnore2 || pIgnore3 ){
    blob_appendf(pPath, "/");
    if( glob_match(pIgnore1, &blob_str(pPath)[nPrefix+1]) ) skipAll = 1;
    if( glob_match(pIgnore2, &blob_str(pPath)[nPrefix+1]) ) skipAll = 1;
    if( glob_match(pIgnore3, &blob_str(pPath)[nPrefix+1]) ) skipAll = 1;
    blob_resize(pPath, origSize);
  }
  if( skipAll ) return result;

  if( depth==0 ){
    db_multi_exec("DROP TABLE IF EXISTS dscan_temp;"
                  "CREATE TEMP TABLE dscan_temp("
                  " x TEXT PRIMARY KEY %s, y INTEGER)",
                  filename_collation());
    db_prepare(&ins,
       "INSERT OR IGNORE INTO dscan_temp(x, y) SELECT :file, :count"
       " WHERE NOT EXISTS(SELECT 1 FROM vfile WHERE"
       " pathname GLOB :file || '/*' %s)",
       filename_collation()
    );
    db_prepare(&upd,
       "UPDATE OR IGNORE dscan_temp SET y = coalesce(y, 0) + 1"
       " WHERE x=:file %s",
       filename_collation()
    );
  }
  depth++;

  zNative = fossil_utf8_to_filename(blob_str(pPath));
  d = opendir(zNative);
  if( d ){
    while( (pEntry=readdir(d))!=0 ){
      char *zOrigPath;
      char *zPath;
      char *zUtf8;
      if( pEntry->d_name[0]=='.' ){
        if( (scanFlags & SCAN_ALL)==0 ) continue;
        if( pEntry->d_name[1]==0 ) continue;
        if( pEntry->d_name[1]=='.' && pEntry->d_name[2]==0 ) continue;
      }
      zOrigPath = mprintf("%s", blob_str(pPath));
      zUtf8 = fossil_filename_to_utf8(pEntry->d_name);
      blob_appendf(pPath, "/%s", zUtf8);
      zPath = blob_str(pPath);
      if( glob_match(pIgnore1, &zPath[nPrefix+1]) ||
          glob_match(pIgnore2, &zPath[nPrefix+1]) ||
          glob_match(pIgnore3, &zPath[nPrefix+1]) ){
        /* do nothing */
      }else if( file_wd_isdir(zPath)==1 ){
        if( (scanFlags & SCAN_NESTED) || !vfile_top_of_checkout(zPath) ){
          char *zSavePath = mprintf("%s", zPath);
          int count = vfile_dir_scan(pPath, nPrefix, scanFlags, pIgnore1,
                                     pIgnore2, pIgnore3);
          db_bind_text(&ins, ":file", &zSavePath[nPrefix+1]);
          db_bind_int(&ins, ":count", count);
          db_step(&ins);
          db_reset(&ins);
          fossil_free(zSavePath);
          result += count; /* add the files found in the subdirectory */
        }
      }else if( file_wd_isfile_or_link(zPath) ){
        db_bind_text(&upd, ":file", zOrigPath);
        db_step(&upd);
        db_reset(&upd);
        result++; /* found 1 normal file */
      }
      fossil_filename_free(zUtf8);
      blob_resize(pPath, origSize);
      fossil_free(zOrigPath);
    }
    closedir(d);
  }
  fossil_filename_free(zNative);

  depth--;
  if( depth==0 ){
    db_finalize(&upd);
    db_finalize(&ins);
  }
  return result;
}
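/*
** Illustration only -- not part of the original sources.  A caller of
** vfile_dir_scan() would typically seed pPath with the checkout root
** (minus its trailing '/') and pass that length as nPrefix, roughly as
** sketched below.  The wrapper name, the zero scanFlags value, and the
** single ignore glob are hypothetical choices for the example.
*/
#if 0
static int count_unmanaged_files_sketch(Glob *pIgnore){
  Blob path;
  int nFound;
  /* g.zLocalRoot ends with '/', so drop that byte for the scan prefix */
  blob_init(&path, g.zLocalRoot, (int)strlen(g.zLocalRoot)-1);
  nFound = vfile_dir_scan(&path, blob_size(&path), 0 /* no SCAN_ALL */,
                          pIgnore, 0, 0);
  blob_reset(&path);
  return nFound;
}
#endif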