/* ** WEBPAGE: tarball ** URL: /tarball/RID.tar.gz ** ** Generate a compressed tarball for a checkin. ** Return that tarball as the HTTP reply content. */ void tarball_page(void){ int rid; char *zName, *zRid; int nName, nRid; Blob tarball; login_check_credentials(); if( !g.perm.Zip ){ login_needed(); return; } zName = mprintf("%s", PD("name","")); nName = strlen(zName); zRid = mprintf("%s", PD("uuid","trunk")); nRid = strlen(zRid); if( nName>7 && fossil_strcmp(&zName[nName-7], ".tar.gz")==0 ){ /* Special case: Remove the ".tar.gz" suffix. */ nName -= 7; zName[nName] = 0; }else{ /* If the file suffix is not ".tar.gz" then just remove the ** suffix up to and including the last "." */ for(nName=strlen(zName)-1; nName>5; nName--){ if( zName[nName]=='.' ){ zName[nName] = 0; break; } } } rid = name_to_typed_rid(nRid?zRid:zName, "ci"); if( rid==0 ){ @ Not found return; }
/*
** Add a fixed parameter to an HQuery.  Or remove the parameter if zValue==0.
*/
void url_add_parameter(HQuery *p, const char *zName, const char *zValue){
  int i;
  for(i=0; i<p->nParam; i++){
    if( fossil_strcmp(p->azName[i],zName)==0 ){
      if( zValue==0 ){
        p->nParam--;
        p->azValue[i] = p->azValue[p->nParam];
        p->azName[i] = p->azName[p->nParam];
      }else{
        p->azValue[i] = zValue;
      }
      return;
    }
  }
  assert( i==p->nParam );
  if( zValue==0 ) return;
  if( i>=p->nAlloc ){
    p->nAlloc = p->nAlloc*2 + 10;
    p->azName = fossil_realloc(p->azName, sizeof(p->azName[0])*p->nAlloc);
    p->azValue = fossil_realloc(p->azValue, sizeof(p->azValue[0])*p->nAlloc);
  }
  p->azName[i] = zName;
  p->azValue[i] = zValue;
  p->nParam++;
}
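/*
** Illustrative usage sketch (not part of the original sources): pin a fixed
** parameter on an HQuery and later drop it again by passing a NULL value.
** url_initialize() is assumed to exist elsewhere and to zero the structure
** and set its base path; url_render() appears later in this section.
*/
static void example_fixed_parameter(void){
  HQuery url;
  url_initialize(&url, "timeline");       /* assumed helper: base = /timeline */
  url_add_parameter(&url, "y", "ci");     /* always emit y=ci */
  fossil_print("%s\n", url_render(&url, 0, 0, 0, 0));  /* .../timeline?y=ci */
  url_add_parameter(&url, "y", 0);        /* NULL value removes the parameter */
  fossil_print("%s\n", url_render(&url, 0, 0, 0, 0));  /* .../timeline */
}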
/*
** COMMAND: branch
**
** Usage: %fossil branch SUBCOMMAND ... ?OPTIONS?
**
** Run various subcommands to manage branches of the open repository or
** of the repository identified by the -R or --repository option.
**
**    %fossil branch new BRANCH-NAME BASIS ?--bgcolor COLOR? ?--private?
**
**        Create a new branch BRANCH-NAME off of check-in BASIS.
**        You can optionally give the branch a default color.  The
**        --private option makes the branch private.
**
**    %fossil branch list ?--all | --closed?
**    %fossil branch ls ?--all | --closed?
**
**        List all branches.  Use --all or --closed to list all branches
**        or closed branches.  The default is to show only open branches.
**
** Options:
**   -R|--repository FILE       Run commands on repository FILE
*/
void branch_cmd(void){
  int n;
  const char *zCmd = "list";
  db_find_and_open_repository(0, 0);
  if( g.argc<2 ){
    usage("new|list|ls ...");
  }
  if( g.argc>=3 ) zCmd = g.argv[2];
  n = strlen(zCmd);
  if( strncmp(zCmd,"new",n)==0 ){
    branch_new();
  }else if( (strncmp(zCmd,"list",n)==0)||(strncmp(zCmd, "ls", n)==0) ){
    Stmt q;
    int vid;
    char *zCurrent = 0;
    int showAll = find_option("all",0,0)!=0;
    int showClosed = find_option("closed",0,0)!=0;
    if( g.localOpen ){
      vid = db_lget_int("checkout", 0);
      zCurrent = db_text(0, "SELECT value FROM tagxref"
                            " WHERE rid=%d AND tagid=%d", vid, TAG_BRANCH);
    }
    branch_prepare_list_query(&q, showAll?1:(showClosed?-1:0));
    while( db_step(&q)==SQLITE_ROW ){
      const char *zBr = db_column_text(&q, 0);
      int isCur = zCurrent!=0 && fossil_strcmp(zCurrent,zBr)==0;
      fossil_print("%s%s\n", (isCur ? "* " : "  "), zBr);
    }
    db_finalize(&q);
  }else{
    fossil_panic("branch subcommand should be one of: "
                 "new list ls");
  }
}
/*
** Return the index into azField[] of the given field name.
** Return -1 if zField is not in azField[].
*/
static int fieldId(const char *zField){
  int i;
  for(i=0; i<nField; i++){
    if( fossil_strcmp(azField[i], zField)==0 ) return i;
  }
  return -1;
}
/* ** Load all file information out of the gg.zFrom check-in */ static void import_prior_files(void){ Manifest *p; int rid; ManifestFile *pOld; ImportFile *pNew; if( gg.fromLoaded ) return; gg.fromLoaded = 1; if( gg.zFrom==0 && gg.zPrevCheckin!=0 && fossil_strcmp(gg.zBranch, gg.zPrevBranch)==0 ){ gg.zFrom = gg.zPrevCheckin; gg.zPrevCheckin = 0; } if( gg.zFrom==0 ) return; rid = fast_uuid_to_rid(gg.zFrom); if( rid==0 ) return; p = manifest_get(rid, CFTYPE_MANIFEST, 0); if( p==0 ) return; manifest_file_rewind(p); while( (pOld = manifest_file_next(p, 0))!=0 ){ pNew = import_add_file(); pNew->zName = fossil_strdup(pOld->zName); pNew->isExe = pOld->zPerm && strstr(pOld->zPerm, "x")!=0; pNew->isLink = pOld->zPerm && strstr(pOld->zPerm, "l")!=0; pNew->zUuid = fossil_strdup(pOld->zUuid); pNew->isFrom = 1; } manifest_destroy(p); }
/*
** Verify that all entries in the aMime[] table are in sorted order.
** Abort with a fatal error if any is out-of-order.
*/
static void mimetype_verify(void){
  int i;
  for(i=1; i<ArraySize(aMime); i++){
    if( fossil_strcmp(aMime[i-1].zSuffix,aMime[i].zSuffix)>=0 ){
      fossil_fatal("mimetypes out of sequence: %s before %s",
                   aMime[i-1].zSuffix, aMime[i].zSuffix);
    }
  }
}
/* ** Guess the mime-type of a document based on its name. */ const char *mimetype_from_name(const char *zName){ const char *z; int i; int first, last; int len; char zSuffix[20]; #ifdef FOSSIL_DEBUG /* This is test code to make sure the table above is in the correct ** order */ if( fossil_strcmp(zName, "mimetype-test")==0 ){ mimetype_verify(); return "ok"; } #endif z = zName; for(i=0; zName[i]; i++){ if( zName[i]=='.' ) z = &zName[i+1]; } len = strlen(z); if( len<sizeof(zSuffix)-1 ){ sqlite3_snprintf(sizeof(zSuffix), zSuffix, "%s", z); for(i=0; zSuffix[i]; i++) zSuffix[i] = fossil_tolower(zSuffix[i]); first = 0; last = ArraySize(aMime) - 1; while( first<=last ){ int c; i = (first+last)/2; c = fossil_strcmp(zSuffix, aMime[i].zSuffix); if( c==0 ) return aMime[i].zMimetype; if( c<0 ){ last = i-1; }else{ first = i+1; } } } return "application/x-fossil-artifact"; }
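/*
** Illustrative check (not part of the original sources): the lookup is
** driven by the lower-cased suffix after the final ".", falling back to a
** generic artifact type for unknown suffixes.  Expected values follow from
** the aMime[] table shown later in this section.
*/
static void example_mimetype_lookup(void){
  fossil_print("%s\n", mimetype_from_name("doc/index.wiki")); /* application/x-fossil-wiki */
  fossil_print("%s\n", mimetype_from_name("logo.PNG"));       /* image/png */
  fossil_print("%s\n", mimetype_from_name("core.1234"));      /* application/x-fossil-artifact */
}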
/* ** COMMAND: leaves* ** ** Usage: %fossil leaves ?OPTIONS? ** ** Find leaves of all branches. By default show only open leaves. ** The -a|--all flag causes all leaves (closed and open) to be shown. ** The -c|--closed flag shows only closed leaves. ** ** The --recompute flag causes the content of the "leaf" table in the ** repository database to be recomputed. ** ** Options: ** -a|--all show ALL leaves ** -c|--closed show only closed leaves ** --bybranch order output by branch name ** --recompute recompute the "leaf" table in the repository DB ** ** See also: descendants, finfo, info, branch */ void leaves_cmd(void){ Stmt q; Blob sql; int showAll = find_option("all", "a", 0)!=0; int showClosed = find_option("closed", "c", 0)!=0; int recomputeFlag = find_option("recompute",0,0)!=0; int byBranch = find_option("bybranch",0,0)!=0; char *zLastBr = 0; int n; char zLineNo[10]; db_find_and_open_repository(0,0); if( recomputeFlag ) leaf_rebuild(); blob_zero(&sql); blob_append(&sql, timeline_query_for_tty(), -1); blob_appendf(&sql, " AND blob.rid IN leaf"); if( showClosed ){ blob_appendf(&sql," AND %z", leaf_is_closed_sql("blob.rid")); }else if( !showAll ){ blob_appendf(&sql," AND NOT %z", leaf_is_closed_sql("blob.rid")); } if( byBranch ){ db_prepare(&q, "%s ORDER BY nullif(branch,'trunk') COLLATE nocase," " event.mtime DESC", blob_str(&sql)); }else{ db_prepare(&q, "%s ORDER BY event.mtime DESC", blob_str(&sql)); } blob_reset(&sql); n = 0; while( db_step(&q)==SQLITE_ROW ){ const char *zId = db_column_text(&q, 1); const char *zDate = db_column_text(&q, 2); const char *zCom = db_column_text(&q, 3); const char *zBr = db_column_text(&q, 7); char *z; if( byBranch && fossil_strcmp(zBr, zLastBr)!=0 ){ fossil_print("*** %s ***\n", zBr); fossil_free(zLastBr); zLastBr = fossil_strdup(zBr); } n++; sqlite3_snprintf(sizeof(zLineNo), zLineNo, "(%d)", n); fossil_print("%6s ", zLineNo); z = mprintf("%s [%.10s] %s", zDate, zId, zCom); comment_print(z, 7, 79); fossil_free(z); } fossil_free(zLastBr); db_finalize(&q); }
/* ** COMMAND: test-ticket-rebuild ** ** Usage: %fossil test-ticket-rebuild TICKETID|all ** ** Rebuild the TICKET and TICKETCHNG tables for the given ticket ID ** or for ALL. */ void test_ticket_rebuild(void){ db_find_and_open_repository(0, 0); if( g.argc!=3 ) usage("TICKETID|all"); if( fossil_strcmp(g.argv[2], "all")==0 ){ ticket_rebuild(); }else{ const char *zUuid; zUuid = db_text(0, "SELECT substr(tagname,5) FROM tag" " WHERE tagname GLOB 'tkt-%q*'", g.argv[2]); if( zUuid==0 ) fossil_fatal("no such ticket: %s", g.argv[2]); ticket_rebuild_entry(zUuid); } }
/*
** Render the URL with a parameter override.
**
** Returned memory is transient: it remains valid only until the next call
** to this routine for the same HQuery, or until the HQuery object is
** destroyed.
*/
char *url_render(
  HQuery *p,              /* Base URL */
  const char *zName1,     /* First override */
  const char *zValue1,    /* First override value */
  const char *zName2,     /* Second override */
  const char *zValue2     /* Second override value */
){
  const char *zSep = "?";
  int i;
  blob_reset(&p->url);
  blob_appendf(&p->url, "%s/%s", g.zTop, p->zBase);
  for(i=0; i<p->nParam; i++){
    const char *z = p->azValue[i];
    if( zName1 && fossil_strcmp(zName1,p->azName[i])==0 ){
      zName1 = 0;
      z = zValue1;
      if( z==0 ) continue;
    }
    if( zName2 && fossil_strcmp(zName2,p->azName[i])==0 ){
      zName2 = 0;
      z = zValue2;
      if( z==0 ) continue;
    }
    blob_appendf(&p->url, "%s%s", zSep, p->azName[i]);
    if( z && z[0] ) blob_appendf(&p->url, "=%T", z);
    zSep = "&";
  }
  if( zName1 && zValue1 ){
    blob_appendf(&p->url, "%s%s", zSep, zName1);
    if( zValue1[0] ) blob_appendf(&p->url, "=%T", zValue1);
  }
  if( zName2 && zValue2 ){
    blob_appendf(&p->url, "%s%s", zSep, zName2);
    if( zValue2[0] ) blob_appendf(&p->url, "=%T", zValue2);
  }
  return blob_str(&p->url);
}
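/*
** Illustrative sketch (not from the original file): rendering with per-call
** overrides.  Assumes an HQuery prepared as in the earlier example, with a
** fixed parameter y=ci already added.
*/
static void example_render_overrides(HQuery *pUrl){
  /* Override the fixed value of "y" for this one link: .../?y=all */
  fossil_print("%s\n", url_render(pUrl, "y", "all", 0, 0));
  /* A NULL override value suppresses the parameter for this link only */
  fossil_print("%s\n", url_render(pUrl, "y", 0, 0, 0));
  /* Names not already in the HQuery are appended at the end: .../?y=ci&n=200 */
  fossil_print("%s\n", url_render(pUrl, "n", "200", 0, 0));
}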
/*
** Query the database for all TICKET fields for the specific
** ticket whose name is given by the "name" CGI parameter.
** Load the values for all fields into the interpreter.
**
** Only load those fields which do not already exist as
** variables.
**
** Fields of the TICKET table that begin with "private_" are
** expanded using the db_reveal() function.  If g.perm.RdAddr is
** true, then the db_reveal() function will decode the content
** using the CONCEALED table so that the content is legible.
** Otherwise, db_reveal() is a no-op and the content remains
** obscured.
*/
static void initializeVariablesFromDb(void){
  const char *zName;
  Stmt q;
  int i, n, size, j;

  zName = PD("name","-none-");
  db_prepare(&q, "SELECT datetime(tkt_mtime,'localtime') AS tkt_datetime, *"
                 " FROM ticket WHERE tkt_uuid GLOB '%q*'", zName);
  if( db_step(&q)==SQLITE_ROW ){
    n = db_column_count(&q);
    for(i=0; i<n; i++){
      const char *zVal = db_column_text(&q, i);
      const char *zName = db_column_name(&q, i);
      char *zRevealed = 0;
      if( zVal==0 ){
        zVal = "";
      }else if( strncmp(zName, "private_", 8)==0 ){
        zVal = zRevealed = db_reveal(zVal);
      }
      for(j=0; j<nField; j++){
        if( fossil_strcmp(azField[j],zName)==0 ){
          azValue[j] = mprintf("%s", zVal);
          break;
        }
      }
      if( Th_Fetch(zName, &size)==0 ){
        Th_Store(zName, zVal);
      }
      free(zRevealed);
    }
  }else{
    db_finalize(&q);
    db_prepare(&q, "PRAGMA table_info(ticket)");
    if( Th_Fetch("tkt_uuid",&size)==0 ){
      Th_Store("tkt_uuid",zName);
    }
    while( db_step(&q)==SQLITE_ROW ){
      const char *zField = db_column_text(&q, 1);
      if( Th_Fetch(zField, &size)==0 ){
        Th_Store(zField, "");
      }
    }
    if( Th_Fetch("tkt_datetime",&size)==0 ){
      Th_Store("tkt_datetime","");
    }
  }
  db_finalize(&q);
}
/* ** Add all files in the sfile temp table. ** ** Automatically exclude the repository file. */ static int add_files_in_sfile(int vid, int caseSensitive){ const char *zRepo; /* Name of the repository database file */ int nAdd = 0; /* Number of files added */ int i; /* Loop counter */ const char *zReserved; /* Name of a reserved file */ Blob repoName; /* Treename of the repository */ Stmt loop; /* SQL to loop over all files to add */ int (*xCmp)(const char*,const char*); if( !file_tree_name(g.zRepositoryName, &repoName, 0) ){ blob_zero(&repoName); zRepo = ""; }else{ zRepo = blob_str(&repoName); } if( caseSensitive ){ xCmp = fossil_strcmp; }else{ xCmp = fossil_stricmp; db_multi_exec( "CREATE INDEX IF NOT EXISTS vfile_nocase" " ON vfile(pathname COLLATE nocase)" ); } db_prepare(&loop, "SELECT x FROM sfile ORDER BY x"); while( db_step(&loop)==SQLITE_ROW ){ const char *zToAdd = db_column_text(&loop, 0); if( fossil_strcmp(zToAdd, zRepo)==0 ) continue; for(i=0; (zReserved = fossil_reserved_name(i))!=0; i++){ if( xCmp(zToAdd, zReserved)==0 ) break; } if( zReserved ) continue; nAdd += add_one_file(zToAdd, vid, caseSensitive); } db_finalize(&loop); blob_reset(&repoName); return nAdd; }
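/*
** Illustrative sketch (not part of the original file): the comparator-
** selection idiom used above, shown in isolation.  fossil_strcmp() and
** fossil_stricmp() are the case-sensitive and case-insensitive variants.
*/
static int example_same_filename(const char *zA, const char *zB,
                                 int caseSensitive){
  int (*xCmp)(const char*, const char*);
  xCmp = caseSensitive ? fossil_strcmp : fossil_stricmp;
  return xCmp(zA, zB)==0;
}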
/* ** Read the git-fast-import format from pIn and insert the corresponding ** content into the database. */ static void git_fast_import(FILE *pIn){ ImportFile *pFile, *pNew; int i, mx; char *z; char *zUuid; char *zName; char *zPerm; char *zFrom; char *zTo; char zLine[1000]; gg.xFinish = finish_noop; while( fgets(zLine, sizeof(zLine), pIn) ){ if( zLine[0]=='\n' || zLine[0]=='#' ) continue; if( memcmp(zLine, "blob", 4)==0 ){ gg.xFinish(); gg.xFinish = finish_blob; }else if( memcmp(zLine, "commit ", 7)==0 ){ gg.xFinish(); gg.xFinish = finish_commit; trim_newline(&zLine[7]); z = &zLine[7]; /* The argument to the "commit" line might match either of these ** patterns: ** ** (A) refs/heads/BRANCHNAME ** (B) refs/tags/TAGNAME ** ** If pattern A is used, then the branchname used is as shown. ** Except, the "master" branch which is the default branch name in ** Git is changed to "trunk" which is the default name in Fossil. ** If the pattern is B, then the new commit should be on the same ** branch as its parent. And, we might need to add the TAGNAME ** tag to the new commit. However, if there are multiple instances ** of pattern B with the same TAGNAME, then only put the tag on the ** last commit that holds that tag. ** ** None of the above is explained in the git-fast-export ** documentation. We had to figure it out via trial and error. */ for(i=strlen(z)-1; i>=0 && z[i]!='/'; i--){} gg.tagCommit = memcmp(&z[i-4], "tags", 4)==0; /* True for pattern B */ if( z[i+1]!=0 ) z += i+1; if( fossil_strcmp(z, "master")==0 ) z = "trunk"; gg.zBranch = fossil_strdup(z); gg.fromLoaded = 0; }else if( memcmp(zLine, "tag ", 4)==0 ){ gg.xFinish(); gg.xFinish = finish_tag; trim_newline(&zLine[4]); gg.zTag = fossil_strdup(&zLine[4]); }else if( memcmp(zLine, "reset ", 4)==0 ){ gg.xFinish(); }else if( memcmp(zLine, "checkpoint", 10)==0 ){ gg.xFinish(); }else if( memcmp(zLine, "feature", 7)==0 ){ gg.xFinish(); }else if( memcmp(zLine, "option", 6)==0 ){ gg.xFinish(); }else if( memcmp(zLine, "progress ", 9)==0 ){ gg.xFinish(); trim_newline(&zLine[9]); fossil_print("%s\n", &zLine[9]); fflush(stdout); }else if( memcmp(zLine, "data ", 5)==0 ){ fossil_free(gg.aData); gg.aData = 0; gg.nData = atoi(&zLine[5]); if( gg.nData ){ int got; gg.aData = fossil_malloc( gg.nData+1 ); got = fread(gg.aData, 1, gg.nData, pIn); if( got!=gg.nData ){ fossil_fatal("short read: got %d of %d bytes", got, gg.nData); } gg.aData[got] = 0; if( gg.zComment==0 && gg.xFinish==finish_commit ){ gg.zComment = gg.aData; gg.aData = 0; gg.nData = 0; } } }else if( memcmp(zLine, "author ", 7)==0 ){ /* No-op */ }else if( memcmp(zLine, "mark ", 5)==0 ){ trim_newline(&zLine[5]); fossil_free(gg.zMark); gg.zMark = fossil_strdup(&zLine[5]); }else if( memcmp(zLine, "tagger ", 7)==0 || memcmp(zLine, "committer ",10)==0 ){ sqlite3_int64 secSince1970; for(i=0; zLine[i] && zLine[i]!='<'; i++){} if( zLine[i]==0 ) goto malformed_line; z = &zLine[i+1]; for(i=i+1; zLine[i] && zLine[i]!='>'; i++){} if( zLine[i]==0 ) goto malformed_line; zLine[i] = 0; fossil_free(gg.zUser); gg.zUser = fossil_strdup(z); secSince1970 = 0; for(i=i+2; fossil_isdigit(zLine[i]); i++){ secSince1970 = secSince1970*10 + zLine[i] - '0'; } fossil_free(gg.zDate); gg.zDate = db_text(0, "SELECT datetime(%lld, 'unixepoch')", secSince1970); gg.zDate[10] = 'T'; }else if( memcmp(zLine, "from ", 5)==0 ){ trim_newline(&zLine[5]); fossil_free(gg.zFromMark); gg.zFromMark = fossil_strdup(&zLine[5]); fossil_free(gg.zFrom); gg.zFrom = resolve_committish(&zLine[5]); }else if( memcmp(zLine, "merge ", 6)==0 ){ 
trim_newline(&zLine[6]); if( gg.nMerge>=gg.nMergeAlloc ){ gg.nMergeAlloc = gg.nMergeAlloc*2 + 10; gg.azMerge = fossil_realloc(gg.azMerge, gg.nMergeAlloc*sizeof(char*)); } gg.azMerge[gg.nMerge] = resolve_committish(&zLine[6]); if( gg.azMerge[gg.nMerge] ) gg.nMerge++; }else if( memcmp(zLine, "M ", 2)==0 ){ import_prior_files(); z = &zLine[2]; zPerm = next_token(&z); zUuid = next_token(&z); zName = rest_of_line(&z); dequote_git_filename(zName); i = 0; pFile = import_find_file(zName, &i, gg.nFile); if( pFile==0 ){ pFile = import_add_file(); pFile->zName = fossil_strdup(zName); } pFile->isExe = (fossil_strcmp(zPerm, "100755")==0); pFile->isLink = (fossil_strcmp(zPerm, "120000")==0); fossil_free(pFile->zUuid); pFile->zUuid = resolve_committish(zUuid); pFile->isFrom = 0; }else if( memcmp(zLine, "D ", 2)==0 ){ import_prior_files(); z = &zLine[2]; zName = rest_of_line(&z); dequote_git_filename(zName); i = 0; while( (pFile = import_find_file(zName, &i, gg.nFile))!=0 ){ if( pFile->isFrom==0 ) continue; fossil_free(pFile->zName); fossil_free(pFile->zPrior); fossil_free(pFile->zUuid); *pFile = gg.aFile[--gg.nFile]; i--; } }else if( memcmp(zLine, "C ", 2)==0 ){ int nFrom; import_prior_files(); z = &zLine[2]; zFrom = next_token(&z); zTo = rest_of_line(&z); i = 0; mx = gg.nFile; nFrom = strlen(zFrom); while( (pFile = import_find_file(zFrom, &i, mx))!=0 ){ if( pFile->isFrom==0 ) continue; pNew = import_add_file(); pFile = &gg.aFile[i-1]; if( strlen(pFile->zName)>nFrom ){ pNew->zName = mprintf("%s%s", zTo, &pFile->zName[nFrom]); }else{ pNew->zName = fossil_strdup(pFile->zName); } pNew->isExe = pFile->isExe; pNew->isLink = pFile->isLink; pNew->zUuid = fossil_strdup(pFile->zUuid); pNew->isFrom = 0; } }else if( memcmp(zLine, "R ", 2)==0 ){ int nFrom; import_prior_files(); z = &zLine[2]; zFrom = next_token(&z); zTo = rest_of_line(&z); i = 0; nFrom = strlen(zFrom); while( (pFile = import_find_file(zFrom, &i, gg.nFile))!=0 ){ if( pFile->isFrom==0 ) continue; pNew = import_add_file(); pFile = &gg.aFile[i-1]; if( strlen(pFile->zName)>nFrom ){ pNew->zName = mprintf("%s%s", zTo, &pFile->zName[nFrom]); }else{ pNew->zName = fossil_strdup(pFile->zName); } pNew->zPrior = pFile->zName; pNew->isExe = pFile->isExe; pNew->isLink = pFile->isLink; pNew->zUuid = pFile->zUuid; pNew->isFrom = 0; gg.nFile--; *pFile = *pNew; memset(pNew, 0, sizeof(*pNew)); } fossil_fatal("cannot handle R records, use --full-tree"); }else if( memcmp(zLine, "deleteall", 9)==0 ){ gg.fromLoaded = 1; }else if( memcmp(zLine, "N ", 2)==0 ){ /* No-op */ }else { goto malformed_line; } } gg.xFinish(); if( gg.hasLinks ){ db_set_int("allow-symlinks", 1, 0); } import_reset(1); return; malformed_line: trim_newline(zLine); fossil_fatal("bad fast-import line: [%s]", zLine); return; }
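/*
** Illustrative sketch (not in the importer itself): the ref-to-branch
** mapping described in the "commit" handler above, pulled out into a
** standalone helper.  It keeps the text after the last '/', remembers
** whether the ref lives under "tags" (pattern B), and renames git's
** default branch "master" to fossil's default "trunk".
*/
static const char *example_branch_from_ref(const char *zRef, int *pIsTag){
  int i;
  for(i=strlen(zRef)-1; i>=0 && zRef[i]!='/'; i--){}
  *pIsTag = i>=4 && memcmp(&zRef[i-4], "tags", 4)==0;
  if( i>=0 && zRef[i+1]!=0 ) zRef += i+1;
  if( fossil_strcmp(zRef, "master")==0 ) zRef = "trunk";
  /* "refs/heads/master" -> "trunk";  "refs/tags/v1.0" -> "v1.0" with *pIsTag==1 */
  return zRef;
}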
/*
** Compare two entries in aField[] for sorting purposes
*/
static int nameCmpr(const void *a, const void *b){
  return fossil_strcmp(((const struct tktFieldInfo*)a)->zName,
                       ((const struct tktFieldInfo*)b)->zName);
}
/*
** Compare two entries in azField for sorting purposes
*/
static int nameCmpr(const void *a, const void *b){
  return fossil_strcmp(*(char**)a, *(char**)b);
}
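/*
** Illustrative sketch (not from the original sources): both nameCmpr()
** variants above are qsort() comparators.  The char** form would be used
** roughly like this; the array name and count are assumptions made for the
** example, and qsort() comes from <stdlib.h>.
*/
static void example_sort_field_names(char **azName, int nName){
  qsort(azName, nName, sizeof(azName[0]), nameCmpr);
}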
/* ** COMMAND: test-integrity ** ** Verify that all content can be extracted from the BLOB table correctly. ** If the BLOB table is correct, then the repository can always be ** successfully reconstructed using "fossil rebuild". */ void test_integrity(void){ Stmt q; Blob content; Blob cksum; int n1 = 0; int n2 = 0; int nErr = 0; int total; db_find_and_open_repository(OPEN_ANY_SCHEMA, 2); /* Make sure no public artifact is a delta from a private artifact */ db_prepare(&q, "SELECT " " rid, (SELECT uuid FROM blob WHERE rid=delta.rid)," " srcid, (SELECT uuid FROM blob WHERE rid=delta.srcid)" " FROM delta" " WHERE srcid in private AND rid NOT IN private" ); while( db_step(&q)==SQLITE_ROW ){ int rid = db_column_int(&q, 0); const char *zId = db_column_text(&q, 1); int srcid = db_column_int(&q, 2); const char *zSrc = db_column_text(&q, 3); fossil_print( "public artifact %S (%d) is a delta from private artifact %S (%d)\n", zId, rid, zSrc, srcid ); nErr++; } db_finalize(&q); db_prepare(&q, "SELECT rid, uuid, size FROM blob ORDER BY rid"); total = db_int(0, "SELECT max(rid) FROM blob"); while( db_step(&q)==SQLITE_ROW ){ int rid = db_column_int(&q, 0); const char *zUuid = db_column_text(&q, 1); int size = db_column_int(&q, 2); n1++; fossil_print(" %d/%d\r", n1, total); fflush(stdout); if( size<0 ){ fossil_print("skip phantom %d %s\n", rid, zUuid); continue; /* Ignore phantoms */ } content_get(rid, &content); if( blob_size(&content)!=size ){ fossil_print("size mismatch on artifact %d: wanted %d but got %d\n", rid, size, blob_size(&content)); nErr++; } sha1sum_blob(&content, &cksum); if( fossil_strcmp(blob_str(&cksum), zUuid)!=0 ){ fossil_print("checksum mismatch on artifact %d: wanted %s but got %s\n", rid, zUuid, blob_str(&cksum)); nErr++; } blob_reset(&cksum); blob_reset(&content); n2++; } db_finalize(&q); fossil_print("%d non-phantom blobs (out of %d total) checked: %d errors\n", n2, n1, nErr); }
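/*
** Illustrative helper (not part of the original file): the per-artifact
** check at the heart of test-integrity, factored out.  Returns 0 when the
** SHA1 of the stored content matches the artifact's UUID, non-zero
** otherwise.
*/
static int example_check_artifact(int rid, const char *zUuid){
  Blob content, cksum;
  int rc;
  content_get(rid, &content);
  sha1sum_blob(&content, &cksum);
  rc = fossil_strcmp(blob_str(&cksum), zUuid);
  blob_reset(&cksum);
  blob_reset(&content);
  return rc;
}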
/* ** Parse the given URL. Populate members of the provided UrlData structure ** as follows: ** ** isFile True if FILE: ** isHttps True if HTTPS: ** isSsh True if SSH: ** protocol "http" or "https" or "file" ** name Hostname for HTTP:, HTTPS:, SSH:. Filename for FILE: ** port TCP port number for HTTP or HTTPS. ** dfltPort Default TCP port number (80 or 443). ** path Path name for HTTP or HTTPS. ** user Userid. ** passwd Password. ** hostname HOST:PORT or just HOST if port is the default. ** canonical The URL in canonical form, omitting the password ** */ void url_parse_local( const char *zUrl, unsigned int urlFlags, UrlData *pUrlData ){ int i, j, c; char *zFile = 0; if( zUrl==0 ){ zUrl = db_get("last-sync-url", 0); if( zUrl==0 ) return; if( pUrlData->passwd==0 ){ pUrlData->passwd = unobscure(db_get("last-sync-pw", 0)); } } if( strncmp(zUrl, "http://", 7)==0 || strncmp(zUrl, "https://", 8)==0 || strncmp(zUrl, "ssh://", 6)==0 ){ int iStart; char *zLogin; char *zExe; char cQuerySep = '?'; pUrlData->isFile = 0; pUrlData->useProxy = 0; if( zUrl[4]=='s' ){ pUrlData->isHttps = 1; pUrlData->protocol = "https"; pUrlData->dfltPort = 443; iStart = 8; }else if( zUrl[0]=='s' ){ pUrlData->isSsh = 1; pUrlData->protocol = "ssh"; pUrlData->dfltPort = 22; pUrlData->fossil = "fossil"; iStart = 6; }else{ pUrlData->isHttps = 0; pUrlData->protocol = "http"; pUrlData->dfltPort = 80; iStart = 7; } for(i=iStart; (c=zUrl[i])!=0 && c!='/' && c!='@'; i++){} if( c=='@' ){ /* Parse up the user-id and password */ for(j=iStart; j<i && zUrl[j]!=':'; j++){} pUrlData->user = mprintf("%.*s", j-iStart, &zUrl[iStart]); dehttpize(pUrlData->user); if( j<i ){ if( ( urlFlags & URL_REMEMBER ) && pUrlData->isSsh==0 ){ urlFlags |= URL_ASK_REMEMBER_PW; } pUrlData->passwd = mprintf("%.*s", i-j-1, &zUrl[j+1]); dehttpize(pUrlData->passwd); } if( pUrlData->isSsh ){ urlFlags &= ~URL_ASK_REMEMBER_PW; } zLogin = mprintf("%t@", pUrlData->user); for(j=i+1; (c=zUrl[j])!=0 && c!='/' && c!=':'; j++){} pUrlData->name = mprintf("%.*s", j-i-1, &zUrl[i+1]); i = j; }else{ int inSquare = 0; int n; for(i=iStart; (c=zUrl[i])!=0 && c!='/' && (inSquare || c!=':'); i++){ if( c=='[' ) inSquare = 1; if( c==']' ) inSquare = 0; } pUrlData->name = mprintf("%.*s", i-iStart, &zUrl[iStart]); n = strlen(pUrlData->name); if( pUrlData->name[0]=='[' && n>2 && pUrlData->name[n-1]==']' ){ pUrlData->name++; pUrlData->name[n-2] = 0; } zLogin = mprintf(""); } url_tolower(pUrlData->name); if( c==':' ){ pUrlData->port = 0; i++; while( (c = zUrl[i])!=0 && fossil_isdigit(c) ){ pUrlData->port = pUrlData->port*10 + c - '0'; i++; } pUrlData->hostname = mprintf("%s:%d", pUrlData->name, pUrlData->port); }else{ pUrlData->port = pUrlData->dfltPort; pUrlData->hostname = pUrlData->name; } dehttpize(pUrlData->name); pUrlData->path = mprintf("%s", &zUrl[i]); for(i=0; pUrlData->path[i] && pUrlData->path[i]!='?'; i++){} if( pUrlData->path[i] ){ pUrlData->path[i] = 0; i++; } zExe = mprintf(""); while( pUrlData->path[i]!=0 ){ char *zName, *zValue; zName = &pUrlData->path[i]; zValue = zName; while( pUrlData->path[i] && pUrlData->path[i]!='=' ){ i++; } if( pUrlData->path[i]=='=' ){ pUrlData->path[i] = 0; i++; zValue = &pUrlData->path[i]; while( pUrlData->path[i] && pUrlData->path[i]!='&' ){ i++; } } if( pUrlData->path[i] ){ pUrlData->path[i] = 0; i++; } if( fossil_strcmp(zName,"fossil")==0 ){ pUrlData->fossil = zValue; dehttpize(pUrlData->fossil); zExe = mprintf("%cfossil=%T", cQuerySep, pUrlData->fossil); cQuerySep = '&'; } } dehttpize(pUrlData->path); if( pUrlData->dfltPort==pUrlData->port ){ 
pUrlData->canonical = mprintf( "%s://%s%T%T%s", pUrlData->protocol, zLogin, pUrlData->name, pUrlData->path, zExe ); }else{ pUrlData->canonical = mprintf( "%s://%s%T:%d%T%s", pUrlData->protocol, zLogin, pUrlData->name, pUrlData->port, pUrlData->path, zExe ); } if( pUrlData->isSsh && pUrlData->path[1] ) pUrlData->path++; free(zLogin); }else if( strncmp(zUrl, "file:", 5)==0 ){ pUrlData->isFile = 1; if( zUrl[5]=='/' && zUrl[6]=='/' ){ i = 7; }else{ i = 5; } zFile = mprintf("%s", &zUrl[i]); }else if( file_isfile(zUrl) ){ pUrlData->isFile = 1; zFile = mprintf("%s", zUrl); }else if( file_isdir(zUrl)==1 ){ zFile = mprintf("%s/FOSSIL", zUrl); if( file_isfile(zFile) ){ pUrlData->isFile = 1; }else{ free(zFile); zFile = 0; fossil_fatal("unknown repository: %s", zUrl); } }else{ fossil_fatal("unknown repository: %s", zUrl); } if( urlFlags ) pUrlData->flags = urlFlags; if( pUrlData->isFile ){ Blob cfile; dehttpize(zFile); file_canonical_name(zFile, &cfile, 0); free(zFile); zFile = 0; pUrlData->protocol = "file"; pUrlData->path = ""; pUrlData->name = mprintf("%b", &cfile); pUrlData->canonical = mprintf("file://%T", pUrlData->name); blob_reset(&cfile); }else if( pUrlData->user!=0 && pUrlData->passwd==0 && (urlFlags & URL_PROMPT_PW) ){ url_prompt_for_password_local(pUrlData); }else if( pUrlData->user!=0 && ( urlFlags & URL_ASK_REMEMBER_PW ) ){ if( isatty(fileno(stdin)) ){ if( save_password_prompt(pUrlData->passwd) ){ pUrlData->flags = urlFlags |= URL_REMEMBER_PW; }else{ pUrlData->flags = urlFlags &= ~URL_REMEMBER_PW; } } } }
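/*
** Illustrative sketch (not from the original file): what the parser above
** is expected to produce for a typical HTTPS URL.  The field values shown
** are derived by reading the code, not captured from a run.
*/
static void example_parse_url(void){
  UrlData url;
  memset(&url, 0, sizeof(url));
  url_parse_local("https://alice@www.example.org:8443/cgi-bin/repo", 0, &url);
  /* Expected results:
  **   url.isHttps   == 1                  url.protocol == "https"
  **   url.user      == "alice"            url.name     == "www.example.org"
  **   url.port      == 8443               url.dfltPort == 443
  **   url.hostname  == "www.example.org:8443"
  **   url.path      == "/cgi-bin/repo"
  **   url.canonical == "https://alice@www.example.org:8443/cgi-bin/repo"
  */
}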
/* ** Impl of /json/artifact. This basically just determines the type of ** an artifact and forwards the real work to another function. */ cson_value * json_page_artifact(){ cson_object * pay = NULL; char const * zName = NULL; char const * zType = NULL; char const * zUuid = NULL; cson_value * entry = NULL; Blob uuid = empty_blob; int rc; int rid = 0; ArtifactDispatchEntry const * dispatcher = &ArtifactDispatchList[0]; zName = json_find_option_cstr2("name", NULL, NULL, g.json.dispatchDepth+1); if(!zName || !*zName) { json_set_err(FSL_JSON_E_MISSING_ARGS, "Missing 'name' argument."); return NULL; } if( validate16(zName, strlen(zName)) ){ if( db_exists("SELECT 1 FROM ticket WHERE tkt_uuid GLOB '%q*'", zName) ){ zType = "ticket"; goto handle_entry; } if( db_exists("SELECT 1 FROM tag WHERE tagname GLOB 'event-%q*'", zName) ){ zType = "tag"; goto handle_entry; } } blob_set(&uuid,zName); rc = name_to_uuid(&uuid,-1,"*"); /* FIXME: check for a filename if all else fails. */ if(1==rc){ g.json.resultCode = FSL_JSON_E_RESOURCE_NOT_FOUND; goto error; }else if(2==rc){ g.json.resultCode = FSL_JSON_E_AMBIGUOUS_UUID; goto error; } zUuid = blob_str(&uuid); rid = db_int(0, "SELECT rid FROM blob WHERE uuid=%Q", zUuid); if(0==rid){ g.json.resultCode = FSL_JSON_E_RESOURCE_NOT_FOUND; goto error; } if( db_exists("SELECT 1 FROM mlink WHERE mid=%d", rid) || db_exists("SELECT 1 FROM plink WHERE cid=%d", rid) || db_exists("SELECT 1 FROM plink WHERE pid=%d", rid)){ zType = "checkin"; goto handle_entry; }else if( db_exists("SELECT 1 FROM tagxref JOIN tag USING(tagid)" " WHERE rid=%d AND tagname LIKE 'wiki-%%'", rid) ){ zType = "wiki"; goto handle_entry; }else if( db_exists("SELECT 1 FROM tagxref JOIN tag USING(tagid)" " WHERE rid=%d AND tagname LIKE 'tkt-%%'", rid) ){ zType = "ticket"; goto handle_entry; }else if ( db_exists("SELECT 1 FROM mlink WHERE fid = %d", rid) ){ zType = "file"; goto handle_entry; }else{ g.json.resultCode = FSL_JSON_E_RESOURCE_NOT_FOUND; goto error; } error: assert( 0 != g.json.resultCode ); goto veryend; handle_entry: pay = cson_new_object(); assert( (NULL != zType) && "Internal dispatching error." ); for( ; dispatcher->name; ++dispatcher ){ if(0!=fossil_strcmp(dispatcher->name, zType)){ continue; }else{ entry = (*dispatcher->func)(pay, rid); break; } } if(!g.json.resultCode){ assert( NULL != entry ); assert( NULL != zType ); cson_object_set( pay, "type", json_new_string(zType) ); cson_object_set( pay, "uuid", json_new_string(zUuid) ); /*cson_object_set( pay, "name", json_new_string(zName ? zName : zUuid) );*/ /*cson_object_set( pay, "rid", cson_value_new_integer(rid) );*/ if(cson_value_is_object(entry) && (cson_value_get_object(entry) != pay)){ cson_object_set(pay, "artifact", entry); } } veryend: blob_reset(&uuid); if(g.json.resultCode && pay){ cson_free_object(pay); pay = NULL; } return cson_object_value(pay); }
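/*
** Illustrative sketch (not part of the original file): the dispatch idiom
** used above, in isolation.  Each ArtifactDispatchEntry pairs a type name
** with a handler; the list is assumed to be terminated by an entry whose
** name is NULL, as the loop in json_page_artifact() relies on.
*/
static cson_value *example_dispatch(ArtifactDispatchEntry const *aList,
                                    char const *zType,
                                    cson_object *pay, int rid){
  ArtifactDispatchEntry const *p;
  for(p=aList; p->name; ++p){
    if( 0==fossil_strcmp(p->name, zType) ){
      return (*p->func)(pay, rid);
    }
  }
  return NULL;
}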
/* ** Generates an artifact Object for the given rid, ** which must refer to a Checkin. ** ** Returned value is NULL or an Object owned by the caller. */ cson_value * json_artifact_for_ci( int rid, char showFiles ){ cson_value * v = NULL; Stmt q = empty_Stmt; static cson_value * eventTypeLabel = NULL; if(!eventTypeLabel){ eventTypeLabel = json_new_string("checkin"); json_gc_add("$EVENT_TYPE_LABEL(commit)", eventTypeLabel); } db_prepare(&q, "SELECT b.uuid, " " cast(strftime('%%s',e.mtime) as int), " " strftime('%%s',e.omtime)," " e.user, " " e.comment" " FROM blob b, event e" " WHERE b.rid=%d" " AND e.objid=%d", rid, rid ); if( db_step(&q)==SQLITE_ROW ){ cson_object * o; cson_value * tmpV = NULL; const char *zUuid = db_column_text(&q, 0); const char *zUser; const char *zComment; char * zEUser, * zEComment; i64 mtime, omtime; v = cson_value_new_object(); o = cson_value_get_object(v); #define SET(K,V) cson_object_set(o,(K), (V)) SET("type", eventTypeLabel ); SET("uuid",json_new_string(zUuid)); SET("isLeaf", cson_value_new_bool(is_a_leaf(rid))); mtime = db_column_int64(&q,1); SET("timestamp",json_new_int(mtime)); omtime = db_column_int64(&q,2); if(omtime && (omtime!=mtime)){ SET("originTime",json_new_int(omtime)); } zUser = db_column_text(&q,3); zEUser = db_text(0, "SELECT value FROM tagxref WHERE tagid=%d AND rid=%d", TAG_USER, rid); if(zEUser){ SET("user", json_new_string(zEUser)); if(0!=fossil_strcmp(zEUser,zUser)){ SET("originUser",json_new_string(zUser)); } free(zEUser); }else{ SET("user",json_new_string(zUser)); } zComment = db_column_text(&q,4); zEComment = db_text(0, "SELECT value FROM tagxref WHERE tagid=%d AND rid=%d", TAG_COMMENT, rid); if(zEComment){ SET("comment",json_new_string(zEComment)); if(0 != fossil_strcmp(zEComment,zComment)){ SET("originComment", json_new_string(zComment)); } free(zEComment); }else{ SET("comment",json_new_string(zComment)); } tmpV = json_parent_uuids_for_ci(rid); if(tmpV){ SET("parents", tmpV); } tmpV = json_tags_for_checkin_rid(rid,0); if(tmpV){ SET("tags",tmpV); } if( showFiles ){ tmpV = json_get_changed_files(rid, 1); if(tmpV){ SET("files",tmpV); } } #undef SET } db_finalize(&q); return v; }
/*
** Compare two submenu items for sorting purposes
*/
static int submenuCompare(const void *a, const void *b){
  const struct Submenu *A = (const struct Submenu*)a;
  const struct Submenu *B = (const struct Submenu*)b;
  return fossil_strcmp(A->zLabel, B->zLabel);
}
/* ** WEBPAGE: doc ** URL: /doc?name=BASELINE/PATH ** URL: /doc/BASELINE/PATH ** ** BASELINE can be either a baseline uuid prefix or magic words "tip" ** to mean the most recently checked in baseline or "ckout" to mean the ** content of the local checkout, if any. PATH is the relative pathname ** of some file. This method returns the file content. ** ** If PATH matches the patterns *.wiki or *.txt then formatting content ** is added before returning the file. For all other names, the content ** is returned straight without any interpretation or processing. */ void doc_page(void){ const char *zName; /* Argument to the /doc page */ const char *zMime; /* Document MIME type */ int vid = 0; /* Artifact of baseline */ int rid = 0; /* Artifact of file */ int i; /* Loop counter */ Blob filebody; /* Content of the documentation file */ char zBaseline[UUID_SIZE+1]; /* Baseline UUID */ login_check_credentials(); if( !g.perm.Read ){ login_needed(); return; } zName = PD("name", "tip/index.wiki"); for(i=0; zName[i] && zName[i]!='/'; i++){} if( zName[i]==0 || i>UUID_SIZE ){ zName = "index.html"; goto doc_not_found; } memcpy(zBaseline, zName, i); zBaseline[i] = 0; zName += i; while( zName[0]=='/' ){ zName++; } if( !file_is_simple_pathname(zName) ){ int n = strlen(zName); if( n>0 && zName[n-1]=='/' ){ zName = mprintf("%sindex.html", zName); if( !file_is_simple_pathname(zName) ){ goto doc_not_found; } }else{ goto doc_not_found; } } if( fossil_strcmp(zBaseline,"ckout")==0 && db_open_local()==0 ){ sqlite3_snprintf(sizeof(zBaseline), zBaseline, "tip"); } if( fossil_strcmp(zBaseline,"ckout")==0 ){ /* Read from the local checkout */ char *zFullpath; db_must_be_within_tree(); zFullpath = mprintf("%s/%s", g.zLocalRoot, zName); if( !file_isfile(zFullpath) ){ goto doc_not_found; } if( blob_read_from_file(&filebody, zFullpath)<0 ){ goto doc_not_found; } }else{ db_begin_transaction(); if( fossil_strcmp(zBaseline,"tip")==0 ){ vid = db_int(0, "SELECT objid FROM event WHERE type='ci'" " ORDER BY mtime DESC LIMIT 1"); }else{ vid = name_to_typed_rid(zBaseline, "ci"); } /* Create the baseline cache if it does not already exist */ db_multi_exec( "CREATE TABLE IF NOT EXISTS vcache(\n" " vid INTEGER, -- baseline ID\n" " fname TEXT, -- filename\n" " rid INTEGER, -- artifact ID\n" " UNIQUE(vid,fname,rid)\n" ")" ); /* Check to see if the documentation file artifact ID is contained ** in the baseline cache */ rid = db_int(0, "SELECT rid FROM vcache" " WHERE vid=%d AND fname=%Q", vid, zName); if( rid==0 && db_exists("SELECT 1 FROM vcache WHERE vid=%d", vid) ){ goto doc_not_found; } if( rid==0 ){ Stmt s; Manifest *pM; ManifestFile *pFile; /* Add the vid baseline to the cache */ if( db_int(0, "SELECT count(*) FROM vcache")>10000 ){ db_multi_exec("DELETE FROM vcache"); } pM = manifest_get(vid, CFTYPE_MANIFEST); if( pM==0 ){ goto doc_not_found; } db_prepare(&s, "INSERT INTO vcache(vid,fname,rid)" " SELECT %d, :fname, rid FROM blob" " WHERE uuid=:uuid", vid ); manifest_file_rewind(pM); while( (pFile = manifest_file_next(pM,0))!=0 ){ db_bind_text(&s, ":fname", pFile->zName); db_bind_text(&s, ":uuid", pFile->zUuid); db_step(&s); db_reset(&s); } db_finalize(&s); manifest_destroy(pM); /* Try again to find the file */ rid = db_int(0, "SELECT rid FROM vcache" " WHERE vid=%d AND fname=%Q", vid, zName); } if( rid==0 ){ goto doc_not_found; } /* Get the file content */ if( content_get(rid, &filebody)==0 ){ goto doc_not_found; } db_end_transaction(0); } /* The file is now contained in the filebody blob. 
Deliver the ** file to the user */ zMime = P("mimetype"); if( zMime==0 ){ zMime = mimetype_from_name(zName); } Th_Store("doc_name", zName); Th_Store("doc_version", db_text(0, "SELECT '[' || substr(uuid,1,10) || ']'" " FROM blob WHERE rid=%d", vid)); Th_Store("doc_date", db_text(0, "SELECT datetime(mtime) FROM event" " WHERE objid=%d AND type='ci'", vid)); if( fossil_strcmp(zMime, "application/x-fossil-wiki")==0 ){ Blob title, tail; if( wiki_find_title(&filebody, &title, &tail) ){ style_header(blob_str(&title)); wiki_convert(&tail, 0, 0); }else{ style_header("Documentation"); wiki_convert(&filebody, 0, 0); } style_footer(); }else if( fossil_strcmp(zMime, "text/plain")==0 ){ style_header("Documentation"); @ <blockquote><pre>
/* ** COMMAND: update ** ** Usage: %fossil update ?OPTIONS? ?VERSION? ?FILES...? ** ** Change the version of the current checkout to VERSION. Any ** uncommitted changes are retained and applied to the new checkout. ** ** The VERSION argument can be a specific version or tag or branch ** name. If the VERSION argument is omitted, then the leaf of the ** subtree that begins at the current version is used, if there is ** only a single leaf. VERSION can also be "current" to select the ** leaf of the current version or "latest" to select the most recent ** check-in. ** ** If one or more FILES are listed after the VERSION then only the ** named files are candidates to be updated, and any updates to them ** will be treated as edits to the current version. Using a directory ** name for one of the FILES arguments is the same as using every ** subdirectory and file beneath that directory. ** ** If FILES is omitted, all files in the current checkout are subject ** to being updated and the version of the current checkout is changed ** to VERSION. Any uncommitted changes are retained and applied to the ** new checkout. ** ** The -n or --dry-run option causes this command to do a "dry run". ** It prints out what would have happened but does not actually make ** any changes to the current checkout or the repository. ** ** The -v or --verbose option prints status information about ** unchanged files in addition to those file that actually do change. ** ** Options: ** --case-sensitive <BOOL> override case-sensitive setting ** --debug print debug information on stdout ** --latest acceptable in place of VERSION, update to latest version ** --force-missing force update if missing content after sync ** -n|--dry-run If given, display instead of run actions ** -v|--verbose print status information about all files ** -W|--width <num> Width of lines (default is to auto-detect). Must be >20 ** or 0 (= no limit, resulting in a single line per entry). ** ** See also: revert */ void update_cmd(void) { int vid; /* Current version */ int tid=0; /* Target version - version we are changing to */ Stmt q; int latestFlag; /* --latest. Pick the latest version if true */ int dryRunFlag; /* -n or --dry-run. Do a dry run */ int verboseFlag; /* -v or --verbose. Output extra information */ int forceMissingFlag; /* --force-missing. Continue if missing content */ int debugFlag; /* --debug option */ int setmtimeFlag; /* --setmtime. Set mtimes on files */ int nChng; /* Number of file renames */ int *aChng; /* Array of file renames */ int i; /* Loop counter */ int nConflict = 0; /* Number of merge conflicts */ int nOverwrite = 0; /* Number of unmanaged files overwritten */ int nUpdate = 0; /* Number of changes of any kind */ int width; /* Width of printed comment lines */ Stmt mtimeXfer; /* Statement to transfer mtimes */ const char *zWidth; /* Width option string value */ if( !internalUpdate ) { undo_capture_command_line(); url_proxy_options(); } zWidth = find_option("width","W",1); if( zWidth ) { width = atoi(zWidth); if( (width!=0) && (width<=20) ) { fossil_fatal("-W|--width value must be >20 or 0"); } } else { width = -1; } latestFlag = find_option("latest",0, 0)!=0; dryRunFlag = find_option("dry-run","n",0)!=0; if( !dryRunFlag ) { dryRunFlag = find_option("nochange",0,0)!=0; /* deprecated */ } verboseFlag = find_option("verbose","v",0)!=0; forceMissingFlag = find_option("force-missing",0,0)!=0; debugFlag = find_option("debug",0,0)!=0; setmtimeFlag = find_option("setmtime",0,0)!=0; /* We should be done with options.. 
*/ verify_all_options(); db_must_be_within_tree(); vid = db_lget_int("checkout", 0); user_select(); if( !dryRunFlag && !internalUpdate ) { if( autosync_loop(SYNC_PULL + SYNC_VERBOSE*verboseFlag, db_get_int("autosync-tries", 1)) ) { fossil_fatal("Cannot proceed with update"); } } /* Create any empty directories now, as well as after the update, ** so changes in settings are reflected now */ if( !dryRunFlag ) ensure_empty_dirs_created(); if( internalUpdate ) { tid = internalUpdate; } else if( g.argc>=3 ) { if( fossil_strcmp(g.argv[2], "current")==0 ) { /* If VERSION is "current", then use the same algorithm to find the ** target as if VERSION were omitted. */ } else if( fossil_strcmp(g.argv[2], "latest")==0 ) { /* If VERSION is "latest", then use the same algorithm to find the ** target as if VERSION were omitted and the --latest flag is present. */ latestFlag = 1; } else { tid = name_to_typed_rid(g.argv[2],"ci"); if( tid==0 || !is_a_version(tid) ) { fossil_fatal("no such check-in: %s", g.argv[2]); } } } /* If no VERSION is specified on the command-line, then look for a ** descendant of the current version. If there are multiple descendants, ** look for one from the same branch as the current version. If there ** are still multiple descendants, show them all and refuse to update ** until the user selects one. */ if( tid==0 ) { int closeCode = 1; compute_leaves(vid, closeCode); if( !db_exists("SELECT 1 FROM leaves") ) { closeCode = 0; compute_leaves(vid, closeCode); } if( !latestFlag && db_int(0, "SELECT count(*) FROM leaves")>1 ) { db_multi_exec( "DELETE FROM leaves WHERE rid NOT IN" " (SELECT leaves.rid FROM leaves, tagxref" " WHERE leaves.rid=tagxref.rid AND tagxref.tagid=%d" " AND tagxref.value==(SELECT value FROM tagxref" " WHERE tagid=%d AND rid=%d))", TAG_BRANCH, TAG_BRANCH, vid ); if( db_int(0, "SELECT count(*) FROM leaves")>1 ) { compute_leaves(vid, closeCode); db_prepare(&q, "%s " " AND event.objid IN leaves" " ORDER BY event.mtime DESC", timeline_query_for_tty() ); print_timeline(&q, -100, width, 0); db_finalize(&q); fossil_fatal("Multiple descendants"); } } tid = db_int(0, "SELECT rid FROM leaves, event" " WHERE event.objid=leaves.rid" " ORDER BY event.mtime DESC"); if( tid==0 ) tid = vid; } if( tid==0 ) { return; } db_begin_transaction(); vfile_check_signature(vid, CKSIG_ENOTFILE); if( !dryRunFlag && !internalUpdate ) undo_begin(); if( load_vfile_from_rid(tid) && !forceMissingFlag ) { fossil_fatal("missing content, unable to update"); }; /* ** The record.fn field is used to match files against each other. The ** FV table contains one row for each unique filename in ** the current checkout, the pivot, and the version being merged. */ db_multi_exec( "DROP TABLE IF EXISTS fv;" "CREATE TEMP TABLE fv(" " fn TEXT %s PRIMARY KEY," /* The filename relative to root */ " idv INTEGER," /* VFILE entry for current version */ " idt INTEGER," /* VFILE entry for target version */ " chnged BOOLEAN," /* True if current version has been edited */ " islinkv BOOLEAN," /* True if current file is a link */ " islinkt BOOLEAN," /* True if target file is a link */ " ridv INTEGER," /* Record ID for current version */ " ridt INTEGER," /* Record ID for target */ " isexe BOOLEAN," /* Does target have execute permission?
*/ " deleted BOOLEAN DEFAULT 0,"/* File marked by "rm" to become unmanaged */ " fnt TEXT %s" /* Filename of same file on target version */ ");", filename_collation(), filename_collation() ); /* Add files found in the current version */ db_multi_exec( "INSERT OR IGNORE INTO fv(fn,fnt,idv,idt,ridv,ridt,isexe,chnged,deleted)" " SELECT pathname, pathname, id, 0, rid, 0, isexe, chnged, deleted" " FROM vfile WHERE vid=%d", vid ); /* Compute file name changes on V->T. Record name changes in files that ** have changed locally. */ if( vid ) { find_filename_changes(vid, tid, 1, &nChng, &aChng, debugFlag ? "V->T": 0); if( nChng ) { for(i=0; i<nChng; i++) { db_multi_exec( "UPDATE fv" " SET fnt=(SELECT name FROM filename WHERE fnid=%d)" " WHERE fn=(SELECT name FROM filename WHERE fnid=%d) AND chnged", aChng[i*2+1], aChng[i*2] ); } fossil_free(aChng); } } /* Add files found in the target version T but missing from the current ** version V. */ db_multi_exec( "INSERT OR IGNORE INTO fv(fn,fnt,idv,idt,ridv,ridt,isexe,chnged)" " SELECT pathname, pathname, 0, 0, 0, 0, isexe, 0 FROM vfile" " WHERE vid=%d" " AND pathname %s NOT IN (SELECT fnt FROM fv)", tid, filename_collation() ); /* ** Compute the file version ids for T */ db_multi_exec( "UPDATE fv SET" " idt=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnt=pathname),0)," " ridt=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnt=pathname),0)", tid, tid ); /* ** Add islink information */ db_multi_exec( "UPDATE fv SET" " islinkv=coalesce((SELECT islink FROM vfile" " WHERE vid=%d AND fnt=pathname),0)," " islinkt=coalesce((SELECT islink FROM vfile" " WHERE vid=%d AND fnt=pathname),0)", vid, tid ); if( debugFlag ) { db_prepare(&q, "SELECT rowid, fn, fnt, chnged, ridv, ridt, isexe," " islinkv, islinkt FROM fv" ); while( db_step(&q)==SQLITE_ROW ) { fossil_print("%3d: ridv=%-4d ridt=%-4d chnged=%d isexe=%d" " islinkv=%d islinkt=%d\n", db_column_int(&q, 0), db_column_int(&q, 4), db_column_int(&q, 5), db_column_int(&q, 3), db_column_int(&q, 6), db_column_int(&q, 7), db_column_int(&q, 8)); fossil_print(" fnv = [%s]\n", db_column_text(&q, 1)); fossil_print(" fnt = [%s]\n", db_column_text(&q, 2)); } db_finalize(&q); } /* If FILES appear on the command-line, remove from the "fv" table ** every entry that is not named on the command-line or which is not ** in a directory named on the command-line. */ if( g.argc>=4 ) { Blob sql; /* SQL statement to purge unwanted entries */ Blob treename; /* Normalized filename */ int i; /* Loop counter */ const char *zSep; /* Term separator */ blob_zero(&sql); blob_append(&sql, "DELETE FROM fv WHERE ", -1); zSep = ""; for(i=3; i<g.argc; i++) { file_tree_name(g.argv[i], &treename, 0, 1); if( file_wd_isdir(g.argv[i])==1 ) { if( blob_size(&treename) != 1 || blob_str(&treename)[0] != '.' 
) { blob_append_sql(&sql, "%sfn NOT GLOB '%q/*' ", zSep /*safe-for-%s*/, blob_str(&treename)); } else { blob_reset(&sql); break; } } else { blob_append_sql(&sql, "%sfn<>%Q ", zSep /*safe-for-%s*/, blob_str(&treename)); } zSep = "AND "; blob_reset(&treename); } db_multi_exec("%s", blob_sql_text(&sql)); blob_reset(&sql); } /* ** Alter the content of the checkout so that it conforms with the ** target */ db_prepare(&q, "SELECT fn, idv, ridv, idt, ridt, chnged, fnt," " isexe, islinkv, islinkt, deleted FROM fv ORDER BY 1" ); db_prepare(&mtimeXfer, "UPDATE vfile SET mtime=(SELECT mtime FROM vfile WHERE id=:idv)" " WHERE id=:idt" ); assert( g.zLocalRoot!=0 ); assert( strlen(g.zLocalRoot)>0 ); assert( g.zLocalRoot[strlen(g.zLocalRoot)-1]=='/' ); while( db_step(&q)==SQLITE_ROW ) { const char *zName = db_column_text(&q, 0); /* The filename from root */ int idv = db_column_int(&q, 1); /* VFILE entry for current */ int ridv = db_column_int(&q, 2); /* RecordID for current */ int idt = db_column_int(&q, 3); /* VFILE entry for target */ int ridt = db_column_int(&q, 4); /* RecordID for target */ int chnged = db_column_int(&q, 5); /* Current is edited */ const char *zNewName = db_column_text(&q,6);/* New filename */ int isexe = db_column_int(&q, 7); /* EXE perm for new file */ int islinkv = db_column_int(&q, 8); /* Is current file is a link */ int islinkt = db_column_int(&q, 9); /* Is target file is a link */ int deleted = db_column_int(&q, 10); /* Marked for deletion */ char *zFullPath; /* Full pathname of the file */ char *zFullNewPath; /* Full pathname of dest */ char nameChng; /* True if the name changed */ zFullPath = mprintf("%s%s", g.zLocalRoot, zName); zFullNewPath = mprintf("%s%s", g.zLocalRoot, zNewName); nameChng = fossil_strcmp(zName, zNewName); nUpdate++; if( deleted ) { db_multi_exec("UPDATE vfile SET deleted=1 WHERE id=%d", idt); } if( idv>0 && ridv==0 && idt>0 && ridt>0 ) { /* Conflict. This file has been added to the current checkout ** but also exists in the target checkout. Use the current version. */ fossil_print("CONFLICT %s\n", zName); nConflict++; } else if( idt>0 && idv==0 ) { /* File added in the target. */ if( file_wd_isfile_or_link(zFullPath) ) { fossil_print("ADD %s - overwrites an unmanaged file\n", zName); nOverwrite++; } else { fossil_print("ADD %s\n", zName); } if( !dryRunFlag && !internalUpdate ) undo_save(zName); if( !dryRunFlag ) vfile_to_disk(0, idt, 0, 0); } else if( idt>0 && idv>0 && ridt!=ridv && (chnged==0 || deleted) ) { /* The file is unedited. Change it to the target version */ if( deleted ) { fossil_print("UPDATE %s - change to unmanaged file\n", zName); } else { fossil_print("UPDATE %s\n", zName); } if( !dryRunFlag && !internalUpdate ) undo_save(zName); if( !dryRunFlag ) vfile_to_disk(0, idt, 0, 0); } else if( idt>0 && idv>0 && !deleted && file_wd_size(zFullPath)<0 ) { /* The file missing from the local check-out. Restore it to the ** version that appears in the target. */ fossil_print("UPDATE %s\n", zName); if( !dryRunFlag && !internalUpdate ) undo_save(zName); if( !dryRunFlag ) vfile_to_disk(0, idt, 0, 0); } else if( idt==0 && idv>0 ) { if( ridv==0 ) { /* Added in current checkout. Continue to hold the file as ** as an addition */ db_multi_exec("UPDATE vfile SET vid=%d WHERE id=%d", tid, idv); } else if( chnged ) { /* Edited locally but deleted from the target. Do not track the ** file but keep the edited version around. 
*/ fossil_print("CONFLICT %s - edited locally but deleted by update\n", zName); nConflict++; } else { fossil_print("REMOVE %s\n", zName); if( !dryRunFlag && !internalUpdate ) undo_save(zName); if( !dryRunFlag ) file_delete(zFullPath); } } else if( idt>0 && idv>0 && ridt!=ridv && chnged ) { /* Merge the changes in the current tree into the target version */ Blob r, t, v; int rc; if( nameChng ) { fossil_print("MERGE %s -> %s\n", zName, zNewName); } else { fossil_print("MERGE %s\n", zName); } if( islinkv || islinkt /* || file_wd_islink(zFullPath) */ ) { fossil_print("***** Cannot merge symlink %s\n", zNewName); nConflict++; } else { unsigned mergeFlags = dryRunFlag ? MERGE_DRYRUN : 0; if( !dryRunFlag && !internalUpdate ) undo_save(zName); content_get(ridt, &t); content_get(ridv, &v); rc = merge_3way(&v, zFullPath, &t, &r, mergeFlags); if( rc>=0 ) { if( !dryRunFlag ) { blob_write_to_file(&r, zFullNewPath); file_wd_setexe(zFullNewPath, isexe); } if( rc>0 ) { fossil_print("***** %d merge conflicts in %s\n", rc, zNewName); nConflict++; } } else { if( !dryRunFlag ) { blob_write_to_file(&t, zFullNewPath); file_wd_setexe(zFullNewPath, isexe); } fossil_print("***** Cannot merge binary file %s\n", zNewName); nConflict++; } } if( nameChng && !dryRunFlag ) file_delete(zFullPath); blob_reset(&v); blob_reset(&t); blob_reset(&r); } else { nUpdate--; if( chnged ) { if( verboseFlag ) fossil_print("EDITED %s\n", zName); } else { db_bind_int(&mtimeXfer, ":idv", idv); db_bind_int(&mtimeXfer, ":idt", idt); db_step(&mtimeXfer); db_reset(&mtimeXfer); if( verboseFlag ) fossil_print("UNCHANGED %s\n", zName); } } free(zFullPath); free(zFullNewPath); } db_finalize(&q); db_finalize(&mtimeXfer); fossil_print("%.79c\n",'-'); if( nUpdate==0 ) { show_common_info(tid, "checkout:", 1, 0); fossil_print("%-13s None. Already up-to-date\n", "changes:"); } else { show_common_info(tid, "updated-to:", 1, 0); fossil_print("%-13s %d file%s modified.\n", "changes:", nUpdate, nUpdate>1 ? "s" : ""); } /* Report on conflicts */ if( !dryRunFlag ) { Stmt q; int nMerge = 0; db_prepare(&q, "SELECT uuid, id FROM vmerge JOIN blob ON merge=rid" " WHERE id<=0"); while( db_step(&q)==SQLITE_ROW ) { const char *zLabel = "merge"; switch( db_column_int(&q, 1) ) { case -1: zLabel = "cherrypick merge"; break; case -2: zLabel = "backout merge"; break; } fossil_warning("uncommitted %s against %S.", zLabel, db_column_text(&q, 0)); nMerge++; } db_finalize(&q); leaf_ambiguity_warning(tid, tid); if( nConflict ) { if( internalUpdate ) { internalConflictCnt = nConflict; nConflict = 0; } else { fossil_warning("WARNING: %d merge conflicts", nConflict); } } if( nOverwrite ) { fossil_warning("WARNING: %d unmanaged files were overwritten", nOverwrite); } if( nMerge ) { fossil_warning("WARNING: %d uncommitted prior merges", nMerge); } } /* ** Clean up the mid and pid VFILE entries. Then commit the changes. */ if( dryRunFlag ) { db_end_transaction(1); /* With --dry-run, rollback changes */ } else { ensure_empty_dirs_created(); if( g.argc<=3 ) { /* All files updated. Shift the current checkout to the target. */ db_multi_exec("DELETE FROM vfile WHERE vid!=%d", tid); checkout_set_all_exe(tid); manifest_to_disk(tid); db_lset_int("checkout", tid); } else { /* A subset of files have been checked out. Keep the current ** checkout unchanged. */ db_multi_exec("DELETE FROM vfile WHERE vid!=%d", vid); } if( !internalUpdate ) undo_finish(); if( setmtimeFlag ) vfile_check_signature(tid, CKSIG_SETMTIME); db_end_transaction(0); } }
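/*
** Illustrative sketch (not from the original file): the dry-run idiom used
** at the end of update.  Passing a non-zero argument to
** db_end_transaction() rolls the transaction back, so a dry run can share
** all of the preceding logic and simply discard its effects.
*/
static void example_dry_run(int dryRunFlag){
  db_begin_transaction();
  /* ... work that writes to the repository and checkout tables ... */
  db_end_transaction(dryRunFlag ? 1 : 0);  /* 1 = rollback, 0 = commit */
}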
/* ** Guess the mime-type of a document based on its name. */ const char *mimetype_from_name(const char *zName){ const char *z; int i; int first, last; int len; char zSuffix[20]; /* A table of mimetypes based on file suffixes. ** Suffixes must be in sorted order so that we can do a binary ** search to find the mime-type */ static const struct { const char *zSuffix; /* The file suffix */ int size; /* Length of the suffix */ const char *zMimetype; /* The corresponding mimetype */ } aMime[] = { { "ai", 2, "application/postscript" }, { "aif", 3, "audio/x-aiff" }, { "aifc", 4, "audio/x-aiff" }, { "aiff", 4, "audio/x-aiff" }, { "arj", 3, "application/x-arj-compressed" }, { "asc", 3, "text/plain" }, { "asf", 3, "video/x-ms-asf" }, { "asx", 3, "video/x-ms-asx" }, { "au", 2, "audio/ulaw" }, { "avi", 3, "video/x-msvideo" }, { "bat", 3, "application/x-msdos-program" }, { "bcpio", 5, "application/x-bcpio" }, { "bin", 3, "application/octet-stream" }, { "c", 1, "text/plain" }, { "cc", 2, "text/plain" }, { "ccad", 4, "application/clariscad" }, { "cdf", 3, "application/x-netcdf" }, { "class", 5, "application/octet-stream" }, { "cod", 3, "application/vnd.rim.cod" }, { "com", 3, "application/x-msdos-program" }, { "cpio", 4, "application/x-cpio" }, { "cpt", 3, "application/mac-compactpro" }, { "csh", 3, "application/x-csh" }, { "css", 3, "text/css" }, { "dcr", 3, "application/x-director" }, { "deb", 3, "application/x-debian-package" }, { "dir", 3, "application/x-director" }, { "dl", 2, "video/dl" }, { "dms", 3, "application/octet-stream" }, { "doc", 3, "application/msword" }, { "drw", 3, "application/drafting" }, { "dvi", 3, "application/x-dvi" }, { "dwg", 3, "application/acad" }, { "dxf", 3, "application/dxf" }, { "dxr", 3, "application/x-director" }, { "eps", 3, "application/postscript" }, { "etx", 3, "text/x-setext" }, { "exe", 3, "application/octet-stream" }, { "ez", 2, "application/andrew-inset" }, { "f", 1, "text/plain" }, { "f90", 3, "text/plain" }, { "fli", 3, "video/fli" }, { "flv", 3, "video/flv" }, { "gif", 3, "image/gif" }, { "gl", 2, "video/gl" }, { "gtar", 4, "application/x-gtar" }, { "gz", 2, "application/x-gzip" }, { "h", 1, "text/plain" }, { "hdf", 3, "application/x-hdf" }, { "hh", 2, "text/plain" }, { "hqx", 3, "application/mac-binhex40" }, { "htm", 3, "text/html" }, { "html", 4, "text/html" }, { "ice", 3, "x-conference/x-cooltalk" }, { "ief", 3, "image/ief" }, { "iges", 4, "model/iges" }, { "igs", 3, "model/iges" }, { "ips", 3, "application/x-ipscript" }, { "ipx", 3, "application/x-ipix" }, { "jad", 3, "text/vnd.sun.j2me.app-descriptor" }, { "jar", 3, "application/java-archive" }, { "jpe", 3, "image/jpeg" }, { "jpeg", 4, "image/jpeg" }, { "jpg", 3, "image/jpeg" }, { "js", 2, "application/x-javascript" }, { "kar", 3, "audio/midi" }, { "latex", 5, "application/x-latex" }, { "lha", 3, "application/octet-stream" }, { "lsp", 3, "application/x-lisp" }, { "lzh", 3, "application/octet-stream" }, { "m", 1, "text/plain" }, { "m3u", 3, "audio/x-mpegurl" }, { "man", 3, "application/x-troff-man" }, { "me", 2, "application/x-troff-me" }, { "mesh", 4, "model/mesh" }, { "mid", 3, "audio/midi" }, { "midi", 4, "audio/midi" }, { "mif", 3, "application/x-mif" }, { "mime", 4, "www/mime" }, { "mov", 3, "video/quicktime" }, { "movie", 5, "video/x-sgi-movie" }, { "mp2", 3, "audio/mpeg" }, { "mp3", 3, "audio/mpeg" }, { "mpe", 3, "video/mpeg" }, { "mpeg", 4, "video/mpeg" }, { "mpg", 3, "video/mpeg" }, { "mpga", 4, "audio/mpeg" }, { "ms", 2, "application/x-troff-ms" }, { "msh", 3, "model/mesh" }, { "nc", 2, 
"application/x-netcdf" }, { "oda", 3, "application/oda" }, { "ogg", 3, "application/ogg" }, { "ogm", 3, "application/ogg" }, { "pbm", 3, "image/x-portable-bitmap" }, { "pdb", 3, "chemical/x-pdb" }, { "pdf", 3, "application/pdf" }, { "pgm", 3, "image/x-portable-graymap" }, { "pgn", 3, "application/x-chess-pgn" }, { "pgp", 3, "application/pgp" }, { "pl", 2, "application/x-perl" }, { "pm", 2, "application/x-perl" }, { "png", 3, "image/png" }, { "pnm", 3, "image/x-portable-anymap" }, { "pot", 3, "application/mspowerpoint" }, { "ppm", 3, "image/x-portable-pixmap" }, { "pps", 3, "application/mspowerpoint" }, { "ppt", 3, "application/mspowerpoint" }, { "ppz", 3, "application/mspowerpoint" }, { "pre", 3, "application/x-freelance" }, { "prt", 3, "application/pro_eng" }, { "ps", 2, "application/postscript" }, { "qt", 2, "video/quicktime" }, { "ra", 2, "audio/x-realaudio" }, { "ram", 3, "audio/x-pn-realaudio" }, { "rar", 3, "application/x-rar-compressed" }, { "ras", 3, "image/cmu-raster" }, { "rgb", 3, "image/x-rgb" }, { "rm", 2, "audio/x-pn-realaudio" }, { "roff", 4, "application/x-troff" }, { "rpm", 3, "audio/x-pn-realaudio-plugin" }, { "rtf", 3, "text/rtf" }, { "rtx", 3, "text/richtext" }, { "scm", 3, "application/x-lotusscreencam" }, { "set", 3, "application/set" }, { "sgm", 3, "text/sgml" }, { "sgml", 4, "text/sgml" }, { "sh", 2, "application/x-sh" }, { "shar", 4, "application/x-shar" }, { "silo", 4, "model/mesh" }, { "sit", 3, "application/x-stuffit" }, { "skd", 3, "application/x-koan" }, { "skm", 3, "application/x-koan" }, { "skp", 3, "application/x-koan" }, { "skt", 3, "application/x-koan" }, { "smi", 3, "application/smil" }, { "smil", 4, "application/smil" }, { "snd", 3, "audio/basic" }, { "sol", 3, "application/solids" }, { "spl", 3, "application/x-futuresplash" }, { "src", 3, "application/x-wais-source" }, { "step", 4, "application/STEP" }, { "stl", 3, "application/SLA" }, { "stp", 3, "application/STEP" }, { "sv4cpio", 7, "application/x-sv4cpio" }, { "sv4crc", 6, "application/x-sv4crc" }, { "svg", 3, "image/svg+xml" }, { "swf", 3, "application/x-shockwave-flash" }, { "t", 1, "application/x-troff" }, { "tar", 3, "application/x-tar" }, { "tcl", 3, "application/x-tcl" }, { "tex", 3, "application/x-tex" }, { "texi", 4, "application/x-texinfo" }, { "texinfo", 7, "application/x-texinfo" }, { "tgz", 3, "application/x-tar-gz" }, { "tif", 3, "image/tiff" }, { "tiff", 4, "image/tiff" }, { "tr", 2, "application/x-troff" }, { "tsi", 3, "audio/TSP-audio" }, { "tsp", 3, "application/dsptype" }, { "tsv", 3, "text/tab-separated-values" }, { "txt", 3, "text/plain" }, { "unv", 3, "application/i-deas" }, { "ustar", 5, "application/x-ustar" }, { "vcd", 3, "application/x-cdlink" }, { "vda", 3, "application/vda" }, { "viv", 3, "video/vnd.vivo" }, { "vivo", 4, "video/vnd.vivo" }, { "vrml", 4, "model/vrml" }, { "wav", 3, "audio/x-wav" }, { "wax", 3, "audio/x-ms-wax" }, { "wiki", 4, "application/x-fossil-wiki" }, { "wma", 3, "audio/x-ms-wma" }, { "wmv", 3, "video/x-ms-wmv" }, { "wmx", 3, "video/x-ms-wmx" }, { "wrl", 3, "model/vrml" }, { "wvx", 3, "video/x-ms-wvx" }, { "xbm", 3, "image/x-xbitmap" }, { "xlc", 3, "application/vnd.ms-excel" }, { "xll", 3, "application/vnd.ms-excel" }, { "xlm", 3, "application/vnd.ms-excel" }, { "xls", 3, "application/vnd.ms-excel" }, { "xlw", 3, "application/vnd.ms-excel" }, { "xml", 3, "text/xml" }, { "xpm", 3, "image/x-xpixmap" }, { "xwd", 3, "image/x-xwindowdump" }, { "xyz", 3, "chemical/x-pdb" }, { "zip", 3, "application/zip" }, }; #ifdef FOSSIL_DEBUG /* This is test code to make 
sure the table above is in the correct ** order */ if( fossil_strcmp(zName, "mimetype-test")==0 ){ for(i=1; i<sizeof(aMime)/sizeof(aMime[0]); i++){ if( fossil_strcmp(aMime[i-1].zSuffix,aMime[i].zSuffix)>=0 ){ fossil_fatal("mimetypes out of sequence: %s before %s", aMime[i-1].zSuffix, aMime[i].zSuffix); } } return "ok"; } #endif z = zName; for(i=0; zName[i]; i++){ if( zName[i]=='.' ) z = &zName[i+1]; } len = strlen(z); if( len<sizeof(zSuffix)-1 ){ sqlite3_snprintf(sizeof(zSuffix), zSuffix, "%s", z); for(i=0; zSuffix[i]; i++) zSuffix[i] = fossil_tolower(zSuffix[i]); first = 0; last = sizeof(aMime)/sizeof(aMime[0]) - 1; /* index of the last entry, so the search never reads past the table */ while( first<=last ){ int c; i = (first+last)/2; c = fossil_strcmp(zSuffix, aMime[i].zSuffix); if( c==0 ) return aMime[i].zMimetype; if( c<0 ){ last = i-1; }else{ first = i+1; } } } return "application/x-fossil-artifact"; }
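/*
** Illustrative sketch (not part of the original file): the same bounded
** binary-search idiom used by the suffix lookup above, shown on a tiny
** stand-alone table.  The helper name and the table entries are hypothetical
** examples only.
*/
static const char *example_suffix_lookup(const char *zSuffix){
  static const struct { const char *zSfx; const char *zType; } aDemo[] = {
    { "png", "image/png"       },   /* entries must stay in sorted order */
    { "txt", "text/plain"      },
    { "zip", "application/zip" },
  };
  int first = 0;
  int last = sizeof(aDemo)/sizeof(aDemo[0]) - 1;   /* index of last entry */
  while( first<=last ){
    int i = (first+last)/2;
    int c = fossil_strcmp(zSuffix, aDemo[i].zSfx);
    if( c==0 ) return aDemo[i].zType;
    if( c<0 ){
      last = i-1;
    }else{
      first = i+1;
    }
  }
  return "application/x-fossil-artifact";   /* default when nothing matches */
}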
/* ** Compare two ImportFile objects for sorting */ static int mfile_cmp(const void *pLeft, const void *pRight){ const ImportFile *pA = (const ImportFile*)pLeft; const ImportFile *pB = (const ImportFile*)pRight; return fossil_strcmp(pA->zName, pB->zName); }
/* ** Convert a symbolic name into a RID. Acceptable forms: ** ** * SHA1 hash ** * SHA1 hash prefix of at least 4 characters ** * Symbolic Name ** * "tag:" + symbolic name ** * Date or date-time ** * "date:" + Date or date-time ** * symbolic-name ":" date-time ** * "tip" ** ** The following additional forms are available in local checkouts: ** ** * "current" ** * "prev" or "previous" ** * "next" ** ** Return the RID of the matching artifact. Or return 0 if the name does not ** match any known object. Or return -1 if the name is ambiguous. ** ** The zType parameter specifies the type of artifact: ci, t, w, e, g. ** If zType is NULL or "" or "*" then any type of artifact will serve. ** If zType is "br" then find the first check-in of the named branch ** rather than the last. ** zType is "ci" in most use cases since we are usually searching for ** a check-in. ** ** Note that the input zTag for types "t" and "e" is the SHA1 hash of ** the ticket-change or event-change artifact, not the randomly generated ** hexadecimal identifier assigned to tickets and events. Those identifiers ** live in a separate namespace. */ int symbolic_name_to_rid(const char *zTag, const char *zType){ int vid; int rid = 0; int nTag; int i; int startOfBranch = 0; if( zType==0 || zType[0]==0 ){ zType = "*"; }else if( zType[0]=='b' ){ zType = "ci"; startOfBranch = 1; } if( zTag==0 || zTag[0]==0 ) return 0; /* special keyword: "tip" */ if( fossil_strcmp(zTag, "tip")==0 && (zType[0]=='*' || zType[0]=='c') ){ rid = db_int(0, "SELECT objid" " FROM event" " WHERE type='ci'" " ORDER BY event.mtime DESC" ); if( rid ) return rid; } /* special keywords: "prev", "previous", "current", and "next" */ if( g.localOpen && (vid=db_lget_int("checkout",0))!=0 ){ if( fossil_strcmp(zTag, "current")==0 ){ rid = vid; }else if( fossil_strcmp(zTag, "prev")==0 || fossil_strcmp(zTag, "previous")==0 ){ rid = db_int(0, "SELECT pid FROM plink WHERE cid=%d AND isprim", vid); }else if( fossil_strcmp(zTag, "next")==0 ){ rid = db_int(0, "SELECT cid FROM plink WHERE pid=%d" " ORDER BY isprim DESC, mtime DESC", vid); } if( rid ) return rid; } /* Date and times */ if( memcmp(zTag, "date:", 5)==0 ){ rid = db_int(0, "SELECT objid FROM event" " WHERE mtime<=julianday(%Q,'utc') AND type GLOB '%q'" " ORDER BY mtime DESC LIMIT 1", &zTag[5], zType); return rid; } if( fossil_isdate(zTag) ){ rid = db_int(0, "SELECT objid FROM event" " WHERE mtime<=julianday(%Q,'utc') AND type GLOB '%q'" " ORDER BY mtime DESC LIMIT 1", zTag, zType); if( rid) return rid; } /* Deprecated date & time formats: "local:" + date-time and ** "utc:" + date-time */ if( memcmp(zTag, "local:", 6)==0 ){ rid = db_int(0, "SELECT objid FROM event" " WHERE mtime<=julianday(%Q) AND type GLOB '%q'" " ORDER BY mtime DESC LIMIT 1", &zTag[6], zType); return rid; } if( memcmp(zTag, "utc:", 4)==0 ){ rid = db_int(0, "SELECT objid FROM event" " WHERE mtime<=julianday('%qz') AND type GLOB '%q'" " ORDER BY mtime DESC LIMIT 1", &zTag[4], zType); return rid; } /* "tag:" + symbolic-name */ if( memcmp(zTag, "tag:", 4)==0 ){ rid = db_int(0, "SELECT event.objid, max(event.mtime)" " FROM tag, tagxref, event" " WHERE tag.tagname='sym-%q' " " AND tagxref.tagid=tag.tagid AND tagxref.tagtype>0 " " AND event.objid=tagxref.rid " " AND event.type GLOB '%q'", &zTag[4], zType ); if( startOfBranch ) rid = start_of_branch(rid,1); return rid; } /* root:TAG -> The origin of the branch */ if( memcmp(zTag, "root:", 5)==0 ){ rid = symbolic_name_to_rid(zTag+5, zType); return start_of_branch(rid, 0); } /* symbolic-name ":" date-time */ 
nTag = strlen(zTag); for(i=0; i<nTag-10 && zTag[i]!=':'; i++){} if( zTag[i]==':' && fossil_isdate(&zTag[i+1]) ){ char *zDate = mprintf("%s", &zTag[i+1]); char *zTagBase = mprintf("%.*s", i, zTag); int nDate = strlen(zDate); if( sqlite3_strnicmp(&zDate[nDate-3],"utc",3)==0 ){ zDate[nDate-3] = 'z'; zDate[nDate-2] = 0; } rid = db_int(0, "SELECT event.objid, max(event.mtime)" " FROM tag, tagxref, event" " WHERE tag.tagname='sym-%q' " " AND tagxref.tagid=tag.tagid AND tagxref.tagtype>0 " " AND event.objid=tagxref.rid " " AND event.mtime<=julianday(%Q)" " AND event.type GLOB '%q'", zTagBase, zDate, zType ); return rid; } /* SHA1 hash or prefix */ if( nTag>=4 && nTag<=UUID_SIZE && validate16(zTag, nTag) ){ Stmt q; char zUuid[UUID_SIZE+1]; memcpy(zUuid, zTag, nTag+1); canonical16(zUuid, nTag); rid = 0; if( zType[0]=='*' ){ db_prepare(&q, "SELECT rid FROM blob WHERE uuid GLOB '%q*'", zUuid); }else{ db_prepare(&q, "SELECT blob.rid" " FROM blob, event" " WHERE blob.uuid GLOB '%q*'" " AND event.objid=blob.rid" " AND event.type GLOB '%q'", zUuid, zType ); } if( db_step(&q)==SQLITE_ROW ){ rid = db_column_int(&q, 0); if( db_step(&q)==SQLITE_ROW ) rid = -1; } db_finalize(&q); if( rid ) return rid; } /* Symbolic name */ rid = db_int(0, "SELECT event.objid, max(event.mtime)" " FROM tag, tagxref, event" " WHERE tag.tagname='sym-%q' " " AND tagxref.tagid=tag.tagid AND tagxref.tagtype>0 " " AND event.objid=tagxref.rid " " AND event.type GLOB '%q'", zTag, zType ); if( rid>0 ){ if( startOfBranch ) rid = start_of_branch(rid,1); return rid; } /* Undocumented: numeric tags get translated directly into the RID */ if( memcmp(zTag, "rid:", 4)==0 ){ zTag += 4; for(i=0; fossil_isdigit(zTag[i]); i++){} if( zTag[i]==0 ){ if( strcmp(zType,"*")==0 ){ rid = atoi(zTag); }else{ rid = db_int(0, "SELECT event.objid" " FROM event" " WHERE event.objid=%s" " AND event.type GLOB '%q'", zTag /*safe-for-%s*/, zType); } } } return rid; }
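/*
** Illustrative sketch (an assumption, not part of the original source):
** how a caller might resolve a user-supplied name to a check-in RID with
** symbolic_name_to_rid(), distinguishing the "not found" (0) and
** "ambiguous" (-1) return values documented above.
*/
static int example_resolve_checkin(const char *zUserInput){
  int rid = symbolic_name_to_rid(zUserInput, "ci");
  if( rid==0 ){
    fossil_print("no such check-in: %s\n", zUserInput);
  }else if( rid<0 ){
    fossil_print("ambiguous name: %s\n", zUserInput);
  }
  return rid;
}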
/* ** Use data accumulated in gg from a "commit" record to add a new ** manifest artifact to the BLOB table. */ static void finish_commit(void){ int i; char *zFromBranch; char *aTCard[4]; /* Array of T cards for manifest */ int nTCard = 0; /* Entries used in aTCard[] */ Blob record, cksum; import_prior_files(); qsort(gg.aFile, gg.nFile, sizeof(gg.aFile[0]), mfile_cmp); blob_zero(&record); blob_appendf(&record, "C %F\n", gg.zComment); blob_appendf(&record, "D %s\n", gg.zDate); for(i=0; i<gg.nFile; i++){ const char *zUuid = gg.aFile[i].zUuid; if( zUuid==0 ) continue; blob_appendf(&record, "F %F %s", gg.aFile[i].zName, zUuid); if( gg.aFile[i].isExe ){ blob_append(&record, " x\n", 3); }else if( gg.aFile[i].isLink ){ blob_append(&record, " l\n", 3); gg.hasLinks = 1; }else{ blob_append(&record, "\n", 1); } } if( gg.zFrom ){ blob_appendf(&record, "P %s", gg.zFrom); for(i=0; i<gg.nMerge; i++){ blob_appendf(&record, " %s", gg.azMerge[i]); } blob_append(&record, "\n", 1); zFromBranch = db_text(0, "SELECT brnm FROM xbranch WHERE tname=%Q", gg.zFromMark); }else{ zFromBranch = 0; } /* Add the required "T" cards to the manifest. Make sure they are added ** in sorted order and without any duplicates. Otherwise, fossil will not ** recognize the document as a valid manifest. */ if( !gg.tagCommit && fossil_strcmp(zFromBranch, gg.zBranch)!=0 ){ aTCard[nTCard++] = mprintf("T *branch * %F\n", gg.zBranch); aTCard[nTCard++] = mprintf("T *sym-%F *\n", gg.zBranch); if( zFromBranch ){ aTCard[nTCard++] = mprintf("T -sym-%F *\n", zFromBranch); } } if( gg.zFrom==0 ){ aTCard[nTCard++] = mprintf("T *sym-trunk *\n"); } qsort(aTCard, nTCard, sizeof(char *), string_cmp); for(i=0; i<nTCard; i++){ if( i==0 || fossil_strcmp(aTCard[i-1], aTCard[i]) ){ blob_appendf(&record, "%s", aTCard[i]); } } for(i=0; i<nTCard; i++) free(aTCard[i]); free(zFromBranch); db_multi_exec("INSERT INTO xbranch(tname, brnm) VALUES(%Q,%Q)", gg.zMark, gg.zBranch); blob_appendf(&record, "U %F\n", gg.zUser); md5sum_blob(&record, &cksum); blob_appendf(&record, "Z %b\n", &cksum); fast_insert_content(&record, gg.zMark, 1); blob_reset(&record); blob_reset(&cksum); /* The "git fast-export" command might output multiple "commit" lines ** that reference a tag using "refs/tags/TAGNAME". The tag should only ** be applied to the last commit that is output. The problem is we do not ** know at this time if the current commit is the last one to hold this ** tag or not. So make an entry in the XTAG table to record this tag ** but overwrite that entry if a later instance of the same tag appears. ** ** This behavior seems like a bug in git-fast-export, but it is easier ** to work around the problem than to fix git-fast-export. */ if( gg.tagCommit && gg.zDate && gg.zUser && gg.zFrom ){ blob_appendf(&record, "D %s\n", gg.zDate); blob_appendf(&record, "T +sym-%F %s\n", gg.zBranch, gg.zPrevCheckin); blob_appendf(&record, "U %F\n", gg.zUser); md5sum_blob(&record, &cksum); blob_appendf(&record, "Z %b\n", &cksum); db_multi_exec( "INSERT OR REPLACE INTO xtag(tname, tcontent)" " VALUES(%Q,%Q)", gg.zBranch, blob_str(&record) ); blob_reset(&record); blob_reset(&cksum); } fossil_free(gg.zPrevBranch); gg.zPrevBranch = gg.zBranch; gg.zBranch = 0; import_reset(0); }
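/*
** Illustrative sketch (an assumption, not part of the original import code):
** a stand-alone illustration of the card layout that finish_commit() writes
** into "record".  The comment text, file name, hash, user, and date below
** are placeholders, not real artifacts.
*/
static void example_minimal_manifest(Blob *pOut){
  Blob cksum;
  blob_zero(pOut);
  blob_appendf(pOut, "C %F\n", "example commit message");    /* comment card */
  blob_appendf(pOut, "D %s\n", "2011-07-15T12:34:56");       /* date card */
  blob_appendf(pOut, "F %F %s\n", "src/example.c",
               "0000000000000000000000000000000000000000");  /* file card */
  blob_appendf(pOut, "U %F\n", "nobody");                    /* user card */
  md5sum_blob(pOut, &cksum);
  blob_appendf(pOut, "Z %b\n", &cksum);                      /* checksum card */
  blob_reset(&cksum);
}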
/* ** Compare two strings for sorting. */ static int string_cmp(const void *pLeft, const void *pRight){ const char *zLeft = *(char const **)pLeft; const char *zRight = *(char const **)pRight; return fossil_strcmp(zLeft, zRight); }
/* ** COMMAND: test-integrity ?OPTIONS? ** ** Verify that all content can be extracted from the BLOB table correctly. ** If the BLOB table is correct, then the repository can always be ** successfully reconstructed using "fossil rebuild". ** ** Options: ** ** --parse Parse all manifests, wikis, tickets, events, and ** so forth, reporting any errors found. */ void test_integrity(void){ Stmt q; Blob content; Blob cksum; int n1 = 0; int n2 = 0; int nErr = 0; int total; int nCA = 0; int anCA[10]; int bParse = find_option("parse",0,0)!=0; db_find_and_open_repository(OPEN_ANY_SCHEMA, 2); memset(anCA, 0, sizeof(anCA)); /* Make sure no public artifact is a delta from a private artifact */ db_prepare(&q, "SELECT " " rid, (SELECT uuid FROM blob WHERE rid=delta.rid)," " srcid, (SELECT uuid FROM blob WHERE rid=delta.srcid)" " FROM delta" " WHERE srcid in private AND rid NOT IN private" ); while( db_step(&q)==SQLITE_ROW ){ int rid = db_column_int(&q, 0); const char *zId = db_column_text(&q, 1); int srcid = db_column_int(&q, 2); const char *zSrc = db_column_text(&q, 3); fossil_print( "public artifact %S (%d) is a delta from private artifact %S (%d)\n", zId, rid, zSrc, srcid ); nErr++; } db_finalize(&q); db_prepare(&q, "SELECT rid, uuid, size FROM blob ORDER BY rid"); total = db_int(0, "SELECT max(rid) FROM blob"); while( db_step(&q)==SQLITE_ROW ){ int rid = db_column_int(&q, 0); const char *zUuid = db_column_text(&q, 1); int size = db_column_int(&q, 2); n1++; fossil_print(" %d/%d\r", n1, total); fflush(stdout); if( size<0 ){ fossil_print("skip phantom %d %s\n", rid, zUuid); continue; /* Ignore phantoms */ } content_get(rid, &content); if( blob_size(&content)!=size ){ fossil_print("size mismatch on artifact %d: wanted %d but got %d\n", rid, size, blob_size(&content)); nErr++; } sha1sum_blob(&content, &cksum); if( fossil_strcmp(blob_str(&cksum), zUuid)!=0 ){ fossil_print("checksum mismatch on artifact %d: wanted %s but got %s\n", rid, zUuid, blob_str(&cksum)); nErr++; } if( bParse && looks_like_control_artifact(&content) ){ Blob err; int i, n; char *z; Manifest *p; char zFirstLine[400]; blob_zero(&err); z = blob_buffer(&content); n = blob_size(&content); for(i=0; i<n && z[i] && z[i]!='\n' && i<sizeof(zFirstLine)-1; i++){} memcpy(zFirstLine, z, i); zFirstLine[i] = 0; p = manifest_parse(&content, 0, &err); if( p==0 ){ fossil_print("manifest_parse failed for %s:\n%s\n", blob_str(&cksum), blob_str(&err)); if( strncmp(blob_str(&err), "line 1:", 7)==0 ){ fossil_print("\"%s\"\n", zFirstLine); } }else{ anCA[p->type]++; manifest_destroy(p); nCA++; } blob_reset(&err); }else{ blob_reset(&content); } blob_reset(&cksum); n2++; } db_finalize(&q); fossil_print("%d non-phantom blobs (out of %d total) checked: %d errors\n", n2, n1, nErr); if( bParse ){ const char *azType[] = { 0, "manifest", "cluster", "control", "wiki", "ticket", "attachment", "event" }; int i; fossil_print("%d total control artifacts\n", nCA); for(i=1; i<count(azType); i++){ if( anCA[i] ) fossil_print(" %d %ss\n", anCA[i], azType[i]); } } }
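/*
** Illustrative sketch (not part of the original command): the per-artifact
** check performed by the loop above, isolated into a helper.  It recomputes
** the SHA1 of the stored content and compares it against the uuid recorded
** in the BLOB table.  The helper name is hypothetical.
*/
static int example_verify_one_artifact(int rid, const char *zUuid){
  Blob content, cksum;
  int ok;
  content_get(rid, &content);
  sha1sum_blob(&content, &cksum);
  ok = fossil_strcmp(blob_str(&cksum), zUuid)==0;
  blob_reset(&content);
  blob_reset(&cksum);
  return ok;
}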
/* ** Parse the given URL. Populate variables in the global "g" structure. ** ** g.urlIsFile True if FILE: ** g.urlIsHttps True if HTTPS: ** g.urlIsSsh True if SSH: ** g.urlProtocol "http" or "https" or "file" ** g.urlName Hostname for HTTP:, HTTPS:, SSH:. Filename for FILE: ** g.urlPort TCP port number for HTTP or HTTPS. ** g.urlDfltPort Default TCP port number (80 or 443). ** g.urlPath Path name for HTTP or HTTPS. ** g.urlUser Userid. ** g.urlPasswd Password. ** g.urlHostname HOST:PORT or just HOST if port is the default. ** g.urlCanonical The URL in canonical form, omitting the password ** ** HTTP url format is: ** ** http://userid:password@host:port/path ** ** SSH url format is: ** ** ssh://userid:password@host:port/path?fossil=path/to/fossil.exe ** */ void url_parse(const char *zUrl){ int i, j, c; char *zFile = 0; if( strncmp(zUrl, "http://", 7)==0 || strncmp(zUrl, "https://", 8)==0 || strncmp(zUrl, "ssh://", 6)==0 ){ int iStart; char *zLogin; char *zExe; g.urlIsFile = 0; if( zUrl[4]=='s' ){ g.urlIsHttps = 1; g.urlProtocol = "https"; g.urlDfltPort = 443; iStart = 8; }else if( zUrl[0]=='s' ){ g.urlIsSsh = 1; g.urlProtocol = "ssh"; g.urlDfltPort = 22; g.urlFossil = "fossil"; iStart = 6; }else{ g.urlIsHttps = 0; g.urlProtocol = "http"; g.urlDfltPort = 80; iStart = 7; } for(i=iStart; (c=zUrl[i])!=0 && c!='/' && c!='@'; i++){} if( c=='@' ){ /* Parse up the user-id and password */ for(j=iStart; j<i && zUrl[j]!=':'; j++){} g.urlUser = mprintf("%.*s", j-iStart, &zUrl[iStart]); dehttpize(g.urlUser); if( j<i ){ g.urlPasswd = mprintf("%.*s", i-j-1, &zUrl[j+1]); dehttpize(g.urlPasswd); } if( g.urlIsSsh && g.urlPasswd ){ zLogin = mprintf("%t:*@", g.urlUser); }else{ zLogin = mprintf("%t@", g.urlUser); } for(j=i+1; (c=zUrl[j])!=0 && c!='/' && c!=':'; j++){} g.urlName = mprintf("%.*s", j-i-1, &zUrl[i+1]); i = j; }else{ for(i=iStart; (c=zUrl[i])!=0 && c!='/' && c!=':'; i++){} g.urlName = mprintf("%.*s", i-iStart, &zUrl[iStart]); zLogin = mprintf(""); } url_tolower(g.urlName); if( c==':' ){ g.urlPort = 0; i++; while( (c = zUrl[i])!=0 && fossil_isdigit(c) ){ g.urlPort = g.urlPort*10 + c - '0'; i++; } g.urlHostname = mprintf("%s:%d", g.urlName, g.urlPort); }else{ g.urlPort = g.urlDfltPort; g.urlHostname = g.urlName; } dehttpize(g.urlName); g.urlPath = mprintf("%s", &zUrl[i]); for(i=0; g.urlPath[i] && g.urlPath[i]!='?'; i++){} if( g.urlPath[i] ){ g.urlPath[i] = 0; i++; } zExe = mprintf(""); while( g.urlPath[i]!=0 ){ char *zName, *zValue; zName = &g.urlPath[i]; zValue = zName; while( g.urlPath[i] && g.urlPath[i]!='=' ){ i++; } if( g.urlPath[i]=='=' ){ g.urlPath[i] = 0; i++; zValue = &g.urlPath[i]; while( g.urlPath[i] && g.urlPath[i]!='&' ){ i++; } } if( g.urlPath[i] ){ g.urlPath[i] = 0; i++; } if( fossil_strcmp(zName,"fossil")==0 ){ g.urlFossil = zValue; dehttpize(g.urlFossil); zExe = mprintf("?fossil=%T", g.urlFossil); } } dehttpize(g.urlPath); if( g.urlDfltPort==g.urlPort ){ g.urlCanonical = mprintf( "%s://%s%T%T%s", g.urlProtocol, zLogin, g.urlName, g.urlPath, zExe ); }else{ g.urlCanonical = mprintf( "%s://%s%T:%d%T%s", g.urlProtocol, zLogin, g.urlName, g.urlPort, g.urlPath, zExe ); } if( g.urlIsSsh && g.urlPath[1] ) g.urlPath++; free(zLogin); }else if( strncmp(zUrl, "file:", 5)==0 ){ g.urlIsFile = 1; if( zUrl[5]=='/' && zUrl[6]=='/' ){ i = 7; }else{ i = 5; } zFile = mprintf("%s", &zUrl[i]); }else if( file_isfile(zUrl) ){ g.urlIsFile = 1; zFile = mprintf("%s", zUrl); }else if( file_isdir(zUrl)==1 ){ zFile = mprintf("%s/FOSSIL", zUrl); if( file_isfile(zFile) ){ g.urlIsFile = 1; }else{ free(zFile); 
fossil_panic("unknown repository: %s", zUrl); } }else{ fossil_panic("unknown repository: %s", zUrl); } if( g.urlIsFile ){ Blob cfile; dehttpize(zFile); file_canonical_name(zFile, &cfile, 0); free(zFile); g.urlProtocol = "file"; g.urlPath = ""; g.urlName = mprintf("%b", &cfile); g.urlCanonical = mprintf("file://%T", g.urlName); blob_reset(&cfile); } }