/*
** COMMAND: reconstruct*
**
** Usage: %fossil reconstruct FILENAME DIRECTORY
**
** This command studies the artifacts (files) in DIRECTORY and
** reconstructs the fossil record from them. It places the new
** fossil repository in FILENAME. Subdirectories are read recursively;
** files with a leading '.' in the filename are ignored.
**
** See also: deconstruct, rebuild
*/
void reconstruct_cmd(void) {
  char *zPassword;
  if( g.argc!=4 ){
    usage("FILENAME DIRECTORY");
  }
  if( file_isdir(g.argv[3])!=1 ){
    fossil_print("\"%s\" is not a directory\n\n", g.argv[3]);
    usage("FILENAME DIRECTORY");
  }
  db_create_repository(g.argv[2]);
  db_open_repository(g.argv[2]);
  db_open_config(0);
  db_begin_transaction();
  db_initial_setup(0, 0, 0, 1);

  fossil_print("Reading files from directory \"%s\"...\n", g.argv[3]);
  recon_read_dir(g.argv[3]);
  fossil_print("\nBuilding the Fossil repository...\n");

  rebuild_db(0, 1, 1);
  reconstruct_private_table();

  /* Skip the verify_before_commit() step on a reconstruct.  Most artifacts
  ** will have been changed and verification therefore takes a really, really
  ** long time.
  */
  verify_cancel();
  
  db_end_transaction(0);
  fossil_print("project-id: %s\n", db_get("project-code", 0));
  fossil_print("server-id: %s\n", db_get("server-code", 0));
  zPassword = db_text(0, "SELECT pw FROM user WHERE login=%Q", g.zLogin);
  fossil_print("admin-user: %s (initial password is \"%s\")\n", g.zLogin, zPassword);
}
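/*
** The directory scan above is done by recon_read_dir(), which is defined
** elsewhere.  Below is only an illustrative sketch of the idea, assuming
** POSIX <dirent.h> and the single-argument form of content_put(); the
** _sketch name is hypothetical, and the real routine also handles Unicode
** filenames and progress reporting.  Each regular file whose name does not
** start with '.' is read into a Blob and stored as repository content, and
** subdirectories are entered recursively.
*/
static void recon_read_dir_sketch(const char *zPath){
  DIR *d = opendir(zPath);
  struct dirent *pEntry;
  if( d==0 ) return;
  while( (pEntry = readdir(d))!=0 ){
    char *zSub;
    Blob content;
    if( pEntry->d_name[0]=='.' ) continue;        /* skip dot files */
    zSub = mprintf("%s/%s", zPath, pEntry->d_name);
    if( file_isdir(zSub)==1 ){
      recon_read_dir_sketch(zSub);                /* recurse into subdirectory */
    }else{
      if( blob_read_from_file(&content, zSub)<0 ){
        fossil_fatal("cannot read \"%s\"", zSub);
      }
      content_put(&content);                      /* store file as an artifact */
      blob_reset(&content);
    }
    fossil_free(zSub);
  }
  closedir(d);
}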
/*
** COMMAND: test-th-render
*/
void test_th_render(void){
  Blob in;
  if( g.argc<3 ){
    usage("FILE");
  }
  db_open_config(0); /* Needed for global "tcl" setting. */
  blob_zero(&in);
  blob_read_from_file(&in, g.argv[2]);
  Th_Render(blob_str(&in));
}
/*
** COMMAND: configuration*
**
** Usage: %vcs configuration METHOD ... ?OPTIONS?
**
** Where METHOD is one of: export import merge pull push reset sync.  All
** methods accept the -R or --repository option to specify a repository.
**
**    %vcs configuration export AREA FILENAME
**
**         Write to FILENAME exported configuration information for AREA.
**         AREA can be one of:  all email project shun skin ticket user
**
**    %vcs configuration import FILENAME
**
**         Read a configuration from FILENAME, overwriting the current
**         configuration.
**
**    %vcs configuration merge FILENAME
**
**         Read a configuration from FILENAME and merge its values into
**         the current configuration.  Existing values take priority over
**         values read from FILENAME.
**
**    %vcs configuration pull AREA ?URL?
**
**         Pull and install the configuration from a different server
**         identified by URL.  If no URL is specified, then the default
**         server is used. Use the --legacy option for the older protocol
**         (when talking to servers compiled prior to 2011-04-27.)  Use
**         the --overwrite flag to completely replace local settings with
**         content received from URL.
**
**    %vcs configuration push AREA ?URL?
**
**         Push the local configuration into the remote server identified
**         by URL.  Admin privilege is required on the remote server for
**         this to work.  When the same record exists both locally and on
**         the remote end, the one that was most recently changed wins.
**         Use the --legacy flag when talking to older servers.
**
**    %vcs configuration reset AREA
**
**         Restore the configuration to the default.  AREA as above.
**
**    %vcs configuration sync AREA ?URL?
**
**         Synchronize configuration changes in the local repository with
**         the remote repository at URL.
**
** Options:
**    -R|--repository FILE       Extract info from repository FILE
**
** See also: settings, unset
*/
void configuration_cmd(void) {
    int n;
    const char *zMethod;
    if( g.argc<3 ) {
        usage("export|import|merge|pull|reset ...");
    }
    db_find_and_open_repository(0, 0);
    db_open_config(0);
    zMethod = g.argv[2];
    n = strlen(zMethod);
    if( strncmp(zMethod, "export", n)==0 ) {
        int mask;
        const char *zSince = find_option("since",0,1);
        sqlite3_int64 iStart;
        if( g.argc!=5 ) {
            usage("export AREA FILENAME");
        }
        mask = configure_name_to_mask(g.argv[3], 1);
        if( zSince ) {
            iStart = db_int64(0,
                         "SELECT coalesce(strftime('%%s',%Q),strftime('%%s','now',%Q))+0",
                         zSince, zSince
                     );
        } else {
            iStart = 0;
        }
        export_config(mask, g.argv[3], iStart, g.argv[4]);
    } else if( strncmp(zMethod, "import", n)==0
               || strncmp(zMethod, "merge", n)==0 ) {
        Blob in;
        int groupMask;
        if( g.argc!=4 ) usage(mprintf("%s FILENAME",zMethod));
        blob_read_from_file(&in, g.argv[3]);
        db_begin_transaction();
        if( zMethod[0]=='i' ) {
            groupMask = CONFIGSET_ALL | CONFIGSET_OVERWRITE;
        } else {
            groupMask = CONFIGSET_ALL;
        }
        configure_receive_all(&in, groupMask);
        db_end_transaction(0);
    } else if( strncmp(zMethod, "pull", n)==0
               || strncmp(zMethod, "push", n)==0
               || strncmp(zMethod, "sync", n)==0
             ) {
        int mask;
        const char *zServer;
        const char *zPw;
        int legacyFlag = 0;
        int overwriteFlag = 0;
        if( zMethod[0]!='s' ) legacyFlag = find_option("legacy",0,0)!=0;
        if( strncmp(zMethod,"pull",n)==0 ) {
            overwriteFlag = find_option("overwrite",0,0)!=0;
        }
        url_proxy_options();
        if( g.argc!=4 && g.argc!=5 ) {
            usage("pull AREA ?URL?");
        }
        mask = configure_name_to_mask(g.argv[3], 1);
        if( g.argc==5 ) {
            zServer = g.argv[4];
            zPw = 0;
            g.dontKeepUrl = 1;
        } else {
            zServer = db_get("last-sync-url", 0);
            if( zServer==0 ) {
                vcs_fatal("no server specified");
            }
            zPw = unobscure(db_get("last-sync-pw", 0));
        }
        url_parse(zServer);
        if( g.urlPasswd==0 && zPw ) g.urlPasswd = mprintf("%s", zPw);
        user_select();
        url_enable_proxy("via proxy: ");
        if( legacyFlag ) mask |= CONFIGSET_OLDFORMAT;
        if( overwriteFlag ) mask |= CONFIGSET_OVERWRITE;
        if( strncmp(zMethod, "push", n)==0 ) {
            client_sync(0,0,0,0,0,mask);
        } else if( strncmp(zMethod, "pull", n)==0 ) {
            client_sync(0,0,0,0,mask,0);
        } else {
            client_sync(0,0,0,0,mask,mask);
        }
    } else if( strncmp(zMethod, "reset", n)==0 ) {
        int mask, i;
        char *zBackup;
        if( g.argc!=4 ) usage("reset AREA");
        mask = configure_name_to_mask(g.argv[3], 1);
        zBackup = db_text(0,
                          "SELECT strftime('config-backup-%%Y%%m%%d%%H%%M%%f','now')");
        db_begin_transaction();
        export_config(mask, g.argv[3], 0, zBackup);
        for(i=0; i<count(aConfig); i++) {
            const char *zName = aConfig[i].zName;
            if( (aConfig[i].groupMask & mask)==0 ) continue;
            if( zName[0]!='@' ) {
                db_multi_exec("DELETE FROM config WHERE name=%Q", zName);
            } else if( vcs_strcmp(zName,"@user")==0 ) {
                db_multi_exec("DELETE FROM user");
                db_create_default_users(0, 0);
            } else if( vcs_strcmp(zName,"@concealed")==0 ) {
                db_multi_exec("DELETE FROM concealed");
            } else if( vcs_strcmp(zName,"@shun")==0 ) {
                db_multi_exec("DELETE FROM shun");
            } else if( vcs_strcmp(zName,"@reportfmt")==0 ) {
                db_multi_exec("DELETE FROM reportfmt");
            }
        }
        db_end_transaction(0);
        vcs_print("Configuration reset to factory defaults.\n");
        vcs_print("To recover, use:  %s %s import %s\n",
                  vcs_nameofexe(), g.argv[1], zBackup);
    } else
    {
        vcs_fatal("METHOD should be one of:"
                  " export import merge pull push reset");
    }
}
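/*
** The AREA names accepted above ("all", "email", "project", "shun", "skin",
** "ticket", "user") are translated into bit masks by
** configure_name_to_mask(), defined elsewhere.  A minimal sketch of such a
** lookup follows; the helper name area_to_mask_sketch() is hypothetical,
** and the CONFIGSET_ADDR/PROJ/SHUN/SKIN/TKT/USER masks are assumed by
** analogy with the CONFIGSET_ALL and CONFIGSET_OVERWRITE masks used above.
*/
static int area_to_mask_sketch(const char *zArea){
  static const struct {
    const char *zName;   /* AREA name given on the command line */
    int mask;            /* corresponding CONFIGSET_* bit mask  */
  } aArea[] = {
    { "email",   CONFIGSET_ADDR },
    { "project", CONFIGSET_PROJ },
    { "shun",    CONFIGSET_SHUN },
    { "skin",    CONFIGSET_SKIN },
    { "ticket",  CONFIGSET_TKT  },
    { "user",    CONFIGSET_USER },
    { "all",     CONFIGSET_ALL  },
  };
  int i;
  int n = (int)strlen(zArea);
  for(i=0; i<(int)(sizeof(aArea)/sizeof(aArea[0])); i++){
    /* allow unambiguous prefixes, matching the zMethod handling above */
    if( strncmp(zArea, aArea[i].zName, n)==0 ) return aArea[i].mask;
  }
  vcs_fatal("no such configuration area: \"%s\"", zArea);
  return 0;
}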
/*
** COMMAND: import
**
** Usage: %fossil import --git ?OPTIONS? NEW-REPOSITORY
**
** Read text generated by the git-fast-export command and use it to
** construct a new Fossil repository named by the NEW-REPOSITORY
** argument.  The git-fast-export text is read from standard input.
**
** The git-fast-export file format is currently the only VCS interchange
** format that is understood, though other interchange formats may be added
** in the future.
**
** The --incremental option allows an existing repository to be extended
** with new content.
**
** Options:
**   --incremental  allow importing into an existing repository
**
** See also: export
*/
void git_import_cmd(void){
  char *zPassword;
  FILE *pIn;
  Stmt q;
  int forceFlag = find_option("force", "f", 0)!=0;
  int incrFlag = find_option("incremental", "i", 0)!=0;

  find_option("git",0,0);  /* Skip the --git option for now */
  verify_all_options();
  if( g.argc!=3  && g.argc!=4 ){
    usage("REPOSITORY-NAME");
  }
  if( g.argc==4 ){
    pIn = fossil_fopen(g.argv[3], "rb");
  }else{
    pIn = stdin;
    fossil_binary_mode(pIn);
  }
  if( !incrFlag ){
    if( forceFlag ) file_delete(g.argv[2]);
    db_create_repository(g.argv[2]);
  }
  db_open_repository(g.argv[2]);
  db_open_config(0);

  /* The following temp-tables are used to hold information needed for
  ** the import.
  **
  ** The XMARK table provides a mapping from fast-import "marks" and symbols
  ** into artifact ids (UUIDs - the 40-character hex SHA1 hash of artifacts).
  ** Given any valid fast-import symbol, the corresponding fossil rid and
  ** uuid can be found by searching against the xmark.tname field.
  **
  ** The XBRANCH table maps commit marks and symbols into the branch those
  ** commits belong to.  If xbranch.tname is a fast-import symbol for a
  ** checkin then xbranch.brnm is the branch that checkin is part of.
  **
  ** The XTAG table records information about tags that need to be applied
  ** to various branches after the import finishes.  The xtag.tcontent field
  ** contains the text of an artifact that will add a tag to a check-in.
  ** The git-fast-export file format might specify the same tag multiple
  ** times but only the last tag should be used.  And we do not know which
  ** occurrence of the tag is the last until the import finishes.
  */
  db_multi_exec(
     "CREATE TEMP TABLE xmark(tname TEXT UNIQUE, trid INT, tuuid TEXT);"
     "CREATE TEMP TABLE xbranch(tname TEXT UNIQUE, brnm TEXT);"
     "CREATE TEMP TABLE xtag(tname TEXT UNIQUE, tcontent TEXT);"
  );


  db_begin_transaction();
  if( !incrFlag ) db_initial_setup(0, 0, 0, 1);
  git_fast_import(pIn);
  db_prepare(&q, "SELECT tcontent FROM xtag");
  while( db_step(&q)==SQLITE_ROW ){
    Blob record;
    db_ephemeral_blob(&q, 0, &record);
    fast_insert_content(&record, 0, 0);
    import_reset(0);
  }
  db_finalize(&q);
  db_end_transaction(0);
  db_begin_transaction();
  fossil_print("Rebuilding repository meta-data...\n");
  rebuild_db(0, 1, !incrFlag);
  verify_cancel();
  db_end_transaction(0);
  fossil_print("Vacuuming..."); fflush(stdout);
  db_multi_exec("VACUUM");
  fossil_print(" ok\n");
  if( !incrFlag ){
    fossil_print("project-id: %s\n", db_get("project-code", 0));
    fossil_print("server-id:  %s\n", db_get("server-code", 0));
    zPassword = db_text(0, "SELECT pw FROM user WHERE login=%Q", g.zLogin);
    fossil_print("admin-user: %s (password is \"%s\")\n", g.zLogin, zPassword);
  }
}
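/*
** As described in the comment above, the xmark temp table maps fast-import
** marks and symbols to fossil artifact ids.  The import code issues the
** equivalent query inline; the helper below is only a hypothetical
** illustration of the lookup (mark_to_rid_sketch is not a real fossil
** function).
*/
static int mark_to_rid_sketch(const char *zMark){
  Stmt q;
  int rid = 0;
  db_prepare(&q, "SELECT trid FROM xmark WHERE tname=%Q", zMark);
  if( db_step(&q)==SQLITE_ROW ){
    rid = db_column_int(&q, 0);    /* fossil record id for this mark */
  }
  db_finalize(&q);
  return rid;                      /* 0 if the mark is not known */
}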
/*
** COMMAND: clone
**
** Usage: %fossil clone ?OPTIONS? URL FILENAME
**
** Make a clone of a repository specified by URL in the local
** file named FILENAME.  
**
** URL must be in one of the following forms ([...] means optional):
**   HTTP/HTTPS protocol:
**     http[s]://[userid[:password]@]host[:port][/path]
**
**   SSH protocol:
**     ssh://[userid[:password]@]host[:port]/path/to/repo.fossil\\
**     [?fossil=path/to/fossil.exe]
**
**   Filesystem:
**     [file://]path/to/repo.fossil
**
**   Note: For ssh and filesystem, path must have an extra leading 
**         '/' to use an absolute path.
**
** By default, your current login name is used to create the default
** admin user. This can be overridden using the -A|--admin-user
** parameter.
**
** Options:
**    --admin-user|-A USERNAME   Make USERNAME the administrator
**    --once                     Don't save the URL.
**    --private                  Also clone private branches 
**    --ssl-identity=filename    Use the SSL identity if requested by the server
**    --ssh-command|-c 'command' Use this SSH command
**
** See also: init
*/
void clone_cmd(void){
  char *zPassword;
  const char *zDefaultUser;   /* Optional name of the default user */
  int nErr = 0;
  int bPrivate = 0;           /* Also clone private branches */
  int urlFlags = URL_PROMPT_PW | URL_REMEMBER;

  if( find_option("private",0,0)!=0 ) bPrivate = SYNC_PRIVATE;
  if( find_option("once",0,0)!=0) urlFlags &= ~URL_REMEMBER;
  zDefaultUser = find_option("admin-user","A",1);
  clone_ssh_find_options();
  url_proxy_options();
  if( g.argc < 4 ){
    usage("?OPTIONS? FILE-OR-URL NEW-REPOSITORY");
  }
  db_open_config(0);
  if( file_size(g.argv[3])>0 ){
    fossil_fatal("file already exists: %s", g.argv[3]);
  }

  url_parse(g.argv[2], urlFlags);
  if( zDefaultUser==0 && g.urlUser!=0 ) zDefaultUser = g.urlUser;
  if( g.urlIsFile ){
    file_copy(g.urlName, g.argv[3]);
    db_close(1);
    db_open_repository(g.argv[3]);
    db_record_repository_filename(g.argv[3]);
    url_remember();
    if( !bPrivate ) delete_private_content();
    shun_artifacts();
    db_create_default_users(1, zDefaultUser);
    if( zDefaultUser ){
      g.zLogin = zDefaultUser;
    }else{
      g.zLogin = db_text(0, "SELECT login FROM user WHERE cap LIKE '%%s%%'");
    }
    fossil_print("Repository cloned into %s\n", g.argv[3]);
  }else{
    db_create_repository(g.argv[3]);
    db_open_repository(g.argv[3]);
    db_begin_transaction();
    db_record_repository_filename(g.argv[3]);
    db_initial_setup(0, 0, zDefaultUser, 0);
    user_select();
    db_set("content-schema", CONTENT_SCHEMA, 0);
    db_set("aux-schema", AUX_SCHEMA, 0);
    db_set("rebuilt", get_version(), 0);
    url_remember();
    if( g.zSSLIdentity!=0 ){
      /* If the --ssl-identity option was specified, store it as a setting */
      Blob fn;
      blob_zero(&fn);
      file_canonical_name(g.zSSLIdentity, &fn, 0);
      db_set("ssl-identity", blob_str(&fn), 0);
      blob_reset(&fn);
    }
    db_multi_exec(
      "REPLACE INTO config(name,value,mtime)"
      " VALUES('server-code', lower(hex(randomblob(20))), now());"
    );
    url_enable_proxy(0);
    clone_ssh_db_set_options();
    url_get_password_if_needed();
    g.xlinkClusterOnly = 1;
    nErr = client_sync(SYNC_CLONE | bPrivate,CONFIGSET_ALL,0);
    g.xlinkClusterOnly = 0;
    verify_cancel();
    db_end_transaction(0);
    db_close(1);
    if( nErr ){
      file_delete(g.argv[3]);
      fossil_fatal("server returned an error - clone aborted");
    }
    db_open_repository(g.argv[3]);
  }
  db_begin_transaction();
  fossil_print("Rebuilding repository meta-data...\n");
  rebuild_db(0, 1, 0);
  fossil_print("project-id: %s\n", db_get("project-code", 0));
  zPassword = db_text(0, "SELECT pw FROM user WHERE login=%Q", g.zLogin);
  fossil_print("admin-user: %s (password is \"%s\")\n", g.zLogin, zPassword);
  db_end_transaction(0);
}
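/*
** clone_ssh_find_options() and clone_ssh_db_set_options(), called above,
** handle the --ssh-command|-c option documented in the help text.  A
** minimal sketch of what they might do follows; the g.zSshCmd field and the
** _sketch names are assumptions for illustration, and the real helpers live
** elsewhere in this file.
*/
static void clone_ssh_find_options_sketch(void){
  const char *zSshCmd = find_option("ssh-command", "c", 1);
  if( zSshCmd && zSshCmd[0] ){
    g.zSshCmd = mprintf("%s", zSshCmd);    /* remember the command for later */
  }
}
static void clone_ssh_db_set_options_sketch(void){
  if( g.zSshCmd && g.zSshCmd[0] ){
    db_set("ssh-command", g.zSshCmd, 0);   /* persist as a repository setting */
  }
}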
/*
** COMMAND: all
**
** Usage: %fossil all (changes|ignore|list|ls|pull|push|rebuild|sync)
**
** The ~/.fossil file records the location of all repositories for a
** user.  This command performs certain operations on all repositories
** that can be useful before or after a period of disconnected operation.
**
** On Win32 systems, the file is named "_fossil" and is located in
** %LOCALAPPDATA%, %APPDATA% or %HOMEPATH%.
**
** Available operations are:
**
**    ignore     Arguments are repositories that should be ignored
**               by subsequent list, pull, push, rebuild, and sync.
**
**    list | ls  Display the location of all repositories.
**               The --ckout option causes all local checkouts to be
**               listed instead.
**
**    changes    Shows all local checkouts that have uncommitted changes
**
**    pull       Run a "pull" operation on all repositories
**
**    push       Run a "push" on all repositories
**
**    rebuild    Rebuild on all repositories
**
**    sync       Run a "sync" on all repositories
**
** Repositories are automatically added to the set of known repositories
** when one of the following commands is run against the repository: clone,
** info, pull, push, or sync.  Even previously ignored repositories are
** added back to the list of repositories by these commands.
*/
void all_cmd(void){
  int n;
  Stmt q;
  const char *zCmd;
  char *zSyscmd;
  char *zFossil;
  char *zQFilename;
  int useCheckouts = 0;
  int quiet = 0;
  int testRun = 0;
  int stopOnError = find_option("dontstop",0,0)==0;
  int rc;
  Bag outOfDate;
  
  /* The undocumented --test option causes no changes to be made to any
  ** repository; instead it shows what would have happened.  Intended for
  ** testing and debugging use.
  */
  testRun = find_option("test",0,0)!=0;

  if( g.argc<3 ){
    usage("changes|list|ls|pull|push|rebuild|sync");
  }
  n = strlen(g.argv[2]);
  db_open_config(1);
  zCmd = g.argv[2];
  if( strncmp(zCmd, "list", n)==0 || strncmp(zCmd,"ls",n)==0 ){
    zCmd = "list";
    useCheckouts = find_option("ckout","c",0)!=0;
  }else if( strncmp(zCmd, "push", n)==0 ){
    zCmd = "push -autourl -R";
  }else if( strncmp(zCmd, "pull", n)==0 ){
    zCmd = "pull -autourl -R";
  }else if( strncmp(zCmd, "rebuild", n)==0 ){
    zCmd = "rebuild";
  }else if( strncmp(zCmd, "sync", n)==0 ){
    zCmd = "sync -autourl -R";
  }else if( strncmp(zCmd, "test-integrity", n)==0 ){
    zCmd = "test-integrity";
  }else if( strncmp(zCmd, "changes", n)==0 ){
    zCmd = "changes --quiet --header --chdir";
    useCheckouts = 1;
    stopOnError = 0;
    quiet = 1;
  }else if( strncmp(zCmd, "ignore", n)==0 ){
    int j;
    verify_all_options();
    db_begin_transaction();
    for(j=3; j<g.argc; j++){
      char *zSql = mprintf("DELETE FROM global_config"
                           " WHERE name GLOB 'repo:%q'", g.argv[j]);
      if( testRun ){
        fossil_print("%s\n", zSql);
      }else{
        db_multi_exec("%s", zSql);
      }
      fossil_free(zSql);
    }
    db_end_transaction(0);
    return;
  }else{
    fossil_fatal("\"all\" subcommand should be one of: "
                 "changes ignore list ls push pull rebuild sync");
  }
  verify_all_options();
  zFossil = quoteFilename(g.nameOfExe);
  if( useCheckouts ){
    db_prepare(&q,
       "SELECT substr(name, 7) COLLATE nocase, max(rowid)"
       "  FROM global_config"
       " WHERE substr(name, 1, 6)=='ckout:'"
       " GROUP BY 1 ORDER BY 1"
    );
  }else{
    db_prepare(&q,
       "SELECT substr(name, 6) COLLATE nocase, max(rowid)"
       "  FROM global_config"
       " WHERE substr(name, 1, 5)=='repo:'"
       " GROUP BY 1 ORDER BY 1"
    );
  }
  bag_init(&outOfDate);
  while( db_step(&q)==SQLITE_ROW ){
    const char *zFilename = db_column_text(&q, 0);
    int rowid = db_column_int(&q, 1);
    if( file_access(zFilename, 0) || !file_is_canonical(zFilename) ){
      bag_insert(&outOfDate, rowid);
      continue;
    }
    if( useCheckouts && file_isdir(zFilename)!=1 ){
      bag_insert(&outOfDate, rowid);
      continue;
    }
    if( zCmd[0]=='l' ){
      fossil_print("%s\n", zFilename);
      continue;
    }
    zQFilename = quoteFilename(zFilename);
    zSyscmd = mprintf("%s %s %s", zFossil, zCmd, zQFilename);
    if( !quiet || testRun ){
      fossil_print("%s\n", zSyscmd);
      fflush(stdout);
    }
    rc = testRun ? 0 : fossil_system(zSyscmd);
    free(zSyscmd);
    free(zQFilename);
    if( stopOnError && rc ){
      break;
    }
  }
  db_finalize(&q);
  
  /* If any repositories whose names appear in the ~/.fossil file could not
  ** be found, remove those names from the ~/.fossil file.
  */
  if( bag_count(&outOfDate)>0 ){
    Blob sql;
    char *zSep = "(";
    int rowid;
    blob_zero(&sql);
    blob_appendf(&sql, "DELETE FROM global_config WHERE rowid IN ");
    for(rowid=bag_first(&outOfDate); rowid>0; rowid=bag_next(&outOfDate,rowid)){
      blob_appendf(&sql, "%s%d", zSep, rowid);
      zSep = ",";
    }
    blob_appendf(&sql, ")");
    if( testRun ){
      fossil_print("%s\n", blob_str(&sql));
    }else{
      db_multi_exec(blob_str(&sql));
    }
    blob_reset(&sql);
  }
}
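/*
** quoteFilename(), used above when composing the per-repository command
** line, is a small static helper defined elsewhere in this file.  A
** plausible sketch (the exact quoting rules are an assumption): wrap the
** name in double quotes when it contains a space so that fossil_system()
** passes it to the shell as a single argument.
*/
static char *quoteFilename_sketch(const char *zFilename){
  if( zFilename && strchr(zFilename, ' ')!=0 ){
    return mprintf("\"%s\"", zFilename);   /* contains a space: quote it */
  }
  return mprintf("%s", zFilename);         /* safe to pass through as-is */
}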