Example 1
/*
** Get the blob.content value for blob.rid=rid.  Return 1 on success or
** 0 on failure.
*/
static int content_of_blob(int rid, Blob *pBlob){
  static Stmt q;
  int rc = 0;
  db_static_prepare(&q, "SELECT content FROM blob WHERE rid=:rid AND size>=0");
  db_bind_int(&q, ":rid", rid);
  if( db_step(&q)==SQLITE_ROW ){
    db_ephemeral_blob(&q, 0, pBlob);
    blob_uncompress(pBlob, pBlob);
    rc = 1;
  }
  db_reset(&q);
  return rc;
}
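A minimal usage sketch, not part of the original sources: a hypothetical caller
that fetches an artifact's content and reports its size.  It assumes only
helpers that appear elsewhere in these examples (blob_size, blob_reset,
fossil_print).

/*
** Hypothetical caller (illustration only): load the content of the
** artifact with the given rid and report its uncompressed size.
*/
static void print_artifact_size(int rid){
  Blob content;
  if( content_of_blob(rid, &content) ){
    fossil_print("artifact %d holds %d bytes\n", rid, blob_size(&content));
    blob_reset(&content);  /* release the uncompressed copy */
  }else{
    fossil_print("artifact %d has no stored content\n", rid);
  }
}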
Example 2
/*
** Look at every VFILE entry with the given vid and update the
** VFILE.CHNGED field on every file according to whether or not
** the file has changed.  0 means no change.  1 means edited.  2 means
** the file has changed due to a merge.  3 means the file was added
** by a merge.
**
** If VFILE.DELETED is true or if VFILE.RID is zero, then the file was
** either removed from management via "vcs rm" or added via
** "vcs add", respectively, and in both cases we always know that
** the file has changed without having to check the size, mtime,
** or on-disk content.
**
** If the size of the file has changed, then we always know that the file
** changed without having to look at the mtime or on-disk content.
**
** The mtime of the file is only a factor if the mtime-changes setting
** is true (or undefined - it defaults to true) and the useSha1sum flag
** is false.  If the mtime-changes setting is false or if useSha1sum is
** true, then we do not trust the mtime and will examine the on-disk
** content to determine whether a file really is the same.
**
** If the mtime is used, it is used only to determine if files are the same.
** If the mtime of a file has changed, we still examine the on-disk content
** to see whether or not the edit was a null-edit.
*/
void vfile_check_signature(int vid, int notFileIsFatal, int useSha1sum){
  int nErr = 0;
  Stmt q;
  Blob fileCksum, origCksum;
  int useMtime = useSha1sum==0 && db_get_boolean("mtime-changes", 1);

  db_begin_transaction();
  db_prepare(&q, "SELECT id, %Q || pathname,"
                 "       vfile.mrid, deleted, chnged, uuid, size, mtime"
                 "  FROM vfile LEFT JOIN blob ON vfile.mrid=blob.rid"
                 " WHERE vid=%d ", g.zLocalRoot, vid);
  while( db_step(&q)==SQLITE_ROW ){
    int id, rid, isDeleted;
    const char *zName;
    int chnged = 0;
    int oldChnged;
    i64 oldMtime;
    i64 currentMtime;
    i64 origSize;
    i64 currentSize;

    id = db_column_int(&q, 0);
    zName = db_column_text(&q, 1);
    rid = db_column_int(&q, 2);
    isDeleted = db_column_int(&q, 3);
    oldChnged = chnged = db_column_int(&q, 4);
    oldMtime = db_column_int64(&q, 7);
    currentSize = file_wd_size(zName);
    origSize = db_column_int64(&q, 6);
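    /* Note: passing 0 to the file_wd_*() helpers below reuses the stat()
    ** information cached by the file_wd_size(zName) call above, so each
    ** file is only stat()-ed once per loop iteration. */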
    currentMtime = file_wd_mtime(0);
    if( chnged==0 && (isDeleted || rid==0) ){
      /* "vcs rm" or "vcs add" always change the file */
      chnged = 1;
    }else if( !file_wd_isfile_or_link(0) && currentSize>=0 ){
      if( notFileIsFatal ){
        vcs_warning("not an ordinary file: %s", zName);
        nErr++;
      }
      chnged = 1;
    }
    if( origSize!=currentSize ){
      if( chnged!=1 ){
        /* A file size change is definitive - the file has changed.  No
        ** need to check the mtime or sha1sum */
        chnged = 1;
      }
    }else if( chnged==1 && rid!=0 && !isDeleted ){
      /* File is believed to have changed but it is the same size.
      ** Double check that it really has changed by looking at content. */
      assert( origSize==currentSize );
      db_ephemeral_blob(&q, 5, &origCksum);
      if( sha1sum_file(zName, &fileCksum) ){
        blob_zero(&fileCksum);
      }
      if( blob_compare(&fileCksum, &origCksum)==0 ) chnged = 0;
      blob_reset(&origCksum);
      blob_reset(&fileCksum);
    }else if( chnged==0 && (useMtime==0 || currentMtime!=oldMtime) ){
      /* For files that were formerly believed to be unchanged, if their
      ** mtime changes, or unconditionally if the mtime is not trusted
      ** (--sha1sum given or mtime-changes off), check to see if they have
      ** been edited by looking at their SHA1 sum */
      assert( origSize==currentSize );
      db_ephemeral_blob(&q, 5, &origCksum);
      if( sha1sum_file(zName, &fileCksum) ){
        blob_zero(&fileCksum);
      }
      if( blob_compare(&fileCksum, &origCksum) ){
        chnged = 1;
      }
      blob_reset(&origCksum);
      blob_reset(&fileCksum);
    }
    if( currentMtime!=oldMtime || chnged!=oldChnged ){
      db_multi_exec("UPDATE vfile SET mtime=%lld, chnged=%d WHERE id=%d",
                    currentMtime, chnged, id);
    }
  }
  db_finalize(&q);
  if( nErr ) vcs_fatal("abort due to prior errors");
  db_end_transaction(0);
}
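A minimal sketch, not part of the original sources, showing how a caller might
use vfile_check_signature() and then read back the freshly computed flags.
The vfile column names follow the query used inside the function above;
mtimes are trusted and non-files are not treated as fatal.

/*
** Hypothetical caller (illustration only): refresh the change flags for
** check-out "vid", then list every file currently marked as changed.
*/
static void list_changed_files(int vid){
  Stmt q;
  vfile_check_signature(vid, 0, 0);
  db_prepare(&q,
     "SELECT pathname, chnged FROM vfile WHERE vid=%d AND chnged>0", vid);
  while( db_step(&q)==SQLITE_ROW ){
    fossil_print("chnged=%d  %s\n", db_column_int(&q, 1),
                 db_column_text(&q, 0));
  }
  db_finalize(&q);
}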
Example 3
/*
** Rebuild cross-referencing information for the artifact
** rid with content pBase and all of its descendants.  This
** routine clears the content buffer before returning.
**
** If the zFNameFormat variable is set, then this routine is
** called to run "fossil deconstruct" instead of the usual
** "fossil rebuild".  In that case, instead of rebuilding the
** cross-referencing information, write the file content out
** to the appropriate directory.
**
** In both cases, this routine automatically recurses to process
** other artifacts that are deltas off of the current artifact.
** This is the most efficient way to extract all of the original
** artifact content from the Fossil repository.
*/
static void rebuild_step(int rid, int size, Blob *pBase){
  static Stmt q1;
  Bag children;
  Blob copy;
  Blob *pUse;
  int nChild, i, cid;

  while( rid>0 ){

    /* Fix up the "blob.size" field if needed. */
    if( size!=blob_size(pBase) ){
      db_multi_exec(
         "UPDATE blob SET size=%d WHERE rid=%d", blob_size(pBase), rid
      );
    }
  
    /* Find all children of artifact rid */
    db_static_prepare(&q1, "SELECT rid FROM delta WHERE srcid=:rid");
    db_bind_int(&q1, ":rid", rid);
    bag_init(&children);
    while( db_step(&q1)==SQLITE_ROW ){
      int cid = db_column_int(&q1, 0);
      if( !bag_find(&bagDone, cid) ){
        bag_insert(&children, cid);
      }
    }
    nChild = bag_count(&children);
    db_reset(&q1);
  
    /* Crosslink the artifact */
    if( nChild==0 ){
      pUse = pBase;
    }else{
      blob_copy(&copy, pBase);
      pUse = &copy;
    }
    if( zFNameFormat==0 ){
      /* We are doing "fossil rebuild" */
      manifest_crosslink(rid, pUse, MC_NONE);
    }else{
      /* We are doing "fossil deconstruct" */
      char *zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid);
      char *zFile = mprintf(zFNameFormat, zUuid, zUuid+prefixLength);
      blob_write_to_file(pUse,zFile);
      free(zFile);
      free(zUuid);
      blob_reset(pUse);
    }
    assert( blob_is_reset(pUse) );
    rebuild_step_done(rid);
  
    /* Call all children recursively */
    rid = 0;
    for(cid=bag_first(&children), i=1; cid; cid=bag_next(&children, cid), i++){
      static Stmt q2;
      int sz;
      db_static_prepare(&q2, "SELECT content, size FROM blob WHERE rid=:rid");
      db_bind_int(&q2, ":rid", cid);
      if( db_step(&q2)==SQLITE_ROW && (sz = db_column_int(&q2,1))>=0 ){
        Blob delta, next;
        db_ephemeral_blob(&q2, 0, &delta);
        blob_uncompress(&delta, &delta);
        blob_delta_apply(pBase, &delta, &next);
        blob_reset(&delta);
        db_reset(&q2);
        if( i<nChild ){
          rebuild_step(cid, sz, &next);
        }else{
          /* Tail recursion */
          rid = cid;
          size = sz;
          blob_reset(pBase);
          *pBase = next;
        }
      }else{
        db_reset(&q2);
        blob_reset(pBase);
      }
    }
    bag_clear(&children);
  }
}
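To show where rebuild_step() fits, here is a hedged sketch of a driver loop,
not the actual rebuild code: it seeds the recursion from every artifact whose
content is stored in full rather than as a delta, reusing content_of_blob()
from Example 1 and the blob/delta tables referenced above.  The real rebuild
pass does additional bookkeeping (phantoms, shunned artifacts, progress
reporting) that is omitted here.

/*
** Hypothetical driver (illustration only): start rebuild_step() on every
** artifact that is not itself a delta of another artifact, so its full
** content can be loaded directly.  rebuild_step() then walks the delta
** descendants and clears the content blob before returning.
*/
static void rebuild_all_artifacts(void){
  Stmt s;
  db_prepare(&s,
     "SELECT rid, size FROM blob"
     " WHERE NOT EXISTS(SELECT 1 FROM delta WHERE delta.rid=blob.rid)"
  );
  while( db_step(&s)==SQLITE_ROW ){
    int rid = db_column_int(&s, 0);
    int size = db_column_int(&s, 1);
    Blob content;
    if( size>=0 && content_of_blob(rid, &content) ){
      rebuild_step(rid, size, &content);
    }
  }
  db_finalize(&s);
}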
Example 4
/*
** COMMAND: import
**
** Usage: %fossil import --git ?OPTIONS? NEW-REPOSITORY ?INPUT-FILE?
**
** Read text generated by the git-fast-export command and use it to
** construct a new Fossil repository named by the NEW-REPOSITORY
** argument.  The git-fast-export text is read from standard input,
** or from INPUT-FILE if that argument is given.
**
** The git-fast-export file format is currently the only VCS interchange
** format that is understood, though other interchange formats may be added
** in the future.
**
** The --incremental option allows an existing repository to be extended
** with new content.
**
** Options:
**   --incremental|-i   allow importing into an existing repository
**   --force|-f         delete an existing NEW-REPOSITORY file before importing
**
** See also: export
*/
void git_import_cmd(void){
  char *zPassword;
  FILE *pIn;
  Stmt q;
  int forceFlag = find_option("force", "f", 0)!=0;
  int incrFlag = find_option("incremental", "i", 0)!=0;

  find_option("git",0,0);  /* Skip the --git option for now */
  verify_all_options();
  if( g.argc!=3  && g.argc!=4 ){
    usage("REPOSITORY-NAME");
  }
  if( g.argc==4 ){
    pIn = fossil_fopen(g.argv[3], "rb");
  }else{
    pIn = stdin;
    fossil_binary_mode(pIn);
  }
  if( !incrFlag ){
    if( forceFlag ) file_delete(g.argv[2]);
    db_create_repository(g.argv[2]);
  }
  db_open_repository(g.argv[2]);
  db_open_config(0);

  /* The following temp-tables are used to hold information needed for
  ** the import.
  **
  ** The XMARK table provides a mapping from fast-import "marks" and symbols
  ** into artifact ids (UUIDs - the 40-character hex SHA1 hash of artifacts).
  ** Given any valid fast-import symbol, the corresponding fossil rid and
  ** uuid can be found by searching against the xmark.tname field.
  **
  ** The XBRANCH table maps commit marks and symbols into the branch those
  ** commits belong to.  If xbranch.tname is a fast-import symbol for a
  ** checkin then xbranch.brnm is the branch that checkin is part of.
  **
  ** The XTAG table records information about tags that need to be applied
  ** to various branches after the import finishes.  The xtag.tcontent field
  ** contains the text of an artifact that will add a tag to a check-in.
  ** The git-fast-export file format might specify the same tag multiple
  ** times but only the last tag should be used.  And we do not know which
  ** occurrence of the tag is the last until the import finishes.
  */
  db_multi_exec(
     "CREATE TEMP TABLE xmark(tname TEXT UNIQUE, trid INT, tuuid TEXT);"
     "CREATE TEMP TABLE xbranch(tname TEXT UNIQUE, brnm TEXT);"
     "CREATE TEMP TABLE xtag(tname TEXT UNIQUE, tcontent TEXT);"
  );


  db_begin_transaction();
  if( !incrFlag ) db_initial_setup(0, 0, 0, 1);
  git_fast_import(pIn);
  db_prepare(&q, "SELECT tcontent FROM xtag");
  while( db_step(&q)==SQLITE_ROW ){
    Blob record;
    db_ephemeral_blob(&q, 0, &record);
    fast_insert_content(&record, 0, 0);
    import_reset(0);
  }
  db_finalize(&q);
  db_end_transaction(0);
  db_begin_transaction();
  fossil_print("Rebuilding repository meta-data...\n");
  rebuild_db(0, 1, !incrFlag);
  verify_cancel();
  db_end_transaction(0);
  fossil_print("Vacuuming..."); fflush(stdout);
  db_multi_exec("VACUUM");
  fossil_print(" ok\n");
  if( !incrFlag ){
    fossil_print("project-id: %s\n", db_get("project-code", 0));
    fossil_print("server-id:  %s\n", db_get("server-code", 0));
    zPassword = db_text(0, "SELECT pw FROM user WHERE login=%Q", g.zLogin);
    fossil_print("admin-user: %s (password is \"%s\")\n", g.zLogin, zPassword);
  }
}
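As a small illustration of the XMARK mapping described in the comment above,
a hypothetical lookup helper, not part of the original sources, that resolves
a fast-import mark or symbol to its artifact uuid using the same db_text()
helper already used inside git_import_cmd().  It is only meaningful while the
import's temp tables exist.

/*
** Hypothetical helper (illustration only): given a fast-import mark or
** symbol, return the uuid of the corresponding artifact, or NULL if the
** symbol never appeared in the import stream.  The caller frees the
** returned string.
*/
static char *uuid_for_import_mark(const char *zMark){
  return db_text(0, "SELECT tuuid FROM xmark WHERE tname=%Q", zMark);
}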