Example #1
/*
** Make sure the content at rid is the original content and is not a
** delta.
*/
void content_undelta(int rid){
  if( findSrcid(rid)>0 ){
    Blob x;
    if( content_get(rid, &x) ){
      Stmt s;
      db_prepare(&s, "UPDATE blob SET content=:c, size=%d WHERE rid=%d",
                     blob_size(&x), rid);
      blob_compress(&x, &x);
      db_bind_blob(&s, ":c", &x);
      db_exec(&s);
      db_finalize(&s);
      blob_reset(&x);
      db_multi_exec("DELETE FROM delta WHERE rid=%d", rid);
    }
  }
}
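
The pattern above computes the size column from the uncompressed blob, then binds the compressed bytes to the :c parameter of a prepared UPDATE. A minimal standalone sketch of the same bind-a-blob pattern, using the plain SQLite C API rather than the db_* wrappers (the table layout here is only assumed to resemble the blob table used above):

#include <sqlite3.h>

/* Store nData bytes of (already compressed) content for a given rid.
** Returns an SQLite result code.  Assumes a table
** blob(rid INTEGER PRIMARY KEY, size INT, content BLOB). */
static int store_blob(sqlite3 *db, int rid, const void *pData, int nData,
                      int nUncompressed){
  sqlite3_stmt *pStmt;
  int rc = sqlite3_prepare_v2(db,
      "UPDATE blob SET content=:c, size=:sz WHERE rid=:rid", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  sqlite3_bind_blob(pStmt, sqlite3_bind_parameter_index(pStmt, ":c"),
                    pData, nData, SQLITE_TRANSIENT);
  sqlite3_bind_int(pStmt, sqlite3_bind_parameter_index(pStmt, ":sz"),
                   nUncompressed);
  sqlite3_bind_int(pStmt, sqlite3_bind_parameter_index(pStmt, ":rid"), rid);
  sqlite3_step(pStmt);                /* SQLITE_DONE on success */
  return sqlite3_finalize(pStmt);
}
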
Example #2
/*
** Load a vfile from a record ID.
*/
void load_vfile_from_rid(int vid){
  int rid, size;
  Stmt ins, ridq;
  Manifest *p;
  ManifestFile *pFile;

  if( db_exists("SELECT 1 FROM vfile WHERE vid=%d", vid) ){
    return;
  }

  db_begin_transaction();
  p = manifest_get(vid, CFTYPE_MANIFEST);
  if( p==0 ) return;
  db_multi_exec("DELETE FROM vfile WHERE vid=%d", vid);
  db_prepare(&ins,
    "INSERT INTO vfile(vid,isexe,islink,rid,mrid,pathname) "
    " VALUES(:vid,:isexe,:islink,:id,:id,:name)");
  db_prepare(&ridq, "SELECT rid,size FROM blob WHERE uuid=:uuid");
  db_bind_int(&ins, ":vid", vid);
  manifest_file_rewind(p);
  while( (pFile = manifest_file_next(p,0))!=0 ){
    if( pFile->zUuid==0 || uuid_is_shunned(pFile->zUuid) ) continue;
    db_bind_text(&ridq, ":uuid", pFile->zUuid);
    if( db_step(&ridq)==SQLITE_ROW ){
      rid = db_column_int(&ridq, 0);
      size = db_column_int(&ridq, 1);
    }else{
      rid = 0;
      size = 0;
    }
    db_reset(&ridq);
    if( rid==0 || size<0 ){
      vcs_warning("content missing for %s", pFile->zName);
      continue;
    }
    db_bind_int(&ins, ":isexe", ( manifest_file_mperm(pFile)==PERM_EXE ));
    db_bind_int(&ins, ":id", rid);
    db_bind_text(&ins, ":name", pFile->zName);
    db_bind_int(&ins, ":islink", ( manifest_file_mperm(pFile)==PERM_LNK ));
    db_step(&ins);
    db_reset(&ins);
  }
  db_finalize(&ridq);
  db_finalize(&ins);
  manifest_destroy(p);
  db_end_transaction(0);
}
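
Note the statement lifecycle used above: both statements are prepared once before the loop, then bound, stepped, and reset on every iteration, and finalized only at the end. A minimal sketch of that lifecycle with the raw SQLite C API (the schema is assumed to match the blob table queried above):

#include <stdio.h>
#include <sqlite3.h>

/* Look up the rid for each uuid in azUuid[], reusing one prepared statement. */
static void lookup_all(sqlite3 *db, const char **azUuid, int nUuid){
  sqlite3_stmt *pQ;
  int i;
  if( sqlite3_prepare_v2(db, "SELECT rid FROM blob WHERE uuid=:uuid",
                         -1, &pQ, 0)!=SQLITE_OK ) return;
  for(i=0; i<nUuid; i++){
    int rid = 0;
    sqlite3_bind_text(pQ, 1, azUuid[i], -1, SQLITE_STATIC);  /* :uuid is parameter 1 */
    if( sqlite3_step(pQ)==SQLITE_ROW ){
      rid = sqlite3_column_int(pQ, 0);
    }
    sqlite3_reset(pQ);        /* make the statement reusable on the next pass */
    printf("%s -> %d\n", azUuid[i], rid);
  }
  sqlite3_finalize(pQ);
}
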
Example #3
/*
** Begin the process of generating a tarball.
**
** Initialize the GZIP compressor and the table of directory names.
*/
static void tar_begin(sqlite3_int64 mTime){
  assert( tball.aHdr==0 );
  tball.aHdr = fossil_malloc(512+512);
  memset(tball.aHdr, 0, 512+512);
  tball.zSpaces = (char*)&tball.aHdr[512];
  /* zPrevDir init */
  tball.zPrevDir = NULL;
  tball.nPrevDirAlloc = 0;
  /* scratch buffer init */
  blob_zero(&tball.pax);

  memcpy(&tball.aHdr[108], "0000000", 8);  /* Owner ID */
  memcpy(&tball.aHdr[116], "0000000", 8);  /* Group ID */
  memcpy(&tball.aHdr[257], "ustar\00000", 8);  /* POSIX.1 format */
  memcpy(&tball.aHdr[265], "nobody", 7);   /* Owner name */
  memcpy(&tball.aHdr[297], "nobody", 7);   /* Group name */
  gzip_begin(mTime);
  db_multi_exec(
    "CREATE TEMP TABLE dir(name UNIQUE);"
  );
}
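
The offsets written above (owner id at 108, group id at 116, the ustar magic at 257, owner and group names at 265 and 297) come from the 512-byte POSIX ustar header layout. A self-contained sketch that fills the remaining per-file fields and computes the header checksum; it assumes the usual convention that numeric fields are octal strings and that the checksum field counts as eight spaces while summing:

#include <stdio.h>
#include <string.h>

/* Fill a 512-byte ustar header for a regular file and compute its checksum. */
static void tar_header(unsigned char aHdr[512], const char *zName,
                       unsigned long nSize, unsigned long mTime){
  unsigned int sum = 0;
  int i;
  memset(aHdr, 0, 512);
  strncpy((char*)&aHdr[0], zName, 100);            /* file name             */
  sprintf((char*)&aHdr[100], "%07o", 0644);        /* mode                  */
  sprintf((char*)&aHdr[108], "%07o", 0);           /* owner id              */
  sprintf((char*)&aHdr[116], "%07o", 0);           /* group id              */
  sprintf((char*)&aHdr[124], "%011lo", nSize);     /* size in bytes         */
  sprintf((char*)&aHdr[136], "%011lo", mTime);     /* modification time     */
  aHdr[156] = '0';                                 /* type: regular file    */
  memcpy(&aHdr[257], "ustar\00000", 8);            /* POSIX.1 magic+version */
  memset(&aHdr[148], ' ', 8);                      /* checksum as spaces    */
  for(i=0; i<512; i++) sum += aHdr[i];
  sprintf((char*)&aHdr[148], "%06o", sum);         /* checksum field        */
  aHdr[154] = 0;
  aHdr[155] = ' ';
}
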
Example #4
/*
** Create a temporary table suitable for storing timeline data.
*/
static void json_timeline_temp_table(void){
  /* Field order MUST match that from json_timeline_query()!!! */
  static const char zSql[] =
    @ CREATE TEMP TABLE IF NOT EXISTS json_timeline(
    @   sortId INTEGER PRIMARY KEY,
    @   rid INTEGER,
    @   uuid TEXT,
    @   mtime INTEGER,
    @   timestampString TEXT,
    @   comment TEXT,
    @   user TEXT,
    @   isLeaf BOOLEAN,
    @   bgColor TEXT,
    @   eventType TEXT,
    @   tags TEXT,
    @   tagId INTEGER,
    @   brief TEXT
    @ )
  ;
  db_multi_exec("%s", zSql /*safe-for-%s*/);
}
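
The "@" lines above are not standard C; they rely on Fossil's source translator, which rewrites them into an ordinary string literal before compilation. Written by hand, the initializer would look roughly like this sketch:

static const char zSql[] =
  "CREATE TEMP TABLE IF NOT EXISTS json_timeline(\n"
  "  sortId INTEGER PRIMARY KEY,\n"
  "  rid INTEGER,\n"
  "  uuid TEXT,\n"
  "  mtime INTEGER,\n"
  "  timestampString TEXT,\n"
  "  comment TEXT,\n"
  "  user TEXT,\n"
  "  isLeaf BOOLEAN,\n"
  "  bgColor TEXT,\n"
  "  eventType TEXT,\n"
  "  tags TEXT,\n"
  "  tagId INTEGER,\n"
  "  brief TEXT\n"
  ")";
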
Example #5
/*
** Figure out what user is at the controls.
**
**   (1)  Use the --user and -U command-line options.
**
**   (2)  If the local database is open, check in VVAR.
**
**   (3)  Check the default user in the repository
**
**   (4)  Try the USER environment variable.
**
**   (5)  Use the first user in the USER table.
**
** The user name is stored in g.zLogin.  The uid is in g.userUid.
*/
void user_select(void){
  Stmt s;

  if( g.userUid ) return;
  if( attempt_user(g.zLogin) ) return;

  if( g.localOpen && attempt_user(db_lget("default-user",0)) ) return;

  if( attempt_user(db_get("default-user", 0)) ) return;

  if( attempt_user(getenv("USER")) ) return;

  db_prepare(&s,
    "SELECT uid, login FROM user"
    " WHERE login NOT IN ('anonymous','nobody','reader','developer')"
  );
  if( db_step(&s)==SQLITE_ROW ){
    g.userUid = db_column_int(&s, 0);
    g.zLogin = mprintf("%s", db_column_text(&s, 1));
  }
  db_finalize(&s);

  if( g.userUid==0 ){
    db_prepare(&s, "SELECT uid, login FROM user");
    if( db_step(&s)==SQLITE_ROW ){
      g.userUid = db_column_int(&s, 0);
      g.zLogin = mprintf("%s", db_column_text(&s, 1));
    }
    db_finalize(&s);
  }

  if( g.userUid==0 ){
    db_multi_exec(
      "INSERT INTO user(login, pw, cap, info)"
      "VALUES('anonymous', '', 'cfghjkmnoqw', '')"
    );
    g.userUid = db_last_insert_rowid();
    g.zLogin = "anonymous";
  }
}
Example #6
/*
** Attach a bundle file to the current database connection using the
** attachment name zBName.
*/
static void bundle_attach_file(
  const char *zFile,       /* Name of the file that contains the bundle */
  const char *zBName,      /* Attachment name */
  int doInit               /* Initialize a new bundle, if true */
){
  int rc;
  char *zErrMsg = 0;
  char *zSql;
  if( !doInit && file_size(zFile)<0 ){
    fossil_fatal("no such file: %s", zFile);
  }
  assert( g.db );
  zSql = sqlite3_mprintf("ATTACH %Q AS %Q", zFile, zBName);
  if( zSql==0 ) fossil_fatal("out of memory");
  rc = sqlite3_exec(g.db, zSql, 0, 0, &zErrMsg);
  sqlite3_free(zSql);
  if( rc!=SQLITE_OK || zErrMsg ){
    if( zErrMsg==0 ) zErrMsg = (char*)sqlite3_errmsg(g.db);
    fossil_fatal("not a valid bundle: %s", zFile);
  }
  if( doInit ){
    db_multi_exec(zBundleInit /*works-like:"%w%w"*/, zBName, zBName);
  }else{
    sqlite3_stmt *pStmt;
    zSql = sqlite3_mprintf("SELECT bcname, bcvalue"
                           "  FROM \"%w\".bconfig", zBName);
    if( zSql==0 ) fossil_fatal("out of memory");
    rc = sqlite3_prepare(g.db, zSql, -1, &pStmt, 0);
    if( rc ) fossil_fatal("not a valid bundle: %s", zFile);
    sqlite3_free(zSql);
    sqlite3_finalize(pStmt);
    zSql = sqlite3_mprintf("SELECT blobid, uuid, sz, delta, notes, data"
                           "  FROM \"%w\".bblob", zBName);
    if( zSql==0 ) fossil_fatal("out of memory");
    rc = sqlite3_prepare(g.db, zSql, -1, &pStmt, 0);
    if( rc ) fossil_fatal("not a valid bundle: %s", zFile);
    sqlite3_free(zSql);
    sqlite3_finalize(pStmt);
  }
}
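
The validation strategy above is worth noting: after the ATTACH, the file is accepted as a bundle only if SELECT statements against the expected tables can be prepared. A compact standalone sketch of the same idea with the raw SQLite API (the bblob column list is trimmed from the code above; everything else is illustrative):

#include <sqlite3.h>

/* Attach zFile under the name zAlias and verify that it contains a table
** that looks like "bblob".  Returns 1 if the file passes, 0 otherwise. */
static int attach_and_check(sqlite3 *db, const char *zFile, const char *zAlias){
  char *zSql;
  sqlite3_stmt *pStmt = 0;
  int ok = 0;
  zSql = sqlite3_mprintf("ATTACH %Q AS %Q", zFile, zAlias);
  if( zSql==0 ) return 0;
  if( sqlite3_exec(db, zSql, 0, 0, 0)!=SQLITE_OK ){
    sqlite3_free(zSql);
    return 0;
  }
  sqlite3_free(zSql);
  zSql = sqlite3_mprintf("SELECT blobid, uuid, data FROM \"%w\".bblob", zAlias);
  if( zSql && sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)==SQLITE_OK ){
    ok = 1;                 /* schema looks right; no need to run the query */
  }
  sqlite3_finalize(pStmt);  /* harmless no-op when pStmt is NULL */
  sqlite3_free(zSql);
  return ok;
}
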
Example #7
/*
** Update an entry of the TICKET table according to the information
** in the control file given in p.  Attempt to create the appropriate
** TICKET table entry if createFlag is true.  If createFlag is false,
** that means we already know the entry exists and so we can save the
** work of trying to create it.
**
** Return TRUE if a new TICKET entry was created and FALSE if an
** existing entry was revised.
*/
int ticket_insert(const Manifest *p, int createFlag, int rid){
  Blob sql;
  Stmt q;
  int i;
  const char *zSep;
  int rc = 0;

  getAllTicketFields();
  if( createFlag ){  
    db_multi_exec("INSERT OR IGNORE INTO ticket(tkt_uuid, tkt_mtime) "
                  "VALUES(%Q, 0)", p->zTicketUuid);
    rc = db_changes();
  }
  blob_zero(&sql);
  blob_appendf(&sql, "UPDATE OR REPLACE ticket SET tkt_mtime=:mtime");
  zSep = "SET";
  for(i=0; i<p->nField; i++){
    const char *zName = p->aField[i].zName;
    if( zName[0]=='+' ){
      zName++;
      if( fieldId(zName)<0 ) continue;
      blob_appendf(&sql,", %s=coalesce(%s,'') || %Q",
                   zName, zName, p->aField[i].zValue);
    }else{
      if( fieldId(zName)<0 ) continue;
      blob_appendf(&sql,", %s=%Q", zName, p->aField[i].zValue);
    }
    if( rid>0 ){
      wiki_extract_links(p->aField[i].zValue, rid, 1, p->rDate, i==0, 0);
    }
  }
  blob_appendf(&sql, " WHERE tkt_uuid='%s' AND tkt_mtime<:mtime",
                     p->zTicketUuid);
  db_prepare(&q, "%s", blob_str(&sql));
  db_bind_double(&q, ":mtime", p->rDate);
  db_step(&q);
  db_finalize(&q);
  blob_reset(&sql);
  return rc;
}
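
The interesting part above is how the UPDATE is assembled: ordinary fields become field=%Q, while fields whose names begin with '+' are appended to the existing value via coalesce(field,'') || %Q. A small sketch of that append-versus-replace choice using sqlite3_mprintf's %q quoting (field names and values are made up; a real caller would also validate the field name, as fieldId() does above):

#include <stdio.h>
#include <sqlite3.h>

/* Build either "SET f='v'" or "SET f=coalesce(f,'') || 'v'" depending on
** whether the field name carries a leading '+'. */
static char *set_clause(const char *zField, const char *zValue){
  if( zField[0]=='+' ){
    zField++;
    return sqlite3_mprintf("SET %s=coalesce(%s,'') || '%q'",
                           zField, zField, zValue);
  }else{
    return sqlite3_mprintf("SET %s='%q'", zField, zValue);
  }
}

int main(void){
  char *z1 = set_clause("status", "Open");
  char *z2 = set_clause("+comment", "It's worse than we thought");
  printf("%s\n%s\n", z1, z2);   /* note the doubled quote in the second line */
  sqlite3_free(z1);
  sqlite3_free(z2);
  return 0;
}
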
Example #8
/*
** Add all files in the sfile temp table.
**
** Automatically exclude the repository file.
*/
static int add_files_in_sfile(int vid, int caseSensitive){
  const char *zRepo;        /* Name of the repository database file */
  int nAdd = 0;             /* Number of files added */
  int i;                    /* Loop counter */
  const char *zReserved;    /* Name of a reserved file */
  Blob repoName;            /* Treename of the repository */
  Stmt loop;                /* SQL to loop over all files to add */
  int (*xCmp)(const char*,const char*);
 
  if( !file_tree_name(g.zRepositoryName, &repoName, 0) ){
    blob_zero(&repoName);
    zRepo = "";
  }else{
    zRepo = blob_str(&repoName);
  }
  if( caseSensitive ){
    xCmp = fossil_strcmp;
  }else{
    xCmp = fossil_stricmp;
    db_multi_exec(
      "CREATE INDEX IF NOT EXISTS vfile_nocase"
      "    ON vfile(pathname COLLATE nocase)"
    );
  }
  db_prepare(&loop, "SELECT x FROM sfile ORDER BY x");
  while( db_step(&loop)==SQLITE_ROW ){
    const char *zToAdd = db_column_text(&loop, 0);
    if( fossil_strcmp(zToAdd, zRepo)==0 ) continue;
    for(i=0; (zReserved = fossil_reserved_name(i))!=0; i++){
      if( xCmp(zToAdd, zReserved)==0 ) break;
    }
    if( zReserved ) continue;
    nAdd += add_one_file(zToAdd, vid, caseSensitive);
  }
  db_finalize(&loop);
  blob_reset(&repoName);
  return nAdd;
}
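
Case sensitivity above is handled by selecting a comparator once and calling it through the function pointer xCmp inside the loop. A trivial standalone sketch of the same technique with the standard library (strcasecmp is POSIX; the reserved names are sample data for illustration):

#include <stdio.h>
#include <string.h>
#include <strings.h>   /* strcasecmp (POSIX) */

static int match_reserved(const char *zName, int caseSensitive){
  static const char *azReserved[] = { "_FOSSIL_", ".fslckout", ".fos" };
  int (*xCmp)(const char*, const char*) = caseSensitive ? strcmp : strcasecmp;
  unsigned int i;
  for(i=0; i<sizeof(azReserved)/sizeof(azReserved[0]); i++){
    if( xCmp(zName, azReserved[i])==0 ) return 1;
  }
  return 0;
}

int main(void){
  printf("%d %d\n", match_reserved(".FSLCKOUT", 0), match_reserved(".FSLCKOUT", 1));
  return 0;
}
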
Example #9
/*
** Rebuild an entire entry in the TICKET table
*/
void ticket_rebuild_entry(const char *zTktUuid){
  char *zTag = mprintf("tkt-%s", zTktUuid);
  int tagid = tag_findid(zTag, 1);
  Stmt q;
  Manifest manifest;
  Blob content;
  int createFlag = 1;
  
  db_multi_exec(
     "DELETE FROM ticket WHERE tkt_uuid=%Q", zTktUuid
  );
  db_prepare(&q, "SELECT rid FROM tagxref WHERE tagid=%d ORDER BY mtime",tagid);
  while( db_step(&q)==SQLITE_ROW ){
    int rid = db_column_int(&q, 0);
    content_get(rid, &content);
    manifest_parse(&manifest, &content);
    ticket_insert(&manifest, createFlag, rid);
    manifest_ticket_event(rid, &manifest, createFlag, tagid);
    manifest_clear(&manifest);
    createFlag = 0;
  }
  db_finalize(&q);
}
Example #10
/*
** Recursively add directory entries for the given file if those
** directories have not previously been seen.
*/
static void tar_add_directory_of(
  const char *zName,      /* Name of directory including final "/" */
  int nName,              /* Characters in zName */
  unsigned int mTime      /* Modification time */
){
  int i;
  for(i=nName-1; i>0 && zName[i]!='/'; i--){}
  if( i<=0 ) return;
  if( i < tball.nPrevDirAlloc && tball.zPrevDir[i]==0 &&
        memcmp(tball.zPrevDir, zName, i)==0 ) return;
  db_multi_exec("INSERT OR IGNORE INTO dir VALUES('%#q')", i, zName);
  if( sqlite3_changes(g.db)==0 ) return;
  tar_add_directory_of(zName, i-1, mTime);
  tar_add_header(zName, i, 0755, mTime, 0, '5');
  if( i >= tball.nPrevDirAlloc ){
    int nsize = tball.nPrevDirAlloc * 2;
    if(i+1 > nsize)
      nsize = i+1;
    tball.zPrevDir = fossil_realloc(tball.zPrevDir, nsize);
    tball.nPrevDirAlloc = nsize;
  }
  memcpy(tball.zPrevDir, zName, i);
  tball.zPrevDir[i] = 0;
}
Example #11
/*
** Rebuild an entire entry in the TICKET table
*/
void ticket_rebuild_entry(const char *zTktUuid){
  char *zTag = mprintf("tkt-%s", zTktUuid);
  int tagid = tag_findid(zTag, 1);
  Stmt q;
  Manifest *pTicket;
  int createFlag = 1;

  fossil_free(zTag);  
  db_multi_exec(
     "DELETE FROM ticket WHERE tkt_uuid=%Q", zTktUuid
  );
  db_prepare(&q, "SELECT rid FROM tagxref WHERE tagid=%d ORDER BY mtime",tagid);
  while( db_step(&q)==SQLITE_ROW ){
    int rid = db_column_int(&q, 0);
    pTicket = manifest_get(rid, CFTYPE_TICKET);
    if( pTicket ){
      ticket_insert(pTicket, createFlag, rid);
      manifest_ticket_event(rid, pTicket, createFlag, tagid);
      manifest_destroy(pTicket);
    }
    createFlag = 0;
  }
  db_finalize(&q);
}
Example #12
/*
** Create a temporary table named "leaves" if it does not
** already exist.  Load this table with the RID of all
** check-ins that are leaves which are descended from
** check-in iBase.
**
** A "leaf" is a check-in that has no children in the same branch.
** There is a separate permanent table LEAF that contains all leaves
** in the tree.  This routine is used to compute a subset of that
** table consisting of leaves that are descended from a single checkin.
**
** The closeMode flag determines behavior associated with the "closed"
** tag:
**
**    closeMode==0       Show all leaves regardless of the "closed" tag.
**
**    closeMode==1       Show only leaves without the "closed" tag.
**
**    closeMode==2       Show only leaves with the "closed" tag.
**
** The default behavior is to show all leaves regardless of the "closed"
** tag (closeMode==0).  To hide closed leaves, use closeMode==1.  To show
** only closed leaves, use closeMode==2.
*/
void compute_leaves(int iBase, int closeMode){

  /* Create the LEAVES table if it does not already exist.  Make sure
  ** it is empty.
  */
  db_multi_exec(
    "CREATE TEMP TABLE IF NOT EXISTS leaves("
    "  rid INTEGER PRIMARY KEY"
    ");"
    "DELETE FROM leaves;"
  );

  if( iBase>0 ){
    Bag seen;     /* Descendants seen */
    Bag pending;  /* Unpropagated descendants */
    Stmt q1;      /* Query to find children of a check-in */
    Stmt isBr;    /* Query to check to see if a check-in starts a new branch */
    Stmt ins;     /* INSERT statement for a new record */

    /* Initialize the bags. */
    bag_init(&seen);
    bag_init(&pending);
    bag_insert(&pending, iBase);

    /* This query returns all non-branch-merge children of check-in :rid.
    **
    ** If a child is a merge of a fork within the same branch, it is
    ** returned.  Only merge children in different branches are excluded.
    */
    db_prepare(&q1,
      "SELECT cid FROM plink"
      " WHERE pid=:rid"
      "   AND (isprim"
      "        OR coalesce((SELECT value FROM tagxref"
                        "   WHERE tagid=%d AND rid=plink.pid), 'trunk')"
                 "=coalesce((SELECT value FROM tagxref"
                        "   WHERE tagid=%d AND rid=plink.cid), 'trunk'))",
      TAG_BRANCH, TAG_BRANCH
    );

    /* This query returns a single row if check-in :rid is the first
    ** check-in of a new branch.
    */
    db_prepare(&isBr,
       "SELECT 1 FROM tagxref"
       " WHERE rid=:rid AND tagid=%d AND tagtype=2"
       "   AND srcid>0",
       TAG_BRANCH
    );

    /* This statement inserts check-in :rid into the LEAVES table.
    */
    db_prepare(&ins, "INSERT OR IGNORE INTO leaves VALUES(:rid)");

    while( bag_count(&pending) ){
      int rid = bag_first(&pending);
      int cnt = 0;
      bag_remove(&pending, rid);
      db_bind_int(&q1, ":rid", rid);
      while( db_step(&q1)==SQLITE_ROW ){
        int cid = db_column_int(&q1, 0);
        if( bag_insert(&seen, cid) ){
          bag_insert(&pending, cid);
        }
        db_bind_int(&isBr, ":rid", cid);
        if( db_step(&isBr)==SQLITE_DONE ){
          cnt++;
        }
        db_reset(&isBr);
      }
      db_reset(&q1);
      if( cnt==0 && !is_a_leaf(rid) ){
        cnt++;
      }
      if( cnt==0 ){
        db_bind_int(&ins, ":rid", rid);
        db_step(&ins);
        db_reset(&ins);
      }
    }
    db_finalize(&ins);
    db_finalize(&isBr);
    db_finalize(&q1);
    bag_clear(&pending);
    bag_clear(&seen);
  }
  if( closeMode==1 ){
    db_multi_exec(
      "DELETE FROM leaves WHERE rid IN"
      "  (SELECT leaves.rid FROM leaves, tagxref"
      "    WHERE tagxref.rid=leaves.rid "
      "      AND tagxref.tagid=%d"
      "      AND tagxref.tagtype>0)",
      TAG_CLOSED
    );
  }else if( closeMode==2 ){
    db_multi_exec(
      "DELETE FROM leaves WHERE rid NOT IN"
      "  (SELECT leaves.rid FROM leaves, tagxref"
      "    WHERE tagxref.rid=leaves.rid "
      "      AND tagxref.tagid=%d"
      "      AND tagxref.tagtype>0)",
      TAG_CLOSED
    );
  }
}
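
The traversal above is a worklist walk over the descendants of iBase: the "pending" bag holds check-ins still to be expanded, the "seen" bag prevents revisiting, and a check-in is recorded as a leaf when it contributes no qualifying children. A self-contained sketch of the same frontier/seen pattern over a toy child matrix (the branch and closed-tag tests are omitted, and all data is made up):

#include <stdio.h>
#include <string.h>

#define N 8   /* toy graph: node ids 0..N-1 */

/* aChild[p][c] is nonzero when c is a child of p. */
static const int aChild[N][N] = {
  /*0*/ {0,1,1,0,0,0,0,0},
  /*1*/ {0,0,0,1,0,0,0,0},
  /*2*/ {0,0,0,0,1,1,0,0},
  /*3*/ {0,0,0,0,0,0,0,0},
  /*4*/ {0,0,0,0,0,0,1,0},
  /*5*/ {0,0,0,0,0,0,0,0},
  /*6*/ {0,0,0,0,0,0,0,1},
  /*7*/ {0,0,0,0,0,0,0,0},
};

int main(void){
  int pending[N], nPending = 0;
  int seen[N];
  memset(seen, 0, sizeof(seen));
  pending[nPending++] = 0;          /* start from node 0 (the "iBase") */
  seen[0] = 1;
  while( nPending>0 ){
    int rid = pending[--nPending];  /* take one unexpanded node */
    int c, cnt = 0;
    for(c=0; c<N; c++){
      if( aChild[rid][c] ){
        cnt++;
        if( !seen[c] ){ seen[c] = 1; pending[nPending++] = c; }
      }
    }
    if( cnt==0 ) printf("leaf: %d\n", rid);
  }
  return 0;
}
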
Example #13
/*
** COMMAND: mv
** COMMAND: rename*
**
** Usage: %fossil mv|rename OLDNAME NEWNAME
**    or: %fossil mv|rename OLDNAME... DIR
**
** Move or rename one or more files or directories within the repository tree.
** You can either rename a file or directory or move it to another subdirectory.
**
** This command does NOT rename or move the files on disk.  This command merely
** records the fact that filenames have changed so that appropriate notations
** can be made at the next commit/checkin.
**
** See also: changes, status
*/
void mv_cmd(void){
  int i;
  int vid;
  char *zDest;
  Blob dest;
  Stmt q;

  db_must_be_within_tree();
  vid = db_lget_int("checkout", 0);
  if( vid==0 ){
    fossil_panic("no checkout to rename files in");
  }
  if( g.argc<4 ){
    usage("OLDNAME NEWNAME");
  }
  zDest = g.argv[g.argc-1];
  db_begin_transaction();
  file_tree_name(zDest, &dest, 1);
  db_multi_exec(
    "UPDATE vfile SET origname=pathname WHERE origname IS NULL;"
  );
  db_multi_exec(
    "CREATE TEMP TABLE mv(f TEXT UNIQUE ON CONFLICT IGNORE, t TEXT);"
  );
  if( file_wd_isdir(zDest)!=1 ){
    Blob orig;
    if( g.argc!=4 ){
      usage("OLDNAME NEWNAME");
    }
    file_tree_name(g.argv[2], &orig, 1);
    db_multi_exec(
      "INSERT INTO mv VALUES(%B,%B)", &orig, &dest
    );
  }else{
    if( blob_eq(&dest, ".") ){
      blob_reset(&dest);
    }else{
      blob_append(&dest, "/", 1);
    }
    for(i=2; i<g.argc-1; i++){
      Blob orig;
      char *zOrig;
      int nOrig;
      file_tree_name(g.argv[i], &orig, 1);
      zOrig = blob_str(&orig);
      nOrig = blob_size(&orig);
      db_prepare(&q,
         "SELECT pathname FROM vfile"
         " WHERE vid=%d"
         "   AND (pathname='%q' OR (pathname>'%q/' AND pathname<'%q0'))"
         " ORDER BY 1",
         vid, zOrig, zOrig, zOrig
      );
      while( db_step(&q)==SQLITE_ROW ){
        const char *zPath = db_column_text(&q, 0);
        int nPath = db_column_bytes(&q, 0);
        const char *zTail;
        if( nPath==nOrig ){
          zTail = file_tail(zPath);
        }else{
          zTail = &zPath[nOrig+1];
        }
        db_multi_exec(
          "INSERT INTO mv VALUES('%q','%q%q')",
          zPath, blob_str(&dest), zTail
        );
      }
      db_finalize(&q);
    }
  }
  db_prepare(&q, "SELECT f, t FROM mv ORDER BY f");
  while( db_step(&q)==SQLITE_ROW ){
    const char *zFrom = db_column_text(&q, 0);
    const char *zTo = db_column_text(&q, 1);
    mv_one_file(vid, zFrom, zTo);
  }
  db_finalize(&q);
  db_end_transaction(0);
}
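
One detail above deserves a note: the clause pathname>'%q/' AND pathname<'%q0' selects every path strictly underneath a directory by exploiting the fact that '0' is the ASCII character immediately after '/', so the half-open range covers exactly the names with that directory prefix. A standalone sketch of the range trick against an in-memory database with sample rows:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pQ;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE TABLE vfile(pathname TEXT);"
    "INSERT INTO vfile VALUES('src/main.c'),('src/util/io.c'),"
    "                        ('srcdoc.txt'),('README');", 0, 0, 0);
  /* '0' is the ASCII character right after '/', so this half-open range
  ** matches 'src/...' but not 'srcdoc.txt'. */
  sqlite3_prepare_v2(db,
    "SELECT pathname FROM vfile"
    " WHERE pathname>'src/' AND pathname<'src0' ORDER BY 1", -1, &pQ, 0);
  while( sqlite3_step(pQ)==SQLITE_ROW ){
    printf("%s\n", (const char*)sqlite3_column_text(pQ, 0));
  }
  sqlite3_finalize(pQ);
  sqlite3_close(db);
  return 0;
}
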
Example #14
/*
** Look at every VFILE entry with the given vid and update the
** VFILE.CHNGED field on every file according to whether or not
** the file has changes.  0 means no change.  1 means edited.  2 means
** the file has changed due to a merge.  3 means the file was added
** by a merge.
**
** If VFILE.DELETED is true or if VFILE.RID is zero, then the file was
** either removed from management via "vcs rm" or added via
** "vcs add", respectively, and in both cases we always know that
** the file has changed without having to check the size, mtime,
** or on-disk content.
**
** If the size of the file has changed, then we always know that the file
** changed without having to look at the mtime or on-disk content.
**
** The mtime of the file is only a factor if the mtime-changes setting
** is true (the default) and the useSha1sum flag is false.  If the
** mtime-changes setting is false or if useSha1sum is true, then we do
** not trust the mtime and will examine the on-disk content to determine
** if a file really is the same.
**
** If the mtime is used, it is used only to determine if files are the same.
** If the mtime of a file has changed, we still examine the on-disk content
** to see whether or not the edit was a null-edit.
*/
void vfile_check_signature(int vid, int notFileIsFatal, int useSha1sum){
  int nErr = 0;
  Stmt q;
  Blob fileCksum, origCksum;
  int useMtime = useSha1sum==0 && db_get_boolean("mtime-changes", 1);

  db_begin_transaction();
  db_prepare(&q, "SELECT id, %Q || pathname,"
                 "       vfile.mrid, deleted, chnged, uuid, size, mtime"
                 "  FROM vfile LEFT JOIN blob ON vfile.mrid=blob.rid"
                 " WHERE vid=%d ", g.zLocalRoot, vid);
  while( db_step(&q)==SQLITE_ROW ){
    int id, rid, isDeleted;
    const char *zName;
    int chnged = 0;
    int oldChnged;
    i64 oldMtime;
    i64 currentMtime;
    i64 origSize;
    i64 currentSize;

    id = db_column_int(&q, 0);
    zName = db_column_text(&q, 1);
    rid = db_column_int(&q, 2);
    isDeleted = db_column_int(&q, 3);
    oldChnged = chnged = db_column_int(&q, 4);
    oldMtime = db_column_int64(&q, 7);
    currentSize = file_wd_size(zName);
    origSize = db_column_int64(&q, 6);
    /* passing 0 reuses the stat() already done by file_wd_size() above */
    currentMtime = file_wd_mtime(0);
    if( chnged==0 && (isDeleted || rid==0) ){
      /* "vcs rm" or "vcs add" always change the file */
      chnged = 1;
    }else if( !file_wd_isfile_or_link(0) && currentSize>=0 ){
      if( notFileIsFatal ){
        vcs_warning("not an ordinary file: %s", zName);
        nErr++;
      }
      chnged = 1;
    }
    if( origSize!=currentSize ){
      if( chnged!=1 ){
        /* A file size change is definitive - the file has changed.  No
        ** need to check the mtime or sha1sum */
        chnged = 1;
      }
    }else if( chnged==1 && rid!=0 && !isDeleted ){
      /* File is believed to have changed but it is the same size.
      ** Double check that it really has changed by looking at content. */
      assert( origSize==currentSize );
      db_ephemeral_blob(&q, 5, &origCksum);
      if( sha1sum_file(zName, &fileCksum) ){
        blob_zero(&fileCksum);
      }
      if( blob_compare(&fileCksum, &origCksum)==0 ) chnged = 0;
      blob_reset(&origCksum);
      blob_reset(&fileCksum);
    }else if( chnged==0 && (useMtime==0 || currentMtime!=oldMtime) ){
      /* For files that were formerly believed to be unchanged, if their
      ** mtime changes, or unconditionally if --sha1sum is used, check
      ** to see if they have been edited by looking at their SHA1 sum */
      assert( origSize==currentSize );
      db_ephemeral_blob(&q, 5, &origCksum);
      if( sha1sum_file(zName, &fileCksum) ){
        blob_zero(&fileCksum);
      }
      if( blob_compare(&fileCksum, &origCksum) ){
        chnged = 1;
      }
      blob_reset(&origCksum);
      blob_reset(&fileCksum);
    }
    if( currentMtime!=oldMtime || chnged!=oldChnged ){
      db_multi_exec("UPDATE vfile SET mtime=%lld, chnged=%d WHERE id=%d",
                    currentMtime, chnged, id);
    }
  }
  db_finalize(&q);
  if( nErr ) vcs_fatal("abort due to prior errors");
  db_end_transaction(0);
}
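
The decision ladder above, size first, then mtime, then content only when needed, generalizes beyond this routine. A self-contained sketch of the same ladder using stat(2), with the expensive content comparison left as a stub (same_content() is a placeholder, not a function from the code above):

#include <sys/stat.h>

/* Placeholder for an expensive byte-for-byte or checksum comparison. */
static int same_content(const char *zPath){ (void)zPath; return 1; }

/* Return 1 if the file should be treated as changed.
** knownSize/knownMtime are the values recorded at the last check-in;
** trustMtime mirrors the "mtime-changes" setting. */
static int file_changed(const char *zPath, long long knownSize,
                        long long knownMtime, int trustMtime){
  struct stat st;
  if( stat(zPath, &st)!=0 ) return 1;                /* missing: changed   */
  if( (long long)st.st_size!=knownSize ) return 1;   /* size differs       */
  if( trustMtime && (long long)st.st_mtime==knownMtime ){
    return 0;                                        /* same size + mtime  */
  }
  return !same_content(zPath);                       /* fall back to bytes */
}
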
Example #15
/*
** Write all files from vid to the disk.  Or if vid==0 and id!=0
** write just the specific file where VFILE.ID=id.
*/
void vfile_to_disk(
  int vid,               /* vid to write to disk */
  int id,                /* Write this one file, if not zero */
  int verbose,           /* Output progress information */
  int promptFlag         /* Prompt user to confirm overwrites */
){
  Stmt q;
  Blob content;
  int nRepos = strlen(g.zLocalRoot);

  if( vid>0 && id==0 ){
    db_prepare(&q, "SELECT id, %Q || pathname, mrid, isexe, islink"
                   "  FROM vfile"
                   " WHERE vid=%d AND mrid>0",
                   g.zLocalRoot, vid);
  }else{
    assert( vid==0 && id>0 );
    db_prepare(&q, "SELECT id, %Q || pathname, mrid, isexe, islink"
                   "  FROM vfile"
                   " WHERE id=%d AND mrid>0",
                   g.zLocalRoot, id);
  }
  while( db_step(&q)==SQLITE_ROW ){
    int id, rid, isExe, isLink;
    const char *zName;

    id = db_column_int(&q, 0);
    zName = db_column_text(&q, 1);
    rid = db_column_int(&q, 2);
    isExe = db_column_int(&q, 3);
    isLink = db_column_int(&q, 4);
    content_get(rid, &content);
    if( file_is_the_same(&content, zName) ){
      blob_reset(&content);
      if( file_wd_setexe(zName, isExe) ){
        db_multi_exec("UPDATE vfile SET mtime=%lld WHERE id=%d",
                      file_wd_mtime(zName), id);
      }
      continue;
    }
    if( promptFlag && file_wd_size(zName)>=0 ){
      Blob ans;
      char *zMsg;
      char cReply;
      zMsg = mprintf("overwrite %s (a=always/y/N)? ", zName);
      prompt_user(zMsg, &ans);
      free(zMsg);
      cReply = blob_str(&ans)[0];
      blob_reset(&ans);
      if( cReply=='a' || cReply=='A' ){
        promptFlag = 0;
        cReply = 'y';
      }
      if( cReply=='n' || cReply=='N' ){
        blob_reset(&content);
        continue;
      }
    }
    if( verbose ) vcs_print("%s\n", &zName[nRepos]);
    if( file_wd_isdir(zName) == 1 ){
      /*TODO(dchest): remove directories? */
      vcs_fatal("%s is directory, cannot overwrite\n", zName);
    }    
    if( file_wd_size(zName)>=0 && (isLink || file_wd_islink(zName)) ){
      file_delete(zName);
    }
    if( isLink ){
      symlink_create(blob_str(&content), zName);
    }else{
      blob_write_to_file(&content, zName);
    }
    file_wd_setexe(zName, isExe);
    blob_reset(&content);
    db_multi_exec("UPDATE vfile SET mtime=%lld WHERE id=%d",
                  file_wd_mtime(zName), id);
  }
  db_finalize(&q);
}
Example #16
/*
** COMMAND: add
**
** Usage: %fossil add ?OPTIONS? FILE1 ?FILE2 ...?
**
** Make arrangements to add one or more files or directories to the
** current checkout at the next commit.
**
** When adding files or directories recursively, filenames that begin
** with "." are excluded by default.  To include such files, add
** the "--dotfiles" option to the command-line.
**
** The --ignore option is a comma-separated list of glob patterns for files
** to be excluded.  Example:  '*.o,*.obj,*.exe'.  If the --ignore option
** does not appear on the command line then the "ignore-glob" setting is
** used.
**
** The --case-sensitive option determines whether or not filenames are
** treated as case-sensitive.  If the option is not given, the default
** depends on the global setting, or on the operating system default if
** that setting is not present.
**
** Options:
**
**    --case-sensitive <BOOL> override case-sensitive setting
**    --dotfiles              include files beginning with a dot (".")   
**    --ignore <CSG>          ignore files matching patterns from the 
**                            comma separated list of glob patterns.
** 
** See also: addremove, rm
*/
void add_cmd(void){
  int i;                     /* Loop counter */
  int vid;                   /* Currently checked out version */
  int nRoot;                 /* Full path characters in g.zLocalRoot */
  const char *zIgnoreFlag;   /* The --ignore option or ignore-glob setting */
  Glob *pIgnore;             /* Ignore everything matching this glob pattern */
  int caseSensitive;         /* True if filenames are case sensitive */
  unsigned scanFlags = 0;    /* Flags passed to vfile_scan() */

  zIgnoreFlag = find_option("ignore",0,1);
  if( find_option("dotfiles",0,0)!=0 ) scanFlags |= SCAN_ALL;
  capture_case_sensitive_option();
  db_must_be_within_tree();
  caseSensitive = filenames_are_case_sensitive();
  if( zIgnoreFlag==0 ){
    zIgnoreFlag = db_get("ignore-glob", 0);
  }
  vid = db_lget_int("checkout",0);
  if( vid==0 ){
    fossil_panic("no checkout to add to");
  }
  db_begin_transaction();
  db_multi_exec("CREATE TEMP TABLE sfile(x TEXT PRIMARY KEY)");
#if defined(_WIN32)
  db_multi_exec(
     "CREATE INDEX IF NOT EXISTS vfile_pathname "
     "  ON vfile(pathname COLLATE nocase)"
  );
#endif
  pIgnore = glob_create(zIgnoreFlag);
  nRoot = strlen(g.zLocalRoot);
  
  /* Load the names of all files that are to be added into sfile temp table */
  for(i=2; i<g.argc; i++){
    char *zName;
    int isDir;
    Blob fullName;

    file_canonical_name(g.argv[i], &fullName, 0);
    zName = blob_str(&fullName);
    isDir = file_wd_isdir(zName);
    if( isDir==1 ){
      vfile_scan(&fullName, nRoot-1, scanFlags, pIgnore);
    }else if( isDir==0 ){
      fossil_warning("not found: %s", zName);
    }else if( file_access(zName, R_OK) ){
      fossil_fatal("cannot open %s", zName);
    }else{
      char *zTreeName = &zName[nRoot];
      db_multi_exec(
         "INSERT OR IGNORE INTO sfile(x) VALUES(%Q)",
         zTreeName
      );
    }
    blob_reset(&fullName);
  }
  glob_free(pIgnore);

  add_files_in_sfile(vid, caseSensitive);
  db_end_transaction(0);
}
Example #17
/*
** COMMAND: merge
**
** Usage: %fossil merge ?OPTIONS? ?VERSION?
**
** The argument VERSION is a version that should be merged into the
** current checkout.  All changes from VERSION back to the nearest
** common ancestor are merged.  However, if either the --cherrypick or
** --backout option is used, only the changes associated with the
** single check-in VERSION are merged.  The --backout option causes
** the changes associated with VERSION to be removed from the current
** checkout rather than added.
**
** If the VERSION argument is omitted, then Fossil attempts to find
** a recent fork on the current branch to merge.
**
** Only file content is merged.  The result continues to use the
** file and directory names from the current checkout even if those
** names might have been changed in the branch being merged in.
**
** Other options:
**
**   --baseline BASELINE     Use BASELINE as the "pivot" of the merge instead
**                           of the nearest common ancestor.  This allows
**                           a sequence of changes in a branch to be merged
**                           without having to merge the entire branch.
**
**   --binary GLOBPATTERN    Treat files that match GLOBPATTERN as binary
**                           and do not try to merge parallel changes.  This
**                           option overrides the "binary-glob" setting.
**
**   --case-sensitive BOOL   Override the case-sensitive setting.  If false,
**                           files whose names differ only in case are taken
**                           to be the same file.
**
**   -f|--force              Force the merge even if it would be a no-op.
**
**   --force-missing         Force the merge even if there is missing content.
**
**   --integrate             Merged branch will be closed when committing.
**
**   -n|--dry-run            If given, display instead of run actions
**
**   -v|--verbose            Show additional details of the merge
*/
void merge_cmd(void){
  int vid;              /* Current version "V" */
  int mid;              /* Version we are merging from "M" */
  int pid;              /* The pivot version - most recent common ancestor P */
  int verboseFlag;      /* True if the -v|--verbose option is present */
  int integrateFlag;    /* True if the --integrate option is present */
  int pickFlag;         /* True if the --cherrypick option is present */
  int backoutFlag;      /* True if the --backout option is present */
  int dryRunFlag;       /* True if the --dry-run or -n option is present */
  int forceFlag;        /* True if the --force or -f option is present */
  int forceMissingFlag; /* True if the --force-missing option is present */
  const char *zBinGlob; /* The value of --binary */
  const char *zPivot;   /* The value of --baseline */
  int debugFlag;        /* True if --debug is present */
  int nChng;            /* Number of file name changes */
  int *aChng;           /* An array of file name changes */
  int i;                /* Loop counter */
  int nConflict = 0;    /* Number of conflicts seen */
  int nOverwrite = 0;   /* Number of unmanaged files overwritten */
  Stmt q;


  /* Notation:
  **
  **      V     The current checkout
  **      M     The version being merged in
  **      P     The "pivot" - the most recent common ancestor of V and M.
  */

  undo_capture_command_line();
  verboseFlag = find_option("verbose","v",0)!=0;
  forceMissingFlag = find_option("force-missing",0,0)!=0;
  if( !verboseFlag ){
    verboseFlag = find_option("detail",0,0)!=0; /* deprecated */
  }
  pickFlag = find_option("cherrypick",0,0)!=0;
  integrateFlag = find_option("integrate",0,0)!=0;
  backoutFlag = find_option("backout",0,0)!=0;
  debugFlag = find_option("debug",0,0)!=0;
  zBinGlob = find_option("binary",0,1);
  dryRunFlag = find_option("dry-run","n",0)!=0;
  if( !dryRunFlag ){
    dryRunFlag = find_option("nochange",0,0)!=0; /* deprecated */
  }
  forceFlag = find_option("force","f",0)!=0;
  zPivot = find_option("baseline",0,1);
  verify_all_options();
  db_must_be_within_tree();
  if( zBinGlob==0 ) zBinGlob = db_get("binary-glob",0);
  vid = db_lget_int("checkout", 0);
  if( vid==0 ){
    fossil_fatal("nothing is checked out");
  }

  /* Find mid, the artifactID of the version to be merged into the current
  ** check-out */
  if( g.argc==3 ){
    /* Mid is specified as an argument on the command-line */
    mid = name_to_typed_rid(g.argv[2], "ci");
    if( mid==0 || !is_a_version(mid) ){
      fossil_fatal("not a version: %s", g.argv[2]);
    }
  }else if( g.argc==2 ){
    /* No version specified on the command-line so pick the most recent
    ** leaf that is (1) not the version currently checked out and (2)
    ** has not already been merged into the current checkout and (3)
    ** the leaf is not closed and (4) the leaf is in the same branch
    ** as the current checkout.
    */
    Stmt q;
    if( pickFlag || backoutFlag || integrateFlag){
      fossil_fatal("cannot use --backout, --cherrypick or --integrate with a fork merge");
    }
    mid = db_int(0,
      "SELECT leaf.rid"
      "  FROM leaf, event"
      " WHERE leaf.rid=event.objid"
      "   AND leaf.rid!=%d"                                /* Constraint (1) */
      "   AND leaf.rid NOT IN (SELECT merge FROM vmerge)"  /* Constraint (2) */
      "   AND NOT EXISTS(SELECT 1 FROM tagxref"            /* Constraint (3) */
                    "     WHERE rid=leaf.rid"
                    "       AND tagid=%d"
                    "       AND tagtype>0)"
      "   AND (SELECT value FROM tagxref"                  /* Constraint (4) */
            "   WHERE tagid=%d AND rid=%d AND tagtype>0) ="
            " (SELECT value FROM tagxref"
            "   WHERE tagid=%d AND rid=leaf.rid AND tagtype>0)"
      " ORDER BY event.mtime DESC LIMIT 1",
      vid, TAG_CLOSED, TAG_BRANCH, vid, TAG_BRANCH
    );
    if( mid==0 ){
      fossil_fatal("no unmerged forks of branch \"%s\"",
        db_text(0, "SELECT value FROM tagxref"
                   " WHERE tagid=%d AND rid=%d AND tagtype>0",
                   TAG_BRANCH, vid)
      );
    }
    db_prepare(&q,
      "SELECT blob.uuid,"
          "   datetime(event.mtime%s),"
          "   coalesce(ecomment, comment),"
          "   coalesce(euser, user)"
      "  FROM event, blob"
      " WHERE event.objid=%d AND blob.rid=%d",
      timeline_utc(), mid, mid
    );
    if( db_step(&q)==SQLITE_ROW ){
      char *zCom = mprintf("Merging fork [%S] at %s by %s: \"%s\"",
            db_column_text(&q, 0), db_column_text(&q, 1),
            db_column_text(&q, 3), db_column_text(&q, 2));
      comment_print(zCom, db_column_text(&q,2), 0, -1, g.comFmtFlags);
      fossil_free(zCom);
    }
    db_finalize(&q);
  }else{
    usage("?OPTIONS? ?VERSION?");
    return;
  }

  if( zPivot ){
    pid = name_to_typed_rid(zPivot, "ci");
    if( pid==0 || !is_a_version(pid) ){
      fossil_fatal("not a version: %s", zPivot);
    }
    if( pickFlag ){
      fossil_fatal("incompatible options: --cherrypick & --baseline");
    }
  }else if( pickFlag || backoutFlag ){
    if( integrateFlag ){
      fossil_fatal("incompatible options: --integrate & --cherrypick or --backout");
    }
    pid = db_int(0, "SELECT pid FROM plink WHERE cid=%d AND isprim", mid);
    if( pid<=0 ){
      fossil_fatal("cannot find an ancestor for %s", g.argv[2]);
    }
  }else{
    pivot_set_primary(mid);
    pivot_set_secondary(vid);
    db_prepare(&q, "SELECT merge FROM vmerge WHERE id=0");
    while( db_step(&q)==SQLITE_ROW ){
      pivot_set_secondary(db_column_int(&q,0));
    }
    db_finalize(&q);
    pid = pivot_find();
    if( pid<=0 ){
      fossil_fatal("cannot find a common ancestor between the current "
                   "checkout and %s", g.argv[2]);
    }
  }
  if( backoutFlag ){
    int t = pid;
    pid = mid;
    mid = t;
  }
  if( !is_a_version(pid) ){
    fossil_fatal("not a version: record #%d", pid);
  }
  if( !forceFlag && mid==pid ){
    fossil_print("Merge skipped because it is a no-op. "
                 " Use --force to override.\n");
    return;
  }
  if( integrateFlag && !is_a_leaf(mid)){
    fossil_warning("ignoring --integrate: %s is not a leaf", g.argv[2]);
    integrateFlag = 0;
  }
  if( verboseFlag ){
    print_checkin_description(mid, 12, integrateFlag?"integrate:":"merge-from:");
    print_checkin_description(pid, 12, "baseline:");
  }
  vfile_check_signature(vid, CKSIG_ENOTFILE);
  db_begin_transaction();
  if( !dryRunFlag ) undo_begin();
  if( load_vfile_from_rid(mid) && !forceMissingFlag ){
    fossil_fatal("missing content, unable to merge");
  }
  if( load_vfile_from_rid(pid) && !forceMissingFlag ){
    fossil_fatal("missing content, unable to merge");
  }
  if( debugFlag ){
    char *z;
    z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", pid);
    fossil_print("P=%d %z\n", pid, z);
    z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", mid);
    fossil_print("M=%d %z\n", mid, z);
    z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", vid);
    fossil_print("V=%d %z\n", vid, z);
  }

  /*
  ** The vfile.pathname field is used to match files against each other.  The
  ** FV table contains one row for each unique filename in
  ** the current checkout, the pivot, and the version being merged.
  */
  db_multi_exec(
    "DROP TABLE IF EXISTS fv;"
    "CREATE TEMP TABLE fv("
    "  fn TEXT PRIMARY KEY %s,"   /* The filename */
    "  idv INTEGER,"              /* VFILE entry for current version */
    "  idp INTEGER,"              /* VFILE entry for the pivot */
    "  idm INTEGER,"              /* VFILE entry for version merging in */
    "  chnged BOOLEAN,"           /* True if current version has been edited */
    "  ridv INTEGER,"             /* Record ID for current version */
    "  ridp INTEGER,"             /* Record ID for pivot */
    "  ridm INTEGER,"             /* Record ID for merge */
    "  isexe BOOLEAN,"            /* Execute permission enabled */
    "  fnp TEXT %s,"              /* The filename in the pivot */
    "  fnm TEXT %s,"              /* the filename in the merged version */
    "  islinkv BOOLEAN,"          /* True if current version is a symlink */
    "  islinkm BOOLEAN"           /* True if merged version in is a symlink */
    ");",
    filename_collation(), filename_collation(), filename_collation()
  );

  /* Add files found in V
  */
  db_multi_exec(
    "INSERT OR IGNORE"
    " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)"
    " SELECT pathname, pathname, pathname, id, 0, 0, rid, 0, 0, isexe, chnged "
    " FROM vfile WHERE vid=%d",
    vid
  );

  /*
  ** Compute name changes from P->V
  */
  find_filename_changes(pid, vid, 0, &nChng, &aChng, debugFlag ? "P->V" : 0);
  if( nChng ){
    for(i=0; i<nChng; i++){
      char *z;
      z = db_text(0, "SELECT name FROM filename WHERE fnid=%d", aChng[i*2]);
      db_multi_exec(
        "UPDATE fv SET fnp=%Q, fnm=%Q"
        " WHERE fn=(SELECT name FROM filename WHERE fnid=%d)",
        z, z, aChng[i*2+1]
      );
      free(z);
    }
    fossil_free(aChng);
    db_multi_exec("UPDATE fv SET fnm=fnp WHERE fnp!=fn");
  }

  /* Add files found in P but not in V
  */
  db_multi_exec(
    "INSERT OR IGNORE"
    " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)"
    " SELECT pathname, pathname, pathname, 0, 0, 0, 0, 0, 0, isexe, 0 "
    "   FROM vfile"
    "  WHERE vid=%d AND pathname %s NOT IN (SELECT fnp FROM fv)",
    pid, filename_collation()
  );

  /*
  ** Compute name changes from P->M
  */
  find_filename_changes(pid, mid, 0, &nChng, &aChng, debugFlag ? "P->M" : 0);
  if( nChng ){
    if( nChng>4 ) db_multi_exec("CREATE INDEX fv_fnp ON fv(fnp)");
    for(i=0; i<nChng; i++){
      db_multi_exec(
        "UPDATE fv SET fnm=(SELECT name FROM filename WHERE fnid=%d)"
        " WHERE fnp=(SELECT name FROM filename WHERE fnid=%d)",
        aChng[i*2+1], aChng[i*2]
      );
    }
    fossil_free(aChng);
  }

  /* Add files found in M but not in P or V.
  */
  db_multi_exec(
    "INSERT OR IGNORE"
    " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)"
    " SELECT pathname, pathname, pathname, 0, 0, 0, 0, 0, 0, isexe, 0 "
    "   FROM vfile"
    "  WHERE vid=%d"
    "    AND pathname %s NOT IN (SELECT fnp FROM fv UNION SELECT fnm FROM fv)",
    mid, filename_collation()
  );

  /*
  ** Compute the file version ids for P and M.
  */
  db_multi_exec(
    "UPDATE fv SET"
    " idp=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnp=pathname),0),"
    " ridp=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnp=pathname),0),"
    " idm=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnm=pathname),0),"
    " ridm=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnm=pathname),0),"
    " islinkv=coalesce((SELECT islink FROM vfile"
                    " WHERE vid=%d AND fnm=pathname),0),"
    " islinkm=coalesce((SELECT islink FROM vfile"
                    " WHERE vid=%d AND fnm=pathname),0)",
    pid, pid, mid, mid, vid, mid
  );

  if( debugFlag ){
    db_prepare(&q,
       "SELECT rowid, fn, fnp, fnm, chnged, ridv, ridp, ridm, "
       "       isexe, islinkv, islinkm FROM fv"
    );
    while( db_step(&q)==SQLITE_ROW ){
       fossil_print("%3d: ridv=%-4d ridp=%-4d ridm=%-4d chnged=%d isexe=%d "
                    " islinkv=%d islinkm=%d\n",
          db_column_int(&q, 0),
          db_column_int(&q, 5),
          db_column_int(&q, 6),
          db_column_int(&q, 7),
          db_column_int(&q, 4),
          db_column_int(&q, 8),
          db_column_int(&q, 9),
          db_column_int(&q, 10));
       fossil_print("     fn  = [%s]\n", db_column_text(&q, 1));
       fossil_print("     fnp = [%s]\n", db_column_text(&q, 2));
       fossil_print("     fnm = [%s]\n", db_column_text(&q, 3));
    }
    db_finalize(&q);
  }

  /*
  ** Find files in M and V but not in P and report conflicts.
  ** The file in M will be ignored.  It will be treated as if it
  ** does not exist.
  */
  db_prepare(&q,
    "SELECT idm FROM fv WHERE idp=0 AND idv>0 AND idm>0"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int idm = db_column_int(&q, 0);
    char *zName = db_text(0, "SELECT pathname FROM vfile WHERE id=%d", idm);
    fossil_warning("WARNING - no common ancestor: %s", zName);
    free(zName);
    db_multi_exec("UPDATE fv SET idm=0 WHERE idm=%d", idm);
  }
  db_finalize(&q);

  /*
  ** Add to V files that are not in V or P but are in M
  */
  db_prepare(&q,
    "SELECT idm, rowid, fnm FROM fv AS x"
    " WHERE idp=0 AND idv=0 AND idm>0"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int idm = db_column_int(&q, 0);
    int rowid = db_column_int(&q, 1);
    int idv;
    const char *zName;
    char *zFullName;
    db_multi_exec(
      "INSERT INTO vfile(vid,chnged,deleted,rid,mrid,isexe,islink,pathname)"
      "  SELECT %d,%d,0,rid,mrid,isexe,islink,pathname FROM vfile WHERE id=%d",
      vid, integrateFlag?5:3, idm
    );
    idv = db_last_insert_rowid();
    db_multi_exec("UPDATE fv SET idv=%d WHERE rowid=%d", idv, rowid);
    zName = db_column_text(&q, 2);
    zFullName = mprintf("%s%s", g.zLocalRoot, zName);
    if( file_wd_isfile_or_link(zFullName) ){
      fossil_print("ADDED %s (overwrites an unmanaged file)\n", zName);
      nOverwrite++;
    }else{
      fossil_print("ADDED %s\n", zName);
    }
    fossil_free(zFullName);
    if( !dryRunFlag ){
      undo_save(zName);
      vfile_to_disk(0, idm, 0, 0);
    }
  }
  db_finalize(&q);

  /*
  ** Find files that have changed from P->M but not P->V.
  ** Copy the M content over into V.
  */
  db_prepare(&q,
    "SELECT idv, ridm, fn, islinkm FROM fv"
    " WHERE idp>0 AND idv>0 AND idm>0"
    "   AND ridm!=ridp AND ridv=ridp AND NOT chnged"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int idv = db_column_int(&q, 0);
    int ridm = db_column_int(&q, 1);
    const char *zName = db_column_text(&q, 2);
    int islinkm = db_column_int(&q, 3);
    /* Copy content from idm over into idv.  Overwrite idv. */
    fossil_print("UPDATE %s\n", zName);
    if( !dryRunFlag ){
      undo_save(zName);
      db_multi_exec(
        "UPDATE vfile SET mtime=0, mrid=%d, chnged=%d, islink=%d "
        " WHERE id=%d", ridm, integrateFlag?4:2, islinkm, idv
      );
      vfile_to_disk(0, idv, 0, 0);
    }
  }
  db_finalize(&q);

  /*
  ** Do a three-way merge on files that have changes on both P->M and P->V.
  */
  db_prepare(&q,
    "SELECT ridm, idv, ridp, ridv, %s, fn, isexe, islinkv, islinkm FROM fv"
    " WHERE idp>0 AND idv>0 AND idm>0"
    "   AND ridm!=ridp AND (ridv!=ridp OR chnged)",
    glob_expr("fv.fn", zBinGlob)
  );
  while( db_step(&q)==SQLITE_ROW ){
    int ridm = db_column_int(&q, 0);
    int idv = db_column_int(&q, 1);
    int ridp = db_column_int(&q, 2);
    int ridv = db_column_int(&q, 3);
    int isBinary = db_column_int(&q, 4);
    const char *zName = db_column_text(&q, 5);
    int isExe = db_column_int(&q, 6);
    int islinkv = db_column_int(&q, 7);
    int islinkm = db_column_int(&q, 8);
    int rc;
    char *zFullPath;
    Blob m, p, r;
    /* Do a 3-way merge of idp->idm into idp->idv.  The results go into idv. */
    if( verboseFlag ){
      fossil_print("MERGE %s  (pivot=%d v1=%d v2=%d)\n",
                   zName, ridp, ridm, ridv);
    }else{
      fossil_print("MERGE %s\n", zName);
    }
    if( islinkv || islinkm /* || file_wd_islink(zFullPath) */ ){
      fossil_print("***** Cannot merge symlink %s\n", zName);
      nConflict++;
    }else{
      undo_save(zName);
      zFullPath = mprintf("%s/%s", g.zLocalRoot, zName);
      content_get(ridp, &p);
      content_get(ridm, &m);
      if( isBinary ){
        rc = -1;
        blob_zero(&r);
      }else{
        unsigned mergeFlags = dryRunFlag ? MERGE_DRYRUN : 0;
        rc = merge_3way(&p, zFullPath, &m, &r, mergeFlags);
      }
      if( rc>=0 ){
        if( !dryRunFlag ){
          blob_write_to_file(&r, zFullPath);
          file_wd_setexe(zFullPath, isExe);
        }
        db_multi_exec("UPDATE vfile SET mtime=0 WHERE id=%d", idv);
        if( rc>0 ){
          fossil_print("***** %d merge conflicts in %s\n", rc, zName);
          nConflict++;
        }
      }else{
        fossil_print("***** Cannot merge binary file %s\n", zName);
        nConflict++;
      }
      blob_reset(&p);
      blob_reset(&m);
      blob_reset(&r);
    }
    db_multi_exec("INSERT OR IGNORE INTO vmerge(id,merge) VALUES(%d,%d)",
                  idv,ridm);
  }
  db_finalize(&q);

  /*
  ** Drop files that are in P and V but not in M
  */
  db_prepare(&q,
    "SELECT idv, fn, chnged FROM fv"
    " WHERE idp>0 AND idv>0 AND idm=0"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int idv = db_column_int(&q, 0);
    const char *zName = db_column_text(&q, 1);
    int chnged = db_column_int(&q, 2);
    /* Delete the file idv */
    fossil_print("DELETE %s\n", zName);
    if( chnged ){
      fossil_warning("WARNING: local edits lost for %s\n", zName);
      nConflict++;
    }
    undo_save(zName);
    db_multi_exec(
      "UPDATE vfile SET deleted=1 WHERE id=%d", idv
    );
    if( !dryRunFlag ){
      char *zFullPath = mprintf("%s%s", g.zLocalRoot, zName);
      file_delete(zFullPath);
      free(zFullPath);
    }
  }
  db_finalize(&q);

  /*
  ** Rename files that have taken a rename on P->M but which keep the same
  ** name on P->V.  If a file is renamed on P->V only or on both P->V and
  ** P->M then we retain the V name of the file.
  */
  db_prepare(&q,
    "SELECT idv, fnp, fnm FROM fv"
    " WHERE idv>0 AND idp>0 AND idm>0 AND fnp=fn AND fnm!=fnp"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int idv = db_column_int(&q, 0);
    const char *zOldName = db_column_text(&q, 1);
    const char *zNewName = db_column_text(&q, 2);
    fossil_print("RENAME %s -> %s\n", zOldName, zNewName);
    undo_save(zOldName);
    undo_save(zNewName);
    db_multi_exec(
      "UPDATE vfile SET pathname=%Q, origname=coalesce(origname,pathname)"
      " WHERE id=%d AND vid=%d", zNewName, idv, vid
    );
    if( !dryRunFlag ){
      char *zFullOldPath = mprintf("%s%s", g.zLocalRoot, zOldName);
      char *zFullNewPath = mprintf("%s%s", g.zLocalRoot, zNewName);
      if( file_wd_islink(zFullOldPath) ){
        symlink_copy(zFullOldPath, zFullNewPath);
      }else{
        file_copy(zFullOldPath, zFullNewPath);
      }
      file_delete(zFullOldPath);
      free(zFullNewPath);
      free(zFullOldPath);
    }
  }
  db_finalize(&q);


  /* Report on conflicts
  */
  if( nConflict ){
    fossil_warning("WARNING: %d merge conflicts", nConflict);
  }
  if( nOverwrite ){
    fossil_warning("WARNING: %d unmanaged files were overwritten",
                   nOverwrite);
  }
  if( dryRunFlag ){
    fossil_warning("REMINDER: this was a dry run -"
                   " no files were actually changed.");
  }

  /*
  ** Clean up the mid and pid VFILE entries.  Then commit the changes.
  */
  db_multi_exec("DELETE FROM vfile WHERE vid!=%d", vid);
  if( pickFlag ){
    db_multi_exec("INSERT OR IGNORE INTO vmerge(id,merge) VALUES(-1,%d)",mid);
    /* For a cherry-pick merge, make the default check-in comment the same
    ** as the check-in comment on the check-in that is being merged in. */
    db_multi_exec(
       "REPLACE INTO vvar(name,value)"
       " SELECT 'ci-comment', coalesce(ecomment,comment) FROM event"
       "  WHERE type='ci' AND objid=%d",
       mid
    );
  }else if( backoutFlag ){
    db_multi_exec("INSERT OR IGNORE INTO vmerge(id,merge) VALUES(-2,%d)",pid);
  }else if( integrateFlag ){
    db_multi_exec("INSERT OR IGNORE INTO vmerge(id,merge) VALUES(-4,%d)",mid);
  }else{
    db_multi_exec("INSERT OR IGNORE INTO vmerge(id,merge) VALUES(0,%d)", mid);
  }
  undo_finish();
  db_end_transaction(dryRunFlag);
}
Example #18
/*
** COMMAND: addremove
**
** Usage: %fossil addremove ?OPTIONS?
**
** Do all necessary "add" and "rm" commands to synchronize the repository
** with the content of the working checkout:
**
**  *  All files in the checkout but not in the repository (that is,
**     all files displayed using the "extra" command) are added as
**     if by the "add" command.
**
**  *  All files in the repository but missing from the checkout (that is,
**     all files that show as MISSING with the "status" command) are
**     removed as if by the "rm" command.
**
** The command does not "commit".  You must run the "commit" command
** as a separate step.
**
** Files and directories whose names begin with "." are ignored unless
** the --dotfiles option is used.
**
** The --ignore option overrides the "ignore-glob" setting, as does the
** --case-sensitive option with the "case-sensitive" setting. See the
** documentation on the "settings" command for further information.
**
** The --test option shows what would happen without actually doing anything.
**
** This command can be used to track third party software.
** 
** Options: 
**   --case-sensitive <BOOL> override case-sensitive setting
**   --dotfiles              include files beginning with a dot (".")   
**   --ignore <CSG>          ignore files matching patterns from the 
**                           comma separated list of glob patterns.
**   --test                  If given, display instead of run actions
**
** See also: add, rm
*/
void addremove_cmd(void){
  Blob path;
  const char *zIgnoreFlag = find_option("ignore",0,1);
  unsigned scanFlags = find_option("dotfiles",0,0)!=0 ? SCAN_ALL : 0;
  int isTest = find_option("test",0,0)!=0;
  int caseSensitive;
  int n;
  Stmt q;
  int vid;
  int nAdd = 0;
  int nDelete = 0;
  Glob *pIgnore;

  capture_case_sensitive_option();
  db_must_be_within_tree();
  caseSensitive = filenames_are_case_sensitive();
  if( zIgnoreFlag==0 ){
    zIgnoreFlag = db_get("ignore-glob", 0);
  }
  vid = db_lget_int("checkout",0);
  if( vid==0 ){
    fossil_panic("no checkout to add to");
  }
  db_begin_transaction();

  /* step 1:  
  ** Populate the temp table "sfile" with the names of all unmanaged
  ** files currently in the check-out, except for files that match the
  ** --ignore or ignore-glob patterns and dot-files.  Then add all of
  ** the files in the sfile temp table to the set of managed files.
  */
  db_multi_exec("CREATE TEMP TABLE sfile(x TEXT PRIMARY KEY)");
  n = strlen(g.zLocalRoot);
  blob_init(&path, g.zLocalRoot, n-1);
  /* now we read the complete file structure into a temp table */
  pIgnore = glob_create(zIgnoreFlag);
  vfile_scan(&path, blob_size(&path), scanFlags, pIgnore);
  glob_free(pIgnore);
  nAdd = add_files_in_sfile(vid, caseSensitive);

  /* step 2: search for missing files */
  db_prepare(&q,
      "SELECT pathname, %Q || pathname, deleted FROM vfile"
      " WHERE NOT deleted"
      " ORDER BY 1",
      g.zLocalRoot
  );
  while( db_step(&q)==SQLITE_ROW ){
    const char * zFile;
    const char * zPath;

    zFile = db_column_text(&q, 0);
    zPath = db_column_text(&q, 1);
    if( !file_wd_isfile_or_link(zPath) ){
      if( !isTest ){
        db_multi_exec("UPDATE vfile SET deleted=1 WHERE pathname=%Q", zFile);
      }
      fossil_print("DELETED  %s\n", zFile);
      nDelete++;
    }
  }
  db_finalize(&q);
  /* show command summary */
  fossil_print("added %d files, deleted %d files\n", nAdd, nDelete);

  db_end_transaction(isTest);
}
Example #19
/*
** When a record is converted from a phantom to a real record,
** if that record has other records that are derived by delta,
** then call manifest_crosslink() on those other records.
**
** If the formerly phantom record or any of the other records
** derived by delta from the former phantom are a baseline manifest,
** then also invoke manifest_crosslink() on the delta-manifests
** associated with that baseline.
**
** Tail recursion is used to minimize stack depth.
*/
void after_dephantomize(int rid, int linkFlag){
  Stmt q;
  int nChildAlloc = 0;
  int *aChild = 0;
  Blob content;

  if( ignoreDephantomizations ) return;
  while( rid ){
    int nChildUsed = 0;
    int i;

    /* Parse the object rid itself */
    if( linkFlag ){
      content_get(rid, &content);
      manifest_crosslink(rid, &content, MC_NONE);
      assert( blob_is_reset(&content) );
    }

    /* Parse all delta-manifests that depend on baseline-manifest rid */
    db_prepare(&q, "SELECT rid FROM orphan WHERE baseline=%d", rid);
    while( db_step(&q)==SQLITE_ROW ){
      int child = db_column_int(&q, 0);
      if( nChildUsed>=nChildAlloc ){
        nChildAlloc = nChildAlloc*2 + 10;
        aChild = fossil_realloc(aChild, nChildAlloc*sizeof(aChild[0]));
      }
      aChild[nChildUsed++] = child;
    }
    db_finalize(&q);
    for(i=0; i<nChildUsed; i++){
      content_get(aChild[i], &content);
      manifest_crosslink(aChild[i], &content, MC_NONE);
      assert( blob_is_reset(&content) );
    }
    if( nChildUsed ){
      db_multi_exec("DELETE FROM orphan WHERE baseline=%d", rid);
    }

    /* Recursively dephantomize all artifacts that are derived by
    ** delta from artifact rid and which have not already been
    ** cross-linked.  */
    nChildUsed = 0;
    db_prepare(&q, 
       "SELECT rid FROM delta"
       " WHERE srcid=%d"
       "   AND NOT EXISTS(SELECT 1 FROM mlink WHERE mid=delta.rid)",
       rid
    );
    while( db_step(&q)==SQLITE_ROW ){
      int child = db_column_int(&q, 0);
      if( nChildUsed>=nChildAlloc ){
        nChildAlloc = nChildAlloc*2 + 10;
        aChild = fossil_realloc(aChild, nChildAlloc*sizeof(aChild[0]));
      }
      aChild[nChildUsed++] = child;
    }
    db_finalize(&q);
    for(i=1; i<nChildUsed; i++){
      after_dephantomize(aChild[i], 1);
    }

    /* Tail recursion for the common case where only a single artifact
    ** is derived by delta from rid... */
    rid = nChildUsed>0 ? aChild[0] : 0;
    linkFlag = 1;
  }
  free(aChild);
}
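
The child lists above are collected into a grow-by-doubling buffer (nChildAlloc = nChildAlloc*2 + 10) so the query can be finalized before any recursion takes place. A minimal standalone sketch of that growth pattern:

#include <stdio.h>
#include <stdlib.h>

int main(void){
  int *a = 0;
  int nAlloc = 0, nUsed = 0, i;
  for(i=0; i<100; i++){
    if( nUsed>=nAlloc ){
      nAlloc = nAlloc*2 + 10;                 /* 10, 30, 70, 150, ...   */
      a = realloc(a, nAlloc*sizeof(a[0]));    /* grow before appending  */
      if( a==0 ) return 1;
    }
    a[nUsed++] = i*i;
  }
  printf("stored %d values in a buffer of %d slots\n", nUsed, nAlloc);
  free(a);
  return 0;
}
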
Example #20
static void rebuild_update_schema(void){
  int rc;
  db_multi_exec(zSchemaUpdates1);
  db_multi_exec(zSchemaUpdates2);

  rc = db_exists("SELECT 1 FROM sqlite_master"
                 " WHERE name='user' AND sql GLOB '* mtime *'");
  if( rc==0 ){
    db_multi_exec(
      "CREATE TEMP TABLE temp_user AS SELECT * FROM user;"
      "DROP TABLE user;"
      "CREATE TABLE user(\n"
      "  uid INTEGER PRIMARY KEY,\n"
      "  login TEXT UNIQUE,\n"
      "  pw TEXT,\n"
      "  cap TEXT,\n"
      "  cookie TEXT,\n"
      "  ipaddr TEXT,\n"
      "  cexpire DATETIME,\n"
      "  info TEXT,\n"
      "  mtime DATE,\n"
      "  photo BLOB\n"
      ");"
      "INSERT OR IGNORE INTO user"
        " SELECT uid, login, pw, cap, cookie,"
               " ipaddr, cexpire, info, now(), photo FROM temp_user;"
      "DROP TABLE temp_user;"
    );
  }

  rc = db_exists("SELECT 1 FROM sqlite_master"
                 " WHERE name='config' AND sql GLOB '* mtime *'");
  if( rc==0 ){
    db_multi_exec(
      "ALTER TABLE config ADD COLUMN mtime INTEGER;"
      "UPDATE config SET mtime=now();"
    );
  }

  rc = db_exists("SELECT 1 FROM sqlite_master"
                 " WHERE name='shun' AND sql GLOB '* mtime *'");
  if( rc==0 ){
    db_multi_exec(
      "ALTER TABLE shun ADD COLUMN mtime INTEGER;"
      "ALTER TABLE shun ADD COLUMN scom TEXT;"
      "UPDATE shun SET mtime=now();"
    );
  }

  rc = db_exists("SELECT 1 FROM sqlite_master"
                 " WHERE name='reportfmt' AND sql GLOB '* mtime *'");
  if( rc==0 ){
    db_multi_exec(
      "CREATE TEMP TABLE old_fmt AS SELECT * FROM reportfmt;"
      "DROP TABLE reportfmt;"
    );
    db_multi_exec(zSchemaUpdates2);
    db_multi_exec(
      "INSERT OR IGNORE INTO reportfmt(rn,owner,title,cols,sqlcode,mtime)"
        " SELECT rn, owner, title, cols, sqlcode, now() FROM old_fmt;"
      "INSERT OR IGNORE INTO reportfmt(rn,owner,title,cols,sqlcode,mtime)"
        " SELECT rn, owner, title || ' (' || rn || ')', cols, sqlcode, now()"
        "   FROM old_fmt;"
    );
  }

  rc = db_exists("SELECT 1 FROM sqlite_master"
                 " WHERE name='concealed' AND sql GLOB '* mtime *'");
  if( rc==0 ){
    db_multi_exec(
      "ALTER TABLE concealed ADD COLUMN mtime INTEGER;"
      "UPDATE concealed SET mtime=now();"
    );
  }
}  
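
Every upgrade above uses the same probe: read the table's CREATE statement out of sqlite_master and check whether it already mentions the new column before altering or rebuilding the table. A hedged, self-contained illustration of that idiom against the raw SQLite C API (the helper name is invented for this sketch, and a plain strstr() stands in for the GLOB '* mtime *' match):

#include <sqlite3.h>
#include <string.h>

/* Sketch only: non-zero if the stored CREATE TABLE text for zTable
** contains zWord, approximating the "sql GLOB '* mtime *'" test above. */
static int table_sql_has_word(sqlite3 *db, const char *zTable,
                              const char *zWord){
  sqlite3_stmt *pStmt;
  int found = 0;
  const char *zSql =
    "SELECT sql FROM sqlite_master WHERE type='table' AND name=?1";
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)!=SQLITE_OK ) return 0;
  sqlite3_bind_text(pStmt, 1, zTable, -1, SQLITE_STATIC);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    const char *zCreate = (const char*)sqlite3_column_text(pStmt, 0);
    found = zCreate!=0 && strstr(zCreate, zWord)!=0;
  }
  sqlite3_finalize(pStmt);
  return found;
}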
Example #21
0
/*
** COMMAND: rebuild
**
** Usage: %fossil rebuild ?REPOSITORY? ?OPTIONS?
**
** Reconstruct the named repository database from the core
** records.  Run this command after updating the fossil
** executable in a way that changes the database schema.
**
** Options:
**   --cluster     Compute clusters for unclustered artifacts
**   --compress    Strive to make the database as small as possible
**   --force       Force the rebuild to complete even if errors are seen
**   --noverify    Skip the verification of changes to the BLOB table
**   --pagesize N  Set the database pagesize to N. (512..65536 and power of 2)
**   --randomize   Scan artifacts in a random order
**   --vacuum      Run VACUUM on the database after rebuilding
**   --deanalyze   Remove ANALYZE tables from the database
**   --analyze     Run ANALYZE on the database after rebuilding
**   --wal         Set Write-Ahead-Log journalling mode on the database
**   --stats       Show artifact statistics after rebuilding
**
** See also: deconstruct, reconstruct
*/
void rebuild_database(void){
  int forceFlag;
  int randomizeFlag;
  int errCnt;
  int omitVerify;
  int doClustering;
  const char *zPagesize;
  int newPagesize = 0;
  int activateWal;
  int runVacuum;
  int runDeanalyze;
  int runAnalyze;
  int runCompress;
  int showStats;

  omitVerify = find_option("noverify",0,0)!=0;
  forceFlag = find_option("force","f",0)!=0;
  randomizeFlag = find_option("randomize", 0, 0)!=0;
  doClustering = find_option("cluster", 0, 0)!=0;
  runVacuum = find_option("vacuum",0,0)!=0;
  runDeanalyze = find_option("deanalyze",0,0)!=0;
  runAnalyze = find_option("analyze",0,0)!=0;
  runCompress = find_option("compress",0,0)!=0;
  zPagesize = find_option("pagesize",0,1);
  showStats = find_option("stats",0,0)!=0;
  if( zPagesize ){
    newPagesize = atoi(zPagesize);
    if( newPagesize<512 || newPagesize>65536
        || (newPagesize&(newPagesize-1))!=0
    ){
      fossil_fatal("page size must be a power of two between 512 and 65536");
    }
  }
  activateWal = find_option("wal",0,0)!=0;
  if( g.argc==3 ){
    db_open_repository(g.argv[2]);
  }else{
    db_find_and_open_repository(OPEN_ANY_SCHEMA, 0);
    if( g.argc!=2 ){
      usage("?REPOSITORY-FILENAME?");
    }
    db_close(1);
    db_open_repository(g.zRepositoryName);
  }
  db_begin_transaction();
  ttyOutput = 1;
  errCnt = rebuild_db(randomizeFlag, 1, doClustering);
  reconstruct_private_table();
  db_multi_exec(
    "REPLACE INTO config(name,value,mtime) VALUES('content-schema','%s',now());"
    "REPLACE INTO config(name,value,mtime) VALUES('aux-schema','%s',now());"
    "REPLACE INTO config(name,value,mtime) VALUES('rebuilt','%s',now());",
    CONTENT_SCHEMA, AUX_SCHEMA, get_version()
  );
  if( errCnt && !forceFlag ){
    fossil_print(
      "%d errors. Rolling back changes. Use --force to force a commit.\n",
      errCnt
    );
    db_end_transaction(1);
  }else{
    if( runCompress ){
      fossil_print("Extra delta compression... "); fflush(stdout);
      extra_deltification();
      runVacuum = 1;
    }
    if( omitVerify ) verify_cancel();
    db_end_transaction(0);
    if( runCompress ) fossil_print("done\n");
    db_close(0);
    db_open_repository(g.zRepositoryName);
    if( newPagesize ){
      db_multi_exec("PRAGMA page_size=%d", newPagesize);
      runVacuum = 1;
    }
    if( runDeanalyze ){
      db_multi_exec("DROP TABLE IF EXISTS sqlite_stat1;"
                    "DROP TABLE IF EXISTS sqlite_stat3;"
                    "DROP TABLE IF EXISTS sqlite_stat4;");
    }
    if( runAnalyze ){
      fossil_print("Analyzing the database... "); fflush(stdout);
      db_multi_exec("ANALYZE;");
      fossil_print("done\n");
    }
    if( runVacuum ){
      fossil_print("Vacuuming the database... "); fflush(stdout);
      db_multi_exec("VACUUM");
      fossil_print("done\n");
    }
    if( activateWal ){
      db_multi_exec("PRAGMA journal_mode=WAL;");
    }
  }
  if( showStats ){
    static struct { int idx; const char *zLabel; } aStat[] = {
       { CFTYPE_ANY,       "Artifacts:" },
       { CFTYPE_MANIFEST,  "Manifests:" },
       { CFTYPE_CLUSTER,   "Clusters:" },
       { CFTYPE_CONTROL,   "Tags:" },
       { CFTYPE_WIKI,      "Wikis:" },
       { CFTYPE_TICKET,    "Tickets:" },
       { CFTYPE_ATTACHMENT,"Attachments:" },
       { CFTYPE_EVENT,     "Events:" },
    };
    int i;
    int subtotal = 0;
    for(i=0; i<count(aStat); i++){
      int k = aStat[i].idx;
      fossil_print("%-15s %6d\n", aStat[i].zLabel, g.parseCnt[k]);
      if( k>0 ) subtotal += g.parseCnt[k];
    }
    fossil_print("%-15s %6d\n", "Other:", g.parseCnt[CFTYPE_ANY] - subtotal);
  }
}
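
The --pagesize validation above leans on the standard bit trick that a positive integer is a power of two exactly when n & (n-1) is zero. In isolation:

/* Sketch only: non-zero if n is a power of two (and at least 1).
** e.g. 4096 passes and 1000 fails, so "--pagesize 1000" is rejected while
** any power of two in 512..65536 is accepted. */
static int is_power_of_two(int n){
  return n>0 && (n & (n-1))==0;
}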
Example #22
0
/*
** Rebuild cross-referencing information for the artifact
** rid with content pBase and all of its descendants.  This
** routine clears the content buffer before returning.
**
** If the zFNameFormat variable is set, then this routine is
** called to run "fossil deconstruct" instead of the usual
** "fossil rebuild".  In that case, instead of rebuilding the
** cross-referencing information, write the file content out
** to the appropriate directory.
**
** In both cases, this routine automatically recurses to process
** other artifacts that are deltas off of the current artifact.
** This is the most efficient way to extract all of the original
** artifact content from the Fossil repository.
*/
static void rebuild_step(int rid, int size, Blob *pBase){
  static Stmt q1;
  Bag children;
  Blob copy;
  Blob *pUse;
  int nChild, i, cid;

  while( rid>0 ){

    /* Fix up the "blob.size" field if needed. */
    if( size!=blob_size(pBase) ){
      db_multi_exec(
         "UPDATE blob SET size=%d WHERE rid=%d", blob_size(pBase), rid
      );
    }
  
    /* Find all children of artifact rid */
    db_static_prepare(&q1, "SELECT rid FROM delta WHERE srcid=:rid");
    db_bind_int(&q1, ":rid", rid);
    bag_init(&children);
    while( db_step(&q1)==SQLITE_ROW ){
      int cid = db_column_int(&q1, 0);
      if( !bag_find(&bagDone, cid) ){
        bag_insert(&children, cid);
      }
    }
    nChild = bag_count(&children);
    db_reset(&q1);
  
    /* Crosslink the artifact */
    if( nChild==0 ){
      pUse = pBase;
    }else{
      blob_copy(&copy, pBase);
      pUse = &copy;
    }
    if( zFNameFormat==0 ){
      /* We are doing "fossil rebuild" */
      manifest_crosslink(rid, pUse, MC_NONE);
    }else{
      /* We are doing "fossil deconstruct" */
      char *zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid);
      char *zFile = mprintf(zFNameFormat, zUuid, zUuid+prefixLength);
      blob_write_to_file(pUse,zFile);
      free(zFile);
      free(zUuid);
      blob_reset(pUse);
    }
    assert( blob_is_reset(pUse) );
    rebuild_step_done(rid);
  
    /* Call all children recursively */
    rid = 0;
    for(cid=bag_first(&children), i=1; cid; cid=bag_next(&children, cid), i++){
      static Stmt q2;
      int sz;
      db_static_prepare(&q2, "SELECT content, size FROM blob WHERE rid=:rid");
      db_bind_int(&q2, ":rid", cid);
      if( db_step(&q2)==SQLITE_ROW && (sz = db_column_int(&q2,1))>=0 ){
        Blob delta, next;
        db_ephemeral_blob(&q2, 0, &delta);
        blob_uncompress(&delta, &delta);
        blob_delta_apply(pBase, &delta, &next);
        blob_reset(&delta);
        db_reset(&q2);
        if( i<nChild ){
          rebuild_step(cid, sz, &next);
        }else{
          /* Tail recursion */
          rid = cid;
          size = sz;
          blob_reset(pBase);
          *pBase = next;
        }
      }else{
        db_reset(&q2);
        blob_reset(pBase);
      }
    }
    bag_clear(&children);
  }
}
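
The child loop above reconstructs each artifact from its parent in three steps: read the compressed delta out of the blob table, uncompress it, and apply it to the parent's content. A condensed sketch of just that reconstruction, reusing the Blob and db helpers that appear throughout these examples (the standalone function name is invented here):

/* Sketch only: materialize artifact cid, stored as a compressed delta
** against the already-materialized parent content *pParent. */
static int sketch_expand_child(int cid, Blob *pParent, Blob *pChildOut){
  Stmt q;
  int ok = 0;
  db_prepare(&q, "SELECT content FROM blob WHERE rid=%d AND size>=0", cid);
  if( db_step(&q)==SQLITE_ROW ){
    Blob delta;
    db_ephemeral_blob(&q, 0, &delta);              /* compressed delta bytes */
    blob_uncompress(&delta, &delta);               /* raw delta */
    blob_delta_apply(pParent, &delta, pChildOut);  /* parent + delta = child */
    blob_reset(&delta);
    ok = 1;
  }
  db_finalize(&q);
  return ok;
}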
Example #23
0
/*
** Core function to rebuild the information in the derived tables of a
** fossil repository from the blobs. This function is shared between
** 'rebuild_database' ('rebuild') and 'reconstruct_cmd'
** ('reconstruct'), both of which have to regenerate this information
** from scratch.
**
** If the randomize parameter is true, then the BLOBs are deliberately
** extracted in a random order.  This feature is used to test the
** ability of fossil to accept records in any order and still
** construct a sane repository.
*/
int rebuild_db(int randomize, int doOut, int doClustering){
  Stmt s;
  int errCnt = 0;
  char *zTable;
  int incrSize;

  bag_init(&bagDone);
  ttyOutput = doOut;
  processCnt = 0;
  if (ttyOutput && !g.fQuiet) {
    percent_complete(0);
  }
  rebuild_update_schema();
  for(;;){
    zTable = db_text(0,
       "SELECT name FROM sqlite_master /*scan*/"
       " WHERE type='table'"
       " AND name NOT IN ('blob','delta','rcvfrom','user',"
                         "'config','shun','private','reportfmt',"
                         "'concealed','accesslog','modreq')"
       " AND name NOT GLOB 'sqlite_*'"
       " AND name NOT GLOB 'fx_*'"
    );
    if( zTable==0 ) break;
    db_multi_exec("DROP TABLE %Q", zTable);
    free(zTable);
  }
  db_multi_exec(zRepositorySchema2);
  ticket_create_table(0);
  shun_artifacts();

  db_multi_exec(
     "INSERT INTO unclustered"
     " SELECT rid FROM blob EXCEPT SELECT rid FROM private"
  );
  db_multi_exec(
     "DELETE FROM unclustered"
     " WHERE rid IN (SELECT rid FROM shun JOIN blob USING(uuid))"
  );
  db_multi_exec(
    "DELETE FROM config WHERE name IN ('remote-code', 'remote-maxid')"
  );

  /* The following should be count(*) instead of max(rid). max(rid) is
  ** an adequate approximation, however, and is much faster for large
  ** repositories. */
  totalSize = db_int(0, "SELECT max(rid) FROM blob");
  incrSize = totalSize/100;
  totalSize += incrSize*2;
  db_prepare(&s,
     "SELECT rid, size FROM blob /*scan*/"
     " WHERE NOT EXISTS(SELECT 1 FROM shun WHERE uuid=blob.uuid)"
     "   AND NOT EXISTS(SELECT 1 FROM delta WHERE rid=blob.rid)"
  );
  manifest_crosslink_begin();
  while( db_step(&s)==SQLITE_ROW ){
    int rid = db_column_int(&s, 0);
    int size = db_column_int(&s, 1);
    if( size>=0 ){
      Blob content;
      content_get(rid, &content);
      rebuild_step(rid, size, &content);
    }
  }
  db_finalize(&s);
  db_prepare(&s,
     "SELECT rid, size FROM blob"
     " WHERE NOT EXISTS(SELECT 1 FROM shun WHERE uuid=blob.uuid)"
  );
  while( db_step(&s)==SQLITE_ROW ){
    int rid = db_column_int(&s, 0);
    int size = db_column_int(&s, 1);
    if( size>=0 ){
      if( !bag_find(&bagDone, rid) ){
        Blob content;
        content_get(rid, &content);
        rebuild_step(rid, size, &content);
      }
    }else{
      db_multi_exec("INSERT OR IGNORE INTO phantom VALUES(%d)", rid);
      rebuild_step_done(rid);
    }
  }
  db_finalize(&s);
  manifest_crosslink_end(MC_NONE);
  rebuild_tag_trunk();
  if( ttyOutput && !g.fQuiet && totalSize>0 ){
    processCnt += incrSize;
    percent_complete((processCnt*1000)/totalSize);
  }
  if( doClustering ) create_cluster();
  if( ttyOutput && !g.fQuiet && totalSize>0 ){
    processCnt += incrSize;
    percent_complete((processCnt*1000)/totalSize);
  }
  if(!g.fQuiet && ttyOutput ){
    percent_complete(1000);
    fossil_print("\n");
  }
  return errCnt;
}
Example #24
0
/*
**  fossil branch new    BRANCH-NAME ?ORIGIN-CHECK-IN? ?-bgcolor COLOR?
**  argv0  argv1  argv2  argv3       argv4
*/
void branch_new(void){
  int rootid;            /* RID of the root check-in - what we branch off of */
  int brid;              /* RID of the branch check-in */
  int noSign;            /* True if the branch is unsigned */
  int i;                 /* Loop counter */
  char *zUuid;           /* Artifact ID of origin */
  Stmt q;                /* Generic query */
  const char *zBranch;   /* Name of the new branch */
  char *zDate;           /* Date that branch was created */
  char *zComment;        /* Check-in comment for the new branch */
  const char *zColor;    /* Color of the new branch */
  Blob branch;           /* manifest for the new branch */
  Manifest *pParent;     /* Parsed parent manifest */
  Blob mcksum;           /* Self-checksum on the manifest */
  const char *zDateOvrd; /* Override date string */
  const char *zUserOvrd; /* Override user name */
  int isPrivate = 0;     /* True if the branch should be private */
 
  noSign = find_option("nosign","",0)!=0;
  zColor = find_option("bgcolor","c",1);
  isPrivate = find_option("private",0,0)!=0;
  zDateOvrd = find_option("date-override",0,1);
  zUserOvrd = find_option("user-override",0,1);
  verify_all_options();
  if( g.argc<5 ){
    usage("new BRANCH-NAME CHECK-IN ?-bgcolor COLOR?");
  }
  db_find_and_open_repository(0, 0);  
  noSign = db_get_int("omitsign", 0)|noSign;
  
  /* fossil branch new name */
  zBranch = g.argv[3];
  if( zBranch==0 || zBranch[0]==0 ){
    fossil_panic("branch name cannot be empty");
  }
  if( db_exists(
        "SELECT 1 FROM tagxref"
        " WHERE tagtype>0"
        "   AND tagid=(SELECT tagid FROM tag WHERE tagname='sym-%s')",
        zBranch)!=0 ){
    fossil_fatal("branch \"%s\" already exists", zBranch);
  }

  user_select();
  db_begin_transaction();
  rootid = name_to_typed_rid(g.argv[4], "ci");
  if( rootid==0 ){
    fossil_fatal("unable to locate check-in off of which to branch");
  }

  pParent = manifest_get(rootid, CFTYPE_MANIFEST);
  if( pParent==0 ){
    fossil_fatal("%s is not a valid check-in", g.argv[4]);
  }

  /* Create a manifest for the new branch */
  blob_zero(&branch);
  if( pParent->zBaseline ){
    blob_appendf(&branch, "B %s\n", pParent->zBaseline);
  }
  zComment = mprintf("Create new branch named \"%h\"", zBranch);
  blob_appendf(&branch, "C %F\n", zComment);
  zDate = date_in_standard_format(zDateOvrd ? zDateOvrd : "now");
  blob_appendf(&branch, "D %s\n", zDate);

  /* Copy all of the content from the parent into the branch */
  for(i=0; i<pParent->nFile; ++i){
    blob_appendf(&branch, "F %F", pParent->aFile[i].zName);
    if( pParent->aFile[i].zUuid ){
      blob_appendf(&branch, " %s", pParent->aFile[i].zUuid);
      if( pParent->aFile[i].zPerm && pParent->aFile[i].zPerm[0] ){
        blob_appendf(&branch, " %s", pParent->aFile[i].zPerm);
      }
    }
    blob_append(&branch, "\n", 1);
  }
  zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rootid);
  blob_appendf(&branch, "P %s\n", zUuid);
  if( pParent->zRepoCksum ){
    blob_appendf(&branch, "R %s\n", pParent->zRepoCksum);
  }
  manifest_destroy(pParent);

  /* Add the symbolic branch name and the "branch" tag to identify
  ** this as a new branch */
  if( content_is_private(rootid) ) isPrivate = 1;
  if( isPrivate && zColor==0 ) zColor = "#fec084";
  if( zColor!=0 ){
    blob_appendf(&branch, "T *bgcolor * %F\n", zColor);
  }
  blob_appendf(&branch, "T *branch * %F\n", zBranch);
  blob_appendf(&branch, "T *sym-%F *\n", zBranch);
  if( isPrivate ){
    blob_appendf(&branch, "T +private *\n");
    noSign = 1;
  }

  /* Cancel all other symbolic tags */
  db_prepare(&q,
      "SELECT tagname FROM tagxref, tag"
      " WHERE tagxref.rid=%d AND tagxref.tagid=tag.tagid"
      "   AND tagtype>0 AND tagname GLOB 'sym-*'"
      " ORDER BY tagname",
      rootid);
  while( db_step(&q)==SQLITE_ROW ){
    const char *zTag = db_column_text(&q, 0);
    blob_appendf(&branch, "T -%F *\n", zTag);
  }
  db_finalize(&q);
  
  blob_appendf(&branch, "U %F\n", zUserOvrd ? zUserOvrd : g.zLogin);
  md5sum_blob(&branch, &mcksum);
  blob_appendf(&branch, "Z %b\n", &mcksum);
  if( !noSign && clearsign(&branch, &branch) ){
    Blob ans;
    blob_zero(&ans);
    prompt_user("unable to sign manifest.  continue (y/N)? ", &ans);
    if( blob_str(&ans)[0]!='y' ){
      db_end_transaction(1);
      fossil_exit(1);
    }
  }

  brid = content_put_ex(&branch, 0, 0, 0, isPrivate);
  if( brid==0 ){
    fossil_panic("trouble committing manifest: %s", g.zErrMsg);
  }
  db_multi_exec("INSERT OR IGNORE INTO unsent VALUES(%d)", brid);
  if( manifest_crosslink(brid, &branch)==0 ){
    fossil_panic("unable to install new manifest");
  }
  assert( blob_is_reset(&branch) );
  content_deltify(rootid, brid, 0);
  zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", brid);
  fossil_print("New branch: %s\n", zUuid);
  if( g.argc==3 ){
    fossil_print(
      "\n"
      "Note: the local check-out has not been updated to the new\n"
      "      branch.  To begin working on the new branch, do this:\n"
      "\n"
      "      %s update %s\n",
      fossil_nameofexe(), zBranch
    );
  }


  /* Commit */
  db_end_transaction(0);
  
  /* Do an autosync push, if requested */
  if( !isPrivate ) autosync(AUTOSYNC_PUSH);
}
Example #25
0
/*
** COMMAND: test-clusters
**
** Verify that all non-private and non-shunned artifacts are accessible
** through the cluster chain.
*/
void test_clusters_cmd(void){
  Bag pending;
  Stmt q;
  int n;
  
  db_find_and_open_repository(0, 2);
  bag_init(&pending);
  db_multi_exec(
    "CREATE TEMP TABLE xdone(x INTEGER PRIMARY KEY);"
    "INSERT INTO xdone SELECT rid FROM unclustered;"
    "INSERT OR IGNORE INTO xdone SELECT rid FROM private;"
    "INSERT OR IGNORE INTO xdone"
         " SELECT blob.rid FROM shun JOIN blob USING(uuid);"
  );
  db_prepare(&q,
    "SELECT rid FROM unclustered WHERE rid IN"
    " (SELECT rid FROM tagxref WHERE tagid=%d)", TAG_CLUSTER
  );
  while( db_step(&q)==SQLITE_ROW ){
    bag_insert(&pending, db_column_int(&q, 0));
  }
  db_finalize(&q);
  while( bag_count(&pending)>0 ){
    Manifest *p;
    int rid = bag_first(&pending);
    int i;
    
    bag_remove(&pending, rid);
    p = manifest_get(rid, CFTYPE_CLUSTER, 0);
    if( p==0 ){
      fossil_fatal("bad cluster: rid=%d", rid);
    }
    for(i=0; i<p->nCChild; i++){
      const char *zUuid = p->azCChild[i];
      int crid = name_to_rid(zUuid);
      if( crid==0 ){
         fossil_warning("cluster (rid=%d) references unknown artifact %s",
                        rid, zUuid);
         continue;
      }
      db_multi_exec("INSERT OR IGNORE INTO xdone VALUES(%d)", crid);
      if( db_exists("SELECT 1 FROM tagxref WHERE tagid=%d AND rid=%d",
                    TAG_CLUSTER, crid) ){
        bag_insert(&pending, crid);
      }
    }
    manifest_destroy(p);
  }
  n = db_int(0, "SELECT count(*) FROM /*scan*/"
                "  (SELECT rid FROM blob EXCEPT SELECT x FROM xdone)");
  if( n==0 ){
    fossil_print("all artifacts reachable through clusters\n");
  }else{
    fossil_print("%d unreachable artifacts:\n", n);
    db_prepare(&q, "SELECT rid, uuid FROM blob WHERE rid NOT IN xdone");
    while( db_step(&q)==SQLITE_ROW ){
      fossil_print("  %3d %s\n", db_column_int(&q,0), db_column_text(&q,1));
    }
    db_finalize(&q);
  }
}
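
The traversal above is a plain worklist algorithm: seed the pending Bag with the unclustered cluster artifacts, pop one element at a time, and push any referenced artifact that is itself a cluster. Reduced to its skeleton with the same Bag primitives (the processing step is left as a comment):

/* Sketch only: generic worklist loop over the Bag API used above. */
static void sketch_worklist(int seedRid){
  Bag pending;
  bag_init(&pending);
  bag_insert(&pending, seedRid);
  while( bag_count(&pending)>0 ){
    int item = bag_first(&pending);
    bag_remove(&pending, item);
    /* ... process item; bag_insert() any newly discovered work ... */
  }
  bag_clear(&pending);
}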
Example #26
0
/*
** COMMAND: configuration*
**
** Usage: %vcs configuration METHOD ... ?OPTIONS?
**
** Where METHOD is one of: export import merge pull push reset sync.  All
** methods accept the -R or --repository option to specify a repository.
**
**    %vcs configuration export AREA FILENAME
**
**         Write to FILENAME exported configuration information for AREA.
**         AREA can be one of:  all email project shun skin ticket user
**
**    %vcs configuration import FILENAME
**
**         Read a configuration from FILENAME, overwriting the current
**         configuration.
**
**    %vcs configuration merge FILENAME
**
**         Read a configuration from FILENAME and merge its values into
**         the current configuration.  Existing values take priority over
**         values read from FILENAME.
**
**    %vcs configuration pull AREA ?URL?
**
**         Pull and install the configuration from a different server
**         identified by URL.  If no URL is specified, then the default
**         server is used. Use the --legacy option for the older protocol
**         (when talking to servers compiled prior to 2011-04-27.)  Use
**         the --overwrite flag to completely replace local settings with
**         content received from URL.
**
**    %vcs configuration push AREA ?URL?
**
**         Push the local configuration into the remote server identified
**         by URL.  Admin privilege is required on the remote server for
**         this to work.  When the same record exists both locally and on
**         the remote end, the one that was most recently changed wins.
**         Use the --legacy flag when talking to older servers.
**
**    %vcs configuration reset AREA
**
**         Restore the configuration to the default.  AREA as above.
**
**    %vcs configuration sync AREA ?URL?
**
**         Synchronize configuration changes in the local repository with
**         the remote repository at URL.
**
** Options:
**    -R|--repository FILE       Extract info from repository FILE
**
** See also: settings, unset
*/
void configuration_cmd(void) {
    int n;
    const char *zMethod;
    if( g.argc<3 ) {
        usage("export|import|merge|pull|reset ...");
    }
    db_find_and_open_repository(0, 0);
    db_open_config(0);
    zMethod = g.argv[2];
    n = strlen(zMethod);
    if( strncmp(zMethod, "export", n)==0 ) {
        int mask;
        const char *zSince = find_option("since",0,1);
        sqlite3_int64 iStart;
        if( g.argc!=5 ) {
            usage("export AREA FILENAME");
        }
        mask = configure_name_to_mask(g.argv[3], 1);
        if( zSince ) {
            iStart = db_int64(0,
                         "SELECT coalesce(strftime('%%s',%Q),strftime('%%s','now',%Q))+0",
                         zSince, zSince
                     );
        } else {
            iStart = 0;
        }
        export_config(mask, g.argv[3], iStart, g.argv[4]);
    } else if( strncmp(zMethod, "import", n)==0
               || strncmp(zMethod, "merge", n)==0 ) {
        Blob in;
        int groupMask;
        if( g.argc!=4 ) usage(mprintf("%s FILENAME",zMethod));
        blob_read_from_file(&in, g.argv[3]);
        db_begin_transaction();
        if( zMethod[0]=='i' ) {
            groupMask = CONFIGSET_ALL | CONFIGSET_OVERWRITE;
        } else {
            groupMask = CONFIGSET_ALL;
        }
        configure_receive_all(&in, groupMask);
        db_end_transaction(0);
    } else if( strncmp(zMethod, "pull", n)==0
               || strncmp(zMethod, "push", n)==0
               || strncmp(zMethod, "sync", n)==0
             ) {
        int mask;
        const char *zServer;
        const char *zPw;
        int legacyFlag = 0;
        int overwriteFlag = 0;
        if( zMethod[0]!='s' ) legacyFlag = find_option("legacy",0,0)!=0;
        if( strncmp(zMethod,"pull",n)==0 ) {
            overwriteFlag = find_option("overwrite",0,0)!=0;
        }
        url_proxy_options();
        if( g.argc!=4 && g.argc!=5 ) {
            usage("pull AREA ?URL?");
        }
        mask = configure_name_to_mask(g.argv[3], 1);
        if( g.argc==5 ) {
            zServer = g.argv[4];
            zPw = 0;
            g.dontKeepUrl = 1;
        } else {
            zServer = db_get("last-sync-url", 0);
            if( zServer==0 ) {
                vcs_fatal("no server specified");
            }
            zPw = unobscure(db_get("last-sync-pw", 0));
        }
        url_parse(zServer);
        if( g.urlPasswd==0 && zPw ) g.urlPasswd = mprintf("%s", zPw);
        user_select();
        url_enable_proxy("via proxy: ");
        if( legacyFlag ) mask |= CONFIGSET_OLDFORMAT;
        if( overwriteFlag ) mask |= CONFIGSET_OVERWRITE;
        if( strncmp(zMethod, "push", n)==0 ) {
            client_sync(0,0,0,0,0,mask);
        } else if( strncmp(zMethod, "pull", n)==0 ) {
            client_sync(0,0,0,0,mask,0);
        } else {
            client_sync(0,0,0,0,mask,mask);
        }
    } else if( strncmp(zMethod, "reset", n)==0 ) {
        int mask, i;
        char *zBackup;
        if( g.argc!=4 ) usage("reset AREA");
        mask = configure_name_to_mask(g.argv[3], 1);
        zBackup = db_text(0,
                          "SELECT strftime('config-backup-%%Y%%m%%d%%H%%M%%f','now')");
        db_begin_transaction();
        export_config(mask, g.argv[3], 0, zBackup);
        for(i=0; i<count(aConfig); i++) {
            const char *zName = aConfig[i].zName;
            if( (aConfig[i].groupMask & mask)==0 ) continue;
            if( zName[0]!='@' ) {
                db_multi_exec("DELETE FROM config WHERE name=%Q", zName);
            } else if( vcs_strcmp(zName,"@user")==0 ) {
                db_multi_exec("DELETE FROM user");
                db_create_default_users(0, 0);
            } else if( vcs_strcmp(zName,"@concealed")==0 ) {
                db_multi_exec("DELETE FROM concealed");
            } else if( vcs_strcmp(zName,"@shun")==0 ) {
                db_multi_exec("DELETE FROM shun");
            } else if( vcs_strcmp(zName,"@reportfmt")==0 ) {
                db_multi_exec("DELETE FROM reportfmt");
            }
        }
        db_end_transaction(0);
        vcs_print("Configuration reset to factory defaults.\n");
        vcs_print("To recover, use:  %s %s import %s\n",
                  vcs_nameofexe(), g.argv[1], zBackup);
    } else
    {
        vcs_fatal("METHOD should be one of:"
                  " export import merge pull push reset");
    }
}
Example #27
0
/*
** Impl of /json/dir. 98% of it was taken directly
** from browse.c::page_dir()
*/
static cson_value * json_page_dir_list(){
  cson_object * zPayload = NULL; /* return value */
  cson_array * zEntries = NULL; /* accumulated list of entries. */
  cson_object * zEntry = NULL;  /* a single dir/file entry. */
  cson_array * keyStore = NULL; /* garbage collector for shared strings. */
  cson_string * zKeyName = NULL;
  cson_string * zKeySize = NULL;
  cson_string * zKeyIsDir = NULL;
  cson_string * zKeyUuid = NULL;
  cson_string * zKeyTime = NULL;
  cson_string * zKeyRaw = NULL;
  char * zD = NULL;
  char const * zDX = NULL;
  int nD;
  char * zUuid = NULL;
  char const * zCI = NULL;
  Manifest * pM = NULL;
  Stmt q = empty_Stmt;
  int rid = 0;
  if( !g.perm.Read ){
    json_set_err(FSL_JSON_E_DENIED, "Requires 'o' permissions.");
    return NULL;
  }
  zCI = json_find_option_cstr("checkin",NULL,"ci" );

  /* If a specific check-in is requested, fetch and parse it.  If the
  ** specific check-in does not exist, clear zCI.  zCI==0 will cause all
  ** files from all check-ins to be displayed.
  */
  if( zCI && *zCI ){
    pM = manifest_get_by_name(zCI, &rid);
    if( pM ){
      zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid);
    }else{
      json_set_err(FSL_JSON_E_UNRESOLVED_UUID,
                   "Checkin name [%s] is unresolved.",
                   zCI);
      return NULL;
    }
  }

  /* Jump through some hoops to find the directory name... */
  zDX = json_find_option_cstr("name",NULL,NULL);
  if(!zDX && !g.isHTTP){
    zDX = json_command_arg(g.json.dispatchDepth+1);
  }
  if(zDX && (!*zDX || (0==strcmp(zDX,"/")))){
    zDX = NULL;
  }
  zD = zDX ? fossil_strdup(zDX) : NULL;
  nD = zD ? strlen(zD)+1 : 0;
  while( nD>1 && zD[nD-2]=='/' ){ zD[(--nD)-1] = 0; }

  sqlite3_create_function(g.db, "pathelement", 2, SQLITE_UTF8, 0,
                          pathelementFunc, 0, 0);

  /* Compute the temporary table "localfiles" containing the names
  ** of all files and subdirectories in the zD[] directory.
  **
  ** Subdirectory names begin with "/".  This causes them to sort
  ** first and it also gives us an easy way to distinguish files
  ** from directories in the loop that follows.
  */

  if( zCI ){
    Stmt ins;
    ManifestFile *pFile;
    ManifestFile *pPrev = 0;
    int nPrev = 0;
    int c;

    db_multi_exec(
                  "CREATE TEMP TABLE json_dir_files("
                  "  n UNIQUE NOT NULL," /* file name */
                  "  fn UNIQUE NOT NULL," /* full file name */
                  "  u DEFAULT NULL," /* file uuid */
                  "  sz DEFAULT -1," /* file size */
                  "  mtime DEFAULT NULL" /* file mtime in unix epoch format */
                  ");"
                  );

    db_prepare(&ins,
               "INSERT OR IGNORE INTO json_dir_files (n,fn,u,sz,mtime) "
               "SELECT"
               "  pathelement(:path,0),"
               "  CASE WHEN %Q IS NULL THEN '' ELSE %Q||'/' END ||:abspath,"
               "  a.uuid,"
               "  a.size,"
               "  CAST(strftime('%%s',e.mtime) AS INTEGER) "
               "FROM"
               "  mlink m, "
               "  event e,"
               "  blob a,"
               "  blob b "
               "WHERE"
               " e.objid=m.mid"
               " AND a.rid=m.fid"/*FILE artifact*/
               " AND b.rid=m.mid"/*CHECKIN artifact*/
               " AND a.uuid=:uuid",
               zD, zD
               );
    manifest_file_rewind(pM);
    while( (pFile = manifest_file_next(pM,0))!=0 ){
      if( nD>0
        && ((pFile->zName[nD-1]!='/') || (0!=memcmp(pFile->zName, zD, nD-1)))
      ){
        continue;
      }
      /*printf("zD=%s, nD=%d, pFile->zName=%s\n", zD, nD, pFile->zName);*/
      if( pPrev
       && memcmp(&pFile->zName[nD],&pPrev->zName[nD],nPrev)==0
       && (pFile->zName[nD+nPrev]==0 || pFile->zName[nD+nPrev]=='/')
      ){
        continue;
      }
      db_bind_text( &ins, ":path", &pFile->zName[nD] );
      db_bind_text( &ins, ":abspath", &pFile->zName[nD] );
      db_bind_text( &ins, ":uuid", pFile->zUuid );
      db_step(&ins);
      db_reset(&ins);
      pPrev = pFile;
      for(nPrev=0; (c=pPrev->zName[nD+nPrev]) && c!='/'; nPrev++){}
      if( c=='/' ) nPrev++;
    }
    db_finalize(&ins);
  }else if( zD && *zD ){
    db_multi_exec(
      "CREATE TEMP VIEW json_dir_files AS"
      " SELECT DISTINCT(pathelement(name,%d)) AS n,"
      " %Q||'/'||name AS fn,"
      " NULL AS u, NULL AS sz, NULL AS mtime"
      " FROM filename"
      "  WHERE name GLOB '%q/*'"
      " GROUP BY n",
      nD, zD, zD
    );
  }else{
    db_multi_exec(
      "CREATE TEMP VIEW json_dir_files"
      " AS SELECT DISTINCT(pathelement(name,0)) AS n, NULL AS fn"
      " FROM filename"
    );
  }

  if(zCI){
    db_prepare( &q, "SELECT"
                "  n as name,"
                "  fn as fullname,"
                "  u as uuid,"
                "  sz as size,"
                "  mtime as mtime "
                "FROM json_dir_files ORDER BY n");
  }else{/* UUIDs are all NULL. */
    db_prepare( &q, "SELECT n, fn FROM json_dir_files ORDER BY n");
  }

  zKeyName = cson_new_string("name",4);
  zKeyUuid = cson_new_string("uuid",4);
  zKeyIsDir = cson_new_string("isDir",5);
  keyStore = cson_new_array();
  cson_array_append( keyStore, cson_string_value(zKeyName) );
  cson_array_append( keyStore, cson_string_value(zKeyUuid) );
  cson_array_append( keyStore, cson_string_value(zKeyIsDir) );

  if( zCI ){
    zKeySize = cson_new_string("size",4);
    cson_array_append( keyStore, cson_string_value(zKeySize) );
    zKeyTime = cson_new_string("timestamp",9);
    cson_array_append( keyStore, cson_string_value(zKeyTime) );
    zKeyRaw = cson_new_string("downloadPath",12);
    cson_array_append( keyStore, cson_string_value(zKeyRaw) );
  }
  zPayload = cson_new_object();
  cson_object_set_s( zPayload, zKeyName,
                     json_new_string((zD&&*zD) ? zD : "/") );
  if( zUuid ){
    cson_object_set( zPayload, "checkin", json_new_string(zUuid) );
  }

  while( (SQLITE_ROW==db_step(&q)) ){
    cson_value * name = NULL;
    char const * n = db_column_text(&q,0);
    char const isDir = ('/'==*n);
    zEntry = cson_new_object();
    if(!zEntries){
      zEntries = cson_new_array();
      cson_object_set( zPayload, "entries", cson_array_value(zEntries) );
    }
    cson_array_append(zEntries, cson_object_value(zEntry) );
    if(isDir){
      name = json_new_string( n+1 );
      cson_object_set_s(zEntry, zKeyIsDir, cson_value_true() );
    } else{
      name = json_new_string( n );
    }
    cson_object_set_s(zEntry, zKeyName, name );
    if( zCI && !isDir){
      /* Don't add the uuid/size for dir entries - that data refers to
         one of the files in that directory :/. Entries with no
         --checkin may refer to N versions, and therefore we cannot
         associate a single size and uuid with them (and fetching all
         would be overkill for most use cases).
      */
      char const * fullName = db_column_text(&q,1);
      char const * u = db_column_text(&q,2);
      sqlite_int64 const sz = db_column_int64(&q,3);
      sqlite_int64 const ts = db_column_int64(&q,4);
      cson_object_set_s(zEntry, zKeyUuid, json_new_string( u ) );
      cson_object_set_s(zEntry, zKeySize,
                        cson_value_new_integer( (cson_int_t)sz ));
      cson_object_set_s(zEntry, zKeyTime,
          cson_value_new_integer( (cson_int_t)ts ));
      cson_object_set_s(zEntry, zKeyRaw,
                        json_new_string_f("/raw/%T?name=%t",
                                          fullName, u));
    }
  }
  db_finalize(&q);
  if(pM){
    manifest_destroy(pM);
  }
  cson_free_array( keyStore );

  free( zUuid );
  free( zD );
  return cson_object_value(zPayload);
}
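
The terse loop near the top, while( nD>1 && zD[nD-2]=='/' ){ zD[(--nD)-1] = 0; }, trims trailing slashes from the requested directory name; nD counts the terminating NUL, so zD[nD-2] is the last visible character. Essentially the same normalization written out plainly:

#include <string.h>

/* Sketch only: drop trailing '/' characters from a writable path so that
** "src/json/" and "src/json///" both normalize to "src/json". */
static void strip_trailing_slashes(char *zPath){
  size_t n = strlen(zPath);
  while( n>1 && zPath[n-1]=='/' ){
    zPath[--n] = 0;
  }
}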
Example #28
0
/*
** Insert a tag into the database.
*/
int tag_insert(
  const char *zTag,        /* Name of the tag (w/o the "+" or "-" prefix) */
  int tagtype,             /* 0:cancel  1:singleton  2:propagated */
  const char *zValue,      /* Value if the tag is really a property */
  int srcId,               /* Artifact that contains this tag */
  double mtime,            /* Timestamp.  Use default if <=0.0 */
  int rid                  /* Artifact to which the tag is to attached */
){
  Stmt s;
  const char *zCol;
  int tagid = tag_findid(zTag, 1);
  int rc;

  if( mtime<=0.0 ){
    mtime = db_double(0.0, "SELECT julianday('now')");
  }
  db_prepare(&s,
    "SELECT 1 FROM tagxref"
    " WHERE tagid=%d"
    "   AND rid=%d"
    "   AND mtime>=:mtime",
    tagid, rid
  );
  db_bind_double(&s, ":mtime", mtime);
  rc = db_step(&s);
  db_finalize(&s);
  if( rc==SQLITE_ROW ){
    /* Another entry that is more recent already exists.  Do nothing */
    return tagid;
  }
  db_prepare(&s, 
    "REPLACE INTO tagxref(tagid,tagtype,srcId,origid,value,mtime,rid)"
    " VALUES(%d,%d,%d,%d,%Q,:mtime,%d)",
    tagid, tagtype, srcId, rid, zValue, rid
  );
  db_bind_double(&s, ":mtime", mtime);
  db_step(&s);
  db_finalize(&s);
  if( tagid==TAG_BRANCH ) leaf_eventually_check(rid);
  if( tagtype==0 ){
    zValue = 0;
  }
  zCol = 0;
  switch( tagid ){
    case TAG_BGCOLOR: {
      zCol = "bgcolor";
      break;
    }
    case TAG_COMMENT: {
      zCol = "ecomment";
      break;
    }
    case TAG_USER: {
      zCol = "euser";
      break;
    }
    case TAG_PRIVATE: {
      db_multi_exec(
        "INSERT OR IGNORE INTO private(rid) VALUES(%d);",
        rid
      );
    }
  }
  if( zCol ){
    db_multi_exec("UPDATE event SET %s=%Q WHERE objid=%d", zCol, zValue, rid);
    if( tagid==TAG_COMMENT ){
      char *zCopy = mprintf("%s", zValue);
      wiki_extract_links(zCopy, rid, 0, mtime, 1, WIKI_INLINE);
      free(zCopy);
    }
  }
  if( tagid==TAG_DATE ){
    db_multi_exec("UPDATE event "
                  "   SET mtime=julianday(%Q),"
                  "       omtime=coalesce(omtime,mtime)"
                  " WHERE objid=%d",
                  zValue, rid);
  }
  if( tagtype==1 ) tagtype = 0;
  tag_propagate(rid, tagid, tagtype, rid, zValue, mtime);
  return tagid;
}
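
As a usage illustration with hypothetical values (not taken from the Fossil sources): attaching a propagating branch label to check-in rid might look like the following, where tagtype 2 means the tag propagates to descendants and an mtime of 0.0 or less makes tag_insert fall back to the current julianday:

/* Sketch only (hypothetical helper and branch name). */
static void sketch_label_branch(int rid){
  tag_insert("branch", 2, "feature-x", rid, 0.0, rid);   /* branch=feature-x */
  tag_insert("sym-feature-x", 2, 0, rid, 0.0, rid);      /* symbolic name    */
}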
Example #29
0
/*
** Write content into the database.  Return the record ID.  If the
** content is already in the database, just return the record ID.
**
** If srcId is specified, then pBlob is delta content from
** the srcId record.  srcId might be a phantom.  
**
** pBlob is normally uncompressed text.  But if nBlob>0 then the
** pBlob value has already been compressed and nBlob is its uncompressed
** size.  If nBlob>0 then zUuid must be valid.
**
** zUuid is the UUID of the artifact, if it is specified.  When srcId is
** specified then zUuid must always be specified.  If srcId is zero,
** and zUuid is zero then the correct zUuid is computed from pBlob.
**
** If the record already exists but is a phantom, the pBlob content
** is inserted and the phantom becomes a real record.
**
** The original content of pBlob is not disturbed.  The caller continues
** to be responsible for pBlob.  This routine does *not* take over
** responsibility for freeing pBlob.
*/
int content_put_ex(
  Blob *pBlob,              /* Content to add to the repository */
  const char *zUuid,        /* SHA1 hash of reconstructed pBlob */
  int srcId,                /* pBlob is a delta from this entry */
  int nBlob,                /* pBlob is compressed. Original size is this */
  int isPrivate             /* The content should be marked private */
){
  int size;
  int rid;
  Stmt s1;
  Blob cmpr;
  Blob hash;
  int markAsUnclustered = 0;
  int isDephantomize = 0;
  
  assert( g.repositoryOpen );
  assert( pBlob!=0 );
  assert( srcId==0 || zUuid!=0 );
  if( zUuid==0 ){
    assert( nBlob==0 );
    sha1sum_blob(pBlob, &hash);
  }else{
    blob_init(&hash, zUuid, -1);
  }
  if( nBlob ){
    size = nBlob;
  }else{
    size = blob_size(pBlob);
    if( srcId ){
      size = delta_output_size(blob_buffer(pBlob), size);
    }
  }
  db_begin_transaction();

  /* Check to see if the entry already exists and if it does whether
  ** or not the entry is a phantom
  */
  db_prepare(&s1, "SELECT rid, size FROM blob WHERE uuid=%B", &hash);
  if( db_step(&s1)==SQLITE_ROW ){
    rid = db_column_int(&s1, 0);
    if( db_column_int(&s1, 1)>=0 || pBlob==0 ){
      /* Either the entry is not a phantom or it is a phantom but we
      ** have no data with which to dephantomize it.  In either case,
      ** there is nothing for us to do other than return the RID. */
      db_finalize(&s1);
      db_end_transaction(0);
      return rid;
    }
  }else{
    rid = 0;  /* No entry with the same UUID currently exists */
    markAsUnclustered = 1;
  }
  db_finalize(&s1);

  /* Construct a received-from ID if we do not already have one */
  if( g.rcvid==0 ){
    db_multi_exec(
       "INSERT INTO rcvfrom(uid, mtime, nonce, ipaddr)"
       "VALUES(%d, julianday('now'), %Q, %Q)",
       g.userUid, g.zNonce, g.zIpAddr
    );
    g.rcvid = db_last_insert_rowid();
  }

  if( nBlob ){
    cmpr = pBlob[0];
  }else{
    blob_compress(pBlob, &cmpr);
  }
  if( rid>0 ){
    /* We are just adding data to a phantom */
    db_prepare(&s1,
      "UPDATE blob SET rcvid=%d, size=%d, content=:data WHERE rid=%d",
       g.rcvid, size, rid
    );
    db_bind_blob(&s1, ":data", &cmpr);
    db_exec(&s1);
    db_multi_exec("DELETE FROM phantom WHERE rid=%d", rid);
    if( srcId==0 || content_is_available(srcId) ){
      isDephantomize = 1;
      content_mark_available(rid);
    }
  }else{
    /* We are creating a new entry */
    db_prepare(&s1,
      "INSERT INTO blob(rcvid,size,uuid,content)"
      "VALUES(%d,%d,'%b',:data)",
       g.rcvid, size, &hash
    );
    db_bind_blob(&s1, ":data", &cmpr);
    db_exec(&s1);
    rid = db_last_insert_rowid();
    if( !pBlob ){
      db_multi_exec("INSERT OR IGNORE INTO phantom VALUES(%d)", rid);
    }
    if( g.markPrivate || isPrivate ){
      db_multi_exec("INSERT INTO private VALUES(%d)", rid);
      markAsUnclustered = 0;
    }
  }
  if( nBlob==0 ) blob_reset(&cmpr);

  /* If the srcId is specified, then the data we just added is
  ** really a delta.  Record this fact in the delta table.
  */
  if( srcId ){
    db_multi_exec("REPLACE INTO delta(rid,srcid) VALUES(%d,%d)", rid, srcId);
  }
  if( !isDephantomize && bag_find(&contentCache.missing, rid) && 
      (srcId==0 || content_is_available(srcId)) ){
    content_mark_available(rid);
  }
  if( isDephantomize ){
    after_dephantomize(rid, 0);
  }
  
  /* Add the element to the unclustered table if it has never been
  ** previously seen.
  */
  if( markAsUnclustered ){
    db_multi_exec("INSERT OR IGNORE INTO unclustered VALUES(%d)", rid);
  }

  /* Finish the transaction and cleanup */
  db_finalize(&s1);
  db_end_transaction(0);
  blob_reset(&hash);

  /* Make arrangements to verify that the data can be recovered
  ** before we commit */
  verify_before_commit(rid);
  return rid;
}
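
For the common case of plain, uncompressed, non-delta content whose hash should be computed internally, every optional parameter is zero; a hedged sketch of such a caller:

/* Sketch only: store an ordinary public artifact and return its record ID.
** zUuid==0 lets content_put_ex hash pBlob itself, nBlob==0 says the blob is
** uncompressed, srcId==0 means "not a delta", isPrivate==0 keeps it public. */
static int sketch_store_artifact(Blob *pContent){
  return content_put_ex(pContent, 0, 0, 0, 0);
}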
Example #30
0
/*
** COMMAND:  user
**
** Usage: %fossil user SUBCOMMAND ...  ?-R|--repository FILE?
**
** Run various subcommands on users of the open repository or of
** the repository identified by the -R or --repository option.
**
**    %fossil user capabilities USERNAME ?STRING?
**
**        Query or set the capabilities for user USERNAME
**
**    %fossil user default ?USERNAME?
**
**        Query or set the default user.  The default user is the
**        user for command-line interaction.
**
**    %fossil user list
**
**        List all users known to the repository
**
**    %fossil user new ?USERNAME? ?CONTACT-INFO? ?PASSWORD?
**
**        Create a new user in the repository.  Users can never be
**        deleted.  They can be denied all access but they must continue
**        to exist in the database.
**
**    %fossil user password USERNAME ?PASSWORD?
**
**        Change the web access password for a user.
*/
void user_cmd(void){
  int n;
  db_find_and_open_repository(1);
  if( g.argc<3 ){
    usage("capabilities|default|list|new|password ...");
  }
  n = strlen(g.argv[2]);
  if( n>=2 && strncmp(g.argv[2],"new",n)==0 ){
    Blob passwd, login, contact;
    char *zPw;

    if( g.argc>=4 ){
      blob_init(&login, g.argv[3], -1);
    }else{
      prompt_user("login: "******"SELECT 1 FROM user WHERE login=%B", &login) ){
      fossil_fatal("user %b already exists", &login);
    }
    if( g.argc>=5 ){
      blob_init(&contact, g.argv[4], -1);
    }else{
      prompt_user("contact-info: ", &contact);
    }
    if( g.argc>=6 ){
      blob_init(&passwd, g.argv[5], -1);
    }else{
      prompt_for_password("password: "******"INSERT INTO user(login,pw,cap,info)"
      "VALUES(%B,%Q,'v',%B)",
      &login, zPw, &contact
    );
    free(zPw);
  }else if( n>=2 && strncmp(g.argv[2],"default",n)==0 ){
    user_select();
    if( g.argc==3 ){
      printf("%s\n", g.zLogin);
    }else{
      if( !db_exists("SELECT 1 FROM user WHERE login=%Q", g.argv[3]) ){
        fossil_fatal("no such user: %s", g.argv[3]);
      }
      if( g.localOpen ){
        db_lset("default-user", g.argv[3]);
      }else{
        db_set("default-user", g.argv[3], 0);
      }
    }
  }else if( n>=2 && strncmp(g.argv[2],"list",n)==0 ){
    Stmt q;
    db_prepare(&q, "SELECT login, info FROM user ORDER BY login");
    while( db_step(&q)==SQLITE_ROW ){
      printf("%-12s %s\n", db_column_text(&q, 0), db_column_text(&q, 1));
    }
    db_finalize(&q);
  }else if( n>=2 && strncmp(g.argv[2],"password",2)==0 ){
    char *zPrompt;
    int uid;
    Blob pw;
    if( g.argc!=4 && g.argc!=5 ) usage("password USERNAME ?NEW-PASSWORD?");
    uid = db_int(0, "SELECT uid FROM user WHERE login=%Q", g.argv[3]);
    if( uid==0 ){
      fossil_fatal("no such user: %s", g.argv[3]);
    }
    if( g.argc==5 ){
      blob_init(&pw, g.argv[4], -1);
    }else{
      zPrompt = mprintf("new passwd for %s: ", g.argv[3]);
      prompt_for_password(zPrompt, &pw, 1);
    }
    if( blob_size(&pw)==0 ){
      printf("password unchanged\n");
    }else{
      char *zSecret = sha1_shared_secret(blob_str(&pw), g.argv[3]);
      db_multi_exec("UPDATE user SET pw=%Q WHERE uid=%d", zSecret, uid);
      free(zSecret);
    }
  }else if( n>=2 && strncmp(g.argv[2],"capabilities",2)==0 ){
    int uid;
    if( g.argc!=4 && g.argc!=5 ){
      usage("user capabilities USERNAME ?PERMISSIONS?");
    }
    uid = db_int(0, "SELECT uid FROM user WHERE login=%Q", g.argv[3]);
    if( uid==0 ){
      fossil_fatal("no such user: %s", g.argv[3]);
    }
    if( g.argc==5 ){
      db_multi_exec(
        "UPDATE user SET cap=%Q WHERE uid=%d", g.argv[4],
        uid
      );
    }
    printf("%s\n", db_text(0, "SELECT cap FROM user WHERE uid=%d", uid));
  }else{
    fossil_panic("user subcommand should be one of: "
                 "capabilities default list new password");
  }
}