/*
** Load the record ID rid and up to N-1 closest ancestors into
** the "ok" table.
*/
void compute_ancestors(int rid, int N, int directOnly){
  Bag seen;
  PQueue queue;
  Stmt ins;
  Stmt q;
  bag_init(&seen);
  pqueuex_init(&queue);
  bag_insert(&seen, rid);
  pqueuex_insert(&queue, rid, 0.0, 0);
  db_prepare(&ins, "INSERT OR IGNORE INTO ok VALUES(:rid)");
  db_prepare(&q,
    "SELECT a.pid, b.mtime FROM plink a LEFT JOIN plink b ON b.cid=a.pid"
    " WHERE a.cid=:rid %s",
    directOnly ? " AND a.isprim" : ""
  );
  while( (N--)>0 && (rid = pqueuex_extract(&queue, 0))!=0 ){
    db_bind_int(&ins, ":rid", rid);
    db_step(&ins);
    db_reset(&ins);
    db_bind_int(&q, ":rid", rid);
    while( db_step(&q)==SQLITE_ROW ){
      int pid = db_column_int(&q, 0);
      double mtime = db_column_double(&q, 1);
      if( bag_insert(&seen, pid) ){
        /* -mtime so that the most recent (closest) ancestors pop first */
        pqueuex_insert(&queue, pid, -mtime, 0);
      }
    }
    db_reset(&q);
  }
  bag_clear(&seen);
  pqueuex_clear(&queue);
  db_finalize(&ins);
  db_finalize(&q);
}
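/*
** A hedged usage sketch (not from the Fossil sources): the temporary "ok"
** table is normally created elsewhere in Fossil, so the one-column schema
** below is an assumption.  The sketch shows how compute_ancestors() might
** be used to list a check-in together with its 19 closest ancestors.
*/
static void print_ancestors_sketch(int rid){
  Stmt s;
  db_multi_exec("CREATE TEMP TABLE IF NOT EXISTS ok(rid INTEGER PRIMARY KEY)");
  compute_ancestors(rid, 20, 0);      /* rid plus up to 19 closest ancestors */
  db_prepare(&s, "SELECT rid FROM ok");
  while( db_step(&s)==SQLITE_ROW ){
    fossil_print("ancestor rid=%d\n", db_column_int(&s, 0));
  }
  db_finalize(&s);
}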
/*
** Load the record ID rid and up to N-1 closest descendants into
** the "ok" table.
*/
void compute_descendants(int rid, int N){
  Bag seen;
  PQueue queue;
  Stmt ins;
  Stmt q;

  bag_init(&seen);
  pqueuex_init(&queue);
  bag_insert(&seen, rid);
  pqueuex_insert(&queue, rid, 0.0, 0);
  db_prepare(&ins, "INSERT OR IGNORE INTO ok VALUES(:rid)");
  db_prepare(&q, "SELECT cid, mtime FROM plink WHERE pid=:rid");
  while( (N--)>0 && (rid = pqueuex_extract(&queue, 0))!=0 ){
    db_bind_int(&ins, ":rid", rid);
    db_step(&ins);
    db_reset(&ins);
    db_bind_int(&q, ":rid", rid);
    while( db_step(&q)==SQLITE_ROW ){
      int cid = db_column_int(&q, 0);  /* a child check-in of rid */
      double mtime = db_column_double(&q, 1);
      if( bag_insert(&seen, cid) ){
        /* +mtime so that the oldest (closest) descendants pop first */
        pqueuex_insert(&queue, cid, mtime, 0);
      }
    }
    db_reset(&q);
  }
  bag_clear(&seen);
  pqueuex_clear(&queue);
  db_finalize(&ins);
  db_finalize(&q);
}
/*
** Add to table zTab the record ID (rid) of every check-in that contains
** the file fid.
*/
void compute_uses_file(const char *zTab, int fid, int usesFlags){
  Bag seen;
  Bag pending;
  Stmt ins;
  Stmt q;
  int rid;

  bag_init(&seen);
  bag_init(&pending);
  db_prepare(&ins, "INSERT OR IGNORE INTO \"%s\" VALUES(:rid)", zTab);
  db_prepare(&q, "SELECT mid FROM mlink WHERE fid=%d", fid);
  while( db_step(&q)==SQLITE_ROW ){
    int mid = db_column_int(&q, 0);
    bag_insert(&pending, mid);
    bag_insert(&seen, mid);
    db_bind_int(&ins, ":rid", mid);
    db_step(&ins);
    db_reset(&ins);
  }
  db_finalize(&q);

  db_prepare(&q, "SELECT mid FROM mlink WHERE pid=%d", fid);
  while( db_step(&q)==SQLITE_ROW ){
    int mid = db_column_int(&q, 0);
    bag_insert(&seen, mid);
    if( usesFlags & USESFILE_DELETE ){
      db_bind_int(&ins, ":rid", mid);
      db_step(&ins);
      db_reset(&ins);
    }
  }
  db_finalize(&q);
  db_prepare(&q, "SELECT cid FROM plink WHERE pid=:rid");

  while( (rid = bag_first(&pending))!=0 ){
    bag_remove(&pending, rid);
    db_bind_int(&q, ":rid", rid);
    while( db_step(&q)==SQLITE_ROW ){
      int mid = db_column_int(&q, 0);
      if( bag_find(&seen, mid) ) continue;
      bag_insert(&seen, mid);
      bag_insert(&pending, mid);
      db_bind_int(&ins, ":rid", mid);
      db_step(&ins);
      db_reset(&ins);
    }
    db_reset(&q);
  }
  db_finalize(&q);
  db_finalize(&ins);
  bag_clear(&seen);
  bag_clear(&pending);
}
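/*
** A companion sketch for compute_uses_file(), following the same pattern as
** the compute_ancestors() sketch above.  The table name "uf" and its schema
** are illustrative assumptions; fid is the BLOB rid of a file artifact, and
** passing 0 for usesFlags omits check-ins that merely deleted the file.
*/
static void print_checkins_using_file(int fid){
  Stmt s;
  db_multi_exec("CREATE TEMP TABLE IF NOT EXISTS uf(rid INTEGER PRIMARY KEY)");
  compute_uses_file("uf", fid, 0);
  db_prepare(&s, "SELECT rid FROM uf");
  while( db_step(&s)==SQLITE_ROW ){
    fossil_print("check-in rid=%d uses file %d\n", db_column_int(&s, 0), fid);
  }
  db_finalize(&s);
}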
/*
** Schedule a leaf check for "rid" and its parents.
*/
void leaf_eventually_check(int rid){
  static Stmt parentsOf;

  db_static_prepare(&parentsOf, 
     "SELECT pid FROM plink WHERE cid=:rid AND pid>0"
  );
  db_bind_int(&parentsOf, ":rid", rid);
  bag_insert(&needToCheck, rid);
  while( db_step(&parentsOf)==SQLITE_ROW ){
    bag_insert(&needToCheck, db_column_int(&parentsOf, 0));
  }
  db_reset(&parentsOf);
}
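/*
** A hedged sketch (not from the Fossil sources) of how the deferred leaf
** checks accumulated in the needToCheck bag above might later be drained,
** using the bag_first()/bag_next() iteration pattern seen in the other
** snippets.  leaf_check_one() is a hypothetical helper standing in for
** whatever recomputes leaf status for a single check-in.
*/
static void leaf_do_pending_checks_sketch(void){
  int rid;
  for(rid=bag_first(&needToCheck); rid>0; rid=bag_next(&needToCheck, rid)){
    leaf_check_one(rid);   /* hypothetical: recompute leaf status of rid */
  }
  bag_clear(&needToCheck);
}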
Example #5
/*
** Create a new phantom with the given UUID and return its artifact ID.
*/
int content_new(const char *zUuid, int isPrivate){
  int rid;
  static Stmt s1, s2, s3;
  
  assert( g.repositoryOpen );
  db_begin_transaction();
  if( uuid_is_shunned(zUuid) ){
    db_end_transaction(0);
    return 0;
  }
  db_static_prepare(&s1,
    "INSERT INTO blob(rcvid,size,uuid,content)"
    "VALUES(0,-1,:uuid,NULL)"
  );
  db_bind_text(&s1, ":uuid", zUuid);
  db_exec(&s1);
  rid = db_last_insert_rowid();
  db_static_prepare(&s2,
    "INSERT INTO phantom VALUES(:rid)"
  );
  db_bind_int(&s2, ":rid", rid);
  db_exec(&s2);
  if( g.markPrivate || isPrivate ){
    db_multi_exec("INSERT INTO private VALUES(%d)", rid);
  }else{
    db_static_prepare(&s3,
      "INSERT INTO unclustered VALUES(:rid)"
    );
    db_bind_int(&s3, ":rid", rid);
    db_exec(&s3);
  }
  bag_insert(&contentCache.missing, rid);
  db_end_transaction(0);
  return rid;
}
Example #6
bag_t *generate_index(FILE *input, int min_word_len)
{
    bag_t *index = bag_create(entry_cmp);

    if (index) {
        char word[LINE_LENGTH] = "";
        entry_t new_word, *existing_entry;
        bag_elem_t new_entry;
        unsigned page = 0;
        while (get_word(input, word, &page))
        {
            new_word.entry_word = word;
            // check if the length of the word is long enough
            if(strlen(word) >= (size_t)min_word_len)
            {
                existing_entry = bag_contains(index, &new_word);
                if(existing_entry != NULL) // if the word is already in index
                {
                    entry_add(existing_entry, page); // add the location to the list of locations for that word
                }
                else // if the word isn't in the index
                {
                    new_entry = entry_create(word, page); // create the entry
                    bag_insert(index, new_entry); // add the location
                }
            }
        }
    }
    return index;
}
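/*
** A usage sketch for generate_index(), under stated assumptions: the file
** path, the minimum word length of 4, and the lookup word "fossil" are all
** illustrative; entry_t, bag_create(), and bag_contains() are used exactly
** as in the surrounding examples.  Freeing the index is omitted because the
** bag's destruction API is not shown in these snippets.
*/
int index_demo(const char *path)
{
    FILE *input = fopen(path, "r");
    if (input == NULL)
        return 1;

    bag_t *index = generate_index(input, 4);
    fclose(input);
    if (index == NULL)
        return 1;

    // look up a word the same way generate_index() does internally
    entry_t probe, *found;
    probe.entry_word = "fossil";
    found = bag_contains(index, &probe);
    printf("\"fossil\" %s in the index\n", found ? "is" : "is not");
    return 0;
}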
Example #7
int main(){

	//create a new bag, passing free() as the delete function pointer
	bag_t *bag = bag_new(free);

	//allocate memory on the heap so it can be freed later
	char *data1 = malloc(strlen("Dartmouth") + 1);
	strcpy(data1, "Dartmouth");

	char *data2 = malloc(strlen("@") + 1);
	strcpy(data2, "@");


	bag_insert(bag, data1);
	bag_insert(bag, data2);

	printf("Inserted \"%s\" and \"%s\" into the bag\n\n", data1, data2);

	//test that data is extracted properly
	printf("Extracting all data now\n----------------------\n");
	void *fromBag1 = bag_extract(bag);
	void *fromBag2 = bag_extract(bag);

	printf("\n");

	printf("\"%s\" is the first item (string) extracted from the bag.\n", (char *) fromBag1);
	printf("\"%s\" is the second item (string) extracted from the bag.\n\n", (char *) fromBag2);

	//test that extracting from an empty bag returns NULL
	printf("Test if extracting an item from an empty bag returns NULL\n--------------------------------------------------------\n");
	void *fromEmpty = bag_extract(bag);
	printf("The last extraction %s NULL\n", fromEmpty==NULL ? "was" : "was not");

	//reinsert the data and delete the bag
	bag_insert(bag, data1);
	bag_insert(bag, data2);


	printf("Inserted \"%s\" and \"%s\" into the bag\n\n", data1, data2);

	//delete the bag and its contents
	//bag_delete() in bag.c reports whether each item was deleted properly
	bag_delete(bag);

	return 0;
}
Example #8
/*
** Called after each artifact is processed
*/
static void rebuild_step_done(int rid){
  /* assert( bag_find(&bagDone, rid)==0 ); */
  bag_insert(&bagDone, rid);
  if( ttyOutput ){
    processCnt++;
    if (!g.fQuiet && totalSize>0) {
      percent_complete((processCnt*1000)/totalSize);
    }
  }
}
Example #9
/*
** Arrange to verify a particular record prior to committing.
** 
** If the record rid is less than 1, then just initialize the
** verification system but do not record anything as needing
** verification.
*/
void verify_before_commit(int rid){
  static int isInit = 0;
  if( !isInit ){
    db_commit_hook(verify_at_commit, 1000);
    isInit = 1;
  }
  assert( !inFinalVerify );
  if( rid>0 ){
    bag_insert(&toVerify, rid);
  }
}
Example #10
void entry_add(bag_elem_t element, unsigned page)
{
    entry_t *mod = element;

    page_entry *new_page = malloc(sizeof(page_entry));
    *new_page = page;

    // add the page only if it is not already in the index for this word
    if (!bag_contains(mod->page_index, new_page)) {
        bag_insert(mod->page_index, new_page);
    } else {
        free(new_page);   // avoid leaking the duplicate page entry
    }
}
Example #11
/*
** Mark artifact rid as being available now.  Update the cache to
** show that everything that was formerly unavailable because rid
** was missing is now available.
*/
static void content_mark_available(int rid){
  Bag pending;
  static Stmt q;
  if( bag_find(&contentCache.available, rid) ) return;
  bag_init(&pending);
  bag_insert(&pending, rid);
  while( (rid = bag_first(&pending))!=0 ){
    bag_remove(&pending, rid);
    bag_remove(&contentCache.missing, rid);
    bag_insert(&contentCache.available, rid);
    db_static_prepare(&q, "SELECT rid FROM delta WHERE srcid=:rid");
    db_bind_int(&q, ":rid", rid);
    while( db_step(&q)==SQLITE_ROW ){
      int nx = db_column_int(&q, 0);
      bag_insert(&pending, nx);
    }
    db_reset(&q);
  }
  bag_clear(&pending);
}
Example #12
/*
** Check to see if content is available for artifact "rid".  Return
** true if it is.  Return false if rid is a phantom or depends on
** a phantom.
*/
int content_is_available(int rid){
  int srcid;
  int depth = 0;  /* Limit to recursion depth */
  while( depth++ < 10000000 ){  
    if( bag_find(&contentCache.missing, rid) ){
      return 0;
    }
    if( bag_find(&contentCache.available, rid) ){
      return 1;
    }
    if( content_size(rid, -1)<0 ){
      bag_insert(&contentCache.missing, rid);
      return 0;
    }
    srcid = findSrcid(rid);
    if( srcid==0 ){
      bag_insert(&contentCache.available, rid);
      return 1;
    }
    rid = srcid;
  }
  fossil_panic("delta-loop in repository");
  return 0;
}
Example #13
bag_elem_t entry_create(const char *word, unsigned page)
{
    // Allocate the memory for the new entry
    entry_t *new_entry = malloc(sizeof(entry_t));
    
    // Copy the word into a new string and put it in the entry.
    new_entry->entry_word = malloc((strlen(word) + 1) * sizeof(char));
    strcpy(new_entry->entry_word, word);

    // Create the page index bag to hold the page numbers.
    new_entry->page_index = bag_create(page_cmp);
    page_entry *new_page = malloc(sizeof(page_entry));
    *new_page = page;
    
    // add the page to the page index.
    bag_insert(new_entry->page_index, new_page);
    return new_entry;
}
Example #14
// Splits this bag into two bags of roughly half the size each.
// The calling bag keeps the remainder of the split.
Bag* Bag::bag_split() {
	Bag* other = new Bag(scale);
	Pennant* leftover = penArray[0];
	penArray[0] = nullptr;
	//splitting the bag is analogous to shifting by 1
	for (int k = 1; k < scale + 1; ++k) {
		if (penArray[k] != nullptr) {
			other->penArray[k - 1] = penArray[k]->pen_split();
			penArray[k - 1] = penArray[k];
			penArray[k] = nullptr;
		}
	}
	other->size = size / 2;
	size = size / 2;
	//add back in the remainder
	if (leftover != nullptr) {
		bag_insert(leftover->data);
	}

	return other;
}
Example #15
/*
** Add an entry to the content cache.
**
** This routines hands responsibility for the artifact over to the cache.
** The cache will deallocate memory when it has finished with it.
*/
void content_cache_insert(int rid, Blob *pBlob){
  struct cacheLine *p;
  if( contentCache.n>500 || contentCache.szTotal>50000000 ){
    i64 szBefore;
    do{
      szBefore = contentCache.szTotal;
      content_cache_expire_oldest();
    }while( contentCache.szTotal>50000000 && contentCache.szTotal<szBefore );
  }
  if( contentCache.n>=contentCache.nAlloc ){
    contentCache.nAlloc = contentCache.nAlloc*2 + 10;
    contentCache.a = fossil_realloc(contentCache.a,
                             contentCache.nAlloc*sizeof(contentCache.a[0]));
  }
  p = &contentCache.a[contentCache.n++];
  p->rid = rid;
  p->age = contentCache.nextAge++;
  contentCache.szTotal += blob_size(pBlob);
  p->content = *pBlob;
  blob_zero(pBlob);
  bag_insert(&contentCache.inCache, rid);
}
Example #16
/*
** If there are public BLOBs that are deltas from private BLOBs, then
** undeltify the public BLOBs so that the private BLOBs may be safely
** deleted.
*/
void fix_private_blob_dependencies(int showWarning){
  Bag toUndelta;
  Stmt q;
  int rid;

  /* Careful:  We are about to delete all BLOB entries that are private.
  ** So make sure that no public BLOBs are deltas from a private BLOB.
  ** Otherwise after the deletion, we won't be able to recreate the public
  ** BLOBs.
  */
  db_prepare(&q,
    "SELECT "
    "   rid, (SELECT uuid FROM blob WHERE rid=delta.rid),"
    "   srcid, (SELECT uuid FROM blob WHERE rid=delta.srcid)"
    "  FROM delta"
    " WHERE srcid in private AND rid NOT IN private"
  );
  bag_init(&toUndelta);
  while( db_step(&q)==SQLITE_ROW ){
    int rid = db_column_int(&q, 0);
    const char *zId = db_column_text(&q, 1);
    int srcid = db_column_int(&q, 2);
    const char *zSrc = db_column_text(&q, 3);
    if( showWarning ){
      fossil_warning(
        "public artifact %S (%d) is a delta from private artifact %S (%d)",
        zId, rid, zSrc, srcid
      );
    }
    bag_insert(&toUndelta, rid);
  }
  db_finalize(&q);
  while( (rid = bag_first(&toUndelta))>0 ){
    content_undelta(rid);
    bag_remove(&toUndelta, rid);
  }
  bag_clear(&toUndelta);
}
/*
** COMMAND: all
**
** Usage: %fossil all (changes|ignore|list|ls|pull|push|rebuild|sync)
**
** The ~/.fossil file records the location of all repositories for a
** user.  This command performs certain operations on all repositories
** that can be useful before or after a period of disconnected operation.
**
** On Win32 systems, the file is named "_fossil" and is located in
** %LOCALAPPDATA%, %APPDATA% or %HOMEPATH%.
**
** Available operations are:
**
**    ignore     Arguments are repositories that should be ignored
**               by subsequent list, pull, push, rebuild, and sync.
**
**    list | ls  Display the location of all repositories.
**               The --ckout option causes all local checkouts to be
**               listed instead.
**
**    changes    Shows all local checkouts that have uncommitted changes
**
**    pull       Run a "pull" operation on all repositories
**
**    push       Run a "push" on all repositories
**
**    rebuild    Rebuild on all repositories
**
**    sync       Run a "sync" on all repositories
**
** Repositories are automatically added to the set of known repositories
** when one of the following commands is run against the repository: clone,
** info, pull, push, or sync.  Even previously ignored repositories are
** added back to the list of repositories by these commands.
*/
void all_cmd(void){
  int n;
  Stmt q;
  const char *zCmd;
  char *zSyscmd;
  char *zFossil;
  char *zQFilename;
  int useCheckouts = 0;
  int quiet = 0;
  int testRun = 0;
  int stopOnError = find_option("dontstop",0,0)==0;
  int rc;
  Bag outOfDate;
  
  /* The undocumented --test option causes no changes to occur to any
  ** repository, but instead shows what would have happened.  Intended for
  ** test and debugging use.
  */
  testRun = find_option("test",0,0)!=0;

  if( g.argc<3 ){
    usage("changes|list|ls|pull|push|rebuild|sync");
  }
  n = strlen(g.argv[2]);
  db_open_config(1);
  zCmd = g.argv[2];
  if( strncmp(zCmd, "list", n)==0 || strncmp(zCmd,"ls",n)==0 ){
    zCmd = "list";
    useCheckouts = find_option("ckout","c",0)!=0;
  }else if( strncmp(zCmd, "push", n)==0 ){
    zCmd = "push -autourl -R";
  }else if( strncmp(zCmd, "pull", n)==0 ){
    zCmd = "pull -autourl -R";
  }else if( strncmp(zCmd, "rebuild", n)==0 ){
    zCmd = "rebuild";
  }else if( strncmp(zCmd, "sync", n)==0 ){
    zCmd = "sync -autourl -R";
  }else if( strncmp(zCmd, "test-integrity", n)==0 ){
    zCmd = "test-integrity";
  }else if( strncmp(zCmd, "changes", n)==0 ){
    zCmd = "changes --quiet --header --chdir";
    useCheckouts = 1;
    stopOnError = 0;
    quiet = 1;
  }else if( strncmp(zCmd, "ignore", n)==0 ){
    int j;
    verify_all_options();
    db_begin_transaction();
    for(j=3; j<g.argc; j++){
      char *zSql = mprintf("DELETE FROM global_config"
                           " WHERE name GLOB 'repo:%q'", g.argv[j]);
      if( testRun ){
        fossil_print("%s\n", zSql);
      }else{
        db_multi_exec("%s", zSql);
      }
      fossil_free(zSql);
    }
    db_end_transaction(0);
    return;
  }else{
    fossil_fatal("\"all\" subcommand should be one of: "
                 "changes ignore list ls push pull rebuild sync");
  }
  verify_all_options();
  zFossil = quoteFilename(g.nameOfExe);
  if( useCheckouts ){
    db_prepare(&q,
       "SELECT substr(name, 7) COLLATE nocase, max(rowid)"
       "  FROM global_config"
       " WHERE substr(name, 1, 6)=='ckout:'"
       " GROUP BY 1 ORDER BY 1"
    );
  }else{
    db_prepare(&q,
       "SELECT substr(name, 6) COLLATE nocase, max(rowid)"
       "  FROM global_config"
       " WHERE substr(name, 1, 5)=='repo:'"
       " GROUP BY 1 ORDER BY 1"
    );
  }
  bag_init(&outOfDate);
  while( db_step(&q)==SQLITE_ROW ){
    const char *zFilename = db_column_text(&q, 0);
    int rowid = db_column_int(&q, 1);
    if( file_access(zFilename, 0) || !file_is_canonical(zFilename) ){
      bag_insert(&outOfDate, rowid);
      continue;
    }
    if( useCheckouts && file_isdir(zFilename)!=1 ){
      bag_insert(&outOfDate, rowid);
      continue;
    }
    if( zCmd[0]=='l' ){
      fossil_print("%s\n", zFilename);
      continue;
    }
    zQFilename = quoteFilename(zFilename);
    zSyscmd = mprintf("%s %s %s", zFossil, zCmd, zQFilename);
    if( !quiet || testRun ){
      fossil_print("%s\n", zSyscmd);
      fflush(stdout);
    }
    rc = testRun ? 0 : fossil_system(zSyscmd);
    free(zSyscmd);
    free(zQFilename);
    if( stopOnError && rc ){
      break;
    }
  }
  db_finalize(&q);
  
  /* If any repositories whose names appear in the ~/.fossil file could not
  ** be found, remove those names from the ~/.fossil file.
  */
  if( bag_count(&outOfDate)>0 ){
    Blob sql;
    char *zSep = "(";
    int rowid;
    blob_zero(&sql);
    blob_appendf(&sql, "DELETE FROM global_config WHERE rowid IN ");
    for(rowid=bag_first(&outOfDate); rowid>0; rowid=bag_next(&outOfDate,rowid)){
      blob_appendf(&sql, "%s%d", zSep, rowid);
      zSep = ",";
    }
    blob_appendf(&sql, ")");
    if( testRun ){
      fossil_print("%s\n", blob_str(&sql));
    }else{
      db_multi_exec(blob_str(&sql));
    }
    blob_reset(&sql);
  }
}
Example #18
/*
** Extract the content for ID rid and put it into the
** uninitialized blob.  Return 1 on success.  If the record
** is a phantom, zero pBlob and return 0.
*/
int content_get(int rid, Blob *pBlob){
  int rc;
  int i;
  int nextRid;

  assert( g.repositoryOpen );
  blob_zero(pBlob);
  if( rid==0 ) return 0;

  /* Early out if we know the content is not available */
  if( bag_find(&contentCache.missing, rid) ){
    return 0;
  }

  /* Look for the artifact in the cache first */
  if( bag_find(&contentCache.inCache, rid) ){
    for(i=0; i<contentCache.n; i++){
      if( contentCache.a[i].rid==rid ){
        blob_copy(pBlob, &contentCache.a[i].content);
        contentCache.a[i].age = contentCache.nextAge++;
        return 1;
      }
    }
  }

  nextRid = findSrcid(rid);
  if( nextRid==0 ){
    rc = content_of_blob(rid, pBlob);
  }else{
    int n = 1;
    int nAlloc = 10;
    int *a = 0;
    int mx;
    Blob delta, next;

    a = fossil_malloc( sizeof(a[0])*nAlloc );
    a[0] = rid;
    a[1] = nextRid;
    n = 1;
    while( !bag_find(&contentCache.inCache, nextRid)
        && (nextRid = findSrcid(nextRid))>0 ){
      n++;
      if( n>=nAlloc ){
        if( n>db_int(0, "SELECT max(rid) FROM blob") ){
          fossil_panic("infinite loop in DELTA table");
        }
        nAlloc = nAlloc*2 + 10;
        a = fossil_realloc(a, nAlloc*sizeof(a[0]));
      }
      a[n] = nextRid;
    }
    mx = n;
    rc = content_get(a[n], pBlob);
    n--;
    while( rc && n>=0 ){
      rc = content_of_blob(a[n], &delta);
      if( rc ){
        blob_delta_apply(pBlob, &delta, &next);
        blob_reset(&delta);
        if( (mx-n)%8==0 ){
          content_cache_insert(a[n+1], pBlob);
        }else{
          blob_reset(pBlob);
        }
        *pBlob = next;
      }
      n--;
    }
    free(a);
    if( !rc ) blob_reset(pBlob);
  }
  if( rc==0 ){
    bag_insert(&contentCache.missing, rid);
  }else{
    bag_insert(&contentCache.available, rid);
  }
  return rc;
}
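/*
** A small sketch (assumption: compiled alongside content_get() above, where
** the static helper findSrcid() is visible) that counts how many delta hops
** separate artifact rid from a blob stored as full content.  It mirrors the
** chain walk in content_get() and content_is_available(), but omits their
** loop guard, so it assumes the DELTA table contains no cycles.
*/
static int delta_chain_length(int rid){
  int nHop = 0;
  int srcid;
  while( (srcid = findSrcid(rid))>0 ){
    nHop++;          /* rid is stored as a delta against srcid */
    rid = srcid;
  }
  return nHop;       /* 0 means rid is stored as full content */
}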
Example #19
/*
** COMMAND: test-clusters
**
** Verify that all non-private and non-shunned artifacts are accessible
** through the cluster chain.
*/
void test_clusters_cmd(void){
  Bag pending;
  Stmt q;
  int n;
  
  db_find_and_open_repository(0, 2);
  bag_init(&pending);
  db_multi_exec(
    "CREATE TEMP TABLE xdone(x INTEGER PRIMARY KEY);"
    "INSERT INTO xdone SELECT rid FROM unclustered;"
    "INSERT OR IGNORE INTO xdone SELECT rid FROM private;"
    "INSERT OR IGNORE INTO xdone"
         " SELECT blob.rid FROM shun JOIN blob USING(uuid);"
  );
  db_prepare(&q,
    "SELECT rid FROM unclustered WHERE rid IN"
    " (SELECT rid FROM tagxref WHERE tagid=%d)", TAG_CLUSTER
  );
  while( db_step(&q)==SQLITE_ROW ){
    bag_insert(&pending, db_column_int(&q, 0));
  }
  db_finalize(&q);
  while( bag_count(&pending)>0 ){
    Manifest *p;
    int rid = bag_first(&pending);
    int i;
    
    bag_remove(&pending, rid);
    p = manifest_get(rid, CFTYPE_CLUSTER, 0);
    if( p==0 ){
      fossil_fatal("bad cluster: rid=%d", rid);
    }
    for(i=0; i<p->nCChild; i++){
      const char *zUuid = p->azCChild[i];
      int crid = name_to_rid(zUuid);
      if( crid==0 ){
         fossil_warning("cluster (rid=%d) references unknown artifact %s",
                        rid, zUuid);
         continue;
      }
      db_multi_exec("INSERT OR IGNORE INTO xdone VALUES(%d)", crid);
      if( db_exists("SELECT 1 FROM tagxref WHERE tagid=%d AND rid=%d",
                    TAG_CLUSTER, crid) ){
        bag_insert(&pending, crid);
      }
    }
    manifest_destroy(p);
  }
  n = db_int(0, "SELECT count(*) FROM /*scan*/"
                "  (SELECT rid FROM blob EXCEPT SELECT x FROM xdone)");
  if( n==0 ){
    fossil_print("all artifacts reachable through clusters\n");
  }else{
    fossil_print("%d unreachable artifacts:\n", n);
    db_prepare(&q, "SELECT rid, uuid FROM blob WHERE rid NOT IN xdone");
    while( db_step(&q)==SQLITE_ROW ){
      fossil_print("  %3d %s\n", db_column_int(&q,0), db_column_text(&q,1));
    }
    db_finalize(&q);
  }
}
Example #20
/*
** Rebuild cross-referencing information for the artifact
** rid with content pBase and all of its descendants.  This
** routine clears the content buffer before returning.
**
** If the zFNameFormat variable is set, then this routine is
** called to run "fossil deconstruct" instead of the usual
** "fossil rebuild".  In that case, instead of rebuilding the
** cross-referencing information, write the file content out
** to the appropriate directory.
**
** In both cases, this routine automatically recurses to process
** other artifacts that are deltas off of the current artifact.
** This is the most efficient way to extract all of the original
** artifact content from the Fossil repository.
*/
static void rebuild_step(int rid, int size, Blob *pBase){
  static Stmt q1;
  Bag children;
  Blob copy;
  Blob *pUse;
  int nChild, i, cid;

  while( rid>0 ){

    /* Fix up the "blob.size" field if needed. */
    if( size!=blob_size(pBase) ){
      db_multi_exec(
         "UPDATE blob SET size=%d WHERE rid=%d", blob_size(pBase), rid
      );
    }
  
    /* Find all children of artifact rid */
    db_static_prepare(&q1, "SELECT rid FROM delta WHERE srcid=:rid");
    db_bind_int(&q1, ":rid", rid);
    bag_init(&children);
    while( db_step(&q1)==SQLITE_ROW ){
      int cid = db_column_int(&q1, 0);
      if( !bag_find(&bagDone, cid) ){
        bag_insert(&children, cid);
      }
    }
    nChild = bag_count(&children);
    db_reset(&q1);
  
    /* Crosslink the artifact */
    if( nChild==0 ){
      pUse = pBase;
    }else{
      blob_copy(&copy, pBase);
      pUse = &copy;
    }
    if( zFNameFormat==0 ){
      /* We are doing "fossil rebuild" */
      manifest_crosslink(rid, pUse, MC_NONE);
    }else{
      /* We are doing "fossil deconstruct" */
      char *zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid);
      char *zFile = mprintf(zFNameFormat, zUuid, zUuid+prefixLength);
      blob_write_to_file(pUse,zFile);
      free(zFile);
      free(zUuid);
      blob_reset(pUse);
    }
    assert( blob_is_reset(pUse) );
    rebuild_step_done(rid);
  
    /* Call all children recursively */
    rid = 0;
    for(cid=bag_first(&children), i=1; cid; cid=bag_next(&children, cid), i++){
      static Stmt q2;
      int sz;
      db_static_prepare(&q2, "SELECT content, size FROM blob WHERE rid=:rid");
      db_bind_int(&q2, ":rid", cid);
      if( db_step(&q2)==SQLITE_ROW && (sz = db_column_int(&q2,1))>=0 ){
        Blob delta, next;
        db_ephemeral_blob(&q2, 0, &delta);
        blob_uncompress(&delta, &delta);
        blob_delta_apply(pBase, &delta, &next);
        blob_reset(&delta);
        db_reset(&q2);
        if( i<nChild ){
          rebuild_step(cid, sz, &next);
        }else{
          /* Tail recursion */
          rid = cid;
          size = sz;
          blob_reset(pBase);
          *pBase = next;
        }
      }else{
        db_reset(&q2);
        blob_reset(pBase);
      }
    }
    bag_clear(&children);
  }
}
Example #21
/*
** COMMAND: export
**
** Usage: %fossil export --git ?OPTIONS? ?REPOSITORY?
**
** Write an export of all check-ins to standard output.  The export is
** written in the git-fast-export file format assuming the --git option is
** provided.  The git-fast-export format is currently the only VCS 
** interchange format supported, though other formats may be added in
** the future.
**
** Run this command within a checkout.  Or use the -R or --repository
** option to specify a Fossil repository to be exported.
**
** Only check-ins are exported using --git.  Git does not support tickets 
** or wiki or events or attachments, so none of those are exported.
**
** If the "--import-marks FILE" option is used, it contains a list of
** rids to skip.
**
** If the "--export-marks FILE" option is used, the rid of all commits and
** blobs written on exit for use with "--import-marks" on the next run.
**
** Options:
**   --export-marks FILE          export rids of exported data to FILE
**   --import-marks FILE          read rids of data to ignore from FILE
**   --repository|-R REPOSITORY   export the given REPOSITORY
**   
** See also: import
*/
void export_cmd(void){
  Stmt q, q2, q3;
  int i;
  Bag blobs, vers;
  const char *markfile_in;
  const char *markfile_out;

  bag_init(&blobs);
  bag_init(&vers);

  find_option("git", 0, 0);   /* Ignore the --git option for now */
  markfile_in = find_option("import-marks", 0, 1);
  markfile_out = find_option("export-marks", 0, 1);

  db_find_and_open_repository(0, 2);
  verify_all_options();
  if( g.argc!=2 && g.argc!=3 ){ usage("--git ?REPOSITORY?"); }

  db_multi_exec("CREATE TEMPORARY TABLE oldblob(rid INTEGER PRIMARY KEY)");
  db_multi_exec("CREATE TEMPORARY TABLE oldcommit(rid INTEGER PRIMARY KEY)");
  if( markfile_in!=0 ){
    Stmt qb,qc;
    char line[100];
    FILE *f;

    f = fossil_fopen(markfile_in, "r");
    if( f==0 ){
      fossil_fatal("cannot open %s for reading", markfile_in);
    }
    db_prepare(&qb, "INSERT OR IGNORE INTO oldblob VALUES (:rid)");
    db_prepare(&qc, "INSERT OR IGNORE INTO oldcommit VALUES (:rid)");
    while( fgets(line, sizeof(line), f)!=0 ){
      if( *line == 'b' ){
        db_bind_text(&qb, ":rid", line + 1);
        db_step(&qb);
        db_reset(&qb);
        bag_insert(&blobs, atoi(line + 1));
      }else if( *line == 'c' ){
        db_bind_text(&qc, ":rid", line + 1);
        db_step(&qc);
        db_reset(&qc);
        bag_insert(&vers, atoi(line + 1));
      }else{
        fossil_fatal("bad input from %s: %s", markfile_in, line);
      }
    }
    db_finalize(&qb);
    db_finalize(&qc);
    fclose(f);
  }

  /* Step 1:  Generate "blob" records for every artifact that is part
  ** of a check-in 
  */
  fossil_binary_mode(stdout);
  db_multi_exec("CREATE TEMP TABLE newblob(rid INTEGER KEY, srcid INTEGER)");
  db_multi_exec("CREATE INDEX newblob_src ON newblob(srcid)");
  db_multi_exec(
    "INSERT INTO newblob"
    " SELECT DISTINCT fid,"
    "  CASE WHEN EXISTS(SELECT 1 FROM delta"
                       " WHERE rid=fid"
                       "   AND NOT EXISTS(SELECT 1 FROM oldblob"
                                         " WHERE srcid=fid))"
    "   THEN (SELECT srcid FROM delta WHERE rid=fid)"
    "   ELSE 0"
    "  END"
    " FROM mlink"
    " WHERE fid>0 AND NOT EXISTS(SELECT 1 FROM oldblob WHERE rid=fid)");
  db_prepare(&q,
    "SELECT DISTINCT fid FROM mlink"
    " WHERE fid>0 AND NOT EXISTS(SELECT 1 FROM oldblob WHERE rid=fid)");
  db_prepare(&q2, "INSERT INTO oldblob VALUES (:rid)");
  db_prepare(&q3, "SELECT rid FROM newblob WHERE srcid= (:srcid)");
  while( db_step(&q)==SQLITE_ROW ){
    int rid = db_column_int(&q, 0);
    Blob content;

    while( !bag_find(&blobs, rid) ){
      content_get(rid, &content);
      db_bind_int(&q2, ":rid", rid);
      db_step(&q2);
      db_reset(&q2);
      printf("blob\nmark :%d\ndata %d\n", BLOBMARK(rid), blob_size(&content));
      bag_insert(&blobs, rid);
      fwrite(blob_buffer(&content), 1, blob_size(&content), stdout);
      printf("\n");
      blob_reset(&content);

      db_bind_int(&q3, ":srcid", rid);
      if( db_step(&q3) != SQLITE_ROW ){
        db_reset(&q3);
        break;
      }
      rid = db_column_int(&q3, 0);
      db_reset(&q3);
    }
  }
  db_finalize(&q);
  db_finalize(&q2);
  db_finalize(&q3);

  /* Output the commit records.
  */
  db_prepare(&q,
    "SELECT strftime('%%s',mtime), objid, coalesce(comment,ecomment),"
    "       coalesce(user,euser),"
    "       (SELECT value FROM tagxref WHERE rid=objid AND tagid=%d)"
    "  FROM event"
    " WHERE type='ci' AND NOT EXISTS (SELECT 1 FROM oldcommit WHERE objid=rid)"
    " ORDER BY mtime ASC",
    TAG_BRANCH
  );
  db_prepare(&q2, "INSERT INTO oldcommit VALUES (:rid)");
  while( db_step(&q)==SQLITE_ROW ){
    Stmt q4;
    const char *zSecondsSince1970 = db_column_text(&q, 0);
    int ckinId = db_column_int(&q, 1);
    const char *zComment = db_column_text(&q, 2);
    const char *zUser = db_column_text(&q, 3);
    const char *zBranch = db_column_text(&q, 4);
    char *zBr;

    bag_insert(&vers, ckinId);
    db_bind_int(&q2, ":rid", ckinId);
    db_step(&q2);
    db_reset(&q2);
    if( zBranch==0 ) zBranch = "trunk";
    zBr = mprintf("%s", zBranch);
    for(i=0; zBr[i]; i++){
      if( !fossil_isalnum(zBr[i]) ) zBr[i] = '_';
    }
    printf("commit refs/heads/%s\nmark :%d\n", zBr, COMMITMARK(ckinId));
    free(zBr);
    printf("committer");
    print_person(zUser);
    printf(" %s +0000\n", zSecondsSince1970);
    if( zComment==0 ) zComment = "null comment";
    printf("data %d\n%s\n", (int)strlen(zComment), zComment);
    db_prepare(&q3,
      "SELECT pid FROM plink"
      " WHERE cid=%d AND isprim"
      "   AND pid IN (SELECT objid FROM event)",
      ckinId
    );
    if( db_step(&q3) == SQLITE_ROW ){
      printf("from :%d\n", COMMITMARK(db_column_int(&q3, 0)));
      db_prepare(&q4,
        "SELECT pid FROM plink"
        " WHERE cid=%d AND NOT isprim"
        "   AND NOT EXISTS(SELECT 1 FROM phantom WHERE rid=pid)"
        " ORDER BY pid",
        ckinId);
      while( db_step(&q4)==SQLITE_ROW ){
        printf("merge :%d\n", COMMITMARK(db_column_int(&q4,0)));
      }
      db_finalize(&q4);
    }else{
      printf("deleteall\n");
    }

    db_prepare(&q4,
      "SELECT filename.name, mlink.fid, mlink.mperm FROM mlink"
      " JOIN filename ON filename.fnid=mlink.fnid"
      " WHERE mlink.mid=%d",
      ckinId
    );
    while( db_step(&q4)==SQLITE_ROW ){
      const char *zName = db_column_text(&q4,0);
      int zNew = db_column_int(&q4,1);
      int mPerm = db_column_int(&q4,2);
      if( zNew==0)
        printf("D %s\n", zName);
      else if( bag_find(&blobs, zNew) ) {
        const char *zPerm;
        switch( mPerm ){
          case PERM_LNK:  zPerm = "120000";   break;
          case PERM_EXE:  zPerm = "100755";   break;
          default:        zPerm = "100644";   break;
        }
        printf("M %s :%d %s\n", zPerm, BLOBMARK(zNew), zName);
      }
    }
    db_finalize(&q4);
    db_finalize(&q3);
    printf("\n");
  }
  db_finalize(&q2);
  db_finalize(&q);
  bag_clear(&blobs);
  manifest_cache_clear();


  /* Output tags */
  db_prepare(&q,
     "SELECT tagname, rid, strftime('%%s',mtime)"
     "  FROM tagxref JOIN tag USING(tagid)"
     " WHERE tagtype=1 AND tagname GLOB 'sym-*'"
  );
  while( db_step(&q)==SQLITE_ROW ){
    const char *zTagname = db_column_text(&q, 0);
    char *zEncoded = 0;
    int rid = db_column_int(&q, 1);
    const char *zSecSince1970 = db_column_text(&q, 2);
    int i;
    if( rid==0 || !bag_find(&vers, rid) ) continue;
    zTagname += 4;
    zEncoded = mprintf("%s", zTagname);
    for(i=0; zEncoded[i]; i++){
      if( !fossil_isalnum(zEncoded[i]) ) zEncoded[i] = '_';
    }
    printf("tag %s\n", zEncoded);
    printf("from :%d\n", COMMITMARK(rid));
    printf("tagger <tagger> %s +0000\n", zSecSince1970);
    printf("data 0\n");
    fossil_free(zEncoded);
  }
  db_finalize(&q);
  bag_clear(&vers);

  if( markfile_out!=0 ){
    FILE *f;
    f = fossil_fopen(markfile_out, "w");
    if( f == 0 ){
      fossil_fatal("cannot open %s for writing", markfile_out);
    }
    db_prepare(&q, "SELECT rid FROM oldblob");
    while( db_step(&q)==SQLITE_ROW ){
      fprintf(f, "b%d\n", db_column_int(&q, 0));
    }
    db_finalize(&q);
    db_prepare(&q, "SELECT rid FROM oldcommit");
    while( db_step(&q)==SQLITE_ROW ){
      fprintf(f, "c%d\n", db_column_int(&q, 0));
    }
    db_finalize(&q);
    if( ferror(f)!=0 || fclose(f)!=0 ) {
      fossil_fatal("error while writing %s", markfile_out);
    }
  }
}
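/*
** A standalone sketch (not part of Fossil) that reads a marks file in the
** format export_cmd() writes above: one mark per line, "b<rid>" for a blob
** and "c<rid>" for a commit.  It only counts the marks; the file name comes
** from the command line.
*/
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv){
  char line[100];
  int nBlob = 0, nCommit = 0, maxRid = 0;
  FILE *f;
  if( argc!=2 ){
    fprintf(stderr, "usage: %s MARKFILE\n", argv[0]);
    return 1;
  }
  f = fopen(argv[1], "r");
  if( f==0 ){
    fprintf(stderr, "cannot open %s\n", argv[1]);
    return 1;
  }
  while( fgets(line, sizeof(line), f)!=0 ){
    int rid = atoi(line+1);      /* the rid, as export_cmd() writes it */
    if( line[0]=='b' ) nBlob++;
    else if( line[0]=='c' ) nCommit++;
    else { fprintf(stderr, "bad mark line: %s", line); continue; }
    if( rid>maxRid ) maxRid = rid;
  }
  fclose(f);
  printf("%d blob marks, %d commit marks, highest rid %d\n",
         nBlob, nCommit, maxRid);
  return 0;
}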
Example #22
/*
** There is a TEMP table bix(blobid,delta) containing a set of purgeitems
** that need to be transferred to the BLOB table.  This routine does
** all items that have srcid=iSrc.  The pBasis blob holds the content
** of the source document if iSrc>0.
*/
static void bundle_import_elements(int iSrc, Blob *pBasis, int isPriv){
  Stmt q;
  static Bag busy;
  assert( pBasis!=0 || iSrc==0 );
  if( iSrc>0 ){
    if( bag_find(&busy, iSrc) ){
      fossil_fatal("delta loop while uncompressing bundle artifacts");
    }
    bag_insert(&busy, iSrc);
  }
  db_prepare(&q,
     "SELECT uuid, data, bblob.delta, bix.blobid"
     "  FROM bix, bblob"
     " WHERE bix.delta=%d"
     "   AND bix.blobid=bblob.blobid;",
     iSrc
  );
  while( db_step(&q)==SQLITE_ROW ){
    Blob h1, h2, c1, c2;
    int rid;
    blob_zero(&h1);
    db_column_blob(&q, 0, &h1);
    blob_zero(&c1);
    db_column_blob(&q, 1, &c1);
    blob_uncompress(&c1, &c1);
    blob_zero(&c2);
    if( db_column_type(&q,2)==SQLITE_TEXT && db_column_bytes(&q,2)==40 ){
      Blob basis;
      rid = db_int(0,"SELECT rid FROM blob WHERE uuid=%Q",
                   db_column_text(&q,2));
      content_get(rid, &basis);
      blob_delta_apply(&basis, &c1, &c2);
      blob_reset(&basis);
      blob_reset(&c1);
    }else if( pBasis ){
      blob_delta_apply(pBasis, &c1, &c2);
      blob_reset(&c1);
    }else{
      c2 = c1;
    }
    sha1sum_blob(&c2, &h2);
    if( blob_compare(&h1, &h2)!=0 ){
      fossil_fatal("SHA1 hash mismatch - wanted %s, got %s",
                   blob_str(&h1), blob_str(&h2));
    }
    blob_reset(&h2);
    rid = content_put_ex(&c2, blob_str(&h1), 0, 0, isPriv);
    if( rid==0 ){
      fossil_fatal("%s", g.zErrMsg);
    }else{
      if( !isPriv ) content_make_public(rid);
      content_get(rid, &c1);
      manifest_crosslink(rid, &c1, MC_NO_ERRORS);
      db_multi_exec("INSERT INTO got(rid) VALUES(%d)",rid);
    }
    bundle_import_elements(db_column_int(&q,3), &c2, isPriv);
    blob_reset(&c2);
  }
  db_finalize(&q);
  if( iSrc>0 ) bag_remove(&busy, iSrc);
}
/*
** Create a temporary table named "leaves" if it does not
** already exist.  Load this table with the RID of all
** check-ins that are leaves which are descended from
** check-in iBase.
**
** A "leaf" is a check-in that has no children in the same branch.
** There is a separate permanent table LEAF that contains all leaves
** in the tree.  This routine is used to compute a subset of that
** table consisting of leaves that are descended from a single checkin.
**
** The closeMode flag determines behavior associated with the "closed"
** tag:
**
**    closeMode==0       Show all leaves regardless of the "closed" tag.
**
**    closeMode==1       Show only leaves without the "closed" tag.
**
**    closeMode==2       Show only leaves with the "closed" tag.
**
** The default behavior (closeMode==0) is to show all leaves regardless
** of the "closed" tag.  To hide closed leaves, use closeMode==1.  To show
** only closed leaves, use closeMode==2.
*/
void compute_leaves(int iBase, int closeMode){

  /* Create the LEAVES table if it does not already exist.  Make sure
  ** it is empty.
  */
  db_multi_exec(
    "CREATE TEMP TABLE IF NOT EXISTS leaves("
    "  rid INTEGER PRIMARY KEY"
    ");"
    "DELETE FROM leaves;"
  );

  if( iBase>0 ){
    Bag seen;     /* Descendants seen */
    Bag pending;  /* Unpropagated descendants */
    Stmt q1;      /* Query to find children of a check-in */
    Stmt isBr;    /* Query to check to see if a check-in starts a new branch */
    Stmt ins;     /* INSERT statement for a new record */

    /* Initialize the bags. */
    bag_init(&seen);
    bag_init(&pending);
    bag_insert(&pending, iBase);

    /* This query returns all non-branch-merge children of check-in :rid.
    **
    ** If a child is a merge of a fork within the same branch, it is
    ** returned.  Only merge children in different branches are excluded.
    */
    db_prepare(&q1,
      "SELECT cid FROM plink"
      " WHERE pid=:rid"
      "   AND (isprim"
      "        OR coalesce((SELECT value FROM tagxref"
                        "   WHERE tagid=%d AND rid=plink.pid), 'trunk')"
                 "=coalesce((SELECT value FROM tagxref"
                        "   WHERE tagid=%d AND rid=plink.cid), 'trunk'))",
      TAG_BRANCH, TAG_BRANCH
    );

    /* This query returns a single row if check-in :rid is the first
    ** check-in of a new branch.
    */
    db_prepare(&isBr,
       "SELECT 1 FROM tagxref"
       " WHERE rid=:rid AND tagid=%d AND tagtype=2"
       "   AND srcid>0",
       TAG_BRANCH
    );

    /* This statement inserts check-in :rid into the LEAVES table.
    */
    db_prepare(&ins, "INSERT OR IGNORE INTO leaves VALUES(:rid)");

    while( bag_count(&pending) ){
      int rid = bag_first(&pending);
      int cnt = 0;
      bag_remove(&pending, rid);
      db_bind_int(&q1, ":rid", rid);
      while( db_step(&q1)==SQLITE_ROW ){
        int cid = db_column_int(&q1, 0);
        if( bag_insert(&seen, cid) ){
          bag_insert(&pending, cid);
        }
        db_bind_int(&isBr, ":rid", cid);
        if( db_step(&isBr)==SQLITE_DONE ){
          cnt++;
        }
        db_reset(&isBr);
      }
      db_reset(&q1);
      if( cnt==0 && !is_a_leaf(rid) ){
        cnt++;
      }
      if( cnt==0 ){
        db_bind_int(&ins, ":rid", rid);
        db_step(&ins);
        db_reset(&ins);
      }
    }
    db_finalize(&ins);
    db_finalize(&isBr);
    db_finalize(&q1);
    bag_clear(&pending);
    bag_clear(&seen);
  }
  if( closeMode==1 ){
    db_multi_exec(
      "DELETE FROM leaves WHERE rid IN"
      "  (SELECT leaves.rid FROM leaves, tagxref"
      "    WHERE tagxref.rid=leaves.rid "
      "      AND tagxref.tagid=%d"
      "      AND tagxref.tagtype>0)",
      TAG_CLOSED
    );
  }else if( closeMode==2 ){
    db_multi_exec(
      "DELETE FROM leaves WHERE rid NOT IN"
      "  (SELECT leaves.rid FROM leaves, tagxref"
      "    WHERE tagxref.rid=leaves.rid "
      "      AND tagxref.tagid=%d"
      "      AND tagxref.tagtype>0)",
      TAG_CLOSED
    );
  }
}
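/*
** A hedged usage sketch for compute_leaves(): assuming the surrounding
** Fossil helpers, this lists the open leaves (closeMode==1) descended from
** check-in iBase by reading back the temporary "leaves" table that
** compute_leaves() creates and fills.
*/
static void print_open_leaves(int iBase){
  Stmt s;
  compute_leaves(iBase, 1);        /* hide leaves carrying the "closed" tag */
  db_prepare(&s, "SELECT rid FROM leaves");
  while( db_step(&s)==SQLITE_ROW ){
    fossil_print("open leaf rid=%d\n", db_column_int(&s, 0));
  }
  db_finalize(&s);
}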