Code Example #1
File: url.c  Project: ilchenkoanna/VCS
/*
** Render the URL with up to two parameter overrides.
*/
char *url_render(
  HQuery *p,              /* Base URL */
  const char *zName1,     /* First override */
  const char *zValue1,    /* First override value */
  const char *zName2,     /* Second override */
  const char *zValue2     /* Second override value */
){
  const char *zSep = "?";
  int i;
  
  blob_reset(&p->url);
  blob_appendf(&p->url, "%s/%s", g.zTop, p->zBase);
  for(i=0; i<p->nParam; i++){
    const char *z = p->azValue[i];
    if( zName1 && fossil_strcmp(zName1,p->azName[i])==0 ){
      zName1 = 0;
      z = zValue1;
      if( z==0 ) continue;
    }
    if( zName2 && fossil_strcmp(zName2,p->azName[i])==0 ){
      zName2 = 0;
      z = zValue2;
      if( z==0 ) continue;
    }
    blob_appendf(&p->url, "%s%s", zSep, p->azName[i]);
    if( z && z[0] ) blob_appendf(&p->url, "=%T", z);
    zSep = "&amp;";
  }
  if( zName1 && zValue1 ){
    blob_appendf(&p->url, "%s%s", zSep, zName1);
    if( zValue1[0] ) blob_appendf(&p->url, "=%T", zValue1);
  }
  if( zName2 && zValue2 ){
    blob_appendf(&p->url, "%s%s", zSep, zName2);
    if( zValue2[0] ) blob_appendf(&p->url, "=%T", zValue2);
  }
  return blob_str(&p->url);
}
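
A minimal calling sketch (not part of the listed url.c) may help: it assumes the companion HQuery helpers url_initialize() and url_add_parameter() exist in the same interface, which is an assumption since they are not shown in this excerpt. An override pair replaces the named parameter if it is already present and appends it otherwise; passing a NULL value suppresses the parameter entirely.

/* Usage sketch; "udc" is a hypothetical parameter name. */
HQuery url;
url_initialize(&url, "timeline");       /* base page appended to g.zTop      */
url_add_parameter(&url, "y", "ci");     /* parameter emitted on every render */
const char *z = url_render(&url, "udc", "1", 0, 0);  /* .../timeline?y=ci&amp;udc=1 */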
Code Example #2
File: vfile.c  Project: ilchenkoanna/VCS
/*
** Compute an aggregate MD5 checksum over the repository image of every
** file in manifest vid.  The file names are part of the checksum.  The
** resulting checksum is suitable for use as the R-card of a manifest.
**
** Return the resulting checksum in blob pOut.
**
** If pManOut is not NULL then fill it with the checksum found in the
** "R" card near the end of the manifest.
**
** In a well-formed manifest, the two checksums computed here, pOut and
** pManOut, should be identical.  
*/
void vfile_aggregate_checksum_manifest(int vid, Blob *pOut, Blob *pManOut){
  int fid;
  Blob file;
  Manifest *pManifest;
  ManifestFile *pFile;
  char zBuf[100];

  blob_zero(pOut);
  if( pManOut ){
    blob_zero(pManOut);
  }
  db_must_be_within_tree();
  pManifest = manifest_get(vid, CFTYPE_MANIFEST);
  if( pManifest==0 ){
    vcs_panic("manifest file (%d) is malformed", vid);
  }
  manifest_file_rewind(pManifest);
  while( (pFile = manifest_file_next(pManifest,0))!=0 ){
    if( pFile->zUuid==0 ) continue;
    fid = uuid_to_rid(pFile->zUuid, 0);
    md5sum_step_text(pFile->zName, -1);
    content_get(fid, &file);
    sqlite3_snprintf(sizeof(zBuf), zBuf, " %d\n", blob_size(&file));
    md5sum_step_text(zBuf, -1);
    md5sum_step_blob(&file);
    blob_reset(&file);
  }
  if( pManOut ){
    if( pManifest->zRepoCksum ){
      blob_append(pManOut, pManifest->zRepoCksum, -1);
    }else{
      blob_zero(pManOut);
    }
  }
  manifest_destroy(pManifest);
  md5sum_finish(pOut);
}
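
The header comment says the recomputed checksum (pOut) and the manifest's own R-card (pManOut) should agree for a well-formed manifest. A short verification sketch, using helpers already visible in these excerpts plus a blob_compare() byte-wise comparison that is assumed to exist:

/* Sketch: recompute the R-card for the current checkout and compare it
** against the value recorded in the manifest itself. */
int vid = db_lget_int("checkout", 0);
Blob computed, recorded;
vfile_aggregate_checksum_manifest(vid, &computed, &recorded);
if( blob_size(&recorded)>0 && blob_compare(&computed, &recorded)!=0 ){
  fossil_warning("R-card mismatch: computed %s but manifest says %s",
                 blob_str(&computed), blob_str(&recorded));
}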
Code Example #3
File: file.c  Project: digsrc/fossil
/*
** Create a symlink to a file on Unix, or a plain-text file containing
** the symlink target if "allow-symlinks" is off or we are on Windows.
**
** Arguments: target file (the symlink will point to it), link file.
*/
void symlink_create(const char *zTargetFile, const char *zLinkFile){
#if !defined(_WIN32)
  if( g.allowSymlinks ){
    int i, nName;
    char *zName, zBuf[1000];

    nName = strlen(zLinkFile);
    if( nName>=sizeof(zBuf) ){
      zName = mprintf("%s", zLinkFile);
    }else{
      zName = zBuf;
      memcpy(zName, zLinkFile, nName+1);
    }
    nName = file_simplify_name(zName, nName, 0);
    for(i=1; i<nName; i++){
      if( zName[i]=='/' ){
        zName[i] = 0;
        if( file_mkdir(zName, 1) ){
          fossil_fatal_recursive("unable to create directory %s", zName);
          return;
        }
        zName[i] = '/';
      }
    }
    if( symlink(zTargetFile, zName)!=0 ){
      fossil_fatal_recursive("unable to create symlink \"%s\"", zName);
    }
    if( zName!=zBuf ) free(zName);
  }else
#endif
  {
    Blob content;
    blob_set(&content, zTargetFile);
    blob_write_to_file(&content, zLinkFile);
    blob_reset(&content);
  }
}
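
A usage sketch with hypothetical paths: on Unix with allow-symlinks enabled this creates a real symlink, and in every other case it writes an ordinary file whose entire content is the target path.

/* Sketch: make "doc/current" point at "doc/v1.2".  Missing directories
** along the link path are created by the loop above. */
symlink_create("doc/v1.2", "doc/current");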
Code Example #4
File: merge_.c  Project: sambassett/Fossil-Repo
/*
** COMMAND: merge
**
** Usage: %fossil merge ?OPTIONS? ?VERSION?
**
** The argument VERSION is a version that should be merged into the
** current checkout.  All changes from VERSION back to the nearest
** common ancestor are merged.  However, if either the --cherrypick or
** --backout option is used, only the changes associated with the
** single check-in VERSION are merged.  The --backout option causes
** the changes associated with VERSION to be removed from the current
** checkout rather than added.
**
** If the VERSION argument is omitted, then Fossil attempts to find
** a recent fork on the current branch to merge.
**
** Only file content is merged.  The result continues to use the
** file and directory names from the current checkout even if those
** names might have been changed in the branch being merged in.
**
** Other options:
**
**   --baseline BASELINE     Use BASELINE as the "pivot" of the merge instead
**                           of the nearest common ancestor.  This allows
**                           a sequence of changes in a branch to be merged
**                           without having to merge the entire branch.
**
**   --binary GLOBPATTERN    Treat files that match GLOBPATTERN as binary
**                           and do not try to merge parallel changes.  This
**                           option overrides the "binary-glob" setting.
**
**   --case-sensitive BOOL   Override the case-sensitive setting.  If false,
**                           files whose names differ only in case are taken
**                           to be the same file.
**
**   -f|--force              Force the merge even if it would be a no-op.
**
**   --force-missing         Force the merge even if there is missing content.
**
**   --integrate             Merged branch will be closed when committing.
**
**   -n|--dry-run            If given, display instead of run actions
**
**   -v|--verbose            Show additional details of the merge
*/
void merge_cmd(void){
  int vid;              /* Current version "V" */
  int mid;              /* Version we are merging from "M" */
  int pid;              /* The pivot version - most recent common ancestor P */
  int verboseFlag;      /* True if the -v|--verbose option is present */
  int integrateFlag;    /* True if the --integrate option is present */
  int pickFlag;         /* True if the --cherrypick option is present */
  int backoutFlag;      /* True if the --backout option is present */
  int dryRunFlag;       /* True if the --dry-run or -n option is present */
  int forceFlag;        /* True if the --force or -f option is present */
  int forceMissingFlag; /* True if the --force-missing option is present */
  const char *zBinGlob; /* The value of --binary */
  const char *zPivot;   /* The value of --baseline */
  int debugFlag;        /* True if --debug is present */
  int nChng;            /* Number of file name changes */
  int *aChng;           /* An array of file name changes */
  int i;                /* Loop counter */
  int nConflict = 0;    /* Number of conflicts seen */
  int nOverwrite = 0;   /* Number of unmanaged files overwritten */
  Stmt q;


  /* Notation:
  **
  **      V     The current checkout
  **      M     The version being merged in
  **      P     The "pivot" - the most recent common ancestor of V and M.
  */

  undo_capture_command_line();
  verboseFlag = find_option("verbose","v",0)!=0;
  forceMissingFlag = find_option("force-missing",0,0)!=0;
  if( !verboseFlag ){
    verboseFlag = find_option("detail",0,0)!=0; /* deprecated */
  }
  pickFlag = find_option("cherrypick",0,0)!=0;
  integrateFlag = find_option("integrate",0,0)!=0;
  backoutFlag = find_option("backout",0,0)!=0;
  debugFlag = find_option("debug",0,0)!=0;
  zBinGlob = find_option("binary",0,1);
  dryRunFlag = find_option("dry-run","n",0)!=0;
  if( !dryRunFlag ){
    dryRunFlag = find_option("nochange",0,0)!=0; /* deprecated */
  }
  forceFlag = find_option("force","f",0)!=0;
  zPivot = find_option("baseline",0,1);
  verify_all_options();
  db_must_be_within_tree();
  if( zBinGlob==0 ) zBinGlob = db_get("binary-glob",0);
  vid = db_lget_int("checkout", 0);
  if( vid==0 ){
    fossil_fatal("nothing is checked out");
  }

  /* Find mid, the artifactID of the version to be merged into the current
  ** check-out */
  if( g.argc==3 ){
    /* Mid is specified as an argument on the command-line */
    mid = name_to_typed_rid(g.argv[2], "ci");
    if( mid==0 || !is_a_version(mid) ){
      fossil_fatal("not a version: %s", g.argv[2]);
    }
  }else if( g.argc==2 ){
    /* No version specified on the command-line so pick the most recent
    ** leaf that is (1) not the version currently checked out and (2)
    ** has not already been merged into the current checkout and (3)
    ** the leaf is not closed and (4) the leaf is in the same branch
    ** as the current checkout.
    */
    Stmt q;
    if( pickFlag || backoutFlag || integrateFlag){
      fossil_fatal("cannot use --backout, --cherrypick or --integrate with a fork merge");
    }
    mid = db_int(0,
      "SELECT leaf.rid"
      "  FROM leaf, event"
      " WHERE leaf.rid=event.objid"
      "   AND leaf.rid!=%d"                                /* Constraint (1) */
      "   AND leaf.rid NOT IN (SELECT merge FROM vmerge)"  /* Constraint (2) */
      "   AND NOT EXISTS(SELECT 1 FROM tagxref"            /* Constraint (3) */
                    "     WHERE rid=leaf.rid"
                    "       AND tagid=%d"
                    "       AND tagtype>0)"
      "   AND (SELECT value FROM tagxref"                  /* Constraint (4) */
            "   WHERE tagid=%d AND rid=%d AND tagtype>0) ="
            " (SELECT value FROM tagxref"
            "   WHERE tagid=%d AND rid=leaf.rid AND tagtype>0)"
      " ORDER BY event.mtime DESC LIMIT 1",
      vid, TAG_CLOSED, TAG_BRANCH, vid, TAG_BRANCH
    );
    if( mid==0 ){
      fossil_fatal("no unmerged forks of branch \"%s\"",
        db_text(0, "SELECT value FROM tagxref"
                   " WHERE tagid=%d AND rid=%d AND tagtype>0",
                   TAG_BRANCH, vid)
      );
    }
    db_prepare(&q,
      "SELECT blob.uuid,"
          "   datetime(event.mtime%s),"
          "   coalesce(ecomment, comment),"
          "   coalesce(euser, user)"
      "  FROM event, blob"
      " WHERE event.objid=%d AND blob.rid=%d",
      timeline_utc(), mid, mid
    );
    if( db_step(&q)==SQLITE_ROW ){
      char *zCom = mprintf("Merging fork [%S] at %s by %s: \"%s\"",
            db_column_text(&q, 0), db_column_text(&q, 1),
            db_column_text(&q, 3), db_column_text(&q, 2));
      comment_print(zCom, db_column_text(&q,2), 0, -1, g.comFmtFlags);
      fossil_free(zCom);
    }
    db_finalize(&q);
  }else{
    usage("?OPTIONS? ?VERSION?");
    return;
  }

  if( zPivot ){
    pid = name_to_typed_rid(zPivot, "ci");
    if( pid==0 || !is_a_version(pid) ){
      fossil_fatal("not a version: %s", zPivot);
    }
    if( pickFlag ){
      fossil_fatal("incompatible options: --cherrypick & --baseline");
    }
  }else if( pickFlag || backoutFlag ){
    if( integrateFlag ){
      fossil_fatal("incompatible options: --integrate & --cherrypick or --backout");
    }
    pid = db_int(0, "SELECT pid FROM plink WHERE cid=%d AND isprim", mid);
    if( pid<=0 ){
      fossil_fatal("cannot find an ancestor for %s", g.argv[2]);
    }
  }else{
    pivot_set_primary(mid);
    pivot_set_secondary(vid);
    db_prepare(&q, "SELECT merge FROM vmerge WHERE id=0");
    while( db_step(&q)==SQLITE_ROW ){
      pivot_set_secondary(db_column_int(&q,0));
    }
    db_finalize(&q);
    pid = pivot_find();
    if( pid<=0 ){
      fossil_fatal("cannot find a common ancestor between the current "
                   "checkout and %s", g.argv[2]);
    }
  }
  if( backoutFlag ){
    int t = pid;
    pid = mid;
    mid = t;
  }
  if( !is_a_version(pid) ){
    fossil_fatal("not a version: record #%d", pid);
  }
  if( !forceFlag && mid==pid ){
    fossil_print("Merge skipped because it is a no-op. "
                 " Use --force to override.\n");
    return;
  }
  if( integrateFlag && !is_a_leaf(mid)){
    fossil_warning("ignoring --integrate: %s is not a leaf", g.argv[2]);
    integrateFlag = 0;
  }
  if( verboseFlag ){
    print_checkin_description(mid, 12, integrateFlag?"integrate:":"merge-from:");
    print_checkin_description(pid, 12, "baseline:");
  }
  vfile_check_signature(vid, CKSIG_ENOTFILE);
  db_begin_transaction();
  if( !dryRunFlag ) undo_begin();
  if( load_vfile_from_rid(mid) && !forceMissingFlag ){
    fossil_fatal("missing content, unable to merge");
  }
  if( load_vfile_from_rid(pid) && !forceMissingFlag ){
    fossil_fatal("missing content, unable to merge");
  }
  if( debugFlag ){
    char *z;
    z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", pid);
    fossil_print("P=%d %z\n", pid, z);
    z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", mid);
    fossil_print("M=%d %z\n", mid, z);
    z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", vid);
    fossil_print("V=%d %z\n", vid, z);
  }

  /*
  ** The vfile.pathname field is used to match files against each other.  The
  ** FV table contains one row for each unique filename in the current
  ** checkout, the pivot, and the version being merged.
  */
  db_multi_exec(
    "DROP TABLE IF EXISTS fv;"
    "CREATE TEMP TABLE fv("
    "  fn TEXT PRIMARY KEY %s,"   /* The filename */
    "  idv INTEGER,"              /* VFILE entry for current version */
    "  idp INTEGER,"              /* VFILE entry for the pivot */
    "  idm INTEGER,"              /* VFILE entry for version merging in */
    "  chnged BOOLEAN,"           /* True if current version has been edited */
    "  ridv INTEGER,"             /* Record ID for current version */
    "  ridp INTEGER,"             /* Record ID for pivot */
    "  ridm INTEGER,"             /* Record ID for merge */
    "  isexe BOOLEAN,"            /* Execute permission enabled */
    "  fnp TEXT %s,"              /* The filename in the pivot */
    "  fnm TEXT %s,"              /* the filename in the merged version */
    "  islinkv BOOLEAN,"          /* True if current version is a symlink */
    "  islinkm BOOLEAN"           /* True if merged version in is a symlink */
    ");",
    filename_collation(), filename_collation(), filename_collation()
  );

  /* Add files found in V
  */
  db_multi_exec(
    "INSERT OR IGNORE"
    " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)"
    " SELECT pathname, pathname, pathname, id, 0, 0, rid, 0, 0, isexe, chnged "
    " FROM vfile WHERE vid=%d",
    vid
  );

  /*
  ** Compute name changes from P->V
  */
  find_filename_changes(pid, vid, 0, &nChng, &aChng, debugFlag ? "P->V" : 0);
  if( nChng ){
    for(i=0; i<nChng; i++){
      char *z;
      z = db_text(0, "SELECT name FROM filename WHERE fnid=%d", aChng[i*2]);
      db_multi_exec(
        "UPDATE fv SET fnp=%Q, fnm=%Q"
        " WHERE fn=(SELECT name FROM filename WHERE fnid=%d)",
        z, z, aChng[i*2+1]
      );
      free(z);
    }
    fossil_free(aChng);
    db_multi_exec("UPDATE fv SET fnm=fnp WHERE fnp!=fn");
  }

  /* Add files found in P but not in V
  */
  db_multi_exec(
    "INSERT OR IGNORE"
    " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)"
    " SELECT pathname, pathname, pathname, 0, 0, 0, 0, 0, 0, isexe, 0 "
    "   FROM vfile"
    "  WHERE vid=%d AND pathname %s NOT IN (SELECT fnp FROM fv)",
    pid, filename_collation()
  );

  /*
  ** Compute name changes from P->M
  */
  find_filename_changes(pid, mid, 0, &nChng, &aChng, debugFlag ? "P->M" : 0);
  if( nChng ){
    if( nChng>4 ) db_multi_exec("CREATE INDEX fv_fnp ON fv(fnp)");
    for(i=0; i<nChng; i++){
      db_multi_exec(
        "UPDATE fv SET fnm=(SELECT name FROM filename WHERE fnid=%d)"
        " WHERE fnp=(SELECT name FROM filename WHERE fnid=%d)",
        aChng[i*2+1], aChng[i*2]
      );
    }
    fossil_free(aChng);
  }

  /* Add files found in M but not in P or V.
  */
  db_multi_exec(
    "INSERT OR IGNORE"
    " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)"
    " SELECT pathname, pathname, pathname, 0, 0, 0, 0, 0, 0, isexe, 0 "
    "   FROM vfile"
    "  WHERE vid=%d"
    "    AND pathname %s NOT IN (SELECT fnp FROM fv UNION SELECT fnm FROM fv)",
    mid, filename_collation()
  );

  /*
  ** Compute the file version ids for P and M.
  */
  db_multi_exec(
    "UPDATE fv SET"
    " idp=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnp=pathname),0),"
    " ridp=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnp=pathname),0),"
    " idm=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnm=pathname),0),"
    " ridm=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnm=pathname),0),"
    " islinkv=coalesce((SELECT islink FROM vfile"
                    " WHERE vid=%d AND fnm=pathname),0),"
    " islinkm=coalesce((SELECT islink FROM vfile"
                    " WHERE vid=%d AND fnm=pathname),0)",
    pid, pid, mid, mid, vid, mid
  );

  if( debugFlag ){
    db_prepare(&q,
       "SELECT rowid, fn, fnp, fnm, chnged, ridv, ridp, ridm, "
       "       isexe, islinkv, islinkm FROM fv"
    );
    while( db_step(&q)==SQLITE_ROW ){
       fossil_print("%3d: ridv=%-4d ridp=%-4d ridm=%-4d chnged=%d isexe=%d "
                    " islinkv=%d islinkm=%d\n",
          db_column_int(&q, 0),
          db_column_int(&q, 5),
          db_column_int(&q, 6),
          db_column_int(&q, 7),
          db_column_int(&q, 4),
          db_column_int(&q, 8),
          db_column_int(&q, 9),
          db_column_int(&q, 10));
       fossil_print("     fn  = [%s]\n", db_column_text(&q, 1));
       fossil_print("     fnp = [%s]\n", db_column_text(&q, 2));
       fossil_print("     fnm = [%s]\n", db_column_text(&q, 3));
    }
    db_finalize(&q);
  }

  /*
  ** Find files in M and V but not in P and report conflicts.
  ** The file in M will be ignored.  It will be treated as if it
  ** does not exist.
  */
  db_prepare(&q,
    "SELECT idm FROM fv WHERE idp=0 AND idv>0 AND idm>0"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int idm = db_column_int(&q, 0);
    char *zName = db_text(0, "SELECT pathname FROM vfile WHERE id=%d", idm);
    fossil_warning("WARNING - no common ancestor: %s", zName);
    free(zName);
    db_multi_exec("UPDATE fv SET idm=0 WHERE idm=%d", idm);
  }
  db_finalize(&q);

  /*
  ** Add to V files that are not in V or P but are in M
  */
  db_prepare(&q,
    "SELECT idm, rowid, fnm FROM fv AS x"
    " WHERE idp=0 AND idv=0 AND idm>0"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int idm = db_column_int(&q, 0);
    int rowid = db_column_int(&q, 1);
    int idv;
    const char *zName;
    char *zFullName;
    db_multi_exec(
      "INSERT INTO vfile(vid,chnged,deleted,rid,mrid,isexe,islink,pathname)"
      "  SELECT %d,%d,0,rid,mrid,isexe,islink,pathname FROM vfile WHERE id=%d",
      vid, integrateFlag?5:3, idm
    );
    idv = db_last_insert_rowid();
    db_multi_exec("UPDATE fv SET idv=%d WHERE rowid=%d", idv, rowid);
    zName = db_column_text(&q, 2);
    zFullName = mprintf("%s%s", g.zLocalRoot, zName);
    if( file_wd_isfile_or_link(zFullName) ){
      fossil_print("ADDED %s (overwrites an unmanaged file)\n", zName);
      nOverwrite++;
    }else{
      fossil_print("ADDED %s\n", zName);
    }
    fossil_free(zFullName);
    if( !dryRunFlag ){
      undo_save(zName);
      vfile_to_disk(0, idm, 0, 0);
    }
  }
  db_finalize(&q);

  /*
  ** Find files that have changed from P->M but not P->V.
  ** Copy the M content over into V.
  */
  db_prepare(&q,
    "SELECT idv, ridm, fn, islinkm FROM fv"
    " WHERE idp>0 AND idv>0 AND idm>0"
    "   AND ridm!=ridp AND ridv=ridp AND NOT chnged"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int idv = db_column_int(&q, 0);
    int ridm = db_column_int(&q, 1);
    const char *zName = db_column_text(&q, 2);
    int islinkm = db_column_int(&q, 3);
    /* Copy content from idm over into idv.  Overwrite idv. */
    fossil_print("UPDATE %s\n", zName);
    if( !dryRunFlag ){
      undo_save(zName);
      db_multi_exec(
        "UPDATE vfile SET mtime=0, mrid=%d, chnged=%d, islink=%d "
        " WHERE id=%d", ridm, integrateFlag?4:2, islinkm, idv
      );
      vfile_to_disk(0, idv, 0, 0);
    }
  }
  db_finalize(&q);

  /*
  ** Do a three-way merge on files that have changes on both P->M and P->V.
  */
  db_prepare(&q,
    "SELECT ridm, idv, ridp, ridv, %s, fn, isexe, islinkv, islinkm FROM fv"
    " WHERE idp>0 AND idv>0 AND idm>0"
    "   AND ridm!=ridp AND (ridv!=ridp OR chnged)",
    glob_expr("fv.fn", zBinGlob)
  );
  while( db_step(&q)==SQLITE_ROW ){
    int ridm = db_column_int(&q, 0);
    int idv = db_column_int(&q, 1);
    int ridp = db_column_int(&q, 2);
    int ridv = db_column_int(&q, 3);
    int isBinary = db_column_int(&q, 4);
    const char *zName = db_column_text(&q, 5);
    int isExe = db_column_int(&q, 6);
    int islinkv = db_column_int(&q, 7);
    int islinkm = db_column_int(&q, 8);
    int rc;
    char *zFullPath;
    Blob m, p, r;
    /* Do a 3-way merge of idp->idm into idp->idv.  The results go into idv. */
    if( verboseFlag ){
      fossil_print("MERGE %s  (pivot=%d v1=%d v2=%d)\n",
                   zName, ridp, ridm, ridv);
    }else{
      fossil_print("MERGE %s\n", zName);
    }
    if( islinkv || islinkm /* || file_wd_islink(zFullPath) */ ){
      fossil_print("***** Cannot merge symlink %s\n", zName);
      nConflict++;
    }else{
      undo_save(zName);
      zFullPath = mprintf("%s/%s", g.zLocalRoot, zName);
      content_get(ridp, &p);
      content_get(ridm, &m);
      if( isBinary ){
        rc = -1;
        blob_zero(&r);
      }else{
        unsigned mergeFlags = dryRunFlag ? MERGE_DRYRUN : 0;
        rc = merge_3way(&p, zFullPath, &m, &r, mergeFlags);
      }
      if( rc>=0 ){
        if( !dryRunFlag ){
          blob_write_to_file(&r, zFullPath);
          file_wd_setexe(zFullPath, isExe);
        }
        db_multi_exec("UPDATE vfile SET mtime=0 WHERE id=%d", idv);
        if( rc>0 ){
          fossil_print("***** %d merge conflicts in %s\n", rc, zName);
          nConflict++;
        }
      }else{
        fossil_print("***** Cannot merge binary file %s\n", zName);
        nConflict++;
      }
      blob_reset(&p);
      blob_reset(&m);
      blob_reset(&r);
    }
    db_multi_exec("INSERT OR IGNORE INTO vmerge(id,merge) VALUES(%d,%d)",
                  idv,ridm);
  }
  db_finalize(&q);

  /*
  ** Drop files that are in P and V but not in M
  */
  db_prepare(&q,
    "SELECT idv, fn, chnged FROM fv"
    " WHERE idp>0 AND idv>0 AND idm=0"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int idv = db_column_int(&q, 0);
    const char *zName = db_column_text(&q, 1);
    int chnged = db_column_int(&q, 2);
    /* Delete the file idv */
    fossil_print("DELETE %s\n", zName);
    if( chnged ){
      fossil_warning("WARNING: local edits lost for %s\n", zName);
      nConflict++;
    }
    undo_save(zName);
    db_multi_exec(
      "UPDATE vfile SET deleted=1 WHERE id=%d", idv
    );
    if( !dryRunFlag ){
      char *zFullPath = mprintf("%s%s", g.zLocalRoot, zName);
      file_delete(zFullPath);
      free(zFullPath);
    }
  }
  db_finalize(&q);

  /*
  ** Rename files that have taken a rename on P->M but which keep the same
  ** name on P->V.  If a file is renamed on P->V only or on both P->V and
  ** P->M then we retain the V name of the file.
  */
  db_prepare(&q,
    "SELECT idv, fnp, fnm FROM fv"
    " WHERE idv>0 AND idp>0 AND idm>0 AND fnp=fn AND fnm!=fnp"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int idv = db_column_int(&q, 0);
    const char *zOldName = db_column_text(&q, 1);
    const char *zNewName = db_column_text(&q, 2);
    fossil_print("RENAME %s -> %s\n", zOldName, zNewName);
    undo_save(zOldName);
    undo_save(zNewName);
    db_multi_exec(
      "UPDATE vfile SET pathname=%Q, origname=coalesce(origname,pathname)"
      " WHERE id=%d AND vid=%d", zNewName, idv, vid
    );
    if( !dryRunFlag ){
      char *zFullOldPath = mprintf("%s%s", g.zLocalRoot, zOldName);
      char *zFullNewPath = mprintf("%s%s", g.zLocalRoot, zNewName);
      if( file_wd_islink(zFullOldPath) ){
        symlink_copy(zFullOldPath, zFullNewPath);
      }else{
        file_copy(zFullOldPath, zFullNewPath);
      }
      file_delete(zFullOldPath);
      free(zFullNewPath);
      free(zFullOldPath);
    }
  }
  db_finalize(&q);


  /* Report on conflicts
  */
  if( nConflict ){
    fossil_warning("WARNING: %d merge conflicts", nConflict);
  }
  if( nOverwrite ){
    fossil_warning("WARNING: %d unmanaged files were overwritten",
                   nOverwrite);
  }
  if( dryRunFlag ){
    fossil_warning("REMINDER: this was a dry run -"
                   " no files were actually changed.");
  }

  /*
  ** Clean up the mid and pid VFILE entries.  Then commit the changes.
  */
  db_multi_exec("DELETE FROM vfile WHERE vid!=%d", vid);
  if( pickFlag ){
    db_multi_exec("INSERT OR IGNORE INTO vmerge(id,merge) VALUES(-1,%d)",mid);
    /* For a cherry-pick merge, make the default check-in comment the same
    ** as the check-in comment on the check-in that is being merged in. */
    db_multi_exec(
       "REPLACE INTO vvar(name,value)"
       " SELECT 'ci-comment', coalesce(ecomment,comment) FROM event"
       "  WHERE type='ci' AND objid=%d",
       mid
    );
  }else if( backoutFlag ){
    db_multi_exec("INSERT OR IGNORE INTO vmerge(id,merge) VALUES(-2,%d)",pid);
  }else if( integrateFlag ){
    db_multi_exec("INSERT OR IGNORE INTO vmerge(id,merge) VALUES(-4,%d)",mid);
  }else{
    db_multi_exec("INSERT OR IGNORE INTO vmerge(id,merge) VALUES(0,%d)", mid);
  }
  undo_finish();
  db_end_transaction(dryRunFlag);
}
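
The closing block of merge_cmd records the kind of pending merge through the id value it writes into vmerge. The helper below is only a sketch, not part of the listed file; it restates the convention visible in the INSERT statements above and in the report loop of the update command shown later on this page.

/* Sketch: name the vmerge id convention used by merge_cmd. */
static const char *merge_kind_label(int id){
  switch( id ){
    case  0: return "merge";             /* ordinary merge,  merge=mid */
    case -1: return "cherrypick merge";  /* --cherrypick,    merge=mid */
    case -2: return "backout merge";     /* --backout,       merge=pid */
    case -4: return "integrate merge";   /* --integrate,     merge=mid */
    default: return "per-file merge record";  /* id>0 is a vfile id (idv) */
  }
}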
Code Example #5
File: configure.c  Project: ilchenkoanna/VCS
/*
** Send "config" cards using the new format for all elements of a group
** that have recently changed.
**
** Output goes into pOut.  The groupMask identifies the group(s) to be sent.
** Send only entries whose timestamp is later than or equal to iStart.
**
** Return the number of cards sent.
*/
int configure_send_group(
    Blob *pOut,              /* Write output here */
    int groupMask,           /* Mask of groups to be sent */
    sqlite3_int64 iStart     /* Only write values changed since this time */
) {
    Stmt q;
    Blob rec;
    int ii;
    int nCard = 0;

    blob_zero(&rec);
    if( groupMask & CONFIGSET_SHUN ) {
        db_prepare(&q, "SELECT mtime, quote(uuid), quote(scom) FROM shun"
                   " WHERE mtime>=%lld", iStart);
        while( db_step(&q)==SQLITE_ROW ) {
            blob_appendf(&rec,"%s %s scom %s",
                         db_column_text(&q, 0),
                         db_column_text(&q, 1),
                         db_column_text(&q, 2)
                        );
            blob_appendf(pOut, "config /shun %d\n%s\n",
                         blob_size(&rec), blob_str(&rec));
            nCard++;
            blob_reset(&rec);
        }
        db_finalize(&q);
    }
    if( groupMask & CONFIGSET_USER ) {
        db_prepare(&q, "SELECT mtime, quote(login), quote(pw), quote(cap),"
                   "       quote(info), quote(photo) FROM user"
                   " WHERE mtime>=%lld", iStart);
        while( db_step(&q)==SQLITE_ROW ) {
            blob_appendf(&rec,"%s %s pw %s cap %s info %s photo %s",
                         db_column_text(&q, 0),
                         db_column_text(&q, 1),
                         db_column_text(&q, 2),
                         db_column_text(&q, 3),
                         db_column_text(&q, 4),
                         db_column_text(&q, 5)
                        );
            blob_appendf(pOut, "config /user %d\n%s\n",
                         blob_size(&rec), blob_str(&rec));
            nCard++;
            blob_reset(&rec);
        }
        db_finalize(&q);
    }
    if( groupMask & CONFIGSET_TKT ) {
        db_prepare(&q, "SELECT mtime, quote(title), quote(owner), quote(cols),"
                   "       quote(sqlcode) FROM reportfmt"
                   " WHERE mtime>=%lld", iStart);
        while( db_step(&q)==SQLITE_ROW ) {
            blob_appendf(&rec,"%s %s owner %s cols %s sqlcode %s",
                         db_column_text(&q, 0),
                         db_column_text(&q, 1),
                         db_column_text(&q, 2),
                         db_column_text(&q, 3),
                         db_column_text(&q, 4)
                        );
            blob_appendf(pOut, "config /reportfmt %d\n%s\n",
                         blob_size(&rec), blob_str(&rec));
            nCard++;
            blob_reset(&rec);
        }
        db_finalize(&q);
    }
    if( groupMask & CONFIGSET_ADDR ) {
        db_prepare(&q, "SELECT mtime, quote(hash), quote(content) FROM concealed"
                   " WHERE mtime>=%lld", iStart);
        while( db_step(&q)==SQLITE_ROW ) {
            blob_appendf(&rec,"%s %s content %s",
                         db_column_text(&q, 0),
                         db_column_text(&q, 1),
                         db_column_text(&q, 2)
                        );
            blob_appendf(pOut, "config /concealed %d\n%s\n",
                         blob_size(&rec), blob_str(&rec));
            nCard++;
            blob_reset(&rec);
        }
        db_finalize(&q);
    }
    db_prepare(&q, "SELECT mtime, quote(name), quote(value) FROM config"
               " WHERE name=:name AND mtime>=%lld", iStart);
    for(ii=0; ii<count(aConfig); ii++) {
        if( (aConfig[ii].groupMask & groupMask)!=0 && aConfig[ii].zName[0]!='@' ) {
            db_bind_text(&q, ":name", aConfig[ii].zName);
            while( db_step(&q)==SQLITE_ROW ) {
                blob_appendf(&rec,"%s %s value %s",
                             db_column_text(&q, 0),
                             db_column_text(&q, 1),
                             db_column_text(&q, 2)
                            );
                blob_appendf(pOut, "config /config %d\n%s\n",
                             blob_size(&rec), blob_str(&rec));
                nCard++;
                blob_reset(&rec);
            }
            db_reset(&q);
        }
    }
    db_finalize(&q);
    return nCard;
}
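
A calling sketch for the routine above: the CONFIGSET_* bits can be OR-ed together in groupMask, and passing 0 for iStart places no lower bound on mtime, so every matching entry is sent.

/* Sketch: queue the user table and the shun list as config cards. */
Blob payload;
blob_zero(&payload);
int nSent = configure_send_group(&payload, CONFIGSET_USER|CONFIGSET_SHUN, 0);
fossil_print("%d config cards queued\n", nSent);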
Code Example #6
File: update.c  Project: digsrc/fossil
/*
** COMMAND: update
**
** Usage: %fossil update ?OPTIONS? ?VERSION? ?FILES...?
**
** Change the version of the current checkout to VERSION.  Any
** uncommitted changes are retained and applied to the new checkout.
**
** The VERSION argument can be a specific version or tag or branch
** name.  If the VERSION argument is omitted, then the leaf of the
** subtree that begins at the current version is used, if there is
** only a single leaf.  VERSION can also be "current" to select the
** leaf of the current version or "latest" to select the most recent
** check-in.
**
** If one or more FILES are listed after the VERSION then only the
** named files are candidates to be updated, and any updates to them
** will be treated as edits to the current version. Using a directory
** name for one of the FILES arguments is the same as using every
** subdirectory and file beneath that directory.
**
** If FILES is omitted, all files in the current checkout are subject
** to being updated and the version of the current checkout is changed
** to VERSION. Any uncommitted changes are retained and applied to the
** new checkout.
**
** The -n or --dry-run option causes this command to do a "dry run".
** It prints out what would have happened but does not actually make
** any changes to the current checkout or the repository.
**
** The -v or --verbose option prints status information about
** unchanged files in addition to those files that actually do change.
**
** Options:
**   --case-sensitive <BOOL> override case-sensitive setting
**   --debug          print debug information on stdout
**   --latest         acceptable in place of VERSION, update to latest version
**   --force-missing  force update if missing content after sync
**   --setmtime       set file timestamps to match their check-in times
**   -n|--dry-run     If given, display instead of run actions
**   -v|--verbose     print status information about all files
**   -W|--width <num> Width of lines (default is to auto-detect). Must be >20
**                    or 0 (= no limit, resulting in a single line per entry).
**
** See also: revert
*/
void update_cmd(void) {
    int vid;              /* Current version */
    int tid=0;            /* Target version - version we are changing to */
    Stmt q;
    int latestFlag;       /* --latest.  Pick the latest version if true */
    int dryRunFlag;       /* -n or --dry-run.  Do a dry run */
    int verboseFlag;      /* -v or --verbose.  Output extra information */
    int forceMissingFlag; /* --force-missing.  Continue if missing content */
    int debugFlag;        /* --debug option */
    int setmtimeFlag;     /* --setmtime.  Set mtimes on files */
    int nChng;            /* Number of file renames */
    int *aChng;           /* Array of file renames */
    int i;                /* Loop counter */
    int nConflict = 0;    /* Number of merge conflicts */
    int nOverwrite = 0;   /* Number of unmanaged files overwritten */
    int nUpdate = 0;      /* Number of changes of any kind */
    int width;            /* Width of printed comment lines */
    Stmt mtimeXfer;       /* Statement to transfer mtimes */
    const char *zWidth;   /* Width option string value */

    if( !internalUpdate ) {
        undo_capture_command_line();
        url_proxy_options();
    }
    zWidth = find_option("width","W",1);
    if( zWidth ) {
        width = atoi(zWidth);
        if( (width!=0) && (width<=20) ) {
            fossil_fatal("-W|--width value must be >20 or 0");
        }
    } else {
        width = -1;
    }
    latestFlag = find_option("latest",0, 0)!=0;
    dryRunFlag = find_option("dry-run","n",0)!=0;
    if( !dryRunFlag ) {
        dryRunFlag = find_option("nochange",0,0)!=0; /* deprecated */
    }
    verboseFlag = find_option("verbose","v",0)!=0;
    forceMissingFlag = find_option("force-missing",0,0)!=0;
    debugFlag = find_option("debug",0,0)!=0;
    setmtimeFlag = find_option("setmtime",0,0)!=0;

    /* We should be done with options.. */
    verify_all_options();

    db_must_be_within_tree();
    vid = db_lget_int("checkout", 0);
    user_select();
    if( !dryRunFlag && !internalUpdate ) {
        if( autosync_loop(SYNC_PULL + SYNC_VERBOSE*verboseFlag,
                          db_get_int("autosync-tries", 1)) ) {
            fossil_fatal("Cannot proceed with update");
        }
    }

    /* Create any empty directories now, as well as after the update,
    ** so changes in settings are reflected now */
    if( !dryRunFlag ) ensure_empty_dirs_created();

    if( internalUpdate ) {
        tid = internalUpdate;
    } else if( g.argc>=3 ) {
        if( fossil_strcmp(g.argv[2], "current")==0 ) {
            /* If VERSION is "current", then use the same algorithm to find the
            ** target as if VERSION were omitted. */
        } else if( fossil_strcmp(g.argv[2], "latest")==0 ) {
            /* If VERSION is "latest", then use the same algorithm to find the
            ** target as if VERSION were omitted and the --latest flag is present.
            */
            latestFlag = 1;
        } else {
            tid = name_to_typed_rid(g.argv[2],"ci");
            if( tid==0 || !is_a_version(tid) ) {
                fossil_fatal("no such check-in: %s", g.argv[2]);
            }
        }
    }

    /* If no VERSION is specified on the command-line, then look for a
    ** descendant of the current version.  If there are multiple descendants,
    ** look for one from the same branch as the current version.  If there
    ** are still multiple descendants, show them all and refuse to update
    ** until the user selects one.
    */
    if( tid==0 ) {
        int closeCode = 1;
        compute_leaves(vid, closeCode);
        if( !db_exists("SELECT 1 FROM leaves") ) {
            closeCode = 0;
            compute_leaves(vid, closeCode);
        }
        if( !latestFlag && db_int(0, "SELECT count(*) FROM leaves")>1 ) {
            db_multi_exec(
                "DELETE FROM leaves WHERE rid NOT IN"
                "   (SELECT leaves.rid FROM leaves, tagxref"
                "     WHERE leaves.rid=tagxref.rid AND tagxref.tagid=%d"
                "       AND tagxref.value==(SELECT value FROM tagxref"
                " WHERE tagid=%d AND rid=%d))",
                TAG_BRANCH, TAG_BRANCH, vid
            );
            if( db_int(0, "SELECT count(*) FROM leaves")>1 ) {
                compute_leaves(vid, closeCode);
                db_prepare(&q,
                           "%s "
                           "   AND event.objid IN leaves"
                           " ORDER BY event.mtime DESC",
                           timeline_query_for_tty()
                          );
                print_timeline(&q, -100, width, 0);
                db_finalize(&q);
                fossil_fatal("Multiple descendants");
            }
        }
        tid = db_int(0, "SELECT rid FROM leaves, event"
                     " WHERE event.objid=leaves.rid"
                     " ORDER BY event.mtime DESC");
        if( tid==0 ) tid = vid;
    }

    if( tid==0 ) {
        return;
    }

    db_begin_transaction();
    vfile_check_signature(vid, CKSIG_ENOTFILE);
    if( !dryRunFlag && !internalUpdate ) undo_begin();
    if( load_vfile_from_rid(tid) && !forceMissingFlag ) {
        fossil_fatal("missing content, unable to update");
    };

    /*
    ** The fv.fn field is used to match files against each other.  The
    ** FV table contains one row for each unique filename in the current
    ** checkout and in the target version.
    */
    db_multi_exec(
        "DROP TABLE IF EXISTS fv;"
        "CREATE TEMP TABLE fv("
        "  fn TEXT %s PRIMARY KEY,"   /* The filename relative to root */
        "  idv INTEGER,"              /* VFILE entry for current version */
        "  idt INTEGER,"              /* VFILE entry for target version */
        "  chnged BOOLEAN,"           /* True if current version has been edited */
        "  islinkv BOOLEAN,"          /* True if current file is a link */
        "  islinkt BOOLEAN,"          /* True if target file is a link */
        "  ridv INTEGER,"             /* Record ID for current version */
        "  ridt INTEGER,"             /* Record ID for target */
        "  isexe BOOLEAN,"            /* Does target have execute permission? */
        "  deleted BOOLEAN DEFAULT 0,"/* File marked by "rm" to become unmanaged */
        "  fnt TEXT %s"               /* Filename of same file on target version */
        ");",
        filename_collation(), filename_collation()
    );

    /* Add files found in the current version
    */
    db_multi_exec(
        "INSERT OR IGNORE INTO fv(fn,fnt,idv,idt,ridv,ridt,isexe,chnged,deleted)"
        " SELECT pathname, pathname, id, 0, rid, 0, isexe, chnged, deleted"
        "   FROM vfile WHERE vid=%d",
        vid
    );

    /* Compute file name changes on V->T.  Record name changes in files that
    ** have changed locally.
    */
    if( vid ) {
        find_filename_changes(vid, tid, 1, &nChng, &aChng, debugFlag ? "V->T": 0);
        if( nChng ) {
            for(i=0; i<nChng; i++) {
                db_multi_exec(
                    "UPDATE fv"
                    "   SET fnt=(SELECT name FROM filename WHERE fnid=%d)"
                    " WHERE fn=(SELECT name FROM filename WHERE fnid=%d) AND chnged",
                    aChng[i*2+1], aChng[i*2]
                );
            }
            fossil_free(aChng);
        }
    }

    /* Add files found in the target version T but missing from the current
    ** version V.
    */
    db_multi_exec(
        "INSERT OR IGNORE INTO fv(fn,fnt,idv,idt,ridv,ridt,isexe,chnged)"
        " SELECT pathname, pathname, 0, 0, 0, 0, isexe, 0 FROM vfile"
        "  WHERE vid=%d"
        "    AND pathname %s NOT IN (SELECT fnt FROM fv)",
        tid, filename_collation()
    );

    /*
    ** Compute the file version ids for T
    */
    db_multi_exec(
        "UPDATE fv SET"
        " idt=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnt=pathname),0),"
        " ridt=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnt=pathname),0)",
        tid, tid
    );

    /*
    ** Add islink information
    */
    db_multi_exec(
        "UPDATE fv SET"
        " islinkv=coalesce((SELECT islink FROM vfile"
        " WHERE vid=%d AND fnt=pathname),0),"
        " islinkt=coalesce((SELECT islink FROM vfile"
        " WHERE vid=%d AND fnt=pathname),0)",
        vid, tid
    );


    if( debugFlag ) {
        db_prepare(&q,
                   "SELECT rowid, fn, fnt, chnged, ridv, ridt, isexe,"
                   "       islinkv, islinkt FROM fv"
                  );
        while( db_step(&q)==SQLITE_ROW ) {
            fossil_print("%3d: ridv=%-4d ridt=%-4d chnged=%d isexe=%d"
                         " islinkv=%d  islinkt=%d\n",
                         db_column_int(&q, 0),
                         db_column_int(&q, 4),
                         db_column_int(&q, 5),
                         db_column_int(&q, 3),
                         db_column_int(&q, 6),
                         db_column_int(&q, 7),
                         db_column_int(&q, 8));
            fossil_print("     fnv = [%s]\n", db_column_text(&q, 1));
            fossil_print("     fnt = [%s]\n", db_column_text(&q, 2));
        }
        db_finalize(&q);
    }

    /* If FILES appear on the command-line, remove from the "fv" table
    ** every entry that is not named on the command-line or which is not
    ** in a directory named on the command-line.
    */
    if( g.argc>=4 ) {
        Blob sql;              /* SQL statement to purge unwanted entries */
        Blob treename;         /* Normalized filename */
        int i;                 /* Loop counter */
        const char *zSep;      /* Term separator */

        blob_zero(&sql);
        blob_append(&sql, "DELETE FROM fv WHERE ", -1);
        zSep = "";
        for(i=3; i<g.argc; i++) {
            file_tree_name(g.argv[i], &treename, 0, 1);
            if( file_wd_isdir(g.argv[i])==1 ) {
                if( blob_size(&treename) != 1 || blob_str(&treename)[0] != '.' ) {
                    blob_append_sql(&sql, "%sfn NOT GLOB '%q/*' ",
                                    zSep /*safe-for-%s*/, blob_str(&treename));
                } else {
                    blob_reset(&sql);
                    break;
                }
            } else {
                blob_append_sql(&sql, "%sfn<>%Q ",
                                zSep /*safe-for-%s*/, blob_str(&treename));
            }
            zSep = "AND ";
            blob_reset(&treename);
        }
        db_multi_exec("%s", blob_sql_text(&sql));
        blob_reset(&sql);
    }

    /*
    ** Alter the content of the checkout so that it conforms with the
    ** target
    */
    db_prepare(&q,
               "SELECT fn, idv, ridv, idt, ridt, chnged, fnt,"
               "       isexe, islinkv, islinkt, deleted FROM fv ORDER BY 1"
              );
    db_prepare(&mtimeXfer,
               "UPDATE vfile SET mtime=(SELECT mtime FROM vfile WHERE id=:idv)"
               " WHERE id=:idt"
              );
    assert( g.zLocalRoot!=0 );
    assert( strlen(g.zLocalRoot)>0 );
    assert( g.zLocalRoot[strlen(g.zLocalRoot)-1]=='/' );
    while( db_step(&q)==SQLITE_ROW ) {
        const char *zName = db_column_text(&q, 0);  /* The filename from root */
        int idv = db_column_int(&q, 1);             /* VFILE entry for current */
        int ridv = db_column_int(&q, 2);            /* RecordID for current */
        int idt = db_column_int(&q, 3);             /* VFILE entry for target */
        int ridt = db_column_int(&q, 4);            /* RecordID for target */
        int chnged = db_column_int(&q, 5);          /* Current is edited */
        const char *zNewName = db_column_text(&q,6);/* New filename */
        int isexe = db_column_int(&q, 7);           /* EXE perm for new file */
        int islinkv = db_column_int(&q, 8);         /* True if the current file is a link */
        int islinkt = db_column_int(&q, 9);         /* True if the target file is a link */
        int deleted = db_column_int(&q, 10);        /* Marked for deletion */
        char *zFullPath;                            /* Full pathname of the file */
        char *zFullNewPath;                         /* Full pathname of dest */
        char nameChng;                              /* True if the name changed */

        zFullPath = mprintf("%s%s", g.zLocalRoot, zName);
        zFullNewPath = mprintf("%s%s", g.zLocalRoot, zNewName);
        nameChng = fossil_strcmp(zName, zNewName);
        nUpdate++;
        if( deleted ) {
            db_multi_exec("UPDATE vfile SET deleted=1 WHERE id=%d", idt);
        }
        if( idv>0 && ridv==0 && idt>0 && ridt>0 ) {
            /* Conflict.  This file has been added to the current checkout
            ** but also exists in the target checkout.  Use the current version.
            */
            fossil_print("CONFLICT %s\n", zName);
            nConflict++;
        } else if( idt>0 && idv==0 ) {
            /* File added in the target. */
            if( file_wd_isfile_or_link(zFullPath) ) {
                fossil_print("ADD %s - overwrites an unmanaged file\n", zName);
                nOverwrite++;
            } else {
                fossil_print("ADD %s\n", zName);
            }
            if( !dryRunFlag && !internalUpdate ) undo_save(zName);
            if( !dryRunFlag ) vfile_to_disk(0, idt, 0, 0);
        } else if( idt>0 && idv>0 && ridt!=ridv && (chnged==0 || deleted) ) {
            /* The file is unedited.  Change it to the target version */
            if( deleted ) {
                fossil_print("UPDATE %s - change to unmanaged file\n", zName);
            } else {
                fossil_print("UPDATE %s\n", zName);
            }
            if( !dryRunFlag && !internalUpdate ) undo_save(zName);
            if( !dryRunFlag ) vfile_to_disk(0, idt, 0, 0);
        } else if( idt>0 && idv>0 && !deleted && file_wd_size(zFullPath)<0 ) {
            /* The file is missing from the local check-out.  Restore it to
            ** the version that appears in the target. */
            fossil_print("UPDATE %s\n", zName);
            if( !dryRunFlag && !internalUpdate ) undo_save(zName);
            if( !dryRunFlag ) vfile_to_disk(0, idt, 0, 0);
        } else if( idt==0 && idv>0 ) {
            if( ridv==0 ) {
                /* Added in current checkout.  Continue to hold the file
                ** as an addition. */
                db_multi_exec("UPDATE vfile SET vid=%d WHERE id=%d", tid, idv);
            } else if( chnged ) {
                /* Edited locally but deleted from the target.  Do not track the
                ** file but keep the edited version around. */
                fossil_print("CONFLICT %s - edited locally but deleted by update\n",
                             zName);
                nConflict++;
            } else {
                fossil_print("REMOVE %s\n", zName);
                if( !dryRunFlag && !internalUpdate ) undo_save(zName);
                if( !dryRunFlag ) file_delete(zFullPath);
            }
        } else if( idt>0 && idv>0 && ridt!=ridv && chnged ) {
            /* Merge the changes in the current tree into the target version */
            Blob r, t, v;
            int rc;
            if( nameChng ) {
                fossil_print("MERGE %s -> %s\n", zName, zNewName);
            } else {
                fossil_print("MERGE %s\n", zName);
            }
            if( islinkv || islinkt /* || file_wd_islink(zFullPath) */ ) {
                fossil_print("***** Cannot merge symlink %s\n", zNewName);
                nConflict++;
            } else {
                unsigned mergeFlags = dryRunFlag ? MERGE_DRYRUN : 0;
                if( !dryRunFlag && !internalUpdate ) undo_save(zName);
                content_get(ridt, &t);
                content_get(ridv, &v);
                rc = merge_3way(&v, zFullPath, &t, &r, mergeFlags);
                if( rc>=0 ) {
                    if( !dryRunFlag ) {
                        blob_write_to_file(&r, zFullNewPath);
                        file_wd_setexe(zFullNewPath, isexe);
                    }
                    if( rc>0 ) {
                        fossil_print("***** %d merge conflicts in %s\n", rc, zNewName);
                        nConflict++;
                    }
                } else {
                    if( !dryRunFlag ) {
                        blob_write_to_file(&t, zFullNewPath);
                        file_wd_setexe(zFullNewPath, isexe);
                    }
                    fossil_print("***** Cannot merge binary file %s\n", zNewName);
                    nConflict++;
                }
            }
            if( nameChng && !dryRunFlag ) file_delete(zFullPath);
            blob_reset(&v);
            blob_reset(&t);
            blob_reset(&r);
        } else {
            nUpdate--;
            if( chnged ) {
                if( verboseFlag ) fossil_print("EDITED %s\n", zName);
            } else {
                db_bind_int(&mtimeXfer, ":idv", idv);
                db_bind_int(&mtimeXfer, ":idt", idt);
                db_step(&mtimeXfer);
                db_reset(&mtimeXfer);
                if( verboseFlag ) fossil_print("UNCHANGED %s\n", zName);
            }
        }
        free(zFullPath);
        free(zFullNewPath);
    }
    db_finalize(&q);
    db_finalize(&mtimeXfer);
    fossil_print("%.79c\n",'-');
    if( nUpdate==0 ) {
        show_common_info(tid, "checkout:", 1, 0);
        fossil_print("%-13s None. Already up-to-date\n", "changes:");
    } else {
        show_common_info(tid, "updated-to:", 1, 0);
        fossil_print("%-13s %d file%s modified.\n", "changes:",
                     nUpdate, nUpdate>1 ? "s" : "");
    }

    /* Report on conflicts
    */
    if( !dryRunFlag ) {
        Stmt q;
        int nMerge = 0;
        db_prepare(&q, "SELECT uuid, id FROM vmerge JOIN blob ON merge=rid"
                   " WHERE id<=0");
        while( db_step(&q)==SQLITE_ROW ) {
            const char *zLabel = "merge";
            switch( db_column_int(&q, 1) ) {
            case -1:
                zLabel = "cherrypick merge";
                break;
            case -2:
                zLabel = "backout merge";
                break;
            }
            fossil_warning("uncommitted %s against %S.",
                           zLabel, db_column_text(&q, 0));
            nMerge++;
        }
        db_finalize(&q);
        leaf_ambiguity_warning(tid, tid);

        if( nConflict ) {
            if( internalUpdate ) {
                internalConflictCnt = nConflict;
                nConflict = 0;
            } else {
                fossil_warning("WARNING: %d merge conflicts", nConflict);
            }
        }
        if( nOverwrite ) {
            fossil_warning("WARNING: %d unmanaged files were overwritten",
                           nOverwrite);
        }
        if( nMerge ) {
            fossil_warning("WARNING: %d uncommitted prior merges", nMerge);
        }
    }

    /*
    ** Clean up VFILE entries that no longer belong to the retained checkout.
    ** Then commit the changes.
    */
    if( dryRunFlag ) {
        db_end_transaction(1);  /* With --dry-run, rollback changes */
    } else {
        ensure_empty_dirs_created();
        if( g.argc<=3 ) {
            /* All files updated.  Shift the current checkout to the target. */
            db_multi_exec("DELETE FROM vfile WHERE vid!=%d", tid);
            checkout_set_all_exe(tid);
            manifest_to_disk(tid);
            db_lset_int("checkout", tid);
        } else {
            /* A subset of files have been checked out.  Keep the current
            ** checkout unchanged. */
            db_multi_exec("DELETE FROM vfile WHERE vid!=%d", vid);
        }
        if( !internalUpdate ) undo_finish();
        if( setmtimeFlag ) vfile_check_signature(tid, CKSIG_SETMTIME);
        db_end_transaction(0);
    }
}
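
The long if/else chain in the update loop above is easier to audit when restated as a table. The comment block below only summarizes the branches as they appear in the code, tested top to bottom; idv/idt are the fv rows for the current and target versions, and ridv/ridt their artifact ids.

/* Case analysis of the update loop (a restatement, not new behavior):
**   idv>0, ridv==0, idt>0, ridt>0              -> CONFLICT, keep the local add
**   idt>0, idv==0                              -> ADD file from the target
**   idt>0, idv>0, ridt!=ridv, !chnged||deleted -> UPDATE to the target version
**   idt>0, idv>0, !deleted, file missing       -> UPDATE, restore from target
**   idt==0, idv>0, ridv==0                     -> keep as a local add
**   idt==0, idv>0, chnged                      -> CONFLICT, edited locally but deleted
**   idt==0, idv>0, !chnged                     -> REMOVE from the checkout
**   idt>0, idv>0, ridt!=ridv, chnged           -> three-way MERGE
**   otherwise                                  -> EDITED or UNCHANGED report
*/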
Code Example #7
File: content.c  Project: LitleWaffle/sampleDirectory
/*
** Write content into the database.  Return the record ID.  If the
** content is already in the database, just return the record ID.
**
** If srcId is specified, then pBlob is delta content from
** the srcId record.  srcId might be a phantom.  
**
** pBlob is normally uncompressed text.  But if nBlob>0 then the
** pBlob value has already been compressed and nBlob is its uncompressed
** size.  If nBlob>0 then zUuid must be valid.
**
** zUuid is the UUID of the artifact, if it is specified.  When srcId is
** specified then zUuid must always be specified.  If srcId is zero and
** zUuid is also zero, then the correct zUuid is computed from pBlob.
**
** If the record already exists but is a phantom, the pBlob content
** is inserted and the phantom becomes a real record.
**
** The original content of pBlob is not disturbed.  The caller continues
** to be responsible for pBlob.  This routine does *not* take over
** responsibility for freeing pBlob.
*/
int content_put_ex(
  Blob *pBlob,              /* Content to add to the repository */
  const char *zUuid,        /* SHA1 hash of reconstructed pBlob */
  int srcId,                /* pBlob is a delta from this entry */
  int nBlob,                /* pBlob is compressed. Original size is this */
  int isPrivate             /* The content should be marked private */
){
  int size;
  int rid;
  Stmt s1;
  Blob cmpr;
  Blob hash;
  int markAsUnclustered = 0;
  int isDephantomize = 0;
  
  assert( g.repositoryOpen );
  assert( pBlob!=0 );
  assert( srcId==0 || zUuid!=0 );
  if( zUuid==0 ){
    assert( nBlob==0 );
    sha1sum_blob(pBlob, &hash);
  }else{
    blob_init(&hash, zUuid, -1);
  }
  if( nBlob ){
    size = nBlob;
  }else{
    size = blob_size(pBlob);
    if( srcId ){
      size = delta_output_size(blob_buffer(pBlob), size);
    }
  }
  db_begin_transaction();

  /* Check to see if the entry already exists and if it does whether
  ** or not the entry is a phantom
  */
  db_prepare(&s1, "SELECT rid, size FROM blob WHERE uuid=%B", &hash);
  if( db_step(&s1)==SQLITE_ROW ){
    rid = db_column_int(&s1, 0);
    if( db_column_int(&s1, 1)>=0 || pBlob==0 ){
      /* Either the entry is not a phantom or it is a phantom but we
      ** have no data with which to dephantomize it.  In either case,
      ** there is nothing for us to do other than return the RID. */
      db_finalize(&s1);
      db_end_transaction(0);
      return rid;
    }
  }else{
    rid = 0;  /* No entry with the same UUID currently exists */
    markAsUnclustered = 1;
  }
  db_finalize(&s1);

  /* Construct a received-from ID if we do not already have one */
  if( g.rcvid==0 ){
    db_multi_exec(
       "INSERT INTO rcvfrom(uid, mtime, nonce, ipaddr)"
       "VALUES(%d, julianday('now'), %Q, %Q)",
       g.userUid, g.zNonce, g.zIpAddr
    );
    g.rcvid = db_last_insert_rowid();
  }

  if( nBlob ){
    cmpr = pBlob[0];
  }else{
    blob_compress(pBlob, &cmpr);
  }
  if( rid>0 ){
    /* We are just adding data to a phantom */
    db_prepare(&s1,
      "UPDATE blob SET rcvid=%d, size=%d, content=:data WHERE rid=%d",
       g.rcvid, size, rid
    );
    db_bind_blob(&s1, ":data", &cmpr);
    db_exec(&s1);
    db_multi_exec("DELETE FROM phantom WHERE rid=%d", rid);
    if( srcId==0 || content_is_available(srcId) ){
      isDephantomize = 1;
      content_mark_available(rid);
    }
  }else{
    /* We are creating a new entry */
    db_prepare(&s1,
      "INSERT INTO blob(rcvid,size,uuid,content)"
      "VALUES(%d,%d,'%b',:data)",
       g.rcvid, size, &hash
    );
    db_bind_blob(&s1, ":data", &cmpr);
    db_exec(&s1);
    rid = db_last_insert_rowid();
    if( !pBlob ){
      db_multi_exec("INSERT OR IGNORE INTO phantom VALUES(%d)", rid);
    }
    if( g.markPrivate || isPrivate ){
      db_multi_exec("INSERT INTO private VALUES(%d)", rid);
      markAsUnclustered = 0;
    }
  }
  if( nBlob==0 ) blob_reset(&cmpr);

  /* If the srcId is specified, then the data we just added is
  ** really a delta.  Record this fact in the delta table.
  */
  if( srcId ){
    db_multi_exec("REPLACE INTO delta(rid,srcid) VALUES(%d,%d)", rid, srcId);
  }
  if( !isDephantomize && bag_find(&contentCache.missing, rid) && 
      (srcId==0 || content_is_available(srcId)) ){
    content_mark_available(rid);
  }
  if( isDephantomize ){
    after_dephantomize(rid, 0);
  }
  
  /* Add the element to the unclustered table if it has never been
  ** previously seen.
  */
  if( markAsUnclustered ){
    db_multi_exec("INSERT OR IGNORE INTO unclustered VALUES(%d)", rid);
  }

  /* Finish the transaction and cleanup */
  db_finalize(&s1);
  db_end_transaction(0);
  blob_reset(&hash);

  /* Make arrangements to verify that the data can be recovered
  ** before we commit */
  verify_before_commit(rid);
  return rid;
}
コード例 #8
/*
** WEBPAGE:  leaves
**
** Find leaves of all branches.
*/
void leaves_page(void){
  Blob sql;
  Stmt q;
  int showAll = P("all")!=0;
  int showClosed = P("closed")!=0;

  login_check_credentials();
  if( !g.perm.Read ){ login_needed(); return; }

  if( !showAll ){
    style_submenu_element("All", "All", "leaves?all");
  }
  if( !showClosed ){
    style_submenu_element("Closed", "Closed", "leaves?closed");
  }
  if( showClosed || showAll ){
    style_submenu_element("Open", "Open", "leaves");
  }
  style_header("Leaves");
  login_anonymous_available();
  style_sidebox_begin("Nomenclature:", "33%");
  cgi_printf("<ol>\n"
         "<li> A <div class=\"sideboxDescribed\">leaf</div>\n"
         "is a check-in with no descendants in the same branch.</li>\n"
         "<li> An <div class=\"sideboxDescribed\">open leaf</div>\n"
         "is a leaf that does not have a \"closed\" tag\n"
         "and is thus assumed to still be in use.</li>\n"
         "<li> A <div class=\"sideboxDescribed\">closed leaf</div>\n"
         "has a \"closed\" tag and is thus assumed to\n"
         "be historical and no longer in active use.</li>\n"
         "</ol>\n");
  style_sidebox_end();

  if( showAll ){
    cgi_printf("<h1>All leaves, both open and closed:</h1>\n");
  }else if( showClosed ){
    cgi_printf("<h1>Closed leaves:</h1>\n");
  }else{
    cgi_printf("<h1>Open leaves:</h1>\n");
  }
  blob_zero(&sql);
  blob_append(&sql, timeline_query_for_www(), -1);
  blob_appendf(&sql, " AND blob.rid IN leaf");
  if( showClosed ){
    blob_appendf(&sql," AND %z", leaf_is_closed_sql("blob.rid"));
  }else if( !showAll ){
    blob_appendf(&sql," AND NOT %z", leaf_is_closed_sql("blob.rid"));
  }
  db_prepare(&q, "%s ORDER BY event.mtime DESC", blob_str(&sql));
  blob_reset(&sql);
  www_print_timeline(&q, TIMELINE_LEAFONLY, 0, 0, 0);
  db_finalize(&q);
  cgi_printf("<br />\n"
         "<script  type=\"text/JavaScript\">\n"
         "function xin(id){\n"
         "}\n"
         "function xout(id){\n"
         "}\n"
         "</script>\n");
  style_footer();
}
コード例 #9
ファイル: vfile.c プロジェクト: ilchenkoanna/VCS
/*
** Compute an aggregate MD5 checksum over the disk image of every
** file in vid.  The file names are part of the checksum.  The resulting
** checksum is the same as is expected on the R-card of a manifest.
**
** This function operates differently if the Global.aCommitFile
** variable is not NULL. In that case, the disk image is used for
** each file in aCommitFile[] and the repository image
** is used for all others.
**
** Newly added files that are not contained in the repository are
** omitted from the checksum if they are not in Global.aCommitFile[].
**
** Newly deleted files are included in the checksum if they are not
** part of Global.aCommitFile[].
**
** Renamed files use their new name if they are in Global.aCommitFile[]
** and their original name if they are not in Global.aCommitFile[].
**
** Return the resulting checksum in blob pOut.
*/
void vfile_aggregate_checksum_disk(int vid, Blob *pOut){
  FILE *in;
  Stmt q;
  char zBuf[4096];

  db_must_be_within_tree();
  db_prepare(&q, 
      "SELECT %Q || pathname, pathname, origname, file_is_selected(id), rid"
      "  FROM vfile"
      " WHERE (NOT deleted OR NOT file_is_selected(id)) AND vid=%d"
      " ORDER BY pathname /*scan*/",
      g.zLocalRoot, vid
  );
  md5sum_init();
  while( db_step(&q)==SQLITE_ROW ){
    const char *zFullpath = db_column_text(&q, 0);
    const char *zName = db_column_text(&q, 1);
    int isSelected = db_column_int(&q, 3);

    if( isSelected ){
      md5sum_step_text(zName, -1);
      if( file_wd_islink(zFullpath) ){
        /* Instead of file content, use link destination path */
        Blob pathBuf;

        sqlite3_snprintf(sizeof(zBuf), zBuf, " %ld\n", 
                         blob_read_link(&pathBuf, zFullpath));
        md5sum_step_text(zBuf, -1);
        md5sum_step_text(blob_str(&pathBuf), -1);
        blob_reset(&pathBuf);
      }else{
        in = vcs_fopen(zFullpath,"rb");
        if( in==0 ){
          md5sum_step_text(" 0\n", -1);
          continue;
        }
        fseek(in, 0L, SEEK_END);
        sqlite3_snprintf(sizeof(zBuf), zBuf, " %ld\n", ftell(in));
        fseek(in, 0L, SEEK_SET);
        md5sum_step_text(zBuf, -1);
        /*printf("%s %s %s",md5sum_current_state(),zName,zBuf); fflush(stdout);*/
        for(;;){
          int n;
          n = fread(zBuf, 1, sizeof(zBuf), in);
          if( n<=0 ) break;
          md5sum_step_text(zBuf, n);
        }
        fclose(in);
      }
    }else{
      int rid = db_column_int(&q, 4);
      const char *zOrigName = db_column_text(&q, 2);
      char zBuf[100];
      Blob file;

      if( zOrigName ) zName = zOrigName;
      if( rid>0 ){
        md5sum_step_text(zName, -1);
        blob_zero(&file);
        content_get(rid, &file);
        sqlite3_snprintf(sizeof(zBuf), zBuf, " %d\n", blob_size(&file));
        md5sum_step_text(zBuf, -1);
        md5sum_step_blob(&file);
        blob_reset(&file);
      }
    }
  }
  db_finalize(&q);
  md5sum_finish(pOut);
}
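The loop above feeds MD5, for each selected file, the tree-relative name, a space, the decimal size, a newline, and then the raw file content. A minimal standalone sketch (not part of the original sample; the file name and content are invented) that prints the exact byte stream one file contributes to the checksum:

/* Print the bytes that vfile_aggregate_checksum_disk() would hash for one file. */
#include <stdio.h>
#include <string.h>

int main(void){
  const char *zName = "src/main.c";               /* hypothetical tree path */
  const char *zContent = "int main(){return 0;}\n";
  char zBuf[100];

  fputs(zName, stdout);                           /* md5sum_step_text(zName, -1) */
  snprintf(zBuf, sizeof(zBuf), " %d\n", (int)strlen(zContent));
  fputs(zBuf, stdout);                            /* md5sum_step_text(zBuf, -1)  */
  fputs(zContent, stdout);                        /* file content                */
  return 0;
}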
コード例 #10
ファイル: vfile.c プロジェクト: ilchenkoanna/VCS
/*
** Look at every VFILE entry with the given vid and update the
** VFILE.CHNGED field on every file according to whether or not
** the file has changes.  0 means no change.  1 means edited.  2 means
** the file has changed due to a merge.  3 means the file was added
** by a merge.
**
** If VFILE.DELETED is true or if VFILE.RID is zero, then the file was
** either removed from management via "vcs rm" or added via
** "vcs add", respectively, and in both cases we always know that 
** the file has changed without having to check the size, mtime,
** or on-disk content.
**
** If the size of the file has changed, then we always know that the file
** changed without having to look at the mtime or on-disk content.
**
** The mtime of the file is only a factor if the mtime-changes setting
** is true (or undefined - it defaults to true) and the useSha1sum flag
** is false.  If the mtime-changes setting is false or if useSha1sum
** is true, then we do not trust the mtime and will examine the on-disk
** content to determine if a file really is the same.
**
** If the mtime is used, it is used only to determine if files are the same.
** If the mtime of a file has changed, we still examine the on-disk content
** to see whether or not the edit was a null-edit.
*/
void vfile_check_signature(int vid, int notFileIsFatal, int useSha1sum){
  int nErr = 0;
  Stmt q;
  Blob fileCksum, origCksum;
  int useMtime = useSha1sum==0 && db_get_boolean("mtime-changes", 1);

  db_begin_transaction();
  db_prepare(&q, "SELECT id, %Q || pathname,"
                 "       vfile.mrid, deleted, chnged, uuid, size, mtime"
                 "  FROM vfile LEFT JOIN blob ON vfile.mrid=blob.rid"
                 " WHERE vid=%d ", g.zLocalRoot, vid);
  while( db_step(&q)==SQLITE_ROW ){
    int id, rid, isDeleted;
    const char *zName;
    int chnged = 0;
    int oldChnged;
    i64 oldMtime;
    i64 currentMtime;
    i64 origSize;
    i64 currentSize;

    id = db_column_int(&q, 0);
    zName = db_column_text(&q, 1);
    rid = db_column_int(&q, 2);
    isDeleted = db_column_int(&q, 3);
    oldChnged = chnged = db_column_int(&q, 4);
    oldMtime = db_column_int64(&q, 7);
    currentSize = file_wd_size(zName);
    origSize = db_column_int64(&q, 6);
    currentMtime = file_wd_mtime(0);
    if( chnged==0 && (isDeleted || rid==0) ){
      /* "vcs rm" or "vcs add" always change the file */
      chnged = 1;
    }else if( !file_wd_isfile_or_link(0) && currentSize>=0 ){
      if( notFileIsFatal ){
        vcs_warning("not an ordinary file: %s", zName);
        nErr++;
      }
      chnged = 1;
    }
    if( origSize!=currentSize ){
      if( chnged!=1 ){
        /* A file size change is definitive - the file has changed.  No
        ** need to check the mtime or sha1sum */
        chnged = 1;
      }
    }else if( chnged==1 && rid!=0 && !isDeleted ){
      /* File is believed to have changed but it is the same size.
      ** Double check that it really has changed by looking at content. */
      assert( origSize==currentSize );
      db_ephemeral_blob(&q, 5, &origCksum);
      if( sha1sum_file(zName, &fileCksum) ){
        blob_zero(&fileCksum);
      }
      if( blob_compare(&fileCksum, &origCksum)==0 ) chnged = 0;
      blob_reset(&origCksum);
      blob_reset(&fileCksum);
    }else if( chnged==0 && (useMtime==0 || currentMtime!=oldMtime) ){
      /* For files that were formerly believed to be unchanged, if their
      ** mtime changes, or unconditionally if --sha1sum is used, check
      ** to see if they have been edited by looking at their SHA1 sum */
      assert( origSize==currentSize );
      db_ephemeral_blob(&q, 5, &origCksum);
      if( sha1sum_file(zName, &fileCksum) ){
        blob_zero(&fileCksum);
      }
      if( blob_compare(&fileCksum, &origCksum) ){
        chnged = 1;
      }
      blob_reset(&origCksum);
      blob_reset(&fileCksum);
    }
    if( currentMtime!=oldMtime || chnged!=oldChnged ){
      db_multi_exec("UPDATE vfile SET mtime=%lld, chnged=%d WHERE id=%d",
                    currentMtime, chnged, id);
    }
  }
  db_finalize(&q);
  if( nErr ) vcs_fatal("abort due to prior errors");
  db_end_transaction(0);
}
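The CHNGED codes documented above are easy to forget when reading vfile rows by hand. A standalone sketch (not from the original project) that maps each code to a label:

/* Translate the VFILE.CHNGED codes described in the comment above. */
#include <stdio.h>

static const char *chnged_label(int chnged){
  switch( chnged ){
    case 0:  return "unchanged";
    case 1:  return "edited";
    case 2:  return "changed by merge";
    case 3:  return "added by merge";
    default: return "unknown code";
  }
}

int main(void){
  int i;
  for(i=0; i<=3; i++) printf("%d -> %s\n", i, chnged_label(i));
  return 0;
}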
コード例 #11
ファイル: line_buffer.c プロジェクト: Barklad/first_app
void buffer_reset(void)
{
	blob_reset();
}
コード例 #12
/*
** COMMAND: mv
** COMMAND: rename*
**
** Usage: %fossil mv|rename OLDNAME NEWNAME
**    or: %fossil mv|rename OLDNAME... DIR
**
** Move or rename one or more files or directories within the repository tree.
** You can either rename a file or directory or move it to another subdirectory.
**
** This command does NOT rename or move the files on disk.  This command merely
** records the fact that filenames have changed so that appropriate notations
** can be made at the next commit/checkin.
**
** See also: changes, status
*/
void mv_cmd(void){
  int i;
  int vid;
  char *zDest;
  Blob dest;
  Stmt q;

  db_must_be_within_tree();
  vid = db_lget_int("checkout", 0);
  if( vid==0 ){
    fossil_panic("no checkout rename files in");
  }
  if( g.argc<4 ){
    usage("OLDNAME NEWNAME");
  }
  zDest = g.argv[g.argc-1];
  db_begin_transaction();
  file_tree_name(zDest, &dest, 1);
  db_multi_exec(
    "UPDATE vfile SET origname=pathname WHERE origname IS NULL;"
  );
  db_multi_exec(
    "CREATE TEMP TABLE mv(f TEXT UNIQUE ON CONFLICT IGNORE, t TEXT);"
  );
  if( file_wd_isdir(zDest)!=1 ){
    Blob orig;
    if( g.argc!=4 ){
      usage("OLDNAME NEWNAME");
    }
    file_tree_name(g.argv[2], &orig, 1);
    db_multi_exec(
      "INSERT INTO mv VALUES(%B,%B)", &orig, &dest
    );
  }else{
    if( blob_eq(&dest, ".") ){
      blob_reset(&dest);
    }else{
      blob_append(&dest, "/", 1);
    }
    for(i=2; i<g.argc-1; i++){
      Blob orig;
      char *zOrig;
      int nOrig;
      file_tree_name(g.argv[i], &orig, 1);
      zOrig = blob_str(&orig);
      nOrig = blob_size(&orig);
      db_prepare(&q,
         "SELECT pathname FROM vfile"
         " WHERE vid=%d"
         "   AND (pathname='%q' OR (pathname>'%q/' AND pathname<'%q0'))"
         " ORDER BY 1",
         vid, zOrig, zOrig, zOrig
      );
      while( db_step(&q)==SQLITE_ROW ){
        const char *zPath = db_column_text(&q, 0);
        int nPath = db_column_bytes(&q, 0);
        const char *zTail;
        if( nPath==nOrig ){
          zTail = file_tail(zPath);
        }else{
          zTail = &zPath[nOrig+1];
        }
        db_multi_exec(
          "INSERT INTO mv VALUES('%q','%q%q')",
          zPath, blob_str(&dest), zTail
        );
      }
      db_finalize(&q);
    }
  }
  db_prepare(&q, "SELECT f, t FROM mv ORDER BY f");
  while( db_step(&q)==SQLITE_ROW ){
    const char *zFrom = db_column_text(&q, 0);
    const char *zTo = db_column_text(&q, 1);
    mv_one_file(vid, zFrom, zTo);
  }
  db_finalize(&q);
  db_end_transaction(0);
}
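The range comparison in the mv_cmd query above (pathname>'DIR/' AND pathname<'DIR0') matches exactly the managed paths under a directory because '0' is the ASCII character immediately after '/'. A standalone sketch (not from the original sample) of the same test:

/* Return true if zPath lies strictly inside directory zDir, using the
** same lexicographic trick as the SQL in mv_cmd(). */
#include <stdio.h>
#include <string.h>

static int in_dir(const char *zPath, const char *zDir){
  char zLo[256], zHi[256];
  snprintf(zLo, sizeof(zLo), "%s/", zDir);   /* lower bound: "DIR/" */
  snprintf(zHi, sizeof(zHi), "%s0", zDir);   /* upper bound: "DIR0" */
  return strcmp(zPath, zLo)>0 && strcmp(zPath, zHi)<0;
}

int main(void){
  printf("%d\n", in_dir("src/main.c", "src"));   /* prints 1 */
  printf("%d\n", in_dir("srcs/x.c",   "src"));   /* prints 0 */
  return 0;
}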
コード例 #13
/*
** COMMAND: add
**
** Usage: %fossil add ?OPTIONS? FILE1 ?FILE2 ...?
**
** Make arrangements to add one or more files or directories to the
** current checkout at the next commit.
**
** When adding files or directories recursively, filenames that begin
** with "." are excluded by default.  To include such files, add
** the "--dotfiles" option to the command-line.
**
** The --ignore option is a comma-separated list of glob patterns for files
** to be excluded.  Example:  '*.o,*.obj,*.exe'  If the --ignore option
** does not appear on the command line then the "ignore-glob" setting is
** used.
**
** The --case-sensitive option determines whether filenames are treated as
** case sensitive. If the option is not given, the default comes from the
** global setting, or from the operating system default if that is unset.
**
** Options:
**
**    --case-sensitive <BOOL> override case-sensitive setting
**    --dotfiles              include files beginning with a dot (".")   
**    --ignore <CSG>          ignore files matching patterns from the 
**                            comma separated list of glob patterns.
** 
** See also: addremove, rm
*/
void add_cmd(void){
  int i;                     /* Loop counter */
  int vid;                   /* Currently checked out version */
  int nRoot;                 /* Full path characters in g.zLocalRoot */
  const char *zIgnoreFlag;   /* The --ignore option or ignore-glob setting */
  Glob *pIgnore;             /* Ignore everything matching this glob pattern */
  int caseSensitive;         /* True if filenames are case sensitive */
  unsigned scanFlags = 0;    /* Flags passed to vfile_scan() */

  zIgnoreFlag = find_option("ignore",0,1);
  if( find_option("dotfiles",0,0)!=0 ) scanFlags |= SCAN_ALL;
  capture_case_sensitive_option();
  db_must_be_within_tree();
  caseSensitive = filenames_are_case_sensitive();
  if( zIgnoreFlag==0 ){
    zIgnoreFlag = db_get("ignore-glob", 0);
  }
  vid = db_lget_int("checkout",0);
  if( vid==0 ){
    fossil_panic("no checkout to add to");
  }
  db_begin_transaction();
  db_multi_exec("CREATE TEMP TABLE sfile(x TEXT PRIMARY KEY)");
#if defined(_WIN32)
  db_multi_exec(
     "CREATE INDEX IF NOT EXISTS vfile_pathname "
     "  ON vfile(pathname COLLATE nocase)"
  );
#endif
  pIgnore = glob_create(zIgnoreFlag);
  nRoot = strlen(g.zLocalRoot);
  
  /* Load the names of all files that are to be added into sfile temp table */
  for(i=2; i<g.argc; i++){
    char *zName;
    int isDir;
    Blob fullName;

    file_canonical_name(g.argv[i], &fullName, 0);
    zName = blob_str(&fullName);
    isDir = file_wd_isdir(zName);
    if( isDir==1 ){
      vfile_scan(&fullName, nRoot-1, scanFlags, pIgnore);
    }else if( isDir==0 ){
      fossil_warning("not found: %s", zName);
    }else if( file_access(zName, R_OK) ){
      fossil_fatal("cannot open %s", zName);
    }else{
      char *zTreeName = &zName[nRoot];
      db_multi_exec(
         "INSERT OR IGNORE INTO sfile(x) VALUES(%Q)",
         zTreeName
      );
    }
    blob_reset(&fullName);
  }
  glob_free(pIgnore);

  add_files_in_sfile(vid, caseSensitive);
  db_end_transaction(0);
}
コード例 #14
ファイル: import.c プロジェクト: LitleWaffle/sampleDirectory
/*
** Use data accumulated in gg from a "commit" record to add a new
** manifest artifact to the BLOB table.
*/
static void finish_commit(void){
  int i;
  char *zFromBranch;
  char *aTCard[4];                /* Array of T cards for manifest */
  int nTCard = 0;                 /* Entries used in aTCard[] */
  Blob record, cksum;

  import_prior_files();
  qsort(gg.aFile, gg.nFile, sizeof(gg.aFile[0]), mfile_cmp);
  blob_zero(&record);
  blob_appendf(&record, "C %F\n", gg.zComment);
  blob_appendf(&record, "D %s\n", gg.zDate);
  for(i=0; i<gg.nFile; i++){
    const char *zUuid = gg.aFile[i].zUuid;
    if( zUuid==0 ) continue;
    blob_appendf(&record, "F %F %s", gg.aFile[i].zName, zUuid);
    if( gg.aFile[i].isExe ){
      blob_append(&record, " x\n", 3);
    }else if( gg.aFile[i].isLink ){
      blob_append(&record, " l\n", 3);
      gg.hasLinks = 1;
    }else{
      blob_append(&record, "\n", 1);
    }
  }
  if( gg.zFrom ){
    blob_appendf(&record, "P %s", gg.zFrom);
    for(i=0; i<gg.nMerge; i++){
      blob_appendf(&record, " %s", gg.azMerge[i]);
    }
    blob_append(&record, "\n", 1);
    zFromBranch = db_text(0, "SELECT brnm FROM xbranch WHERE tname=%Q",
                              gg.zFromMark);
  }else{
    zFromBranch = 0;
  }

  /* Add the required "T" cards to the manifest. Make sure they are added
  ** in sorted order and without any duplicates. Otherwise, fossil will not
  ** recognize the document as a valid manifest. */
  if( !gg.tagCommit && fossil_strcmp(zFromBranch, gg.zBranch)!=0 ){
    aTCard[nTCard++] = mprintf("T *branch * %F\n", gg.zBranch);
    aTCard[nTCard++] = mprintf("T *sym-%F *\n", gg.zBranch);
    if( zFromBranch ){
      aTCard[nTCard++] = mprintf("T -sym-%F *\n", zFromBranch);
    }
  }
  if( gg.zFrom==0 ){
    aTCard[nTCard++] = mprintf("T *sym-trunk *\n");
  }
  qsort(aTCard, nTCard, sizeof(char *), string_cmp);
  for(i=0; i<nTCard; i++){
    if( i==0 || fossil_strcmp(aTCard[i-1], aTCard[i]) ){
      blob_appendf(&record, "%s", aTCard[i]);
    }
  }
  for(i=0; i<nTCard; i++) free(aTCard[i]);

  free(zFromBranch);
  db_multi_exec("INSERT INTO xbranch(tname, brnm) VALUES(%Q,%Q)",
                gg.zMark, gg.zBranch);
  blob_appendf(&record, "U %F\n", gg.zUser);
  md5sum_blob(&record, &cksum);
  blob_appendf(&record, "Z %b\n", &cksum);
  fast_insert_content(&record, gg.zMark, 1);
  blob_reset(&record);
  blob_reset(&cksum);

  /* The "git fast-export" command might output multiple "commit" lines
  ** that reference a tag using "refs/tags/TAGNAME".  The tag should only
  ** be applied to the last commit that is output.  The problem is we do not
  ** know at this time if the current commit is the last one to hold this
  ** tag or not.  So make an entry in the XTAG table to record this tag
  ** but overwrite that entry if a later instance of the same tag appears.
  **
  ** This behavior seems like a bug in git-fast-export, but it is easier
  ** to work around the problem than to fix git-fast-export.
  */
  if( gg.tagCommit && gg.zDate && gg.zUser && gg.zFrom ){
    blob_appendf(&record, "D %s\n", gg.zDate);
    blob_appendf(&record, "T +sym-%F %s\n", gg.zBranch, gg.zPrevCheckin);
    blob_appendf(&record, "U %F\n", gg.zUser);
    md5sum_blob(&record, &cksum);
    blob_appendf(&record, "Z %b\n", &cksum);
    db_multi_exec(
       "INSERT OR REPLACE INTO xtag(tname, tcontent)"
       " VALUES(%Q,%Q)", gg.zBranch, blob_str(&record)
    );
    blob_reset(&record);
    blob_reset(&cksum);
  }

  fossil_free(gg.zPrevBranch);
  gg.zPrevBranch = gg.zBranch;
  gg.zBranch = 0;
  import_reset(0);
}
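For reference, a standalone sketch (not produced by the code above; every hash, name, and date is a placeholder) that prints a manifest skeleton in the same card order finish_commit() emits -- C, D, F, P, T, U, and finally Z, which in a real manifest holds the MD5 of everything before it:

/* Emit a placeholder manifest in the card order used by finish_commit(). */
#include <stdio.h>

int main(void){
  printf("C %s\n", "import");                       /* commit comment   */
  printf("D %s\n", "2013-01-01T12:00:00");          /* date             */
  printf("F %s %s\n", "src/main.c",                 /* one file card    */
         "0000000000000000000000000000000000000000");
  printf("P %s\n",                                  /* parent check-in  */
         "1111111111111111111111111111111111111111");
  printf("T *branch * %s\n", "trunk");              /* branch tag card  */
  printf("U %s\n", "alice");                        /* user             */
  printf("Z %s\n",                                  /* MD5 of the above */
         "00000000000000000000000000000000");
  return 0;
}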
コード例 #15
/*
** Impl of /json/artifact. This basically just determines the type of
** an artifact and forwards the real work to another function.
*/
cson_value * json_page_artifact(){
  cson_object * pay = NULL;
  char const * zName = NULL;
  char const * zType = NULL;
  char const * zUuid = NULL;
  cson_value * entry = NULL;
  Blob uuid = empty_blob;
  int rc;
  int rid = 0;
  ArtifactDispatchEntry const * dispatcher = &ArtifactDispatchList[0];
  zName = json_find_option_cstr2("name", NULL, NULL, g.json.dispatchDepth+1);
  if(!zName || !*zName) {
    json_set_err(FSL_JSON_E_MISSING_ARGS,
                 "Missing 'name' argument.");
    return NULL;
  }

  if( validate16(zName, strlen(zName)) ){
    if( db_exists("SELECT 1 FROM ticket WHERE tkt_uuid GLOB '%q*'", zName) ){
      zType = "ticket";
      goto handle_entry;
    }
    if( db_exists("SELECT 1 FROM tag WHERE tagname GLOB 'event-%q*'", zName) ){
      zType = "tag";
      goto handle_entry;
    }
  }
  blob_set(&uuid,zName);
  rc = name_to_uuid(&uuid,-1,"*");
  /* FIXME: check for a filename if all else fails. */
  if(1==rc){
    g.json.resultCode = FSL_JSON_E_RESOURCE_NOT_FOUND;
    goto error;
  }else if(2==rc){
    g.json.resultCode = FSL_JSON_E_AMBIGUOUS_UUID;
    goto error;
  }
  zUuid = blob_str(&uuid);
  rid = db_int(0, "SELECT rid FROM blob WHERE uuid=%Q", zUuid);
  if(0==rid){
    g.json.resultCode = FSL_JSON_E_RESOURCE_NOT_FOUND;
    goto error;
  }

  if( db_exists("SELECT 1 FROM mlink WHERE mid=%d", rid)
      || db_exists("SELECT 1 FROM plink WHERE cid=%d", rid)
      || db_exists("SELECT 1 FROM plink WHERE pid=%d", rid)){
    zType = "checkin";
    goto handle_entry;
  }else if( db_exists("SELECT 1 FROM tagxref JOIN tag USING(tagid)"
                      " WHERE rid=%d AND tagname LIKE 'wiki-%%'", rid) ){
    zType = "wiki";
    goto handle_entry;
  }else if( db_exists("SELECT 1 FROM tagxref JOIN tag USING(tagid)"
                      " WHERE rid=%d AND tagname LIKE 'tkt-%%'", rid) ){
    zType = "ticket";
    goto handle_entry;
  }else if ( db_exists("SELECT 1 FROM mlink WHERE fid = %d", rid) ){
    zType = "file";
    goto handle_entry;
  }else{
    g.json.resultCode = FSL_JSON_E_RESOURCE_NOT_FOUND;
    goto error;
  }

  error:
  assert( 0 != g.json.resultCode );
  goto veryend;

  handle_entry:
  pay = cson_new_object();
  assert( (NULL != zType) && "Internal dispatching error." );
  for( ; dispatcher->name; ++dispatcher ){
    if(0!=strcmp(dispatcher->name, zType)){
      continue;
    }else{
      entry = (*dispatcher->func)(pay, rid);
      break;
    }
  }
  if(!g.json.resultCode){
    assert( NULL != entry );
    assert( NULL != zType );
    cson_object_set( pay, "type", json_new_string(zType) );
    cson_object_set( pay, "uuid", json_new_string(zUuid) );
    /*cson_object_set( pay, "name", json_new_string(zName ? zName : zUuid) );*/
    /*cson_object_set( pay, "rid", cson_value_new_integer(rid) );*/
    if(cson_value_is_object(entry) && (cson_value_get_object(entry) != pay)){
      cson_object_set(pay, "artifact", entry);
    }
  }
  veryend:
  blob_reset(&uuid);
  if(g.json.resultCode && pay){
    cson_free_object(pay);
    pay = NULL;
  }
  return cson_object_value(pay);
}
コード例 #16
cson_value * json_artifact_file(cson_object * zParent, int rid){
  cson_object * pay = NULL;
  Stmt q = empty_Stmt;
  cson_array * checkin_arr = NULL;
  char contentFormat;
  i64 contentSize = -1;
  char * parentUuid;
  if( ! g.perm.Read ){
    json_set_err(FSL_JSON_E_DENIED,
                 "Requires 'o' privileges.");
    return NULL;
  }
  
  pay = zParent;

  contentFormat = json_artifact_get_content_format_flag();
  if( 0 != contentFormat ){
    Blob content = empty_blob;
    const char *zMime;
    char const * zFormat = (contentFormat<1) ? "raw" : "html";
    content_get(rid, &content);
    zMime = mimetype_from_content(&content);
    cson_object_set(zParent, "contentType",
                    json_new_string(zMime ? zMime : "text/plain"));
    if(!zMime){/* text/plain */
      if(0 < blob_size(&content)){
        if( 0 < contentFormat ){/*HTML-ize it*/
          Blob html = empty_blob;
          wiki_convert(&content, &html, 0);
          assert( blob_size(&content) < blob_size(&html) );
          blob_swap( &html, &content );
          assert( blob_size(&content) > blob_size(&html) );
          blob_reset( &html );
        }/*else as-is*/
      }
      cson_object_set(zParent, "content",
                      cson_value_new_string(blob_str(&content),
                                            (unsigned int)blob_size(&content)));
    }/*else binary: ignore*/
    contentSize = blob_size(&content);
    cson_object_set(zParent, "contentSize", json_new_int(contentSize) );
    cson_object_set(zParent, "contentFormat", json_new_string(zFormat) );
    blob_reset(&content);
  }
  contentSize = db_int64(-1, "SELECT size FROM blob WHERE rid=%d", rid);
  assert( -1 < contentSize );
  cson_object_set(zParent, "size", json_new_int(contentSize) );

  parentUuid = db_text(NULL,
                       "SELECT DISTINCT p.uuid "
                       "FROM blob p, blob f, mlink m "
                       "WHERE m.pid=p.rid "
                       "AND m.fid=f.rid "
                       "AND f.rid=%d",
                       rid
                       );
  if(parentUuid){
    cson_object_set( zParent, "parent", json_new_string(parentUuid) );
    fossil_free(parentUuid);
  }
  
  /* Find checkins associated with this file... */
  db_prepare(&q,
      "SELECT filename.name AS name, "
      "  (mlink.pid==0) AS isNew,"
      "  (mlink.fid==0) AS isDel,"
      "  cast(strftime('%%s',event.mtime) as int) AS timestamp,"
      "  coalesce(event.ecomment,event.comment) as comment,"
      "  coalesce(event.euser,event.user) as user,"
#if 0
      "  a.size AS size," /* same for all checkins. */
#endif
      "  b.uuid as checkin, "
#if 0
      "  mlink.mperm as mperm,"
#endif
      "  coalesce((SELECT value FROM tagxref"
                      "  WHERE tagid=%d AND tagtype>0 AND "
                      " rid=mlink.mid),'trunk') as branch"
      "  FROM mlink, filename, event, blob a, blob b"
      " WHERE filename.fnid=mlink.fnid"
      "   AND event.objid=mlink.mid"
      "   AND a.rid=mlink.fid"
      "   AND b.rid=mlink.mid"
      "   AND mlink.fid=%d"
      "   ORDER BY filename.name, event.mtime",
      TAG_BRANCH, rid
    );
  /* TODO: add a "state" flag for the file in each checkin,
     e.g. "modified", "new", "deleted".
   */
  checkin_arr = cson_new_array(); 
  cson_object_set(pay, "checkins", cson_array_value(checkin_arr));
  while( (SQLITE_ROW==db_step(&q) ) ){
    cson_object * row = cson_value_get_object(cson_sqlite3_row_to_object(q.pStmt));
    /* FIXME: move this isNew/isDel stuff into an SQL CASE statement. */
    char const isNew = cson_value_get_bool(cson_object_get(row,"isNew"));
    char const isDel = cson_value_get_bool(cson_object_get(row,"isDel"));
    cson_object_set(row, "isNew", NULL);
    cson_object_set(row, "isDel", NULL);
    cson_object_set(row, "state",
                    json_new_string(json_artifact_status_to_string(isNew, isDel)));
    cson_array_append( checkin_arr, cson_object_value(row) );
  }
  db_finalize(&q);
  return cson_object_value(pay);
}
コード例 #17
/*
** Given the RID for a checkin, construct a tarball containing
** all files in that checkin
**
** If RID is for an object that is not a real manifest, then the
** resulting tarball contains a single file which is the RID
** object.
**
** If the RID object does not exist in the repository, then
** pTar is zeroed.
**
** zDir is a "synthetic" subdirectory which all files get
** added to as part of the tarball. It may be 0 or an empty string, in
** which case it is ignored. The intention is to create a tarball which
** politely expands into a subdir instead of filling your current dir
** with source files. For example, pass a UUID or "ProjectName".
**
*/
void tarball_of_checkin(int rid, Blob *pTar, const char *zDir){
  Blob mfile, hash, file;
  Manifest *pManifest;
  ManifestFile *pFile;
  Blob filename;
  int nPrefix;
  char *zName;
  unsigned int mTime;

  content_get(rid, &mfile);
  if( blob_size(&mfile)==0 ){
    blob_zero(pTar);
    return;
  }
  blob_zero(&hash);
  blob_zero(&filename);

  if( zDir && zDir[0] ){
    blob_appendf(&filename, "%s/", zDir);
  }
  nPrefix = blob_size(&filename);

  pManifest = manifest_get(rid, CFTYPE_MANIFEST);
  if( pManifest ){
    mTime = (pManifest->rDate - 2440587.5)*86400.0;
    tar_begin(mTime);
    if( db_get_boolean("manifest", 0) ){
      blob_append(&filename, "manifest", -1);
      zName = blob_str(&filename);
      tar_add_file(zName, &mfile, 0, mTime);
      sha1sum_blob(&mfile, &hash);
      blob_reset(&mfile);
      blob_append(&hash, "\n", 1);
      blob_resize(&filename, nPrefix);
      blob_append(&filename, "manifest.uuid", -1);
      zName = blob_str(&filename);
      tar_add_file(zName, &hash, 0, mTime);
      blob_reset(&hash);
    }
    manifest_file_rewind(pManifest);
    while( (pFile = manifest_file_next(pManifest,0))!=0 ){
      int fid = uuid_to_rid(pFile->zUuid, 0);
      if( fid ){
        content_get(fid, &file);
        blob_resize(&filename, nPrefix);
        blob_append(&filename, pFile->zName, -1);
        zName = blob_str(&filename);
        tar_add_file(zName, &file, manifest_file_mperm(pFile), mTime);
        blob_reset(&file);
      }
    }
  }else{
    sha1sum_blob(&mfile, &hash);
    blob_append(&filename, blob_str(&hash), 16);
    zName = blob_str(&filename);
    mTime = db_int64(0, "SELECT (julianday('now') -  2440587.5)*86400.0;");
    tar_begin(mTime);
    tar_add_file(zName, &mfile, 0, mTime);
  }
  manifest_destroy(pManifest);
  blob_reset(&mfile);
  blob_reset(&filename);
  tar_finish(pTar);
}
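The mTime computation above converts a Julian Day number to Unix epoch seconds: 2440587.5 is the Julian Day of 1970-01-01 00:00:00 UTC and 86400 is the number of seconds per day. A standalone sketch (the input date is arbitrary):

/* Convert a Julian Day number to Unix time, as tarball_of_checkin() does. */
#include <stdio.h>

int main(void){
  double rDate = 2456293.5;   /* Julian Day of 2013-01-01 00:00:00 UTC */
  unsigned int mTime = (unsigned int)((rDate - 2440587.5)*86400.0);
  printf("%u\n", mTime);      /* prints 1356998400 */
  return 0;
}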
コード例 #18
ファイル: diffcmd.c プロジェクト: 3615pipou/coopy
/*
** Run a diff between the version zFrom and files on disk.  zFrom might
** be NULL which means to simply show the difference between the edited
** files on disk and the check-out on which they are based.
*/
static void diff_all_against_disk(
  const char *zFrom,        /* Version to difference from */
  const char *zDiffCmd,     /* Use this diff command.  NULL for built-in */
  int ignoreEolWs           /* Ignore end-of-line whitespace */
){
  int vid;
  Blob sql;
  Stmt q;

  vid = db_lget_int("checkout", 0);
  vfile_check_signature(vid, 1);
  blob_zero(&sql);
  db_begin_transaction();
  if( zFrom ){
    int rid = name_to_rid(zFrom);
    if( !is_a_version(rid) ){
      fossil_fatal("no such check-in: %s", zFrom);
    }
    load_vfile_from_rid(rid);
    blob_appendf(&sql,
      "SELECT v2.pathname, v2.deleted, v2.chnged, v2.rid==0, v1.rid"
      "  FROM vfile v1, vfile v2 "
      " WHERE v1.pathname=v2.pathname AND v1.vid=%d AND v2.vid=%d"
      "   AND (v2.deleted OR v2.chnged OR v1.rid!=v2.rid)"
      "UNION "
      "SELECT pathname, 1, 0, 0, 0"
      "  FROM vfile v1"
      " WHERE v1.vid=%d"
      "   AND NOT EXISTS(SELECT 1 FROM vfile v2"
                        " WHERE v2.vid=%d AND v2.pathname=v1.pathname)"
      "UNION "
      "SELECT pathname, 0, 0, 1, 0"
      "  FROM vfile v2"
      " WHERE v2.vid=%d"
      "   AND NOT EXISTS(SELECT 1 FROM vfile v1"
                        " WHERE v1.vid=%d AND v1.pathname=v2.pathname)"
      " ORDER BY 1",
      rid, vid, rid, vid, vid, rid
    );
  }else{
    blob_appendf(&sql,
      "SELECT pathname, deleted, chnged , rid==0, rid"
      "  FROM vfile"
      " WHERE vid=%d"
      "   AND (deleted OR chnged OR rid==0)"
      " ORDER BY pathname",
      vid
    );
  }
  db_prepare(&q, blob_str(&sql));
  while( db_step(&q)==SQLITE_ROW ){
    const char *zPathname = db_column_text(&q,0);
    int isDeleted = db_column_int(&q, 1);
    int isChnged = db_column_int(&q,2);
    int isNew = db_column_int(&q,3);
    char *zFullName = mprintf("%s%s", g.zLocalRoot, zPathname);
    if( isDeleted ){
      printf("DELETED  %s\n", zPathname);
    }else if( access(zFullName, 0) ){
      printf("MISSING  %s\n", zPathname);
    }else if( isNew ){
      printf("ADDED    %s\n", zPathname);
    }else if( isChnged==3 ){
      printf("ADDED_BY_MERGE %s\n", zPathname);
    }else{
      int srcid = db_column_int(&q, 4);
      Blob content;
      content_get(srcid, &content);
      printf("Index: %s\n======================================="
             "============================\n",
             zPathname
      );
      diff_file(&content, zFullName, zPathname, zDiffCmd, ignoreEolWs);
      blob_reset(&content);
    }
    free(zFullName);
  }
  db_finalize(&q);
  db_end_transaction(1);  /* ROLLBACK */
}
コード例 #19
/*
** Impl for /json/report/run
**
** Options/arguments:
**
** report=int (CLI: -report # or -r #) is the report number to run.
**
** limit=int (CLI: -limit # or -n #) -n is for compat. with other commands.
**
** format=a|o Specifies result format: a=each row is an array, o=each
** row is an object.  Default=o.
*/
static cson_value * json_report_run(){
  int nReport;
  Stmt q = empty_Stmt;
  cson_object * pay = NULL;
  cson_array * tktList = NULL;
  char const * zFmt;
  char * zTitle = NULL;
  Blob sql = empty_blob;
  int limit = 0;
  cson_value * colNames = NULL;
  int i;

  if(!g.perm.RdTkt){
    json_set_err(FSL_JSON_E_DENIED,
                 "Requires 'r' privileges.");
    return NULL;
  }
  nReport = json_report_get_number(3);
  if(nReport <=0){
    json_set_err(FSL_JSON_E_MISSING_ARGS,
                 "Missing or invalid 'number' (-n) parameter.");
    goto error;
  }
  zFmt = json_find_option_cstr2("format",NULL,"f",3);
  if(!zFmt) zFmt = "o";
  db_prepare(&q,
             "SELECT sqlcode, "
             " title"
             " FROM reportfmt"
             " WHERE rn=%d",
             nReport);
  if(SQLITE_ROW != db_step(&q)){
    json_set_err(FSL_JSON_E_INVALID_ARGS,
                 "Report number %d not found.",
                 nReport);
    db_finalize(&q);
    goto error;
  }

  limit = json_find_option_int("limit",NULL,"n",-1);

  
  /* Copy over report's SQL...*/
  blob_append(&sql, db_column_text(&q,0), -1);
  zTitle = mprintf("%s", db_column_text(&q,1));
  db_finalize(&q);
  db_prepare(&q, "%s", blob_str(&sql));

  /** Build the response... */
  pay = cson_new_object();

  cson_object_set(pay, "report", json_new_int(nReport));
  cson_object_set(pay, "title", json_new_string(zTitle));
  if(limit>0){
    cson_object_set(pay, "limit", json_new_int((limit<0) ? 0 : limit));
  }
  free(zTitle);
  zTitle = NULL;

  if(g.perm.TktFmt){
    cson_object_set(pay, "sqlcode",
                    cson_value_new_string(blob_str(&sql),
                                          (unsigned int)blob_size(&sql)));
  }
  blob_reset(&sql);

  colNames = cson_sqlite3_column_names(q.pStmt);
  cson_object_set( pay, "columnNames", colNames);
  for( i = 0 ; ((limit>0) ?(i < limit) : 1)
         && (SQLITE_ROW == db_step(&q));
       ++i){
    cson_value * row = ('a'==*zFmt)
      ? cson_sqlite3_row_to_array(q.pStmt)
      : cson_sqlite3_row_to_object2(q.pStmt,
                                    cson_value_get_array(colNames));
    if(row && !tktList){
      tktList = cson_new_array();
    }
    cson_array_append(tktList, row);
  }
  db_finalize(&q);
  cson_object_set(pay, "tickets",
                  tktList ? cson_array_value(tktList) : cson_value_null());

  goto end;

  error:
  assert(0 != g.json.resultCode);
  cson_value_free( cson_object_value(pay) );
  pay = NULL;
  end:

  return pay ? cson_object_value(pay) : NULL;

}
コード例 #20
ファイル: url_.c プロジェクト: sambassett/Fossil-Repo
/*
** Resets the given URL object, deallocating any memory
** it uses.
*/
void url_reset(HQuery *p){
  blob_reset(&p->url);
  fossil_free(p->azName);
  fossil_free(p->azValue);
  url_initialize(p, p->zBase);
}
コード例 #21
ファイル: vfile.c プロジェクト: ilchenkoanna/VCS
/*
** Write all files from vid to the disk.  Or if vid==0 and id!=0
** write just the specific file where VFILE.ID=id.
*/
void vfile_to_disk(
  int vid,               /* vid to write to disk */
  int id,                /* Write this one file, if not zero */
  int verbose,           /* Output progress information */
  int promptFlag         /* Prompt user to confirm overwrites */
){
  Stmt q;
  Blob content;
  int nRepos = strlen(g.zLocalRoot);

  if( vid>0 && id==0 ){
    db_prepare(&q, "SELECT id, %Q || pathname, mrid, isexe, islink"
                   "  FROM vfile"
                   " WHERE vid=%d AND mrid>0",
                   g.zLocalRoot, vid);
  }else{
    assert( vid==0 && id>0 );
    db_prepare(&q, "SELECT id, %Q || pathname, mrid, isexe, islink"
                   "  FROM vfile"
                   " WHERE id=%d AND mrid>0",
                   g.zLocalRoot, id);
  }
  while( db_step(&q)==SQLITE_ROW ){
    int id, rid, isExe, isLink;
    const char *zName;

    id = db_column_int(&q, 0);
    zName = db_column_text(&q, 1);
    rid = db_column_int(&q, 2);
    isExe = db_column_int(&q, 3);
    isLink = db_column_int(&q, 4);
    content_get(rid, &content);
    if( file_is_the_same(&content, zName) ){
      blob_reset(&content);
      if( file_wd_setexe(zName, isExe) ){
        db_multi_exec("UPDATE vfile SET mtime=%lld WHERE id=%d",
                      file_wd_mtime(zName), id);
      }
      continue;
    }
    if( promptFlag && file_wd_size(zName)>=0 ){
      Blob ans;
      char *zMsg;
      char cReply;
      zMsg = mprintf("overwrite %s (a=always/y/N)? ", zName);
      prompt_user(zMsg, &ans);
      free(zMsg);
      cReply = blob_str(&ans)[0];
      blob_reset(&ans);
      if( cReply=='a' || cReply=='A' ){
        promptFlag = 0;
        cReply = 'y';
      }
      if( cReply=='n' || cReply=='N' ){
        blob_reset(&content);
        continue;
      }
    }
    if( verbose ) vcs_print("%s\n", &zName[nRepos]);
    if( file_wd_isdir(zName) == 1 ){
      /*TODO(dchest): remove directories? */
      vcs_fatal("%s is directory, cannot overwrite\n", zName);
    }    
    if( file_wd_size(zName)>=0 && (isLink || file_wd_islink(zName)) ){
      file_delete(zName);
    }
    if( isLink ){
      symlink_create(blob_str(&content), zName);
    }else{
      blob_write_to_file(&content, zName);
    }
    file_wd_setexe(zName, isExe);
    blob_reset(&content);
    db_multi_exec("UPDATE vfile SET mtime=%lld WHERE id=%d",
                  file_wd_mtime(zName), id);
  }
  db_finalize(&q);
}
コード例 #22
/*
** Rebuild cross-referencing information for the artifact
** rid with content pBase and all of its descendants.  This
** routine clears the content buffer before returning.
**
** If the zFNameFormat variable is set, then this routine is
** called to run "fossil deconstruct" instead of the usual
** "fossil rebuild".  In that case, instead of rebuilding the
** cross-referencing information, write the file content out
** to the appropriate directory.
**
** In both cases, this routine automatically recurses to process
** other artifacts that are deltas off of the current artifact.
** This is the most efficient way to extract all of the original
** artifact content from the Fossil repository.
*/
static void rebuild_step(int rid, int size, Blob *pBase){
  static Stmt q1;
  Bag children;
  Blob copy;
  Blob *pUse;
  int nChild, i, cid;

  while( rid>0 ){

    /* Fix up the "blob.size" field if needed. */
    if( size!=blob_size(pBase) ){
      db_multi_exec(
         "UPDATE blob SET size=%d WHERE rid=%d", blob_size(pBase), rid
      );
    }
  
    /* Find all children of artifact rid */
    db_static_prepare(&q1, "SELECT rid FROM delta WHERE srcid=:rid");
    db_bind_int(&q1, ":rid", rid);
    bag_init(&children);
    while( db_step(&q1)==SQLITE_ROW ){
      int cid = db_column_int(&q1, 0);
      if( !bag_find(&bagDone, cid) ){
        bag_insert(&children, cid);
      }
    }
    nChild = bag_count(&children);
    db_reset(&q1);
  
    /* Crosslink the artifact */
    if( nChild==0 ){
      pUse = pBase;
    }else{
      blob_copy(&copy, pBase);
      pUse = &copy;
    }
    if( zFNameFormat==0 ){
      /* We are doing "fossil rebuild" */
      manifest_crosslink(rid, pUse, MC_NONE);
    }else{
      /* We are doing "fossil deconstruct" */
      char *zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid);
      char *zFile = mprintf(zFNameFormat, zUuid, zUuid+prefixLength);
      blob_write_to_file(pUse,zFile);
      free(zFile);
      free(zUuid);
      blob_reset(pUse);
    }
    assert( blob_is_reset(pUse) );
    rebuild_step_done(rid);
  
    /* Call all children recursively */
    rid = 0;
    for(cid=bag_first(&children), i=1; cid; cid=bag_next(&children, cid), i++){
      static Stmt q2;
      int sz;
      db_static_prepare(&q2, "SELECT content, size FROM blob WHERE rid=:rid");
      db_bind_int(&q2, ":rid", cid);
      if( db_step(&q2)==SQLITE_ROW && (sz = db_column_int(&q2,1))>=0 ){
        Blob delta, next;
        db_ephemeral_blob(&q2, 0, &delta);
        blob_uncompress(&delta, &delta);
        blob_delta_apply(pBase, &delta, &next);
        blob_reset(&delta);
        db_reset(&q2);
        if( i<nChild ){
          rebuild_step(cid, sz, &next);
        }else{
          /* Tail recursion */
          rid = cid;
          size = sz;
          blob_reset(pBase);
          *pBase = next;
        }
      }else{
        db_reset(&q2);
        blob_reset(pBase);
      }
    }
    bag_clear(&children);
  }
}
コード例 #23
ファイル: url_.c プロジェクト: sambassett/Fossil-Repo
/*
** Parse the given URL.  Populate members of the provided UrlData structure
** as follows:
**
**      isFile      True if FILE:
**      isHttps     True if HTTPS:
**      isSsh       True if SSH:
**      protocol    "http", "https", "ssh", or "file"
**      name        Hostname for HTTP:, HTTPS:, SSH:.  Filename for FILE:
**      port        TCP port number for HTTP or HTTPS.
**      dfltPort    Default TCP port number (80 or 443).
**      path        Path name for HTTP or HTTPS.
**      user        Userid.
**      passwd      Password.
**      hostname    HOST:PORT or just HOST if port is the default.
**      canonical   The URL in canonical form, omitting the password
**
*/
void url_parse_local(
  const char *zUrl,
  unsigned int urlFlags,
  UrlData *pUrlData
){
  int i, j, c;
  char *zFile = 0;

  if( zUrl==0 ){
    zUrl = db_get("last-sync-url", 0);
    if( zUrl==0 ) return;
    if( pUrlData->passwd==0 ){
      pUrlData->passwd = unobscure(db_get("last-sync-pw", 0));
    }
  }

  if( strncmp(zUrl, "http://", 7)==0
   || strncmp(zUrl, "https://", 8)==0
   || strncmp(zUrl, "ssh://", 6)==0
  ){
    int iStart;
    char *zLogin;
    char *zExe;
    char cQuerySep = '?';

    pUrlData->isFile = 0;
    pUrlData->useProxy = 0;
    if( zUrl[4]=='s' ){
      pUrlData->isHttps = 1;
      pUrlData->protocol = "https";
      pUrlData->dfltPort = 443;
      iStart = 8;
    }else if( zUrl[0]=='s' ){
      pUrlData->isSsh = 1;
      pUrlData->protocol = "ssh";
      pUrlData->dfltPort = 22;
      pUrlData->fossil = "fossil";
      iStart = 6;
    }else{
      pUrlData->isHttps = 0;
      pUrlData->protocol = "http";
      pUrlData->dfltPort = 80;
      iStart = 7;
    }
    for(i=iStart; (c=zUrl[i])!=0 && c!='/' && c!='@'; i++){}
    if( c=='@' ){
      /* Parse up the user-id and password */
      for(j=iStart; j<i && zUrl[j]!=':'; j++){}
      pUrlData->user = mprintf("%.*s", j-iStart, &zUrl[iStart]);
      dehttpize(pUrlData->user);
      if( j<i ){
        if( ( urlFlags & URL_REMEMBER ) && pUrlData->isSsh==0 ){
          urlFlags |= URL_ASK_REMEMBER_PW;
        }
        pUrlData->passwd = mprintf("%.*s", i-j-1, &zUrl[j+1]);
        dehttpize(pUrlData->passwd);
      }
      if( pUrlData->isSsh ){
        urlFlags &= ~URL_ASK_REMEMBER_PW;
      }
      zLogin = mprintf("%t@", pUrlData->user);
      for(j=i+1; (c=zUrl[j])!=0 && c!='/' && c!=':'; j++){}
      pUrlData->name = mprintf("%.*s", j-i-1, &zUrl[i+1]);
      i = j;
    }else{
      int inSquare = 0;
      int n;
      for(i=iStart; (c=zUrl[i])!=0 && c!='/' && (inSquare || c!=':'); i++){
        if( c=='[' ) inSquare = 1;
        if( c==']' ) inSquare = 0;
      }
      pUrlData->name = mprintf("%.*s", i-iStart, &zUrl[iStart]);
      n = strlen(pUrlData->name);
      if( pUrlData->name[0]=='[' && n>2 && pUrlData->name[n-1]==']' ){
        pUrlData->name++;
        pUrlData->name[n-2] = 0;
      }
      zLogin = mprintf("");
    }
    url_tolower(pUrlData->name);
    if( c==':' ){
      pUrlData->port = 0;
      i++;
      while( (c = zUrl[i])!=0 && fossil_isdigit(c) ){
        pUrlData->port = pUrlData->port*10 + c - '0';
        i++;
      }
      pUrlData->hostname = mprintf("%s:%d", pUrlData->name, pUrlData->port);
    }else{
      pUrlData->port = pUrlData->dfltPort;
      pUrlData->hostname = pUrlData->name;
    }
    dehttpize(pUrlData->name);
    pUrlData->path = mprintf("%s", &zUrl[i]);
    for(i=0; pUrlData->path[i] && pUrlData->path[i]!='?'; i++){}
    if( pUrlData->path[i] ){
      pUrlData->path[i] = 0;
      i++;
    }
    zExe = mprintf("");
    while( pUrlData->path[i]!=0 ){
      char *zName, *zValue;
      zName = &pUrlData->path[i];
      zValue = zName;
      while( pUrlData->path[i] && pUrlData->path[i]!='=' ){ i++; }
      if( pUrlData->path[i]=='=' ){
        pUrlData->path[i] = 0;
        i++;
        zValue = &pUrlData->path[i];
        while( pUrlData->path[i] && pUrlData->path[i]!='&' ){ i++; }
      }
      if( pUrlData->path[i] ){
        pUrlData->path[i] = 0;
        i++;
      }
      if( fossil_strcmp(zName,"fossil")==0 ){
        pUrlData->fossil = zValue;
        dehttpize(pUrlData->fossil);
        zExe = mprintf("%cfossil=%T", cQuerySep, pUrlData->fossil);
        cQuerySep = '&';
      }
    }

    dehttpize(pUrlData->path);
    if( pUrlData->dfltPort==pUrlData->port ){
      pUrlData->canonical = mprintf(
        "%s://%s%T%T%s",
        pUrlData->protocol, zLogin, pUrlData->name, pUrlData->path, zExe
      );
    }else{
      pUrlData->canonical = mprintf(
        "%s://%s%T:%d%T%s",
        pUrlData->protocol, zLogin, pUrlData->name, pUrlData->port,
        pUrlData->path, zExe
      );
    }
    if( pUrlData->isSsh && pUrlData->path[1] ) pUrlData->path++;
    free(zLogin);
  }else if( strncmp(zUrl, "file:", 5)==0 ){
    pUrlData->isFile = 1;
    if( zUrl[5]=='/' && zUrl[6]=='/' ){
      i = 7;
    }else{
      i = 5;
    }
    zFile = mprintf("%s", &zUrl[i]);
  }else if( file_isfile(zUrl) ){
    pUrlData->isFile = 1;
    zFile = mprintf("%s", zUrl);
  }else if( file_isdir(zUrl)==1 ){
    zFile = mprintf("%s/FOSSIL", zUrl);
    if( file_isfile(zFile) ){
      pUrlData->isFile = 1;
    }else{
      free(zFile);
      zFile = 0;
      fossil_fatal("unknown repository: %s", zUrl);
    }
  }else{
    fossil_fatal("unknown repository: %s", zUrl);
  }
  if( urlFlags ) pUrlData->flags = urlFlags;
  if( pUrlData->isFile ){
    Blob cfile;
    dehttpize(zFile);
    file_canonical_name(zFile, &cfile, 0);
    free(zFile);
    zFile = 0;
    pUrlData->protocol = "file";
    pUrlData->path = "";
    pUrlData->name = mprintf("%b", &cfile);
    pUrlData->canonical = mprintf("file://%T", pUrlData->name);
    blob_reset(&cfile);
  }else if( pUrlData->user!=0 && pUrlData->passwd==0 && (urlFlags & URL_PROMPT_PW) ){
    url_prompt_for_password_local(pUrlData);
  }else if( pUrlData->user!=0 && ( urlFlags & URL_ASK_REMEMBER_PW ) ){
    if( isatty(fileno(stdin)) ){
      if( save_password_prompt(pUrlData->passwd) ){
        pUrlData->flags = urlFlags |= URL_REMEMBER_PW;
      }else{
        pUrlData->flags = urlFlags &= ~URL_REMEMBER_PW;
      }
    }
  }
}
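The first step of url_parse_local() is visible in its prefix tests: the scheme selects the protocol string, the default TCP port, and where the rest of the URL begins. A standalone sketch of just that step (not from the original sample; the URL is a placeholder):

/* Pick protocol and default port from a URL prefix, as url_parse_local() does. */
#include <stdio.h>
#include <string.h>

int main(void){
  const char *zUrl = "https://example.org/home";
  const char *zProtocol;
  int dfltPort, iStart;

  if( strncmp(zUrl, "https://", 8)==0 ){
    zProtocol = "https"; dfltPort = 443; iStart = 8;
  }else if( strncmp(zUrl, "ssh://", 6)==0 ){
    zProtocol = "ssh";   dfltPort = 22;  iStart = 6;
  }else if( strncmp(zUrl, "http://", 7)==0 ){
    zProtocol = "http";  dfltPort = 80;  iStart = 7;
  }else{
    zProtocol = "file";  dfltPort = 0;   iStart = 0;
  }
  printf("%s (default port %d), remainder: %s\n",
         zProtocol, dfltPort, zUrl+iStart);
  return 0;
}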
コード例 #24
ファイル: update.c プロジェクト: digsrc/fossil
/*
** COMMAND: revert
**
** Usage: %fossil revert ?-r REVISION? ?FILE ...?
**
** Revert to the current repository version of FILE, or to
** the version associated with baseline REVISION if the -r flag
** appears.
**
** If FILE was part of a rename operation, both the original file
** and the renamed file are reverted.
**
** Revert all files if no file name is provided.
**
** If a file is reverted accidentally, it can be restored using
** the "fossil undo" command.
**
** Options:
**   -r REVISION    revert given FILE(s) back to given REVISION
**
** See also: redo, undo, update
*/
void revert_cmd(void) {
    const char *zFile;
    const char *zRevision;
    Blob record;
    int i;
    int errCode;
    Stmt q;

    undo_capture_command_line();
    zRevision = find_option("revision", "r", 1);
    verify_all_options();

    if( g.argc<2 ) {
        usage("?OPTIONS? [FILE] ...");
    }
    if( zRevision && g.argc<3 ) {
        fossil_fatal("the --revision option does not work for the entire tree");
    }
    db_must_be_within_tree();
    db_begin_transaction();
    undo_begin();
    db_multi_exec("CREATE TEMP TABLE torevert(name UNIQUE);");

    if( g.argc>2 ) {
        for(i=2; i<g.argc; i++) {
            Blob fname;
            zFile = mprintf("%/", g.argv[i]);
            blob_zero(&fname);
            file_tree_name(zFile, &fname, 0, 1);
            db_multi_exec(
                "REPLACE INTO torevert VALUES(%B);"
                "INSERT OR IGNORE INTO torevert"
                " SELECT pathname"
                "   FROM vfile"
                "  WHERE origname=%B;",
                &fname, &fname
            );
            blob_reset(&fname);
        }
    } else {
        int vid;
        vid = db_lget_int("checkout", 0);
        vfile_check_signature(vid, 0);
        db_multi_exec(
            "DELETE FROM vmerge;"
            "INSERT OR IGNORE INTO torevert "
            " SELECT pathname"
            "   FROM vfile "
            "  WHERE chnged OR deleted OR rid=0 OR pathname!=origname;"
        );
    }
    db_multi_exec(
        "INSERT OR IGNORE INTO torevert"
        " SELECT origname"
        "   FROM vfile"
        "  WHERE origname!=pathname AND pathname IN (SELECT name FROM torevert);"
    );
    blob_zero(&record);
    db_prepare(&q, "SELECT name FROM torevert");
    if( zRevision==0 ) {
        int vid = db_lget_int("checkout", 0);
        zRevision = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", vid);
    }
    while( db_step(&q)==SQLITE_ROW ) {
        int isExe = 0;
        int isLink = 0;
        char *zFull;
        zFile = db_column_text(&q, 0);
        zFull = mprintf("%/%/", g.zLocalRoot, zFile);
        errCode = historical_version_of_file(zRevision, zFile, &record,
                                             &isLink, &isExe, 0, 2);
        if( errCode==2 ) {
            if( db_int(0, "SELECT rid FROM vfile WHERE pathname=%Q OR origname=%Q",
                       zFile, zFile)==0 ) {
                fossil_print("UNMANAGE %s\n", zFile);
            } else {
                undo_save(zFile);
                file_delete(zFull);
                fossil_print("DELETE   %s\n", zFile);
            }
            db_multi_exec(
                "UPDATE OR REPLACE vfile"
                "   SET pathname=origname, origname=NULL"
                " WHERE pathname=%Q AND origname!=pathname;"
                "DELETE FROM vfile WHERE pathname=%Q",
                zFile, zFile
            );
        } else {
            sqlite3_int64 mtime;
            undo_save(zFile);
            if( file_wd_size(zFull)>=0 && (isLink || file_wd_islink(0)) ) {
                file_delete(zFull);
            }
            if( isLink ) {
                symlink_create(blob_str(&record), zFull);
            } else {
                blob_write_to_file(&record, zFull);
            }
            file_wd_setexe(zFull, isExe);
            fossil_print("REVERT   %s\n", zFile);
            mtime = file_wd_mtime(zFull);
            db_multi_exec(
                "UPDATE vfile"
                "   SET mtime=%lld, chnged=0, deleted=0, isexe=%d, islink=%d,mrid=rid"
                " WHERE pathname=%Q OR origname=%Q",
                mtime, isExe, isLink, zFile, zFile
            );
        }
        blob_reset(&record);
        free(zFull);
    }
    db_finalize(&q);
    undo_finish();
    db_end_transaction(0);
}
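For reference, two hypothetical invocations matching the usage text above (the check-in name is a placeholder):

  fossil revert -r a1b2c3 src/main.c
  fossil revert

The first restores one file to its content in check-in a1b2c3; the second, with no arguments, reverts every file that the query above finds changed, deleted, added, or renamed back to the current checkout version.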
コード例 #25
ファイル: file.c プロジェクト: digsrc/fossil
/*
** Copy symbolic link from zFrom to zTo.
*/
void symlink_copy(const char *zFrom, const char *zTo){
  Blob content;
  blob_read_link(&content, zFrom);
  symlink_create(blob_str(&content), zTo);
  blob_reset(&content);
}
コード例 #26
ファイル: blob_.c プロジェクト: LitleWaffle/sampleDirectory
void blobarray_reset(Blob *aBlob, int n){
  int i;
  for(i=0; i<n; i++) blob_reset(&aBlob[i]);
}
コード例 #27
ファイル: content.c プロジェクト: LitleWaffle/sampleDirectory
/*
** Extract the content for ID rid and put it into the
** uninitialized blob.  Return 1 on success.  If the record
** is a phantom, zero pBlob and return 0.
*/
int content_get(int rid, Blob *pBlob){
  int rc;
  int i;
  int nextRid;

  assert( g.repositoryOpen );
  blob_zero(pBlob);
  if( rid==0 ) return 0;

  /* Early out if we know the content is not available */
  if( bag_find(&contentCache.missing, rid) ){
    return 0;
  }

  /* Look for the artifact in the cache first */
  if( bag_find(&contentCache.inCache, rid) ){
    for(i=0; i<contentCache.n; i++){
      if( contentCache.a[i].rid==rid ){
        blob_copy(pBlob, &contentCache.a[i].content);
        contentCache.a[i].age = contentCache.nextAge++;
        return 1;
      }
    }
  }

  nextRid = findSrcid(rid);
  if( nextRid==0 ){
    rc = content_of_blob(rid, pBlob);
  }else{
    int n = 1;
    int nAlloc = 10;
    int *a = 0;
    int mx;
    Blob delta, next;

    a = fossil_malloc( sizeof(a[0])*nAlloc );
    a[0] = rid;
    a[1] = nextRid;
    n = 1;
    while( !bag_find(&contentCache.inCache, nextRid)
        && (nextRid = findSrcid(nextRid))>0 ){
      n++;
      if( n>=nAlloc ){
        if( n>db_int(0, "SELECT max(rid) FROM blob") ){
          fossil_panic("infinite loop in DELTA table");
        }
        nAlloc = nAlloc*2 + 10;
        a = fossil_realloc(a, nAlloc*sizeof(a[0]));
      }
      a[n] = nextRid;
    }
    mx = n;
    rc = content_get(a[n], pBlob);
    n--;
    while( rc && n>=0 ){
      rc = content_of_blob(a[n], &delta);
      if( rc ){
        blob_delta_apply(pBlob, &delta, &next);
        blob_reset(&delta);
        if( (mx-n)%8==0 ){
          content_cache_insert(a[n+1], pBlob);
        }else{
          blob_reset(pBlob);
        }
        *pBlob = next;
      }
      n--;
    }
    free(a);
    if( !rc ) blob_reset(pBlob);
  }
  if( rc==0 ){
    bag_insert(&contentCache.missing, rid);
  }else{
    bag_insert(&contentCache.available, rid);
  }
  return rc;
}
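A minimal usage sketch for content_get() (not from the original sample), assuming the internal declarations for Blob, content_get(), blob_size(), and blob_reset() from the sources shown here are in scope and a repository is already open; the rid value is a placeholder:

/* Print the size of an artifact, or report that it is a phantom. */
#include <stdio.h>

void print_artifact_size(int rid){
  Blob content;
  if( content_get(rid, &content) ){   /* fills content on success */
    printf("artifact %d: %d bytes\n", rid, blob_size(&content));
    blob_reset(&content);             /* release the reconstructed content */
  }else{
    printf("artifact %d is a phantom or unavailable\n", rid);
  }
}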
コード例 #28
ファイル: json_user.c プロジェクト: digsrc/fossil
/*
** Expects pUser to contain fossil user fields in JSON form: name,
** uid, info, capabilities, password.
**
** At least one of (name, uid) must be included. All others are
** optional and their db fields will not be updated if those fields
** are not included in pUser.
**
** If uid is specified then name may refer to a _new_ name
** for a user, otherwise the name must refer to an existing user.
** If uid=-1 then the name must be specified and a new user is
** created (fails if one already exists).
**
** If uid is not set, this function might modify pUser to contain the
** db-found (or inserted) user ID.
**
** On error g.json's error state is set and one of the FSL_JSON_E_xxx
** values from FossilJsonCodes is returned.
**
** On success the db record for the given user is updated.
**
** Requires either Admin, Setup, or Password access. Non-admin/setup
** users can only change their own information. Non-setup users may
** not modify the 's' permission. Admin users without setup
** permissions may not edit any other user who has the 's' permission.
**
*/
int json_user_update_from_json( cson_object * pUser ){
#define CSTR(X) cson_string_cstr(cson_value_get_string( cson_object_get(pUser, X ) ))
  char const * zName = CSTR("name");
  char const * zNameNew = zName;
  char * zNameFree = NULL;
  char const * zInfo = CSTR("info");
  char const * zCap = CSTR("capabilities");
  char const * zPW = CSTR("password");
  cson_value const * forceLogout = cson_object_get(pUser, "forceLogout");
  int gotFields = 0;
#undef CSTR
  cson_int_t uid = cson_value_get_integer( cson_object_get(pUser, "uid") );
  char const tgtHasSetup = zCap && (NULL!=strchr(zCap, 's'));
  char tgtHadSetup = 0;
  Blob sql = empty_blob;
  Stmt q = empty_Stmt;

#if 0
  if(!g.perm.Admin && !g.perm.Setup && !g.perm.Password){
    return json_set_err( FSL_JSON_E_DENIED,
                         "Password change requires 'a', 's', "
                         "or 'p' permissions.");
  }
#endif
  if(uid<=0 && (!zName||!*zName)){
    return json_set_err(FSL_JSON_E_MISSING_ARGS,
                        "One of 'uid' or 'name' is required.");
  }else if(uid>0){
    zNameFree = db_text(NULL, "SELECT login FROM user WHERE uid=%d",uid);
    if(!zNameFree){
      return json_set_err(FSL_JSON_E_RESOURCE_NOT_FOUND,
                          "No login found for uid %d.", uid);
    }
    zName = zNameFree;
  }else if(-1==uid){
    /* try to create a new user */
    if(!g.perm.Admin && !g.perm.Setup){
      json_set_err(FSL_JSON_E_DENIED,
                   "Requires 'a' or 's' privileges.");
      goto error;
    }else if(!zName || !*zName){
      json_set_err(FSL_JSON_E_MISSING_ARGS,
                   "No name specified for new user.");
      goto error;
    }else if( db_exists("SELECT 1 FROM user WHERE login=%Q", zName) ){
      json_set_err(FSL_JSON_E_RESOURCE_ALREADY_EXISTS,
                   "User %s already exists.", zName);
      goto error;
    }else{
      Stmt ins = empty_Stmt;
      db_prepare(&ins, "INSERT INTO user (login) VALUES(%Q)",zName);
      db_step( &ins );
      db_finalize(&ins);
      uid = db_int(0,"SELECT uid FROM user WHERE login=%Q", zName);
      assert(uid>0);
      zNameNew = zName;
      cson_object_set( pUser, "uid", cson_value_new_integer(uid) );
    }
  }else{
    uid = db_int(0,"SELECT uid FROM user WHERE login=%Q", zName);
    if(uid<=0){
      json_set_err(FSL_JSON_E_RESOURCE_NOT_FOUND,
                   "No login found for user [%s].", zName);
      goto error;
    }
    cson_object_set( pUser, "uid", cson_value_new_integer(uid) );
  }

  /* Maintenance note: all error-returns from here on out should go
     via 'goto error' in order to clean up.
  */
  
  if(uid != g.userUid){
    if(!g.perm.Admin && !g.perm.Setup){
      json_set_err(FSL_JSON_E_DENIED,
                   "Changing another user's data requires "
                   "'a' or 's' privileges.");
      goto error;
    }
  }
  /* check if the target uid currently has setup rights. */
  tgtHadSetup = db_int(0,"SELECT 1 FROM user where uid=%d"
                       " AND cap GLOB '*s*'", uid);

  if((tgtHasSetup || tgtHadSetup) && !g.perm.Setup){
    /*
      Do not allow a non-setup user to set or remove setup
      privileges. setup.c uses similar logic.
    */
    json_set_err(FSL_JSON_E_DENIED,
                 "Modifying 's' users/privileges requires "
                 "'s' privileges.");
    goto error;
  }
  /*
    Potential todo: do not allow a setup user to remove 's' from
    himself, to avoid locking himself out?
  */

  blob_append(&sql, "UPDATE user SET",-1 );
  blob_append(&sql, " mtime=cast(strftime('%s') AS INTEGER)", -1);

  if((uid>0) && zNameNew){
    /* Check for name change... */
    if(0!=strcmp(zName,zNameNew)){
      if( (!g.perm.Admin && !g.perm.Setup)
          && (zName != zNameNew)){
        json_set_err( FSL_JSON_E_DENIED,
                      "Modifying user names requires 'a' or 's' privileges.");
        goto error;
      }
      forceLogout = cson_value_true()
        /* reminders: 1) does not allocate.
           2) we do this because changing a name
           invalidates any login token because the old name
           is part of the token hash.
        */;
      blob_append_sql(&sql, ", login=%Q", zNameNew);
      ++gotFields;
    }
  }

  if( zCap && *zCap ){
    if(!g.perm.Admin && !g.perm.Setup){
      /* we "could" arguably silently ignore cap in this case. */
      json_set_err(FSL_JSON_E_DENIED,
                   "Changing capabilities requires 'a' or 's' privileges.");
      goto error;
    }
    blob_append_sql(&sql, ", cap=%Q", zCap);
    ++gotFields;
  }

  if( zPW && *zPW ){
    if(!g.perm.Admin && !g.perm.Setup && !g.perm.Password){
      json_set_err( FSL_JSON_E_DENIED,
                    "Password change requires 'a', 's', "
                    "or 'p' permissions.");
      goto error;
    }else{
#define TRY_LOGIN_GROUP 0 /* login group support is not yet implemented. */
#if !TRY_LOGIN_GROUP
      char * zPWHash = NULL;
      ++gotFields;
      zPWHash = sha1_shared_secret(zPW, zNameNew ? zNameNew : zName, NULL);
      blob_append_sql(&sql, ", pw=%Q", zPWHash);
      free(zPWHash);
#else
      ++gotFields;
      blob_append_sql(&sql, ", pw=coalesce(shared_secret(%Q,%Q,"
                   "(SELECT value FROM config WHERE name='project-code')))",
                   zPW, zNameNew ? zNameNew : zName);
      /* shared_secret() func is undefined? */
#endif
    }
  }

  if( zInfo ){
    blob_append_sql(&sql, ", info=%Q", zInfo);
    ++gotFields;
  }

  if((g.perm.Admin || g.perm.Setup)
     && forceLogout && cson_value_get_bool(forceLogout)){
    blob_append(&sql, ", cookie=NULL, cexpire=NULL", -1);
    ++gotFields;
  }
  
  if(!gotFields){
    json_set_err( FSL_JSON_E_MISSING_ARGS,
                  "Required user data are missing.");
    goto error;
  }
  assert(uid>0);
#if !TRY_LOGIN_GROUP
  blob_append_sql(&sql, " WHERE uid=%d", uid);
#else /* need name for login group support :/ */
  blob_append_sql(&sql, " WHERE login=%Q", zName);
#endif
#if 0
  puts(blob_str(&sql));
  cson_output_FILE( cson_object_value(pUser), stdout, NULL );
#endif
  db_prepare(&q, "%s", blob_sql_text(&sql));
  db_exec(&q);
  db_finalize(&q);
#if TRY_LOGIN_GROUP
  if( zPW || cson_value_get_bool(forceLogout) ){
    Blob groupSql = empty_blob;
    char * zErr = NULL;
    blob_append_sql(&groupSql,
      "INSERT INTO user(login)"
      "  SELECT %Q WHERE NOT EXISTS(SELECT 1 FROM user WHERE login=%Q);",
      zName, zName
    );
    blob_append(&groupSql, blob_str(&sql), blob_size(&sql));
    login_group_sql(blob_str(&groupSql), NULL, NULL, &zErr);
    blob_reset(&groupSql);
    if( zErr ){
      json_set_err( FSL_JSON_E_UNKNOWN,
                    "Repo-group update at least partially failed: %s",
                    zErr);
      free(zErr);
      goto error;
    }
  }
#endif /* TRY_LOGIN_GROUP */

#undef TRY_LOGIN_GROUP

  free( zNameFree );
  blob_reset(&sql);
  return 0;

  error:
  assert(0 != g.json.resultCode);
  free(zNameFree);
  blob_reset(&sql);
  return g.json.resultCode;
}
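
For reference, a minimal usage sketch of json_user_update_from_json() follows. It is hypothetical and not part of the original file; it assumes the cson API used by fossil's JSON layer (cson_value_new_object(), cson_value_get_object(), cson_object_set(), cson_value_new_string(), cson_value_free()) with their usual signatures, and that the caller already holds the 'a', 's', or 'p' permission described above.

/* Hypothetical usage sketch, not part of the original source.
** Builds the minimal JSON payload described above ("name" plus an
** optional "info" field) and applies it via json_user_update_from_json(). */
static int user_update_example(void){
  int rc;
  cson_value * payloadV = cson_value_new_object();      /* assumed cson API */
  cson_object * payload = cson_value_get_object(payloadV);
  cson_object_set(payload, "name",
                  cson_value_new_string("alice", 5));
  cson_object_set(payload, "info",
                  cson_value_new_string("example account", 15));
  rc = json_user_update_from_json(payload);
  /* On success the payload now also carries the resolved "uid". */
  cson_value_free(payloadV);
  return rc;  /* 0 on success, else an FSL_JSON_E_xxx code */
}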
Code example #29
0
File: content.c Project: LitleWaffle/sampleDirectory
/*
** COMMAND: test-integrity ?OPTIONS?
**
** Verify that all content can be extracted from the BLOB table correctly.
** If the BLOB table is correct, then the repository can always be
** successfully reconstructed using "fossil rebuild".
**
** Options:
**
**    --parse            Parse all manifests, wikis, tickets, events, and
**                       so forth, reporting any errors found.
*/
void test_integrity(void){
  Stmt q;
  Blob content;
  Blob cksum;
  int n1 = 0;
  int n2 = 0;
  int nErr = 0;
  int total;
  int nCA = 0;
  int anCA[10];
  int bParse = find_option("parse",0,0)!=0;
  db_find_and_open_repository(OPEN_ANY_SCHEMA, 2);
  memset(anCA, 0, sizeof(anCA));

  /* Make sure no public artifact is a delta from a private artifact */
  db_prepare(&q,
    "SELECT "
    "   rid, (SELECT uuid FROM blob WHERE rid=delta.rid),"
    "   srcid, (SELECT uuid FROM blob WHERE rid=delta.srcid)"
    "  FROM delta"
    " WHERE srcid in private AND rid NOT IN private"
  );
  while( db_step(&q)==SQLITE_ROW ){
    int rid = db_column_int(&q, 0);
    const char *zId = db_column_text(&q, 1);
    int srcid = db_column_int(&q, 2);
    const char *zSrc = db_column_text(&q, 3);
    fossil_print(
      "public artifact %S (%d) is a delta from private artifact %S (%d)\n",
      zId, rid, zSrc, srcid
    );
    nErr++;
  }
  db_finalize(&q);
    
  db_prepare(&q, "SELECT rid, uuid, size FROM blob ORDER BY rid");
  total = db_int(0, "SELECT max(rid) FROM blob");
  while( db_step(&q)==SQLITE_ROW ){
    int rid = db_column_int(&q, 0);
    const char *zUuid = db_column_text(&q, 1);
    int size = db_column_int(&q, 2);
    n1++;
    fossil_print("  %d/%d\r", n1, total);
    fflush(stdout);
    if( size<0 ){
      fossil_print("skip phantom %d %s\n", rid, zUuid);
      continue;  /* Ignore phantoms */
    }
    content_get(rid, &content);
    if( blob_size(&content)!=size ){
      fossil_print("size mismatch on artifact %d: wanted %d but got %d\n",
                     rid, size, blob_size(&content));
      nErr++;
    }
    sha1sum_blob(&content, &cksum);
    if( fossil_strcmp(blob_str(&cksum), zUuid)!=0 ){
      fossil_print("checksum mismatch on artifact %d: wanted %s but got %s\n",
                   rid, zUuid, blob_str(&cksum));
      nErr++;
    }
    if( bParse && looks_like_control_artifact(&content) ){
      Blob err;
      int i, n;
      char *z;
      Manifest *p;
      char zFirstLine[400];
      blob_zero(&err);

      z = blob_buffer(&content);
      n = blob_size(&content);
      for(i=0; i<n && z[i] && z[i]!='\n' && i<sizeof(zFirstLine)-1; i++){}
      memcpy(zFirstLine, z, i);
      zFirstLine[i] = 0;
      p = manifest_parse(&content, 0, &err);
      if( p==0 ){
        fossil_print("manifest_parse failed for %s:\n%s\n",
               blob_str(&cksum), blob_str(&err));
        if( strncmp(blob_str(&err), "line 1:", 7)==0 ){
          fossil_print("\"%s\"\n", zFirstLine);
        }
      }else{
        anCA[p->type]++;
        manifest_destroy(p);
        nCA++;
      }
      blob_reset(&err);
    }else{
      blob_reset(&content);
    }
    blob_reset(&cksum);
    n2++;
  }
  db_finalize(&q);
  fossil_print("%d non-phantom blobs (out of %d total) checked:  %d errors\n",
               n2, n1, nErr);
  if( bParse ){
    const char *azType[] = { 0, "manifest", "cluster", "control", "wiki",
                             "ticket", "attachment", "event" };
    int i;
    fossil_print("%d total control artifacts\n", nCA);
    for(i=1; i<count(azType); i++){
      if( anCA[i] ) fossil_print("  %d %ss\n", anCA[i], azType[i]);
    }
  }
}
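
The heart of the loop above is the per-artifact verification: reconstruct the content, compare its size against the recorded size, and compare its SHA1 against the artifact's UUID. As a sketch (a hypothetical helper, not part of the original file, reusing only the fossil helpers already shown above), the check for a single non-phantom blob could be factored like this:

/* Hypothetical helper, not in the original source: verify one
** non-phantom artifact the same way test_integrity() does.
** Returns the number of errors found (0, 1, or 2). */
static int integrity_check_one(int rid, const char *zUuid, int size){
  Blob content, cksum;
  int nErr = 0;
  content_get(rid, &content);                  /* reconstruct full content */
  if( blob_size(&content)!=size ) nErr++;      /* recorded size must match */
  sha1sum_blob(&content, &cksum);              /* hash must equal the UUID */
  if( fossil_strcmp(blob_str(&cksum), zUuid)!=0 ) nErr++;
  blob_reset(&content);
  blob_reset(&cksum);
  return nErr;
}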
Code example #30
0
File: tag.c Project: LitleWaffle/sampleDirectory
/*
** COMMAND: tag
** Usage: %fossil tag SUBCOMMAND ...
**
** Run various subcommands to control tags and properties
**
**     %fossil tag add ?--raw? ?--propagate? TAGNAME CHECK-IN ?VALUE?
**
**         Add a new tag or property to CHECK-IN. The tag will
**         be usable instead of a CHECK-IN in commands such as
**         update and merge.  If the --propagate flag is present,
**         the tag value propagates to all descendants of CHECK-IN
**
**     %fossil tag cancel ?--raw? TAGNAME CHECK-IN
**
**         Remove the tag TAGNAME from CHECK-IN, and also remove
**         the propagation of the tag to any descendants.
**
**     %fossil tag find ?--raw? ?-t|--type TYPE? ?-n|--limit #? TAGNAME
**
**         List all objects that use TAGNAME.  TYPE can be "ci" for
**         checkins or "e" for events. The limit option limits the number
**         of results to the given value.
**
**     %fossil tag list ?--raw? ?CHECK-IN?
**
**         List all tags, or if CHECK-IN is supplied, list
**         all tags and their values for CHECK-IN.
**
** The option --raw allows the manipulation of all types of tags
** used for various internal purposes in fossil. It also shows
** "cancel" tags for the "find" and "list" subcommands. You should
** not use this option to make changes unless you are sure what
** you are doing.
**
** If you need to use a tagname that might be confused with
** a hexadecimal baseline or artifact ID, you can explicitly
** disambiguate it by prefixing it with "tag:". For instance:
**
**   fossil update decaf
**
** will be taken as an artifact or baseline ID and fossil will
** probably complain that no such revision was found. However
**
**   fossil update tag:decaf
**
** will assume that "decaf" is a tag/branch name.
**
** The --date-override and --user-override options are only allowed in
**   %fossil tag add --date-override 'YYYY-MMM-DD HH:MM:SS' \\
**                   --user-override USER
** in order to import history from other SCM systems.
*/
void tag_cmd(void){
  int n;
  int fRaw = find_option("raw","",0)!=0;
  int fPropagate = find_option("propagate","",0)!=0;
  const char *zPrefix = fRaw ? "" : "sym-";
  char const * zFindLimit = find_option("limit","n",1);
  int const nFindLimit = zFindLimit ? atoi(zFindLimit) : -2000;

  db_find_and_open_repository(0, 0);
  if( g.argc<3 ){
    goto tag_cmd_usage;
  }
  n = strlen(g.argv[2]);
  if( n==0 ){
    goto tag_cmd_usage;
  }

  if( strncmp(g.argv[2],"add",n)==0 ){
    char *zValue;
    const char *zDateOvrd = find_option("date-override",0,1);
    const char *zUserOvrd = find_option("user-override",0,1);
    if( g.argc!=5 && g.argc!=6 ){
      usage("add ?--raw? ?--propagate? TAGNAME CHECK-IN ?VALUE?");
    }
    zValue = g.argc==6 ? g.argv[5] : 0;
    db_begin_transaction();
    tag_add_artifact(zPrefix, g.argv[3], g.argv[4], zValue,
                     1+fPropagate,zDateOvrd,zUserOvrd);
    db_end_transaction(0);
  }else

  if( strncmp(g.argv[2],"branch",n)==0 ){
    fossil_fatal("the \"fossil tag branch\" command is discontinued\n"
                 "Use the \"fossil branch new\" command instead.");
  }else

  if( strncmp(g.argv[2],"cancel",n)==0 ){
    if( g.argc!=5 ){
      usage("cancel ?--raw? TAGNAME CHECK-IN");
    }
    db_begin_transaction();
    tag_add_artifact(zPrefix, g.argv[3], g.argv[4], 0, 0, 0, 0);
    db_end_transaction(0);
  }else

  if( strncmp(g.argv[2],"find",n)==0 ){
    Stmt q;
    const char *zType = find_option("type","t",1);
    Blob sql = empty_blob;
    if( zType==0 || zType[0]==0 ) zType = "*";
    if( g.argc!=4 ){
      usage("find ?--raw? ?-t|--type TYPE? ?-n|--limit #? TAGNAME");
    }
    if( fRaw ){
      blob_appendf(&sql,
        "SELECT blob.uuid FROM tagxref, blob"
        " WHERE tagid=(SELECT tagid FROM tag WHERE tagname=%Q)"
        "   AND tagxref.tagtype>0"
        "   AND blob.rid=tagxref.rid",
        g.argv[3]
      );
      if( nFindLimit>0 ){
        blob_appendf(&sql, " LIMIT %d", nFindLimit);
      }
      db_prepare(&q, "%s", blob_str(&sql));
      blob_reset(&sql);
      while( db_step(&q)==SQLITE_ROW ){
        fossil_print("%s\n", db_column_text(&q, 0));
      }
      db_finalize(&q);
    }else{
      int tagid = db_int(0, "SELECT tagid FROM tag WHERE tagname='sym-%q'",
                         g.argv[3]);
      if( tagid>0 ){
        blob_appendf(&sql,
          "%s"
          "  AND event.type GLOB '%q'"
          "  AND blob.rid IN ("
                    " SELECT rid FROM tagxref"
                    "  WHERE tagtype>0 AND tagid=%d"
                    ")"
          " ORDER BY event.mtime DESC",
          timeline_query_for_tty(), zType, tagid
        );
        db_prepare(&q, "%s", blob_str(&sql));
        blob_reset(&sql);
        print_timeline(&q, nFindLimit, 79, 0);
        db_finalize(&q);
      }
    }
  }else

  if( strncmp(g.argv[2],"list",n)==0 ){
    Stmt q;
    if( g.argc==3 ){
      db_prepare(&q, 
        "SELECT tagname FROM tag"
        " WHERE EXISTS(SELECT 1 FROM tagxref"
        "               WHERE tagid=tag.tagid"
        "                 AND tagtype>0)"
        " ORDER BY tagname"
      );
      while( db_step(&q)==SQLITE_ROW ){
        const char *zName = db_column_text(&q, 0);
        if( fRaw ){
          fossil_print("%s\n", zName);
        }else if( strncmp(zName, "sym-", 4)==0 ){
          fossil_print("%s\n", &zName[4]);
        }
      }
      db_finalize(&q);
    }else if( g.argc==4 ){
      int rid = name_to_rid(g.argv[3]);
      db_prepare(&q,
        "SELECT tagname, value FROM tagxref, tag"
        " WHERE tagxref.rid=%d AND tagxref.tagid=tag.tagid"
        "   AND tagtype>%d"
        " ORDER BY tagname",
        rid,
        fRaw ? -1 : 0
      );
      while( db_step(&q)==SQLITE_ROW ){
        const char *zName = db_column_text(&q, 0);
        const char *zValue = db_column_text(&q, 1);
        if( fRaw==0 ){
          if( strncmp(zName, "sym-", 4)!=0 ) continue;
          zName += 4;
        }
        if( zValue && zValue[0] ){
          fossil_print("%s=%s\n", zName, zValue);
        }else{
          fossil_print("%s\n", zName);
        }
      }
      db_finalize(&q);
    }else{
      usage("tag list ?CHECK-IN?");
    }
  }else
  {
    goto tag_cmd_usage;
  }

  /* Cleanup */
  return;

tag_cmd_usage:
  usage("add|cancel|find|list ...");
}
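
A recurring detail in tag_cmd() is the "sym-" prefix: user-visible tags are stored internally with that prefix, while --raw exposes the stored names directly. As a sketch (a hypothetical helper, not part of the original file), resolving a command-line tag name to its tagid mirrors the queries used by the "find" subcommand above:

/* Hypothetical helper, not in the original source: map a tag name as
** typed on the command line to its internal tagid, honoring --raw.
** Returns 0 if no such tag exists. */
static int tag_name_to_tagid(const char *zTagName, int fRaw){
  if( fRaw ){
    /* --raw: look the name up verbatim */
    return db_int(0, "SELECT tagid FROM tag WHERE tagname=%Q", zTagName);
  }
  /* normal mode: symbolic tags carry the "sym-" prefix in the tag table */
  return db_int(0, "SELECT tagid FROM tag WHERE tagname='sym-%q'", zTagName);
}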