Example No. 1
/*
** Usage:   btree_update_meta ID METADATA...
**
** Return meta data
*/
static int btree_update_meta(
  void *NotUsed,
  Tcl_Interp *interp,    /* The TCL interpreter that invoked this command */
  int argc,              /* Number of arguments */
  const char **argv      /* Text of each argument */
){
  Btree *pBt;
  int rc;
  int i;
  int aMeta[SQLITE_N_BTREE_META];

  if( argc!=2+SQLITE_N_BTREE_META ){
    char zBuf[30];
    sqlite3_snprintf(sizeof(zBuf), zBuf,"%d",SQLITE_N_BTREE_META);
    Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
       " ID METADATA...\" (METADATA is ", zBuf, " integers)", 0);
    return TCL_ERROR;
  }
  pBt = sqlite3TestTextToPtr(argv[1]);
  for(i=1; i<SQLITE_N_BTREE_META; i++){
    if( Tcl_GetInt(interp, argv[i+2], &aMeta[i]) ) return TCL_ERROR;
  }
  for(i=1; i<SQLITE_N_BTREE_META; i++){
    sqlite3BtreeEnter(pBt);
    rc = sqlite3BtreeUpdateMeta(pBt, i, aMeta[i]);
    sqlite3BtreeLeave(pBt);
    if( rc!=SQLITE_OK ){
      Tcl_AppendResult(interp, errorName(rc), 0);
      return TCL_ERROR;
    }
  }
  return TCL_OK;
}
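The test command above only writes the meta slots; a natural companion is a read-back pass over the same slots. The sketch below is not part of the original test file (the helper name is made up for illustration); it shows the symmetric use of sqlite3BtreeGetMeta() under the same sqlite3BtreeEnter()/sqlite3BtreeLeave() discipline, assuming the rc-returning form of sqlite3BtreeGetMeta() that Example No. 2 also uses.

/* Illustrative sketch only: read back meta slots 1..SQLITE_N_BTREE_META-1
** and append them to the TCL result.  Assumes the rc-returning form of
** sqlite3BtreeGetMeta(); errorName() is the same helper used above. */
static int btree_read_meta_back(Tcl_Interp *interp, Btree *pBt){
  int i;
  for(i=1; i<SQLITE_N_BTREE_META; i++){
    u32 v;
    int rc;
    char zBuf[30];
    sqlite3BtreeEnter(pBt);
    rc = sqlite3BtreeGetMeta(pBt, i, &v);
    sqlite3BtreeLeave(pBt);
    if( rc!=SQLITE_OK ){
      Tcl_AppendResult(interp, errorName(rc), 0);
      return TCL_ERROR;
    }
    sqlite3_snprintf(sizeof(zBuf), zBuf, "%u", (unsigned)v);
    Tcl_AppendElement(interp, zBuf);
  }
  return TCL_OK;
}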
Example No. 2
/*
** This routine implements the OP_Vacuum opcode of the VDBE.
*/
int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){
  int rc = SQLITE_OK;     /* Return code from service routines */
  const char *zFilename;  /* full pathname of the database file */
  int nFilename;          /* number of characters  in zFilename[] */
  char *zTemp = 0;        /* a temporary file in same directory as zFilename */
  Btree *pMain;           /* The database being vacuumed */
  Btree *pTemp;
  char *zSql = 0;
  int saved_flags;       /* Saved value of the db->flags */
  Db *pDb = 0;           /* Database to detach at end of vacuum */

  /* Save the current value of the write-schema flag before setting it. */
  saved_flags = db->flags;
  db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks;

  if( !db->autoCommit ){
    sqlite3SetString(pzErrMsg, "cannot VACUUM from within a transaction", 
       (char*)0);
    rc = SQLITE_ERROR;
    goto end_of_vacuum;
  }

  /* Get the full pathname of the database file and create a
  ** temporary filename in the same directory as the original file.
  */
  pMain = db->aDb[0].pBt;
  zFilename = sqlite3BtreeGetFilename(pMain);
  assert( zFilename );
  if( zFilename[0]=='\0' ){
    /* The in-memory database. Do nothing. Return directly to avoid causing
    ** an error trying to DETACH the vacuum_db (which never got attached)
    ** in the exit-handler.
    */
    return SQLITE_OK;
  }
  nFilename = strlen(zFilename);
  zTemp = sqliteMalloc( nFilename+100 );
  if( zTemp==0 ){
    rc = SQLITE_NOMEM;
    goto end_of_vacuum;
  }
  strcpy(zTemp, zFilename);

  /* The randomName() procedure in the following loop uses an excellent
  ** source of randomness to generate a name from a space of 1.3e+31 
  ** possibilities.  So unless the directory already contains on the order
  ** of 1.3e+31 files, the probability that the following loop will
  ** run more than once or twice is vanishingly small.  We are certain
  ** enough that this loop will always terminate (and terminate quickly)
  ** that we don't even bother to set a maximum loop count.
  */
  do {
    zTemp[nFilename] = '-';
    randomName((unsigned char*)&zTemp[nFilename+1]);
  } while( sqlite3OsFileExists(zTemp) );

  /* Attach the temporary database as 'vacuum_db'. The synchronous pragma
  ** can be set to 'off' for this file, as it is not recovered if a crash
  ** occurs anyway. The integrity of the database is maintained by a
  ** (possibly synchronous) transaction opened on the main database before
  ** sqlite3BtreeCopyFile() is called.
  **
  ** An optimisation would be to use a non-journaled pager.
  */
  zSql = sqlite3MPrintf("ATTACH '%q' AS vacuum_db;", zTemp);
  if( !zSql ){
    rc = SQLITE_NOMEM;
    goto end_of_vacuum;
  }
  rc = execSql(db, zSql);
  sqliteFree(zSql);
  zSql = 0;
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  pDb = &db->aDb[db->nDb-1];
  assert( strcmp(db->aDb[db->nDb-1].zName,"vacuum_db")==0 );
  pTemp = db->aDb[db->nDb-1].pBt;
  sqlite3BtreeSetPageSize(pTemp, sqlite3BtreeGetPageSize(pMain),
     sqlite3BtreeGetReserve(pMain));
  assert( sqlite3BtreeGetPageSize(pTemp)==sqlite3BtreeGetPageSize(pMain) );
  rc = execSql(db, "PRAGMA vacuum_db.synchronous=OFF");
  if( rc!=SQLITE_OK ){
    goto end_of_vacuum;
  }

#ifndef SQLITE_OMIT_AUTOVACUUM
  sqlite3BtreeSetAutoVacuum(pTemp, sqlite3BtreeGetAutoVacuum(pMain));
#endif

  /* Begin a transaction */
  rc = execSql(db, "BEGIN EXCLUSIVE;");
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Query the schema of the main database. Create a mirror schema
  ** in the temporary database.
  */
  rc = execExecSql(db, 
      "SELECT 'CREATE TABLE vacuum_db.' || substr(sql,14,100000000) "
      "  FROM sqlite_master WHERE type='table' AND name!='sqlite_sequence'"
      "   AND rootpage>0"
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  rc = execExecSql(db, 
      "SELECT 'CREATE INDEX vacuum_db.' || substr(sql,14,100000000)"
      "  FROM sqlite_master WHERE sql LIKE 'CREATE INDEX %' ");
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  rc = execExecSql(db, 
      "SELECT 'CREATE UNIQUE INDEX vacuum_db.' || substr(sql,21,100000000) "
      "  FROM sqlite_master WHERE sql LIKE 'CREATE UNIQUE INDEX %'");
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Loop through the tables in the main database. For each, do
  ** an "INSERT INTO vacuum_db.xxx SELECT * FROM xxx;" to copy
  ** the contents to the temporary database.
  */
  rc = execExecSql(db, 
      "SELECT 'INSERT INTO vacuum_db.' || quote(name) "
      "|| ' SELECT * FROM ' || quote(name) || ';'"
      "FROM sqlite_master "
      "WHERE type = 'table' AND name!='sqlite_sequence' "
      "  AND rootpage>0"

  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Copy over the sequence table
  */
  rc = execExecSql(db, 
      "SELECT 'DELETE FROM vacuum_db.' || quote(name) || ';' "
      "FROM vacuum_db.sqlite_master WHERE name='sqlite_sequence' "
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  rc = execExecSql(db, 
      "SELECT 'INSERT INTO vacuum_db.' || quote(name) "
      "|| ' SELECT * FROM ' || quote(name) || ';' "
      "FROM vacuum_db.sqlite_master WHERE name=='sqlite_sequence';"
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;


  /* Copy the triggers, views, and virtual tables from the main database
  ** over to the temporary database.  None of these objects has any
  ** associated storage, so all we have to do is copy their entries
  ** from the SQLITE_MASTER table.
  */
  rc = execSql(db,
      "INSERT INTO vacuum_db.sqlite_master "
      "  SELECT type, name, tbl_name, rootpage, sql"
      "    FROM sqlite_master"
      "   WHERE type='view' OR type='trigger'"
      "      OR (type='table' AND rootpage=0)"
  );
  if( rc ) goto end_of_vacuum;

  /* At this point, unless the main db was completely empty, there is now a
  ** transaction open on the vacuum database, but not on the main database.
  ** Open a btree level transaction on the main database. This allows a
  ** call to sqlite3BtreeCopyFile(). The main database btree level
  ** transaction is then committed, so the SQL level never knows it was
  ** opened for writing. This way, the SQL transaction used to create the
  ** temporary database never needs to be committed.
  */
  if( rc==SQLITE_OK ){
    u32 meta;
    int i;

    /* This array determines which meta values are preserved in the
    ** vacuum.  Even entries are the meta value number and odd entries
    ** are an increment to apply to the meta value after the vacuum.
    ** The increment is used to increase the schema cookie so that other
    ** connections to the same database will know to reread the schema.
    */
    static const unsigned char aCopy[] = {
       1, 1,    /* Add one to the old schema cookie */
       3, 0,    /* Preserve the default page cache size */
       5, 0,    /* Preserve the default text encoding */
       6, 0,    /* Preserve the user version */
    };

    assert( 1==sqlite3BtreeIsInTrans(pTemp) );
    assert( 1==sqlite3BtreeIsInTrans(pMain) );

    /* Copy Btree meta values */
    for(i=0; i<sizeof(aCopy)/sizeof(aCopy[0]); i+=2){
      rc = sqlite3BtreeGetMeta(pMain, aCopy[i], &meta);
      if( rc!=SQLITE_OK ) goto end_of_vacuum;
      rc = sqlite3BtreeUpdateMeta(pTemp, aCopy[i], meta+aCopy[i+1]);
      if( rc!=SQLITE_OK ) goto end_of_vacuum;
    }

    rc = sqlite3BtreeCopyFile(pMain, pTemp);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
    rc = sqlite3BtreeCommit(pTemp);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
    rc = sqlite3BtreeCommit(pMain);
  }

end_of_vacuum:
  /* Restore the original value of db->flags */
  db->flags = saved_flags;

  /* Currently there is an SQL level transaction open on the vacuum
  ** database. No locks are held on any other files (since the main file
  ** was committed at the btree level). So it is safe to end the transaction
  ** by manually setting the autoCommit flag to true and detaching the
  ** vacuum database. The vacuum_db journal file is deleted when the pager
  ** is closed by the DETACH.
  */
  db->autoCommit = 1;

  if( pDb ){
    sqlite3MallocDisallow();
    sqlite3BtreeClose(pDb->pBt);
    sqlite3MallocAllow();
    pDb->pBt = 0;
    pDb->pSchema = 0;
  }

  if( zTemp ){
    sqlite3OsDelete(zTemp);
    sqliteFree(zTemp);
  }
  sqliteFree( zSql );
  sqlite3ResetInternalSchema(db, 0);

  return rc;
}
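Example No. 2 leans on two file-local helpers that the listing does not show: execSql(), which runs a single SQL string, and execExecSql(), which runs a query and then executes the text in column 0 of every returned row (this is how the SELECT 'CREATE TABLE vacuum_db....' statements above actually build the mirror schema). A sketch of the two helpers as they roughly appear in vacuum.c of this era:

/* Rough sketch of the helpers assumed above (not a verbatim excerpt).
** execSql(): run a single SQL statement to completion. */
static int execSql(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt;
  if( SQLITE_OK!=sqlite3_prepare(db, zSql, -1, &pStmt, 0) ){
    return sqlite3_errcode(db);
  }
  while( SQLITE_ROW==sqlite3_step(pStmt) ){}
  return sqlite3_finalize(pStmt);
}

/* execExecSql(): run zSql, then execute the text found in column 0 of
** each returned row as a new SQL statement. */
static int execExecSql(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt;
  int rc;

  rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;

  while( SQLITE_ROW==sqlite3_step(pStmt) ){
    rc = execSql(db, (char*)sqlite3_column_text(pStmt, 0));
    if( rc!=SQLITE_OK ){
      sqlite3_finalize(pStmt);
      return rc;
    }
  }
  return sqlite3_finalize(pStmt);
}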
Example No. 3
/* CHANGE 1 of 3: Add function parameter nRes */
SQLITE_PRIVATE int sqlite3RunVacuumForRekey(char **pzErrMsg, sqlite3 *db, int iDb, int nRes){
  int rc = SQLITE_OK;     /* Return code from service routines */
  Btree *pMain;           /* The database being vacuumed */
  Btree *pTemp;           /* The temporary database we vacuum into */
  u16 saved_mDbFlags;     /* Saved value of db->mDbFlags */
  u32 saved_flags;        /* Saved value of db->flags */
  int saved_nChange;      /* Saved value of db->nChange */
  int saved_nTotalChange; /* Saved value of db->nTotalChange */
  u8 saved_mTrace;        /* Saved trace settings */
  Db *pDb = 0;            /* Database to detach at end of vacuum */
  int isMemDb;            /* True if vacuuming a :memory: database */
  /* CHANGE 2 of 3: Do not define local variable nRes */
  /*int nRes;*/               /* Bytes of reserved space at the end of each page */
  int nDb;                /* Number of attached databases */
  const char *zDbMain;    /* Schema name of database to vacuum */

  if (!db->autoCommit) {
    sqlite3SetString(pzErrMsg, db, "cannot VACUUM from within a transaction");
    return SQLITE_ERROR;
  }
  if (db->nVdbeActive>1) {
    sqlite3SetString(pzErrMsg, db, "cannot VACUUM - SQL statements in progress");
    return SQLITE_ERROR;
  }

  /* Save the current value of the database flags so that it can be
  ** restored before returning. Then set the writable-schema flag, and
  ** disable CHECK and foreign key constraints.  */
  saved_flags = db->flags;
  saved_mDbFlags = db->mDbFlags;
  saved_nChange = db->nChange;
  saved_nTotalChange = db->nTotalChange;
  saved_mTrace = db->mTrace;
  db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks;
  db->mDbFlags |= DBFLAG_PreferBuiltin | DBFLAG_Vacuum;
  db->flags &= ~(SQLITE_ForeignKeys | SQLITE_ReverseOrder | SQLITE_CountRows);
  db->mTrace = 0;

  zDbMain = db->aDb[iDb].zDbSName;
  pMain = db->aDb[iDb].pBt;
  isMemDb = sqlite3PagerIsMemdb(sqlite3BtreePager(pMain));

  /* Attach the temporary database as 'vacuum_db'. The synchronous pragma
  ** can be set to 'off' for this file, as it is not recovered if a crash
  ** occurs anyway. The integrity of the database is maintained by a
  ** (possibly synchronous) transaction opened on the main database before
  ** sqlite3BtreeCopyFile() is called.
  **
  ** An optimisation would be to use a non-journaled pager.
  ** (Later:) I tried setting "PRAGMA vacuum_db.journal_mode=OFF" but
  ** that actually made the VACUUM run slower.  Very little journalling
  ** actually occurs when doing a vacuum since the vacuum_db is initially
  ** empty.  Only the journal header is written.  Apparently it takes more
  ** time to parse and run the PRAGMA to turn journalling off than it does
  ** to write the journal header file.
  */
  nDb = db->nDb;
  rc = execSql(db, pzErrMsg, "ATTACH''AS vacuum_db");
  if (rc != SQLITE_OK) goto end_of_vacuum;
  assert((db->nDb - 1) == nDb);
  pDb = &db->aDb[nDb];
  assert(strcmp(pDb->zDbSName, "vacuum_db") == 0);
  pTemp = pDb->pBt;

  /* The call to execSql() to attach the temp database has left the file
  ** locked (as there was more than one active statement when the transaction
  ** to read the schema was concluded). Unlock it here so that this doesn't
  ** cause problems for the call to BtreeSetPageSize() below.  */
  sqlite3BtreeCommit(pTemp);

  /* CHANGE 3 of 3: Do not call sqlite3BtreeGetOptimalReserve */
  /* nRes = sqlite3BtreeGetOptimalReserve(pMain); */

  /* A VACUUM cannot change the pagesize of an encrypted database. */
#ifdef SQLITE_HAS_CODEC
  if (db->nextPagesize) {
    extern void sqlite3CodecGetKey(sqlite3*, int, void**, int*);
    int nKey;
    char *zKey;
    sqlite3CodecGetKey(db, iDb, (void**)&zKey, &nKey);
    if (nKey) db->nextPagesize = 0;
  }
#endif

  sqlite3BtreeSetCacheSize(pTemp, db->aDb[iDb].pSchema->cache_size);
  sqlite3BtreeSetSpillSize(pTemp, sqlite3BtreeSetSpillSize(pMain, 0));
  sqlite3BtreeSetPagerFlags(pTemp, PAGER_SYNCHRONOUS_OFF | PAGER_CACHESPILL);

  /* Begin a transaction and take an exclusive lock on the main database
  ** file. This is done before the sqlite3BtreeGetPageSize(pMain) call below,
  ** to ensure that we do not try to change the page-size on a WAL database.
  */
  rc = execSql(db, pzErrMsg, "BEGIN");
  if (rc != SQLITE_OK) goto end_of_vacuum;
  rc = sqlite3BtreeBeginTrans(pMain, 2);
  if (rc != SQLITE_OK) goto end_of_vacuum;

  /* Do not attempt to change the page size for a WAL database */
  if (sqlite3PagerGetJournalMode(sqlite3BtreePager(pMain))
    == PAGER_JOURNALMODE_WAL) {
    db->nextPagesize = 0;
  }

  if (sqlite3BtreeSetPageSize(pTemp, sqlite3BtreeGetPageSize(pMain), nRes, 0)
    || (!isMemDb && sqlite3BtreeSetPageSize(pTemp, db->nextPagesize, nRes, 0))
    || NEVER(db->mallocFailed)
    ) {
    rc = SQLITE_NOMEM_BKPT;
    goto end_of_vacuum;
  }

#ifndef SQLITE_OMIT_AUTOVACUUM
  sqlite3BtreeSetAutoVacuum(pTemp, db->nextAutovac >= 0 ? db->nextAutovac :
    sqlite3BtreeGetAutoVacuum(pMain));
#endif

  /* Query the schema of the main database. Create a mirror schema
  ** in the temporary database.
  */
  db->init.iDb = nDb; /* force new CREATE statements into vacuum_db */
  rc = execSqlF(db, pzErrMsg,
    "SELECT sql FROM \"%w\".sqlite_master"
    " WHERE type='table'AND name<>'sqlite_sequence'"
    " AND coalesce(rootpage,1)>0",
    zDbMain
  );
  if (rc != SQLITE_OK) goto end_of_vacuum;
  rc = execSqlF(db, pzErrMsg,
    "SELECT sql FROM \"%w\".sqlite_master"
    " WHERE type='index'",
    zDbMain
  );
  if (rc != SQLITE_OK) goto end_of_vacuum;
  db->init.iDb = 0;

  /* Loop through the tables in the main database. For each, do
  ** an "INSERT INTO vacuum_db.xxx SELECT * FROM main.xxx;" to copy
  ** the contents to the temporary database.
  */
  rc = execSqlF(db, pzErrMsg,
    "SELECT'INSERT INTO vacuum_db.'||quote(name)"
    "||' SELECT*FROM\"%w\".'||quote(name)"
    "FROM vacuum_db.sqlite_master "
    "WHERE type='table'AND coalesce(rootpage,1)>0",
    zDbMain
  );
  assert((db->mDbFlags & DBFLAG_Vacuum) != 0);
  db->mDbFlags &= ~DBFLAG_Vacuum;
  if (rc != SQLITE_OK) goto end_of_vacuum;

  /* Copy the triggers, views, and virtual tables from the main database
  ** over to the temporary database.  None of these objects has any
  ** associated storage, so all we have to do is copy their entries
  ** from the SQLITE_MASTER table.
  */
  rc = execSqlF(db, pzErrMsg,
    "INSERT INTO vacuum_db.sqlite_master"
    " SELECT*FROM \"%w\".sqlite_master"
    " WHERE type IN('view','trigger')"
    " OR(type='table'AND rootpage=0)",
    zDbMain
  );
  if (rc) goto end_of_vacuum;

  /* At this point, there is a write transaction open on both the
  ** vacuum database and the main database. Assuming no error occurs,
  ** both transactions are closed by this block - the main database
  ** transaction by sqlite3BtreeCopyFile() and the other by an explicit
  ** call to sqlite3BtreeCommit().
  */
  {
    u32 meta;
    int i;

    /* This array determines which meta values are preserved in the
    ** vacuum.  Even entries are the meta value number and odd entries
    ** are an increment to apply to the meta value after the vacuum.
    ** The increment is used to increase the schema cookie so that other
    ** connections to the same database will know to reread the schema.
    */
    static const unsigned char aCopy[] = {
      BTREE_SCHEMA_VERSION,     1,  /* Add one to the old schema cookie */
      BTREE_DEFAULT_CACHE_SIZE, 0,  /* Preserve the default page cache size */
      BTREE_TEXT_ENCODING,      0,  /* Preserve the text encoding */
      BTREE_USER_VERSION,       0,  /* Preserve the user version */
      BTREE_APPLICATION_ID,     0,  /* Preserve the application id */
    };

    assert(1 == sqlite3BtreeIsInTrans(pTemp));
    assert(1 == sqlite3BtreeIsInTrans(pMain));

    /* Copy Btree meta values */
    for (i = 0; i<ArraySize(aCopy); i += 2) {
      /* GetMeta() and UpdateMeta() cannot fail in this context because
      ** we already have page 1 loaded into cache and marked dirty. */
      sqlite3BtreeGetMeta(pMain, aCopy[i], &meta);
      rc = sqlite3BtreeUpdateMeta(pTemp, aCopy[i], meta + aCopy[i + 1]);
      if (NEVER(rc != SQLITE_OK)) goto end_of_vacuum;
    }

    rc = sqlite3BtreeCopyFile(pMain, pTemp);
    if (rc != SQLITE_OK) goto end_of_vacuum;
    rc = sqlite3BtreeCommit(pTemp);
    if (rc != SQLITE_OK) goto end_of_vacuum;
#ifndef SQLITE_OMIT_AUTOVACUUM
    sqlite3BtreeSetAutoVacuum(pMain, sqlite3BtreeGetAutoVacuum(pTemp));
#endif
  }

  assert(rc == SQLITE_OK);
  rc = sqlite3BtreeSetPageSize(pMain, sqlite3BtreeGetPageSize(pTemp), nRes, 1);

end_of_vacuum:
  /* Restore the original value of db->flags */
  db->init.iDb = 0;
  db->mDbFlags = saved_mDbFlags;
  db->flags = saved_flags;
  db->nChange = saved_nChange;
  db->nTotalChange = saved_nTotalChange;
  db->mTrace = saved_mTrace;
  sqlite3BtreeSetPageSize(pMain, -1, -1, 1);

  /* Currently there is an SQL level transaction open on the vacuum
  ** database. No locks are held on any other files (since the main file
  ** was committed at the btree level). So it is safe to end the transaction
  ** by manually setting the autoCommit flag to true and detaching the
  ** vacuum database. The vacuum_db journal file is deleted when the pager
  ** is closed by the DETACH.
  */
  db->autoCommit = 1;

  if (pDb) {
    sqlite3BtreeClose(pDb->pBt);
    pDb->pBt = 0;
    pDb->pSchema = 0;
  }

  /* This both clears the schemas and reduces the size of the db->aDb[]
  ** array. */
  sqlite3ResetAllSchemasOfConnection(db);

  return rc;
}
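The newer code in Example No. 3 calls execSql(db, pzErrMsg, zSql) plus a printf-style wrapper execSqlF() that formats the statement (including the %w schema-name quoting) before running it. A rough sketch of the wrapper, assuming it simply formats with the internal printf and forwards to execSql():

/* Rough sketch of the execSqlF() wrapper assumed by Example No. 3:
** format the SQL (the %w conversion safely quotes schema names), then
** hand the result to execSql(). */
static int execSqlF(sqlite3 *db, char **pzErrMsg, const char *zSql, ...){
  char *z;
  va_list ap;
  int rc;
  va_start(ap, zSql);
  z = sqlite3VMPrintf(db, zSql, ap);
  va_end(ap);
  if( z==0 ) return SQLITE_NOMEM;
  rc = execSql(db, pzErrMsg, z);
  sqlite3DbFree(db, z);
  return rc;
}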
Example No. 4
/*
** This routine implements the OP_Vacuum opcode of the VDBE.
*/
int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){
  int rc = SQLITE_OK;     /* Return code from service routines */
  Btree *pMain;           /* The database being vacuumed */
  Pager *pMainPager;      /* Pager for database being vacuumed */
  Btree *pTemp;           /* The temporary database we vacuum into */
  char *zSql = 0;         /* SQL statements */
  int saved_flags;        /* Saved value of the db->flags */
  int saved_nChange;      /* Saved value of db->nChange */
  int saved_nTotalChange; /* Saved value of db->nTotalChange */
  Db *pDb = 0;            /* Database to detach at end of vacuum */
  int isMemDb;            /* True if vacuuming a :memory: database */
  int nRes;

  /* Save the current value of the write-schema flag before setting it. */
  saved_flags = db->flags;
  saved_nChange = db->nChange;
  saved_nTotalChange = db->nTotalChange;
  db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks;

  if( !db->autoCommit ){
    sqlite3SetString(pzErrMsg, db, "cannot VACUUM from within a transaction");
    rc = SQLITE_ERROR;
    goto end_of_vacuum;
  }
  pMain = db->aDb[0].pBt;
  pMainPager = sqlite3BtreePager(pMain);
  isMemDb = sqlite3PagerFile(pMainPager)->pMethods==0;

  /* Attach the temporary database as 'vacuum_db'. The synchronous pragma
  ** can be set to 'off' for this file, as it is not recovered if a crash
  ** occurs anyway. The integrity of the database is maintained by a
  ** (possibly synchronous) transaction opened on the main database before
  ** sqlite3BtreeCopyFile() is called.
  **
  ** An optimisation would be to use a non-journaled pager.
  ** (Later:) I tried setting "PRAGMA vacuum_db.journal_mode=OFF" but
  ** that actually made the VACUUM run slower.  Very little journalling
  ** actually occurs when doing a vacuum since the vacuum_db is initially
  ** empty.  Only the journal header is written.  Apparently it takes more
  ** time to parse and run the PRAGMA to turn journalling off than it does
  ** to write the journal header file.
  */
  zSql = "ATTACH '' AS vacuum_db;";
  rc = execSql(db, zSql);
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  pDb = &db->aDb[db->nDb-1];
  assert( strcmp(db->aDb[db->nDb-1].zName,"vacuum_db")==0 );
  pTemp = db->aDb[db->nDb-1].pBt;

  nRes = sqlite3BtreeGetReserve(pMain);

  /* A VACUUM cannot change the pagesize of an encrypted database. */
#ifdef SQLITE_HAS_CODEC
  if( db->nextPagesize ){
    extern void sqlite3CodecGetKey(sqlite3*, int, void**, int*);
    int nKey;
    char *zKey;
    sqlite3CodecGetKey(db, 0, (void**)&zKey, &nKey);
    if( nKey ) db->nextPagesize = 0;
  }
#endif

  if( sqlite3BtreeSetPageSize(pTemp, sqlite3BtreeGetPageSize(pMain), nRes)
   || (!isMemDb && sqlite3BtreeSetPageSize(pTemp, db->nextPagesize, nRes))
   || db->mallocFailed 
  ){
    rc = SQLITE_NOMEM;
    goto end_of_vacuum;
  }
  rc = execSql(db, "PRAGMA vacuum_db.synchronous=OFF");
  if( rc!=SQLITE_OK ){
    goto end_of_vacuum;
  }

#ifndef SQLITE_OMIT_AUTOVACUUM
  sqlite3BtreeSetAutoVacuum(pTemp, db->nextAutovac>=0 ? db->nextAutovac :
                                           sqlite3BtreeGetAutoVacuum(pMain));
#endif

  /* Begin a transaction */
  rc = execSql(db, "BEGIN EXCLUSIVE;");
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Query the schema of the main database. Create a mirror schema
  ** in the temporary database.
  */
  rc = execExecSql(db, 
      "SELECT 'CREATE TABLE vacuum_db.' || substr(sql,14) "
      "  FROM sqlite_master WHERE type='table' AND name!='sqlite_sequence'"
      "   AND rootpage>0"
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  rc = execExecSql(db, 
      "SELECT 'CREATE INDEX vacuum_db.' || substr(sql,14)"
      "  FROM sqlite_master WHERE sql LIKE 'CREATE INDEX %' ");
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  rc = execExecSql(db, 
      "SELECT 'CREATE UNIQUE INDEX vacuum_db.' || substr(sql,21) "
      "  FROM sqlite_master WHERE sql LIKE 'CREATE UNIQUE INDEX %'");
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Loop through the tables in the main database. For each, do
  ** an "INSERT INTO vacuum_db.xxx SELECT * FROM xxx;" to copy
  ** the contents to the temporary database.
  */
  rc = execExecSql(db, 
      "SELECT 'INSERT INTO vacuum_db.' || quote(name) "
      "|| ' SELECT * FROM ' || quote(name) || ';'"
      "FROM sqlite_master "
      "WHERE type = 'table' AND name!='sqlite_sequence' "
      "  AND rootpage>0"

  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Copy over the sequence table
  */
  rc = execExecSql(db, 
      "SELECT 'DELETE FROM vacuum_db.' || quote(name) || ';' "
      "FROM vacuum_db.sqlite_master WHERE name='sqlite_sequence' "
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  rc = execExecSql(db, 
      "SELECT 'INSERT INTO vacuum_db.' || quote(name) "
      "|| ' SELECT * FROM ' || quote(name) || ';' "
      "FROM vacuum_db.sqlite_master WHERE name=='sqlite_sequence';"
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;


  /* Copy the triggers, views, and virtual tables from the main database
  ** over to the temporary database.  None of these objects has any
  ** associated storage, so all we have to do is copy their entries
  ** from the SQLITE_MASTER table.
  */
  rc = execSql(db,
      "INSERT INTO vacuum_db.sqlite_master "
      "  SELECT type, name, tbl_name, rootpage, sql"
      "    FROM sqlite_master"
      "   WHERE type='view' OR type='trigger'"
      "      OR (type='table' AND rootpage=0)"
  );
  if( rc ) goto end_of_vacuum;

  /* At this point, unless the main db was completely empty, there is now a
  ** transaction open on the vacuum database, but not on the main database.
  ** Open a btree level transaction on the main database. This allows a
  ** call to sqlite3BtreeCopyFile(). The main database btree level
  ** transaction is then committed, so the SQL level never knows it was
  ** opened for writing. This way, the SQL transaction used to create the
  ** temporary database never needs to be committed.
  */
  if( rc==SQLITE_OK ){
    u32 meta;
    int i;

    /* This array determines which meta values are preserved in the
    ** vacuum.  Even entries are the meta value number and odd entries
    ** are an increment to apply to the meta value after the vacuum.
    ** The increment is used to increase the schema cookie so that other
    ** connections to the same database will know to reread the schema.
    */
    static const unsigned char aCopy[] = {
       1, 1,    /* Add one to the old schema cookie */
       3, 0,    /* Preserve the default page cache size */
       5, 0,    /* Preserve the default text encoding */
       6, 0,    /* Preserve the user version */
    };

    assert( 1==sqlite3BtreeIsInTrans(pTemp) );
    assert( 1==sqlite3BtreeIsInTrans(pMain) );

    /* Copy Btree meta values */
    for(i=0; i<ArraySize(aCopy); i+=2){
      rc = sqlite3BtreeGetMeta(pMain, aCopy[i], &meta);
      if( rc!=SQLITE_OK ) goto end_of_vacuum;
      rc = sqlite3BtreeUpdateMeta(pTemp, aCopy[i], meta+aCopy[i+1]);
      if( rc!=SQLITE_OK ) goto end_of_vacuum;
    }

    rc = sqlite3BtreeCopyFile(pMain, pTemp);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
    rc = sqlite3BtreeCommit(pTemp);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
#ifndef SQLITE_OMIT_AUTOVACUUM
    sqlite3BtreeSetAutoVacuum(pMain, sqlite3BtreeGetAutoVacuum(pTemp));
#endif
    rc = sqlite3BtreeCommit(pMain);
  }

  if( rc==SQLITE_OK ){
    rc = sqlite3BtreeSetPageSize(pMain, sqlite3BtreeGetPageSize(pTemp), nRes);
  }

end_of_vacuum:
  /* Restore the original value of db->flags */
  db->flags = saved_flags;
  db->nChange = saved_nChange;
  db->nTotalChange = saved_nTotalChange;

  /* Currently there is an SQL level transaction open on the vacuum
  ** database. No locks are held on any other files (since the main file
  ** was committed at the btree level). So it is safe to end the transaction
  ** by manually setting the autoCommit flag to true and detaching the
  ** vacuum database. The vacuum_db journal file is deleted when the pager
  ** is closed by the DETACH.
  */
  db->autoCommit = 1;

  if( pDb ){
    sqlite3BtreeClose(pDb->pBt);
    pDb->pBt = 0;
    pDb->pSchema = 0;
  }

  sqlite3ResetInternalSchema(db, 0);

  return rc;
}
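Examples No. 2 and No. 4 address the preserved meta slots by their raw indices (1, 3, 5, 6), while Examples No. 3 and No. 7 use the symbolic names from btree.h. For reference, the correspondence is shown below (a reference sketch of the btree.h definitions, not a verbatim excerpt):

/* Meta-slot numbers behind the symbolic names used in Examples No. 3/No. 7
** (reference sketch of the btree.h definitions). */
#define BTREE_FREE_PAGE_COUNT     0
#define BTREE_SCHEMA_VERSION      1  /* The schema cookie bumped by VACUUM */
#define BTREE_FILE_FORMAT         2
#define BTREE_DEFAULT_CACHE_SIZE  3
#define BTREE_LARGEST_ROOT_PAGE   4
#define BTREE_TEXT_ENCODING       5
#define BTREE_USER_VERSION        6
#define BTREE_INCR_VACUUM         7
#define BTREE_APPLICATION_ID      8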
Example No. 5
/*
** Copy nPage pages from the source b-tree to the destination.
*/
int sqlite3_backup_step(sqlite3_backup *p, int nPage){
  int rc;
  int destMode;       /* Destination journal mode */
  int pgszSrc = 0;    /* Source page size */
  int pgszDest = 0;   /* Destination page size */

#ifdef SQLITE_ENABLE_API_ARMOR
  if( p==0 ) return SQLITE_MISUSE_BKPT;
#endif
  sqlite3_mutex_enter(p->pSrcDb->mutex);
  sqlite3BtreeEnter(p->pSrc);
  if( p->pDestDb ){
    sqlite3_mutex_enter(p->pDestDb->mutex);
  }

  rc = p->rc;
  if( !isFatalError(rc) ){
    Pager * const pSrcPager = sqlite3BtreePager(p->pSrc);     /* Source pager */
    Pager * const pDestPager = sqlite3BtreePager(p->pDest);   /* Dest pager */
    int ii;                            /* Iterator variable */
    int nSrcPage = -1;                 /* Size of source db in pages */
    int bCloseTrans = 0;               /* True if src db requires unlocking */

    /* If the source pager is currently in a write-transaction, return
    ** SQLITE_BUSY immediately.
    */
    if( p->pDestDb && p->pSrc->pBt->inTransaction==TRANS_WRITE ){
      rc = SQLITE_BUSY;
    }else{
      rc = SQLITE_OK;
    }

    /* If there is no open read-transaction on the source database, open
    ** one now. If a transaction is opened here, then it will be closed
    ** before this function exits.
    */
    if( rc==SQLITE_OK && 0==sqlite3BtreeIsInReadTrans(p->pSrc) ){
      rc = sqlite3BtreeBeginTrans(p->pSrc, 0, 0);
      bCloseTrans = 1;
    }

    /* If the destination database has not yet been locked (i.e. if this
    ** is the first call to backup_step() for the current backup operation),
    ** try to set its page size to the same as the source database. This
    ** is especially important on ZipVFS systems, as in that case it is
    ** not possible to create a database file that uses one page size by
    ** writing to it with another.  */
    if( p->bDestLocked==0 && rc==SQLITE_OK && setDestPgsz(p)==SQLITE_NOMEM ){
      rc = SQLITE_NOMEM;
    }

    /* Lock the destination database, if it is not locked already. */
    if( SQLITE_OK==rc && p->bDestLocked==0
     && SQLITE_OK==(rc = sqlite3BtreeBeginTrans(p->pDest, 2,
                                                (int*)&p->iDestSchema)) 
    ){
      p->bDestLocked = 1;
    }

    /* Do not allow backup if the destination database is in WAL mode
    ** and the page sizes are different between source and destination */
    pgszSrc = sqlite3BtreeGetPageSize(p->pSrc);
    pgszDest = sqlite3BtreeGetPageSize(p->pDest);
    destMode = sqlite3PagerGetJournalMode(sqlite3BtreePager(p->pDest));
    if( SQLITE_OK==rc && destMode==PAGER_JOURNALMODE_WAL && pgszSrc!=pgszDest ){
      rc = SQLITE_READONLY;
    }
  
    /* Now that there is a read-lock on the source database, query the
    ** source pager for the number of pages in the database.
    */
    nSrcPage = (int)sqlite3BtreeLastPage(p->pSrc);
    assert( nSrcPage>=0 );
    for(ii=0; (nPage<0 || ii<nPage) && p->iNext<=(Pgno)nSrcPage && !rc; ii++){
      const Pgno iSrcPg = p->iNext;                 /* Source page number */
      if( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ){
        DbPage *pSrcPg;                             /* Source page object */
        rc = sqlite3PagerGet(pSrcPager, iSrcPg, &pSrcPg,PAGER_GET_READONLY);
        if( rc==SQLITE_OK ){
          rc = backupOnePage(p, iSrcPg, sqlite3PagerGetData(pSrcPg), 0);
          sqlite3PagerUnref(pSrcPg);
        }
      }
      p->iNext++;
    }
    if( rc==SQLITE_OK ){
      p->nPagecount = nSrcPage;
      p->nRemaining = nSrcPage+1-p->iNext;
      if( p->iNext>(Pgno)nSrcPage ){
        rc = SQLITE_DONE;
      }else if( !p->isAttached ){
        attachBackupObject(p);
      }
    }
  
    /* Update the schema version field in the destination database. This
    ** is to make sure that the schema-version really does change in
    ** the case where the source and destination databases have the
    ** same schema version.
    */
    if( rc==SQLITE_DONE ){
      if( nSrcPage==0 ){
        rc = sqlite3BtreeNewDb(p->pDest);
        nSrcPage = 1;
      }
      if( rc==SQLITE_OK || rc==SQLITE_DONE ){
        rc = sqlite3BtreeUpdateMeta(p->pDest,1,p->iDestSchema+1);
      }
      if( rc==SQLITE_OK ){
        if( p->pDestDb ){
          sqlite3ResetAllSchemasOfConnection(p->pDestDb);
        }
        if( destMode==PAGER_JOURNALMODE_WAL ){
          rc = sqlite3BtreeSetVersion(p->pDest, 2);
        }
      }
      if( rc==SQLITE_OK ){
        int nDestTruncate;
        /* Set nDestTruncate to the final number of pages in the destination
        ** database. The complication here is that the destination page
        ** size may be different to the source page size. 
        **
        ** If the source page size is smaller than the destination page size, 
        ** round up. In this case the call to sqlite3OsTruncate() below will
        ** fix the size of the file. However it is important to call
        ** sqlite3PagerTruncateImage() here so that any pages in the 
        ** destination file that lie beyond the nDestTruncate page mark are
        ** journalled by PagerCommitPhaseOne() before they are destroyed
        ** by the file truncation.
        */
        assert( pgszSrc==sqlite3BtreeGetPageSize(p->pSrc) );
        assert( pgszDest==sqlite3BtreeGetPageSize(p->pDest) );
        if( pgszSrc<pgszDest ){
          int ratio = pgszDest/pgszSrc;
          nDestTruncate = (nSrcPage+ratio-1)/ratio;
          if( nDestTruncate==(int)PENDING_BYTE_PAGE(p->pDest->pBt) ){
            nDestTruncate--;
          }
        }else{
          nDestTruncate = nSrcPage * (pgszSrc/pgszDest);
        }
        assert( nDestTruncate>0 );

        if( pgszSrc<pgszDest ){
          /* If the source page-size is smaller than the destination page-size,
          ** two extra things may need to happen:
          **
          **   * The destination may need to be truncated, and
          **
          **   * Data stored on the pages immediately following the 
          **     pending-byte page in the source database may need to be
          **     copied into the destination database.
          */
          const i64 iSize = (i64)pgszSrc * (i64)nSrcPage;
          sqlite3_file * const pFile = sqlite3PagerFile(pDestPager);
          Pgno iPg;
          int nDstPage;
          i64 iOff;
          i64 iEnd;

          assert( pFile );
          assert( nDestTruncate==0 
              || (i64)nDestTruncate*(i64)pgszDest >= iSize || (
                nDestTruncate==(int)(PENDING_BYTE_PAGE(p->pDest->pBt)-1)
             && iSize>=PENDING_BYTE && iSize<=PENDING_BYTE+pgszDest
          ));

          /* This block ensures that all data required to recreate the original
          ** database has been stored in the journal for pDestPager and the
          ** journal synced to disk. So at this point we may safely modify
          ** the database file in any way, knowing that if a power failure
          ** occurs, the original database will be reconstructed from the 
          ** journal file.  */
          sqlite3PagerPagecount(pDestPager, &nDstPage);
          for(iPg=nDestTruncate; rc==SQLITE_OK && iPg<=(Pgno)nDstPage; iPg++){
            if( iPg!=PENDING_BYTE_PAGE(p->pDest->pBt) ){
              DbPage *pPg;
              rc = sqlite3PagerGet(pDestPager, iPg, &pPg, 0);
              if( rc==SQLITE_OK ){
                rc = sqlite3PagerWrite(pPg);
                sqlite3PagerUnref(pPg);
              }
            }
          }
          if( rc==SQLITE_OK ){
            rc = sqlite3PagerCommitPhaseOne(pDestPager, 0, 1);
          }

          /* Write the extra pages and truncate the database file as required */
          iEnd = MIN(PENDING_BYTE + pgszDest, iSize);
          for(
            iOff=PENDING_BYTE+pgszSrc; 
            rc==SQLITE_OK && iOff<iEnd; 
            iOff+=pgszSrc
          ){
            PgHdr *pSrcPg = 0;
            const Pgno iSrcPg = (Pgno)((iOff/pgszSrc)+1);
            rc = sqlite3PagerGet(pSrcPager, iSrcPg, &pSrcPg, 0);
            if( rc==SQLITE_OK ){
              u8 *zData = sqlite3PagerGetData(pSrcPg);
              rc = sqlite3OsWrite(pFile, zData, pgszSrc, iOff);
            }
            sqlite3PagerUnref(pSrcPg);
          }
          if( rc==SQLITE_OK ){
            rc = backupTruncateFile(pFile, iSize);
          }

          /* Sync the database file to disk. */
          if( rc==SQLITE_OK ){
            rc = sqlite3PagerSync(pDestPager, 0);
          }
        }else{
          sqlite3PagerTruncateImage(pDestPager, nDestTruncate);
          rc = sqlite3PagerCommitPhaseOne(pDestPager, 0, 0);
        }
    
        /* Finish committing the transaction to the destination database. */
        if( SQLITE_OK==rc
         && SQLITE_OK==(rc = sqlite3BtreeCommitPhaseTwo(p->pDest, 0))
        ){
          rc = SQLITE_DONE;
        }
      }
    }
  
    /* If bCloseTrans is true, then this function opened a read transaction
    ** on the source database. Close the read transaction here. There is
    ** no need to check the return values of the btree methods here, as
    ** "committing" a read-only transaction cannot fail.
    */
    if( bCloseTrans ){
      TESTONLY( int rc2 );
      TESTONLY( rc2  = ) sqlite3BtreeCommitPhaseOne(p->pSrc, 0);
      TESTONLY( rc2 |= ) sqlite3BtreeCommitPhaseTwo(p->pSrc, 0);
      assert( rc2==SQLITE_OK );
    }
  
    if( rc==SQLITE_IOERR_NOMEM ){
      rc = SQLITE_NOMEM_BKPT;
    }
    p->rc = rc;
  }

  /* Release the mutexes acquired at the top of this function and return
  ** the result code to the caller. */
  if( p->pDestDb ){
    sqlite3_mutex_leave(p->pDestDb->mutex);
  }
  sqlite3BtreeLeave(p->pSrc);
  sqlite3_mutex_leave(p->pSrcDb->mutex);
  return rc;
}
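Both backup_step() listings (Examples No. 5 and No. 9) gate all of their work on isFatalError(). A sketch of that predicate, under the assumption that SQLITE_BUSY and SQLITE_LOCKED are treated as retryable while any other error ends the backup permanently:

/* Sketch of the isFatalError() test assumed above: the backup keeps its
** error state except for BUSY/LOCKED, which the caller may retry. */
static int isFatalError(int rc){
  return (rc!=SQLITE_OK && rc!=SQLITE_BUSY && rc!=SQLITE_LOCKED);
}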
Example No. 6
/*
 * Use Bulk Get/Put to copy the given number of pages worth of
 * records from the source database to the destination database,
 * this function should be called until all tables are copied, at
 * which point it will return SQLITE_DONE.  Both Btrees need to
 * have transactions before calling this function.
 * p->pSrc - Source Btree
 * p->tables - Contains a list of iTables to copy, gotten using
 *          btreeGetTables().
 * p->currentTable - Index in tables of the current table being copied.
 * p->srcCur -  Cursor on the current source table being copied.
 * p->pDest - Destination Btree.
 * p->destCur - BtCursor on the destination table being copied into.
 * pages - Number of pages worth of data to copy.
 */
static int btreeCopyPages(sqlite3_backup *p, int *pages)
{
	DB *dbp;
	DBT dataOut, dataIn;
	char bufOut[MULTI_BUFSIZE], bufIn[MULTI_BUFSIZE];
	int ret, rc, copied, srcIsDupIndex;
	void *in, *out, *app;

	ret = 0;
	rc = SQLITE_OK;
	dbp = NULL;
	copied = 0;
	memset(&dataOut, 0, sizeof(dataOut));
	memset(&dataIn, 0, sizeof(dataIn));
	dataOut.flags = DB_DBT_USERMEM;
	dataIn.flags = DB_DBT_USERMEM;
	dataOut.data = bufOut;
	dataOut.ulen = sizeof(bufOut);
	dataIn.data = bufIn;
	dataIn.ulen = sizeof(bufIn);

	while (*pages < 0 || *pages > copied) {
		/* No tables left to copy */
		if (p->tables[p->currentTable] == -1) {
			u32 val;
			/*
			 * Update the schema file format and largest rootpage
			 * in the meta data.  Other meta data values should
			 * not be changed.
			 */
			sqlite3BtreeGetMeta(p->pSrc, 1, &val);
			if (p->pSrc->db->errCode == SQLITE_BUSY) {
				rc = SQLITE_BUSY;
				goto err;
			}
			rc = sqlite3BtreeUpdateMeta(p->pDest, 1, val);
			if (rc != SQLITE_OK)
				goto err;
			sqlite3BtreeGetMeta(p->pSrc, 3, &val);
			if (p->pSrc->db->errCode == SQLITE_BUSY) {
				rc = SQLITE_BUSY;
				goto err;
			}
			rc = sqlite3BtreeUpdateMeta(p->pDest, 3, val);
			if (rc != SQLITE_OK)
				goto err;
			ret = SQLITE_DONE;
			goto err;
		}
		/* If not currently copying a table, get the next table. */
		if (!p->srcCur) {
			rc = btreeGetUserTable(p->pSrc, p->srcTxn, &dbp,
			    p->tables[p->currentTable]);
			if (rc != SQLITE_OK)
				goto err;
			assert(dbp);
			memset(&p->destCur, 0, sizeof(p->destCur));
			/*
			 * Open a cursor on the destination table, this will
			 * create the table and allow the Btree to manage the
			 * DB object.
			 */
			sqlite3BtreeCursor(p->pDest, p->tables[p->currentTable],
			    1, dbp->app_private, &p->destCur);
			if ((rc = p->destCur.error) != SQLITE_OK) {
				app = dbp->app_private;
				dbp->close(dbp, DB_NOSYNC);
				if (app)
					sqlite3DbFree(p->pSrcDb, app);
				goto err;
			}
			/* Open a cursor on the source table. */
			if ((ret = dbp->cursor(dbp,
			    p->srcTxn, &p->srcCur, 0)) != 0)
				goto err;
			dbp = 0;
		}
		srcIsDupIndex = isDupIndex((p->tables[p->currentTable] & 1) ?
		    BTREE_INTKEY : 0, p->pSrc->pBt->dbStorage,
		    p->srcCur->dbp->app_private, p->srcCur->dbp);
		/*
		 * Copy the current table until the given number of
		 * pages is copied, or the entire table has been copied.
		 */
		while (*pages < 0 || *pages > copied) {
			DBT key, data;
			memset(&key, 0, sizeof(key));
			memset(&data, 0, sizeof(data));
			/* Do a Bulk Get from the source table. */
			ret = p->srcCur->get(p->srcCur, &key, &dataOut,
			    DB_NEXT | DB_MULTIPLE_KEY);
			if (ret == DB_NOTFOUND)
				break;
			if (ret != 0)
				goto err;
			/* Copy the records into the Bulk buffer. */
			DB_MULTIPLE_INIT(out, &dataOut);
			DB_MULTIPLE_WRITE_INIT(in, &dataIn);
			DB_MULTIPLE_KEY_NEXT(out, &dataOut, key.data,
			    key.size, data.data, data.size);
			while (out) {
				/*
				 * Have to translate the index formats if they
				 * are not the same.
				 */
				if (p->destCur.isDupIndex != srcIsDupIndex) {
					if (srcIsDupIndex) {
						p->destCur.key = key;
						p->destCur.data = data;
						if (!btreeCreateIndexKey(
						    &p->destCur)) {
							rc = SQLITE_NOMEM;
							goto err;
						}
						DB_MULTIPLE_KEY_WRITE_NEXT(in,
						    &dataIn,
						    p->destCur.index.data,
						    p->destCur.index.size,
						    p->destCur.data.data, 0);
					} else {
						/* Copy the key into the cursor
						 * index since splitting the key
						 * requires changing the
						 * internal memory.
						 */
						if (!allocateCursorIndex(
						    &p->destCur, key.size)) {
							rc = SQLITE_NOMEM;
							goto err;
						}
						memcpy(p->destCur.index.data,
						    key.data, key.size);
						p->destCur.index.size =
						    key.size;
						p->destCur.key.data =
						    p->destCur.index.data;
						p->destCur.key.size =
						    p->destCur.index.size;
						splitIndexKey(&p->destCur);
						DB_MULTIPLE_KEY_WRITE_NEXT(
						    in, &dataIn,
						    p->destCur.key.data,
						    p->destCur.key.size,
						    p->destCur.data.data,
						    p->destCur.data.size);
					}
				} else
					DB_MULTIPLE_KEY_WRITE_NEXT(in, &dataIn,
					    key.data, key.size,
					    data.data, data.size);
				DB_MULTIPLE_KEY_NEXT(out, &dataOut,
				    key.data, key.size,
				    data.data, data.size);
			}
			/* Insert into the destination table. */
			dbp = p->destCur.cached_db->dbp;
			if ((ret = dbp->put(dbp, p->pDest->savepoint_txn,
			    &dataIn, 0, DB_MULTIPLE_KEY)) != 0)
				goto err;
			dbp = NULL;
			copied += MULTI_BUFSIZE/SQLITE_DEFAULT_PAGE_SIZE;
		}
		/*
		 * Done copying the current table, time to look for a new
		 * table to copy.
		 */
		if (ret == DB_NOTFOUND) {
			ret = 0;
			rc = sqlite3BtreeCloseCursor(&p->destCur);
			if (p->srcCur) {
				app = p->srcCur->dbp->app_private;
				dbp = p->srcCur->dbp;
				p->srcCur->close(p->srcCur);
				ret = dbp->close(dbp, DB_NOSYNC);
				if (app)
					sqlite3DbFree(p->pSrcDb, app);
			}
			p->srcCur = NULL;
			if (ret != 0 || rc != SQLITE_OK)
				goto err;
			p->currentTable += 1;
		}
	}
	goto done;
err:	if (ret == SQLITE_DONE)
		return ret;
done:	return MAP_ERR(rc, ret);
}
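The core of Example No. 6 is Berkeley DB's bulk interface: one cursor get with DB_NEXT | DB_MULTIPLE_KEY fills a user-supplied buffer with many key/data pairs, the buffer is walked with DB_MULTIPLE_KEY_NEXT, repacked with DB_MULTIPLE_KEY_WRITE_NEXT, and written back with a single DB_MULTIPLE_KEY put. Stripped of the index-format translation and cursor management, the pattern looks roughly like the sketch below (dbc, dstdb and txn are assumed to be an already-open source cursor, destination handle and transaction; they are not names from the code above).

/* Illustrative sketch of the bulk copy pattern used above; error paths and
** the dup-index translation are omitted.  dbc/dstdb/txn are assumptions. */
static int bulkCopyTable(DBC *dbc, DB *dstdb, DB_TXN *txn)
{
	char bufOut[MULTI_BUFSIZE], bufIn[MULTI_BUFSIZE];
	DBT key, data, dataOut, dataIn;
	void *out, *in;
	int ret;

	memset(&dataOut, 0, sizeof(dataOut));
	memset(&dataIn, 0, sizeof(dataIn));
	dataOut.data = bufOut;
	dataOut.ulen = sizeof(bufOut);
	dataOut.flags = DB_DBT_USERMEM;
	dataIn.data = bufIn;
	dataIn.ulen = sizeof(bufIn);
	dataIn.flags = DB_DBT_USERMEM;

	for (;;) {
		memset(&key, 0, sizeof(key));
		memset(&data, 0, sizeof(data));
		/* One bulk read fills dataOut with many key/data pairs. */
		ret = dbc->get(dbc, &key, &dataOut, DB_NEXT | DB_MULTIPLE_KEY);
		if (ret == DB_NOTFOUND)
			return 0;
		if (ret != 0)
			return ret;
		/* Walk the read buffer and repack each pair into dataIn. */
		DB_MULTIPLE_INIT(out, &dataOut);
		DB_MULTIPLE_WRITE_INIT(in, &dataIn);
		DB_MULTIPLE_KEY_NEXT(out, &dataOut,
		    key.data, key.size, data.data, data.size);
		while (out) {
			DB_MULTIPLE_KEY_WRITE_NEXT(in, &dataIn,
			    key.data, key.size, data.data, data.size);
			DB_MULTIPLE_KEY_NEXT(out, &dataOut,
			    key.data, key.size, data.data, data.size);
		}
		/* One bulk write inserts the whole batch. */
		if ((ret = dstdb->put(dstdb, txn, &dataIn, 0,
		    DB_MULTIPLE_KEY)) != 0)
			return ret;
	}
}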
Example No. 7
int sqlcipher_codec_ctx_migrate(codec_ctx *ctx) {
  u32 meta;
  int rc = 0;
  int command_idx = 0;
  int password_sz;
  int saved_flags;
  int saved_nChange;
  int saved_nTotalChange;
  void (*saved_xTrace)(void*,const char*);
  Db *pDb = 0;
  sqlite3 *db = ctx->pBt->db;
  const char *db_filename = sqlite3_db_filename(db, "main");
  char *migrated_db_filename = sqlite3_mprintf("%s-migrated", db_filename);
  char *pragma_hmac_off = "PRAGMA cipher_use_hmac = OFF;";
  char *pragma_4k_kdf_iter = "PRAGMA kdf_iter = 4000;";
  char *pragma_1x_and_4k;
  char *set_user_version;
  char *key;
  int key_sz;
  int user_version = 0;
  int upgrade_1x_format = 0;
  int upgrade_4k_format = 0;
  static const unsigned char aCopy[] = {
    BTREE_SCHEMA_VERSION,     1,  /* Add one to the old schema cookie */
    BTREE_DEFAULT_CACHE_SIZE, 0,  /* Preserve the default page cache size */
    BTREE_TEXT_ENCODING,      0,  /* Preserve the text encoding */
    BTREE_USER_VERSION,       0,  /* Preserve the user version */
    BTREE_APPLICATION_ID,     0,  /* Preserve the application id */
  };


  key_sz = ctx->read_ctx->pass_sz + 1;
  key = sqlcipher_malloc(key_sz);
  memset(key, 0, key_sz);
  memcpy(key, ctx->read_ctx->pass, ctx->read_ctx->pass_sz);

  if(db_filename){
    const char* commands[5];
    char *attach_command = sqlite3_mprintf("ATTACH DATABASE '%s-migrated' as migrate KEY '%q';",
                                            db_filename, key);

    int rc = sqlcipher_check_connection(db_filename, key, ctx->read_ctx->pass_sz, "", &user_version);
    if(rc == SQLITE_OK){
      CODEC_TRACE(("No upgrade required - exiting\n"));
      goto exit;
    }
    
    // Version 2 - check for 4k with hmac format 
    rc = sqlcipher_check_connection(db_filename, key, ctx->read_ctx->pass_sz, pragma_4k_kdf_iter, &user_version);
    if(rc == SQLITE_OK) {
      CODEC_TRACE(("Version 2 format found\n"));
      upgrade_4k_format = 1;
    }

    // Version 1 - check both no hmac and 4k together
    pragma_1x_and_4k = sqlite3_mprintf("%s%s", pragma_hmac_off,
                                             pragma_4k_kdf_iter);
    rc = sqlcipher_check_connection(db_filename, key, ctx->read_ctx->pass_sz, pragma_1x_and_4k, &user_version);
    sqlite3_free(pragma_1x_and_4k);
    if(rc == SQLITE_OK) {
      CODEC_TRACE(("Version 1 format found\n"));
      upgrade_1x_format = 1;
      upgrade_4k_format = 1;
    }

    if(upgrade_1x_format == 0 && upgrade_4k_format == 0) {
      CODEC_TRACE(("Upgrade format not determined\n"));
      goto handle_error;
    }

    set_user_version = sqlite3_mprintf("PRAGMA migrate.user_version = %d;", user_version);
    commands[0] = upgrade_4k_format == 1 ? pragma_4k_kdf_iter : "";
    commands[1] = upgrade_1x_format == 1 ? pragma_hmac_off : "";
    commands[2] = attach_command;
    commands[3] = "SELECT sqlcipher_export('migrate');";
    commands[4] = set_user_version;
      
    for(command_idx = 0; command_idx < ArraySize(commands); command_idx++){
      const char *command = commands[command_idx];
      if(strcmp(command, "") == 0){
        continue;
      }
      rc = sqlite3_exec(db, command, NULL, NULL, NULL);
      if(rc != SQLITE_OK){
        break;
      }
    }
    sqlite3_free(attach_command);
    sqlite3_free(set_user_version);
    sqlcipher_free(key, key_sz);
    
    if(rc == SQLITE_OK){
      Btree *pDest;
      Btree *pSrc;
      int i = 0;

      if( !db->autoCommit ){
        CODEC_TRACE(("cannot migrate from within a transaction"));
        goto handle_error;
      }
      if( db->nVdbeActive>1 ){
        CODEC_TRACE(("cannot migrate - SQL statements in progress"));
        goto handle_error;
      }

      /* Save the current value of the database flags so that it can be
      ** restored before returning. Then set the writable-schema flag, and
      ** disable CHECK and foreign key constraints.  */
      saved_flags = db->flags;
      saved_nChange = db->nChange;
      saved_nTotalChange = db->nTotalChange;
      saved_xTrace = db->xTrace;
      db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks | SQLITE_PreferBuiltin;
      db->flags &= ~(SQLITE_ForeignKeys | SQLITE_ReverseOrder);
      db->xTrace = 0;
      
      pDest = db->aDb[0].pBt;
      pDb = &(db->aDb[db->nDb-1]);
      pSrc = pDb->pBt;
      
      rc = sqlite3_exec(db, "BEGIN;", NULL, NULL, NULL);
      rc = sqlite3BtreeBeginTrans(pSrc, 2);
      rc = sqlite3BtreeBeginTrans(pDest, 2);
      
      assert( 1==sqlite3BtreeIsInTrans(pDest) );
      assert( 1==sqlite3BtreeIsInTrans(pSrc) );

      sqlite3CodecGetKey(db, db->nDb - 1, (void**)&key, &password_sz);
      sqlite3CodecAttach(db, 0, key, password_sz);
      sqlite3pager_get_codec(pDest->pBt->pPager, (void**)&ctx);
      
      ctx->skip_read_hmac = 1;      
      for(i=0; i<ArraySize(aCopy); i+=2){
        sqlite3BtreeGetMeta(pSrc, aCopy[i], &meta);
        rc = sqlite3BtreeUpdateMeta(pDest, aCopy[i], meta+aCopy[i+1]);
        if( NEVER(rc!=SQLITE_OK) ) goto handle_error; 
      }
      rc = sqlite3BtreeCopyFile(pDest, pSrc);
      ctx->skip_read_hmac = 0;
      if( rc!=SQLITE_OK ) goto handle_error;
      rc = sqlite3BtreeCommit(pDest);

      db->flags = saved_flags;
      db->nChange = saved_nChange;
      db->nTotalChange = saved_nTotalChange;
      db->xTrace = saved_xTrace;
      db->autoCommit = 1;
      if( pDb ){
        sqlite3BtreeClose(pDb->pBt);
        pDb->pBt = 0;
        pDb->pSchema = 0;
      }
      sqlite3ResetAllSchemasOfConnection(db);
      remove(migrated_db_filename);
      sqlite3_free(migrated_db_filename);
    } else {
      CODEC_TRACE(("*** migration failure** \n\n"));
    }
    
  }
  goto exit;

 handle_error:
  CODEC_TRACE(("An error occurred attempting to migrate the database\n"));
  rc = SQLITE_ERROR;

 exit:
  return rc;
}
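Example No. 7 is normally reached through SQLCipher's PRAGMA interface rather than being called directly: keying the database and running PRAGMA cipher_migrate is what drives sqlcipher_codec_ctx_migrate(). A minimal application-side sketch follows (the helper name and error handling are illustrative, not part of SQLCipher):

#include <sqlite3.h>

/* Illustrative sketch: open an older-format SQLCipher database, supply the
** key, then ask SQLCipher to migrate the file to the current format. */
static int migrate_legacy_db(const char *zFile, const char *zPass){
  sqlite3 *db = 0;
  char *zKey;
  int rc = sqlite3_open(zFile, &db);
  if( rc!=SQLITE_OK ) return rc;
  zKey = sqlite3_mprintf("PRAGMA key = '%q';", zPass);
  rc = sqlite3_exec(db, zKey, 0, 0, 0);
  sqlite3_free(zKey);
  if( rc==SQLITE_OK ){
    rc = sqlite3_exec(db, "PRAGMA cipher_migrate;", 0, 0, 0);
  }
  sqlite3_close(db);
  return rc;
}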
Example No. 8
/*
** This routine implements the OP_Vacuum opcode of the VDBE.
*/
int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){
  int rc = SQLITE_OK;     /* Return code from service routines */
#if !defined(SQLITE_OMIT_VACUUM) || SQLITE_OMIT_VACUUM
  const char *zFilename;  /* full pathname of the database file */
  int nFilename;          /* number of characters  in zFilename[] */
  char *zTemp = 0;        /* a temporary file in same directory as zFilename */
  int i;                  /* Loop counter */
  Btree *pMain;           /* The database being vacuumed */
  Btree *pTemp;
  char *zSql = 0;

  if( !db->autoCommit ){
    sqlite3SetString(pzErrMsg, "cannot VACUUM from within a transaction", 
       (char*)0);
    rc = SQLITE_ERROR;
    goto end_of_vacuum;
  }

  /* Get the full pathname of the database file and create a
  ** temporary filename in the same directory as the original file.
  */
  pMain = db->aDb[0].pBt;
  zFilename = sqlite3BtreeGetFilename(pMain);
  assert( zFilename );
  if( zFilename[0]=='\0' ){
    /* The in-memory database. Do nothing. Return directly to avoid causing
    ** an error trying to DETACH the vacuum_db (which never got attached)
    ** in the exit-handler.
    */
    return SQLITE_OK;
  }
  nFilename = strlen(zFilename);
  zTemp = sqliteMalloc( nFilename+100 );
  if( zTemp==0 ){
    rc = SQLITE_NOMEM;
    goto end_of_vacuum;
  }
  strcpy(zTemp, zFilename);
  i = 0;
  do {
    zTemp[nFilename] = '-';
    randomName((unsigned char*)&zTemp[nFilename+1]);
  } while( i<10 && sqlite3OsFileExists(zTemp) );

  /* Attach the temporary database as 'vacuum_db'. The synchronous pragma
  ** can be set to 'off' for this file, as it is not recovered if a crash
  ** occurs anyway. The integrity of the database is maintained by a
  ** (possibly synchronous) transaction opened on the main database before
  ** sqlite3BtreeCopyFile() is called.
  **
  ** An optimisation would be to use a non-journaled pager.
  */
  zSql = sqlite3MPrintf("ATTACH '%q' AS vacuum_db;", zTemp);
  if( !zSql ){
    rc = SQLITE_NOMEM;
    goto end_of_vacuum;
  }
  rc = execSql(db, zSql);
  sqliteFree(zSql);
  zSql = 0;
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  assert( strcmp(db->aDb[db->nDb-1].zName,"vacuum_db")==0 );
  pTemp = db->aDb[db->nDb-1].pBt;
  sqlite3BtreeSetPageSize(pTemp, sqlite3BtreeGetPageSize(pMain),
     sqlite3BtreeGetReserve(pMain));
  assert( sqlite3BtreeGetPageSize(pTemp)==sqlite3BtreeGetPageSize(pMain) );
  execSql(db, "PRAGMA vacuum_db.synchronous=OFF");

  /* Begin a transaction */
  rc = execSql(db, "BEGIN;");
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Query the schema of the main database. Create a mirror schema
  ** in the temporary database.
  */
  rc = execExecSql(db, 
      "SELECT 'CREATE TABLE vacuum_db.' || substr(sql,14,100000000) "
      "  FROM sqlite_master WHERE type='table' "
      "UNION ALL "
      "SELECT 'CREATE INDEX vacuum_db.' || substr(sql,14,100000000) "
      "  FROM sqlite_master WHERE sql LIKE 'CREATE INDEX %' "
      "UNION ALL "
      "SELECT 'CREATE UNIQUE INDEX vacuum_db.' || substr(sql,21,100000000) "
      "  FROM sqlite_master WHERE sql LIKE 'CREATE UNIQUE INDEX %'"
      "UNION ALL "
      "SELECT 'CREATE VIEW vacuum_db.' || substr(sql,13,100000000) "
      "  FROM sqlite_master WHERE type='view'"
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Loop through the tables in the main database. For each, do
  ** an "INSERT INTO vacuum_db.xxx SELECT * FROM xxx;" to copy
  ** the contents to the temporary database.
  */
  rc = execExecSql(db, 
      "SELECT 'INSERT INTO vacuum_db.' || quote(name) "
      "|| ' SELECT * FROM ' || quote(name) || ';'"
      "FROM sqlite_master "
      "WHERE type = 'table';"
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Copy the triggers from the main database to the temporary database.
  ** This was deferred before in case the triggers interfered with copying
  ** the data. It's possible the indices should be deferred until this
  ** point also.
  */
  rc = execExecSql(db, 
      "SELECT 'CREATE TRIGGER  vacuum_db.' || substr(sql, 16, 1000000) "
      "FROM sqlite_master WHERE type='trigger'"
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;


  /* At this point, unless the main db was completely empty, there is now a
  ** transaction open on the vacuum database, but not on the main database.
  ** Open a btree level transaction on the main database. This allows a
  ** call to sqlite3BtreeCopyFile(). The main database btree level
  ** transaction is then committed, so the SQL level never knows it was
  ** opened for writing. This way, the SQL transaction used to create the
  ** temporary database never needs to be committed.
  */
  if( sqlite3BtreeIsInTrans(pTemp) ){
    u32 meta;

    assert( 0==sqlite3BtreeIsInTrans(pMain) );
    rc = sqlite3BtreeBeginTrans(pMain, 1);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;

    /* Copy Btree meta values 3 and 4. These correspond to SQL layer meta 
    ** values 2 and 3, the default values of a couple of pragmas.
    */
    rc = sqlite3BtreeGetMeta(pMain, 3, &meta);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
    rc = sqlite3BtreeUpdateMeta(pTemp, 3, meta);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
    rc = sqlite3BtreeGetMeta(pMain, 4, &meta);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
    rc = sqlite3BtreeUpdateMeta(pTemp, 4, meta);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;

    rc = sqlite3BtreeCopyFile(pMain, pTemp);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
    rc = sqlite3BtreeCommit(pMain);
  }

end_of_vacuum:
  /* Currently there is an SQL level transaction open on the vacuum
  ** database. No locks are held on any other files (since the main file
  ** was committed at the btree level). So it is safe to end the transaction
  ** by manually setting the autoCommit flag to true and detaching the
  ** vacuum database. The vacuum_db journal file is deleted when the pager
  ** is closed by the DETACH.
  */
  db->autoCommit = 1;
  if( rc==SQLITE_OK ){
    rc = execSql(db, "DETACH vacuum_db;");
  }else{
    execSql(db, "DETACH vacuum_db;");
  }
  if( zTemp ){
    sqlite3OsDelete(zTemp);
    sqliteFree(zTemp);
  }
  if( zSql ) sqliteFree( zSql );
  sqlite3ResetInternalSchema(db, 0);
#endif
  return rc;
}
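Examples No. 2 and No. 8 both build the temporary file name with randomName(), which neither listing shows. The comment about a space of 1.3e+31 names corresponds to 20 characters drawn from a 36-letter alphabet (36^20 is about 1.3e+31), so a compatible implementation looks roughly like the sketch below; the use of the internal sqlite3Randomness() routine is an assumption based on the SQLite sources of that era.

/* Sketch of a randomName() compatible with the loops above: fill zBuf with
** 20 random characters from a 36-character alphabet (36^20 ~= 1.3e+31).
** sqlite3Randomness() is assumed to be the era's internal PRNG entry point. */
static void randomName(unsigned char *zBuf){
  static const unsigned char zChars[] =
    "abcdefghijklmnopqrstuvwxyz"
    "0123456789";
  int i;
  sqlite3Randomness(20, zBuf);
  for(i=0; i<20; i++){
    zBuf[i] = zChars[ zBuf[i]%(sizeof(zChars)-1) ];
  }
}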
Example No. 9
/*
** Copy nPage pages from the source b-tree to the destination.
*/
int sqlite3_backup_step(sqlite3_backup *p, int nPage){
  int rc;
  int destMode;       /* Destination journal mode */
  int pgszSrc = 0;    /* Source page size */
  int pgszDest = 0;   /* Destination page size */

  sqlite3_mutex_enter(p->pSrcDb->mutex);
  sqlite3BtreeEnter(p->pSrc);
  if( p->pDestDb ){
    sqlite3_mutex_enter(p->pDestDb->mutex);
  }

  rc = p->rc;
  if( !isFatalError(rc) ){
    Pager * const pSrcPager = sqlite3BtreePager(p->pSrc);     /* Source pager */
    Pager * const pDestPager = sqlite3BtreePager(p->pDest);   /* Dest pager */
    int ii;                            /* Iterator variable */
    int nSrcPage = -1;                 /* Size of source db in pages */
    int bCloseTrans = 0;               /* True if src db requires unlocking */

    /* If the source pager is currently in a write-transaction, return
    ** SQLITE_BUSY immediately.
    */
    if( p->pDestDb && p->pSrc->pBt->inTransaction==TRANS_WRITE ){
      rc = SQLITE_BUSY;
    }else{
      rc = SQLITE_OK;
    }

    /* Lock the destination database, if it is not locked already. */
    if( SQLITE_OK==rc && p->bDestLocked==0
     && SQLITE_OK==(rc = sqlite3BtreeBeginTrans(p->pDest, 2)) 
    ){
      p->bDestLocked = 1;
      sqlite3BtreeGetMeta(p->pDest, BTREE_SCHEMA_VERSION, &p->iDestSchema);
    }

    /* If there is no open read-transaction on the source database, open
    ** one now. If a transaction is opened here, then it will be closed
    ** before this function exits.
    */
    if( rc==SQLITE_OK && 0==sqlite3BtreeIsInReadTrans(p->pSrc) ){
      rc = sqlite3BtreeBeginTrans(p->pSrc, 0);
      bCloseTrans = 1;
    }

    /* Do not allow backup if the destination database is in WAL mode
    ** and the page sizes are different between source and destination */
    pgszSrc = sqlite3BtreeGetPageSize(p->pSrc);
    pgszDest = sqlite3BtreeGetPageSize(p->pDest);
    destMode = sqlite3PagerGetJournalMode(sqlite3BtreePager(p->pDest));
    if( SQLITE_OK==rc && destMode==PAGER_JOURNALMODE_WAL && pgszSrc!=pgszDest ){
      rc = SQLITE_READONLY;
    }
  
    /* Now that there is a read-lock on the source database, query the
    ** source pager for the number of pages in the database.
    */
    nSrcPage = (int)sqlite3BtreeLastPage(p->pSrc);
    assert( nSrcPage>=0 );
    for(ii=0; (nPage<0 || ii<nPage) && p->iNext<=(Pgno)nSrcPage && !rc; ii++){
      const Pgno iSrcPg = p->iNext;                 /* Source page number */
      if( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ){
        DbPage *pSrcPg;                             /* Source page object */
        rc = sqlite3PagerGet(pSrcPager, iSrcPg, &pSrcPg);
        if( rc==SQLITE_OK ){
          rc = backupOnePage(p, iSrcPg, sqlite3PagerGetData(pSrcPg));
          sqlite3PagerUnref(pSrcPg);
        }
      }
      p->iNext++;
    }
    if( rc==SQLITE_OK ){
      p->nPagecount = nSrcPage;
      p->nRemaining = nSrcPage+1-p->iNext;
      if( p->iNext>(Pgno)nSrcPage ){
        rc = SQLITE_DONE;
      }else if( !p->isAttached ){
        attachBackupObject(p);
      }
    }
  
    /* Update the schema version field in the destination database. This
    ** is to make sure that the schema-version really does change in
    ** the case where the source and destination databases have the
    ** same schema version.
    */
    if( rc==SQLITE_DONE 
     && (rc = sqlite3BtreeUpdateMeta(p->pDest,1,p->iDestSchema+1))==SQLITE_OK
    ){
      int nDestTruncate;
  
      if( p->pDestDb ){
        sqlite3ResetInternalSchema(p->pDestDb, 0);
      }

      /* Set nDestTruncate to the final number of pages in the destination
      ** database. The complication here is that the destination page
      ** size may be different to the source page size. 
      **
      ** If the source page size is smaller than the destination page size, 
      ** round up. In this case the call to sqlite3OsTruncate() below will
      ** fix the size of the file. However it is important to call
      ** sqlite3PagerTruncateImage() here so that any pages in the 
      ** destination file that lie beyond the nDestTruncate page mark are
      ** journalled by PagerCommitPhaseOne() before they are destroyed
      ** by the file truncation.
      */
      assert( pgszSrc==sqlite3BtreeGetPageSize(p->pSrc) );
      assert( pgszDest==sqlite3BtreeGetPageSize(p->pDest) );
      if( pgszSrc<pgszDest ){
        int ratio = pgszDest/pgszSrc;
        nDestTruncate = (nSrcPage+ratio-1)/ratio;
        if( nDestTruncate==(int)PENDING_BYTE_PAGE(p->pDest->pBt) ){
          nDestTruncate--;
        }
      }else{
        nDestTruncate = nSrcPage * (pgszSrc/pgszDest);
      }
      sqlite3PagerTruncateImage(pDestPager, nDestTruncate);

      if( pgszSrc<pgszDest ){
        /* If the source page-size is smaller than the destination page-size,
        ** two extra things may need to happen:
        **
        **   * The destination may need to be truncated, and
        **
        **   * Data stored on the pages immediately following the 
        **     pending-byte page in the source database may need to be
        **     copied into the destination database.
        */
        const i64 iSize = (i64)pgszSrc * (i64)nSrcPage;
        sqlite3_file * const pFile = sqlite3PagerFile(pDestPager);

        assert( pFile );
        assert( (i64)nDestTruncate*(i64)pgszDest >= iSize || (
              nDestTruncate==(int)(PENDING_BYTE_PAGE(p->pDest->pBt)-1)
           && iSize>=PENDING_BYTE && iSize<=PENDING_BYTE+pgszDest
        ));
        if( SQLITE_OK==(rc = sqlite3PagerCommitPhaseOne(pDestPager, 0, 1))
         && SQLITE_OK==(rc = backupTruncateFile(pFile, iSize))
         && SQLITE_OK==(rc = sqlite3PagerSync(pDestPager))
        ){
          i64 iOff;
          i64 iEnd = MIN(PENDING_BYTE + pgszDest, iSize);
          for(
            iOff=PENDING_BYTE+pgszSrc; 
            rc==SQLITE_OK && iOff<iEnd; 
            iOff+=pgszSrc
          ){
            PgHdr *pSrcPg = 0;
            const Pgno iSrcPg = (Pgno)((iOff/pgszSrc)+1);
            rc = sqlite3PagerGet(pSrcPager, iSrcPg, &pSrcPg);
            if( rc==SQLITE_OK ){
              u8 *zData = sqlite3PagerGetData(pSrcPg);
              rc = sqlite3OsWrite(pFile, zData, pgszSrc, iOff);
            }
            sqlite3PagerUnref(pSrcPg);
          }
        }
      }else{
        rc = sqlite3PagerCommitPhaseOne(pDestPager, 0, 0);
      }
  
      /* Finish committing the transaction to the destination database. */
      if( SQLITE_OK==rc
       && SQLITE_OK==(rc = sqlite3BtreeCommitPhaseTwo(p->pDest))
      ){
        rc = SQLITE_DONE;
      }
    }
  
    /* If bCloseTrans is true, then this function opened a read transaction
    ** on the source database. Close the read transaction here. There is
    ** no need to check the return values of the btree methods here, as
    ** "committing" a read-only transaction cannot fail.
    */
    if( bCloseTrans ){
      TESTONLY( int rc2 );
      TESTONLY( rc2  = ) sqlite3BtreeCommitPhaseOne(p->pSrc, 0);
      TESTONLY( rc2 |= ) sqlite3BtreeCommitPhaseTwo(p->pSrc);
      assert( rc2==SQLITE_OK );
    }
  
    if( rc==SQLITE_IOERR_NOMEM ){
      rc = SQLITE_NOMEM;
    }
    p->rc = rc;
  }