Example 1
/*
 * SQLite manages explicit transactions by setting a flag when a BEGIN; is
 * issued, then starting an actual transaction in the btree layer when the
 * first operation happens (a read txn if it's a read op, a write txn if write)
 * Then each statement will be contained in a sub-transaction. Since sequences
 * are implemented using a custom function, we need to emulate that
 * functionality. So there are three cases here:
 * - Not in an explicit transaction - start a statement, since we might do
 *   write operations, and thus we need a valid statement_txn.
 * - In an explicit transaction, and the first statement. Start a txn and a
 *   statement txn.
 * - In an explicit transaction and not the first statement. Start a statement
 *   transaction.
 *
 * The SQLite vdbe will take care of closing the statement transaction for us,
 * so we don't need to worry about that.
 *
 * Caching sequences can't be transactionally protected, so it's a no-op in
 * that case (and this function should not be called).
 *
 * It's safe to call this method multiple times since both
 * sqlite3BtreeBeginTrans and sqlite3BtreeBeginStmt are no-ops on subsequent
 * calls.
 */
static int btreeSeqStartTransaction(
    sqlite3_context *context, Btree *p, int is_write)
{
	sqlite3 *db;
	Vdbe *vdbe;
	int rc;

	db = sqlite3_context_db_handle(context);
	/*
	 * TODO: This is actually a linked list of VDBEs, not necessarily
	 *       the vdbe we want. I can't see a way to get the correct
	 *       vdbe handle.
	 *       It's possible that there is only one VDBE handle in DB, since
	 *       we use a shared cache.
	 */
	vdbe = db->pVdbe;

	if (!sqlite3BtreeIsInTrans(p) &&
	    (rc = btreeBeginTransInternal(p, 1)) != SQLITE_OK) {
		btreeSeqError(context, SQLITE_ERROR,
		    "Could not begin transaction.");
		return (rc);
	}

	/*
	 * TODO: Do we need logic bumping the VDBE statement count here?
	 *       vdbe.c:OP_Transaction does, but adding it here causes an
	 *       assert failure. It should be OK, because this code is only
	 *       really relevant in the case where there is a single statement.
	 */
	rc = sqlite3BtreeBeginStmt(p, vdbe->iStatement);
	return (rc);
}
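
The helper above is invoked from the custom SQL functions that implement sequences in the Berkeley DB SQL layer. As a rough illustration of that calling context, here is a minimal, self-contained sketch (not Berkeley DB code) of a scalar function registered through the public API; the name seq_demo and its body are hypothetical, and a real sequence function would call a helper such as btreeSeqStartTransaction() before writing.

#include <stdio.h>
#include <sqlite3.h>

static void seq_demo(sqlite3_context *context, int argc, sqlite3_value **argv){
  /* The helper above obtains the connection handle the same way. */
  sqlite3 *db = sqlite3_context_db_handle(context);
  (void)argc; (void)argv;
  /* Report whether an explicit transaction is open: inside BEGIN..COMMIT
  ** autocommit is 0, which corresponds to the "explicit transaction" cases
  ** described in the comment above. */
  sqlite3_result_int(context, sqlite3_get_autocommit(db));
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_create_function(db, "seq_demo", 0, SQLITE_UTF8, 0, seq_demo, 0, 0);
  sqlite3_exec(db, "BEGIN; SELECT seq_demo(); COMMIT;", 0, 0, 0);
  sqlite3_close(db);
  return 0;
}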
Example 2
/*
** Rollback all database files.
*/
void sqlite3RollbackAll(sqlite3 *db){
  int i;
  int inTrans = 0;
  assert( sqlite3_mutex_held(db->mutex) );
  sqlite3MallocEnterBenignBlock(1);                 /* Enter benign region */
  for(i=0; i<db->nDb; i++){
    if( db->aDb[i].pBt ){
      if( sqlite3BtreeIsInTrans(db->aDb[i].pBt) ){
        inTrans = 1;
      }
      sqlite3BtreeRollback(db->aDb[i].pBt);
      db->aDb[i].inTrans = 0;
    }
  }
  sqlite3VtabRollback(db);
  sqlite3MallocLeaveBenignBlock();                 /* Leave benign region */

  if( db->flags&SQLITE_InternChanges ){
    sqlite3ExpirePreparedStatements(db);
    sqlite3ResetInternalSchema(db, 0);
  }

  /* If one has been configured, invoke the rollback-hook callback */
  if( db->xRollbackCallback && (inTrans || !db->autoCommit) ){
    db->xRollbackCallback(db->pRollbackArg);
  }
}
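
The xRollbackCallback invoked at the end of this routine is the hook an application installs through the public sqlite3_rollback_hook() interface. A minimal sketch of wiring it up (table name and message are illustrative only):

#include <stdio.h>
#include <sqlite3.h>

/* Called whenever a transaction on this connection is rolled back. */
static void on_rollback(void *arg){
  (void)arg;
  printf("transaction rolled back\n");
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  /* Installs db->xRollbackCallback / db->pRollbackArg used above. */
  sqlite3_rollback_hook(db, on_rollback, 0);
  sqlite3_exec(db, "CREATE TABLE t(x);", 0, 0, 0);
  sqlite3_exec(db, "BEGIN; INSERT INTO t VALUES(1); ROLLBACK;", 0, 0, 0);
  sqlite3_close(db);
  return 0;
}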
Example 3
/*
** Rollback all database files.
*/
void sqlite3RollbackAll(sqlite3 *db){
  int i;
  int inTrans = 0;
  assert( sqlite3_mutex_held(db->mutex) );
  sqlite3FaultBenign(SQLITE_FAULTINJECTOR_MALLOC, 1);
  for(i=0; i<db->nDb; i++){
    if( db->aDb[i].pBt ){
      if( sqlite3BtreeIsInTrans(db->aDb[i].pBt) ){
        inTrans = 1;
      }
      sqlite3BtreeRollback(db->aDb[i].pBt);
      db->aDb[i].inTrans = 0;
    }
  }
  sqlite3VtabRollback(db);
  sqlite3FaultBenign(SQLITE_FAULTINJECTOR_MALLOC, 0);

  if( db->flags&SQLITE_InternChanges ){
    sqlite3ExpirePreparedStatements(db);
    sqlite3ResetInternalSchema(db, 0);
  }

  /* If one has been configured, invoke the rollback-hook callback */
  if( db->xRollbackCallback && (inTrans || !db->autoCommit) ){
    db->xRollbackCallback(db->pRollbackArg);
  }
}
Example 4
/*
** Rollback all database files.
*/
void sqlite3RollbackAll(sqlite3 *db){
  int i;
  int inTrans = 0;
  for(i=0; i<db->nDb; i++){
    if( db->aDb[i].pBt ){
      if( sqlite3BtreeIsInTrans(db->aDb[i].pBt) ){
        inTrans = 1;
      }
      sqlite3BtreeRollback(db->aDb[i].pBt);
      db->aDb[i].inTrans = 0;
    }
  }
  if( db->flags&SQLITE_InternChanges ){
    sqlite3ResetInternalSchema(db, 0);
  }

  /* If one has been configured, invoke the rollback-hook callback */
  if( db->xRollbackCallback && (inTrans || !db->autoCommit) ){
    db->xRollbackCallback(db->pRollbackArg);
  }
}
Example 5
/*
** This routine implements the OP_Vacuum opcode of the VDBE.
*/
int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){
  int rc = SQLITE_OK;     /* Return code from service routines */
  const char *zFilename;  /* full pathname of the database file */
  int nFilename;          /* number of characters  in zFilename[] */
  char *zTemp = 0;        /* a temporary file in same directory as zFilename */
  Btree *pMain;           /* The database being vacuumed */
  Btree *pTemp;
  char *zSql = 0;
  int saved_flags;       /* Saved value of the db->flags */
  Db *pDb = 0;           /* Database to detach at end of vacuum */

  /* Save the current value of the write-schema flag before setting it. */
  saved_flags = db->flags;
  db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks;

  if( !db->autoCommit ){
    sqlite3SetString(pzErrMsg, "cannot VACUUM from within a transaction", 
       (char*)0);
    rc = SQLITE_ERROR;
    goto end_of_vacuum;
  }

  /* Get the full pathname of the database file and create a
  ** temporary filename in the same directory as the original file.
  */
  pMain = db->aDb[0].pBt;
  zFilename = sqlite3BtreeGetFilename(pMain);
  assert( zFilename );
  if( zFilename[0]=='\0' ){
    /* The in-memory database. Do nothing. Return directly to avoid causing
    ** an error trying to DETACH the vacuum_db (which never got attached)
    ** in the exit-handler.
    */
    return SQLITE_OK;
  }
  nFilename = strlen(zFilename);
  zTemp = sqliteMalloc( nFilename+100 );
  if( zTemp==0 ){
    rc = SQLITE_NOMEM;
    goto end_of_vacuum;
  }
  strcpy(zTemp, zFilename);

  /* The randomName() procedure in the following loop uses an excellent
  ** source of randomness to generate a name from a space of 1.3e+31 
  ** possibilities.  So unless the directory already contains on the order
  ** of 1.3e+31 files, the probability that the following loop will
  ** run more than once or twice is vanishingly small.  We are certain
  ** enough that this loop will always terminate (and terminate quickly)
  ** that we don't even bother to set a maximum loop count.
  */
  do {
    zTemp[nFilename] = '-';
    randomName((unsigned char*)&zTemp[nFilename+1]);
  } while( sqlite3OsFileExists(zTemp) );

  /* Attach the temporary database as 'vacuum_db'. The synchronous pragma
  ** can be set to 'off' for this file, as it is not recovered if a crash
  ** occurs anyway. The integrity of the database is maintained by a
  ** (possibly synchronous) transaction opened on the main database before
  ** sqlite3BtreeCopyFile() is called.
  **
  ** An optimisation would be to use a non-journaled pager.
  */
  zSql = sqlite3MPrintf("ATTACH '%q' AS vacuum_db;", zTemp);
  if( !zSql ){
    rc = SQLITE_NOMEM;
    goto end_of_vacuum;
  }
  rc = execSql(db, zSql);
  sqliteFree(zSql);
  zSql = 0;
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  pDb = &db->aDb[db->nDb-1];
  assert( strcmp(db->aDb[db->nDb-1].zName,"vacuum_db")==0 );
  pTemp = db->aDb[db->nDb-1].pBt;
  sqlite3BtreeSetPageSize(pTemp, sqlite3BtreeGetPageSize(pMain),
     sqlite3BtreeGetReserve(pMain));
  assert( sqlite3BtreeGetPageSize(pTemp)==sqlite3BtreeGetPageSize(pMain) );
  rc = execSql(db, "PRAGMA vacuum_db.synchronous=OFF");
  if( rc!=SQLITE_OK ){
    goto end_of_vacuum;
  }

#ifndef SQLITE_OMIT_AUTOVACUUM
  sqlite3BtreeSetAutoVacuum(pTemp, sqlite3BtreeGetAutoVacuum(pMain));
#endif

  /* Begin a transaction */
  rc = execSql(db, "BEGIN EXCLUSIVE;");
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Query the schema of the main database. Create a mirror schema
  ** in the temporary database.
  */
  rc = execExecSql(db, 
      "SELECT 'CREATE TABLE vacuum_db.' || substr(sql,14,100000000) "
      "  FROM sqlite_master WHERE type='table' AND name!='sqlite_sequence'"
      "   AND rootpage>0"
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  rc = execExecSql(db, 
      "SELECT 'CREATE INDEX vacuum_db.' || substr(sql,14,100000000)"
      "  FROM sqlite_master WHERE sql LIKE 'CREATE INDEX %' ");
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  rc = execExecSql(db, 
      "SELECT 'CREATE UNIQUE INDEX vacuum_db.' || substr(sql,21,100000000) "
      "  FROM sqlite_master WHERE sql LIKE 'CREATE UNIQUE INDEX %'");
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Loop through the tables in the main database. For each, do
  ** an "INSERT INTO vacuum_db.xxx SELECT * FROM xxx;" to copy
  ** the contents to the temporary database.
  */
  rc = execExecSql(db, 
      "SELECT 'INSERT INTO vacuum_db.' || quote(name) "
      "|| ' SELECT * FROM ' || quote(name) || ';'"
      "FROM sqlite_master "
      "WHERE type = 'table' AND name!='sqlite_sequence' "
      "  AND rootpage>0"

  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Copy over the sequence table
  */
  rc = execExecSql(db, 
      "SELECT 'DELETE FROM vacuum_db.' || quote(name) || ';' "
      "FROM vacuum_db.sqlite_master WHERE name='sqlite_sequence' "
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  rc = execExecSql(db, 
      "SELECT 'INSERT INTO vacuum_db.' || quote(name) "
      "|| ' SELECT * FROM ' || quote(name) || ';' "
      "FROM vacuum_db.sqlite_master WHERE name=='sqlite_sequence';"
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;


  /* Copy the triggers, views, and virtual tables from the main database
  ** over to the temporary database.  None of these objects has any
  ** associated storage, so all we have to do is copy their entries
  ** from the SQLITE_MASTER table.
  */
  rc = execSql(db,
      "INSERT INTO vacuum_db.sqlite_master "
      "  SELECT type, name, tbl_name, rootpage, sql"
      "    FROM sqlite_master"
      "   WHERE type='view' OR type='trigger'"
      "      OR (type='table' AND rootpage=0)"
  );
  if( rc ) goto end_of_vacuum;

  /* At this point, unless the main db was completely empty, there is now a
  ** transaction open on the vacuum database, but not on the main database.
  ** Open a btree level transaction on the main database. This allows a
  ** call to sqlite3BtreeCopyFile(). The main database btree level
  ** transaction is then committed, so the SQL level never knows it was
  ** opened for writing. This way, the SQL transaction used to create the
  ** temporary database never needs to be committed.
  */
  if( rc==SQLITE_OK ){
    u32 meta;
    int i;

    /* This array determines which meta values are preserved in the
    ** vacuum.  Even entries are the meta value number and odd entries
    ** are an increment to apply to the meta value after the vacuum.
    ** The increment is used to increase the schema cookie so that other
    ** connections to the same database will know to reread the schema.
    */
    static const unsigned char aCopy[] = {
       1, 1,    /* Add one to the old schema cookie */
       3, 0,    /* Preserve the default page cache size */
       5, 0,    /* Preserve the default text encoding */
       6, 0,    /* Preserve the user version */
    };

    assert( 1==sqlite3BtreeIsInTrans(pTemp) );
    assert( 1==sqlite3BtreeIsInTrans(pMain) );

    /* Copy Btree meta values */
    for(i=0; i<sizeof(aCopy)/sizeof(aCopy[0]); i+=2){
      rc = sqlite3BtreeGetMeta(pMain, aCopy[i], &meta);
      if( rc!=SQLITE_OK ) goto end_of_vacuum;
      rc = sqlite3BtreeUpdateMeta(pTemp, aCopy[i], meta+aCopy[i+1]);
      if( rc!=SQLITE_OK ) goto end_of_vacuum;
    }

    rc = sqlite3BtreeCopyFile(pMain, pTemp);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
    rc = sqlite3BtreeCommit(pTemp);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
    rc = sqlite3BtreeCommit(pMain);
  }

end_of_vacuum:
  /* Restore the original value of db->flags */
  db->flags = saved_flags;

  /* Currently there is an SQL level transaction open on the vacuum
  ** database. No locks are held on any other files (since the main file
  ** was committed at the btree level). So it is safe to end the transaction
  ** by manually setting the autoCommit flag to true and detaching the
  ** vacuum database. The vacuum_db journal file is deleted when the pager
  ** is closed by the DETACH.
  */
  db->autoCommit = 1;

  if( pDb ){
    sqlite3MallocDisallow();
    sqlite3BtreeClose(pDb->pBt);
    sqlite3MallocAllow();
    pDb->pBt = 0;
    pDb->pSchema = 0;
  }

  if( zTemp ){
    sqlite3OsDelete(zTemp);
    sqliteFree(zTemp);
  }
  sqliteFree( zSql );
  sqlite3ResetInternalSchema(db, 0);

  return rc;
}
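
At the SQL level all of this machinery is driven by a plain VACUUM statement, and the !db->autoCommit check near the top is why VACUUM is rejected inside an open transaction. A minimal sketch against the public API (the file name demo.db is only a placeholder):

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  if( sqlite3_open("demo.db", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE IF NOT EXISTS t(x);", 0, 0, 0);

  /* Inside an explicit transaction VACUUM fails, matching the
  ** !db->autoCommit check in sqlite3RunVacuum(). */
  sqlite3_exec(db, "BEGIN;", 0, 0, 0);
  if( sqlite3_exec(db, "VACUUM;", 0, 0, &zErr)!=SQLITE_OK ){
    printf("expected failure: %s\n", zErr);
    sqlite3_free(zErr);
  }
  sqlite3_exec(db, "COMMIT;", 0, 0, 0);

  /* In autocommit mode the statement runs normally. */
  sqlite3_exec(db, "VACUUM;", 0, 0, 0);
  sqlite3_close(db);
  return 0;
}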
Example 6
/*
** This routine implements the OP_Vacuum opcode of the VDBE.
*/
int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){
  int rc = SQLITE_OK;     /* Return code from service routines */
  Btree *pMain;           /* The database being vacuumed */
  Pager *pMainPager;      /* Pager for database being vacuumed */
  Btree *pTemp;           /* The temporary database we vacuum into */
  char *zSql = 0;         /* SQL statements */
  int saved_flags;        /* Saved value of the db->flags */
  int saved_nChange;      /* Saved value of db->nChange */
  int saved_nTotalChange; /* Saved value of db->nTotalChange */
  Db *pDb = 0;            /* Database to detach at end of vacuum */
  int isMemDb;            /* True if vacuuming a :memory: database */
  int nRes;

  /* Save the current value of the write-schema flag before setting it. */
  saved_flags = db->flags;
  saved_nChange = db->nChange;
  saved_nTotalChange = db->nTotalChange;
  db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks;

  if( !db->autoCommit ){
    sqlite3SetString(pzErrMsg, db, "cannot VACUUM from within a transaction");
    rc = SQLITE_ERROR;
    goto end_of_vacuum;
  }
  pMain = db->aDb[0].pBt;
  pMainPager = sqlite3BtreePager(pMain);
  isMemDb = sqlite3PagerFile(pMainPager)->pMethods==0;

  /* Attach the temporary database as 'vacuum_db'. The synchronous pragma
  ** can be set to 'off' for this file, as it is not recovered if a crash
  ** occurs anyway. The integrity of the database is maintained by a
  ** (possibly synchronous) transaction opened on the main database before
  ** sqlite3BtreeCopyFile() is called.
  **
  ** An optimisation would be to use a non-journaled pager.
  ** (Later:) I tried setting "PRAGMA vacuum_db.journal_mode=OFF" but
  ** that actually made the VACUUM run slower.  Very little journalling
  ** actually occurs when doing a vacuum since the vacuum_db is initially
  ** empty.  Only the journal header is written.  Apparently it takes more
  ** time to parse and run the PRAGMA to turn journalling off than it does
  ** to write the journal header file.
  */
  zSql = "ATTACH '' AS vacuum_db;";
  rc = execSql(db, zSql);
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  pDb = &db->aDb[db->nDb-1];
  assert( strcmp(db->aDb[db->nDb-1].zName,"vacuum_db")==0 );
  pTemp = db->aDb[db->nDb-1].pBt;

  nRes = sqlite3BtreeGetReserve(pMain);

  /* A VACUUM cannot change the pagesize of an encrypted database. */
#ifdef SQLITE_HAS_CODEC
  if( db->nextPagesize ){
    extern void sqlite3CodecGetKey(sqlite3*, int, void**, int*);
    int nKey;
    char *zKey;
    sqlite3CodecGetKey(db, 0, (void**)&zKey, &nKey);
    if( nKey ) db->nextPagesize = 0;
  }
#endif

  if( sqlite3BtreeSetPageSize(pTemp, sqlite3BtreeGetPageSize(pMain), nRes)
   || (!isMemDb && sqlite3BtreeSetPageSize(pTemp, db->nextPagesize, nRes))
   || db->mallocFailed 
  ){
    rc = SQLITE_NOMEM;
    goto end_of_vacuum;
  }
  rc = execSql(db, "PRAGMA vacuum_db.synchronous=OFF");
  if( rc!=SQLITE_OK ){
    goto end_of_vacuum;
  }

#ifndef SQLITE_OMIT_AUTOVACUUM
  sqlite3BtreeSetAutoVacuum(pTemp, db->nextAutovac>=0 ? db->nextAutovac :
                                           sqlite3BtreeGetAutoVacuum(pMain));
#endif

  /* Begin a transaction */
  rc = execSql(db, "BEGIN EXCLUSIVE;");
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Query the schema of the main database. Create a mirror schema
  ** in the temporary database.
  */
  rc = execExecSql(db, 
      "SELECT 'CREATE TABLE vacuum_db.' || substr(sql,14) "
      "  FROM sqlite_master WHERE type='table' AND name!='sqlite_sequence'"
      "   AND rootpage>0"
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  rc = execExecSql(db, 
      "SELECT 'CREATE INDEX vacuum_db.' || substr(sql,14)"
      "  FROM sqlite_master WHERE sql LIKE 'CREATE INDEX %' ");
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  rc = execExecSql(db, 
      "SELECT 'CREATE UNIQUE INDEX vacuum_db.' || substr(sql,21) "
      "  FROM sqlite_master WHERE sql LIKE 'CREATE UNIQUE INDEX %'");
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Loop through the tables in the main database. For each, do
  ** an "INSERT INTO vacuum_db.xxx SELECT * FROM xxx;" to copy
  ** the contents to the temporary database.
  */
  rc = execExecSql(db, 
      "SELECT 'INSERT INTO vacuum_db.' || quote(name) "
      "|| ' SELECT * FROM ' || quote(name) || ';'"
      "FROM sqlite_master "
      "WHERE type = 'table' AND name!='sqlite_sequence' "
      "  AND rootpage>0"

  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Copy over the sequence table
  */
  rc = execExecSql(db, 
      "SELECT 'DELETE FROM vacuum_db.' || quote(name) || ';' "
      "FROM vacuum_db.sqlite_master WHERE name='sqlite_sequence' "
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  rc = execExecSql(db, 
      "SELECT 'INSERT INTO vacuum_db.' || quote(name) "
      "|| ' SELECT * FROM ' || quote(name) || ';' "
      "FROM vacuum_db.sqlite_master WHERE name=='sqlite_sequence';"
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;


  /* Copy the triggers, views, and virtual tables from the main database
  ** over to the temporary database.  None of these objects has any
  ** associated storage, so all we have to do is copy their entries
  ** from the SQLITE_MASTER table.
  */
  rc = execSql(db,
      "INSERT INTO vacuum_db.sqlite_master "
      "  SELECT type, name, tbl_name, rootpage, sql"
      "    FROM sqlite_master"
      "   WHERE type='view' OR type='trigger'"
      "      OR (type='table' AND rootpage=0)"
  );
  if( rc ) goto end_of_vacuum;

  /* At this point, unless the main db was completely empty, there is now a
  ** transaction open on the vacuum database, but not on the main database.
  ** Open a btree level transaction on the main database. This allows a
  ** call to sqlite3BtreeCopyFile(). The main database btree level
  ** transaction is then committed, so the SQL level never knows it was
  ** opened for writing. This way, the SQL transaction used to create the
  ** temporary database never needs to be committed.
  */
  if( rc==SQLITE_OK ){
    u32 meta;
    int i;

    /* This array determines which meta values are preserved in the
    ** vacuum.  Even entries are the meta value number and odd entries
    ** are an increment to apply to the meta value after the vacuum.
    ** The increment is used to increase the schema cookie so that other
    ** connections to the same database will know to reread the schema.
    */
    static const unsigned char aCopy[] = {
       1, 1,    /* Add one to the old schema cookie */
       3, 0,    /* Preserve the default page cache size */
       5, 0,    /* Preserve the default text encoding */
       6, 0,    /* Preserve the user version */
    };

    assert( 1==sqlite3BtreeIsInTrans(pTemp) );
    assert( 1==sqlite3BtreeIsInTrans(pMain) );

    /* Copy Btree meta values */
    for(i=0; i<ArraySize(aCopy); i+=2){
      rc = sqlite3BtreeGetMeta(pMain, aCopy[i], &meta);
      if( rc!=SQLITE_OK ) goto end_of_vacuum;
      rc = sqlite3BtreeUpdateMeta(pTemp, aCopy[i], meta+aCopy[i+1]);
      if( rc!=SQLITE_OK ) goto end_of_vacuum;
    }

    rc = sqlite3BtreeCopyFile(pMain, pTemp);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
    rc = sqlite3BtreeCommit(pTemp);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
#ifndef SQLITE_OMIT_AUTOVACUUM
    sqlite3BtreeSetAutoVacuum(pMain, sqlite3BtreeGetAutoVacuum(pTemp));
#endif
    rc = sqlite3BtreeCommit(pMain);
  }

  if( rc==SQLITE_OK ){
    rc = sqlite3BtreeSetPageSize(pMain, sqlite3BtreeGetPageSize(pTemp), nRes);
  }

end_of_vacuum:
  /* Restore the original value of db->flags */
  db->flags = saved_flags;
  db->nChange = saved_nChange;
  db->nTotalChange = saved_nTotalChange;

  /* Currently there is an SQL level transaction open on the vacuum
  ** database. No locks are held on any other files (since the main file
  ** was committed at the btree level). So it is safe to end the transaction
  ** by manually setting the autoCommit flag to true and detaching the
  ** vacuum database. The vacuum_db journal file is deleted when the pager
  ** is closed by the DETACH.
  */
  db->autoCommit = 1;

  if( pDb ){
    sqlite3BtreeClose(pDb->pBt);
    pDb->pBt = 0;
    pDb->pSchema = 0;
  }

  sqlite3ResetInternalSchema(db, 0);

  return rc;
}
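
The aCopy table above bumps the schema cookie by one and preserves the user version across the vacuum. Both values are visible from the public API through PRAGMA schema_version and PRAGMA user_version, so the effect can be checked with a small sketch like the following (demo.db is a placeholder file name):

#include <stdio.h>
#include <sqlite3.h>

/* Read a single-integer PRAGMA result. */
static int read_int_pragma(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt;
  int v = -1;
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)==SQLITE_OK ){
    if( sqlite3_step(pStmt)==SQLITE_ROW ) v = sqlite3_column_int(pStmt, 0);
    sqlite3_finalize(pStmt);
  }
  return v;
}

int main(void){
  sqlite3 *db;
  int before;
  if( sqlite3_open("demo.db", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "PRAGMA user_version=42; CREATE TABLE IF NOT EXISTS t(x);", 0, 0, 0);
  before = read_int_pragma(db, "PRAGMA schema_version");
  sqlite3_exec(db, "VACUUM;", 0, 0, 0);
  printf("user_version still %d\n", read_int_pragma(db, "PRAGMA user_version"));
  printf("schema_version went from %d to %d\n",
         before, read_int_pragma(db, "PRAGMA schema_version"));
  sqlite3_close(db);
  return 0;
}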
Example 7
/* CHANGE 1 of 3: Add function parameter nRes */
SQLITE_PRIVATE int sqlite3RunVacuumForRekey(char **pzErrMsg, sqlite3 *db, int iDb, int nRes){
  int rc = SQLITE_OK;     /* Return code from service routines */
  Btree *pMain;           /* The database being vacuumed */
  Btree *pTemp;           /* The temporary database we vacuum into */
  u16 saved_mDbFlags;     /* Saved value of db->mDbFlags */
  u32 saved_flags;        /* Saved value of db->flags */
  int saved_nChange;      /* Saved value of db->nChange */
  int saved_nTotalChange; /* Saved value of db->nTotalChange */
  u8 saved_mTrace;        /* Saved trace settings */
  Db *pDb = 0;            /* Database to detach at end of vacuum */
  int isMemDb;            /* True if vacuuming a :memory: database */
  /* CHANGE 2 of 3: Do not define local variable nRes */
  /*int nRes;*/               /* Bytes of reserved space at the end of each page */
  int nDb;                /* Number of attached databases */
  const char *zDbMain;    /* Schema name of database to vacuum */

  if (!db->autoCommit) {
    sqlite3SetString(pzErrMsg, db, "cannot VACUUM from within a transaction");
    return SQLITE_ERROR;
  }
  if (db->nVdbeActive>1) {
    sqlite3SetString(pzErrMsg, db, "cannot VACUUM - SQL statements in progress");
    return SQLITE_ERROR;
  }

  /* Save the current value of the database flags so that it can be
  ** restored before returning. Then set the writable-schema flag, and
  ** disable CHECK and foreign key constraints.  */
  saved_flags = db->flags;
  saved_mDbFlags = db->mDbFlags;
  saved_nChange = db->nChange;
  saved_nTotalChange = db->nTotalChange;
  saved_mTrace = db->mTrace;
  db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks;
  db->mDbFlags |= DBFLAG_PreferBuiltin | DBFLAG_Vacuum;
  db->flags &= ~(SQLITE_ForeignKeys | SQLITE_ReverseOrder | SQLITE_CountRows);
  db->mTrace = 0;

  zDbMain = db->aDb[iDb].zDbSName;
  pMain = db->aDb[iDb].pBt;
  isMemDb = sqlite3PagerIsMemdb(sqlite3BtreePager(pMain));

  /* Attach the temporary database as 'vacuum_db'. The synchronous pragma
  ** can be set to 'off' for this file, as it is not recovered if a crash
  ** occurs anyway. The integrity of the database is maintained by a
  ** (possibly synchronous) transaction opened on the main database before
  ** sqlite3BtreeCopyFile() is called.
  **
  ** An optimisation would be to use a non-journaled pager.
  ** (Later:) I tried setting "PRAGMA vacuum_db.journal_mode=OFF" but
  ** that actually made the VACUUM run slower.  Very little journalling
  ** actually occurs when doing a vacuum since the vacuum_db is initially
  ** empty.  Only the journal header is written.  Apparently it takes more
  ** time to parse and run the PRAGMA to turn journalling off than it does
  ** to write the journal header file.
  */
  nDb = db->nDb;
  rc = execSql(db, pzErrMsg, "ATTACH''AS vacuum_db");
  if (rc != SQLITE_OK) goto end_of_vacuum;
  assert((db->nDb - 1) == nDb);
  pDb = &db->aDb[nDb];
  assert(strcmp(pDb->zDbSName, "vacuum_db") == 0);
  pTemp = pDb->pBt;

  /* The call to execSql() to attach the temp database has left the file
  ** locked (as there was more than one active statement when the transaction
  ** to read the schema was concluded). Unlock it here so that this doesn't
  ** cause problems for the call to BtreeSetPageSize() below.  */
  sqlite3BtreeCommit(pTemp);

  /* CHANGE 3 of 3: Do not call sqlite3BtreeGetOptimalReserve */
  /* nRes = sqlite3BtreeGetOptimalReserve(pMain); */

  /* A VACUUM cannot change the pagesize of an encrypted database. */
#ifdef SQLITE_HAS_CODEC
  if (db->nextPagesize) {
    extern void sqlite3CodecGetKey(sqlite3*, int, void**, int*);
    int nKey;
    char *zKey;
    sqlite3CodecGetKey(db, iDb, (void**)&zKey, &nKey);
    if (nKey) db->nextPagesize = 0;
  }
#endif

  sqlite3BtreeSetCacheSize(pTemp, db->aDb[iDb].pSchema->cache_size);
  sqlite3BtreeSetSpillSize(pTemp, sqlite3BtreeSetSpillSize(pMain, 0));
  sqlite3BtreeSetPagerFlags(pTemp, PAGER_SYNCHRONOUS_OFF | PAGER_CACHESPILL);

  /* Begin a transaction and take an exclusive lock on the main database
  ** file. This is done before the sqlite3BtreeGetPageSize(pMain) call below,
  ** to ensure that we do not try to change the page-size on a WAL database.
  */
  rc = execSql(db, pzErrMsg, "BEGIN");
  if (rc != SQLITE_OK) goto end_of_vacuum;
  rc = sqlite3BtreeBeginTrans(pMain, 2);
  if (rc != SQLITE_OK) goto end_of_vacuum;

  /* Do not attempt to change the page size for a WAL database */
  if (sqlite3PagerGetJournalMode(sqlite3BtreePager(pMain))
    == PAGER_JOURNALMODE_WAL) {
    db->nextPagesize = 0;
  }

  if (sqlite3BtreeSetPageSize(pTemp, sqlite3BtreeGetPageSize(pMain), nRes, 0)
    || (!isMemDb && sqlite3BtreeSetPageSize(pTemp, db->nextPagesize, nRes, 0))
    || NEVER(db->mallocFailed)
    ) {
    rc = SQLITE_NOMEM_BKPT;
    goto end_of_vacuum;
  }

#ifndef SQLITE_OMIT_AUTOVACUUM
  sqlite3BtreeSetAutoVacuum(pTemp, db->nextAutovac >= 0 ? db->nextAutovac :
    sqlite3BtreeGetAutoVacuum(pMain));
#endif

  /* Query the schema of the main database. Create a mirror schema
  ** in the temporary database.
  */
  db->init.iDb = nDb; /* force new CREATE statements into vacuum_db */
  rc = execSqlF(db, pzErrMsg,
    "SELECT sql FROM \"%w\".sqlite_master"
    " WHERE type='table'AND name<>'sqlite_sequence'"
    " AND coalesce(rootpage,1)>0",
    zDbMain
  );
  if (rc != SQLITE_OK) goto end_of_vacuum;
  rc = execSqlF(db, pzErrMsg,
    "SELECT sql FROM \"%w\".sqlite_master"
    " WHERE type='index'",
    zDbMain
  );
  if (rc != SQLITE_OK) goto end_of_vacuum;
  db->init.iDb = 0;

  /* Loop through the tables in the main database. For each, do
  ** an "INSERT INTO vacuum_db.xxx SELECT * FROM main.xxx;" to copy
  ** the contents to the temporary database.
  */
  rc = execSqlF(db, pzErrMsg,
    "SELECT'INSERT INTO vacuum_db.'||quote(name)"
    "||' SELECT*FROM\"%w\".'||quote(name)"
    "FROM vacuum_db.sqlite_master "
    "WHERE type='table'AND coalesce(rootpage,1)>0",
    zDbMain
  );
  assert((db->mDbFlags & DBFLAG_Vacuum) != 0);
  db->mDbFlags &= ~DBFLAG_Vacuum;
  if (rc != SQLITE_OK) goto end_of_vacuum;

  /* Copy the triggers, views, and virtual tables from the main database
  ** over to the temporary database.  None of these objects has any
  ** associated storage, so all we have to do is copy their entries
  ** from the SQLITE_MASTER table.
  */
  rc = execSqlF(db, pzErrMsg,
    "INSERT INTO vacuum_db.sqlite_master"
    " SELECT*FROM \"%w\".sqlite_master"
    " WHERE type IN('view','trigger')"
    " OR(type='table'AND rootpage=0)",
    zDbMain
  );
  if (rc) goto end_of_vacuum;

  /* At this point, there is a write transaction open on both the
  ** vacuum database and the main database. Assuming no error occurs,
  ** both transactions are closed by this block - the main database
  ** transaction by sqlite3BtreeCopyFile() and the other by an explicit
  ** call to sqlite3BtreeCommit().
  */
  {
    u32 meta;
    int i;

    /* This array determines which meta values are preserved in the
    ** vacuum.  Even entries are the meta value number and odd entries
    ** are an increment to apply to the meta value after the vacuum.
    ** The increment is used to increase the schema cookie so that other
    ** connections to the same database will know to reread the schema.
    */
    static const unsigned char aCopy[] = {
      BTREE_SCHEMA_VERSION,     1,  /* Add one to the old schema cookie */
      BTREE_DEFAULT_CACHE_SIZE, 0,  /* Preserve the default page cache size */
      BTREE_TEXT_ENCODING,      0,  /* Preserve the text encoding */
      BTREE_USER_VERSION,       0,  /* Preserve the user version */
      BTREE_APPLICATION_ID,     0,  /* Preserve the application id */
    };

    assert(1 == sqlite3BtreeIsInTrans(pTemp));
    assert(1 == sqlite3BtreeIsInTrans(pMain));

    /* Copy Btree meta values */
    for (i = 0; i<ArraySize(aCopy); i += 2) {
      /* GetMeta() and UpdateMeta() cannot fail in this context because
      ** we already have page 1 loaded into cache and marked dirty. */
      sqlite3BtreeGetMeta(pMain, aCopy[i], &meta);
      rc = sqlite3BtreeUpdateMeta(pTemp, aCopy[i], meta + aCopy[i + 1]);
      if (NEVER(rc != SQLITE_OK)) goto end_of_vacuum;
    }

    rc = sqlite3BtreeCopyFile(pMain, pTemp);
    if (rc != SQLITE_OK) goto end_of_vacuum;
    rc = sqlite3BtreeCommit(pTemp);
    if (rc != SQLITE_OK) goto end_of_vacuum;
#ifndef SQLITE_OMIT_AUTOVACUUM
    sqlite3BtreeSetAutoVacuum(pMain, sqlite3BtreeGetAutoVacuum(pTemp));
#endif
  }

  assert(rc == SQLITE_OK);
  rc = sqlite3BtreeSetPageSize(pMain, sqlite3BtreeGetPageSize(pTemp), nRes, 1);

end_of_vacuum:
  /* Restore the original value of db->flags */
  db->init.iDb = 0;
  db->mDbFlags = saved_mDbFlags;
  db->flags = saved_flags;
  db->nChange = saved_nChange;
  db->nTotalChange = saved_nTotalChange;
  db->mTrace = saved_mTrace;
  sqlite3BtreeSetPageSize(pMain, -1, -1, 1);

  /* Currently there is an SQL level transaction open on the vacuum
  ** database. No locks are held on any other files (since the main file
  ** was committed at the btree level). So it is safe to end the transaction
  ** by manually setting the autoCommit flag to true and detaching the
  ** vacuum database. The vacuum_db journal file is deleted when the pager
  ** is closed by the DETACH.
  */
  db->autoCommit = 1;

  if (pDb) {
    sqlite3BtreeClose(pDb->pBt);
    pDb->pBt = 0;
    pDb->pSchema = 0;
  }

  /* This both clears the schemas and reduces the size of the db->aDb[]
  ** array. */
  sqlite3ResetAllSchemasOfConnection(db);

  return rc;
}
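
This variant exists so that a codec build can rewrite every page under a new key while vacuuming. From the application side a rekey is normally requested through sqlite3_key()/sqlite3_rekey(), which are codec-extension entry points (SQLCipher, SEE, wxSQLite3-style builds) and not part of stock SQLite; whether they route through a vacuum-for-rekey routine like the one above depends on the build. A hedged sketch:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  if( sqlite3_open("secret.db", &db)!=SQLITE_OK ) return 1;    /* placeholder name */
  sqlite3_key(db, "old-passphrase", 14);           /* key the existing database */
  sqlite3_exec(db, "SELECT count(*) FROM sqlite_master;", 0, 0, 0); /* force keying */
  /* Re-encrypt every page under the new key; in builds like the one above
  ** this is where a vacuum-for-rekey routine gets involved. */
  if( sqlite3_rekey(db, "new-passphrase", 14)!=SQLITE_OK ){
    printf("rekey failed\n");
  }
  sqlite3_close(db);
  return 0;
}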
Example 8
/*
** A read or write transaction may or may not be active on database handle
** db. If a transaction is active, commit it. If there is a
** write-transaction spanning more than one database file, this routine
** takes care of the master journal trickery.
*/
static int vdbeCommit(sqlite3 *db){
  int i;
  int nTrans = 0;  /* Number of databases with an active write-transaction */
  int rc = SQLITE_OK;
  int needXcommit = 0;

  for(i=0; i<db->nDb; i++){ 
    Btree *pBt = db->aDb[i].pBt;
    if( pBt && sqlite3BtreeIsInTrans(pBt) ){
      needXcommit = 1;
      if( i!=1 ) nTrans++;
    }
  }

  /* If there are any write-transactions at all, invoke the commit hook */
  if( needXcommit && db->xCommitCallback ){
    int rc;
    sqlite3SafetyOff(db);
    rc = db->xCommitCallback(db->pCommitArg);
    sqlite3SafetyOn(db);
    if( rc ){
      return SQLITE_CONSTRAINT;
    }
  }

  /* The simple case - no more than one database file (not counting the
  ** TEMP database) has a transaction active.   There is no need for the
  ** master-journal.
  **
  ** If the return value of sqlite3BtreeGetFilename() is a zero length
  ** string, it means the main database is :memory:.  In that case we do
  ** not support atomic multi-file commits, so use the simple case then
  ** too.
  */
  if( 0==strlen(sqlite3BtreeGetFilename(db->aDb[0].pBt)) || nTrans<=1 ){
    for(i=0; rc==SQLITE_OK && i<db->nDb; i++){ 
      Btree *pBt = db->aDb[i].pBt;
      if( pBt ){
        rc = sqlite3BtreeSync(pBt, 0);
      }
    }

    /* Do the commit only if all databases successfully synced */
    if( rc==SQLITE_OK ){
      for(i=0; i<db->nDb; i++){
        Btree *pBt = db->aDb[i].pBt;
        if( pBt ){
          sqlite3BtreeCommit(pBt);
        }
      }
    }
  }

  /* The complex case - There is a multi-file write-transaction active.
  ** This requires a master journal file to ensure the transaction is
  ** committed atomically.
  */
  else{
    char *zMaster = 0;   /* File-name for the master journal */
    char const *zMainFile = sqlite3BtreeGetFilename(db->aDb[0].pBt);
    OsFile master;

    /* Select a master journal file name */
    do {
      u32 random;
      sqliteFree(zMaster);
      sqlite3Randomness(sizeof(random), &random);
      zMaster = sqlite3MPrintf("%s-mj%08X", zMainFile, random&0x7fffffff);
      if( !zMaster ){
        return SQLITE_NOMEM;
      }
    }while( sqlite3OsFileExists(zMaster) );

    /* Open the master journal. */
    memset(&master, 0, sizeof(master));
    rc = sqlite3OsOpenExclusive(zMaster, &master, 0);
    if( rc!=SQLITE_OK ){
      sqliteFree(zMaster);
      return rc;
    }
 
    /* Write the name of each database file in the transaction into the new
    ** master journal file. If an error occurs at this point close
    ** and delete the master journal file. All the individual journal files
    ** still have 'null' as the master journal pointer, so they will roll
    ** back independently if a failure occurs.
    */
    for(i=0; i<db->nDb; i++){ 
      Btree *pBt = db->aDb[i].pBt;
      if( i==1 ) continue;   /* Ignore the TEMP database */
      if( pBt && sqlite3BtreeIsInTrans(pBt) ){
        char const *zFile = sqlite3BtreeGetJournalname(pBt);
        if( zFile[0]==0 ) continue;  /* Ignore :memory: databases */
        rc = sqlite3OsWrite(&master, zFile, strlen(zFile)+1);
        if( rc!=SQLITE_OK ){
          sqlite3OsClose(&master);
          sqlite3OsDelete(zMaster);
          sqliteFree(zMaster);
          return rc;
        }
      }
    }


    /* Sync the master journal file. Before doing this, open the directory
    ** the master journal file is stored in so that it gets synced too.
    */
    zMainFile = sqlite3BtreeGetDirname(db->aDb[0].pBt);
    rc = sqlite3OsOpenDirectory(zMainFile, &master);
    if( rc!=SQLITE_OK ){
      sqlite3OsClose(&master);
      sqlite3OsDelete(zMaster);
      sqliteFree(zMaster);
      return rc;
    }
    rc = sqlite3OsSync(&master);
    if( rc!=SQLITE_OK ){
      sqlite3OsClose(&master);
      sqliteFree(zMaster);
      return rc;
    }

    /* Sync all the db files involved in the transaction. The same call
    ** sets the master journal pointer in each individual journal. If
    ** an error occurs here, do not delete the master journal file.
    **
    ** If the error occurs during the first call to sqlite3BtreeSync(),
    ** then there is a chance that the master journal file will be
    ** orphaned. But we cannot delete it, in case the master journal
    ** file name was written into the journal file before the failure
    ** occurred.
    */
    for(i=0; i<db->nDb; i++){ 
      Btree *pBt = db->aDb[i].pBt;
      if( pBt && sqlite3BtreeIsInTrans(pBt) ){
        rc = sqlite3BtreeSync(pBt, zMaster);
        if( rc!=SQLITE_OK ){
          sqlite3OsClose(&master);
          sqliteFree(zMaster);
          return rc;
        }
      }
    }
    sqlite3OsClose(&master);

    /* Delete the master journal file. This commits the transaction. After
    ** doing this the directory is synced again before any individual
    ** transaction files are deleted.
    */
    rc = sqlite3OsDelete(zMaster);
    assert( rc==SQLITE_OK );
    sqliteFree(zMaster);
    zMaster = 0;
    rc = sqlite3OsSyncDirectory(zMainFile);
    if( rc!=SQLITE_OK ){
      /* This is not good. The master journal file has been deleted, but
      ** the directory sync failed. There is no completely safe course of
      ** action from here. The individual journals contain the name of the
      ** master journal file, but there is no way of knowing if that
      ** master journal exists now or if it will exist after the operating
      ** system crash that may follow the fsync() failure.
      */
      assert(0);
      sqliteFree(zMaster);
      return rc;
    }

    /* All files and directories have already been synced, so the following
    ** calls to sqlite3BtreeCommit() are only closing files and deleting
    ** journals. If something goes wrong while this is happening we don't
    ** really care. The integrity of the transaction is already guaranteed,
    ** but some stray 'cold' journals may be lying around. Returning an
    ** error code won't help matters.
    */
    for(i=0; i<db->nDb; i++){ 
      Btree *pBt = db->aDb[i].pBt;
      if( pBt ){
        sqlite3BtreeCommit(pBt);
      }
    }
  }

  return rc;
}
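
The master-journal branch only runs when a single SQL transaction has written to more than one file-backed, non-TEMP database. A minimal sketch that produces exactly that situation from the public API (main.db and second.db are placeholder names):

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  if( sqlite3_open("main.db", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "ATTACH 'second.db' AS aux;", 0, 0, &zErr);

  /* One transaction writing to two file-backed databases: at COMMIT the
  ** commit logic above sees nTrans==2 and creates a master journal so the
  ** two-file commit is atomic. */
  sqlite3_exec(db,
      "BEGIN;"
      "CREATE TABLE IF NOT EXISTS t1(x);"
      "CREATE TABLE IF NOT EXISTS aux.t2(y);"
      "INSERT INTO t1 VALUES(1);"
      "INSERT INTO aux.t2 VALUES(2);"
      "COMMIT;", 0, 0, &zErr);
  if( zErr ){ printf("error: %s\n", zErr); sqlite3_free(zErr); }
  sqlite3_close(db);
  return 0;
}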
Example 9
/*
** Copy nPage pages from the source b-tree to the destination.
*/
int sqlite3_backup_step(sqlite3_backup *p, int nPage) {
	int returnCode, pages;
	Parse parse;
	DB_ENV *dbenv;
	BtShared *pBtDest, *pBtSrc;

	pBtDest = pBtSrc = NULL;

	if (p->rc != SQLITE_OK || nPage == 0)
		return p->rc;

	sqlite3_mutex_enter(p->pSrcDb->mutex);
	sqlite3_mutex_enter(p->pDestDb->mutex);

	/*
	 * Make sure the schema has been read in, so the keyInfo
	 * can be retrieved for the indexes.  No-op if already read.
	 * If the schema has not been read then an update must have
	 * changed it, so backup will restart.
	 */
	memset(&parse, 0, sizeof(parse));
	parse.db = p->pSrcDb;
	p->rc = sqlite3ReadSchema(&parse);
	if (p->rc != SQLITE_OK)
		goto err;

	/*
	 * This process updated the source database, so
	 * the backup process has to restart.
	 */
	if (p->pSrc->updateDuringBackup > p->lastUpdate) {
		p->rc = SQLITE_LOCKED;
		if ((p->rc = backupCleanup(p)) != SQLITE_OK)
			goto err;
		else
			backupReset(p);
	}

	pages = nPage;

	if (!p->cleaned) {
		const char *home;
		const char inmem[9] = ":memory:";
		int storage;

		pBtDest = p->pDest->pBt;
		storage = p->pDest->pBt->dbStorage;
		if (storage == DB_STORE_NAMED)
			p->openDest = 1;
		p->rc = btreeDeleteEnvironment(p->pDest, p->fullName, 1);
		if (storage == DB_STORE_INMEM && strcmp(p->destName, "temp")
		    != 0)
			home = inmem;
		else
			home = p->fullName;
		p->pDest = p->pDestDb->aDb[p->iDb].pBt;
		if (p->rc != SQLITE_OK)
			goto err;
		/*
		 * Call sqlite3OpenTempDatabase instead of
		 * sqlite3BtreeOpen, because sqlite3OpenTempDatabase
		 * automatically chooses the right flags before calling
		 * sqlite3BtreeOpen.
		 */
		if (strcmp(p->destName, "temp") == 0) {
			memset(&parse, 0, sizeof(parse));
			parse.db = p->pDestDb;
			p->rc = sqlite3OpenTempDatabase(&parse);
			p->pDest = p->pDestDb->aDb[p->iDb].pBt;
		} else {
			p->rc = sqlite3BtreeOpen(home, p->pDestDb,
			    &p->pDest, SQLITE_DEFAULT_CACHE_SIZE |
			    SQLITE_OPEN_MAIN_DB, p->pDestDb->openFlags);
			p->pDestDb->aDb[p->iDb].pBt = p->pDest;
			if (p->rc == SQLITE_OK) {
				p->pDestDb->aDb[p->iDb].pSchema =
				    sqlite3SchemaGet(p->pDestDb, p->pDest);
				if (!p->pDestDb->aDb[p->iDb].pSchema)
					p->rc = SQLITE_NOMEM;
			} else
				p->pDestDb->aDb[p->iDb].pSchema = NULL;
		}

		if (p->pDest)
			p->pDest->nBackup++;
#ifdef SQLITE_HAS_CODEC
		/*
		 * In the case of a temporary source database, use the
		 * encryption of the main database.
		 */
		if (strcmp(p->srcName, "temp") == 0) {
			 int iDb = sqlite3FindDbName(p->pSrcDb, "main");
			 pBtSrc = p->pSrcDb->aDb[iDb].pBt->pBt;
		} else
			 pBtSrc = p->pSrc->pBt;
		if (p->rc == SQLITE_OK) {
			if (p->iDb == 0)
				p->rc = sqlite3_key(p->pDestDb,
				    pBtSrc->encrypt_pwd,
				    pBtSrc->encrypt_pwd_len);
			else
				p->rc = sqlite3CodecAttach(p->pDestDb, p->iDb,
				    pBtSrc->encrypt_pwd,
				    pBtSrc->encrypt_pwd_len);
		}
#endif
		if (p->rc != SQLITE_OK)
			goto err;
		p->cleaned = 1;
	}

	/*
	 * Begin a transaction. Unfortunately the lock on the schema has to
	 * be released to allow the sqlite_master table to be cleared, which
	 * could allow another thread to alter it; however, accessing the
	 * backup database during a backup is already an illegal condition
	 * with undefined results.
	 */
	if (!sqlite3BtreeIsInTrans(p->pDest)) {
		if (!p->pDest->connected) {
			p->rc = btreeOpenEnvironment(p->pDest, 1);
			if (p->rc != SQLITE_OK)
				goto err;
		}
		if ((p->rc = sqlite3BtreeBeginTrans(p->pDest, 2))
			!= SQLITE_OK)
			goto err;
	}
	/* Only this process should be accessing the backup environment. */
	if (p->pDest->pBt->nRef > 1) {
		p->rc = SQLITE_BUSY;
		goto err;
	}

	/*
	 * Begin a transaction, a lock error or update could have caused
	 * it to be released in a previous call to step.
	 */
	if (!p->srcTxn) {
		dbenv = p->pSrc->pBt->dbenv;
		if ((p->rc = dberr2sqlite(dbenv->txn_begin(dbenv,
		    p->pSrc->family_txn, &p->srcTxn, 0))) != SQLITE_OK)
			goto err;
	}

	/*
	 * An update could have dropped or created a table, so recalculate
	 * the list of tables.
	 */
	if (!p->tables) {
		if ((p->rc = btreeGetPageCount(p->pSrc,
		    &p->tables, &p->nPagecount, p->srcTxn)) != SQLITE_OK) {
				sqlite3Error(p->pSrcDb, p->rc, 0);
				goto err;
		}
		p->nRemaining = p->nPagecount;
	}

	/* Copy the pages. */
	p->rc = btreeCopyPages(p, &pages);
	if (p->rc == SQLITE_DONE) {
		p->nRemaining = 0;
		sqlite3ResetInternalSchema(p->pDestDb, p->iDb);
		memset(&parse, 0, sizeof(parse));
		parse.db = p->pDestDb;
		p->rc = sqlite3ReadSchema(&parse);
		if (p->rc == SQLITE_OK)
			p->rc = SQLITE_DONE;
	} else if (p->rc != SQLITE_OK)
		goto err;

	/*
	 * The number of pages left to copy is an estimate, so
	 * do not let the number go to zero unless we are really
	 * done.
	 */
	if (p->rc != SQLITE_DONE) {
		if ((u32)pages >= p->nRemaining)
			p->nRemaining = 1;
		else
			p->nRemaining -= pages;
	}

err:	/*
	 * This process updated the source database, so
	 * the backup process has to restart.
	 */
	if (p->pSrc->updateDuringBackup > p->lastUpdate &&
	    (p->rc == SQLITE_OK || p->rc == SQLITE_DONE)) {
		int cleanCode;
		returnCode = p->rc;
		p->rc = SQLITE_LOCKED;
		if ((cleanCode = backupCleanup(p)) != SQLITE_OK)
			returnCode = p->rc = cleanCode;
		else
			backupReset(p);
	} else {
		returnCode = backupCleanup(p);
		if (returnCode == SQLITE_OK ||
		    (p->rc != SQLITE_OK && p->rc != SQLITE_DONE))
			returnCode = p->rc;
		else
			p->rc = returnCode;
	}
	/*
	 * On a locked or busy error the backup process is rolled back,
	 * but can be restarted by the user.
	 */
	if ( returnCode == SQLITE_LOCKED || returnCode == SQLITE_BUSY )
		backupReset(p);
	else if ( returnCode != SQLITE_OK && returnCode != SQLITE_DONE ) {
		sqlite3Error(p->pDestDb, p->rc, 0);
	}
	sqlite3_mutex_leave(p->pDestDb->mutex);
	sqlite3_mutex_leave(p->pSrcDb->mutex);
	return (returnCode);
}
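
Whatever the storage backend, this function is reached through SQLite's public online-backup API. A minimal sketch of the usual driver loop (file names are placeholders):

#include <stdio.h>
#include <sqlite3.h>

/* Copy the main database of pFrom into pTo using the online backup API. */
static int backup_db(sqlite3 *pTo, sqlite3 *pFrom){
  sqlite3_backup *p;
  int rc;
  p = sqlite3_backup_init(pTo, "main", pFrom, "main");
  if( p==0 ) return sqlite3_errcode(pTo);
  do{
    rc = sqlite3_backup_step(p, 64);       /* copy up to 64 pages per call */
    printf("%d of %d pages remaining\n",
           sqlite3_backup_remaining(p), sqlite3_backup_pagecount(p));
    if( rc==SQLITE_BUSY || rc==SQLITE_LOCKED ) sqlite3_sleep(100);
  }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
  return sqlite3_backup_finish(p);         /* SQLITE_OK if everything was copied */
}

int main(void){
  sqlite3 *src, *dst;
  int rc;
  if( sqlite3_open("live.db", &src)!=SQLITE_OK ) return 1;
  if( sqlite3_open("copy.db", &dst)!=SQLITE_OK ){ sqlite3_close(src); return 1; }
  rc = backup_db(dst, src);
  printf("backup finished with rc=%d\n", rc);
  sqlite3_close(src);
  sqlite3_close(dst);
  return 0;
}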
Example 10
/* Close or free all handles and commit or rollback the transaction. */
static int backupCleanup(sqlite3_backup *p)
{
	int rc, rc2, ret;
	void *app;
	DB *db;

	rc = rc2 = SQLITE_OK;

	if (!p || p->rc == SQLITE_OK)
		return rc;

	rc2 = sqlite3BtreeCloseCursor(&p->destCur);
	if (rc2 != SQLITE_OK)
		rc = rc2;
	if (p->srcCur) {
		db = p->srcCur->dbp;
		app = db->app_private;
		if ((ret = p->srcCur->close(p->srcCur)) == 0)
			ret = db->close(db, DB_NOSYNC);
		rc2 = dberr2sqlite(ret);
		/*
		 * The KeyInfo was allocated in btreeSetupIndex,
		 * so have to deallocate it here.
		 */
		if (app)
			sqlite3DbFree(p->pSrcDb, app);
	}
	if (rc2 != SQLITE_OK)
		rc = rc2;
	p->srcCur = 0;

	/*
	 * May retry on a locked or busy error, so keep
	 * these values.
	 */
	if (p->rc != SQLITE_LOCKED && p->rc != SQLITE_BUSY) {
		if (p->srcName)
			sqlite3_free(p->srcName);
		if (p->destName != 0)
			sqlite3_free(p->destName);
		p->srcName = p->destName = NULL;
	}

	if (p->tables != 0)
			sqlite3_free(p->tables);
	p->tables = NULL;

	if (p->pSrc->nBackup)
		p->pSrc->nBackup--;
	if (p->pDest != NULL && p->pDest->nBackup)
		p->pDest->nBackup--;
	if (p->srcTxn) {
		if (p->rc == SQLITE_DONE)
			ret = p->srcTxn->commit(p->srcTxn, 0);
		else
			ret = p->srcTxn->abort(p->srcTxn);
		rc2 = dberr2sqlite(ret);
	}
	p->srcTxn = 0;
	if (rc2 != SQLITE_OK && sqlite3BtreeIsInTrans(p->pDest)) {
		rc = rc2;
		if (p->rc == SQLITE_DONE)
			rc2 = sqlite3BtreeCommit(p->pDest);
		else
			rc2 = sqlite3BtreeRollback(p->pDest);
		if (rc2 != SQLITE_OK)
			rc = rc2;
	}

	if (p->pDest && p->openDest) {
		char path[512];

		/*
		 * If the backup completed successfully, delete the old
		 * backup; on error, delete the current database and
		 * restore the old backup.
		 */
		sqlite3_snprintf(sizeof(path), path,
		    "%s%s", p->fullName, BACKUP_SUFFIX);
		if (p->rc == SQLITE_DONE) {
			rc2 = btreeDeleteEnvironment(p->pDest, path, 0);
		} else {
			rc2 = btreeDeleteEnvironment(p->pDest, p->fullName, 0);
			if (!__os_exists(NULL, path, 0))
				__os_rename(NULL, path, p->fullName, 0);
		}
		if (rc == SQLITE_OK)
			rc = rc2;
		if (rc == SQLITE_OK) {
			p->pDest = NULL;
			p->pDestDb->aDb[p->iDb].pBt = NULL;
			p->openDest = 0;
			rc = sqlite3BtreeOpen(p->fullName, p->pDestDb,
			    &p->pDest,
			    SQLITE_DEFAULT_CACHE_SIZE | SQLITE_OPEN_MAIN_DB,
			    p->pDestDb->openFlags);
			p->pDestDb->aDb[p->iDb].pBt = p->pDest;
			if (rc == SQLITE_OK) {
				p->pDestDb->aDb[p->iDb].pSchema =
				    sqlite3SchemaGet(p->pDestDb, p->pDest);
				if (!p->pDestDb->aDb[p->iDb].pSchema)
					p->rc = SQLITE_NOMEM;
			} else
				p->pDestDb->aDb[p->iDb].pSchema = NULL;
			if (rc == SQLITE_OK)
				p->pDest->pBt->db_oflags |= DB_CREATE;
			/*
			 * Have to delete the schema here on error to avoid
			 * assert failure.
			 */
			if (p->pDest == NULL &&
			    p->pDestDb->aDb[p->iDb].pSchema != NULL) {
				sqlite3SchemaClear(
				    p->pDestDb->aDb[p->iDb].pSchema);
				p->pDestDb->aDb[p->iDb].pSchema = NULL;
			}
#ifdef SQLITE_HAS_CODEC
			if (rc == SQLITE_OK) {
				if (p->iDb == 0)
					rc = sqlite3_key(p->pDestDb,
					    p->pSrc->pBt->encrypt_pwd,
					    p->pSrc->pBt->encrypt_pwd_len);
				else
					rc = sqlite3CodecAttach(p->pDestDb,
					    p->iDb, p->pSrc->pBt->encrypt_pwd,
					    p->pSrc->pBt->encrypt_pwd_len);
			}
#endif
		}
	}
	if (p->rc != SQLITE_LOCKED && p->rc != SQLITE_BUSY) {
		if (p->fullName != 0)
			sqlite3_free(p->fullName);
		p->fullName = NULL;
	}
	p->lastUpdate = p->pSrc->updateDuringBackup;
	return rc;
}
Example 11
int sqlcipher_codec_ctx_migrate(codec_ctx *ctx) {
  u32 meta;
  int rc = 0;
  int command_idx = 0;
  int password_sz;
  int saved_flags;
  int saved_nChange;
  int saved_nTotalChange;
  void (*saved_xTrace)(void*,const char*);
  Db *pDb = 0;
  sqlite3 *db = ctx->pBt->db;
  const char *db_filename = sqlite3_db_filename(db, "main");
  char *migrated_db_filename = sqlite3_mprintf("%s-migrated", db_filename);
  char *pragma_hmac_off = "PRAGMA cipher_use_hmac = OFF;";
  char *pragma_4k_kdf_iter = "PRAGMA kdf_iter = 4000;";
  char *pragma_1x_and_4k;
  char *set_user_version;
  char *key;
  int key_sz;
  int user_version = 0;
  int upgrade_1x_format = 0;
  int upgrade_4k_format = 0;
  static const unsigned char aCopy[] = {
    BTREE_SCHEMA_VERSION,     1,  /* Add one to the old schema cookie */
    BTREE_DEFAULT_CACHE_SIZE, 0,  /* Preserve the default page cache size */
    BTREE_TEXT_ENCODING,      0,  /* Preserve the text encoding */
    BTREE_USER_VERSION,       0,  /* Preserve the user version */
    BTREE_APPLICATION_ID,     0,  /* Preserve the application id */
  };


  key_sz = ctx->read_ctx->pass_sz + 1;
  key = sqlcipher_malloc(key_sz);
  memset(key, 0, key_sz);
  memcpy(key, ctx->read_ctx->pass, ctx->read_ctx->pass_sz);

  if(db_filename){
    const char* commands[5];
    char *attach_command = sqlite3_mprintf("ATTACH DATABASE '%s-migrated' as migrate KEY '%q';",
                                            db_filename, key);

    int rc = sqlcipher_check_connection(db_filename, key, ctx->read_ctx->pass_sz, "", &user_version);
    if(rc == SQLITE_OK){
      CODEC_TRACE(("No upgrade required - exiting\n"));
      goto exit;
    }
    
    // Version 2 - check for 4k with hmac format 
    rc = sqlcipher_check_connection(db_filename, key, ctx->read_ctx->pass_sz, pragma_4k_kdf_iter, &user_version);
    if(rc == SQLITE_OK) {
      CODEC_TRACE(("Version 2 format found\n"));
      upgrade_4k_format = 1;
    }

    // Version 1 - check both no hmac and 4k together
    pragma_1x_and_4k = sqlite3_mprintf("%s%s", pragma_hmac_off,
                                             pragma_4k_kdf_iter);
    rc = sqlcipher_check_connection(db_filename, key, ctx->read_ctx->pass_sz, pragma_1x_and_4k, &user_version);
    sqlite3_free(pragma_1x_and_4k);
    if(rc == SQLITE_OK) {
      CODEC_TRACE(("Version 1 format found\n"));
      upgrade_1x_format = 1;
      upgrade_4k_format = 1;
    }

    if(upgrade_1x_format == 0 && upgrade_4k_format == 0) {
      CODEC_TRACE(("Upgrade format not determined\n"));
      goto handle_error;
    }

    set_user_version = sqlite3_mprintf("PRAGMA migrate.user_version = %d;", user_version);
    commands[0] = upgrade_4k_format == 1 ? pragma_4k_kdf_iter : "";
    commands[1] = upgrade_1x_format == 1 ? pragma_hmac_off : "";
    commands[2] = attach_command;
    commands[3] = "SELECT sqlcipher_export('migrate');";
    commands[4] = set_user_version;
      
    for(command_idx = 0; command_idx < ArraySize(commands); command_idx++){
      const char *command = commands[command_idx];
      if(strcmp(command, "") == 0){
        continue;
      }
      rc = sqlite3_exec(db, command, NULL, NULL, NULL);
      if(rc != SQLITE_OK){
        break;
      }
    }
    sqlite3_free(attach_command);
    sqlite3_free(set_user_version);
    sqlcipher_free(key, key_sz);
    
    if(rc == SQLITE_OK){
      Btree *pDest;
      Btree *pSrc;
      int i = 0;

      if( !db->autoCommit ){
        CODEC_TRACE(("cannot migrate from within a transaction"));
        goto handle_error;
      }
      if( db->nVdbeActive>1 ){
        CODEC_TRACE(("cannot migrate - SQL statements in progress"));
        goto handle_error;
      }

      /* Save the current value of the database flags so that it can be
      ** restored before returning. Then set the writable-schema flag, and
      ** disable CHECK and foreign key constraints.  */
      saved_flags = db->flags;
      saved_nChange = db->nChange;
      saved_nTotalChange = db->nTotalChange;
      saved_xTrace = db->xTrace;
      db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks | SQLITE_PreferBuiltin;
      db->flags &= ~(SQLITE_ForeignKeys | SQLITE_ReverseOrder);
      db->xTrace = 0;
      
      pDest = db->aDb[0].pBt;
      pDb = &(db->aDb[db->nDb-1]);
      pSrc = pDb->pBt;
      
      rc = sqlite3_exec(db, "BEGIN;", NULL, NULL, NULL);
      rc = sqlite3BtreeBeginTrans(pSrc, 2);
      rc = sqlite3BtreeBeginTrans(pDest, 2);
      
      assert( 1==sqlite3BtreeIsInTrans(pDest) );
      assert( 1==sqlite3BtreeIsInTrans(pSrc) );

      sqlite3CodecGetKey(db, db->nDb - 1, (void**)&key, &password_sz);
      sqlite3CodecAttach(db, 0, key, password_sz);
      sqlite3pager_get_codec(pDest->pBt->pPager, (void**)&ctx);
      
      ctx->skip_read_hmac = 1;      
      for(i=0; i<ArraySize(aCopy); i+=2){
        sqlite3BtreeGetMeta(pSrc, aCopy[i], &meta);
        rc = sqlite3BtreeUpdateMeta(pDest, aCopy[i], meta+aCopy[i+1]);
        if( NEVER(rc!=SQLITE_OK) ) goto handle_error; 
      }
      rc = sqlite3BtreeCopyFile(pDest, pSrc);
      ctx->skip_read_hmac = 0;
      if( rc!=SQLITE_OK ) goto handle_error;
      rc = sqlite3BtreeCommit(pDest);

      db->flags = saved_flags;
      db->nChange = saved_nChange;
      db->nTotalChange = saved_nTotalChange;
      db->xTrace = saved_xTrace;
      db->autoCommit = 1;
      if( pDb ){
        sqlite3BtreeClose(pDb->pBt);
        pDb->pBt = 0;
        pDb->pSchema = 0;
      }
      sqlite3ResetAllSchemasOfConnection(db);
      remove(migrated_db_filename);
      sqlite3_free(migrated_db_filename);
    } else {
      CODEC_TRACE(("*** migration failure** \n\n"));
    }
    
  }
  goto exit;

 handle_error:
  CODEC_TRACE(("An error occurred attempting to migrate the database\n"));
  rc = SQLITE_ERROR;

 exit:
  return rc;
}
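
From the application side this migration path is normally reached through SQLCipher's PRAGMA cipher_migrate after keying an older-format database; both that pragma and sqlite3_key() are SQLCipher extensions rather than core SQLite. A hedged sketch:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  if( sqlite3_open("old-format.db", &db)!=SQLITE_OK ) return 1;  /* placeholder name */
  sqlite3_key(db, "passphrase", 10);
  /* Ask SQLCipher to detect an older on-disk format and, if necessary, run a
  ** migration routine like the one above to rewrite the file in the current
  ** format.  The pragma reports a status row; 0 conventionally means success. */
  if( sqlite3_prepare_v2(db, "PRAGMA cipher_migrate;", -1, &pStmt, 0)==SQLITE_OK ){
    if( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("cipher_migrate returned %s\n",
             (const char*)sqlite3_column_text(pStmt, 0));
    }
    sqlite3_finalize(pStmt);
  }
  sqlite3_close(db);
  return 0;
}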
Example 12
/*
** This routine implements the OP_Vacuum opcode of the VDBE.
*/
int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){
  int rc = SQLITE_OK;     /* Return code from service routines */
#if !defined(SQLITE_OMIT_VACUUM) || SQLITE_OMIT_VACUUM
  const char *zFilename;  /* full pathname of the database file */
  int nFilename;          /* number of characters  in zFilename[] */
  char *zTemp = 0;        /* a temporary file in same directory as zFilename */
  int i;                  /* Loop counter */
  Btree *pMain;           /* The database being vacuumed */
  Btree *pTemp;
  char *zSql = 0;

  if( !db->autoCommit ){
    sqlite3SetString(pzErrMsg, "cannot VACUUM from within a transaction", 
       (char*)0);
    rc = SQLITE_ERROR;
    goto end_of_vacuum;
  }

  /* Get the full pathname of the database file and create a
  ** temporary filename in the same directory as the original file.
  */
  pMain = db->aDb[0].pBt;
  zFilename = sqlite3BtreeGetFilename(pMain);
  assert( zFilename );
  if( zFilename[0]=='\0' ){
    /* The in-memory database. Do nothing. Return directly to avoid causing
    ** an error trying to DETACH the vacuum_db (which never got attached)
    ** in the exit-handler.
    */
    return SQLITE_OK;
  }
  nFilename = strlen(zFilename);
  zTemp = sqliteMalloc( nFilename+100 );
  if( zTemp==0 ){
    rc = SQLITE_NOMEM;
    goto end_of_vacuum;
  }
  strcpy(zTemp, zFilename);
  i = 0;
  do {
    zTemp[nFilename] = '-';
    randomName((unsigned char*)&zTemp[nFilename+1]);
  } while( i<10 && sqlite3OsFileExists(zTemp) );

  /* Attach the temporary database as 'vacuum_db'. The synchronous pragma
  ** can be set to 'off' for this file, as it is not recovered if a crash
  ** occurs anyway. The integrity of the database is maintained by a
  ** (possibly synchronous) transaction opened on the main database before
  ** sqlite3BtreeCopyFile() is called.
  **
  ** An optimisation would be to use a non-journaled pager.
  */
  zSql = sqlite3MPrintf("ATTACH '%q' AS vacuum_db;", zTemp);
  if( !zSql ){
    rc = SQLITE_NOMEM;
    goto end_of_vacuum;
  }
  rc = execSql(db, zSql);
  sqliteFree(zSql);
  zSql = 0;
  if( rc!=SQLITE_OK ) goto end_of_vacuum;
  assert( strcmp(db->aDb[db->nDb-1].zName,"vacuum_db")==0 );
  pTemp = db->aDb[db->nDb-1].pBt;
  sqlite3BtreeSetPageSize(pTemp, sqlite3BtreeGetPageSize(pMain),
     sqlite3BtreeGetReserve(pMain));
  assert( sqlite3BtreeGetPageSize(pTemp)==sqlite3BtreeGetPageSize(pMain) );
  execSql(db, "PRAGMA vacuum_db.synchronous=OFF");

  /* Begin a transaction */
  rc = execSql(db, "BEGIN;");
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Query the schema of the main database. Create a mirror schema
  ** in the temporary database.
  */
  rc = execExecSql(db, 
      "SELECT 'CREATE TABLE vacuum_db.' || substr(sql,14,100000000) "
      "  FROM sqlite_master WHERE type='table' "
      "UNION ALL "
      "SELECT 'CREATE INDEX vacuum_db.' || substr(sql,14,100000000) "
      "  FROM sqlite_master WHERE sql LIKE 'CREATE INDEX %' "
      "UNION ALL "
      "SELECT 'CREATE UNIQUE INDEX vacuum_db.' || substr(sql,21,100000000) "
      "  FROM sqlite_master WHERE sql LIKE 'CREATE UNIQUE INDEX %'"
      "UNION ALL "
      "SELECT 'CREATE VIEW vacuum_db.' || substr(sql,13,100000000) "
      "  FROM sqlite_master WHERE type='view'"
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Loop through the tables in the main database. For each, do
  ** an "INSERT INTO vacuum_db.xxx SELECT * FROM xxx;" to copy
  ** the contents to the temporary database.
  */
  rc = execExecSql(db, 
      "SELECT 'INSERT INTO vacuum_db.' || quote(name) "
      "|| ' SELECT * FROM ' || quote(name) || ';'"
      "FROM sqlite_master "
      "WHERE type = 'table';"
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;

  /* Copy the triggers from the main database to the temporary database.
  ** This was deferred before in case the triggers interfered with copying
  ** the data. It's possible the indices should be deferred until this
  ** point also.
  */
  rc = execExecSql(db, 
      "SELECT 'CREATE TRIGGER  vacuum_db.' || substr(sql, 16, 1000000) "
      "FROM sqlite_master WHERE type='trigger'"
  );
  if( rc!=SQLITE_OK ) goto end_of_vacuum;


  /* At this point, unless the main db was completely empty, there is now a
  ** transaction open on the vacuum database, but not on the main database.
  ** Open a btree level transaction on the main database. This allows a
  ** call to sqlite3BtreeCopyFile(). The main database btree level
  ** transaction is then committed, so the SQL level never knows it was
  ** opened for writing. This way, the SQL transaction used to create the
  ** temporary database never needs to be committed.
  */
  if( sqlite3BtreeIsInTrans(pTemp) ){
    u32 meta;

    assert( 0==sqlite3BtreeIsInTrans(pMain) );
    rc = sqlite3BtreeBeginTrans(pMain, 1);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;

    /* Copy Btree meta values 3 and 4. These correspond to SQL layer meta 
    ** values 2 and 3, the default values of a couple of pragmas.
    */
    rc = sqlite3BtreeGetMeta(pMain, 3, &meta);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
    rc = sqlite3BtreeUpdateMeta(pTemp, 3, meta);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
    rc = sqlite3BtreeGetMeta(pMain, 4, &meta);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
    rc = sqlite3BtreeUpdateMeta(pTemp, 4, meta);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;

    rc = sqlite3BtreeCopyFile(pMain, pTemp);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
    rc = sqlite3BtreeCommit(pMain);
  }

end_of_vacuum:
  /* Currently there is an SQL level transaction open on the vacuum
  ** database. No locks are held on any other files (since the main file
  ** was committed at the btree level). So it is safe to end the transaction
  ** by manually setting the autoCommit flag to true and detaching the
  ** vacuum database. The vacuum_db journal file is deleted when the pager
  ** is closed by the DETACH.
  */
  db->autoCommit = 1;
  if( rc==SQLITE_OK ){
    rc = execSql(db, "DETACH vacuum_db;");
  }else{
    execSql(db, "DETACH vacuum_db;");
  }
  if( zTemp ){
    sqlite3OsDelete(zTemp);
    sqliteFree(zTemp);
  }
  if( zSql ) sqliteFree( zSql );
  sqlite3ResetInternalSchema(db, 0);
#endif
  return rc;
}