/*
 * Used by the ATTACH command and by sqlite3_key, set the encryption key for
 * the backend-th database, where "main" is 0, "temp" is 1 and each additional
 * ATTACH-ed database file is 2, 3, 4, ...
 *
 * Returns an SQLite error code via dberr2sqlite().
 */
int sqlite3CodecAttach(sqlite3 *db, int backend, const void *key, int nkey)
{
	struct BtShared *pBt;
	int ret;

	assert(db->aDb[backend].pBt != NULL);
	pBt = db->aDb[backend].pBt->pBt;

	/*
	 * An empty key means no encryption. Also, don't try to encrypt an
	 * environment that's already been opened. Don't encrypt an in-mem db
	 * since it will never be written to disk.
	 */
	if (nkey == 0 || pBt->env_opened || pBt->dbStorage != DB_STORE_NAMED)
		return dberr2sqlite(0, db->aDb[backend].pBt);

	sqlite3_mutex_enter(db->mutex);

	/*
	 * SQLite and BDB have slightly different semantics for the key.
	 * SQLite's key is a string of bytes whose length is specified
	 * separately, while BDB takes a NULL terminated string. We need to
	 * ensure the key is NULL terminated before passing to BDB, but we
	 * can't modify the given key, so we have to make a copy. BDB will
	 * make its own copy of the key, so it's safe to free the copy after
	 * the set_encrypt call.
	 */
	if (pBt->encrypt_pwd != NULL)
		CLEAR_PWD(pBt);
	if ((pBt->encrypt_pwd = malloc((size_t)nkey + 1)) == NULL) {
		ret = ENOMEM;
		goto err;
	}
	memcpy(pBt->encrypt_pwd, key, nkey);
	/*
	 * We allocate nkey + 1 bytes, but will only clear nkey bytes to
	 * preserve the terminating NULL.
	 */
	pBt->encrypt_pwd_len = nkey;
	pBt->encrypt_pwd[nkey] = '\0';

	ret = pDbEnv->set_encrypt(pDbEnv, pBt->encrypt_pwd, DB_ENCRYPT_AES);
	/*
	 * Bug fix: only mark the environment as encrypted when BDB actually
	 * accepted the key. Previously the flag was set unconditionally,
	 * leaving pBt->encrypted == 1 even after a failed set_encrypt call.
	 */
	if (ret == 0)
		pBt->encrypted = 1;

err:	sqlite3_mutex_leave(db->mutex);
	return dberr2sqlite(ret, db->aDb[backend].pBt);
}
/*
 * Remove a cached sequence handle: drop it from the BtShared handle cache,
 * remove the underlying DB_SEQUENCE when it is a caching sequence, and delete
 * the sequence's cookie entry from the metadata database.
 *
 * Frees cache_entry and its cookie. Returns SQLITE_OK on success; a missing
 * metadata entry (DB_NOTFOUND) is tolerated.
 */
static int btreeSeqRemoveHandle(
    sqlite3_context *context, Btree *p, CACHED_DB *cache_entry)
{
	BtShared *pBt;
	DB_SEQUENCE *seq;
	DBT key;
	SEQ_COOKIE cookie;
	int ret;

	pBt = p->pBt;
	memcpy(&cookie, cache_entry->cookie, sizeof(cookie));

	/* Remove the entry from the hash table. */
	sqlite3HashInsert(&pBt->db_cache, cookie.name, cookie.name_len, NULL);

	if (cookie.cache != 0) {
		/* Best effort: the return of remove is deliberately ignored. */
		seq = (DB_SEQUENCE *)cache_entry->dbp;
		seq->remove(seq, p->savepoint_txn, 0);
	}

	/* Remove the cookie entry from the metadata database. */
	memset(&key, 0, sizeof(key));
	key.data = cookie.name;
	key.size = cookie.name_len;
	if ((ret = pBt->metadb->del(pBt->metadb, p->savepoint_txn, &key, 0))
	    != 0 && ret != DB_NOTFOUND) {
		btreeSeqError(context, SQLITE_ERROR,
		    "Sequence remove incomplete. Couldn't delete metadata."
		    "Error %s.", db_strerror(ret));
	}
	/* sqlite3_free(NULL) is a no-op, so no guard is needed. */
	sqlite3_free(cache_entry->cookie);
	sqlite3_free(cache_entry);
	/*
	 * Bug fix: DB_NOTFOUND is tolerated above (the metadata entry may
	 * already be gone), so treat it as success here too instead of
	 * surfacing it to the caller as an error.
	 */
	return (ret == 0 || ret == DB_NOTFOUND ?
	    SQLITE_OK : dberr2sqlite(ret, NULL));
}
/*
 * Implementation of the create_sequence() SQL function.
 *
 * argv[0] is the sequence name; the remaining arguments are keyword/value
 * pairs selected from: "cache" N, "incr" N, "maxvalue" N, "minvalue" N and
 * "start" N. Builds a SEQ_COOKIE describing the configuration and creates
 * the sequence via btreeSeqGetHandle(). On success the function result is
 * SQLITE_OK; on failure an error is reported through btreeSeqError().
 */
static void db_seq_create_func(
    sqlite3_context *context, int argc, sqlite3_value **argv)
{
	Btree *p;
	BtShared *pBt;
	SEQ_COOKIE cookie;
	int i, rc;
	sqlite3 *db;

	if (argc < 1) {
		btreeSeqError(context, SQLITE_ERROR,
		    "wrong number of arguments to function "
		    "create_sequence()");
		return;
	}
	/*
	 * Ensure that the sequence name is OK with our static buffer
	 * size. We need extra characters for "seq_" and "_db".
	 */
	if (strlen((const char *)sqlite3_value_text(argv[0])) >
	    BT_MAX_SEQ_NAME - 8) {
		btreeSeqError(context, SQLITE_ERROR,
		    "Sequence name too long.");
		return;
	}
	db = sqlite3_context_db_handle(context);
	/*
	 * TODO: How do we know which BtShared to use?
	 * What if there are multiple attached DBs, can the user specify which
	 * one to create the sequence on?
	 */
	p = db->aDb[0].pBt;
	pBt = p->pBt;

	if (!p->connected &&
	    (rc = btreeOpenEnvironment(p, 1)) != SQLITE_OK) {
		btreeSeqError(context, SQLITE_ERROR,
		    "%sconnection could not be opened.", MSG_CREATE_FAIL);
		return;
	}

	/* The cookie holds configuration information. */
	memset(&cookie, 0, sizeof(SEQ_COOKIE));
	cookie.incr = 1;

	sqlite3_snprintf(BT_MAX_SEQ_NAME, cookie.name, "seq_%s",
	    sqlite3_value_text(argv[0]));
	cookie.name_len = (int)strlen(cookie.name);

	if (pBt->dbStorage == DB_STORE_NAMED &&
	    btreeSeqExists(context, p, cookie.name) == 1) {
		btreeSeqError(context, SQLITE_ERROR,
		    "Attempt to call sequence_create when a sequence "
		    "already exists.");
		return;
	}

	/*
	 * TODO: In the future calling create_sequence when there is already
	 * a handle in the cache could be used to alter the "cookie" values.
	 * Don't do that for now, because it requires opening and closing
	 * the DB handle, which needs care if it's being used by
	 * multiple threads.
	 */

	/* Set the boundary values to distinguish if user set the values. */
	cookie.min_val = -INT64_MAX;
	cookie.max_val = INT64_MAX;
	cookie.start_val = -INT64_MAX;

	/*
	 * Parse options.
	 *
	 * Bug fix: each keyword must be followed by a value argument. The
	 * original guard was "i == argc", which is never true inside this
	 * loop (the loop condition guarantees i < argc), so argv[++i] could
	 * read one element past the end of the argument array whenever a
	 * keyword was the last argument. Use "i + 1 == argc" instead.
	 *
	 * NOTE(review): the values are read with sqlite3_value_int(), which
	 * truncates to 32 bits even though min/max/start are compared against
	 * INT64_MAX — sqlite3_value_int64() may be intended; confirm before
	 * changing.
	 */
	for (i = 1; i < argc; i++) {
		if (strncmp((char *)sqlite3_value_text(argv[i]),
		    "cache", 5) == 0) {
			if (i + 1 == argc ||
			    sqlite3_value_type(argv[++i]) != SQLITE_INTEGER) {
				btreeSeqError(context, SQLITE_ERROR,
				    "%sInvalid parameter.", MSG_CREATE_FAIL);
				goto err;
			}
			cookie.cache = sqlite3_value_int(argv[i]);
		} else if (strncmp((char *)sqlite3_value_text(argv[i]),
		    "incr", 4) == 0) {
			if (i + 1 == argc ||
			    sqlite3_value_type(argv[++i]) != SQLITE_INTEGER) {
				btreeSeqError(context, SQLITE_ERROR,
				    "%sInvalid parameter.", MSG_CREATE_FAIL);
				goto err;
			}
			cookie.incr = sqlite3_value_int(argv[i]);
		} else if (strncmp((char *)sqlite3_value_text(argv[i]),
		    "maxvalue", 8) == 0) {
			if (i + 1 == argc ||
			    sqlite3_value_type(argv[++i]) != SQLITE_INTEGER) {
				btreeSeqError(context, SQLITE_ERROR,
				    "%sInvalid parameter.", MSG_CREATE_FAIL);
				goto err;
			}
			cookie.max_val = sqlite3_value_int(argv[i]);
		} else if (strncmp((char *)sqlite3_value_text(argv[i]),
		    "minvalue", 8) == 0) {
			if (i + 1 == argc ||
			    sqlite3_value_type(argv[++i]) != SQLITE_INTEGER) {
				btreeSeqError(context, SQLITE_ERROR,
				    "%sInvalid parameter.", MSG_CREATE_FAIL);
				goto err;
			}
			cookie.min_val = sqlite3_value_int(argv[i]);
		} else if (strncmp((char *)sqlite3_value_text(argv[i]),
		    "start", 5) == 0) {
			if (i + 1 == argc ||
			    sqlite3_value_type(argv[++i]) != SQLITE_INTEGER) {
				btreeSeqError(context, SQLITE_ERROR,
				    "%sInvalid parameter.", MSG_CREATE_FAIL);
				goto err;
			}
			cookie.start_val = sqlite3_value_int(argv[i]);
		} else {
			btreeSeqError(context, SQLITE_ERROR,
			    "%sInvalid parameter.", MSG_CREATE_FAIL);
			goto err;
		}
	}

	/*
	 * Setup the cookie. Do this after the parsing so param order doesn't
	 * matter.
	 */
	if (cookie.incr < 0) {
		cookie.decrementing = 1;
		cookie.incr = -cookie.incr;
	}

	/* Attempt to give a reasonable default start value. */
	if (cookie.start_val == -INT64_MAX) {
		/*
		 * Set a reasonable default start value, if
		 * only half of a range has been given.
		 */
		if (cookie.decrementing == 1 &&
		    cookie.max_val != INT64_MAX) {
			cookie.start_val = cookie.max_val;
		} else if (cookie.decrementing == 0 &&
		    cookie.min_val != -INT64_MAX) {
			cookie.start_val = cookie.min_val;
		} else {
			/*
			 * If user does not set start_val, min_val and
			 * max_val, set default start_val to 0 by default.
			 */
			cookie.start_val = 0;
		}
	}

	/* Validate the settings. */
	if (cookie.min_val > cookie.max_val && cookie.max_val != 0) {
		btreeSeqError(context, SQLITE_ERROR,
		    "%sInvalid parameter.", MSG_CREATE_FAIL);
		goto err;
	}
	if (cookie.min_val > cookie.start_val ||
	    cookie.max_val < cookie.start_val) {
		btreeSeqError(context, SQLITE_ERROR,
		    "%sInvalid parameter.", MSG_CREATE_FAIL);
		goto err;
	}
	if (cookie.cache != 0 && db->autoCommit == 0) {
		btreeSeqError(context, SQLITE_ERROR,
		    "Cannot create caching sequence in a transaction.");
		goto err;
	}

	if ((rc = btreeSeqGetHandle(context, p, SEQ_HANDLE_CREATE,
	    &cookie)) != SQLITE_OK) {
		if (rc != SQLITE_ERROR)
			btreeSeqError(context, dberr2sqlite(rc, NULL),
			    "Failed to create sequence %s. Error: %s",
			    (const char *)sqlite3_value_text(argv[0]),
			    db_strerror(rc));
		goto err;
	}

	sqlite3_result_int(context, SQLITE_OK);
err:	return;
}
/*
 * Shared implementation behind the nextval()/currval() SQL functions.
 *
 * context - SQL function context; receives either the 64-bit sequence value
 *           or an error.
 * name    - user-visible sequence name (prefixed with "seq_" internally).
 * mode    - DB_SEQ_NEXT to advance the sequence, DB_SEQ_CURRENT to read it.
 *
 * Two storage strategies exist: non-caching sequences (cookie.cache == 0)
 * keep the value in a transactionally-updated cookie record, while caching
 * sequences delegate to a Berkeley DB DB_SEQUENCE handle.
 */
static void btreeSeqGetVal(
    sqlite3_context *context, const char * name, int mode)
{
	Btree *p;
	BtShared *pBt;
	SEQ_COOKIE cookie;
	db_seq_t val;
	int rc, ret;
	sqlite3 *db;

	db = sqlite3_context_db_handle(context);
	/* NOTE(review): always uses aDb[0] ("main") — attached DBs are not
	 * consulted; same assumption as the other sequence functions. */
	p = db->aDb[0].pBt;
	pBt = p->pBt;
	memset(&cookie, 0, sizeof(cookie));

	/* Lazily open the environment on first use of this connection. */
	if (!p->connected &&
	    (rc = btreeOpenEnvironment(p, 1)) != SQLITE_OK) {
		sqlite3_result_error(context,
		    "Sequence open failed: connection could not be opened.",
		    -1);
		return;
	}

	/* Internal names carry a "seq_" prefix to namespace them. */
	sqlite3_snprintf(BT_MAX_SEQ_NAME, cookie.name, "seq_%s", name);
	cookie.name_len = (int)strlen(cookie.name);

	rc = btreeSeqGetHandle(context, p, SEQ_HANDLE_OPEN, &cookie);
	if (rc != SQLITE_OK) {
		if (rc == DB_NOTFOUND)
			btreeSeqError(context, dberr2sqlite(rc, NULL),
			    "no such sequence: %s", name);
		else if (rc != SQLITE_ERROR)
			btreeSeqError(context, dberr2sqlite(rc, NULL),
			    "Fail to get next value from seq %s. Error: %s",
			    name, db_strerror(rc));
		return;
	}

	if (cookie.cache == 0) {
		/*
		 * Ensure we see the latest value across connections. Use
		 * DB_RMW to ensure that no other process changes the value
		 * while we're updating it.
		 */
		if ((ret =
		    btreeSeqGetCookie(context, p, &cookie, DB_RMW)) != 0) {
			btreeSeqError(context, SQLITE_ERROR,
			    "Failed to retrieve sequence value. Error: %s",
			    db_strerror(ret));
			return;
		}
		if (mode == DB_SEQ_NEXT) {
			/*
			 * Bounds check: only enforced once the sequence has
			 * produced a value (used != 0); the first call hands
			 * out start_val unchecked.
			 */
			if (cookie.used &&
			    ((cookie.decrementing &&
			    cookie.val - cookie.incr < cookie.min_val) ||
			    (!cookie.decrementing &&
			    cookie.val + cookie.incr > cookie.max_val))) {
				btreeSeqError(context, SQLITE_ERROR,
				    "Sequence value out of bounds.");
				return;
			}
			if (!cookie.used) {
				/* First use: hand out the start value. */
				cookie.used = 1;
				cookie.val = cookie.start_val;
			} else if (cookie.decrementing)
				cookie.val -= cookie.incr;
			else
				cookie.val += cookie.incr;
			/* NOTE(review): the return of btreeSeqPutCookie is
			 * ignored here — a failed write-back is not reported;
			 * confirm this is intentional. */
			btreeSeqPutCookie(context, p, &cookie, 0);
		} else if (!cookie.used) {
			btreeSeqError(context, SQLITE_ERROR,
			    "Can't call currval on an unused sequence.");
			return ;
		}
		val = cookie.val;
	} else {
		/* Caching sequences cannot report a stable current value. */
		if (mode == DB_SEQ_CURRENT) {
			btreeSeqError(context, SQLITE_ERROR,
			    "Can't call currval on a caching sequence.");
			return;
		}
		/*
		 * Using a cached sequence while an exclusive transaction is
		 * active on this handle causes a hang. Avoid it.
		 */
		if (p->txn_excl == 1) {
			btreeSeqError(context, SQLITE_ERROR,
			    "Can't call nextval on a caching sequence while an"
			    " exclusive transaction is active.");
			return;
		}
		/* Cached gets can't be transactionally protected. */
		if ((ret = cookie.handle->get(cookie.handle, NULL,
		    cookie.incr, &val, 0)) != 0) {
			/* BDB reports range exhaustion as EINVAL. */
			if (ret == EINVAL)
				btreeSeqError(context, SQLITE_ERROR,
				    "Sequence value out of bounds.");
			else
				btreeSeqError(context, SQLITE_ERROR,
				    "Failed sequence get. Error: %s",
				    db_strerror(ret));
			return;
		}
	}
	sqlite3_result_int64(context, val);
}
/*
 * Implementation of the drop_sequence() SQL function.
 *
 * argv[0] is the user-visible sequence name. Opens the sequence handle,
 * then removes it from the handle cache and the metadata database under
 * the BtShared mutex. On success the function result is SQLITE_OK.
 */
static void db_seq_drop_func(
    sqlite3_context *context, int argc, sqlite3_value **argv)
{
	Btree *p;
	BtShared *pBt;
	CACHED_DB *cache_entry;
	SEQ_COOKIE cookie;
	int rc;
	sqlite3 *db;

	db = sqlite3_context_db_handle(context);
	/* Sequences always live on the "main" database. */
	p = db->aDb[0].pBt;
	pBt = p->pBt;
	memset(&cookie, 0, sizeof(cookie));

	/* Lazily open the environment on first use of this connection. */
	if (!p->connected &&
	    (rc = btreeOpenEnvironment(p, 1)) != SQLITE_OK) {
		btreeSeqError(context, SQLITE_ERROR,
		    "Sequence drop failed: connection could not be opened.");
		return;
	}

	sqlite3_snprintf(BT_MAX_SEQ_NAME, cookie.name, "seq_%s",
	    sqlite3_value_text(argv[0]));
	cookie.name_len = (int)strlen(cookie.name);

	rc = btreeSeqGetHandle(context, p, SEQ_HANDLE_OPEN, &cookie);
	if (rc != SQLITE_OK) {
		/* If the handle doesn't exist, return an error. */
		if (rc == DB_NOTFOUND)
			btreeSeqError(context, dberr2sqlite(rc, NULL),
			    "no such sequence: %s", cookie.name + 4);
		else if (rc != SQLITE_ERROR)
			btreeSeqError(context, dberr2sqlite(rc, NULL),
			    "Fail to drop sequence %s. Error: %s",
			    cookie.name + 4, db_strerror(rc));
		return;
	}

	sqlite3_mutex_enter(pBt->mutex);
	cache_entry =
	    sqlite3HashFind(&pBt->db_cache, cookie.name, cookie.name_len);
	if (cache_entry == NULL)
		goto done;

	if (cookie.cache != 0 && db->autoCommit == 0) {
		btreeSeqError(context, SQLITE_ERROR,
		    "Cannot drop caching sequence in a transaction.");
		rc = SQLITE_ERROR;
		goto done;
	}

	/*
	 * Drop the mutex - it's not valid to begin a transaction while
	 * holding the mutex. We can drop it safely because its use is to
	 * protect handle cache changes. (Comment relocated: it previously
	 * appeared after the mutex was re-acquired.)
	 */
	sqlite3_mutex_leave(pBt->mutex);
	if ((rc = btreeSeqStartTransaction(context, p, 1)) != SQLITE_OK) {
		btreeSeqError(context, SQLITE_ERROR,
		    "Could not begin transaction for drop.");
		return;
	}

	sqlite3_mutex_enter(pBt->mutex);
	btreeSeqRemoveHandle(context, p, cache_entry);
done:	sqlite3_mutex_leave(pBt->mutex);
	if (rc == SQLITE_OK)
		sqlite3_result_int(context, SQLITE_OK);
}
/*
** Copy nPage pages from the source b-tree to the destination.
**
** Returns SQLITE_OK while pages remain, SQLITE_DONE when the copy has
** finished, or an error code. SQLITE_LOCKED/SQLITE_BUSY results roll the
** backup back but leave it restartable (see backupReset).
*/
int sqlite3_backup_step(sqlite3_backup *p, int nPage)
{
	int returnCode, pages;
	Parse parse;
	DB_ENV *dbenv;
	BtShared *pBtDest, *pBtSrc;

	pBtDest = pBtSrc = NULL;

	/* A previous error, or a zero page request, is returned unchanged. */
	if (p->rc != SQLITE_OK || nPage == 0)
		return p->rc;

	sqlite3_mutex_enter(p->pSrcDb->mutex);
	sqlite3_mutex_enter(p->pDestDb->mutex);

	/*
	 * Make sure the schema has been read in, so the keyInfo
	 * can be retrieved for the indexes. No-op if already read.
	 * If the schema has not been read then an update must have
	 * changed it, so backup will restart.
	 */
	memset(&parse, 0, sizeof(parse));
	parse.db = p->pSrcDb;
	p->rc = sqlite3ReadSchema(&parse);
	if (p->rc != SQLITE_OK)
		goto err;

	/*
	 * This process updated the source database, so
	 * the backup process has to restart.
	 */
	if (p->pSrc->updateDuringBackup > p->lastUpdate) {
		p->rc = SQLITE_LOCKED;
		if ((p->rc = backupCleanup(p)) != SQLITE_OK)
			goto err;
		else
			backupReset(p);
	}

	pages = nPage;

	/* First pass: recreate the destination from scratch. */
	if (!p->cleaned) {
		const char *home;
		const char inmem[9] = ":memory:";
		int storage;

		pBtDest = p->pDest->pBt;
		storage = p->pDest->pBt->dbStorage;
		if (storage == DB_STORE_NAMED)
			p->openDest = 1;
		/* Wipe the old destination environment before reopening. */
		p->rc = btreeDeleteEnvironment(p->pDest, p->fullName, 1);
		if (storage == DB_STORE_INMEM &&
		    strcmp(p->destName, "temp") != 0)
			home = inmem;
		else
			home = p->fullName;
		/* The Btree may have been replaced by the delete. */
		p->pDest = p->pDestDb->aDb[p->iDb].pBt;
		if (p->rc != SQLITE_OK)
			goto err;
		/*
		 * Call sqlite3OpenTempDatabase instead of
		 * sqlite3BtreeOpen, because sqlite3OpenTempDatabase
		 * automatically chooses the right flags before calling
		 * sqlite3BtreeOpen.
		 */
		if (strcmp(p->destName, "temp") == 0) {
			memset(&parse, 0, sizeof(parse));
			parse.db = p->pDestDb;
			p->rc = sqlite3OpenTempDatabase(&parse);
			p->pDest = p->pDestDb->aDb[p->iDb].pBt;
		} else {
			p->rc = sqlite3BtreeOpen(home, p->pDestDb, &p->pDest,
			    SQLITE_DEFAULT_CACHE_SIZE | SQLITE_OPEN_MAIN_DB,
			    p->pDestDb->openFlags);
			p->pDestDb->aDb[p->iDb].pBt = p->pDest;
			if (p->rc == SQLITE_OK) {
				p->pDestDb->aDb[p->iDb].pSchema =
				    sqlite3SchemaGet(p->pDestDb, p->pDest);
				if (!p->pDestDb->aDb[p->iDb].pSchema)
					p->rc = SQLITE_NOMEM;
			} else
				p->pDestDb->aDb[p->iDb].pSchema = NULL;
		}
		if (p->pDest)
			p->pDest->nBackup++;
#ifdef SQLITE_HAS_CODEC
		/*
		 * In the case of a temporary source database, use the
		 * encryption of the main database.
		 */
		if (strcmp(p->srcName, "temp") == 0) {
			int iDb = sqlite3FindDbName(p->pSrcDb, "main");
			pBtSrc = p->pSrcDb->aDb[iDb].pBt->pBt;
		} else
			pBtSrc = p->pSrc->pBt;
		/* Re-key the destination with the source's password. */
		if (p->rc == SQLITE_OK) {
			if (p->iDb == 0)
				p->rc = sqlite3_key(p->pDestDb,
				    pBtSrc->encrypt_pwd,
				    pBtSrc->encrypt_pwd_len);
			else
				p->rc = sqlite3CodecAttach(p->pDestDb,
				    p->iDb, pBtSrc->encrypt_pwd,
				    pBtSrc->encrypt_pwd_len);
		}
#endif
		if (p->rc != SQLITE_OK)
			goto err;
		p->cleaned = 1;
	}

	/*
	 * Begin a transaction, unfortuantely the lock on
	 * the schema has to be released to allow the sqlite_master
	 * table to be cleared, which could allow another thread to
	 * alter it, however accessing the backup database during
	 * backup is already an illegal condition with undefined
	 * results.
	 */
	if (!sqlite3BtreeIsInTrans(p->pDest)) {
		if (!p->pDest->connected) {
			p->rc = btreeOpenEnvironment(p->pDest, 1);
			if (p->rc != SQLITE_OK)
				goto err;
		}
		if ((p->rc = sqlite3BtreeBeginTrans(p->pDest, 2))
		    != SQLITE_OK)
			goto err;
	}
	/* Only this process should be accessing the backup environment. */
	if (p->pDest->pBt->nRef > 1) {
		p->rc = SQLITE_BUSY;
		goto err;
	}

	/*
	 * Begin a transaction, a lock error or update could have caused
	 * it to be released in a previous call to step.
	 */
	if (!p->srcTxn) {
		dbenv = p->pSrc->pBt->dbenv;
		/* NOTE(review): dberr2sqlite is called with one argument
		 * here but with two elsewhere in this file — likely a
		 * version mismatch; confirm the prototype. */
		if ((p->rc = dberr2sqlite(dbenv->txn_begin(dbenv,
		    p->pSrc->family_txn, &p->srcTxn, 0))) != SQLITE_OK)
			goto err;
	}

	/*
	 * An update could have dropped or created a table, so recalculate
	 * the list of tables.
	 */
	if (!p->tables) {
		if ((p->rc = btreeGetPageCount(p->pSrc,
		    &p->tables, &p->nPagecount, p->srcTxn)) != SQLITE_OK) {
			sqlite3Error(p->pSrcDb, p->rc, 0);
			goto err;
		}
		p->nRemaining = p->nPagecount;
	}

	/* Copy the pages. */
	p->rc = btreeCopyPages(p, &pages);
	if (p->rc == SQLITE_DONE) {
		/* Copy finished: reload the destination schema. */
		p->nRemaining = 0;
		sqlite3ResetInternalSchema(p->pDestDb, p->iDb);
		memset(&parse, 0, sizeof(parse));
		parse.db = p->pDestDb;
		p->rc = sqlite3ReadSchema(&parse);
		if (p->rc == SQLITE_OK)
			p->rc = SQLITE_DONE;
	} else if (p->rc != SQLITE_OK)
		goto err;

	/*
	 * The number of pages left to copy is an estimate, so
	 * do not let the number go to zero unless we are really
	 * done.
	 */
	if (p->rc != SQLITE_DONE) {
		if ((u32)pages >= p->nRemaining)
			p->nRemaining = 1;
		else
			p->nRemaining -= pages;
	}

err:	/*
	 * This process updated the source database, so
	 * the backup process has to restart.
	 */
	if (p->pSrc->updateDuringBackup > p->lastUpdate &&
	    (p->rc == SQLITE_OK || p->rc == SQLITE_DONE)) {
		int cleanCode;
		returnCode = p->rc;
		p->rc = SQLITE_LOCKED;
		if ((cleanCode = backupCleanup(p)) != SQLITE_OK)
			returnCode = p->rc = cleanCode;
		else
			backupReset(p);
	} else {
		/* Prefer the step error over the cleanup result. */
		returnCode = backupCleanup(p);
		if (returnCode == SQLITE_OK ||
		    (p->rc != SQLITE_OK && p->rc != SQLITE_DONE))
			returnCode = p->rc;
		else
			p->rc = returnCode;
	}

	/*
	 * On a locked or busy error the backup process is rolled back,
	 * but can be restarted by the user.
	 */
	if ( returnCode == SQLITE_LOCKED || returnCode == SQLITE_BUSY )
		backupReset(p);
	else if ( returnCode != SQLITE_OK && returnCode != SQLITE_DONE ) {
		sqlite3Error(p->pDestDb, p->rc, 0);
	}
	sqlite3_mutex_leave(p->pDestDb->mutex);
	sqlite3_mutex_leave(p->pSrcDb->mutex);
	return (returnCode);
}
/*
 * Close or free all handles and commit or rollback the transaction.
 *
 * Called at the end of a backup step (and on restart). p->rc == SQLITE_DONE
 * commits; anything else rolls back. Names and the full path are preserved
 * across SQLITE_LOCKED/SQLITE_BUSY so the backup can be retried.
 */
static int backupCleanup(sqlite3_backup *p)
{
	int rc, rc2, ret;
	void *app;
	DB *db;

	rc = rc2 = SQLITE_OK;

	/* Nothing to clean up if no step has run or nothing went wrong. */
	if (!p || p->rc == SQLITE_OK)
		return rc;

	rc2 = sqlite3BtreeCloseCursor(&p->destCur);
	if (rc2 != SQLITE_OK)
		rc = rc2;
	if (p->srcCur) {
		db = p->srcCur->dbp;
		app = db->app_private;
		if ((ret = p->srcCur->close(p->srcCur)) == 0)
			ret = db->close(db, DB_NOSYNC);
		/* NOTE(review): single-argument dberr2sqlite here; the
		 * sequence code calls it with two arguments — confirm which
		 * prototype this file compiles against. */
		rc2 = dberr2sqlite(ret);
		/*
		 * The KeyInfo was allocated in btreeSetupIndex,
		 * so have to deallocate it here.
		 */
		if (app)
			sqlite3DbFree(p->pSrcDb, app);
	}
	if (rc2 != SQLITE_OK)
		rc = rc2;
	p->srcCur = 0;

	/*
	 * May retry on a locked or busy error, so keep
	 * these values.
	 */
	if (p->rc != SQLITE_LOCKED && p->rc != SQLITE_BUSY) {
		if (p->srcName)
			sqlite3_free(p->srcName);
		if (p->destName != 0)
			sqlite3_free(p->destName);
		p->srcName = p->destName = NULL;
	}

	if (p->tables != 0)
		sqlite3_free(p->tables);
	p->tables = NULL;

	/* Release the backup reference counts taken in backup_init/step. */
	if (p->pSrc->nBackup)
		p->pSrc->nBackup--;
	if (p->pDest != NULL && p->pDest->nBackup)
		p->pDest->nBackup--;

	/* Resolve the source read transaction. */
	if (p->srcTxn) {
		if (p->rc == SQLITE_DONE)
			ret = p->srcTxn->commit(p->srcTxn, 0);
		else
			ret = p->srcTxn->abort(p->srcTxn);
		rc2 = dberr2sqlite(ret);
	}
	p->srcTxn = 0;

	/* NOTE(review): the destination transaction is resolved only when
	 * rc2 != SQLITE_OK — committing/rolling back only on error looks
	 * suspicious; confirm against the upstream source. */
	if (rc2 != SQLITE_OK && sqlite3BtreeIsInTrans(p->pDest)) {
		rc = rc2;
		if (p->rc == SQLITE_DONE)
			rc2 = sqlite3BtreeCommit(p->pDest);
		else
			rc2 = sqlite3BtreeRollback(p->pDest);
		if (rc2 != SQLITE_OK)
			rc = rc2;
	}

	if (p->pDest && p->openDest) {
		char path[512];

		/*
		 * If successfully done then delete the old backup, if
		 * an error then delete the current database and restore
		 * the old backup.
		 */
		sqlite3_snprintf(sizeof(path), path, "%s%s",
		    p->fullName, BACKUP_SUFFIX);
		if (p->rc == SQLITE_DONE) {
			rc2 = btreeDeleteEnvironment(p->pDest, path, 0);
		} else {
			rc2 = btreeDeleteEnvironment(p->pDest,
			    p->fullName, 0);
			/* __os_exists returns 0 when the file exists. */
			if (!__os_exists(NULL, path, 0))
				__os_rename(NULL, path, p->fullName, 0);
		}
		if (rc == SQLITE_OK)
			rc = rc2;
		if (rc == SQLITE_OK) {
			/* Reopen the destination on the restored file. */
			p->pDest = NULL;
			p->pDestDb->aDb[p->iDb].pBt = NULL;
			p->openDest = 0;
			rc = sqlite3BtreeOpen(p->fullName, p->pDestDb,
			    &p->pDest,
			    SQLITE_DEFAULT_CACHE_SIZE | SQLITE_OPEN_MAIN_DB,
			    p->pDestDb->openFlags);
			p->pDestDb->aDb[p->iDb].pBt = p->pDest;
			if (rc == SQLITE_OK) {
				p->pDestDb->aDb[p->iDb].pSchema =
				    sqlite3SchemaGet(p->pDestDb, p->pDest);
				if (!p->pDestDb->aDb[p->iDb].pSchema)
					p->rc = SQLITE_NOMEM;
			} else
				p->pDestDb->aDb[p->iDb].pSchema = NULL;
			if (rc == SQLITE_OK)
				p->pDest->pBt->db_oflags |= DB_CREATE;
			/*
			 * Have to delete the schema here on error to avoid
			 * assert failure.
			 */
			if (p->pDest == NULL &&
			    p->pDestDb->aDb[p->iDb].pSchema != NULL) {
				sqlite3SchemaClear(
				    p->pDestDb->aDb[p->iDb].pSchema);
				p->pDestDb->aDb[p->iDb].pSchema = NULL;
			}
#ifdef SQLITE_HAS_CODEC
			/* Restore the destination's encryption key. */
			if (rc == SQLITE_OK) {
				if (p->iDb == 0)
					rc = sqlite3_key(p->pDestDb,
					    p->pSrc->pBt->encrypt_pwd,
					    p->pSrc->pBt->encrypt_pwd_len);
				else
					rc = sqlite3CodecAttach(p->pDestDb,
					    p->iDb,
					    p->pSrc->pBt->encrypt_pwd,
					    p->pSrc->pBt->encrypt_pwd_len);
			}
#endif
		}
	}

	/* Keep the path across retryable errors (see comment above). */
	if (p->rc != SQLITE_LOCKED && p->rc != SQLITE_BUSY) {
		if (p->fullName != 0)
			sqlite3_free(p->fullName);
		p->fullName = NULL;
	}
	p->lastUpdate = p->pSrc->updateDuringBackup;
	return rc;
}
/*
** Create an sqlite3_backup process to copy the contents of zSrcDb from
** connection handle pSrcDb to zDestDb in pDestDb. If successful, return
** a pointer to the new sqlite3_backup object.
**
** If an error occurs, NULL is returned and an error code and error message
** stored in database handle pDestDb.
** pDestDb Database to write to
** zDestDb Name of database within pDestDb
** pSrcDb Database connection to read from
** zSrcDb Name of database within pSrcDb
*/
sqlite3_backup *sqlite3_backup_init(sqlite3* pDestDb, const char *zDestDb,
    sqlite3* pSrcDb, const char *zSrcDb)
{
	sqlite3_backup *p;	/* Value to return */
	Parse parse;
	DB_ENV *dbenv;

	p = NULL;

	if (!pDestDb || !pSrcDb)
		return 0;

	sqlite3_mutex_enter(pSrcDb->mutex);
	sqlite3_mutex_enter(pDestDb->mutex);
	if (pSrcDb == pDestDb) {
		sqlite3Error(pDestDb, SQLITE_ERROR,
		    "source and destination must be distinct");
		goto err;
	}

	/* Allocate space for a new sqlite3_backup object */
	p = (sqlite3_backup *)sqlite3_malloc(sizeof(sqlite3_backup));
	if (!p) {
		sqlite3Error(pDestDb, SQLITE_NOMEM, 0);
		goto err;
	}
	memset(p, 0, sizeof(sqlite3_backup));

	p->pSrc = findBtree(pDestDb, pSrcDb, zSrcDb);
	p->pDest = findBtree(pDestDb, pDestDb, zDestDb);
	p->pDestDb = pDestDb;
	p->pSrcDb = pSrcDb;
	if (0 == p->pSrc) {
		p->rc = p->pSrcDb->errCode;
		goto err;
	}
	if (0 == p->pDest) {
		p->rc = p->pDestDb->errCode;
		goto err;
	}

	p->iDb = sqlite3FindDbName(pDestDb, zDestDb);
	p->srcName = sqlite3_malloc((int)strlen(zSrcDb) + 1);
	p->destName = sqlite3_malloc((int)strlen(zDestDb) + 1);
	if (0 == p->srcName || 0 == p->destName) {
		p->rc = SQLITE_NOMEM;
		goto err;
	}
	/*
	 * The copy length includes the terminating NUL, so memcpy is the
	 * clear equivalent of the previous strncpy calls (which do not
	 * guarantee termination in general and obscure intent here).
	 */
	memcpy(p->srcName, zSrcDb, strlen(zSrcDb) + 1);
	memcpy(p->destName, zDestDb, strlen(zDestDb) + 1);

	if (p->pDest->pBt->full_name) {
		const char *fullName = p->pDest->pBt->full_name;
		p->fullName = sqlite3_malloc((int)strlen(fullName) + 1);
		if (!p->fullName) {
			p->rc = SQLITE_NOMEM;
			goto err;
		}
		memcpy(p->fullName, fullName, strlen(fullName) + 1);
	}

	/*
	 * Make sure the schema has been read in, so the keyInfo
	 * can be retrieved for the indexes. No-op if already read.
	 */
	memset(&parse, 0, sizeof(parse));
	parse.db = p->pSrcDb;
	p->rc = sqlite3ReadSchema(&parse);
	if (p->rc != SQLITE_OK) {
		if (parse.zErrMsg != NULL)
			sqlite3DbFree(p->pSrcDb, parse.zErrMsg);
		goto err;
	}

	/* Begin a transaction on the source. */
	if (!p->pSrc->connected) {
		if ((p->rc = btreeOpenEnvironment(p->pSrc, 1)) != SQLITE_OK)
			goto err;
	}
	dbenv = p->pSrc->pBt->dbenv;
	p->rc = dberr2sqlite(dbenv->txn_begin(dbenv, p->pSrc->family_txn,
	    &p->srcTxn, 0));
	if (p->rc != SQLITE_OK) {
		sqlite3Error(pSrcDb, p->rc, 0);
		goto err;
	}

	/*
	 * Get the page count and list of tables to copy. This will
	 * result in a read lock on the schema table, held in the
	 * read transaction.
	 */
	if ((p->rc = btreeGetPageCount(p->pSrc,
	    &p->tables, &p->nPagecount, p->srcTxn)) != SQLITE_OK) {
		sqlite3Error(pSrcDb, p->rc, 0);
		goto err;
	}
	p->nRemaining = p->nPagecount;
	p->pSrc->nBackup++;
	p->pDest->nBackup++;
	p->lastUpdate = p->pSrc->updateDuringBackup;
	goto done;

err:	if (p != 0) {
		if (pDestDb->errCode == SQLITE_OK)
			sqlite3Error(pDestDb, p->rc, 0);
		if (p->srcTxn)
			p->srcTxn->abort(p->srcTxn);
		/* sqlite3_free(NULL) is a no-op, so no guards needed. */
		sqlite3_free(p->srcName);
		sqlite3_free(p->destName);
		sqlite3_free(p->fullName);
		sqlite3_free(p->tables);
		sqlite3_free(p);
		p = NULL;
	}
done:	sqlite3_mutex_leave(pDestDb->mutex);
	sqlite3_mutex_leave(pSrcDb->mutex);
	return p;
}