/* ** Write the current contents of the in-memory linked-list to a PMA. Return ** SQLITE_OK if successful, or an SQLite error code otherwise. ** ** The format of a PMA is: ** ** * A varint. This varint contains the total number of bytes of content ** in the PMA (not including the varint itself). ** ** * One or more records packed end-to-end in order of ascending keys. ** Each record consists of a varint followed by a blob of data (the ** key). The varint is the number of bytes in the blob of data. */ static int vdbeSorterListToPMA(sqlite3 *db, VdbeCursor *pCsr){ int rc = SQLITE_OK; /* Return code */ VdbeSorter *pSorter = pCsr->pSorter; if( pSorter->nInMemory==0 ){ assert( pSorter->pRecord==0 ); return rc; } rc = vdbeSorterSort(pCsr); /* If the first temporary PMA file has not been opened, open it now. */ if( rc==SQLITE_OK && pSorter->pTemp1==0 ){ rc = vdbeSorterOpenTempFile(db, &pSorter->pTemp1); assert( rc!=SQLITE_OK || pSorter->pTemp1 ); assert( pSorter->iWriteOff==0 ); assert( pSorter->nPMA==0 ); } if( rc==SQLITE_OK ){ i64 iOff = pSorter->iWriteOff; SorterRecord *p; SorterRecord *pNext = 0; static const char eightZeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; pSorter->nPMA++; rc = vdbeSorterWriteVarint(pSorter->pTemp1, pSorter->nInMemory, &iOff); for(p=pSorter->pRecord; rc==SQLITE_OK && p; p=pNext){ pNext = p->pNext; rc = vdbeSorterWriteVarint(pSorter->pTemp1, p->nVal, &iOff); if( rc==SQLITE_OK ){ rc = sqlite3OsWrite(pSorter->pTemp1, p->pVal, p->nVal, iOff); iOff += p->nVal; } sqlite3DbFree(db, p); } /* This assert verifies that unless an error has occurred, the size of ** the PMA on disk is the same as the expected size stored in ** pSorter->nInMemory. */ assert( rc!=SQLITE_OK || pSorter->nInMemory==( iOff-pSorter->iWriteOff-sqlite3VarintLen(pSorter->nInMemory) )); pSorter->iWriteOff = iOff; if( rc==SQLITE_OK ){ /* Terminate each file with 8 extra bytes so that from any offset ** in the file we can always read 9 bytes without a SHORT_READ error */ rc = sqlite3OsWrite(pSorter->pTemp1, eightZeros, 8, iOff); } pSorter->pRecord = p; } return rc; }
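/*
** The header comment above spells out the on-disk PMA framing: one varint
** giving the total content size, followed by records packed end-to-end,
** each a size varint plus a key blob. The standalone sketch below builds
** one such frame in memory so the layout is easy to see. It is only an
** illustration: putVarintDemo() uses a simplified little-endian base-128
** encoding, not the exact byte layout produced by sqlite3PutVarint().
*/
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified varint encoder for illustration only. */
static int putVarintDemo(uint8_t *p, uint64_t v){
  int n = 0;
  do{
    uint8_t b = v & 0x7f;
    v >>= 7;
    p[n++] = (uint8_t)(b | (v ? 0x80 : 0));
  }while( v );
  return n;
}

/* Assemble one PMA-style frame: content-length varint, then
** (record-length varint, key blob) pairs in ascending key order. */
int main(void){
  const char *aKey[] = { "apple", "banana", "cherry" };  /* already sorted */
  uint8_t body[128], pma[144];
  int nBody = 0, nPma = 0, i;

  for(i=0; i<3; i++){
    int nKey = (int)strlen(aKey[i]);
    nBody += putVarintDemo(&body[nBody], (uint64_t)nKey);  /* record size */
    memcpy(&body[nBody], aKey[i], nKey);                   /* record data */
    nBody += nKey;
  }
  nPma += putVarintDemo(&pma[nPma], (uint64_t)nBody);      /* total content size */
  memcpy(&pma[nPma], body, nBody);
  nPma += nBody;

  printf("PMA frame is %d bytes (%d bytes of content)\n", nPma, nBody);
  return 0;
}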
/*
** Write data to a tvfs-file.
*/
static int tvfsWrite(
  sqlite3_file *pFile,
  const void *zBuf,
  int iAmt,
  sqlite_int64 iOfst
){
  int rc = SQLITE_OK;
  TestvfsFd *pFd = tvfsGetFd(pFile);
  Testvfs *p = (Testvfs *)pFd->pVfs->pAppData;

  if( p->pScript && p->mask&TESTVFS_WRITE_MASK ){
    tvfsExecTcl(p, "xWrite",
        Tcl_NewStringObj(pFd->zFilename, -1), pFd->pShmId,
        Tcl_NewWideIntObj(iOfst), Tcl_NewIntObj(iAmt)
    );
    tvfsResultCode(p, &rc);
  }

  if( rc==SQLITE_OK && tvfsInjectFullerr(p) ){
    rc = SQLITE_FULL;
  }
  if( rc==SQLITE_OK && p->mask&TESTVFS_WRITE_MASK && tvfsInjectIoerr(p) ){
    rc = SQLITE_IOERR;
  }

  if( rc==SQLITE_OK ){
    rc = sqlite3OsWrite(pFd->pReal, zBuf, iAmt, iOfst);
  }
  return rc;
}
void testAppend(){
  OsFile id;   /* Awkward with this interface: the OsFile has to be created by the caller */
  int Readonly;
  int i, nPages;
  int rc;
  double time;
  char *buf = sqlite3Malloc(config.pagesize);
  rc = sqlite3OsOpenReadWrite(config.datfile, &id, &Readonly);
  errorHandle(rc, "can't open the file");
  for(nPages=1; nPages<=config.pagenum; nPages++){
    printf("append %d pages!\n", nPages);
    start_timer();
    for(i=0; i<nPages; i++){
      sqlite3Randomness(config.pagesize, buf);
      rc = sqlite3OsWrite(&id, buf, config.pagesize);
      errorHandle(rc, "write error");
    }
    time = get_timer();
    pr_times(config.recordfile, time);
  }
  rc = sqlite3OsClose(&id);
  errorHandle(rc, "can't close the file");
  //TODO: can't find the definition, do it later
  //sqlite3Free((void *)buf);
}
/*
** Write data to a jt-file.
*/
static int jtWrite(
  sqlite3_file *pFile,
  const void *zBuf,
  int iAmt,
  sqlite_int64 iOfst
){
  int rc;
  jt_file *p = (jt_file *)pFile;
  if( p->flags&SQLITE_OPEN_MAIN_JOURNAL ){
    if( iOfst==0 ){
      jt_file *pMain = locateDatabaseHandle(p->zName);
      assert( pMain );

      if( iAmt==28 ){
        /* Zeroing the first journal-file header. This is the end of a
        ** transaction. */
        closeTransaction(pMain);
      }else if( iAmt!=12 ){
        /* Writing the first journal header to a journal file. This happens
        ** when a transaction is first started. */
        u8 *z = (u8 *)zBuf;
        pMain->nPage = decodeUint32(&z[16]);
        pMain->nPagesize = decodeUint32(&z[24]);
        if( SQLITE_OK!=(rc=openTransaction(pMain, p)) ){
          return rc;
        }
      }
    }
    if( p->iMaxOff<(iOfst + iAmt) ){
      p->iMaxOff = iOfst + iAmt;
    }
  }

  if( p->flags&SQLITE_OPEN_MAIN_DB && p->pWritable ){
    if( iAmt<p->nPagesize
     && p->nPagesize%iAmt==0
     && iOfst>=(PENDING_BYTE+512)
     && iOfst+iAmt<=PENDING_BYTE+p->nPagesize
    ){
      /* No-op. This special case is hit when the backup code is copying to
      ** a database with a larger page-size than the source database and
      ** it needs to fill in the non-locking-region part of the original
      ** pending-byte page.
      */
    }else{
      u32 pgno = iOfst/p->nPagesize + 1;
      assert( (iAmt==1||iAmt==p->nPagesize) &&
              ((iOfst+iAmt)%p->nPagesize)==0 );
      assert( pgno<=p->nPage || p->nSync>0 );
      assert( pgno>p->nPage || sqlite3BitvecTest(p->pWritable, pgno) );
    }
  }

  rc = sqlite3OsWrite(p->pReal, zBuf, iAmt, iOfst);
  if( (p->flags&SQLITE_OPEN_MAIN_JOURNAL) && iAmt==12 ){
    jt_file *pMain = locateDatabaseHandle(p->zName);
    int rc2 = readJournalFile(p, pMain);
    if( rc==SQLITE_OK ) rc = rc2;
  }
  return rc;
}
/*
** Wrapper around the sqlite3OsWrite() function. The iSkip mechanism for
** avoiding writes to the 512 byte block beginning at offset PENDING_BYTE
** is disabled in this version (iSkip is always zero), so the write is
** passed through unchanged.
*/
static int writeDbFile(CrashFile *p, u8 *z, i64 iAmt, i64 iOff){
  int rc = SQLITE_OK;
  int iSkip = 0;
  if( (iAmt-iSkip)>0 ){
    rc = sqlite3OsWrite(p->pRealFile, &z[iSkip], (int)(iAmt-iSkip), iOff+iSkip);
  }
  return rc;
}
/*
** Write data to a devsym-file.
*/
static int devsymWrite(
  sqlite3_file *pFile,
  const void *zBuf,
  int iAmt,
  sqlite_int64 iOfst
){
  devsym_file *p = (devsym_file *)pFile;
  return sqlite3OsWrite(p->pReal, zBuf, iAmt, iOfst);
}
/*
** Wrapper around the sqlite3OsWrite() function that avoids writing to the
** 512 byte block beginning at offset PENDING_BYTE.
*/
static int writeDbFile(CrashFile *p, u8 *z, i64 iAmt, i64 iOff){
  int rc = SQLITE_OK;   /* Initialized so a fully skipped write reports success */
  int iSkip = 0;
  if( iOff==PENDING_BYTE && (p->flags&SQLITE_OPEN_MAIN_DB) ){
    iSkip = 512;
  }
  if( (iAmt-iSkip)>0 ){
    rc = sqlite3OsWrite(p->pRealFile, &z[iSkip], iAmt-iSkip, iOff+iSkip);
  }
  return rc;
}
void VfsOpenTempFileFileIoErrTest() { //Delete all temp files in this test private data cage. TInt err = DoDeleteTempFiles(); TEST(err == KErrNone || err == KErrNotFound); sqlite3_vfs* vfs = sqlite3_vfs_find(NULL); TEST(vfs != NULL); sqlite3_file* osFile = (sqlite3_file*)User::Alloc(vfs->szOsFile); TEST(osFile != NULL); err = SQLITE_ERROR; TInt cnt = 1; while(err != SQLITE_OK) { TInt processHandleCnt = 0; TInt threadHandleCnt = 0; RThread().HandleCount(processHandleCnt, threadHandleCnt); TInt allocCellsCnt = User::CountAllocCells(); TheTest.Printf(_L("%d "), cnt); (void)TheFs.SetErrorCondition(KErrGeneral, cnt); int outFlags = 0; err = sqlite3OsOpen(vfs, NULL, osFile, SQLITE_OPEN_READWRITE, &outFlags); if(err == SQLITE_OK) { //Since this is a temp file, its creation will be delayed till the first file write operation. err = sqlite3OsWrite(osFile, "1234", 4, 0); (void)sqlite3OsClose(osFile); } (void)TheFs.SetErrorCondition(KErrNone); if(err != SQLITE_OK) { TInt processHandleCnt2 = 0; TInt threadHandleCnt2 = 0; RThread().HandleCount(processHandleCnt2, threadHandleCnt2); TEST2(processHandleCnt2, processHandleCnt); TEST2(threadHandleCnt2, threadHandleCnt); TInt allocCellsCnt2 = User::CountAllocCells(); TEST2(allocCellsCnt2, allocCellsCnt); ++cnt; } //If the iteration has failed, then no temp file should exist in the test private data cage. //If the iteration has succeeded, then sqlite3OsClose() should have deleted the temp file. TInt err2 = DoDeleteTempFiles(); TEST2(err2, KErrNotFound); } TEST2(err, SQLITE_OK); TheTest.Printf(_L("\r\n=== TVfs::Open(<temp file>) file I/O error simulation test succeeded at iteration %d\r\n"), cnt); User::Free(osFile); }
static int jtWrite( sqlite3_file *pFile, const void *zBuf, int iAmt, sqlite_int64 iOfst ){ int rc; jt_file *p = (jt_file *)pFile; if( p->flags&SQLITE_OPEN_MAIN_JOURNAL ){ if( iOfst==0 ){ jt_file *pMain = locateDatabaseHandle(p->zName); assert( pMain ); if( iAmt==28 ){ closeTransaction(pMain); }else if( iAmt!=12 ){ u8 *z = (u8 *)zBuf; pMain->nPage = decodeUint32(&z[16]); pMain->nPagesize = decodeUint32(&z[24]); if( SQLITE_OK!=(rc=openTransaction(pMain, p)) ){ return rc; } } } if( p->iMaxOff<(iOfst + iAmt) ){ p->iMaxOff = iOfst + iAmt; } } if( p->flags&SQLITE_OPEN_MAIN_DB && p->pWritable ){ if( iAmt<p->nPagesize && p->nPagesize%iAmt==0 && iOfst>=(PENDING_BYTE+512) && iOfst+iAmt<=PENDING_BYTE+p->nPagesize ){ }else{ u32 pgno = iOfst/p->nPagesize + 1; assert( (iAmt==1||iAmt==p->nPagesize) && ((iOfst+iAmt)%p->nPagesize)==0 ); assert( pgno<=p->nPage || p->nSync>0 ); assert( pgno>p->nPage || sqlite3BitvecTest(p->pWritable, pgno) ); } } rc = sqlite3OsWrite(p->pReal, zBuf, iAmt, iOfst); if( (p->flags&SQLITE_OPEN_MAIN_JOURNAL) && iAmt==12 ){ jt_file *pMain = locateDatabaseHandle(p->zName); int rc2 = readJournalFile(p, pMain); if( rc==SQLITE_OK ) rc = rc2; } return rc; }
/* ** Flush any buffered data to disk and clean up the file-writer object. ** The results of using the file-writer after this call are undefined. ** Return SQLITE_OK if flushing the buffered data succeeds or is not ** required. Otherwise, return an SQLite error code. ** ** Before returning, set *piEof to the offset immediately following the ** last byte written to the file. */ static int fileWriterFinish(sqlite3 *db, FileWriter *p, i64 *piEof){ int rc; if( p->eFWErr==0 && ALWAYS(p->aBuffer) && p->iBufEnd>p->iBufStart ){ p->eFWErr = sqlite3OsWrite(p->pFile, &p->aBuffer[p->iBufStart], p->iBufEnd - p->iBufStart, p->iWriteOff + p->iBufStart ); } *piEof = (p->iWriteOff + p->iBufEnd); sqlite3DbFree(db, p->aBuffer); rc = p->eFWErr; memset(p, 0, sizeof(FileWriter)); return rc; }
/*
** If it does not already exist, create and populate the on-disk file
** for JournalFile p.
*/
static int createFile(JournalFile *p){
  int rc = SQLITE_OK;
  if( !p->pReal ){
    sqlite3_file *pReal = (sqlite3_file *)&p[1];
    rc = sqlite3OsOpen(p->pVfs, p->zJournal, pReal, p->flags, 0);
    if( rc==SQLITE_OK ){
      p->pReal = pReal;
      if( p->iSize>0 ){
        assert(p->iSize<=p->nBuf);
        rc = sqlite3OsWrite(p->pReal, p->zBuf, p->iSize, 0);
      }
    }
  }
  return rc;
}
/* ** Write a single varint, value iVal, to file-descriptor pFile. Return ** SQLITE_OK if successful, or an SQLite error code if some error occurs. ** ** The value of *piOffset when this function is called is used as the byte ** offset in file pFile to write to. Before returning, *piOffset is ** incremented by the number of bytes written. */ static int vdbeSorterWriteVarint( sqlite3_file *pFile, /* File to write to */ i64 iVal, /* Value to write as a varint */ i64 *piOffset /* IN/OUT: Write offset in file pFile */ ){ u8 aVarint[9]; /* Buffer large enough for a varint */ int nVarint; /* Number of used bytes in varint */ int rc; /* Result of write() call */ nVarint = sqlite3PutVarint(aVarint, iVal); rc = sqlite3OsWrite(pFile, aVarint, nVarint, *piOffset); *piOffset += nVarint; return rc; }
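/*
** A minimal stdio analogue of the offset contract used above, with
** hypothetical names (writeAt() is not an SQLite API): the caller passes
** the current file offset by reference and it is advanced past whatever
** was written, so consecutive calls append records back-to-back exactly
** as vdbeSorterListToPMA() threads iOff through its writes.
*/
#include <stdio.h>

static int writeAt(FILE *f, const void *pBuf, int nBuf, long *piOffset){
  if( fseek(f, *piOffset, SEEK_SET)!=0 ) return 1;
  if( fwrite(pBuf, 1, (size_t)nBuf, f)!=(size_t)nBuf ) return 1;
  *piOffset += nBuf;                 /* advance past the bytes just written */
  return 0;
}

int main(void){
  FILE *f = tmpfile();
  long iOff = 0;
  if( f==0 ) return 1;
  if( writeAt(f, "header", 6, &iOff) || writeAt(f, "payload", 7, &iOff) ){
    fclose(f);
    return 1;
  }
  printf("next write offset: %ld\n", iOff);   /* prints 13 */
  fclose(f);
  return 0;
}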
/* ** Usage: fake_big_file N FILENAME ** ** Write a few bytes at the N megabyte point of FILENAME. This will ** create a large file. If the file was a valid SQLite database, then ** the next time the database is opened, SQLite will begin allocating ** new pages after N. If N is 2096 or bigger, this will test the ** ability of SQLite to write to large files. */ static int fake_big_file( void *NotUsed, Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ int argc, /* Number of arguments */ const char **argv /* Text of each argument */ ){ sqlite3_vfs *pVfs; sqlite3_file *fd = 0; int rc; int n; i64 offset; char *zFile; int nFile; if( argc!=3 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], " N-MEGABYTES FILE\"", 0); return TCL_ERROR; } if( Tcl_GetInt(interp, argv[1], &n) ) return TCL_ERROR; /* this does not work with MDB */ return TCL_ERROR; pVfs = sqlite3_vfs_find(0); nFile = (int)strlen(argv[2]); zFile = sqlite3_malloc( nFile+2 ); if( zFile==0 ) return TCL_ERROR; memcpy(zFile, argv[2], nFile+1); zFile[nFile+1] = 0; rc = sqlite3OsOpenMalloc(pVfs, zFile, &fd, (SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE|SQLITE_OPEN_MAIN_DB), 0 ); if( rc ){ Tcl_AppendResult(interp, "open failed: ", sqlite3ErrName(rc), 0); sqlite3_free(zFile); return TCL_ERROR; } offset = n; offset *= 1024*1024; rc = sqlite3OsWrite(fd, "Hello, World!", 14, offset); sqlite3OsCloseFree(fd); sqlite3_free(zFile); if( rc ){ Tcl_AppendResult(interp, "write failed: ", sqlite3ErrName(rc), 0); return TCL_ERROR; } return TCL_OK; }
void VfsOpenTempFileOomTest()
	{
	//Delete all temp files in this test private data cage.
	TInt err = DoDeleteTempFiles();
	TEST(err == KErrNone || err == KErrNotFound);

	sqlite3_vfs* vfs = sqlite3_vfs_find(NULL);
	TEST(vfs != NULL);
	sqlite3_file* osFile = (sqlite3_file*)User::Alloc(vfs->szOsFile);
	TEST(osFile != NULL);

	TheTest.Printf(_L("Iteration: "));
	TInt failingAllocNum = 0;
	err = SQLITE_IOERR_NOMEM;
	while(err == SQLITE_IOERR_NOMEM)
		{
		++failingAllocNum;
		TheTest.Printf(_L("%d "), failingAllocNum);
		OomPreStep(failingAllocNum);
		int outFlags = 0;
		err = sqlite3OsOpen(vfs, NULL, osFile, SQLITE_OPEN_READWRITE, &outFlags);
		if(err == SQLITE_OK)
			{
			//Since this is a temp file, its creation will be delayed till the first file write operation.
			err = sqlite3OsWrite(osFile, "1234", 4, 0);
			(void)sqlite3OsClose(osFile);
			}
		OomPostStep();
		if(err != SQLITE_OK)
			{
			TEST2(err, SQLITE_IOERR_NOMEM);
			}
		//If the iteration has failed, then no temp file should exist in the test private data cage.
		//If the iteration has succeeded, then sqlite3OsClose() should have deleted the temp file.
		TInt err2 = DoDeleteTempFiles();
		TEST2(err2, KErrNotFound);
		}
	TEST2(err, SQLITE_OK);
	TheTest.Printf(_L("\r\n=== TVfs::Open(<temp file>) OOM test succeeded at allocation %d\r\n"), failingAllocNum);
	User::Free(osFile);
	}
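/*
** Both VFS tests above follow the same fault-injection pattern: schedule
** failure number N (OomPreStep()/SetErrorCondition()), run the operation,
** verify that resources were released, and keep increasing N until the
** operation finally succeeds. The standalone sketch below shows just that
** loop with a stub injector; demoAlloc()/demoOperation() are made-up
** stand-ins, not part of the Symbian harness.
*/
#include <stdio.h>

static int failAfter = 0;       /* 0 = no failure scheduled */
static int stepCount = 0;

static int demoAlloc(void){     /* stand-in for one fallible step */
  ++stepCount;
  return (failAfter && stepCount>=failAfter) ? -1 : 0;
}

static int demoOperation(void){ /* the operation needs three fallible steps */
  int i;
  for(i=0; i<3; i++){
    if( demoAlloc() ) return -1;
  }
  return 0;
}

int main(void){
  int err = -1, n = 0;
  while( err!=0 ){
    ++n;
    failAfter = n;              /* fail the n-th step on this iteration */
    stepCount = 0;
    err = demoOperation();
    failAfter = 0;              /* clear the injected fault */
  }
  printf("operation first succeeded with the failure point at step %d\n", n);
  return 0;
}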
/* ** Usage: fake_big_file N FILENAME ** ** Write a few bytes at the N megabyte point of FILENAME. This will ** create a large file. If the file was a valid SQLite database, then ** the next time the database is opened, SQLite will begin allocating ** new pages after N. If N is 2096 or bigger, this will test the ** ability of SQLite to write to large files. */ static int fake_big_file( void *NotUsed, Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ int argc, /* Number of arguments */ const char **argv /* Text of each argument */ ){ sqlite3_vfs *pVfs; sqlite3_file *fd = 0; int rc; int n; i64 offset; if( argc!=3 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], " N-MEGABYTES FILE\"", 0); return TCL_ERROR; } if( Tcl_GetInt(interp, argv[1], &n) ) return TCL_ERROR; /* * This does not work with Berkeley DB. Making a large file does not cause * DB to skip the existing pages. */ return TCL_ERROR; pVfs = sqlite3_vfs_find(0); rc = sqlite3OsOpenMalloc(pVfs, argv[2], &fd, (SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE|SQLITE_OPEN_MAIN_DB), 0 ); if( rc ){ Tcl_AppendResult(interp, "open failed: ", errorName(rc), 0); return TCL_ERROR; } offset = n; offset *= 1024*1024; rc = sqlite3OsWrite(fd, "Hello, World!", 14, offset); sqlite3OsCloseFree(fd); if( rc ){ Tcl_AppendResult(interp, "write failed: ", errorName(rc), 0); return TCL_ERROR; } return TCL_OK; }
/* ** Write data to the file. */ static int jrnlWrite( sqlite3_file *pJfd, /* The journal file into which to write */ const void *zBuf, /* Take data to be written from here */ int iAmt, /* Number of bytes to write */ sqlite_int64 iOfst /* Begin writing at this offset into the file */ ){ int rc = SQLITE_OK; JournalFile *p = (JournalFile *)pJfd; if( !p->pReal && (iOfst+iAmt)>p->nBuf ){ rc = createFile(p); } if( rc==SQLITE_OK ){ if( p->pReal ){ rc = sqlite3OsWrite(p->pReal, zBuf, iAmt, iOfst); }else{ memcpy(&p->zBuf[iOfst], zBuf, iAmt); if( p->iSize<(iOfst+iAmt) ){ p->iSize = (iOfst+iAmt); } } } return rc; }
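/*
** jrnlWrite() above keeps journal content in memory until a write would
** run past the zBuf allocation, at which point createFile() materializes
** the real file and the buffered bytes are flushed. The sketch below shows
** that spill policy in isolation using stdio; MemSpillFile and every
** function name here are hypothetical, not SQLite's.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct MemSpillFile MemSpillFile;
struct MemSpillFile {
  char *zBuf;        /* In-memory contents while no real file exists */
  long nBuf;         /* Allocated size of zBuf */
  long iSize;        /* Bytes of valid content in zBuf */
  FILE *pReal;       /* Real file, once created */
};

/* Create the real file and flush the buffered content into it. */
static int spillToDisk(MemSpillFile *p, const char *zPath){
  p->pReal = fopen(zPath, "w+b");
  if( p->pReal==0 ) return 1;
  if( p->iSize>0
   && fwrite(p->zBuf, 1, (size_t)p->iSize, p->pReal)!=(size_t)p->iSize ){
    return 1;
  }
  return 0;
}

static int spillWrite(MemSpillFile *p, const char *zPath,
                      const void *pData, long nData, long iOfst){
  if( p->pReal==0 && iOfst+nData>p->nBuf ){
    if( spillToDisk(p, zPath) ) return 1;   /* buffer too small: spill now */
  }
  if( p->pReal ){
    if( fseek(p->pReal, iOfst, SEEK_SET) ) return 1;
    return fwrite(pData, 1, (size_t)nData, p->pReal)!=(size_t)nData;
  }
  memcpy(&p->zBuf[iOfst], pData, (size_t)nData);  /* still fits in memory */
  if( p->iSize<iOfst+nData ) p->iSize = iOfst+nData;
  return 0;
}

int main(void){
  MemSpillFile f = { malloc(16), 16, 0, 0 };
  int rc;
  if( f.zBuf==0 ) return 1;
  rc = spillWrite(&f, "spill_demo.tmp", "0123456789", 10, 0)    /* buffered */
    || spillWrite(&f, "spill_demo.tmp", "ABCDEFGHIJ", 10, 10);  /* spills   */
  printf("rc=%d real-file=%s\n", rc, f.pReal ? "yes" : "no");
  if( f.pReal ) fclose(f.pReal);
  remove("spill_demo.tmp");
  free(f.zBuf);
  return rc;
}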
/*
** Write nData bytes of data to the file-writer object. This routine does
** not return a value; if an error occurs it is recorded in
** FileWriter.eFWErr, subsequent calls become no-ops, and the error is
** reported when fileWriterFinish() is called.
*/
static void fileWriterWrite(FileWriter *p, u8 *pData, int nData){
  int nRem = nData;
  while( nRem>0 && p->eFWErr==0 ){
    int nCopy = nRem;
    if( nCopy>(p->nBuffer - p->iBufEnd) ){
      nCopy = p->nBuffer - p->iBufEnd;
    }

    memcpy(&p->aBuffer[p->iBufEnd], &pData[nData-nRem], nCopy);
    p->iBufEnd += nCopy;
    if( p->iBufEnd==p->nBuffer ){
      p->eFWErr = sqlite3OsWrite(p->pFile,
          &p->aBuffer[p->iBufStart], p->iBufEnd - p->iBufStart,
          p->iWriteOff + p->iBufStart
      );
      p->iBufStart = p->iBufEnd = 0;
      p->iWriteOff += p->nBuffer;
    }
    assert( p->iBufEnd<p->nBuffer );

    nRem -= nCopy;
  }
}
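/*
** Standalone sketch of the chunk-and-flush loop used by fileWriterWrite():
** incoming data is copied into a fixed buffer and the buffer is written
** out whenever it fills; whatever is left over waits for a final flush
** (fileWriterFinish() in the code above). BufWriter and bufWrite() are
** hypothetical names, and the 8-byte buffer is deliberately tiny so the
** flushes are visible.
*/
#include <stdio.h>
#include <string.h>

typedef struct BufWriter {
  FILE *pFile;
  unsigned char aBuffer[8];   /* tiny on purpose, to force several flushes */
  int iBufEnd;                /* bytes of valid data currently in aBuffer */
} BufWriter;

static int bufWrite(BufWriter *p, const unsigned char *pData, int nData){
  int nRem = nData;
  while( nRem>0 ){
    int nCopy = nRem;
    int nSpace = (int)sizeof(p->aBuffer) - p->iBufEnd;
    if( nCopy>nSpace ) nCopy = nSpace;
    memcpy(&p->aBuffer[p->iBufEnd], &pData[nData-nRem], nCopy);
    p->iBufEnd += nCopy;
    if( p->iBufEnd==(int)sizeof(p->aBuffer) ){        /* buffer full: flush */
      if( fwrite(p->aBuffer, 1, sizeof(p->aBuffer), p->pFile)
            !=sizeof(p->aBuffer) ) return 1;
      p->iBufEnd = 0;
    }
    nRem -= nCopy;
  }
  return 0;
}

int main(void){
  BufWriter w = { tmpfile(), {0}, 0 };
  const unsigned char msg[] = "twenty-one byte write";
  if( w.pFile==0 || bufWrite(&w, msg, (int)sizeof(msg)-1) ) return 1;
  printf("%d bytes still buffered, awaiting a final flush\n", w.iBufEnd);
  fclose(w.pFile);
  return 0;
}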
/* ** Usage: fake_big_file N FILENAME ** ** Write a few bytes at the N megabyte point of FILENAME. This will ** create a large file. If the file was a valid SQLite database, then ** the next time the database is opened, SQLite will begin allocating ** new pages after N. If N is 2096 or bigger, this will test the ** ability of SQLite to write to large files. */ static int fake_big_file( void *NotUsed, Tcl_Interp *interp, /* The TCL interpreter that invoked this command */ int argc, /* Number of arguments */ const char **argv /* Text of each argument */ ){ int rc; int n; i64 offset; OsFile *fd = 0; int readOnly = 0; if( argc!=3 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], " N-MEGABYTES FILE\"", 0); return TCL_ERROR; } if( Tcl_GetInt(interp, argv[1], &n) ) return TCL_ERROR; rc = sqlite3OsOpenReadWrite(argv[2], &fd, &readOnly); if( rc ){ Tcl_AppendResult(interp, "open failed: ", errorName(rc), 0); return TCL_ERROR; } offset = n; offset *= 1024*1024; rc = sqlite3OsSeek(fd, offset); if( rc ){ Tcl_AppendResult(interp, "seek failed: ", errorName(rc), 0); return TCL_ERROR; } rc = sqlite3OsWrite(fd, "Hello, World!", 14); sqlite3OsClose(&fd); if( rc ){ Tcl_AppendResult(interp, "write failed: ", errorName(rc), 0); return TCL_ERROR; } return TCL_OK; }
static int fake_big_file( void *NotUsed, Tcl_Interp *interp, int argc, const char **argv ){ sqlite3_vfs *pVfs; sqlite3_file *fd = 0; int rc; int n; i64 offset; if( argc!=3 ){ Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0], " N-MEGABYTES FILE\"", 0); return TCL_ERROR; } if( Tcl_GetInt(interp, argv[1], &n) ) return TCL_ERROR; pVfs = sqlite3_vfs_find(0); rc = sqlite3OsOpenMalloc(pVfs, argv[2], &fd, (SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE|SQLITE_OPEN_MAIN_DB), 0 ); if( rc ){ Tcl_AppendResult(interp, "open failed: ", errorName(rc), 0); return TCL_ERROR; } offset = n; offset *= 1024*1024; rc = sqlite3OsWrite(fd, "Hello, World!", 14, offset); sqlite3OsCloseFree(fd); if( rc ){ Tcl_AppendResult(interp, "write failed: ", errorName(rc), 0); return TCL_ERROR; } return TCL_OK; }
/*
** This procedure runs in a separate thread, reading messages off of the
** write queue and processing them one by one.
**
** If async.writerHaltNow is true, then this procedure exits
** after processing a single message.
**
** If async.writerHaltWhenIdle is true, then this procedure exits when
** the write queue is empty.
**
** If both of the above variables are false, this procedure runs
** indefinitely, waiting for operations to be added to the write queue
** and processing them in the order in which they arrive.
**
** An artificial delay of async.ioDelay milliseconds is inserted before
** each write operation in order to simulate the effect of a slow disk.
**
** Only one instance of this procedure may be running at a time.
*/
static void *asyncWriterThread(void *NotUsed){
  AsyncWrite *p = 0;
  int rc = SQLITE_OK;
  int holdingMutex = 0;

  if( pthread_mutex_trylock(&async.writerMutex) ){
    return 0;
  }
  while( async.writerHaltNow==0 ){
    OsFile *pBase = 0;

    if( !holdingMutex ){
      pthread_mutex_lock(&async.queueMutex);
    }
    while( (p = async.pQueueFirst)==0 ){
      pthread_cond_broadcast(&async.emptySignal);
      if( async.writerHaltWhenIdle ){
        pthread_mutex_unlock(&async.queueMutex);
        break;
      }else{
        ASYNC_TRACE(("IDLE\n"));
        pthread_cond_wait(&async.queueSignal, &async.queueMutex);
        ASYNC_TRACE(("WAKEUP\n"));
      }
    }
    if( p==0 ) break;
    holdingMutex = 1;

    /* Right now this thread is holding the mutex on the write-op queue.
    ** Variable 'p' points to the first entry in the write-op queue. In
    ** the general case, we hold on to the mutex for the entire body of
    ** the loop.
    **
    ** However in the cases enumerated below, we relinquish the mutex,
    ** perform the IO, and then re-request the mutex before removing 'p' from
    ** the head of the write-op queue. The idea is to increase concurrency with
    ** sqlite threads.
    **
    **     * An ASYNC_CLOSE operation.
    **     * An ASYNC_OPENEXCLUSIVE operation. For this one, we relinquish
    **       the mutex, call the underlying xOpenExclusive() function, then
    **       re-acquire the mutex before setting the AsyncFile.pBaseRead
    **       variable.
    **     * ASYNC_SYNC and ASYNC_WRITE operations, if
    **       SQLITE_ASYNC_TWO_FILEHANDLES was set at compile time and two
    **       file-handles are open for the particular file being "synced".
*/ if( async.ioError!=SQLITE_OK && p->op!=ASYNC_CLOSE ){ p->op = ASYNC_NOOP; } if( p->pFile ){ pBase = p->pFile->pBaseWrite; if( p->op==ASYNC_CLOSE || p->op==ASYNC_OPENEXCLUSIVE || (pBase && (p->op==ASYNC_SYNC || p->op==ASYNC_WRITE) ) ){ pthread_mutex_unlock(&async.queueMutex); holdingMutex = 0; } if( !pBase ){ pBase = p->pFile->pBaseRead; } } switch( p->op ){ case ASYNC_NOOP: break; case ASYNC_WRITE: assert( pBase ); ASYNC_TRACE(("WRITE %s %d bytes at %d\n", p->pFile->zName, p->nByte, p->iOffset)); rc = sqlite3OsSeek(pBase, p->iOffset); if( rc==SQLITE_OK ){ rc = sqlite3OsWrite(pBase, (const void *)(p->zBuf), p->nByte); } break; case ASYNC_SYNC: assert( pBase ); ASYNC_TRACE(("SYNC %s\n", p->pFile->zName)); rc = sqlite3OsSync(pBase, p->nByte); break; case ASYNC_TRUNCATE: assert( pBase ); ASYNC_TRACE(("TRUNCATE %s to %d bytes\n", p->pFile->zName, p->iOffset)); rc = sqlite3OsTruncate(pBase, p->iOffset); break; case ASYNC_CLOSE: ASYNC_TRACE(("CLOSE %s\n", p->pFile->zName)); sqlite3OsClose(&p->pFile->pBaseWrite); sqlite3OsClose(&p->pFile->pBaseRead); sqlite3OsFree(p->pFile); break; case ASYNC_OPENDIRECTORY: assert( pBase ); ASYNC_TRACE(("OPENDIR %s\n", p->zBuf)); sqlite3OsOpenDirectory(pBase, p->zBuf); break; case ASYNC_SETFULLSYNC: assert( pBase ); ASYNC_TRACE(("SETFULLSYNC %s %d\n", p->pFile->zName, p->nByte)); sqlite3OsSetFullSync(pBase, p->nByte); break; case ASYNC_DELETE: ASYNC_TRACE(("DELETE %s\n", p->zBuf)); rc = xOrigDelete(p->zBuf); break; case ASYNC_SYNCDIRECTORY: ASYNC_TRACE(("SYNCDIR %s\n", p->zBuf)); rc = xOrigSyncDirectory(p->zBuf); break; case ASYNC_OPENEXCLUSIVE: { AsyncFile *pFile = p->pFile; int delFlag = ((p->iOffset)?1:0); OsFile *pBase = 0; ASYNC_TRACE(("OPEN %s delFlag=%d\n", p->zBuf, delFlag)); assert(pFile->pBaseRead==0 && pFile->pBaseWrite==0); rc = xOrigOpenExclusive(p->zBuf, &pBase, delFlag); assert( holdingMutex==0 ); pthread_mutex_lock(&async.queueMutex); holdingMutex = 1; if( rc==SQLITE_OK ){ pFile->pBaseRead = pBase; } break; } default: assert(!"Illegal value for AsyncWrite.op"); } /* If we didn't hang on to the mutex during the IO op, obtain it now ** so that the AsyncWrite structure can be safely removed from the ** global write-op queue. */ if( !holdingMutex ){ pthread_mutex_lock(&async.queueMutex); holdingMutex = 1; } /* ASYNC_TRACE(("UNLINK %p\n", p)); */ if( p==async.pQueueLast ){ async.pQueueLast = 0; } async.pQueueFirst = p->pNext; sqlite3OsFree(p); assert( holdingMutex ); /* An IO error has occured. We cannot report the error back to the ** connection that requested the I/O since the error happened ** asynchronously. The connection has already moved on. There ** really is nobody to report the error to. ** ** The file for which the error occured may have been a database or ** journal file. Regardless, none of the currently queued operations ** associated with the same database should now be performed. Nor should ** any subsequently requested IO on either a database or journal file ** handle for the same database be accepted until the main database ** file handle has been closed and reopened. ** ** Furthermore, no further IO should be queued or performed on any file ** handle associated with a database that may have been part of a ** multi-file transaction that included the database associated with ** the IO error (i.e. a database ATTACHed to the same handle at some ** point in time). 
*/ if( rc!=SQLITE_OK ){ async.ioError = rc; } /* Drop the queue mutex before continuing to the next write operation ** in order to give other threads a chance to work with the write queue. */ if( !async.pQueueFirst || !async.ioError ){ sqlite3ApiExit(0, 0); pthread_mutex_unlock(&async.queueMutex); holdingMutex = 0; if( async.ioDelay>0 ){ sqlite3OsSleep(async.ioDelay); }else{ sched_yield(); } } } pthread_mutex_unlock(&async.writerMutex); return 0; }
/* ** Once the sorter has been populated, this function is called to prepare ** for iterating through its contents in sorted order. */ int sqlite3VdbeSorterRewind(sqlite3 *db, VdbeCursor *pCsr, int *pbEof){ VdbeSorter *pSorter = pCsr->pSorter; int rc; /* Return code */ sqlite3_file *pTemp2 = 0; /* Second temp file to use */ i64 iWrite2 = 0; /* Write offset for pTemp2 */ int nIter; /* Number of iterators used */ int nByte; /* Bytes of space required for aIter/aTree */ int N = 2; /* Power of 2 >= nIter */ assert( pSorter ); /* If no data has been written to disk, then do not do so now. Instead, ** sort the VdbeSorter.pRecord list. The vdbe layer will read data directly ** from the in-memory list. */ if( pSorter->nPMA==0 ){ *pbEof = !pSorter->pRecord; assert( pSorter->aTree==0 ); return vdbeSorterSort(pCsr); } /* Write the current b-tree to a PMA. Close the b-tree cursor. */ rc = vdbeSorterListToPMA(db, pCsr); if( rc!=SQLITE_OK ) return rc; /* Allocate space for aIter[] and aTree[]. */ nIter = pSorter->nPMA; if( nIter>SORTER_MAX_MERGE_COUNT ) nIter = SORTER_MAX_MERGE_COUNT; assert( nIter>0 ); while( N<nIter ) N += N; nByte = N * (sizeof(int) + sizeof(VdbeSorterIter)); pSorter->aIter = (VdbeSorterIter *)sqlite3DbMallocZero(db, nByte); if( !pSorter->aIter ) return SQLITE_NOMEM; pSorter->aTree = (int *)&pSorter->aIter[N]; pSorter->nTree = N; do { int iNew; /* Index of new, merged, PMA */ for(iNew=0; rc==SQLITE_OK && iNew*SORTER_MAX_MERGE_COUNT<pSorter->nPMA; iNew++ ){ i64 nWrite; /* Number of bytes in new PMA */ /* If there are SORTER_MAX_MERGE_COUNT or less PMAs in file pTemp1, ** initialize an iterator for each of them and break out of the loop. ** These iterators will be incrementally merged as the VDBE layer calls ** sqlite3VdbeSorterNext(). ** ** Otherwise, if pTemp1 contains more than SORTER_MAX_MERGE_COUNT PMAs, ** initialize interators for SORTER_MAX_MERGE_COUNT of them. These PMAs ** are merged into a single PMA that is written to file pTemp2. */ rc = vdbeSorterInitMerge(db, pCsr, &nWrite); assert( rc!=SQLITE_OK || pSorter->aIter[ pSorter->aTree[1] ].pFile ); if( rc!=SQLITE_OK || pSorter->nPMA<=SORTER_MAX_MERGE_COUNT ){ break; } /* Open the second temp file, if it is not already open. */ if( pTemp2==0 ){ assert( iWrite2==0 ); rc = vdbeSorterOpenTempFile(db, &pTemp2); } if( rc==SQLITE_OK ){ rc = vdbeSorterWriteVarint(pTemp2, nWrite, &iWrite2); } if( rc==SQLITE_OK ){ int bEof = 0; while( rc==SQLITE_OK && bEof==0 ){ int nToWrite; VdbeSorterIter *pIter = &pSorter->aIter[ pSorter->aTree[1] ]; assert( pIter->pFile ); nToWrite = pIter->nKey + sqlite3VarintLen(pIter->nKey); rc = sqlite3OsWrite(pTemp2, pIter->aAlloc, nToWrite, iWrite2); iWrite2 += nToWrite; if( rc==SQLITE_OK ){ rc = sqlite3VdbeSorterNext(db, pCsr, &bEof); } } } } if( pSorter->nPMA<=SORTER_MAX_MERGE_COUNT ){ break; }else{ sqlite3_file *pTmp = pSorter->pTemp1; pSorter->nPMA = iNew; pSorter->pTemp1 = pTemp2; pTemp2 = pTmp; pSorter->iWriteOff = iWrite2; pSorter->iReadOff = 0; iWrite2 = 0; } }while( rc==SQLITE_OK ); if( pTemp2 ){ sqlite3OsCloseFree(pTemp2); } *pbEof = (pSorter->aIter[pSorter->aTree[1]].pFile==0); return rc; }
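/*
** sqlite3VdbeSorterRewind() rounds the iterator count up to a power of two
** (the "while( N<nIter ) N += N" loop) and then carves a single allocation
** into an aIter[] array followed by an aTree[] array of merge-tree slots.
** The sketch below reproduces just that sizing and layout arithmetic;
** DemoIter is a made-up stand-in for VdbeSorterIter.
*/
#include <stdio.h>
#include <stdlib.h>

typedef struct DemoIter { char aSpace[32]; } DemoIter;

int main(void){
  int nIter = 5;                 /* PMAs to merge in this pass */
  int N = 2;                     /* smallest power of 2 >= nIter */
  while( N<nIter ) N += N;       /* 2 -> 4 -> 8 */

  size_t nByte = (size_t)N*(sizeof(int)+sizeof(DemoIter));
  DemoIter *aIter = (DemoIter *)calloc(1, nByte);
  int *aTree;
  if( aIter==0 ) return 1;
  aTree = (int *)&aIter[N];      /* merge-tree indexes live after aIter[N] */

  printf("nIter=%d N=%d alloc=%zu bytes (aTree begins %zu bytes in)\n",
         nIter, N, nByte, (size_t)N*sizeof(DemoIter));
  (void)aTree;
  free(aIter);
  return 0;
}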
/* ** A read or write transaction may or may not be active on database handle ** db. If a transaction is active, commit it. If there is a ** write-transaction spanning more than one database file, this routine ** takes care of the master journal trickery. */ static int vdbeCommit(sqlite3 *db){ int i; int nTrans = 0; /* Number of databases with an active write-transaction */ int rc = SQLITE_OK; int needXcommit = 0; for(i=0; i<db->nDb; i++){ Btree *pBt = db->aDb[i].pBt; if( pBt && sqlite3BtreeIsInTrans(pBt) ){ needXcommit = 1; if( i!=1 ) nTrans++; } } /* If there are any write-transactions at all, invoke the commit hook */ if( needXcommit && db->xCommitCallback ){ int rc; sqlite3SafetyOff(db); rc = db->xCommitCallback(db->pCommitArg); sqlite3SafetyOn(db); if( rc ){ return SQLITE_CONSTRAINT; } } /* The simple case - no more than one database file (not counting the ** TEMP database) has a transaction active. There is no need for the ** master-journal. ** ** If the return value of sqlite3BtreeGetFilename() is a zero length ** string, it means the main database is :memory:. In that case we do ** not support atomic multi-file commits, so use the simple case then ** too. */ if( 0==strlen(sqlite3BtreeGetFilename(db->aDb[0].pBt)) || nTrans<=1 ){ for(i=0; rc==SQLITE_OK && i<db->nDb; i++){ Btree *pBt = db->aDb[i].pBt; if( pBt ){ rc = sqlite3BtreeSync(pBt, 0); } } /* Do the commit only if all databases successfully synced */ if( rc==SQLITE_OK ){ for(i=0; i<db->nDb; i++){ Btree *pBt = db->aDb[i].pBt; if( pBt ){ sqlite3BtreeCommit(pBt); } } } } /* The complex case - There is a multi-file write-transaction active. ** This requires a master journal file to ensure the transaction is ** committed atomicly. */ else{ char *zMaster = 0; /* File-name for the master journal */ char const *zMainFile = sqlite3BtreeGetFilename(db->aDb[0].pBt); OsFile master; /* Select a master journal file name */ do { u32 random; sqliteFree(zMaster); sqlite3Randomness(sizeof(random), &random); zMaster = sqlite3MPrintf("%s-mj%08X", zMainFile, random&0x7fffffff); if( !zMaster ){ return SQLITE_NOMEM; } }while( sqlite3OsFileExists(zMaster) ); /* Open the master journal. */ memset(&master, 0, sizeof(master)); rc = sqlite3OsOpenExclusive(zMaster, &master, 0); if( rc!=SQLITE_OK ){ sqliteFree(zMaster); return rc; } /* Write the name of each database file in the transaction into the new ** master journal file. If an error occurs at this point close ** and delete the master journal file. All the individual journal files ** still have 'null' as the master journal pointer, so they will roll ** back independantly if a failure occurs. */ for(i=0; i<db->nDb; i++){ Btree *pBt = db->aDb[i].pBt; if( i==1 ) continue; /* Ignore the TEMP database */ if( pBt && sqlite3BtreeIsInTrans(pBt) ){ char const *zFile = sqlite3BtreeGetJournalname(pBt); if( zFile[0]==0 ) continue; /* Ignore :memory: databases */ rc = sqlite3OsWrite(&master, zFile, strlen(zFile)+1); if( rc!=SQLITE_OK ){ sqlite3OsClose(&master); sqlite3OsDelete(zMaster); sqliteFree(zMaster); return rc; } } } /* Sync the master journal file. Before doing this, open the directory ** the master journal file is store in so that it gets synced too. 
*/ zMainFile = sqlite3BtreeGetDirname(db->aDb[0].pBt); rc = sqlite3OsOpenDirectory(zMainFile, &master); if( rc!=SQLITE_OK ){ sqlite3OsClose(&master); sqlite3OsDelete(zMaster); sqliteFree(zMaster); return rc; } rc = sqlite3OsSync(&master); if( rc!=SQLITE_OK ){ sqlite3OsClose(&master); sqliteFree(zMaster); return rc; } /* Sync all the db files involved in the transaction. The same call ** sets the master journal pointer in each individual journal. If ** an error occurs here, do not delete the master journal file. ** ** If the error occurs during the first call to sqlite3BtreeSync(), ** then there is a chance that the master journal file will be ** orphaned. But we cannot delete it, in case the master journal ** file name was written into the journal file before the failure ** occured. */ for(i=0; i<db->nDb; i++){ Btree *pBt = db->aDb[i].pBt; if( pBt && sqlite3BtreeIsInTrans(pBt) ){ rc = sqlite3BtreeSync(pBt, zMaster); if( rc!=SQLITE_OK ){ sqlite3OsClose(&master); sqliteFree(zMaster); return rc; } } } sqlite3OsClose(&master); /* Delete the master journal file. This commits the transaction. After ** doing this the directory is synced again before any individual ** transaction files are deleted. */ rc = sqlite3OsDelete(zMaster); assert( rc==SQLITE_OK ); sqliteFree(zMaster); zMaster = 0; rc = sqlite3OsSyncDirectory(zMainFile); if( rc!=SQLITE_OK ){ /* This is not good. The master journal file has been deleted, but ** the directory sync failed. There is no completely safe course of ** action from here. The individual journals contain the name of the ** master journal file, but there is no way of knowing if that ** master journal exists now or if it will exist after the operating ** system crash that may follow the fsync() failure. */ assert(0); sqliteFree(zMaster); return rc; } /* All files and directories have already been synced, so the following ** calls to sqlite3BtreeCommit() are only closing files and deleting ** journals. If something goes wrong while this is happening we don't ** really care. The integrity of the transaction is already guaranteed, ** but some stray 'cold' journals may be lying around. Returning an ** error code won't help matters. */ for(i=0; i<db->nDb; i++){ Btree *pBt = db->aDb[i].pBt; if( pBt ){ sqlite3BtreeCommit(pBt); } } } return rc; }
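/*
** The loop in vdbeCommit() above writes each participating journal path
** into the master journal with strlen(zFile)+1 bytes, i.e. including its
** NUL terminator. The sketch below writes and re-reads that payload with
** stdio so the format is concrete; the journal names are made up.
*/
#include <stdio.h>
#include <string.h>

int main(void){
  const char *azJournal[] = { "main.db-journal", "aux.db-journal" };
  char zBuf[256];
  long nBuf;
  int i;

  FILE *f = tmpfile();
  if( f==0 ) return 1;
  for(i=0; i<2; i++){
    /* name plus its NUL terminator, exactly as the commit loop does */
    fwrite(azJournal[i], 1, strlen(azJournal[i])+1, f);
  }

  rewind(f);
  nBuf = (long)fread(zBuf, 1, sizeof(zBuf), f);
  for(i=0; i<nBuf; i += (int)strlen(&zBuf[i])+1){
    printf("journal: %s\n", &zBuf[i]);   /* reader walks NUL-separated names */
  }
  fclose(f);
  return 0;
}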
//Create/open/close/delete a file void Test1() { sqlite3_vfs* vfs = sqlite3_vfs_find(KSymbianVfsNameZ); TEST(vfs != NULL); sqlite3_file* osFile = (sqlite3_file*)User::Alloc(vfs->szOsFile); TEST(osFile != NULL); //Creating a new file int outFlags = 0; int err = sqlite3OsOpen(vfs, KTestFile1Z, osFile, SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, &outFlags); TEST2(err, SQLITE_OK); TEST(outFlags & SQLITE_OPEN_READWRITE); err = sqlite3OsClose(osFile); TEST2(err, SQLITE_OK); //Opening an existing file for R/W err = sqlite3OsOpen(vfs, KTestFile1Z, osFile, SQLITE_OPEN_READWRITE, &outFlags); TEST2(err, SQLITE_OK); TEST(outFlags & SQLITE_OPEN_READWRITE); err = sqlite3OsClose(osFile); TEST2(err, SQLITE_OK); //Opening a read-only file err = sqlite3OsOpen(vfs, KTestFile2Z, osFile, SQLITE_OPEN_READWRITE, &outFlags); TEST2(err, SQLITE_OK); TEST(outFlags & SQLITE_OPEN_READONLY); //Truncate a read-only file err = osFile->pMethods->xTruncate(osFile, 0); TEST2(err, SQLITE_IOERR); //xAccess - read-only file int res = 0; err = vfs->xAccess(vfs, KTestFile2Z, SQLITE_ACCESS_READ, &res); TEST2(err, SQLITE_OK); TEST(res != 0); //xAccess - invalid request res = 0; err = vfs->xAccess(vfs, KTestFile2Z, 122, &res); TEST2(err, SQLITE_OK); TEST2(res, 0); // err = sqlite3OsClose(osFile); TEST2(err, SQLITE_OK); //Creating a new file err = sqlite3OsOpen(vfs, KTestFile3Z, osFile, SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, &outFlags); TEST2(err, SQLITE_OK); TEST(outFlags & SQLITE_OPEN_READWRITE); err = sqlite3OsClose(osFile); TEST2(err, SQLITE_OK); //Open a file for a read-only access err = sqlite3OsOpen(vfs, KTestFile1Z, osFile, SQLITE_OPEN_READONLY, &outFlags); TEST2(err, SQLITE_OK); TEST(outFlags & SQLITE_OPEN_READONLY); err = sqlite3OsWrite(osFile, "1234", 4, 0); TEST(err != SQLITE_OK); err = sqlite3SymbianLastOsError(); TEST2(err, KErrAccessDenied); err = vfs->xGetLastError(vfs, 0, 0); TEST2(err, 0);//Default implementation err = sqlite3OsClose(osFile); TEST2(err, SQLITE_OK); //Delete KTestFile3Z file err = sqlite3OsDelete(vfs, KTestFile3Z, 0); TEST2(err, SQLITE_OK); res = 0; err = sqlite3OsAccess(vfs, KTestFile3Z, SQLITE_ACCESS_EXISTS, &res); TEST2(err, SQLITE_OK); TEST2(res, 0); //Open a file for an exclusive access err = sqlite3OsOpen(vfs, KTestFile3Z, osFile, SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_DELETEONCLOSE | SQLITE_OPEN_EXCLUSIVE, &outFlags); TEST2(err, SQLITE_OK); err = sqlite3OsClose(osFile); TEST2(err, SQLITE_OK); //The file should not exist now err = sqlite3OsAccess(vfs, KTestFile3Z, SQLITE_ACCESS_EXISTS, &res); TEST2(err, SQLITE_OK); TEST2(res, 0); //Open a file for an exclusive access without deleting it after err = sqlite3OsOpen(vfs, KTestFile3Z, osFile, SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE, &outFlags); TEST2(err, SQLITE_OK); err = sqlite3OsClose(osFile); TEST2(err, SQLITE_OK); //The file should exist now err = sqlite3OsAccess(vfs, KTestFile3Z, SQLITE_ACCESS_EXISTS, &res); TEST2(err, SQLITE_OK); TEST2(res, 1); //Delete KTestFile3Z file err = sqlite3OsDelete(vfs, KTestFile3Z, 0); TEST2(err, SQLITE_OK); err = sqlite3OsAccess(vfs, KTestFile3Z, SQLITE_ACCESS_EXISTS, &res); TEST2(err, SQLITE_OK); TEST2(res, 0); // User::Free(osFile); }
/* ** Copy nPage pages from the source b-tree to the destination. */ int sqlite3_backup_step(sqlite3_backup *p, int nPage){ int rc; int destMode; /* Destination journal mode */ int pgszSrc = 0; /* Source page size */ int pgszDest = 0; /* Destination page size */ sqlite3_mutex_enter(p->pSrcDb->mutex); sqlite3BtreeEnter(p->pSrc); if( p->pDestDb ){ sqlite3_mutex_enter(p->pDestDb->mutex); } rc = p->rc; if( !isFatalError(rc) ){ Pager * const pSrcPager = sqlite3BtreePager(p->pSrc); /* Source pager */ Pager * const pDestPager = sqlite3BtreePager(p->pDest); /* Dest pager */ int ii; /* Iterator variable */ int nSrcPage = -1; /* Size of source db in pages */ int bCloseTrans = 0; /* True if src db requires unlocking */ /* If the source pager is currently in a write-transaction, return ** SQLITE_BUSY immediately. */ if( p->pDestDb && p->pSrc->pBt->inTransaction==TRANS_WRITE ){ rc = SQLITE_BUSY; }else{ rc = SQLITE_OK; } /* Lock the destination database, if it is not locked already. */ if( SQLITE_OK==rc && p->bDestLocked==0 && SQLITE_OK==(rc = sqlite3BtreeBeginTrans(p->pDest, 2)) ){ p->bDestLocked = 1; sqlite3BtreeGetMeta(p->pDest, BTREE_SCHEMA_VERSION, &p->iDestSchema); } /* If there is no open read-transaction on the source database, open ** one now. If a transaction is opened here, then it will be closed ** before this function exits. */ if( rc==SQLITE_OK && 0==sqlite3BtreeIsInReadTrans(p->pSrc) ){ rc = sqlite3BtreeBeginTrans(p->pSrc, 0); bCloseTrans = 1; } /* Do not allow backup if the destination database is in WAL mode ** and the page sizes are different between source and destination */ pgszSrc = sqlite3BtreeGetPageSize(p->pSrc); pgszDest = sqlite3BtreeGetPageSize(p->pDest); destMode = sqlite3PagerGetJournalMode(sqlite3BtreePager(p->pDest)); if( SQLITE_OK==rc && destMode==PAGER_JOURNALMODE_WAL && pgszSrc!=pgszDest ){ rc = SQLITE_READONLY; } /* Now that there is a read-lock on the source database, query the ** source pager for the number of pages in the database. */ nSrcPage = (int)sqlite3BtreeLastPage(p->pSrc); assert( nSrcPage>=0 ); for(ii=0; (nPage<0 || ii<nPage) && p->iNext<=(Pgno)nSrcPage && !rc; ii++){ const Pgno iSrcPg = p->iNext; /* Source page number */ if( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ){ DbPage *pSrcPg; /* Source page object */ rc = sqlite3PagerGet(pSrcPager, iSrcPg, &pSrcPg); if( rc==SQLITE_OK ){ rc = backupOnePage(p, iSrcPg, sqlite3PagerGetData(pSrcPg)); sqlite3PagerUnref(pSrcPg); } } p->iNext++; } if( rc==SQLITE_OK ){ p->nPagecount = nSrcPage; p->nRemaining = nSrcPage+1-p->iNext; if( p->iNext>(Pgno)nSrcPage ){ rc = SQLITE_DONE; }else if( !p->isAttached ){ attachBackupObject(p); } } /* Update the schema version field in the destination database. This ** is to make sure that the schema-version really does change in ** the case where the source and destination databases have the ** same schema version. */ if( rc==SQLITE_DONE && (rc = sqlite3BtreeUpdateMeta(p->pDest,1,p->iDestSchema+1))==SQLITE_OK ){ int nDestTruncate; if( p->pDestDb ){ sqlite3ResetInternalSchema(p->pDestDb, 0); } /* Set nDestTruncate to the final number of pages in the destination ** database. The complication here is that the destination page ** size may be different to the source page size. ** ** If the source page size is smaller than the destination page size, ** round up. In this case the call to sqlite3OsTruncate() below will ** fix the size of the file. 
However it is important to call ** sqlite3PagerTruncateImage() here so that any pages in the ** destination file that lie beyond the nDestTruncate page mark are ** journalled by PagerCommitPhaseOne() before they are destroyed ** by the file truncation. */ assert( pgszSrc==sqlite3BtreeGetPageSize(p->pSrc) ); assert( pgszDest==sqlite3BtreeGetPageSize(p->pDest) ); if( pgszSrc<pgszDest ){ int ratio = pgszDest/pgszSrc; nDestTruncate = (nSrcPage+ratio-1)/ratio; if( nDestTruncate==(int)PENDING_BYTE_PAGE(p->pDest->pBt) ){ nDestTruncate--; } }else{ nDestTruncate = nSrcPage * (pgszSrc/pgszDest); } sqlite3PagerTruncateImage(pDestPager, nDestTruncate); if( pgszSrc<pgszDest ){ /* If the source page-size is smaller than the destination page-size, ** two extra things may need to happen: ** ** * The destination may need to be truncated, and ** ** * Data stored on the pages immediately following the ** pending-byte page in the source database may need to be ** copied into the destination database. */ const i64 iSize = (i64)pgszSrc * (i64)nSrcPage; sqlite3_file * const pFile = sqlite3PagerFile(pDestPager); assert( pFile ); assert( (i64)nDestTruncate*(i64)pgszDest >= iSize || ( nDestTruncate==(int)(PENDING_BYTE_PAGE(p->pDest->pBt)-1) && iSize>=PENDING_BYTE && iSize<=PENDING_BYTE+pgszDest )); if( SQLITE_OK==(rc = sqlite3PagerCommitPhaseOne(pDestPager, 0, 1)) && SQLITE_OK==(rc = backupTruncateFile(pFile, iSize)) && SQLITE_OK==(rc = sqlite3PagerSync(pDestPager)) ){ i64 iOff; i64 iEnd = MIN(PENDING_BYTE + pgszDest, iSize); for( iOff=PENDING_BYTE+pgszSrc; rc==SQLITE_OK && iOff<iEnd; iOff+=pgszSrc ){ PgHdr *pSrcPg = 0; const Pgno iSrcPg = (Pgno)((iOff/pgszSrc)+1); rc = sqlite3PagerGet(pSrcPager, iSrcPg, &pSrcPg); if( rc==SQLITE_OK ){ u8 *zData = sqlite3PagerGetData(pSrcPg); rc = sqlite3OsWrite(pFile, zData, pgszSrc, iOff); } sqlite3PagerUnref(pSrcPg); } } }else{ rc = sqlite3PagerCommitPhaseOne(pDestPager, 0, 0); } /* Finish committing the transaction to the destination database. */ if( SQLITE_OK==rc && SQLITE_OK==(rc = sqlite3BtreeCommitPhaseTwo(p->pDest)) ){ rc = SQLITE_DONE; } } /* If bCloseTrans is true, then this function opened a read transaction ** on the source database. Close the read transaction here. There is ** no need to check the return values of the btree methods here, as ** "committing" a read-only transaction cannot fail. */ if( bCloseTrans ){ TESTONLY( int rc2 ); TESTONLY( rc2 = ) sqlite3BtreeCommitPhaseOne(p->pSrc, 0); TESTONLY( rc2 |= ) sqlite3BtreeCommitPhaseTwo(p->pSrc); assert( rc2==SQLITE_OK ); } if( rc==SQLITE_IOERR_NOMEM ){ rc = SQLITE_NOMEM; } p->rc = rc; }
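/*
** Worked example of the nDestTruncate arithmetic used by
** sqlite3_backup_step() above when the source page size is smaller than
** the destination's: the source page count is rounded up by the page-size
** ratio. The numbers are illustrative, and the extra adjustment the real
** code makes around the pending-byte page is omitted here.
*/
#include <stdio.h>

int main(void){
  int pgszSrc = 1024, pgszDest = 4096;   /* example page sizes */
  int nSrcPage = 10;                     /* pages in the source database */
  int nDestTruncate;

  if( pgszSrc<pgszDest ){
    int ratio = pgszDest/pgszSrc;                /* 4 source pages per dest page */
    nDestTruncate = (nSrcPage+ratio-1)/ratio;    /* ceil(10/4) = 3 */
  }else{
    nDestTruncate = nSrcPage * (pgszSrc/pgszDest);
  }
  printf("destination image: %d pages of %d bytes for %d source bytes\n",
         nDestTruncate, pgszDest, pgszSrc*nSrcPage);
  return 0;
}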
//Read/Write/Seek/Truncate test void Test2() { sqlite3_vfs* vfs = sqlite3_vfs_find(KSymbianVfsNameZ); TEST(vfs != NULL); sqlite3_file* osFile = (sqlite3_file*)User::Alloc(vfs->szOsFile); TEST(osFile != NULL); //Creating a new file int err = sqlite3OsDelete(vfs, KTestFile1Z, 0); TEST2(err, SQLITE_OK); int res = 0; err = sqlite3OsAccess(vfs, KTestFile1Z, SQLITE_ACCESS_EXISTS, &res); TEST2(err, SQLITE_OK); TEST2(res, 0); err = sqlite3OsOpen(vfs, KTestFile1Z, osFile, SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0); TEST2(err, SQLITE_OK); //Writing at the beginning of the file err = sqlite3OsWrite(osFile, "123456", 6, 0); TEST2(err, SQLITE_OK); //Verify the written data char data[20]; err = sqlite3OsRead(osFile, data, 6, 0); TEST2(err, SQLITE_OK); err = memcmp(data, "123456", 6); TEST2(err, 0); //Writing at beyond the end of the file err = sqlite3OsWrite(osFile, "abcdefgh", 8, 100); TEST2(err, SQLITE_OK); //Verify the written data err = sqlite3OsRead(osFile, data, 8, 100); TEST2(err, SQLITE_OK); err = memcmp(data, "abcdefgh", 8); TEST2(err, 0); //Truncate the file err = sqlite3OsTruncate(osFile, 3); TEST2(err, SQLITE_OK); //Write more data err = sqlite3OsWrite(osFile, "xyz", 3, 3); TEST2(err, SQLITE_OK); //Verify the written data err = sqlite3OsRead(osFile, data, 6, 0); TEST2(err, SQLITE_OK); err = memcmp(data, "123xyz", 6); TEST2(err, 0); //Check the file size TInt64 fileSize = 0; err = sqlite3OsFileSize(osFile, &fileSize); TEST2(err, SQLITE_OK); TEST(fileSize == 6); //FileControl - lock type int lockType = -1; err = osFile->pMethods->xFileControl(osFile, SQLITE_FCNTL_LOCKSTATE, &lockType); TEST2(err, SQLITE_OK); TEST2(lockType, NO_LOCK); //FileControl - set callback - NULL callback err = osFile->pMethods->xFileControl(osFile, KSqlFcntlRegisterFreePageCallback, 0); TEST2(err, SQLITE_ERROR); //FileControl - set callback - invalid callback object TSqlFreePageCallback cbck; err = osFile->pMethods->xFileControl(osFile, KSqlFcntlRegisterFreePageCallback, &cbck); TEST2(err, SQLITE_ERROR); //FileControl - invalid op-code err = osFile->pMethods->xFileControl(osFile, 90234, 0); TEST2(err, SQLITE_ERROR); //Close the file err = sqlite3OsClose(osFile); TEST2(err, SQLITE_OK); // err = sqlite3OsDelete(vfs, KTestFile1Z, 0); TEST2(err, SQLITE_OK); User::Free(osFile); }
/* ** Flush the write-list as if xSync() had been called on file handle ** pFile. If isCrash is true, simulate a crash. */ static int writeListSync(CrashFile *pFile, int isCrash){ int rc = SQLITE_OK; int iDc = g.iDeviceCharacteristics; WriteBuffer *pWrite; WriteBuffer **ppPtr; /* If this is not a crash simulation, set pFinal to point to the ** last element of the write-list that is associated with file handle ** pFile. ** ** If this is a crash simulation, set pFinal to an arbitrarily selected ** element of the write-list. */ WriteBuffer *pFinal = 0; if( !isCrash ){ for(pWrite=g.pWriteList; pWrite; pWrite=pWrite->pNext){ if( pWrite->pFile==pFile ){ pFinal = pWrite; } } }else if( iDc&(SQLITE_IOCAP_SEQUENTIAL|SQLITE_IOCAP_SAFE_APPEND) ){ int nWrite = 0; int iFinal; for(pWrite=g.pWriteList; pWrite; pWrite=pWrite->pNext) nWrite++; sqlite3Randomness(sizeof(int), &iFinal); iFinal = ((iFinal<0)?-1*iFinal:iFinal)%nWrite; for(pWrite=g.pWriteList; iFinal>0; pWrite=pWrite->pNext) iFinal--; pFinal = pWrite; } #ifdef TRACE_CRASHTEST printf("Sync %s (is %s crash)\n", pFile->zName, (isCrash?"a":"not a")); #endif ppPtr = &g.pWriteList; for(pWrite=*ppPtr; rc==SQLITE_OK && pWrite; pWrite=*ppPtr){ sqlite3_file *pRealFile = pWrite->pFile->pRealFile; /* (eAction==1) -> write block out normally, ** (eAction==2) -> do nothing, ** (eAction==3) -> trash sectors. */ int eAction = 0; if( !isCrash ){ eAction = 2; if( (pWrite->pFile==pFile || iDc&SQLITE_IOCAP_SEQUENTIAL) ){ eAction = 1; } }else{ char random; sqlite3Randomness(1, &random); /* Do not select option 3 (sector trashing) if the IOCAP_ATOMIC flag ** is set or this is an OsTruncate(), not an Oswrite(). */ if( (iDc&SQLITE_IOCAP_ATOMIC) || (pWrite->zBuf==0) ){ random &= 0x01; } /* If IOCAP_SEQUENTIAL is set and this is not the final entry ** in the truncated write-list, always select option 1 (write ** out correctly). */ if( (iDc&SQLITE_IOCAP_SEQUENTIAL && pWrite!=pFinal) ){ random = 0; } /* If IOCAP_SAFE_APPEND is set and this OsWrite() operation is ** an append (first byte of the written region is 1 byte past the ** current EOF), always select option 1 (write out correctly). 
*/ if( iDc&SQLITE_IOCAP_SAFE_APPEND && pWrite->zBuf ){ i64 iSize; sqlite3OsFileSize(pRealFile, &iSize); if( iSize==pWrite->iOffset ){ random = 0; } } if( (random&0x06)==0x06 ){ eAction = 3; }else{ eAction = ((random&0x01)?2:1); } } switch( eAction ){ case 1: { /* Write out correctly */ if( pWrite->zBuf ){ rc = sqlite3OsWrite( pRealFile, pWrite->zBuf, pWrite->nBuf, pWrite->iOffset ); }else{ rc = sqlite3OsTruncate(pRealFile, pWrite->iOffset); } *ppPtr = pWrite->pNext; #ifdef TRACE_CRASHTEST if( isCrash ){ printf("Writing %d bytes @ %d (%s)\n", pWrite->nBuf, (int)pWrite->iOffset, pWrite->pFile->zName ); } #endif sqlite3_free(pWrite); break; } case 2: { /* Do nothing */ ppPtr = &pWrite->pNext; #ifdef TRACE_CRASHTEST if( isCrash ){ printf("Omiting %d bytes @ %d (%s)\n", pWrite->nBuf, (int)pWrite->iOffset, pWrite->pFile->zName ); } #endif break; } case 3: { /* Trash sectors */ u8 *zGarbage; int iFirst = (pWrite->iOffset/g.iSectorSize); int iLast = (pWrite->iOffset+pWrite->nBuf-1)/g.iSectorSize; assert(pWrite->zBuf); #ifdef TRACE_CRASHTEST printf("Trashing %d sectors @ sector %d (%s)\n", 1+iLast-iFirst, iFirst, pWrite->pFile->zName ); #endif zGarbage = sqlite3_malloc(g.iSectorSize); if( zGarbage ){ sqlite3_int64 i; for(i=iFirst; rc==SQLITE_OK && i<=iLast; i++){ sqlite3Randomness(g.iSectorSize, zGarbage); rc = sqlite3OsWrite( pRealFile, zGarbage, g.iSectorSize, i*g.iSectorSize ); } sqlite3_free(zGarbage); }else{ rc = SQLITE_NOMEM; } ppPtr = &pWrite->pNext; break; } default: assert(!"Cannot happen"); } if( pWrite==pFinal ) break; } if( rc==SQLITE_OK && isCrash ){ exit(-1); } for(pWrite=g.pWriteList; pWrite && pWrite->pNext; pWrite=pWrite->pNext); g.pWriteListEnd = pWrite; return rc; }
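/*
** Sketch of the per-write action selection inside writeListSync() when a
** crash is being simulated: a random byte chooses between writing a block
** out normally (1), dropping it (2), or trashing its sectors (3), with the
** device-characteristic flags narrowing the choices. The DEMO_IOCAP_*
** values below are illustrative, not SQLite's SQLITE_IOCAP_* constants.
*/
#include <stdio.h>
#include <stdlib.h>

#define DEMO_IOCAP_ATOMIC      0x01
#define DEMO_IOCAP_SEQUENTIAL  0x02

static int chooseAction(int iDc, int isTruncate, int isFinal){
  unsigned char random = (unsigned char)(rand() & 0xff);
  if( (iDc&DEMO_IOCAP_ATOMIC) || isTruncate ) random &= 0x01;  /* never trash */
  if( (iDc&DEMO_IOCAP_SEQUENTIAL) && !isFinal ) random = 0;    /* always write */
  if( (random&0x06)==0x06 ) return 3;          /* trash sectors */
  return (random&0x01) ? 2 : 1;                /* 2 = omit, 1 = write out */
}

int main(void){
  int hist[4] = {0, 0, 0, 0};
  int i;
  for(i=0; i<1000; i++) hist[chooseAction(0, 0, 1)]++;
  printf("write=%d omit=%d trash=%d\n", hist[1], hist[2], hist[3]);
  return 0;
}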
/* ** Copy nPage pages from the source b-tree to the destination. */ int sqlite3_backup_step(sqlite3_backup *p, int nPage){ int rc; int destMode; /* Destination journal mode */ int pgszSrc = 0; /* Source page size */ int pgszDest = 0; /* Destination page size */ #ifdef SQLITE_ENABLE_API_ARMOR if( p==0 ) return SQLITE_MISUSE_BKPT; #endif sqlite3_mutex_enter(p->pSrcDb->mutex); sqlite3BtreeEnter(p->pSrc); if( p->pDestDb ){ sqlite3_mutex_enter(p->pDestDb->mutex); } rc = p->rc; if( !isFatalError(rc) ){ Pager * const pSrcPager = sqlite3BtreePager(p->pSrc); /* Source pager */ Pager * const pDestPager = sqlite3BtreePager(p->pDest); /* Dest pager */ int ii; /* Iterator variable */ int nSrcPage = -1; /* Size of source db in pages */ int bCloseTrans = 0; /* True if src db requires unlocking */ /* If the source pager is currently in a write-transaction, return ** SQLITE_BUSY immediately. */ if( p->pDestDb && p->pSrc->pBt->inTransaction==TRANS_WRITE ){ rc = SQLITE_BUSY; }else{ rc = SQLITE_OK; } /* If there is no open read-transaction on the source database, open ** one now. If a transaction is opened here, then it will be closed ** before this function exits. */ if( rc==SQLITE_OK && 0==sqlite3BtreeIsInReadTrans(p->pSrc) ){ rc = sqlite3BtreeBeginTrans(p->pSrc, 0, 0); bCloseTrans = 1; } /* If the destination database has not yet been locked (i.e. if this ** is the first call to backup_step() for the current backup operation), ** try to set its page size to the same as the source database. This ** is especially important on ZipVFS systems, as in that case it is ** not possible to create a database file that uses one page size by ** writing to it with another. */ if( p->bDestLocked==0 && rc==SQLITE_OK && setDestPgsz(p)==SQLITE_NOMEM ){ rc = SQLITE_NOMEM; } /* Lock the destination database, if it is not locked already. */ if( SQLITE_OK==rc && p->bDestLocked==0 && SQLITE_OK==(rc = sqlite3BtreeBeginTrans(p->pDest, 2, (int*)&p->iDestSchema)) ){ p->bDestLocked = 1; } /* Do not allow backup if the destination database is in WAL mode ** and the page sizes are different between source and destination */ pgszSrc = sqlite3BtreeGetPageSize(p->pSrc); pgszDest = sqlite3BtreeGetPageSize(p->pDest); destMode = sqlite3PagerGetJournalMode(sqlite3BtreePager(p->pDest)); if( SQLITE_OK==rc && destMode==PAGER_JOURNALMODE_WAL && pgszSrc!=pgszDest ){ rc = SQLITE_READONLY; } /* Now that there is a read-lock on the source database, query the ** source pager for the number of pages in the database. */ nSrcPage = (int)sqlite3BtreeLastPage(p->pSrc); assert( nSrcPage>=0 ); for(ii=0; (nPage<0 || ii<nPage) && p->iNext<=(Pgno)nSrcPage && !rc; ii++){ const Pgno iSrcPg = p->iNext; /* Source page number */ if( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ){ DbPage *pSrcPg; /* Source page object */ rc = sqlite3PagerGet(pSrcPager, iSrcPg, &pSrcPg,PAGER_GET_READONLY); if( rc==SQLITE_OK ){ rc = backupOnePage(p, iSrcPg, sqlite3PagerGetData(pSrcPg), 0); sqlite3PagerUnref(pSrcPg); } } p->iNext++; } if( rc==SQLITE_OK ){ p->nPagecount = nSrcPage; p->nRemaining = nSrcPage+1-p->iNext; if( p->iNext>(Pgno)nSrcPage ){ rc = SQLITE_DONE; }else if( !p->isAttached ){ attachBackupObject(p); } } /* Update the schema version field in the destination database. This ** is to make sure that the schema-version really does change in ** the case where the source and destination databases have the ** same schema version. 
*/ if( rc==SQLITE_DONE ){ if( nSrcPage==0 ){ rc = sqlite3BtreeNewDb(p->pDest); nSrcPage = 1; } if( rc==SQLITE_OK || rc==SQLITE_DONE ){ rc = sqlite3BtreeUpdateMeta(p->pDest,1,p->iDestSchema+1); } if( rc==SQLITE_OK ){ if( p->pDestDb ){ sqlite3ResetAllSchemasOfConnection(p->pDestDb); } if( destMode==PAGER_JOURNALMODE_WAL ){ rc = sqlite3BtreeSetVersion(p->pDest, 2); } } if( rc==SQLITE_OK ){ int nDestTruncate; /* Set nDestTruncate to the final number of pages in the destination ** database. The complication here is that the destination page ** size may be different to the source page size. ** ** If the source page size is smaller than the destination page size, ** round up. In this case the call to sqlite3OsTruncate() below will ** fix the size of the file. However it is important to call ** sqlite3PagerTruncateImage() here so that any pages in the ** destination file that lie beyond the nDestTruncate page mark are ** journalled by PagerCommitPhaseOne() before they are destroyed ** by the file truncation. */ assert( pgszSrc==sqlite3BtreeGetPageSize(p->pSrc) ); assert( pgszDest==sqlite3BtreeGetPageSize(p->pDest) ); if( pgszSrc<pgszDest ){ int ratio = pgszDest/pgszSrc; nDestTruncate = (nSrcPage+ratio-1)/ratio; if( nDestTruncate==(int)PENDING_BYTE_PAGE(p->pDest->pBt) ){ nDestTruncate--; } }else{ nDestTruncate = nSrcPage * (pgszSrc/pgszDest); } assert( nDestTruncate>0 ); if( pgszSrc<pgszDest ){ /* If the source page-size is smaller than the destination page-size, ** two extra things may need to happen: ** ** * The destination may need to be truncated, and ** ** * Data stored on the pages immediately following the ** pending-byte page in the source database may need to be ** copied into the destination database. */ const i64 iSize = (i64)pgszSrc * (i64)nSrcPage; sqlite3_file * const pFile = sqlite3PagerFile(pDestPager); Pgno iPg; int nDstPage; i64 iOff; i64 iEnd; assert( pFile ); assert( nDestTruncate==0 || (i64)nDestTruncate*(i64)pgszDest >= iSize || ( nDestTruncate==(int)(PENDING_BYTE_PAGE(p->pDest->pBt)-1) && iSize>=PENDING_BYTE && iSize<=PENDING_BYTE+pgszDest )); /* This block ensures that all data required to recreate the original ** database has been stored in the journal for pDestPager and the ** journal synced to disk. So at this point we may safely modify ** the database file in any way, knowing that if a power failure ** occurs, the original database will be reconstructed from the ** journal file. */ sqlite3PagerPagecount(pDestPager, &nDstPage); for(iPg=nDestTruncate; rc==SQLITE_OK && iPg<=(Pgno)nDstPage; iPg++){ if( iPg!=PENDING_BYTE_PAGE(p->pDest->pBt) ){ DbPage *pPg; rc = sqlite3PagerGet(pDestPager, iPg, &pPg, 0); if( rc==SQLITE_OK ){ rc = sqlite3PagerWrite(pPg); sqlite3PagerUnref(pPg); } } } if( rc==SQLITE_OK ){ rc = sqlite3PagerCommitPhaseOne(pDestPager, 0, 1); } /* Write the extra pages and truncate the database file as required */ iEnd = MIN(PENDING_BYTE + pgszDest, iSize); for( iOff=PENDING_BYTE+pgszSrc; rc==SQLITE_OK && iOff<iEnd; iOff+=pgszSrc ){ PgHdr *pSrcPg = 0; const Pgno iSrcPg = (Pgno)((iOff/pgszSrc)+1); rc = sqlite3PagerGet(pSrcPager, iSrcPg, &pSrcPg, 0); if( rc==SQLITE_OK ){ u8 *zData = sqlite3PagerGetData(pSrcPg); rc = sqlite3OsWrite(pFile, zData, pgszSrc, iOff); } sqlite3PagerUnref(pSrcPg); } if( rc==SQLITE_OK ){ rc = backupTruncateFile(pFile, iSize); } /* Sync the database file to disk. 
*/ if( rc==SQLITE_OK ){ rc = sqlite3PagerSync(pDestPager, 0); } }else{ sqlite3PagerTruncateImage(pDestPager, nDestTruncate); rc = sqlite3PagerCommitPhaseOne(pDestPager, 0, 0); } /* Finish committing the transaction to the destination database. */ if( SQLITE_OK==rc && SQLITE_OK==(rc = sqlite3BtreeCommitPhaseTwo(p->pDest, 0)) ){ rc = SQLITE_DONE; } } } /* If bCloseTrans is true, then this function opened a read transaction ** on the source database. Close the read transaction here. There is ** no need to check the return values of the btree methods here, as ** "committing" a read-only transaction cannot fail. */ if( bCloseTrans ){ TESTONLY( int rc2 ); TESTONLY( rc2 = ) sqlite3BtreeCommitPhaseOne(p->pSrc, 0); TESTONLY( rc2 |= ) sqlite3BtreeCommitPhaseTwo(p->pSrc, 0); assert( rc2==SQLITE_OK ); } if( rc==SQLITE_IOERR_NOMEM ){ rc = SQLITE_NOMEM_BKPT; } p->rc = rc; }