/* SSL error handling, according to SSL_get_error(3).  Maps the result
 * of an SSL I/O call ('sret') to an NE_SOCK_* error code, setting the
 * socket error string as a side effect.  Returns NE_SOCK_CLOSED on a
 * clean SSL shutdown, NE_SOCK_TRUNC on an unexpected EOF, a mapped
 * errno code for plain socket errors, and NE_SOCK_ERROR otherwise. */
static int error_ossl(ne_socket *sock, int sret)
{
    int err = SSL_get_error(sock->ssl, sret), ret = NE_SOCK_ERROR;
    /* ERR_get_error() returns unsigned long; storing it in an int
     * would truncate the packed library/reason code. */
    unsigned long reason;
    const char *str;

    switch (err) {
    case SSL_ERROR_ZERO_RETURN:
        /* Peer sent close_notify: clean SSL connection shutdown. */
        ret = NE_SOCK_CLOSED;
        set_error(sock, _("Connection closed"));
        break;
    case SSL_ERROR_SYSCALL:
        reason = ERR_get_error();
        if (reason == 0) {
            /* Empty OpenSSL error stack: a transport-level problem. */
            if (sret == 0) {
                /* EOF without close_notify, possible truncation */
                set_error(sock, _("Secure connection truncated"));
                ret = NE_SOCK_TRUNC;
            } else {
                /* Other socket error. */
                int errnum = ne_errno;
                set_strerror(sock, errnum);
                ret = MAP_ERR(errnum);
            }
        } else {
            /* ERR_reason_error_string() may return NULL for unknown
             * codes; passing NULL to %s is undefined behaviour. */
            str = ERR_reason_error_string(reason);
            ne_snprintf(sock->error, sizeof sock->error,
                        _("SSL error: %s"), str ? str : "unknown");
        }
        break;
    default:
        reason = ERR_get_error();
        str = ERR_reason_error_string(reason);
        ne_snprintf(sock->error, sizeof sock->error,
                    _("SSL error: %s"), str ? str : "unknown");
        break;
    }
    return ret;
}
/* Write 'length' bytes from 'data' to the raw (non-SSL) socket,
 * retrying after short writes and EINTR.  Returns zero on success,
 * or a mapped NE_SOCK_* error code (setting the socket error string)
 * if a write fails. */
static ssize_t write_raw(ne_socket *sock, const char *data, size_t length)
{
    ssize_t n;

    for (;;) {
        n = ne_write(sock->fd, data, length);
        if (n > 0) {
            /* Partial write: advance past the bytes accepted. */
            data += n;
            length -= n;
        }
        /* Stop once everything is written, or on any failure that is
         * not a signal interruption. */
        if (length == 0 || (n <= 0 && !NE_ISINTR(ne_errno)))
            break;
    }

    if (n < 0) {
        int errnum = ne_errno;
        set_strerror(sock, errnum);
        return MAP_ERR(errnum);
    }
    return 0;
}
/* Send up to 'length' bytes from 'data' on the socket, retrying while
 * interrupted by signals.  Returns the number of bytes actually sent
 * (which may be fewer than requested), or a mapped NE_SOCK_* error
 * code on failure (setting the socket error string). */
static ssize_t write_raw(ne_socket *sock, const char *data, size_t length)
{
    ssize_t sent;

#ifdef __QNX__
    /* Test failures seen on QNX over loopback, if passing large
     * buffer lengths to send(). */
    if (length > 8192)
        length = 8192;
#endif

    /* Retry for as long as send() is interrupted by a signal. */
    do {
        sent = send(sock->fd, data, length, 0);
    } while (sent == -1 && NE_ISINTR(ne_errno));

    if (sent >= 0)
        return sent;

    {
        int errnum = ne_errno;
        set_strerror(sock, errnum);
        return MAP_ERR(errnum);
    }
}
/* SSL error handling, according to SSL_get_error(3).  Translates the
 * failure of an SSL call returning 'sret' into an NE_SOCK_* code and
 * fills in the socket error string.  Consults the OpenSSL error stack
 * and leaves it empty on return from the generic-error path. */
static int error_ossl(ne_socket *sock, int sret)
{
    int sslcode = SSL_get_error(sock->ssl, sret);
    unsigned long reason;
    const char *str;

    /* A clean close_notify shutdown needs no error-stack inspection. */
    if (sslcode == SSL_ERROR_ZERO_RETURN) {
        set_error(sock, _("Connection closed"));
        return NE_SOCK_CLOSED;
    }

    /* for all other errors, look at the OpenSSL error stack */
    reason = ERR_get_error();
    if (reason == 0) {
        /* Empty error stack, presume this is a system call error: */
        if (sret == 0) {
            /* EOF without close_notify, possible truncation */
            set_error(sock, _("Secure connection truncated"));
            return NE_SOCK_TRUNC;
        } else {
            /* Other socket error. */
            int errnum = ne_errno;
            set_strerror(sock, errnum);
            return MAP_ERR(errnum);
        }
    }

    /* Prefer OpenSSL's human-readable reason; fall back to the raw
     * numeric codes when no reason string is registered. */
    str = ERR_reason_error_string(reason);
    if (str != NULL) {
        ne_snprintf(sock->error, sizeof sock->error,
                    _("SSL error: %s"), str);
    } else {
        ne_snprintf(sock->error, sizeof sock->error,
                    _("SSL error code %d/%d/%lu"), sret, sslcode, reason);
    }

    /* make sure the error stack is now empty. */
    ERR_clear_error();
    return NE_SOCK_ERROR;
}
/*
 * Use Bulk Get/Put to copy the given number of pages worth of
 * records from the source database to the destination database;
 * this function should be called until all tables are copied, at
 * which point it will return SQLITE_DONE.  Both Btrees need to
 * have transactions before calling this function.
 * p->pSrc - Source Btree.
 * p->tables - Contains a list of iTables to copy, gotten using
 *     btreeGetTables().
 * p->currentTable - Index in tables of the current table being copied.
 * p->srcCur - Cursor on the current source table being copied.
 * p->pDest - Destination Btree.
 * p->destCur - BtCursor on the destination table being copied into.
 * pages - Number of pages worth of data to copy; a negative value
 *     means "copy everything in one call".
 */
static int btreeCopyPages(sqlite3_backup *p, int *pages)
{
	DB *dbp;
	DBT dataOut, dataIn;
	char bufOut[MULTI_BUFSIZE], bufIn[MULTI_BUFSIZE];
	int ret, rc, copied, srcIsDupIndex;
	void *in, *out, *app;

	ret = 0;
	rc = SQLITE_OK;
	dbp = NULL;
	copied = 0;
	/* Both bulk buffers are caller-owned stack memory (DB_DBT_USERMEM):
	 * dataOut receives the bulk-get result, dataIn is filled for the
	 * bulk put into the destination. */
	memset(&dataOut, 0, sizeof(dataOut));
	memset(&dataIn, 0, sizeof(dataIn));
	dataOut.flags = DB_DBT_USERMEM;
	dataIn.flags = DB_DBT_USERMEM;
	dataOut.data = bufOut;
	dataOut.ulen = sizeof(bufOut);
	dataIn.data = bufIn;
	dataIn.ulen = sizeof(bufIn);

	while (*pages < 0 || *pages > copied) {
		/* No tables left to copy */
		if (p->tables[p->currentTable] == -1) {
			u32 val;
			/*
			 * Update the schema file format and largest rootpage
			 * in the meta data.  Other meta data values should
			 * not be changed.
			 */
			sqlite3BtreeGetMeta(p->pSrc, 1, &val);
			if (p->pSrc->db->errCode == SQLITE_BUSY) {
				rc = SQLITE_BUSY;
				goto err;
			}
			rc = sqlite3BtreeUpdateMeta(p->pDest, 1, val);
			if (rc != SQLITE_OK)
				goto err;
			sqlite3BtreeGetMeta(p->pSrc, 3, &val);
			if (p->pSrc->db->errCode == SQLITE_BUSY) {
				rc = SQLITE_BUSY;
				goto err;
			}
			rc = sqlite3BtreeUpdateMeta(p->pDest, 3, val);
			if (rc != SQLITE_OK)
				goto err;
			/* All tables done: report completion via 'err'
			 * (which returns SQLITE_DONE directly). */
			ret = SQLITE_DONE;
			goto err;
		}
		/* If not currently copying a table, get the next table. */
		if (!p->srcCur) {
			rc = btreeGetUserTable(p->pSrc, p->srcTxn, &dbp,
			    p->tables[p->currentTable]);
			if (rc != SQLITE_OK)
				goto err;
			assert(dbp);
			memset(&p->destCur, 0, sizeof(p->destCur));
			/*
			 * Open a cursor on the destination table; this will
			 * create the table and allow the Btree to manage the
			 * DB object.
			 */
			sqlite3BtreeCursor(p->pDest,
			    p->tables[p->currentTable], 1, dbp->app_private,
			    &p->destCur);
			if ((rc = p->destCur.error) != SQLITE_OK) {
				/* On failure we still own dbp and its
				 * app_private; release both here. */
				app = dbp->app_private;
				dbp->close(dbp, DB_NOSYNC);
				if (app)
					sqlite3DbFree(p->pSrcDb, app);
				goto err;
			}
			/* Open a cursor on the source table. */
			if ((ret = dbp->cursor(dbp, p->srcTxn,
			    &p->srcCur, 0)) != 0)
				goto err;
			/* Ownership of dbp has passed to p->srcCur. */
			dbp = 0;
		}
		srcIsDupIndex = isDupIndex(
		    (p->tables[p->currentTable] & 1) ? BTREE_INTKEY : 0,
		    p->pSrc->pBt->dbStorage, p->srcCur->dbp->app_private,
		    p->srcCur->dbp);
		/*
		 * Copy the current table until the given number of
		 * pages is copied, or the entire table has been copied.
		 */
		while (*pages < 0 || *pages > copied) {
			DBT key, data;
			memset(&key, 0, sizeof(key));
			memset(&data, 0, sizeof(data));
			/* Do a Bulk Get from the source table. */
			ret = p->srcCur->get(p->srcCur, &key, &dataOut,
			    DB_NEXT | DB_MULTIPLE_KEY);
			if (ret == DB_NOTFOUND)
				break;
			if (ret != 0)
				goto err;
			/* Copy the records into the Bulk buffer. */
			DB_MULTIPLE_INIT(out, &dataOut);
			DB_MULTIPLE_WRITE_INIT(in, &dataIn);
			DB_MULTIPLE_KEY_NEXT(out, &dataOut, key.data,
			    key.size, data.data, data.size);
			while (out) {
				/*
				 * Have to translate the index formats if they
				 * are not the same.
				 */
				if (p->destCur.isDupIndex != srcIsDupIndex) {
					if (srcIsDupIndex) {
						/* dup-index -> regular:
						 * rebuild a combined index
						 * key for the destination. */
						p->destCur.key = key;
						p->destCur.data = data;
						if (!btreeCreateIndexKey(
						    &p->destCur)) {
							rc = SQLITE_NOMEM;
							goto err;
						}
						DB_MULTIPLE_KEY_WRITE_NEXT(in,
						    &dataIn,
						    p->destCur.index.data,
						    p->destCur.index.size,
						    p->destCur.data.data, 0);
					} else {
						/* Copy the key into the cursor
						 * index since splitting the key
						 * requires changing the
						 * internal memory. */
						if (!allocateCursorIndex(
						    &p->destCur, key.size)) {
							rc = SQLITE_NOMEM;
							goto err;
						}
						memcpy(p->destCur.index.data,
						    key.data, key.size);
						p->destCur.index.size =
						    key.size;
						p->destCur.key.data =
						    p->destCur.index.data;
						p->destCur.key.size =
						    p->destCur.index.size;
						splitIndexKey(&p->destCur);
						DB_MULTIPLE_KEY_WRITE_NEXT(
						    in, &dataIn,
						    p->destCur.key.data,
						    p->destCur.key.size,
						    p->destCur.data.data,
						    p->destCur.data.size);
					}
				} else
					/* Same format: copy verbatim. */
					DB_MULTIPLE_KEY_WRITE_NEXT(in,
					    &dataIn, key.data, key.size,
					    data.data, data.size);
				DB_MULTIPLE_KEY_NEXT(out, &dataOut, key.data,
				    key.size, data.data, data.size);
			}
			/* Insert into the destination table. */
			dbp = p->destCur.cached_db->dbp;
			if ((ret = dbp->put(dbp, p->pDest->savepoint_txn,
			    &dataIn, 0, DB_MULTIPLE_KEY)) != 0)
				goto err;
			/* dbp was only borrowed from destCur's cache. */
			dbp = NULL;
			/* NOTE(review): progress is estimated, not measured —
			 * one full bulk buffer is assumed to equal this many
			 * pages. */
			copied += MULTI_BUFSIZE/SQLITE_DEFAULT_PAGE_SIZE;
		}
		/*
		 * Done copying the current table, time to look for a new
		 * table to copy.
		 */
		if (ret == DB_NOTFOUND) {
			ret = 0;
			rc = sqlite3BtreeCloseCursor(&p->destCur);
			if (p->srcCur) {
				/* Closing the source cursor also requires
				 * closing its DB handle, which we own. */
				app = p->srcCur->dbp->app_private;
				dbp = p->srcCur->dbp;
				p->srcCur->close(p->srcCur);
				ret = dbp->close(dbp, DB_NOSYNC);
				if (app)
					sqlite3DbFree(p->pSrcDb, app);
			}
			p->srcCur = NULL;
			if (ret != 0 || rc != SQLITE_OK)
				goto err;
			p->currentTable += 1;
		}
	}
	goto done;
err:	/* SQLITE_DONE is a success sentinel, not an error code: return
	 * it directly rather than passing it through MAP_ERR. */
	if (ret == SQLITE_DONE)
		return ret;
done:	return MAP_ERR(rc, ret);
}
/*
 * Deletes all data and environment files of the given Btree.  Requires
 * that there are no other handles using the BtShared when this function
 * is called.
 *
 * p      - Btree whose environment is deleted; may be NULL when only the
 *          on-disk files at 'home' should be removed.
 * home   - environment directory; NULL means "nothing on disk to remove".
 * rename - if non-zero, rename the environment to home + BACKUP_SUFFIX
 *          instead of unlinking it.
 *
 * Returns SQLITE_OK on success, SQLITE_BUSY if the shared Btree is still
 * referenced, or a mapped error code.
 */
int btreeDeleteEnvironment(Btree *p, const char *home, int rename)
{
	BtShared *pBt;
	int rc, ret, iDb;
	sqlite3 *db;
	DB_ENV *tmp_env;
	char path[512];
#ifdef BDBSQL_FILE_PER_TABLE
	int numFiles;
	char **files;
#endif
	rc = SQLITE_OK;
	ret = 0;
	iDb = 0;
	db = NULL;
	tmp_env = NULL;

	if (p != NULL) {
		if ((rc = btreeUpdateBtShared(p, 1)) != SQLITE_OK)
			goto err;
		pBt = p->pBt;
		/* Refuse to delete an environment other handles still use. */
		if (pBt->nRef > 1)
			return SQLITE_BUSY;

		db = p->db;
		/* Find the aDb slot referencing this Btree so it can be
		 * cleared after the close. */
		for (iDb = 0; iDb < db->nDb; iDb++) {
			if (db->aDb[iDb].pBt == p)
				break;
		}
		if ((rc = sqlite3BtreeClose(p)) != SQLITE_OK)
			goto err;
		pBt = NULL;
		p = NULL;
		/* Guard against iDb == db->nDb when no slot matched;
		 * indexing aDb[nDb] would be out of bounds. */
		if (iDb < db->nDb)
			db->aDb[iDb].pBt = NULL;
	}
	if (home == NULL)
		goto done;

	/* Clean up the environment directory itself; 'path' was previously
	 * passed here uninitialized — the environment home is intended. */
	ret = btreeCleanupEnv(home);
	/* EFAULT can be returned on Windows when the file does not exist. */
	if (ret == ENOENT || ret == EFAULT)
		ret = 0;
	else if (ret != 0)
		goto err;

	/* A scratch DB_ENV handle is needed only for the __os_* calls. */
	if ((ret = db_env_create(&tmp_env, 0)) != 0)
		goto err;

	if (rename) {
		/* Keep the environment around under a backup name. */
		if (!(ret = __os_exists(tmp_env->env, home, 0))) {
			sqlite3_snprintf(sizeof(path), path, "%s%s",
			    home, BACKUP_SUFFIX);
			ret = __os_rename(tmp_env->env, home, path, 0);
		}
	} else {
#ifdef BDBSQL_FILE_PER_TABLE
		/* Remove each per-table file, then the directory itself.
		 * Only free the directory list if __os_dirlist succeeded;
		 * otherwise 'files'/'numFiles' are uninitialized. */
		ret = __os_dirlist(tmp_env->env, home, 0, &files, &numFiles);
		if (ret == 0) {
			int i, ret2;
			for (i = 0; i < numFiles; i++) {
				sqlite3_snprintf(sizeof(path), path,
				    "%s/%s", home, files[i]);
				if ((ret2 = __os_unlink(
				    tmp_env->env, path, 0)) != 0)
					ret = ret2;
			}
			__os_dirfree(tmp_env->env, files, numFiles);
		}
		if (ret == 0)
			ret = __os_unlink(tmp_env->env, home, 0);
#else
		if (!(ret = __os_exists(tmp_env->env, home, 0)))
			ret = __os_unlink(tmp_env->env, home, 0);
#endif
	}
	/* EFAULT can be returned on Windows when the file does not exist. */
	if (ret == ENOENT || ret == EFAULT)
		ret = 0;
	else if (ret != 0)
		goto err;
err:
done:	if (tmp_env != NULL)
		tmp_env->close(tmp_env, 0);
	return MAP_ERR(rc, ret);
}
/*
** A write transaction must be opened before calling this function.
** It performs a single unit of work towards an incremental vacuum.
** Specifically, in the Berkeley DB storage manager, it attempts to compact
** one table.
**
** If the incremental vacuum is finished after this function has run,
** SQLITE_DONE is returned.  If it is not finished, but no error occurred,
** SQLITE_OK is returned.  Otherwise an SQLite error code.
**
** The caller can get and accumulate the number of truncated pages with the
** input parameter truncatedPages.  Also, btreeIncrVacuum will skip the
** vacuum if enough pages have been truncated, for optimization.
*/
int btreeIncrVacuum(Btree *p, u_int32_t *truncatedPages)
{
	BtShared *pBt;
	CACHED_DB *cached_db;
	DB *dbp;
	DBT key, data;
	char *fileName, *tableName, tableNameBuf[DBNAME_SIZE];
	void *app;
	int iTable, rc, ret, t_ret;
	u_int32_t was_create;
	DB_COMPACT compact_data;
	DBT *pStart, end;	/* start/end of db_compact() */
	struct VacuumInfo *pInfo;
	int vacuumMode;

	assert(p->pBt->dbStorage == DB_STORE_NAMED);
	if (!p->connected &&
	    (rc = btreeOpenEnvironment(p, 1)) != SQLITE_OK)
		return rc;

	pBt = p->pBt;
	rc = SQLITE_OK;
	/* 'ret' must be initialized: several early failure paths jump to
	 * 'err' before any Berkeley DB call assigns it, and the cleanup
	 * code there reads it. */
	ret = 0;
	cached_db = NULL;
	dbp = NULL;
	memset(&end, 0, sizeof(end));
#ifndef BDBSQL_OMIT_LEAKCHECK
	/* Let BDB use the user-specified malloc function (btreeMalloc) */
	end.flags |= DB_DBT_MALLOC;
#endif

	/*
	 * Turn off DB_CREATE: we don't want to create any tables that don't
	 * already exist.
	 */
	was_create = (pBt->db_oflags & DB_CREATE);
	pBt->db_oflags &= ~DB_CREATE;

	/* The key returns the table name into our stack buffer; the data
	 * (the root page contents) is not needed, hence DB_DBT_PARTIAL. */
	memset(&key, 0, sizeof(key));
	key.data = tableNameBuf;
	key.ulen = sizeof(tableNameBuf);
	key.flags = DB_DBT_USERMEM;
	memset(&data, 0, sizeof(data));
	data.flags = DB_DBT_PARTIAL | DB_DBT_USERMEM;

	UPDATE_DURING_BACKUP(p);

	/* The compact cursor persists across calls so each call resumes
	 * at the next table. */
	if (p->compact_cursor == NULL) {
		if ((ret = pTablesDb->cursor(pTablesDb, pReadTxn,
		    &p->compact_cursor, 0)) != 0)
			goto err;
	}
	if ((ret = p->compact_cursor->get(p->compact_cursor,
	    &key, &data, DB_NEXT)) == DB_NOTFOUND) {
		/* No more tables: the incremental vacuum is complete. */
		(void)p->compact_cursor->close(p->compact_cursor);
		p->compact_cursor = NULL;
		pBt->db_oflags |= was_create;
		return SQLITE_DONE;
	} else if (ret != 0)
		goto err;

	/* NOTE(review): assumes key.size < sizeof(tableNameBuf); a name of
	 * exactly DBNAME_SIZE bytes would write one past the buffer —
	 * presumably DB_DBT_USERMEM get fails first; confirm. */
	tableNameBuf[key.size] = '\0';
	if (strncmp(tableNameBuf, "table", 5) != 0) {
		iTable = 0;
#ifdef BDBSQL_FILE_PER_TABLE
		/* Cannot compact the metadata file */
		goto err;
#endif
		/* Open a DB handle on that table. */
		if ((ret = db_create(&dbp, pDbEnv, 0)) != 0)
			goto err;
		if (pBt->encrypted &&
		    (ret = dbp->set_flags(dbp, DB_ENCRYPT)) != 0)
			goto err;
		tableName = tableNameBuf;
		FIX_TABLENAME(pBt, fileName, tableName);
		/*
		 * We know we're not creating this table, open it using the
		 * family transaction because that keeps the dbreg records out
		 * of the vacuum transaction, reducing pressure on the log
		 * region (since we copy the filename of every open DB handle
		 * into the log region).
		 */
		if ((ret = dbp->open(dbp, pFamilyTxn, fileName, tableName,
		    DB_BTREE, GET_AUTO_COMMIT(pBt, pFamilyTxn), 0)) != 0)
			goto err;
	} else {
		if ((ret = btreeTableNameToId(tableNameBuf,
		    key.size, &iTable)) != 0)
			goto err;
		/* Try to retrieve the matching handle from the cache. */
		rc = btreeFindOrCreateDataTable(p, &iTable, &cached_db, 0);
		if (rc != SQLITE_OK)
			goto err;
		assert(cached_db != NULL && cached_db->dbp != NULL);
		dbp = cached_db->dbp;
		if ((iTable & 1) == 0) {
			/*
			 * Attach the DB handle to a SQLite index, required for
			 * the key comparator to work correctly.  If we can't
			 * find an Index struct, just skip this database.  It
			 * may not be open yet (c.f. whereA-1.7).
			 */
#ifdef BDBSQL_SINGLE_THREAD
			rc = btreeGetKeyInfo(p, iTable,
			    (KeyInfo **)&(dbp->app_private));
#else
			rc = btreeGetKeyInfo(p, iTable,
			    &((TableInfo *)dbp->app_private)->pKeyInfo);
#endif
			if (rc != SQLITE_OK)
				goto err;
		}
	}

	/*
	 * In the following db_compact, we use the family transaction because
	 * DB->compact will then auto-commit, and it has built-in smarts
	 * about retrying on deadlock.
	 */
	/* Setup compact_data as configured */
	memset(&compact_data, 0, sizeof(compact_data));
	compact_data.compact_fillpercent = p->fillPercent;

	vacuumMode = sqlite3BtreeGetAutoVacuum(p);
	if (vacuumMode == BTREE_AUTOVACUUM_NONE) {
		ret = dbp->compact(dbp, pFamilyTxn,
		    NULL, NULL, &compact_data, DB_FREE_SPACE, NULL);
	/* Skip current table if we have truncated enough pages */
	} else if (truncatedPages == NULL ||
	    *truncatedPages < p->vacuumPages) {
		/* Find DBT for db_compact start */
		for (pInfo = p->vacuumInfo, pStart = NULL;
		    pInfo != NULL; pInfo = pInfo->next) {
			if (pInfo->iTable == iTable)
				break;
		}
		/* Create new VacuumInfo for current iTable as needed */
		if (pInfo == NULL) {
			/* Create info for current iTable */
			if ((pInfo = (struct VacuumInfo *)sqlite3_malloc(
			    sizeof(struct VacuumInfo))) == NULL) {
				rc = SQLITE_NOMEM;
				goto err;
			}
			memset(pInfo, 0, sizeof(struct VacuumInfo));
			pInfo->iTable = iTable;
			pInfo->next = p->vacuumInfo;
			p->vacuumInfo = pInfo;
		}
		pStart = &(pInfo->start);

		/* Do page compact for IncrVacuum */
		if (vacuumMode == BTREE_AUTOVACUUM_INCR) {
			/* Do compact with given arguments */
			compact_data.compact_pages = p->vacuumPages;
			if ((ret = dbp->compact(dbp, pFamilyTxn,
			    (pStart->data == NULL) ? NULL : pStart,
			    NULL, &compact_data, 0, &end)) != 0)
				goto err;
			/* Save current vacuum position */
			if (pStart->data != NULL)
				sqlite3_free(pStart->data);
			memcpy(pStart, &end, sizeof(DBT));
			memset(&end, 0, sizeof(end));
			/* Rewind to start if we reach the end of subdb */
			if (compact_data.compact_pages_free < p->vacuumPages ||
			    p->vacuumPages == 0) {
				if (pStart->data != NULL)
					sqlite3_free(pStart->data);
				memset(pStart, 0, sizeof(DBT));
			}
		}
		/*
		 * Because of the one-pass nature of the compaction algorithm,
		 * any unemptied page near the end of the file inhibits
		 * returning pages to the file system.  A repeated call to the
		 * DB->compact() method with a low compact_fillpercent may be
		 * used to return pages in this case.
		 */
		memset(&compact_data, 0, sizeof(compact_data));
		compact_data.compact_fillpercent = 1;
		if ((ret = dbp->compact(dbp, pFamilyTxn, NULL, NULL,
		    &compact_data, DB_FREE_SPACE, NULL)) != 0)
			goto err;
		/*
		 * Accumulate whenever the caller asked for the count.  The
		 * previous guard (*truncatedPages > 0) meant a counter
		 * starting at zero could never begin accumulating, defeating
		 * the skip optimization above.
		 */
		if (truncatedPages != NULL)
			*truncatedPages +=
			    compact_data.compact_pages_truncated;
	}

err:	/* Free cursor and DBT if we ran into an error */
	if (ret != 0) {
		if (p->compact_cursor != NULL) {
			(void)p->compact_cursor->close(p->compact_cursor);
			p->compact_cursor = NULL;
		}
		if (end.data != NULL)
			sqlite3_free(end.data);
		btreeFreeVacuumInfo(p);
	}
	if (cached_db != NULL) {
		/* Cached handle: detach only the KeyInfo we attached. */
#ifdef BDBSQL_SINGLE_THREAD
		if ((app = dbp->app_private) != NULL)
			sqlite3DbFree(p->db, app);
#else
		if (dbp->app_private != NULL &&
		    (app = ((TableInfo *)dbp->app_private)->pKeyInfo)
		    != NULL) {
			sqlite3DbFree(p->db, app);
			((TableInfo *)dbp->app_private)->pKeyInfo = NULL;
		}
#endif
	} else if (dbp != NULL) {
		/* Private handle: close it and free its app_private. */
		app = dbp->app_private;
		if ((t_ret = dbp->close(dbp, DB_NOSYNC)) != 0 && ret == 0)
			ret = t_ret;
		if (app != NULL)
			sqlite3DbFree(p->db, app);
	}
	pBt->db_oflags |= was_create;
	return MAP_ERR(rc, ret, p);
}