int main() { DB *dbp; DB_ENV *dbenv; DB_TXN *xid; DBT key, data; const unsigned int INSERT_NUM = 100; char value[22]; /* should be log INSERT_NUM */ int ret, i, t_ret; env_dir_create(); env_open(&dbenv); if ((ret = db_create(&dbp, dbenv, 0)) != 0) { fprintf(stderr, "db_create: %s\n", db_strerror(ret)); exit (1); } dbenv->txn_begin(dbenv, NULL, &xid, 0); if ((ret = dbp->open(dbp, xid, DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) { dbp->err(dbp, ret, "%s", DATABASE); goto err; } memset(&key, 0, sizeof(key)); memset(&data, 0, sizeof(data)); key.size = sizeof(int); key.data = malloc(sizeof(int)); data.data = value; for( i = 0; i < INSERT_NUM; i++ ) { *((int*)key.data) = i; data.size = sizeof(char)*strlen(data.data); sprintf(value, "value: %u\n", i); dbp->put(dbp, xid, &key, &data, 0); } xid->commit(xid, 0); dbenv->txn_begin(dbenv, NULL, &xid, 0); for( i = 0; i < INSERT_NUM; i++ ) { *((int*)key.data) = i; dbp->get(dbp, xid, &key, &data, 0); printf("db: %u: key retrieved: data was %s.\n", *((int*)key.data), (char *)data.data); } xid->abort(xid); err: if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0) ret = t_ret; return 0; }
int main(int argc,char * argv[]) { int rc; DB_ENV *env; DB *dbi; DBT key, data; DB_TXN *txn; DBC *cursor; char sval[32], kval[32]; #define FLAGS (DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_INIT_MPOOL|DB_CREATE|DB_THREAD) rc = db_env_create(&env, 0); rc = env->open(env, "./testdb", FLAGS, 0664); rc = db_create(&dbi, env, 0); rc = env->txn_begin(env, NULL, &txn, 0); rc = dbi->open(dbi, txn, "test.bdb", NULL, DB_BTREE, DB_CREATE, 0664); memset(&key, 0, sizeof(DBT)); memset(&data, 0, sizeof(DBT)); key.size = sizeof(int); key.data = sval; data.size = sizeof(sval); data.data = sval; sprintf(sval, "%03x %d foo bar", 32, 3141592); rc = dbi->put(dbi, txn, &key, &data, 0); rc = txn->commit(txn, 0); if (rc) { fprintf(stderr, "txn->commit: (%d) %s\n", rc, db_strerror(rc)); goto leave; } rc = env->txn_begin(env, NULL, &txn, 0); rc = dbi->cursor(dbi, txn, &cursor, 0); key.flags = DB_DBT_USERMEM; key.data = kval; key.ulen = sizeof(kval); data.flags = DB_DBT_USERMEM; data.data = sval; data.ulen = sizeof(sval); while ((rc = cursor->c_get(cursor, &key, &data, DB_NEXT)) == 0) { printf("key: %p %.*s, data: %p %.*s\n", key.data, (int) key.size, (char *) key.data, data.data, (int) data.size, (char *) data.data); } rc = cursor->c_close(cursor); rc = txn->abort(txn); leave: rc = dbi->close(dbi, 0); rc = env->close(env, 0); return rc; }
AccountingInfo *getAccoutingInfo(int db) { DB_ENV *dbenv = radacct_dbenv[db]; DB *dbp = radacct_dbp[db]; DB_TXN *tid = NULL; DBT key, data; db_recno_t recno; AccountingInfo *info = NULL; char buf[REC_SIZE]; int ret; memset(&key, 0, sizeof(DBT)); memset(&data, 0, sizeof(DBT)); key.data = &recno; key.size = key.ulen = sizeof(recno); key.flags = DB_DBT_USERMEM; data.data = buf; data.ulen = sizeof(buf); data.flags = DB_DBT_USERMEM; #if USE_TXN if((ret = dbenv->txn_begin(dbenv, NULL, &tid, 0)) != 0) { NETERROR(MRADC, ("getAccoutingInfo: transaction failed: %s\n", db_strerror(ret))); return 0; } switch((ret = dbp->get(dbp, tid, &key, &data, DB_CONSUME))) #else switch((ret = dbp->get(dbp, NULL, &key, &data, DB_CONSUME))) #endif { case DB_LOCK_DEADLOCK: NETERROR(MRADC, ("getAccoutingInfo: deadlock: %s\n", db_strerror(ret))); break; case 0: if((info = (AccountingInfo*)malloc(sizeof(AccountingInfo)))) { memset(info, 0, sizeof(AccountingInfo)); info->tid = tid; unMarshalAccountingInfo(buf, info); } break; default: NETERROR(MRADC, ("getAccoutingInfo: oops: %d\n", ret)); break; } return info; }
/*
 * Bulk-load driver: argv[1] transactions, each inserting argv[2]
 * random strings into the database, keyed by a monotonically
 * increasing record number (so every put is an append).
 */
int main(int argc, char **argv)
{
	DB *dbp;
	DB_ENV *dbenv;
	DBT key, data;
	db_recno_t recno;
	DB_TXN *xid;
	int num_xactions;
	int num_inserts_per_xaction;
	char *string;
	int i, j;

	if (argc != 3) {
		printf("usage: %s <num xactions> <num inserts per xaction>\n",
		    argv[0]);
		exit(-1);
	}
	/* NOTE(review): atoi() yields 0 on non-numeric input, which makes
	 * the loops below no-ops -- confirm that is acceptable. */
	num_xactions = atoi(argv[1]);
	num_inserts_per_xaction = atoi(argv[2]);

	env_dir_create();
	env_open(&dbenv);
	rand_str_init();

	if (db_open(dbenv, &dbp, DATABASE, 0)) {
		return (1);
	}

	memset(&key, 0, sizeof(DBT));
	memset(&data, 0, sizeof(DBT));

	recno = 1;
	for (i = 1; i <= num_xactions; i++ ) {
		/* One transaction groups num_inserts_per_xaction puts. */
		dbenv->txn_begin(dbenv, NULL, &xid, 0);
		for (j = 0; j < num_inserts_per_xaction; j++) {
			string = rand_str();
			key.size = sizeof(recno);
			key.data = &recno;
			data.size = strlen(string) + 1; // + 1 for the null terminator
			data.data = string;
			/* if(VERBOSE) { printf("%s\n", string); } */
			dbp->put(dbp, xid, &key, &data, 0);
			recno++;
			/* Its unclear from BDB docs whether we should free string */
		}
		xid->commit(xid, 0);
	}
	return 0;
}
/*
 * A function that performs a series of writes to a
 * Berkeley DB database. The information written
 * to the database is largely nonsensical, but the
 * mechanism of transactional commit/abort and
 * deadlock detection is illustrated here.
 *
 * args is the shared DB handle; the return value is EXIT_SUCCESS or
 * EXIT_FAILURE cast to void *, per pthread convention.
 */
void *
writer_thread(void *args)
{
	static char *key_strings[] = {
		"key 1", "key 2", "key 3", "key 4", "key 5",
		"key 6", "key 7", "key 8", "key 9", "key 10"
	};
	DB *dbp;
	DB_ENV *envp;
	DBT key, value;
	DB_TXN *txn;
	int i, j, payload, ret, thread_num;
	int retry_count, max_retries = 20;	/* Max retry on a deadlock */

	dbp = (DB *)args;
	envp = dbp->get_env(dbp);

	/* Get the thread number (mutex guards the shared counter). */
	(void)mutex_lock(&thread_num_lock);
	global_thread_num++;
	thread_num = global_thread_num;
	(void)mutex_unlock(&thread_num_lock);

	/* Initialize the random number generator */
	srand(thread_num);

	/* Write 50 times and then quit */
	for (i = 0; i < 50; i++) {
		retry_count = 0;	/* Used for deadlock retries */

		/*
		 * Some think it is bad form to loop with a goto statement, but
		 * we do it anyway because it is the simplest and clearest way
		 * to achieve our abort/retry operation.
		 */
retry:
		/*
		 * Begin our transaction. We group multiple writes in
		 * this thread under a single transaction so as to
		 * (1) show that you can atomically perform multiple writes
		 * at a time, and (2) to increase the chances of a
		 * deadlock occurring so that we can observe our
		 * deadlock detection at work.
		 *
		 * Normally we would want to avoid the potential for deadlocks,
		 * so for this workload the correct thing would be to perform our
		 * puts with autocommit. But that would excessively simplify our
		 * example, so we do the "wrong" thing here instead.
		 */
		ret = envp->txn_begin(envp, NULL, &txn, 0);
		if (ret != 0) {
			envp->err(envp, ret, "txn_begin failed");
			return ((void *)EXIT_FAILURE);
		}
		for (j = 0; j < 10; j++) {
			/* Set up our key and values DBTs */
			memset(&key, 0, sizeof(DBT));
			key.data = key_strings[j];
			key.size = (u_int32_t)strlen(key_strings[j]) + 1;

			memset(&value, 0, sizeof(DBT));
			payload = rand() + i;
			value.data = &payload;
			value.size = sizeof(int);

			/* Perform the database put. */
			switch (ret = dbp->put(dbp, txn, &key, &value, 0)) {
			case 0:
				break;
			/*
			 * Here's where we perform deadlock detection. If
			 * DB_LOCK_DEADLOCK is returned by the put operation,
			 * then this thread has been chosen to break a deadlock.
			 * It must abort its operation, and optionally retry the
			 * put.
			 */
			case DB_LOCK_DEADLOCK:
				/*
				 * First thing that we MUST do is abort the
				 * transaction.
				 */
				(void)txn->abort(txn);
				/*
				 * Now we decide if we want to retry the operation.
				 * If we have retried less than max_retries,
				 * increment the retry count and goto retry.
				 */
				if (retry_count < max_retries) {
					printf("Writer %i: Got DB_LOCK_DEADLOCK.\n",
					    thread_num);
					printf("Writer %i: Retrying write operation.\n",
					    thread_num);
					retry_count++;
					goto retry;
				}
				/*
				 * Otherwise, just give up.
				 */
				printf("Writer %i: ", thread_num);
				printf("Got DB_LOCK_DEADLOCK and out of retries.\n");
				printf("Writer %i: Giving up.\n", thread_num);
				return ((void *)EXIT_FAILURE);
			/*
			 * If a generic error occurs, we simply abort the
			 * transaction and exit the thread completely.
			 */
			default:
				envp->err(envp, ret, "db put failed");
				ret = txn->abort(txn);
				if (ret != 0)
					envp->err(envp, ret, "txn abort failed");
				return ((void *)EXIT_FAILURE);
			} /** End case statement **/
		} /** End for loop **/

		/*
		 * print the number of records found in the database.
		 * See count_records() for usage information.  Note the
		 * count runs inside this thread's open transaction.
		 */
		printf("Thread %i. Record count: %i\n", thread_num,
		    count_records(dbp, txn));

		/*
		 * If all goes well, we can commit the transaction and
		 * exit the thread.
		 */
		ret = txn->commit(txn, 0);
		if (ret != 0) {
			envp->err(envp, ret, "txn commit failed");
			return ((void *)EXIT_FAILURE);
		}
	}
	return ((void *)EXIT_SUCCESS);
}
/*
 * Decide the greylisting action for one SMTP RCPT stage and print it
 * in postfix policy-daemon form (STR_ACTION prefix).  The triplet's
 * counters are read and updated inside a single transaction.
 *
 * crypted > 0 selects the (more lenient) behavior for TLS-verified
 * senders.  Returns 0 on success, nonzero on database failure.
 *
 * NOTE(review): txn is an automatic variable modified between setjmp()
 * and a potential longjmp(); per C rules it should be volatile for its
 * value to be reliable in the setjmp branch -- confirm.
 */
static int
process_smtp_rcpt(int crypted)
{
	double delay;
	int rc;
	DB_ENV *dbenv;
	DB *db;
	DB_TXN *txn = NULL;

	/* Non-local error recovery: jmperr()/database defects land here. */
	if (setjmp(defect_jmp_buf)) {
		if (defect_msg) {
			printf(STR_ACTION "WARN %s\n", defect_msg);
			defect_msg = 0;
		} else
			puts(STR_ACTION "WARN " PACKAGE_STRING
			    " is not working properly");
		if (txn)
			call_db(txn->abort(txn), "Failed to abort transaction");
		return 1;
	}

	rc = get_dbenv(&dbenv, 1);
	if (rc)
		jmperr("get_dbenv failed");
	rc = get_db(&db, 1);
	if (rc)
		jmperr("get_db failed");
	rc = call_db(dbenv->txn_begin(dbenv, NULL, &txn, 0),
	    "txn_begin failed in process_smtp_rcpt");
	if (rc)
		jmperr("txn_begin failed");

	get_grey_data(db, txn);
	if (triplet_data.crypted != crypted) {
		triplet_data.crypted = crypted;
		if (debug_me)
			syslog(LOG_DEBUG, "crypted field changed for some reason");
	}

	/* Age of the triplet: first-seen to most-recent access. */
	delay = difftime(triplet_data.access_time, triplet_data.create_time);

	/* Block inbound mail that is from a previously unknown (ip, from, to) triplet */
	/* However we want different behavior for crypted stuff */
	if (crypted > 0) {
		if (delay < cryptlist_delay) {
			/* Still inside the greylist window: reject. */
			/* NOTE(review): the remaining-delay printed here uses
			 * greylist_delay although the test uses
			 * cryptlist_delay -- confirm intended. */
			triplet_data.block_count++;
			fputs(STR_ACTION, stdout);
			printf_action(reject_action_fmt, greylist_delay - delay);
			putchar('\n');
		} else if (triplet_data.pass_count++)
			puts(STR_ACTION "DUNNO");
		else {
			/* First pass after the window: announce listing. */
			fputs(STR_ACTION, stdout);
			printf_action(cryptlisted_action_fmt, delay);
			putchar('\n');
		}
	} else {
		if (delay < greylist_delay || block_unencrypted) {
			triplet_data.block_count++;
			fputs(STR_ACTION, stdout);
			if (block_unencrypted == 1) {
				printf_action(reject_unencrypted_action_fmt, (3600*24)); // block it for a day
			} else {
				printf_action(reject_unencrypted_action_fmt, greylist_delay - delay);
			}
			putchar('\n');
		} else if (triplet_data.pass_count++)
			puts(STR_ACTION "DUNNO");
		else {
			fputs(STR_ACTION, stdout);
			printf_action(greylisted_action_fmt, delay);
			putchar('\n');
		}
	}

	/* Persist updated counters; abort on failure, commit otherwise. */
	rc = put_grey_data(db, txn);
	if (rc)
		call_db(txn->abort(txn), "abort failed");
	else
		call_db(txn->commit(txn, 0), "commit failed");
	return rc;
}
int main(int argc, char **argv) { DB *dbp; DB_ENV *dbenv; DB_TXN *xid; DBT key, data; db_recno_t recno; int num_trials; int ret; char c; double r; int start, end; char first_printable_ascii = ' '; char last_printable_ascii = '~'; int ascii_length = (int)(last_printable_ascii - first_printable_ascii); char *ASCII = (char *)malloc(sizeof(char) * ascii_length); char *string; for (c = 0; c < ascii_length; c++) { ASCII[(int)c] = first_printable_ascii + c; } if (argc != 2) { printf("usage: %s <num trials>\n", argv[0]); exit(-1); } env_dir_create(); env_open(&dbenv); if (db_open(dbenv, &dbp, DATABASE, 0)) return (1); memset(&key, 0, sizeof(DBT)); memset(&data, 0, sizeof(DBT)); num_trials = atoi(argv[1]); for (recno = 1; (int)recno <= num_trials; recno++) { start = 0; end = 0; while (start == end) { r = ((double)rand()/(double)((double)RAND_MAX+1)); /* returns [0, 1)*/ r = r*ascii_length; start = (int)r; /* an int in the rand [0, ascii_length) */ r = ((double)rand()/(double)((double)RAND_MAX+1)); /* re turns [0, 1)*/ r = r*ascii_length; end = (int)r; /* an int in the rand [0, ascii_length) */ } if (end < start) { int swap = start; start = end; end = swap; } string = (char *)malloc(sizeof(char) * (end - start) + 1); strncpy(string, ASCII + start, end-start); string[end-start] = '\0'; /* make the string null terminated */ dbenv->txn_begin(dbenv, NULL, &xid, 0); key.size = sizeof(recno); key.data = &recno; data.size = strlen(string) + 1; // + 1 for the null terminator data.data = string; switch (ret = dbp->put(dbp, xid, &key, &data, 0)) { case 0: xid->commit(xid, 0); break; default: dbp->err(dbp, ret, "DB->put"); xid->abort(xid); break; } } return 0; }
/*
** Copy nPage pages from the source b-tree to the destination.
**
** Returns the backup state code: SQLITE_OK (more to copy),
** SQLITE_DONE (finished), SQLITE_LOCKED/SQLITE_BUSY (restartable),
** or another error code.  Both database mutexes are held for the
** duration of the call.
*/
int sqlite3_backup_step(sqlite3_backup *p, int nPage)
{
	int returnCode, pages;
	Parse parse;
	DB_ENV *dbenv;
	BtShared *pBtDest, *pBtSrc;

	pBtDest = pBtSrc = NULL;

	/* Sticky error / zero-page request: nothing to do. */
	if (p->rc != SQLITE_OK || nPage == 0)
		return p->rc;

	sqlite3_mutex_enter(p->pSrcDb->mutex);
	sqlite3_mutex_enter(p->pDestDb->mutex);

	/*
	 * Make sure the schema has been read in, so the keyInfo
	 * can be retrieved for the indexes. No-op if already read.
	 * If the schema has not been read then an update must have
	 * changed it, so backup will restart.
	 */
	memset(&parse, 0, sizeof(parse));
	parse.db = p->pSrcDb;
	p->rc = sqlite3ReadSchema(&parse);
	if (p->rc != SQLITE_OK)
		goto err;

	/*
	 * This process updated the source database, so
	 * the backup process has to restart.
	 */
	if (p->pSrc->updateDuringBackup > p->lastUpdate) {
		p->rc = SQLITE_LOCKED;
		if ((p->rc = backupCleanup(p)) != SQLITE_OK)
			goto err;
		else
			backupReset(p);
	}

	pages = nPage;

	/* First step after (re)start: recreate the destination from
	 * scratch by deleting and reopening its environment. */
	if (!p->cleaned) {
		const char *home;
		const char inmem[9] = ":memory:";
		int storage;

		pBtDest = p->pDest->pBt;
		storage = p->pDest->pBt->dbStorage;
		if (storage == DB_STORE_NAMED)
			p->openDest = 1;
		p->rc = btreeDeleteEnvironment(p->pDest, p->fullName, 1);
		if (storage == DB_STORE_INMEM &&
		    strcmp(p->destName, "temp") != 0)
			home = inmem;
		else
			home = p->fullName;
		p->pDest = p->pDestDb->aDb[p->iDb].pBt;
		if (p->rc != SQLITE_OK)
			goto err;

		/*
		 * Call sqlite3OpenTempDatabase instead of
		 * sqlite3BtreeOpen, because sqlite3OpenTempDatabase
		 * automatically chooses the right flags before calling
		 * sqlite3BtreeOpen.
		 */
		if (strcmp(p->destName, "temp") == 0) {
			memset(&parse, 0, sizeof(parse));
			parse.db = p->pDestDb;
			p->rc = sqlite3OpenTempDatabase(&parse);
			p->pDest = p->pDestDb->aDb[p->iDb].pBt;
		} else {
			p->rc = sqlite3BtreeOpen(home, p->pDestDb, &p->pDest,
			    SQLITE_DEFAULT_CACHE_SIZE | SQLITE_OPEN_MAIN_DB,
			    p->pDestDb->openFlags);
			p->pDestDb->aDb[p->iDb].pBt = p->pDest;
			if (p->rc == SQLITE_OK) {
				p->pDestDb->aDb[p->iDb].pSchema =
				    sqlite3SchemaGet(p->pDestDb, p->pDest);
				if (!p->pDestDb->aDb[p->iDb].pSchema)
					p->rc = SQLITE_NOMEM;
			} else
				p->pDestDb->aDb[p->iDb].pSchema = NULL;
		}

		if (p->pDest)
			p->pDest->nBackup++;

#ifdef SQLITE_HAS_CODEC
		/*
		 * In the case of a temporary source database, use the
		 * encryption of the main database.
		 */
		if (strcmp(p->srcName, "temp") == 0) {
			int iDb = sqlite3FindDbName(p->pSrcDb, "main");
			pBtSrc = p->pSrcDb->aDb[iDb].pBt->pBt;
		} else
			pBtSrc = p->pSrc->pBt;
		if (p->rc == SQLITE_OK) {
			if (p->iDb == 0)
				p->rc = sqlite3_key(p->pDestDb,
				    pBtSrc->encrypt_pwd,
				    pBtSrc->encrypt_pwd_len);
			else
				p->rc = sqlite3CodecAttach(p->pDestDb, p->iDb,
				    pBtSrc->encrypt_pwd,
				    pBtSrc->encrypt_pwd_len);
		}
#endif
		if (p->rc != SQLITE_OK)
			goto err;
		p->cleaned = 1;
	}

	/*
	 * Begin a transaction, unfortuantely the lock on
	 * the schema has to be released to allow the sqlite_master
	 * table to be cleared, which could allow another thread to
	 * alter it, however accessing the backup database during
	 * backup is already an illegal condition with undefined
	 * results.
	 */
	if (!sqlite3BtreeIsInTrans(p->pDest)) {
		if (!p->pDest->connected) {
			p->rc = btreeOpenEnvironment(p->pDest, 1);
			if (p->rc != SQLITE_OK)
				goto err;
		}
		if ((p->rc = sqlite3BtreeBeginTrans(p->pDest, 2)) !=
		    SQLITE_OK)
			goto err;
	}
	/* Only this process should be accessing the backup environment. */
	if (p->pDest->pBt->nRef > 1) {
		p->rc = SQLITE_BUSY;
		goto err;
	}

	/*
	 * Begin a transaction, a lock error or update could have caused
	 * it to be released in a previous call to step.
	 */
	if (!p->srcTxn) {
		dbenv = p->pSrc->pBt->dbenv;
		if ((p->rc = dberr2sqlite(dbenv->txn_begin(dbenv,
		    p->pSrc->family_txn, &p->srcTxn, 0))) != SQLITE_OK)
			goto err;
	}

	/*
	 * An update could have dropped or created a table, so recalculate
	 * the list of tables.
	 */
	if (!p->tables) {
		if ((p->rc = btreeGetPageCount(p->pSrc,
		    &p->tables, &p->nPagecount, p->srcTxn)) != SQLITE_OK) {
			sqlite3Error(p->pSrcDb, p->rc, 0);
			goto err;
		}
		p->nRemaining = p->nPagecount;
	}

	/* Copy the pages. */
	p->rc = btreeCopyPages(p, &pages);
	if (p->rc == SQLITE_DONE) {
		/* Finished: reload the destination schema. */
		p->nRemaining = 0;
		sqlite3ResetInternalSchema(p->pDestDb, p->iDb);
		memset(&parse, 0, sizeof(parse));
		parse.db = p->pDestDb;
		p->rc = sqlite3ReadSchema(&parse);
		if (p->rc == SQLITE_OK)
			p->rc = SQLITE_DONE;
	} else if (p->rc != SQLITE_OK)
		goto err;

	/*
	 * The number of pages left to copy is an estimate, so
	 * do not let the number go to zero unless we are really
	 * done.
	 */
	if (p->rc != SQLITE_DONE) {
		if ((u32)pages >= p->nRemaining)
			p->nRemaining = 1;
		else
			p->nRemaining -= pages;
	}

err:
	/*
	 * This process updated the source database, so
	 * the backup process has to restart.
	 */
	if (p->pSrc->updateDuringBackup > p->lastUpdate &&
	    (p->rc == SQLITE_OK || p->rc == SQLITE_DONE)) {
		int cleanCode;
		returnCode = p->rc;
		p->rc = SQLITE_LOCKED;
		if ((cleanCode = backupCleanup(p)) != SQLITE_OK)
			returnCode = p->rc = cleanCode;
		else
			backupReset(p);
	} else {
		returnCode = backupCleanup(p);
		if (returnCode == SQLITE_OK ||
		    (p->rc != SQLITE_OK && p->rc != SQLITE_DONE))
			returnCode = p->rc;
		else
			p->rc = returnCode;
	}

	/*
	 * On a locked or busy error the backup process is rolled back,
	 * but can be restarted by the user.
	 */
	if ( returnCode == SQLITE_LOCKED || returnCode == SQLITE_BUSY )
		backupReset(p);
	else if ( returnCode != SQLITE_OK && returnCode != SQLITE_DONE ) {
		sqlite3Error(p->pDestDb, p->rc, 0);
	}
	sqlite3_mutex_leave(p->pDestDb->mutex);
	sqlite3_mutex_leave(p->pSrcDb->mutex);
	return (returnCode);
}
/*
 * NFSv4 READDIR: stream directory entries for the current filehandle
 * into the RPC reply, bounded by the client's dircount/maxcount limits.
 * Entries are read from the dirent database with a cursor positioned
 * at this directory's inode number, inside a single transaction.
 */
nfsstat4 nfs_op_readdir(struct nfs_cxn *cxn, const READDIR4args *args,
			struct list_head *writes, struct rpc_write **wr)
{
	nfsstat4 status = NFS4_OK;
	struct nfs_inode *ino = NULL;
	uint32_t dircount, maxcount, *status_p;
	struct readdir_info ri;
	uint64_t cookie, attr_request;
	const verifier4 *cookie_verf;
	DB_TXN *txn = NULL;
	DB *dirent = srv.fsdb.dirent;
	DB_ENV *dbenv = srv.fsdb.env;
	DBT pkey, pval;
	struct fsdb_de_key key;
	int cget_flags;
	DBC *curs = NULL;
	int rc;
	uint64_t dirent_inum, db_de;
	struct fsdb_de_key *rkey;

	cookie = args->cookie;
	cookie_verf = &args->cookieverf;
	dircount = args->dircount;
	maxcount = args->maxcount;
	attr_request = bitmap4_decode(&args->attr_request);

	/* Reserve the status word in the reply; patched at "out". */
	status_p = WRSKIP(4);

	if (debugging) {
		applog(LOG_INFO, "op READDIR (COOKIE:%Lu DIR:%u MAX:%u MAP:%Lx)",
		    (unsigned long long) cookie, dircount, maxcount,
		    (unsigned long long) attr_request);
		print_fattr_bitmap("op READDIR", attr_request);
	}

	/* traditionally "." and "..", hardcoded */
	if (cookie == 1 || cookie == 2) {
		status = NFS4ERR_BAD_COOKIE;
		goto out;
	}

	/* don't permit request of write-only attrib */
	if (attr_request & fattr_write_only_mask) {
		status = NFS4ERR_INVAL;
		goto out;
	}

	/* FIXME: very, very, very poor verifier */
	if (cookie &&
	    memcmp(cookie_verf, &srv.instance_verf, sizeof(verifier4))) {
		status = NFS4ERR_NOT_SAME;
		goto out;
	}

	/* read inode of directory being read */
	status = dir_curfh(NULL, cxn, &ino, 0);
	if (status != NFS4_OK)
		goto out;
	if (ino->mode == 0) {
		status = NFS4ERR_ACCESS;
		goto out;
	}

	/* subtract READDIR4resok header and footer size */
	if (maxcount < 16) {
		status = NFS4ERR_TOOSMALL;
		goto out;
	}
	maxcount -= (8 + 4 + 4);

	/* verify within server limits */
	if (dircount > SRV_MAX_READ || maxcount > SRV_MAX_READ) {
		status = NFS4ERR_INVAL;
		goto out;
	}

	/* open transaction */
	rc = dbenv->txn_begin(dbenv, NULL, &txn, 0);
	if (rc) {
		status = NFS4ERR_IO;
		dbenv->err(dbenv, rc, "DB_ENV->txn_begin");
		goto out;
	}

	/* set up directory iteration */
	memset(&ri, 0, sizeof(ri));
	ri.cookie = cookie;
	ri.dircount = dircount;
	ri.maxcount = maxcount;
	ri.attr_request = attr_request;
	ri.status = NFS4_OK;
	ri.writes = writes;
	ri.wr = wr;
	ri.dir_pos = 3;		/* positions 1 and 2 are "." and ".." */
	ri.first_time = true;

	/* if dir is empty, skip directory interation loop completely */
	if (dir_is_empty(txn, ino)) {
		WRMEM(&srv.instance_verf, sizeof(verifier4));	/* cookieverf */
		ri.val_follows = WRSKIP(4);
		if (debugging)
			applog(LOG_DEBUG, " READDIR: empty directory");
		goto the_finale;
	}

	/* otherwise, loop through each dirent attached to ino->inum */
	rc = dirent->cursor(dirent, txn, &curs, 0);
	if (rc) {
		status = NFS4ERR_IO;
		dirent->err(dirent, rc, "dirent->cursor");
		goto out_abort;
	}

	key.inum = inum_encode(ino->inum);

	/* DB_DBT_MALLOC: each cursor get returns a malloc'd key (rkey)
	 * that must be freed; the value is read into db_de in place. */
	memset(&pkey, 0, sizeof(pkey));
	pkey.data = &key;
	pkey.size = sizeof(key);
	pkey.flags = DB_DBT_MALLOC;
	memset(&pval, 0, sizeof(pval));
	pval.data = &db_de;
	pval.ulen = sizeof(db_de);
	pval.flags = DB_DBT_USERMEM;

	/* DB_SET_RANGE positions at the first key >= this inode; after
	 * that we walk forward until the inode number changes. */
	cget_flags = DB_SET_RANGE;
	while (1) {
		bool iter_rc;

		rc = curs->get(curs, &pkey, &pval, cget_flags);
		if (rc) {
			if (rc != DB_NOTFOUND)
				dirent->err(dirent, rc, "readdir curs->get");
			break;
		}
		cget_flags = DB_NEXT;

		rkey = pkey.data;
		if (inum_decode(rkey->inum) != ino->inum) {
			free(rkey);
			break;
		}

		dirent_inum = inum_decode(db_de);
		iter_rc = readdir_iter(txn, rkey, pkey.size, dirent_inum, &ri);
		free(rkey);
		if (iter_rc)
			break;
	}

	if (!ri.n_results) {
		if (debugging)
			applog(LOG_INFO, " zero results, status %s",
			    ri.status <= NFS4ERR_CB_PATH_DOWN ?
			    status2str(ri.status) : "n/a");

		if (ri.status == NFS4_OK) {
			WRMEM(&srv.instance_verf, sizeof(verifier4));	/* cookieverf */
			ri.val_follows = WRSKIP(4);
		}
	}

	rc = curs->close(curs);
	if (rc) {
		status = NFS4ERR_IO;
		dirent->err(dirent, rc, "dirent->cursor close");
		goto out_abort;
	}

the_finale:
	/* terminate final entry4.nextentry and dirlist4.entries */
	if (ri.val_follows)
		*ri.val_follows = htonl(0);

	/* Nothing fit, although the requested cookie exists: the
	 * client's buffer is too small for even one entry. */
	if (ri.cookie_found && !ri.n_results && ri.hit_limit) {
		status = NFS4ERR_TOOSMALL;
		goto out_abort;
	}

	/* close transaction */
	rc = txn->commit(txn, 0);
	if (rc) {
		dbenv->err(dbenv, rc, "DB_ENV->txn_commit");
		status = NFS4ERR_IO;
		goto out;
	}

	WR32(ri.hit_limit ? 0 : 1);	/* reply eof */

out:
	*status_p = htonl(status);
	inode_free(ino);
	return status;

out_abort:
	/* NOTE(review): on abort failure this logs the stale rc from the
	 * preceding operation, not the abort's return -- confirm. */
	if (txn->abort(txn))
		dbenv->err(dbenv, rc, "DB_ENV->txn_abort");
	goto out;
}
/*
 * NFSv4 REMOVE: delete the named entry from the current-filehandle
 * directory.  All checks and mutations happen inside one transaction;
 * on success the reply carries the directory's before/after change
 * info (cinfo).
 */
nfsstat4 nfs_op_remove(struct nfs_cxn *cxn, const REMOVE4args *args,
		       struct list_head *writes, struct rpc_write **wr)
{
	nfsstat4 status = NFS4_OK;
	struct nfs_inode *dir_ino = NULL, *target_ino = NULL;
	struct nfs_buf target;
	change_info4 cinfo = { true, 0, 0 };
	DB_TXN *txn = NULL;
	DB_ENV *dbenv = srv.fsdb.env;
	int rc;
	nfsino_t de_inum;

	target.len = args->target.utf8string_len;
	target.val = args->target.utf8string_val;

	if (debugging)
		applog(LOG_INFO, "op REMOVE ('%.*s')",
		    target.len, target.val);

	/* validate the name before touching the database */
	if (target.len > SRV_MAX_NAME) {
		status = NFS4ERR_NAMETOOLONG;
		goto out;
	}
	if (!valid_utf8string(&target)) {
		status = NFS4ERR_INVAL;
		goto out;
	}
	if (has_dots(&target)) {
		status = NFS4ERR_BADNAME;
		goto out;
	}

	rc = dbenv->txn_begin(dbenv, NULL, &txn, 0);
	if (rc) {
		status = NFS4ERR_IO;
		dbenv->err(dbenv, rc, "DB_ENV->txn_begin");
		goto out;
	}

	/* reference container directory (write-locked: DB_RMW) */
	status = dir_curfh(txn, cxn, &dir_ino, DB_RMW);
	if (status != NFS4_OK)
		goto out_abort;

	/* lookup target name in directory */
	status = dir_lookup(txn, dir_ino, &target, 0, &de_inum);
	if (status != NFS4_OK)
		goto out_abort;

	/* reference target inode */
	target_ino = inode_getdec(txn, de_inum, DB_RMW);
	if (!target_ino) {
		status = NFS4ERR_NOENT;
		goto out_abort;
	}

	/* prevent root dir deletion */
	if (target_ino->inum == INO_ROOT) {
		status = NFS4ERR_INVAL;
		goto out_abort;
	}

	/* prevent removal of non-empty dirs */
	if ((target_ino->type == NF4DIR) && !dir_is_empty(txn, target_ino)) {
		status = NFS4ERR_NOTEMPTY;
		goto out_abort;
	}

	/* remove target inode from directory */
	rc = fsdb_dirent_del(&srv.fsdb, txn, dir_ino->inum, &target, 0);
	if (rc) {
		status = NFS4ERR_IO;
		goto out_abort;
	}

	/* record directory change info (version bumps in inode_touch) */
	cinfo.before = dir_ino->version;

	rc = inode_touch(txn, dir_ino);
	if (rc) {
		status = NFS4ERR_IO;
		goto out_abort;
	}

	cinfo.after = dir_ino->version;

	/* remove link, possibly deleting inode */
	rc = inode_unlink(txn, target_ino);
	if (rc) {
		status = NFS4ERR_IO;
		goto out_abort;
	}

	rc = txn->commit(txn, 0);
	if (rc) {
		dbenv->err(dbenv, rc, "DB_ENV->txn_commit");
		status = NFS4ERR_IO;
		goto out;
	}

out:
	WR32(status);
	if (status == NFS4_OK) {
		WR32(cinfo.atomic ? 1 : 0);	/* cinfo.atomic */
		WR64(cinfo.before);		/* cinfo.before */
		WR64(cinfo.after);		/* cinfo.after */
	}
	inode_free(dir_ino);
	inode_free(target_ino);
	return status;

out_abort:
	if (txn->abort(txn))
		dbenv->err(dbenv, rc, "DB_ENV->txn_abort");
	goto out;
}
/*
 * NFSv4 RENAME: move oldname in the saved-filehandle directory to
 * newname in the current-filehandle directory, inside one transaction.
 * An existing newname is removed first when allowed (file-over-file,
 * or empty-dir-over-dir).  The reply carries before/after change info
 * for both directories.
 */
nfsstat4 nfs_op_rename(struct nfs_cxn *cxn, const RENAME4args *args,
		       struct list_head *writes, struct rpc_write **wr)
{
	nfsstat4 status = NFS4_OK;
	struct nfs_inode *src_dir = NULL, *target_dir = NULL;
	struct nfs_inode *old_file = NULL, *new_file = NULL;
	struct nfs_buf oldname, newname;
	change_info4 src = { true, 0, 0 };
	change_info4 target = { true, 0, 0 };
	DB_TXN *txn = NULL;
	DB_ENV *dbenv = srv.fsdb.env;
	int rc;
	nfsino_t old_dirent, new_dirent;

	oldname.len = args->oldname.utf8string_len;
	oldname.val = args->oldname.utf8string_val;
	newname.len = args->newname.utf8string_len;
	newname.val = args->newname.utf8string_val;

	if (debugging)
		applog(LOG_INFO, "op RENAME (OLD:%.*s, NEW:%.*s)",
		    oldname.len, oldname.val,
		    newname.len, newname.val);

	/* validate text input */
	if ((!valid_utf8string(&oldname)) ||
	    (!valid_utf8string(&newname))) {
		status = NFS4ERR_INVAL;
		goto out;
	}
	if (has_dots(&oldname) || has_dots(&newname)) {
		status = NFS4ERR_BADNAME;
		goto out;
	}

	rc = dbenv->txn_begin(dbenv, NULL, &txn, 0);
	if (rc) {
		status = NFS4ERR_IO;
		dbenv->err(dbenv, rc, "DB_ENV->txn_begin");
		goto out;
	}

	/* reference source, target directories.
	 * NOTE: src_dir and target_dir may point to the same object
	 */
	src_dir = inode_fhdec(txn, cxn->save_fh, DB_RMW);
	if (fh_equal(cxn->save_fh, cxn->current_fh))
		target_dir = src_dir;
	else
		target_dir = inode_fhdec(txn, cxn->current_fh, DB_RMW);
	if (!src_dir || !target_dir) {
		status = NFS4ERR_NOFILEHANDLE;
		goto out_abort;
	}
	if ((src_dir->type != NF4DIR) || (target_dir->type != NF4DIR)) {
		status = NFS4ERR_NOTDIR;
		goto out_abort;
	}

	/* lookup source, target names */
	status = dir_lookup(txn, src_dir, &oldname, 0, &old_dirent);
	if (status != NFS4_OK)
		goto out_abort;
	old_file = inode_getdec(txn, old_dirent, 0);
	if (!old_file) {
		status = NFS4ERR_NOENT;
		goto out_abort;
	}

	status = dir_lookup(txn, target_dir, &newname, 0, &new_dirent);
	if (status != NFS4_OK && status != NFS4ERR_NOENT)
		goto out_abort;

	/* if target (newname) is present, attempt to remove */
	if (status == NFS4_OK) {
		bool ok_to_remove = false;

		/* read to-be-deleted inode */
		new_file = inode_getdec(txn, new_dirent, DB_RMW);
		if (!new_file) {
			status = NFS4ERR_NOENT;
			goto out_abort;
		}

		/* do oldname and newname refer to same file?  If so,
		 * RENAME is a no-op: report unchanged versions and
		 * abort the transaction while returning NFS4_OK. */
		if (old_file->inum == new_file->inum) {
			src.after =
			src.before = src_dir->version;
			target.after =
			target.before = target_dir->version;
			goto out_abort;
		}

		if (old_file->type != NF4DIR && new_file->type != NF4DIR)
			ok_to_remove = true;
		else if (old_file->type == NF4DIR && new_file->type == NF4DIR &&
			 dir_is_empty(txn, new_file))
			ok_to_remove = true;

		if (!ok_to_remove) {
			status = NFS4ERR_EXIST;
			goto out_abort;
		}

		/* remove target inode from directory */
		rc = fsdb_dirent_del(&srv.fsdb, txn, target_dir->inum,
				     &newname, 0);
		if (rc == 0)
			rc = inode_unlink(txn, new_file);
		if (rc) {
			status = NFS4ERR_IO;
			goto out_abort;
		}
	} else
		status = NFS4_OK;	/* NOENT target is fine */

	new_dirent = old_dirent;

	/* delete entry from source directory; add to target directory */
	rc = fsdb_dirent_del(&srv.fsdb, txn, src_dir->inum, &oldname, 0);
	if (rc == 0)
		rc = fsdb_dirent_put(&srv.fsdb, txn, target_dir->inum,
				     &newname, 0, new_dirent);
	if (rc) {
		status = NFS4ERR_IO;
		goto out_abort;
	}

	/* if renamed file is a directory, ensure its 'parent' is updated */
	if (old_file->type == NF4DIR) {
		old_file->parent = target_dir->inum;
		if (inode_touch(txn, old_file)) {
			status = NFS4ERR_IO;
			goto out_abort;
		}
	}

	/* record directory change info */
	src.before = src_dir->version;
	target.before = target_dir->version;

	/* update last-modified stamps of directory inodes */
	rc = inode_touch(txn, src_dir);
	if (rc == 0 && src_dir != target_dir)
		rc = inode_touch(txn, target_dir);
	if (rc) {
		status = NFS4ERR_IO;
		goto out_abort;
	}

	/* close the transaction */
	rc = txn->commit(txn, 0);
	if (rc) {
		dbenv->err(dbenv, rc, "DB_ENV->txn_commit");
		status = NFS4ERR_IO;
		goto out;
	}

	src.after = src_dir->version;
	target.after = target_dir->version;

out:
	WR32(status);
	if (status == NFS4_OK) {
		WR32(src.atomic ? 1 : 0);	/* src cinfo.atomic */
		WR64(src.before);		/* src cinfo.before */
		WR64(src.after);		/* src cinfo.after */
		WR32(target.atomic ? 1 : 0);	/* target cinfo.atomic */
		WR64(target.before);		/* target cinfo.before */
		WR64(target.after);		/* target cinfo.after */
	}
	inode_free(src_dir);
	if (src_dir != target_dir)
		inode_free(target_dir);
	inode_free(old_file);
	inode_free(new_file);
	return status;

out_abort:
	if (txn->abort(txn))
		dbenv->err(dbenv, rc, "DB_ENV->txn_abort");
	goto out;
}
/*
 * NFSv4 LINK: create a hard link named newname in the current-
 * filehandle directory, pointing at the saved-filehandle inode.
 * Runs in one transaction; on success the reply carries the
 * directory's before/after change info.
 */
nfsstat4 nfs_op_link(struct nfs_cxn *cxn, const LINK4args *args,
		     struct list_head *writes, struct rpc_write **wr)
{
	nfsstat4 status;
	struct nfs_inode *dir_ino = NULL, *src_ino = NULL;
	struct nfs_buf newname;
	uint64_t before = 0, after = 0;
	DB_TXN *txn;	/* only used after a successful txn_begin */
	DB_ENV *dbenv = srv.fsdb.env;
	int rc;

	newname.len = args->newname.utf8string_len;
	newname.val = args->newname.utf8string_val;

	if (debugging)
		applog(LOG_INFO, "op LINK (%.*s)", newname.len, newname.val);

	/* verify input parameters */
	if (!valid_fh(cxn->current_fh) || !valid_fh(cxn->save_fh)) {
		status = NFS4ERR_NOFILEHANDLE;
		goto out;
	}
	if (newname.len > SRV_MAX_NAME) {
		status = NFS4ERR_NAMETOOLONG;
		goto out;
	}

	/* open transaction */
	rc = dbenv->txn_begin(dbenv, NULL, &txn, 0);
	if (rc) {
		status = NFS4ERR_IO;
		dbenv->err(dbenv, rc, "DB_ENV->txn_begin");
		goto out;
	}

	/* read source inode's directory inode */
	dir_ino = inode_fhdec(txn, cxn->current_fh, 0);
	if (!dir_ino) {
		status = NFS4ERR_NOFILEHANDLE;
		goto out_abort;
	}

	/* make sure target is a directory */
	if (dir_ino->type != NF4DIR) {
		status = NFS4ERR_NOTDIR;
		goto out_abort;
	}

	/* read source inode */
	src_ino = inode_fhdec(txn, cxn->save_fh, 0);
	if (!src_ino) {
		status = NFS4ERR_NOFILEHANDLE;
		goto out_abort;
	}

	/* make sure source is a not a directory */
	if (src_ino->type == NF4DIR) {
		status = NFS4ERR_ISDIR;
		goto out_abort;
	}

	before = dir_ino->version;

	/* add directory entry */
	status = dir_add(txn, dir_ino, &newname, src_ino);
	if (status != NFS4_OK)
		goto out_abort;

	after = dir_ino->version;

	/* update source inode (link count) */
	src_ino->n_link++;
	if (inode_touch(txn, src_ino)) {
		status = NFS4ERR_IO;
		goto out_abort;
	}

	/* close transaction */
	rc = txn->commit(txn, 0);
	if (rc) {
		dbenv->err(dbenv, rc, "DB_ENV->txn_commit");
		status = NFS4ERR_IO;
		goto out;
	}

out:
	WR32(status);
	if (status == NFS4_OK) {
		WR32(1);		/* cinfo.atomic */
		WR64(before);		/* cinfo.before */
		WR64(after);		/* cinfo.after */
	}
	inode_free(src_ino);
	inode_free(dir_ino);
	return status;

out_abort:
	if (txn->abort(txn))
		dbenv->err(dbenv, rc, "DB_ENV->txn_abort");
	goto out;
}
/*
 * NFSv4 LOOKUP: resolve objname within the current-filehandle
 * directory and, on success, make the resolved inode the new current
 * filehandle.  The lookup itself runs in a short transaction.
 */
nfsstat4 nfs_op_lookup(struct nfs_cxn *cxn, const LOOKUP4args *args,
		       struct list_head *writes, struct rpc_write **wr)
{
	nfsstat4 status = NFS4_OK;
	struct nfs_inode *ino = NULL;
	bool printed = false;
	struct nfs_buf objname;
	nfsino_t inum;
	DB_TXN *txn = NULL;
	DB_ENV *dbenv = srv.fsdb.env;
	int rc;

	objname.len = args->objname.utf8string_len;
	objname.val = args->objname.utf8string_val;

	/* validate the requested name */
	if (!objname.len) {
		status = NFS4ERR_INVAL;
		goto out;
	}
	if (!objname.val) {
		status = NFS4ERR_BADXDR;
		goto out;
	}
	if (objname.len > SRV_MAX_NAME) {
		status = NFS4ERR_NAMETOOLONG;
		goto out;
	}

	rc = dbenv->txn_begin(dbenv, NULL, &txn, 0);
	if (rc) {
		status = NFS4ERR_IO;
		dbenv->err(dbenv, rc, "DB_ENV->txn_begin");
		goto out;
	}

	status = dir_curfh(txn, cxn, &ino, 0);
	if (status != NFS4_OK) {
		/* NOTE(review): this dereferences ino after a failed
		 * dir_curfh(); confirm dir_curfh always fills ino when
		 * it reports NFS4ERR_NOTDIR, else this is a NULL deref. */
		if ((status == NFS4ERR_NOTDIR) &&
		    (ino->type == NF4LNK))
			status = NFS4ERR_SYMLINK;
		goto out_abort;
	}

	status = dir_lookup(txn, ino, &objname, 0, &inum);
	if (status != NFS4_OK)
		goto out_abort;

	rc = txn->commit(txn, 0);
	if (rc) {
		dbenv->err(dbenv, rc, "DB_ENV->txn_commit");
		status = NFS4ERR_IO;
		goto out;
	}

	/* success: the resolved inode becomes the current filehandle */
	fh_set(&cxn->current_fh, inum);

	if (debugging) {
		applog(LOG_INFO, "op LOOKUP ('%.*s') -> %016llX",
		    objname.len, objname.val,
		    (unsigned long long) cxn->current_fh.inum);
		printed = true;
	}

out:
	if (!printed) {
		if (debugging)
			applog(LOG_INFO, "op LOOKUP ('%.*s')",
			    objname.len, objname.val);
	}
	WR32(status);
	inode_free(ino);
	return status;

out_abort:
	if (txn->abort(txn))
		dbenv->err(dbenv, rc, "DB_ENV->txn_abort");
	goto out;
}
/* Bulk delete example function. */
void *
run_bulk_delete()
{
	int raw_key[NUM_KEY_INT];
	DBT key;
	DB_ENV *envp;
	int bulk_size = 100;
	DB *dbp;
	DB_TXN *tid;
	int *delete_load;
	int delete_count, i, ret, op_flag;
	char *key_buf;
	void *p;
	int j;

	/* Initialize structs and arrays */
	memset(raw_key, 0, KEY_SIZE);
	memset(&key, 0, sizeof(DBT));
	tid = NULL;

	/* NOTE(review): envp, dbp, delete_load and delete_count are read
	 * below but never initialized in this excerpt -- presumably the
	 * setup code was elided from the original example; confirm
	 * before reuse. */

	/*
	 * Initialize the key buffer used for the bulk delete.  Bulk
	 * deletes need no data DBT, so only the key buffer is allocated
	 * and filled, and it must be caller-supplied memory.
	 */
	key_buf = (char*) malloc(KEY_SIZE * bulk_size * 2);
	memset(key_buf, 0, KEY_SIZE * bulk_size * 2);

	/* Set up the key-buffer DBT with the flags and ulen members the
	 * bulk API requires. */
	key.data = key_buf;
	key.ulen = KEY_SIZE * bulk_size * 2;
	key.flags = DB_DBT_USERMEM;

	op_flag = DB_MULTIPLE;	/* bulk deletes also need this flag */

	/*
	 * Delete all the data in batches.  Each batch removes the
	 * bulk_size key/data pairs named by the keys packed into the
	 * key-buffer DBT (see the DB_MULTIPLE_WRITE_* macros above).
	 */
	for (i = 0; i < delete_count / bulk_size; ) {
		/* Initialize and fill a key-buffer DBT for this batch. */
		DB_MULTIPLE_WRITE_INIT(p, &key);
		for (j = i * bulk_size; j < (i + 1) * bulk_size; j++) {
			raw_key[0] = delete_load[j];
			DB_MULTIPLE_WRITE_NEXT(p, &key, raw_key, KEY_SIZE);
		}

		/* Start the transaction. */
		if ((ret = envp->txn_begin(envp, NULL, &tid, 0)) != 0) {
			envp->err(envp, ret, "[delete] DB_ENV->txn_begin");
			exit(EXIT_FAILURE);
		}

		/*
		 * Execute the bulk delete: the bulk_size keys in the key
		 * buffer name the key/data pairs removed from the
		 * database.
		 */
		switch (ret = dbp->del(dbp, tid, &key, op_flag)) {
		case 0:
			/* Bulk delete succeeded: commit. */
			if ((ret = tid->commit(tid, 0)) != 0) {
				envp->err(envp, ret, "[delete] DB_TXN->commit");
				exit(EXIT_FAILURE);
			}
			break;
		case DB_LOCK_DEADLOCK:
			/* On deadlock the transaction must be aborted;
			 * the batch may then be retried (i is not
			 * advanced, so `continue` retries it). */
			if ((ret = tid->abort(tid)) != 0) {
				envp->err(envp, ret, "[delete] DB_TXN->abort");
				exit(EXIT_FAILURE);
			}
			continue;
		default:
			envp->err(envp, ret, "[delete] DB->del ([%d]%d)",
			    i, delete_load[i]);
			exit(EXIT_FAILURE);
		}
		i++;
	}

	(void)free(key_buf);
	return (NULL);
}
/************************* Transactional Berkeley DB *************************/
/*
 * Benchmark worker: pulls items from get_item() forever and logs them
 * into a transactional Berkeley DB btree.  In synchronous mode the
 * puts are grouped into transactions of opt_batchsize records; in
 * async mode (opt_bdb_async) puts auto-commit and a checkpoint is
 * taken every opt_batchsize records instead.  Never returns normally.
 */
gpointer bdb_thread(gpointer d)
{
	int res;
	DB_ENV *env;
	DB *db;
	DB_TXN *txn = NULL;
	int count = 0;

	res = db_env_create(&env, 0);
	g_assert(res == 0);
	res = env->open(env, ".",
	    DB_CREATE | DB_RECOVER | DB_INIT_LOCK | DB_INIT_LOG |
	    DB_INIT_MPOOL | DB_INIT_TXN | DB_THREAD, 0644);
	g_assert(res == 0);

	if (opt_bdb_async) {
		/* Relax durability: commits need not reach the disk. */
		res = env->set_flags(env, DB_TXN_WRITE_NOSYNC, 1);
		g_assert(res == 0);
	}

	res = db_create(&db, env, 0);
	g_assert(res == 0);
	res = db->open(db, NULL, "log.db", "log", DB_BTREE,
	    DB_CREATE | DB_THREAD | DB_AUTO_COMMIT, 0644);
	g_assert(res == 0);

	while (TRUE) {
		/* Lazily open a batch transaction (sync mode only). */
		if (txn == NULL && !opt_bdb_async) {
			res = env->txn_begin(env, NULL, &txn, 0);
			g_assert(res == 0);
		}

		struct item *item = get_item();

		DBT key, value;
		memset(&key, 0, sizeof(key));
		memset(&value, 0, sizeof(value));
		key.data = item->key;
		key.size = strlen(item->key);	/* NUL not stored */
		value.data = item->data;
		value.size = item->len;

		res = db->put(db, opt_bdb_async ? NULL : txn, &key, &value, 0);
		g_assert(res == 0);

		count++;
		if (count % opt_batchsize == 0) {
			if (opt_bdb_async) {
				env->txn_checkpoint(env, 0, 0, 0);
			} else {
				txn->commit(txn, 0);
				txn = NULL;
			}
		}
		finish_item(item);
	}
	return NULL;
}
void * writer_thread(void *args) { DB *dbp; DB_ENV *dbenv; DBT key, data; DB_TXN *txn; char *key_strings[] = {"001", "002", "003", "004", "005", "006", "007", "008", "009", "010"}; int i, j, payload, ret, thread_num; int retry_count, max_retries = 20; dbp = (DB *)args; dbenv = dbp->dbenv; /* Get the thread number */ (void)mutex_lock(&thread_num_lock); global_thread_num++; thread_num = global_thread_num; (void)mutex_unlock(&thread_num_lock); /* Initialize the random number generator */ srand(thread_num); /* Write 50 times and then quit */ for (i = 0; i < 50; i++) { retry_count = 0; /* Used for deadlock retries */ retry: ret = dbenv->txn_begin(dbenv, NULL, &txn, 0); if (ret != 0) { dbenv->err(dbenv, ret, "txn_begin failed"); return ((void *)EXIT_FAILURE); } memset(&key, 0, sizeof(DBT)); memset(&data, 0, sizeof(DBT)); for (j = 0; j < 10; j++) { /* Set up our key and data DBTs. */ data.data = key_strings[j]; data.size = (u_int32_t)strlen(key_strings[j]) + 1; payload = rand() + i; key.data = &payload; key.size = sizeof(int); switch (ret = dbp->put(dbp, txn, &key, &data, DB_NOOVERWRITE)) { case 0: break; case DB_KEYEXIST: break; case DB_LOCK_DEADLOCK: (void)txn->abort(txn); if (retry_count < max_retries) { retry_count++; goto retry; } return ((void *)EXIT_FAILURE); default: dbenv->err(dbenv, ret, "db put failed"); ret = txn->abort(txn); if (ret != 0) dbenv->err(dbenv, ret, "txn abort failed"); return ((void *)EXIT_FAILURE); } } if ((ret = txn->commit(txn, 0)) != 0) { dbenv->err(dbenv, ret, "txn commit failed"); return ((void *)EXIT_FAILURE); } } return ((void *)EXIT_SUCCESS); }
/*
** Create an sqlite3_backup process to copy the contents of zSrcDb from
** connection handle pSrcDb to zDestDb in pDestDb. If successful, return
** a pointer to the new sqlite3_backup object.
**
** If an error occurs, NULL is returned and an error code and error message
** stored in database handle pDestDb.
** pDestDb Database to write to
** zDestDb Name of database within pDestDb
** pSrcDb Database connection to read from
** zSrcDb Name of database within pSrcDb
*/
sqlite3_backup *sqlite3_backup_init(sqlite3* pDestDb, const char *zDestDb,
    sqlite3* pSrcDb, const char *zSrcDb)
{
	sqlite3_backup *p;	/* Value to return */
	Parse parse;
	DB_ENV *dbenv;
	int ret;		/* NOTE(review): set but never read */

	p = NULL;
	ret = 0;

	/* Both connections are required. */
	if (!pDestDb || !pSrcDb)
		return 0;

	/* Hold both connection mutexes for the whole setup. */
	sqlite3_mutex_enter(pSrcDb->mutex);
	sqlite3_mutex_enter(pDestDb->mutex);
	if (pSrcDb == pDestDb) {
		sqlite3Error(pDestDb, SQLITE_ERROR,
		    "source and destination must be distinct");
		goto err;
	}

	/* Allocate space for a new sqlite3_backup object */
	p = (sqlite3_backup *)sqlite3_malloc(sizeof(sqlite3_backup));
	if (!p) {
		sqlite3Error(pDestDb, SQLITE_NOMEM, 0);
		goto err;
	}
	memset(p, 0, sizeof(sqlite3_backup));

	/* Resolve the named databases on each connection. */
	p->pSrc = findBtree(pDestDb, pSrcDb, zSrcDb);
	p->pDest = findBtree(pDestDb, pDestDb, zDestDb);
	p->pDestDb = pDestDb;
	p->pSrcDb = pSrcDb;

	if (0 == p->pSrc) {
		p->rc = p->pSrcDb->errCode;
		goto err;
	}
	if (0 == p->pDest) {
		p->rc = p->pDestDb->errCode;
		goto err;
	}

	/* Keep private heap copies of both database names. */
	p->iDb = sqlite3FindDbName(pDestDb, zDestDb);
	p->srcName = sqlite3_malloc((int)strlen(zSrcDb) + 1);
	p->destName = sqlite3_malloc((int)strlen(zDestDb) + 1);
	if (0 == p->srcName || 0 == p->destName) {
		p->rc = SQLITE_NOMEM;
		goto err;
	}
	strncpy(p->srcName, zSrcDb, strlen(zSrcDb) + 1);
	strncpy(p->destName, zDestDb, strlen(zDestDb) + 1);

	/* Also copy the destination's on-disk path, when it has one. */
	if (p->pDest->pBt->full_name) {
		const char *fullName = p->pDest->pBt->full_name;
		p->fullName = sqlite3_malloc((int)strlen(fullName) + 1);
		if (!p->fullName) {
			p->rc = SQLITE_NOMEM;
			goto err;
		}
		strncpy(p->fullName, fullName, strlen(fullName) + 1);
	}

	/*
	 * Make sure the schema has been read in, so the keyInfo
	 * can be retrieved for the indexes. No-op if already read.
	 */
	memset(&parse, 0, sizeof(parse));
	parse.db = p->pSrcDb;
	p->rc = sqlite3ReadSchema(&parse);
	if (p->rc != SQLITE_OK) {
		if (parse.zErrMsg != NULL)
			sqlite3DbFree(p->pSrcDb, parse.zErrMsg);
		goto err;
	}

	/* Begin a transaction on the source. */
	if (!p->pSrc->connected) {
		if ((p->rc = btreeOpenEnvironment(p->pSrc, 1)) != SQLITE_OK)
			goto err;
	}
	dbenv = p->pSrc->pBt->dbenv;
	p->rc = dberr2sqlite(dbenv->txn_begin(dbenv, p->pSrc->family_txn,
	    &p->srcTxn, 0));
	if (p->rc != SQLITE_OK) {
		sqlite3Error(pSrcDb, p->rc, 0);
		goto err;
	}

	/*
	 * Get the page count and list of tables to copy. This will
	 * result in a read lock on the schema table, held in the
	 * read transaction.
	 */
	if ((p->rc = btreeGetPageCount(p->pSrc,
	    &p->tables, &p->nPagecount, p->srcTxn)) != SQLITE_OK) {
		sqlite3Error(pSrcDb, p->rc, 0);
		goto err;
	}
	p->nRemaining = p->nPagecount;
	/* Mark both btrees as having a backup in progress. */
	p->pSrc->nBackup++;
	p->pDest->nBackup++;
	p->lastUpdate = p->pSrc->updateDuringBackup;
	goto done;

err:	/* Unified cleanup: abort the source txn, free all copies. */
	if (p != 0) {
		if (pDestDb->errCode == SQLITE_OK)
			sqlite3Error(pDestDb, p->rc, 0);
		if (p->srcTxn)
			p->srcTxn->abort(p->srcTxn);
		if (p->srcName != 0)
			sqlite3_free(p->srcName);
		if (p->destName != 0)
			sqlite3_free(p->destName);
		if (p->fullName != 0)
			sqlite3_free(p->fullName);
		if (p->tables != 0)
			sqlite3_free(p->tables);
		sqlite3_free(p);
		p = NULL;
	}
done:
	sqlite3_mutex_leave(pDestDb->mutex);
	sqlite3_mutex_leave(pSrcDb->mutex);
	return p;
}
/*
 * GET ?acl handler: emit the ACL grants for (bucket, key) as an
 * AccessControlPolicy XML document.  Requires READ_ACP access.
 *
 * NOTE(review): iteration starts with DB_NEXT from an unpositioned
 * cursor, so the whole ACL table is scanned and filtered by bucket/key
 * (see the in-code FIXME) -- presumably a known workaround.
 */
bool access_list(struct client *cli, const char *bucket, const char *key,
		 const char *user)
{
	/* One collected grant: permission list plus grantee name. */
	struct macl {
		char perm[128];		/* perm(s) granted */
		char grantee[64];	/* grantee user */
	};
	GHashTable *param;
	enum errcode err = InternalError;
	DB_ENV *dbenv = tdbrep.tdb.env;
	DB *acls = tdbrep.tdb.acls;
	int alloc_len;
	char owner[64];
	GList *res;
	struct db_acl_key *acl_key;
	struct db_acl_ent *acl;
	DB_TXN *txn = NULL;
	DBC *cur = NULL;
	GList *content;
	DBT pkey, pval;
	struct macl *mp;
	char guser[64];
	GList *p;
	char *s;
	int str_len;
	int rc;
	bool rcb;

	/* verify READ access for ACL */
	if (!user || !has_access(user, bucket, key, "READ_ACP")) {
		err = AccessDenied;
		goto err_out;
	}

	/* parse URI query string */
	param = hreq_query(&cli->req);
	if (!param)
		goto err_out;

	res = NULL;

	/* Stack-allocated ACL search key, sized to hold `key`. */
	alloc_len = sizeof(struct db_acl_key) + strlen(key) + 1;
	acl_key = alloca(alloc_len);
	memset(acl_key, 0, alloc_len);
	strncpy(acl_key->bucket, bucket, sizeof(acl_key->bucket));
	strcpy(acl_key->key, key);

	/* open transaction, search cursor */
	rc = dbenv->txn_begin(dbenv, NULL, &txn, 0);
	if (rc) {
		dbenv->err(dbenv, rc, "DB_ENV->txn_begin");
		goto err_out_param;
	}

	/* Resolve the bucket owner (also validates the bucket exists). */
	rc = bucket_find(txn, bucket, &owner[0], sizeof(owner));
	if (rc) {
		if (rc == DB_NOTFOUND)
			err = InvalidBucketName;
		else
			dbenv->err(dbenv, rc, "bucket_find");
		goto err_out_rb;
	}

	rc = acls->cursor(acls, txn, &cur, 0);
	if (rc) {
		acls->err(acls, rc, "acls->cursor");
		goto err_out_rb;
	}

	memset(&pkey, 0, sizeof(pkey));
	pkey.data = acl_key;
	pkey.size = alloc_len;

	/*
	 * Walk ACL records; the loop "increment" frees the DB_DBT_MALLOC
	 * record returned by the previous iteration.
	 */
	for (;; free(acl)) {
		memset(&pval, 0, sizeof(pval));
		pval.flags = DB_DBT_MALLOC;

		rc = cur->get(cur, &pkey, &pval, DB_NEXT);
		if (rc)
			break;

		acl = pval.data;

		/* This is a workaround, see FIXME about DB_NEXT. */
		if (strncmp(acl->bucket, bucket, sizeof(acl->bucket)))
			continue;
		if (strcmp(acl->key, key))
			continue;

		/* Copy the grant into a heap node for later rendering. */
		if ((mp = malloc(sizeof(struct macl))) == NULL) {
			free(acl);
			cur->close(cur);
			goto err_out_rb;
		}
		memcpy(mp->grantee, acl->grantee, sizeof(mp->grantee));
		mp->grantee[sizeof(mp->grantee)-1] = 0;
		memcpy(mp->perm, acl->perm, sizeof(mp->perm));

		/* lop off the trailing comma */
		mp->perm[sizeof(mp->perm)-1] = 0;
		str_len = strlen(mp->perm);
		if (str_len && mp->perm[str_len-1] == ',')
			mp->perm[--str_len] = 0;

		res = g_list_append(res, mp);
	}

	if (rc != DB_NOTFOUND)
		acls->err(acls, rc, "access_list iteration");

	/* close cursor, transaction */
	rc = cur->close(cur);
	if (rc)
		acls->err(acls, rc, "acls->cursor close");

	rc = txn->commit(txn, 0);
	if (rc)
		dbenv->err(dbenv, rc, "DB_ENV->txn_commit");

	/* dump collected acls -- no more exception handling */
	s = g_markup_printf_escaped(
		"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n"
		"<AccessControlPolicy "
		"xmlns=\"http://indy.yyz.us/doc/2006-03-01/\">\r\n"
		" <Owner>\r\n"
		" <ID>%s</ID>\r\n"
		" <DisplayName>%s</DisplayName>\r\n"
		" </Owner>\r\n",
		owner, owner);
	content = g_list_append(NULL, s);

	s = g_markup_printf_escaped(" <AccessControlList>\r\n");
	content = g_list_append(content, s);

	for (p = res; p != NULL; p = p->next) {
		mp = p->data;

		/* Map the anonymous grantee to a display name. */
		if (!strcmp(DB_ACL_ANON, mp->grantee)) {
			strcpy(guser, "anonymous");
		} else {
			strncpy(guser, mp->grantee, sizeof(guser));
			guser[sizeof(guser)-1] = 0;
		}

		s = g_markup_printf_escaped(
			" <Grant>\r\n"
			" <Grantee xmlns:xsi=\"http://www.w3.org/2001/"
			"XMLSchema-instance\" xsi:type=\"CanonicalUser\">\r\n"
			" <ID>%s</ID>\r\n"
			" <DisplayName>%s</DisplayName>\r\n"
			" </Grantee>\r\n",
			guser, guser);
		content = g_list_append(content, s);

		/*
		 * FIXME This parsing is totally lame, we should replace
		 * strings with a bit mask once we make sure this works.
		 */
		if (!strcmp(mp->perm, "READ,WRITE,READ_ACP,WRITE_ACP")) {
			s = g_markup_printf_escaped(
				" <Permission>FULL_CONTROL</Permission>\r\n");
		} else {
			s = g_markup_printf_escaped(
				" <Permission>%s</Permission>\r\n", mp->perm);
		}
		content = g_list_append(content, s);

		s = g_markup_printf_escaped(" </Grant>\r\n");
		content = g_list_append(content, s);

		free(mp);
	}

	s = g_markup_printf_escaped(" </AccessControlList>\r\n");
	content = g_list_append(content, s);
	s = g_markup_printf_escaped("</AccessControlPolicy>\r\n");
	content = g_list_append(content, s);

	g_list_free(res);

	rcb = cli_resp_xml(cli, 200, content);
	g_list_free(content);
	return rcb;

err_out_rb:
	rc = txn->abort(txn);
	if (rc)
		dbenv->err(dbenv, rc, "DB_ENV->txn_abort");
	for (p = res; p != NULL; p = p->next)
		free(p->data);
	g_list_free(res);
err_out_param:
	g_hash_table_destroy(param);
err_out:
	return cli_err(cli, err);
}
int main(void) { int ret, ret_c; u_int32_t db_flags, env_flags; DB *dbp; DB_ENV *envp; DBT key, data; DB_TXN *txn; const char *db_home_dir = "/tmp/mai-test-1/"; const char *file_name = "mydb.db"; const char keystr[BUF_SIZE]; const char datastr[BUF_SIZE]; int i = 0; dbp = NULL; envp = NULL; /* Open the environment */ ret = db_env_create(&envp, 0); if (ret != 0) { fprintf(stderr, "Error creating environment handle: %s\n", db_strerror(ret)); return (EXIT_FAILURE); } env_flags = DB_CREATE | /* Create the environment if it does * not already exist. */ DB_INIT_TXN | /* Initialize transactions */ DB_INIT_LOCK | /* Initialize locking. */ DB_INIT_LOG | /* Initialize logging */ DB_INIT_MPOOL | /* Initialize the in-memory cache. */ DB_RECOVER; ret = envp->open(envp, db_home_dir, env_flags, 0); if (ret != 0) { fprintf(stderr, "Error opening environment: %s\n", db_strerror(ret)); goto err; } /* Initialize the DB handle */ ret = db_create(&dbp, envp, 0); if (ret != 0) { envp->err(envp, ret, "Database creation failed"); goto err; } db_flags = DB_CREATE | DB_AUTO_COMMIT; /* Open the database. Note that we are using auto commit for the open, so the database is able to support transactions. */ ret = dbp->open(dbp, /* Pointer to the database */ NULL, /* Txn pointer */ file_name, /* File name */ NULL, /* Logical db name */ DB_BTREE, /* Database type (using btree) */ db_flags, /* Open flags */ 0); /* File mode. 
Using defaults */ if (ret != 0) { envp->err(envp, ret, "Database '%s' open failed", file_name); goto err; } /* Get the txn handle */ txn = NULL; ret = envp->txn_begin(envp, NULL, &txn, 0); if (ret != 0) { envp->err(envp, ret, "Transaction begin failed."); goto err; } for (i = 0; i < LOOP_SIZE; i++) { /* Prepare the DBTs */ memset(&key, 0, sizeof(DBT)); memset(&data, 0, sizeof(DBT)); sprintf((char*)keystr, "key%d", i); //sprintf((char*)datastr, "data%d", i); key.data = (char*)keystr; key.size = strlen((char*)keystr) + 1; //data.data = (char*)datastr; //data.size = strlen((char*)datastr) + 1; /* Perform the database write. If this fails, abort the transaction. */ ret = dbp->get(dbp, txn, &key, &data, 0); if (ret != 0) { envp->err(envp, ret, "Database put failed."); txn->abort(txn); goto err; } printf("%s %s\n", key.data, data.data); } /* Commit the transaction. Note that the transaction handle can no longer be used. */ ret = txn->commit(txn, 0); if (ret != 0) { envp->err(envp, ret, "Transaction commit failed."); goto err; } err: /* Close the database */ if (dbp != NULL) { ret_c = dbp->close(dbp, 0); if (ret_c != 0) { envp->err(envp, ret_c, "Database close failed."); ret = ret_c; } } /* Close the environment */ if (envp != NULL) { ret_c = envp->close(envp, 0); if (ret_c != 0) { fprintf(stderr, "environment close failed: %s\n", db_strerror(ret_c)); ret = ret_c; } } return (ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE); }
bool service_list(struct client *cli, const char *user) { GList *files = NULL, *content = NULL; char *s; enum errcode err = InternalError; int rc; bool rcb; DB_TXN *txn = NULL; DBC *cur = NULL; DB_ENV *dbenv = tdbrep.tdb.env; DB *bidx = tdbrep.tdb.buckets_idx; DBT skey, pkey, pval; if (asprintf(&s, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n" "<ListAllMyBucketsResult xmlns=\"http://indy.yyz.us/doc/2006-03-01/\">\r\n" " <Owner>\r\n" " <ID>%s</ID>\r\n" " <DisplayName>%s</DisplayName>\r\n" " </Owner>\r\n" " <Buckets>\r\n", user, user) < 0) goto err_out; content = g_list_append(content, s); /* open transaction, search cursor */ rc = dbenv->txn_begin(dbenv, NULL, &txn, 0); if (rc) { dbenv->err(dbenv, rc, "DB_ENV->txn_begin"); goto err_out_content; } rc = bidx->cursor(bidx, txn, &cur, 0); if (rc) { bidx->err(bidx, rc, "bidx->cursor"); goto err_out_content; } memset(&skey, 0, sizeof(skey)); memset(&pkey, 0, sizeof(pkey)); memset(&pval, 0, sizeof(pval)); skey.data = (char *) user; skey.size = strlen(user) + 1; /* FIXME: Use of DB_NEXT rather than DB_SET to begin search * means we iterate through entire db, rather than * starting at the first matching key. 
*/ /* loop through matching buckets, if any */ while (1) { char timestr[64]; struct db_bucket_ent *ent; rc = cur->pget(cur, &skey, &pkey, &pval, DB_NEXT); if (rc) break; ent = pval.data; s = g_markup_printf_escaped( " <Bucket>\r\n" " <Name>%s</Name>\r\n" " <CreationDate>%s</CreationDate>\r\n" " </Bucket>\r\n", ent->name, hutil_time2str(timestr, sizeof(timestr), GUINT64_FROM_LE(ent->time_create))); if (!s) goto err_out_content; content = g_list_append(content, s); } if (rc != DB_NOTFOUND) bidx->err(bidx, rc, "service_list iter"); /* close cursor, transaction */ rc = cur->close(cur); if (rc) bidx->err(bidx, rc, "bidx->cursor close"); rc = txn->commit(txn, 0); if (rc) dbenv->err(dbenv, rc, "DB_ENV->txn_commit"); if (asprintf(&s, " </Buckets>\r\n" "</ListAllMyBucketsResult>\r\n") < 0) goto err_out_content; content = g_list_append(content, s); rcb = cli_resp_xml(cli, 200, content); strlist_free(files); g_list_free(content); return rcb; err_out_content: strlist_free(content); err_out: strlist_free(files); return cli_err(cli, err); }
/*
 * Benchmark: measure the time to recover an environment containing
 * `count` committed transactions (each a single put of a fixed 20-byte
 * key/value pair).  Usage: b_recover [-C cachesize] [-c count].
 */
int b_recover(int argc, char *argv[])
{
	extern char *optarg;
	extern int optind;
	DB *dbp;
	DBT key, data;
	DB_ENV *dbenv;
	DB_TXN *txn;
	u_int32_t cachesize;
	int ch, i, count;

	/*
	 * Recover was so slow before release 4.0 that it's not worth
	 * running the test.
	 */
#if DB_VERSION_MAJOR < 4
	return (0);
#endif
	cachesize = MEGABYTE;
	count = 1000;
	while ((ch = getopt(argc, argv, "C:c:")) != EOF)
		switch (ch) {
		case 'C':
			cachesize = (u_int32_t)atoi(optarg);
			break;
		case 'c':
			count = atoi(optarg);
			break;
		case '?':
		default:
			return (usage());
		}
	argc -= optind;
	argv += optind;
	if (argc != 0)
		return (usage());

	/* Create the environment. */
	DB_BENCH_ASSERT(db_env_create(&dbenv, 0) == 0);
	dbenv->set_errfile(dbenv, stderr);
	DB_BENCH_ASSERT(dbenv->set_cachesize(dbenv, 0, cachesize, 0) == 0);

#define	OFLAGS \
	(DB_CREATE | DB_INIT_LOCK | \
	DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_PRIVATE)
	/* DB 3.0's DB_ENV->open took an extra (config) argument. */
#if DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 0
	DB_BENCH_ASSERT(dbenv->open(dbenv, TESTDIR, NULL, OFLAGS, 0666) == 0);
#endif
#if DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1
	DB_BENCH_ASSERT(dbenv->open(dbenv, TESTDIR, OFLAGS, 0666) == 0);
#endif
#if DB_VERSION_MAJOR > 3 || DB_VERSION_MINOR > 1
	DB_BENCH_ASSERT(dbenv->open(dbenv, TESTDIR, OFLAGS, 0666) == 0);
#endif

	/* Create the database. */
	DB_BENCH_ASSERT(db_create(&dbp, dbenv, 0) == 0);
	/*
	 * NOTE(review): this condition is false for release 5.0
	 * (MINOR == 0), which would select the pre-4.1 open signature --
	 * confirm the intended version coverage.
	 */
#if DB_VERSION_MAJOR >= 4 && DB_VERSION_MINOR >= 1
	DB_BENCH_ASSERT(dbp->open(dbp, NULL,
	    TESTFILE, NULL, DB_BTREE, DB_CREATE | DB_AUTO_COMMIT, 0666) == 0);
#else
	DB_BENCH_ASSERT(
	    dbp->open(dbp, TESTFILE, NULL, DB_BTREE, DB_CREATE, 0666) == 0);
#endif

	/* Initialize the data. */
	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.size = data.size = 20;
	key.data = data.data = "01234567890123456789";

	/* Start/commit a transaction count times. */
	for (i = 0; i < count; ++i) {
#if DB_VERSION_MAJOR < 4
		DB_BENCH_ASSERT(
		    txn_begin(dbenv, NULL, &txn, DB_TXN_NOSYNC) == 0);
		DB_BENCH_ASSERT(dbp->put(dbp, txn, &key, &data, 0) == 0);
		DB_BENCH_ASSERT(txn_commit(txn, 0) == 0);
#else
		DB_BENCH_ASSERT(
		    dbenv->txn_begin(dbenv, NULL, &txn, DB_TXN_NOSYNC) == 0);
		DB_BENCH_ASSERT(dbp->put(dbp, txn, &key, &data, 0) == 0);
		DB_BENCH_ASSERT(txn->commit(txn, 0) == 0);
#endif
	}

	DB_BENCH_ASSERT(dbp->close(dbp, 0) == 0);
	DB_BENCH_ASSERT(dbenv->close(dbenv, 0) == 0);

	/* Create a new DB_ENV handle. */
	DB_BENCH_ASSERT(db_env_create(&dbenv, 0) == 0);
	dbenv->set_errfile(dbenv, stderr);
	DB_BENCH_ASSERT(
	    dbenv->set_cachesize(dbenv, 0, 1048576 /* 1MB */, 0) == 0);

	/* Now run recovery. */
	TIMER_START;
#if DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 0
	DB_BENCH_ASSERT(dbenv->open(
	    dbenv, TESTDIR, NULL, OFLAGS | DB_RECOVER, 0666) == 0);
#endif
#if DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 1
	DB_BENCH_ASSERT(
	    dbenv->open(dbenv, TESTDIR, OFLAGS | DB_RECOVER, 0666) == 0);
#endif
#if DB_VERSION_MAJOR > 3 || DB_VERSION_MINOR > 1
	DB_BENCH_ASSERT(
	    dbenv->open(dbenv, TESTDIR, OFLAGS | DB_RECOVER, 0666) == 0);
#endif
	TIMER_STOP;

	/*
	 * We divide the time by the number of transactions, so an "operation"
	 * is the recovery of a single transaction.
	 */
	printf("# recovery after %d transactions\n", count);
	TIMER_DISPLAY(count);

	DB_BENCH_ASSERT(dbenv->close(dbenv, 0) == 0);

	return (0);
}
bool has_access(const char *user, const char *bucket, const char *key, const char *perm_in) { int rc; char perm[16]; bool match = false; size_t alloc_len, key_len = 0; struct db_acl_key *acl_key; struct db_acl_ent *acl; DB_ENV *dbenv = tdbrep.tdb.env; DB_TXN *txn = NULL; DBT pkey, pval; DBC *cur = NULL; DB *acls = tdbrep.tdb.acls; if (user == NULL) user = DB_ACL_ANON; if (key == NULL) key = ""; /* alloc ACL key on stack, sized to fit 'key' function arg */ alloc_len = sizeof(struct db_acl_key) + 1; if (key) { key_len = strlen(key); alloc_len += key_len; } acl_key = alloca(alloc_len); /* fill in search key struct */ memset(acl_key, 0, alloc_len); strncpy(acl_key->bucket, bucket, sizeof(acl_key->bucket)); memcpy(acl_key->key, key, key_len); acl_key->key[key_len] = 0; snprintf(perm, sizeof(perm), "%s,", perm_in); /* open transaction, search cursor */ rc = dbenv->txn_begin(dbenv, NULL, &txn, 0); if (rc) { dbenv->err(dbenv, rc, "DB_ENV->txn_begin"); return false; } rc = acls->cursor(acls, txn, &cur, 0); if (rc) { acls->err(acls, rc, "acls->cursor"); goto err_out; } memset(&pkey, 0, sizeof(pkey)); pkey.data = acl_key; pkey.size = alloc_len; memset(&pval, 0, sizeof(pval)); pval.flags = DB_DBT_MALLOC; /* loop through matching records (if any) */ rc = cur->get(cur, &pkey, &pval, DB_SET); while (rc == 0) { acl = pval.data; if (!strncmp(acl->grantee, user, sizeof(acl->grantee))) { match = (strstr(acl->perm, perm) != NULL); free(acl); break; } free(acl); memset(&pval, 0, sizeof(pval)); pval.flags = DB_DBT_MALLOC; rc = cur->get(cur, &pkey, &pval, DB_NEXT_DUP); } if (rc && rc != DB_NOTFOUND) acls->err(acls, rc, "has_access iteration"); /* close cursor, transaction */ rc = cur->close(cur); if (rc) acls->err(acls, rc, "acls->cursor close"); rc = txn->commit(txn, 0); if (rc) dbenv->err(dbenv, rc, "DB_ENV->txn_commit"); return match; err_out: rc = txn->abort(txn); if (rc) dbenv->err(dbenv, rc, "DB_ENV->txn_abort"); return false; }
/*
 * Scan the triplet database and delete entries idle longer than the
 * applicable threshold (pass_max_idle for triplets with a pass count,
 * bloc_max_idle otherwise), all inside one DB_TXN_NOWAIT transaction.
 * Only runs when deadlock detection is active, since cursor operations
 * can hold several locks (see the URL below).
 *
 * NOTE(review): each c_get reads into the global `dbdata`, and
 * `triplet_data` is consulted afterwards -- presumably dbdata is bound
 * to triplet_data via DB_DBT_USERMEM at setup; confirm in get_db().
 */
static void run_expiry()
{
	DB_ENV *dbenv;
	DB *db;
	DB_TXN *txn;
	DBC *dbcp;
	int rc;
	time_t now;
	DBT key = { 0 };
	unsigned int count = 0;

	if (exit_requested)
		return;

	/* Cursor operations can hold several locks and therefore deadlock
	   so don't run expiry if deadlock detection does not work
	   http://docs.oracle.com/cd/E17076_02/html/programmer_reference/lock_notxn.html */
	rc = get_db(&db, 0);
	assert(! rc);
	if (db == 0 || deadlock_detect == 0)
		return;
	rc = get_dbenv(&dbenv, 0);
	assert(! rc && dbenv);

	if (time(&now) == (time_t)-1) {
		syslog(LOG_ERR, "time failed during run_expiry");
		return;
	}

	/* Suppress noisy error reports while probing with NOWAIT. */
	muffle_error++;

	rc = dbenv->txn_begin(dbenv, NULL, &txn, DB_TXN_NOWAIT);
	if (rc) {
		if (rc == DB_LOCK_DEADLOCK)
			syslog(LOG_DEBUG, "skipping concurrent expiry avoids "
			       "deadlocks and unnecessary work");
		else
			log_db_error("txn_begin failed during run_expiry", rc);
		goto out;
	}
#if DB_VERSION_MAJOR >= 5
	/* Prefer aborting this txn over others in deadlock resolution. */
	call_db(txn->set_priority(txn, 50), "TXN->set_priority");
#endif

	rc = call_db(db->cursor(db, txn, &dbcp, 0),
		     "db->cursor failed during expiry run");
	if (rc)
		goto txn_fail;

	/* Walk every record, write-locked (DB_RMW) for possible delete. */
	while ((rc = dbcp->c_get(dbcp, &key, &dbdata, DB_NEXT | DB_RMW)) == 0) {
		time_t ref_time;
		double age_max, age;

		/* Pick the idle clock and limit for this triplet type. */
		if (triplet_data.pass_count) {
			ref_time = triplet_data.access_time;
			age_max = pass_max_idle;
		} else {
			ref_time = triplet_data.create_time;
			age_max = bloc_max_idle;
		}
		age = difftime(now, ref_time);
		if (age > age_max) {
			if (opt_verbose) {
				syslog(LOG_INFO,
				       "Expiring %s %s after %.0f seconds idle",
				       db_key_ntop(key.data),
				       triplet_data.pass_count ? "pass" : "block",
				       age);
			}
			rc = call_db(dbcp->c_del(dbcp, 0), "dbcp->c_del failed");
			if (rc)
				goto cursor_fail;
			count++;
		}
		if (exit_requested)
			break;
	}
	if (rc && rc != DB_NOTFOUND) {
		if (rc == DB_LOCK_DEADLOCK)
			syslog(LOG_NOTICE,
			       "Aborting concurrent expiry due to deadlock");
		else
			log_db_error("dbcp->c_get failed", rc);
		goto cursor_fail;
	}
	/* Cursor must close before the txn can commit. */
	if (call_db(dbcp->c_close(dbcp), "dbcp->c_close failed"))
		goto txn_fail;
	call_db(txn->commit(txn, 0), "commit failed in run_expiry");
	if (count)
		syslog(LOG_NOTICE, "Expired %u triplets", count);
	goto out;

cursor_fail:
	call_db(dbcp->c_close(dbcp), "dbcp->c_close failed");
txn_fail:
	call_db(txn->abort(txn), "failed to abort");
out:
	muffle_error--;
	return;
}
/*
 * PUT /bucket handler: create `bucket` owned by `user`, or (with the
 * ?acl query) set the ACL on a possibly pre-existing bucket.  Responds
 * 200 with a Location header on success.
 */
bool bucket_add(struct client *cli, const char *user, const char *bucket)
{
	char *hdr, timestr[64];
	enum errcode err = InternalError;
	int rc;
	struct db_bucket_ent ent;
	bool setacl;		/* is ok to put pre-existing bucket */
	enum ReqACLC canacl;
	DB *buckets = tdbrep.tdb.buckets;
	DB *acls = tdbrep.tdb.acls;
	DB_ENV *dbenv = tdbrep.tdb.env;
	DB_TXN *txn = NULL;
	DBT key, val;

	if (!user)
		return cli_err(cli, AccessDenied);

	/* prepare parameters */
	setacl = false;
	if (cli->req.uri.query_len) {
		switch (hreq_is_query(&cli->req)) {
		case URIQ_ACL:
			setacl = true;
			break;
		default:
			err = InvalidURI;
			goto err_par;
		}
	}
	/* Canned ACL from request headers; ACLCNUM marks an invalid one. */
	if ((rc = hreq_acl_canned(&cli->req)) == ACLCNUM) {
		err = InvalidArgument;
		goto err_par;
	}
	/* -1 means no canned ACL was supplied; default to private. */
	canacl = (rc == -1)? ACLC_PRIV: rc;

	/* begin trans */
	rc = dbenv->txn_begin(dbenv, NULL, &txn, 0);
	if (rc) {
		dbenv->err(dbenv, rc, "DB_ENV->txn_begin");
		goto err_db;
	}

	/* Build the bucket record (name, owner, creation time). */
	memset(&key, 0, sizeof(key));
	memset(&val, 0, sizeof(val));
	memset(&ent, 0, sizeof(ent));
	strncpy(ent.name, bucket, sizeof(ent.name));
	strncpy(ent.owner, user, sizeof(ent.owner));
	ent.time_create = GUINT64_TO_LE(time(NULL));

	key.data = &ent.name;
	key.size = strlen(ent.name) + 1;
	val.data = &ent;
	val.size = sizeof(ent);

	if (setacl) {
		/* check if the bucket exists, else insert it */
		rc = bucket_find(txn, bucket, NULL, 0);
		if (rc) {
			if (rc != DB_NOTFOUND) {
				buckets->err(buckets, rc, "buckets->find");
				goto err_out;
			}
			rc = buckets->put(buckets, txn, &key, &val,
					  DB_NOOVERWRITE);
			if (rc) {
				buckets->err(buckets, rc, "buckets->put");
				goto err_out;
			}
		} else {
			/* Existing bucket: need WRITE_ACP, then wipe the
			 * old bucket-level ACLs before re-adding below. */
			if (!has_access(user, bucket, NULL, "WRITE_ACP")) {
				err = AccessDenied;
				goto err_out;
			}
			if (!object_del_acls(txn, bucket, ""))
				goto err_out;
		}
	} else {
		/* attempt to insert new bucket */
		rc = buckets->put(buckets, txn, &key, &val, DB_NOOVERWRITE);
		if (rc) {
			if (rc == DB_KEYEXIST)
				err = BucketAlreadyExists;
			else
				buckets->err(buckets, rc, "buckets->put");
			goto err_out;
		}
	}

	/* insert bucket ACL */
	rc = add_access_canned(txn, bucket, "", user, canacl);
	if (rc) {
		acls->err(acls, rc, "acls->put");
		goto err_out;
	}

	/* commit -- no more exception emulation with goto. */
	rc = txn->commit(txn, 0);
	if (rc) {
		dbenv->err(dbenv, rc, "DB_ENV->txn_commit");
		return cli_err(cli, InternalError);
	}

	if (asprintf(&hdr,
		     "HTTP/%d.%d 200 x\r\n"
		     "Content-Length: 0\r\n"
		     "Date: %s\r\n"
		     "Location: /%s\r\n"
		     "Server: " PACKAGE_STRING "\r\n"
		     "\r\n",
		     cli->req.major,
		     cli->req.minor,
		     hutil_time2str(timestr, sizeof(timestr), time(NULL)),
		     bucket) < 0)
		return cli_err(cli, InternalError);

	/* Queue the response; on queue failure hdr is freed here. */
	rc = atcp_writeq(&cli->wst, hdr, strlen(hdr), atcp_cb_free, hdr);
	if (rc) {
		free(hdr);
		return true;
	}

	return atcp_write_start(&cli->wst);

err_out:
	rc = txn->abort(txn);
	if (rc)
		dbenv->err(dbenv, rc, "DB_ENV->txn_abort");
err_db:
err_par:
	return cli_err(cli, err);
}
int getInfo(int id) { DB_ENV *dbenv = radacct_dbenv; DB *dbp = radacct_dbp; DB_TXN *tid = NULL; DBT key, data; db_recno_t recno; int info = 0; char buf[REC_SIZE]; int ret; clock_t t1, t2; clock_t t3, t4; struct tms buf1, buf2; struct tms buf3, buf4; memset(&key, 0, sizeof(DBT)); memset(&data, 0, sizeof(DBT)); key.data = &recno; key.size = key.ulen = sizeof(recno); key.flags = DB_DBT_USERMEM; data.data = buf; data.ulen = sizeof(buf); data.flags = DB_DBT_USERMEM; if((ret = dbenv->txn_begin(dbenv, NULL, &tid, 0)) != 0) { printf("getInfo: transaction failed: %s\n", db_strerror(ret)); return 0; } t1 = times(&buf1); ret = dbp->get(dbp, tid, &key, &data, DB_CONSUME); t2 = times(&buf2); get_total_time[id] += (t2 - t1); get_time_buf[id].tms_utime += (buf2.tms_utime - buf1.tms_utime); get_time_buf[id].tms_stime += (buf2.tms_stime - buf1.tms_stime); get_time_buf[id].tms_cutime += (buf2.tms_cutime - buf1.tms_cutime); get_time_buf[id].tms_cstime += (buf2.tms_cstime - buf1.tms_cstime); switch(ret) { case DB_LOCK_DEADLOCK: printf("getInfo: deadlock: %s\n", db_strerror(ret)); break; case 0: t3 = times(&buf3); removeInfo(tid); t4 = times(&buf4); remove_total_time[id] = (t4 - t3); remove_time_buf[id].tms_utime += (buf4.tms_utime - buf3.tms_utime); remove_time_buf[id].tms_stime += (buf4.tms_stime - buf3.tms_stime); remove_time_buf[id].tms_cutime += (buf4.tms_cutime - buf3.tms_cutime); remove_time_buf[id].tms_cstime += (buf4.tms_cstime - buf3.tms_cstime); break; default: printf("getInfo: oops: %d\n", ret); break; } return info; }
bool bucket_del(struct client *cli, const char *user, const char *bucket) { char *hdr, timestr[64]; enum errcode err = InternalError; int rc; struct db_bucket_ent ent; DB_ENV *dbenv = tdbrep.tdb.env; DB_TXN *txn = NULL; DB *buckets = tdbrep.tdb.buckets; DB *acls = tdbrep.tdb.acls; DB *objs = tdbrep.tdb.objs; DBC *cur = NULL; DBT key, val; char structbuf[sizeof(struct db_acl_key) + 32]; struct db_acl_key *acl_key = (struct db_acl_key *) &structbuf; struct db_obj_key *obj_key = (struct db_obj_key *) &structbuf; if (!user) return cli_err(cli, AccessDenied); /* open transaction */ rc = dbenv->txn_begin(dbenv, NULL, &txn, 0); if (rc) { dbenv->err(dbenv, rc, "DB_ENV->txn_begin"); goto err_none; } /* search for (bucket, *) in object database, to see if * any objects associated with this bucket exist */ rc = objs->cursor(objs, txn, &cur, 0); if (rc) { objs->err(objs, rc, "objs->cursor"); goto err_out; } memset(&structbuf, 0, sizeof(structbuf)); strncpy(obj_key->bucket, bucket, sizeof(obj_key->bucket)); obj_key->key[0] = 0; memset(&key, 0, sizeof(key)); memset(&val, 0, sizeof(val)); key.data = obj_key; key.size = sizeof(*obj_key) + strlen(obj_key->key) + 1; val.flags = DB_DBT_MALLOC; rc = cur->get(cur, &key, &val, DB_SET_RANGE); if (rc == 0) { struct db_obj_key *newkey = key.data; if (!strcmp(newkey->bucket, bucket)) { free(newkey); cur->close(cur); err = BucketNotEmpty; goto err_out; } free(newkey); } else if (rc != DB_NOTFOUND) objs->err(objs, rc, "bucket_del empty check"); rc = cur->close(cur); if (rc) { objs->err(objs, rc, "objs->cursor_close"); goto err_out; } memset(&key, 0, sizeof(key)); key.data = (char *) bucket; key.size = strlen(bucket) + 1; memset(&val, 0, sizeof(val)); val.data = &ent; val.ulen = sizeof(struct db_bucket_ent); val.flags = DB_DBT_USERMEM; /* verify the bucket exists */ rc = buckets->get(buckets, txn, &key, &val, 0); if (rc) { if (rc == DB_NOTFOUND) err = NoSuchBucket; else buckets->err(buckets, rc, "buckets->get"); goto err_out; } /* verify that 
it is the owner who wishes to delete bucket */ if (strncmp(user, ent.owner, sizeof(ent.owner))) { err = AccessDenied; goto err_out; } /* delete bucket */ rc = buckets->del(buckets, txn, &key, 0); if (rc) { buckets->err(buckets, rc, "bucket del"); goto err_out; } /* delete bucket ACLs */ memset(&structbuf, 0, sizeof(structbuf)); strncpy(acl_key->bucket, bucket, sizeof(acl_key->bucket)); acl_key->key[0] = 0; memset(&key, 0, sizeof(key)); key.data = acl_key; key.size = sizeof(*acl_key) + strlen(acl_key->key) + 1; rc = acls->del(acls, txn, &key, 0); if (rc && rc != DB_NOTFOUND) { acls->err(acls, rc, "acl del"); goto err_out; } /* commit */ rc = txn->commit(txn, 0); if (rc) { dbenv->err(dbenv, rc, "DB_ENV->txn_commit"); return cli_err(cli, InternalError); } if (asprintf(&hdr, "HTTP/%d.%d 204 x\r\n" "Content-Length: 0\r\n" "Date: %s\r\n" "Server: " PACKAGE_STRING "\r\n" "\r\n", cli->req.major, cli->req.minor, hutil_time2str(timestr, sizeof(timestr), time(NULL))) < 0) return cli_err(cli, InternalError); rc = atcp_writeq(&cli->wst, hdr, strlen(hdr), atcp_cb_free, hdr); if (rc) { free(hdr); return true; } return atcp_write_start(&cli->wst); err_out: rc = txn->abort(txn); if (rc) dbenv->err(dbenv, rc, "DB_ENV->txn_abort"); err_none: return cli_err(cli, err); }
int b_txn(int argc, char *argv[]) { extern char *optarg; extern int optind; DB_ENV *dbenv; DB_TXN *txn; int tabort, ch, i, count; count = 1000; tabort = 0; while ((ch = getopt(argc, argv, "ac:")) != EOF) switch (ch) { case 'a': tabort = 1; break; case 'c': count = atoi(optarg); break; case '?': default: return (usage()); } argc -= optind; argv += optind; if (argc != 0) return (usage()); /* Create the environment. */ DB_BENCH_ASSERT(db_env_create(&dbenv, 0) == 0); dbenv->set_errfile(dbenv, stderr); #if DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR < 1 DB_BENCH_ASSERT(dbenv->open(dbenv, TESTDIR, NULL, DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_PRIVATE, 0666) == 0); #else DB_BENCH_ASSERT(dbenv->open(dbenv, TESTDIR, DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_PRIVATE, 0666) == 0); #endif /* Start and commit/abort a transaction count times. */ TIMER_START; if (tabort) for (i = 0; i < count; ++i) { #if DB_VERSION_MAJOR < 4 DB_BENCH_ASSERT(txn_begin(dbenv, NULL, &txn, 0) == 0); DB_BENCH_ASSERT(txn_abort(txn) == 0); #else DB_BENCH_ASSERT( dbenv->txn_begin(dbenv, NULL, &txn, 0) == 0); DB_BENCH_ASSERT(txn->abort(txn) == 0); #endif } else for (i = 0; i < count; ++i) { #if DB_VERSION_MAJOR < 4 DB_BENCH_ASSERT(txn_begin(dbenv, NULL, &txn, 0) == 0); DB_BENCH_ASSERT(txn_commit(txn, 0) == 0); #else DB_BENCH_ASSERT( dbenv->txn_begin(dbenv, NULL, &txn, 0) == 0); DB_BENCH_ASSERT(txn->commit(txn, 0) == 0); #endif } TIMER_STOP; printf("# %d empty transaction start/%s pairs\n", count, tabort ? "abort" : "commit"); TIMER_DISPLAY(count); DB_BENCH_ASSERT(dbenv->close(dbenv, 0) == 0); return (0); }
static bool bucket_list_keys(struct client *cli, const char *user, const char *bucket) { GHashTable *param; enum errcode err = InternalError; char *prefix, *marker, *maxkeys_str, *delim, *s; int maxkeys = 100, i, rc; GList *content, *tmpl; size_t pfx_len; struct bucket_list_info bli; bool rcb; DB_ENV *dbenv = tdbrep.tdb.env; DB_TXN *txn = NULL; DB *objs = tdbrep.tdb.objs; DBC *cur = NULL; DBT pkey, pval; struct db_obj_key *obj_key; size_t alloc_len; bool seen_prefix = false; int get_flags; /* verify READ access */ if (!user || !has_access(user, bucket, NULL, "READ")) { err = AccessDenied; goto err_out; } /* parse URI query string */ param = hreq_query(&cli->req); if (!param) goto err_out; /* read useful params from query string */ prefix = g_hash_table_lookup(param, "prefix"); pfx_len = prefix ? strlen(prefix) : 0; marker = g_hash_table_lookup(param, "marker"); delim = g_hash_table_lookup(param, "delimiter"); maxkeys_str = g_hash_table_lookup(param, "max-keys"); if (maxkeys_str) { i = atoi(maxkeys_str); if (i > 0 && i < maxkeys) maxkeys = i; } /* open transaction */ rc = dbenv->txn_begin(dbenv, NULL, &txn, 0); if (rc) { dbenv->err(dbenv, rc, "DB_ENV->txn_begin"); goto err_out; } /* search for (bucket, *) in object database, to see if * any objects associated with this bucket exist */ rc = objs->cursor(objs, txn, &cur, 0); if (rc) { objs->err(objs, rc, "objs->cursor"); goto err_out; } alloc_len = sizeof(*obj_key) + (marker ? strlen(marker) : pfx_len) + 1; obj_key = alloca(alloc_len); memset(obj_key, 0, alloc_len); strncpy(obj_key->bucket, bucket, sizeof(obj_key->bucket)); strcpy(obj_key->key, marker ? marker : prefix ? 
prefix : ""); memset(&pkey, 0, sizeof(pkey)); pkey.data = obj_key; pkey.size = alloc_len; memset(&bli, 0, sizeof(bli)); bli.prefix = prefix; bli.pfx_len = pfx_len; bli.delim = delim; bli.common_pfx = g_hash_table_new_full(g_str_hash, g_str_equal, free, NULL); bli.maxkeys = maxkeys; /* iterate through each returned data row */ get_flags = DB_SET_RANGE; while (1) { struct obj_vitals v; struct db_obj_key *tmpkey; struct db_obj_ent *obj; memset(&pval, 0, sizeof(pval)); pval.flags = DB_DBT_MALLOC; rc = cur->get(cur, &pkey, &pval, get_flags); if (rc) { if (rc != DB_NOTFOUND) objs->err(objs, rc, "bucket_list_keys iter"); break; } get_flags = DB_NEXT; tmpkey = pkey.data; obj = pval.data; if (strcmp(tmpkey->bucket, bucket)) { free(obj); break; } if (prefix) { if (strncmp(tmpkey->key, prefix, pfx_len) != 0) { free(obj); if (!seen_prefix) /* continue searching for * a record that begins with this * prefix */ continue; else /* no more records with our prefix */ break; } seen_prefix = true; } memset(&v, 0, sizeof(v)); strcpy(v.md5, obj->md5); strncpy(v.owner, obj->owner, sizeof(v.owner)-1); if (!(GUINT32_FROM_LE(obj->flags) & DB_OBJ_INLINE)) memcpy(&v.addr, &obj->d.a, sizeof(v.addr)); v.mtime = GUINT64_FROM_LE(obj->mtime); v.size = GUINT64_FROM_LE(obj->size); free(obj); if (bucket_list_iter(tmpkey->key, &v, &bli)) break; } /* close cursor, transaction */ rc = cur->close(cur); if (rc) { objs->err(objs, rc, "objs->cursor close"); goto err_out_rb; } rc = txn->commit(txn, 0); if (rc) { dbenv->err(dbenv, rc, "DB_ENV->txn_commit"); goto err_out_param; } s = g_markup_printf_escaped( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n" "<ListBucketResult xmlns=\"http://indy.yyz.us/doc/2006-03-01/\">\r\n" " <Name>%s</Name>\r\n" " <MaxKeys>%d</MaxKeys>\r\n" " <IsTruncated>%s</IsTruncated>\r\n", bucket, maxkeys, bli.trunc ? 
"true" : "false"); content = g_list_append(NULL, s); if (prefix) { s = g_markup_printf_escaped(" <Prefix>%s</Prefix>\n", prefix); content = g_list_append(content, s); } if (marker) { s = g_markup_printf_escaped(" <Marker>%s</Marker>\n", marker); content = g_list_append(content, s); } tmpl = bli.res; while (tmpl) { char timestr[64]; struct obj_vitals *vp; vp = tmpl->data; tmpl = tmpl->next; /* * FIXME Use the vp->addr to verify that key still exists. * And if it doesn't, then what? (addr.nid can be 0 for inline) */ s = g_markup_printf_escaped( " <Contents>\r\n" " <Key>%s</Key>\r\n" " <LastModified>%s</LastModified>\r\n" " <ETag>%s</ETag>\r\n" " <Size>%llu</Size>\r\n" " <StorageClass>STANDARD</StorageClass>\r\n" " <Owner>\r\n" " <ID>%s</ID>\r\n" " <DisplayName>%s</DisplayName>\r\n" " </Owner>\r\n" " </Contents>\r\n", vp->key, hutil_time2str(timestr, sizeof(timestr), vp->mtime / 1000000), vp->md5, (unsigned long long) vp->size, vp->owner, vp->owner); content = g_list_append(content, s); free(vp->key); free(vp); } g_list_free(bli.res); content = bucket_list_pfx(content, bli.common_pfx, bli.delim); s = strdup("</ListBucketResult>\r\n"); content = g_list_append(content, s); free(bli.last_comp); g_hash_table_destroy(bli.common_pfx); g_hash_table_destroy(param); rcb = cli_resp_xml(cli, 200, content); g_list_free(content); return rcb; err_out_rb: rc = txn->abort(txn); if (rc) dbenv->err(dbenv, rc, "DB_ENV->txn_abort"); err_out_param: g_hash_table_destroy(param); err_out: return cli_err(cli, err); }
/*
 * b_inmem_op_tds --
 *	Benchmark "ops" transactional-data-store operations against a
 *	private environment: with update != 0, time ops puts of one
 *	fixed key/data pair; otherwise time ops begin/get/commit cycles
 *	against a single pre-inserted pair.  env_flags / log_flags are
 *	applied via DB_ENV->set_flags / log_set_config as appropriate.
 *
 *	Asserts via mpool statistics that the workload stayed in memory
 *	(no pages written, no cache misses).
 *
 *	Fix vs. previous revision: keybuf/databuf were leaked.
 */
static void
b_inmem_op_tds(u_int ops, int update, u_int32_t env_flags, u_int32_t log_flags)
{
	DB *dbp;
	DBT key, data;
	DB_ENV *dbenv;
	DB_MPOOL_STAT *gsp;
	DB_TXN *txn;
	char *keybuf, *databuf;

	DB_BENCH_ASSERT((keybuf = malloc(keysize)) != NULL);
	DB_BENCH_ASSERT((databuf = malloc(datasize)) != NULL);

	/* One fixed key/data pair used for every operation. */
	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = keybuf;
	key.size = keysize;
	memset(keybuf, 'a', keysize);
	data.data = databuf;
	data.size = datasize;
	memset(databuf, 'b', datasize);

	DB_BENCH_ASSERT(db_env_create(&dbenv, 0) == 0);

	dbenv->set_errfile(dbenv, stderr);

	/* General environment configuration. */
#ifdef DB_AUTO_COMMIT
	DB_BENCH_ASSERT(dbenv->set_flags(dbenv, DB_AUTO_COMMIT, 1) == 0);
#endif
	if (env_flags != 0)
		DB_BENCH_ASSERT(dbenv->set_flags(dbenv, env_flags, 1) == 0);

	/* Logging configuration. */
	if (log_flags != 0)
#if DB_VERSION_MINOR >= 7
		DB_BENCH_ASSERT(
		    dbenv->log_set_config(dbenv, log_flags, 1) == 0);
#else
		DB_BENCH_ASSERT(dbenv->set_flags(dbenv, log_flags, 1) == 0);
#endif

	/* On-disk logs need room; in-memory logs must not set lg_max. */
#ifdef DB_LOG_INMEMORY
	if (!(log_flags & DB_LOG_INMEMORY))
#endif
#ifdef DB_LOG_IN_MEMORY
	if (!(log_flags & DB_LOG_IN_MEMORY))
#endif
		DB_BENCH_ASSERT(dbenv->set_lg_max(dbenv, logbufsize * 10) == 0);
	DB_BENCH_ASSERT(dbenv->set_lg_bsize(dbenv, logbufsize) == 0);

	DB_BENCH_ASSERT(dbenv->open(dbenv, "TESTDIR",
	    DB_CREATE | DB_PRIVATE | DB_INIT_LOCK | DB_INIT_LOG |
	    DB_INIT_MPOOL | DB_INIT_TXN, 0666) == 0);

	DB_BENCH_ASSERT(db_create(&dbp, dbenv, 0) == 0);
	DB_BENCH_ASSERT(dbp->set_pagesize(dbp, pagesize) == 0);
	DB_BENCH_ASSERT(dbp->open(
	    dbp, NULL, TESTFILE, NULL, DB_BTREE, DB_CREATE, 0666) == 0);

	if (update) {
		(void)dbenv->memp_stat(dbenv, &gsp, NULL, DB_STAT_CLEAR);

		TIMER_START;
		for (; ops > 0; --ops)
			DB_BENCH_ASSERT(
			    dbp->put(dbp, NULL, &key, &data, 0) == 0);
		TIMER_STOP;

		/* Nothing should have been flushed to disk. */
		if (dbenv->memp_stat(dbenv, &gsp, NULL, 0) == 0)
			DB_BENCH_ASSERT(gsp->st_page_out == 0);
	} else {
		DB_BENCH_ASSERT(dbp->put(dbp, NULL, &key, &data, 0) == 0);
		(void)dbenv->memp_stat(dbenv, &gsp, NULL, DB_STAT_CLEAR);

		TIMER_START;
		for (; ops > 0; --ops) {
			DB_BENCH_ASSERT(
			    dbenv->txn_begin(dbenv, NULL, &txn, 0) == 0);
			/* NOTE(review): get passes a NULL txn handle, so
			 * only begin/commit overhead is timed — presumably
			 * intentional; confirm against the benchmark spec. */
			DB_BENCH_ASSERT(
			    dbp->get(dbp, NULL, &key, &data, 0) == 0);
			DB_BENCH_ASSERT(txn->commit(txn, 0) == 0);
		}
		TIMER_STOP;

		/* The single pair must have stayed cached. */
		if (dbenv->memp_stat(dbenv, &gsp, NULL, 0) == 0)
			DB_BENCH_ASSERT(gsp->st_cache_miss == 0);
	}

	DB_BENCH_ASSERT(dbp->close(dbp, 0) == 0);
	DB_BENCH_ASSERT(dbenv->close(dbenv, 0) == 0);

	free(keybuf);
	free(databuf);
}
/*
 * op_tds --
 *	Benchmark "ops" transactional-data-store operations against a
 *	private environment: with update != 0, time ops puts of one
 *	fixed key/data pair; otherwise time ops begin/get/commit cycles
 *	against a single pre-inserted pair.  flags are applied via
 *	DB_ENV->set_flags.
 *
 *	Fixes vs. previous revision: (1) gsp was dereferenced without
 *	checking the memp_stat return (indeterminate pointer on failure;
 *	the sibling b_inmem_op_tds already guards this), and (2)
 *	keybuf/databuf were leaked.
 */
void
op_tds(u_int ops, int update, u_int32_t flags)
{
	DB *dbp;
	DBT key, data;
	DB_ENV *dbenv;
	DB_TXN *txn;
	char *keybuf, *databuf;
	DB_MPOOL_STAT *gsp;

	DB_BENCH_ASSERT((keybuf = malloc(keysize)) != NULL);
	DB_BENCH_ASSERT((databuf = malloc(datasize)) != NULL);

	/* One fixed key/data pair used for every operation. */
	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = keybuf;
	key.size = keysize;
	memset(keybuf, 'a', keysize);
	data.data = databuf;
	data.size = datasize;
	memset(databuf, 'b', datasize);

	DB_BENCH_ASSERT(db_env_create(&dbenv, 0) == 0);

	dbenv->set_errfile(dbenv, stderr);

#ifdef DB_AUTO_COMMIT
	DB_BENCH_ASSERT(dbenv->set_flags(dbenv, DB_AUTO_COMMIT, 1) == 0);
#endif
	DB_BENCH_ASSERT(dbenv->set_flags(dbenv, flags, 1) == 0);

	/* On-disk logs need room; in-memory logs must not set lg_max. */
#ifdef DB_LOG_INMEMORY
	if (!(flags & DB_LOG_INMEMORY))
#endif
		DB_BENCH_ASSERT(dbenv->set_lg_max(dbenv, logbufsize * 10) == 0);
	DB_BENCH_ASSERT(dbenv->set_lg_bsize(dbenv, logbufsize) == 0);

	DB_BENCH_ASSERT(dbenv->open(dbenv, "TESTDIR",
	    DB_CREATE | DB_PRIVATE | DB_INIT_LOCK | DB_INIT_LOG |
	    DB_INIT_MPOOL | DB_INIT_TXN, 0666) == 0);

	DB_BENCH_ASSERT(db_create(&dbp, dbenv, 0) == 0);
	DB_BENCH_ASSERT(dbp->set_pagesize(dbp, pagesize) == 0);
	DB_BENCH_ASSERT(
	    dbp->open(dbp, NULL, "a", NULL, DB_BTREE, DB_CREATE, 0666) == 0);

	if (update) {
		(void)dbenv->memp_stat(dbenv, &gsp, NULL, DB_STAT_CLEAR);

		TIMER_START;
		for (; ops > 0; --ops)
			DB_BENCH_ASSERT(
			    dbp->put(dbp, NULL, &key, &data, 0) == 0);
		TIMER_STOP;

		/* Nothing should have been flushed to disk. */
		if (dbenv->memp_stat(dbenv, &gsp, NULL, 0) == 0)
			DB_BENCH_ASSERT(gsp->st_page_out == 0);
	} else {
		DB_BENCH_ASSERT(dbp->put(dbp, NULL, &key, &data, 0) == 0);
		(void)dbenv->memp_stat(dbenv, &gsp, NULL, DB_STAT_CLEAR);

		TIMER_START;
		for (; ops > 0; --ops) {
			DB_BENCH_ASSERT(
			    dbenv->txn_begin(dbenv, NULL, &txn, 0) == 0);
			/* NOTE(review): get passes a NULL txn handle, so
			 * only begin/commit overhead is timed — presumably
			 * intentional; mirrors b_inmem_op_tds. */
			DB_BENCH_ASSERT(
			    dbp->get(dbp, NULL, &key, &data, 0) == 0);
			DB_BENCH_ASSERT(txn->commit(txn, 0) == 0);
		}
		TIMER_STOP;

		/* The single pair must have stayed cached. */
		if (dbenv->memp_stat(dbenv, &gsp, NULL, 0) == 0)
			DB_BENCH_ASSERT(gsp->st_cache_miss == 0);
	}

	DB_BENCH_ASSERT(dbp->close(dbp, 0) == 0);
	DB_BENCH_ASSERT(dbenv->close(dbenv, 0) == 0);

	free(keybuf);
	free(databuf);
}