// Cursor "pget": fetch secondary key, primary key, and data through the
// underlying DBC handle, reporting failures the same way Dbc::get does.
int Dbc::pget(Dbt* key, Dbt *pkey, Dbt *data, u_int32_t _flags)
{
	DBC *dbc = this;
	int ret = dbc->pget(dbc, key, pkey, data, _flags);

	/* Logic is the same as for Dbc::get - reusing macro. */
	if (!DB_RETOK_DBCGET(ret)) {
		DbEnv *env = DbEnv::get_DbEnv(dbc->dbenv);
		bool too_small = (ret == DB_BUFFER_SMALL);

		if (too_small && DB_OVERFLOWED_DBT(key))
			DB_ERROR_DBT(env, "Dbc::pget", key, ON_ERROR_UNKNOWN);
		else if (too_small && DB_OVERFLOWED_DBT(data))
			DB_ERROR_DBT(env, "Dbc::pget", data, ON_ERROR_UNKNOWN);
		else
			DB_ERROR(env, "Dbc::pget", ret, ON_ERROR_UNKNOWN);
	}
	return (ret);
}
bool service_list(struct client *cli, const char *user) { GList *files = NULL, *content = NULL; char *s; enum errcode err = InternalError; int rc; bool rcb; DB_TXN *txn = NULL; DBC *cur = NULL; DB_ENV *dbenv = tdbrep.tdb.env; DB *bidx = tdbrep.tdb.buckets_idx; DBT skey, pkey, pval; if (asprintf(&s, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n" "<ListAllMyBucketsResult xmlns=\"http://indy.yyz.us/doc/2006-03-01/\">\r\n" " <Owner>\r\n" " <ID>%s</ID>\r\n" " <DisplayName>%s</DisplayName>\r\n" " </Owner>\r\n" " <Buckets>\r\n", user, user) < 0) goto err_out; content = g_list_append(content, s); /* open transaction, search cursor */ rc = dbenv->txn_begin(dbenv, NULL, &txn, 0); if (rc) { dbenv->err(dbenv, rc, "DB_ENV->txn_begin"); goto err_out_content; } rc = bidx->cursor(bidx, txn, &cur, 0); if (rc) { bidx->err(bidx, rc, "bidx->cursor"); goto err_out_content; } memset(&skey, 0, sizeof(skey)); memset(&pkey, 0, sizeof(pkey)); memset(&pval, 0, sizeof(pval)); skey.data = (char *) user; skey.size = strlen(user) + 1; /* FIXME: Use of DB_NEXT rather than DB_SET to begin search * means we iterate through entire db, rather than * starting at the first matching key. 
*/ /* loop through matching buckets, if any */ while (1) { char timestr[64]; struct db_bucket_ent *ent; rc = cur->pget(cur, &skey, &pkey, &pval, DB_NEXT); if (rc) break; ent = pval.data; s = g_markup_printf_escaped( " <Bucket>\r\n" " <Name>%s</Name>\r\n" " <CreationDate>%s</CreationDate>\r\n" " </Bucket>\r\n", ent->name, hutil_time2str(timestr, sizeof(timestr), GUINT64_FROM_LE(ent->time_create))); if (!s) goto err_out_content; content = g_list_append(content, s); } if (rc != DB_NOTFOUND) bidx->err(bidx, rc, "service_list iter"); /* close cursor, transaction */ rc = cur->close(cur); if (rc) bidx->err(bidx, rc, "bidx->cursor close"); rc = txn->commit(txn, 0); if (rc) dbenv->err(dbenv, rc, "DB_ENV->txn_commit"); if (asprintf(&s, " </Buckets>\r\n" "</ListAllMyBucketsResult>\r\n") < 0) goto err_out_content; content = g_list_append(content, s); rcb = cli_resp_xml(cli, 200, content); strlist_free(files); g_list_free(content); return rcb; err_out_content: strlist_free(content); err_out: strlist_free(files); return cli_err(cli, err); }
/*
 * Bulk-load rows from SQLite into the Berkeley DB primary database,
 * then open the secondary indices and print sanity statistics.
 *
 * Returns 0 on success, 1 if the primary cursor cannot be opened or
 * the SQLite statement cannot be prepared.
 *
 * NOTE(review): sql_table is currently unused; the global sql_query is
 * executed instead -- confirm intent with the caller.
 */
int input_load(DB *db, sqlite3 *sql_db, char *sql_table)
{
	int ret;
	struct sqlite_workspace w;
	DB *sdb;
	DBC *cursor;
	DBT pkey;
	sqlite3_stmt *ppStmt;
	DbRecord recordp;
	DB_BTREE_STAT *stat;
	db_recno_t count = 0;
	db_recno_t max = 0;
	char big_block[128];

	memset(&pkey, 0, sizeof(pkey));
	big_block[0] = '\0'; /* BUGFIX: was printed uninitialized when the index is empty */
	w.count = &count;

	/* Find the current highest primary key so new records continue it. */
	if ((ret = db->cursor(db, NULL, &cursor, 0)) != 0) {
		dbenv->err(dbenv, ret, "DB->cursor");
		return (1);
	}
	memset(&(w.key), 0, sizeof(w.key));
	memset(&(w.data), 0, sizeof(w.data));
	if (DB_NOTFOUND == (ret = cursor->get(cursor, &(w.key), &(w.data), DB_LAST)))
		w.primary_key = 0;
	else {
		w.primary_key = *(u_int32_t *)(w.data.data);
		w.primary_key++;
	}
	ret = cursor->close(cursor);

	/* Pull every row out of SQLite and write it to the primary db. */
	if ((ret = sqlite3_prepare_v2(sql_db, sql_query, -1, &ppStmt, NULL)) != SQLITE_OK) {
		/* BUGFIX: failure was ignored and a NULL stmt fed to sqlite3_step */
		fprintf(stderr, "sqlite3_prepare_v2 failed: %d\n", ret);
		return (1);
	}
	while (SQLITE_ROW == (ret = sqlite3_step(ppStmt))) {
		memcpy(&recordp, &DbRecord_base, sizeof(DbRecord));
		build_record(ppStmt, &recordp, &w);
		DbRecord_write(&recordp, db, &w);
		if ((count++ % 100000) == 0) {
			printf("%lu records processed...\n", (ulong)count);
			db->sync(db, 0); /* periodic flush to keep the cache bounded */
		}
	}
	count = 0;
	sqlite3_finalize(ppStmt);

	/* Dump the last record and primary key count as a sanity check. */
	db->cursor(db, NULL, &cursor, 0);
	cursor->get(cursor, &(w.key), &(w.data), DB_LAST);
	DbRecord_dump(w.data.data);
	cursor->close(cursor); /* BUGFIX: this cursor handle was leaked */
	db->stat(db, NULL, &stat, 0);
	printf("primary nkeys: %lu\n", (u_long)(stat->bt_nkeys));
	free(stat);

	/* block_idx secondary: find the block with the most duplicate entries. */
	sqlite_db_secondary_open(db, &sdb, "block_idx", 8*1024, DB_DUPSORT,
				 blocking_callback, compare_uint32);
	sdb->stat(sdb, NULL, &stat, 0);
	printf("block_idx keys: %lu\n", (u_long)(stat->bt_nkeys));
	free(stat);

	sdb->cursor(sdb, NULL, &cursor, 0);
	while (DB_NOTFOUND != cursor->pget(cursor, &(w.key), &pkey, &(w.data), DB_NEXT)) {
		cursor->count(cursor, &count, 0);
		if (count > max) {
			size_t len = w.key.size;

			/* BUGFIX: unbounded memcpy could overflow big_block[128] */
			if (len >= sizeof(big_block))
				len = sizeof(big_block) - 1;
			max = count;
			memcpy(big_block, w.key.data, len);
			big_block[len] = '\0';
		}
	}
	cursor->close(cursor);
	printf("Biggest block: %s\n", big_block);
	printf("%lu records.\n", (u_long)max); /* BUGFIX: "%u" with size_t arg was UB */
	count_blocks(sdb);
	sdb->close(sdb, 0);

	/* "idx" secondary: report its key count only. */
	sqlite_db_secondary_open(db, &sdb, "idx", 8*1024, 0, index_callback, NULL);
	ret = sdb->stat(sdb, NULL, &stat, 0);
	printf("%d\n", ret);
	printf("idx_keys: %lu\n", (u_long)(stat->bt_nkeys));
	free(stat);
	sdb->close(sdb, 0);
	return (0);
}