// // set the threshold value for the agc in db void agcHandler::setThreshold (int16_t s) { if (s < get_db (0)) s = get_db (0); if (s > 0) s = 0; ThresholdValue = (DSPFLOAT)s / 16; }
static notmuch_query_t *get_query(struct nm_ctxdata *data, int writable) { notmuch_database_t *db = NULL; notmuch_query_t *q = NULL; const char *str; if (!data) return NULL; db = get_db(data, writable); str = get_query_string(data); if (!db || !str) goto err; q = notmuch_query_create(db, str); if (!q) goto err; apply_exclude_tags(q); notmuch_query_set_sort(q, NOTMUCH_SORT_NEWEST_FIRST); dprint(2, (debugfile, "nm: query successfully initialized\n")); return q; err: if (!is_longrun(data)) release_db(data); return NULL; }
/**
 * create -- creates a Berkeley DB file with tablename (tn), along with
 * the needed metadata.
 * requires the schema data to be already parsed '-L' option.
 * Returns 0 on success, 1 on failure.
 */
int create(char* tn)
{
	tbl_cache_p tbc;
	table_p tp;
	DB* db;

	tbc = get_table(tn);
	if (!tbc) {
		fprintf(stderr, "[create] Table %s is not supported.\n", tn);
		return 1;
	}

	tp = tbc->dtp;
	db = get_db(tp);   /* opening/creating the DB file is done here */
	if (!db) {
		fprintf(stderr, "[create] Failed to create table %s\n", tn);
		return 1;
	}

	printf("Created table %s\n", tn);
	return 0;
}
/*
 * Tear down the Berkeley DB state at shutdown: close the stats database,
 * attempt to remove it (logged only if the remove succeeds), checkpoint
 * and prune the environment's transaction logs, close the environment,
 * and release policy state.
 */
static void cleanup()
{
    int rc;
    DB *db;
    DB_ENV *dbenv;

    /* Fetch the already-open handles (second arg 0: do not create). */
    rc = get_db(&db, 0);
    assert(! rc);
    rc = get_dbenv(&dbenv, 0);
    assert(! rc);

    if (dbkey.data)
        free(dbkey.data);

    if (db)
        call_db(db->close(db, 0), "DB close");

    if (dbenv) {
        /* DB->remove requires a fresh, never-opened handle; reuse `db`. */
        rc = call_db(db_create(&db, dbenv, 0), "db_create");
        if (!rc)
            if (! db->remove(db, "tls_stats.db", 0, 0))
                syslog(LOG_NOTICE, "Unused database tls_stats.db removed");
        /* Checkpoint (kbyte/min thresholds), then delete obsolete logs. */
        call_db(dbenv->txn_checkpoint(dbenv, 100 * 1024, 24 * 60, 0), "txn_checkpoint");
        call_db(dbenv->log_archive(dbenv, NULL, DB_ARCH_REMOVE), "log_archive");
        call_db(dbenv->close(dbenv, 0), "DB_ENV close");
    }

    policy_cleanup();
}
static Info* berkely_registry_get(void *self, const char* filename) { DB *dbp = NULL; int ret; dbp = get_db(self); g_assert(dbp != NULL); DBT key, data; memset(&key, 0, sizeof(key)); memset(&data, 0, sizeof(data)); key.data = (void*) filename; key.size = strlen(filename) + 1; key.flags = DB_DBT_USERMEM; if ((ret = dbp->get(dbp, NULL, &key, &data, 0)) == 0) { g_debug("db: '%s' key retreived", (char*)key.data); } else { dbp->err(dbp, ret, "DB->get"); g_warning("DB get failed"); } return bab_info_unmarshall(data.data); }
static void* berkely_registry_new(GError **error) { Registry *s = g_memdup(&BerkelyRegistryImpl, sizeof(Registry)); const gchar* db_filename = "my_db.db"; g_debug("Opening DB %s", db_filename); DB *dbp; int ret; ret = db_create(&dbp, NULL, 0); if (ret != 0) { g_error("failed to create db"); } ret = dbp->open(dbp, NULL, db_filename, NULL, DB_BTREE, DB_CREATE, 0); if (ret != 0) { g_error("failed to open db"); } s->data = g_malloc(sizeof(Data)); get_data(s)->db = dbp; g_assert(get_db(s) != NULL); return s; }
/*
 * Apply the whitespace-separated tag operations in `buf` to the notmuch
 * message corresponding to `hdr`, and refresh the header's tag list and
 * display colour.  Returns 0 on success, -1 on failure.
 *
 * Fix: the notmuch_message_t obtained from get_nm_message() was never
 * destroyed, leaking it until the database handle was closed (the
 * sibling remove_filename() destroys its message handle).
 */
int nm_modify_message_tags(CONTEXT *ctx, HEADER *hdr, char *buf)
{
	struct nm_ctxdata *data = get_ctxdata(ctx);
	notmuch_database_t *db = NULL;
	notmuch_message_t *msg = NULL;
	int rc = -1;

	if (!buf || !*buf || !data)
		return -1;

	if (!(db = get_db(data, TRUE)) || !(msg = get_nm_message(db, hdr)))
		goto done;

	dprint(1, (debugfile, "nm: tags modify: '%s'\n", buf));

	update_tags(msg, buf);
	update_header_tags(hdr, msg);
	mutt_set_header_color(ctx, hdr);
	rc = 0;
	hdr->changed = TRUE;
done:
	if (msg)
		notmuch_message_destroy(msg);	/* release the message handle */
	if (!is_longrun(data))
		release_db(data);
	if (hdr->changed)
		ctx->mtime = time(NULL);
	dprint(1, (debugfile, "nm: tags modify done [rc=%d]\n", rc));
	return rc;
}
/* Close the registry's Berkeley DB handle, if one was opened. */
static void berkely_registry_free(void *self)
{
    g_debug("Shutting down berkeley DB");
    DB *handle = get_db(self);
    if (handle == NULL)
        return;
    list_db(handle);
    handle->close(handle, 0);
}
/*
 * Start a long-running notmuch session: open the database once and keep
 * it open across subsequent operations (see is_longrun()/release_db()).
 */
void nm_longrun_init(CONTEXT *ctx, int writable)
{
	struct nm_ctxdata *data = get_ctxdata(ctx);

	if (!data)
		return;
	if (!get_db(data, writable))
		return;

	data->longrun = 1;
	dprint(2, (debugfile, "nm: long run initialized\n"));
}
/*
 * Execute a non-query SQL statement against the shared sqlite handle.
 * On failure the error message and offending SQL are handed to
 * database_error().
 *
 * Fix: `errmsg` was read uninitialized if database_error() inspected it
 * on paths where sqlite3_exec did not set it; initialize to NULL.
 * NOTE(review): sqlite3_exec's errmsg must be released with
 * sqlite3_free() — presumably database_error() does that; confirm.
 */
void update_db(const char *sql)
{
	sqlite3 *db;
	int ret;
	char *errmsg = NULL;

	db = get_db();
	ret = sqlite3_exec(db, sql, NULL, NULL, &errmsg);
	if (ret != SQLITE_OK) {
		database_error(errmsg, sql);
	}
}
/*
 * Emit one DWARF2 function record as a JSON object on `builder`.
 * File-relative addresses in `info` (entry point, prologue end, ranges)
 * are rebased to virtual addresses via `map_event` (addr = file_addr -
 * offset + address).  The entry point also gets a lifetime
 * [beginTStamp, endTStamp): from the time the mapping appeared until it
 * was unmapped, or to the end of the recorded trace.
 * NOTE(review): `defining_object_offset` is unused here — presumably
 * kept for signature parity with sibling output_* helpers; confirm.
 */
static void output_function_object(DebugObject* defining_object,
                                   uintptr_t defining_object_offset,
                                   CH_DBAddrMapEntry* map_event,
                                   CH_DbgDwarf2FunctionInfo* info,
                                   JSON_Builder* builder)
{
  /* translate file address to virtual address(es) and compute the
     lifetime of each mapping */
  JSON_open_object(builder, NULL);
  if (info->entry_point) {
    /* rebase file offset to the mapped virtual address */
    CH_Address virtual_addr = info->entry_point - map_event->offset + map_event->address;
    CH_MemMapHistory* history = find_memory_map_history_for(virtual_addr);
    CH_MemMapInfo mmap_info = get_memory_map_info_for(history, map_event->tstamp);
    /* mapping lives until unmapped, otherwise to the end of the trace */
    CH_TStamp end_tstamp = mmap_info.unmap_operation ? mmap_info.unmap_operation->tstamp : get_db()->header.end_tstamp;
    JSON_append_int(builder, "entryPoint", virtual_addr);
    JSON_append_int(builder, "beginTStamp", map_event->tstamp);
    JSON_append_int(builder, "endTStamp", end_tstamp);
    append_type_key(builder, "typeKey", defining_object, info->type_offset);
  }
  if (info->prologue_end) {
    /* address where the function prologue ends (useful for breakpoints) */
    CH_Address virtual_addr = info->prologue_end - map_event->offset + map_event->address;
    JSON_append_int(builder, "prologueEnd", virtual_addr);
  }
  if (info->name) {
    JSON_append_stringdup(builder, "name", info->name);
  }
  if (info->namespace_prefix) {
    JSON_append_stringdup(builder, "namespacePrefix", info->namespace_prefix);
  }
  if (info->container_prefix) {
    JSON_append_stringdup(builder, "containerPrefix", info->container_prefix);
  }
  if (info->ranges) {
    /* info->ranges is terminated by an entry with length == 0 */
    CH_Range* range;
    JSON_open_array(builder, "ranges");
    for (range = info->ranges; range->length; ++range) {
      CH_Address virtual_addr = range->start - map_event->offset + map_event->address;
      JSON_open_object(builder, NULL);
      JSON_append_int(builder, "start", virtual_addr);
      JSON_append_int(builder, "length", range->length);
      JSON_close_object(builder);
    }
    JSON_close_array(builder);
  }
  output_compilation_unit_info(&info->cu, builder);
  JSON_close_object(builder);
}
/* FIXME should this be wrapped in an explicit transaction? */
/*
 * Dump every greylist triplet in the database to stdout, one
 * tab-separated line per record: crypted flag, client address, sender,
 * recipient, create time, last-access time, block count, pass count.
 * Iteration stops early if `exit_requested` is raised (signal handler).
 */
static void do_dump_triplets()
{
    DB *db;
    DBC *dbcp;
    int rc;
    DBT key = { 0 }, data = { 0 };

    rc = get_db(&db, 1);
    if (rc) {
        fprintf(stderr, "DBD-%d: failed to open database\n", rc);
        return;
    }
    rc = db->cursor(db, 0, &dbcp, 0);
    if (rc)
        fprintf(stderr, "DBD-%d: db->cursor failed: %s\n", rc, db_strerror(rc));
    else {
        while (! exit_requested
               && (rc = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) {
            /*
             * Packed key layout: 1 type byte, then the client address
             * (NUL-terminated string for RAW, fixed-size binary for
             * IP4/IP6), then a NUL-terminated string, then the
             * remaining key bytes written verbatim.
             */
            const char *const start = key.data, *s = start;
            const struct triplet_data *t = data.data;
            printf("%d\t", t->crypted);
            printf("%s\t", db_key_ntop(s));
            /* skip past the address part according to the type byte */
            switch ((enum dbkey_type_enum)*s++) {
            case DBKEY_T_RAW: s += strlen(s) + 1; break;
            case DBKEY_T_IP4: s += IPV4_BITS / 8; break;
            case DBKEY_T_IP6: s += IPV6_BITS / 8; break;
            }
            printf("%s\t", s);
            s += strlen(s) + 1;
            /* rest of the key is not NUL-terminated: bounded write */
            fwrite(s, 1, key.size - (s - start), stdout);
            putchar('\t');
            write_ctime(&t->create_time);
            putchar('\t');
            write_ctime(&t->access_time);
            printf("\t%lu\t%lu\n", t->block_count, t->pass_count);
        }
        /* DB_NOTFOUND is the normal end-of-cursor condition */
        if (rc && rc != DB_NOTFOUND)
            fprintf(stderr, "DBD-%d: dbcp->c_get failed: %s\n", rc, db_strerror(rc));
        rc = dbcp->c_close(dbcp);
        if (rc)
            fprintf(stderr, "DBD-%d: dbcp->c_close failed: %s\n", rc, db_strerror(rc));
    }
}
/* OCaml stub: return the database file size as an int64. */
extern CAMLprim value kc_size(value caml_db)
{
  CAMLparam1(caml_db);
  CAMLlocal1(result);
  KCDB* db = get_db(caml_db);
  result = copy_int64(kcdbsize(db));
  CAMLreturn(result);
}
/* OCaml stub: return the number of records as an int64. */
extern CAMLprim value kc_count(value caml_db)
{
  CAMLparam1(caml_db);
  CAMLlocal1(result);
  KCDB* db = get_db(caml_db);
  result = copy_int64(kcdbcount(db));
  CAMLreturn(result);
}
/*
 * Remove the message file `path` from the notmuch database and delete
 * the file itself.  If the message has duplicate files (same message-id
 * indexed under several paths), every duplicate is unlinked and removed
 * from the database as well.  Runs inside a db transaction when one can
 * be started.  Returns 0 on success (including "nothing to do" for an
 * unknown status), -1 on lookup/transaction failure.
 */
static int remove_filename(struct nm_ctxdata *data, const char *path)
{
	notmuch_status_t st;
	notmuch_filenames_t *ls;
	notmuch_message_t *msg = NULL;
	notmuch_database_t *db = get_db(data, TRUE);	/* writable handle */
	int trans;

	dprint(2, (debugfile, "nm: remove filename '%s'\n", path));

	if (!db)
		return -1;
	st = notmuch_database_find_message_by_filename(db, path, &msg);
	if (st || !msg)
		return -1;

	trans = db_trans_begin(data);
	if (trans < 0)
		return -1;

	/*
	 * note that unlink() is probably unnecessary here, it's already removed
	 * by mh_sync_mailbox_message(), but for sure...
	 */
	st = notmuch_database_remove_message(db, path);
	switch (st) {
	case NOTMUCH_STATUS_SUCCESS:
		/* last (only) file for this message: just unlink it */
		dprint(2, (debugfile, "nm: remove success, call unlink\n"));
		unlink(path);
		break;
	case NOTMUCH_STATUS_DUPLICATE_MESSAGE_ID:
		/* other files share this message-id: remove each of them too */
		dprint(2, (debugfile, "nm: remove succes (duplicate), call unlink\n"));
		unlink(path);
		for (ls = notmuch_message_get_filenames(msg);
		     ls && notmuch_filenames_valid(ls);
		     notmuch_filenames_move_to_next(ls)) {
			path = notmuch_filenames_get(ls);
			dprint(2, (debugfile, "nm: remove duplicate: '%s'\n", path));
			unlink(path);
			notmuch_database_remove_message(db, path);
		}
		break;
	default:
		/* removal failed; leave the file alone */
		dprint(1, (debugfile, "nm: failed to remove '%s' [st=%d]\n", path, (int) st));
		break;
	}

	notmuch_message_destroy(msg);
	if (trans)
		db_trans_end(data);
	return 0;
}
/*
 * OCaml stub: open a cursor on the database and wrap the KCCUR* in a
 * one-field Abstract_tag block.
 *
 * Fix: the freshly allocated block was kept in a plain, unregistered
 * `value` local; register it with CAMLlocal1 so it stays a GC root, as
 * the OCaml FFI rules require for values live across the return macro.
 */
extern CAMLprim value kc_cursor_open(value caml_db)
{
  CAMLparam1(caml_db);
  CAMLlocal1(caml_cursor);
  KCDB* db = get_db(caml_db);
  KCCUR* cur = open_cursor(db);
  caml_cursor = alloc_small(1, Abstract_tag);
  KCCUR_val(caml_cursor) = cur;
  CAMLreturn(caml_cursor);
}
void analyser::find_biggest(){ //find biggest n peaks (n=MAX_PEAKS) std::vector<double> dbs; //get dB's for the peaks for(unsigned int k = 0 ; k < peaks.size();k++){ if(peaks[k]> lower_border && peaks[k]<upper_border) dbs.push_back(get_db(peaks[k])); } peaks.clear(); //copy the biggest peak back to peaks then look for the next biggest for(int j = 0 ; j < MAX_PEAKS ; j++){ double tmp = *(std::max_element(dbs.begin(),dbs.end())); tmp = get_wl(tmp); peaks.push_back(tmp); for(unsigned int i = 0; i < dbs.size();i++){ if(dbs[i] == get_db(tmp)){ dbs.erase(dbs.begin()+ i); } } } }
/*
 * Load every message belonging to the thread of header `h` into the
 * context (query "thread:<id>", newest first, excluded tags applied),
 * then refresh the context. Returns 0 on success, -1 on failure.
 * Prints "No more messages..." when the message count did not grow.
 * NOTE(review): `msg` from get_nm_message() is not destroyed here —
 * looks like a handle leak until the db closes; confirm against the
 * notmuch ownership rules.
 */
int nm_read_entire_thread(CONTEXT *ctx, HEADER *h)
{
	struct nm_ctxdata *data = get_ctxdata(ctx);
	const char *id;
	char *qstr = NULL;
	notmuch_query_t *q = NULL;
	notmuch_database_t *db = NULL;
	notmuch_message_t *msg = NULL;
	int rc = -1;

	if (!data)
		return -1;

	if (!(db = get_db(data, FALSE)) || !(msg = get_nm_message(db, h)))
		goto done;

	dprint(1, (debugfile, "nm: reading entire-thread messages...[current count=%d]\n",
				ctx->msgcount));

	nm_progress_reset(ctx);
	id = notmuch_message_get_thread_id(msg);
	if (!id)
		goto done;
	/* build query string "thread:<id>" */
	append_str_item(&qstr, "thread:", 0);
	append_str_item(&qstr, id, 0);

	q = notmuch_query_create(db, qstr);
	FREE(&qstr);
	if (!q)
		goto done;
	apply_exclude_tags(q);
	notmuch_query_set_sort(q, NOTMUCH_SORT_NEWEST_FIRST);

	read_threads_query(ctx, q, 1, 0);
	ctx->mtime = time(NULL);
	rc = 0;

	/* tell mutt about newly appended messages */
	if (ctx->msgcount > data->oldmsgcount)
		mx_update_context(ctx, ctx->msgcount - data->oldmsgcount);
done:
	if (q)
		notmuch_query_destroy(q);
	if (!is_longrun(data))
		release_db(data);

	if (ctx->msgcount == data->oldmsgcount)
		mutt_message _("No more messages in the thread.");

	data->oldmsgcount = 0;
	dprint(1, (debugfile, "nm: reading entire-thread messages... done [rc=%d, count=%d]\n",
				rc, ctx->msgcount));
	return rc;
}
/* OCaml stub: abort the current transaction (commit flag = 0). */
extern CAMLprim value kc_abort_tran(value caml_db)
{
  CAMLparam1(caml_db);
  KCDB* db = get_db(caml_db);
  int ok = kcdbendtran(db, 0);
  if (! ok) {
    RAISE(kcdbemsg(db));
  }
  CAMLreturn(Val_unit);
}
/* OCaml stub: begin a transaction with physical synchronization. */
extern CAMLprim value kc_begin_tran_sync(value caml_db)
{
  CAMLparam1(caml_db);
  KCDB* db = get_db(caml_db);
  int ok = kcdbbegintran(db, 1);
  if (! ok) {
    RAISE(kcdbemsg(db));
  }
  CAMLreturn(Val_unit);
}
//简单测试 void test_single_dump(MEM_POOL* mem_pool) { system("rm -rf /tmp/binlog"); system("rm -rf /tmp/log"); system("rm -rf /tmp/storage"); db_initialize(mem_pool); //构造数据 uint16 sid = 0; uint32 docid = 0; int32 ret; uint16 field_count = 5; enum field_types types[5] = {HI_TYPE_STRING,HI_TYPE_LONG,HI_TYPE_TINY,HI_TYPE_LONGLONG,HI_TYPE_DOUBLE}; //将第一个段填满 while(sid == 0) { ret = db_insert("hello", &sid, &docid,get_row_data(field_count,types,mem_pool), DOCID_BY_SELF, mem_pool); ASSERT_EQ(MILE_RETURN_SUCCESS,ret); } sid = 0; docid = 10; //验证是否可查询 uint32 i; for(i=0; i<docid; i++) ASSERT_EQ(1,verify_row_data(db_data_query_row("hello",sid,i,mem_pool),field_count,types)); ret = db_unload_segment("hello",sid,mem_pool); ASSERT_EQ(MILE_RETURN_SUCCESS,ret); //验证是否可查询 for(i=0; i<docid; i++) ASSERT_EQ(0,verify_row_data(db_data_query_row("hello",sid,i,mem_pool),field_count,types)); char segment_file_name[FILENAME_MAX_LENGTH]; memset(segment_file_name,0,sizeof(segment_file_name)); sprintf(segment_file_name,"%s/hello/hello_segment_000000_dump",get_db()->work_space); ret = db_load_segment("hello",sid,segment_file_name,mem_pool); //验证是否可查询 for(i=0; i<docid; i++) ASSERT_EQ(1,verify_row_data(db_data_query_row("hello",sid,i,mem_pool),field_count,types)); db_release(); }
/* OCaml stub: store key -> val, overwriting any existing record. */
extern CAMLprim value kc_set(value caml_db, value key, value val)
{
  CAMLparam3(caml_db, key, val);
  KCDB* db = get_db(caml_db);
  int ok = kcdbset(db,
                   String_val(key), caml_string_length(key),
                   String_val(val), caml_string_length(val));
  if (! ok) {
    RAISE(kcdbemsg(db));
  }
  CAMLreturn(Val_unit);
}
/* OCaml stub: delete a record; a missing key (KCENOREC) is not an error. */
extern CAMLprim value kc_remove(value caml_db, value key)
{
  CAMLparam2(caml_db, key);
  KCDB* db = get_db(caml_db);
  int ok = kcdbremove(db, String_val(key), caml_string_length(key));
  if (! ok && kcdbecode(db) != KCENOREC) {
    RAISE(kcdbemsg(db));
  }
  CAMLreturn(Val_unit);
}
/* OCaml stub: test key existence via the visitor-based kcdbaccept,
 * using exists_some_value / exists_no_value to fill in `val`. */
extern CAMLprim value kc_exists(value caml_db, value key)
{
  CAMLparam2(caml_db, key);
  CAMLlocal1(val);
  KCDB* db = get_db(caml_db);
  int ok = kcdbaccept(db,
                      String_val(key), caml_string_length(key),
                      exists_some_value, exists_no_value,
                      &val, 0);
  if (! ok) {
    RAISE(kcdbemsg(db));
  }
  CAMLreturn(val);
}
/* OCaml stub: overwrite an existing record only; Not_found if absent. */
extern CAMLprim value kc_replace(value caml_db, value key, value val)
{
  CAMLparam3(caml_db, key, val);
  KCDB* db = get_db(caml_db);
  int ok = kcdbreplace(db,
                       String_val(key), caml_string_length(key),
                       String_val(val), caml_string_length(val));
  if (! ok) {
    if (kcdbecode(db) == KCENOREC)
      raise_not_found();
    else
      RAISE(kcdbemsg(db));
  }
  CAMLreturn(Val_unit);
}
/* OCaml stub: insert a new record; Invalid_argument if the key exists. */
extern CAMLprim value kc_add(value caml_db, value key, value val)
{
  CAMLparam3(caml_db, key, val);
  KCDB* db = get_db(caml_db);
  int ok = kcdbadd(db,
                   String_val(key), caml_string_length(key),
                   String_val(val), caml_string_length(val));
  if (! ok) {
    if (kcdbecode(db) == KCEDUPREC)
      caml_invalid_argument("Entry already exists");
    else
      RAISE(kcdbemsg(db));
  }
  CAMLreturn(Val_unit);
}
/* OCaml stub: fetch the value for a key via kcdbaccept's visitor
 * callbacks; raises Not_found when the visitors left `val` unset. */
extern CAMLprim value kc_find(value caml_db, value key)
{
  CAMLparam2(caml_db, key);
  CAMLlocal1(val);
  KCDB* db = get_db(caml_db);
  int ok = kcdbaccept(db,
                      String_val(key), caml_string_length(key),
                      get_the_value, found_no_value,
                      &val, 0);
  if (! ok) {
    RAISE(kcdbemsg(db));
  }
  if ((char*)val == NULL)
    raise_not_found();
  CAMLreturn(val);
}
/* OCaml stub: return the database file path as an OCaml string.
 * The C string returned by kcdbpath must be released with kcfree. */
extern CAMLprim value kc_path(value caml_db)
{
  CAMLparam1(caml_db);
  CAMLlocal1(result);
  KCDB* db = get_db(caml_db);
  const char* path = kcdbpath(db);
  if (! path) {
    RAISE(kcdbemsg(db));
  }
  result = caml_copy_string(path);
  kcfree((void*) path);
  CAMLreturn(result);
}
/* OCaml stub: return the database status report as an OCaml string.
 * The C string returned by kcdbstatus must be released with kcfree. */
extern CAMLprim value kc_status(value caml_db)
{
  CAMLparam1(caml_db);
  CAMLlocal1(result);
  KCDB* db = get_db(caml_db);
  const char* status = kcdbstatus(db);
  if (! status) {
    RAISE(kcdbemsg(db));
  }
  result = caml_copy_string(status);
  kcfree((void*) status);
  CAMLreturn(result);
}
/* * 从DATA_IN中读取用户的输入,比如scanf 需要的输入就从这里得到 */ void get_data_input(char *code_id) { char sql[SQL_LEN] = {0}; char *errmsg; int ret; sqlite3 *db; FILE *fp = fopen(DATA_IN, "w"); if (fp == NULL) { write_log(INT_RE, 1, "fopen error(in get_data_input)"); exit(-1); } sprintf(sql, "select data_input from code where code_id='%s'", code_id); db = get_db(); ret = sqlite3_exec(db, (const char*)sql, get_data_input_cb,\ (void*)fp, &errmsg); fclose(fp); if (ret != SQLITE_OK) { database_error(errmsg, sql); } }