bool Tree::ParseFromHdfString(const std::string &serialized) {
  Clear();
  if (serialized.empty()) {
    return true;
  }

  HDF *hdf;
  NEOERR *err = hdf_init(&hdf);
  if (err != STATUS_OK) {
    nerr_ignore(&err);
    return false;
  }

  err = hdf_read_string(hdf, serialized.c_str());
  if (err != STATUS_OK) {
    nerr_ignore(&err);
    hdf_destroy(&hdf);
    return false;
  }

  HDF *child = hdf_obj_child(hdf);
  if (!child) {
    hdf_destroy(&hdf);
    return false;
  }

  if (!ParseFromHdfInternal(child)) {
    hdf_destroy(&hdf);
    return false;
  }

  hdf_destroy(&hdf);
  return true;
}
bool Tree::SerializeToHdfFile(const std::string &filename,
                              bool only_nodes_with_value) const {
  NEOERR *err;
  HDF *hdf;

  err = hdf_init(&hdf);
  if (err != STATUS_OK) {
    nerr_ignore(&err);
    return false;
  }

  if (!SerializeToHdfInternal(hdf, only_nodes_with_value)) {
    hdf_destroy(&hdf);
    return false;
  }

  err = hdf_write_file(hdf, filename.c_str());
  if (err != STATUS_OK) {
    nerr_ignore(&err);
    hdf_destroy(&hdf);
    return false;
  }

  hdf_destroy(&hdf);
  return true;
}
bool Tree::SerializeToHdfString(std::string *serialized,
                                bool only_nodes_with_value) const {
  if (!serialized) {
    return false;
  }

  NEOERR *err;
  HDF *hdf;

  err = hdf_init(&hdf);
  if (err != STATUS_OK) {
    nerr_ignore(&err);
    return false;
  }

  if (!SerializeToHdfInternal(hdf, only_nodes_with_value)) {
    hdf_destroy(&hdf);
    return false;
  }

  char *str;
  err = hdf_write_string(hdf, &str);
  if (err != STATUS_OK) {
    nerr_ignore(&err);
    hdf_destroy(&hdf);
    return false;
  }

  serialized->assign(str);
  hdf_destroy(&hdf);
  free(str);
  return true;
}
bool Tree::ParseFromHdfFile(const std::string &filename) {
  Clear();

  HDF *hdf;
  NEOERR *err = hdf_init(&hdf);
  if (err != STATUS_OK) {
    nerr_ignore(&err);
    return false;
  }

  err = hdf_read_file(hdf, filename.c_str());
  if (err != STATUS_OK) {
    nerr_ignore(&err);
    hdf_destroy(&hdf);
    return false;
  }

  HDF *child = hdf_obj_child(hdf);
  if (!child) {
    hdf_destroy(&hdf);
    return false;
  }

  if (!ParseFromHdfInternal(child)) {
    hdf_destroy(&hdf);
    return false;
  }

  hdf_destroy(&hdf);
  return true;
}
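/*
 * The four Tree:: wrappers above share one discipline: every exit path
 * after a successful hdf_init() must call hdf_destroy(), and every
 * NEOERR* that is not propagated must be released with nerr_ignore().
 * A minimal C sketch of that pattern, using only calls that appear in
 * the snippets in this section (the function name and the placeholder
 * walk are illustrative, not from any of these projects):
 */
static int load_hdf_string(const char *s)
{
  HDF *hdf;
  NEOERR *err = hdf_init(&hdf);
  if (err != STATUS_OK) {
    nerr_ignore(&err);       /* release the error object itself */
    return 0;
  }
  err = hdf_read_string(hdf, s);
  if (err != STATUS_OK) {
    nerr_ignore(&err);
    hdf_destroy(&hdf);       /* free the tree on the failure path too */
    return 0;
  }
  /* ... walk the tree via hdf_obj_child()/hdf_obj_next() ... */
  hdf_destroy(&hdf);
  return 1;
}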
void msqueue_entry_destroy(struct msqueue_entry *e) {
  if (!e) return;
  if (e->ename) free(e->ename);
  if (e->cmd) free(e->cmd);
  hdf_destroy(&e->hdfrcv);
  hdf_destroy(&e->hdfsnd);
  free(e);
}
static int diary_handle_feed_rss(request_rec *r, diary_conf *conf) {
  HDF *hdf;
  CSPARSE *cs;
  NEOERR *cs_err;
  STRING cs_err_str;

  ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "diary_handle_feed_rss()");

  hdf_init(&hdf);
  hdf_set_value(hdf, "hdf.loadpaths.1", conf->path);
  hdf_set_value(hdf, "diary.title", conf->title);
  hdf_set_value(hdf, "diary.uri", conf->uri);

  cs_err = hdf_read_file(hdf, INDEX_HDF);
  if (cs_err) {
    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "cannot read index.hdf.");
    hdf_destroy(&hdf);
    return HTTP_INTERNAL_SERVER_ERROR;
  }
  //hdf_dump(hdf, NULL);

  cs_err = cs_init(&cs, hdf);
  if (cs_err) {
    string_init(&cs_err_str);
    nerr_error_string(cs_err, &cs_err_str);
    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                  "error at cs_init(): %s", cs_err_str.buf);
    cs_destroy(&cs);
    hdf_destroy(&hdf);
    return HTTP_INTERNAL_SERVER_ERROR;
  }
  cgi_register_strfuncs(cs);

  cs_err = cs_parse_string(cs, strdup(RSS_TMPL), RSS_TMPL_LEN);
  if (cs_err) {
    string_init(&cs_err_str);
    nerr_error_string(cs_err, &cs_err_str);
    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                  "error in cs_parse_string(): %s", cs_err_str.buf);
    cs_destroy(&cs);
    hdf_destroy(&hdf);
    return HTTP_INTERNAL_SERVER_ERROR;
  }

  r->content_type = "application/rss+xml";
  cs_render(cs, r, diary_cs_render_cb);

  cs_destroy(&cs);
  hdf_destroy(&hdf);
  return OK;
}
int main(int argc, char *argv[]) {
  NEOERR *err;
  HDF *hdf, *h2;

  err = hdf_init(&hdf);
  if (err != STATUS_OK) {
    nerr_log_error(err);
    return -1;
  }

  err = hdf_set_value(hdf, "CGI.Foo", "Bar");
  if (err) {
    nerr_log_error(err);
    return -1;
  }
  err = hdf_set_value(hdf, "CGI.Foo", "Baz");
  if (err) {
    nerr_log_error(err);
    return -1;
  }

  h2 = hdf_get_obj(hdf, "CGI");
  err = hdf_set_value(h2, "Foo", "Bang");

  hdf_dump(hdf, NULL);
  hdf_destroy(&hdf);
  return 0;
}
char* mmg_get_valuef(mmg_conn *db, char *dsn, char *key, int skip,
                     char *qfmt, ...) {
  HDF *tmpnode;
  char *val, *querys, sels[256];
  va_list ap;
  NEOERR *err;

  hdf_init(&tmpnode);

  va_start(ap, qfmt);
  querys = vsprintf_alloc(qfmt, ap);
  va_end(ap);
  if (!querys) {
    mtc_err("Unable to allocate mem for query string");
    return NULL;
  }

  snprintf(sels, sizeof(sels), "{'%s': 1}", key);

  err = mmg_prepare(db, MMG_FLAG_EMPTY, skip, 1, NULL, sels, querys);
  RETURN_V_NOK(err, NULL);

  err = mmg_query(db, dsn, NULL, tmpnode);
  RETURN_V_NOK(err, NULL);

  err = hdf_get_copy(tmpnode, key, &val, NULL);
  RETURN_V_NOK(err, NULL);

  hdf_destroy(&tmpnode);
  SAFE_FREE(querys);
  return val;
}
/* load a specified version of the file, version -1 is latest */
NEOERR * rcfs_load (const char *path, int version, char **data)
{
  NEOERR *err;
  char fpath[_POSIX_PATH_MAX];

  if (version == -1)
  {
    HDF *meta, *vers;
    int x;

    err = rcfs_meta_load (path, &meta);
    if (err) return nerr_pass (err);
    for (vers = hdf_get_child (meta, "Versions");
         vers;
         vers = hdf_obj_next (vers))
    {
      x = atoi (hdf_obj_name (vers));
      if (x > version) version = x;
    }
    hdf_destroy (&meta);
  }
  snprintf (fpath, sizeof (fpath), "%s,%d", path, version);
  err = ne_load_file (fpath, data);
  return nerr_pass (err);
}
static void p_hdf_dealloc (HDFObject *ho)
{
  /* ne_warn("deallocating hdf: %X", ho); */
  if (ho->data && ho->dealloc)
  {
    hdf_destroy (&(ho->data));
  }
  PyObject_DEL(ho);
}
void session_destroy(session_t **ses) {
  session_t *lses;

  if (ses == NULL) return;
  lses = *ses;
  if (lses == NULL) return;

  SAFE_FREE(lses->mname);
  SAFE_FREE(lses->dataer);
  SAFE_FREE(lses->render);

  hdf_destroy(&lses->province);
  hdf_destroy(&lses->city);

  free(lses);
  *ses = NULL;  /* clear the caller's pointer, not just the local copy */
}
int main(void) {
  HDF *hdf_1, *hdf_2;
  HDF *cur_node, *last_node;

  hdf_init(&hdf_1);
  hdf_read_file(hdf_1, "hdf_copy_test.hdf");
  hdf_dump(hdf_1, NULL);

  cur_node = hdf_get_obj(hdf_1, "Chart");
  last_node = cur_node;
  cur_node = hdf_get_obj(cur_node, "next_stage");
  while (hdf_get_obj(cur_node, "next_stage") &&
         strcmp(hdf_get_value(cur_node, "Bucket.FieldId", ""), "QUEUE")) {
    last_node = cur_node;
    cur_node = hdf_get_obj(cur_node, "next_stage");
  }
  if (hdf_get_obj(cur_node, "next_stage")) {
    hdf_copy(hdf_1, "TempHolderPlace", hdf_get_obj(cur_node, "next_stage"));
  }

  ne_warn("Delete tree from node: %s", hdf_obj_name(last_node));
  hdf_remove_tree(last_node, "next_stage");
  hdf_dump(hdf_1, NULL);
  fprintf(stderr, "-----------------\n");
  hdf_copy(last_node, "next_stage", hdf_get_obj(hdf_1, "TempHolderPlace"));
  hdf_dump(hdf_1, NULL);

  /* Test copy and destroy, make sure we actually copy everything and
   * don't reference anything */
  hdf_init(&hdf_2);
  hdf_copy(hdf_2, "", hdf_1);
  hdf_destroy(&hdf_1);
  hdf_dump(hdf_2, NULL);
  hdf_destroy(&hdf_2);

  return 0;
}
static NEOERR* rend_blog(HASH *dbh, HASH *tplh, int bid) {
  CSPARSE *cs = NULL;
  HDF *hdf, *dhdf;
  STRING str;
  NEOERR *err;
  char fname[_POSIX_PATH_MAX];

  if (!dbh || !tplh || bid < 0) return nerr_raise(NERR_ASSERT, "parameter null");

  cs = (CSPARSE*)hash_lookup(tplh, "blog");
  dhdf = (HDF*)hash_lookup(tplh, "blog_hdf");
  if (!cs || !dhdf) return nerr_raise(LERR_MISS_TPL, "blog_index not found");

  err = hdf_init(&hdf);
  if (err != STATUS_OK) return nerr_pass(err);
  string_init(&str);  /* so string_clear() at done: is safe on every path */

  hdf_copy(hdf, NULL, dhdf);
  ltpl_prepare_rend(hdf, "layout.html");
  hdf_set_int_value(hdf, PRE_QUERY".bid", bid);

  err = blog_static_get(hdf, dbh);
  if (err != STATUS_OK) goto done;

  hdf_set_copy(hdf, PRE_LAYOUT".title", PRE_OUTPUT".blog.title");

  cs->hdf = hdf;
  err = cs_render(cs, &str, mcs_strcb);
  if (err != STATUS_OK) goto done;

  snprintf(fname, sizeof(fname), "%s%d/%d.html",
           PATH_BLOG, bid % BLOG_SUBDIR_NUM, bid);
  err = mutil_makesure_dir(fname);
  if (err != STATUS_OK) goto done;
  err = mcs_str2file(str, fname);
  if (err != STATUS_OK) goto done;

#ifdef DEBUG_HDF
  hdf_write_file(hdf, TC_ROOT"hdf.blg");
#endif

done:
  hdf_destroy(&hdf);
  cs->hdf = NULL;
  string_clear(&str);
  return nerr_pass(err);
}
int main(int argc, char **argv, char **envp) {
  char key[10];
  HDF *bignode;

  mtc_init("cshdf", 7);
  hdf_init(&bignode);

  for (int i = 0; i < 5003929; i++) {
    mstr_rand_string_with_len(key, 10);
    hdf_set_valuef(bignode, "%s.today=10", key);
    hdf_set_valuef(bignode, "%s.toweek=11", key);
    hdf_set_valuef(bignode, "%s.tomonth=12", key);
    hdf_set_valuef(bignode, "%s.total=234", key);
    if (i % 10000 == 0) printf("%d\n", i);
  }
  //hdf_dump(bignode, NULL);
  printf("child num %d\n", mcs_get_child_num(bignode, NULL));

  int count = 0;
  mtimer_start();
  HDF *cnode = hdf_obj_child(bignode);
  while (cnode) {
    char *name = hdf_obj_name(cnode);
    if (mcs_get_int_valuef(bignode, 0, "%s.today", name) != 10) printf("error\n");
    if (mcs_get_int_valuef(bignode, 0, "%s.toweek", name) != 11) printf("error\n");
    if (mcs_get_int_valuef(bignode, 0, "%s.tomonth", name) != 12) printf("error\n");
    if (mcs_get_int_valuef(bignode, 0, "%s.total", name) != 234) printf("error\n");
    count++;
    cnode = hdf_obj_next(cnode);
  }
  mtimer_stop("get time");
  printf("get child count %d\n", count);

  hdf_destroy(&bignode);
  return 0;
}
int main(int argc, char **argv, char **envp) {
  char *s = "{\"a\": [{\"aaa\": 12}, 2], \"b\": \"xxx\"}";
  //char *s = "f**k";
  HDF *node;

  hdf_init(&node);
  //mjson_string_to_hdf(hdf_get_obj(node, "s"), s);
  hdf_write_file(node, "x.hdf");
  hdf_destroy(&node);

  return 0;
}
void formatter_kml_split_output(formatter_split_t *obj, char *filename) {
  FILE *fp = fopen(filename, "w");
  CSPARSE *csparse = NULL;  /* NULL so cleanup is safe if we fail early */
  HDF *hdf = NULL;
  NEOERR *err;
  int stage = 0;

  if ((err = hdf_init(&hdf)) != STATUS_OK) {
    goto error;
  }

  coordinate_subset_t *subset = obj->set->first_subset;
  int16_t i = 0;
  while (subset) {
    _cs_set_valuef(hdf, "tracks.%d.colour=%s", i, kml_colours[i % 9]);
    coordinate_t *coordinate = subset->first;
    int16_t j = 0;
    while (coordinate && coordinate != subset->last) {
      _cs_set_valuef(hdf, "tracks.%d.points.%d.lng=%f", i, j, coordinate->lng);
      _cs_set_valuef(hdf, "tracks.%d.points.%d.lat=%f", i, j, coordinate->lat);
      _cs_set_valuef(hdf, "tracks.%d.points.%d.ele=%d", i, j, coordinate->ele);
      _cs_set_valuef(hdf, "tracks.%d.points.%d.time=%f", i, j, coordinate->timestamp);
      coordinate = coordinate->next;
      j++;
    }
    subset = subset->next;
    i++;
  }

  if ((err = cs_init(&csparse, hdf)) != STATUS_OK ||
      (err = cs_parse_file(csparse, "formatter/templates/flight.split.kml.cs.xml")) != STATUS_OK ||
      (err = cs_render(csparse, fp, cs_fwrite)) != STATUS_OK) {
    goto error;
  }
  goto end;

error:
  nerr_log_error(err);

end:
  hdf_destroy(&hdf);
  cs_destroy(&csparse);
  if (fp) fclose(fp);
}
int main(void) {
  GC_INIT();
  TF_Buffer *buffer = TF_GetAllOpList();
  Tensorflow__OpList *op_list =
    tensorflow__op_list__unpack(NULL, buffer->length, buffer->data);
  HDF *hdf;
  hdf_init(&hdf);

  for (int i = 0; i < op_list->n_op; i++) {
    struct _Tensorflow__OpDef *op = op_list->op[i];
    char variable[256];
    char value[256];
    snprintf(variable, 256, "Op.%s.name", op->name);
    snprintf(value, 256, "tf-%s", kebab_case(op->name));
    hdf_set_value(hdf, variable, value);
    for (int j = 0; j < op->n_input_arg; j++) {
      Tensorflow__OpDef__ArgDef *arg = op->input_arg[j];
      snprintf(variable, 256, "Op.%s.input_arg.%s", op->name, arg->name);
      const char *multiple = arg->number_attr && *arg->number_attr ? "list" : "single";
      hdf_set_value(hdf, variable, multiple);
    }
    for (int j = 0; j < op->n_attr; j++) {
      Tensorflow__OpDef__AttrDef *attr = op->attr[j];
      snprintf(variable, 256, "Op.%s.attr.%s", op->name, attr->name);
      snprintf(value, 256, "%s", attr->type);
      hdf_set_value(hdf, variable, value);
    }
    snprintf(variable, 256, "Op.%s.n_output", op->name);
    snprintf(value, 256, "%d", op->n_output_arg);
    hdf_set_value(hdf, variable, value);
  }

  CSPARSE *parse;
  cs_init(&parse, hdf);
  cs_parse_file(parse, "tensorflow.scm.in");
  cs_render(parse, stdout, output);
  cs_destroy(&parse);
  hdf_destroy(&hdf);

  tensorflow__op_list__free_unpacked(op_list, NULL);
  TF_DeleteBuffer(buffer);
  return 0;
}
NEOERR * rcfs_meta_load (const char *path, HDF **meta)
{
  NEOERR *err;
  char fpath[_POSIX_PATH_MAX];
  HDF *m;

  snprintf (fpath, sizeof(fpath), "%s,log", path);

  err = hdf_init (&m);
  if (err) return nerr_pass (err);
  err = hdf_read_file (m, fpath);
  if (err)
  {
    hdf_destroy (&m);
    return nerr_pass (err);
  }
  *meta = m;

  return STATUS_OK;
}
int mmg_get_int_valuef(mmg_conn *db, char *dsn, char *key,
                       int skip, int limit, char *qfmt, ...) {
  HDF *tmpnode, *node;
  char *querys, sels[256];
  int val;
  va_list ap;
  NEOERR *err;

  hdf_init(&tmpnode);

  va_start(ap, qfmt);
  querys = vsprintf_alloc(qfmt, ap);
  va_end(ap);
  if (!querys) {
    mtc_err("Unable to allocate mem for query string");
    return 0;
  }

  snprintf(sels, sizeof(sels), "{'%s': 1}", key);

  err = mmg_prepare(db, MMG_FLAG_EMPTY, skip, limit, NULL, sels, querys);
  RETURN_V_NOK(err, 0);

  err = mmg_query(db, dsn, NULL, tmpnode);
  RETURN_V_NOK(err, 0);

  val = 0;
  if (hdf_get_valuef(tmpnode, "0.%s", key)) {
    /* multi-row result: sum the key across child nodes */
    node = hdf_obj_child(tmpnode);
    while (node) {
      val += hdf_get_int_value(node, key, 0);
      node = hdf_obj_next(node);
    }
  } else {
    val = hdf_get_int_value(tmpnode, key, 0);
  }

  hdf_destroy(&tmpnode);
  SAFE_FREE(querys);
  return val;
}
static void wiki_output(MMIOT *doc, request_rec *r) {
  char *title;
  int ret;
  int size;
  char *p;
  wiki_conf *conf;
  list_t *css;
  HDF *hdf;
  CSPARSE *cs;
  int i;

  conf = (wiki_conf *) ap_get_module_config(r->per_dir_config, &wiki_module);
  ret = mkd_compile(doc, MKD_TOC | MKD_AUTOLINK);

  hdf_init(&hdf);
  if (conf->name) {
    hdf_set_value(hdf, "wikiname", conf->name);
  }

  title = mkd_doc_title(doc);
  if (title == NULL) {
    title = "notitle";
  }
  hdf_set_value(hdf, "title", title);

  for (i = 0, css = conf->css; css; i++, css = (list_t *) css->next) {
    hdf_set_valuef(hdf, "css.%d=%s", i, (char *)css->data);
  }

  if ((size = mkd_document(doc, &p)) != EOF) {
    hdf_set_value(hdf, "document", p);
  }

  cs_init(&cs, hdf);
  cs_parse_string(cs, strdup(DEFAULT_TEMPLATE), strlen(DEFAULT_TEMPLATE));
  cs_render(cs, r, cs_output);

  hdf_destroy(&hdf);
  cs_destroy(&cs);
}
int main(int argc, char *argv[]) {
  HDF *hdf = NULL;
  int i, j;

  hdf_init(&hdf);

  ne_warn("creating 100000x10 nodes");
  for (i = 0; i < 100000; i++) {
    char buffer[64];
    for (j = 0; j < 10; j++) {
      snprintf(buffer, sizeof(buffer), "node.%d.test.%d", i, j);
      hdf_set_value(hdf, buffer, "test");
    }
  }

  ne_warn("calling dealloc");
  hdf_destroy(&hdf);  // <-- this takes forever to return with a hugely populated tree

  return 0;
}
int main(int argc, char **argv, char **envp) {
  unsigned char buf[2048];
  int blen = 2048, len;
  char *s;
  HDF *hdf;

  //mconfig_parse_file("/tpl/oms.hdf", &g_cfg);

  mtimer_start();
  for (int i = 0; i < 100000; i++) {
    memset(buf, 0x0, sizeof(buf));  /* zero the buffer before each round-trip */
    len = pack_hdf(g_cfg, buf, blen);
    unpack_hdf(buf, len, &hdf);
    s = hdf_get_value(hdf, "manual.Layout", NULL);
    hdf_destroy(&hdf);
  }
  mtimer_stop(NULL);

  return 0;
}
int main(int argc, char *argv[]) {
  NEOERR *err;
  HDF *hdf;
  double tstart = 0;

  err = hdf_init(&hdf);
  if (err != STATUS_OK) {
    nerr_log_error(err);
    return -1;
  }

  tstart = ne_timef();
  TestSort(hdf);
  ne_warn("sort took %5.5fs", ne_timef() - tstart);

  hdf_dump(hdf, NULL);
  hdf_destroy(&hdf);
  return 0;
}
~HdfRaw() {
  if (m_hdf) {
    hdf_destroy(&m_hdf);
  }
}
JNIEXPORT void JNICALL Java_org_clearsilver_HDF__1dealloc(
    JNIEnv *env, jclass objClass, jint hdf_obj_ptr) {
  /* the native pointer is round-tripped through a 32-bit jint */
  HDF *hdf = (HDF *)hdf_obj_ptr;
  hdf_destroy(&hdf);
}
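/*
 * Note that a jint can truncate an HDF* on 64-bit platforms. A hedged
 * sketch of the same dealloc entry point widened to jlong (the method
 * name is hypothetical, not the actual org.clearsilver binding):
 */
JNIEXPORT void JNICALL Java_org_clearsilver_HDF__1dealloc64(
    JNIEnv *env, jclass objClass, jlong hdf_obj_ptr) {
  HDF *hdf = (HDF *)(intptr_t)hdf_obj_ptr;  /* round-trip via intptr_t */
  hdf_destroy(&hdf);
}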
int main (int argc, char *argv[])
{
  NEOERR *err;
  CSPARSE *parse;
  HDF *hdf;
  int verbose = 0;
  char *hdf_file, *cs_file;

  if (argc < 3)
  {
    ne_warn ("Usage: cstest [-v] <file.hdf> <file.cs>");
    return -1;
  }

  if (!strcmp(argv[1], "-v"))
  {
    verbose = 1;
    if (argc < 4)
    {
      ne_warn ("Usage: cstest [-v] <file.hdf> <file.cs>");
      return -1;
    }
    hdf_file = argv[2];
    cs_file = argv[3];
  }
  else
  {
    hdf_file = argv[1];
    cs_file = argv[2];
  }

  err = hdf_init(&hdf);
  if (err != STATUS_OK)
  {
    nerr_warn_error(err);
    return -1;
  }

  err = hdf_read_file(hdf, hdf_file);
  if (err != STATUS_OK)
  {
    nerr_warn_error(err);
    return -1;
  }

  printf ("Parsing %s\n", cs_file);
  err = cs_init (&parse, hdf);
  if (err != STATUS_OK)
  {
    nerr_warn_error(err);
    return -1;
  }
  err = cgi_register_strfuncs(parse);
  if (err != STATUS_OK)
  {
    nerr_warn_error(err);
    return -1;
  }
  err = cs_parse_file (parse, cs_file);
  if (err != STATUS_OK)
  {
    err = nerr_pass(err);
    nerr_warn_error(err);
    return -1;
  }
  err = cs_render(parse, NULL, output);
  if (err != STATUS_OK)
  {
    err = nerr_pass(err);
    nerr_warn_error(err);
    return -1;
  }

  if (verbose)
  {
    printf ("\n-----------------------\nCS DUMP\n");
    err = cs_dump(parse, NULL, output);
  }

  cs_destroy (&parse);

  if (verbose)
  {
    printf ("\n-----------------------\nHDF DUMP\n");
    hdf_dump (hdf, NULL);
  }

  hdf_destroy(&hdf);
  return 0;
}
bool Tree::RenderTemplateInternal(const std::string &tmpl, bool file_or_string,
                                  std::ostream *out, std::string *error) const {
  if (!out) {
    return false;
  }

  CSPARSE *csparse = NULL;
  HDF *hdf = NULL;
  NEOERR *err;

  do {
    err = hdf_init(&hdf);
    if (err != STATUS_OK) {
      break;
    }

    if (!SerializeToHdfInternal(hdf, true)) {
      hdf_destroy(&hdf);
      if (error) {
        *error = "SerializationError: serializing to HDF failed";
      }
      return false;
    }

    err = cs_init(&csparse, hdf);
    if (err != STATUS_OK) {
      break;
    }
    err = cgi_register_strfuncs(csparse);
    if (err != STATUS_OK) {
      break;
    }

    if (file_or_string) {
      err = cs_parse_file(csparse, tmpl.c_str());
    } else {
      // cs_parse_string takes ownership of the buffer, so pass a copy.
      char *ctmpl = strdup(tmpl.c_str());
      if (!ctmpl) {
        cs_destroy(&csparse);
        hdf_destroy(&hdf);
        if (error) {
          *error = "MemoryError: allocating buffer for template";
        }
        return false;
      }
      err = cs_parse_string(csparse, ctmpl, tmpl.length());
    }
    if (err != STATUS_OK) {
      break;
    }

    err = cs_render(csparse, out, RenderCallback);
    if (err != STATUS_OK) {
      break;
    }
  } while (false);

  cs_destroy(&csparse);
  hdf_destroy(&hdf);

  if (err != STATUS_OK) {
    if (error) {
      STRING str;
      string_init(&str);
      nerr_error_string(err, &str);
      *error = str.buf;
      string_clear(&str);
    }
    nerr_ignore(&err);
    return false;
  }
  return true;
}
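/*
 * A condensed C sketch of the render path above: parse a template string
 * against an HDF tree and turn any NEOERR into a printable message with
 * nerr_error_string(). Only calls already used in this section are
 * assumed; render_one(), write_cb() and the sample "name" value are
 * illustrative, not from any of these projects.
 */
static NEOERR *write_cb(void *ctx, char *s) {
  fputs(s, (FILE *)ctx);
  return STATUS_OK;
}

static int render_one(const char *tmpl) {
  HDF *hdf = NULL;
  CSPARSE *cs = NULL;
  NEOERR *err;

  if ((err = hdf_init(&hdf)) != STATUS_OK) goto fail;
  if ((err = hdf_set_value(hdf, "name", "world")) != STATUS_OK) goto fail;
  if ((err = cs_init(&cs, hdf)) != STATUS_OK) goto fail;
  /* cs_parse_string takes ownership of the buffer, hence the strdup() */
  if ((err = cs_parse_string(cs, strdup(tmpl), strlen(tmpl))) != STATUS_OK) goto fail;
  if ((err = cs_render(cs, stdout, write_cb)) != STATUS_OK) goto fail;

  cs_destroy(&cs);
  hdf_destroy(&hdf);
  return 0;

fail:
  {
    STRING str;
    string_init(&str);
    nerr_error_string(err, &str);
    fprintf(stderr, "render failed: %s\n", str.buf);
    string_clear(&str);
    nerr_ignore(&err);
  }
  cs_destroy(&cs);
  hdf_destroy(&hdf);
  return -1;
}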
static int diary_handle_entry(request_rec *r,
                              diary_conf *conf,
                              const char *filename)
{
  FILE *fp;
  CSPARSE *cs;
  NEOERR *cs_err;
  HDF *hdf;
  MMIOT *doc;
  char *title;
  char *author;
  char *date;
  int size;
  char *p;
  int flag = 0;
  int github_flavoured = conf->github_flavoured;
  calendar_info cal;
  char *theme_path;
  char *theme_file;

  theme_path = apr_pstrcat(r->pool, conf->path, "/themes/", conf->theme, NULL);
  theme_file = apr_pstrcat(r->pool, theme_path, "/index.cst", NULL);

  fp = fopen(filename, "r");
  if (fp == NULL) {
    switch (errno) {
    case ENOENT:
      return HTTP_NOT_FOUND;
    case EACCES:
      return HTTP_FORBIDDEN;
    default:
      ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                    "diary_parse_entry error: errno=%d\n", errno);
      return HTTP_INTERNAL_SERVER_ERROR;
    }
  }

  doc = github_flavoured ? gfm_in(fp, 0) : mkd_in(fp, 0);
  fclose(fp);
  if (doc == NULL) {
    return HTTP_INTERNAL_SERVER_ERROR;
  }

  title = mkd_doc_title(doc);
  if (title == NULL) {
    title = "notitle";
  }
  date = mkd_doc_date(doc);
  author = mkd_doc_author(doc);

  if (conf->autolink) {
    flag = MKD_AUTOLINK;
  }
  mkd_compile(doc, flag);
  if ((size = mkd_document(doc, &p)) == EOF) {
    return HTTP_INTERNAL_SERVER_ERROR;
  }

  hdf_init(&hdf);
  hdf_set_value(hdf, "hdf.loadpaths.1", conf->path);
  hdf_set_value(hdf, "hdf.loadpaths.2", theme_path);

  cs_err = hdf_read_file(hdf, INDEX_HDF);
  if (cs_err) {
    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "cannot read index.hdf.");
    // TODO: no need to free cs_err and cs_err_str?
    hdf_destroy(&hdf);
    return HTTP_INTERNAL_SERVER_ERROR;
  }

  hdf_set_value(hdf, "diary.title", conf->title);
  hdf_set_value(hdf, "diary.uri", conf->uri);
  hdf_set_value(hdf, "diary.theme", conf->theme);
  hdf_set_value(hdf, "entry.uri", r->uri);
  hdf_set_value(hdf, "entry.title", title);
  hdf_set_value(hdf, "entry.author", author);
  hdf_set_value(hdf, "entry.date", date);
  hdf_set_value(hdf, "entry.desc", p);
  //hdf_dump(hdf, NULL);

  if (conf->calendar) {
    diary_set_calendar_info(&cal, r->args);
    hdf_set_int_value(hdf, "cal.year", cal.year);
    hdf_set_value(hdf, "cal.month", cal.month);
    hdf_set_value(hdf, "cal.day", cal.day);
    hdf_set_value(hdf, "cal.today", cal.today);
    hdf_set_int_value(hdf, "cal.lastdayofmonth", cal.lastdayofmonth);
    hdf_set_int_value(hdf, "cal.dayofweek_1stdayofmonth",
                      cal.dayofweek_1stdayofmonth);
  }

  cs_err = cs_init(&cs, hdf);
  if (cs_err) {
    hdf_destroy(&hdf);
    return HTTP_INTERNAL_SERVER_ERROR;
  }
  cgi_register_strfuncs(cs);
  mkd_cleanup(doc);
  cs_parse_file(cs, theme_file);

  r->content_type = "text/html";
  cs_render(cs, r, diary_cs_render_cb);

  hdf_destroy(&hdf);
  cs_destroy(&cs);
  return 0;
}
NEOERR* mmg_query(mmg_conn *db, char *dsn, char *prefix, HDF *outnode) {
  int count;
  char key[LEN_HDF_KEY];
  HDF *node, *cnode;
  bson *doc;
  NEOERR *err;

  MCS_NOT_NULLB(db, dsn);

  db->p = mongo_sync_cmd_query(db->con, dsn, db->flags & 0x3FF,
                               db->skip, db->limit, db->docq, db->docs);
  if (!db->p) {
    if (errno == ENOENT) {
      mtc_noise("queried %s 0 result", dsn);
      if (db->flags & MMG_FLAG_EMPTY) {
        if (db->query_callback && !db->incallback) {
          /*
           * empty result, call callback
           */
          db->incallback = true;
          err = db->query_callback(db, NULL, true);
          TRACE_NOK(err);
          db->incallback = false;
          db->query_callback = NULL;
          db->callbackdata = NULL;
        }
        return STATUS_OK;
      }
      return nerr_raise(NERR_NOT_FOUND, "no such record");
    }
    return nerr_raise(NERR_DB, "query: %s %d", strerror(errno), errno);
  }

  /*
   * process result
   */
  if (outnode || (db->query_callback && !db->incallback)) {
    if (outnode) node = outnode; /* need to store the result */
    else hdf_init(&node);

    db->c = mongo_sync_cursor_new(db->con, dsn, db->p);
    if (!db->c) return nerr_raise(NERR_DB, "cursor: %s", strerror(errno));

    cnode = NULL;
    count = 0;
    while (mongo_sync_cursor_next(db->c) && count < db->limit) {
      memset(key, 0x0, sizeof(key));
      if (prefix) {
        if (!(db->flags & MMG_FLAG_MIXROWS) && db->limit > 1)
          snprintf(key, sizeof(key), "%s.%d", prefix, count);
        else
          snprintf(key, sizeof(key), "%s", prefix);
      } else {
        if (!(db->flags & MMG_FLAG_MIXROWS) && db->limit > 1)
          sprintf(key, "%d", count);
        else
          key[0] = '\0';
      }
      doc = mongo_sync_cursor_get_data(db->c);
      err = mbson_export_to_hdf(node, doc, key, MBSON_EXPORT_TYPE, true);
      if (err != STATUS_OK) return nerr_pass(err);

      if (!cnode) cnode = hdf_get_obj(node, key);
      count++;
    }
    db->rescount = count;
    mongo_sync_cursor_free(db->c);
    db->c = NULL;
    db->p = NULL;

    mtc_noise("queried %s %d result", dsn, count);

    /*
     * call the callback last, because we don't want to declare more
     * mmg_conn*. it's safe to start a new query from the callback once
     * the result is stored (db->c freed); we may call mmg_query()
     * recursively, the callback won't.
     */
    if (db->query_callback && !db->incallback) {
      db->incallback = true;
      count = 0;
      while (cnode) {
        count++;
        if (db->rescount == count) err = db->query_callback(db, cnode, true);
        else err = db->query_callback(db, cnode, false);
        TRACE_NOK(err);
        cnode = hdf_obj_next(cnode);
      }
      db->incallback = false;
      /*
       * query_callback can't be shared between queries; a later query
       * must set it again even if it is the same one
       */
      db->query_callback = NULL;
      db->callbackdata = NULL;
    }

    if (!outnode) hdf_destroy(&node);
  } else {
    /* result not needed */
    mongo_wire_packet_free(db->p);
    db->c = NULL;
    db->p = NULL;
  }

  return STATUS_OK;
}
static NEOERR* rend_blog_index(HASH *dbh, HASH *tplh, int pageid, int *pgttr) {
  CSPARSE *cs = NULL;
  HDF *hdf, *dhdf;
  STRING str;
  NEOERR *err = STATUS_OK;
  char fname[_POSIX_PATH_MAX];

  if (!dbh || !tplh) return nerr_raise(NERR_ASSERT, "parameter null");

  cs = (CSPARSE*)hash_lookup(tplh, "blog_index");
  dhdf = (HDF*)hash_lookup(tplh, "blog_index_hdf");
  if (!cs || !dhdf) return nerr_raise(LERR_MISS_TPL, "blog_index not found");

  err = hdf_init(&hdf);
  if (err != STATUS_OK) return nerr_pass(err);
  string_init(&str);  /* so string_clear() at done: is safe on every path */

  hdf_copy(hdf, NULL, dhdf);
  ltpl_prepare_rend(hdf, "layout.html");
  hdf_set_int_value(hdf, PRE_QUERY".pageid", pageid);

  err = blog_index_static_get(hdf, dbh);
  if (err != STATUS_OK) goto done;

  int ntt = hdf_get_int_value(hdf, PRE_OUTPUT".ntt", 0);
  int pgtt = hdf_get_int_value(hdf, PRE_OUTPUT".pgtt", 1);
  if (pgttr) *pgttr = pgtt;

  if (pageid == 0) {
    if (pgtt > 1) {
      err = hdf_set_int_value(hdf, PRE_OUTPUT".pgprev", pgtt-1);
      TRACE_NOK(err);
      if (ntt % BLOG_NUM_PERPAGE == 1) {
        err = rend_blog_index(dbh, tplh, pgtt-1, NULL);
        TRACE_NOK(err);
        if (pgtt > 2) {
          /* page 1's next link was index.html; regenerate it to point at 2.html */
          err = rend_blog_index(dbh, tplh, pgtt-2, NULL);
          TRACE_NOK(err);
        }
      }
    }
  } else {
    if (pageid > 1 && pgtt > 1)
      hdf_set_int_value(hdf, PRE_OUTPUT".pgprev", pageid-1);
    if (pgtt == pageid+1)
      hdf_set_value(hdf, PRE_OUTPUT".pgnext", "index");
    else if (pgtt > pageid)
      hdf_set_int_value(hdf, PRE_OUTPUT".pgnext", pageid+1);
  }

  cs->hdf = hdf;
  err = cs_render(cs, &str, mcs_strcb);
  if (err != STATUS_OK) goto done;

  if (pageid == 0)
    snprintf(fname, sizeof(fname), "%sindex.html", PATH_BLOG);
  else
    snprintf(fname, sizeof(fname), "%s%d.html", PATH_BLOG, pageid);

  err = mutil_makesure_dir(fname);
  if (err != STATUS_OK) goto done;
  err = mcs_str2file(str, fname);
  if (err != STATUS_OK) goto done;

#ifdef DEBUG_HDF
  hdf_write_file(hdf, TC_ROOT"hdf.blg.index");
#endif

done:
  hdf_destroy(&hdf);
  cs->hdf = NULL;
  string_clear(&str);
  return nerr_pass(err);
}