NEOERR* ldml_parse_file(char *dir, char *name, HASH *outhash)
{
    char fname[_POSIX_PATH_MAX], *attrval = NULL;
    HDF *node, *child, *dhdf;
    STRING str;
    NEOERR *err;

    memset(fname, 0x0, sizeof(fname));
    snprintf(fname, sizeof(fname), "%s/%s", dir, name);

    err = hdf_init(&node);
    if (err != STATUS_OK) return nerr_pass(err);
    err = hdf_read_file(node, fname);
    if (err != STATUS_OK) return nerr_pass(err);

    child = hdf_obj_child(node);
    while (child != NULL) {
        mtc_dbg("parse node %s", hdf_obj_name(child));
        string_init(&str);

        attrval = mcs_obj_attr(child, "merge");
        if (attrval) {
            ULIST *list;
            string_array_split(&list, attrval, ",", 10);
            ITERATE_MLIST(list) {
                snprintf(fname, sizeof(fname), "%s/%s", dir,
                         neos_strip((char*)list->items[t_rsv_i]));
                err = hdf_init(&dhdf);
                JUMP_NOK(err, wnext);
                err = hdf_read_file(dhdf, fname);
                JUMP_NOK(err, wnext);
                err = hdf_copy(child, NULL, dhdf);
                JUMP_NOK(err, wnext);
            }
            uListDestroy(&list, ULIST_FREE);
        }

    wnext:
        string_clear(&str);
        child = hdf_obj_next(child);
    }

    /* the wnext label belongs to the loop above; returning the error here
     * avoids jumping back into the finished loop on insert failure */
    err = hash_insert(outhash, (void*)strdup(name), (void*)node);
    if (err != STATUS_OK) return nerr_pass(err);

    return STATUS_OK;
}
char* mmg_get_valuef(mmg_conn *db, char *dsn, char *key, int skip, char *qfmt, ...)
{
    HDF *tmpnode;
    char *val, *querys, sels[256];
    va_list ap;
    NEOERR *err;

    hdf_init(&tmpnode);

    va_start(ap, qfmt);
    querys = vsprintf_alloc(qfmt, ap);
    va_end(ap);
    if (!querys) {
        mtc_err("Unable to allocate mem for query string");
        return NULL;
    }

    snprintf(sels, sizeof(sels), "{'%s': 1}", key);

    err = mmg_prepare(db, MMG_FLAG_EMPTY, skip, 1, NULL, sels, querys);
    RETURN_V_NOK(err, NULL);

    err = mmg_query(db, dsn, NULL, tmpnode);
    RETURN_V_NOK(err, NULL);

    err = hdf_get_copy(tmpnode, key, &val, NULL);
    RETURN_V_NOK(err, NULL);

    hdf_destroy(&tmpnode);
    SAFE_FREE(querys);

    return val;
}
int main(int argc, char *argv[])
{
    NEOERR *err;
    HDF *hdf, *h2;

    err = hdf_init(&hdf);
    if (err != STATUS_OK) {
        nerr_log_error(err);
        return -1;
    }

    err = hdf_set_value(hdf, "CGI.Foo", "Bar");
    if (err) {
        nerr_log_error(err);
        return -1;
    }

    err = hdf_set_value(hdf, "CGI.Foo", "Baz");
    if (err) {
        nerr_log_error(err);
        return -1;
    }

    h2 = hdf_get_obj(hdf, "CGI");
    err = hdf_set_value(h2, "Foo", "Bang");

    hdf_dump(hdf, NULL);
    hdf_destroy(&hdf);
    return 0;
}
bool Tree::SerializeToHdfString(std::string *serialized,
                                bool only_nodes_with_value) const {
  if (!serialized) {
    return false;
  }
  NEOERR *err;
  HDF *hdf;
  err = hdf_init(&hdf);
  if (err != STATUS_OK) {
    nerr_ignore(&err);
    return false;
  }
  if (!SerializeToHdfInternal(hdf, only_nodes_with_value)) {
    hdf_destroy(&hdf);
    return false;
  }
  char *str;
  err = hdf_write_string(hdf, &str);
  if (err != STATUS_OK) {
    nerr_ignore(&err);
    hdf_destroy(&hdf);
    return false;
  }
  serialized->assign(str);
  hdf_destroy(&hdf);
  free(str);
  return true;
}
bool Tree::SerializeToHdfFile(const std::string &filename,
                              bool only_nodes_with_value) const {
  NEOERR *err;
  HDF *hdf;
  err = hdf_init(&hdf);
  if (err != STATUS_OK) {
    nerr_ignore(&err);
    return false;
  }
  if (!SerializeToHdfInternal(hdf, only_nodes_with_value)) {
    hdf_destroy(&hdf);
    return false;
  }
  err = hdf_write_file(hdf, filename.c_str());
  if (err != STATUS_OK) {
    nerr_ignore(&err);
    hdf_destroy(&hdf);
    return false;
  }
  hdf_destroy(&hdf);
  return true;
}
bool Tree::ParseFromHdfFile(const std::string &filename) {
  Clear();
  HDF *hdf;
  NEOERR *err = hdf_init(&hdf);
  if (err != STATUS_OK) {
    nerr_ignore(&err);
    return false;
  }
  err = hdf_read_file(hdf, filename.c_str());
  if (err != STATUS_OK) {
    nerr_ignore(&err);
    hdf_destroy(&hdf);
    return false;
  }
  HDF *child = hdf_obj_child(hdf);
  if (!child) {
    hdf_destroy(&hdf);
    return false;
  }
  if (!ParseFromHdfInternal(child)) {
    hdf_destroy(&hdf);
    return false;
  }
  hdf_destroy(&hdf);
  return true;
}
NEOERR* fill_trackarea(HASH *evth, HASH *dbh)
{
    mdb_conn *conn = (mdb_conn*)hash_lookup(dbh, "dyn");
    mevent_t *evt = (mevent_t*)hash_lookup(evth, "place");
    HDF *node;
    char *ip, *aid, *uid, *city, *area;
    NEOERR *err;

    hdf_init(&node);

    LPRE_ALLOP(node, conn, evt);

    MDB_QUERY_RAW(conn, "track", COL_TRACK,
                  "intime > current_date AND area='' GROUP BY aid, uid", NULL);
    err = mdb_set_rows(node, conn, COL_TRACK, NULL, NULL);
    if (err != STATUS_OK) return nerr_pass(err);

    node = hdf_obj_child(node);
    while (node) {
        ip = hdf_get_value(node, "ip", NULL);
        aid = hdf_get_value(node, "aid", "0");
        uid = hdf_get_value(node, "uid", "0");
        if (ip) {
            hdf_set_value(evt->hdfsnd, "ip", ip);
            MEVENT_TRIGGER_NRET(evt, ip, REQ_CMD_PLACEGET, FLAGS_SYNC);
            city = hdf_get_value(evt->hdfrcv, "0.c", "Mars");
            area = city2area(city);
            MDB_EXEC(conn, NULL,
                     "UPDATE track SET area=$1 WHERE aid=$2 AND uid=$3",
                     "sss", area, aid, uid);
        }
        node = hdf_obj_next(node);
    }

    return STATUS_OK;
}
bool Tree::ParseFromHdfString(const std::string &serialized) {
  Clear();
  if (serialized.empty()) {
    return true;
  }
  HDF *hdf;
  NEOERR *err = hdf_init(&hdf);
  if (err != STATUS_OK) {
    nerr_ignore(&err);
    return false;
  }
  err = hdf_read_string(hdf, serialized.c_str());
  if (err != STATUS_OK) {
    nerr_ignore(&err);
    hdf_destroy(&hdf);
    return false;
  }
  HDF *child = hdf_obj_child(hdf);
  if (!child) {
    hdf_destroy(&hdf);
    return false;
  }
  if (!ParseFromHdfInternal(child)) {
    hdf_destroy(&hdf);
    return false;
  }
  hdf_destroy(&hdf);
  return true;
}
int main()
{
    CGI *cgi = NULL;
    HDF *hdf = NULL;

    hdf_init(&hdf);
    cgi_init(&cgi, hdf);
    sem_init(&cmd_sem, 0, 0);

    hdf_set_value(cgi->hdf, "Config.Upload.TmpDir", "/tmp/cgiupload");
    hdf_set_value(cgi->hdf, "Config.Upload.Unlink", "0");
    cgi_parse(cgi);

    Setup_MQ(IMG_MQ_SEND, IMG_MQ_RECEIVE);

    // bind Receive callback
    NotifySetup(&msg_queue_receive);

    if (AmbaIQPage_set_params(hdf) < 0) {
        PrintResult(STATUS_FAILURE);
    } else {
        while (1) {
            if (0 != mq_send(msg_queue_send, (char *)send_buffer,
                             MAX_MESSAGES_SIZE, 0)) {
                LOG_MESSG("%s", "mq_send failed!");
                sleep(1);
                continue;
            }
            break;
        }
        sem_wait(&cmd_sem);
    }

    //hdf_dump(hdf, "<br>");
    return 0;
}
static PyObject * p_hdf_init (PyObject *self, PyObject *args)
{
  HDF *hdf = NULL;
  NEOERR *err;

  err = hdf_init (&hdf);
  if (err) return p_neo_error (err);
  return p_hdf_to_object (hdf, 1);
}
struct msqueue_entry* msqueue_entry_create()
{
    struct msqueue_entry *e = calloc(1, sizeof(struct msqueue_entry));
    if (!e) {
        mtc_err("out of memory");
        return NULL;
    }

    e->ename = NULL;
    e->cmd = NULL;
    hdf_init(&e->hdfrcv);
    hdf_init(&e->hdfsnd);
    e->prev = NULL;

    return e;
}
JNIEXPORT jint JNICALL Java_org_clearsilver_HDF__1init(
    JNIEnv *env, jclass objClass)
{
  HDF *hdf = NULL;
  NEOERR *err;

  err = hdf_init(&hdf);
  if (err != STATUS_OK) {
    return jNeoErr(env, err);
  }
  return (jint) hdf;
}
int main(void)
{
  HDF *hdf_1, *hdf_2;
  HDF *cur_node, *last_node;

  hdf_init(&hdf_1);
  hdf_read_file(hdf_1, "hdf_copy_test.hdf");
  hdf_dump(hdf_1, NULL);

  cur_node = hdf_get_obj(hdf_1, "Chart");
  last_node = cur_node;
  cur_node = hdf_get_obj(cur_node, "next_stage");

  while (hdf_get_obj(cur_node, "next_stage") &&
         strcmp(hdf_get_value(cur_node, "Bucket.FieldId", ""), "QUEUE")) {
    last_node = cur_node;
    cur_node = hdf_get_obj(cur_node, "next_stage");
  }

  if (hdf_get_obj(cur_node, "next_stage")) {
    hdf_copy(hdf_1, "TempHolderPlace", hdf_get_obj(cur_node, "next_stage"));
  }

  ne_warn("Delete tree from node: %s", hdf_obj_name(last_node));
  hdf_remove_tree(last_node, "next_stage");
  hdf_dump(hdf_1, NULL);
  fprintf(stderr, "-----------------\n");

  hdf_copy(last_node, "next_stage", hdf_get_obj(hdf_1, "TempHolderPlace"));
  hdf_dump(hdf_1, NULL);

  /* Test copy and destroy, make sure we actually copy everything and
   * don't reference anything */
  hdf_init(&hdf_2);
  hdf_copy(hdf_2, "", hdf_1);
  hdf_destroy(&hdf_1);
  hdf_dump(hdf_2, NULL);
  hdf_destroy(&hdf_2);

  return 0;
}
static int diary_handle_feed_rss(request_rec *r, diary_conf *conf)
{
    HDF *hdf;
    CSPARSE *cs;
    NEOERR *cs_err;
    STRING cs_err_str;

    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "diary_handle_feed_rss()");

    hdf_init(&hdf);
    hdf_set_value(hdf, "hdf.loadpaths.1", conf->path);
    hdf_set_value(hdf, "diary.title", conf->title);
    hdf_set_value(hdf, "diary.uri", conf->uri);

    cs_err = hdf_read_file(hdf, INDEX_HDF);
    if (cs_err) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "cannot read index.hdf.");
        hdf_destroy(&hdf);
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    //hdf_dump(hdf, NULL);

    cs_err = cs_init(&cs, hdf);
    if (cs_err) {
        string_init(&cs_err_str);
        nerr_error_string(cs_err, &cs_err_str);
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                      "error at cs_init(): %s", cs_err_str.buf);
        cs_destroy(&cs);
        hdf_destroy(&hdf);
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    cgi_register_strfuncs(cs);

    cs_err = cs_parse_string(cs, strdup(RSS_TMPL), RSS_TMPL_LEN);
    if (cs_err) {
        string_init(&cs_err_str);
        nerr_error_string(cs_err, &cs_err_str);
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                      "error in cs_parse_string(): %s", cs_err_str.buf);
        cs_destroy(&cs);
        hdf_destroy(&hdf);
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    r->content_type = "application/rss+xml";
    cs_render(cs, r, diary_cs_render_cb);

    cs_destroy(&cs);
    hdf_destroy(&hdf);
    return OK;
}
static NEOERR* rend_blog(HASH *dbh, HASH *tplh, int bid)
{
    CSPARSE *cs = NULL;
    HDF *hdf, *dhdf;
    STRING str;
    NEOERR *err;
    char fname[_POSIX_PATH_MAX];

    if (!dbh || !tplh || bid < 0) return nerr_raise(NERR_ASSERT, "parameter null");

    cs = (CSPARSE*)hash_lookup(tplh, "blog");
    dhdf = (HDF*)hash_lookup(tplh, "blog_hdf");
    if (!cs || !dhdf) return nerr_raise(LERR_MISS_TPL, "blog_index not found");

    err = hdf_init(&hdf);
    if (err != STATUS_OK) return nerr_pass(err);

    /* initialize str before any goto done, so string_clear() is always safe */
    string_init(&str);

    hdf_copy(hdf, NULL, dhdf);
    ltpl_prepare_rend(hdf, "layout.html");
    hdf_set_int_value(hdf, PRE_QUERY".bid", bid);

    err = blog_static_get(hdf, dbh);
    if (err != STATUS_OK) goto done;

    hdf_set_copy(hdf, PRE_LAYOUT".title", PRE_OUTPUT".blog.title");

    cs->hdf = hdf;
    err = cs_render(cs, &str, mcs_strcb);
    if (err != STATUS_OK) goto done;

    snprintf(fname, sizeof(fname), "%s%d/%d.html",
             PATH_BLOG, bid % BLOG_SUBDIR_NUM, bid);
    err = mutil_makesure_dir(fname);
    if (err != STATUS_OK) goto done;

    err = mcs_str2file(str, fname);
    if (err != STATUS_OK) goto done;

#ifdef DEBUG_HDF
    hdf_write_file(hdf, TC_ROOT"hdf.blg");
#endif

done:
    hdf_destroy(&hdf);
    cs->hdf = NULL;
    string_clear(&str);
    return nerr_pass(err);
}
int main(int argc, char **argv, char **envp)
{
    char key[10];
    HDF *bignode;

    mtc_init("cshdf", 7);

    hdf_init(&bignode);
    for (int i = 0; i < 5003929; i++) {
        mstr_rand_string_with_len(key, 10);
        hdf_set_valuef(bignode, "%s.today=10", key);
        hdf_set_valuef(bignode, "%s.toweek=11", key);
        hdf_set_valuef(bignode, "%s.tomonth=12", key);
        hdf_set_valuef(bignode, "%s.total=234", key);
        if (i % 10000 == 0) printf("%d\n", i);
    }
    //hdf_dump(bignode, NULL);

    printf("child num %d\n", mcs_get_child_num(bignode, NULL));

    int count = 0;
    mtimer_start();
    HDF *cnode = hdf_obj_child(bignode);
    while (cnode) {
        char *name = hdf_obj_name(cnode);
        if (mcs_get_int_valuef(bignode, 0, "%s.today", name) != 10) printf("error\n");
        if (mcs_get_int_valuef(bignode, 0, "%s.toweek", name) != 11) printf("error\n");
        if (mcs_get_int_valuef(bignode, 0, "%s.tomonth", name) != 12) printf("error\n");
        if (mcs_get_int_valuef(bignode, 0, "%s.total", name) != 234) printf("error\n");
        count++;
        cnode = hdf_obj_next(cnode);
    }
    mtimer_stop("get time");
    printf("get child count %d\n", count);

    hdf_destroy(&bignode);
    return 0;
}
int main()
{
    HDF *hdf = NULL;

    hdf_init(&hdf);
    cgi_init(&cgi, hdf);
    sem_init(&cmd_sem, 0, 0);

    AmbaPMPage_get_params();
    sem_wait(&cmd_sem);

    cgi_display(cgi, "../html/pm.html");
    return 0;
}
int main(int argc, char **argv, char **envp)
{
    char *s = "{\"a\": [{\"aaa\": 12}, 2], \"b\": \"xxx\"}";
    //char *s = "f**k";
    HDF *node;

    hdf_init(&node);
    //mjson_string_to_hdf(hdf_get_obj(node, "s"), s);
    hdf_write_file(node, "x.hdf");
    hdf_destroy(&node);

    return 0;
}
void formatter_kml_split_output(formatter_split_t *obj, char *filename)
{
    FILE *fp = fopen(filename, "w");
    CSPARSE *csparse = NULL;   /* NULL so cs_destroy() is safe if we never reach cs_init() */
    HDF *hdf = NULL;
    NEOERR *err;
    int stage = 0;

    if ((err = hdf_init(&hdf)) != STATUS_OK) {
        goto error;
    }

    coordinate_subset_t *subset = obj->set->first_subset;
    int16_t i = 0;
    while (subset) {
        _cs_set_valuef(hdf, "tracks.%d.colour=%s", i, kml_colours[i % 9]);

        coordinate_t *coordinate = subset->first;
        int16_t j = 0;
        while (coordinate && coordinate != subset->last) {
            _cs_set_valuef(hdf, "tracks.%d.points.%d.lng=%f", i, j, coordinate->lng);
            _cs_set_valuef(hdf, "tracks.%d.points.%d.lat=%f", i, j, coordinate->lat);
            _cs_set_valuef(hdf, "tracks.%d.points.%d.ele=%d", i, j, coordinate->ele);
            _cs_set_valuef(hdf, "tracks.%d.points.%d.time=%f", i, j, coordinate->timestamp);
            coordinate = coordinate->next;
            j++;
        }

        subset = subset->next;
        i++;
    }

    if ((err = cs_init(&csparse, hdf)) != STATUS_OK ||
        (err = cs_parse_file(csparse, "formatter/templates/flight.split.kml.cs.xml")) != STATUS_OK ||
        (err = cs_render(csparse, fp, cs_fwrite)) != STATUS_OK) {
        goto error;
    }
    goto end;

error:
    nerr_log_error(err);
    goto end;

end:
    hdf_destroy(&hdf);
    cs_destroy(&csparse);
    fclose(fp);
}
int main()
{
    HDF *hdf;
    NEOERR *err;

    err = hdf_init(&hdf);
    if (err) {
        printf("error: %s\n", err->desc);
        return 1;
    }
    printf("success: %p\n", (void *)hdf);

    hdf_set_value(hdf, "a.b.c", "somevalue");
    printf("get_value returned: %s\n", hdf_get_value(hdf, "a.b.c", "default"));

    hdf_destroy(&hdf);
    return 0;
}
int main(void)
{
    GC_INIT();

    TF_Buffer *buffer = TF_GetAllOpList();
    Tensorflow__OpList *op_list =
        tensorflow__op_list__unpack(NULL, buffer->length, buffer->data);

    HDF *hdf;
    hdf_init(&hdf);

    for (int i = 0; i < op_list->n_op; i++) {
        struct _Tensorflow__OpDef *op = op_list->op[i];
        char variable[256];
        char value[256];

        snprintf(variable, 256, "Op.%s.name", op->name);
        snprintf(value, 256, "tf-%s", kebab_case(op->name));
        hdf_set_value(hdf, variable, value);

        for (int j = 0; j < op->n_input_arg; j++) {
            Tensorflow__OpDef__ArgDef *arg = op->input_arg[j];
            snprintf(variable, 256, "Op.%s.input_arg.%s", op->name, arg->name);
            const char *multiple = arg->number_attr && *arg->number_attr ? "list" : "single";
            hdf_set_value(hdf, variable, multiple);
        }

        for (int j = 0; j < op->n_attr; j++) {
            Tensorflow__OpDef__AttrDef *attr = op->attr[j];
            snprintf(variable, 256, "Op.%s.attr.%s", op->name, attr->name);
            snprintf(value, 256, "%s", attr->type);
            hdf_set_value(hdf, variable, value);
        }

        snprintf(variable, 256, "Op.%s.n_output", op->name);
        snprintf(value, 256, "%d", op->n_output_arg);
        hdf_set_value(hdf, variable, value);
    }

    CSPARSE *parse;
    cs_init(&parse, hdf);
    cs_parse_file(parse, "tensorflow.scm.in");
    cs_render(parse, stdout, output);

    cs_destroy(&parse);
    hdf_destroy(&hdf);
    tensorflow__op_list__free_unpacked(op_list, NULL);
    TF_DeleteBuffer(buffer);

    return 0;
}
int mmg_get_int_valuef(mmg_conn *db, char *dsn, char *key,
                       int skip, int limit, char *qfmt, ...)
{
    HDF *tmpnode, *node;
    char *querys, sels[256];
    int val;
    va_list ap;
    NEOERR *err;

    hdf_init(&tmpnode);

    va_start(ap, qfmt);
    querys = vsprintf_alloc(qfmt, ap);
    va_end(ap);
    if (!querys) {
        mtc_err("Unable to allocate mem for query string");
        return 0;
    }

    snprintf(sels, sizeof(sels), "{'%s': 1}", key);

    err = mmg_prepare(db, MMG_FLAG_EMPTY, skip, limit, NULL, sels, querys);
    RETURN_V_NOK(err, 0);

    err = mmg_query(db, dsn, NULL, tmpnode);
    RETURN_V_NOK(err, 0);

    val = 0;
    if (hdf_get_valuef(tmpnode, "0.%s", key)) {
        node = hdf_obj_child(tmpnode);
        while (node) {
            val += hdf_get_int_value(node, key, 0);
            node = hdf_obj_next(node);
        }
    } else {
        val = hdf_get_int_value(tmpnode, key, 0);
    }

    hdf_destroy(&tmpnode);
    SAFE_FREE(querys);

    return val;
}
static void wiki_output(MMIOT *doc, request_rec *r)
{
    char *title;
    int ret;
    int size;
    char *p;
    wiki_conf *conf;
    list_t *css;
    HDF *hdf;
    CSPARSE *cs;
    int i;

    conf = (wiki_conf *) ap_get_module_config(r->per_dir_config, &wiki_module);

    ret = mkd_compile(doc, MKD_TOC | MKD_AUTOLINK);

    hdf_init(&hdf);

    if (conf->name) {
        hdf_set_value(hdf, "wikiname", conf->name);
    }

    title = mkd_doc_title(doc);
    if (title == NULL) {
        title = "notitle";
    }
    hdf_set_value(hdf, "title", title);

    for (i = 0, css = conf->css; css; i++, css = (list_t *) css->next) {
        hdf_set_valuef(hdf, "css.%d=%s", i, (char *)css->data);
    }

    if ((size = mkd_document(doc, &p)) != EOF) {
        hdf_set_value(hdf, "document", p);
    }

    cs_init(&cs, hdf);
    cs_parse_string(cs, strdup(DEFAULT_TEMPLATE), strlen(DEFAULT_TEMPLATE));
    cs_render(cs, r, cs_output);

    hdf_destroy(&hdf);
    cs_destroy(&cs);
}
NEOERR * rcfs_meta_load (const char *path, HDF **meta)
{
  NEOERR *err;
  char fpath[_POSIX_PATH_MAX];
  HDF *m;

  snprintf (fpath, sizeof(fpath), "%s,log", path);

  err = hdf_init (&m);
  if (err) return nerr_pass (err);
  err = hdf_read_file (m, fpath);
  if (err) {
    hdf_destroy (&m);
    return nerr_pass (err);
  }

  *meta = m;

  return STATUS_OK;
}
int main(int argc, char *argv[])
{
  HDF *hdf = NULL;
  int i, j;

  hdf_init(&hdf);

  ne_warn("creating 100000x10 nodes");
  for (i = 0; i < 100000; i++) {
    char buffer[64];

    for (j = 0; j < 10; j++) {
      snprintf(buffer, sizeof(buffer), "node.%d.test.%d", i, j);
      hdf_set_value(hdf, buffer, "test");
    }
  }

  ne_warn("calling dealloc");
  hdf_destroy(&hdf); // <-- this takes forever to return with a hugely populated tree

  return 0;
}
int main()
{
    HDF *hdf = NULL;
    char *stream = NULL;

    hdf_init(&hdf);
    cgi_init(&cgi, hdf);
    cgi_parse(cgi);
    sem_init(&cmd_sem, 0, 0);

    /* default to "" so the strstr() calls below never see a NULL pointer */
    stream = hdf_get_value(cgi->hdf, "Query.stream", "");
    if (strstr(stream, "1")) {
        urlid = 0;
    } else if (strstr(stream, "2")) {
        urlid = 1;
    } else if (strstr(stream, "3")) {
        urlid = 2;
    } else if (strstr(stream, "4")) {
        urlid = 3;
    }/* else if (strstr(stream, "stream=5")) {
        urlid = 4;
    } else if (strstr(stream, "stream=6")) {
        urlid = 5;
    } else if (strstr(stream, "stream=7")) {
        urlid = 6;
    } else if (strstr(stream, "stream=8")) {
        urlid = 7;
    }*/

    AmbaEncPage_get_params();
    sem_wait(&cmd_sem);

    if (camtype == WARP_MODE) {
        cgi_display(cgi, "../html/dwp_enc.html");
    } else {
        cgi_display(cgi, "../html/enc.html");
    }

    return 0;
}
int main(int argc, char *argv[])
{
  NEOERR *err;
  HDF *hdf;
  double tstart = 0;

  err = hdf_init(&hdf);
  if (err != STATUS_OK) {
    nerr_log_error(err);
    return -1;
  }

  tstart = ne_timef();
  TestSort(hdf);
  ne_warn("sort took %5.5fs", ne_timef() - tstart);

  hdf_dump(hdf, NULL);
  hdf_destroy(&hdf);
  return 0;
}
int main(int argc, char **argv, char **env)
{
    char *realcgi;
    char *realcgi_path;
    FILE *fp_read;
    char *buffer;
    char *page = NULL;
    char *action = NULL;
    CGI *cgi = NULL;
    HDF *hdf = NULL;

    hdf_init(&hdf);
    cgi_init(&cgi, hdf);

    page = hdf_get_value(cgi->hdf, "Query.page", NULL);
    action = hdf_get_value(cgi->hdf, "Query.action", "query");

    buffer = (char *)malloc(BUFFERLENTH);

    realcgi = get_realcgi(page, action, CONFPATH);
    if (realcgi == NULL) {
        printf("Content-type: text/html\n\n");
        printf("CGI Not Found\n");
        return 1;
    }

    realcgi_path = parse_absulute_path(realcgi);

    /* only read from the pipe if popen() actually succeeded */
    fp_read = popen(realcgi_path, "r");
    if (fp_read != NULL) {
        while (fgets(buffer, BUFFERLENTH, fp_read)) {
            printf("%s", buffer);
        }
        pclose(fp_read);
    }

    free(buffer);
    return 0;
}
HdfRaw() : m_hdf(nullptr), m_count(1) {
  // ClearSilver is not thread-safe when calling hdf_init(), so guarding it.
  Lock lock(HdfMutex);
  Hdf::CheckNeoError(hdf_init(&m_hdf));
  assert(m_hdf);
}
NEOERR* session_init(CGI *cgi, HASH *dbh, session_t **ses)
{
    session_t *lses;
    HDF *node, *onode;
    char tok[LEN_HDF_KEY], *s;
    NEOERR *err;

    /*
     * follow cgi_parse(), to process _type_object
     */
    s = hdf_get_value(cgi->hdf, PRE_QUERY"._type_object", NULL);
    if (s) {
        ULIST *list;
        string_array_split(&list, s, ",", 50);
        ITERATE_MLIST(list) {
            snprintf(tok, sizeof(tok), "%s.%s", PRE_QUERY,
                     neos_strip((char*)list->items[t_rsv_i]));
            onode = hdf_get_obj(cgi->hdf, tok);
            if (onode) {
                err = mjson_string_to_hdf(onode, NULL, MJSON_EXPORT_NONE);
                TRACE_NOK(err);
            }
        }
        uListDestroy(&list, ULIST_FREE);
    }

    *ses = NULL;

    lses = calloc(1, sizeof(session_t));
    if (!lses) return nerr_raise(NERR_NOMEM, "calloc memory for session_t failure");

    /*
     * mname
     */
    HDF_FETCH_STR(cgi->hdf, PRE_COOKIE".mname", s);
    if (!s) HDF_FETCH_STR(cgi->hdf, PRE_COOKIE".username", s);
    if (s) lses->mname = strdup(s);

    /*
     * province
     */
    HDF_FETCH_STR(cgi->hdf, PRE_COOKIE".province", s);
    hdf_init(&lses->province);
    if (s) {
        neos_unescape((UINT8*)s, strlen(s), '%');
        hdf_set_value(lses->province, NULL, s);
        mjson_export_to_hdf(lses->province, NULL, MJSON_EXPORT_NONE, false);
    }

    /*
     * city
     */
    HDF_FETCH_STR(cgi->hdf, PRE_COOKIE".city", s);
    hdf_init(&lses->city);
    if (s) {
        neos_unescape((UINT8*)s, strlen(s), '%');
        hdf_set_value(lses->city, NULL, s);
        mjson_export_to_hdf(lses->city, NULL, MJSON_EXPORT_NONE, false);
    }

    /*
     * browser
     */
    HDF_FETCH_STR(cgi->hdf, PRE_HTTP".UserAgent", s);
    if (s) {
        mstr_repchr(s, ' ', '\0');
        for (int i = 0; i < m_browsers_size; i++) {
            if (!strncasecmp(s, m_browsers[i], strlen(m_browsers[i]))) {
                lses->browser = i;
                break;
            }
        }
        s = strchr(s, '/');
        if (s) lses->bversion = strtof(s+1, NULL);
    }

    /*
     * reqtype
     */
    lses->reqtype = CGI_REQ_HTML;
    char *uri = hdf_get_value(cgi->hdf, PRE_REQ_URI_RW, NULL);
    if (!uri) {
        uri = "terminal";
        lses->reqtype = CGI_REQ_TERMINAL;
    }
    mstr_repchr(uri, '/', '_');
    uri = mstr_strip(uri, '_');
    if (!strncmp(uri, "json_", 5)) {
        uri = uri + 5;
        lses->reqtype = CGI_REQ_AJAX;
    } else if (!strncmp(uri, "image_", 6)) {
        uri = uri + 6;
        lses->reqtype = CGI_REQ_IMAGE;
    }

    /*
     * dataer, render
     */
    switch (http_req_method(cgi)) {
    case CGI_REQ_POST:
        snprintf(tok, sizeof(tok), "%s_data_mod", uri);
        break;
    case CGI_REQ_PUT:
        snprintf(tok, sizeof(tok), "%s_data_add", uri);
        break;
    case CGI_REQ_DEL:
        snprintf(tok, sizeof(tok), "%s_data_del", uri);
        break;
    default:
    case CGI_REQ_GET:
        snprintf(tok, sizeof(tok), "%s_data_get", uri);
        break;
    }
    lses->dataer = strdup(tok);
    lses->render = strdup(uri);

    /*
     * tm_cache_browser
     */
    node = hdf_get_obj(g_cfg, PRE_CFG_FILECACHE".0");
    while (node != NULL) {
        if (reg_search(hdf_get_value(node, "uri", "NULL"), uri)) {
            lses->tm_cache_browser = hdf_get_int_value(node, "tm_cache", 0);
            break;
        }
        node = hdf_obj_next(node);
    }

    /*
     * DONE
     */
    *ses = lses;
    return STATUS_OK;
}