/*
** Use data accumulated in gg from a "blob" record to add a new file
** to the BLOB table.
*/
static void finish_blob(void){
  Blob record;

  /* Wrap the accumulated bytes in a Blob and hand them to the insert
  ** routine under the mark recorded in gg. */
  blob_init(&record, gg.aData, gg.nData);
  fast_insert_content(&record, gg.zMark, 0);
  blob_reset(&record);

  /* Clear the accumulator for the next record. */
  import_reset(0);
}
/* Allocate and initialize a raw-socket client wrapping the given file
 * descriptor.  Returns a zeroed, list-initialized client, or NULL when
 * allocation fails.
 *
 * Fix: the result of calloc() was previously dereferenced without a NULL
 * check, crashing on out-of-memory. */
static struct ubus_rawsocket_client *ubus_rawsocket_client_new(int fd){
	struct ubus_rawsocket_client *self = calloc(1, sizeof(struct ubus_rawsocket_client));
	if(!self)
		return NULL;
	INIT_LIST_HEAD(&self->tx_queue);
	self->fd = fd;
	self->recv_count = 0;
	blob_init(&self->data, 0, 0);
	return self;
}
/* Receive one message from the event queue into *out.
 * Blocks for at most 5 seconds.  Returns 1 on success, -EINVAL when the
 * queue is not set up, -EAGAIN when nothing arrived in time. */
int orange_eq_recv(struct orange_eq *self, struct blob *out){
	struct timespec deadline;
	int nRead;

	if(!self->buf)
		return -EINVAL;
	if(self->mq == -1)
		return -EINVAL;

	/* 5 second receive timeout */
	timespec_from_now_us(&deadline, 5000000UL);
	nRead = mq_timedreceive(self->mq, self->buf, self->attr.mq_msgsize, NULL, &deadline);
	if(nRead <= 0)
		return -EAGAIN;

	blob_init(out, self->buf, nRead);
	return 1;
}
/* Test that data values are written and read with proper alignment. */
static void
test_alignment(void)
{
   struct blob blob;
   struct blob_reader reader;
   uint8_t bytes[] = "ABCDEFGHIJKLMNOP";
   size_t delta, last, num_bytes;

   blob_init(&blob);

   /* First, write an intptr value to the blob and capture that size. This is
    * the expected offset between any pair of intptr values (if written with
    * alignment).
    */
   blob_write_intptr(&blob, (intptr_t) &blob);

   delta = blob.size;
   last = blob.size;

   /* Then loop doing the following:
    *
    *   1. Write an unaligned number of bytes
    *   2. Verify that write results in an unaligned size
    *   3. Write an intptr_t value
    *   4. Verify that that write results in an aligned size
    */
   for (num_bytes = 1; num_bytes < sizeof(intptr_t); num_bytes++) {
      blob_write_bytes(&blob, bytes, num_bytes);

      expect_unequal(delta, blob.size - last, "unaligned write of bytes");

      blob_write_intptr(&blob, (intptr_t) &blob);

      /* 2*delta = padding up to alignment plus the intptr itself. */
      expect_equal(2 * delta, blob.size - last, "aligned write of intptr");

      last = blob.size;
   }

   /* Finally, test that reading also does proper alignment. Since we know
    * that values were written with all the right alignment, all we have to do
    * here is verify that correct values are read.
    */
   blob_reader_init(&reader, blob.data, blob.size);

   expect_equal((intptr_t) &blob, blob_read_intptr(&reader),
                "read of initial, aligned intptr_t");

   for (num_bytes = 1; num_bytes < sizeof(intptr_t); num_bytes++) {
      expect_equal_bytes(bytes, blob_read_bytes(&reader, num_bytes),
                         num_bytes, "unaligned read of bytes");
      expect_equal((intptr_t) &blob, blob_read_intptr(&reader),
                   "aligned read of intptr_t");
   }

   blob_finish(&blob);
}
/* Serialize a linked GLSL program and store it in the on-disk shader cache,
 * keyed by the program's SHA-1.  Also registers each component shader's key
 * with the cache.  No-op when the disk cache is disabled or when the program
 * has an all-zero hash (fixed-function programs with no source). */
void shader_cache_write_program_metadata(struct gl_context *ctx, struct gl_shader_program *prog)
{
   struct disk_cache *cache = ctx->Cache;
   if (!cache)
      return;

   /* Exit early when we are dealing with a ff shader with no source file to
    * generate a source from.
    *
    * TODO: In future we should use another method to generate a key for ff
    * programs.
    */
   static const char zero[sizeof(prog->data->sha1)] = {0};
   if (memcmp(prog->data->sha1, zero, sizeof(prog->data->sha1)) == 0)
      return;

   struct blob metadata;
   blob_init(&metadata);

   serialize_glsl_program(&metadata, ctx, prog);

   struct cache_item_metadata cache_item_metadata;
   cache_item_metadata.type = CACHE_ITEM_TYPE_GLSL;
   cache_item_metadata.keys =
      (cache_key *) malloc(prog->NumShaders * sizeof(cache_key));
   cache_item_metadata.num_keys = prog->NumShaders;

   /* On allocation failure skip the cache write but still release the
    * metadata blob via the shared cleanup below. */
   if (!cache_item_metadata.keys)
      goto fail;

   char sha1_buf[41];
   for (unsigned i = 0; i < prog->NumShaders; i++) {
      /* Register each shader's own key with the cache. */
      disk_cache_put_key(cache, prog->Shaders[i]->sha1);
      memcpy(cache_item_metadata.keys[i], prog->Shaders[i]->sha1,
             sizeof(cache_key));

      if (ctx->_Shader->Flags & GLSL_CACHE_INFO) {
         _mesa_sha1_format(sha1_buf, prog->Shaders[i]->sha1);
         fprintf(stderr, "marking shader: %s\n", sha1_buf);
      }
   }

   disk_cache_put(cache, prog->data->sha1, metadata.data, metadata.size,
                  &cache_item_metadata);

   if (ctx->_Shader->Flags & GLSL_CACHE_INFO) {
      _mesa_sha1_format(sha1_buf, prog->data->sha1);
      fprintf(stderr, "putting program metadata in cache: %s\n", sha1_buf);
   }

   /* Success path intentionally falls through into the cleanup label;
    * free(NULL) is a safe no-op when the keys allocation failed. */
fail:
   free(cache_item_metadata.keys);
   blob_finish(&metadata);
}
/*
** SSH initialization of the transport layer.
**
** Build the SSH command used to run "fossil test-http" on the remote
** machine, then launch it with popen2().  Returns 0 on success and
** non-zero if the tunnel could not be started.
*/
int transport_ssh_open(UrlData *pUrlData){
  /* For SSH we need to create and run SSH fossil http
  ** to talk to the remote machine.
  */
  const char *zSsh;        /* The base SSH command */
  Blob zCmd;               /* The SSH command */
  char *zHost;             /* The host name to contact */
  char *zTmp;              /* Scratch copy of a string being escaped */
  int n;                   /* Size of prefix string */

  socket_ssh_resolve_addr(pUrlData);
  zSsh = db_get("ssh-command", zDefaultSshCmd);
  blob_init(&zCmd, zSsh, -1);
  if( pUrlData->port!=pUrlData->dfltPort && pUrlData->port ){
#ifdef __MINGW32__
    blob_appendf(&zCmd, " -P %d", pUrlData->port);
#else
    blob_appendf(&zCmd, " -p %d", pUrlData->port);
#endif
  }
  if( g.fSshTrace ){
    fossil_force_newline();
    fossil_print("%s", blob_str(&zCmd));  /* Show the base of the SSH command */
  }
  if( pUrlData->user && pUrlData->user[0] ){
    zHost = mprintf("%s@%s", pUrlData->user, pUrlData->name);
  }else{
    zHost = mprintf("%s", pUrlData->name);
  }
  n = blob_size(&zCmd);
  blob_append(&zCmd, " ", 1);
  shell_escape(&zCmd, zHost);
  blob_append(&zCmd, " ", 1);
  /* Fix: the mprintf() results were previously passed straight into
  ** shell_escape() and leaked; hold them in zTmp and free them. */
  zTmp = mprintf("%s", pUrlData->fossil);
  shell_escape(&zCmd, zTmp);
  free(zTmp);
  blob_append(&zCmd, " test-http", 10);
  if( pUrlData->path && pUrlData->path[0] ){
    blob_append(&zCmd, " ", 1);
    zTmp = mprintf("%s", pUrlData->path);
    shell_escape(&zCmd, zTmp);
    free(zTmp);
  }
  if( g.fSshTrace ){
    fossil_print("%s\n", blob_str(&zCmd)+n);  /* Show tail of SSH command */
  }
  free(zHost);
  popen2(blob_str(&zCmd), &sshIn, &sshOut, &sshPid);
  if( sshPid==0 ){
    socket_set_errmsg("cannot start ssh tunnel using [%b]", &zCmd);
  }
  blob_reset(&zCmd);
  return sshPid==0;
}
/* ** Make sure empty directories are created */ void ensure_empty_dirs_created(void){ /* Make empty directories? */ char *zEmptyDirs = db_get("empty-dirs", 0); if( zEmptyDirs!=0 ){ char *bc; Blob dirName; Blob dirsList; blob_zero(&dirsList); blob_init(&dirsList, zEmptyDirs, strlen(zEmptyDirs)); /* Replace commas by spaces */ bc = blob_str(&dirsList); while( (*bc)!='\0' ){ if( (*bc)==',' ) { *bc = ' '; } ++bc; } /* Make directories */ blob_zero(&dirName); while( blob_token(&dirsList, &dirName) ){ const char *zDir = blob_str(&dirName); /* Make full pathname of the directory */ Blob path; const char *zPath; blob_zero(&path); blob_appendf(&path, "%s/%s", g.zLocalRoot, zDir); zPath = blob_str(&path); /* Handle various cases of existence of the directory */ switch( file_wd_isdir(zPath) ){ case 0: { /* doesn't exist */ if( file_mkdir(zPath, 0)!=0 ) { fossil_warning("couldn't create directory %s as " "required by empty-dirs setting", zDir); } break; } case 1: { /* exists, and is a directory */ /* do nothing - required directory exists already */ break; } case 2: { /* exists, but isn't a directory */ fossil_warning("file %s found, but a directory is required " "by empty-dirs setting", zDir); } } blob_reset(&path); } } }
/*
** TH1 command: combobox NAME TEXT-LIST NUMLINES
**
** Generate an HTML combobox.  NAME is both the name of the
** CGI parameter and the name of a variable that contains the
** currently selected value.  TEXT-LIST is a list of possible
** values for the combobox.  NUMLINES is 1 for a true combobox.
** If NUMLINES is greater than one then the display is a listbox
** with the number of lines given.
*/
static int comboboxCmd(
  Th_Interp *interp,
  void *p,
  int argc,
  const char **argv,
  int *argl
){
  if( argc!=4 ){
    return Th_WrongNumArgs(interp, "combobox NAME TEXT-LIST NUMLINES");
  }
  if( enableOutput ){
    int height;          /* NUMLINES: 1 => combobox, >1 => listbox */
    Blob name;           /* NAME argument as a blob */
    int nValue;          /* length of the currently selected value */
    const char *zValue;  /* currently selected value, or NULL */
    char *z, *zH;
    int nElem;           /* number of entries in TEXT-LIST */
    int *aszElem;        /* lengths of each TEXT-LIST entry */
    char **azElem;       /* the TEXT-LIST entries themselves */
    int i;

    if( Th_ToInt(interp, argv[3], argl[3], &height) ) return TH_ERROR;
    Th_SplitList(interp, argv[2], argl[2], &azElem, &aszElem, &nElem);
    blob_init(&name, (char*)argv[1], argl[1]);
    zValue = Th_Fetch(blob_str(&name), &nValue);
    z = mprintf("<select name=\"%z\" size=\"%d\">",
                htmlize(blob_buffer(&name), blob_size(&name)), height);
    sendText(z, -1, 0);
    free(z);
    blob_reset(&name);
    for(i=0; i<nElem; i++){
      zH = htmlize((char*)azElem[i], aszElem[i]);
      /* Mark an entry selected when its length and bytes match the
      ** currently stored value. */
      if( zValue && aszElem[i]==nValue
          && memcmp(zValue, azElem[i], nValue)==0 ){
        z = mprintf("<option value=\"%s\" selected=\"selected\">%s</option>",
                     zH, zH);
      }else{
        z = mprintf("<option value=\"%s\">%s</option>", zH, zH);
      }
      free(zH);
      sendText(z, -1, 0);
      free(z);
    }
    sendText("</select>", -1, 0);
    /* NOTE(review): only azElem is released here; presumably Th_SplitList
    ** allocates aszElem within the same block — confirm against the Th
    ** interpreter's allocator before "fixing" this. */
    Th_Free(interp, azElem);
  }
  return TH_OK;
}
/*
** COMMAND: test-name-to-id
**
** Convert a name to a full artifact ID.
*/
void test_name_to_id(void){
  int ix;
  Blob nm;

  db_must_be_within_tree();
  for(ix=2; ix<g.argc; ix++){
    const char *zArg = g.argv[ix];

    blob_init(&nm, zArg, -1);
    fossil_print("%s -> ", zArg);
    /* Resolve the name; on success the blob holds the full artifact ID. */
    if( name_to_uuid(&nm, 1, "*")==0 ){
      fossil_print("%s\n", blob_buffer(&nm));
    }else{
      fossil_print("ERROR: %s\n", g.zErrMsg);
      fossil_error_reset();
    }
    blob_reset(&nm);
  }
}
/* "arsc dump" subcommand: memory-map the given resource file or APK,
 * wrap the mapping in a blob, dump it, and clean up.  Returns 0. */
int cmd_dump(int argc, char **argv) {
	struct mapped_file map;
	struct blob *blob;
	argc = parse_options(dump_option_specs, argc, argv);
	die_if(argc == 0, "usage: arsc dump <resource-file-or-apk>");
	map_file(argv[0], &map);
	/* NOTE(review): blob is an uninitialized pointer and &blob (a
	 * struct blob **) is passed here, while other call sites in this
	 * tree pass a struct blob * — presumably this blob_init variant
	 * allocates the blob (paired with blob_destroy below); confirm
	 * against the blob API declaration. */
	blob_init(&blob, map.data, map.data_size);
	dump(blob);
	blob_destroy(blob);
	unmap_file(&map);
	return 0;
}
/*
** TH command: wiki STRING
**
** Render the input string as wiki.
*/
static int wikiCmd(
  Th_Interp *interp,
  void *p,
  int argc,
  const char **argv,
  int *argl
){
  Blob in;

  if( argc!=2 ){
    return Th_WrongNumArgs(interp, "wiki STRING");
  }
  if( !enableOutput ) return TH_OK;

  /* Wrap the argument in a blob and render it inline. */
  blob_init(&in, (char*)argv[1], argl[1]);
  wiki_convert(&in, 0, WIKI_INLINE);
  blob_reset(&in);
  return TH_OK;
}
/* Example ubus method handler: builds a small reply table
 * ({ foo: "bar", bar: 11 }); the actual reply send is currently
 * commented out.  Always returns 0. */
static int test_method(struct ubus_method *self, struct ubus_context *ctx,
		struct ubus_object *obj, struct ubus_request *req, struct blob_field *msg){
	void *t;
	struct blob bb;
	blob_init(&bb, 0, 0);
	t = blob_open_table(&bb);
	blob_put_string(&bb, "foo");
	blob_put_string(&bb, "bar");
	blob_put_string(&bb, "bar");
	blob_put_int(&bb, 11);
	blob_close_table(&bb, t);
	//ubus_send_reply(ctx, req, blob_head(&bb));
	/* Fix: release the buffer allocated by blob_init() (was leaked). */
	blob_free(&bb);
	return 0;
}
/* ** Recursively read all files from the directory zPath and install ** every file read as a new artifact in the repository. */ void recon_read_dir(char *zPath){ DIR *d; struct dirent *pEntry; Blob aContent; /* content of the just read artifact */ static int nFileRead = 0; void *zUnicodePath; char *zUtf8Name; zUnicodePath = fossil_utf8_to_filename(zPath); d = opendir(zUnicodePath); if( d ){ while( (pEntry=readdir(d))!=0 ){ Blob path; char *zSubpath; if( pEntry->d_name[0]=='.' ){ continue; } zUtf8Name = fossil_filename_to_utf8(pEntry->d_name); zSubpath = mprintf("%s/%s", zPath, zUtf8Name); fossil_filename_free(zUtf8Name); if( file_isdir(zSubpath)==1 ){ recon_read_dir(zSubpath); } blob_init(&path, 0, 0); blob_appendf(&path, "%s", zSubpath); if( blob_read_from_file(&aContent, blob_str(&path))==-1 ){ fossil_fatal("some unknown error occurred while reading \"%s\"", blob_str(&path)); } content_put(&aContent); blob_reset(&path); blob_reset(&aContent); free(zSubpath); fossil_print("\r%d", ++nFileRead); fflush(stdout); } closedir(d); }else { fossil_fatal("encountered error %d while trying to open \"%s\".", errno, g.argv[3]); } fossil_filename_free(zUnicodePath); }
/* Parse the JSON string passed as the first Lua argument into a blob and
 * push the result onto the Lua stack as a table.  Falls back to an empty
 * object when the argument is absent/not a string or the JSON is invalid.
 * Returns 1 (one Lua result). */
static int l_json_parse(lua_State *L){
	const char *str = lua_tostring(L, 1);
	struct blob tmp;
	blob_init(&tmp, 0, 0);
	/* Fix: lua_tostring() returns NULL for non-string arguments, so guard
	 * before handing the pointer to blob_put_json(). */
	if(!str || !blob_put_json(&tmp, str)){
		// put empty object if json was invalid!
		blob_offset_t b = blob_open_table(&tmp);
		blob_close_table(&tmp, b);
	}
	orange_lua_blob_to_table(L, blob_field_first_child(blob_head(&tmp)), true);
	blob_free(&tmp);
	return 1;
}
/* Test that we detect overrun. */
static void
test_overrun(void)
{
   struct blob blob;
   struct blob_reader reader;
   uint32_t canary = 0xdeadbeef;

   /* Write exactly one uint32 ... */
   blob_init(&blob);
   blob_write_uint32(&blob, canary);

   blob_reader_init(&reader, blob.data, blob.size);

   /* ... the first read succeeds and leaves the flag clear ... */
   expect_equal(canary, blob_read_uint32(&reader), "read before overrun");
   expect_equal(false, reader.overrun, "overrun flag not set");

   /* ... and the second read returns 0 and trips the flag. */
   expect_equal(0, blob_read_uint32(&reader), "read at overrun");
   expect_equal(true, reader.overrun, "overrun flag set");

   blob_finish(&blob);
}
/* Manual integration test for the raw-socket transport: client1 listens on
 * a unix socket, client2 and client3 connect to it, client2 broadcasts one
 * int blob, and then all three sockets are pumped forever.
 * NOTE: never returns — the event loop below is infinite. */
int main(int argc, char **argv){
	ubus_socket_t client1 = ubus_rawsocket_new();
	ubus_socket_t client2 = ubus_rawsocket_new();
	ubus_socket_t client3 = ubus_rawsocket_new();

	ubus_socket_on_message(client1, &on_message1);
	ubus_socket_on_message(client2, &on_message2);
	ubus_socket_on_message(client3, &on_message3);

	if(ubus_socket_listen(client1, "client1.sock") < 0){
		printf("client1: could not listen!\n");
	}
	printf("trying to connect..\n");
	if(ubus_socket_connect(client2, "client1.sock", NULL) < 0){
		printf("client2: could not connect!\n");
	}
	printf("client3 connecting..\n");
	if(ubus_socket_connect(client3, "client1.sock", NULL) < 0){
		printf("client3: could not connect!\n");
	}
	printf("processing events..\n");

	/* One broadcast payload: a single int field. */
	struct blob buf;
	blob_init(&buf, 0, 0);
	blob_put_int(&buf, 123);
	ubus_socket_send(client2, UBUS_PEER_BROADCAST, blob_head(&buf));

	/* Pump all three sockets forever (0 = non-blocking/poll each pass). */
	while(true){
		//ubus_socket_send(client2, 0, 0, blob_head(&buf));
		ubus_socket_handle_events(client1, 0);
		ubus_socket_handle_events(client2, 0);
		ubus_socket_handle_events(client3, 0);
	}
	return 0;
}
/* Test that we can read and write some large objects, (exercising the code in
 * the blob_write functions to realloc blob->data.
 */
static void
test_big_objects(void)
{
   void *mem_ctx = ralloc_context(NULL);
   struct blob blob;
   struct blob_reader reader;
   int chunk_size = 1000;
   int n_chunks = 1000;
   size_t idx;
   char *pattern;

   blob_init(&blob);

   /* Fill a scratch buffer with a repeating byte pattern. */
   pattern = ralloc_size(mem_ctx, chunk_size);
   for (idx = 0; idx < chunk_size; idx++)
      pattern[idx] = idx % 256;

   /* Append the pattern many times, forcing the blob to grow. */
   for (idx = 0; idx < n_chunks; idx++)
      blob_write_bytes(&blob, pattern, chunk_size);

   blob_reader_init(&reader, blob.data, blob.size);

   /* Read back every chunk and confirm each matches the pattern. */
   for (idx = 0; idx < n_chunks; idx++) {
      expect_equal_bytes((uint8_t *) pattern,
                         blob_read_bytes(&reader, chunk_size),
                         chunk_size, "read of large objects");
   }

   expect_equal(reader.end - reader.data, reader.current - reader.data,
                "number of bytes read reading large objects");

   expect_equal(false, reader.overrun,
                "overrun flag not set reading large objects");

   blob_finish(&blob);
   ralloc_free(mem_ctx);
}
/*
** Create empty directories specified by the empty-dirs setting.
**
** The setting is a comma-separated list of directory names relative to
** g.zLocalRoot.  Each is created if absent; a warning is issued when
** creation fails or a non-directory file occupies the name.
*/
void ensure_empty_dirs_created(void) {
  char *zEmptyDirs = db_get("empty-dirs", 0);
  if( zEmptyDirs!=0 ) {
    int i;
    Blob dirName;
    Blob dirsList;

    /* Work on a private copy so the commas can be rewritten in place. */
    zEmptyDirs = fossil_strdup(zEmptyDirs);
    /* Turn the comma-separated list into a space-separated token list. */
    for(i=0; zEmptyDirs[i]; i++) {
      if( zEmptyDirs[i]==',' ) zEmptyDirs[i] = ' ';
    }
    blob_init(&dirsList, zEmptyDirs, -1);
    while( blob_token(&dirsList, &dirName) ) {
      char *zDir = blob_str(&dirName);
      char *zPath = mprintf("%s/%s", g.zLocalRoot, zDir);
      switch( file_wd_isdir(zPath) ) {
        case 0: { /* doesn't exist */
          fossil_free(zPath);
          /* Append "/x" so file_mkfolder() creates the directory itself,
          ** not just its parents. */
          zPath = mprintf("%s/%s/x", g.zLocalRoot, zDir);
          if( file_mkfolder(zPath, 0, 1)!=0 ) {
            fossil_warning("couldn't create directory %s as "
                           "required by empty-dirs setting", zDir);
          }
          break;
        }
        case 1: { /* exists, and is a directory */
          /* do nothing - required directory exists already */
          break;
        }
        case 2: { /* exists, but isn't a directory */
          fossil_warning("file %s found, but a directory is required "
                         "by empty-dirs setting", zDir);
        }
      }
      fossil_free(zPath);
      blob_reset(&dirName);
    }
    blob_reset(&dirsList);
    fossil_free(zEmptyDirs);
  }
}
/* Initialize a ubus method descriptor: copy the method name (if any),
 * record the handler callback, and start with an empty signature blob. */
void ubus_method_init(struct ubus_method *self, const char *name, ubus_method_handler_t cb){
	self->name = name ? strdup(name) : 0;
	self->handler = cb;
	blob_init(&self->signature, 0, 0);
}
/*
** Initialize a blob to a nul-terminated string obtained from fossil_malloc().
** The blob will take responsibility for freeing the string.
**
** pBlob:  blob to (re)initialize.
** zStr:   malloc()ed, nul-terminated string whose ownership transfers to
**         the blob.
*/
void blob_set_dynamic(Blob *pBlob, char *zStr){
  blob_init(pBlob, zStr, -1);           /* -1: length measured internally */
  pBlob->xRealloc = blobReallocMalloc;  /* mark the content as owned/resizable */
}
/*
** Global initialization of the transport layer.
**
** Only the SSH transport needs global setup: build the SSH command
** (host, port, and — on win32/PLINK only — password), launch it via
** popen2(), then verify the tunnel with an "echo test" round trip.
** Fatal error if the tunnel cannot be started or does not echo back.
*/
void transport_global_startup(void){
  if( g.urlIsSsh ){
    /* Only SSH requires a global initialization.  For SSH we need to create
    ** and run an SSH command to talk to the remote machine.
    */
    const char *zSsh;  /* The base SSH command */
    Blob zCmd;         /* The SSH command */
    char *zHost;       /* The host name to contact */
    char zIn[200];     /* An input line received back from remote */

    zSsh = db_get("ssh-command", zDefaultSshCmd);
    blob_init(&zCmd, zSsh, -1);
    if( g.urlPort!=g.urlDfltPort ){
#ifdef __MINGW32__
      blob_appendf(&zCmd, " -P %d", g.urlPort);
#else
      blob_appendf(&zCmd, " -p %d", g.urlPort);
#endif
    }
    if( g.urlUser && g.urlUser[0] ){
      zHost = mprintf("%s@%s", g.urlUser, g.urlName);
#ifdef __MINGW32__
      /* Only win32 (and specifically PLINK.EXE) supports the -pw option */
      if( g.urlPasswd && g.urlPasswd[0] ){
        Blob pw;
        blob_zero(&pw);
        if( g.urlPasswd[0]=='*' ){
          /* A leading '*' means: prompt the user interactively. */
          char *zPrompt;
          zPrompt = mprintf("Password for [%s]: ", zHost);
          prompt_for_password(zPrompt, &pw, 0);
          free(zPrompt);
        }else{
          blob_init(&pw, g.urlPasswd, -1);
        }
        blob_append(&zCmd, " -pw ", -1);
        shell_escape(&zCmd, blob_str(&pw));
        blob_reset(&pw);
      }
#endif
    }else{
      zHost = mprintf("%s", g.urlName);
    }
    blob_append(&zCmd, " ", 1);
    shell_escape(&zCmd, zHost);
    free(zHost);
    /* printf("%s\n", blob_str(&zCmd)); */
    popen2(blob_str(&zCmd), &sshIn, &sshOut, &sshPid);
    if( sshPid==0 ){
      fossil_fatal("cannot start ssh tunnel using [%b]", &zCmd);
    }
    blob_reset(&zCmd);

    /* Send an "echo" command to the other side to make sure that the
    ** connection is up and working.
    */
    fprintf(sshOut, "echo test\n");
    fflush(sshOut);
    sshin_read(zIn, sizeof(zIn));
    if( memcmp(zIn, "test", 4)!=0 ){
      pclose2(sshIn, sshOut, sshPid);
      fossil_fatal("ssh connection failed: [%s]", zIn);
    }
  }
}
/* Unit test for blob_new/blob_init/blob_dup/blob_compare:
 *  - a fresh blob's memory size equals its initial content size;
 *  - blob_init() fails (non-zero) when the new content exceeds the blob's
 *    memory size, leaving size and memory size unchanged;
 *  - blob_init() succeeds for content that fits, without shrinking memory;
 *  - blob_dup() produces an equal blob sized to its content;
 *  - blob_compare() returns sign/magnitude based on the size difference. */
static void test_blob_new(void) {
    int rc;
    char a[BUF_SIZE];
    blob_t *bp;
    uint32_t ms;        /* memory size captured right after creation */
    blob_t *dupbp;
    int32_t c;

    memset(a, 'a', sizeof(a));

    /* Create a blob holding BUF_SIZE-1 bytes. */
    bp = NULL;
    rc = blob_new(a, sizeof(a)-1, &bp);
    assert(rc == 0 && bp != NULL);
    assert(blob_size(bp) == sizeof(a) - 1);
    ms = blob_memory_size(bp);
    assert(blob_size(bp) == ms);

    /* Re-init with one byte more than the memory size: must fail and
     * leave the blob untouched. */
    rc = blob_init(bp, a, sizeof(a));
    assert(rc != 0);
    assert(blob_size(bp) == sizeof(a) - 1);
    assert(blob_size(bp) == ms);
    assert(ms == blob_memory_size(bp));

    /* Re-init with a single byte: succeeds, memory size is retained. */
    rc = blob_init(bp, a, 1);
    assert(rc == 0);
    assert(blob_size(bp) == 1);
    assert(ms == blob_memory_size(bp));

    /* Duplicate: the copy is content-sized and compares equal both ways. */
    dupbp = NULL;
    rc = blob_dup(bp, &dupbp);
    assert(rc == 0 && dupbp != NULL);
    assert(blob_size(dupbp) == 1);
    assert(blob_memory_size(dupbp) == 1);
    c = blob_compare(bp, dupbp);
    assert(c == 0);
    c = blob_compare(dupbp, bp);
    assert(c == 0);

    /* Oversized re-init still fails after the dup. */
    rc = blob_init(bp, a, sizeof(a));
    assert(rc != 0);
    assert(blob_size(bp) == 1);
    assert(ms == blob_memory_size(bp));

    /* Re-init back to full capacity succeeds. */
    rc = blob_init(bp, a, sizeof(a)-1);
    assert(rc == 0);
    assert(blob_size(bp) == sizeof(a) - 1);
    assert(blob_size(bp) == ms);
    assert(ms == blob_memory_size(bp));

    /* With equal prefixes, compare reflects the size difference. */
    c = blob_compare(bp, dupbp);
    assert(c > 0);
    assert(c == blob_size(bp) - blob_size(dupbp));
    c = blob_compare(dupbp, bp);
    assert(c < 0);
    assert(c == blob_size(dupbp) - blob_size(bp));

    blob_free(bp);
    bp = NULL;
    blob_free(dupbp);
    dupbp = NULL;
}
/*
** COMMAND: user
**
** Usage: %fossil user SUBCOMMAND ...  ?-R|--repository FILE?
**
** Run various subcommands on users of the open repository or of
** the repository identified by the -R or --repository option.
**
**    %fossil user capabilities USERNAME ?STRING?
**
**        Query or set the capabilities for user USERNAME
**
**    %fossil user default ?USERNAME?
**
**        Query or set the default user.  The default user is the
**        user for command-line interaction.
**
**    %fossil user list
**
**        List all users known to the repository
**
**    %fossil user new ?USERNAME? ?CONTACT-INFO? ?PASSWORD?
**
**        Create a new user in the repository.  Users can never be
**        deleted.  They can be denied all access but they must continue
**        to exist in the database.
**
**    %fossil user password USERNAME ?PASSWORD?
**
**        Change the web access password for a user.
*/
void user_cmd(void){
  int n;
  db_find_and_open_repository(1);
  if( g.argc<3 ){
    usage("capabilities|default|list|new|password ...");
  }
  /* Subcommands are matched by unambiguous prefix (at least 2 chars). */
  n = strlen(g.argv[2]);
  if( n>=2 && strncmp(g.argv[2],"new",n)==0 ){
    Blob passwd, login, contact;
    char *zPw;
    if( g.argc>=4 ){
      blob_init(&login, g.argv[3], -1);
    }else{
      /* NOTE(review): the following statement appears redacted/corrupted
      ** ("******") in this copy of the source; judging from the error
      ** message below, it originally prompted for the login and then
      ** checked db_exists() for a duplicate.  Preserved byte-for-byte. */
      prompt_user("login: "******"SELECT 1 FROM user WHERE login=%B", &login) ){
      fossil_fatal("user %b already exists", &login);
    }
    if( g.argc>=5 ){
      blob_init(&contact, g.argv[4], -1);
    }else{
      prompt_user("contact-info: ", &contact);
    }
    if( g.argc>=6 ){
      blob_init(&passwd, g.argv[5], -1);
    }else{
      /* NOTE(review): another redacted span ("******"); originally this
      ** prompted for a password, hashed it into zPw, and ran the INSERT
      ** below.  Preserved byte-for-byte. */
      prompt_for_password("password: "******"INSERT INTO user(login,pw,cap,info)"
        "VALUES(%B,%Q,'v',%B)",
      &login, zPw, &contact
    );
    free(zPw);
  }else if( n>=2 && strncmp(g.argv[2],"default",n)==0 ){
    user_select();
    if( g.argc==3 ){
      printf("%s\n", g.zLogin);
    }else{
      if( !db_exists("SELECT 1 FROM user WHERE login=%Q", g.argv[3]) ){
        fossil_fatal("no such user: %s", g.argv[3]);
      }
      /* Store per-checkout when one is open, else repository-wide. */
      if( g.localOpen ){
        db_lset("default-user", g.argv[3]);
      }else{
        db_set("default-user", g.argv[3], 0);
      }
    }
  }else if( n>=2 && strncmp(g.argv[2],"list",n)==0 ){
    Stmt q;
    db_prepare(&q, "SELECT login, info FROM user ORDER BY login");
    while( db_step(&q)==SQLITE_ROW ){
      printf("%-12s %s\n", db_column_text(&q, 0), db_column_text(&q, 1));
    }
    db_finalize(&q);
  }else if( n>=2 && strncmp(g.argv[2],"password",2)==0 ){
    char *zPrompt;
    int uid;
    Blob pw;
    if( g.argc!=4 && g.argc!=5 ) usage("password USERNAME ?NEW-PASSWORD?");
    uid = db_int(0, "SELECT uid FROM user WHERE login=%Q", g.argv[3]);
    if( uid==0 ){
      fossil_fatal("no such user: %s", g.argv[3]);
    }
    if( g.argc==5 ){
      blob_init(&pw, g.argv[4], -1);
    }else{
      zPrompt = mprintf("new passwd for %s: ", g.argv[3]);
      prompt_for_password(zPrompt, &pw, 1);
    }
    if( blob_size(&pw)==0 ){
      printf("password unchanged\n");
    }else{
      /* Store the salted/derived secret, never the clear password. */
      char *zSecret = sha1_shared_secret(blob_str(&pw), g.argv[3]);
      db_multi_exec("UPDATE user SET pw=%Q WHERE uid=%d", zSecret, uid);
      free(zSecret);
    }
  }else if( n>=2 && strncmp(g.argv[2],"capabilities",2)==0 ){
    int uid;
    if( g.argc!=4 && g.argc!=5 ){
      usage("user capabilities USERNAME ?PERMISSIONS?");
    }
    uid = db_int(0, "SELECT uid FROM user WHERE login=%Q", g.argv[3]);
    if( uid==0 ){
      fossil_fatal("no such user: %s", g.argv[3]);
    }
    if( g.argc==5 ){
      db_multi_exec(
        "UPDATE user SET cap=%Q WHERE uid=%d", g.argv[4],
        uid
      );
    }
    /* Always echo the (possibly updated) capability string. */
    printf("%s\n", db_text(0, "SELECT cap FROM user WHERE uid=%d", uid));
  }else{
    fossil_panic("user subcommand should be one of: "
                 "capabilities default list new password");
  }
}
/* Integration test for the orange RPC layer: logs in as admin, exercises
 * echo / missing-method / C-call dispatch, then verifies that a deferred
 * shell call runs asynchronously (the output file must NOT exist after
 * 2s and MUST exist after 5s, containing the expected cookie). */
int main(void){
	orange_debug_level+=4;

	struct orange *app = orange_new("test-plugins", "test-pwfile", "test-acls");
	struct blob out;
	struct blob args;

	blob_init(&out, 0, 0);
	blob_init(&args, 0, 0);

	/* Build the echo arguments:
	 * { cmd: "...", msg: "Hello You", arr: [1, {a:"b"}] } */
	blob_offset_t o = blob_open_table(&args);
	blob_put_string(&args, "cmd");
	blob_put_string(&args, "echo 'Hello From Defferred!'");
	blob_put_string(&args, "msg");
	blob_put_string(&args, "Hello You");
	blob_put_string(&args, "arr");
	blob_offset_t a = blob_open_array(&args);
	blob_put_int(&args, 1);
	blob_offset_t t = blob_open_table(&args);
	blob_put_string(&args, "a");
	blob_put_string(&args, "b");
	blob_close_table(&args, t);
	blob_close_array(&args, a);
	blob_close_table(&args, o);

	struct orange_user *admin = orange_user_new("admin");
	orange_user_add_acl(admin, "test-acl");
	orange_add_user(app, &admin);

	struct orange_sid sid;
	TEST(orange_login_plaintext(app, "admin", "admin", &sid) == 0);

	TEST(orange_call(app, sid.hash, "/test", "echo", blob_field_first_child(blob_head(&args)), &out) == 0);
	TEST(orange_call(app, sid.hash, "/test", "noexist", blob_field_first_child(blob_head(&args)), &out) < 0);
	TEST(orange_call(app, sid.hash, "/test", "test_c_calls", blob_field_first_child(blob_head(&args)), &out) == 0);

	// test deferred
	struct blob def_args;
	blob_init(&def_args, 0, 0);
	o = blob_open_table(&def_args);
	blob_put_string(&def_args, "cmd");
	char cmd[64];
	char cookie[16];
	/* Use the current time as a unique cookie written by the deferred
	 * command, so a stale file from a previous run cannot pass. */
	snprintf(cookie, sizeof(cookie), "%lu", time(NULL));
	snprintf(cmd, sizeof(cmd), "printf %s > test-deferred.out", cookie);
	blob_put_string(&def_args, cmd);
	blob_close_table(&def_args, o);
	int r = system("rm test-deferred.out");
	TEST(orange_call(app, sid.hash, "/test", "deferred_shell", blob_field_first_child(blob_head(&def_args)), &out) == 0);
	FILE *f;
	/* The deferred command must not have run yet after 2s ... */
	sleep(2);
	TEST((f = fopen("test-deferred.out", "r")) == NULL);
	/* ... but must have completed after 3 more seconds. */
	sleep(3);
	TEST(f = fopen("test-deferred.out", "r"));
	// try to read the file
	char cmdr[32] = {0};
	TEST(fread(cmdr, 1, 32, f) > 0);
	TEST(strcmp(cookie, cmdr) == 0);
	/* NOTE(review): f is never fclose()d and r (system() result) is
	 * unused — harmless in a short-lived test, but worth confirming. */
	blob_free(&def_args);
	TEST(orange_logout(app, sid.hash) == 0);

	blob_free(&out);
	blob_free(&args);

	orange_delete(&app);
	return 0;
}
/* Test at least one call of each blob_write_foo and blob_read_foo function,
 * verifying that we read out everything we wrote, that every bytes is
 * consumed, and that the overrun bit is not set.
 */
static void
test_write_and_read_functions (void)
{
   struct blob blob;
   struct blob_reader reader;
   ssize_t reserved;                 /* offset returned by blob_reserve_bytes */
   size_t str_offset, uint_offset;   /* offsets of the two placeholders */
   uint8_t reserve_buf[sizeof(reserve_test_str)];

   blob_init(&blob);

   /*** Test blob by writing one of every possible kind of value. */

   blob_write_bytes(&blob, bytes_test_str, sizeof(bytes_test_str));

   /* Reserve space first, fill it in afterwards via overwrite. */
   reserved = blob_reserve_bytes(&blob, sizeof(reserve_test_str));
   blob_overwrite_bytes(&blob, reserved, reserve_test_str, sizeof(reserve_test_str));

   /* Write a placeholder, (to be replaced later via overwrite_bytes) */
   str_offset = blob.size;
   blob_write_bytes(&blob, placeholder_str, sizeof(placeholder_str));

   blob_write_uint32(&blob, uint32_test);

   /* Write a placeholder, (to be replaced later via overwrite_uint32) */
   uint_offset = blob.size;
   blob_write_uint32(&blob, uint32_placeholder);

   blob_write_uint64(&blob, uint64_test);

   blob_write_intptr(&blob, (intptr_t) &blob);

   blob_write_string(&blob, string_test_str);

   /* Finally, overwrite our placeholders. */
   blob_overwrite_bytes(&blob, str_offset, overwrite_test_str,
                        sizeof(overwrite_test_str));
   blob_overwrite_uint32(&blob, uint_offset, uint32_overwrite);

   /*** Now read each value and verify. */
   blob_reader_init(&reader, blob.data, blob.size);

   expect_equal_str(bytes_test_str,
                    blob_read_bytes(&reader, sizeof(bytes_test_str)),
                    "blob_write/read_bytes");

   blob_copy_bytes(&reader, reserve_buf, sizeof(reserve_buf));
   expect_equal_str(reserve_test_str, (char *) reserve_buf,
                    "blob_reserve_bytes/blob_copy_bytes");

   expect_equal_str(overwrite_test_str,
                    blob_read_bytes(&reader, sizeof(overwrite_test_str)),
                    "blob_overwrite_bytes");

   expect_equal(uint32_test, blob_read_uint32(&reader),
                "blob_write/read_uint32");
   expect_equal(uint32_overwrite, blob_read_uint32(&reader),
                "blob_overwrite_uint32");
   expect_equal(uint64_test, blob_read_uint64(&reader),
                "blob_write/read_uint64");
   expect_equal((intptr_t) &blob, blob_read_intptr(&reader),
                "blob_write/read_intptr");
   expect_equal_str(string_test_str, blob_read_string(&reader),
                    "blob_write/read_string");

   /* Every byte written must have been consumed, without overrun. */
   expect_equal(reader.end - reader.data, reader.current - reader.data,
                "read_consumes_all_bytes");
   expect_equal(false, reader.overrun, "read_does_not_overrun");

   blob_finish(&blob);
}
/*
** Initialize a blob to a nul-terminated string.
** Any prior data in the blob is discarded.
**
** pBlob:  blob to (re)initialize.
** zStr:   nul-terminated string; NOT copied and NOT owned by the blob
**         (contrast with blob_set_dynamic, which takes ownership).
*/
void blob_set(Blob *pBlob, const char *zStr){
  blob_init(pBlob, zStr, -1);  /* -1: length measured internally */
}
/*
** COMMAND: addremove
**
** Usage: %fossil addremove ?OPTIONS?
**
** Do all necessary "add" and "rm" commands to synchronize the repository
** with the content of the working checkout:
**
**  *  All files in the checkout but not in the repository (that is,
**     all files displayed using the "extra" command) are added as
**     if by the "add" command.
**
**  *  All files in the repository but missing from the checkout (that is,
**     all files that show as MISSING with the "status" command) are
**     removed as if by the "rm" command.
**
** The command does not "commit".  You must run the "commit" separately
** as a separate step.
**
** Files and directories whose names begin with "." are ignored unless
** the --dotfiles option is used.
**
** The --ignore option overrides the "ignore-glob" setting, as does the
** --case-sensitive option with the "case-sensitive" setting.  See the
** documentation on the "settings" command for further information.
**
** The --test option shows what would happen without actually doing anything.
**
** This command can be used to track third party software.
**
** Options:
**   --case-sensitive <BOOL> override case-sensitive setting
**   --dotfiles              include files beginning with a dot (".")
**   --ignore <CSG>          ignore files matching patterns from the
**                           comma separated list of glob patterns.
**   --test                  If given, display instead of run actions
**
** See also: add, rm
*/
void addremove_cmd(void){
  Blob path;
  const char *zIgnoreFlag = find_option("ignore",0,1);
  unsigned scanFlags = find_option("dotfiles",0,0)!=0 ? SCAN_ALL : 0;
  int isTest = find_option("test",0,0)!=0;
  int caseSensitive;
  int n;
  Stmt q;
  int vid;
  int nAdd = 0;
  int nDelete = 0;
  Glob *pIgnore;

  capture_case_sensitive_option();
  db_must_be_within_tree();
  caseSensitive = filenames_are_case_sensitive();
  if( zIgnoreFlag==0 ){
    zIgnoreFlag = db_get("ignore-glob", 0);
  }
  vid = db_lget_int("checkout",0);
  if( vid==0 ){
    fossil_panic("no checkout to add to");
  }
  db_begin_transaction();

  /* step 1:
  ** Populate the temp table "sfile" with the names of all unmanaged
  ** files currently in the check-out, except for files that match the
  ** --ignore or ignore-glob patterns and dot-files.  Then add all of
  ** the files in the sfile temp table to the set of managed files.
  */
  db_multi_exec("CREATE TEMP TABLE sfile(x TEXT PRIMARY KEY)");
  n = strlen(g.zLocalRoot);
  /* n-1: drop the trailing '/' from g.zLocalRoot */
  blob_init(&path, g.zLocalRoot, n-1);
  /* now we read the complete file structure into a temp table */
  pIgnore = glob_create(zIgnoreFlag);
  vfile_scan(&path, blob_size(&path), scanFlags, pIgnore);
  glob_free(pIgnore);
  nAdd = add_files_in_sfile(vid, caseSensitive);

  /* step 2: search for missing files */
  db_prepare(&q,
      "SELECT pathname, %Q || pathname, deleted FROM vfile"
      " WHERE NOT deleted"
      " ORDER BY 1",
      g.zLocalRoot
  );
  while( db_step(&q)==SQLITE_ROW ){
    const char * zFile;
    const char * zPath;

    zFile = db_column_text(&q, 0);
    zPath = db_column_text(&q, 1);
    if( !file_wd_isfile_or_link(zPath) ){
      /* In --test mode only report; otherwise mark the file deleted. */
      if( !isTest ){
        db_multi_exec("UPDATE vfile SET deleted=1 WHERE pathname=%Q", zFile);
      }
      fossil_print("DELETED  %s\n", zFile);
      nDelete++;
    }
  }
  db_finalize(&q);

  /* show command summary */
  fossil_print("added %d files, deleted %d files\n", nAdd, nDelete);

  /* In --test mode isTest is non-zero, so the transaction is rolled back. */
  db_end_transaction(isTest);
}
/*
** Write content into the database.  Return the record ID.  If the
** content is already in the database, just return the record ID.
**
** If srcId is specified, then pBlob is delta content from
** the srcId record.  srcId might be a phantom.
**
** pBlob is normally uncompressed text.  But if nBlob>0 then the
** pBlob value has already been compressed and nBlob is its uncompressed
** size.  If nBlob>0 then zUuid must be valid.
**
** zUuid is the UUID of the artifact, if it is specified.  When srcId is
** specified then zUuid must always be specified.  If srcId is zero,
** and zUuid is zero then the correct zUuid is computed from pBlob.
**
** If the record already exists but is a phantom, the pBlob content
** is inserted and the phantom becomes a real record.
**
** The original content of pBlob is not disturbed.  The caller continues
** to be responsible for pBlob.  This routine does *not* take over
** responsibility for freeing pBlob.
*/
int content_put_ex(
  Blob *pBlob,              /* Content to add to the repository */
  const char *zUuid,        /* SHA1 hash of reconstructed pBlob */
  int srcId,                /* pBlob is a delta from this entry */
  int nBlob,                /* pBlob is compressed. Original size is this */
  int isPrivate             /* The content should be marked private */
){
  int size;
  int rid;
  Stmt s1;
  Blob cmpr;
  Blob hash;
  int markAsUnclustered = 0;
  int isDephantomize = 0;

  assert( g.repositoryOpen );
  assert( pBlob!=0 );
  assert( srcId==0 || zUuid!=0 );
  if( zUuid==0 ){
    assert( nBlob==0 );
    sha1sum_blob(pBlob, &hash);
  }else{
    blob_init(&hash, zUuid, -1);
  }
  /* Determine the uncompressed size to record in the blob table. */
  if( nBlob ){
    size = nBlob;
  }else{
    size = blob_size(pBlob);
    if( srcId ){
      size = delta_output_size(blob_buffer(pBlob), size);
    }
  }
  db_begin_transaction();

  /* Check to see if the entry already exists and if it does whether
  ** or not the entry is a phantom
  */
  db_prepare(&s1, "SELECT rid, size FROM blob WHERE uuid=%B", &hash);
  if( db_step(&s1)==SQLITE_ROW ){
    rid = db_column_int(&s1, 0);
    if( db_column_int(&s1, 1)>=0 || pBlob==0 ){
      /* Either the entry is not a phantom or it is a phantom but we
      ** have no data with which to dephantomize it.  In either case,
      ** there is nothing for us to do other than return the RID. */
      db_finalize(&s1);
      db_end_transaction(0);
      return rid;
    }
  }else{
    rid = 0;  /* No entry with the same UUID currently exists */
    markAsUnclustered = 1;
  }
  db_finalize(&s1);

  /* Construct a received-from ID if we do not already have one */
  if( g.rcvid==0 ){
    db_multi_exec(
       "INSERT INTO rcvfrom(uid, mtime, nonce, ipaddr)"
       "VALUES(%d, julianday('now'), %Q, %Q)",
       g.userUid, g.zNonce, g.zIpAddr
    );
    g.rcvid = db_last_insert_rowid();
  }

  /* Content may already be compressed by the caller (nBlob>0). */
  if( nBlob ){
    cmpr = pBlob[0];
  }else{
    blob_compress(pBlob, &cmpr);
  }
  if( rid>0 ){
    /* We are just adding data to a phantom */
    db_prepare(&s1,
      "UPDATE blob SET rcvid=%d, size=%d, content=:data WHERE rid=%d",
       g.rcvid, size, rid
    );
    db_bind_blob(&s1, ":data", &cmpr);
    db_exec(&s1);
    db_multi_exec("DELETE FROM phantom WHERE rid=%d", rid);
    if( srcId==0 || content_is_available(srcId) ){
      isDephantomize = 1;
      content_mark_available(rid);
    }
  }else{
    /* We are creating a new entry */
    db_prepare(&s1,
      "INSERT INTO blob(rcvid,size,uuid,content)"
      "VALUES(%d,%d,'%b',:data)",
       g.rcvid, size, &hash
    );
    db_bind_blob(&s1, ":data", &cmpr);
    db_exec(&s1);
    rid = db_last_insert_rowid();
    if( !pBlob ){
      db_multi_exec("INSERT OR IGNORE INTO phantom VALUES(%d)", rid);
    }
    if( g.markPrivate || isPrivate ){
      db_multi_exec("INSERT INTO private VALUES(%d)", rid);
      markAsUnclustered = 0;
    }
  }
  /* Only free cmpr if this routine compressed it; when nBlob>0 the
  ** compressed blob belongs to the caller. */
  if( nBlob==0 ) blob_reset(&cmpr);

  /* If the srcId is specified, then the data we just added is
  ** really a delta.  Record this fact in the delta table.
  */
  if( srcId ){
    db_multi_exec("REPLACE INTO delta(rid,srcid) VALUES(%d,%d)", rid, srcId);
  }
  if( !isDephantomize && bag_find(&contentCache.missing, rid) &&
      (srcId==0 || content_is_available(srcId)) ){
     content_mark_available(rid);
  }
  if( isDephantomize ){
    after_dephantomize(rid, 0);
  }

  /* Add the element to the unclustered table if has never been
  ** previously seen.
  */
  if( markAsUnclustered ){
    db_multi_exec("INSERT OR IGNORE INTO unclustered VALUES(%d)", rid);
  }

  /* Finish the transaction and cleanup */
  db_finalize(&s1);
  db_end_transaction(0);
  blob_reset(&hash);

  /* Make arrangements to verify that the data can be recovered
  ** before we commit */
  verify_before_commit(rid);
  return rid;
}
static void st_serialise_ir_program(struct gl_context *ctx, struct gl_program *prog, bool nir) { if (prog->driver_cache_blob) return; struct blob blob; blob_init(&blob); switch (prog->info.stage) { case MESA_SHADER_VERTEX: { struct st_vertex_program *stvp = (struct st_vertex_program *) prog; blob_write_uint32(&blob, stvp->num_inputs); blob_write_bytes(&blob, stvp->index_to_input, sizeof(stvp->index_to_input)); blob_write_bytes(&blob, stvp->input_to_index, sizeof(stvp->input_to_index)); blob_write_bytes(&blob, stvp->result_to_output, sizeof(stvp->result_to_output)); write_stream_out_to_cache(&blob, &stvp->tgsi); if (nir) write_nir_to_cache(&blob, prog); else write_tgsi_to_cache(&blob, stvp->tgsi.tokens, prog, stvp->num_tgsi_tokens); break; } case MESA_SHADER_TESS_CTRL: case MESA_SHADER_TESS_EVAL: case MESA_SHADER_GEOMETRY: { struct st_common_program *stcp = (struct st_common_program *) prog; write_stream_out_to_cache(&blob, &stcp->tgsi); if (nir) write_nir_to_cache(&blob, prog); else write_tgsi_to_cache(&blob, stcp->tgsi.tokens, prog, stcp->num_tgsi_tokens); break; } case MESA_SHADER_FRAGMENT: { struct st_fragment_program *stfp = (struct st_fragment_program *) prog; if (nir) write_nir_to_cache(&blob, prog); else write_tgsi_to_cache(&blob, stfp->tgsi.tokens, prog, stfp->num_tgsi_tokens); break; } case MESA_SHADER_COMPUTE: { struct st_compute_program *stcp = (struct st_compute_program *) prog; if (nir) write_nir_to_cache(&blob, prog); else write_tgsi_to_cache(&blob, stcp->tgsi.prog, prog, stcp->num_tgsi_tokens); break; } default: unreachable("Unsupported stage"); } blob_finish(&blob); }