int queue_envelope_walk(struct envelope *ep) { const char *e; uint64_t evpid; char evpbuf[sizeof(struct envelope)]; int r; profile_enter("queue_envelope_walk"); r = handler_envelope_walk(&evpid, evpbuf, sizeof evpbuf); profile_leave(); log_trace(TRACE_QUEUE, "queue-backend: queue_envelope_walk() -> %d (%016"PRIx64")", r, evpid); if (r == -1) return (r); if (r && queue_envelope_load_buffer(ep, evpbuf, (size_t)r)) { if ((e = envelope_validate(ep)) == NULL) { ep->id = evpid; if (env->sc_queue_flags & QUEUE_EVPCACHE) queue_envelope_cache_add(ep); return (1); } log_debug("debug: invalid envelope %016" PRIx64 ": %s", ep->id, e); (void)queue_message_corrupt(evpid_to_msgid(evpid)); } return (0); }
/*
 * hash_find() - return the data of the item matching 'key', or 0 when the
 * table is empty or holds no such item.
 */
HASHDATA *
hash_find( struct hash * hp, OBJECT * key )
{
    HASHDATA * found = 0;
    unsigned int hashed = hash_keyval( key );
#ifdef HASH_DEBUG_PROFILE
    profile_frame prof[ 1 ];
    if ( DEBUG_PROFILE )
        profile_enter( 0, prof );
#endif

    /* An empty table trivially contains no match. */
    if ( hp->items.nel )
    {
        ITEM * item = hash_search( hp, hashed, key, 0 );
        if ( item )
            found = hash_item_data( item );
    }

#ifdef HASH_DEBUG_PROFILE
    if ( DEBUG_PROFILE )
        profile_exit( prof );
#endif
    return found;
}
int queue_envelope_create(struct envelope *ep) { int r; char evpbuf[sizeof(struct envelope)]; size_t evplen; uint64_t evpid; uint32_t msgid; ep->creation = time(NULL); evplen = queue_envelope_dump_buffer(ep, evpbuf, sizeof evpbuf); if (evplen == 0) return (0); evpid = ep->id; msgid = evpid_to_msgid(evpid); profile_enter("queue_envelope_create"); r = handler_envelope_create(msgid, evpbuf, evplen, &ep->id); profile_leave(); log_trace(TRACE_QUEUE, "queue-backend: queue_envelope_create(%016"PRIx64", %zu) -> %d (%016"PRIx64")", evpid, evplen, r, ep->id); if (!r) { ep->creation = 0; ep->id = 0; } if (r && env->sc_queue_flags & QUEUE_EVPCACHE) queue_envelope_cache_add(ep); return (r); }
int queue_message_delete(uint32_t msgid) { char msgpath[PATH_MAX]; uint64_t evpid; void *iter; int r; profile_enter("queue_message_delete"); r = handler_message_delete(msgid); profile_leave(); /* in case the message is incoming */ queue_message_path(msgid, msgpath, sizeof(msgpath)); unlink(msgpath); /* remove remaining envelopes from the cache if any (on rollback) */ evpid = msgid_to_evpid(msgid); for (;;) { iter = NULL; if (!tree_iterfrom(&evpcache_tree, &iter, evpid, &evpid, NULL)) break; if (evpid_to_msgid(evpid) != msgid) break; queue_envelope_cache_del(evpid); } log_trace(TRACE_QUEUE, "queue-backend: queue_message_delete(%08"PRIx32") -> %d", msgid, r); return (r); }
/*
 * Ask the backend to move message 'msgid' to the corrupt queue.
 * Returns the backend's result (1 on success, 0 on failure).
 */
int
queue_message_corrupt(uint32_t msgid)
{
	int	ret;

	profile_enter("queue_message_corrupt");
	ret = handler_message_corrupt(msgid);
	profile_leave();

	log_trace(TRACE_QUEUE,
	    "queue-backend: queue_message_corrupt(%08"PRIx32") -> %d",
	    msgid, ret);

	return (ret);
}
/*
 * Ask the backend to create a new incoming message; on success '*msgid'
 * receives the new message id.  Returns the backend's result.
 */
int
queue_message_create(uint32_t *msgid)
{
	int	ret;

	profile_enter("queue_message_create");
	ret = handler_message_create(msgid);
	profile_leave();

	log_trace(TRACE_QUEUE,
	    "queue-backend: queue_message_create() -> %d (%08"PRIx32")",
	    ret, *msgid);

	return (ret);
}
/*
 * hash_insert() - return a pointer to the data slot for 'key', creating a
 * fresh item when none exists yet.  '*found' is set to 1 when the key was
 * already present, 0 when a new slot was handed out.
 */
HASHDATA *
hash_insert( struct hash * hp, OBJECT * key, int * found )
{
    unsigned int hashed = hash_keyval( key );
    ITEM * item;
#ifdef HASH_DEBUG_PROFILE
    profile_frame prof[ 1 ];
    if ( DEBUG_PROFILE )
        profile_enter( 0, prof );
#endif

    /* Make sure there is room for at least one more item. */
    if ( !hp->items.more )
        hashrehash( hp );

    item = hash_search( hp, hashed, key, 0 );
    if ( item )
        *found = 1;
    else
    {
        ITEM * * bucket = hash_bucket( hp, hashed );

        if ( hp->items.free )
        {
            /* Reuse a previously released item from the free list. */
            item = hp->items.free;
            hp->items.free = item->hdr.next;
            assert( hash_item_key( item ) == 0 );
        }
        else
        {
            /* Carve the next item out of the preallocated arena. */
            item = (ITEM *)hp->items.next;
            hp->items.next += hp->items.size;
        }
        hp->items.more--;

        /* Link the new item at the head of its bucket chain. */
        item->hdr.next = *bucket;
        *bucket = item;
        *found = 0;
    }

#ifdef HASH_DEBUG_PROFILE
    if ( DEBUG_PROFILE )
        profile_exit( prof );
#endif
    return hash_item_data( item );
}
/*
 * Delete envelope 'evpid' from the backend, dropping any cached copy first.
 * Returns the backend's result.
 */
int
queue_envelope_delete(uint64_t evpid)
{
	int	ret;

	if (env->sc_queue_flags & QUEUE_EVPCACHE)
		queue_envelope_cache_del(evpid);

	profile_enter("queue_envelope_delete");
	ret = handler_envelope_delete(evpid);
	profile_leave();

	log_trace(TRACE_QUEUE,
	    "queue-backend: queue_envelope_delete(%016"PRIx64") -> %d",
	    evpid, ret);

	return (ret);
}
/*
 * Load envelope 'evpid' into 'ep', preferring the in-memory cache when the
 * QUEUE_EVPCACHE flag is set.  Returns 1 on success, 0 on failure; on an
 * invalid or unparseable envelope the whole message is flagged corrupt.
 */
int
queue_envelope_load(uint64_t evpid, struct envelope *ep)
{
	const char	*e;
	char		 evpbuf[sizeof(struct envelope)];
	size_t		 evplen;
	struct envelope	*cached;

	/* Fast path: serve the envelope straight from the cache. */
	if ((env->sc_queue_flags & QUEUE_EVPCACHE) &&
	    (cached = tree_get(&evpcache_tree, evpid))) {
		*ep = *cached;
		stat_increment("queue.evpcache.load.hit", 1);
		return (1);
	}

	/* The handler is given the id through ep->id. */
	ep->id = evpid;
	profile_enter("queue_envelope_load");
	evplen = handler_envelope_load(ep->id, evpbuf, sizeof evpbuf);
	profile_leave();

	log_trace(TRACE_QUEUE,
	    "queue-backend: queue_envelope_load(%016"PRIx64") -> %zu",
	    evpid, evplen);

	/* Backend had nothing for this id: plain failure, not corruption. */
	if (evplen == 0)
		return (0);

	if (queue_envelope_load_buffer(ep, evpbuf, evplen)) {
		if ((e = envelope_validate(ep)) == NULL) {
			/* Restore the id clobbered by deserialization. */
			ep->id = evpid;
			if (env->sc_queue_flags & QUEUE_EVPCACHE) {
				queue_envelope_cache_add(ep);
				stat_increment("queue.evpcache.load.missed", 1);
			}
			return (1);
		}
		log_debug("debug: invalid envelope %016" PRIx64 ": %s",
		    ep->id, e);
	}
	/* Parse or validation failed: mark the whole message as corrupt. */
	(void)queue_message_corrupt(evpid_to_msgid(evpid));
	return (0);
}
/*
 * Remove message 'msgid' from the backend and from the incoming spool.
 * Returns the backend's result.
 */
int
queue_message_delete(uint32_t msgid)
{
	char	path[PATH_MAX];
	int	ret;

	profile_enter("queue_message_delete");
	ret = handler_message_delete(msgid);
	profile_leave();

	/* in case the message is incoming */
	queue_message_path(msgid, path, sizeof(path));
	unlink(path);

	log_trace(TRACE_QUEUE,
	    "queue-backend: queue_message_delete(%08"PRIx32") -> %d",
	    msgid, ret);

	return (ret);
}
int queue_message_walk(struct envelope *ep, uint32_t msgid, int *done, void **data) { char evpbuf[sizeof(struct envelope)]; uint64_t evpid; int r; const char *e; profile_enter("queue_message_walk"); r = handler_message_walk(&evpid, evpbuf, sizeof evpbuf, msgid, done, data); profile_leave(); log_trace(TRACE_QUEUE, "queue-backend: queue_message_walk() -> %d (%016"PRIx64")", r, evpid); if (r == -1) return (r); if (r && queue_envelope_load_buffer(ep, evpbuf, (size_t)r)) { if ((e = envelope_validate(ep)) == NULL) { ep->id = evpid; /* * do not cache the envelope here, while discovering * envelopes one could re-run discover on already * scheduled envelopes which leads to triggering of * strict checks in caching. Envelopes could anyway * be loaded from backend if it isn't cached. */ return (1); } log_debug("debug: invalid envelope %016" PRIx64 ": %s", ep->id, e); (void)queue_message_corrupt(evpid_to_msgid(evpid)); } return (0); }
int queue_envelope_update(struct envelope *ep) { char evpbuf[sizeof(struct envelope)]; size_t evplen; int r; evplen = queue_envelope_dump_buffer(ep, evpbuf, sizeof evpbuf); if (evplen == 0) return (0); profile_enter("queue_envelope_update"); r = handler_envelope_update(ep->id, evpbuf, evplen); profile_leave(); if (r && env->sc_queue_flags & QUEUE_EVPCACHE) queue_envelope_cache_update(ep); log_trace(TRACE_QUEUE, "queue-backend: queue_envelope_update(%016"PRIx64") -> %d", ep->id, r); return (r); }
/*
 * evaluate_rule() - execute a rule invocation: expand the rule name, bind
 * it in the current module, record actions for make() and/or recursively
 * evaluate the rule's procedure.  Returns the rule's result list.
 */
LIST *
evaluate_rule( char * rulename, FRAME * frame )
{
    LIST          * result = L0;
    RULE          * rule;
    profile_frame   prof[ 1 ];
    module_t      * prev_module = frame->module;
    LIST          * l;

    {
        LOL arg_context_, * arg_context = &arg_context_;
        if ( !frame->prev )
            lol_init( arg_context );
        else
            arg_context = frame->prev->args;
        l = var_expand( L0, rulename, rulename + strlen( rulename ),
            arg_context, 0 );
    }

    if ( !l )
    {
        backtrace_line( frame->prev );
        printf( "warning: rulename %s expands to empty string\n", rulename );
        backtrace( frame->prev );
        return result;
    }

    rulename = l->string;
    rule = bindrule( l->string, frame->module );

#ifdef HAVE_PYTHON
    if ( rule->python_function )
    {
        /* The below messing with modules is due to the way modules are
         * implemented in Jam.  Suppose we are in module M1 now.  The global
         * variable map actually holds 'M1' variables, and M1->variables hold
         * global variables.
         *
         * If we call Python right away, Python calls back Jam and then Jam
         * does 'module M1 { }' then Jam will try to swap the current global
         * variables with M1->variables.  The result will be that global
         * variables map will hold global variables, and any variable settings
         * we do will go to the global module, not M1.
         *
         * By restoring basic state, where the global variable map holds global
         * variable, we make sure any future 'module M1' entry will work OK.
         */
        LIST * result;
        module_t * m = python_module();

        frame->module = m;

        exit_module( prev_module );
        enter_module( m );

        result = call_python_function( rule, frame );

        exit_module( m );
        enter_module ( prev_module );

        return result;
    }
#endif

    /* Drop the rule name. */
    l = list_pop_front( l );

    /* Tack the rest of the expansion onto the front of the first argument. */
    frame->args->list[0] = list_append( l, lol_get( frame->args, 0 ) );

    if ( DEBUG_COMPILE )
    {
        /* Try hard to indicate in which module the rule is going to execute.
         * FIX: the previous strncat() calls passed sizeof( buf ) - 1 as the
         * bound of each call; the bound of strncat() must be the *remaining*
         * space, so the second call could overflow buf.  A single bounded
         * snprintf() produces the same truncated concatenation safely.
         */
        if ( rule->module != frame->module && rule->procedure != 0 &&
            strcmp( rulename, rule->procedure->rulename ) )
        {
            char buf[ 256 ];
            snprintf( buf, sizeof( buf ), "%s%s", rule->module->name,
                rule->name );
            debug_compile( 1, buf, frame );
        }
        else
        {
            debug_compile( 1, rulename, frame );
        }

        lol_print( frame->args );
        printf( "\n" );
    }

    if ( rule->procedure && rule->module != prev_module )
    {
        /* Propagate current module to nested rule invocations. */
        frame->module = rule->module;

        /* Swap variables. */
        exit_module( prev_module );
        enter_module( rule->module );
    }

    /* Record current rule name in frame. */
    if ( rule->procedure )
    {
        frame->rulename = rulename;
        /* And enter record profile info. */
        if ( DEBUG_PROFILE )
            profile_enter( rule->procedure->rulename, prof );
    }

    /* Check traditional targets $(<) and sources $(>). */
    if ( !rule->actions && !rule->procedure )
    {
        backtrace_line( frame->prev );
        printf( "rule %s unknown in module %s\n", rule->name,
            frame->module->name );
        backtrace( frame->prev );
        exit( 1 );
    }

    /* If this rule will be executed for updating the targets then construct
     * the action for make().
     */
    if ( rule->actions )
    {
        TARGETS * t;
        ACTION * action;

        /* The action is associated with this instance of this rule. */
        action = (ACTION *)BJAM_MALLOC( sizeof( ACTION ) );
        memset( (char *)action, '\0', sizeof( *action ) );

        action->rule = rule;
        action->targets = targetlist( (TARGETS *)0, lol_get( frame->args, 0 ) );
        action->sources = targetlist( (TARGETS *)0, lol_get( frame->args, 1 ) );

        /* If we have a group of targets all being built using the same action
         * then we must not allow any of them to be used as sources unless they
         * had all already been built in the first place or their joined action
         * has had a chance to finish its work and build all of them anew.
         *
         * As a quick-fix we make all the targets list each other as 'included
         * targets': the first listed target includes all the others and vice
         * versa, so depending on any of them implicitly depends on all of
         * them.  Direct dependencies could not be used due to the 'circular
         * dependency' issue.
         *
         * TODO: this also forces the action to run whenever any target in the
         * group is out of date, even for targets not actually used by the
         * current build; and the interaction with inclusion scanning should
         * be checked.
         */
        if ( action->targets )
        {
            TARGET * t0 = action->targets->target;
            for ( t = action->targets->next; t; t = t->next )
            {
                target_include( t->target, t0 );
                target_include( t0, t->target );
            }
        }

        /* Append this action to the actions of each target. */
        for ( t = action->targets; t; t = t->next )
            t->target->actions = actionlist( t->target->actions, action );
    }

    /* Now recursively compile any parse tree associated with this rule.
     * parse_refer()/parse_free() call pair added to ensure rule not freed
     * during use.
     */
    if ( rule->procedure )
    {
        SETTINGS * local_args = collect_arguments( rule, frame );
        PARSE * parse = rule->procedure;
        parse_refer( parse );

        pushsettings( local_args );
        result = parse_evaluate( parse, frame );
        popsettings( local_args );
        freesettings( local_args );

        parse_free( parse );
    }

    if ( frame->module != prev_module )
    {
        exit_module( frame->module );
        enter_module( prev_module );
    }

    if ( DEBUG_PROFILE && rule->procedure )
        profile_exit( prof );

    if ( DEBUG_COMPILE )
        debug_compile( -1, 0, frame );

    return result;
}
/*
 * profile_init() - start profiling 'rulename' in 'frame' when profiling is
 * enabled; always returns 'frame' for the caller's convenience.
 */
profile_frame *
profile_init( OBJECT * rulename, profile_frame * frame )
{
    if ( !DEBUG_PROFILE )
        return frame;
    profile_enter( rulename, frame );
    return frame;
}
/*
 * hashitem() - find (and optionally add) an item in the hash table.
 * '*data' carries the key in; on a hit it is repointed at the stored item
 * and !0 is returned.  With 'enter' set, a miss copies '*data' into a new
 * item and repoints '*data' at it; 0 is returned on any miss.
 */
int
hashitem( register struct hash *hp, HASHDATA **data, int enter )
{
	register ITEM *i;
	OBJECT *b = (*data)->key;
	unsigned int keyval = hash_keyval(b);

    #ifdef HASH_DEBUG_PROFILE
	profile_frame prof[1];
	if ( DEBUG_PROFILE )
	    profile_enter( 0, prof );
    #endif

	/* When inserting, make sure there is room for one more item. */
	if ( enter && !hp->items.more )
	    hashrehash( hp );

	/* Pure lookup in an empty table cannot succeed. */
	if ( !enter && !hp->items.nel )
	{
    #ifdef HASH_DEBUG_PROFILE
	    if ( DEBUG_PROFILE )
	        profile_exit( prof );
    #endif
	    return 0;
	}

	i = hash_search( hp, keyval, (*data)->key, 0 );
	if (i)
	{
	    /* Hit: hand the caller the stored item's data. */
	    *data = &i->data;
    #ifdef HASH_DEBUG_PROFILE
	    if ( DEBUG_PROFILE ) profile_exit( prof );
    #endif
	    return !0;
	}

	if ( enter )
	{
	    ITEM * * base = hash_bucket(hp,keyval);

	    /* try to grab one from the free list */
	    if ( hp->items.free )
	    {
	        i = hp->items.free;
	        hp->items.free = i->hdr.next;
	        assert( i->data.key == 0 );
	    }
	    else
	    {
	        /* Otherwise carve the next item out of the arena. */
	        i = (ITEM *)hp->items.next;
	        hp->items.next += hp->items.size;
	    }
	    hp->items.more--;
	    /* Copy the caller's record into the new item, then link it at
	     * the head of its bucket chain. */
	    memcpy( (char *)&i->data, (char *)*data, hp->items.datalen );
	    i->hdr.next = *base;
	    *base = i;
	    *data = &i->data;
    #ifdef OPT_BOEHM_GC
	    if (sizeof(HASHDATA) == hp->items.datalen)
	    {
	        GC_REGISTER_FINALIZER(i->data.key,&hash_mem_finalizer,hp,0,0);
	    }
    #endif
	}

    #ifdef HASH_DEBUG_PROFILE
	if ( DEBUG_PROFILE )
	    profile_exit( prof );
    #endif
	return 0;
}
int queue_message_commit(uint32_t msgid) { int r; char msgpath[PATH_MAX]; char tmppath[PATH_MAX]; FILE *ifp = NULL; FILE *ofp = NULL; profile_enter("queue_message_commit"); queue_message_path(msgid, msgpath, sizeof(msgpath)); if (env->sc_queue_flags & QUEUE_COMPRESSION) { bsnprintf(tmppath, sizeof tmppath, "%s.comp", msgpath); ifp = fopen(msgpath, "r"); ofp = fopen(tmppath, "w+"); if (ifp == NULL || ofp == NULL) goto err; if (! compress_file(ifp, ofp)) goto err; fclose(ifp); fclose(ofp); ifp = NULL; ofp = NULL; if (rename(tmppath, msgpath) == -1) { if (errno == ENOSPC) return (0); unlink(tmppath); log_warn("rename"); return (0); } } if (env->sc_queue_flags & QUEUE_ENCRYPTION) { bsnprintf(tmppath, sizeof tmppath, "%s.enc", msgpath); ifp = fopen(msgpath, "r"); ofp = fopen(tmppath, "w+"); if (ifp == NULL || ofp == NULL) goto err; if (! crypto_encrypt_file(ifp, ofp)) goto err; fclose(ifp); fclose(ofp); ifp = NULL; ofp = NULL; if (rename(tmppath, msgpath) == -1) { if (errno == ENOSPC) return (0); unlink(tmppath); log_warn("rename"); return (0); } } r = handler_message_commit(msgid, msgpath); profile_leave(); /* in case it's not done by the backend */ unlink(msgpath); log_trace(TRACE_QUEUE, "queue-backend: queue_message_commit(%08"PRIx32") -> %d", msgid, r); return (r); err: if (ifp) fclose(ifp); if (ofp) fclose(ofp); return 0; }
/*
 * evaluate_rule() - execute a rule invocation: expand the rule name, bind
 * it in the current module, record actions for make() and/or recursively
 * evaluate the rule's procedure.  Returns the rule's result list.
 */
LIST *
evaluate_rule( char *rulename, FRAME *frame )
{
    LIST *result = L0;
    RULE *rule;
    profile_frame prof[1];
    module_t *prev_module = frame->module;
    LIST *l;

    {
        LOL arg_context_, *arg_context = &arg_context_;
        if ( !frame->prev )
            lol_init(arg_context);
        else
            arg_context = frame->prev->args;
        l = var_expand( L0, rulename, rulename+strlen(rulename), arg_context, 0 );
    }

    if ( !l )
    {
        backtrace_line( frame->prev );
        printf( "warning: rulename %s expands to empty string\n", rulename );
        backtrace( frame->prev );
        return result;
    }

    rulename = l->string;
    rule = bindrule( l->string, frame->module );

#ifdef HAVE_PYTHON
    if (rule->python_function)
    {
        return call_python_function(rule, frame);
    }
#endif

    /* drop the rule name */
    l = list_pop_front( l );

    /* tack the rest of the expansion onto the front of the first argument */
    frame->args->list[0] = list_append( l, lol_get( frame->args, 0 ) );

    if ( DEBUG_COMPILE )
    {
        /* Try hard to indicate in which module the rule is going to execute.
         * FIX: the previous strncat() calls passed sizeof(buf) - 1 as the
         * bound of each call; the bound of strncat() must be the *remaining*
         * space, so the second call could overflow buf.  A single bounded
         * snprintf() produces the same truncated concatenation safely.
         */
        if ( rule->module != frame->module && rule->procedure != 0 &&
            strcmp(rulename, rule->procedure->rulename) )
        {
            char buf[256];
            snprintf( buf, sizeof(buf), "%s%s", rule->module->name,
                rule->name );
            debug_compile( 1, buf, frame);
        }
        else
        {
            debug_compile( 1, rulename, frame);
        }

        lol_print( frame->args );
        printf( "\n" );
    }

    if ( rule->procedure && rule->module != prev_module )
    {
        /* propagate current module to nested rule invocations */
        frame->module = rule->module;

        /* swap variables */
        exit_module( prev_module );
        enter_module( rule->module );
    }

    /* record current rule name in frame */
    if ( rule->procedure )
    {
        frame->rulename = rulename;
        /* and enter record profile info */
        if ( DEBUG_PROFILE )
            profile_enter( rule->procedure->rulename, prof );
    }

    /* Check traditional targets $(<) and sources $(>) */
    if( !rule->actions && !rule->procedure )
    {
        backtrace_line( frame->prev );
        printf( "rule %s unknown in module %s\n", rule->name,
            frame->module->name );
        backtrace( frame->prev );
        exit(1);
    }

    /* If this rule will be executed for updating the targets */
    /* then construct the action for make(). */
    if( rule->actions )
    {
        TARGETS *t;
        ACTION *action;

        /* The action is associated with this instance of this rule */
        action = (ACTION *)malloc( sizeof( ACTION ) );
        memset( (char *)action, '\0', sizeof( *action ) );

        action->rule = rule;
        action->targets = targetlist( (TARGETS *)0, lol_get( frame->args, 0 ) );
        action->sources = targetlist( (TARGETS *)0, lol_get( frame->args, 1 ) );

        /* Append this action to the actions of each target */
        for( t = action->targets; t; t = t->next )
            t->target->actions = actionlist( t->target->actions, action );
    }

    /* Now recursively compile any parse tree associated with this rule */
    /* refer/free to ensure rule not freed during use */
    if( rule->procedure )
    {
        SETTINGS *local_args = collect_arguments( rule, frame );
        PARSE *parse = rule->procedure;
        parse_refer( parse );

        pushsettings( local_args );
        result = parse_evaluate( parse, frame );
        popsettings( local_args );
        freesettings( local_args );

        parse_free( parse );
    }

    if ( frame->module != prev_module )
    {
        exit_module( frame->module );
        enter_module( prev_module );
    }

    if ( DEBUG_PROFILE && rule->procedure )
        profile_exit( prof );

    if( DEBUG_COMPILE )
        debug_compile( -1, 0, frame);

    return result;
}
int queue_message_fd_r(uint32_t msgid) { int fdin, fdout = -1, fd = -1; FILE *ifp = NULL; FILE *ofp = NULL; profile_enter("queue_message_fd_r"); fdin = handler_message_fd_r(msgid); profile_leave(); log_trace(TRACE_QUEUE, "queue-backend: queue_message_fd_r(%08"PRIx32") -> %d", msgid, fdin); if (fdin == -1) return (-1); if (env->sc_queue_flags & QUEUE_ENCRYPTION) { if ((fdout = mktmpfile()) == -1) goto err; if ((fd = dup(fdout)) == -1) goto err; if ((ifp = fdopen(fdin, "r")) == NULL) goto err; fdin = fd; fd = -1; if ((ofp = fdopen(fdout, "w+")) == NULL) goto err; if (! crypto_decrypt_file(ifp, ofp)) goto err; fclose(ifp); ifp = NULL; fclose(ofp); ofp = NULL; lseek(fdin, SEEK_SET, 0); } if (env->sc_queue_flags & QUEUE_COMPRESSION) { if ((fdout = mktmpfile()) == -1) goto err; if ((fd = dup(fdout)) == -1) goto err; if ((ifp = fdopen(fdin, "r")) == NULL) goto err; fdin = fd; fd = -1; if ((ofp = fdopen(fdout, "w+")) == NULL) goto err; if (! uncompress_file(ifp, ofp)) goto err; fclose(ifp); ifp = NULL; fclose(ofp); ofp = NULL; lseek(fdin, SEEK_SET, 0); } return (fdin); err: if (fd != -1) close(fd); if (fdin != -1) close(fdin); if (fdout != -1) close(fdout); if (ifp) fclose(ifp); if (ofp) fclose(ofp); return -1; }