bool ng_check_range( p_ng generator, const int64_t first, const uint64_t count )
{
    bool res = ( generator != NULL );
    if ( res )
    {
        ng_node defined_range;
        uint32_t invalid_nodes = 0;
        if ( count > 0 )
        {
            /* walk all nodes to check for boundaries... */
            defined_range.start = first;
            defined_range.count = count;
            VectorForEach ( &(generator->nodes), false,
                            ng_check_range_callback, &defined_range );
            VectorForEach ( &(generator->nodes), false,
                            ng_count_invalid_nodes, &invalid_nodes );
            if ( invalid_nodes > 0 )
            {
                ng_remove_invalid_nodes( generator, invalid_nodes );
            }
        }
    }
    return res;
}
static void matcher_match_column( p_mcol col, const VSchema *schema, const KConfig *cfg )
{
    uint32_t pair_count = VectorLength( &(col->pairs) );

    col->type_cast = NULL;
    if ( col->excluded ) return;

    /* call VTypedeclCommonAncestor for every type-pair */
    VectorForEach ( &(col->pairs), false, matcher_measure_dist_cb, (void*)schema );

    /* if we have more than one pair left... */
    if ( pair_count > 1 )
    {
        /* enter the lossy-ness into the src-types... */
        VectorForEach ( &(col->src_types), false, matcher_enter_type_score_cb, (void*)cfg );
        /* reorder the remaining pairs by: compatibility, lossy-ness, distance, default, order */
        VectorReorder ( &(col->pairs), matcher_match_cb, NULL );
    }

    /* pick the winner = first item in the vector */
    if ( pair_count > 0 )
    {
        col->type_cast = (p_mpair)VectorFirst ( &(col->pairs) );
        /* if the winner is not a compatible pair, we have no cast ! */
        if ( col->type_cast->compatible != 0 )
            col->type_cast = NULL;
    }
}
/* helper function for range-check */
static void num_gen_remove_invalid_nodes( num_gen* self )
{
    Vector temp_nodes;
    uint32_t count = VectorLength( &(self->nodes) );

    if ( count < 1 ) return;

    /* create a temp. vector */
    VectorInit( &temp_nodes, 0, count );

    /* copy all valid nodes into the temp. vector */
    VectorForEach ( &(self->nodes), false, num_gen_copy_valid_nodes, &temp_nodes );

    /* clear all nodes so far..., DO NOT PASS num_gen_node_destroy into it */
    VectorWhack( &(self->nodes), NULL, NULL );

    /* initialize and copy (shallow) the valid nodes back into the generator */
    VectorCopy ( &temp_nodes, &(self->nodes) );

    /* destroy the temp-vector, DO NOT PASS num_gen_node_destroy into it */
    VectorWhack ( &temp_nodes, NULL, NULL );
}
/* helper function that adds up all count values in the vector */
static uint64_t num_gen_total_count( const Vector * src )
{
    uint64_t res = 0;
    if ( src != NULL )
        VectorForEach ( src, false, num_gen_total_count_cb, &res );
    return res;
}
static uint32_t count_valid_entries( Vector * v, uint32_t * first )
{
    on_count_ctx occ = { 0, 0, 0 };
    VectorForEach ( v, false, on_count, &occ );
    *first = occ . first;
    return occ . valid;
}
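/* Several of the helpers in this file follow the same klib Vector idiom: a small
   context struct is passed through VectorForEach, and a callback of type
   void ( CC * ) ( void *item, void *data ) accumulates results into it.
   The sketch below is illustrative only -- the struct and callback names
   ( demo_count_ctx, demo_on_count ) are hypothetical stand-ins for
   on_count_ctx / on_count, whose real definitions live elsewhere in this
   file and may differ. Requires <klib/vector.h>. */
typedef struct demo_count_ctx
{
    uint32_t valid;     /* number of non-NULL entries seen */
    uint32_t first;     /* index of the first non-NULL entry */
    uint32_t idx;       /* running index while iterating */
} demo_count_ctx;

static void CC demo_on_count( void * item, void * data )
{
    demo_count_ctx * ctx = data;
    if ( item != NULL )
    {
        if ( ctx -> valid == 0 )
            ctx -> first = ctx -> idx;
        ctx -> valid += 1;
    }
    ctx -> idx += 1;
}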
void vdcd_ins_trans_fkt( col_defs* defs, const VSchema *my_schema )
{
    if ( defs == NULL ) return;
    if ( my_schema == NULL ) return;

    VectorForEach( &(defs->cols), false, vdcd_ins_1_trans_fkt, (void*)my_schema );
}
static void ng_remove_invalid_nodes( p_ng generator, const uint32_t invalid_nodes )
{
    Vector temp_nodes;

    /* create a temp. vector */
    VectorInit( &temp_nodes, 0, 5 );

    /* copy all valid nodes into the temp. vector */
    VectorForEach ( &(generator->nodes), false, ng_copy_valid_nodes, &temp_nodes );

    /* clear all nodes so far... */
    VectorWhack( &(generator->nodes), ng_node_destroy, NULL );

    /* re-init the vector */
    VectorInit( &(generator->nodes), 0, 5 );

    /* copy (shallow) the valid nodes back into the generator */
    VectorCopy ( &temp_nodes, &(generator->nodes) );

    /* correct the node count */
    generator->node_count -= invalid_nodes;

    /* destroy the temp-vector, DO NOT PASS ng_node_destroy into it */
    VectorWhack ( &temp_nodes, NULL, NULL );
}
static void KDyldForEach ( const KDyld *self,
    void ( CC * f ) ( const KDirectory *dir, void *data ), void *data )
{
    VectorForEach ( & self -> search, false,
        ( void ( CC * ) ( void*, void* ) ) f, data );
}
/* -------------------------------------------------------------------- */
rc_t temp_registry_merge( temp_registry * self,
                          KDirectory * dir,
                          const char * output_filename,
                          size_t buf_size,
                          bool show_progress,
                          bool force,
                          compress_t compress )
{
    rc_t rc = 0;
    if ( self == NULL )
        rc = RC( rcVDB, rcNoTarg, rcConstructing, rcSelf, rcNull );
    else if ( output_filename == NULL )
        rc = RC( rcVDB, rcNoTarg, rcConstructing, rcParam, rcNull );
    else
    {
        struct bg_progress * progress = NULL;

        if ( show_progress )
        {
            rc = KOutMsg( "concat :" );
            if ( rc == 0 )
            {
                uint64_t total = total_size( dir, &self -> lists );
                rc = bg_progress_make( &progress, total, 0, 0 ); /* progress_thread.c */
            }
        }

        if ( rc == 0 )
        {
            uint32_t first;
            uint32_t count = count_valid_entries( &self -> lists, &first ); /* above */
            if ( count == 1 )
            {
                /* we have only ONE set of files... */
                VNamelist * l = VectorGet ( &self -> lists, first );
                VNamelistReorder ( l, false );
                rc = execute_concat( dir, output_filename, l, buf_size,
                                     progress, force, compress ); /* concatenator.c */
            }
            else if ( count > 1 )
            {
                /* we have MULTIPLE sets of files... */
                cmn_merge cmn = { dir, output_filename, buf_size, progress, force, compress };
                on_merge_ctx omc = { &cmn, 0 };
                VectorInit( &omc . threads, 0, count );
                VectorForEach ( &self -> lists, false, on_merge, &omc );
                join_and_release_threads( &omc . threads ); /* helper.c */
            }
            bg_progress_release( progress ); /* progress_thread.c ( ignores NULL ) */
        }
    }
    return rc;
}
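/* Illustrative only: a callback of roughly this shape could implement the
   "MULTIPLE sets of files" branch above, starting one worker thread per
   VNamelist and collecting the KThread handles for join_and_release_threads.
   The names demo_merge_worker / demo_on_merge are hypothetical stand-ins for
   the real on_merge in this file, which also hands the shared cmn_merge
   parameters to each worker. Requires <kproc/thread.h> and <klib/vector.h>. */
static rc_t CC demo_merge_worker( const KThread * t, void * data )
{
    /* a real worker would concatenate one VNamelist into its own output file */
    ( void ) t;
    ( void ) data;
    return 0;
}

static void CC demo_on_merge( void * item, void * data )
{
    on_merge_ctx * omc = data;
    VNamelist * files = item;
    if ( files != NULL )
    {
        KThread * thread;
        if ( KThreadMake( &thread, demo_merge_worker, files ) == 0 )
            VectorAppend( &omc -> threads, NULL, thread );
    }
}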
rc_t foreach_reply_obj( struct reply_obj_list * list, on_reply_obj_t on_obj, void * data )
{
    reply_obj_ctx rctx = { on_obj, data, 0 };
    VectorForEach( &list->v, false, reply_obj_cb, &rctx );
    return rctx.rc;
}
uint32_t vdcd_add_to_cursor( col_defs* defs, const VCursor *my_cursor )
{
    add_2_cur_context ctx;

    ctx.count = 0;
    ctx.my_cursor = my_cursor;
    VectorForEach( &(defs->cols), false, vdcd_add_1_to_cursor, &ctx );
    return ctx.count;
}
uint64_t ng_count( p_ng generator )
{
    uint64_t res = 0;
    if ( generator != NULL )
    {
        VectorForEach ( &(generator->nodes), false, ng_count_callback, &res );
    }
    return res;
}
/* helper function for trim */
rc_t num_gen_trim( num_gen* self, const int64_t first, const uint64_t count )
{
    num_gen_node trim_range;
    uint32_t invalid_nodes = 0;

    if ( self == NULL )
        return RC( rcVDB, rcNoTarg, rcValidating, rcSelf, rcNull );
    if ( count == 0 )
        return RC( rcVDB, rcNoTarg, rcValidating, rcParam, rcNull );

    /* walk all nodes to check for boundaries... */
    trim_range.start = first;
    trim_range.count = count;
    VectorForEach ( &(self->nodes), false, num_gen_check_range_callback, &trim_range );
    VectorForEach ( &(self->nodes), false, num_gen_count_invalid_nodes, &invalid_nodes );
    if ( invalid_nodes > 0 )
        num_gen_remove_invalid_nodes( self );

    return 0;
}
static void VProdResolveWritableColumns ( struct resolve_phys_data *pb, bool suspend_triggers )
{
    const STable *dad, *stbl = pb -> pr . stbl;

    /* walk table schema looking for parents */
    uint32_t i = VectorStart ( & stbl -> overrides );
    uint32_t end = VectorLength ( & stbl -> overrides );
    for ( end += i; i < end; ++ i )
    {
        dad = STableFindOrdAncestor ( stbl, i );
        VectorForEach ( & dad -> phys, false, resolve_writable_sphys, pb );
    }

    /* walk current table */
    VectorForEach ( & stbl -> phys, false, resolve_writable_sphys, pb );

    /* add triggers */
    if ( !suspend_triggers && pb -> seed == NULL )
    {
        pb -> pr . chain = chainUncommitted;
        VProdResolveAddTriggers ( & pb -> pr, stbl );
    }
}
/* ---------------------------------------------------------------------- */
static rc_t build_tree_then_run ()
{
    rc_data data;

    data.rc = 0;

    BSTreeInit (&options.pathtree);

    VectorForEach (&options.pathvpath, false, handle_path, &data);

    if (data.rc == 0)
        data.rc = extract();

    BSTreeWhack (&options.pathtree, extnode_whack, NULL);

    return data.rc;
}
/* ----------------------------------------------------------------------
 * pull paramstring 1-N and convert them to internal VPaths
 */
static rc_t build_vpath_then_run ()
{
    rc_data data;

    data.rc = 0;

    VectorInit (&options.pathvpath, 0, VectorLength (&options.pathstr));

    VectorForEach (&options.pathstr, false, build_vpath_one, &data);

    if (data.rc == 0)
        data.rc = build_tree_then_run();

    VectorWhack (&options.pathvpath, build_vpath_whack, NULL);

    return data.rc;
}
/* CloseRow
 *  balances OpenRow message
 *  if there are uncommitted modifications,
 *  discard all changes. otherwise,
 *  advance to next row
 */
LIB_EXPORT rc_t CC VCursorCloseRow ( const VCursor *cself )
{
    rc_t rc = 0; /* needed in case FlushPage isn't called */
    VCursor *self = ( VCursor* ) cself;

    if ( self == NULL )
        rc = RC ( rcVDB, rcCursor, rcClosing, rcSelf, rcNull );
    else if ( self -> state == vcFailed )
        rc = RC ( rcVDB, rcCursor, rcClosing, rcCursor, rcInvalid );
    else if ( self -> state < vcRowOpen )
        rc = 0;
    else if ( self -> read_only )
        rc = VCursorCloseRowRead ( self );
    else
    {
        /* tell each of the columns that no further data may be written
           and to abandon any uncommitted writes */
        VectorForEach ( & self -> row, false, WColumnCloseRow, NULL );

        /* if the row was committed... */
        if ( self -> state >= vcRowCommitted )
        {
            /* close off the page if so requested */
            if ( self -> state == vcPageCommit )
            {
                rc = VCursorFlushPageInt ( self );
                if ( rc )
                {
                    self -> state = vcFailed;
                    return rc;
                }
            }

            /* advance to next id */
            ++ self -> row_id;
        }

        self -> state = vcReady;
        rc = 0;
    }

    return rc;
}
rc_t num_gen_debug( const num_gen* self, char **s )
{
    string_ctx ctx;

    if ( self == NULL )
        return RC( rcVDB, rcNoTarg, rcReading, rcSelf, rcNull );
    if ( s == NULL )
        return RC( rcVDB, rcNoTarg, rcReading, rcParam, rcNull );

    ctx.s = NULL;
    ctx.len = 0;
    VectorForEach ( &(self->nodes), false, num_gen_debug_cb, &ctx );
    if ( ctx.len == 0 )
    {
        *s = NULL;
        return RC( rcVDB, rcNoTarg, rcReading, rcData, rcEmpty );
    }

    ctx.s[ ctx.len ] = 0;
    *s = ctx.s;
    return 0;
}
/* OpenRow
 *  open currently closed row indicated by row id
 */
LIB_EXPORT rc_t CC VCursorOpenRow ( const VCursor *cself )
{
    rc_t rc;
    VCursor *self = ( VCursor* ) cself;

    if ( self == NULL )
        rc = RC ( rcVDB, rcCursor, rcOpening, rcSelf, rcNull );
    else if ( self -> state != vcReady )
    {
        switch ( self -> state )
        {
        case vcConstruct:
            rc = RC ( rcVDB, rcCursor, rcOpening, rcRow, rcIncomplete );
            break;
        case vcFailed:
            rc = RC ( rcVDB, rcCursor, rcOpening, rcCursor, rcInvalid );
            break;
        case vcRowOpen:
            rc = 0;
            break;
        default:
            rc = RC ( rcVDB, rcCursor, rcOpening, rcRow, rcBusy );
        }
    }
    else if ( self -> read_only )
        rc = VCursorOpenRowRead ( self );
    else
    {
        /* validate that all columns have the same starting row_id */
        int64_t row_id = self -> row_id;
        VectorForEach ( & self -> row, false, WColumnOpenRow, & row_id );
        assert ( row_id == self -> row_id );

        self -> state = vcRowOpen;
        rc = 0;
    }

    return rc;
}
/* RepeatRow
 *  repeats the current row by the count provided
 *  row must have been committed
 *
 *  AVAILABILITY: version 2.6
 *
 *  "count" [ IN ] - the number of times to repeat
 *  the current row.
 */
LIB_EXPORT rc_t CC VCursorRepeatRow ( VCursor *self, uint64_t count )
{
    rc_t rc = 0;

    if ( self == NULL )
        rc = RC ( rcVDB, rcCursor, rcUpdating, rcSelf, rcNull );
    else if ( self -> read_only )
        rc = RC ( rcVDB, rcCursor, rcUpdating, rcCursor, rcReadonly );
    else if ( self -> state == vcFailed )
        rc = RC ( rcVDB, rcCursor, rcUpdating, rcCursor, rcInvalid );
    else if ( self -> state < vcRowOpen )
        rc = RC ( rcVDB, rcCursor, rcUpdating, rcRow, rcNotOpen );
    else if ( self -> state < vcRowCommitted )
        rc = RC ( rcVDB, rcCursor, rcUpdating, rcRow, rcInvalid );
    else if ( count != 0 )
    {
        WColumnRepeatRowData pb;
        pb . end_id = self -> row_id;
        pb . count = count;

        /* tell columns to commit the row, and allow
           each to return an earlier cutoff id ( half-closed ) */
        VectorForEach ( & self -> row, false, WColumnRepeatRow, & pb );

        /* extend the current row-id */
        if ( self -> end_id < self -> row_id )
            self -> row_id += count;
        else
        {
            self -> row_id += count;
            self -> end_id += count;
        }
    }

    return rc;
}
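/* A minimal usage sketch of the row state machine that OpenRow / RepeatRow /
   CloseRow implement above, assuming a write cursor "curs" has already been
   created and opened and that "col_idx" was obtained via VCursorAddColumn.
   The helper name write_repeated_row is hypothetical and error handling is
   collapsed for brevity; requires <vdb/cursor.h>. */
static rc_t write_repeated_row ( VCursor * curs, uint32_t col_idx,
                                 uint32_t value, uint64_t repeat )
{
    rc_t rc = VCursorOpenRow ( curs );                      /* vcReady -> vcRowOpen */
    if ( rc == 0 )
        rc = VCursorWrite ( curs, col_idx, sizeof value * 8, &value, 0, 1 );
    if ( rc == 0 )
        rc = VCursorCommitRow ( curs );                     /* row becomes committed */
    if ( rc == 0 && repeat > 0 )
        rc = VCursorRepeatRow ( curs, repeat );             /* duplicate the committed row */
    if ( rc == 0 )
        rc = VCursorCloseRow ( curs );                      /* back to vcReady, row_id advanced */
    return rc;
}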
void vdcd_exclude_this_column( col_defs* defs, const char* column_name )
{
    VectorForEach( &(defs->cols), false, vdcd_exclude_column_cb, (void*)column_name );
}
void vdcd_reset_content( col_defs* defs )
{
    VectorForEach( &(defs->cols), false, vdcd_reset_1_content, NULL );
}
static rc_t CC run_flush_thread ( const KThread *t, void *data )
{
    rc_t rc;
    VCursor *self = data;

    /* acquire lock */
    MTCURSOR_DBG (( "run_flush_thread: acquiring lock\n" ));
    rc = KLockAcquire ( self -> flush_lock );
    if ( rc == 0 )
    {
        do
        {
            bool failed;
            run_trigger_prod_data pb;

            /* wait for data */
            if ( self -> flush_state == vfReady )
            {
                MTCURSOR_DBG (( "run_flush_thread: waiting for input\n" ));
                rc = KConditionWait ( self -> flush_cond, self -> flush_lock );
                if ( rc != 0 )
                    break;
            }

            /* bail unless state is busy */
            if ( self -> flush_state != vfBusy )
            {
                MTCURSOR_DBG (( "run_flush_thread: exiting\n" ));
                break;
            }

            /* prepare param block */
            pb . id = self -> flush_id;
            pb . cnt = self -> flush_cnt;
            pb . rc = 0;

            MTCURSOR_DBG (( "run_flush_thread: unlocking and running\n" ));
            KLockUnlock ( self -> flush_lock );

            /* run productions from trigger roots */
            failed = VectorDoUntil ( & self -> trig, false, run_trigger_prods, & pb );

            /* drop page buffers */
            MTCURSOR_DBG (( "run_flush_thread: dropping page buffers\n" ));
            VectorForEach ( & self -> row, false, WColumnDropPage, NULL );

            /* reacquire lock */
            MTCURSOR_DBG (( "run_flush_thread: re-acquiring lock\n" ));
            rc = KLockAcquire ( self -> flush_lock );
            if ( rc != 0 )
            {
                self -> flush_state = vfBgErr;
                LOGERR ( klogSys, rc, "run_flush_thread: re-acquiring lock failed - exit" );
                return rc;
            }

#if FORCE_FLUSH_ERROR_EXIT
            if ( ! failed )
            {
                pb . rc = RC ( rcVDB, rcCursor, rcFlushing, rcThread, rcCanceled );
                failed = true;
            }
#endif
            /* get out on failure */
            if ( failed )
            {
                self -> flush_state = vfBgErr;
                LOGERR ( klogInt, pb . rc, "run_flush_thread: run_trigger_prods failed - exit" );
                KConditionSignal ( self -> flush_cond );
                rc = pb . rc;
            }

            /* no longer busy */
            else if ( self -> flush_state == vfBusy )
            {
                /* signal waiter */
                self -> flush_state = vfReady;
                MTCURSOR_DBG (( "run_flush_thread: signaling ready\n" ));
                rc = KConditionSignal ( self -> flush_cond );
                if ( rc != 0 )
                    LOGERR ( klogSys, rc, "run_flush_thread: failed to signal foreground thread - exit" );
            }
        }
        while ( rc == 0 );

        MTCURSOR_DBG (( "run_flush_thread: unlocking\n" ));
        KLockUnlock ( self -> flush_lock );
    }

    MTCURSOR_DBG (( "run_flush_thread: exit\n" ));
    return rc;
}
static rc_t VCursorFlushPageInt ( VCursor *self )
{
    rc_t rc;

    if ( self == NULL )
        rc = RC ( rcVDB, rcCursor, rcFlushing, rcSelf, rcNull );
    else if ( self -> read_only )
        rc = RC ( rcVDB, rcCursor, rcFlushing, rcCursor, rcReadonly );
    else
    {
        int64_t end_id;
#if ! VCURSOR_FLUSH_THREAD
        run_trigger_prod_data pb;
#endif
        switch ( self -> state )
        {
        case vcConstruct:
            rc = RC ( rcVDB, rcCursor, rcFlushing, rcCursor, rcNotOpen );
            break;
        case vcFailed:
            rc = RC ( rcVDB, rcCursor, rcFlushing, rcCursor, rcInvalid );
            break;
        case vcRowOpen:
            rc = RC ( rcVDB, rcCursor, rcFlushing, rcCursor, rcBusy );
            break;
        default:

            /* ignore request if there is no page to commit */
            if ( self -> start_id == self -> end_id )
            {
                /* the cursor should be in unwritten state,
                   where the row_id can be reset but drags
                   along the other markers. */
                assert ( self -> end_id == self -> row_id );
                return 0;
            }

#if VCURSOR_FLUSH_THREAD
            MTCURSOR_DBG (( "VCursorFlushPageInt: going to acquire lock\n" ));

            /* get lock */
            rc = KLockAcquire ( self -> flush_lock );
            if ( rc != 0 )
                return rc;

            MTCURSOR_DBG (( "VCursorFlushPageInt: have lock\n" ));

            /* make sure that background thread is ready */
            while ( self -> flush_state == vfBusy )
            {
                MTCURSOR_DBG (( "VCursorFlushPageInt: waiting for background thread\n" ));
                rc = KConditionWait ( self -> flush_cond, self -> flush_lock );
                if ( rc != 0 )
                {
                    LOGERR ( klogSys, rc, "VCursorFlushPageInt: wait failed - exiting" );
                    KLockUnlock ( self -> flush_lock );
                    return rc;
                }
            }

            if ( self -> flush_state != vfReady )
            {
                if ( self -> flush_state != vfBgErr )
                    rc = RC ( rcVDB, rcCursor, rcFlushing, rcCursor, rcInconsistent );
                else
                {
                    rc_t rc2;
                    MTCURSOR_DBG (( "VCursorFlushPageInt: waiting on thread to exit\n" ));
                    rc = KThreadWait ( self -> flush_thread, & rc2 );
                    if ( rc == 0 )
                    {
                        rc = rc2;
                        MTCURSOR_DBG (( "VCursorFlushPageInt: releasing thread\n" ));
                        KThreadRelease ( self -> flush_thread );
                        self -> flush_thread = NULL;
                    }
                }

                PLOGERR ( klogInt, ( klogInt, rc,
                    "VCursorFlushPageInt: not in ready state[$(state)] - exiting",
                    "state=%hu", self -> flush_state ) );
                KLockUnlock ( self -> flush_lock );
                return rc;
            }

            MTCURSOR_DBG (( "VCursorFlushPageInt: running buffer page\n" ));
#endif
            /* first, tell all columns to bundle up their pages into buffers */
            end_id = self -> end_id;
            rc = RC ( rcVDB, rcCursor, rcFlushing, rcMemory, rcExhausted );
            if ( VectorDoUntil ( & self -> row, false, WColumnBufferPage, & end_id ) )
            {
                VectorForEach ( & self -> row, false, WColumnDropPage, NULL );
                self -> flush_state = vfFgErr;
            }
            else
            {
                /* supposed to be constant */
                assert ( end_id == self -> end_id );

#if VCURSOR_FLUSH_THREAD
                MTCURSOR_DBG (( "VCursorFlushPageInt: pages buffered - capturing id and count\n" ));
                self -> flush_id = self -> start_id;
                self -> flush_cnt = self -> end_id - self -> start_id;

                self -> start_id = self -> end_id;
                self -> end_id = self -> row_id + 1;
                self -> state = vcReady;

                MTCURSOR_DBG (( "VCursorFlushPageInt: state set to busy - signaling bg thread\n" ));
                self -> flush_state = vfBusy;
                rc = KConditionSignal ( self -> flush_cond );
                if ( rc != 0 )
                    LOGERR ( klogSys, rc, "VCursorFlushPageInt: condition returned error on signal" );
#else
                /* run all validation and trigger productions */
                pb . id = self -> start_id;
                pb . cnt = self -> end_id - self -> start_id;
                pb . rc = 0;
                if ( ! VectorDoUntil ( & self -> trig, false, run_trigger_prods, & pb ) )
                {
                    self -> start_id = self -> end_id;
                    self -> end_id = self -> row_id + 1;
                    self -> state = vcReady;
                }

                rc = pb . rc;

                /* drop page buffers */
                VectorForEach ( & self -> row, false, WColumnDropPage, NULL );
#endif
            }

#if VCURSOR_FLUSH_THREAD
            MTCURSOR_DBG (( "VCursorFlushPageInt: unlocking\n" ));
            KLockUnlock ( self -> flush_lock );
#endif
        }
    }

    return rc;
}
static uint64_t total_size( KDirectory * dir, Vector * v )
{
    on_list_ctx olc = { dir, 0 };
    VectorForEach ( v, false, on_list, &olc );
    return olc . res;
}
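/* For illustration: an on_list-style callback could add up file sizes like
   this. The names demo_list_ctx / demo_on_list are hypothetical; the real
   on_list / on_list_ctx are defined elsewhere in this file and may differ.
   Assumes each Vector entry is a VNamelist of file names relative to dir;
   requires <kfs/directory.h> and <klib/namelist.h>. */
typedef struct demo_list_ctx
{
    KDirectory * dir;   /* directory used to stat the files */
    uint64_t res;       /* accumulated size in bytes */
} demo_list_ctx;

static void CC demo_on_list( void * item, void * data )
{
    demo_list_ctx * ctx = data;
    const VNamelist * files = item;
    if ( files != NULL )
    {
        uint32_t count, idx;
        if ( VNameListCount( files, &count ) == 0 )
        {
            for ( idx = 0; idx < count; ++idx )
            {
                const char * filename;
                if ( VNameListGet( files, idx, &filename ) == 0 )
                {
                    uint64_t size;
                    if ( KDirectoryFileSize( ctx -> dir, &size, "%s", filename ) == 0 )
                        ctx -> res += size;
                }
            }
        }
    }
}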
/* helper function that creates a deep and conditional copy of a node-vector */
static void num_gen_copy_vector( const Vector * src, Vector * dst )
{
    if ( src == NULL || dst == NULL )
        return;

    VectorForEach ( src, false, num_gen_copy_cb, dst );
}
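/* A sketch of what a deep-copy callback of this shape might look like; the
   name demo_copy_node_cb is hypothetical and the real num_gen_copy_cb (with
   its "conditional" part, e.g. skipping empty nodes) is defined elsewhere in
   this file. Assumes num_gen_node carries only start/count; requires
   <stdlib.h> and <klib/vector.h>. */
static void CC demo_copy_node_cb( void * item, void * data )
{
    const num_gen_node * src_node = item;
    Vector * dst = data;

    /* "conditional": only copy nodes that actually cover something */
    if ( src_node != NULL && src_node -> count > 0 )
    {
        num_gen_node * copy = malloc( sizeof * copy );
        if ( copy != NULL )
        {
            copy -> start = src_node -> start;
            copy -> count = src_node -> count;
            if ( VectorAppend( dst, NULL, copy ) != 0 )
                free( copy );   /* on failure, do not leak the new node */
        }
    }
}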