// Construct the process-wide resource singleton.
// Installs itself as the global "rsrc", creates the obligatory managers
// (time, file-descriptor, logger) and, on UNIX, wires stderr (fd 2) as
// the error stream.
//
// @param pmmgr  memory manager captured by reference into "mmgr"
// @param ident  identity string handed to the logger factory
//
// @throws xc_program_state_violation if a Rsrc already exists (one-shot latch)
//
// NOTE(review): any exception thrown while creating resources is reported
// to stderr and then swallowed, so construction "succeeds" with whatever
// subset of resources was made before the failure — presumably deliberate
// best-effort startup; confirm callers tolerate partially-built state.
Rsrc :: Rsrc ( const MemMgr & pmmgr, const char * ident )
    : mmgr ( pmmgr )
{
    // one-shot latch
    if ( rsrc != 0 )
    {
        assert ( callstk != 0 );
        FUNC_ENTRY ();
        CONST_THROW ( xc_program_state_violation, "not at top of call stack" );
    }

    // publish the singleton before building resources so they can see it
    rsrc = this;

    try
    {
        // create obligatory resources
        tmmgr = PrimordTimeMgr :: make_primordial ();
        fdmgr = PrimordFDMgr :: make_primordial ();
        log = plogger_t :: make ( ident );
#if UNIX
        // bind fd 2 (stderr) as the error output stream
        FileDesc fd2 = fdmgr . make ( 2, CAP_WRITE );
        err = Stream ( fd2 );
#endif
    }
    catch ( exception & x )
    {
        // report and continue: the ctor itself does not re-throw
        NULTermString what = x . what ();
        fprintf ( stderr, "ERROR: %s:\n", ( const char * ) what );
#if _DEBUGGING
        NULTermString stk = x . stack_trace ();
        fprintf ( stderr, "%s\n", ( const char * ) stk );
#endif
    }
}
/* AddRef
 * creates a new reference
 * ignores NULL references
 */
LIB_EXPORT rc_t CC KFileFormatAddRef ( const KFileFormat *self )
{
    FUNC_ENTRY();

    /* a NULL reference is silently tolerated */
    if (self == NULL)
        return 0;

    /* cast away const to bump the shared counter */
    atomic32_inc (& ((KFileFormat*) self)->refcount);
    return 0;
}
// Read up to "num_bytes" from the stream into "dst" at offset "start".
// The transfer is clipped to the destination's remaining space and to
// the stream's MTU; returns the number of bytes actually read.
// @throws xc_param_err on a negative start offset
bytes_t Stream :: read ( const bytes_t & num_bytes, Mem & dst, index_t start ) const
{
    FUNC_ENTRY ();

    // a negative destination offset is a caller error
    if ( start < 0 )
        THROW ( xc_param_err, "bad start index: %ld", start );

    // clip against the destination region
    bytes_t dst_size = dst . size ();
    if ( ( U64 ) start >= dst_size )
        return bytes_t ( 0 );

    bytes_t xfer = dst_size - ( U64 ) start;
    if ( xfer > num_bytes )
        xfer = num_bytes;

    // a null stream yields nothing
    if ( null_ref () )
        return bytes_t ( 0 );

    // a single transfer never exceeds the stream's MTU
    StreamItf * itf = get_itf ( CAP_PROP_READ | CAP_READ );
    bytes_t mtu = itf -> get_mtu ();
    if ( mtu < xfer )
        xfer = mtu;

    return itf -> read ( xfer, dst, start );
}
// Copy up to "_amount" bytes from "src" (at src_offset) into this region
// (at dst_offset). Overlapping regions are handled via memmove.
//
// @param _amount    requested byte count; clipped to both regions
// @param dst_offset offset into this region; out-of-range yields 0
// @param src        source region (must grant CAP_READ)
// @param src_offset offset into src; out-of-range yields 0
// @return number of bytes actually copied
bytes_t Mem :: copy ( const bytes_t & _amount, index_t dst_offset, const Mem & src, index_t src_offset )
{
    FUNC_ENTRY ();
    // both regions must grant the corresponding capability
    test_caps ( CAP_WRITE );
    src . test_caps ( CAP_READ );

    U64 amount = _amount;

    // offsets must land inside their regions
    if ( dst_offset < 0 || ( U64 ) dst_offset >= bytes ||
         src_offset < 0 || ( U64 ) src_offset >= src . bytes )
        return bytes_t ( 0 );

    // clamp against the space remaining in each region.
    // compare against the remainder rather than "amount + offset" so a
    // huge amount cannot wrap U64 arithmetic and bypass the clamp.
    U64 dst_avail = ( U64 ) bytes - ( U64 ) dst_offset;
    if ( amount > dst_avail )
        amount = dst_avail;
    U64 src_avail = ( U64 ) src . bytes - ( U64 ) src_offset;
    if ( amount > src_avail )
        amount = src_avail;

    if ( amount != 0 )
    {
        // memmove tolerates overlapping source/destination
        char * dp = ( char * ) ptr;
        const char * sp = ( const char * ) src . ptr;
        memmove ( & dp [ dst_offset ], & sp [ src_offset ], ( size_t ) amount );
    }

    return bytes_t ( amount );
}
// Write the entire remainder of "src" (from offset "start") to the stream,
// looping in MTU-sized chunks until everything is written.
//
// @param src    source region
// @param start  starting offset into src; must be non-negative
// @return total bytes written (== src.size() - start on success)
//
// @throws xc_param_err              on a negative start offset
// @throws xc_null_self_err          when the stream reference is null
// @throws xc_transfer_incomplete_err when the stream stops accepting bytes
//                                   before the full amount is written
bytes_t Stream :: write_all ( const Mem & src, index_t start ) const
{
    FUNC_ENTRY ();
    if ( start < 0 )
        THROW ( xc_param_err, "bad start index: %ld", start );
    bytes_t size = src . size ();
    // nothing left to write beyond the end of src
    if ( ( U64 ) start >= size )
        return bytes_t ( 0 );
    bytes_t all_bytes = size - ( U64 ) start;
    // unlike read, writing to a null stream is an error
    if ( null_ref () )
        THROW ( xc_null_self_err, "wrote 0 of %lu bytes", ( U64 ) all_bytes );
    StreamItf * itf = get_itf ( CAP_WRITE );
    // each individual write is limited to the stream's MTU
    bytes_t mtu = itf -> get_mtu ();
    bytes_t to_write = ( mtu < all_bytes ) ? mtu : all_bytes;
    bytes_t total = itf -> write ( to_write, src, start );
    // keep writing until the full remainder has been transferred
    while ( total < all_bytes )
    {
        to_write = all_bytes - total;
        if ( mtu < to_write )
            to_write = mtu;
        bytes_t num_writ = itf -> write ( to_write, src, start + ( U64 ) total );
        // a zero-byte write means the stream cannot make progress
        if ( num_writ == ( U64 ) 0 )
            THROW ( xc_transfer_incomplete_err, "wrote %lu of %lu bytes", ( U64 ) total, ( U64 ) all_bytes );
        total += num_writ;
    }
    return total;
}
// Build a version value from a major component alone.
// The component occupies the top byte of "val" and must fit in 8 bits.
// @throws xc_bounds_err when maj exceeds 255
vers_t :: vers_t ( U32 maj )
    : val ( maj << 24 )
{
    FUNC_ENTRY ();
    if ( maj > 255 )
        THROW ( xc_bounds_err, "major version = %u", maj );
}
/* Machine-generated ATerm rewrite function (do not hand-edit the logic).
 * Given arg0 and a list term arg1, scans the list for an element equal
 * to arg0; on a hit it exits with the constant built from lf_3sym,
 * otherwise (arg1 still a list) it exits with the constant built from
 * lf_4sym, and as a last resort rebuilds the normal form of the
 * application. The atmp100 slice bookkeeping follows the generator's
 * standard list-traversal pattern.
 */
ATerm lf_2 ( ATerm arg0 , ATerm arg1 )
{
    {
        ATerm tmp [ 2 ] ;
        FUNC_ENTRY ( lf_2sym , ATmakeAppl ( lf_2sym , arg0 , arg1 ) ) ;
        if ( check_sym ( arg1 , lf_list_1sym ) )
        {
            ( tmp [ 1 ] = arg_0 ( arg1 ) ) ;
            {
                ATerm atmp1010 ;
                ATerm atmp100 [ 2 ] ;
                /* remember the full list for slicing */
                ( atmp100 [ 0 ] = tmp [ 1 ] ) ;
                ( atmp100 [ 1 ] = tmp [ 1 ] ) ;
                while ( not_empty_list ( tmp [ 1 ] ) )
                {
                    ( atmp1010 = list_head ( tmp [ 1 ] ) ) ;
                    ( tmp [ 1 ] = list_tail ( tmp [ 1 ] ) ) ;
                    /* element matches arg0 -> rewrite to constant0 */
                    if ( term_equal ( arg0 , atmp1010 ) )
                    {
                        FUNC_EXIT_CONST ( constant0 , make_nf0 ( lf_3sym ) ) ;
                    }
                    ( atmp100 [ 1 ] = list_tail ( atmp100 [ 1 ] ) ) ;
                    ( tmp [ 1 ] = atmp100 [ 1 ] ) ;
                }
            }
        }
        /* no element matched: any list still rewrites to constant1 */
        if ( check_sym ( arg1 , lf_list_1sym ) )
        {
            {
                ATerm atmp10 = arg_0 ( arg1 ) ;
                FUNC_EXIT_CONST ( constant1 , make_nf0 ( lf_4sym ) ) ;
            }
        }
        /* fall back to the normal form of the original application */
        FUNC_EXIT ( make_nf2 ( lf_2sym , arg0 , arg1 ) ) ;
    }
}
// copy from source // return the number of bytes actually copied bytes_t Stream :: copy ( const Stream & src ) const { FUNC_ENTRY (); // a null ref should act like nothing was there // the behavior of /dev/null is a different concept if ( null_ref () ) return bytes_t ( 0 ); // access stream StreamItf * itf = get_itf ( CAP_PROP_READ | CAP_WRITE ); // allocate buffer bytes_t mtu = itf -> get_mtu (); Mem buffer = rsrc -> mmgr . alloc ( mtu, false ); // read from source bytes_t num_read = src . read ( mtu, buffer, 0 ); if ( num_read == ( U64 ) 0 ) return num_read; // write everything read return write_all ( num_read, buffer, 0 ); }
/* Iterate the statistics dictionary in BSTree order.
 *
 * path == ""    : start iteration; *next receives the first entry's path
 * path == <key> : *next receives the path of the entry following <key>
 *
 * Returns true and sets *next when a following entry exists; otherwise
 * sets *next to NULL and returns false. A NULL path or an unknown key
 * reports an internal error (via ctx) and also returns false.
 */
bool SRA_StatisticsNextPath ( const SRA_Statistics * self, ctx_t ctx, const char * path, const char** next )
{
    FUNC_ENTRY ( ctx, rcSRA, rcDatabase, rcAccessing );

    const DictionaryEntry * node = NULL;

    assert ( self );

    if ( path == NULL )
        INTERNAL_ERROR ( xcParamNull, "path is NULL" );
    else if ( path[0] == 0 )
    {
        /* empty path means "start from the beginning" */
        node = ( const DictionaryEntry * ) BSTreeFirst ( & self -> dictionary );
    }
    else
    {
        /* locate the current entry, then step to its successor */
        node = ( const DictionaryEntry * ) BSTreeFind ( & self -> dictionary, ( const void * ) path, DictionaryEntryFind );
        if ( node == NULL )
        {
            INTERNAL_ERROR ( xcUnexpected, "dictionary item '%s' is not found", path );
        }
        else
        {
            node = ( const DictionaryEntry * ) BSTNodeNext ( & node -> dad );
        }
    }

    /* no successor (or error above): iteration is finished */
    if ( node == NULL )
    {
        *next = NULL;
        return false;
    }

    *next = node -> path;
    return true;
}
/* Allocate a dictionary entry for "path" and insert it into the tree.
 *
 * Returns the new node, or NULL on allocation failure or duplicate key
 * (errors are reported through ctx).
 *
 * NOTE(review): the allocation is sizeof(*node) + path_size, and
 * string_copy writes path_size + 1 bytes (including the NUL) into
 * node->path — this is only safe if DictionaryEntry declares path as a
 * trailing char array of at least 1 byte; confirm against the struct.
 */
static DictionaryEntry * MakeNode ( SRA_Statistics * self, ctx_t ctx, const char * path )
{
    FUNC_ENTRY ( ctx, rcSRA, rcDatabase, rcAccessing );

    /* path_size excludes the terminating NUL */
    size_t path_size = string_size ( path );
    DictionaryEntry * node = malloc ( sizeof ( * node ) + path_size );
    if ( node == NULL )
    {
        SYSTEM_ERROR ( xcNoMemory, "allocating dictionary item" );
    }
    else
    {
        rc_t rc;

        string_copy ( node -> path, path_size + 1, path, path_size );

        /*TODO: decide whether to allow overwriting (not allowed now) */
        rc = BSTreeInsertUnique ( & self -> dictionary, & node -> dad, NULL, DictionaryEntryCompare );
        if ( rc == 0 )
        {
            return node;
        }

        /* duplicate key (or other failure): report and discard the node */
        INTERNAL_ERROR ( xcUnexpected, "inserting dictionary item '%s' rc = %R", node -> path, rc );
        free ( node );
    }

    return NULL;
}
/* Machine-generated ATerm rewrite function (do not hand-edit the logic).
 * Scans a list arg0 with two nested cursors looking for a later element
 * equal to an earlier one (a duplicate). On a match it recurses via
 * lf_8_recursive on the list rebuilt with the duplicate removed
 * (prefix slice + the element once + middle slice + remainder);
 * otherwise it returns the normal form of the original application.
 */
ATerm lf_8 ( ATerm arg0 )
{
    {
        ATerm tmp [ 2 ] ;
        FUNC_ENTRY ( lf_8sym , ATmakeAppl ( lf_8sym , arg0 ) ) ;
        if ( check_sym ( arg0 , lf_list_6sym ) )
        {
            ( tmp [ 1 ] = arg_0 ( arg0 ) ) ;
            {
                ATerm atmp001110 ;
                ATerm atmp00110 [ 2 ] ;
                ATerm atmp0010 ;
                ATerm atmp000 [ 2 ] ;
                /* outer cursor over the list */
                ( atmp000 [ 0 ] = tmp [ 1 ] ) ;
                ( atmp000 [ 1 ] = tmp [ 1 ] ) ;
                while ( not_empty_list ( tmp [ 1 ] ) )
                {
                    ( atmp0010 = list_head ( tmp [ 1 ] ) ) ;
                    ( tmp [ 1 ] = list_tail ( tmp [ 1 ] ) ) ;
                    /* inner cursor over the remainder */
                    ( atmp00110 [ 0 ] = tmp [ 1 ] ) ;
                    ( atmp00110 [ 1 ] = tmp [ 1 ] ) ;
                    while ( not_empty_list ( tmp [ 1 ] ) )
                    {
                        ( atmp001110 = list_head ( tmp [ 1 ] ) ) ;
                        ( tmp [ 1 ] = list_tail ( tmp [ 1 ] ) ) ;
                        /* duplicate found: rebuild list and recurse */
                        if ( term_equal ( atmp0010 , atmp001110 ) )
                        {
                            FUNC_EXIT ( lf_8_recursive ( cons ( slice ( atmp000 [ 0 ] , atmp000 [ 1 ] ) , cons ( make_list ( atmp0010 ) , cons ( slice ( atmp00110 [ 0 ] , atmp00110 [ 1 ] ) , tmp [ 1 ] ) ) ) ) ) ;
                        }
                        ( atmp00110 [ 1 ] = list_tail ( atmp00110 [ 1 ] ) ) ;
                        ( tmp [ 1 ] = atmp00110 [ 1 ] ) ;
                    }
                    ( atmp000 [ 1 ] = list_tail ( atmp000 [ 1 ] ) ) ;
                    ( tmp [ 1 ] = atmp000 [ 1 ] ) ;
                }
            }
        }
        /* no duplicate: return the normal form unchanged */
        FUNC_EXIT ( make_nf1 ( lf_8sym , arg0 ) ) ;
    }
}
// Produce a view onto a sub-region of this memory block.
//
// @param offset  starting offset; an out-of-range (negative or >= size)
//                offset yields an empty view positioned at the end
// @param _sz     requested length; clipped to the bytes remaining
// @return a Mem sharing this block's storage (requires CAP_SUBRANGE)
Mem Mem :: subrange ( index_t offset, const bytes_t & _sz ) const
{
    FUNC_ENTRY ();
    test_caps ( CAP_SUBRANGE );

    U64 sz = _sz;

    // start from a copy so capabilities/ownership carry over
    Mem m ( * this );
    if ( offset < ( I64 ) 0 || ( U64 ) offset >= bytes )
    {
        // degenerate view: empty, anchored one past the end
        m . ptr = ( void* ) & ( ( char* ) ptr ) [ bytes ];
        m . bytes = ( U64 ) 0;
    }
    else
    {
        // clip requested size to what remains after offset
        U64 b = ( U64 ) bytes - ( U64 ) offset;
        if ( sz > b )
            sz = b;
        m . ptr = ( void* ) & ( ( char* ) ptr ) [ offset ];
        m . bytes = sz;
    }
    return m;
}
/* Classify the current pileup event.
 *
 * Builds a bitmask of NGS_PileupEventType_* flags: deletion OR the
 * match/mismatch bit from HAS_MISMATCH, plus insertion / start / stop /
 * minus-strand modifier bits as detected from the entry state.
 * Returns 0 when the state check fails.
 *
 * NOTE(review): the source text of this function was truncated — it fell
 * off the end without returning; the trailing "return event_type;" and
 * closing brace are restored here. Confirm against the upstream file.
 */
int CSRA1_PileupEventGetEventType ( const CSRA1_PileupEvent * self, ctx_t ctx )
{
    FUNC_ENTRY ( ctx, rcSRA, rcCursor, rcAccessing );

    int event_type = 0;

    TRY ( CHECK_STATE ( self, ctx ) )
    {
        const bool * REF_ORIENTATION;
        CSRA1_Pileup_Entry * entry = self -> entry;

        /* during "next" we took these steps:
           1. if within a deletion, decrement deletion repeat && exit if ! 0
           2. check HAS_REF_OFFSET. if not false:
              a. a positive REF_OFFSET[ref_offset_idx] indicates a deletion
              b. a negative REF_OFFSET[ref_offset_idx] indicates an insertion
           3. move current offset ahead until ref_pos >= that of pileup

           so here, we first detect a deletion event
           next, we detect a match or mismatch by checking HAS_MISMATCH.
           if there was a prior insertion, we or that onto the event.
           if this event starts a new alignment, or start onto event.
           if it ends an alignment, or that onto the event. */

        if ( entry -> del_cnt != 0 )
            event_type = NGS_PileupEventType_deletion;
        else
        {
            /* match (0) or mismatch (1) straight from the boolean cell */
            const bool * HAS_MISMATCH = entry -> cell_data [ pileup_event_col_HAS_MISMATCH ];
            assert ( HAS_MISMATCH != NULL );
            assert ( entry -> seq_idx < entry -> cell_len [ pileup_event_col_HAS_MISMATCH ] );
            event_type = HAS_MISMATCH [ entry -> seq_idx ];
        }

        /* detect prior insertion */
        if ( entry -> ins_cnt != 0 )
            event_type |= NGS_PileupEventType_insertion;

        /* detect initial event */
        if ( CSRA1_PileupEventGetPileup ( self ) -> ref_zpos == entry -> zstart )
            event_type |= NGS_PileupEventType_start;

        /* detect final event */
        if ( CSRA1_PileupEventGetPileup ( self ) -> ref_zpos + 1 == entry -> xend )
            event_type |= NGS_PileupEventType_stop;

        /* detect minus strand */
        TRY ( REF_ORIENTATION = CSRA1_PileupEventGetEntry ( self, ctx, entry, pileup_event_col_REF_ORIENTATION ) )
        {
            assert ( REF_ORIENTATION != NULL );
            assert ( entry -> cell_len [ pileup_event_col_REF_ORIENTATION ] == 1 );
            if ( REF_ORIENTATION [ 0 ] )
                event_type |= NGS_PileupEventType_minus_strand;
        }
    }

    return event_type;
}
// Resize this memory region to "sz" bytes (requires CAP_RESIZE).
//
// @param sz     new size in bytes; equal size is a no-op
// @param clear  when true, newly exposed bytes are zeroed
//
// Three paths:
//  - null ref: allocate fresh memory via the global resource manager
//    (only when the rsrc header was compiled in; otherwise unimplemented)
//  - non-resizable backing (itf == 0, e.g. constant data): allocate a new
//    block, copy the old contents over, optionally zero the tail, and
//    replace self with the new block
//  - normal case: delegate to the memory interface and refresh ptr/bytes
//
// @throws xc_unimplemented_err when the rsrc-backed paths are compiled out
void Mem :: resize ( const bytes_t & sz, bool clear )
{
    FUNC_ENTRY ();
    test_caps ( CAP_RESIZE );

    // early cheap detection of noop
    if ( sz == bytes )
        return;

    // a null ref can be resized to allocate
    if ( ptr == 0 )
    {
        assert ( bytes == ( U64 ) 0 );
#ifdef _hpp_vdb3_rsrc_
        * this = rsrc -> mmgr . alloc ( sz, clear );
#else
        CONST_THROW ( xc_unimplemented_err, "unimplemented" );
#endif
    }
    else
    {
        assert ( bytes != ( U64 ) 0 );
        MemoryItf * itf = get_itf ( CAP_RESIZE );

        // there are cases when the obj can be null
        if ( itf == 0 )
        {
#ifdef _hpp_vdb3_rsrc_
            // ref represents constant data, not heap data
            Mem tmp = rsrc -> mmgr . alloc ( sz, false );

            // copy in data from self
            bytes_t num_writ = tmp . copy ( bytes, 0, * this, 0 );
            if ( bytes >= sz )
                // shrinking: copy was clipped to the new size
                assert ( num_writ == sz );
            else
            {
                // growing: everything copied, optionally zero the tail
                assert ( num_writ == bytes );
                if ( clear )
                {
                    bytes_t num_zeroed = tmp . fill ( ( U64 ) sz - bytes, ( I64 ) ( U64 ) num_writ, 0 );
                    assert ( num_writ + num_zeroed == sz );
                }
            }
            * this = tmp;
#else
            CONST_THROW ( xc_unimplemented_err, "unimplemented" );
#endif
        }
        else
        {
            // normal case
            itf -> resize ( sz, clear );
            // backing storage may have moved: refresh pointer and size
            ptr = itf -> get_mapped_memory ( & bytes );
        }
    }
}
/* Tear down the statistics object's dictionary.
 * Every entry is destroyed by DictionaryEntryWhack; the ctx is passed
 * through as the whack callback's data argument.
 */
void SRA_StatisticsWhack ( SRA_Statistics * self, ctx_t ctx )
{
    FUNC_ENTRY ( ctx, rcSRA, rcDatabase, rcDestroying );

    assert ( self );

    BSTreeWhack ( & self -> dictionary, DictionaryEntryWhack, ( void * ) ctx );
}
/* Destroy a cSRATblPair: release the owned row-set iterator,
 * destroy the embedded base TablePair, then free the object itself.
 */
static void cSRATblPairWhack ( cSRATblPair *self, const ctx_t *ctx )
{
    FUNC_ENTRY ( ctx );

    RowSetIteratorRelease ( self -> rsi, ctx );
    TablePairDestroy ( & self -> dad, ctx );

    MemFree ( ctx, self, sizeof * self );
}
/* Return the final reference position covered by the current alignment.
 * xend is exclusive, so the last position is xend - 1.
 * Returns 0 when the state check fails.
 */
int64_t CSRA1_PileupEventGetLastAlignmentPosition ( const CSRA1_PileupEvent * self, ctx_t ctx )
{
    FUNC_ENTRY ( ctx, rcSRA, rcCursor, rcAccessing );

    TRY ( CHECK_STATE ( self, ctx ) )
    {
        return self -> entry -> xend - 1;
    }

    return 0;
}
/* Look up the dictionary entry at "path" and render its value as a new
 * NGS_String. Numeric values are formatted into a stack buffer; string
 * values are duplicated. Returns NULL on any error (NULL path, unknown
 * key, or unexpected value type), reported through ctx.
 */
NGS_String* SRA_StatisticsGetAsString ( const SRA_Statistics * self, ctx_t ctx, const char * path )
{
    FUNC_ENTRY ( ctx, rcSRA, rcDatabase, rcAccessing );

    assert ( self );

    if ( path == NULL )
        INTERNAL_ERROR ( xcParamNull, "path is NULL" );
    else
    {
        DictionaryEntry * node = ( DictionaryEntry * ) BSTreeFind ( & self -> dictionary, ( const void * ) path, DictionaryEntryFind );
        if ( node == NULL )
        {
            INTERNAL_ERROR ( xcUnexpected, "dictionary item '%s' is not found", path );
        }
        else
        {
            switch ( node -> type )
            {
            case NGS_StatisticValueType_UInt64:
                {
                    char buf[1024];
                    size_t num_writ;
                    string_printf ( buf, sizeof(buf), &num_writ, "%lu", node -> value . u64 );
                    return NGS_StringMakeCopy ( ctx, buf, num_writ );
                }
                break; /* NOTE(review): unreachable after return; harmless */
            case NGS_StatisticValueType_Int64:
                {
                    char buf[1024];
                    size_t num_writ;
                    string_printf ( buf, sizeof(buf), &num_writ, "%li", node -> value . i64 );
                    return NGS_StringMakeCopy ( ctx, buf, num_writ );
                }
            case NGS_StatisticValueType_Real:
                {
                    char buf[1024];
                    size_t num_writ;
                    string_printf ( buf, sizeof(buf), &num_writ, "%f", node -> value . real );
                    return NGS_StringMakeCopy ( ctx, buf, num_writ );
                }
            case NGS_StatisticValueType_String:
                /* hand back a new reference to the stored string */
                return NGS_StringDuplicate ( node -> value . str, ctx );
            default :
                INTERNAL_ERROR ( xcUnexpected, "unexpected type %u for dictionary item '%s'", node -> type, path );
                break;
            }
        }
    }

    return NULL;
}
// Build a version value from major and minor components.
// The components are packed 8 bits each into the top half of "val".
// @throws xc_bounds_err when either component exceeds 255
vers_t :: vers_t ( U32 maj, U32 min )
    : val ( ( maj << 24 ) | ( min << 16 ) )
{
    FUNC_ENTRY ();
    if ( maj > 255 )
        THROW ( xc_bounds_err, "major version = %u", maj );
    if ( min > 255 )
        THROW ( xc_bounds_err, "minor version = %u", min );
}
/* Look up the dictionary entry at "path" and convert its value to
 * uint64_t. Negative integers, out-of-range reals and unknown types
 * report an internal error (via ctx) and return 0.
 *
 * Fixes: the UInt64 case previously returned the signed union member
 * (value.i64) instead of value.u64, and the Int64 error message said
 * "in64_t" instead of "int64_t".
 */
uint64_t SRA_StatisticsGetAsU64 ( const SRA_Statistics * self, ctx_t ctx, const char * path )
{
    FUNC_ENTRY ( ctx, rcSRA, rcDatabase, rcAccessing );

    assert ( self );

    if ( path == NULL )
        INTERNAL_ERROR ( xcParamNull, "path is NULL" );
    else
    {
        DictionaryEntry * node = ( DictionaryEntry * ) BSTreeFind ( & self -> dictionary, ( const void * ) path, DictionaryEntryFind );
        if ( node == NULL )
        {
            INTERNAL_ERROR ( xcUnexpected, "dictionary item '%s' is not found", path );
        }
        else
        {
            switch ( node -> type )
            {
            case NGS_StatisticValueType_Int64:
                /* a negative value has no uint64_t representation */
                if ( node -> value . i64 < 0 )
                {
                    INTERNAL_ERROR ( xcUnexpected, "cannot convert dictionary item '%s' from int64_t to uint64_t", path );
                }
                else
                {
                    return ( uint64_t ) node -> value . i64;
                }
                break;
            case NGS_StatisticValueType_UInt64:
                /* read the unsigned member of the union */
                return node -> value . u64;
            case NGS_StatisticValueType_Real:
                /* reject values outside the uint64_t range */
                if ( node -> value . real < 0 || node -> value . real > ULLONG_MAX )
                {
                    INTERNAL_ERROR ( xcUnexpected, "cannot convert dictionary item '%s' from double to uint64_t", path );
                }
                else
                {
                    return ( uint64_t ) xtrunc ( node -> value . real );
                }
                break;
            case NGS_StatisticValueType_String:
                return NGS_StringToU64 ( node -> value . str, ctx );
            default :
                INTERNAL_ERROR ( xcUnexpected, "unexpected type %u for dictionary item '%s'", node -> type, path );
                break;
            }
        }
    }

    return 0;
}
/* Data
 * retrieve data pointer
 */
const char * NGS_StringData ( const NGS_String * self, ctx_t ctx )
{
    /* fast path: no function-context setup when the reference is valid */
    if ( self != NULL )
        return self -> str;

    /* only enter the function context to report the error */
    FUNC_ENTRY ( ctx, rcSRA, rcString, rcAccessing );
    INTERNAL_ERROR ( xcSelfNull, "attempt to access NULL NGS_String" );
    return NULL;
}
/* Size
 * retrieve data length
 */
size_t NGS_StringSize ( const NGS_String * self, ctx_t ctx )
{
    /* fast path: no function-context setup when the reference is valid */
    if ( self != NULL )
        return self -> size;

    /* only enter the function context to report the error */
    FUNC_ENTRY ( ctx, rcSRA, rcString, rcAccessing );
    INTERNAL_ERROR ( xcSelfNull, "attempt to access NULL NGS_String" );
    return 0;
}
/* Add a reference to a KFFClass.
 * A NULL reference is tolerated and treated as a no-op.
 */
static rc_t KFFClassAddRef (const KFFClass * self)
{
    FUNC_ENTRY();

    if (self == NULL)
        return 0;

    /* cast away const to bump the shared counter */
    atomic32_inc (&((KFFClass*)self)->refcount);
    return 0;
}
// Locate the first occurrence of "byte" within this region.
//
// @param byte  the byte value to search for
// @return zero-based offset of the first match, or -1 when not found
//         (also -1 for a null/empty region — calling memchr on a null
//         pointer is undefined behavior, so it is guarded here)
index_t Mem :: find_first ( U8 byte ) const
{
    FUNC_ENTRY ();
    test_caps ( CAP_READ );

    // guard: memchr on a null or empty region is undefined
    if ( ptr == 0 || bytes == ( U64 ) 0 )
        return ( I64 ) -1;

    void * loc = memchr ( ptr, byte, bytes );
    if ( loc != 0 )
        return ( char * ) loc - ( char * ) ptr;

    return ( I64 ) -1;
}
// Build a version value from major, minor and release components.
// The components are packed 8 bits each from the top of "val" down.
// @throws xc_bounds_err when any component exceeds 255
vers_t :: vers_t ( U32 maj, U32 min, U32 rel )
    : val ( ( maj << 24 ) | ( min << 16 ) | ( rel << 8 ) )
{
    FUNC_ENTRY ();
    if ( maj > 255 )
        THROW ( xc_bounds_err, "major version = %u", maj );
    if ( min > 255 )
        THROW ( xc_bounds_err, "minor version = %u", min );
    if ( rel > 255 )
        THROW ( xc_bounds_err, "release component = %u", rel );
}
/* Release
 * discard reference to file
 * ignores NULL references
 */
LIB_EXPORT rc_t CC KFileFormatRelease ( const KFileFormat *cself )
{
    KFileFormat *self = (KFileFormat*)cself;

    FUNC_ENTRY();

    if (self == NULL)
        return 0;

    /* destroy the object when the last reference goes away */
    if (atomic32_dec_and_test (&self->refcount))
        return KFileFormatDestroy (self);

    return 0;
}
/* Return the cell data for "col_idx" on "entry", faulting it in through
 * the pileup on first access. The function context is only entered on
 * the slow (fault-in) path.
 */
static const void * CSRA1_PileupEventGetEntry ( const CSRA1_PileupEvent * self, ctx_t ctx,
    CSRA1_Pileup_Entry * entry, uint32_t col_idx )
{
    const void * cell = entry -> cell_data [ col_idx ];
    if ( cell != NULL )
        return cell;

    /* cell not yet populated - ask the pileup to load it */
    FUNC_ENTRY ( ctx, rcSRA, rcCursor, rcAccessing );
    return CSRA1_PileupGetEntry ( CSRA1_PileupEventGetPileup ( self ), ctx, entry, col_idx );
}
// support for C++ new and delete void * MemMgrItf :: _new ( size_t bytes ) { FUNC_ENTRY (); // allocate new object plus header size Refcount * obj = ( Refcount * ) _alloc ( bytes, true ); obj -> mmgr = ( MemMgrItf * ) duplicate (); obj -> obj_size = bytes; // return allocation return ( void * ) obj; }
/* Build the destination schema for a type mapping.
 *
 * If no "out-map" mapping exists for the source type, the source schema
 * is reused (with an added reference). Otherwise a fresh schema is made
 * and the mapped schema source file parsed into it; a further optional
 * "view-map" schema file is parsed on top. Returns NULL on failure
 * (errors reported via ctx).
 *
 * Fixes: both VSchemaParseFile ERROR messages previously passed
 * "src_schema" (a VSchema pointer) to a %s format — the schema source
 * path "schema_src" is what was parsed and is reported now.
 * NOTE(review): the source text was truncated mid-function; the trailing
 * "return NULL;" and closing brace are restored — confirm upstream.
 */
static VSchema *map_schema_types ( TypeParams *type, const ctx_t *ctx, const VSchema *src_schema )
{
    FUNC_ENTRY ( ctx );

    bool mapped;
    char schema_src [ 256 ];
    assert ( sizeof schema_src == sizeof type -> dst_type );

    /* map the source type to a destination type and its schema file */
    TRY ( mapped = map_typename ( ctx, "out-map", type -> src_type, type -> dst_type, schema_src, sizeof schema_src ) )
    {
        rc_t rc;
        VSchema *dst_schema;

        if ( ! mapped )
        {
            /* no mapping: hand back the source schema with a new reference */
            type -> view_type [ 0 ] = 0;
            rc = VSchemaAddRef ( src_schema );
            if ( rc != 0 )
                ERROR ( rc, "VSchemaAddRef failed" );
            return ( VSchema* ) src_schema;
        }

        rc = VDBManagerMakeSchema ( ctx -> caps -> vdb, & dst_schema );
        if ( rc != 0 )
            ERROR ( rc, "VDBManagerMakeSchema failed" );
        else
        {
            rc = VSchemaParseFile ( dst_schema, "%s", schema_src );
            if ( rc != 0 )
                ERROR ( rc, "VSchemaParseFile failed adding file '%s' for destination", schema_src );
            else
            {
                /* optionally layer the view schema on top */
                TRY ( mapped = map_typename ( ctx, "view-map", type -> src_type, type -> view_type, schema_src, sizeof schema_src ) )
                {
                    if ( ! mapped )
                    {
                        type -> view_type [ 0 ] = 0;
                        return dst_schema;
                    }

                    rc = VSchemaParseFile ( dst_schema, "%s", schema_src );
                    if ( rc == 0 )
                        return dst_schema;

                    ERROR ( rc, "VSchemaParseFile failed adding file '%s' for view", schema_src );
                }
            }

            /* failure after creation: drop the partially-built schema */
            VSchemaRelease ( dst_schema );
        }
    }

    return NULL;
}
/* Resolve a type-name mapping from configuration.
 *
 * Looks up "sra-sort/<sub_node>/<in>"; when absent, falls back to the
 * built-in table via map_typename_builtin. On success, "out" receives
 * the mapped type name and "schema_src" the schema source path read
 * from "sra-sort/schema-src/<mapped>". Errors are reported via ctx;
 * the function still returns true in those cases (only the builtin
 * fallback path can return false).
 *
 * Fix: "n" is now nulled between releasing the first node and reopening
 * it for the schema-src lookup — previously, if the second open failed,
 * the final KConfigNodeRelease would release the already-released node.
 */
static bool map_typename ( const ctx_t *ctx, const char *sub_node, const char *in, char *out, char *schema_src, size_t size )
{
    FUNC_ENTRY ( ctx );

    rc_t rc;
    const KConfigNode *n;
    size_t num_read, remaining;

    /* convert the type name into a config path and look up the mapping */
    typename_to_config_path ( in, out );
    rc = KConfigOpenNodeRead ( ctx -> caps -> cfg, & n, "sra-sort/%s/%s", sub_node, out );
    if ( rc != 0 )
        return map_typename_builtin ( ctx, sub_node, in, out, schema_src, size );

    rc = KConfigNodeRead ( n, 0, out, size - 1, & num_read, & remaining );
    if ( rc != 0 )
        ERROR ( rc, "KConfigNodeRead failed" );
    else if ( remaining != 0 )
    {
        rc = RC ( rcExe, rcNode, rcReading, rcBuffer, rcInsufficient );
        ERROR ( rc, "type map of '%s' is too long", in );
    }
    else
    {
        out [ num_read ] = 0;

        /* now locate the schema source for the mapped type */
        typename_to_config_path ( out, schema_src );
        KConfigNodeRelease ( n );
        n = NULL; /* prevent double release below if the reopen fails */
        rc = KConfigOpenNodeRead ( ctx -> caps -> cfg, & n, "sra-sort/schema-src/%s", schema_src );
        if ( rc != 0 )
            ERROR ( rc, "KConfigOpenNodeRead - failed to find entry 'sra-sort/schema-src/%s'", schema_src );
        else
        {
            rc = KConfigNodeRead ( n, 0, schema_src, size - 1, & num_read, & remaining );
            if ( rc != 0 )
                ERROR ( rc, "KConfigNodeRead failed" );
            else if ( remaining != 0 )
            {
                rc = RC ( rcExe, rcNode, rcReading, rcBuffer, rcInsufficient );
                ERROR ( rc, "type map of '%s' is too long", in );
            }
            else
            {
                schema_src [ num_read ] = 0;
            }
        }
    }

    /* KConfigNodeRelease tolerates NULL */
    KConfigNodeRelease ( n );
    return true;
}