/* VBlobHeaderReplace
 *  replace the contents of writable header "targ" with those of dummy
 *  header "src", cloning src's underlying data so targ's parent owns a
 *  private copy.  targ keeps its own parent chain and vtable slot.
 *
 *  "targ" [ IN/OUT ] - destination header; must use VBlobHeader_vt[0]
 *  and be exclusively held (refcount == 1)
 *  "src" [ IN ] - source header; must be a dummy (VBlobHeader_vt[1])
 *
 *  returns rcNull on NULL params, rcInvalid on vtable/refcount
 *  mismatch, rcExhausted on allocation failure
 */
rc_t VBlobHeaderReplace( VBlobHeader *targ, const VBlobHeader *src )
{
    BlobHeaders *parent;
    struct VBlobHeaderData *data;
    atomic32_t temp;

    if (src == NULL || targ == NULL)
        return RC(rcVDB, rcHeader, rcCopying, rcParam, rcNull);

    /* src must be a dummy header, targ a regular one */
    if (src->vt != &VBlobHeader_vt[1] || targ->vt != &VBlobHeader_vt[0])
        return RC(rcVDB, rcHeader, rcCopying, rcParam, rcInvalid);

    /* refuse to clobber a header anyone else still references */
    if (atomic32_read(&targ->refcount) != 1)
        return RC(rcVDB, rcHeader, rcCopying, rcParam, rcInvalid);

    data = calloc(1, sizeof(*data));
    if (data == NULL)
        return RC(rcVDB, rcHeader, rcCopying, rcMemory, rcExhausted);

    /* clone src's data block and give the clone its own single reference */
    *data = *src->parent->data;
    atomic32_set(&data->refcount, 1);

    /* swap the clone into targ's parent, releasing the old data */
    parent = (BlobHeaders *)targ->parent;
    VBlobHeaderDataRelease(parent->data);
    parent->data = data;

    /* capture src's refcount through the atomic API before the struct
       copy below smears it with a plain assignment, then restore it
       atomically afterwards (struct assignment of atomic32_t is not a
       sanctioned atomic operation) */
    atomic32_set(&temp, atomic32_read(&src->refcount));
    *targ = *src;
    atomic32_set(&targ->refcount, atomic32_read(&temp));

    /* re-establish targ's own identity: its parent and its vtable slot */
    targ->parent = parent;
    targ->vt = &VBlobHeader_vt[0];
    return 0;
}
/* CopierMake
 *  construct a Copier that writes to "path" under "dir" via a buffer
 *  queue of the given timeout and length.
 *
 *  "c" [ OUT ] - receives the new Copier on success
 *  "dir" [ IN ] - directory in which the output file is created
 *  "path" [ IN ] - output file path
 *  "timeout" [ IN ] - queue timeout, forwarded to BufferQMake
 *  "length" [ IN ] - queue length, forwarded to BufferQMake
 *
 *  on failure, any partially constructed object is released here and
 *  "*c" is left untouched.
 */
rc_t CopierMake (Copier ** c, KDirectory * dir, const char * path,
                 uint32_t timeout, uint32_t length)
{
    rc_t rc = 0, orc;
    Copier * self;

    self = malloc (sizeof *self);
    if (self == NULL)
    {
        rc = RC (rcExe, rcNoTarg, rcAllocating, rcMemory, rcExhausted);
    }
    else
    {
        /* fix: refcount was initialized here AND again on the success
           path; a single initialization is sufficient */
        atomic32_set (&self->refcount, 1);
        self->f = NULL;
        self->q = NULL;

        rc = CopierMakeF (self, dir, path);
        if (rc == 0)
        {
            rc = BufferQMake (&self->q, timeout, length);
            if (rc == 0)
            {
                *c = self;
                return rc;
            }
        }

        /* construction failed: tear down whatever was built */
        orc = CopierRelease (self);
        if (orc)
            LOGERR (klogInt, orc, "Error releasing Copier");
    }
    return rc;
}
/* Make
 *  create an empty queue object
 *
 *  "capacity" [ IN ] - minimum queue length
 *  always expands to a power of 2, i.e. providing
 *  a length of 10 will result in a length of 16.
 */
LIB_EXPORT rc_t CC KQueueMake ( KQueue **qp, uint32_t capacity )
{
    rc_t rc;

    if ( qp == NULL )
        rc = RC ( rcCont, rcQueue, rcConstructing, rcParam, rcNull );
    else
    {
        KQueue *q;
        uint32_t cap = 1;

        /* fix: with capacity > 2^31 the doubling loop used to overflow
           cap to 0 and spin forever; refuse such capacities up front */
        if ( capacity > ( ( uint32_t ) 1 << 31 ) )
        {
            * qp = NULL;
            return RC ( rcCont, rcQueue, rcConstructing, rcParam, rcExcessive );
        }

        /* round capacity up to a power of 2 */
        while ( cap < capacity )
            cap += cap;

        /* the buffer member is a trailing array: allocate the header
           plus cap elements */
        q = malloc ( sizeof * q - sizeof q -> buffer
                     + cap * sizeof q -> buffer [ 0 ] );
        if ( q == NULL )
            rc = RC ( rcCont, rcQueue, rcConstructing, rcMemory, rcExhausted );
        else
        {
            /* rc semaphore counts readable items (starts empty),
               wc semaphore counts writable slots (starts full) */
            rc = KSemaphoreMake ( & q -> rc, 0 );
            if ( rc == 0 )
            {
                rc = KSemaphoreMake ( & q -> wc, cap );
                if ( rc == 0 )
                {
                    rc = KLockMake ( & q -> rl );
                    if ( rc == 0 )
                    {
                        rc = KLockMake ( & q -> wl );
                        if ( rc == 0 )
                        {
                            q -> capacity = cap;
                            q -> bmask = cap - 1;
                            q -> imask = ( cap + cap ) - 1;
                            q -> read = q -> write = 0;
                            atomic32_set ( & q -> refcount, 1 );
                            atomic32_set ( & q -> sealed, 0 );
                            QMSG ( "%s[%p]: created queue with capacity %u, "
                                   "bmask %#032b, imask %#032b.\n"
                                   , __func__, q, q -> capacity, q -> bmask, q -> imask );
                            * qp = q;
                            return 0;
                        }
                        KLockRelease ( q -> rl );
                    }
                    KSemaphoreRelease ( q -> wc );
                }
                KSemaphoreRelease ( q -> rc );
            }
            free ( q );
        }
        * qp = NULL;
    }
    return rc;
}
/* BlobHeadersCreateInternal
 *  allocate a BlobHeaders object together with a data block sized to
 *  hold "args" argument slots followed by "ops" opcode bytes, all in
 *  one allocation.  "*lhs" receives the new object, or NULL on failure.
 */
static rc_t BlobHeadersCreateInternal ( BlobHeaders **lhs, unsigned ops, unsigned args )
{
    BlobHeaders *hdr = calloc(1, sizeof *hdr);

    *lhs = NULL;
    if (hdr == NULL)
        return RC(rcVDB, rcHeader, rcConstructing, rcMemory, rcExhausted);

    atomic32_set(&hdr->refcount, 1);
    hdr->data = calloc(1, sizeof(*hdr->data)
                          + args * sizeof(hdr->data->args[0]) + ops);
    if (hdr->data == NULL) {
        free(hdr);
        return RC(rcVDB, rcHeader, rcConstructing, rcMemory, rcExhausted);
    }

    atomic32_set(&hdr->data->refcount, 1);

    /* both arrays live in the tail of the data allocation: args first
       (when present), then the opcode bytes */
    if (args != 0) {
        hdr->data->args = (void *)(&hdr->data[1]);
        hdr->data->arg_count = args;
    }
    if (ops != 0) {
        hdr->data->ops = args != 0
            ? (void *)(&hdr->data->args[args])
            : (void *)(&hdr->data[1]);
        hdr->data->op_count = ops;
    }

    *lhs = hdr;
    return 0;
}
/* Init
 *  initialize a condition variable in place, translating
 *  pthread_cond_init status codes into rc_t values.
 */
rc_t KConditionInit ( KCondition *self )
{
    int status;

    assert ( self != NULL );

    status = pthread_cond_init ( & self -> cond, NULL );
    if ( status == EAGAIN )
        return RC ( rcPS, rcCondition, rcConstructing, rcCondition, rcExhausted );
    if ( status == ENOMEM )
        return RC ( rcPS, rcCondition, rcConstructing, rcMemory, rcExhausted );
    if ( status == EBUSY )
        return RC ( rcPS, rcCondition, rcConstructing, rcCondition, rcBusy );
    if ( status == EINVAL )
        return RC ( rcPS, rcCondition, rcConstructing, rcCondition, rcInvalid );
    if ( status != 0 )
        return RC ( rcPS, rcCondition, rcConstructing, rcNoObj, rcUnknown );

    atomic32_set ( & self -> refcount, 1 );
    return 0;
}
/* Make
 *  make a simple mutex
 */
LIB_EXPORT rc_t CC KLockMake ( KLock **lockp )
{
    rc_t rc;

    if ( lockp == NULL )
        rc = RC ( rcPS, rcLock, rcConstructing, rcParam, rcNull );
    else
    {
        KLock *lock = malloc ( sizeof * lock );
        if ( lock == NULL )
            rc = RC ( rcPS, rcLock, rcConstructing, rcMemory, rcExhausted );
        else
        {
            int status = pthread_mutex_init ( & lock -> mutex, NULL );
            if ( status == 0 )
            {
                atomic32_set ( & lock -> refcount, 1 );
                * lockp = lock;
                return 0;
            }

            /* fix: pthread_mutex_init CAN fail (POSIX documents EAGAIN,
               ENOMEM, EPERM); map statuses the same way KRWLockMake does
               instead of collapsing everything to rcUnknown */
            switch ( status )
            {
            case EAGAIN:
                rc = RC ( rcPS, rcLock, rcConstructing, rcLock, rcExhausted );
                break;
            case ENOMEM:
                rc = RC ( rcPS, rcLock, rcConstructing, rcMemory, rcExhausted );
                break;
            case EPERM:
                rc = RC ( rcPS, rcLock, rcConstructing, rcProcess, rcUnauthorized );
                break;
            default:
                rc = RC ( rcPS, rcLock, rcConstructing, rcNoObj, rcUnknown );
            }

            free ( lock );
        }

        * lockp = NULL;
    }
    return rc;
}
/* AlignAccessMgrMake
 *  allocate an AlignAccessMgr with a single reference.
 *  "*mgr" is set to the new object, or NULL when allocation fails.
 */
LIB_EXPORT rc_t CC AlignAccessMgrMake(const AlignAccessMgr **mgr)
{
    AlignAccessMgr *self = malloc(sizeof(*self));

    *mgr = self;
    if (self == NULL)
        return RC(rcAlign, rcMgr, rcConstructing, rcMemory, rcExhausted);

    atomic32_set(&self->refcount, 1);
    return 0;
}
/* XTaskerSetDone
 *  raise the tasker's "done" flag.  the parameter is const in the
 *  signature, so the flag is written through a cast.
 */
rc_t CC XTaskerSetDone ( const struct XTasker * self )
{
    struct XTasker * Tasker = ( struct XTasker * ) self;

    if ( Tasker == NULL ) {
        return RC ( rcExe, rcNoTarg, rcProcessing, rcParam, rcNull );
    }

    atomic32_set ( & ( Tasker -> is_done ), 1 );

    return 0;
}   /* XTaskerSetDone () */
/* XTaskerDispose
 *  tear down a tasker: dispose its stats, each task, the task array,
 *  and finally the tasker itself.  NULL is tolerated.
 */
rc_t CC XTaskerDispose ( const struct XTasker * self )
{
    struct XTasker * Tasker = ( struct XTasker * ) self;
    size_t llp;

    if ( Tasker != NULL ) {
        if ( Tasker -> stat != NULL ) {
            XStatsDispose ( Tasker -> stat );
            Tasker -> stat = NULL;
        }

        if ( Tasker -> tasks != NULL ) {
            for ( llp = 0; llp < Tasker -> task_qty; llp ++ ) {
                if ( Tasker -> tasks [ llp ] != NULL ) {
                    _TaskDispose ( Tasker -> tasks [ llp ] );
                    Tasker -> tasks [ llp ] = NULL;
                }
            }

            /* fix: the task pointer array allocated in XTaskerMake was
               previously only NULLed out, never freed - memory leak */
            free ( Tasker -> tasks );
        }
        Tasker -> tasks = NULL;
        Tasker -> task_qty = 0;

        atomic32_set ( & ( Tasker -> iteration ), 0 );
        atomic32_set ( & ( Tasker -> is_run ), 0 );
        atomic32_set ( & ( Tasker -> is_done ), 0 );

        Tasker -> stop_at = 0;

        Tasker -> job_4_task_data = NULL;
        Tasker -> job_4_task = NULL;

        free ( Tasker );
    }

    return 0;
}   /* XTaskerDispose () */
/* CCCopyMake
 *  construct a CCCopy object over the given directories, attaching a
 *  reference to each collaborator and storing the resolved relative
 *  path in the object's trailing path buffer.
 *
 *  "p" [ OUT ] - receives the new object on success; untouched on failure
 *  "in" / "out" / "xml" [ IN ] - source, destination and XML directories
 *  "force" [ IN ] - overwrite flag, stored verbatim
 *  "md5" [ IN ] - MD5 sum formatter, reference attached
 *  "ff" [ IN ] - file-format recognizer, reference attached
 *  "tree" [ IN ] - copy tree; stored without an AddRef here
 *  "path" [ IN ] - input path; must resolve inside the current directory
 */
rc_t CCCopyMake (CCCopy ** p, const KDirectory * in, KDirectory * out,
                 KDirectory * xml, bool force, KMD5SumFmt * md5,
                 CCFileFormat * ff, CCTree * tree, const char * path)
{
    rc_t rc;
    size_t pathlen;
    CCCopy * self;
    char relpath [4096];

    assert (in != NULL);
    assert (out != NULL);
    assert (xml != NULL);
    assert (path != NULL);

    /* canonicalize path relative to "in" */
    rc = KDirectoryVResolvePath (in, false, relpath, sizeof relpath, path, NULL);
    if (rc != 0)
    {
        pLOGERR (klogErr, rc, "unable to resolve path $(P)", PLOG_S(P), path);
        return rc;
    }

    /* a leading "../" means the path escapes the managed directory */
    if ((relpath[0] == '.') && (relpath[1] == '.') && (relpath[2] == '/'))
    {
        rc = RC (rcExe, rcDirectory, rcResolving, rcPath, rcOutOfKDirectory);
        pLOGERR (klogErr, rc,
                 "Path must resolve to current directory or subdirectories $(P)",
                 PLOG_S(P), relpath);
        return rc;
    }

    pathlen = strlen(relpath);
    /* object carries its path in a trailing buffer: allocate header
       minus the declared path element plus the string and its NUL */
    self = malloc (sizeof (*self) - sizeof (*self->path) + pathlen + 1);
    if (self == NULL)
        rc = RC (rcExe, rcNoTarg, rcAllocating, rcMemory, rcExhausted);
    else
    {
        atomic32_set (&self->refcount, 1);
        /* attach references to all shared collaborators; note the
           AddRef return codes are intentionally ignored here */
        KDirectoryAddRef (in);
        KDirectoryAddRef (out);
        KDirectoryAddRef (xml);
        KMD5SumFmtAddRef (md5);
        CCFileFormatAddRef (ff);
        self->in = in;
        self->out = out;
        self->xml = xml;
        self->force = force;
        self->md5 = md5;
        self->ff = ff;
        self->tree = tree;  /* NOTE(review): tree gets no AddRef, unlike the others - presumably borrowed; confirm ownership */
        memcpy (self->path, relpath, pathlen+1);
        *p = self;
    }
    return rc;
}
/* KRWLockRelease
 *  drop one reference; when the last reference goes away, restore the
 *  count to 1 (so the object looks owned during teardown) and whack it.
 *  NULL references are ignored.
 */
LIB_EXPORT rc_t CC KRWLockRelease ( const KRWLock *cself )
{
    KRWLock *self = ( KRWLock* ) cself;

    if ( self == NULL )
        return 0;

    if ( ! atomic32_dec_and_test ( & self -> refcount ) )
        return 0;

    atomic32_set ( & self -> refcount, 1 );
    return KRWLockWhack ( self );
}
/* Make
 *  create path manager
 *
 *  the path manager should already be configured with
 *  standard search paths, but can be augmented by using
 *  the Add*Path messages.
 *
 *  "dir" [ IN, NULL OKAY ] - optional root directory to use
 *  attaches a new reference
 */
MOD_EXPORT rc_t CC SRAPathMakeImpl ( SRAPath **pm, const KDirectory *dir )
{
    rc_t rc;

    if ( pm == NULL )
        rc = RC ( rcSRA, rcMgr, rcConstructing, rcParam, rcNull );
    else
    {
        NCBISRAPath *p = (NCBISRAPath *) malloc ( sizeof * p );
        if ( p == NULL )
            rc = RC ( rcSRA, rcMgr, rcConstructing, rcMemory, rcExhausted );
        else
        {
            p -> dad . vt = ( SRAPath_vt* ) & vtSRAPath;

            /* either attach a reference to the caller's directory, or
               fall back to the native working directory */
            p -> dir = dir;
            if ( dir != NULL )
                rc = KDirectoryAddRef ( dir );
            else
            {
                KDirectory *wd;
                rc = KDirectoryNativeDir ( & wd );
                p -> dir = wd;
            }

            if ( rc != 0 )
                free ( p );
            else
            {
                DLListInit ( & p -> repos );
                p -> dflt_repo = NULL;
                atomic32_set ( & p -> refcount, 1 );

                /* the object is now complete */
                rc = SRAPathConfig ( p );
                if ( rc == 0 )
                {
                    * pm = & p -> dad;
                    return 0;
                }

                /* config failed: Whack releases the directory reference
                   taken above along with the object */
                SRAPathWhack ( p );
            }
        }

        * pm = NULL;
    }
    return rc;
}
/* Make
 *  allocate a zero-initialized KColumnBlob with one reference and the
 *  requested byte-swap setting.
 *
 *  "blobp" [ OUT ] - receives the new blob on success
 *  "bswap" [ IN ] - whether values need byte-swapping on read
 */
static rc_t KColumnBlobMake ( KColumnBlob **blobp, bool bswap )
{
    /* idiom: calloc replaces the original malloc + memset pair */
    KColumnBlob *blob = calloc ( 1, sizeof * blob );
    if ( blob == NULL )
        return RC ( rcDB, rcBlob, rcConstructing, rcMemory, rcExhausted );

    atomic32_set ( & blob -> refcount, 1 );
    blob -> bswap = bswap;

    * blobp = blob;
    return 0;
}
VBlobHeader *BlobHeadersCreateDummyHeader ( uint8_t version, uint32_t fmt, uint8_t flags, uint64_t size ) { struct BlobHeaderDummy *y; y = calloc(1, sizeof(*y)); if (y) { y->data.fmt = fmt; y->data.version = version; y->data.osize = size; y->data.flags = flags; atomic32_set(&y->data.refcount, 1); y->dummy.data = &y->data; atomic32_set(&y->dummy.refcount, 1); y->hdr.vt = &VBlobHeader_vt[1]; y->hdr.parent = &y->dummy; atomic32_set(&y->hdr.refcount, 1); return &y->hdr; } return 0; }
rc_t BAMReaderMake( const BAMReader **result, char const headerText[], char const path[] ) { rc_t rc; BAMReader *self = malloc(sizeof(BAMReader)); if ( self == NULL ) { *result = NULL; return RC(rcAlign, rcFile, rcConstructing, rcMemory, rcExhausted); } else { atomic32_set( & self->refcount, 1 ); rc = BAMFileMakeWithHeader( & self->file, headerText, "%s", path); if ( rc != 0 ) { free(self); *result = 0; } else *result = self; } self->nque = 0; self->rc = 0; self->eof = false; rc = KLockMake(&self->lock); if (rc == 0) { rc = KConditionMake(&self->have_data); if (rc == 0) { rc = KConditionMake(&self->need_data); if (rc == 0) { rc = KThreadMake(&self->th, BAMReaderThreadMain, self); if (rc == 0) return 0; KConditionRelease(self->need_data); } KConditionRelease(self->have_data); } KLockRelease(self->lock); } return rc; }
/* Release
 *  discard reference to file
 *  ignores NULL references
 *  if destruction fails, the reference is restored so the caller may
 *  retry the release later.
 */
LIB_EXPORT rc_t CC KArrayFileRelease ( const KArrayFile *cself )
{
    KArrayFile *self = ( KArrayFile* ) cself;
    rc_t rc = 0;

    if ( self != NULL && atomic32_dec_and_test ( & self -> refcount ) )
    {
        rc = KArrayFileDestroy ( self );
        if ( rc != 0 )
            atomic32_set ( & self -> refcount, 1 );  /* resurrect on failure */
    }

    return rc;
}
/* AlignAccessMgrRelease
 *  drop one reference; on the last release, whack and free the manager.
 *  if whacking fails the reference is restored and the error returned.
 *  NULL is tolerated.
 */
LIB_EXPORT rc_t CC AlignAccessMgrRelease(const AlignAccessMgr *cself)
{
    AlignAccessMgr *self = (AlignAccessMgr *)cself;
    rc_t rc;

    if (self == NULL)
        return 0;
    if (!atomic32_dec_and_test(&self->refcount))
        return 0;

    rc = AlignAccessMgrWhack(self);
    if (rc != 0) {
        atomic32_set(&self->refcount, 1);  /* resurrect on failure */
        return rc;
    }
    free(self);
    return 0;
}
/* Wait
 *  waits for a thread to exit
 *
 *  "status" [ OUT ] - return parameter for thread's exit code
 */
LIB_EXPORT rc_t CC KThreadWait ( KThread *self, rc_t *out )
{
    DWORD wait_res;

    if ( self == NULL )
        return RC ( rcPS, rcThread, rcWaiting, rcSelf, rcNull );

    /* prevent multiple waiters */
    if ( atomic32_test_and_set ( & self -> waiting, 0, 1 ) != 0 )
        return RC ( rcPS, rcThread, rcWaiting, rcThread, rcBusy );

    /* block until the thread terminates */
    wait_res = WaitForSingleObject( self->thread_handle, INFINITE );

    /* release waiter lock */
    atomic32_set ( & self -> waiting, 0 );

    /* only WAIT_FAILED is treated as an error; other wait results
       (WAIT_OBJECT_0 etc.) fall through to the success path */
    switch( wait_res )
    {
        case WAIT_FAILED : return RC ( rcPS, rcThread, rcWaiting, rcNoObj, rcUnknown );
    }
/* retained from the pthread implementation this was ported from:
    switch ( status ) { case 0: break; case ESRCH: return RC ( rcPS, rcThread, rcWaiting, rcThread, rcDestroyed ); case EINVAL: return RC ( rcPS, rcThread, rcWaiting, rcThread, rcDetached ); case EDEADLK: return RC ( rcPS, rcThread, rcWaiting, rcThread, rcDeadlock ); default: return RC ( rcPS, rcThread, rcWaiting, rcNoObj, rcUnknown ); } */

    /* the thread has been joined; no further join is needed */
    self -> join = false;
/* pthread-era cancellation handling, not applicable on Windows:
    if ( td == PTHREAD_CANCELED ) self -> rc = RC ( rcPS, rcThread, rcWaiting, rcThread, rcCanceled ); */

    /* hand back the thread's exit rc if requested */
    if ( out != NULL )
        * out = self -> rc;

    return 0;
}
/* BlobHeadersGetHeader
 *  allocate a regular header (vtable slot 0) over "self", attaching a
 *  reference to it and seeding the op/arg tails from its data block.
 *  returns NULL on allocation failure.
 */
VBlobHeader *BlobHeadersGetHeader ( const BlobHeaders *self )
{
    VBlobHeader *hdr = calloc(1, sizeof(*hdr));

    if (hdr == NULL)
        return NULL;

    hdr->vt = &VBlobHeader_vt[0];
    atomic32_set(&hdr->refcount, 1);

    /* the header holds a counted reference to its owner */
    hdr->parent = self;
    BlobHeadersAddRef(hdr->parent);

    if (self->data != NULL) {
        hdr->op_tail = self->data->op_count;
        hdr->arg_tail = self->data->arg_count;
    }

    return hdr;
}
/* Make
 *  make a simple read/write lock
 *  translates pthread_rwlock_init failures into specific rc codes.
 */
LIB_EXPORT rc_t CC KRWLockMake ( KRWLock **lockp )
{
    rc_t rc;
    KRWLock *lock;
    int status;

    if ( lockp == NULL )
        return RC ( rcPS, rcRWLock, rcConstructing, rcParam, rcNull );

    * lockp = NULL;

    lock = malloc ( sizeof * lock );
    if ( lock == NULL )
        return RC ( rcPS, rcRWLock, rcConstructing, rcMemory, rcExhausted );

    status = pthread_rwlock_init ( & lock -> lock, NULL );
    if ( status == 0 )
    {
        atomic32_set ( & lock -> refcount, 1 );
        * lockp = lock;
        return 0;
    }

    free ( lock );

    /* map the documented pthread_rwlock_init failures */
    switch ( status )
    {
    case EAGAIN:
        rc = RC ( rcPS, rcRWLock, rcConstructing, rcRWLock, rcExhausted );
        break;
    case ENOMEM:
        rc = RC ( rcPS, rcRWLock, rcConstructing, rcMemory, rcExhausted );
        break;
    case EPERM:
        rc = RC ( rcPS, rcRWLock, rcConstructing, rcProcess, rcUnauthorized );
        break;
    case EBUSY:
        rc = RC ( rcPS, rcRWLock, rcConstructing, rcRWLock, rcBusy );
        break;
    default:
        rc = RC ( rcPS, rcRWLock, rcConstructing, rcNoObj, rcUnknown );
    }

    return rc;
}
/* AlignAccessDBMakeEnumerator
 *  allocate an alignment enumerator over "self" in its initial state
 *  (no inner cursor, not at end, no reference sequence selected).
 *  "*align_enum" receives the enumerator, or NULL on failure.
 */
static rc_t CC AlignAccessDBMakeEnumerator(const AlignAccessDB *self,
                                           AlignAccessAlignmentEnumerator **align_enum)
{
    AlignAccessAlignmentEnumerator *e = malloc(sizeof(*e));

    *align_enum = e;
    if (e == NULL)
        return RC(rcAlign, rcTable, rcConstructing, rcMemory, rcExhausted);

    e->innerSelf = NULL;

    /* the enumerator keeps a counted reference to its database */
    e->parent = self;
    AlignAccessDBAddRef(e->parent);

    atomic32_set(&e->refcount, 1);
    e->atend = 0;
    e->refSeqID = -1;
    e->startpos = 0;
    e->endpos = 0;
    return 0;
}
/* AlignAccessMgrMakeBAMDB
 *  open the BAM file at "bam" and wrap it in an AlignAccessDB that
 *  holds a counted reference to the manager.
 *  "*db" receives the database on success; on failure nothing is
 *  written to "*db" and the partial object is freed.
 */
LIB_EXPORT rc_t CC AlignAccessMgrMakeBAMDB(const AlignAccessMgr *self,
                                           const AlignAccessDB **db,
                                           const VPath *bam)
{
    rc_t rc;
    AlignAccessDB *adb = malloc(sizeof(*adb));

    if (adb == NULL)
        return RC(rcAlign, rcMgr, rcConstructing, rcMemory, rcExhausted);

    rc = BAMFileMakeWithVPath(&adb->innerSelf, bam);
    if (rc != 0) {
        free(adb);
        return rc;
    }

    adb->mgr = self;
    AlignAccessMgrAddRef(adb->mgr);
    atomic32_set(&adb->refcount, 1);

    *db = adb;
    return 0;
}
/* ProcessOneMake
 *  construct a ProcessOne over one file, attaching a reference to each
 *  collaborator and copying "path" into the object's trailing buffer.
 *  "*ppo" receives the object, or NULL when allocation fails (with a
 *  non-zero rc).
 */
rc_t ProcessOneMake (ProcessOne ** ppo, const KDirectory * dir, KDirectory * xml,
                     const KFile * file, KMD5SumFmt *md5, CCFileFormat * ff,
                     const char * path)
{
    ProcessOne * self;
    rc_t rc = 0;
    size_t pathlen;

    PLOGMSG (klogDebug10, "ProcessOneMake $(f)", PLOG_S(f), path);

    /* legit seeming inputs? these could be replaced with RC returns */
    assert (ppo != NULL);
    assert (file != NULL);
    assert (path != NULL);

    /* allocate the object: header minus the declared path member plus
       the string and its terminating NUL */
    pathlen = strlen (path);
    self = malloc (sizeof (*self) - sizeof(self->path) + pathlen + 1);
    if (self == NULL)
        rc = RC (rcExe, rcNoTarg, rcAllocating, rcMemory, rcExhausted);
    else
    {
        atomic32_set (&self->refcount, 1);

        /* attach references to shared collaborators */
        KDirectoryAddRef (dir);
        KDirectoryAddRef (xml);
        KMD5SumFmtAddRef (md5);
        CCFileFormatAddRef (ff);
        KFileAddRef (file);

        self->dir = dir;
        self->xml = xml;
        self->md5 = md5;
        self->file = file;
        self->ff = ff;

        /* pathlen + 1 copies the NUL terminator in the same step */
        memcpy (self->path, path, pathlen + 1);
    }

    *ppo = self;
    return rc;
}
/* BufferQMake
 *  construct a BufferQ wrapping a KQueue of the given length, with the
 *  given timeout.
 *
 *  "q" [ OUT ] - receives the new object on success; untouched on failure
 *  "timeout" [ IN ] - stored for later queue operations
 *  "length" [ IN ] - forwarded to KQueueMake
 */
rc_t BufferQMake (BufferQ ** q, uint32_t timeout, uint32_t length)
{
    rc_t rc = 0;
    BufferQ * self;

    assert (q != NULL);

    self = malloc (sizeof * self);
    if (self == NULL)
        rc = RC (rcExe, rcQueue, rcAllocating, rcMemory, rcExhausted);
    else
    {
        rc = KQueueMake (&self->q, length);
        if (rc == 0)
        {
            self->timeout = timeout;
            atomic32_set (&self->refcount, 1);
            *q = self;
        }
        else
        {
            /* fix: self was leaked when KQueueMake failed */
            free (self);
        }
    }
    return rc;
}
/* AlignAccessDBEnumerateRefSequences
 *  create an enumerator over the database's reference sequences.
 *  for indexed files, positions at the first reference present in the
 *  index and returns the EOF code when none are.  "*refseq_enum"
 *  receives the enumerator, or NULL on allocation failure.
 */
LIB_EXPORT rc_t CC AlignAccessDBEnumerateRefSequences(const AlignAccessDB *self,
                                                      AlignAccessRefSeqEnumerator **refseq_enum)
{
    AlignAccessRefSeqEnumerator *e;
    unsigned first = 0;
    unsigned count;

    BAMFileGetRefSeqCount(self->innerSelf, &count);
    if (BAMFileIsIndexed(self->innerSelf)) {
        /* skip reference sequences that the index does not cover */
        while (first != count
               && BAMFileIndexHasRefSeqId(self->innerSelf, first) == 0)
            ++first;
        if (first == count)
            return AlignAccessRefSeqEnumeratorEOFCode;
    }

    e = malloc(sizeof(*e));
    *refseq_enum = e;
    if (e == NULL)
        return RC(rcAlign, rcDatabase, rcConstructing, rcMemory, rcExhausted);

    /* the enumerator keeps a counted reference to its database */
    e->parent = self;
    AlignAccessDBAddRef(e->parent);
    e->cur = first;
    e->end = count;
    atomic32_set(&e->refcount, 1);
    return 0;
}
/* Make
 *  create and run a thread
 *
 *  "run_thread" [ IN ] - thread entrypoint
 *
 *  "data" [ IN, OPAQUE ] - user-supplied thread data
 */
LIB_EXPORT rc_t CC KThreadMake ( KThread **tp,
    rc_t ( CC * run_thread ) ( const KThread*, void* ), void *data )
{
    rc_t rc;

    if ( tp == NULL )
        rc = RC ( rcPS, rcThread, rcCreating, rcParam, rcNull );
    else
    {
        if ( run_thread == NULL )
            rc = RC ( rcPS, rcThread, rcCreating, rcFunction, rcNull );
        else
        {
            KThread *t = malloc ( sizeof * t );
            if ( t == NULL )
                rc = RC ( rcPS, rcThread, rcCreating, rcMemory, rcExhausted );
            else
            {
                /* finish constructing thread */
                t -> run = run_thread;
                t -> data = data;
                atomic32_set ( & t -> waiting, 0 );
                /* refcount starts at 2: one reference for the caller,
                   one held by the running thread itself */
                atomic32_set ( & t -> refcount, 2 );
                t -> rc = 0;
                t -> join = true;

                /* attempt to create thread */
                t -> thread_handle = CreateThread(
                    NULL,           /* default security attributes */
                    0,              /* use default stack size */
                    int_ThreadProc, /* thread function */
                    t,              /* argument to thread function */
                    0,              /* run immediately */
                    &t->thread_id );/* returns the thread identifier */
/* pthread version this was ported from:
                status = pthread_create ( & t -> thread, 0, KThreadRun, t ); */
                if ( t->thread_handle != NULL )
                {
                    * tp = t;
                    return 0;
                }

                /* CreateThread gives no errno-style detail here,
                   so the failure is reported as unknown */
                rc = RC ( rcPS, rcThread, rcCreating, rcNoObj, rcUnknown );

                /* see why we failed */
/* pthread-era status mapping, not applicable on Windows:
                switch ( status ) { case EAGAIN: rc = RC ( rcPS, rcThread, rcCreating, rcThread, rcExhausted ); break; default: rc = RC ( rcPS, rcThread, rcCreating, rcNoObj, rcUnknown ); } */

                free ( t );
            }
        }

        * tp = NULL;
    }
    return rc;
}
/* JOJOBA here will be variable part */
/* XTaskerMake
 *  construct a tasker with "NumThreads" tasks, a stats object, and the
 *  job dispatcher "Jobber" (called with "Data").
 *
 *  "Tasker" [ OUT ] - receives the tasker; set to NULL on any failure
 *  "NumThreads" [ IN ] - number of tasks; must be non-zero
 *  "Data" [ IN, OPAQUE ] - user data handed to the dispatcher
 *  "Jobber" [ IN ] - job dispatcher; must be non-NULL
 *
 *  on failure, partially built state is torn down via XTaskerDispose.
 */
rc_t CC XTaskerMake ( const struct XTasker ** Tasker,
                      size_t NumThreads,
                      void * Data,
                      Job4Task Jobber )
{
    rc_t RCt = 0;
    struct XTasker * Ret = NULL;
    size_t llp = 0;

    if ( Tasker != NULL ) {
        * Tasker = NULL;
    }

    if ( Tasker == NULL || Jobber == NULL ) {
        return RC ( rcExe, rcNoTarg, rcProcessing, rcParam, rcNull );
    }

    if ( NumThreads == 0 ) {
        return RC ( rcExe, rcNoTarg, rcProcessing, rcParam, rcInvalid );
    }

    Ret = calloc ( 1, sizeof ( struct XTasker ) );
    if ( Ret == NULL ) {
        /* fix: allocation failures were reported as rcParam; use
           rcMemory like every other allocator in this code base */
        return RC ( rcExe, rcNoTarg, rcProcessing, rcMemory, rcExhausted );
    }

    Ret -> tasks = calloc ( NumThreads, sizeof ( struct XTask * ) );
    if ( Ret -> tasks == NULL ) {
        RCt = RC ( rcExe, rcNoTarg, rcProcessing, rcMemory, rcExhausted );
    }
    else {
        Ret -> task_qty = NumThreads;

        for ( llp = 0; llp < NumThreads; llp ++ ) {
            RCt = _TaskMake ( Ret -> tasks + llp, Ret, llp );
            if ( RCt != 0 ) {
                break;
            }
        }

        if ( RCt == 0 ) {
            RCt = XStatsMake ( & ( Ret -> stat ) );
            if ( RCt == 0 ) {
                atomic32_set ( & ( Ret -> iteration ), 0 );
                atomic32_set ( & ( Ret -> is_run ), 0 );
                atomic32_set ( & ( Ret -> is_done ), 0 );

                Ret -> stop_at = 0;
                Ret -> job_4_task_data = Data;
                Ret -> job_4_task = Jobber;

                * Tasker = Ret;
            }
        }
    }

    if ( RCt != 0 ) {
        /* dispose handles any partially built state */
        XTaskerDispose ( Ret );
    }

    return RCt;
}   /* XTaskerMake () */