/* register_temp_file
 *  record a temporary file under the given read-id.
 *  The name is appended to the per-read-id VNamelist ( created on first
 *  use ) and handed to the cleanup task so it gets deleted on exit.
 *  All registry mutation happens under self -> lock.
 */
rc_t register_temp_file( temp_registry * self, uint32_t read_id, const char * filename )
{
    rc_t rc;

    if ( self == NULL )
        return RC( rcVDB, rcNoTarg, rcConstructing, rcSelf, rcNull );
    if ( filename == NULL )
        return RC( rcVDB, rcNoTarg, rcConstructing, rcParam, rcNull );

    rc = KLockAcquire ( self -> lock );
    if ( rc == 0 )
    {
        VNamelist * names = VectorGet ( &self -> lists, read_id );
        if ( names == NULL )
        {
            /* first file for this read-id : create and store a fresh list */
            rc = VNamelistMake ( &names, 12 );
            if ( rc == 0 )
            {
                rc = VectorSet ( &self -> lists, read_id, names );
                if ( rc != 0 )
                    VNamelistRelease ( names ); /* could not store it */
            }
        }
        if ( rc == 0 && names != NULL )
        {
            rc = VNamelistAppend ( names, filename );
            if ( rc == 0 )
                rc = Add_File_to_Cleanup_Task ( self -> cleanup_task, filename );
        }
        KLockUnlock ( self -> lock );
    }
    return rc;
}
/* VCursorFlushPage
 *  flush the current page via VCursorFlushPageInt, then — when the
 *  background flush thread is enabled — wait for that thread to finish
 *  and surface any error it reported; finally collapse the page window
 *  ( end_id = row_id ).
 */
LIB_EXPORT rc_t CC VCursorFlushPage ( VCursor *self )
{
    rc_t rc = VCursorFlushPageInt ( self );
    if ( rc == 0 )
    {
#if VCURSOR_FLUSH_THREAD
        MTCURSOR_DBG (( "VCursorFlushPage: going to acquire lock\n" ));
        /* get lock */
        rc = KLockAcquire ( self -> flush_lock );
        if ( rc != 0 )
            return rc;
        MTCURSOR_DBG (( "VCursorFlushPage: have lock\n" ));
        /* wait until background thread has finished */
        while ( self -> flush_state == vfBusy )
        {
            MTCURSOR_DBG (( "VCursorFlushPage: waiting for background thread\n" ));
            rc = KConditionWait ( self -> flush_cond, self -> flush_lock );
            if ( rc != 0 )
            {
                LOGERR ( klogSys, rc, "VCursorFlushPage: wait failed - exiting" );
                KLockUnlock ( self -> flush_lock );
                return rc;
            }
        }
        /* what was the proper rc */
        if ( self -> flush_state != vfReady )
        {
            if ( self -> flush_state != vfBgErr )
                rc = RC ( rcVDB, rcCursor, rcFlushing, rcCursor, rcInconsistent );
            else
            {
                /* background thread failed: join it and adopt its rc */
                rc_t rc2;
                MTCURSOR_DBG (( "VCursorFlushPage: waiting on thread to exit\n" ));
                rc = KThreadWait ( self -> flush_thread, & rc2 );
                if ( rc == 0 )
                {
                    rc = rc2;
                    MTCURSOR_DBG (( "VCursorFlushPage: releasing thread\n" ));
                    KThreadRelease ( self -> flush_thread );
                    self -> flush_thread = NULL;
                }
            }
            PLOGERR ( klogInt, (klogInt, rc, "VCursorFlushPage: not in ready state[$(state)] - exiting", "state=%hu", self -> flush_state ));
            KLockUnlock ( self -> flush_lock );
            return rc;
        }
        KLockUnlock ( self -> flush_lock );
#endif
        /* a successful flush leaves row_id == start_id; drag end_id along */
        assert ( self -> row_id == self -> start_id );
        self -> end_id = self -> row_id;
    }
    return rc;
}
/* _SidStorageGetSize
 *  look up the size recorded for "Name" in the SID storage.
 *  Returns 0 when the storage is unusable, the lock cannot be taken,
 *  or no node with that name exists.
 */
DWORD CC _SidStorageGetSize ( const char * Name )
{
    DWORD Size = 0;

    if ( _IsSidStorageGood () )
    {
        if ( KLockAcquire ( _sMutabor ) == 0 )
        {
            const struct __SidNode * Found = _FindSidNoLock ( Name );
            if ( Found != NULL )
                Size = Found -> size;
            KLockUnlock ( _sMutabor );
        }
    }
    return Size;
}   /* _SidStorageGetSize () */
/* BAMReaderThreadMain
 *  producer thread: reads alignments from self -> file and appends them
 *  to the bounded queue ( self -> que ), blocking on need_data while the
 *  queue is full and signaling have_data after each insert.
 *  A NULL record marks end-of-data; the final rc is published in
 *  self -> rc before the thread exits.
 */
static rc_t BAMReaderThreadMain(KThread const *const th, void *const vp)
{
    BAMReader *const self = (BAMReader *)vp;
    rc_t rc = 0;
    const BAMAlignment* rec;

    /* BUG FIX: the original ignored KLockAcquire's result; on failure it
       would mutate shared state and call KLockUnlock without owning the
       lock. Record the failure for the consumer and bail instead. */
    rc = KLockAcquire(self->lock);
    if (rc != 0) {
        self->rc = rc; /* best effort — lock is not held here */
        return rc;
    }
    do {
        /* block while the ring is full; consumer signals need_data */
        while (self->nque == BUFFER_COUNT)
            KConditionWait(self->need_data, self->lock);
        {
            rc = BAMFileRead( self->file, &rec);
            if (rc == END_OF_DATA) {
                /* normal termination: enqueue NULL sentinel, clear rc */
                rec = NULL;
                rc = 0;
            }
            else if (rc)
                break; /* hard read error: stop producing */
            self->que[self->nque] = rec;
            ++self->nque;
            KConditionSignal(self->have_data);
        }
    } while (rec);
    /* publish final status ( 0 on clean EOF ) while still locked */
    self->rc = rc;
    KLockUnlock(self->lock);
    return 0;
}
/* Whack
 *  tear down the cursor. When the background flush thread exists, wait
 *  out any in-flight flush, request exit ( vfExit ), join the thread,
 *  then release thread/condition/lock, stop the pagemap thread and
 *  destroy the cursor.
 */
rc_t VCursorWhack ( VCursor *self )
{
#if VCURSOR_FLUSH_THREAD
    if ( self -> flush_thread != NULL )
    {
        rc_t rc = KLockAcquire ( self -> flush_lock );
        if ( rc == 0 )
        {
            /* drain: wait until the bg thread finishes the current page */
            while ( self -> flush_state == vfBusy )
            {
                MTCURSOR_DBG (( "VCursorWhack: waiting for thread to process\n" ));
                KConditionWait ( self -> flush_cond, self -> flush_lock );
            }
            /* ask the thread to exit and wake it */
            self -> flush_state = vfExit;
            KConditionSignal ( self -> flush_cond );
            KLockUnlock ( self -> flush_lock );
        }
        MTCURSOR_DBG (( "VCursorWhack: waiting on thread to exit\n" ));
        KThreadWait ( self -> flush_thread, NULL );
    }
    MTCURSOR_DBG (( "VCursorWhack: finishing\n" ));
    /* release primitives — NULL-safe per the KThread/KCondition/KLock
       release convention used elsewhere in this file (presumably; confirm) */
    KThreadRelease ( self -> flush_thread );
    KConditionRelease ( self -> flush_cond );
    KLockRelease ( self -> flush_lock );
#endif
    VCursorTerminatePagemapThread(self);
    return VCursorDestroy ( self );
}
/* XFSTreeDepotGet
 *  hand out the depot's current tree with an added reference.
 *  On success *Tree owns a reference the caller must release;
 *  on any failure *Tree is left NULL.
 */
LIB_EXPORT rc_t CC XFSTreeDepotGet (
                const struct XFSTreeDepot * self,
                const struct XFSTree ** Tree
)
{
    rc_t RCt;

    if ( self == NULL || Tree == NULL )
        return XFS_RC ( rcNull );

    * Tree = NULL;

    RCt = KLockAcquire ( self -> mutabor );
    if ( RCt == 0 )
    {
        /* reference first, publish only if that succeeded */
        RCt = XFSTreeAddRef ( self -> Tree );
        if ( RCt == 0 )
            * Tree = self -> Tree;

        KLockUnlock ( self -> mutabor );
    }
    return RCt;
}   /* XFSTreeDepotGet () */
/* XStatsReport
 *  print the accumulated read statistics to stdout.
 *  NULL self is tolerated ( no-op, returns 0 ). All counters are
 *  sampled under the stats mutex.
 */
rc_t CC XStatsReport ( const struct XStats * self )
{
    rc_t RCt = 0;
    uint64_t Per = 0;
    uint64_t Tim = 0;

    if ( self != NULL ) {
        RCt = KLockAcquire ( self -> mutabor );
        if ( RCt == 0 ) {
            printf ( "<<== Read Stats\n" );
            printf ( " READ QTY : %lu\n", self -> qty );

            /* percentage of failed reads, rounded via float math */
            Per = self -> qty == 0
                    ? 0
                    : ( ( int ) ( ( ( float ) self -> err_qty * 100.0f ) / ( float ) ( self -> qty ) ) );
            printf ( " ERRORS : %lu [%lu%%]\n", self -> err_qty, Per );

            /* elapsed wall time in whole seconds */
            Tim = ( XTmNow () - self -> start_time ) / 1000000;
            printf ( " TIME : %lu sec\n", Tim );
            printf ( " ACC TIME : %lu sec\n", self -> time / 1000000 );
            printf ( " READ SIZE : %lu bytes\n", self -> size );

            /* BUG FIX: the guard used to test 'self -> time' while the
               divisor is 'Tim'; when less than one second had elapsed
               ( Tim == 0 ) this divided by zero. Guard the real divisor.
               Also fixed the typo'd labels ( "but" / "THOUGHOUT ... bit" ). */
            Per = Tim == 0 ? 0 : ( self -> size / Tim );
            printf ( " THROUGHPUT : %lu bytes per sec\n", Per );

            KLockUnlock ( self -> mutabor );
        }
    }
    return RCt;
}   /* XStatsReport () */
/* SRAFastqFile_Read
 *  serve a read at byte offset "pos" from a virtual FASTQ file, caching
 *  one window of formatted ( optionally gzipped ) spot data at a time in
 *  self -> buf. The const object is cast away to update the cache fields
 *  ( from / size / buf / gzipped ) — logically-mutable cache state.
 *
 *  NOTE(review): *num_read is used as a running offset into "buffer" and
 *  is only zeroed on the pos-past-EOF path — presumably the caller passes
 *  it in initialized to 0; confirm at the call site.
 */
static rc_t SRAFastqFile_Read(const SRAFastqFile* self, uint64_t pos, void *buffer, size_t size, size_t *num_read)
{
    rc_t rc = 0;
    if( pos >= self->file_sz ) {
        /* read starts beyond EOF: nothing to deliver */
        *num_read = 0;
    } else if( (rc = KLockAcquire(self->lock)) == 0 ) {
        do {
            /* cache miss: requested pos is outside the cached window */
            if( pos < self->from || pos >= (self->from + self->size) ) {
                int64_t id = 0;
                uint64_t id_qty = 0;
                DEBUG_MSG(10, ("Caching for pos %lu %lu bytes\n", pos, size - *num_read));
                /* map byte offset -> window [from, from+size) and the
                   first spot id + spot count covering it */
                if( (rc = KIndexFindU64(self->kidx, pos, &((SRAFastqFile*)self)->from, &((SRAFastqFile*)self)->size, &id, &id_qty)) == 0 ) {
                    DEBUG_MSG(10, ("Caching from %lu:%lu, %lu bytes\n", self->from, self->from + self->size - 1, self->size));
                    DEBUG_MSG(10, ("Caching spot %ld, %lu spots\n", id, id_qty));
                    if( (rc = FastqReaderSeekSpot(self->reader, id)) == 0 ) {
                        size_t inbuf = 0, w = 0;
                        char* b = self->buf;
                        uint64_t left = self->buffer_sz;
                        /* format consecutive spots into the cache buffer */
                        do {
                            if( (rc = FastqReader_GetCurrentSpotSplitData(self->reader, b, left, &w)) != 0 ) {
                                break;
                            }
                            b += w; left -= w; inbuf += w;
                            --id_qty;
                        } while( id_qty > 0 && (rc = FastqReaderNextSpot(self->reader)) == 0);
                        /* running off the last row is a normal stop */
                        if( GetRCObject(rc) == rcRow && GetRCState(rc) == rcExhausted ) {
                            DEBUG_MSG(10, ("No more rows\n"));
                            rc = 0;
                        }
                        DEBUG_MSG(8, ("Cached %u bytes\n", inbuf));
                        /* gzipped mode: compress into the alternate buffer
                           and swap buf <-> gzipped so buf always holds the
                           data to serve */
                        if( self->gzipped != NULL ) {
                            size_t compressed = 0;
                            if( (rc = ZLib_DeflateBlock(self->buf, inbuf, self->gzipped, self->buffer_sz, &compressed)) == 0 ) {
                                char* b = self->buf;
                                ((SRAFastqFile*)self)->buf = self->gzipped;
                                ((SRAFastqFile*)self)->gzipped = b;
                                ((SRAFastqFile*)self)->size = compressed;
                                DEBUG_MSG(10, ("gzipped %lu bytes\n", self->size));
                            }
                        }
                    }
                }
            }
            if( rc == 0 ) {
                /* copy as much of the cached window as the caller wants */
                off_t from = pos - self->from;
                size_t q = (self->size - from) > (size - *num_read) ? (size - *num_read) : (self->size - from);
                DEBUG_MSG(10, ("Copying from %lu %u bytes\n", from, q));
                memcpy(&((char*)buffer)[*num_read], &self->buf[from], q);
                *num_read = *num_read + q;
                pos += q;
            }
        } while( rc == 0 && *num_read < size && pos < self->file_sz );
        ReleaseComplain(KLockUnlock, self->lock);
    }
    return rc;
}
/* Seal
 * indicate that the queue has been closed off
 * meaning there will be no further push operations
 * if "writes" is true, and no further pop operations
 * otherwise.
 */
LIB_EXPORT rc_t CC KQueueSeal ( KQueue *self )
{
    rc_t rc = 0;
    QMSG ( "%s[%p] called\n", __func__, self );
    if ( self == NULL )
        return RC ( rcCont, rcQueue, rcFreezing, rcSelf, rcNull );

    /* set "sealed" atomically; only the first caller to flip 0 -> 1
       performs the semaphore cancellation below */
    if ( atomic32_test_and_set ( & self -> sealed, 1, 0 ) == 0 )
    {
#if 1
        QMSG ( "%s[%p]: acquiring write lock ( %p )\n", __func__, self, self -> wl );
        rc = KLockAcquire ( self -> wl );
        if ( rc == 0 )
        {
            /* cancel the write semaphore to wake blocked pushers
               ( presumably they then observe the seal — confirm in Push ) */
            QMSG ( "%s[%p]: canceling write semaphore...\n", __func__, self );
            rc = KSemaphoreCancel ( self -> wc );
            QMSG ( "%s[%p]: ...done, rc = %R.\n", __func__, self, rc );
            KLockUnlock ( self -> wl );
            if ( rc == 0 )
            {
                QMSG ( "%s[%p]: acquiring read lock ( %p )\n", __func__, self, self -> rl );
                rc = KLockAcquire ( self -> rl );
                if ( rc == 0 )
                {
                    /* likewise wake blocked poppers */
                    QMSG ( "%s[%p]: canceling read semaphore...\n", __func__, self );
                    rc = KSemaphoreCancel ( self -> rc );
                    QMSG ( "%s[%p]: ...done, rc = %R.\n", __func__, self, rc );
                    KLockUnlock ( self -> rl );
                }
            }
        }
#endif
    }
    return rc;
}
/* Seal
 * indicate that the queue has been closed off
 * meaning there will be no further push operations
 * if "writes" is true, and no further pop operations
 * otherwise.
 *
 * This variant simply sets the flag; the semaphore-cancellation
 * code is kept below but compiled out.
 */
LIB_EXPORT rc_t CC KQueueSeal ( KQueue *self )
{
    rc_t rc = 0;

    QMSG ( "%s called\n", __func__ );

    if ( self == NULL )
        return RC ( rcCont, rcQueue, rcFreezing, rcSelf, rcNull );

    /* mark the queue closed */
    self -> sealed = true;

#if 0
    /* disabled: wake any blocked pushers/poppers by canceling
       both semaphores under their respective locks */
    QMSG ( "%s: acquiring write lock ( %p )\n", __func__, self -> wl );
    rc = KLockAcquire ( self -> wl );
    if ( rc == 0 )
    {
        QMSG ( "%s: canceling write semaphore...\n", __func__ );
        rc = KSemaphoreCancel ( self -> wc );
        QMSG ( "%s: ...done, rc = %R.\n", __func__, rc );
        KLockUnlock ( self -> wl );
        if ( rc == 0 )
        {
            QMSG ( "%s: acquiring read lock ( %p )\n", __func__, self -> rl );
            rc = KLockAcquire ( self -> rl );
            if ( rc == 0 )
            {
                QMSG ( "%s: canceling read semaphore...\n", __func__ );
                rc = KSemaphoreCancel ( self -> rc );
                QMSG ( "%s: ...done, rc = %R.\n", __func__, rc );
                KLockUnlock ( self -> rl );
            }
        }
    }
#endif

    return rc;
}
/* SRAFastqFile_Destroy
 *  release all resources owned by a SRAFastqFile and free the object.
 *  buf and gzipped are swapped back and forth during operation
 *  ( see SRAFastqFile_Read ), so the lower of the two pointers is
 *  freed — presumably that is the original allocation base covering
 *  both halves; confirm against the constructor.
 *
 *  NOTE(review): if KLockAcquire fails, nothing is released and the
 *  object leaks; the caller cannot detect this since 0 is always
 *  returned. Also no NULL-check on self.
 */
static rc_t SRAFastqFile_Destroy(SRAFastqFile *self)
{
    if( KLockAcquire(self->lock) == 0 ) {
        ReleaseComplain(FastqReaderWhack, self->reader);
        ReleaseComplain(KIndexRelease, self->kidx);
        ReleaseComplain(KTableRelease, self->ktbl);
        ReleaseComplain(SRATableRelease, self->stbl);
        /* free the lower of the two swap pointers */
        FREE(self->buf < self->gzipped ? self->buf : self->gzipped);
        ReleaseComplain(KLockUnlock, self->lock);
        ReleaseComplain(KLockRelease, self->lock);
        FREE(self);
    }
    return 0;
}
/* XStatsReset
 *  reset all counters under the stats mutex.
 *  NULL self is silently accepted ( returns 0 ).
 */
rc_t CC XStatsReset ( const struct XStats * self )
{
    rc_t RCt;

    if ( self == NULL )
        return 0;

    RCt = KLockAcquire ( self -> mutabor );
    if ( RCt == 0 ) {
        RCt = _XStatsReset_NoLock ( self );
        KLockUnlock ( self -> mutabor );
    }
    return RCt;
}   /* XStatsReset () */
/* _SidStorageRehash
 *  clear the SID storage under its mutex. A no-op ( returns 0 ) when
 *  the storage is not in a usable state.
 */
rc_t CC _SidStorageRehash ()
{
    rc_t RCt;

    if ( ! _IsSidStorageGood () )
        return 0;

    RCt = KLockAcquire ( _sMutabor );
    if ( RCt != 0 )
        return RCt;

    RCt = _ClearSidStorageNoLock ();
    KLockUnlock ( _sMutabor );

    return RCt;
}   /* _SidStorageRehash () */
/* XStatsBad
 *  count one failed read. The const is cast away because the counters
 *  are logically-mutable statistics; the increment happens under the
 *  stats mutex. NULL self is a no-op.
 */
rc_t CC XStatsBad ( const struct XStats * self )
{
    rc_t RCt = 0;
    struct XStats * Mutable = ( struct XStats * ) self;

    if ( Mutable != NULL ) {
        RCt = KLockAcquire ( Mutable -> mutabor );
        if ( RCt == 0 ) {
            Mutable -> err_qty ++;
            KLockUnlock ( Mutable -> mutabor );
        }
    }
    return RCt;
}   /* XStatsBad () */
/* XStatsGood
 *  account one successful read of "Size" bytes that took "Time"
 *  ( same unit as self -> time; microseconds, judging by the /1000000
 *  in XStatsReport — confirm ). Const cast away for the mutable
 *  counters; updates are made under the stats mutex. NULL self no-op.
 */
rc_t CC XStatsGood ( const struct XStats * self, uint64_t Size, uint64_t Time )
{
    rc_t RCt = 0;
    struct XStats * Mutable = ( struct XStats * ) self;

    if ( Mutable != NULL ) {
        RCt = KLockAcquire ( Mutable -> mutabor );
        if ( RCt == 0 ) {
            Mutable -> qty ++;
            Mutable -> time += Time;
            Mutable -> size += Size;
            KLockUnlock ( Mutable -> mutabor );
        }
    }
    return RCt;
}   /* XStatsGood () */
/* XFSTreeDepotSet
 *  install "Tree" as the depot's current tree, taking a reference on it
 *  and dropping the reference on any previously installed tree.
 *  The swap is performed under the depot mutex.
 */
LIB_EXPORT rc_t CC XFSTreeDepotSet (
                const struct XFSTreeDepot * self,
                const struct XFSTree * Tree
)
{
    rc_t RCt;
    struct XFSTreeDepot * Depot = ( struct XFSTreeDepot * ) self;

    if ( self == NULL || Tree == NULL )
        return XFS_RC ( rcNull );

    RCt = KLockAcquire ( self -> mutabor );
    if ( RCt == 0 ) {
        RCt = XFSTreeAddRef ( Tree );
        if ( RCt == 0 ) {
            if ( Depot -> Tree != NULL ) {
                /* return code deliberately ignored: the old tree is
                   being dropped regardless */
                XFSTreeRelease ( Depot -> Tree );
            }
            Depot -> Tree = ( struct XFSTree * ) Tree;
        }
        KLockUnlock ( self -> mutabor );
    }
    return RCt;
}   /* XFSTreeDepotSet () */
/* Read
 * read an aligment
 *
 * "result" [ OUT ] - return param for BAMAlignment object
 *  must be released with BAMAlignmentRelease
 *
 * returns RC(..., ..., ..., rcRow, rcNotFound) at end
 */
rc_t BAMReaderRead ( const BAMReader *cself, const BAMAlignment **result )
{
    rc_t rc;
    BAMReader *self = (BAMReader *)cself;

    if (self == NULL)
        return RC(rcAlign, rcFile, rcReading, rcParam, rcNull);
    if (self->eof)
        return RC(rcAlign, rcFile, rcReading, rcData, rcInsufficient);

    /* NOTE(review): KLockAcquire result is ignored — on failure the code
       below would run unlocked and KLockUnlock an unheld lock; confirm
       whether acquisition can fail here */
    KLockAcquire(self->lock);
    /* adopt any error already published by the producer thread */
    if ((rc = self->rc) == 0) {
        /* wait for the producer; re-sample self->rc on every wakeup */
        while (self->nque == 0 && (rc = self->rc) == 0)
            KConditionWait(self->have_data, self->lock);
        if (rc == 0) {
            *result = self->que[0];
            if (*result) {
                /* shift the queue down and free a slot for the producer */
                --self->nque;
                memmove(&self->que[0], &self->que[1], self->nque * sizeof(self->que[0]));
                KConditionSignal(self->need_data);
            }
            else {
                /* NULL entry is the end-of-data sentinel */
                self->eof = true;
                rc = END_OF_DATA;
            }
        }
    }
    KLockUnlock(self->lock);
    return rc;
}
/* run_flush_thread
 *  background flush thread entry point.
 *  Loop: sleep on flush_cond until the foreground marks a page busy
 *  ( flush_state == vfBusy ), run the trigger productions with the lock
 *  released, drop the page buffers, then re-take the lock and report
 *  vfReady ( success ) or vfBgErr ( failure ) back via flush_cond.
 *  Any state other than vfBusy after the wait is treated as an exit
 *  request ( e.g. vfExit set by VCursorWhack ).
 */
static rc_t CC run_flush_thread ( const KThread *t, void *data )
{
    rc_t rc;
    VCursor *self = data;

    /* acquire lock */
    MTCURSOR_DBG (( "run_flush_thread: acquiring lock\n" ));
    rc = KLockAcquire ( self -> flush_lock );
    if ( rc == 0 )
    {
        do
        {
            bool failed;
            run_trigger_prod_data pb;

            /* wait for data */
            if ( self -> flush_state == vfReady )
            {
                MTCURSOR_DBG (( "run_flush_thread: waiting for input\n" ));
                rc = KConditionWait ( self -> flush_cond, self -> flush_lock );
                if ( rc != 0 )
                    break;
            }

            /* bail unless state is busy */
            if ( self -> flush_state != vfBusy )
            {
                MTCURSOR_DBG (( "run_flush_thread: exiting\n" ));
                break;
            }

            /* prepare param block — snapshot the handed-over page range
               so the lock can be dropped while productions run */
            pb . id = self -> flush_id;
            pb . cnt = self -> flush_cnt;
            pb . rc = 0;

            MTCURSOR_DBG (( "run_flush_thread: unlocking and running\n" ));
            KLockUnlock ( self -> flush_lock );

            /* run productions from trigger roots */
            failed = VectorDoUntil ( & self -> trig, false, run_trigger_prods, & pb );

            /* drop page buffers */
            MTCURSOR_DBG (( "run_flush_thread: dropping page buffers\n" ));
            VectorForEach ( & self -> row, false, WColumnDropPage, NULL );

            /* reacquire lock */
            MTCURSOR_DBG (( "run_flush_thread: re-acquiring lock" ));
            rc = KLockAcquire ( self -> flush_lock );
            if ( rc != 0 )
            {
                /* cannot even report state under lock — mark error
                   unlocked and leave */
                self -> flush_state = vfBgErr;
                LOGERR ( klogSys, rc, "run_flush_thread: re-acquiring lock failed - exit" );
                return rc;
            }

#if FORCE_FLUSH_ERROR_EXIT
            /* test hook: force the failure path */
            if ( ! failed )
            {
                pb . rc = RC ( rcVDB, rcCursor, rcFlushing, rcThread, rcCanceled );
                failed = true;
            }
#endif

            /* get out on failure */
            if ( failed )
            {
                self -> flush_state = vfBgErr;
                LOGERR ( klogInt, pb . rc, "run_flush_thread: run_trigger_prods failed - exit" );
                KConditionSignal ( self -> flush_cond );
                /* NOTE(review): if VectorDoUntil reported failure but left
                   pb.rc == 0, the loop condition below keeps running —
                   confirm run_trigger_prods always sets pb.rc on failure */
                rc = pb . rc;
            }
            /* no longer busy */
            else if ( self -> flush_state == vfBusy )
            {
                /* signal waiter */
                self -> flush_state = vfReady;
                MTCURSOR_DBG (( "run_flush_thread: signaling ready\n" ));
                rc = KConditionSignal ( self -> flush_cond );
                if ( rc != 0 )
                    LOGERR ( klogSys, rc, "run_flush_thread: failed to signal foreground thread - exit" );
            }
        }
        while ( rc == 0 );

        MTCURSOR_DBG (( "run_flush_thread: unlocking\n" ));
        KLockUnlock ( self -> flush_lock );
    }
    MTCURSOR_DBG (( "run_flush_thread: exit\n" ));
    return rc;
}
/* VCursorFlushPageInt
 *  internal page flush. Validates cursor state, then either hands the
 *  buffered page to the background flush thread ( VCURSOR_FLUSH_THREAD )
 *  or runs the trigger productions inline. On success the page window
 *  advances: start_id = end_id, end_id = row_id + 1.
 */
static rc_t VCursorFlushPageInt ( VCursor *self )
{
    rc_t rc;
    if ( self == NULL )
        rc = RC ( rcVDB, rcCursor, rcFlushing, rcSelf, rcNull );
    else if ( self -> read_only )
        rc = RC ( rcVDB, rcCursor, rcFlushing, rcCursor, rcReadonly );
    else
    {
        int64_t end_id;
#if ! VCURSOR_FLUSH_THREAD
        run_trigger_prod_data pb;
#endif
        switch ( self -> state )
        {
        case vcConstruct:
            rc = RC ( rcVDB, rcCursor, rcFlushing, rcCursor, rcNotOpen );
            break;
        case vcFailed:
            rc = RC ( rcVDB, rcCursor, rcFlushing, rcCursor, rcInvalid );
            break;
        case vcRowOpen:
            rc = RC ( rcVDB, rcCursor, rcFlushing, rcCursor, rcBusy );
            break;
        default:
            /* ignore request if there is no page to commit */
            if ( self -> start_id == self -> end_id )
            {
                /* the cursor should be in unwritten state, where the
                   row_id can be reset but drags along the other markers. */
                assert ( self -> end_id == self -> row_id );
                return 0;
            }
#if VCURSOR_FLUSH_THREAD
            MTCURSOR_DBG (( "VCursorFlushPageInt: going to acquire lock\n" ));
            /* get lock */
            rc = KLockAcquire ( self -> flush_lock );
            if ( rc != 0 )
                return rc;
            MTCURSOR_DBG (( "VCursorFlushPageInt: have lock\n" ));
            /* make sure that background thread is ready */
            while ( self -> flush_state == vfBusy )
            {
                MTCURSOR_DBG (( "VCursorFlushPageInt: waiting for background thread\n" ));
                rc = KConditionWait ( self -> flush_cond, self -> flush_lock );
                if ( rc != 0 )
                {
                    LOGERR ( klogSys, rc, "VCursorFlushPageInt: wait failed - exiting" );
                    KLockUnlock ( self -> flush_lock );
                    return rc;
                }
            }
            if ( self -> flush_state != vfReady )
            {
                if ( self -> flush_state != vfBgErr )
                    rc = RC ( rcVDB, rcCursor, rcFlushing, rcCursor, rcInconsistent );
                else
                {
                    /* background thread failed: join it and adopt its rc */
                    rc_t rc2;
                    MTCURSOR_DBG (( "VCursorFlushPageInt: waiting on thread to exit\n" ));
                    rc = KThreadWait ( self -> flush_thread, & rc2 );
                    if ( rc == 0 )
                    {
                        rc = rc2;
                        MTCURSOR_DBG (( "VCursorFlushPageInt: releasing thread\n" ));
                        KThreadRelease ( self -> flush_thread );
                        self -> flush_thread = NULL;
                    }
                }
                PLOGERR ( klogInt, (klogInt, rc, "VCursorFlushPageInt: not in ready state[$(state)] - exiting", "state=%hu", self -> flush_state ));
                KLockUnlock ( self -> flush_lock );
                return rc;
            }
            MTCURSOR_DBG (( "VCursorFlushPageInt: running buffer page\n" ));
#endif
            /* first, tell all columns to bundle up their pages into buffers */
            end_id = self -> end_id;
            /* preset rc to the failure value WColumnBufferPage implies */
            rc = RC ( rcVDB, rcCursor, rcFlushing, rcMemory, rcExhausted );
            if ( VectorDoUntil ( & self -> row, false, WColumnBufferPage, & end_id ) )
            {
                VectorForEach ( & self -> row, false, WColumnDropPage, NULL );
                self -> flush_state = vfFgErr;
            }
            else
            {
                /* supposed to be constant */
                assert ( end_id == self -> end_id );
#if VCURSOR_FLUSH_THREAD
                MTCURSOR_DBG (( "VCursorFlushPageInt: pages buffered - capturing id and count\n" ));
                /* hand the page range to the bg thread and advance window */
                self -> flush_id = self -> start_id;
                self -> flush_cnt = self -> end_id - self -> start_id;

                self -> start_id = self -> end_id;
                self -> end_id = self -> row_id + 1;
                self -> state = vcReady;

                MTCURSOR_DBG (( "VCursorFlushPageInt: state set to busy - signaling bg thread\n" ));
                self -> flush_state = vfBusy;
                rc = KConditionSignal ( self -> flush_cond );
                if ( rc != 0 )
                    LOGERR ( klogSys, rc, "VCursorFlushPageInt: condition returned error on signal" );
#else
                /* run all validation and trigger productions inline */
                pb . id = self -> start_id;
                pb . cnt = self -> end_id - self -> start_id;
                pb . rc = 0;
                if ( ! VectorDoUntil ( & self -> trig, false, run_trigger_prods, & pb ) )
                {
                    self -> start_id = self -> end_id;
                    self -> end_id = self -> row_id + 1;
                    self -> state = vcReady;
                }
                rc = pb . rc;
                /* drop page buffers */
                VectorForEach ( & self -> row, false, WColumnDropPage, NULL );
#endif
            }
#if VCURSOR_FLUSH_THREAD
            MTCURSOR_DBG (( "VCursorFlushPageInt: unlocking\n" ));
            KLockUnlock ( self -> flush_lock );
#endif
        }
    }
    return rc;
}
/* Pop
 * pop an object from queue
 *
 * "item" [ OUT, OPAQUE* ] - return parameter for popped item
 *
 * "tm" [ IN, NULL OKAY ] - pointer to system specific timeout
 * structure. if the queue is empty, wait for indicated period
 * of time for an object to become available, or return status
 * code indicating a timeout. when NULL and queue is empty,
 * Pop will time out immediately and return status code.
 */
LIB_EXPORT rc_t CC KQueuePop ( KQueue *self, void **item, timeout_t *tm )
{
    rc_t rc;

    if ( item == NULL )
        rc = RC ( rcCont, rcQueue, rcRemoving, rcParam, rcNull );
    else
    {
        * item = NULL;

        if ( self == NULL )
            rc = RC ( rcCont, rcQueue, rcRemoving, rcSelf, rcNull );
        else
        {
            QMSG ( "%s: acquiring read lock ( %p )\n", __func__, self -> rl );
            rc = KLockAcquire ( self -> rl );
            if ( rc == 0 )
            {
                /* wait for an available element; when sealed, ignore the
                   caller's timeout ( NULL => fail/timeout immediately per
                   the semaphore's convention — confirm KSemaphoreTimedWait ) */
                QMSG ( "%s: waiting on read semaphore...\n", __func__ );
                rc = KSemaphoreTimedWait ( self -> rc, self -> rl, self -> sealed ? NULL : tm );
                QMSG ( "%s: ...done, rc = %R. unlocking read lock. ( %p )\n", __func__, rc, self -> rl );
                KLockUnlock ( self -> rl );

                if ( rc == 0 )
                {
                    uint32_t r, idx;

                    /* got an element — the semaphore guarantees one is
                       present, so read/write cursors must differ */
                    QMSG ( "%s: asserting self -> read ( %u ) != self -> write ( %u )\n", __func__, self -> read, self -> write );
                    assert ( self -> read != self -> write );

                    /* read element */
                    r = self -> read & self -> imask;
                    QMSG ( "%s: read index is %u, masked against 0x%x\n", __func__, r, self -> imask );
                    idx = r & self -> bmask;
                    * item = self -> buffer [ idx ];
                    QMSG ( "%s: read item from buffer [ %u ], using mask 0x%x\n", __func__, idx, self -> bmask );
                    self -> buffer [ idx ] = NULL;
                    self -> read = r + 1;

                    /* let write know there's a free slot available */
                    QMSG ( "%s: acquiring write lock ( %p )\n", __func__, self -> wl );
                    if ( KLockAcquire ( self -> wl ) == 0 )
                    {
                        QMSG ( "%s: signaling write semaphore\n", __func__ );
                        KSemaphoreSignal ( self -> wc );
                        QMSG ( "%s: unlocking write lock ( %p )\n", __func__, self -> wl );
                        KLockUnlock ( self -> wl );
                    }
                }
                else if ( self -> sealed && GetRCObject ( rc ) == rcTimeout )
                {
                    /* a sealed, drained queue reports "done" rather than
                       a timeout */
                    rc = RC ( rcCont, rcQueue, rcRemoving, rcData, rcDone );
                    QMSG ( "%s: resetting rc to %R\n", __func__, rc );
                }
            }
        }
    }
    return rc;
}
/* Push
 * add an object to the queue
 *
 * "item" [ IN, OPAQUE ] - pointer to item being queued
 *
 * "tm" [ IN, NULL OKAY ] - pointer to system specific timeout
 * structure. if the queue is full, wait for indicated period
 * of time for space to become available, or return status
 * code indicating a timeout. when NULL and queue is full,
 * Push will time out immediately and return status code.
 */
LIB_EXPORT rc_t CC KQueuePush ( KQueue *self, const void *item, timeout_t *tm )
{
    rc_t rc;

    if ( self == NULL )
        return RC ( rcCont, rcQueue, rcInserting, rcSelf, rcNull );

    /* fast-path seal check ( re-checked under protection below ) */
    if ( self -> sealed )
    {
        QMSG ( "%s: failed to insert into queue due to seal\n", __func__ );
        return RC ( rcCont, rcQueue, rcInserting, rcQueue, rcReadonly );
    }

    /* NOTE(review): rcTimeout looks like it should be rcParam for a NULL
       item — left unchanged since callers may test this object code */
    if ( item == NULL )
        return RC ( rcCont, rcQueue, rcInserting, rcTimeout, rcNull );

    QMSG ( "%s: acquiring write lock ( %p )...\n", __func__, self -> wl );
    rc = KLockAcquire ( self -> wl );
    QMSG ( "%s: ...done, rc = %R\n", __func__, rc );
    if ( rc == 0 )
    {
        /* wait for a free slot; the semaphore wait presumably releases
           and re-acquires wl internally — confirm KSemaphoreTimedWait */
        QMSG ( "%s: waiting on write semaphore...\n", __func__ );
        rc = KSemaphoreTimedWait ( self -> wc, self -> wl, tm );
        QMSG ( "%s: ...done, rc = %R. unlocking write lock ( %p ).\n", __func__, rc, self -> wl );
        KLockUnlock ( self -> wl );

        if ( rc == 0 )
        {
            uint32_t w;

            /* re-check the seal */
            if ( self -> sealed )
            {
                QMSG ( "%s: queue has been sealed\n", __func__ );
                /* not a disaster if semaphore not signaled:
                   give back the slot we consumed, best effort */
                QMSG ( "%s: acquiring write lock\n", __func__ );
                if ( ! KLockAcquire ( self -> wl ) )
                {
                    QMSG ( "%s: signaling write semaphore\n", __func__ );
                    KSemaphoreSignal ( self -> wc );
                    QMSG ( "%s: unlocking write lock\n", __func__ );
                    KLockUnlock ( self -> wl );
                }
                QMSG ( "%s: failed to insert into queue due to seal\n", __func__ );
                return RC ( rcCont, rcQueue, rcInserting, rcQueue, rcReadonly );
            }

            /* insert item — slot ownership presumably guaranteed by the
               semaphore, which is why no lock is held here; confirm */
            w = self -> write & self -> imask;
            QMSG ( "%s: write index is %u, masked against 0x%x\n", __func__, w, self -> imask );
            self -> buffer [ w & self -> bmask ] = ( void* ) item;
            QMSG ( "%s: inserted item into buffer [ %u ], using mask 0x%x\n", __func__, w & self -> bmask, self -> bmask );
            self -> write = w + 1;

            /* let listeners know about item */
            QMSG ( "%s: acquiring read lock ( %p )\n", __func__, self -> rl );
            if ( KLockAcquire ( self -> rl ) == 0 )
            {
                QMSG ( "%s: signaling read semaphore\n", __func__ );
                KSemaphoreSignal ( self -> rc );
                QMSG ( "%s: unlocking read lock ( %p )\n", __func__, self -> rl );
                KLockUnlock ( self -> rl );
            }
        }
    }
    return rc;
}