/** Write an HSP list to the collector HSP stream. The HSP stream assumes
 * ownership of the HSP list and sets the dereferenced pointer to NULL.
 * @param hsp_stream Stream to write to. [in] [out]
 * @param hsp_list Pointer to the HSP list to save in the collector. [in]
 * @return Success or error, if stream is NULL, hsp_list is NULL, the stream
 *         is already closed for writing, or the writer reports a failure.
 */
int BlastHSPStreamWrite(BlastHSPStream* hsp_stream, BlastHSPList** hsp_list)
{
   Int2 status = 0;

   /* Guard both pointers: *hsp_list is dereferenced unconditionally below,
      so a NULL hsp_list must be rejected along with a NULL stream. */
   if (!hsp_stream || !hsp_list)
      return kBlastHSPStream_Error;

   /* Lock the mutex, if necessary. */
   MT_LOCK_Do(hsp_stream->x_lock, eMT_Lock);

   /* Prohibit writing after reading has already started. This prohibition
    * can be lifted later. There is no inherent problem in using read and
    * write in any order, except that sorting would have to be done on
    * every read after a write. */
   if (hsp_stream->results_sorted) {
      MT_LOCK_Do(hsp_stream->x_lock, eMT_Unlock);
      return kBlastHSPStream_Error;
   }

   if (hsp_stream->writer) {
      /* Lazily initialize the writer on the first write. */
      if (!(hsp_stream->writer_initialized)) {
         (hsp_stream->writer->InitFnPtr)
            (hsp_stream->writer->data, hsp_stream->results);
         hsp_stream->writer_initialized = TRUE;
      }
      /* Filtering processing. */
      status = (hsp_stream->writer->RunFnPtr)
         (hsp_stream->writer->data, *hsp_list);
   }

   /* NOTE(review): on writer failure the caller keeps ownership of the
      list (the pointer is NOT cleared) — confirm callers rely on this. */
   if (status != 0) {
      MT_LOCK_Do(hsp_stream->x_lock, eMT_Unlock);
      return kBlastHSPStream_Error;
   }

   /* Results structure is no longer sorted, even if it was before.
      The following assignment is only necessary if the logic to
      prohibit writing after the first read is removed. */
   hsp_stream->results_sorted = FALSE;

   /* Free the caller from this pointer's ownership. */
   *hsp_list = NULL;

   /* Unlock the mutex. */
   MT_LOCK_Do(hsp_stream->x_lock, eMT_Unlock);
   return kBlastHSPStream_Success;
}
static void TEST_CORE_Lock(void) { /* MT_LOCK API */ MT_LOCK x_lock; /* dummy */ TEST_CORE_LockUserData = 111; x_lock = MT_LOCK_Create(&TEST_CORE_LockUserData, 0, TEST_CORE_LockCleanup); assert(x_lock); verify(MT_LOCK_AddRef(x_lock) == x_lock); verify(MT_LOCK_AddRef(x_lock) == x_lock); verify(MT_LOCK_Delete(x_lock) == x_lock); assert(TEST_CORE_LockUserData == 111); verify(MT_LOCK_Do(x_lock, eMT_LockRead)); verify(MT_LOCK_Do(x_lock, eMT_Lock)); verify(MT_LOCK_Do(x_lock, eMT_Unlock)); verify(MT_LOCK_Do(x_lock, eMT_Unlock)); verify(MT_LOCK_Delete(x_lock) == x_lock); assert(TEST_CORE_LockUserData == 111); verify(MT_LOCK_Delete(x_lock) == 0); assert(TEST_CORE_LockUserData == 222); /* real */ x_lock = MT_LOCK_Create(&TEST_CORE_LockUserData, TEST_CORE_LockHandler, TEST_CORE_LockCleanup); assert(x_lock); /* NB: Write after read is not usually an allowed lock nesting */ verify(MT_LOCK_Do(x_lock, eMT_LockRead)); verify(MT_LOCK_Do(x_lock, eMT_Lock)); verify(MT_LOCK_Do(x_lock, eMT_Unlock)); verify(MT_LOCK_Do(x_lock, eMT_Unlock)); /* Read after write is usually okay */ verify(MT_LOCK_Do(x_lock, eMT_Lock)); verify(MT_LOCK_Do(x_lock, eMT_LockRead)); verify(MT_LOCK_Do(x_lock, eMT_Unlock)); verify(MT_LOCK_Do(x_lock, eMT_Unlock)); /* Try-locking sequence */ verify(MT_LOCK_Do(x_lock, eMT_TryLock)); verify(MT_LOCK_Do(x_lock, eMT_TryLockRead)); verify(MT_LOCK_Do(x_lock, eMT_Unlock)); verify(MT_LOCK_Do(x_lock, eMT_Unlock)); verify(MT_LOCK_Delete(x_lock) == 0); }