// Destructor: drops the field-value cache, releasing every cached value.
// FIX: the original only released SKFT_DATA entries and asserted
// (SK_ASSERT(NULL != PR_FALSE)) on anything else, leaking cached
// SKFT_LINK record sets. SKRecord::SetTable() releases both kinds; the
// destructor must do the same.
SKRecord::~SKRecord()
{
#ifdef FIELD_CACHE
    if(m_ppFieldCache)
    {
        skPtr<SKIFldCollection> pFldCol;
        SKERR err = m_pTable->GetFldCollection(pFldCol.already_AddRefed());
        SK_ASSERT(err == noErr);
        SK_ASSERT(NULL != pFldCol);
        skPtr<SKField> pField;
        for(PRUint32 i = 0; i < m_lFieldCount; ++i)
        {
            if(m_ppFieldCache[i])
            {
                // The field type tells us what kind of object was cached.
                err = pFldCol->GetField(i,
                        (SKIField**)pField.already_AddRefed());
                SK_ASSERT(err == noErr);
                FieldType eType = pField->GetType();
                if(eType == SKFT_DATA)
                    ((SKBinary*)m_ppFieldCache[i])->Release();
                else if(eType == SKFT_LINK)
                    // Keep in sync with SKRecord::SetTable().
                    ((SKIRecordSet*)m_ppFieldCache[i])->Release();
                else
                    SK_ASSERT(PR_FALSE);    // only DATA/LINK are cacheable
            }
        }
        delete[] m_ppFieldCache;
    }
#endif
}
SKERR SKRecord::SetTable(SKIRecordSet* pTable) { if(!pTable) return err_tbl_invalid; if(pTable == m_pTable) return noErr; #ifdef FIELD_CACHE skPtr<SKIFldCollection> pFldCol; SKERR err; if(m_ppFieldCache) { err = m_pTable->GetFldCollection(pFldCol.already_AddRefed()); if(err != noErr) return err_tbl_invalid; SK_ASSERT(NULL != pFldCol); skPtr<SKField> pField; for(PRUint32 i = 0; i < m_lFieldCount; ++i) { if(m_ppFieldCache[i]) { err=pFldCol->GetField(i,(SKIField**)pField.already_AddRefed()); SK_ASSERT(err == noErr); FieldType eType = pField->GetType(); if(eType == SKFT_DATA) ((SKBinary*)m_ppFieldCache[i])->Release(); else if(eType == SKFT_LINK) ((SKIRecordSet*)m_ppFieldCache[i])->Release(); else SK_ASSERT(PR_FALSE); } } delete[] m_ppFieldCache; m_ppFieldCache = NULL; } #endif m_pTable = pTable; #ifdef FIELD_CACHE err = m_pTable->GetFldCollection(pFldCol.already_AddRefed()); if(err != noErr) return err_tbl_invalid; SK_ASSERT(NULL != pFldCol); err = pFldCol->GetFieldCount(&m_lFieldCount); if(err != noErr) return err_tbl_invalid; m_ppFieldCache = new void*[m_lFieldCount]; for(PRUint32 i = 0; i < m_lFieldCount; ++i) m_ppFieldCache[i] = NULL; #endif return pTable ? noErr : err_tbl_invalid; }
/**
 * Callback run by the ep (endpoint) pool when an ep job completes.
 *
 * Dispatches to the user's callback stored in the job:
 *  - PENDING jobs: the callback gets the service-api request/response
 *    buffers, the pending counter is decremented, and the service api
 *    completion is attempted.
 *  - non-pending jobs: the callback gets only the raw response.
 *
 * @param ret       ep-pool result (converted to skull_ep_ret_t below)
 * @param response  raw response bytes (may be NULL)
 * @param len       length of response
 * @param ud        the ep_job_t* created at send time
 */
static
void _ep_cb(sk_ep_ret_t ret, const void* response, size_t len, void* ud)
{
    sk_print("prepare to run ep callback\n");

    // 1. Prepare
    ep_job_t* job = ud;
    skull_ep_ret_t skull_ret;
    skull_service_t* service = &job->service;

    // TODO: Tricky here, should manually convert the fields one by one
    // (relies on sk_ep_ret_t and skull_ep_ret_t having identical layout)
    memcpy(&skull_ret, &ret, sizeof(ret));

    // Q: Why we have to make the service data un-mutable?
    // A: Here we passed a skull_service into user level, but we cannot modify
    //  the service data due to the callback function may be ran in parallel,
    //  some in workers, some in bio, so to be safety, we have to make sure
    //  the user cannot use a mutable service structure, so that's why we set
    //  the 'new_svc.freezed = 1'
    skull_service_t new_svc = *service;
    new_svc.freezed = 1;

    // 2. Call user callback
    if (job->cb.type_ == PENDING) {
        // A pending job belongs to a service api task; both the task data
        // and the owning txn must still exist.
        sk_txn_taskdata_t* task_data = service->task;
        SK_ASSERT(task_data);
        SK_ASSERT(service->txn);

        job->cb.cb_.pending_cb_(&new_svc, skull_ret, response, len, job->ud,
                                task_data->request, task_data->request_sz,
                                task_data->response, task_data->response_sz);

        // Reduce pending tasks counts (must happen before the completion
        // check below, which relies on the counter)
        task_data->pendings--;
        sk_print("service task pending cnt: %u\n", service->task->pendings);

        // Try to call api callback
        sk_service_api_complete(service->service, service->txn,
                                service->task, service->task->api_name);
    } else {
        // Reset task and txn to NULL, to prevent user to create a pending job
        // from a no pending job's callback
        new_svc.task = NULL;
        new_svc.txn = NULL;

        job->cb.cb_.nopending_cb_(&new_svc, skull_ret, response, len,
                                  job->ud);
    }
}
// Initializes this record set as a window of `iCount` consecutive records
// of `pRecordSet`, starting at `iOffset`. Builds an internal cursor whose
// entries are the identity sequence iOffset, iOffset+1, ...
SKERR SKCursorRecordSet::Init(PRUint32 iOffset, PRUint32 iCount,
                              SKIRecordSet *pRecordSet)
{
    SK_ASSERT(NULL != pRecordSet);

    // Allocate the backing cursor.
    *m_pCursor.already_AddRefed() = sk_CreateInstance(SKCursor)(iCount);
    if(!m_pCursor)
        return err_memory;

    SKERR err = m_pCursor->ComputeCursorForm();
    if(err != noErr)
        return err;

    // Fill it with the consecutive ids [iOffset, iOffset + iCount).
    PRUint32 *piSlots = m_pCursor->GetSharedCursorDataWrite();
    for(PRUint32 iRank = 0; iRank < iCount; ++iRank)
        piSlots[iRank] = iOffset + iRank;
    m_pCursor->ReleaseSharedCursorDataWrite();

    // No speeders yet; remember the underlying record set.
    m_ppSpeeders = NULL;
    m_iSpeederCount = 0;
    m_pRecordSet = pRecordSet;
    return noErr;
}
// Hands out a record, trying each existing fixed-size pool in turn and
// growing the pool array when all are full.
// FIXES: the original wrote `m_ppPools = PR_Realloc(m_ppPools, ...)`,
// which loses (leaks) the old array when PR_Realloc fails and then
// dereferences NULL (the SK_ASSERT compiles out in release builds); it
// also leaked the freshly created pool when Init() failed. Both paths now
// clean up and return err_memory / the Init error.
SKERR SKRecordPool::GetRecord(SKRecord **ppRecord, void *pBuffer,
                              PRBool bVolatileBuffer)
{
    SKERR err;
    // First try every existing pool.
    for(PRUint32 i = 0; i < m_iPoolsCount; ++i)
    {
        err = m_ppPools[i]->GetRecord(ppRecord, pBuffer, bVolatileBuffer);
        // If succeeded then return
        if(err == noErr)
            return noErr;
    }

    // All the pools are full, create a new one.
    SKFixedRecordPool *pPool =
        sk_CreateInstance(SKFixedRecordPool)(m_iPoolSize, m_iRecordSize);
    if(!pPool)
        return err_memory;
    err = pPool->Init();
    if(err != noErr)
    {
        // NOTE(review): assumes sk_CreateInstance boils down to `new`;
        // if SKFixedRecordPool is ref-counted, Release() would be the
        // proper cleanup — confirm against sk_CreateInstance's definition.
        delete pPool;
        return err;
    }

    // Grow the pool array through a temporary so the original array is
    // preserved if the reallocation fails.
    SKFixedRecordPool **ppNewPools = (SKFixedRecordPool **)
        PR_Realloc(m_ppPools,
                   (m_iPoolsCount + 1) * sizeof(SKFixedRecordPool *));
    SK_ASSERT(NULL != ppNewPools);
    if(!ppNewPools)
    {
        delete pPool;
        return err_memory;      // m_ppPools is still the old, valid array
    }
    m_ppPools = ppNewPools;
    m_ppPools[m_iPoolsCount++] = pPool;

    // A brand-new pool cannot be full; serve the request from it.
    return pPool->GetRecord(ppRecord, pBuffer, bVolatileBuffer);
}
// Resolves a link field of this record into the sub-record-set it points
// to: reads the offset (and count, via GetLinkFieldCount) out of the
// record buffer and asks the link's target record set for that window.
// FIX: the error returned by GetLinkFieldCount() was ignored, so a failed
// count computation could silently produce a record set with a garbage
// count; it is now propagated.
SKERR SKRecord::GetLinkFieldValue(SKIField* pIField,
                                  SKIRecordSet** ppRecordSet)
{
    SKERR err = noErr;
    *ppRecordSet = NULL;
    if(!m_pTable || !m_pBuffer)
        return SKError(err_rec_invalid,"[SKRecord::GetLinkFieldValue] "
                       "Invalid record");

    // get file
    skPtr<SKIRecordSet> pLinkRS;
    err = pIField->GetLinkSubRecordSet(pLinkRS.already_AddRefed());
    if(err != noErr)
    {
        return SKError(err, "[SKRecord::GetLinkFieldValue] "
                       "LIN not initialized.");
    }

    // get offset
    skPtr<SKField> pOffsetField;
    ((SKField*)pIField)->GetOffsetField(pOffsetField.already_AddRefed());
    SK_ASSERT(NULL != pOffsetField);
    PRUint32 lOffset = 0;
    ((SKField*) pOffsetField)->GetUNumFieldValue(m_pBuffer, &lOffset);

    // get count
    PRUint32 lCount = 0;
    err = GetLinkFieldCount(pIField, &lCount);
    if(err != noErr)        // FIX: was previously ignored
        return err;

    return pLinkRS->GetSubRecordSet(lOffset, lCount, ppRecordSet);
}
// Inserts input cursor `iIndex` into m_piSortedIndexes (kept sorted in
// *descending* rank order so the smallest element sits at the end), using
// `pComparator` to order ranks. Consumes the cursor's next element into
// m_piInputData[iIndex] first. When the comparator reports a duplicate of
// an already-inserted element, the duplicate is skipped by advancing this
// cursor and restarting the binary search; a fully-consumed cursor is
// simply not inserted (m_iSortedWidth unchanged).
// NOTE: iInf starts at (PRUint32)-1 on purpose — the `iSup > iInf + 1`
// test relies on unsigned wraparound to mean "iInf is before slot 0".
SKERR SKMux::InsertIndexWithComparator(PRUint32 iIndex,
                                       SKICursorComparator *pComparator)
{
    // The cursor must still have an element to contribute.
    SK_ASSERT(m_piPosition[iIndex] < m_piCount[iIndex]);
    m_piInputData[iIndex] = m_ppiData[iIndex][m_piPosition[iIndex]++];

    PRUint32 iInf = (PRUint32)-1;       // exclusive lower bound (wrapped)
    PRUint32 iSup = m_iSortedWidth;     // exclusive upper bound
    while(iSup > iInf + 1)
    {
        PRUint32 iPos = (iSup + iInf) >> 1;     // midpoint probe
        PRInt32 iCmp = 0;
        SKERR err = pComparator->CompareRanks(
                m_piInputData[m_piSortedIndexes[iPos]],
                m_piInputData[iIndex],
                &iCmp);
        if(err != noErr)
            return err;
        if(iCmp == 0)
        {
            // Duplicate element: drop it and try this cursor's next one.
            if(m_piPosition[iIndex] >= m_piCount[iIndex])
                return noErr;           // cursor exhausted; not inserted
            m_piInputData[iIndex] = m_ppiData[iIndex][m_piPosition[iIndex]++];
            // Restart the search over [begin, iPos + 1).
            iInf = (PRUint32)-1;
            iSup = iPos + 1;
        }
        else if(iCmp < 0)
            iSup = iPos;
        else
            iInf = iPos;
    }
    SK_ASSERT(iInf + 1 == iSup);

    // Shift the tail right and drop the index into its slot.
    memmove(m_piSortedIndexes + iSup + 1, m_piSortedIndexes + iSup,
            (m_iSortedWidth - iSup) * sizeof(PRUint32));
    m_piSortedIndexes[iSup] = iIndex;
    m_iSortedWidth++;
    return noErr;
}
// Inserts a freshly fetched record into the LRU-like score cache.
// Strategy: recycle the *last* (lowest-score) cache item as storage for
// the new record, then splice it back into the list just before
// m_pInsertItem (the position found by GetCachedRecord for items whose
// score has decayed to <= m_lIncrement). Requires m_pInsertItem to have
// been set by a prior GetCachedRecord() walk; does nothing otherwise.
void SKRecordSet::InsertRecordInCache(SKIRecord* pIRecord)
{
    SK_ASSERT(NULL != pIRecord);
    if(m_pRecordCache && m_pInsertItem)
    {
        SK_ASSERT(!m_pLastItem->m_pNext);   // m_pLastItem is the list tail
        SKRecordCacheItem* pNextLastItem = m_pLastItem;
        // Detach the last item (unless it is itself the insert position).
        if(m_pInsertItem != m_pLastItem)
        {
            SK_ASSERT(NULL != m_pLastItem->m_pPrev);
            m_pLastItem->m_pPrev->m_pNext = NULL;
            pNextLastItem = pNextLastItem->m_pPrev;     // new tail
        }
        // Init the item: store the record and give it a fresh score.
        if(!*m_pLastItem)
            ++m_lCacheCount;            // the slot was empty before
        m_lWarnCounter = m_lCacheCount;
        *m_pLastItem = pIRecord;
        m_pLastItem->m_lScore = m_lIncrement;
        if(m_pInsertItem != m_pLastItem)
        {
            // Link it just before the insert position.
            m_pLastItem->m_pPrev = m_pInsertItem->m_pPrev;
            m_pLastItem->m_pNext = m_pInsertItem;
            // Back-link it.
            if(m_pLastItem->m_pPrev)
                m_pLastItem->m_pPrev->m_pNext = m_pLastItem;
            if(m_pLastItem->m_pNext)
                m_pLastItem->m_pNext->m_pPrev = m_pLastItem;
            // Update the first item (insert happened at the head).
            if(m_pFirstItem == m_pInsertItem)
                m_pFirstItem = m_pLastItem;
            // Update the insert item.
            m_pInsertItem = m_pLastItem;
            // Update the last item (the previous tail's predecessor).
            m_pLastItem = pNextLastItem;
        }
    }
}
// Returns a record to this pool's free stack. The record must belong to
// this pool's contiguous raw-record storage.
SKERR SKFixedRecordPool::ReleaseRecord(SKRecord *pRecord)
{
    SK_ASSERT(pRecord >= m_pRawRecords);
    SK_ASSERT(pRecord < m_pRawRecords + m_iPoolSize);

    // Push the record back: the slots below m_iRecordPointer are free.
    --m_iRecordPointer;
    m_ppRecords[m_iRecordPointer] = pRecord;
    return noErr;
}
// Appends a speeder (cache/acceleration object) to this record set,
// taking a reference on it.
// FIX: `m_ppSpeeders = PR_Realloc(m_ppSpeeders, ...)` lost the old array
// when PR_Realloc failed, and the only failure handling was an SK_ASSERT
// that compiles out in release builds (leading to a NULL store). The
// reallocation now goes through a temporary and reports err_memory.
SKERR SKCursorRecordSet::AppendSpeeder(SKRefCount *pSpeeder)
{
    SKRefCount **ppNewSpeeders = (SKRefCount **)
        PR_Realloc(m_ppSpeeders,
                   (m_iSpeederCount + 1) * sizeof(SKRefCount *));
    SK_ASSERT(NULL != ppNewSpeeders);
    if(!ppNewSpeeders)
        return err_memory;      // m_ppSpeeders is still the old, valid array
    m_ppSpeeders = ppNewSpeeders;
    m_ppSpeeders[m_iSpeederCount++] = pSpeeder;
    pSpeeder->AddRef();
    return noErr;
}
// OR-merges `iCount` record sets in ppResults pairwise until a single
// result remains in ppResults[0]. Each pass halves the live range:
// slot i absorbs slot i+1 (which is released and nulled), then the
// surviving sets are compacted into slots [0 .. iCount/2). An odd
// trailing set is folded into slot 0. On return only ppResults[0] is
// non-NULL (for iCount >= 1); intermediate sets have been released.
SKERR SKIndex::MultiMerge(SKIRecordSet** ppResults, PRUint32 iCount)
{
    SK_ASSERT (NULL != ppResults);
    while (iCount > 1)
    {
        PRUint32 iLast = iCount & 0xFFFFFFFE;   // largest even <= iCount
        PRUint32 i;
        SKERR err;
        // One loop iteration merges the 2N cursors [0..2N-1]
        // (or 2N+1 cursors [0..2N]) into [0..N-1].
        for (i = 0; i < iLast; i+= 2)
        {
            SK_ASSERT (NULL != ppResults[i]);
            SK_ASSERT (NULL != ppResults[i + 1]);
            // Merge each couple of cursors together
            err = ppResults[i]->Merge(ppResults[i + 1], skfopOR,
                                      &m_docComparator, PR_TRUE);
            if (err != noErr)
                return err;
            // Move the result to the left (compact into slot i/2)
            if (i != 0)
            {
                SK_ASSERT (!ppResults[i / 2]);
                ppResults[i / 2] = ppResults[i];
                ppResults[i] = NULL;
            }
            ppResults[i + 1]->Release();
            ppResults[i + 1] = NULL;
        }
        // Merge the last, unpaired cursor
        if (iLast != iCount)
        {
            SK_ASSERT (iCount == iLast + 1);
            SK_ASSERT (NULL != ppResults[iLast]);
            SK_ASSERT (NULL != ppResults[0]);
            err = ppResults[0]->Merge(ppResults[iLast], skfopOR,
                                      &m_docComparator, PR_TRUE);
            if (err != noErr)
                return err;
            // For a very large value of iCount, it may be interesting
            // to choose a random cursor, instead of the first one.
        }
        iCount = iLast / 2;
        // Iterate until we have only one cursor.
    }
    return noErr;
}
// Destructor: frees the pool's raw buffer storage, the record array and
// the free-stack array.
SKFixedRecordPool::~SKFixedRecordPool()
{
    // All handed-out records must have been returned via ReleaseRecord().
    SK_ASSERT(m_iRecordPointer == 0);
    if(m_pRecordBuffers)
        PR_Free(m_pRecordBuffers);      // PR_Malloc'd buffer area
    if(m_pRawRecords)
        delete[] m_pRawRecords;         // record object storage
    if(m_ppRecords)
        delete[] m_ppRecords;           // free-stack of record pointers
}
// Initializes this record set as a view of `pRecordSet` through an
// externally supplied cursor. No speeders are attached initially.
SKERR SKCursorRecordSet::Init(SKCursor *pCursor, SKIRecordSet *pRecordSet)
{
    SK_ASSERT(NULL != pRecordSet);
    // Start with no acceleration structures attached.
    m_ppSpeeders = NULL;
    m_iSpeederCount = 0;
    // Remember the cursor and the underlying record set.
    m_pCursor = pCursor;
    m_pRecordSet = pRecordSet;
    return noErr;
}
// Comparator-free twin of InsertIndexWithComparator(): inserts input
// cursor `iIndex` into m_piSortedIndexes, ordering directly on the raw
// PRUint32 values (descending, so the smallest value is at the end).
// Equal values are treated as duplicates: this cursor advances past them
// and the search restarts; an exhausted cursor is simply not inserted.
// NOTE: iInf starts at (PRUint32)-1 on purpose — `iSup > iInf + 1`
// relies on unsigned wraparound to mean "iInf is before slot 0".
void SKMux::InsertIndexWithoutComparator(PRUint32 iIndex)
{
    // The cursor must still have an element to contribute.
    SK_ASSERT(m_piPosition[iIndex] < m_piCount[iIndex]);
    m_piInputData[iIndex] = m_ppiData[iIndex][m_piPosition[iIndex]++];

    PRUint32 iInf = (PRUint32)-1;       // exclusive lower bound (wrapped)
    PRUint32 iSup = m_iSortedWidth;     // exclusive upper bound
    PRUint32 iIndexValue = m_piInputData[iIndex];
    while(iSup > iInf + 1)
    {
        PRUint32 iPos = (iSup + iInf) >> 1;     // midpoint probe
        PRUint32 iPosValue = m_piInputData[m_piSortedIndexes[iPos]];
        if(iPosValue == iIndexValue)
        {
            // Duplicate value: drop it and try this cursor's next one.
            if(m_piPosition[iIndex] >= m_piCount[iIndex])
                return;                 // cursor exhausted; not inserted
            iIndexValue = m_piInputData[iIndex] =
                m_ppiData[iIndex][m_piPosition[iIndex]++];
            // Restart the search over [begin, iPos + 1).
            iInf = (PRUint32)-1;
            iSup = iPos + 1;
        }
        else if(iPosValue < iIndexValue)
            iSup = iPos;
        else
            iInf = iPos;
    }
    SK_ASSERT(iInf + 1 == iSup);

    // Shift the tail right and drop the index into its slot.
    memmove(m_piSortedIndexes + iSup + 1, m_piSortedIndexes + iSup,
            (m_iSortedWidth - iSup) * sizeof(PRUint32));
    m_piSortedIndexes[iSup] = iIndex;
    m_iSortedWidth++;
    return;
}
// Transfers ownership of the merged result (and optionally its ancillary
// buffer) to the caller, leaving the mux empty. If the caller does not
// want the ancillary data (ppAncillaryData == NULL), it is freed here.
SKERR SKMux::RetrieveDataWithAncillary(PRUint32 *piCount,
                                       PRUint32 **ppiData,
                                       void** ppAncillaryData)
{
    SK_ASSERT(NULL != piCount);
    SK_ASSERT(NULL != ppiData);

    // A non-empty result must have a data buffer behind it.
    if(m_iFinalCount && !m_piFinalData)
        return err_failure;

    // Hand the merged data over to the caller.
    *piCount = m_iFinalCount;
    *ppiData = m_piFinalData;

    // Ancillary data is either handed over too, or discarded.
    if(ppAncillaryData)
        *ppAncillaryData = m_pAncillary;
    else
        PR_Free(m_pAncillary);

    // Detach: the caller now owns the buffers.
    m_iFinalCount = 0;
    m_piFinalData = NULL;
    m_pAncillary = NULL;
    return noErr;
}
// Runs a full-text search expression against this index and returns a
// new SKIndexResult holding the matches. The defaulted parameters
// (bUseFlex, pSimp, pFilter — see the header) tune flex matching, string
// simplification and record filtering respectively.
SKERR SKIndex::SearchExpression(const char *pszSearchString,
                                PRBool bUseFlex /*= PR_FALSE*/,
                                skIStringSimplifier *pSimp /*= NULL */,
                                SKIRecordFilter *pFilter /*= NULL */,
                                SKIndexResult **ppResult)
{
    SK_ASSERT(NULL != pszSearchString);
    SK_ASSERT(NULL != ppResult);

    // check input variables
    if (!pszSearchString || !ppResult)
        return SKError(err_idx_invalid, "[SKIndex::SearchExpression] "
                       "Invalid arguments");

    *ppResult = NULL;
    if(PL_strlen(pszSearchString) == 0)
        return SKError(err_idx_invalid, "[SKIndex::SearchExpression] "
                       "Empty request");

    // Instanciate the result object (all our garbage will automatically
    // be freed if an error occurs)
    skPtr<SKIndexResult> pIndexResult;
    *pIndexResult.already_AddRefed() = sk_CreateInstance(SKIndexResult)();
    if(!pIndexResult)
        return SKError(err_memory, "[SKIndex::SearchExpression] "
                       "Unable to allocate the result");

    // Make sure the index itself is ready, then delegate the actual work.
    SKERR err = Init();
    if(err != noErr)
        return err;
    err = pIndexResult->DoSearch(this, pszSearchString, bUseFlex,
                                 pSimp, pFilter);
    if(err != noErr)
        return err;

    return pIndexResult.CopyTo(ppResult);
}
// Merges the unsorted cursors of `iWidth` cursor scorers into this one,
// computing per-element scores through `pCallback` (wrapped for the mux).
// FIX: `piScores` — a PR_Malloc'd buffer whose ownership is transferred
// out by SKMux::RetrieveDataWithAncillary() — was never freed, leaking
// one score buffer per call (and also on the Init() error path). It is
// now freed once it has been copied into the scorer's own storage.
SKERR SKCursorScorer::MuxCursorScorer(SKCursorScorer** ppCursorScorer,
                                      PRUint32 iWidth,
                                      SKIMuxCursorScorer* pCallback,
                                      PRBool* pbInterrupt)
{
    if(!iWidth)
        return noErr;
    SK_ASSERT(ppCursorScorer && *ppCursorScorer);
    if( !ppCursorScorer || !*ppCursorScorer )
        return err_invalid;

    // Gather the raw cursors and run the mux with ancillary score slots.
    SKMux mux;
    SKCursor** ppCursors = new SKCursor*[iWidth];
    for(PRUint32 i = 0; i<iWidth; i++)
        ppCursors[i] = ppCursorScorer[i]->m_pUnsortedCursor;
    SKCursorScorerWrapper wrapper(pCallback);
    SKERR err = mux.MuxCursorsWithAncillary(ppCursors, iWidth,
                                            sizeof(PRUint32), &wrapper,
                                            NULL, pbInterrupt);
    delete [] ppCursors;
    if(err != noErr)
        return err;

    // Take ownership of the merged ids and their scores.
    PRUint32 iCount;
    PRUint32* piData;
    PRUint32* piScores;
    err = mux.RetrieveDataWithAncillary(&iCount, &piData,
                                        (void**) &piScores);
    if(err != noErr)
        return err;

    err = Init(iCount, ppCursorScorer[0]->m_bSignedScores);
    if(err != noErr)
    {
        PR_Free(piScores);      // FIX: was leaked on this path
        return err;
    }

    // Copy the scores into our own (signed or unsigned) storage, then the
    // PR_Malloc'd ancillary buffer is dead weight.
    memcpy(m_bSignedScores
               ? (PRUint32*)m_piUnsortedSignedScores
               : m_piUnsortedUnsignedScores,
           piScores, iCount*sizeof(PRUint32));
    PR_Free(piScores);          // FIX: was leaked on the success path

    // NOTE(review): piData is assumed to be adopted by the SKCursor
    // constructor below — confirm against SKCursor(PRUint32, PRUint32*).
    skPtr<SKCursor> pCursor;
    *pCursor.already_AddRefed() = sk_CreateInstance(SKCursor)(iCount,
                                                              piData);
    err = SetUnsortedCursor(pCursor);
    if(err != noErr)
        return err;
    return noErr;
}
skull_ep_status_t skull_ep_send(const skull_service_t* service, const skull_ep_handler_t handler, const void* data, size_t count, skull_ep_cb_t cb, void* ud) { sk_print("calling skull_ep_send...\n"); if (!service->task) { sk_print("skull_ep_send cannot be called outside a service api call\n"); if (handler.release) { handler.release(ud); } return SKULL_EP_ERROR; } // Construct ep job sk_ep_handler_t sk_handler; ep_job_cb_t ep_job_cb = { .type_ = PENDING, .cb_.pending_cb_ = cb }; ep_job_t* job = _ep_job_create(service, &handler, ep_job_cb, ud, &sk_handler); const sk_entity_t* ett = service->txn ? sk_txn_entity(service->txn) : NULL; // Send job to ep pool sk_ep_status_t ret = sk_ep_send(SK_ENV_EP, service->service, ett, sk_handler, data, count, _ep_cb, job); if (ret == SK_EP_OK) { service->task->pendings++; sk_print("service task pending cnt: %u\n", service->task->pendings); } else { if (sk_handler.release) { sk_handler.release(job); } } switch (ret) { case SK_EP_OK: return SKULL_EP_OK; case SK_EP_ERROR: return SKULL_EP_ERROR; case SK_EP_TIMEOUT: return SKULL_EP_TIMEOUT; default: SK_ASSERT(0); return SKULL_EP_ERROR; } } skull_ep_status_t skull_ep_send_np(const skull_service_t* service, const skull_ep_handler_t handler, const void* data, size_t count, skull_ep_np_cb_t cb, void* ud) { sk_print("calling skull_ep_send_np...\n"); // Construct ep job sk_ep_handler_t sk_handler; ep_job_cb_t ep_job_cb = { .type_ = NOPENDING, .cb_.nopending_cb_ = cb }; ep_job_t* job = _ep_job_create(service, &handler, ep_job_cb, ud, &sk_handler); const sk_entity_t* ett = service->txn ? 
sk_txn_entity(service->txn) : NULL; // Send job to ep pool sk_ep_status_t ret = sk_ep_send(SK_ENV_EP, service->service, ett, sk_handler, data, count, _ep_cb, job); if (ret != SK_EP_OK) { if (sk_handler.release) { sk_handler.release(job); } } switch (ret) { case SK_EP_OK: return SKULL_EP_OK; case SK_EP_ERROR: return SKULL_EP_ERROR; case SK_EP_TIMEOUT: return SKULL_EP_TIMEOUT; default: SK_ASSERT(0); return SKULL_EP_ERROR; } }
// Looks up record `lId` in the score-ordered record cache.
// Side effects of the walk:
//  - a hit gets its score bumped by m_lIncrement and is moved up the list
//    past any items with a lower-or-equal score;
//  - every other visited item decays by m_lDecrement and is dropped from
//    the cache (slot emptied, count decremented) when its score hits 0;
//  - m_pInsertItem is left pointing at the boundary where a *new* record
//    should be inserted by InsertRecordInCache().
// Returns 0 (noErr) on a cache hit (with *ppIRecord AddRef'd), non-zero
// on a miss — callers treat the return as "not found", not as an SKERR
// code proper.
SKERR SKRecordSet::GetCachedRecord(PRUint32 lId, SKIRecord** ppIRecord)
{
    *ppIRecord = NULL;
    if(m_pRecordCache)
    {
        SKRecordCacheItem* pItem = m_pFirstItem;
        m_pInsertItem = NULL;
        // First phase: scan occupied slots until the id is found.
        while(pItem && (SKIRecord*)*(skPtr<SKIRecord>*)pItem)
        {
            PRUint32 l;
            SKERR err = (*pItem)->GetId(&l);
            SK_ASSERT(err == noErr);
            if(err != noErr)
                return err;
            if(l == lId)
            {
                // This is the result
                *ppIRecord = *pItem;
                // Move up this item: find the highest predecessor whose
                // score is still <= ours.
                pItem->m_lScore += m_lIncrement;
                SKRecordCacheItem* pUpItem = pItem;
                while( pUpItem->m_pPrev
                    && (pUpItem->m_pPrev->m_lScore <= pItem->m_lScore))
                {
                    pUpItem = pUpItem->m_pPrev;
                }
                if(pUpItem != pItem)
                {
                    // Unlink pItem and re-link it just before pUpItem.
                    SKRecordCacheItem* pTmpItem = pItem->m_pPrev;
                    pItem->m_pPrev->m_pNext = pItem->m_pNext;
                    if(pItem->m_pNext)
                        pItem->m_pNext->m_pPrev = pItem->m_pPrev;
                    pItem->m_pPrev = pUpItem->m_pPrev;
                    pItem->m_pNext = pUpItem;
                    if(pItem->m_pPrev)
                        pItem->m_pPrev->m_pNext = pItem;
                    pUpItem->m_pPrev = pItem;
                    if(m_pFirstItem == pUpItem)
                        m_pFirstItem = pItem;
                    if(!pUpItem->m_pNext)
                        m_pLastItem = pUpItem;
                    // Resume the decay walk from the hit's old position.
                    pItem = pTmpItem;
                }
                pItem = pItem->m_pNext;
                break;
            }
            else
            {
                // Release obsolete data: decay this item's score.
                if((pItem->m_lScore += m_lDecrement) <= 0)
                {
                    pItem->m_lScore = 0;
                    if(--m_lCacheCount > 0)
                        --m_lWarnCounter;
                    else
                        m_lWarnCounter = -1;
                    *pItem = NULL;      // empty the slot
                }
                // Track the insert boundary: first item whose score fell
                // to <= m_lIncrement below a higher-scored predecessor.
                if( (pItem->m_lScore <= m_lIncrement)
                 && ( !pItem->m_pPrev
                   || ( pItem->m_pPrev
                     && (pItem->m_pPrev->m_lScore > m_lIncrement))))
                {
                    m_pInsertItem = pItem;
                }
            }
            pItem = pItem->m_pNext;
        }
        // Second phase: keep decaying the rest of the occupied slots
        // after a hit broke out of the first loop.
        while(pItem && (SKIRecord*)*(skPtr<SKIRecord>*)pItem)
        {
            // Release obsolete data
            if((pItem->m_lScore += m_lDecrement) <= 0)
            {
                pItem->m_lScore = 0;
                if(--m_lCacheCount > 0)
                    --m_lWarnCounter;
                else
                    m_lWarnCounter = -1;
                *pItem = NULL;
            }
            if( (pItem->m_lScore <= m_lIncrement)
             && ( !pItem->m_pPrev
               || ( pItem->m_pPrev
                 && (pItem->m_pPrev->m_lScore > m_lIncrement))))
            {
                m_pInsertItem = pItem;
            }
            pItem = pItem->m_pNext;
        }
        // Fall back to the first empty slot as insert position.
        if(!m_pInsertItem && pItem)
            m_pInsertItem = pItem;
    }
    if(*ppIRecord)
        (*ppIRecord)->AddRef();
    return (*ppIRecord == NULL);
}
// Every time pick up the next module in the workflow module list, then execute // the its `run` method. If reach the last module of the workflow, then will // execute the `pack` method static int _run(const sk_sched_t* sched, const sk_sched_t* src, sk_entity_t* entity, sk_txn_t* txn, sk_pto_hdr_t* msg) { SK_ASSERT(sched && entity && txn); sk_txn_state_t state = sk_txn_state(txn); // Check whether timeout if (sk_txn_timeout(txn)) { sk_txn_setstate(txn, SK_TXN_TIMEOUT); } switch (state) { case SK_TXN_INIT: { sk_print("txn - INIT\n"); sk_txn_setstate(txn, SK_TXN_RUNNING); return _run(sched, src, entity, txn, msg); } case SK_TXN_UNPACKED: { sk_print("txn - UNPACKED\n"); sk_txn_setstate(txn, SK_TXN_RUNNING); return _run(sched, src, entity, txn, msg); } case SK_TXN_RUNNING: { sk_print("txn - RUNNING\n"); return _module_run(sched, src, entity, txn, msg); } case SK_TXN_PENDING: { sk_print("txn - PENDING\n"); sk_txn_setstate(txn, SK_TXN_RUNNING); return _run(sched, src, entity, txn, msg); } case SK_TXN_COMPLETED: { sk_print("txn - COMPLETED\n"); return _module_pack(sched, src, entity, txn, msg); } case SK_TXN_PACKED: { sk_print("txn - PACKED: txn destroy\n"); sk_txn_setstate(txn, SK_TXN_DESTROYED); _txn_log_and_destroy(sched, txn); break; } case SK_TXN_ERROR: case SK_TXN_TIMEOUT: { sk_print("txn - ERROR or TIMEOUT: txn error or timeout\n"); return _module_pack(sched, src, entity, txn, msg); } case SK_TXN_DESTROYED: { sk_print("txn - DESTROYED: txn destroy\n"); _txn_log_and_destroy(sched, txn); break; } default: sk_print("Unexpect txn state: %d, ignored\n", state); SK_LOG_FATAL(SK_ENV_LOGGER, "Unexpect txn state: %d, exit", state); SK_ASSERT(0); break; } return 0; }
/**
 * @desc Run api callback
 *
 * Unpacks a SK_PTO_SVC_TASK_CB protocol message, marks the txn task
 * complete, runs the caller module's service-api callback (under the
 * module's log cookie / position), logs the task timings, and finally
 * notifies the master scheduler that the service task is done.
 *
 * FIX: the error-path SK_LOG_TRACE passed the uint64_t `task_id` to a
 * "%d" conversion — undefined behavior in varargs; it is now cast to
 * (int), consistent with the step-5 trace below.
 *
 * @note This method will run in the api caller engine (worker)
 */
static int _run(const sk_sched_t* sched, const sk_sched_t* src,
                sk_entity_t* entity, sk_txn_t* txn, sk_pto_hdr_t* msg)
{
    SK_ASSERT(sched);
    SK_ASSERT(entity);
    SK_ASSERT(txn);
    SK_ASSERT(msg);
    SK_ASSERT(sk_pto_check(SK_PTO_SVC_TASK_CB, msg));
    SK_ASSERT(sched == sk_entity_sched(entity));

    // 1. unpack the parameters
    uint32_t id = SK_PTO_SVC_TASK_CB;
    sk_txn_taskdata_t* taskdata = sk_pto_arg(id, msg, 0)->p;
    const char* service_name    = sk_pto_arg(id, msg, 1)->s;
    const char* api_name        = sk_pto_arg(id, msg, 2)->s;
    sk_txn_task_status_t task_status =
        (sk_txn_task_status_t)sk_pto_arg(id, msg, 3)->i;
    int svc_task_done = sk_pto_arg(id, msg, 4)->i;

    sk_module_t* caller_module     = taskdata->caller_module;
    const char* caller_module_name = caller_module->cfg->name;
    uint64_t task_id = sk_txn_task_id(taskdata->owner);

    // 2. mark the txn task complete
    if (task_status == SK_TXN_TASK_DONE || task_status == SK_TXN_TASK_BUSY) {
        sk_txn_task_setcomplete(txn, task_id, task_status);

        // 3. get the target service
        sk_service_t* service = sk_core_service(SK_ENV_CORE, service_name);
        SK_ASSERT(service);

        // 4. run a specific service api callback under the caller
        //    module's logging cookie and position
        int ret = 0;
        SK_LOG_SETCOOKIE("module.%s", caller_module_name);
        SK_ENV_POS_SAVE(SK_ENV_POS_MODULE, caller_module);
        ret = sk_service_run_iocall_cb(service, txn, task_id, api_name);
        SK_ENV_POS_RESTORE();
        SK_LOG_SETCOOKIE(SK_CORE_LOG_COOKIE, NULL);

        if (ret) {
            SK_LOG_TRACE(SK_ENV_LOGGER,
                "Error in service task callback, module: %s ret: %d, "
                "task_id: %d\n",
                caller_module_name, ret, (int)task_id);
            // Mark txn as ERROR, then after iocall complete, the workflow
            // will be go to 'pack' directly
            sk_txn_setstate(txn, SK_TXN_ERROR);
        }

        slong_t txn_starttime  = sk_txn_starttime(txn);
        slong_t txn_alivetime  = sk_txn_alivetime(txn);
        slong_t task_starttime = sk_txn_task_starttime(txn, task_id);

        sk_txn_log_add(txn, "; t:%s:%s st: %d cb_st: %d start: %ld end: %ld ",
                       service_name, api_name, task_status, ret,
                       task_starttime - txn_starttime, txn_alivetime);
    }

    // 5. log the task lifetime for debugging purpose
    SK_LOG_TRACE(SK_ENV_LOGGER, "service: one task id: %d completed, "
                 "cost %ld usec",
                 (int)task_id, sk_txn_task_lifetime(txn, task_id));

    // 6. send a complete protocol back to master
    bool resume_wf = taskdata->cb
        ? sk_txn_module_complete(txn)
        : sk_txn_alltask_complete(txn);

    sk_sched_send(SK_ENV_SCHED, SK_ENV_MASTER_SCHED, entity, txn, 0,
                  SK_PTO_SVC_TASK_DONE, service_name, resume_wf,
                  svc_task_done);
    return 0;
}
// Points this record at the raw data buffer holding its field values.
// The buffer must be non-NULL (debug-asserted).
SKERR SKRecord::SetBuffer(void *pBuffer)
{
    SK_ASSERT(NULL != pBuffer);
    m_pBuffer = pBuffer;
    return noErr;
}
// Returns the field collection of this record's owning table.
// A record detached from its table (m_pTable == NULL) has none —
// debug-asserted here.
SKERR SKRecord::GetFldCollection(SKIFldCollection** fldcol)
{
    SK_ASSERT(NULL != m_pTable);
    return m_pTable->GetFldCollection(fldcol);
}
// Computes the number of sub-records a link field of this record spans.
// If the field has an explicit count field, that is read directly;
// otherwise the count is derived: for the last record of the table it is
// "link file size - offset", else "next record's offset - this offset".
// FIXES: when the record is detached from its table (m_lId == -1) the
// error was assigned to `err` but NOT returned, so execution fell
// through and fetched record m_lId+1 == 0 — now returns immediately; the
// error message also named GetDataFieldValue instead of this function;
// the GetRecord() call for the next record is now checked (its buffer
// was dereferenced unconditionally); stray `;` after the closing brace
// removed.
SKERR SKRecord::GetLinkFieldCount(SKIField* pIField, PRUint32 *piCount)
{
    SKERR err = noErr;
    if(!m_pTable || !m_pBuffer)
        return SKError(err_rec_invalid,"[SKRecord::GetLinkFieldCount] "
                       "Invalid record");

    // get offset
    skPtr<SKField> pOffsetField;
    ((SKField*)pIField)->GetOffsetField(pOffsetField.already_AddRefed());
    SK_ASSERT(NULL != pOffsetField);
    PRUint32 lOffset = 0;
    ((SKField*) pOffsetField)->GetUNumFieldValue(m_pBuffer, &lOffset);

    // is there a length ?
    skPtr<SKField> pCountField;
    ((SKField*)pIField)->GetCountField(pCountField.already_AddRefed());
    PRUint32 lCount = 0;
    if(pCountField)
    {
        // Explicit count field: read it and we are done.
        pCountField->GetUNumFieldValue(m_pBuffer, &lCount);
    }
    else
    {
        // if we do not know the id of the record, we can not get the data.
        if(m_lId == (PRUint32)-1)
        {
            return SKError(err_rec_invalid, "[SKRecord::GetLinkFieldCount] "
                           "No count for data and record detached from "
                           "table.");
        }
        // is it the last record ?
        PRUint32 lTableCount = 0;
        m_pTable->GetCount(&lTableCount);
        if(m_lId == lTableCount - 1)
        {
            // read all the end of the file
            PRUint32 iLinCount = 0;
            skPtr<SKIRecordSet> pLinkRS;
            err = pIField->GetLinkSubRecordSet(pLinkRS.already_AddRefed());
            if(err != noErr)
            {
                return SKError(err, "[SKRecord::GetLinkFieldCount] "
                               "LIN not initialized.");
            }
            err = pLinkRS->GetCount(&iLinCount);
            if(err != noErr)
                return err;
            lCount = iLinCount - lOffset;
        }
        else
        {
            // stop where next record begins.
            PRUint32 lNextOffset;
            skPtr<SKIRecord> next;
            err = m_pTable->GetRecord(m_lId+1, next.already_AddRefed());
            if(err != noErr)
                return err;     // FIX: was unchecked before dereferencing
            pOffsetField->GetUNumFieldValue(
                    ((SKRecord*)(SKIRecord*)next)->GetSharedBuffer(),
                    &lNextOffset);
            lCount = lNextOffset - lOffset;
        }
    }
    *piCount = lCount;
    return noErr;
}
// Reads a DATA field of this record into a new SKBinary.
// The field's extent is bounded by the next record's buffer (or the
// sentinel (const void*)-1 for the table's last record); a detached
// record (m_lId == -1) falls back to the unbounded read. With
// FIELD_CACHE, a previously read value is served from (and a new value
// stored into) m_ppFieldCache with an extra reference.
SKERR SKRecord::GetDataFieldValue(SKIField* pIField, SKBinary** ppBinary)
{
    SKERR err;
    *ppBinary = NULL;
    if(!m_pTable || !m_pBuffer)
        return SKError(err_rec_invalid,"[SKRecord::GetDataFieldValue] "
                       "Invalid record");
    if(!pIField)
        return SKError(err_rec_invalid,"[SKRecord::GetDataFieldValue] "
                       "Invalid field");

    // The field must really be a DATA field.
    PRBool bCheckType;
    err = pIField->IsData(&bCheckType);
    SK_ASSERT(err == noErr);
    if(!bCheckType)
        return SKError(err_rec_invalid,"[SKRecord::GetDataFieldValue] "
                       "Not called with a data field");

#ifdef FIELD_CACHE
    // Cache hit: hand out the cached binary with a fresh reference.
    if(m_ppFieldCache[((SKField*)pIField)->GetPosition()])
    {
        SK_ASSERT(((SKField*)pIField)->GetType() == SKFT_DATA);
        *ppBinary = (SKBinary*)
            m_ppFieldCache[((SKField*)pIField)->GetPosition()];
        (*ppBinary)->AddRef();
        return noErr;
    }
#endif

    if(m_lId != (PRUint32)-1)
    {
        PRUint32 lCount;
        m_pTable->GetCount(&lCount);
        if(m_lId == lCount-1)
        {
            // Last record: (const void*)-1 means "read to end of data".
            err = ((SKField*)pIField)
                ->GetDataFieldValue(m_pBuffer, ppBinary, (const void*) -1);
        }
        else
        {
            // Bound the read by the next record's buffer.
            skPtr<SKIRecord> next;
            m_pTable->GetRecord(m_lId+1, next.already_AddRefed());
            err = ((SKField*)pIField)->
                GetDataFieldValue(m_pBuffer, ppBinary,
                                  ((SKRecord*)(SKIRecord*)next)->
                                      GetSharedBuffer());
        }
    }
    else
    {
        // Detached record: no next-record bound available.
        err = ((SKField*)pIField)->GetDataFieldValue(m_pBuffer,ppBinary);
    }

    if(*ppBinary)
    {
#ifdef FIELD_CACHE
        // Keep one reference for the cache.
        (*ppBinary)->AddRef();
        m_ppFieldCache[((SKField*)pIField)->GetPosition()] = *ppBinary;
#endif
    }
    return err;
}
// Builds the merged occurrence cursor for document `iDocId` out of the
// hit record set `pRS`: scans records [0, iMax) for the first one whose
// doc-id field matches, then OR-merges the occurrence cursors of every
// consecutive record with that same doc id. *piNext is set to the index
// of the first record *after* the matched run, so the caller can resume
// scanning from there.
// NOTE(review): when m_pOccLinkField is NULL, *piNext is never written —
// callers presumably only use it when occurrence links exist; confirm.
SKERR SKIndex::BuildOccurrenceList(SKIRecordSet *pRS, PRUint32 *piNext,
                                   PRUint32 iMax, PRUint32 iDocId,
                                   SKCursor **ppOccList)
{
    *ppOccList = NULL;
    skPtr<SKCursor> pResult;
    *pResult.already_AddRefed() = sk_CreateInstance(SKCursor)();
    if(!pResult)
        return err_memory;
    if(m_pOccLinkField)
    {
        SKERR err;
        skPtr<SKIRecord> pRec;
        PRUint32 i, iId;
        // Find the first record carrying iDocId.
        for(i = 0; i < iMax; ++i)
        {
            err = pRS->GetRecord(i, pRec.already_AddRefed());
            if(err != noErr)
                return err;
            err = pRec->GetUNumFieldValue(m_pDocIdField, &iId);
            if(err != noErr)
                return err;
            if(iId == iDocId)
            {
                break;
            }
        }
        if(i < iMax)
        {
            SK_ASSERT(iId == iDocId);
            *piNext = i;
            skPtr<SKIRecordSet> pOccRS;
            skPtr<SKCursor> pNewCursor;
            // Merge the occurrences of every consecutive record with
            // this doc id.
            do
            {
                err = pRec->GetLinkFieldValue(m_pOccLinkField,
                                              pOccRS.already_AddRefed());
                if(err != noErr)
                    return err;
                PRUint32 iCount = 0;
                err = pOccRS->GetCount(&iCount);
                if(err != noErr)
                    return err;
                err = pOccRS->ExtractCursor(m_pOccField, 0, iCount,
                                            pNewCursor.already_AddRefed());
                if(err != noErr)
                    return err;
                err = pResult->Merge(pNewCursor, skfopOR);
                if(err != noErr)
                    return err;
                ++i;
                if (i < iMax)
                {
                    err = pRS->GetRecord(i, pRec.already_AddRefed());
                    if(err != noErr)
                        return err;
                    err = pRec->GetUNumFieldValue(m_pDocIdField, &iId);
                    if(err != noErr)
                        return err;
                }
            }
            while((i < iMax) && (iId == iDocId));
        }
        // First record past the matched run (== iMax if none matched).
        *piNext = i;
    }
    return pResult.CopyTo(ppOccList);
}
int skull_srv_init (sk_service_t* srv, void* data) { skull_service_opt_t* opt = data; skull_service_t skull_service = { .service = srv, .txn = NULL, .task = NULL, .freezed = 0 }; return opt->init(&skull_service, opt->ud); } void skull_srv_release (sk_service_t* srv, void* data) { skull_service_opt_t* opt = data; skull_service_t skull_service = { .service = srv, .txn = NULL, .task = NULL, .freezed = 0 }; opt->release(&skull_service, opt->ud); } /** * Invoke the real user service api function * Notes: This api can be ran on worker/bio */ int skull_srv_iocall (sk_service_t* srv, const sk_txn_t* txn, void* sdata, sk_txn_taskdata_t* task_data, const char* api_name, sk_srv_io_status_t ustatus) { SK_ASSERT(task_data); // find the api func skull_service_opt_t* opt = sdata; const sk_service_api_t* api = sk_service_api(srv, api_name); if (!api) { return 1; } // invoke the user service api skull_service_t skull_service = { .service = srv, .txn = txn, .task = task_data, .freezed = 0 }; return opt->iocall(&skull_service, api_name, opt->ud); } /** * Run api callback * Notes: This api only can be ran on worker */ int skull_srv_iocall_complete(sk_service_t* srv, sk_txn_t* txn, void* sdata, uint64_t task_id, const char* api_name) { int ret = 0; skull_service_opt_t* opt = sdata; sk_txn_taskdata_t* task_data = sk_txn_taskdata(txn, task_id); SK_ASSERT(task_data); skull_service_t skull_service = { .service = srv, .txn = txn, .task = task_data, .freezed = 0 }; if (!task_data->cb) { sk_print("no service api callback function to run, skip it\n"); goto iocall_cleanup; } skull_txn_t skull_txn; skull_txn_init(&skull_txn, txn); // Set service return code skull_txn_ioret_t skull_ret; sk_txn_task_status_t st = sk_txn_task_status(txn, task_id); SK_ASSERT_MSG(st == SK_TXN_TASK_DONE || st == SK_TXN_TASK_BUSY, "st %d\n: st"); if (st == SK_TXN_TASK_DONE) { skull_ret = SKULL_TXN_IO_OK; } else { skull_ret = SKULL_TXN_IO_ERROR_SRVBUSY; } // Invoke task callback ret = 
((skull_txn_iocb)task_data->cb)(&skull_txn, skull_ret, sk_service_name(srv), api_name, task_data->request, task_data->request_sz, task_data->response, task_data->response_sz, task_data->user_data); // Invoke iocomplete to do the cleanup job iocall_cleanup: opt->iocomplete(&skull_service, api_name, opt->ud); // Release the skull_txn skull_txn_release(&skull_txn, txn); return ret; }
// Merges `iWidth` cursors into one deduplicated result (m_piFinalData /
// m_iFinalCount), optionally computing `lAncillaryItemSize` bytes of
// ancillary data per kept element via `pCallback`. `pComparator` supplies
// a custom rank ordering; NULL means raw PRUint32 order. `pbInterrupt`
// is polled so a long merge can be aborted (err_interrupted).
// FIXES: the allocation check used bitwise `|` instead of logical `||`
// between two of the operands (`!m_piCount | !m_piPosition`), and
// m_pbKept was missing from the check entirely even though it is
// dereferenced in the merge loop.
SKERR SKMux::MuxCursorsWithAncillary(
    SKCursor **ppCursors, PRUint32 iWidth, PRUint32 lAncillaryItemSize,
    SKIMuxAncillaryCallback* pCallback, SKICursorComparator *pComparator,
    PRBool *pbInterrupt)
{
    if(iWidth == 0)
    {
        m_iFinalCount = 0;
        return noErr;
    }
    else if(iWidth == 1)
    {
        // Single cursor: straight copy, no merging needed.
        SKERR err = ppCursors[0]->GetCount(&m_iFinalCount);
        if(err != noErr)
        {
            m_iFinalCount = 0;
            return err;
        }
        else if(!m_iFinalCount)
            return noErr;
        m_piFinalData = new PRUint32[m_iFinalCount];
        if(!m_piFinalData)
        {
            m_iFinalCount = 0;
            return err_memory;
        }
        m_pAncillary = (void*) PR_Malloc(m_iFinalCount * lAncillaryItemSize);
        if(!m_pAncillary)
        {
            m_iFinalCount = 0;
            return err_memory;
        }
        err = ppCursors[0]->ComputeCursorForm();
        if (err != noErr)
            return err;
        const PRUint32 *piData = ppCursors[0]->GetSharedCursorDataRead();
        SK_ASSERT(NULL != piData);
        if(!piData)
            return err_failure;
        memcpy(m_piFinalData, piData, m_iFinalCount * sizeof(PRUint32));
        PRBool bTrue = PR_TRUE;
        if(lAncillaryItemSize && pCallback)
        {
            // NOTE(review): `&i` is passed as the per-cursor position
            // array here, whereas the multi-cursor path passes positions
            // already advanced past the element — confirm callbacks
            // tolerate the one-off difference.
            for(PRUint32 i = 0; i<m_iFinalCount; i++)
            {
                err = pCallback->Compute(m_piFinalData[i], &i, &bTrue,
                        (char*)m_pAncillary + i*lAncillaryItemSize);
                if(err != noErr)
                    return err;
            }
        }
        return noErr;
    }

    SK_ASSERT(iWidth >= 2);

    // 1/ Preparation: per-cursor data pointers, counts and positions.
    m_ppiData = new const PRUint32 *[iWidth];
    m_piCount = new PRUint32[iWidth];
    m_piPosition = new PRUint32[iWidth];
    m_piInputData = new PRUint32[iWidth];
    m_piSortedIndexes = new PRUint32[iWidth];
    m_pbKept = new PRBool[iWidth];
    if( !m_ppiData || !m_piCount || !m_piPosition || !m_piInputData
     || !m_piSortedIndexes || !m_pbKept)    // FIX: '|' typo + m_pbKept
        return err_memory;
    SKERR err;
    m_iFinalCount = 0;
    for(PRUint32 i = 0; i < iWidth; ++i)
    {
        err = ppCursors[i]->ComputeCursorForm();
        if (err != noErr)
            return err;
        m_ppiData[i] = ppCursors[i]->GetSharedCursorDataRead();
        err = ppCursors[i]->GetCount(m_piCount + i);
        if(err != noErr)
            return err;
        if(m_piCount[i] && !m_ppiData[i])
            return err_failure;
        m_piPosition[i] = 0;
        // Upper bound on the merged size (before deduplication).
        m_iFinalCount += m_piCount[i];
    }
    m_piFinalData = new PRUint32[m_iFinalCount];
    if(!m_piFinalData)
        return err_memory;
    m_pAncillary = (void*) PR_Malloc(m_iFinalCount * lAncillaryItemSize);
    if(!m_pAncillary)
    {
        m_iFinalCount = 0;
        return err_memory;
    }
    m_iFinalCount = 0;
    m_iSortedWidth = 0;
    // Seed the sorted index structure with each cursor's first element.
    for(PRUint32 j = 0; j < iWidth; ++j)
    {
        if(m_piPosition[j] < m_piCount[j])
        {
            if(pComparator)
            {
                err = InsertIndexWithComparator(j, pComparator);
                if(err != noErr)
                    return err;
            }
            else
                InsertIndexWithoutComparator(j);
        }
        if(pbInterrupt && *pbInterrupt)
            return err_interrupted;
    }

    // 2/ Merge: repeatedly pop the smallest element (at the end of the
    // sorted index array) and re-insert that cursor's next element.
    while(m_iSortedWidth)
    {
        // Check interruption flag
        if(pbInterrupt && *pbInterrupt)
            return err_interrupted;
        PRUint32 iIndex = m_piSortedIndexes[--m_iSortedWidth];
        SK_ASSERT(iIndex < iWidth);
        m_piFinalData[m_iFinalCount] = m_piInputData[iIndex];
        if(lAncillaryItemSize && pCallback)
        {
            // Flag every cursor currently contributing this same value.
            for(PRUint32 k = 0; k<iWidth; k++)
                m_pbKept[k] = m_piInputData[k] == m_piInputData[iIndex];
            err = pCallback->Compute(m_piInputData[iIndex], m_piPosition,
                    m_pbKept,
                    (char*) m_pAncillary + m_iFinalCount*lAncillaryItemSize);
            if(err != noErr)
                return err;
        }
        m_iFinalCount++;
        if(m_piPosition[iIndex] < m_piCount[iIndex])
        {
            if(pComparator)
            {
                err = InsertIndexWithComparator(iIndex, pComparator);
                if(err != noErr)
                    return err;
            }
            else
                InsertIndexWithoutComparator(iIndex);
        }
    }
    return noErr;
}