Example #1
/*
 * Commit schema modification into cache.
 *
 * 1. commit pending cache to go live
 * 2. create self reference ctx
 * 3. release old self reference ctx
 * 4. update pEntry to have new live schema ctx association
 */
VOID
VmDirSchemaCacheModifyCommit(
    PVDIR_ENTRY  pSchemaEntry
    )
{
    DWORD               dwError = 0;
    BOOLEAN             bInLock = FALSE;
    PVDIR_SCHEMA_CTX    pOldCtx = NULL;

    if ( !pSchemaEntry || !pSchemaEntry->pSchemaCtx )
    {
        dwError = ERROR_INVALID_PARAMETER;
        BAIL_ON_VMDIR_ERROR(dwError);
    }

    VMDIR_LOCK_MUTEX(bInLock, gVdirSchemaGlobals.mutex);

    if ( ! gVdirSchemaGlobals.bHasPendingChange )
    {
        VMDIR_LOG_INFO( VMDIR_LOG_MASK_ALL, "Schema cache update pass through" );
        goto cleanup;   // no schema definition change, just pass through.
    }

    gVdirSchemaGlobals.bHasPendingChange = FALSE;
    gVdirSchemaGlobals.pSchema = pSchemaEntry->pSchemaCtx->pSchema;

    pOldCtx = gVdirSchemaGlobals.pCtx;
    gVdirSchemaGlobals.pCtx = NULL;

    VdirSchemaCtxAcquireInLock(TRUE, &gVdirSchemaGlobals.pCtx); // add global instance self reference
    assert(gVdirSchemaGlobals.pCtx);

    VMDIR_LOG_INFO( VMDIR_LOG_MASK_ALL, "Enable schema instance (%p)", gVdirSchemaGlobals.pSchema);

    VMDIR_UNLOCK_MUTEX(bInLock, gVdirSchemaGlobals.mutex);

cleanup:

    VMDIR_UNLOCK_MUTEX(bInLock, gVdirSchemaGlobals.mutex);

    if (pOldCtx)
    {
        VmDirSchemaCtxRelease(pOldCtx);
    }

    return;

error:

    VMDIR_LOG_ERROR( VMDIR_LOG_MASK_ALL, "VmDirSchemaCacheModifyCommit failed (%d)", dwError);

    goto cleanup;
}
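The commit path above shows a pattern that recurs throughout these examples: the live pointer is swapped under the global mutex, but the old reference is released only after the lock is dropped, so teardown never runs while the mutex is held. Below is a minimal standalone sketch of that pattern using pthreads and a hypothetical refcounted ctx_t (not the vmdir types):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

typedef struct ctx { atomic_int refs; } ctx_t;    /* hypothetical refcounted type */

static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER;
static ctx_t *g_live = NULL;                      /* live context, guarded by g_mutex */

static void ctx_release(ctx_t *c)
{
    if (c && atomic_fetch_sub(&c->refs, 1) == 1)  /* last reference just dropped */
    {
        free(c);
    }
}

/* Publish a new live context; release the old one only after the mutex is
 * dropped, so destruction never runs while the lock is held. */
void ctx_commit(ctx_t *pNew)
{
    ctx_t *pOld = NULL;

    pthread_mutex_lock(&g_mutex);
    pOld = g_live;                                /* remember the old live ctx */
    g_live = pNew;                                /* swap under the lock */
    pthread_mutex_unlock(&g_mutex);

    ctx_release(pOld);                            /* release outside the lock */
}

Releasing outside the lock also avoids lock-ordering problems if the destructor itself needs to take other locks.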
Example #2
PVDIR_SCHEMA_CTX
VmDirSchemaCtxClone(
    PVDIR_SCHEMA_CTX    pOrgCtx
    )
{
    DWORD   dwError = 0;
    BOOLEAN bInLock = FALSE;
    PVDIR_SCHEMA_CTX    pCtx = NULL;

    assert(pOrgCtx);

    dwError = VmDirAllocateMemory(
            sizeof(VDIR_SCHEMA_CTX),
            (PVOID*)&pCtx);
    BAIL_ON_VMDIR_ERROR(dwError);

    pCtx->pSchema = pOrgCtx->pSchema;

    VMDIR_LOCK_MUTEX(bInLock, pCtx->pSchema->mutex);
    pCtx->pSchema->usRefCount++;

cleanup:

    VMDIR_UNLOCK_MUTEX(bInLock, pCtx->pSchema->mutex);

    return pCtx;

error:

    VMDIR_SAFE_FREE_MEMORY(pCtx);

    goto cleanup;
}
Example #3
VOID
VmDirDDVectorShutdown(
    VOID
    )
{
    BOOLEAN bInLock = FALSE;

    if (gVmdirServerGlobals.pReplDeadlockDetectionVector)
    {
        VMDIR_LOCK_MUTEX(bInLock, gVmdirServerGlobals.pReplDeadlockDetectionVector->pMutex);

        if (gVmdirServerGlobals.pReplDeadlockDetectionVector->pEmptyPageSentMap)
        {
            LwRtlHashMapClear(
                    gVmdirServerGlobals.pReplDeadlockDetectionVector->pEmptyPageSentMap,
                    VmDirSimpleHashMapPairFree,
                    NULL);
            LwRtlFreeHashMap(&gVmdirServerGlobals.pReplDeadlockDetectionVector->pEmptyPageSentMap);
        }

        VMDIR_SAFE_FREE_MEMORY(gVmdirServerGlobals.pReplDeadlockDetectionVector->pszInvocationId);

        VMDIR_UNLOCK_MUTEX(bInLock, gVmdirServerGlobals.pReplDeadlockDetectionVector->pMutex);

        // still inside the NULL check: safe to free the mutex and the vector itself
        VMDIR_SAFE_FREE_MUTEX(gVmdirServerGlobals.pReplDeadlockDetectionVector->pMutex);
        VMDIR_SAFE_FREE_MEMORY(gVmdirServerGlobals.pReplDeadlockDetectionVector);
    }
}
Example #4
// This log helps detect bursts of traffic that cause
// write-transaction average latency to spike.
VOID
VmDirWtxnOutstandingInc()
{
   BOOLEAN bLock = FALSE;
   double stats_period_ms = 0.0;
   double offered_rate = 0.0;

   VMDIR_LOCK_MUTEX(bLock, g_w_txns_mutex);
   if (g_w_txns_outstanding == 0)
   {
      g_start_ts_ms = VmDirGetTimeInMilliSec();
      g_stats_cnt = 0;
   }
   g_w_txns_outstanding++;
   g_stats_cnt++;

   if (g_w_txns_outstanding >= g_w_txns_outstanding_thresh && g_stats_cnt >= g_w_txns_outstanding_thresh)
   {
       stats_period_ms = (double)(VmDirGetTimeInMilliSec() - g_start_ts_ms);
       if (stats_period_ms > 1) // avoid dividing by a near-zero period
       {
           offered_rate = (double)g_stats_cnt * 1000.0 / stats_period_ms;
           VMDIR_LOG_INFO(VMDIR_LOG_MASK_ALL, "%s: write transactions outstanding %d for period %.2g ms with offered rate %.3g on %d write requests",
                       __func__,  g_w_txns_outstanding, stats_period_ms, offered_rate, g_stats_cnt);
       }
       g_stats_cnt = 0;
       g_start_ts_ms = VmDirGetTimeInMilliSec();
   }
   VMDIR_UNLOCK_MUTEX(bLock, g_w_txns_mutex);
}
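The offered rate logged above is just the request count divided by the elapsed window, scaled to per-second units; the "> 1" guard keeps a near-zero window from producing a nonsense rate. A tiny standalone check of that arithmetic (illustrative only, not vmdir code):

#include <stdio.h>

/* Offered rate in requests/second over a window measured in milliseconds. */
static double offered_rate(unsigned requests, double period_ms)
{
    return period_ms > 1.0 ? (double)requests * 1000.0 / period_ms : 0.0;
}

int main(void)
{
    /* 50 write requests observed over a 200 ms window => 250.0 req/s */
    printf("%.1f req/s\n", offered_rate(50, 200.0));
    return 0;
}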
Example #5
static
PVDIR_PAGED_SEARCH_RECORD
VmDirPagedSearchCacheFind(
    PCSTR pszCookie
    )
{
    DWORD dwError = 0;
    PLW_HASHTABLE_NODE pNode = NULL;
    PVDIR_PAGED_SEARCH_RECORD pSearchRecord = NULL;
    BOOLEAN bInLock = FALSE;

    if (IsNullOrEmptyString(pszCookie))
    {
        BAIL_WITH_VMDIR_ERROR(dwError, VMDIR_ERROR_INVALID_PARAMETER);
    }

    VMDIR_LOCK_MUTEX(bInLock, gPagedSearchCache.mutex);
    dwError = LwRtlHashTableFindKey(
                    gPagedSearchCache.pHashTbl,
                    &pNode,
                    (PVOID)pszCookie);
    dwError = LwNtStatusToWin32Error(dwError);
    BAIL_ON_VMDIR_ERROR(dwError);

    pSearchRecord = LW_STRUCT_FROM_FIELD(pNode, VDIR_PAGED_SEARCH_RECORD, Node);
    _RefPagedSearchRecord(pSearchRecord);

cleanup:
    VMDIR_UNLOCK_MUTEX(bInLock, gPagedSearchCache.mutex);
    return pSearchRecord;
error:
    goto cleanup;
}
Example #6
static
VOID
VmDirPagedSearchCacheRecordFree(
    PVDIR_PAGED_SEARCH_RECORD pSearchRecord
    )
{
    PVOID pvData = NULL;
    BOOLEAN bInLock = FALSE;

    if (pSearchRecord == NULL)
    {
        return;
    }

    VMDIR_LOCK_MUTEX(bInLock, gPagedSearchCache.mutex);
    (VOID)LwRtlHashTableRemove(gPagedSearchCache.pHashTbl, &pSearchRecord->Node);
    VMDIR_UNLOCK_MUTEX(bInLock, gPagedSearchCache.mutex);

    VmDirFreeStringA(pSearchRecord->pszGuid);

    DeleteFilter(pSearchRecord->pFilter);

    while (dequePop(pSearchRecord->pQueue, (PVOID*)&pvData) == 0)
    {
        VmDirFreeMemory(pvData);
    }
    dequeFree(pSearchRecord->pQueue);

    VmDirFreeMutex(pSearchRecord->mutex);
    VmDirFreeCondition(pSearchRecord->pDataAvailable);

    VmDirSrvThrFree(pSearchRecord->pThreadInfo);

    VmDirFreeMemory(pSearchRecord);
}
Example #7
DWORD
VmDirDDVectorUpdate(
    PCSTR   pszInvocationId,
    DWORD   dwValue
    )
{
    DWORD     dwError = 0;
    BOOLEAN   bInLock = FALSE;

    VMDIR_LOCK_MUTEX(bInLock, gVmdirServerGlobals.pReplDeadlockDetectionVector->pMutex);

    // replace the cached invocation id under the lock; free any previous value first
    VMDIR_SAFE_FREE_MEMORY(gVmdirServerGlobals.pReplDeadlockDetectionVector->pszInvocationId);

    dwError = VmDirAllocateStringA(
            pszInvocationId,
            &gVmdirServerGlobals.pReplDeadlockDetectionVector->pszInvocationId);
    BAIL_ON_VMDIR_ERROR(dwError);

    dwError = _VmDirDDVectorUpdateInLock(dwValue);
    BAIL_ON_VMDIR_ERROR(dwError);

cleanup:
    VMDIR_UNLOCK_MUTEX(bInLock, gVmdirServerGlobals.pReplDeadlockDetectionVector->pMutex);
    return dwError;

error:
    VMDIR_LOG_ERROR(VMDIR_LOG_MASK_ALL, "failed, error (%d)", dwError);
    goto cleanup;
}
Example #8
static
DWORD
_VmDirRidSyncThr(
    PVOID    pArg
    )
{
    DWORD               dwError = 0;
    BOOLEAN             bInLock = FALSE;
    PVDIR_THREAD_INFO   pThrInfo = (PVDIR_THREAD_INFO)pArg;
    PVMDIR_SID_GEN_STACK_NODE pSidGenStackNode = NULL;

    VMDIR_LOG_VERBOSE( VMDIR_LOG_MASK_ALL, "_VmDirRidSyc thr started" );

    while (1)
    {
        if (VmDirdState() == VMDIRD_STATE_SHUTDOWN)
        {
            goto cleanup;
        }

        VMDIR_SAFE_FREE_MEMORY(pSidGenStackNode);
        while (VmDirPopTSStack(gSidGenState.pStack, (PVOID*)&pSidGenStackNode) == 0 &&
               pSidGenStackNode != NULL)
        {
            (VOID)VmDirSyncRIDSeqToDB(
                    pSidGenStackNode->pszDomainDn,
                    pSidGenStackNode->dwDomainRidSequence);

            if (VmDirdState() == VMDIRD_STATE_SHUTDOWN)
            {
                //
                // Any pending updates will be performed by VmDirVmAclShutdown.
                //
                goto cleanup;
            }

            VMDIR_SAFE_FREE_MEMORY(pSidGenStackNode);
        }

        VMDIR_LOCK_MUTEX(bInLock, pThrInfo->mutexUsed);

        VmDirConditionTimedWait(
            pThrInfo->conditionUsed,
            pThrInfo->mutexUsed,
            3 * 1000);          // wait up to 3 seconds
        // timeout/error is ignored; loop and re-check state

        VMDIR_UNLOCK_MUTEX(bInLock, pThrInfo->mutexUsed);
    }

cleanup:

    VMDIR_LOG_VERBOSE( VMDIR_LOG_MASK_ALL, "_VmDirRidSyc thr stopped (%d)", dwError );

    VMDIR_SAFE_FREE_MEMORY(pSidGenStackNode);

    return dwError;
}
Example #9
DWORD
VmDirIndexCfgAcquire(
    PCSTR               pszAttrName,
    VDIR_INDEX_USAGE    usage,
    PVDIR_INDEX_CFG*    ppIndexCfg
    )
{
    DWORD   dwError = 0;
    BOOLEAN bInLock = FALSE;
    PVDIR_INDEX_CFG pIndexCfg = NULL;
    PVMDIR_MUTEX    pMutex = NULL;

    if (IsNullOrEmptyString(pszAttrName) || !ppIndexCfg)
    {
        dwError = VMDIR_ERROR_INVALID_PARAMETER;
        BAIL_ON_VMDIR_ERROR(dwError);
    }

    *ppIndexCfg = NULL;

    if (LwRtlHashMapFindKey(
            gVdirIndexGlobals.pIndexCfgMap, (PVOID*)&pIndexCfg, pszAttrName))
    {
        goto cleanup;
    }

    pMutex = pIndexCfg->mutex;
    VMDIR_LOCK_MUTEX(bInLock, pMutex);

    if (pIndexCfg->status == VDIR_INDEXING_SCHEDULED)
    {
        goto cleanup;
    }
    else if (pIndexCfg->status == VDIR_INDEXING_IN_PROGRESS &&
            usage == VDIR_INDEX_READ)
    {
        dwError = VMDIR_ERROR_UNWILLING_TO_PERFORM;
    }
    else if (pIndexCfg->status == VDIR_INDEXING_DISABLED ||
            pIndexCfg->status == VDIR_INDEXING_DELETED)
    {
        goto cleanup;
    }
    BAIL_ON_VMDIR_ERROR(dwError);

    pIndexCfg->usRefCnt++;
    *ppIndexCfg = pIndexCfg;

cleanup:
    VMDIR_UNLOCK_MUTEX(bInLock, pMutex);
    return dwError;

error:
    VMDIR_LOG_ERROR( VMDIR_LOG_MASK_ALL,
            "%s failed, error (%d)", __FUNCTION__, dwError );

    goto cleanup;
}
Example #10
DWORD
VmDirWriteQueuePush(
    PVDIR_BACKEND_CTX           pBECtx,
    PVMDIR_WRITE_QUEUE          pWriteQueue,
    PVMDIR_WRITE_QUEUE_ELEMENT  pWriteQueueEle
    )
{
    int       dbRetVal = 0;
    USN       localUsn = 0;
    DWORD     dwError = 0;
    BOOLEAN   bInLock = FALSE;

    if (!pBECtx || !pWriteQueue || !pWriteQueueEle)
    {
        BAIL_WITH_VMDIR_ERROR(dwError, VMDIR_ERROR_INVALID_PARAMETER);
    }

    if (pBECtx->wTxnUSN != 0)
    {
        VMDIR_LOG_ERROR(
            VMDIR_LOG_MASK_ALL,
            "%s: acquiring multiple usn in same operation context, USN: %" PRId64,
            __FUNCTION__,
            pBECtx->wTxnUSN);
        BAIL_WITH_VMDIR_ERROR(dwError, LDAP_OPERATIONS_ERROR);
    }

    VMDIR_LOCK_MUTEX(bInLock, gVmDirServerOpsGlobals.pMutex);

    if ((dbRetVal = pBECtx->pBE->pfnBEGetNextUSN(pBECtx, &localUsn)) != 0)
    {
        VMDIR_LOG_ERROR(
            VMDIR_LOG_MASK_ALL,
            "%s: pfnBEGetNextUSN failed with error code: %d",
            __FUNCTION__,
            dbRetVal);
        BAIL_WITH_VMDIR_ERROR(dwError, LDAP_OPERATIONS_ERROR);
    }

    pWriteQueueEle->usn = localUsn;

    dwError = VmDirLinkedListInsertTail(
            pWriteQueue->pList,
            (PVOID) pWriteQueueEle,
            NULL);
    BAIL_ON_VMDIR_ERROR(dwError);

    VMDIR_LOG_INFO(LDAP_DEBUG_WRITE_QUEUE, "%s: usn: %"PRId64, __FUNCTION__, localUsn);

cleanup:
    VMDIR_UNLOCK_MUTEX(bInLock, gVmDirServerOpsGlobals.pMutex);
    return dwError;

error:
    VMDIR_LOG_ERROR(VMDIR_LOG_MASK_ALL, "failed, error (%d) localUsn %"PRId64, dwError, localUsn);
    goto cleanup;
}
Example #11
DWORD
VmDirQueueDequeue(
    BOOL        bInLock,
    PVDIR_QUEUE pQueue,
    int64_t     iTimeoutMs,
    PVOID*      ppElement
    )
{
    DWORD dwError = 0;
    PVDIR_QUEUE_NODE pTemp = NULL;

    if (!pQueue || !ppElement)
    {
        BAIL_WITH_VMDIR_ERROR(dwError, ERROR_INVALID_PARAMETER);
    }

    VMDIR_LOCK_MUTEX(bInLock, pQueue->pMutex);

    if (!pQueue->pHead)
    {
        if (iTimeoutMs < 0) // negative timeout: block until an element arrives
        {
            while (!pQueue->pHead)
            {
                dwError = VmDirConditionWait(pQueue->pCond, pQueue->pMutex);
                BAIL_ON_VMDIR_ERROR(dwError);
            }
        }
        else if (iTimeoutMs > 0) // positive timeout: wait up to iTimeoutMs
        {
            VmDirConditionTimedWait(pQueue->pCond, pQueue->pMutex, iTimeoutMs);
            if (!pQueue->pHead)
            {
                dwError = VMDIR_ERROR_QUEUE_EMPTY;
            }
        }
        else // zero timeout: non-blocking poll
        {
            dwError = VMDIR_ERROR_QUEUE_EMPTY;
        }
        BAIL_ON_VMDIR_ERROR(dwError);
    }

    pQueue->iSize--;
    pTemp = pQueue->pHead;
    pQueue->pHead = pQueue->pHead->pNext;
    *ppElement = pTemp->pElement;
    VMDIR_SAFE_FREE_MEMORY(pTemp);

cleanup:
    VMDIR_UNLOCK_MUTEX(bInLock, pQueue->pMutex);
    return dwError;

error:
    VMDIR_LOG_ERROR(VMDIR_LOG_MASK_ALL, "%s failed, error (%d)", __FUNCTION__, dwError);
    goto cleanup;
}
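VmDirQueueDequeue above folds three behaviors into one timeout argument: negative blocks until data arrives, positive waits up to the timeout, and zero polls. A standalone pthreads sketch of just the timed case, using a hypothetical queue_t rather than the vmdir queue, shows the usual shape of such a wait:

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <time.h>

typedef struct node { struct node *next; void *data; } node_t;

typedef struct queue {                 /* hypothetical queue type */
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    node_t         *head;
    node_t         *tail;
} queue_t;

/* Wait up to timeout_ms for an element; returns 0 on success or
 * ETIMEDOUT if the queue is still empty when the deadline passes. */
int queue_pop_timed(queue_t *q, long timeout_ms, void **out)
{
    int rc = 0;
    struct timespec deadline;

    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec  += timeout_ms / 1000;
    deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
    if (deadline.tv_nsec >= 1000000000L)
    {
        deadline.tv_sec  += 1;
        deadline.tv_nsec -= 1000000000L;
    }

    pthread_mutex_lock(&q->lock);
    while (!q->head && rc == 0)
    {
        /* re-check the predicate after every wakeup (spurious wakeups happen) */
        rc = pthread_cond_timedwait(&q->cond, &q->lock, &deadline);
    }
    if (q->head)
    {
        node_t *n = q->head;
        q->head = n->next;
        if (!q->head)
        {
            q->tail = NULL;
        }
        *out = n->data;
        free(n);
        rc = 0;                        /* data arrived even if the last wait timed out */
    }
    pthread_mutex_unlock(&q->lock);
    return rc;
}

The vmdir version re-checks pHead once after VmDirConditionTimedWait rather than looping on the predicate; either way, the decision is made with the mutex held.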
Example #12
VOID
VmDirWtxnOutstandingDec()
{
    BOOLEAN bLock = FALSE;

    VMDIR_LOCK_MUTEX(bLock, g_w_txns_mutex);
    g_w_txns_outstanding--;
    VMDIR_UNLOCK_MUTEX(bLock, g_w_txns_mutex);
}
Example #13
/*
 * This is always called in single-threaded mode, during startup or from tools.
 */
DWORD
VmDirSchemaInitializeViaFile(
    PCSTR pszSchemaFilePath
    )
{
    DWORD     dwError = 0;
    PVDIR_ENTRY    pEntry = NULL;
    BOOLEAN   bInLock = FALSE;

    if (IsNullOrEmptyString(pszSchemaFilePath))
    {
        dwError = ERROR_INVALID_PARAMETER;
        BAIL_ON_VMDIR_ERROR(dwError);
    }

    dwError = VmDirSchemaInitalizeFileToEntry(
            pszSchemaFilePath,
            &pEntry);
    BAIL_ON_VMDIR_ERROR(dwError);

    dwError = VmDirSchemaInitializeViaEntry(pEntry);
    BAIL_ON_VMDIR_ERROR(dwError);

    VMDIR_LOCK_MUTEX(bInLock, gVdirSchemaGlobals.mutex);
    // globals take ownership of pEntry; it is used to write to the DB later
    gVdirSchemaGlobals.pLoadFromFileEntry = pEntry;

    VMDIR_UNLOCK_MUTEX(bInLock, gVdirSchemaGlobals.mutex);

cleanup:

    VMDIR_UNLOCK_MUTEX(bInLock, gVdirSchemaGlobals.mutex);

    return dwError;

error:

    if (pEntry)
    {
        VmDirFreeEntry(pEntry);
    }

    goto cleanup;
}
Example #14
DWORD
VmDirPagedSearchCacheRead(
    PCSTR pszCookie,
    ENTRYID **ppValidatedEntries,
    DWORD *pdwEntryCount
    )
{
    DWORD dwError = 0;
    PVDIR_PAGED_SEARCH_RECORD pSearchRecord = NULL;
    BOOLEAN bInLock = FALSE;
    PVDIR_PAGED_SEARCH_ENTRY_LIST pEntryList = NULL;

    pSearchRecord = VmDirPagedSearchCacheFind(pszCookie);
    if (pSearchRecord == NULL)
    {
        //
        // Barring the client sending us an invalid cookie, failure here
        // means that the worker thread timed out and freed the cache.
        //
        BAIL_WITH_VMDIR_ERROR(dwError, VMDIR_ERROR_NOT_FOUND);
    }

    VMDIR_LOCK_MUTEX(bInLock, pSearchRecord->mutex);

    dwError = _VmDirPagedSearchCacheWaitAndRead_inlock(
                pSearchRecord,
                &pEntryList);
    BAIL_ON_VMDIR_ERROR(dwError);

    if (pSearchRecord->bSearchCompleted)
    {
        VmDirPagedSearchCacheCullWorkerThread(pSearchRecord);
    }
    else
    {
        *ppValidatedEntries = pEntryList->pEntryIds;
        *pdwEntryCount = pEntryList->dwCount;

        //
        // We transferred ownership of pEntryList->pEntryIds above but we still
        // need to delete the rest of the structure.
        //
        pEntryList->pEntryIds = NULL;
        _VmDirPagedSearchEntryListFree(pEntryList);
    }

cleanup:
    VMDIR_UNLOCK_MUTEX(bInLock, pSearchRecord->mutex);
    if (pSearchRecord != NULL)
    {
        _DerefPagedSearchRecord(pSearchRecord);
    }
    return dwError;
error:
    goto cleanup;
}
Example #15
DWORD
VdirSchemaCtxAcquireInLock(
    BOOLEAN    bHasLock,    // TRUE if the caller already holds gVdirSchemaGlobals.mutex
    PVDIR_SCHEMA_CTX* ppSchemaCtx
    )
{
    DWORD   dwError = 0;
    BOOLEAN bInLock = FALSE;
    BOOLEAN bInLockNest = FALSE;
    PVDIR_SCHEMA_CTX    pCtx = NULL;

    dwError = VmDirAllocateMemory(
            sizeof(VDIR_SCHEMA_CTX),
            (PVOID*)&pCtx);
    BAIL_ON_VMDIR_ERROR(dwError);

    if (!bHasLock)
    {
        VMDIR_LOCK_MUTEX(bInLock, gVdirSchemaGlobals.mutex);
    }

    pCtx->pSchema = gVdirSchemaGlobals.pSchema;
    assert(pCtx->pSchema);

    VMDIR_LOCK_MUTEX(bInLockNest, pCtx->pSchema->mutex);
    pCtx->pSchema->usRefCount++;

error:

    VMDIR_UNLOCK_MUTEX(bInLockNest, pCtx->pSchema->mutex);
    VMDIR_UNLOCK_MUTEX(bInLock, gVdirSchemaGlobals.mutex);

    if (dwError != ERROR_SUCCESS)
    {
        dwError = ERROR_NO_SCHEMA;
        VMDIR_SAFE_FREE_MEMORY(pCtx);
        pCtx = NULL;
    }

    *ppSchemaCtx = pCtx;

    return dwError;
}
Example #16
/*
 * Get the Entry used to startup/load schema for the very first time from file.
 * Caller owns pEntry.
 */
PVDIR_ENTRY
VmDirSchemaAcquireAndOwnStartupEntry(
    VOID
    )
{
    BOOLEAN     bInLock = FALSE;
    PVDIR_ENTRY      pEntry = NULL;

    VMDIR_LOCK_MUTEX(bInLock, gVdirSchemaGlobals.mutex);

    pEntry = gVdirSchemaGlobals.pLoadFromFileEntry;
    gVdirSchemaGlobals.pLoadFromFileEntry = NULL;

    VMDIR_UNLOCK_MUTEX(bInLock, gVdirSchemaGlobals.mutex);

    return pEntry;
}
Example #17
static
BOOLEAN
vdirIsLiveSchema(
    PVDIR_SCHEMA_INSTANCE    pSchema
    )
{
    BOOLEAN bInLock = FALSE;
    PVDIR_SCHEMA_INSTANCE    pLiveSchema = NULL;

    VMDIR_LOCK_MUTEX(bInLock, gVdirSchemaGlobals.mutex);

    pLiveSchema = gVdirSchemaGlobals.pSchema;

    VMDIR_UNLOCK_MUTEX(bInLock, gVdirSchemaGlobals.mutex);

    return (pSchema == pLiveSchema);
}
Example #18
VOID
VmDirSchemaLibShutdown(
    VOID
    )
{
    BOOLEAN bInLock = FALSE;

    VMDIR_LOCK_MUTEX(bInLock, gVdirSchemaGlobals.mutex);

    // release live context and live schema
    VmDirSchemaCtxRelease(gVdirSchemaGlobals.pCtx);
    VMDIR_SAFE_FREE_MEMORY(gVdirSchemaGlobals.pszDN);

    VMDIR_UNLOCK_MUTEX(bInLock, gVdirSchemaGlobals.mutex);

    VMDIR_SAFE_FREE_MUTEX( gVdirSchemaGlobals.mutex );
}
Example #19
size_t
VmDirWriteQueueSize(
    PVMDIR_WRITE_QUEUE          pWriteQueue
    )
{
    size_t   iSize = 0;
    BOOLEAN bInLock = FALSE;

    if (pWriteQueue)
    {
        VMDIR_LOCK_MUTEX(bInLock, gVmDirServerOpsGlobals.pMutex);
        iSize = VmDirWriteQueueSizeInLock(pWriteQueue);
    }

    VMDIR_UNLOCK_MUTEX(bInLock, gVmDirServerOpsGlobals.pMutex);

    return iSize;
}
Example #20
DWORD
VmDirQueueEnqueue(
    BOOL                bInLock,
    PVDIR_QUEUE         pQueue,
    PVOID               pElement
    )
{
    DWORD dwError = 0;
    PVDIR_QUEUE_NODE pQueueNode = NULL;

    if (!pQueue || !pElement)
    {
        BAIL_WITH_VMDIR_ERROR(dwError, ERROR_INVALID_PARAMETER);
    }

    dwError = VmDirAllocateMemory(sizeof(VDIR_QUEUE_NODE), (PVOID*)&pQueueNode);
    BAIL_ON_VMDIR_ERROR(dwError);

    pQueueNode->pElement = pElement;

    VMDIR_LOCK_MUTEX(bInLock, pQueue->pMutex);

    if (pQueue->pHead)
    {
        pQueue->pTail->pNext = pQueueNode;
        pQueue->pTail = pQueueNode;
    }
    else
    {
        pQueue->pHead = pQueueNode;
        pQueue->pTail = pQueueNode;
        VmDirConditionSignal(pQueue->pCond);
    }
    pQueue->iSize++;

cleanup:
    VMDIR_UNLOCK_MUTEX(bInLock, pQueue->pMutex);
    return dwError;

error:
    VMDIR_LOG_ERROR(VMDIR_LOG_MASK_ALL, "%s failed, error (%d)", __FUNCTION__, dwError);
    VMDIR_SAFE_FREE_MEMORY(pQueueNode);
    goto cleanup;
}
Example #21
DWORD
VdirIndexingPreCheck(
    PVDIR_ATTR_INDEX_INSTANCE* ppCache)
{
    DWORD   dwError = 0;
    BOOLEAN bInLock = FALSE;
    USHORT  usVersion = 0;
    BOOLEAN bIndexInProgress = FALSE;
    PVDIR_ATTR_INDEX_INSTANCE pCache = NULL;

    VMDIR_LOCK_MUTEX(bInLock, gVdirAttrIndexGlobals.mutex);

    usVersion = gVdirAttrIndexGlobals.usLive;
    pCache = gVdirAttrIndexGlobals.pCaches[usVersion];
    bIndexInProgress = gVdirAttrIndexGlobals.bIndexInProgress;

    VMDIR_UNLOCK_MUTEX(bInLock, gVdirAttrIndexGlobals.mutex);

    // no more reserved cache slots available
    if (usVersion >= MAX_ATTR_INDEX_CACHE_INSTANCE - 1)
    {
        dwError = LDAP_UNWILLING_TO_PERFORM;
        //TODO, log more meaningful error
        BAIL_ON_VMDIR_ERROR(dwError);
    }

    // an indexing job is already outstanding
    if (bIndexInProgress)
    {
        dwError = LDAP_UNWILLING_TO_PERFORM;
        //TODO, log more meaningful error
        BAIL_ON_VMDIR_ERROR(dwError);
    }

    if (ppCache)
    {
        *ppCache = pCache;
    }

error:

    return dwError;
}
Example #22
/*
 * should only be used during bootstrap
 * maybe add state check?
 */
DWORD
VmDirIndexOpen(
    PVDIR_INDEX_CFG pIndexCfg
    )
{
    DWORD   dwError = 0;
    BOOLEAN bInLock = FALSE;
    PVDIR_BACKEND_INTERFACE pBE = NULL;

    if (!pIndexCfg)
    {
        dwError = ERROR_INVALID_PARAMETER;
        BAIL_ON_VMDIR_ERROR(dwError);
    }

    VMDIR_LOCK_MUTEX(bInLock, gVdirIndexGlobals.mutex);

    if (LwRtlHashMapFindKey(
            gVdirIndexGlobals.pIndexCfgMap, NULL, pIndexCfg->pszAttrName) == 0)
    {
        dwError = ERROR_ALREADY_INITIALIZED;
        BAIL_ON_VMDIR_ERROR(dwError);
    }

    dwError = LwRtlHashMapInsert(
            gVdirIndexGlobals.pIndexCfgMap,
            pIndexCfg->pszAttrName,
            pIndexCfg,
            NULL);
    BAIL_ON_VMDIR_ERROR(dwError);

    pBE = VmDirBackendSelect(NULL);
    dwError = pBE->pfnBEIndexOpen(pIndexCfg);
    BAIL_ON_VMDIR_ERROR(dwError);

cleanup:
    VMDIR_UNLOCK_MUTEX(bInLock, gVdirIndexGlobals.mutex);
    return dwError;

error:
    goto cleanup;
}
Example #23
VOID
VmDirIndexCfgRelease(
    PVDIR_INDEX_CFG pIndexCfg
    )
{
    BOOLEAN bInLock = FALSE;

    if (pIndexCfg)
    {
        VMDIR_LOCK_MUTEX(bInLock, pIndexCfg->mutex);
        pIndexCfg->usRefCnt--;
        VMDIR_UNLOCK_MUTEX(bInLock, pIndexCfg->mutex);

        if (pIndexCfg->usRefCnt == 0)
        {
            VmDirFreeIndexCfg(pIndexCfg);
        }
    }
}
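One hedged refinement of the release pattern above: because the zero check runs after the mutex is released, two concurrent releasers can in principle both observe a count of zero. Sampling the post-decrement count while the lock is still held closes that window. A standalone sketch with pthreads and a hypothetical cfg_t (not the vmdir type):

#include <pthread.h>
#include <stdlib.h>

typedef struct cfg {                   /* hypothetical refcounted config type */
    pthread_mutex_t lock;
    unsigned        refcnt;
    /* ... payload ... */
} cfg_t;

static void cfg_free(cfg_t *c)
{
    pthread_mutex_destroy(&c->lock);
    free(c);
}

void cfg_release(cfg_t *c)
{
    unsigned remaining;

    if (!c)
    {
        return;
    }

    pthread_mutex_lock(&c->lock);
    remaining = --c->refcnt;           /* sample the count while still locked */
    pthread_mutex_unlock(&c->lock);

    if (remaining == 0)                /* decide using the sampled value */
    {
        cfg_free(c);
    }
}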
Example #24
/*
 * LDAP operation thread exit
 */
static
VOID
_VmDirFlowCtrlThrExit(
    VOID
    )
{
    BOOLEAN bInLock = FALSE;
    DWORD   dwFlowCtrl = 0;

    VMDIR_LOCK_MUTEX(bInLock, gVmdirGlobals.pFlowCtrlMutex);

    gVmdirGlobals.dwMaxFlowCtrlThr++;
    dwFlowCtrl = gVmdirGlobals.dwMaxFlowCtrlThr;

    VMDIR_UNLOCK_MUTEX(bInLock, gVmdirGlobals.pFlowCtrlMutex);

    VMDIR_LOG_INFO( LDAP_DEBUG_CONNS, "FlowCtrlThr++ %d", dwFlowCtrl);

    return;
}
Example #25
static
VOID
_VmDirPagedSearchCacheWaitForClientCompletion(
    PVDIR_PAGED_SEARCH_RECORD pSearchRecord
    )
{
    BOOLEAN bInLock = FALSE;
    DWORD dwError = 0;

    //
    // Sleep for our timeout period or until the client has read all the data
    // (at which point bSearchCompleted will be set and the condition will be
    // signalled). If we time out, we forcibly clear the cache, which causes
    // the client to fall back to the old search semantics. Note that the
    // timeout is reset every time the client issues a read request.
    //
    VMDIR_LOCK_MUTEX(bInLock, pSearchRecord->pThreadInfo->mutexUsed);

    pSearchRecord->bProcessingCompleted = TRUE;

    while (TRUE)
    {
        if (pSearchRecord->bSearchCompleted)
        {
            break;
        }
        dwError = VmDirConditionTimedWait(
                    pSearchRecord->pThreadInfo->conditionUsed,
                    pSearchRecord->pThreadInfo->mutexUsed,
                    gVmdirGlobals.dwLdapRecvTimeoutSec);
        if (dwError == ETIMEDOUT)
        {
            if ((time(NULL) - pSearchRecord->tLastClientRead) > gVmdirGlobals.dwLdapRecvTimeoutSec)
            {
                break;
            }
        }
    }

    VMDIR_UNLOCK_MUTEX(bInLock, pSearchRecord->pThreadInfo->mutexUsed);
}
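The wait loop above keys off two things: the completion flag and how long it has been since the client last read. A condensed standalone sketch of that idle-timeout wait, with a hypothetical waiter_t in place of the vmdir thread-info plumbing:

#include <errno.h>
#include <pthread.h>
#include <time.h>

typedef struct waiter {                /* hypothetical bookkeeping type */
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    int             done;
    time_t          last_activity;     /* refreshed elsewhere on each client read */
} waiter_t;

/* Wait until 'done' is set, but give up once no client activity has been
 * seen for idle_timeout_sec. */
void wait_for_completion_or_idle(waiter_t *w, time_t idle_timeout_sec)
{
    pthread_mutex_lock(&w->lock);
    while (!w->done)
    {
        struct timespec deadline;
        int rc;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += idle_timeout_sec;

        rc = pthread_cond_timedwait(&w->cond, &w->lock, &deadline);
        if (rc == ETIMEDOUT &&
            (time(NULL) - w->last_activity) > idle_timeout_sec)
        {
            break;                     /* idle for too long, give up */
        }
    }
    pthread_mutex_unlock(&w->lock);
}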
Example #26
static
DWORD
VmDirPagedSearchCacheAddData(
    PVDIR_PAGED_SEARCH_RECORD pSearchRecord,
    PVDIR_PAGED_SEARCH_ENTRY_LIST pEntryList
    )
{
    DWORD dwError = 0;
    BOOLEAN bInLock = FALSE;

    VMDIR_LOCK_MUTEX(bInLock, pSearchRecord->mutex);
    dwError = dequePush(pSearchRecord->pQueue, pEntryList);
    BAIL_ON_VMDIR_ERROR(dwError);

    VmDirPagedSearchCacheReaderSignal_inlock(pSearchRecord);
cleanup:
    VMDIR_UNLOCK_MUTEX(bInLock, pSearchRecord->mutex);
    return dwError;
error:
    goto cleanup;
}
Example #27
VOID
VmDirOPStatisticUpdate(
    ber_tag_t opTag,
    uint64_t iThisTimeInMilliSec
    )
{
    BOOLEAN     bInLock = FALSE;
    uint64_t    iNewTotalTime = 0;
    PVMDIR_OPERATION_STATISTIC pStatistic = NULL;

    pStatistic = _VmDirGetStatisticFromTag(opTag);
    if (pStatistic == NULL)
    {
        return;
    }

    if (iThisTimeInMilliSec <=  0)
    {
        iThisTimeInMilliSec = 1;
    }

    VMDIR_LOCK_MUTEX(bInLock, pStatistic->pmutex);
    pStatistic->iTotalCount++;

    iNewTotalTime = pStatistic->iTimeInMilliSec + iThisTimeInMilliSec;

    if (iNewTotalTime < pStatistic->iTimeInMilliSec)
    {
        // overflow, reset time and counter
        pStatistic->iTimeInMilliSec = iThisTimeInMilliSec;
        pStatistic->iCount = 1;
    }
    else
    {
        pStatistic->iTimeInMilliSec = iNewTotalTime;
        pStatistic->iCount++;
    }

    VMDIR_UNLOCK_MUTEX(bInLock, pStatistic->pmutex);
}
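The overflow guard above relies on a property of unsigned arithmetic: if adding a nonnegative sample makes the running total smaller, the counter wrapped, so both accumulators are reset to start a fresh window. A small self-contained illustration (hypothetical names, not vmdir code):

#include <stdint.h>
#include <stdio.h>

/* Accumulate a sample into *total; on unsigned wraparound, restart the window. */
static void accumulate(uint64_t *total, uint64_t *count, uint64_t sample)
{
    uint64_t newTotal = *total + sample;

    if (newTotal < *total)             /* wrapped past UINT64_MAX */
    {
        *total = sample;               /* reset the window to this sample */
        *count = 1;
    }
    else
    {
        *total = newTotal;
        *count += 1;
    }
}

int main(void)
{
    uint64_t total = UINT64_MAX - 5, count = 100;

    accumulate(&total, &count, 10);    /* forces a wraparound */
    printf("total=%llu count=%llu\n",
           (unsigned long long)total, (unsigned long long)count);
    return 0;
}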
Example #28
/*
 * Caller releases the schema ctx
 */
VOID
VmDirSchemaCtxRelease(
    PVDIR_SCHEMA_CTX    pCtx
    )
{
    BOOLEAN    bInLock = FALSE;
    USHORT     usRefCnt = 0;
    USHORT     usSelfRef = 0;

    if ( pCtx )
    {
        if (  pCtx->pSchema )
        {
            VMDIR_LOCK_MUTEX(bInLock, pCtx->pSchema->mutex);

            pCtx->pSchema->usRefCount--;
            usRefCnt = pCtx->pSchema->usRefCount;
            usSelfRef = pCtx->pSchema->usNumSelfRef;

            VMDIR_UNLOCK_MUTEX(bInLock, pCtx->pSchema->mutex);

            if (usRefCnt == usSelfRef)
            {   // only self reference within pSchema exists, free pSchema itself.
                // self references are established during init before normal references.
                VMDIR_LOG_VERBOSE( VMDIR_LOG_MASK_ALL,
                                "Free unreferenced schema instance (%p)",
                                pCtx->pSchema);
#if 0 /* BUGBUG - reenable this when Purify report is clean */
                VdirSchemaInstanceFree(pCtx->pSchema);
#endif
            }
        }

        VMDIR_SAFE_FREE_STRINGA(pCtx->pszErrorMsg);
        VMDIR_SAFE_FREE_MEMORY(pCtx);
    }

    return;
}
Example #29
// VmDirRemoveDeletedRAsFromCache() Remove RAs from gVmdirReplAgrs that are marked as isDeleted = TRUE
VOID
VmDirRemoveDeletedRAsFromCache()
{
    PVMDIR_REPLICATION_AGREEMENT    pPrevReplAgr = NULL; // or pointing to a valid (non-deleted) RA
    PVMDIR_REPLICATION_AGREEMENT    pCurrReplAgr = NULL;
    PVMDIR_REPLICATION_AGREEMENT    pNextReplAgr = NULL;
    PVMDIR_REPLICATION_AGREEMENT    pNewStartReplAgr = NULL;
    BOOLEAN                         bInReplAgrsLock = FALSE;

    VMDIR_LOCK_MUTEX(bInReplAgrsLock, gVmdirGlobals.replAgrsMutex);

    for (pCurrReplAgr = gVmdirReplAgrs; pCurrReplAgr; pCurrReplAgr = pNextReplAgr)
    {
        pNextReplAgr = pCurrReplAgr->next;

        if (pCurrReplAgr->isDeleted)
        {
            VmDirFreeReplicationAgreement( pCurrReplAgr );
            if (pPrevReplAgr != NULL)
            {
                pPrevReplAgr->next = pNextReplAgr;
            }
        }
        else // valid (non-deleted) RA
        {
            // Set New Start RA pointer, if not already set.
            if (pNewStartReplAgr == NULL)
            {
                pNewStartReplAgr = pCurrReplAgr;
            }
            pPrevReplAgr = pCurrReplAgr;
        }
    }

    gVmdirReplAgrs = pNewStartReplAgr;

    VMDIR_UNLOCK_MUTEX(bInReplAgrsLock, gVmdirGlobals.replAgrsMutex);
}
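VmDirRemoveDeletedRAsFromCache is the standard delete-while-walking pass over a singly linked list: capture the next pointer before freeing, splice around removed nodes through the previous survivor, and remember the first survivor as the new head. A standalone sketch with a hypothetical node_t (no locking shown; the vmdir version does this under replAgrsMutex):

#include <stdlib.h>

typedef struct node {                  /* hypothetical list node */
    struct node *next;
    int          deleted;              /* marked for removal */
} node_t;

/* Remove all nodes marked deleted; returns the new head of the list. */
node_t *prune_deleted(node_t *head)
{
    node_t *prev = NULL;
    node_t *newHead = NULL;
    node_t *curr = head;

    while (curr)
    {
        node_t *next = curr->next;     /* save before freeing */

        if (curr->deleted)
        {
            free(curr);
            if (prev)
            {
                prev->next = next;     /* splice around the removed node */
            }
        }
        else
        {
            if (!newHead)
            {
                newHead = curr;        /* first survivor becomes the new head */
            }
            prev = curr;
        }
        curr = next;
    }
    return newHead;
}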
Example #30
BOOLEAN
VmDirConsumerRoleActive(
    VOID
    )
{
    BOOLEAN   bInLock = FALSE;
    BOOLEAN   bActiveReplCycle = FALSE;

    VMDIR_LOCK_MUTEX(bInLock, gVmdirServerGlobals.pReplDeadlockDetectionVector->pMutex);

    // if consumer role is active, then DD vector will definitely have bvServerObjName key
    if (LwRtlHashMapFindKey(
            gVmdirServerGlobals.pReplDeadlockDetectionVector->pEmptyPageSentMap,
            NULL,
            gVmdirServerGlobals.bvServerObjName.lberbv_val) == 0)
    {
        bActiveReplCycle = TRUE;
    }

    VMDIR_UNLOCK_MUTEX(bInLock, gVmdirServerGlobals.pReplDeadlockDetectionVector->pMutex);

    return bActiveReplCycle;
}