Example #1
static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t i, len, nr_pages;
	struct page *page, *last = NULL;

	len = ps->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		return r;

	nr_pages = sectors_to_pages(ps->chunk_size);

	/*
	 * We lock the pages for ps->area into memory since
	 * they'll be doing a lot of io.  We also chain them
	 * together ready for dm-io.
	 */
	for (i = 0; i < nr_pages; i++) {
		page = vmalloc_to_page(ps->area + (i * PAGE_SIZE));
		LockPage(page);
		if (last)
			last->list.next = &page->list;
		last = page;
	}

	return 0;
}
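The comment above explains that every page backing ps->area is locked because it will do heavy I/O. For symmetry, a teardown path would walk the same pages, unlock each one, and then release the vmalloc'd area. The sketch below is a hypothetical counterpart, not taken from the dm code this example comes from; it reuses UnlockPage(), sectors_to_pages() and the pstore fields exactly as they appear above and assumes the same (2.4-era) kernel interfaces.

/* Hypothetical counterpart to alloc_area(): unlock the pages that were
 * locked above, then free the vmalloc'd metadata area. */
static void free_area(struct pstore *ps)
{
	size_t i, nr_pages;
	struct page *page;

	nr_pages = sectors_to_pages(ps->chunk_size);

	for (i = 0; i < nr_pages; i++) {
		page = vmalloc_to_page(ps->area + (i * PAGE_SIZE));
		UnlockPage(page);	/* pairs with LockPage() in alloc_area() */
	}

	vfree(ps->area);
	ps->area = NULL;
}
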
Example #2
/**
Write data through a single page. If the page is not found or is no longer valid, read the media into the
iActive page first, then write the data through the iActive page.
@param  aPos        the starting media position to write to.
@param  aData       the address in RAM of the data to be written.
@param  aDataLen    the length of the data to be written.
@pre    aDataLen    must be no more than the page size.
*/
void CDynamicDirCache::WriteDataOntoSinglePageL(TInt64 aPos, const TUint8* aData, TUint32 aDataLen)
    {
    ASSERT(aDataLen <= iPageSizeInBytes);
    //-- the data section is in the cache page entirely, take data directly from the cache
    TDynamicDirCachePage* pPage = FindPageByPos(aPos);
    if (pPage)
        {
        // lock page before writing,
        if (LockPage(pPage) != NULL)
            {
            //-- update cache
            Mem::Copy(pPage->PtrInPage(aPos), aData, aDataLen);
            }
        else
            {
            ASSERT(pPage->PageType() == TDynamicDirCachePage::EUnlocked);
            DeQueue(pPage);
            LookupTblRemove(pPage->StartPos());
            DecommitPage(pPage);
            delete pPage;
            pPage = NULL;
            }
        }

    // if page not found or page data not valid anymore, use active page to read data in
    if (!pPage)
        {
        pPage = UpdateActivePageL(aPos);
        //-- update cache
        Mem::Copy(pPage->PtrInPage(aPos), aData, aDataLen);
        }

    // make sure the page is unlocked after use
    if (pPage->PageType() == TDynamicDirCachePage::EUnlocked)
        {
        UnlockPage(pPage);
        }

    // always make the written page MRU
    MakePageMRU(aPos);
    return;
    }
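The @pre above means a caller holding an arbitrarily long buffer has to split the write at page boundaries before calling WriteDataOntoSinglePageL() (the same applies to ReadDataFromSinglePageL() below). The following is a minimal caller-side sketch of that splitting; it relies only on CalcPageStartPos() and iPageSizeInBytes from the example code, while WriteDataL() itself is a hypothetical wrapper (it would need a matching declaration in the class) and not the class's actual write entry point.

// Hypothetical wrapper: split an arbitrary-length write into page-bounded
// chunks so that the @pre of WriteDataOntoSinglePageL() always holds.
void CDynamicDirCache::WriteDataL(TInt64 aPos, const TUint8* aData, TUint32 aDataLen)
    {
    while (aDataLen > 0)
        {
        // bytes remaining in the page that contains aPos
        const TUint32 bytesToPageEnd =
            (TUint32)(CalcPageStartPos(aPos) + iPageSizeInBytes - aPos);
        const TUint32 chunk = aDataLen < bytesToPageEnd ? aDataLen : bytesToPageEnd;

        WriteDataOntoSinglePageL(aPos, aData, chunk);

        aPos     += chunk;
        aData    += chunk;
        aDataLen -= chunk;
        }
    }
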
Example #3
/**
Read data from a single page. If the page is not found or is no longer valid, read the media into the
iActive page first.
@param  aPos    the starting media position to read from.
@param  aLength the length of the content to be read.
@param  aDes    the descriptor to receive the content.
@pre    aLength must be no more than the page size.
*/
void CDynamicDirCache::ReadDataFromSinglePageL(TInt64 aPos, TInt aLength, TDes8& aDes)
    {
    //-- the data section is in the cache page entirely, take data directly from the cache
    TDynamicDirCachePage* pPage = FindPageByPos(aPos);
    if (pPage)
        {
        // lock page before reading,
        if (LockPage(pPage) != NULL)
            {
            // read data
            aDes.Copy(pPage->PtrInPage(aPos), aLength);

            // if the page came from the unlocked queue, insert it as the last page of the
            //  locked queue. This avoids the excessive locking and unlocking operations that
            //  are otherwise highly likely on subsequent reads.
            if (pPage->PageType() == TDynamicDirCachePage::EUnlocked)
                {
                DeQueue(pPage);
                MakePageLastLocked(pPage);
                }
            }
        else    // page locking failed
            {
            ASSERT(pPage->PageType() == TDynamicDirCachePage::EUnlocked);
            DeQueue(pPage);
            LookupTblRemove(pPage->StartPos());
            DecommitPage(pPage);
            delete pPage;
            pPage = NULL;
            }
        }

    if (!pPage)
        {
        // if page not found or page data not valid anymore, use active page to read data in
        pPage = UpdateActivePageL(aPos);
        // read data
        aDes.Copy(pPage->PtrInPage(aPos), aLength);
        }

    }
Example #4
/**
Implementation of the pure virtual function.
@see    MWTCacheInterface::PosCached()
*/
TUint32 CDynamicDirCache::PosCached(const TInt64& aPos, TInt64& aCachedPosStart)
    {
    const TInt64 pageStartMedPos = CalcPageStartPos(aPos);

    // only search for the page in the lookup table
    // NOTE: we do not take the active page into account here,
    // to avoid pulling in further pages recursively
    TDynamicDirCachePage* pPage = LookupTblFind(pageStartMedPos);

    // then, if the page is on the Unlocked Page Queue, check whether it is still valid
    if (pPage && pPage->PageType() == TDynamicDirCachePage::EUnlocked)
        {
        if (LockPage(pPage) != NULL)
            {
//          __PRINT1(_L("CDynamicDirCache::PosCached: page(0x%lx) found on Unlocked Queue!"), aPos);
            // have to unlock it before returning, otherwise there will be a memory leak
            UnlockPage(pPage);
            aCachedPosStart = pPage->StartPos();
            return pPage->PageSizeInBytes();
            }
        else    // if the unlocked page is not valid anymore, remove it
            {
            DeQueue(pPage);
            LookupTblRemove(pPage->StartPos());
            DecommitPage(pPage);
            delete pPage;
            pPage = NULL;
            }
        }
    // otherwise the page is either already locked or is the valid active page
    else if (pPage)
        {
        __PRINT1(_L("CDynamicDirCache::PosCached: page(0x%lx) on Locked Queue!"), aPos);
        aCachedPosStart = pPage->StartPos();
        return pPage->PageSizeInBytes();
        }

    // page is not found or not valid anymore
    return 0;
    }
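A typical use of PosCached() is to probe whether a read can be satisfied from the cache before touching the media. The helper below is purely illustrative: it relies only on the signature shown above, and its name and the whole-range check are assumptions rather than part of the real file-server code.

// Hypothetical helper: returns ETrue if the whole range [aPos, aPos + aLen)
// lies inside a page currently held by the cache.
TBool IsRangeCached(MWTCacheInterface& aCache, TInt64 aPos, TUint32 aLen)
    {
    TInt64 cachedStart = 0;
    const TUint32 cachedLen = aCache.PosCached(aPos, cachedStart);
    if (cachedLen == 0)
        return EFalse;      // page not cached, or no longer valid

    // PosCached() reports the page containing aPos; make sure the
    // requested range does not run past the end of that page.
    return (aPos + aLen) <= (cachedStart + cachedLen);
    }
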
Example #5
static int pag_lock(unsigned long addr)
{
	unsigned long page;
	unsigned long kva;

	kva = uvirt_to_kva(pgd_offset(current->mm, addr), addr);
	if(kva)
	{
	    lock_it:
	    page = uvirt_to_pa((unsigned long)addr);
	    LockPage(virt_to_page(__va(page)));
	    SetPageReserved(virt_to_page(__va(page)));
	}
	else
	{
	    if (copy_from_user(&page, (char *)addr, 1))	/* try to fault the page in */
	        return EPERM;
	    kva = uvirt_to_kva(pgd_offset(current->mm, addr), addr);
	    if(kva) goto lock_it;
	    else return EPERM;
	}
	return 0;
}
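A page locked this way normally needs a matching release that clears the reserved bit and unlocks it again. The sketch below is a hypothetical pag_unlock() counterpart; it reuses uvirt_to_pa() from the example (whose failure behaviour is assumed, not verified) together with the 2.4-era ClearPageReserved()/UnlockPage() helpers, and it has not been checked against the driver this example was taken from.

/* Hypothetical counterpart to pag_lock(): clear the reserved flag and
 * unlock the page backing a user-space address. */
static int pag_unlock(unsigned long addr)
{
	unsigned long page;

	page = uvirt_to_pa((unsigned long)addr);
	if (!page)
		return EPERM;	/* same (positive) error convention as pag_lock() */

	ClearPageReserved(virt_to_page(__va(page)));
	UnlockPage(virt_to_page(__va(page)));
	return 0;
}
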
Example #6
/*
 * _hash_getlock() -- Acquire an lmgr lock.
 *
 * 'whichlock' should be zero to acquire the split-control lock, or the
 * block number of a bucket's primary bucket page to acquire the per-bucket
 * lock.  (See README for details of the use of these locks.)
 *
 * 'access' must be HASH_SHARE or HASH_EXCLUSIVE.
 */
void
_hash_getlock(Relation rel, BlockNumber whichlock, int access)
{
	if (USELOCKING(rel))
		LockPage(rel, whichlock, access);
}
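Every lock taken through _hash_getlock() has to be released with a matching lmgr call. In the same (pre-10) hash access method the release side is essentially a mirror image built on UnlockPage(); the sketch below is reconstructed from the acquire side rather than quoted, so treat the exact routine as an illustration.

/*
 * _hash_droplock() -- sketch of the matching release for _hash_getlock().
 */
void
_hash_droplock(Relation rel, BlockNumber whichlock, int access)
{
	if (USELOCKING(rel))
		UnlockPage(rel, whichlock, access);
}
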
Example #7
/*
 * Move tuples from pending pages into the regular GIN structure.
 *
 * At first glance this looks completely non-crash-safe. But if we crash
 * after posting entries to the main index and before removing them from the
 * pending list, it's okay: when we redo the posting later on, nothing bad
 * will happen.
 *
 * fill_fsm indicates that ginInsertCleanup should add deleted pages to the
 * FSM; otherwise the caller is responsible for putting deleted pages into
 * the FSM.
 *
 * If stats isn't null, we count deleted pending pages in its counts.
 */
void
ginInsertCleanup(GinState *ginstate, bool full_clean,
				 bool fill_fsm, IndexBulkDeleteResult *stats)
{
	Relation	index = ginstate->index;
	Buffer		metabuffer,
				buffer;
	Page		metapage,
				page;
	GinMetaPageData *metadata;
	MemoryContext opCtx,
				oldCtx;
	BuildAccumulator accum;
	KeyArray	datums;
	BlockNumber blkno,
				blknoFinish;
	bool		cleanupFinish = false;
	bool		fsm_vac = false;
	Size		workMemory;
	bool		inVacuum = (stats == NULL);

	/*
	 * We want to prevent concurrent cleanup processes. For that we lock the
	 * metapage in exclusive mode using a LockPage() call. Nobody else will
	 * use that lock on the metapage, so concurrent insertion into the
	 * pending list remains possible.
	 */

	if (inVacuum)
	{
		/*
		 * We are called from [auto]vacuum/analyze or gin_clean_pending_list()
		 * and we want to wait for any concurrent cleanup to finish.
		 */
		LockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock);
		workMemory =
			(IsAutoVacuumWorkerProcess() && autovacuum_work_mem != -1) ?
			autovacuum_work_mem : maintenance_work_mem;
	}
	else
	{
		/*
		 * We are called from a regular insert; if we see a concurrent
		 * cleanup, just exit in the hope that the concurrent process will
		 * clean up the pending list.
		 */
		if (!ConditionalLockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock))
			return;
		workMemory = work_mem;
	}

	metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
	LockBuffer(metabuffer, GIN_SHARE);
	metapage = BufferGetPage(metabuffer);
	metadata = GinPageGetMeta(metapage);

	if (metadata->head == InvalidBlockNumber)
	{
		/* Nothing to do */
		UnlockReleaseBuffer(metabuffer);
		UnlockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock);
		return;
	}

	/*
	 * Remember a tail page to prevent infinite cleanup if other backends add
	 * new tuples faster than we can clean up.
	 */
	blknoFinish = metadata->tail;

	/*
	 * Read and lock head of pending list
	 */
	blkno = metadata->head;
	buffer = ReadBuffer(index, blkno);
	LockBuffer(buffer, GIN_SHARE);
	page = BufferGetPage(buffer);

	LockBuffer(metabuffer, GIN_UNLOCK);

	/*
	 * Initialize.  All temporary space will be in opCtx
	 */
	opCtx = AllocSetContextCreate(CurrentMemoryContext,
								  "GIN insert cleanup temporary context",
								  ALLOCSET_DEFAULT_MINSIZE,
								  ALLOCSET_DEFAULT_INITSIZE,
								  ALLOCSET_DEFAULT_MAXSIZE);

	oldCtx = MemoryContextSwitchTo(opCtx);

	initKeyArray(&datums, 128);
	ginInitBA(&accum);
	accum.ginstate = ginstate;

	/*
	 * At the top of this loop, we have pin and lock on the current page of
	 * the pending list.  However, we'll release that before exiting the loop.
	 * Note we also have pin but not lock on the metapage.
	 */
	for (;;)
	{
		Assert(!GinPageIsDeleted(page));

		/*
		 * Have we reached the page that we remembered as the tail when we
		 * started our cleanup?  If the caller asked us to clean up the whole
		 * pending list, ignore that old tail; we will work until the list
		 * becomes empty.
		 */
		if (blkno == blknoFinish && full_clean == false)
			cleanupFinish = true;

		/*
		 * read page's datums into accum
		 */
		processPendingPage(&accum, &datums, page, FirstOffsetNumber);

		vacuum_delay_point();

		/*
		 * Is it time to flush memory to disk?	Flush if we are at the end of
		 * the pending list, or if we have a full row and memory is getting
		 * full.
		 */
		if (GinPageGetOpaque(page)->rightlink == InvalidBlockNumber ||
			(GinPageHasFullRow(page) &&
			 (accum.allocatedMemory >= workMemory * 1024L)))
		{
			ItemPointerData *list;
			uint32		nlist;
			Datum		key;
			GinNullCategory category;
			OffsetNumber maxoff,
						attnum;

			/*
			 * Unlock the current page to increase performance. Changes to the
			 * page will be detected later by comparing maxoff after the
			 * memory flush completes.
			 */
			maxoff = PageGetMaxOffsetNumber(page);
			LockBuffer(buffer, GIN_UNLOCK);

			/*
			 * Moving the collected data into the regular structure can take a
			 * significant amount of time, so run it without holding the lock
			 * on the pending list.
			 */
			ginBeginBAScan(&accum);
			while ((list = ginGetBAEntry(&accum,
								  &attnum, &key, &category, &nlist)) != NULL)
			{
				ginEntryInsert(ginstate, attnum, key, category,
							   list, nlist, NULL);
				vacuum_delay_point();
			}

			/*
			 * Lock the whole list to remove pages
			 */
			LockBuffer(metabuffer, GIN_EXCLUSIVE);
			LockBuffer(buffer, GIN_SHARE);

			Assert(!GinPageIsDeleted(page));

			/*
			 * While we left the page unlocked, more entries might have been
			 * added to it.  If so, process them immediately.  There shouldn't
			 * be very many, so we don't worry about the fact that we're doing
			 * this with the exclusive lock held. The insertion algorithm
			 * guarantees that inserted row(s) will not continue onto the next
			 * page.
			 * NOTE: intentionally no vacuum_delay_point in this loop.
			 */
			if (PageGetMaxOffsetNumber(page) != maxoff)
			{
				ginInitBA(&accum);
				processPendingPage(&accum, &datums, page, maxoff + 1);

				ginBeginBAScan(&accum);
				while ((list = ginGetBAEntry(&accum,
								  &attnum, &key, &category, &nlist)) != NULL)
					ginEntryInsert(ginstate, attnum, key, category,
								   list, nlist, NULL);
			}

			/*
			 * Remember next page - it will become the new list head
			 */
			blkno = GinPageGetOpaque(page)->rightlink;
			UnlockReleaseBuffer(buffer);		/* shiftList will do exclusive
												 * locking */

			/*
			 * Remove the pages we have just read from the pending list; at
			 * this point all of their content is in the regular structure.
			 */
			shiftList(index, metabuffer, blkno, fill_fsm, stats);

			/* At this point, some pending pages have been freed up */
			fsm_vac = true;

			Assert(blkno == metadata->head);
			LockBuffer(metabuffer, GIN_UNLOCK);

			/*
			 * If we removed the whole pending list, or we have cleaned up to
			 * the tail that we remembered when we started, just exit.
			 */
			if (blkno == InvalidBlockNumber || cleanupFinish)
				break;

			/*
			 * release memory used so far and reinit state
			 */
			MemoryContextReset(opCtx);
			initKeyArray(&datums, datums.maxvalues);
			ginInitBA(&accum);
		}
		else
		{
			blkno = GinPageGetOpaque(page)->rightlink;
			UnlockReleaseBuffer(buffer);
		}

		/*
		 * Read next page in pending list
		 */
		vacuum_delay_point();
		buffer = ReadBuffer(index, blkno);
		LockBuffer(buffer, GIN_SHARE);
		page = BufferGetPage(buffer);
	}

	UnlockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock);
	ReleaseBuffer(metabuffer);

	/*
	 * As pending list pages can have a high churn rate, it is desirable to
	 * recycle them immediately to the FreeSpace Map when ordinary backends
	 * clean the list.
	 */
	if (fsm_vac && fill_fsm)
		IndexFreeSpaceMapVacuum(index);


	/* Clean up temporary space */
	MemoryContextSwitchTo(oldCtx);
	MemoryContextDelete(opCtx);
}
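The comment near the top of ginInsertCleanup() describes the key trick: a heavyweight page lock on the metapage is used purely as a "cleanup in progress" mutex, leaving buffer-level locking free for concurrent pending-list inserts. The pair of helpers below isolates that pattern as a sketch; the function names are invented for illustration, and only LockPage(), ConditionalLockPage() and UnlockPage() with the arguments shown above are taken from the example.

/*
 * Sketch of the metapage interlock used above. Vacuum-style callers wait
 * for any concurrent cleanup; regular inserters only try the lock and
 * back off if somebody else is already cleaning the pending list.
 */
static bool
begin_pending_list_cleanup(Relation index, bool wait)
{
	if (wait)
	{
		LockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock);
		return true;
	}
	return ConditionalLockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock);
}

static void
end_pending_list_cleanup(Relation index)
{
	UnlockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock);
}
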
Example #8
/**
Implementation of the pure virtual function.
@see    MWTCacheInterface::MakePageMRU()
*/
void CDynamicDirCache::MakePageMRU(TInt64 aPos)
    {
    __PRINT1(_L("MakePageMRU (%lx)"), aPos);
//  __PRINT4(_L("Current Cache State: iLockedQCount=%d, iUnlockedQCount=%d, iLookupTbl=%d, iMaxSizeInPages=%d"), iLockedQCount, iUnlockedQCount, iLookupTable.Count(), iMaxSizeInPages);
    // check the MRU page first; if it is already the MRU page, we can return immediately
    TInt64 pageStartMedPos = CalcPageStartPos(aPos);
    if (!iLockedQ.IsEmpty())
        {
        if (iLockedQ.First()->StartPos() == pageStartMedPos)
            {
            return;
            }
        }

    TDynamicDirCachePage* pPage = FindPageByPos(aPos);
    if (pPage)
        {
        ASSERT(pPage->IsValid());
        // lock the page before making it MRU
        if (pPage->PageType() == TDynamicDirCachePage::EUnlocked)
            {
            ASSERT(!pPage->IsLocked());
            if (LockPage(pPage) == NULL)
                {
                DeQueue(pPage);
                LookupTblRemove(pPage->StartPos());
                DecommitPage(pPage);
                delete pPage;
                pPage = NULL;
                }
            }
        else
            {
            // error checking: page should either be locked or active
            ASSERT(LockPage(pPage) != NULL);
            }
        }

    // if page not found or page data not valid anymore, use active page to read data
    if (!pPage)
        {
        TRAPD(err, pPage = UpdateActivePageL(aPos));
        if (err != KErrNone)
            {
            // problem occurred reading active page, return immediately.
            return;
            }
        }

    // by now, the page is either locked or active page
    ASSERT(pPage && pPage->IsValid() && pPage->IsLocked());

    switch (pPage->PageType())
        {
        // if the page is the active page, we will need to find a new active page for replacement
        case TDynamicDirCachePage::EActivePage:
            {
            TDynamicDirCachePage* newAP = NULL;
            // if there is more cache room available, try to create a new page first
            if (!CacheIsFull())
                {
                // allocate and lock a new page
                TRAPD(err, newAP = AllocateAndLockNewPageL(0));
                // if any error occurs, return immediately
                if (err != KErrNone)
                    {
                    // unlock the page that was originally on the unlocked queue before returning
                    if (pPage->PageType() == TDynamicDirCachePage::EUnlocked)
                        {
                        UnlockPage(pPage);
                        }
                    return;
                    }

                if (newAP)
                    {
                    // replace the active page with the new page
                    newAP->SetPageType(TDynamicDirCachePage::EActivePage);
                    iActivePage = newAP;
                    }
                }

            // if cache has grown to its max size, or new page allocation failed
            if (!newAP)
                {
                // try to lock the LRU page on the unlocked page queue first
                if (!iUnlockedQ.IsEmpty())
                    {
                    newAP = iUnlockedQ.Last();
                    ASSERT(newAP->IsValid());
                    if (LockPage(newAP) != NULL)
                        {
                        // deque, reset pos, set new type
                        DeQueue(newAP);
                        LookupTblRemove(newAP->StartPos());
                        ResetPagePos(newAP);
                        newAP->SetPageType(TDynamicDirCachePage::EActivePage);
                        // replace active page
                        iActivePage = newAP;
                        }
                    // if locking the LRU page from the unlocked queue failed,
                    // delete it
                    else
                        {
                        DeQueue(newAP);
                        LookupTblRemove(newAP->StartPos());
                        DecommitPage(newAP);
                        delete newAP;
                        newAP = NULL;
                        }
                    }
                }

            // if we still have not found a new active page,
            // grab the LRU page from the Locked Page Queue to use as the active page
            if (!newAP)
                {
                ASSERT(!iLockedQ.IsEmpty());
                newAP = iLockedQ.Last();
                // deque, reset pos, set new type
                DeQueue(newAP);
                LookupTblRemove(newAP->StartPos());
                ResetPagePos(newAP);
                newAP->SetPageType(TDynamicDirCachePage::EActivePage);
                // replace active page
                iActivePage = newAP;
                }

            // we should always be able to find a locked page for active page
            ASSERT(newAP != NULL);

            // make original page (i.e. former active page) MRU
            // add onto locked queue
            AddFirstOntoQueue(pPage, TDynamicDirCachePage::ELocked);
            // add it to the lookup table, as the active page is not in the lookup table originally
            LookupTblAdd(pPage);
            // check cache limit
            CheckThresholds();
            return;
            }
        case TDynamicDirCachePage::EUnlocked:
            {
            // if the page was originally on the Unlocked Page Queue, remove it from the Unlocked Page Queue,
            // add it to the Locked Page Queue and make it MRU
            DeQueue(pPage);
            AddFirstOntoQueue(pPage, TDynamicDirCachePage::ELocked);
            // check cache limit
            CheckThresholds();
            return;
            }
        case TDynamicDirCachePage::ELocked:
            {
            // otherwise the page was on Locked Page Queue, make it MRU
            // no need to check cache limit
            if (pPage != iLockedQ.First())
                {
                DeQueue(pPage);
                AddFirstOntoQueue(pPage, TDynamicDirCachePage::ELocked);
                return;
                }
            break;
            }
        default:
            ASSERT(0);
        }
    }