Example #1
0
void Program::FinalizeSegments () {
    header.a_magic = ZMAGIC;
    header.a_text = RoundUp(textAlloc, PAGSIZ);
    header.a_data = RoundUp(dataAlloc, PAGSIZ);
    header.a_bss = bssAlloc - (header.a_data - dataAlloc);
#ifdef sun
    header.a_machtype = M_68020;
    header.a_entry = PAGSIZ + sizeof(header);
    UpdateSegment(textStart, header.a_entry, N_TEXT);
    UpdateSegment(dataStart, RoundUp(textAlloc, SEGSIZ), N_DATA);
    segPos[N_TEXT] = sizeof(header);
    segPos[N_DATA] = header.a_text;
#endif
#ifdef vax
    UpdateSegment(textStart, 0, N_TEXT);
    UpdateSegment(dataStart, header.a_text, N_DATA);
    segPos[N_TEXT] = PAGSIZ;
    segPos[N_DATA] = segPos[N_TEXT] + header.a_text;
#endif
    UpdateSegment(bssStart, dataStart + dataAlloc, N_BSS);
    segPos[N_BSS] = segPos[N_DATA] + dataAlloc;
    segPos[ANON_SEG] = segPos[N_BSS] + bssAlloc;
    int newAnonStart = AlignTo(WORDSIZE/BYTESIZE, bssStart + bssAlloc);
    if (anonStart != newAnonStart) {
	anonStart = newAnonStart;
	segStart[ANON_SEG] = newAnonStart;
	anonList->NeedToReloc();
    }
}
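RoundUp and AlignTo are used above but not defined in this example; a minimal sketch of plausible definitions (assuming positive sizes and a positive multiple; the linker's real helpers may differ):

// Hedged sketch only: plausible definitions of the rounding helpers
// FinalizeSegments() relies on, not the original linker's code.
static inline unsigned int RoundUp(unsigned int value, unsigned int multiple) {
    // Round value up to the next multiple of 'multiple'.
    return ((value + multiple - 1) / multiple) * multiple;
}

static inline unsigned int AlignTo(unsigned int alignment, unsigned int value) {
    // Same rounding with the alignment given first, matching the
    // AlignTo(WORDSIZE/BYTESIZE, ...) call above.
    return RoundUp(value, alignment);
}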
Example #2
0
SortTerms ComputeSortTerms(int numSortThreads, int valuesPerThread, 
	bool useTransList, int numBits, int numElements, int numSMs) {

	SortTerms terms;
	
	int numValues = numSortThreads * valuesPerThread;
	terms.numSortBlocks = DivUp(numElements, numValues);
	terms.numCountBlocks = DivUp(terms.numSortBlocks, NumCountWarps);
	terms.countValuesPerThread = numValues / WarpSize;

	int numBuckets = 1 << numBits;
	terms.countSize = 4 * std::max(WarpSize, numBuckets * NumCountWarps) * 
		terms.numCountBlocks;

	int numChannels = numBuckets / 2;
	int numSortBlocksPerCountWarp = 
		std::min(NumCountWarps, WarpSize / numChannels);
	terms.numHistRows = DivUp(terms.numSortBlocks, numSortBlocksPerCountWarp);
	terms.numHistBlocks = std::min(numSMs, 
		DivUp(terms.numHistRows, NumHistWarps));

	int bucketCodeBlockSize = numBuckets;
	if(useTransList) bucketCodeBlockSize += numBuckets + WarpSize;
	terms.scatterStructSize = RoundUp(bucketCodeBlockSize, WarpSize);
	
	// hist3 kernel (for 1 - 5 radix bits) may write two blocks of codes at a 
	// time, even if only one block is required. To support this, round the
	// number of sort blocks up to a multiple of 2.
	terms.bucketCodesSize = 4 * RoundUp(terms.numSortBlocks, 2) * 
		terms.scatterStructSize;

	terms.numEndKeys = RoundUp(numElements, numValues) - numElements;

	return terms;
}
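ComputeSortTerms leans on DivUp (integer ceiling division) throughout; a minimal sketch, plus a worked data point using made-up launch parameters:

// Hedged sketch of DivUp as assumed above; not necessarily the library's version.
inline int DivUp(int count, int divisor) {
	return (count + divisor - 1) / divisor;   // ceiling division for non-negative count
}

// Worked example (hypothetical sizes): with 256 sort threads and 8 values per
// thread, numValues = 2048, so 100000 elements give DivUp(100000, 2048) = 49
// sort blocks and RoundUp(100000, 2048) - 100000 = 352 padding end keys.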
Example #3
0
void
GCInit(void)
{
	init_int(&minOffRequest, 1 * TILT_PAGESIZE);
	init_int(&maxOffRequest, 8 * TILT_PAGESIZE);
	init_int(&minOnRequest, 1 * TILT_PAGESIZE);
	init_int(&copyPageSize, TILT_PAGESIZE / 2);
	init_int(&copyCheckSize, TILT_PAGESIZE / 2);
	init_int(&copyChunkSize, 256);
	minOffRequest = RoundUp(minOffRequest, TILT_PAGESIZE);
	minOnRequest = RoundUp(minOnRequest, TILT_PAGESIZE);

	reset_statistic(&minorSurvivalStatistic);
	reset_statistic(&heapSizeStatistic);
	reset_statistic(&majorSurvivalStatistic);

	switch (collector_type) {
	case Semispace:
		GCFun = GC_Semi;
		GCReleaseFun = GCRelease_Semi;
		GCPollFun = NULL;
		GCInit_Semi();
		break;
	case Generational:
		GCFun = GC_Gen;
		GCReleaseFun = GCRelease_Gen;
		GCPollFun = NULL;
		GCInit_Gen();
		break;
	case SemispaceParallel:
		GCFun = GC_SemiPara;
		GCReleaseFun = GCRelease_SemiPara;
		GCPollFun = GCPoll_SemiPara;
		GCInit_SemiPara();
		break;
	case GenerationalParallel:
		GCFun = GC_GenPara;
		GCReleaseFun = GCRelease_GenPara;
		GCPollFun = GCPoll_GenPara;
		GCInit_GenPara();
		break;
	case SemispaceConcurrent:
		GCFun = GC_SemiConc;
		GCReleaseFun = GCRelease_SemiConc;
		GCPollFun = GCPoll_SemiConc;
		GCInit_SemiConc();
		break;
	case GenerationalConcurrent:
		GCFun = GC_GenConc;
		GCReleaseFun = GCRelease_GenConc;
		GCPollFun = GCPoll_GenConc;
		GCInit_GenConc();
		break;
	default:
		DIE("bad collector type");
	}
	if (forceMirrorArray)
		mirrorArray = 1;
}
Example #4
0
BOOL SaveLibraryToFile(LPVOID lpBase, TCHAR* lpOutFileName)
{
   DWORD numberOfBytesWritten;
   HANDLE hOutFile = CreateFile( lpOutFileName, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, 0, NULL);

   if(hOutFile == INVALID_HANDLE_VALUE)
       return FALSE;
   // DOS header
   PIMAGE_DOS_HEADER dos_header;

   dos_header = (PIMAGE_DOS_HEADER) lpBase;

   if (dos_header->e_magic != IMAGE_DOS_SIGNATURE || dos_header->e_lfanew == 0)
   {
       CloseHandle(hOutFile);
       return FALSE;
   }

   if( !WriteFile( hOutFile, lpBase, dos_header->e_lfanew, &numberOfBytesWritten, NULL))
   {
       CloseHandle(hOutFile);   // don't leak the file handle on a failed write
       return FALSE;
   }

   // PE header
   PIMAGE_NT_HEADERS32 pe_header;   
   pe_header = (PIMAGE_NT_HEADERS32)((DWORD) lpBase + (DWORD)dos_header->e_lfanew );
   if (pe_header->Signature != IMAGE_NT_SIGNATURE)
   {
       CloseHandle(hOutFile);
       return FALSE;
   }
   DWORD dwSizeHeader = pe_header->FileHeader.SizeOfOptionalHeader +
                        sizeof(pe_header->FileHeader) + sizeof(pe_header->Signature); // +
                       //sizeof(IMAGE_SECTION_HEADER)*pe_header->FileHeader.NumberOfSections;

   PIMAGE_SECTION_HEADER sections = (PIMAGE_SECTION_HEADER)((DWORD)(lpBase) + dwSizeHeader + dos_header->e_lfanew);

   if (sections[0].PointerToRawData == 0)
	WriteFile( hOutFile, pe_header, 0x400 - dos_header->e_lfanew, &numberOfBytesWritten,NULL);
   else
	   WriteFile( hOutFile, pe_header, sections[0].PointerToRawData - dos_header->e_lfanew, &numberOfBytesWritten,NULL);

   // write sections...
   for(unsigned short i = 0; i < pe_header->FileHeader.NumberOfSections; i++)
   {
       LPVOID addr = (LPVOID)((DWORD)lpBase + sections[i].VirtualAddress);

	   // experiment for ".text"
	   if (strcmp((char *) sections[i].Name, ".text") == 0)
	   {	//
		   WriteFile( hOutFile, addr, RoundUp(sections[i].Misc.VirtualSize, pe_header->OptionalHeader.FileAlignment), &numberOfBytesWritten, NULL);
	   }
	   else
			WriteFile( hOutFile, addr, RoundUp(sections[i].SizeOfRawData, pe_header->OptionalHeader.FileAlignment), &numberOfBytesWritten, NULL);
   }

   CloseHandle(hOutFile);
   return TRUE;
} 
Example #5
0
void IntelH264Decoder::CreateWorkSurface(mfxFrameSurface1& surf)
{
	// Setup the "work surface". I don't understand why doesn't do this itself (malloc control is all I can assume).
	memset(&surf, 0, sizeof(surf));
	surf.Info = VideoParam.mfx.FrameInfo;
	surf.Info.FourCC = MFX_FOURCC_NV12; // IMSDK docs say NV12 is "Native Format" for IMSDK

	surf.Data.PitchLow = RoundUp(VideoParam.mfx.FrameInfo.Width, 64);
	surf.Data.Y = (mfxU8 *) AlignedAlloc(surf.Data.PitchLow * RoundUp(VideoParam.mfx.FrameInfo.Height, 64) * 3 / 2, 16);
	surf.Data.UV = surf.Data.Y + (surf.Data.PitchLow * RoundUp(VideoParam.mfx.FrameInfo.Height, 64));
}
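For reference, the arithmetic above assumes the NV12 layout: a full-size Y plane followed by an interleaved UV plane at half height. A small sketch of the same computation (nv12BufferSize is a hypothetical helper, and the 64-pixel padding is this example's choice, not a documented IMSDK requirement):

// Hedged sketch of the buffer-size math used by CreateWorkSurface().
static size_t nv12BufferSize(unsigned width, unsigned height) {
	size_t pitch = RoundUp(width, 64);    // bytes per row of the Y plane
	size_t rows  = RoundUp(height, 64);   // padded row count, as above
	return pitch * rows * 3 / 2;          // Y plane plus half-size UV plane
	// e.g. 1920x1080 -> 1920 * 1088 * 3 / 2 = 3,133,440 bytes
}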
Example #6
0
sortStatus_t SORTAPI sortAllocData(sortEngine_t engine, sortData_t data) {
	if(data->valueCount > 6) return SORT_STATUS_INVALID_VALUE;

	DeviceMemPtr mem[7 * 2];
	uint maxElements = RoundUp(data->maxElements, 2048);
	int count = (-1 == data->valueCount) ? 1 : data->valueCount;

	CUresult result = CUDA_SUCCESS;

	CUdeviceptr* memPtr = data->keys;
	for(int i = 0; (i < 2 + 2 * count) && (CUDA_SUCCESS == result); ++i) {
		if(!memPtr[i]) {
			result = engine->context->MemAlloc<uint>(maxElements, &mem[i]);
			if(CUDA_SUCCESS != result) return SORT_STATUS_DEVICE_ALLOC_FAILED;
		}
	}

	if(CUDA_SUCCESS != result) return SORT_STATUS_DEVICE_ALLOC_FAILED;

	for(int i(0); i < 7 * 2; ++i)
		if(mem[i]) {
			memPtr[i] = mem[i]->Handle();
			mem[i].release();
		}

	data->parity = 0;
	return SORT_STATUS_SUCCESS;
}
Example #7
0
sortStatus_t SORTAPI sortCreateData(sortEngine_t engine, int maxElements, 
	int valueCount, sortData_t* data) {

	if(valueCount > 6) return SORT_STATUS_INVALID_VALUE;

	std::auto_ptr<sortData_d> d(new sortData_d);

	// sortData_d
	d->maxElements = RoundUp(maxElements, 2048);
	d->numElements = maxElements;
	d->valueCount = valueCount;
	d->firstBit = 0;
	d->endBit = 32;
	d->preserveEndKeys = false;
	d->earlyExit = false;
	d->keys[0] = d->keys[1] = 0;
	d->values1[0] = d->values1[1] = 0;
	d->values2[0] = d->values2[1] = 0;
	d->values3[0] = d->values3[1] = 0;
	d->values4[0] = d->values4[1] = 0;
	d->values5[0] = d->values5[1] = 0;
	d->values6[0] = d->values6[1] = 0;
	
	sortStatus_t status = sortAllocData(engine, d.get());
	if(SORT_STATUS_SUCCESS != status) return status;

	*data = d.release();
	return SORT_STATUS_SUCCESS;
}
Example #8
0
char* AllocateBufferSpace(const DWORD bufSize, const DWORD bufCount, DWORD& totalBufferSize, DWORD& totalBufferCount)
{
	SYSTEM_INFO systemInfo;

	::GetSystemInfo(&systemInfo);

	const unsigned __int64 granularity = systemInfo.dwAllocationGranularity;

	const unsigned __int64 desiredSize = static_cast<unsigned __int64>(bufSize) * bufCount;	// widen first so the multiply cannot overflow DWORD

	unsigned __int64 actualSize = RoundUp(desiredSize, granularity);

	if (actualSize > UINT_MAX )
	{
		actualSize = (UINT_MAX / granularity) * granularity;
	}

	totalBufferCount =  min(bufCount, static_cast<DWORD>(actualSize / bufSize));

	totalBufferSize = static_cast<DWORD>(actualSize);

	char* pBuffer = reinterpret_cast<char*>(VirtualAllocEx(GetCurrentProcess(), 0, totalBufferSize, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE));

	if (pBuffer == 0)
	{
		printf_s("VirtualAllocEx Error: %d\n", GetLastError());
		exit(0);
	}

	return pBuffer;
}
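A quick worked example of the rounding above, using made-up buffer parameters and the common 64 KB allocation granularity (the real value is whatever GetSystemInfo reports):

// Hedged usage sketch for AllocateBufferSpace(); sizes are illustrative only.
// bufSize = 1000, bufCount = 100  ->  desiredSize = 100,000 bytes
// RoundUp(100000, 65536) = 131,072 bytes committed
// totalBufferCount = min(100, 131072 / 1000) = 100
DWORD totalSize = 0, totalCount = 0;
char* pool = AllocateBufferSpace(1000, 100, totalSize, totalCount);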
Example #9
0
size_t DSSMReader<ElemType>::RecordsToRead(size_t mbStartSample, bool tail)
{
    assert(mbStartSample >= m_epochStartSample);
    // determine how far ahead we need to read
    bool randomize = Randomize();
    // need to read to the end of the next minibatch
    size_t epochSample = mbStartSample;
    epochSample %= m_epochSize;

    // determine number left to read for this epoch
    size_t numberToEpoch = m_epochSize - epochSample;
    // we will take either a minibatch or the number left in the epoch
    size_t numberToRead = min(numberToEpoch, m_mbSize);
    if (numberToRead == 0 && !tail)
        numberToRead = m_mbSize;

    if (randomize)
    {
        size_t randomizeSweep = RandomizeSweep(mbStartSample);
        // if first read or read takes us to another randomization range
        // we need to read at least randomization range records
        if (randomizeSweep != m_randomordering.CurrentSeed()) // the range has changed since last time
        {
            numberToRead = RoundUp(epochSample, m_randomizeRange) - epochSample;
            if (numberToRead == 0 && !tail)
                numberToRead = m_randomizeRange;
        }
    }
    return numberToRead;
}
Example #10
0
void
Heap_Resize(Heap_t* h, long newSize, int reset)
{
	long usedSize;
	long maxSize = (h->mappedTop - h->bottom) * sizeof(val_t);
	long oldWriteableSize = (h->writeableTop - h->bottom) * sizeof(val_t);
	long newSizeRound = RoundUp(newSize, TILT_PAGESIZE);

	if (newSize > maxSize) DIE("resized heap too big");
	if (reset) {
		h->cursor = h->bottom;
	}
	usedSize = (h->cursor - h->bottom) * sizeof(val_t);
	assert(usedSize <= newSize);
	h->top = h->bottom + (newSize / sizeof(val_t));

	if (newSizeRound > oldWriteableSize) {
		my_mprotect(6,(caddr_t) h->writeableTop,
			newSizeRound - oldWriteableSize,
			PROT_READ | PROT_WRITE);
		h->writeableTop = h->bottom + newSizeRound / sizeof(val_t);
	}
	else if (paranoid && newSizeRound < oldWriteableSize) {
		my_mprotect(7,(caddr_t) (h->bottom +
			newSizeRound / sizeof(val_t)),
			oldWriteableSize - newSizeRound, PROT_NONE);
		h->writeableTop = h->bottom + newSizeRound / sizeof(val_t);
	}
	assert(h->bottom <= h->cursor);
	assert(h->cursor <= h->top);
	assert(h->top <= h->writeableTop);
	assert(h->writeableTop <= h->mappedTop);
}
Example #11
0
Heap_t*
Heap_Alloc(int MinSize, int MaxSize)
{
	static int heap_count = 0;
	Heap_t *res = &(Heaps[heap_count++]);
	int maxsize_pageround = RoundUp(MaxSize,TILT_PAGESIZE);
	res->size = maxsize_pageround;
	res->bottom = (mem_t) my_mmap(maxsize_pageround,
		PROT_READ | PROT_WRITE);
	res->cursor = res->bottom;
	res->top = res->bottom + MinSize / (sizeof (val_t));
	res->writeableTop = res->bottom +
		maxsize_pageround / (sizeof (val_t));
	res->mappedTop = res->writeableTop;
	SetRange(&(res->range), res->bottom, res->mappedTop);
	res->valid = 1;
	res->bitmap = paranoid ? CreateBitmap(maxsize_pageround / 4) : NULL;
	res->freshPages = (int *)emalloc(
		DivideUp(maxsize_pageround / TILT_PAGESIZE, 32) * sizeof(int));
	memset((int *)res->freshPages, 0,
		DivideUp(maxsize_pageround / TILT_PAGESIZE, 32) * sizeof(int));
	assert(res->bottom != (mem_t) -1);
	assert(heap_count < NumHeap);
	assert(MaxSize >= MinSize);
	/*
		Try to lock down pages and force page-table to be initialized;
		otherwise, PadHeapArea can often take 0.1 - 0.2 ms.  Even with
		this, there are occasional (but far fewer) page table misses.
	*/
	if (geteuid() == 0)
		(void)mlock((caddr_t) res->bottom, maxsize_pageround);
	return res;
}
Example #12
0
static void _UpdateRange(_Inout_ OVS_PI_RANGE* pPiRange, SIZE_T offset, SIZE_T size)
{
    SIZE_T startPos = 0;
    SIZE_T endPos = 0;

    OVS_CHECK(pPiRange);

    startPos = RoundDown(offset, sizeof(UINT64));
    endPos = RoundUp(offset + size, sizeof(UINT64));

    if (!pPiRange)
    {
        return;
    }

    //i.e. in the beginning
    if (pPiRange->startRange == pPiRange->endRange)
    {
        pPiRange->startRange = startPos;
        pPiRange->endRange = endPos;
    }
    else
    {
        //i.e. if it was set before
        if (pPiRange->startRange > startPos)
        {
            pPiRange->startRange = startPos;
        }

        if (pPiRange->endRange < endPos)
        {
            pPiRange->endRange = endPos;
        }
    }
}
Example #13
0
/* internal routines */
static void 
init_dispinfo(struct dispinfo *d)
{
    int total_squares;
    ulong h;

    d->win_width = WIN_WIDTH;
    d->win_height = WIN_HEIGHT;

    d->pixel_xmult = PIX_SIZE;
    d->pixel_ymult = PIX_SIZE;

    total_squares = WinSize(d) / PixelSize(d);
    d->bytes_vp = d->alloc_unit;

    /* compute how many bytes of memory one square on the display should 
     * represent so you have the smallest granularity without the window size 
     * exceeding the initial height.
     */
    while (total_squares < DivideRoundedUp(d->arena_size, d->bytes_vp))
	d->bytes_vp *= 2;
    
    h = DivideRoundedUp(d->arena_size, d->bytes_vp) * PixelSize(d);
    d->win_height = DivideRoundedUp(h, d->win_width);
    d->win_height = RoundUp(d->win_height, d->pixel_ymult);
    
    sprintf(d->title + strlen(d->title), " (1 square = %d bytes)", 
	    d->bytes_vp);
}
Example #14
0
bool iupPlotTickIterLog::AdjustRange (double &ioMin, double &ioMax) const
{
  double theBase = mAxis->mLogBase;
  if (mAxis->mMaxDecades > 0)
    ioMin = ioMax/iupPlotExp (mAxis->mMaxDecades, theBase);

  if (ioMin == 0 && ioMax == 0) 
  {
    ioMin = kLogMinClipValue;
    ioMax = 1.0;
  }
  if (ioMin <= 0 || ioMax<=0)
    return false;
  
  ioMin = RoundDown(ioMin*kLittleIncrease);
  ioMax = RoundUp(ioMax*kLittleDecrease);

  if (ioMin<kLogMinClipValue)
    ioMin = kLogMinClipValue;
  
  if (mAxis->mMaxDecades > 0)
    ioMin = ioMax/iupPlotExp (mAxis->mMaxDecades, theBase);
  
  return true;
}
Example #15
0
int Round(double x, Rounding rounding) {
  switch (rounding) {
    case ROUND_UP  : return RoundUp     (x);
    case ROUND_DOWN: return RoundDown   (x);
    default        : return RoundNearest(x);
  }
}
Example #16
0
/* compute (xUL,yUL) and nrRows, nrCols from some coordinates
 * RcomputeExtend computes the parameters needed to create a raster map
 * from minimum and maximum x and y coordinates, projection information,
 * cellsize and units. The resulting parameters are computed so that the
 * smallest raster map that includes the two given coordinates can be
 * created, assuming a default angle of 0.
 * Which coordinate is the maximum or minimum is
 * determined by the function itself.
 */
void RcomputeExtend(
	REAL8 *xUL,     /* write-only, resulting xUL */
	REAL8 *yUL,     /* write-only, resulting yUL */
	size_t *nrRows, /* write-only, resulting nrRows */
	size_t *nrCols, /* write-only, resulting nrCols */
	double x_1,      /* first x-coordinate */ 
	double y_1,      /* first y-coordinate */
	double x_2,      /* second x-coordinate */
	double y_2,      /* second y-coordinate */
	CSF_PT projection, /* required projection */
	REAL8 cellSize, /* required cellsize, > 0 */
	double rounding) /* assure that (xUL/rounding), (yUL/rounding),
	                  * (xLL/rounding) and (yLL/rounding) will
	                  * all be integer values > 0
	                  */
{
	/*
	 * xUL ______
	 *     |    |
	 *     |    |
	 *     |    |
	 *     ------
	 */
	double yLL,xUR = x_1 > x_2 ? x_1 : x_2;
	*xUL = x_1 < x_2 ? x_1 : x_2;
	*xUL = RoundDown(*xUL, rounding); /* Round down */
	xUR  = RoundUp(   xUR, rounding); /* Round up */
	POSTCOND(*xUL <= xUR);
	*nrCols = (size_t)ceil((xUR - *xUL)/cellSize);
	if (projection == PT_YINCT2B)
	{
		yLL  = y_1 > y_2 ? y_1 : y_2;  /* highest value at bottom */
		*yUL = y_1 < y_2 ? y_1 : y_2;  /* lowest value at top */
		*yUL = RoundDown(*yUL, rounding);
		yLL  = RoundUp(   yLL, rounding);
	}
	else
	{
		yLL  = y_1 < y_2 ? y_1 : y_2;  /* lowest value at bottom */
		*yUL = y_1 > y_2 ? y_1 : y_2;  /* highest value at top */
		*yUL = RoundUp(  *yUL, rounding);
		yLL  = RoundDown( yLL, rounding);
	}
	*nrRows = (size_t)ceil(fabs(yLL - *yUL)/cellSize);
}
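RcomputeExtend assumes RoundDown/RoundUp variants that snap a coordinate to a multiple of the rounding unit; a minimal sketch, assuming rounding > 0 (not the CSF library's actual implementation):

#include <math.h>

/* Hedged sketch of the coordinate rounding assumed above. */
static double RoundDown(double v, double rounding)
{
	return floor(v / rounding) * rounding;   /* e.g. RoundDown(12.7, 0.5) == 12.5 */
}

static double RoundUp(double v, double rounding)
{
	return ceil(v / rounding) * rounding;    /* e.g. RoundUp(12.7, 0.5) == 13.0 */
}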
Example #17
0
// [A]
void* Thread::Alloc(size_t size) {
  ASSERT(size > 0);
  auto const alloc_size = RoundUp(size, Arch::Align_Object);
  for (;;) {
    if (auto const pv = object_area_->Alloc(alloc_size)) {
      return pv;
    }
    object_area_ = Mm::GetDataArea(Mm::Area::ScanType_Record, alloc_size);
  }
}
Example #18
0
/*
	Interface Routines
*/
ptr_t
alloc_bigintarray(int elemLen, int initVal, int ptag)
{
	ArraySpec_t spec;
	spec.type = IntField;
	spec.elemLen = elemLen;
	spec.byteLen = RoundUp(elemLen, 4);	/* excluding tag */
	spec.intVal = initVal;
	return alloc_bigdispatcharray(&spec);
}
Example #19
0
static void 
resize_display(struct dispinfo *d, int new_width, int new_height)
{
    int total_squares;
    uint h;
    char *cp;

    /* round down to even multiple of square size */
    new_width = RoundDown(new_width, d->pixel_xmult);
    new_height = RoundDown(new_height, d->pixel_ymult);

    if (new_width <= 0 || new_height <= 0)
	return;

    total_squares = (new_width * new_height) / PixelSize(d);
    d->bytes_vp = d->alloc_unit;

    /* compute how many bytes of memory one square on the display should 
     * represent, so you have the smallest granularity without the window size
     * exceeding the initial height.
     */
    while (total_squares < DivideRoundedUp(d->arena_size, d->bytes_vp))
	d->bytes_vp *= 2;
    
    h = DivideRoundedUp(d->arena_size, d->bytes_vp) * PixelSize(d);
    new_height = DivideRoundedUp(h, new_width);
    new_height = RoundUp(new_height, d->pixel_ymult);

    cp = strchr(d->title, '=');
    if (cp)
	sprintf(cp, "= %d bytes)", d->bytes_vp);

    if (!(d->memory_pixel = DXReAllocate((Pointer)d->memory_pixel, 
					 new_width * new_height))) {
	xerror = 1;
	return;
    }
	
    d->win_width = new_width;
    d->win_height = new_height;
    memset (d->memory_pixel, color_free, d->win_width * d->win_height);

    /* don't let X free our pixel buffer while destroying old image */
    d->memory_image->data = NULL;
    XDestroyImage(d->memory_image);

    d->memory_image = XCreateImage(d->disp,
			     XDefaultVisual(d->disp, XDefaultScreen(d->disp)),
			     8, ZPixmap, 0, (char *) d->memory_pixel,
			     d->win_width, d->win_height, 8, 0);

    XResizeWindow(d->disp, d->wind, d->win_width, d->win_height);
    XStoreName(d->disp, d->wind, d->title);

}
Example #20
0
int
reducedToExpanded(int size, double rate, int phases)
{
	if (phases == 0)
		return size;
	size = RoundUp(size, minOnRequest);
	size = size + (2 * NumProc) * minOnRequest;
	size = (int) (size / (1.0 - computeReserve(rate, phases)));
	size = RoundDown(size, minOnRequest);
	return size;
}
Example #21
0
/*
	Compute the new (reduced) heap size given the liveness ratio and
	amount of live data.
*/
long
ComputeHeapSize(long live, double curRatio, double rate, int phases)
{
	double rawWhere = (live - (1024 * MinRatioSize)) /
		(1024.0 * (MaxRatioSize - MinRatioSize));
	double where = (rawWhere > 1.0)
		? 1.0
		: ((rawWhere < 0.0) ? 0.0 : rawWhere);
	double newratio = MinRatio + where * (MaxRatio - MinRatio);
	long newReducedSize = RoundUp(live / newratio, 1024);
	long newExpandedSize = reducedToExpanded(newReducedSize, rate, phases);
	/*
		maxReducedSize and minReducedSize are not reduced if relaxed
		is true or if phases is already zero.
	*/
	long maxReducedSize = expandedToReduced(MaxHeapByte, rate,
		relaxed ? 0 : phases);
	long minReducedSize = expandedToReduced(MinHeapByte, rate,
		relaxed ? 0 : phases);

	if (live > maxReducedSize) {
		fprintf(stderr,"GC error: Amount of live data (%ld) exceeds maxiumum heap size (%ld)\n",
			live, maxReducedSize);
		DIE("out of memory");
	}
	if (newExpandedSize > MaxHeapByte) {
		double constrainedRatio = ((double)live) / maxReducedSize;
		if (collectDiag >= 1 || constrainedRatio > 0.95)
			fprintf(stderr,"GC warning: There is %ld kb of live data.  The desired new heap size is %ld kb but is downwardly constrained to %ld kb.\n",
				live / 1024, newReducedSize / 1024,
				maxReducedSize / 1024);
		if (constrainedRatio >= 1.00)
			fprintf(stderr,"GC warning: New liveness ratio is too high %lf.\n",
				constrainedRatio);
		else if (constrainedRatio > 0.90)
			fprintf(stderr,"GC warning: New liveness ratio is dangerously high %lf.\n",
				constrainedRatio);
		newReducedSize = maxReducedSize;
		newExpandedSize = MaxHeapByte;
	}
	if (newExpandedSize < MinHeapByte) {
		if (collectDiag >= 1)
			fprintf(stderr,"GC warning: There is %ld kb of live data.  The desired new heap size is %ld kb but is upwardly constrained to %ld kb.\n",
				live / 1024, newReducedSize / 1024,
				minReducedSize / 1024);
		newReducedSize = minReducedSize;
		newExpandedSize = MinHeapByte;
	}
	assert(newExpandedSize >= MinHeapByte);
	assert(newExpandedSize <= MaxHeapByte);
	assert(newReducedSize >= live);
	return newReducedSize;
}
Example #22
0
double initGrid(double low, double step, int logFlag)
{
    double ratio, x;
    double RoundUp(), stepGrid();

    gridNJuke = gridCurJuke = 0;
    gridJuke[gridNJuke++] = 0.0;

    if (logFlag) {
	ratio = pow(10.0, step);
	gridBase = floor(low);
	gridStep = ceil(step);
	if (ratio <= 3.0) {
	    if (ratio > 2.0) {
		ADD_GRID(3.0);
	    } else if (ratio > 1.333) {
		ADD_GRID(2.0);	ADD_GRID(5.0);
	    } else if (ratio > 1.25) {
		ADD_GRID(1.5);	ADD_GRID(2.0);	ADD_GRID(3.0);
		ADD_GRID(5.0);	ADD_GRID(7.0);
	    } else {
		for (x = 1.0; x < 10.0 && (x+.5)/(x+.4) >= ratio; x += .5) {
		    ADD_GRID(x + .1);	ADD_GRID(x + .2);
		    ADD_GRID(x + .3);	ADD_GRID(x + .4);
		    ADD_GRID(x + .5);
		}
		if (floor(x) != x) ADD_GRID(x += .5);
		for ( ; x < 10.0 && (x+1.0)/(x+.5) >= ratio; x += 1.0) {
		    ADD_GRID(x + .5);	ADD_GRID(x + 1.0);
		}
		for ( ; x < 10.0 && (x+1.0)/x >= ratio; x += 1.0) {
		    ADD_GRID(x + 1.0);
		}
		if (x == 7.0) {
		    gridNJuke--;
		    x = 6.0;
		}
		if (x < 7.0) {
		    ADD_GRID(x + 2.0);
		}
		if (x == 10.0) gridNJuke--;
	    }
	    x = low - gridBase;
	    for (gridCurJuke = -1; x >= gridJuke[gridCurJuke+1]; gridCurJuke++){
	    }
	}
    } else {
	gridStep = RoundUp(step);
	gridBase = floor(low / gridStep) * gridStep;
    }
    return(stepGrid());
}
Example #23
0
/* bytesToAlloc does not include alignment */
mem_t
AllocFromHeap(Heap_t* heap, Thread_t* thread, int bytesToAlloc, Align_t align)
{
	mem_t start, cursor, limit;
	int padBytes = bytesToAlloc + ((align == NoWordAlign) ? 0 : 4);
	int pagePadBytes = RoundUp(padBytes, TILT_PAGESIZE);
	GetHeapArea(fromSpace, pagePadBytes, &start, &cursor, &limit);
	if (start == NULL)
		return NULL;
	cursor = AlignMemoryPointer(cursor, align);
	PadHeapArea(cursor + bytesToAlloc / sizeof(val_t), limit);
	return cursor;
}
Example #24
0
void NPC::CreatePath() {
	mNPCData.mPathIndex = 0;
	int x = (RoundUp((int) mNPCData.mPosition.x, 32) / 32);
	int y = (RoundUp((int) mNPCData.mPosition.y, 32) / 32);
	int startIndex = x + (y * mMapWidth);

	WalkableFunctor walkF = WalkableFunctor();
	ICostFunctor costF = ICostFunctor();
	IHeuristicFunctor heuristicF = IHeuristicFunctor();
	mGraph->SearchAStar(startIndex, mNPCData.mPathEnd, walkF, costF,
			heuristicF);

	NodeList nodeList = mGraph->GetPath();
	mPath.clear();

	for (NodeList::iterator iter = nodeList.begin(); iter != nodeList.end();
			++iter) {
		mPath.push_back((*iter)->GetPosition());
	}

	mReachedDestination = false;
}
Example #25
0
    /*!
     * Create an instance of class <T>.
     */
    static T * Create()
    {
        // We use placement new() for two reasons:
        // -  To create a never-destructed instance of <T>. This allows using this instance 
        //    at any time, even during the module's destruction.
        // -  We could use the regular "new" operator here instead, but placement new is advantageous
        //    because some clients limit the amount of memory that can be dynamically allocated at
        //    static initialization time (e.g. clients that replace the "malloc" implementation).
        //    Allocating the data statically like this for a singleton class has no real disadvantage.

        static FUND::UINT8 storage[sizeof(T) + FUND_ALIGNMENT_OF(T)];
        return new((void *)RoundUp(&(storage[0]), FUND_ALIGNMENT_OF(T))) T();
    }
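The RoundUp call above rounds the raw storage address up to the type's alignment boundary; a minimal sketch of that pointer variant, assuming the alignment is a power of two (the framework's real overload may differ):

#include <cstddef>
#include <cstdint>

// Hedged sketch of address rounding for the placement-new pattern above.
inline void* RoundUp(void* p, std::size_t alignment) {
    std::uintptr_t v = reinterpret_cast<std::uintptr_t>(p);
    v = (v + alignment - 1) & ~(alignment - 1);   // snap up to the next alignment boundary
    return reinterpret_cast<void*>(v);
}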
Example #26
0
/**
    Write data to the media through the directory cache.
    
    @param  aPos    linear media position to start writing with
    @param  aDes    data to write
*/
void CMediaWTCache::WriteL(TInt64 aPos,const TDesC8& aDes)
    {

#ifdef _DEBUG
    if(iCacheDisabled)
        {//-- cache is disabled for debug purposes
        User::LeaveIfError(iDrive.WriteCritical(aPos,aDes));
        return;
        }
#endif //_DEBUG

          TUint32 dataLen = aDes.Size();
    const TUint8* pData   = aDes.Ptr();
    const TUint32 PageSz  = PageSize(); //-- cache page size

    //-- find out if aPos is in cache. If not, find a spare page and read data there
    TInt nPage = FindOrGrabReadPageL(aPos);
    CWTCachePage* pPage = iPages[nPage];

    const TUint32 bytesToPageEnd = (TUint32)(pPage->iStartPos+PageSize() - aPos); //-- number of bytes from aPos to the end of the page
//    __PRINT5(_L("CMediaWTCache::WriteL: aPos=%lx, aLength=%x, page:%lx, pageSz:%x, bytesToPageEnd=%x"), aPos, dataLen, pPage->iStartPos, PageSz, bytesToPageEnd);
    if(dataLen <= bytesToPageEnd)
        {//-- data section completely fits to the cache page
        Mem::Copy(pPage->PtrInCachePage(aPos), pData, dataLen);   //-- update cache

        //-- make small write a multiple of a write granularity size (if it is used at all)
        //-- this is not the best way to use write granularity, but we would need to refactor cache pages code to make it normal
        TPtrC8 desBlock(aDes);
        
        if(iWrGranularityLog2)
            {//-- write granularity is used
            const TInt64  newPos = (aPos >> iWrGranularityLog2) << iWrGranularityLog2; //-- round position down to the write granularity size
            TUint32 newLen = (TUint32)(aPos - newPos)+dataLen;  //-- round block size up to the write granularity size
            newLen = RoundUp(newLen, iWrGranularityLog2);
       
            const TUint8* pd = pPage->PtrInCachePage(newPos);
            desBlock.Set(pd, newLen);
            aPos = newPos;
            }


        //-- write data to the media
        const TInt nErr = iDrive.WriteCritical(aPos, desBlock); 
        if(nErr != KErrNone)
            {//-- some serious problem occurred during writing, invalidate cache.
            InvalidateCache();
            User::Leave(nErr);
            }

        }
Example #27
0
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool executable) {
  const size_t msize = RoundUp(requested, getpagesize());
  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);

  if (mbase == MAP_FAILED) {
    LOG(StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}
Example #28
0
void GLIX::AssignOffsets()
	{
	int Offset = 0;
	for (int h = 0; h < m_HashTableSize; ++h)
		{
		for (SEQDATA *i = m_HashTable[h]; i; i = i->Next)
			{
			i->Offset = Offset;
			Offset += i->Length;
			const int NewOffset = RoundUp(Offset, m_Pad, SEQMAP_BLOCKSIZE);
			i->RoundedLength = i->Length + NewOffset - Offset;
			Offset = NewOffset;
			}
		}
	m_GlobalLength = Offset;
	}
Example #29
0
void LDesktopPluginSpace::addDesktopPlugin(QString plugID){
  //This is used for generic plugins (QWidget-based)
  if(DEBUG){ qDebug() << "Adding Desktop Plugin:" << plugID; }
  LDPlugin *plug = NewDP::createPlugin(plugID, this);
    if(plug==0){ return; } //invalid plugin
    //plug->setAttribute(Qt::WA_TranslucentBackground);
    plug->setWhatsThis(plugID);
  //Now get the saved geometry for the plugin
  QRect geom = plug->gridGeometry(); //grid coordinates
  if(geom.isNull()){
    //Try the old format (might be slight drift between sessions if the grid size changes)
    geom = plug->loadPluginGeometry(); //in pixel coords
    if(!geom.isNull()){ geom = geomToGrid(geom); } //convert to grid coordinates
  }
  if(DEBUG){ qDebug() << "Saved plugin geom:" << geom << plugID; }
  //Now determine the position to put it
  if(geom.isNull()){
    //No previous location - need to calculate initial geom
    QSize sz = plug->defaultPluginSize(); //in grid coordinates
    geom.setSize(sz);
    //if an applauncher - add from top-left, otherwise add in from bottom-right
    if(plugID.startsWith("applauncher")){ geom = findOpenSpot(geom.width(), geom.height() ); }
    else{ geom = findOpenSpot(geom.width(), geom.height(), RoundUp(this->height()/GRIDSIZE), RoundUp(this->width()/GRIDSIZE), true); }
  }else if(!ValidGeometry(plugID, gridToGeom(geom)) ){
    //Find a new location for the plugin (saved location is invalid)
    geom = findOpenSpot(geom.width(), geom.height(), geom.y(), geom.x(), false); //try to get it within the same general area first
  }
  if(geom.x() < 0 || geom.y() < 0){
    qDebug() << "No available space for desktop plugin:" << plugID << " - IGNORING";
    delete plug;
  }else{
    if(DEBUG){ qDebug() <<  " - New Plugin Geometry (grid):" << geom; }
    //Now place the item in the proper spot/size
    plug->setGridGeometry(geom); //save for later
    MovePlugin(plug, gridToGeom(geom));
    //plug->setGeometry( gridToGeom(geom) );
    plug->show();
    if(DEBUG){ qDebug() << " - New Plugin Geometry (px):" << plug->geometry(); }
    ITEMS << plug;
    connect(plug, SIGNAL(StartMoving(QString)), this, SLOT(StartItemMove(QString)) );
    connect(plug, SIGNAL(StartResizing(QString)), this, SLOT(StartItemResize(QString)) );
    connect(plug, SIGNAL(RemovePlugin(QString)), this, SLOT(RemoveItem(QString)) );
    connect(plug, SIGNAL(IncreaseIconSize()), this, SIGNAL(IncreaseIcons()) );
    connect(plug, SIGNAL(DecreaseIconSize()), this, SIGNAL(DecreaseIcons()) );
    connect(plug, SIGNAL(CloseDesktopMenu()), this, SIGNAL(HideDesktopMenu()) );
  }
}
Example #30
0
double MgpuBenchmark(searchEngine_t engine, int count, CuDeviceMem* values,
	searchType_t type, CuDeviceMem* btree, int numIterations, int numQueries,
	CuDeviceMem* keys, CuDeviceMem* indices, const T* valuesHost,
	const T* keysHost) {

	CuEventTimer timer;
	timer.Start();
	
	int size = (SEARCH_TYPE_INT32 == type) ? 4 : 8;
	int offset = 0;
	for(int it(0); it < numIterations; ++it) {
		offset += RoundUp(numQueries, 32);
		if(offset + numQueries > MaxQuerySize) offset = 0;

		searchStatus_t status = searchKeys(engine, count, type, 
			values->Handle(), SEARCH_ALGO_LOWER_BOUND,
			keys->Handle() + offset * size, numQueries, btree->Handle(),
			indices->Handle());
		if(SEARCH_STATUS_SUCCESS != status) {
			printf("FAIL!\n");
			exit(0);
		}
	}

	double elapsed = timer.Stop();
	double throughput = (double)numQueries * numIterations / elapsed;

	// Verify the results for the last set of queries run.
	std::vector<uint> results(numQueries);
	indices->ToHost(results);

	for(int i(0); i < numQueries; ++i) {
		const T* lower = std::lower_bound(valuesHost, valuesHost + count, 
			keysHost[offset + i]);
		if((lower - valuesHost) != results[i]) {
			printf("Failure in MGPU Search.\n");
			exit(0);
		}
	}

	return throughput;
}