// Take one measurement sample for this stopwatch node: read every metric
// enabled in root->extensions / root->flags into m_node.
// NOTE(review): the sampling order (icache flush, driver counters, time,
// then register counters) appears deliberate -- do not reorder casually.
void	SStopWatch::MarkInternal(Root* root)
{
	// Flush the instruction cache up front so cache state does not skew
	// the readings below (no-arg form -- presumably a whole-cache flush).
	FlushICache();

	// Discard any suspend time accumulated before this mark.
	root->suspendTime = 0;

	// Driver-based performance counters: read NUM_COUNTERS+1 values
	// directly into this node's counter array; any ioctl error lands
	// in root->perfError.
	if ((root->extensions&EXTENSION_PERFORMANCE) != 0) {
		IOSFastIoctl(CounterFD(), PERFCNT_READ_COUNTERS(NUM_COUNTERS+1),
					0, NULL, sizeof(m_node->counters), m_node->counters, &root->perfError);
	}

#if TARGET_PLATFORM == TARGET_PLATFORM_PALMSIM_WIN32
	// Win32 simulator only: cycle Quantify data recording at each mark --
	// close out a running recording, then start a fresh one if requested.
	if ((root->extensions&EXTENSION_QUANTIFY_RUNNING) != 0) {
		QuantifyStopRecordingData();
	}
	if ((root->flags&B_STOP_WATCH_QUANTIFY) != 0) {
		QuantifyStartRecordingData();
		root->extensions |= EXTENSION_QUANTIFY_RUNNING;
	}
#endif

	// Timestamp the mark unless timing was explicitly disabled.
	if ((root->flags&B_STOP_WATCH_NO_TIME) == 0) {
		m_node->time = KALGetTime(B_TIMEBASE_RUN_TIME);
	}

#if TARGET_PLATFORM == TARGET_PLATFORM_DEVICE_ARM
	// "High" mode reads the CPU performance counter registers directly,
	// overwriting the first three counter slots.
	if ((root->extensions&EXTENSION_PERFORMANCE_HIGH) != 0) {
		m_node->counters[0] = ReadPerformanceCounter(0);
		m_node->counters[1] = ReadPerformanceCounter(1);
		m_node->counters[2] = ReadPerformanceCounter(2);
	}
#endif
}
Example #2
0
lib7_val_t   _lib7_runtime_mkexec   (   lib7_state_t*   lib7_state,
                                          lib7_val_t      arg
                                      )
{
    /* _lib7_runtime_mkexec : rw_unt8_vector.Rw_Vector * int -> (chunk -> chunk)
     *
     * Wrap a previously allocated code chunk up as a closure.
     * The chunk's bytes were written through the data path, so the
     * instruction cache must be flushed before they can be executed.
     */

    lib7_val_t  byteSeq    = REC_SEL(    arg, 0 );
    int         entryDelta = REC_SELINT( arg, 1 );

    char*       codeBase   = GET_SEQ_DATAPTR( char, byteSeq );
    Word_t      codeBytes  = GET_SEQ_LEN(           byteSeq );

    lib7_val_t  closure;

    FlushICache( codeBase, codeBytes );

    /* Build a one-slot record whose payload is the entrypoint address: */
    REC_ALLOC1( lib7_state, closure, PTR_CtoLib7( codeBase + entryDelta ) );

    return closure;
}
// Re-arm every measurement source for a (re)started stopwatch and measure
// the fixed cost of taking one complete sample (timeOverhead /
// counterOverhead), so it can later be subtracted from real measurements.
// Child watches share their root's configuration, so only non-child
// watches do any of this work.
void	SStopWatch::RestartInternal(Root* root)
{
	if (m_type != TYPE_CHILD) {
		// (Re)program the driver performance counters from root->perfCounters.
		if ((root->extensions&(EXTENSION_PERFORMANCE|EXTENSION_PERFORMANCE_HIGH)) != 0) {
			IOSFastIoctl(CounterFD(), PERFCNT_CONFIGURE_COUNTERS(NUM_COUNTERS),
						sizeof(root->perfCounters), root->perfCounters, 0, NULL, &root->perfError);
			//if (root->perfError != B_OK) root->extensions &= ~EXTENSION_PERFORMANCE;
		}
		// Restart statistical profiling: stop, optionally clear samples, start.
		if ((root->extensions&EXTENSION_PROFILING) != 0) {
			// Stopping returns an error code if already stopped, so ignore this result.
			IOSFastIoctl(ProfileFD(), profilerStopProfiling, 0, NULL, 0, NULL, &root->profError);
			root->profError = B_OK;

			if ((root->profFlags&B_PROFILE_KEEP_SAMPLES) == 0) {
				// BUGFIX: this and the start call below previously wrote their
				// results into root->perfError, which made the profError checks
				// that follow meaningless (profError had just been forced to
				// B_OK above) -- profiler errors belong in profError.
				IOSFastIoctl(ProfileFD(), profilerResetSamples, 0, NULL, 0, NULL, &root->profError);
			}
			if (root->profError == B_OK) {
				IOSFastIoctl(ProfileFD(), profilerStartProfiling, 0, NULL, 0, NULL, &root->profError);
			}
			// If any profiler step failed, disable profiling for this watch.
			if (root->profError != B_OK) root->extensions &= ~EXTENSION_PROFILING;
		}

		if ((root->flags&B_STOP_WATCH_CLEAR_CACHE) != 0) {
			// A "big" size says to just blindly flush the whole cache.
			FlushICache();
		}

		// Compute overhead -- baseline
		if ((root->flags&B_STOP_WATCH_NO_TIME) == 0) {
			root->timeOverhead = KALGetTime(B_TIMEBASE_RUN_TIME);
		}
		if ((root->extensions&EXTENSION_PERFORMANCE) != 0) {
			memset(root->counterOverhead, 0, sizeof(root->counterOverhead));
			IOSFastIoctl(CounterFD(), PERFCNT_READ_COUNTERS(NUM_COUNTERS+1),
						0, NULL, sizeof(root->counterOverhead), root->counterOverhead, &root->perfError);
		}
#if TARGET_PLATFORM == TARGET_PLATFORM_DEVICE_ARM
		if ((root->extensions&EXTENSION_PERFORMANCE_HIGH) != 0) {
			root->counterOverhead[0] = ReadPerformanceCounter(0);
			root->counterOverhead[1] = ReadPerformanceCounter(1);
			root->counterOverhead[2] = ReadPerformanceCounter(2);
		}
#endif

		// Some internal stuff we always have to do.  CurrentRoot() is
		// presumably called for its side effect -- the original stored its
		// result into an unused local; the dead variable has been removed.
		if ((root->flags&B_STOP_WATCH_NESTING) != 0) {
			CurrentRoot();
		}

		// Compute overhead -- total up measurement overhead
		if ((root->flags&B_STOP_WATCH_NO_TIME) == 0) {
			root->timeOverhead = KALGetTime(B_TIMEBASE_RUN_TIME)-root->timeOverhead;
		}
		if ((root->extensions&EXTENSION_PERFORMANCE) != 0) {
			status_t err;
			IOSFastIoctl(CounterFD(), PERFCNT_READ_COUNTERS(NUM_COUNTERS+1), 0, NULL, sizeof(root->tmpPerf), root->tmpPerf, &err);
			for (int i=0; i<NUM_COUNTERS+1; i++) {
				root->counterOverhead[i] = root->tmpPerf[i] - root->counterOverhead[i];
			}
		}
#if TARGET_PLATFORM == TARGET_PLATFORM_DEVICE_ARM
		if ((root->extensions&EXTENSION_PERFORMANCE_HIGH) != 0) {
			root->tmpPerf[0] = ReadPerformanceCounter(0);
			root->tmpPerf[1] = ReadPerformanceCounter(1);
			root->tmpPerf[2] = ReadPerformanceCounter(2);
			for (int i=0; i<NUM_COUNTERS+1; i++) {
				root->counterOverhead[i] = root->tmpPerf[i] - root->counterOverhead[i];
			}
		}
#endif
	}
}
Example #4
0
/* read_heap:
 *
 * Reconstruct an exported heap image from input buffer `bp` into the live
 * heap of `lib7_state`.  The arenas and big-chunk regions are re-created
 * at (generally) different addresses than they occupied when exported, so
 * this routine also builds a temporary BIBOP describing the OLD address
 * space plus per-generation/per-arena address offsets; repair_heap() and
 * repair_word() then use those tables to relocate every pointer in the
 * loaded data and in the runtime globals/registers.
 *
 * `header`  - heap header already read from the image.
 * `externs` - table used to re-bind references to external symbols.
 */
static void   read_heap   (   inbuf_t*            bp,
                             lib7_heap_hdr_t*   header,
                             lib7_state_t*      lib7_state,
                             lib7_val_t*        externs
                         )
{
    heap_t*		heap = lib7_state->lib7_heap;
    heap_arena_hdr_t	*arenaHdrs, *p, *q;
    int			arenaHdrsSize;
    int			i, j, k;
    long		prevSzB[NUM_ARENAS], size;
    bibop_t		oldBIBOP;		/* BIBOP for the OLD (exported) address space */
    Addr_t		addrOffset[MAX_NUM_GENS][NUM_ARENAS];	/* new base - old base, per arena */
    bo_region_reloc_t	*boRelocInfo;
    addr_table_t		*boRegionTable;

    /* Allocate a BIBOP for the imported
     * heap image's address space:
     */
#ifdef TWO_LEVEL_MAP
#  error two level map not supported
#else
    oldBIBOP = NEW_VEC (aid_t, BIBOP_SZ);
#endif

    /* Read in the big-chunk region descriptors
     * for the old address space:
     */
    {
	int		  size;
	bo_region_info_t* boRgnHdr;

	boRegionTable = MakeAddrTable(BIBOP_SHIFT+1, header->numBORegions);
	size = header->numBORegions * sizeof(bo_region_info_t);
	boRgnHdr = (bo_region_info_t *) MALLOC (size);
	HeapIO_ReadBlock (bp, boRgnHdr, size);

	boRelocInfo = NEW_VEC(bo_region_reloc_t, header->numBORegions);

	for (i = 0;  i < header->numBORegions;  i++) {
	    /* Mark the whole old region as big-chunk space ... */
	    MarkRegion(oldBIBOP,
		(lib7_val_t *)(boRgnHdr[i].baseAddr),
		RND_HEAP_CHUNK_SZB(boRgnHdr[i].sizeB),
		AID_BIGCHUNK(1)
            );
	    /* ... except the first BIBOP page, which holds the region header. */
	    oldBIBOP[BIBOP_ADDR_TO_INDEX(boRgnHdr[i].baseAddr)] = AID_BIGCHUNK_HDR(MAX_NUM_GENS);
	    boRelocInfo[i].firstPage = boRgnHdr[i].firstPage;

	    /* Number of data pages: region size minus the header prefix. */
	    boRelocInfo[i].nPages
                =
                (boRgnHdr[i].sizeB - (boRgnHdr[i].firstPage - boRgnHdr[i].baseAddr))
                >>
                BIGCHUNK_PAGE_SHIFT;

	    boRelocInfo[i].chunkMap = NEW_VEC(bo_reloc_t *, boRelocInfo[i].nPages);

	    /* No per-page relocation info is known yet: */
	    for (j = 0;  j < boRelocInfo[i].nPages;  j++) {
		boRelocInfo[i].chunkMap[j] = NULL;
            } 
	    AddrTableInsert (boRegionTable, boRgnHdr[i].baseAddr, &(boRelocInfo[i]));
	}
	FREE (boRgnHdr);
    }

    /* Read the arena headers: */
    arenaHdrsSize = header->numGens * NUM_CHUNK_KINDS * sizeof(heap_arena_hdr_t);
    arenaHdrs = (heap_arena_hdr_t *) MALLOC (arenaHdrsSize);
    HeapIO_ReadBlock (bp, arenaHdrs, arenaHdrsSize);

    /* Seed the "previous generation size" estimate with the allocation size: */
    for (i = 0;  i < NUM_ARENAS;  i++) {
	prevSzB[i] = heap->allocSzB;
    }

    /* Allocate the arenas and read in the heap image:
     * `p` walks the arena headers, one per chunk kind per generation.
     */
    for (p = arenaHdrs, i = 0;  i < header->numGens;  i++) {
	gen_t	*gen = heap->gen[i];

	/* Compute the space required for this generation,
	 * and mark the oldBIBOP to reflect the old address space:
	 */
	for (q = p, j = 0;  j < NUM_ARENAS;  j++) {
	    MarkRegion (oldBIBOP,
		(lib7_val_t *)(q->info.o.baseAddr),
		RND_HEAP_CHUNK_SZB(q->info.o.sizeB),
		gen->arena[j]->id);
	    /* Size the to-space for the image data plus headroom based on
	     * the previous (younger) generation's size:
	     */
	    size = q->info.o.sizeB + prevSzB[j];
	    if ((j == PAIR_INDEX) && (size > 0))
		size += 2*WORD_SZB;	/* extra slop for the pair arena */
	    gen->arena[j]->tospSizeB = RND_HEAP_CHUNK_SZB(size);
	    prevSzB[j] = q->info.o.sizeB;
	    q++;
	}

        /* Allocate space for the generation: */
	if (NewGeneration(gen) == FAILURE) {
	    Die ("unable to allocated space for generation %d\n", i+1);
        } 
	if (isACTIVE(gen->arena[ARRAY_INDEX])) {
	    NewDirtyVector (gen);
        }

	/* Read in the arenas for this generation
	 * and initialize the address offset table:
	 */
	for (j = 0;  j < NUM_ARENAS;  j++) {

	    arena_t* ap = gen->arena[j];

	    if (p->info.o.sizeB > 0) {

		/* Offset to add to old addresses to get new ones: */
		addrOffset[i][j] = (Addr_t)(ap->tospBase) - (Addr_t)(p->info.o.baseAddr);

		HeapIO_Seek (bp, (long)(p->offset));
		HeapIO_ReadBlock(bp, (ap->tospBase), p->info.o.sizeB);

		ap->nextw  = (lib7_val_t *)((Addr_t)(ap->tospBase) + p->info.o.sizeB);
		ap->oldTop = ap->tospBase;

	    } else if (isACTIVE(ap)) {

		ap->oldTop = ap->tospBase;
	    }

	    if (verbosity > 0)   say(".");

	    p++;
	}

        /* Read in the big-chunk arenas: */
	for (j = 0;  j < NUM_BIGCHUNK_KINDS;  j++) {
	    Addr_t		totSizeB;
	    bigchunk_desc_t	*freeChunk, *bdp;
	    bigchunk_region_t	*freeRegion;
	    bigchunk_hdr_t	*boHdrs;
	    int			boHdrSizeB, index;
	    bo_region_reloc_t   *region;

	    if (p->info.bo.numBOPages > 0) {
		totSizeB = p->info.bo.numBOPages << BIGCHUNK_PAGE_SHIFT;
		freeChunk = BO_AllocRegion (heap, totSizeB);
		freeRegion = freeChunk->region;
		freeRegion->minGen = i;
		/* Enter the NEW region into the live BIBOP. */
		MarkRegion (BIBOP, (lib7_val_t *)freeRegion,
		    HEAP_CHUNK_SZB( freeRegion->heap_chunk ), AID_BIGCHUNK(i));
		BIBOP[BIBOP_ADDR_TO_INDEX(freeRegion)] = AID_BIGCHUNK_HDR(i);

	        /* Read in the big-chunk headers */
		boHdrSizeB = p->info.bo.numBigChunks * sizeof(bigchunk_hdr_t);
		boHdrs = (bigchunk_hdr_t *) MALLOC (boHdrSizeB);
		HeapIO_ReadBlock (bp, boHdrs, boHdrSizeB);

	        /* Read in the big-chunks: */
		HeapIO_ReadBlock (bp, (void *)(freeChunk->chunk), totSizeB);
		if (j == CODE_INDEX) {
		    /* Code was just written through the data path; flush the
		     * instruction cache before any of it can be executed.
		     */
		    FlushICache ((void *)(freeChunk->chunk), totSizeB);
		}

	        /* Set up the big-chunk descriptors 
                 * and per-chunk relocation info:
                 */
		for (k = 0;  k < p->info.bo.numBigChunks;  k++) {
		  /* find the region relocation info for the chunk's region in
		   * the exported heap.  (Scan backwards from the chunk's page
		   * until the region-header page is found.)
		   */
		    for (index = BIBOP_ADDR_TO_INDEX(boHdrs[k].baseAddr);
			!BO_IS_HDR(oldBIBOP[index]);
			index--)
			continue;
		    region = LookupBORegion (boRegionTable, index);

		    /* Allocate the big-chunk descriptor for
		     * the chunk and link it into the list
                     * of big-chunks for its generation.
		     */
		    bdp = AllocBODesc (freeChunk, &(boHdrs[k]), region);
		    bdp->next = gen->bigChunks[j];
		    gen->bigChunks[j] = bdp;
		    ASSERT(bdp->gen == i+1);

		    if (show_code_chunk_comments && (j == CODE_INDEX)) {

		        /* Dump the comment string of the code chunk: */
			char* namestring;
			if ((namestring = BO_GetCodeChunkTag(bdp))) {
			    SayDebug ("[%6d bytes] %s\n", bdp->sizeB, namestring);
                        }
		    }
		}

		if (freeChunk != bdp) {
		    /* There was some extra space left in the region: */
		    ADD_BODESC(heap->freeBigChunks, freeChunk);
		}

		FREE (boHdrs);
	    }

	    if (verbosity > 0)   say(".");

	    p++;
	}
    }

    /* Relocate every pointer inside the freshly loaded heap data: */
    repair_heap (heap, oldBIBOP, addrOffset, boRegionTable, externs);

    /* Adjust the run-time globals
     * that point into the heap:
     */
    *PTR_LIB7toC(lib7_val_t, PervasiveStruct) = repair_word (
	*PTR_LIB7toC(lib7_val_t, PervasiveStruct),
	oldBIBOP, addrOffset, boRegionTable, externs);

    runtimeCompileUnit = repair_word( runtimeCompileUnit, oldBIBOP, addrOffset, boRegionTable, externs );

#ifdef ASM_MATH
    MathVec = repair_word (MathVec, oldBIBOP, addrOffset, boRegionTable, externs);
#endif

    /* Adjust the Lib7 registers to the new address space */
    ASSIGN(Lib7SignalHandler, repair_word (
	DEREF(Lib7SignalHandler), oldBIBOP, addrOffset, boRegionTable, externs)
    );

    lib7_state->lib7_argument = repair_word (
        lib7_state->lib7_argument, oldBIBOP, addrOffset, boRegionTable, externs
    );

    lib7_state->lib7_fate = repair_word (
        lib7_state->lib7_fate, oldBIBOP, addrOffset, boRegionTable, externs
    );

    lib7_state->lib7_closure = repair_word (
        lib7_state->lib7_closure, oldBIBOP, addrOffset, boRegionTable, externs
    );

    lib7_state->lib7_program_counter = repair_word (
        lib7_state->lib7_program_counter, oldBIBOP, addrOffset, boRegionTable, externs
    );

    lib7_state->lib7_link_register = repair_word (
        lib7_state->lib7_link_register, oldBIBOP, addrOffset, boRegionTable, externs
    );

    lib7_state->lib7_exception_fate = repair_word (
        lib7_state->lib7_exception_fate, oldBIBOP, addrOffset, boRegionTable, externs
    );

    lib7_state->lib7_current_thread = repair_word (
        lib7_state->lib7_current_thread, oldBIBOP, addrOffset, boRegionTable, externs
    );

    lib7_state->lib7_calleeSave[0] = repair_word (
        lib7_state->lib7_calleeSave[0], oldBIBOP, addrOffset, boRegionTable, externs
    );

    lib7_state->lib7_calleeSave[1] = repair_word (
        lib7_state->lib7_calleeSave[1], oldBIBOP, addrOffset, boRegionTable, externs
    );

    lib7_state->lib7_calleeSave[2] = repair_word (
        lib7_state->lib7_calleeSave[2], oldBIBOP, addrOffset, boRegionTable, externs
    );

    /* Release storage: */
    for (i = 0; i < header->numBORegions;  i++) {
	bo_reloc_t	*p;	/* NOTE: shadows the outer arena-header cursor `p` */
	/* Consecutive chunkMap pages can point at the same bo_reloc_t;
	 * `p` remembers the last pointer freed so adjacent duplicates
	 * are not freed twice.
	 */
	for (p = NULL, j = 0;  j < boRelocInfo[i].nPages;  j++) {
	    if ((boRelocInfo[i].chunkMap[j] != NULL)
	    && (boRelocInfo[i].chunkMap[j] != p)) {
		FREE (boRelocInfo[i].chunkMap[j]);
		p = boRelocInfo[i].chunkMap[j];
	    }
	}
    }
    FreeAddrTable (boRegionTable, FALSE);
    FREE (boRelocInfo);
    FREE (arenaHdrs);
    FREE (oldBIBOP);

    /* Reset the sweep_nextw pointers: */
    for (i = 0;  i < heap->numGens;  i++) {
	gen_t	*gen = heap->gen[i];
	for (j = 0;  j < NUM_ARENAS;  j++) {
	    arena_t		*ap = gen->arena[j];
	    if (isACTIVE(ap))
		ap->sweep_nextw = ap->nextw;
	}
    }

}                                                       /* read_heap. */