/* Pop one block from the global lock-free free list and return it zeroed.
 * Returns 0 when the free list is empty (all profile blocks in use). */
static profile_block_t* _profile_allocate_block( void )
{
	//Grab block from free list, avoiding ABA issues by
	//using high 16 bit as a loop counter
	profile_block_t* block;
	uint32_t free_block_tag, free_block, next_block_tag;
	do
	{
		//Low 16 bits of the tag are the block index, high 16 bits a generation counter
		free_block_tag = atomic_load32( &_profile_free );
		free_block = free_block_tag & 0xffff;
		//Candidate new head: the popped block's child, stamped with a fresh loop id
		next_block_tag = GET_BLOCK( free_block )->child;
		next_block_tag |= ( atomic_incr32( &_profile_loopid ) & 0xffff ) << 16;
	} while( free_block && !atomic_cas32( &_profile_free, next_block_tag, free_block_tag ) );

	if( !free_block )
	{
		//Warn only once per process (CAS flips 0 -> 1 exactly once)
		static atomic32_t has_warned = {0};
		if( atomic_cas32( &has_warned, 1, 0 ) )
			log_error( 0, ERROR_OUT_OF_MEMORY, ( _profile_num_blocks < 65535 ) ? "Profile blocks exhausted, increase profile memory block size" : "Profile blocks exhausted, decrease profile output wait time" );
		return 0;
	}

	//Hand out a fully zeroed block so all link/data fields start clean
	block = GET_BLOCK( free_block );
	memset( block, 0, sizeof( profile_block_t ) );
	return block;
}
//Pass each block once, writing it to stream and adjusting child/sibling pointers to form a single-linked list through child pointer //Potential drawback of this is that block access order will degenerate over time and result in random access over the whole //profile memory area in the end static profile_block_t* _profile_process_block( profile_block_t* block ) { profile_block_t* leaf = block; if( _profile_write ) _profile_write( block, sizeof( profile_block_t ) ); if( block->child ) { leaf = _profile_process_block( GET_BLOCK( block->child ) ); if( block->sibling ) { profile_block_t* subleaf = _profile_process_block( GET_BLOCK( block->sibling ) ); subleaf->child = block->child; block->child = block->sibling; block->sibling = 0; } } else if( block->sibling ) { leaf = _profile_process_block( GET_BLOCK( block->sibling ) ); block->child = block->sibling; block->sibling = 0; } return leaf; }
/* Publish a finished root block to the global _profile_root slot.
 * If the slot is occupied, the current occupant is detached and chained onto
 * this block's sibling list, then the CAS is retried. Lock-free. */
static void _profile_put_root_block( uint32_t block )
{
	uint32_t sibling;
	profile_block_t* self = GET_BLOCK( block );
#if PROFILE_ENABLE_SANITY_CHECKS
	FOUNDATION_ASSERT( self->sibling == 0 );
#endif
	while( !atomic_cas32( &_profile_root, block, 0 ) )
	{
		//Slot taken: atomically steal the current root so we can merge with it
		do
		{
			sibling = atomic_load32( &_profile_root );
		} while( sibling && !atomic_cas32( &_profile_root, 0, sibling ) );
		if( sibling )
		{
			if( self->sibling )
			{
				//Walk to the end of our sibling chain and append the stolen root there
				uint32_t leaf = self->sibling;
				while( GET_BLOCK( leaf )->sibling )
					leaf = GET_BLOCK( leaf )->sibling;
				GET_BLOCK( sibling )->previous = leaf;
				GET_BLOCK( leaf )->sibling = sibling;
			}
			else
			{
				self->sibling = sibling;
			}
		}
	}
}
/* Close the current thread-local profile block: stamp its end time, pop the
 * thread's block stack back to the parent, and if the block has no parent,
 * publish it as a root block. If the thread migrated to another core while
 * the parent was open, the parent is split into a new block. */
void profile_end_block( void )
{
	uint32_t block_index = get_thread_profile_block();
	profile_block_t* block;
	if( !_profile_enable || !block_index )
		return;

	block = GET_BLOCK( block_index );
	block->data.end = time_current() - _profile_ground_time;

	if( block->previous )
	{
		unsigned int processor;
		profile_block_t* current = block;
		profile_block_t* previous = GET_BLOCK( block->previous );
		profile_block_t* parent;
		unsigned int current_index = block_index;
		unsigned int parent_index;
		//'previous' is either the parent or an earlier sibling; walk back until
		//we find the node whose child pointer is us — its previous is the parent
		while( previous->child != current_index )
		{
			current_index = current->previous; //Walk sibling list backwards
			current = GET_BLOCK( current_index );
			previous = GET_BLOCK( current->previous );
#if PROFILE_ENABLE_SANITY_CHECKS
			FOUNDATION_ASSERT( current_index != 0 );
			FOUNDATION_ASSERT( current->previous != 0 );
#endif
		}
		parent_index = current->previous; //Previous now points to parent
		parent = GET_BLOCK( parent_index );
#if PROFILE_ENABLE_SANITY_CHECKS
		FOUNDATION_ASSERT( parent_index != block_index );
#endif
		set_thread_profile_block( parent_index );

		processor = thread_hardware();
		if( parent->data.processor != processor )
		{
			const char* message = parent->data.name;
			//Thread migrated, split into new block
			profile_end_block();
			profile_begin_block( message );
		}
	}
	else
	{
		//No parent: this was a top-level block, hand it to the output queue
		_profile_put_root_block( block_index );
		set_thread_profile_block( 0 );
	}
}
/* Free a block in the custom allocator.
 * NOTE(review): RETURN(...) is presumably an early-return-on-condition macro —
 * confirm against its definition. Pointers outside [FIRST_PTR, LAST_PTR] and
 * double frees are silently ignored. */
void _free(void *ptr)
{
	t_block *block;

	RETURN(!ptr || ptr > LAST_PTR() || ptr < FIRST_PTR());
	block = GET_BLOCK(ptr);
	RETURN(block->isFree);
	block->isFree = true;
	if (block->parent == last && last->startBlock == last->lastBlock)
	{
		//Freeing the only block of the last page: unlink the page and
		//return the memory to the OS by moving the program break back
		if (!(last = last->prev))
			blocks = NULL;
		else
			last->next = NULL;
		brk(block->parent);
	}
	else if (block == block->parent->lastBlock)
	{
		//Freeing the last block of a page: shrink the page's used region
		if (!(block->parent->lastBlock = block->prev))
			block->parent->startBlock = NULL;
		block->parent->freeSize += B_SIZE(block->size);
	}
	else if (block->parent->maxFreeSize < block->size)
		//Interior free: just track the largest reusable hole in this page
		block->parent->maxFreeSize = block->size;
}
/* Free a block in the custom allocator (coalescing variant).
 * NOTE(review): RETURN(...) presumably early-returns when its condition holds;
 * moreSpace(0, true) appears to yield the current heap end — confirm both. */
void _free(void *ptr)
{
	t_block *block;

	RETURN(!ptr || ptr > (void *)moreSpace(0, true));
	block = GET_BLOCK(ptr);
	RETURN(block->isFree);
	//Merge with adjacent free blocks before bookkeeping
	mergeBlocks(&block);
	block->isFree = true;
	if (block == block->parent->lastBlock)
	{
		block->parent->lastBlock = block->prev;
		block->parent->freeSize += B_SIZE(block->size);
		//Release a fully idle trailing page back to the OS
		if (!block->parent->next && block->parent->freeSize > PAGE_SIZE)
		{
			if (blocks != block->parent)
			{
				if (block->parent->prev)
					block->parent->prev->next = block->parent->next;
				brk(block->parent);
			}
		}
	}
	else if (block->parent->maxFreeSize < block->size)
		//Interior free: track the largest reusable hole in this page
		block->parent->maxFreeSize = block->size;
}
void profile_begin_block( const char* message ) { uint32_t parent; if( !_profile_enable ) return; parent = get_thread_profile_block(); if( !parent ) { //Allocate new master block profile_block_t* block = _profile_allocate_block(); uint32_t blockindex; if( !block ) return; blockindex = BLOCK_INDEX( block ); block->data.id = atomic_add32( &_profile_counter, 1 ); string_copy( block->data.name, message, MAX_MESSAGE_LENGTH ); block->data.processor = thread_hardware(); block->data.thread = (uint32_t)thread_id(); block->data.start = time_current() - _profile_ground_time; set_thread_profile_block( blockindex ); } else { //Allocate new child block profile_block_t* parentblock; profile_block_t* subblock = _profile_allocate_block(); uint32_t subindex; if( !subblock ) return; subindex = BLOCK_INDEX( subblock ); parentblock = GET_BLOCK( parent ); subblock->data.id = atomic_add32( &_profile_counter, 1 ); subblock->data.parentid = parentblock->data.id; string_copy( subblock->data.name, message, MAX_MESSAGE_LENGTH ); subblock->data.processor = thread_hardware(); subblock->data.thread = (uint32_t)thread_id(); subblock->data.start = time_current() - _profile_ground_time; subblock->previous = parent; subblock->sibling = parentblock->child; if( parentblock->child ) GET_BLOCK( parentblock->child )->previous = subindex; parentblock->child = subindex; set_thread_profile_block( subindex ); } }
/* Shut down the profiler: stop the io thread, flush/discard pending root
 * blocks, then verify that every block ended up back on the free list. */
void profile_shutdown( void )
{
	profile_enable( 0 );

	//Wait for the io thread to notice the disable flag and exit
	while( thread_is_thread( _profile_io_thread ) )
		thread_sleep( 1 );
	_profile_io_thread = 0;

	//Discard and free up blocks remaining in queue
	_profile_thread_finalize();
	if( atomic_load32( &_profile_root ) )
		_profile_process_root_block();

	//Sanity checks
	{
		uint64_t num_blocks = 0;
		uint32_t free_block = atomic_load32( &_profile_free ) & 0xffff;

		if( atomic_load32( &_profile_root ) )
			log_error( 0, ERROR_INTERNAL_FAILURE, "Profile module state inconsistent on shutdown, at least one root block still allocated/active" );

		//Walk the free list (linked through child) and count the blocks
		while( free_block )
		{
			profile_block_t* block = GET_BLOCK( free_block );
			if( block->sibling )
				log_errorf( 0, ERROR_INTERNAL_FAILURE, "Profile module state inconsistent on shutdown, block %d has sibling set", free_block );
			++num_blocks;
			free_block = GET_BLOCK( free_block )->child;
		}
		if( _profile_num_blocks )
			++num_blocks; //Include the wasted block 0

		if( num_blocks != _profile_num_blocks )
		{
			//If profile output function (user) crashed, this will probably trigger since at least one block will be lost in space
			log_errorf( 0, ERROR_INTERNAL_FAILURE, "Profile module state inconsistent on shutdown, lost blocks (found %llu of %llu)", num_blocks, _profile_num_blocks );
		}
	}

	atomic_store32( &_profile_root, 0 );
	atomic_store32( &_profile_free, 0 );
	_profile_num_blocks = 0;
	_profile_identifier = 0;
}
/* *src is a tag tainted pointer */
/* Evacuate a big object: big objects are not copied, their whole block is
 * simply moved from the arena's big-block list to the live queue once. */
void evacuate_big(void** src, struct s_gc* s_gc)
{
	struct big_bdescr* blk = (struct big_bdescr*)GET_BLOCK(*src);

	if (blk->used)
		return; /* already marked live during this collection */

	TAILQ_REMOVE(&(s_gc->arena->big_blocks), (struct bdescr*)blk, link);
	TAILQ_INSERT_TAIL(&(s_gc->big_live_queue), (struct bdescr*)blk, link);
	blk->used = 1;
	s_gc->big_evaced++;
}
/* Push the chain [block .. leaf] (linked through child pointers) back onto the
 * global free list in one lock-free splice. The high 16 bits of the tag carry
 * a generation counter to avoid ABA on the CAS. */
static void _profile_free_block( uint32_t block, uint32_t leaf )
{
	uint32_t last_tag, block_tag;
	do
	{
		block_tag = block | ( ( atomic_incr32( &_profile_loopid ) & 0xffff ) << 16 );
		last_tag = atomic_load32( &_profile_free );
		//Hook the current free list onto the tail of our chain before publishing
		GET_BLOCK( leaf )->child = last_tag & 0xffff;
	} while( !atomic_cas32( &_profile_free, block_tag, last_tag ) );
}
/* Scavenge a big object: evacuate each of its first nptrs word-sized fields,
 * which the block descriptor records as pointer fields. */
void scavenge_big(void *obj, struct s_gc* s_gc)
{
	struct big_bdescr* blk = (struct big_bdescr*)GET_BLOCK(obj);
	void** fields = (void**)obj;
	unsigned int count = blk->nptrs;
	unsigned int i;

	debug("%s : obj %08x\n",__FUNCTION__,(unsigned int)(obj));

	for (i = 0; i < count; i++)
		evacuate(&fields[i], s_gc);
}
/* Attach a finished block to the thread's current block as its first child,
 * or publish it as a root block when the thread has no current block. */
static void _profile_put_simple_block( uint32_t block )
{
	//Add to current block, or if no current add to array
	uint32_t parent_block = get_thread_profile_block();
	if( parent_block )
	{
		//Splice in at the head of the parent's child list
		profile_block_t* self = GET_BLOCK( block );
		profile_block_t* parent = GET_BLOCK( parent_block );
		uint32_t next_block = parent->child;
		self->previous = (uint16_t)parent_block;
		self->sibling = (uint16_t)next_block;
		if( next_block )
			GET_BLOCK( next_block )->previous = (uint16_t)block;
		parent->child = block;
	}
	else
	{
		_profile_put_root_block( block );
	}
}
/* Recursive DFS helper (Tarjan low-link) marking articulation points of the
 * empty-cell graph with SPECIAL_BLOCK in oBoard.
 * NOTE(review): the static 'time' counter is never reset and is shared across
 * all searches and threads — verify this is acceptable for repeated calls. */
void CArticulationPoints::APUtil(const TBlock board[], TBlock oBoard[], int u, bool visited[], int disc[], int low[], int parent[])
{
	// A static variable is used for simplicity, we can avoid use of static
	// variable by passing a pointer.
	static int time = 0;

	// Count of children in DFS Tree
	int children = 0;

	// Mark the current node as visited
	visited[u] = true;

	// Initialize discovery time and low value
	disc[u] = low[u] = ++time;

	// Go through all vertices adjacent to this (the 4 orthogonal moves)
	for (int iMove = 1; iMove <= 4; iMove++)
	{
		TPos v2d = MOVE(u, iMove);
		if (GET_BLOCK(board, v2d) != BLOCK_EMPTY)
			continue; // only empty cells are graph vertices
		int v = v2d; // v is current adjacent of u

		// If v is not visited yet, then make it a child of u
		// in DFS tree and recur for it
		if (!visited[v])
		{
			children++;
			parent[v] = u;
			APUtil(board, oBoard, v, visited, disc, low, parent);

			// Check if the subtree rooted with v has a connection to
			// one of the ancestors of u
			low[u] = min(low[u], low[v]);

			// u is an articulation point in following cases
			// (1) u is root of DFS tree and has two or more children.
			if (parent[u] == -1 && children > 1)
				oBoard[u] = SPECIAL_BLOCK;

			// (2) If u is not root and low value of one of its child is more
			// than discovery value of u.
			if (parent[u] != -1 && low[v] >= disc[u])
				oBoard[u] = SPECIAL_BLOCK;
		}
		// Update low value of u for parent function calls.
		else if (v != parent[u])
			low[u] = min(low[u], disc[v]);
	}
}
/* Store a log message in the profile stream. Messages longer than
 * MAX_MESSAGE_LENGTH are split across a chain of child blocks, linked via
 * the parentid/end fields so the reader can reassemble them in order.
 * NOTE(review): if a continuation block allocation fails mid-chain, the
 * already-allocated blocks are abandoned (never queued or freed) — confirm
 * this leak is acceptable under block exhaustion. */
static void _profile_put_message_block( uint32_t id, const char* message )
{
	profile_block_t* subblock = 0;
	int len = (int)string_length( message );

	//Allocate new master block
	profile_block_t* block = _profile_allocate_block();
	if( !block )
		return;
	block->data.id = id;
	block->data.processor = thread_hardware();
	block->data.thread = (uint32_t)thread_id();
	block->data.start = time_current() - _profile_ground_time;
	//data.end doubles as a sequence number linking continuation blocks
	block->data.end = atomic_add32( &_profile_counter, 1 );
	memcpy( block->data.name, message, ( len >= MAX_MESSAGE_LENGTH ) ? MAX_MESSAGE_LENGTH : len );

	len -= MAX_MESSAGE_LENGTH;
	message += MAX_MESSAGE_LENGTH;
	subblock = block;

	while( len > 0 )
	{
		//add subblock for the next MAX_MESSAGE_LENGTH chunk of the message
		profile_block_t* cblock = _profile_allocate_block();
		uint16_t cblock_index;
		if( !cblock )
			return;
		cblock_index = BLOCK_INDEX( cblock );
		cblock->data.id = id + 1;
		cblock->data.parentid = (uint32_t)subblock->data.end;
		cblock->data.processor = block->data.processor;
		cblock->data.thread = block->data.thread;
		cblock->data.start = block->data.start;
		cblock->data.end = atomic_add32( &_profile_counter, 1 );
		memcpy( cblock->data.name, message, ( len >= MAX_MESSAGE_LENGTH ) ? MAX_MESSAGE_LENGTH : len );

		//Link as first child of the previous chunk's block
		cblock->sibling = subblock->child;
		if( cblock->sibling )
			GET_BLOCK( cblock->sibling )->previous = cblock_index;
		subblock->child = cblock_index;
		cblock->previous = BLOCK_INDEX( subblock );
		subblock = cblock;

		len -= MAX_MESSAGE_LENGTH;
		message += MAX_MESSAGE_LENGTH;
	}

	_profile_put_simple_block( BLOCK_INDEX( block ) );
}
void evacuate(void** src, struct s_gc* s_gc){ debug("%s : *src %08x\n",__FUNCTION__,(unsigned int)(*src)); debug("%s : BLOCK %08x\n",__FUNCTION__,(unsigned int)GET_BLOCK(*src)); enum e_descr_type e = BLOCK_TYPE(*src); if (e == E_BLOCK_SINGLE){ debug("%s : small\n",__FUNCTION__); evacuate_small(src,s_gc); } else if (e == E_BLOCK_BIG){ debug("%s : big\n",__FUNCTION__); evacuate_big(src,s_gc); } else { debug("ERROR: %s : %s : %08x\n", __FUNCTION__,"unknown object type",(unsigned int)e); debug("ERROR: %s : BIG %08x SINGLE %08x\n", __FUNCTION__,E_BLOCK_BIG,E_BLOCK_SINGLE); } }
/* Read a kernel debug_view structure at 'addr' and return a freshly allocated
 * host-side copy with its name filled in. Returns NULL on allocation failure.
 * Caller owns the returned debug_view_t.
 *
 * Fix: both malloc() results were previously dereferenced without a NULL
 * check, which is undefined behavior on out-of-memory (CERT MEM32-C). */
static debug_view_t* get_debug_view(kaddr_t addr)
{
	void* k_debug_view;
	int k_debug_view_size;
	debug_view_t* rc;

	rc = (debug_view_t*)malloc(sizeof(debug_view_t));
	if (!rc)
		return NULL;
	memset(rc, 0, sizeof(debug_view_t));

	k_debug_view_size = kl_struct_len("debug_view");
	k_debug_view = malloc(k_debug_view_size);
	if (!k_debug_view) {
		free(rc);
		return NULL;
	}

	/* pull the raw kernel structure, then copy out the view name
	 * (rc was zeroed above, so the name stays NUL-terminated as long as
	 * the field is larger than DEBUG_MAX_PROCF_LEN) */
	GET_BLOCK(addr, k_debug_view_size, k_debug_view);
	strncpy(rc->name, K_PTR(k_debug_view,"debug_view","name"), DEBUG_MAX_PROCF_LEN);
	free(k_debug_view);
	return rc;
}
void *CarveMapIndex( carve_t cv, void *aindex )
/*********************************************/
{
    /* given an index; find and allocate the carve element */
    unsigned idx = (unsigned)(pointer_int)aindex;
    blk_t *blk;

    if( idx == CARVE_NULL_INDEX ) {
        return( NULL );
    }
    /* block numbers are 1-based in the map; offset indexes into its data */
    blk = cv->blk_map[ GET_BLOCK( idx ) - 1 ];
    return( &(blk->data[ GET_OFFSET( idx ) ]) );
}
/* If the calling thread has migrated to a different hardware core since its
 * current profile block was opened, close that block and reopen a new one
 * with the same name so per-core timings stay accurate. No-op otherwise. */
void profile_update_block( void )
{
	uint32_t index = get_thread_profile_block();
	profile_block_t* current;
	const char* message;

	if( !_profile_enable || !index )
		return;

	current = GET_BLOCK( index );
	message = current->data.name;
	if( current->data.processor == thread_hardware() )
		return;

	//Thread migrated to another core, split into new block
	profile_end_block();
	profile_begin_block( message );
}
/* Read all v1 debug areas of a kernel debug_info into freshly allocated
 * host buffers (db_info->areas[i]).
 * NOTE(review): malloc() results are used unchecked here — confirm whether
 * this tool's policy is abort-on-OOM elsewhere or whether checks are needed. */
static void debug_get_areas_v1(debug_info_t* db_info, void* k_dbi)
{
	kaddr_t mem_pos;
	kaddr_t dbe_addr;
	int area_size, i;

	/* get areas */
	/* place to hold ptrs to debug areas in lcrash */
	area_size = PAGE_SIZE << db_info->page_order;
	db_info->areas = (void**)malloc(db_info->nr_areas * sizeof(void *));
	memset(db_info->areas, 0, db_info->nr_areas * sizeof(void *));
	//'areas' in the kernel structure is an array of pointers to the area buffers
	mem_pos = KL_ULONG(k_dbi,"debug_info","areas");
	for (i = 0; i < db_info->nr_areas; i++) {
		dbe_addr = KL_VREAD_PTR(mem_pos);
		db_info->areas[i] = (debug_entry_t *) malloc(area_size);
		/* read raw data for debug area */
		GET_BLOCK(dbe_addr, area_size, db_info->areas[i]);
		mem_pos += KL_NBPW;
	}
}
/* Build a random board with no isolated regions: start fully obstacled, then
 * carve out 30-59 connected empty cells by random walks from cell 60, and
 * place player 1 on a random carved cell.
 * NOTE(review): p1 and p2 are passed BY VALUE, so the assignment to p1 at the
 * end never reaches the caller, and p2 is never used at all — callers cannot
 * learn the chosen positions. Likely these were meant to be references. */
void createBoardWithOutIsland(TBlock *board, TPos p1 = 0i16, TPos p2 = 120i16)
{
	memset(board, 0, BOARD_SIZE*sizeof(TBlock));
	int nObject = rand() % 30 + 30;
	vector<TPos> obstacles = { 60 };
	for (int i = 0; i < 121; i++){
		SET_BLOCK(board, TPos(i), BLOCK_OBSTACLE);
	}
	//Seed cell 60 as the first empty cell; everything carved stays connected to it
	SET_BLOCK(board, TPos(60), BLOCK_EMPTY);
	for (int i = 1; i < nObject;){
		//Pick a random already-empty cell and try to carve a random neighbour
		TPos t = TPos(obstacles[rand() % obstacles.size()]);
		TPos newT = MOVE(t, rand() % 4 + 1);
		if (GET_BLOCK(board, newT) == BLOCK_OBSTACLE){
			SET_BLOCK(board, newT, BLOCK_EMPTY);
			obstacles.push_back(newT);
			i++;
		}
	}
	p1 = obstacles[rand() % obstacles.size()];
	SET_BLOCK(board, p1, BLOCK_PLAYER_1);
}
void CArticulationPoints::getArticulationPoints(const TBlock board[], const TPos& _p1, const TPos&_p2, TBlock oBoard[]) { // Mark all the vertices as not visited memcpy(oBoard, board, sizeof(TBlock)*BOARD_SIZE); bool visited[BOARD_SIZE]; int disc[BOARD_SIZE]; int low[BOARD_SIZE]; int parent[BOARD_SIZE]; // Initialize parent and visited, and ap(articulation point) arrays for (int i = 0; i < BOARD_SIZE; i++) { parent[i] = -1; visited[i] = false; } // Call the recursive helper function to find articulation points // in DFS tree rooted with vertex 'i' for (int i = 0; i < BOARD_SIZE; i++) if (visited[i] == false && GET_BLOCK(board, i) == BLOCK_EMPTY) APUtil(board, oBoard, i, visited, disc, low, parent); }
/* Atomically detach the chain of pending root blocks, then write out each
 * root's tree and return its blocks to the free list. */
static void _profile_process_root_block( void )
{
	uint32_t block;

	//Steal the whole root chain with a CAS so producers can keep publishing
	do
	{
		block = atomic_load32( &_profile_root );
	} while( block && !atomic_cas32( &_profile_root, 0, block ) );

	while( block )
	{
		profile_block_t* leaf;
		profile_block_t* current = GET_BLOCK( block );
		uint32_t next = current->sibling;

		//Detach from the sibling chain before processing so only this tree is emitted
		current->sibling = 0;
		leaf = _profile_process_block( current );
		_profile_free_block( block, BLOCK_INDEX( leaf ) );

		block = next;
	}
}
/* Read all v2 debug areas (each area is an array of page pointers) into
 * freshly allocated host buffers (db_info->areas_v2[i][j]).
 * NOTE(review): malloc() results are used unchecked, matching the v1 reader —
 * confirm the tool's OOM policy. */
static void debug_get_areas_v2(debug_info_t* db_info, void* k_dbi)
{
	kaddr_t area_ptr;
	kaddr_t page_array_ptr;
	kaddr_t page_ptr;
	int i,j;

	db_info->areas_v2=(void***)malloc(db_info->nr_areas * sizeof(void **));
	//'areas' points to an array of per-area page arrays in the kernel
	area_ptr = KL_ULONG(k_dbi,"debug_info","areas");
	for (i = 0; i < db_info->nr_areas; i++) {
		db_info->areas_v2[i] = (void**)malloc(db_info->pages_per_area_v2 * sizeof(void*));
		page_array_ptr = KL_VREAD_PTR(area_ptr);
		for(j=0; j < db_info->pages_per_area_v2; j++) {
			page_ptr = KL_VREAD_PTR(page_array_ptr);
			db_info->areas_v2[i][j] = (void*)malloc(PAGE_SIZE);
			/* read raw data for debug area */
			GET_BLOCK(page_ptr, PAGE_SIZE, db_info->areas_v2[i][j]);
			page_array_ptr += KL_NBPW;
		}
		area_ptr += KL_NBPW;
	}
}
/* Alpha-beta search with a transposition table (MTD(f)-style bound storage).
 * PLAYER_1 maximizes, PLAYER_2 minimizes. Returns the game-theoretic value of
 * the position, or TIMEOUT_POINTS if the per-move time budget expires.
 * 'board'/'history' are mutated during search and restored before returning
 * (checked in debug builds). */
TPoint CSearchEngine::alphaBetaTT(TBlock board[], const TPos&_p1, const TPos&_p2, const TPlayer next, vector<TMove> &history, int depth, TPoint alpha, TPoint beta)
{
#ifdef _DEBUG
	TBlock backup[BOARD_SIZE];
	memcpy(backup, board, BOARD_SIZE*sizeof(TBlock));
	vector<TMove> _history(history);
#endif // _DEBUG
	assert((GET_BLOCK(board, _p1)) == BLOCK_PLAYER_1);
	assert((GET_BLOCK(board, _p2)) == BLOCK_PLAYER_2);
	assert(next == PLAYER_1 || next == PLAYER_2);
	static CMyTimer *timer = CMyTimer::getInstance();
	static CMyAI* pAI = CMyAI::getInstance();
	if (pAI->shouldEndMoveNow())
		return TIMEOUT_POINTS;

	//Transposition-table probe: stored lower/upper bounds can cut off or
	//narrow the [alpha, beta] window when stored at sufficient depth
	static CTranspositionTable *tt = CTranspositionTable::getInstance();
	CGameState state(history);
	state.depth = depth;
	CGameState* ttEntry = tt->get(state);
	if (ttEntry && ttEntry->depth >= depth){
		if (ttEntry->lower >= beta) {
			return ttEntry->lower;
		}
		if (ttEntry->upper <= alpha) {
			return ttEntry->upper;
		}
		alpha = max(alpha, ttEntry->lower);
		beta = min(beta, ttEntry->upper);
	}

	bool bOk;
	TPoint g;
	//Nonzero evaluation means the position is already decided (terminal)
	TPoint point = CHeuristicBase::evaluateBoardTT(board, _p1, _p2, next, history);
	if (point != 0) {
		g = point;
	}
	else if (depth == 0) {
		//Depth exhausted: fall back to the heuristic estimate
		g = heuristic.rateBoardTT(board, _p1, _p2, next, history);
	}
	else {
		vector<TMove> moves;
		moves = next == PLAYER_1 ? getAvailableMoves(board, _p1) : getAvailableMoves(board, _p2);
		CHeuristicBase::sortMoves(moves, board, _p1, _p2, next, history);
		TPoint ab;
		if (next == PLAYER_1){
			//Maximizing player: make each move, recurse, then undo the move
			g = -MY_INFINITY;
			TPoint a = alpha;
			for (auto m = moves.begin(); m != moves.end(); m++){
				bOk = move(board, _p1, *m, false);
				assert(bOk);
				history.push_back(*m);
				g = max(g, ab = alphaBetaTT(board, MOVE(_p1, *m), _p2, PLAYER_2, history, depth - 1, a, beta));
				bOk = move(board, MOVE(_p1, *m), getOpositeDirection(*m), true);
				assert(bOk);
				history.pop_back();
				if (ab == TIMEOUT_POINTS)
					return TIMEOUT_POINTS;
				assert(ab >= -MY_INFINITY && ab <= MY_INFINITY);
				a = max(g, a);
			}
		}
		else {
			//Minimizing player: symmetric to the above
			g = MY_INFINITY;
			TPoint b = beta;
			for (auto m = moves.begin(); m != moves.end(); m++){
				bOk = move(board, _p2, *m, false);
				assert(bOk);
				history.push_back(*m);
				g = min(g, ab = alphaBetaTT(board, _p1, MOVE(_p2, *m), PLAYER_1, history, depth - 1, alpha, b));
				bOk = move(board, MOVE(_p2, *m), getOpositeDirection(*m), true);
				assert(bOk);
				history.pop_back();
				if (ab == TIMEOUT_POINTS)
					return TIMEOUT_POINTS;
				assert(ab >= -MY_INFINITY && ab <= MY_INFINITY);
				b = min(g, b);
			}
		}
	}

	//Store g as a bound: fail-low updates upper, exact value sets both,
	//fail-high updates lower (standard MTD(f) bookkeeping)
	if (!ttEntry)
		ttEntry = tt->put(state);
	if (g <= alpha)
		ttEntry->upper = g;
	if (g > alpha && g < beta){
		ttEntry->lower = g;
		ttEntry->upper = g;
	}
	if (g >= beta)
		ttEntry->lower = g;
	ttEntry->depth = depth;
#ifdef _DEBUG
	assert(memcmp(board, backup, BOARD_SIZE*sizeof(TBlock)) == 0);
	assert(_history.size() == history.size());
	assert(equal(_history.begin(), _history.end(), history.begin()));
#endif // _DEBUG
	return g;
}
/* Plain alpha-beta search (no transposition table). PLAYER_1 maximizes,
 * PLAYER_2 minimizes. Returns the value of the position, or TIMEOUT_POINTS
 * when the per-move time budget expires. 'board'/'history' are mutated and
 * restored (checked in debug builds).
 * NOTE(review): 'bestValue' and 'timer' are set but never used. */
TPoint CSearchEngine::alphaBeta(TBlock board[], const TPos&_p1, const TPos&_p2, const TPlayer next, vector<TMove> &history, int depth, TPoint a, TPoint b)
{
#ifdef _DEBUG
	TBlock backup[BOARD_SIZE];
	memcpy(backup, board, BOARD_SIZE*sizeof(TBlock));
	vector<TMove> _history(history);
#endif // _DEBUG
	assert((GET_BLOCK(board, _p1)) == BLOCK_PLAYER_1);
	assert((GET_BLOCK(board, _p2)) == BLOCK_PLAYER_2);
	assert(next == PLAYER_1 || next == PLAYER_2);
	static CMyAI* pAI = CMyAI::getInstance();
	static CMyTimer *timer = CMyTimer::getInstance();
	if (pAI->shouldEndMoveNow())
		return TIMEOUT_POINTS;

	bool bOk;
	TPoint bestValue = -MY_INFINITY;
	//Nonzero evaluation means the position is already decided (terminal)
	TPoint point = CHeuristicBase::evaluateBoardTT(board, _p1, _p2, next, history);
	if (point == TIMEOUT_POINTS)
		return point;
	if (point != 0) {
		assert(point > POINTS / 2 || point < -POINTS / 2);
		return point;
	}
	if (depth == 0) {
		//Depth exhausted: heuristic estimate
		TPoint t = heuristic.rateBoardTT(board, _p1, _p2, next, history);
		assert(abs(t) <= MY_INFINITY);
		return t;
	}

	vector<TMove> moves;
	moves = next == PLAYER_1 ? getAvailableMoves(board, _p1) : getAvailableMoves(board, _p2);
	CHeuristicBase::sortMoves(moves, board, _p1, _p2, next, history);
	TPoint value;
	TPoint ab;
	if (next == PLAYER_1){
		//Maximizing player: make each move, recurse, undo, prune on b <= a
		value = -MY_INFINITY;
		for (auto m = moves.begin(); m != moves.end(); m++){
			bOk = move(board, _p1, *m, false);
			assert(bOk);
			history.push_back(*m);
			value = max(value, ab = alphaBeta(board, MOVE(_p1, *m), _p2, PLAYER_2, history, depth - 1, a, b));
			bOk = move(board, MOVE(_p1, *m), getOpositeDirection(*m), true);
			assert(bOk);
			history.pop_back();
			if (ab == TIMEOUT_POINTS)
				return TIMEOUT_POINTS;
			assert(ab >= -MY_INFINITY && ab <= MY_INFINITY);
			a = max(value, a);
			if (b <= a)
				break;
		}
	}
	else {
		//Minimizing player: symmetric to the above
		value = MY_INFINITY;
		for (auto m = moves.begin(); m != moves.end(); m++){
			bOk = move(board, _p2, *m, false);
			assert(bOk);
			history.push_back(*m);
			value = min(value, ab = alphaBeta(board, _p1, MOVE(_p2, *m), PLAYER_1, history, depth - 1, a, b));
			bOk = move(board, MOVE(_p2, *m), getOpositeDirection(*m), true);
			assert(bOk);
			history.pop_back();
			if (ab == TIMEOUT_POINTS)
				return TIMEOUT_POINTS;
			assert(ab >= -MY_INFINITY && ab <= MY_INFINITY);
			b = min(value, b);
			if (b <= a)
				break;
		}
	}
#ifdef _DEBUG
	assert(memcmp(board, backup, BOARD_SIZE*sizeof(TBlock)) == 0);
	assert(_history.size() == history.size());
	assert(equal(_history.begin(), _history.end(), history.begin()));
#endif // _DEBUG
	assert(value >= -MY_INFINITY && value <= MY_INFINITY);
	return value;
}
/* Read a kernel debug_info structure at 'addr' into a host-side debug_info_t,
 * optionally pulling in all debug areas (v1 or v2 layout) and the view table.
 * Returns NULL when kernel structure sizes cannot be determined.
 * NOTE(review): dbe_size appears to be a file-scope global set here as a side
 * effect — confirm its consumers. */
static debug_info_t* get_debug_info(kaddr_t addr,int get_areas)
{
	void *k_dbi;
	kaddr_t mem_pos;
	kaddr_t view_addr;
	debug_info_t* db_info;
	int i;
	int dbi_size;

	/* get sizes of kernel structures */
	if(!(dbi_size = kl_struct_len("debug_info"))){
		fprintf (KL_ERRORFP, "Could not determine sizeof(struct debug_info)\n");
		return(NULL);
	}
	if(!(dbe_size = kl_struct_len("__debug_entry"))){
		fprintf(KL_ERRORFP, "Could not determine sizeof(struct __debug_entry)\n");
		return(NULL);
	}

	/* get kernel debug_info structure */
	k_dbi = malloc(dbi_size);
	GET_BLOCK(addr, dbi_size, k_dbi);
	db_info = (debug_info_t*)malloc(sizeof(debug_info_t));
	memset(db_info, 0, sizeof(debug_info_t));

	/* copy members */
	db_info->level          = KL_INT(k_dbi,"debug_info","level");
	db_info->nr_areas       = KL_INT(k_dbi,"debug_info","nr_areas");
	db_info->pages_per_area_v2= KL_INT(k_dbi,"debug_info","pages_per_area");
	db_info->page_order     = KL_INT(k_dbi,"debug_info","page_order");
	db_info->buf_size       = KL_INT(k_dbi,"debug_info","buf_size");
	db_info->entry_size     = KL_INT(k_dbi,"debug_info","entry_size");
	db_info->next_dbi       = KL_ULONG(k_dbi,"debug_info","next");
	db_info->prev_dbi       = KL_ULONG(k_dbi,"debug_info","prev");
	db_info->addr           = addr;
	strncpy(db_info->name,K_PTR(k_dbi,"debug_info","name"), DEBUG_MAX_PROCF_LEN);

	if(get_areas){
		//Area layout differs between debug feature versions
		if(dbf_version == DBF_VERSION_V1)
			debug_get_areas_v1(db_info,k_dbi);
		else
			debug_get_areas_v2(db_info,k_dbi);
	} else {
		db_info->areas = NULL;
	}

	/* get views: the kernel keeps an array of view pointers, NULL-terminated */
	mem_pos = (uaddr_t) K_PTR(k_dbi,"debug_info","views");
	memset(&db_info->views, 0, DEBUG_MAX_VIEWS * sizeof(void*));
	for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
		view_addr = KL_GET_PTR((void*)(uaddr_t)mem_pos);
		if(view_addr == 0){
			break;
		} else {
			db_info->views[i] = get_debug_view(view_addr);
		}
		mem_pos += KL_NBPW;
	}
	free(k_dbi);
	return db_info;
}
/* pop object to be scavenged. */ void* find_small_object(struct s_gc* s_gc){ size_t size_in_words; void** next_object = NULL; struct single_bdescr* to_space; if(s_gc->scavenging_object == NULL){ /* not initialized : scavenging_object points to the before head. */ to_space = (struct single_bdescr*)TAILQ_FIRST(&(s_gc->to_space_queue)); debug("%s : to_space : %08x\n",__FUNCTION__,(unsigned int)to_space); if(to_space == NULL){ /* no to_space. no object to scavenge. */ debug("%s : no to_space\n",__FUNCTION__); return NULL; } s_gc->scavenging_object = (void*)(((void**)to_space)+(WORDS_OF_TYPE(struct single_bdescr)) + 1); } size_in_words = GET_SIZE(s_gc->scavenging_object); to_space = (struct single_bdescr*)GET_BLOCK(s_gc->scavenging_object); debug("%s : scavenging object : %08x size : %08x\n",__FUNCTION__,(unsigned int)s_gc->scavenging_object,size_in_words); if(size_in_words == 0){ /* not found at s_gc->scavenging_object. but may be found in next block. */ to_space = (struct single_bdescr*)TAILQ_NEXT((struct bdescr*)to_space,link); if(to_space == NULL){ /* no to_space. no object to scavenge. */ return NULL; } void* next_scavenging_object = (void*)(((void**)to_space)+(WORDS_OF_TYPE(struct single_bdescr)) + 1); size_in_words = GET_SIZE(next_scavenging_object); if(size_in_words == 0){ /* do not update scavenging object pointer */ return NULL; } /* next scavenging object found */ s_gc->scavenging_object = next_scavenging_object; } next_object = s_gc->scavenging_object; /* shifting s_gc->scavenging_object */ /* now to_space points to the object found. */ if(next_object + size_in_words + 2 <= ((void**)to_space) + (BLOCK_SIZE/sizeof(void*))){ /* enough space. 
safely shitt the s_gc->scavenging_object */ debug("%s : old scavenging object : %08x size : %08x\n",__FUNCTION__,(unsigned int)s_gc->scavenging_object,size_in_words); s_gc->scavenging_object = (void*)(next_object + size_in_words +1); debug("%s : new scavenging object : %08x size : %08x\n",__FUNCTION__,(unsigned int)s_gc->scavenging_object,size_in_words); } else { debug("%s : terrible point\n",__FUNCTION__); /* shift to next page */ to_space = (struct single_bdescr*)TAILQ_NEXT((struct bdescr*)to_space,link); if(to_space == NULL){ /* force prepare for next page */ debug("%s : force prepare for next page\n",__FUNCTION__); struct single_bdescr* new_block = find_new_single_block(s_gc->arena); TAILQ_INSERT_TAIL(&(s_gc->to_space_queue),(struct bdescr*)new_block,link); } debug("%s : old scavenging object : %08x size : %08x\n",__FUNCTION__,(unsigned int)s_gc->scavenging_object,size_in_words); s_gc->scavenging_object = (void*)(((void**)to_space)+(WORDS_OF_TYPE(struct single_bdescr)) + 1); debug("%s : new scavenging object : %08x size : %08x\n",__FUNCTION__,(unsigned int)s_gc->scavenging_object,size_in_words); } return (void*)next_object; }
/*
 * prints debug data in sprintf format
 */
/* Formats one debug entry whose payload is a format string pointer followed by
 * up to DEBUG_SPRINTF_MAX_ARGS long-sized arguments (%s arguments are pointers
 * into the dump that are fetched into temporary buffers). Returns the number
 * of characters written, or a size estimate when called with NULL buffers. */
static int sprintf_format_fn(debug_info_t * id, debug_view_t *view, char *out_buf, const char *in_buf)
{
#define _BUFSIZE 1024
	char buf[_BUFSIZE];
	int i, k, rc = 0, num_longs = 0, num_used_args = 0, num_strings = 0;
	/* use kaddr_t to store long values of 32bit and 64bit archs here */
	kaddr_t inbuf_cpy[DEBUG_SPRINTF_MAX_ARGS];
	/* store ptrs to strings to be deallocated at end of this function */
	uaddr_t to_dealloc[DEBUG_SPRINTF_MAX_ARGS];
	kaddr_t addr;

	memset(buf, 0, sizeof(buf));
	memset(inbuf_cpy, 0, sizeof(inbuf_cpy));
	memset(to_dealloc, 0, sizeof(to_dealloc));

	if (out_buf == NULL || in_buf == NULL) {
		/* size-estimation mode: report worst-case output length */
		rc = id->buf_size * 4 + 3;
		goto out;
	}

	/* get the format string into buf */
	addr = KL_GET_PTR((void*)in_buf);
	GET_BLOCK(addr, _BUFSIZE, buf);

	/* scan the format string (up to newline) and fetch one argument per
	 * conversion specifier */
	k = 0;
	for (i = 0; buf[i] && (buf[i] != '\n'); i++) {
		if (buf[i] != '%')
			continue;
		if (k == DEBUG_SPRINTF_MAX_ARGS) {
			fprintf(KL_ERRORFP, "\nToo much parameters in sprinf view (%i)\n" ,k + 1);
			fprintf(KL_ERRORFP, "Format String: %s)\n", buf);
			break;
		}
		/* for sprintf we have only unsigned long values ... */
		if (buf[i+1] != 's'){
			/* we use KL_GET_PTR here to read ulong value */
			addr = KL_GET_PTR((void*) in_buf + ((k + 1)* KL_NBPW));
			inbuf_cpy[k] = addr;
		} else {
			/* ... or ptrs to strings in debug areas */
			inbuf_cpy[k] = (uaddr_t) malloc(_BUFSIZE);
			to_dealloc[num_strings++] = inbuf_cpy[k];
			addr = KL_GET_PTR((void*) in_buf + ((k + 1)* KL_NBPW));
			GET_BLOCK(addr, _BUFSIZE, (void*)(uaddr_t)(inbuf_cpy[k]));
		}
		k++;
	}

	/* count of longs fit into one entry */
	num_longs = id->buf_size / KL_NBPW; /* sizeof(long); */

	if(num_longs < 1)	/* bufsize of entry too small */
		goto out;
	if(num_longs == 1) {	/* no args, just print the format string */
		rc =  sprintf(out_buf + rc, "%s", buf);
		goto out;
	}

	/* number of arguments used for sprintf (without the format string) */
	num_used_args = MIN(DEBUG_SPRINTF_MAX_ARGS, (num_longs - 1));

	/* always pass the maximum number of slots; extra ones are ignored by
	 * the format string */
	rc =  sprintf(out_buf + rc, buf, (uaddr_t)(inbuf_cpy[0]),
		(uaddr_t)(inbuf_cpy[1]), (uaddr_t)(inbuf_cpy[2]),
		(uaddr_t)(inbuf_cpy[3]), (uaddr_t)(inbuf_cpy[4]),
		(uaddr_t)(inbuf_cpy[5]), (uaddr_t)(inbuf_cpy[6]),
		(uaddr_t)(inbuf_cpy[7]), (uaddr_t)(inbuf_cpy[8]),
		(uaddr_t)(inbuf_cpy[9]));

out:
	/* release buffers fetched for %s arguments */
	while (num_strings--){
		free((char*)(to_dealloc[num_strings]));
	}
	return rc;
}
/* otherwise, pimage must be reinitialized */
/* Render up to 'length' ASCII characters of ptext into the 1-bit-per-pixel
 * image 'pimage', starting at (offset_x, offset_y) and wrapping lines within
 * the (upper_x, upper_y) bounds. Returns 1 on success, 0 on any bounds or
 * input violation (after which pimage may be partially written). */
uint8_t text_to_image(const char* ptext, uint8_t length, uint8_t *pimage, uint8_t offset_x, uint8_t offset_y, uint8_t upper_x, uint8_t upper_y)
{
	/* Check input properly when you're thinking clearly */
	if (upper_x > EPAPER_WIDTH) {
		printf("1\n");
		return 0;
	}
	if (upper_y > EPAPER_HEIGHT) {
		printf("2\n");
		return 0;
	}
	if ((uint16_t)offset_x + ascii0x41_width > upper_x) {
		printf("3\n");
		return 0;
	}
	if ((uint16_t)offset_y + CHARACTER_HEIGHT + HEIGHT_SPACING > upper_y) {
		printf("4\n");
		return 0;
	}
	if (length == 0) {
		return 0;
	}

	// Easier to let the compiler handle offsets
	uint8_t (*ppimage)[EPAPER_HEIGHT][ALIGN_WIDTH(EPAPER_WIDTH)] = (void*)pimage;

	/* Now replace the image */
	{
		printf("performing a write\n");
		uint32_t x = offset_x;
		uint32_t y = offset_y;
		uint8_t last_line = 0;
		uint8_t temp = 0;
		// Block == byte that contains the 8 values
		// Cell == bit within a block
		uint8_t image_cell, char_cell;
		uint32_t image_block;
		uint32_t char_block;

		for (uint32_t i = 0; i < length; i++) {
			uint8_t c = ptext[i];
			//NUL terminates the text early
			if (c == 0) {
				length = i;
				break;
			}
			if ((uint8_t)c > 0x80) {
				printf("Char violation %02x\n", c);
				return 0;
			}
			const uint8_t (*pchar)[CHARACTER_HEIGHT][alphabet_byte_width(c)] = (void*)alphabet_bits[c];

			// Wrap to next line
			if (x + alphabet_bit_width[c] >= upper_x) {
				// Last line but we have more data, can't fulfill
				if (last_line) {
					printf("More writing requested but already hit end\n");
					return 0;
				}
				x = offset_x;
				y += CHARACTER_HEIGHT + HEIGHT_SPACING;
			}
			if (y >= upper_y) {
				printf("Bounds violation y WITHIN REPLACER\n");
				return 0;
			}
			if (y + CHARACTER_HEIGHT >= upper_y) {
				printf("Bounds violation y WITHIN REPLACER\n");
				return 0;
			}
			if (y + CHARACTER_HEIGHT + HEIGHT_SPACING >= upper_y) {
				last_line = 1;
			}

			printf("writing %c (x=%04d) starting at x=%04d, y=%04d\n", c, alphabet_bit_width[c], x, y);
			//Copy the glyph bit-by-bit: read the source bit from the font
			//bitmap and splice it into the destination byte
			for (uint8_t yi = 0; yi < CHARACTER_HEIGHT; yi++) {
				if (x + alphabet_bit_width[c] >= upper_x) {
					printf("Bounds violation x+abw WITHIN REPLACER\n");
					return 0;
				}
				for (uint8_t xi = 0; xi < alphabet_bit_width[c]; xi++) {
					image_block = GET_BLOCK(x + xi);
					image_cell = ((x + xi) % 8);
					char_block = GET_BLOCK(xi);
					char_cell = (xi % 8);
					printf("    x=%02d, xi=%02d ib=%04d, ic=%04d cb=%04d, cc=%04d\n", x, xi, image_block, image_cell, char_block, char_cell);
					temp = (*ppimage)[y+yi][image_block];
					temp &= ((uint8_t)-1) ^ (1<<image_cell);
					temp |= (((*pchar)[yi][char_block] >> char_cell) & 1) << image_cell;
					(*ppimage)[y+yi][image_block] = temp;
				}
			}
			x += alphabet_bit_width[c];
			x += WIDTH_SPACING;
		}
	}
	return 1;
}
/*
 * Get hdfs file block locations from metadata cache
 */
/* Build a BlockLocation array for the first 'filesize' bytes of the cached
 * file entry. Sets *block_num to the number of blocks covered; the final
 * block's length is truncated to the remaining bytes. Returns NULL on
 * failure (partially built results are released via the error path). */
BlockLocation *
GetHdfsFileBlockLocationsFromCache(MetadataCacheEntry *entry, uint64_t filesize, int *block_num)
{
	Insist(NULL != entry);
	Insist(entry->file_size >= filesize);

	int i, j, k;
	int last_block_length = 0;
	BlockLocation *locations = NULL;
	char *metadata_block_info = NULL;

	if (entry->file_size == filesize)
	{
		//Whole file requested: use the cached block count directly
		*block_num = entry->block_num;
		last_block_length = GET_BLOCK(entry->last_block_id)->length;
	}
	else
	{
		//Prefix requested: walk the block chain until 'filesize' is consumed
		last_block_length = filesize;
		j = entry->first_block_id;
		*block_num = 0;
		while (true)
		{
			MetadataHdfsBlockInfo *block_info = GET_BLOCK(j);
			(*block_num)++;
			if (filesize <= block_info->length)
			{
				last_block_length = filesize;
				break;
			}
			else
			{
				filesize -= block_info->length;
				j = NEXT_BLOCK_ID(j);
			}
		}
	}

	locations = (BlockLocation *)palloc(sizeof(BlockLocation) * (*block_num));
	if (NULL == locations)
	{
		return NULL;
	}

	//Fill in per-block host/name/topology strings from the metadata store
	k = entry->first_block_id;
	for (i=0;i<(*block_num);i++)
	{
		MetadataHdfsBlockInfo *block_info = GET_BLOCK(k);

		locations[i].corrupt = 0;
		locations[i].numOfNodes = block_info->node_num;
		locations[i].hosts = (char **)palloc(sizeof(char *) * locations[i].numOfNodes);
		locations[i].names= (char **)palloc(sizeof(char *) * locations[i].numOfNodes);
		locations[i].topologyPaths = (char **)palloc(sizeof(char *) * locations[i].numOfNodes);

		for (j=0;j<locations[i].numOfNodes;j++)
		{
			metadata_block_info = GetMetadataBlockInfo(METADATA_BLOCK_INFO_TYPE_HOSTS, block_info->hosts, j);
			if (NULL == metadata_block_info)
			{
				goto err;
			}
			locations[i].hosts[j] = pstrdup(metadata_block_info);

			metadata_block_info = GetMetadataBlockInfo(METADATA_BLOCK_INFO_TYPE_NAMES, block_info->names, j);
			if (NULL == metadata_block_info)
			{
				goto err;
			}
			locations[i].names[j] = pstrdup(metadata_block_info);

			metadata_block_info = GetMetadataBlockInfo(METADATA_BLOCK_INFO_TYPE_TOPOLOGYPATHS, block_info->topologyPaths, j);
			if (NULL == metadata_block_info)
			{
				goto err;
			}
			locations[i].topologyPaths[j] = pstrdup(metadata_block_info);
		}

		locations[i].offset = block_info->offset;
		if (i == (*block_num) -1)
		{
			// last block: clip to the requested file size
			locations[i].length = last_block_length;
		}
		else
		{
			locations[i].length = block_info->length;
		}

		k = NEXT_BLOCK_ID(k);
	}

	entry->last_access_time = time(NULL);

	return locations;

err:
	//Release everything allocated so far before failing
	FreeHdfsFileBlockLocations(locations, *block_num);
	return NULL;
}