/* Allocate a raw object of `size` bytes in the data heap and stamp its
   header word. It is up to the caller to fill in the object's fields in a
   meaningful fashion! */
inline object *factorvm::allot_object(header header, cell size)
{
#ifdef GC_DEBUG
	/* Debug stress mode: force a collection before every allocation */
	if(!gc_off)
		gc();
#endif
	object *obj;
	/* Objects that fit in the nursery (minus the reserved buffer zone)
	   are bump-allocated there */
	if(nursery.size - allot_buffer_zone > size)
	{
		/* If there is insufficient room, collect the nursery.
		   NOTE(review): no re-check after the collection — presumably
		   a nursery collection always empties the nursery; confirm */
		if(nursery.here + allot_buffer_zone + size > nursery.end)
			garbage_collection(data->nursery(),false,0);

		/* Pointer-bump allocation, keeping `here` 8-byte aligned */
		cell h = nursery.here;
		nursery.here = h + align8(size);
		obj = (object *)h;
	}
	/* If the object is bigger than the nursery, allocate it in tenured
	   space */
	else
	{
		zone *tenured = &data->generations[data->tenured()];

		/* If tenured space does not have enough room, collect */
		if(tenured->here + size > tenured->end)
		{
			gc();
			/* Re-load: the collection may have moved the zone */
			tenured = &data->generations[data->tenured()];
		}

		/* If it still won't fit, grow the heap (growing collection
		   requests `size` extra bytes) */
		if(tenured->here + size > tenured->end)
		{
			garbage_collection(data->tenured(),true,size);
			tenured = &data->generations[data->tenured()];
		}

		obj = allot_zone(tenured,size);

		/* Allows initialization code to store old->new pointers
		   without hitting the write barrier in the common case of
		   a nursery allocation */
		write_barrier(obj);
	}

	obj->h = header;
	return obj;
}
/**
 * Wrap the given item/key in a fresh node and merge that singleton
 * into the heap.  Returns the node so the caller can later decrease
 * its key.  Triggers the heap's incremental garbage collection pass
 * after every insertion.
 */
strict_fibonacci_node* pq_insert( strict_fibonacci_heap *queue, item_type item, key_type key )
{
    strict_fibonacci_node *fresh = pq_alloc_node( queue->map, STRICT_NODE_FIB );
    strict_fibonacci_node *upper, *lower;

    ITEM_ASSIGN( fresh->item, item );
    fresh->key = key;

    /* a lone node is its own sibling and its own queue neighbor */
    fresh->left = fresh;
    fresh->right = fresh;
    fresh->q_prev = fresh;
    fresh->q_next = fresh;

    if( queue->root != NULL )
    {
        /* meld the singleton with the existing tree: the smaller key
           becomes the parent, the other node is linked beneath it and
           enqueued for later restructuring */
        choose_order_pair( fresh, queue->root, &upper, &lower );
        link( queue, upper, lower );
        queue->root = upper;
        enqueue_node( queue, lower );
        post_meld_reduction( queue );
    }
    else
        queue->root = fresh;

    queue->size++;
    garbage_collection( queue );

    return fresh;
}
/* Remove the minimum node (the root) from the heap and return its key.
   Returns 0 when the heap is empty — note that 0 is therefore ambiguous
   with a genuine key of 0. */
key_type pq_delete_min( strict_fibonacci_heap *queue )
{
    if( pq_empty( queue ) )
        return 0;

    key_type key = queue->root->key;
    strict_fibonacci_node *current, *new_root, *old_root;
    int i, j;

    old_root = queue->root;
    if( old_root->left_child == NULL )
    {
        /* Root has no children: the heap becomes empty */
        old_root = queue->root;
        if( is_active( queue, old_root ) )
            convert_to_passive( queue, old_root );
        queue->root = NULL;
    }
    else
    {
        /* Pick the minimum-key child as the replacement root and detach
           it from its siblings and the node queue */
        new_root = select_new_root( queue );
        remove_from_siblings( queue, new_root );
        dequeue_node( queue, new_root );
        queue->root = new_root;

        /* Roots are passive by invariant */
        if( is_active( queue, new_root ) )
            convert_to_passive( queue, new_root );
        if( is_active( queue, old_root ) )
            convert_to_passive( queue, old_root );

        /* Re-link every remaining child of the departing root under the
           new root (link() removes it from old_root's child list, which
           is what makes this loop terminate) */
        while( old_root->left_child != NULL )
            link( queue, new_root, old_root->left_child );

        /* Process two nodes from the queue; for each, link up to two of
           its passive children under the new root — the bounded cleanup
           step of strict Fibonacci heap delete-min */
        for( i = 0; i < 2; i++ )
        {
            current = consume_node( queue );
            if( current != NULL )
            {
                for( j = 0; j < 2; j++ )
                {
                    if( current->left_child != NULL && !is_active( queue, current->left_child->left ) )
                        link( queue, new_root, current->left_child->left );
                    else
                        break;
                }
            }
        }
    }

    pq_free_node( queue->map, STRICT_NODE_FIB, old_root );
    post_delete_min_reduction( queue );
    garbage_collection( queue );

    queue->size--;
    return key;
}
virtual int write_pkt(p_pkt_buffer& buf) { if (buf->is_empty()) { if (next_pkt_ready()) { rx_deliver_next_pkt(buf); return buf->len; } else if (first_pkt_time_out()) { last_fw_id = rx_pkt_queue.begin()->first-1; rx_deliver_next_pkt(buf); return buf->len; } } WRITE(super, buf); if (use_fixed_timeout == false) { update_rx_pkt_timeout(); } buf->id = get_packet_field<T_pkt_id>(buf); if (buf->id <= last_fw_id) { #ifdef VERBOSE printf("Discarding (to late) pkt of %lu bytes and id %u, last_fw_id %u\n", buf->len, buf->id, last_fw_id); #endif buf->clear(); return 0; } if (buf->id == next_id) { ++next_id; } if (last_fw_id + 1 == buf->id) { #ifdef VERBOSE printf("Forwading pkt of %lu bytes and id %u\n", buf->len, buf->id); #endif ++last_fw_id; return buf->len; } rx_store_pkt(buf); if (next_pkt_ready()) { rx_deliver_next_pkt(buf); } garbage_collection(); return buf->len; }
/* Advance the per-bank write pointer and return the next virtual page
   number to program.  When the current block's data pages are exhausted,
   the block's lpn list is persisted to its last page and a new free block
   is selected (running garbage collection if none is available). */
static UINT32 assign_new_write_vpn(UINT32 const bank)
{
    UINT32 write_vpn;
    UINT32 vblock;

    write_vpn = get_cur_write_vpn(bank);
    vblock = write_vpn / PAGES_PER_BLK;

    /* NOTE: if next new write page's offset is the last page offset of
       vblock (i.e. PAGES_PER_BLK - 1) — the current page being at offset
       PAGES_PER_BLK - 2 means the only page left is the last one, which
       is reserved for metadata.  (Review: confirm the -2 vs -1 intent
       against the rest of the FTL.) */
    if ((write_vpn % PAGES_PER_BLK) == (PAGES_PER_BLK - 2))
    {
        /* Because of the flash controller limitation (accessing the
           spare/OOB area is prohibited), the block's lpn list is
           persistently written into the last page of vblock instead. */
        mem_copy(TEMP_BUF(bank), g_misc_meta[bank].lpn_list_of_cur_vblock, sizeof(UINT32) * PAGES_PER_BLK);
        /* fix minor bug: count this metadata write */
        misc_w++;
        nand_page_ptprogram(bank, vblock, PAGES_PER_BLK - 1, 0,
                            ((sizeof(UINT32) * PAGES_PER_BLK + BYTES_PER_SECTOR - 1 ) / BYTES_PER_SECTOR),
                            TEMP_BUF(bank));
        mem_set_sram(g_misc_meta[bank].lpn_list_of_cur_vblock, 0x00000000, sizeof(UINT32) * PAGES_PER_BLK);
        inc_full_blk_cnt(bank);

        /* do garbage collection if necessary */
        if (is_full_all_blks(bank))
        {
GC:         /* GC installs a fresh write block; report its write vpn */
            garbage_collection(bank);
            return get_cur_write_vpn(bank);
        }
        /* Scan forward for the next block that is not completely valid
           (vcount == VC_MAX means every page is valid / block unusable) */
        do
        {
            vblock++;
            if(vblock == VBLKS_PER_BANK)
            {
                uart_printf(" vblock == VBLKS_PER_BANK");
                /* wrapped the whole bank without finding a block: GC */
                goto GC;
            }
        }while (get_vcount(bank, vblock) == VC_MAX);
    }

    /* write page -> next block (offset 0) if we moved to a new vblock,
       otherwise just the next page of the current block */
    if (vblock != (write_vpn / PAGES_PER_BLK))
    {
        write_vpn = vblock * PAGES_PER_BLK;
    }
    else
    {
        write_vpn++;
    }
    set_new_write_vpn(bank, write_vpn);

    return write_vpn;
}
/* Merge two heaps into a freshly allocated heap and destroy the inputs.
   The larger heap's auxiliary structures (queue, active record, rank and
   fix lists) are preserved; the smaller heap's nodes are demoted.
   NOTE(review): both `a` and `b` appear to be assumed non-empty — the
   queue splice and choose_order_pair dereference root/q_head of both
   without NULL checks; confirm callers guarantee this. */
strict_fibonacci_heap* pq_meld( strict_fibonacci_heap *a, strict_fibonacci_heap *b )
{
    strict_fibonacci_heap *new_heap = pq_create( a->map );
    strict_fibonacci_heap *big, *small;
    strict_fibonacci_node *big_head, *big_tail, *small_head, *small_tail;
    strict_fibonacci_node *parent, *child;

    /* pick which heap to preserve: keep the bigger one's structures */
    if( a->size < b->size )
    {
        big = b;
        small = a;
    }
    else
    {
        big = a;
        small = b;
    }

    /* adopt the big heap's bookkeeping wholesale */
    new_heap->size = big->size + small->size;
    new_heap->q_head = big->q_head;
    new_heap->active = big->active;
    new_heap->rank_list = big->rank_list;
    new_heap->fix_list[0] = big->fix_list[0];
    new_heap->fix_list[1] = big->fix_list[1];

    /* clearing the flag demotes all of the small heap's active nodes */
    if( small->active != NULL )
        small->active->flag = 0;

    /* splice the two circular node queues into one */
    big_head = big->q_head;
    big_tail = big_head->q_prev;
    small_head = small->q_head;
    small_tail = small_head->q_prev;
    big_head->q_prev = small_tail;
    small_tail->q_next = big_head;
    small_head->q_prev = big_tail;
    big_tail->q_next = small_head;

    /* actually link the two trees: smaller root key becomes the parent */
    choose_order_pair( big->root, small->root, &parent, &child );
    link( new_heap, parent, child );
    new_heap->root = parent;
    enqueue_node( new_heap, child );

    /* hand the small heap's now-orphaned structures to the collector,
       then free both input heap shells (their nodes live on) */
    release_to_garbage_collector( new_heap, small );
    free( small );
    free( big );

    garbage_collection( new_heap );

    return new_heap;
}
/* Move all free space to the end of the code heap. This is not very efficient, since it makes several passes over the code and data heaps, but we only ever do this before saving a deployed image and exiting, so performaance is not critical here */ void factor_vm::compact_code_heap() { /* Free all unreachable code blocks, don't trace contexts */ garbage_collection(data->tenured(),false,false,0); /* Figure out where the code heap blocks are going to end up */ cell size = code->compute_heap_forwarding(forwarding); /* Update word and quotation code pointers */ forward_object_xts(); /* Actually perform the compaction */ code->compact_heap(forwarding); /* Update word and quotation XTs */ fixup_object_xts(); /* Now update the free list; there will be a single free block at the end */ code->build_free_list(size); }
/* Lower the key of `node` to `new_key`.  If the heap property is not
   violated (node is the root, or the new key is still above the parent's)
   nothing structural happens; otherwise the node is re-linked with the
   root and the active/rank/loss bookkeeping is repaired. */
void pq_decrease_key( strict_fibonacci_heap *queue, strict_fibonacci_node *node, key_type new_key )
{
    strict_fibonacci_node *old_parent = node->parent;

    node->key = new_key;

    /* No violation: root node, or still >= parent's key (only a strict
       decrease below the parent forces restructuring) */
    if( old_parent == NULL || node->key > old_parent->key)
        return;

    strict_fibonacci_node *parent, *child;
    /* Pair the decreased node against the current root; the smaller key
       becomes the new root */
    choose_order_pair( node, queue->root, &parent, &child );
    link( queue, parent, child );
    queue->root = parent;
    queue->root->parent = NULL;

    /* If the decreased node took over as root, swap queue membership:
       the root stays out of the queue, the old root goes in */
    if( parent == node )
    {
        dequeue_node( queue, parent );
        enqueue_node( queue, child );
    }

    /* Active-node bookkeeping: the cut costs the old parent one rank,
       and the cut node becomes an active root if it wasn't already */
    if( is_active( queue, node ) )
    {
        if( is_active( queue, old_parent ) )
            decrease_rank( queue, old_parent );
        if( node->type != STRICT_TYPE_ROOT )
            convert_active_to_root( queue, node );
    }

    /* Losing an active child increases the old parent's loss counter */
    if( is_active( queue, old_parent ) && old_parent->type != STRICT_TYPE_ROOT )
        increase_loss( queue, old_parent );

    post_decrease_key_reduction( queue );
    garbage_collection( queue );
}
/* Periodic checkpoint timer callback: verify we are on the replica's
   hashed-access thread, kick off a checkpoint, then reclaim stale state. */
void replica::on_checkpoint_timer()
{
    check_hashed_access();
    init_checkpoint();
    garbage_collection();
}
/* Rebuild connectivity information for all dirty items.
 *
 * First purges invalid items, then runs a nearest-neighbour visitor over
 * every dirty item, in parallel when there are enough items to justify
 * the thread overhead.  Progress is reported through m_progressReporter
 * when one is attached. */
void CN_CONNECTIVITY_ALGO::searchConnections()
{
#ifdef CONNECTIVITY_DEBUG
    printf("Search start\n");
#endif

#ifdef PROFILE
    /* NOTE: this local profiling counter shadows any global
       garbage_collection symbol within this function */
    PROF_COUNTER garbage_collection( "garbage-collection" );
#endif

    /* Drop invalidated items; RemoveInvalidItems unlinks them and hands
       back the pointers for us to delete */
    std::vector<CN_ITEM*> garbage;
    garbage.reserve( 1024 );

    m_itemList.RemoveInvalidItems( garbage );

    for( auto item : garbage )
        delete item;

#ifdef PROFILE
    garbage_collection.Show();
    PROF_COUNTER search_basic( "search-basic" );
#endif

    /* Collect the items that actually need re-scanning */
    std::vector<CN_ITEM*> dirtyItems;
    std::copy_if( m_itemList.begin(), m_itemList.end(), std::back_inserter( dirtyItems ),
            [] ( CN_ITEM* aItem ) { return aItem->Dirty(); } );

    if( m_progressReporter )
    {
        m_progressReporter->SetMaxProgress( dirtyItems.size() );
        m_progressReporter->KeepRefreshing();
    }

    if( m_itemList.IsDirty() )
    {
        /* Roughly one thread per 8 dirty items, capped at hardware
           concurrency — avoids spawning threads for tiny workloads */
        size_t parallelThreadCount = std::min<size_t>( std::thread::hardware_concurrency(),
                ( dirtyItems.size() + 7 ) / 8 );

        /* Work-stealing index shared by all workers */
        std::atomic<size_t> nextItem( 0 );
        std::vector<std::future<size_t>> returns( parallelThreadCount );

        auto conn_lambda = [&nextItem, &dirtyItems] ( CN_LIST* aItemList,
                PROGRESS_REPORTER* aReporter) -> size_t
        {
            for( size_t i = nextItem++; i < dirtyItems.size(); i = nextItem++ )
            {
                CN_VISITOR visitor( dirtyItems[i] );
                aItemList->FindNearby( dirtyItems[i], visitor );

                if( aReporter )
                    aReporter->AdvanceProgress();
            }

            return 1;
        };

        if( parallelThreadCount <= 1 )
            conn_lambda( &m_itemList, m_progressReporter );
        else
        {
            for( size_t ii = 0; ii < parallelThreadCount; ++ii )
                returns[ii] = std::async( std::launch::async, conn_lambda, &m_itemList,
                        m_progressReporter );

            for( size_t ii = 0; ii < parallelThreadCount; ++ii )
            {
                // Here we balance returns with a 100ms timeout to allow UI updating
                std::future_status status;

                do
                {
                    if( m_progressReporter )
                        m_progressReporter->KeepRefreshing();

                    status = returns[ii].wait_for( std::chrono::milliseconds( 100 ) );
                } while( status != std::future_status::ready );
            }
        }

        if( m_progressReporter )
            m_progressReporter->KeepRefreshing();
    }

#ifdef PROFILE
    search_basic.Show();
#endif

    m_itemList.ClearDirtyFlags();

#ifdef CONNECTIVITY_DEBUG
    printf("Search end\n");
#endif
}