/*
 * sfs_block_inuse - check whether the block numbered @ino is marked in-use in the freemap
 */
static bool
sfs_block_inuse(struct sfs_fs *sfs, uint32_t ino) {
    if (ino != 0 && ino < sfs->super.blocks) {
        return !bitmap_test(sfs->freemap, ino);
    }
    panic("sfs_block_inuse: called out of range (0, %u) %u.\n", sfs->super.blocks, ino);
}
/**
 * Update Delta and the active set, and pack recv_buf.
 */
int ClientProc::UpdtAsDelta(int t) {
    int bid = _blk_iter_order[t];
    client_t *blk = _blks + bid;
    double *D = _recv_buf[t];

    pthread_mutex_lock(&_as_lock);
    ix_t c1 = 0, c2 = 0;
    for (ix_t j = 0; j < blk->p; j++) {
        if (!bitmap_test(j, blk->as)) {
            continue;
        }
        double d = D[c1++];
        if (d > 100) {
            bitmap_clr(j, blk->as);
            blk->nas--;
            d = 0;
        } else {
            D[c2++] = d;
        }
        blk->delta[j] = 2 * fabs(d) + .1;
    }
    pthread_mutex_unlock(&_as_lock);

#ifdef _DEBUG_
    DEBUG("t %3d, r [%d] blk [%d], bitmap nnz [%u] after updt exw",
          t, _my_rank, bid, blk->nas);
#endif
    return 0;
}
static void check_output(struct gpio_kp *kp, int out, int polarity)
{
    struct gpio_keypad_info *kpinfo = kp->keypad_info;
    int key_index;
    int in;
    int gpio;
    int changed = 0;

    key_index = out * kpinfo->ninputs;
    for (in = 0; in < kpinfo->ninputs; in++, key_index++) {
        gpio = kpinfo->input_gpios[in];
        changed = 0;
        if (gpio_get(gpio) ^ !polarity) {
            if (kp->some_keys_pressed < 3)
                kp->some_keys_pressed++;
            changed = !bitmap_set(kp->keys_pressed, key_index);
        } else {
            changed = bitmap_clear(kp->keys_pressed, key_index);
        }
        if (changed) {
            int state = bitmap_test(kp->keys_pressed, key_index);
            keys_post_event(kpinfo->keymap[key_index], state);
        }
    }

    /* sets up the right state for the next poll cycle */
    gpio = kpinfo->output_gpios[out];
    if (kpinfo->flags & GPIOKPF_DRIVE_INACTIVE)
        gpio_set(gpio, !polarity);
    else
        gpio_config(gpio, GPIO_INPUT);
}
uint32 bitmap_scan (struct bitmap *bmap, size_t num)
{
    uint32 i;

    if (bmap == NULL || num == 0)
        return BITMAP_ERROR;

    for (i = 0; i < bmap->length; i++) {
        if (bmap->bits[i] != 0xFFFFFFFF) {
            uint32 j;
            uint32 count = 0;

            for (j = 0; j < 32; j++) {
                if (bitmap_test (bmap, i * 32 + j))
                    count = 0;
                else
                    count++;
                if (count == num)
                    break;
            }
            if (count == num)
                /* j is the last bit of the run; return the run's first bit */
                return i * 32 + j - (num - 1);
        }
    }
    return BITMAP_ERROR;
}
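/*
 * Note: the scan above resets its run counter within each 32-bit word, so it
 * can only find runs of free bits that fit inside a single word. A minimal
 * sketch (not from the original source) of a variant that lets a run cross
 * word boundaries, assuming the same struct bitmap layout (length counted in
 * 32-bit words) and the bitmap_test()/BITMAP_ERROR conventions used above:
 */
uint32 bitmap_scan_across_words (struct bitmap *bmap, size_t num)
{
    uint32 total_bits, bit;
    size_t count = 0;

    if (bmap == NULL || num == 0)
        return BITMAP_ERROR;

    total_bits = bmap->length * 32;
    for (bit = 0; bit < total_bits; bit++) {
        /* A set bit breaks the run; a clear bit extends it. */
        count = bitmap_test (bmap, bit) ? 0 : count + 1;
        if (count == num)
            return bit - num + 1;   /* first bit of the run */
    }
    return BITMAP_ERROR;
}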
/**
 * Read the given page back from its swap sectors into its frame.
 */
void
swap_page_in_from_disk_to_frame (struct supplementary_page_table_entry *page)
{
  int i = 0;

  if (swap_sectors_btmp == NULL)
    {
      PANIC ("NO swap sectors bitmap available!!! :(");
    }
  else if (bitmap_test (swap_sectors_btmp, page->area_swap) == false)
    {
      PANIC ("Reading from a free swap sector!!! :(");
    }

  for (i = 0; i < NUM_OF_PER_PAGE_SECTORS; i++)
    {
      block_read (swap_partition,
                  (page->area_swap * NUM_OF_PER_PAGE_SECTORS) + i,
                  page->supplementary_frame_in_memory->base_vir_address
                    + i * BLOCK_SECTOR_SIZE);
    }

  // Reset the sector bit in swap_sectors_btmp to 0 to indicate
  // it is free now and can be used by other page swaps
  bitmap_set (swap_sectors_btmp, page->area_swap, false);

  // Set the page's swap slot to the sentinel value (block_sector_t) -1,
  // indicating it is no longer swapped out
  page->area_swap = (block_sector_t) -1;
}
/** Reserve an ID in the allocator.
 * @param alloc         Allocator to reserve in.
 * @param id            ID to reserve. */
void id_allocator_reserve(id_allocator_t *alloc, int32_t id) {
    mutex_lock(&alloc->lock);

    assert(!bitmap_test(alloc->bitmap, id));
    bitmap_set(alloc->bitmap, id);

    mutex_unlock(&alloc->lock);
}
int keys_get_state(uint16_t code)
{
    if (code >= MAX_KEYS) {
        return -1;
    }
    return bitmap_test(key_bitmap, code);
}
void
swap_remove (uint32_t swap_index)
{
  ASSERT (swap_index < total_swap_size);

  lock_acquire (&swap_lock);
  ASSERT (bitmap_test (swap_bitmap, swap_index));
  bitmap_reset (swap_bitmap, swap_index);
  lock_release (&swap_lock);
}
/** Free a previously allocated ID.
 * @param alloc         Allocator to free to.
 * @param id            ID to free. */
void id_allocator_free(id_allocator_t *alloc, int32_t id) {
    mutex_lock(&alloc->lock);

    assert(bitmap_test(alloc->bitmap, id));
    bitmap_clear(alloc->bitmap, id);

    mutex_unlock(&alloc->lock);
}
size_t bitmap_ffz(const bitmap_t *const bitmap)
{
    // Same idea as ffs, but look for the first clear bit instead of the first set bit
    if (!bitmap) return SIZE_MAX;   // error check params

    size_t i;
    size_t total_bits = bitmap->bit_count;
    // Scan from the high bit down to bit 0 for the first bit that is not set
    for (i = total_bits; i >= 1; i--) {
        if (!bitmap_test(bitmap, i - 1)) return i - 1;
    }
    return SIZE_MAX;
}
size_t block_store_write(block_store_t *const bs, const size_t block_id, const void *buffer,
                         const size_t nbytes, const size_t offset)
{
    if (bs && bs->fbm && bs->data_blocks && BLOCKID_VALID(block_id)
            && buffer && nbytes && (nbytes + offset <= BLOCK_SIZE)) {
        size_t total_offset = offset + (BLOCK_SIZE * (block_id - FBM_SIZE));
        memcpy((void *)(bs->data_blocks + total_offset), buffer, nbytes);
        block_store_errno = bitmap_test(bs->fbm, block_id) ? BS_OK : BS_FBM_REQUEST_MISMATCH;
        return nbytes;
    }
    block_store_errno = BS_FATAL;
    return 0;
}
size_t bitmap_ffs(const bitmap_t *const bitmap)
{
    // Start at the end of the array (the "front" of the bit string) and scan
    // from the high bit down to the low bit for the first set bit
    if (!bitmap) return SIZE_MAX;   // error check parameters

    int i;
    size_t total_bits = bitmap->bit_count;
    for (i = total_bits - 1; i >= 0; i--) {
        if (bitmap_test(bitmap, i)) return (size_t)i;   // test each bit in turn
    }
    return SIZE_MAX;   // return SIZE_MAX if not found
}
/* Swap a page back to frame. */
void
swap_back_to_frame (int slot_index, uint8_t *kpage)
{
  if (!lock_held_by_current_thread (&swap_table_lock))
    lock_acquire (&swap_table_lock);

  ASSERT (bitmap_test (swap_table, slot_index) == true);
  bitmap_flip (swap_table, slot_index);

  int i = 0;
  for (i = 0; i < 8; i++)
    block_read (swap_block, slot_index * 8 + i,
                kpage + i * BLOCK_SECTOR_SIZE);

  lock_release (&swap_table_lock);
}
// Need to refactor this for V3. Have this switch on FILE_BASED and have it either do
// block_mem_write or block_file_write (both with the same params), which then handles everything
// (same for read)
size_t block_store_write(block_store_t *const bs, const size_t block_id, const void *buffer,
                         const size_t nbytes, const size_t offset)
{
    if (bs && BLOCKID_VALID(block_id) && buffer && nbytes && (nbytes + offset <= BLOCK_SIZE)) {
        // Not going to forbid writing of not-in-use blocks (but we'll log it via errno)
        bitmap_set(bs->dbm, block_id);
        FLAG_SET(bs, DIRTY);
        memcpy((void *)(bs->data_blocks + BLOCK_OFFSET_POSITION(block_id, offset)), buffer, nbytes);
        bs_errno = bitmap_test(bs->fbm, block_id) ? BS_OK : BS_REQUEST_MISMATCH;
        return nbytes;
    }
    bs_errno = BS_PARAM;
    return 0;
}
size_t bitmap_ffz(const bitmap_t *const bitmap)
{
    if (!bitmap) {   // Bitmap does not exist
        return SIZE_MAX;
    }
    // Loop through all of the bits and return the first one that is not set
    for (size_t counter = 0; counter < bitmap->bit_count; ++counter) {
        if (!bitmap_test(bitmap, counter)) {
            return counter;
        }
    }
    return SIZE_MAX;   // Return SIZE_MAX if not found
}
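/*
 * Both ffz variants above depend only on bitmap_test() and the bitmap's
 * bit_count. For reference, a self-contained sketch of the bit-packing
 * convention such a test usually assumes (one bit per position, eight to a
 * byte); the struct and helper names here are illustrative, not taken from
 * either project:
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
    uint8_t *data;      /* packed bits, 8 per byte         */
    size_t bit_count;   /* number of valid bits in the map */
} mini_bitmap_t;

static bool mini_bitmap_test(const mini_bitmap_t *bm, size_t bit)
{
    /* bit / 8 selects the byte, bit % 8 selects the bit within it */
    return (bm->data[bit / 8] >> (bit % 8)) & 1u;
}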
size_t block_store_read(const block_store_t *const bs, const size_t block_id, void *buffer,
                        const size_t nbytes, const size_t offset)
{
    if (bs && BLOCKID_VALID(block_id) && buffer && nbytes && (nbytes + offset <= BLOCK_SIZE)) {
        // Not going to forbid reading of not-in-use blocks (but we'll log it via the errno)
        memcpy(buffer, bs->data_blocks + BLOCK_OFFSET_POSITION(block_id, offset), nbytes);
        bs_errno = bitmap_test(bs->fbm, block_id) ? BS_OK : BS_REQUEST_MISMATCH;
        return nbytes;
    }
    // Technically we return BS_PARAM even if the internal structure of the BS object is busted,
    // which in reality would be more of a BS_INTERNAL or a BS_FATAL... but that would add another
    // branch to everything, and technically the bs is a parameter...
    bs_errno = BS_PARAM;
    return 0;
}
/**
 * Set bit in bitmap
 *
 * @v bitmap		Bitmap
 * @v bit		Bit index
 */
void bitmap_set ( struct bitmap *bitmap, unsigned int bit ) {
	unsigned int index = BITMAP_INDEX ( bit );
	bitmap_block_t mask = BITMAP_MASK ( bit );

	DBGC ( bitmap, "Bitmap %p setting bit %d\n", bitmap, bit );

	/* Update bitmap */
	bitmap->blocks[index] |= mask;

	/* Update first gap counter */
	while ( bitmap_test ( bitmap, bitmap->first_gap ) ) {
		bitmap->first_gap++;
	}
}
void
swap_read (void *dest, uint32_t swap_index)
{
  uint32_t i;

  ASSERT (swap_index < total_swap_size);

  lock_acquire (&swap_lock);
  ASSERT (bitmap_test (swap_bitmap, swap_index));
  bitmap_reset (swap_bitmap, swap_index);
  for (i = 0u; i < blocks_per_page; i++)
    {
      block_read (swap_block, swap_index * blocks_per_page + i,
                  (char *) dest + i * BLOCK_SECTOR_SIZE);
    }
  lock_release (&swap_lock);
}
// Loads a page from swap back into main memory
void swap_in (size_t used_index, void *frame)
{
  if (!swap_block || !swap_map)
    return;   // Nothing to do if there is no swap block or swap map

  lock_acquire (&swap_lock);   // Acquire the swap lock

  if (bitmap_test (swap_map, used_index) == SWAP_FREE)
    PANIC ("Trying to swap in a free block! Kernel panicking.");   // Swapping in a free slot is a bug

  bitmap_flip (swap_map, used_index);   // Mark the slot as free again

  size_t i;
  for (i = 0; i < SECTORS_PER_PAGE; i++)
    {
      // Read each sector of the page back into the frame
      block_read (swap_block, used_index * SECTORS_PER_PAGE + i,
                  (uint8_t *) frame + i * BLOCK_SECTOR_SIZE);
    }

  lock_release (&swap_lock);   // Release the swap lock
}
void swap_in (size_t used_index, void *kaddr)
{
  if (!swap_block || !swap_map)
    return;

  lock_acquire (&swap_lock);

  /* The slot must be in use before it can be swapped in */
  if (bitmap_test (swap_map, used_index) == SWAP_FREE)
    PANIC ("Trying to swap in a free slot!");

  bitmap_flip (swap_map, used_index);

  size_t i;
  for (i = 0; i < SECTORS_PER_PAGE; i++)
    {
      block_read (swap_block, used_index * SECTORS_PER_PAGE + i,
                  (uint8_t *) kaddr + i * BLOCK_SECTOR_SIZE);
    }

  lock_release (&swap_lock);
}
void
free_slot (void *page, size_t index)
{
  /* Mark the slot as free again */
  ASSERT (bitmap_test (swap_slot_map, index));
  bitmap_flip (swap_slot_map, index);

  /* This is almost identical to the loop in pick_slot_and_swap,
     we're just going the other way */
  int i = 0;
  for (; i < SECTORS_PER_PAGE; ++i)
    {
      block_read (block_device, (index * SECTORS_PER_PAGE) + i,
                  page + (BLOCK_SECTOR_SIZE * i));
    }
}
/* Move data from a swap slot back into a frame */
void vm_swap_in (size_t idx, void *page)
{
  lock_acquire (&swap_lock);

  if (!bitmap_test (swap_partition, idx))
    {
      /* Swap slot is already free */
      lock_release (&swap_lock);
      PANIC ("Swap slot is already free\n");
    }
  bitmap_flip (swap_partition, idx);

  size_t i;
  for (i = 0; i < SECTORS_PER_PAGE; ++i)
    {
      block_read (swap_slot, (idx * SECTORS_PER_PAGE) + i,
                  page + (BLOCK_SECTOR_SIZE * i));
    }

  lock_release (&swap_lock);
}
bool block_store_request(block_store_t *const bs, const size_t block_id)
{
    if (bs && BLOCKID_VALID(block_id)) {
        if (!bitmap_test(bs->fbm, block_id)) {
            bitmap_set(bs->fbm, block_id);
            bitmap_set(bs->dbm, FBM_BLOCK_CHANGE_LOCATION(block_id));   // Set that FBM block as changed
            FLAG_SET(bs, DIRTY);
            bs_errno = BS_OK;
            return true;
        } else {
            bs_errno = BS_IN_USE;
            return false;
        }
    }
    bs_errno = BS_PARAM;
    return false;
}
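/*
 * Releasing a block would mirror the request path above: clear the block's
 * FBM bit, record the FBM change in the DBM, and mark the store dirty. This
 * is a sketch only; bitmap_reset() is assumed here as the clearing
 * counterpart to bitmap_set(), and the errno value for releasing an unused
 * block is a guess, so the real project may differ:
 */
bool block_store_release(block_store_t *const bs, const size_t block_id)
{
    if (bs && BLOCKID_VALID(block_id)) {
        if (bitmap_test(bs->fbm, block_id)) {
            bitmap_reset(bs->fbm, block_id);                            // assumed clearing call
            bitmap_set(bs->dbm, FBM_BLOCK_CHANGE_LOCATION(block_id));   // record the FBM change
            FLAG_SET(bs, DIRTY);
            bs_errno = BS_OK;
            return true;
        }
        bs_errno = BS_REQUEST_MISMATCH;   // block was not in use
        return false;
    }
    bs_errno = BS_PARAM;
    return false;
}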
/* Write the cache entry back to disk */
static void
cache_writeback (int idx)
{
  if (CACHE_DEBUG)
    printf ("writeback cache block %i to disk block %i\n",
            idx, (int) cache[idx]->bid);

  ASSERT (bitmap_test (cache_table, idx));
  ASSERT (lock_held_by_current_thread (&cache_globallock));

  /* register as writer */
  cache[idx]->writer++;

  /* if the cache block is dirty, write it back */
  if (cache[idx]->dirty)
    block_write (fs_device, cache[idx]->bid, cache[idx]->kpage);

  /* unregister as writer */
  cache[idx]->writer--;
}
/* Swaps the page stored at swap slot INDEX back into frame F.
   The frame must be locked and the page must be swapped out. */
void swap_in (void *f, size_t index)
{
  if (!swap_device || !swap_bitmap)
    {
      PANIC ("No swap partition available!");
    }

  lock_acquire (&swap_lock);
  if (bitmap_test (swap_bitmap, index) == SWAP_FREE)
    {
      lock_release (&swap_lock);
      return;
    }
  bitmap_flip (swap_bitmap, index);
  lock_release (&swap_lock);

  size_t i;
  for (i = 0; i < SECTORS_PER_PAGE; i++)
    {
      block_read (swap_device, index * SECTORS_PER_PAGE + i,
                  (uint8_t *) f + i * BLOCK_SECTOR_SIZE);
    }
}
//
// go through the m_pools_to_delete bitmap and delete each pool
//
int NVM_KV_Pool_Del_Manager::start_pool_delete(bool delete_all_pools)
{
    NVM_KV_Store *kv_store = get_store();
    uint32_t max_pools = kv_store->get_store_metadata()->max_pools;
    int ret_code = NVM_SUCCESS;

    if (delete_all_pools)
    {
        if ((ret_code = delete_pool(-1, m_validate_pool_id_on_media)) != NVM_SUCCESS)
        {
            return ret_code;
        }
        if ((ret_code = kv_store->get_pool_mgr()->clear_pool_bitmaps(m_pools_to_delete))
                != NVM_SUCCESS)
        {
            return ret_code;
        }
    }
    else
    {
        for (uint32_t i = 1; i < max_pools; i++)
        {
            if (bitmap_test(m_pools_to_delete, i))
            {
                if ((ret_code = delete_pool(i, m_validate_pool_id_on_media)) != NVM_SUCCESS)
                {
                    return ret_code;
                }
                if ((ret_code = kv_store->get_pool_mgr()->clear_pool_bitmaps(i)) != NVM_SUCCESS)
                {
                    return ret_code;
                }
            }
        }
    }

    return ret_code;
}
// Read a page from the swap table into a memory location
void get_swap (size_t idx, void *addr)
{
  // The slot must be marked as in use in the bitmap
  ASSERT (bitmap_test (swap_table, idx));

  int i;
  // Read the page's sectors from swap into addr
  for (i = 0; i < SECPP; i++)
    block_read (swap, idx * SECPP + i, addr + BLOCK_SECTOR_SIZE * i);

  // Lock the swap table
  lock_acquire (&swap_lock);
  // Remove the swap entry at position idx
  bitmap_set (swap_table, idx, false);
  // Release the lock for the swap table
  lock_release (&swap_lock);
}
void add_page_to_swapfile (struct page_info *p)
{
  lock_acquire (&swap_lock);

  // Find the first open 4KB slot in the swap file
  unsigned int index = bitmap_scan_and_flip (swapmap, 0, 1, false);
  ASSERT (index != BITMAP_ERROR);
  ASSERT (bitmap_test (swapmap, index));   // should be true, i.e. occupied

  // Write out the passed-in page to that 4KB slot.
  // We're going to need to write out eight sectors, one at a time.
  block_sector_t sector_index = index * SECTORS_PER_PAGE;
  int i;
  for (i = 0; i < SECTORS_PER_PAGE; i++)
    {
      block_sector_t target_sector = sector_index + i;
      void *source_buf = (uint8_t *) p->virtual_address + (i * BLOCK_SECTOR_SIZE);
      block_write (swap_block, target_sector, source_buf);
    }

  p->swap_info.swap_index = index;
  lock_release (&swap_lock);
}
void swap_in (size_t used_index, void *frame)
{
  if (!swap_block || !swap_map)
    {
      return;
    }

  lock_acquire (&swap_lock);

  if (bitmap_test (swap_map, used_index) == SWAP_FREE)
    {
      PANIC ("Trying to swap in a free block! Kernel panicking.");
    }
  bitmap_flip (swap_map, used_index);

  size_t i;
  for (i = 0; i < SECTORS_PER_PAGE; i++)
    {
      block_read (swap_block, used_index * SECTORS_PER_PAGE + i,
                  (uint8_t *) frame + i * BLOCK_SECTOR_SIZE);
    }

  lock_release (&swap_lock);
}
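/*
 * The inverse operation follows the same pattern in reverse: claim a free
 * slot in swap_map, then write the page out sector by sector. A minimal
 * sketch assuming the same globals (swap_block, swap_map, swap_lock) and
 * constants used in the swap_in above, plus Pintos' bitmap_scan_and_flip()
 * and block_write(); it is not the original project's code:
 */
size_t swap_out (void *frame)
{
  size_t i;
  size_t free_index;

  if (!swap_block || !swap_map)
    PANIC ("No swap partition available!");

  lock_acquire (&swap_lock);

  /* Find the first free slot and mark it as in use in one step. */
  free_index = bitmap_scan_and_flip (swap_map, 0, 1, SWAP_FREE);
  if (free_index == BITMAP_ERROR)
    PANIC ("Swap partition is full!");

  for (i = 0; i < SECTORS_PER_PAGE; i++)
    {
      block_write (swap_block, free_index * SECTORS_PER_PAGE + i,
                   (uint8_t *) frame + i * BLOCK_SECTOR_SIZE);
    }

  lock_release (&swap_lock);
  return free_index;
}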
static int psmx2_alloc_vlane(struct psmx2_fid_domain *domain, uint8_t *vl)
{
	int i;
	int id;

	fastlock_acquire(&domain->vl_lock);
	for (i = 0; i < BITMAP_SIZE; i++) {
		id = (domain->vl_alloc + i) % BITMAP_SIZE;
		if (bitmap_test(domain->vl_map, id) == 0) {
			bitmap_set(domain->vl_map, id);
			domain->vl_alloc = id + 1;
			break;
		}
	}
	fastlock_release(&domain->vl_lock);

	if (i >= BITMAP_SIZE)
		return -FI_ENOSPC;

	*vl = (uint8_t)id;
	return 0;
}
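/*
 * A matching release path would clear the lane's bit under the same lock so
 * it can be handed out again. This is a sketch only; bitmap_clear() is
 * assumed here as the counterpart to the bitmap_set() used above, and the
 * function name is illustrative rather than the provider's actual helper:
 */
static void psmx2_free_vlane(struct psmx2_fid_domain *domain, uint8_t vl)
{
	fastlock_acquire(&domain->vl_lock);
	bitmap_clear(domain->vl_map, vl);	/* assumed clearing call */
	fastlock_release(&domain->vl_lock);
}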