// //delete all keys from the media for the given pool id // int NVM_KV_Pool_Del_Manager::delete_pool(int pool_id, bool validate_pool_id_on_media) { uint64_t max_trim_size_per_iov = 0; uint64_t trim_len = 0; uint32_t num_iovs = 0; nvm_block_range_t *current_range = NULL; uint32_t num_ranges_found = 0; uint32_t num_ranges = 0; uint32_t found_pool_id; NVM_KV_Store *kv_store = get_store(); nvm_kv_store_device_t *device = kv_store->get_store_device(); uint32_t pool_bits = kv_store->get_layout()->get_pool_bits(); uint32_t pool_mask = (1 << pool_bits) - 1; uint32_t pool_hash; uint32_t sector_size = device->capabilities.nvm_sector_size; int default_pool_id = kv_store->get_pool_mgr()->get_default_poolid(); nvm_iovec_t *iovec = get_iovec(); uint64_t iter_filter_mask = 0; int ret_code = NVM_SUCCESS; max_trim_size_per_iov = device->capabilities.nvm_atomic_write_multiplicity * device->capabilities.nvm_max_trim_size_per_iov; //setup the iterator parameters m_iter.range_to_iterate.start_lba = m_usr_data_start_lba; m_iter.range_to_iterate.length = m_usr_data_max_lba; if (pool_id == -1) { //if deleting all user created pools using pool id -1, //disable filter mask and filter pattern //save the old range iterator's filter mask value and //set the filter mask to 0 for now iter_filter_mask = m_iter.filters.filter_mask; m_iter.filters.filter_mask = 0; //set filter pattern to 0 as well m_iter.filters.filter_pattern = 0; } else { m_iter.filters.filter_pattern = kv_store->get_pool_mgr()->get_poolid_hash(pool_id); } //iterate through the whole user data area while (true) { ret_code = nvm_logical_range_iterator(device->nvm_handle, &m_iter); if (ret_code == -1) { ret_code = -NVM_ERR_INTERNAL_FAILURE; goto end_kv_delete_all_keys; } num_ranges_found = ret_code; if (num_ranges_found > 0) { num_ranges = 0; current_range = m_iter.ranges; while (num_ranges < num_ranges_found) { if (validate_pool_id_on_media) { ret_code = is_valid_for_del(current_range->start_lba, &found_pool_id); if (ret_code < 
0) { goto end_kv_delete_all_keys; } else if ((pool_id != -1 && pool_id != found_pool_id) || (pool_id == -1 && found_pool_id == default_pool_id)) { //if pool id is not -1, skip the key that does not //belong to this pool //or if deleting all user created pools using pool id //-1, skip the key of the default pool num_ranges++; current_range++; continue; } } else { //if deleting all user pools, skip the key belong to the //default pool if (pool_id == -1) { pool_hash = current_range->start_lba & pool_mask; if (pool_hash == default_pool_id) { num_ranges++; current_range++; continue; } } } //check if trim length is greater than max_trim_size_per_iov trim_len = current_range->length * sector_size; if (trim_len > max_trim_size_per_iov) { fprintf(stderr, "Error: Corrupted key\n"); ret_code = -NVM_ERR_INTERNAL_FAILURE; goto end_kv_delete_all_keys; } iovec[num_iovs].iov_base = 0; iovec[num_iovs].iov_len = trim_len; iovec[num_iovs].iov_lba = current_range->start_lba; iovec[num_iovs].iov_opcode = NVM_IOV_TRIM; num_iovs++; num_ranges++; current_range++; //batch delete the keys once the iovec has been filled if (num_iovs == device->capabilities.nvm_max_num_iovs) { if ((ret_code = kv_store->batch_delete(iovec, num_iovs)) != NVM_SUCCESS) { goto end_kv_delete_all_keys; } num_iovs = 0; } } } if (num_ranges_found < m_iter.max_ranges) { if (num_iovs) { ret_code = kv_store->batch_delete(iovec, num_iovs); } goto end_kv_delete_all_keys; } } end_kv_delete_all_keys: if (pool_id == -1) { //reset the range iterator's filter mask back to original m_iter.filters.filter_mask = iter_filter_mask; } return ret_code; }
/* Drain CHANNEL's pending write data into its socket.

   Repeatedly asks the application's write callback for data (when none is
   pending) and writev()s it to CHANNEL->FD until the socket would block,
   the application has nothing more to write, or an error occurs.

   Returns TRUE when the channel's desire-to-write state may have changed
   (writing stopped, or an error shut it down); FALSE when the socket simply
   would not accept more data right now.  */
static pc_bool_t perform_write(pc_channel_t *channel)
{
    while (1)
    {
        struct iovec *iov;
        int iovcnt;
        ssize_t amt;
        int saved_errno;

        if (channel->pending_iov == NULL)
        {
            pc_error_t *err;

            /* Ask the application for some data to write.  */
            err = channel->callbacks->write_cb(
                &channel->pending_iov, &channel->pending_iovcnt,
                channel, channel->cb_baton,
                channel->ctx->cctx->callback_scratch);
            pc_pool_clear(channel->ctx->cctx->callback_scratch);
            /* ### what to do with an error...  */
            pc_error_handled(err);

            /* The application may state that it has nothing further
               to write.  */
            if (channel->pending_iov == NULL)
            {
                /* Stop writing.  */
                channel->desire_write = FALSE;
                return TRUE;
            }

            /* We have original data, without a partial write.  */
            channel->pending_buf = NULL;
        }

        /* Juggle around the iovecs as necessary to find data to write.
           Note that IOV might point into CALLBACK_SCRATCH.  */
        get_iovec(&iov, &iovcnt, channel);

        do
        {
            amt = writev(channel->fd, iov, iovcnt);

            /* The socket should be immediately writeable, but maybe
               we'll need to try a couple times.  */
        } while (amt == -1 && errno == EINTR);

        /* BUGFIX: capture errno before maybe_free_iovec(), which may call
           library code (e.g. free()) that clobbers errno and would make
           the EAGAIN/ECONNRESET classification below unreliable.  */
        saved_errno = errno;

        /* Done with the iovec. We can (later) use AMT to adjust all the
           data within CHANNEL->PENDING_*.  */
        maybe_free_iovec(channel, iov, iovcnt);

        if (amt == 0
            || (amt == -1 && (saved_errno == EAGAIN
                              || saved_errno == EWOULDBLOCK)))
        {
            /* Nothing was written. No need to change PENDING_IOV. Just
               return, signalling no change in desire to write.  */
            return FALSE;
        }
        if (amt == -1)
        {
            if (saved_errno == ECONNRESET)
            {
                /* ### signal the problem somehow  */
            }
            else
            {
                /* ### what others errors, and how to signal?  */
            }

            /* Stop writing to this socket.  */
            channel->desire_write = FALSE;
            return TRUE;
        }

        /* Adjust the pending data, possibly emptying it.  */
        adjust_pending(channel, amt);

        /* Loop to see if we can write more data into the socket.  */
    }

    /* NOTREACHED */
}