/*! \todo does this really belong here and not in cache_getput ? */ void cache_invalidate_block_io_end(struct bittern_cache *bc, struct work_item *wi, struct cache_block *cache_block) { ASSERT(cache_block != NULL); ASSERT_BITTERN_CACHE(bc); ASSERT_WORK_ITEM(wi, bc); ASSERT_CACHE_BLOCK(cache_block, bc); ASSERT(wi->wi_cache_block == cache_block); ASSERT(wi->wi_original_bio == NULL); ASSERT(wi->wi_cloned_bio == NULL); ASSERT(wi->wi_io_xid != 0); BT_TRACE(BT_LEVEL_TRACE1, bc, wi, cache_block, NULL, NULL, "invalidate done"); ASSERT(cache_block->bcb_state == S_CLEAN_INVALIDATE_END || cache_block->bcb_state == S_DIRTY_INVALIDATE_END); ASSERT(is_sector_number_valid(cache_block->bcb_sector)); if (cache_block->bcb_state == S_DIRTY_INVALIDATE_END) cache_move_to_invalid(bc, cache_block, 1); else cache_move_to_invalid(bc, cache_block, 0); cache_timer_add(&bc->bc_timer_invalidations, wi->wi_ts_started); work_item_free(bc, wi); atomic_inc(&bc->bc_completed_requests); atomic_inc(&bc->bc_completed_invalidations); atomic_dec(&bc->bc_pending_invalidate_requests); /* * wakeup possible waiters */ wakeup_deferred(bc); wake_up_interruptible(&bc->bc_invalidator_wait); }
/*
 * Remove the oldest completed item from the workflow and return its
 * payload.  Blocks on the consumer condition variable until at least
 * one completed item is available.
 *
 * Returns the item's data pointer (ownership passes to the caller),
 * or NULL if the dequeued slot was unexpectedly empty.  The work_item
 * wrapper itself is freed here.
 */
void *workflow_remove_item(workflow_t *wf)
{
	void *ret = NULL;
	work_item_t *item;

	pthread_mutex_lock(&wf->main_mutex);

	/* Wait until a producer publishes a completed item. */
	while (workflow_get_num_completed_items_(wf) <= 0) {
		pthread_cond_wait(&wf->consumer_cond, &wf->main_mutex);
	}

	item = array_list_remove_at(0, wf->completed_items);

	/* A completed slot was consumed; unblock waiting producers. */
	pthread_cond_broadcast(&wf->producer_cond);
	pthread_mutex_unlock(&wf->main_mutex);

	if (item) {
		ret = item->data;
		work_item_free(item);
	}

	return ret;
}
void sm_pwrite_miss_copy_to_cache_end(struct bittern_cache *bc, struct work_item *wi, int err) { struct bio *bio = wi->wi_original_bio; struct cache_block *cache_block = wi->wi_cache_block; enum cache_state original_state = cache_block->bcb_state; unsigned long cache_flags; M_ASSERT_FIXME(err == 0); M_ASSERT(bio != NULL); ASSERT((wi->wi_flags & WI_FLAG_BIO_CLONED) != 0); ASSERT(wi->wi_original_bio != NULL); cache_block = wi->wi_cache_block; ASSERT(bio != NULL); ASSERT(bio_is_request_single_cache_block(bio)); ASSERT(cache_block->bcb_sector == bio_sector_to_cache_block_sector(bio)); ASSERT(bio == wi->wi_original_bio); ASSERT(cache_block->bcb_state == S_CLEAN_P_WRITE_MISS_CPT_CACHE_END || cache_block->bcb_state == S_DIRTY_P_WRITE_MISS_CPT_CACHE_END); ASSERT(wi->wi_original_cache_block == NULL); BT_TRACE(BT_LEVEL_TRACE2, bc, wi, cache_block, bio, NULL, "copy-to-cache-end"); ASSERT(wi->wi_original_cache_block == NULL); ASSERT_CACHE_STATE(cache_block); ASSERT_CACHE_BLOCK(cache_block, bc); if (cache_block->bcb_state == S_CLEAN_P_WRITE_MISS_CPT_CACHE_END) { spin_lock_irqsave(&cache_block->bcb_spinlock, cache_flags); cache_state_transition_final(bc, cache_block, TS_NONE, S_CLEAN); spin_unlock_irqrestore(&cache_block->bcb_spinlock, cache_flags); } else { ASSERT(cache_block->bcb_state == S_DIRTY_P_WRITE_MISS_CPT_CACHE_END); spin_lock_irqsave(&cache_block->bcb_spinlock, cache_flags); cache_state_transition_final(bc, cache_block, TS_NONE, S_DIRTY); spin_unlock_irqrestore(&cache_block->bcb_spinlock, cache_flags); } cache_put_update_age(bc, cache_block, 1); cache_timer_add(&bc->bc_timer_writes, wi->wi_ts_started); cache_timer_add(&bc->bc_timer_write_misses, wi->wi_ts_started); if (original_state == S_CLEAN_P_WRITE_MISS_CPT_CACHE_END) { cache_timer_add(&bc->bc_timer_write_clean_misses, wi->wi_ts_started); } else { ASSERT(original_state == S_DIRTY_P_WRITE_MISS_CPT_CACHE_END); cache_timer_add(&bc->bc_timer_write_dirty_misses, wi->wi_ts_started); } work_item_free(bc, wi); 
atomic_dec(&bc->bc_pending_requests); if (bio_data_dir(bio) == WRITE) { atomic_dec(&bc->bc_pending_write_requests); atomic_inc(&bc->bc_completed_write_requests); } else { atomic_dec(&bc->bc_pending_read_requests); atomic_inc(&bc->bc_completed_read_requests); } atomic_inc(&bc->bc_completed_requests); /* * wakeup possible waiters */ wakeup_deferred(bc); bio_endio(bio, 0); }