Example no. 1
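/*
 * Runs on the BT work queue once the controller preload has completed:
 * bring up the mandatory core stack (BTU, BTM, L2CAP, SDP), the optional
 * stack components and BTA, notify the JNI thread that initialization
 * succeeded, and register the dequeue callbacks for the BTU message and
 * alarm queues on the work queue thread's reactor.
 */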
void btu_task_start_up(UNUSED_ATTR void *context) {
  BT_TRACE(TRACE_LAYER_BTU, TRACE_TYPE_API,
      "btu_task pending for preload complete event");

  LOG_INFO(LOG_TAG, "Bluetooth chip preload is complete");

  BT_TRACE(TRACE_LAYER_BTU, TRACE_TYPE_API,
      "btu_task received preload complete event");

  /* Initialize the mandatory core stack control blocks
     (BTU, BTM, L2CAP, and SDP)
   */
  btu_init_core();

  /* Initialize any optional stack components */
  BTE_InitStack();

  bta_sys_init();

  /* Initialise platform trace levels at this point as BTE_InitStack() and bta_sys_init()
   * reset the control blocks and preset the trace level with XXX_INITIAL_TRACE_LEVEL
   */
#if ( BT_USE_TRACES==TRUE )
  module_init(get_module(BTE_LOGMSG_MODULE));
#endif

  // Inform the bt jni thread that initialization is ok.
  btif_transfer_context(btif_init_ok, 0, NULL, 0, NULL);

  fixed_queue_register_dequeue(btu_bta_msg_queue,
      thread_get_reactor(bt_workqueue_thread),
      btu_bta_msg_ready,
      NULL);

  fixed_queue_register_dequeue(btu_hci_msg_queue,
      thread_get_reactor(bt_workqueue_thread),
      btu_hci_msg_ready,
      NULL);

  fixed_queue_register_dequeue(btu_general_alarm_queue,
      thread_get_reactor(bt_workqueue_thread),
      btu_general_alarm_ready,
      NULL);

  fixed_queue_register_dequeue(btu_oneshot_alarm_queue,
      thread_get_reactor(bt_workqueue_thread),
      btu_oneshot_alarm_ready,
      NULL);

  fixed_queue_register_dequeue(btu_l2cap_alarm_queue,
      thread_get_reactor(bt_workqueue_thread),
      btu_l2cap_alarm_ready,
      NULL);
}
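/*
 * Invalidator kernel thread. It sleeps on bc_invalidator_wait until there
 * is invalidation work to do (or the thread is asked to stop), invalidates
 * clean blocks to replenish the pool of invalid entries, and drains any
 * still-pending invalidations before exiting.
 */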
int cache_invalidator_kthread(void *__bc)
{
	struct bittern_cache *bc = (struct bittern_cache *)__bc;

	set_user_nice(current, S_INVALIDATOR_THREAD_NICE);

	BT_TRACE(BT_LEVEL_TRACE0, bc, NULL, NULL, NULL, NULL,
		 "enter, nice=%d", S_INVALIDATOR_THREAD_NICE);

	while (!kthread_should_stop()) {
		int ret;

		ASSERT(bc != NULL);
		ASSERT_BITTERN_CACHE(bc);

		ret = wait_event_interruptible(bc->bc_invalidator_wait,
					       (cache_invalidator_has_work
						(bc) || kthread_should_stop()));
		if (signal_pending(current))
			flush_signals(current);

		cache_invalidate_clean_blocks(bc);

		schedule();
	}

	/*
	 * wait for any pending invalidations to complete before quitting
	 */
	while (atomic_read(&bc->bc_pending_invalidate_requests) != 0) {
		int ret;

		ret = wait_event_interruptible(bc->bc_invalidator_wait,
			atomic_read(&bc->bc_pending_invalidate_requests) == 0);
		if (signal_pending(current))
			flush_signals(current);
		BT_TRACE(BT_LEVEL_TRACE0, bc, NULL, NULL, NULL, NULL,
			 "wait: kthread_should_stop=%d, has_work=%d, pending=%d",
			 kthread_should_stop(),
			 cache_invalidator_has_work(bc),
			 atomic_read(&bc->bc_pending_invalidate_requests));
	}

	BT_TRACE(BT_LEVEL_TRACE0, bc, NULL, NULL, NULL, NULL, "exit");

	bc->bc_invalidator_task = NULL;
	return 0;
}
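/*
 * The code which creates and stops this thread is not part of this
 * listing. A minimal sketch using the standard kthread API (from
 * <linux/kthread.h>) is shown below; the helper names and the thread
 * name format string are assumptions, not Bittern's actual code.
 */
static int cache_invalidator_start_sketch(struct bittern_cache *bc)
{
	struct task_struct *task;

	task = kthread_run(cache_invalidator_kthread, bc,
			   "b_invalidator/%s", bc->bc_name);
	if (IS_ERR(task))
		return PTR_ERR(task);
	bc->bc_invalidator_task = task;
	return 0;
}

static void cache_invalidator_stop_sketch(struct bittern_cache *bc)
{
	/*
	 * kthread_stop() sets kthread_should_stop(), wakes the thread and
	 * waits for cache_invalidator_kthread() to return.
	 */
	if (bc->bc_invalidator_task != NULL)
		kthread_stop(bc->bc_invalidator_task);
}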
Example no. 3
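/*
 * Delayed-work handler that periodically rewrites the pmem cache header.
 * If the update fails, the cache is put into ES_ERROR_FAIL_ALL so every
 * subsequent request fails; the work unconditionally rearms itself to run
 * again in 30 seconds.
 */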
static void pmem_header_update_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct bittern_cache *bc;
	int ret;

	bc = container_of(dwork,
			  struct bittern_cache,
			  bc_pmem_update_work);
	ASSERT(bc != NULL);
	ASSERT_BITTERN_CACHE(bc);
	M_ASSERT(bc->bc_pmem_update_workqueue != NULL);

	if (bc->error_state == ES_NOERROR) {
		BT_TRACE(BT_LEVEL_TRACE2, bc, NULL, NULL, NULL, NULL, "bc=%p", bc);
		ret = pmem_header_update(bc, 0);

		/* should make this a common function */
		if (ret != 0) {
			printk_err("%s: cannot update header: %d. will fail all future requests\n",
				   bc->bc_name,
				   ret);
			bc->error_state = ES_ERROR_FAIL_ALL;
		}
	}

	schedule_delayed_work(&bc->bc_pmem_update_work,
			      msecs_to_jiffies(30000));
}
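/*
 * Where bc_pmem_update_work is first armed and later torn down is not
 * shown in this listing. A minimal sketch with the standard workqueue API
 * follows; the helper names, workqueue name and flags are assumptions.
 */
static int pmem_header_update_start_sketch(struct bittern_cache *bc)
{
	bc->bc_pmem_update_workqueue = alloc_workqueue("b_pmu/%s",
						       WQ_MEM_RECLAIM,
						       1,
						       bc->bc_name);
	if (bc->bc_pmem_update_workqueue == NULL)
		return -ENOMEM;
	INIT_DELAYED_WORK(&bc->bc_pmem_update_work, pmem_header_update_worker);
	queue_delayed_work(bc->bc_pmem_update_workqueue,
			   &bc->bc_pmem_update_work,
			   msecs_to_jiffies(30000));
	return 0;
}

static void pmem_header_update_stop_sketch(struct bittern_cache *bc)
{
	/* stop the self-rearming work before destroying its workqueue */
	cancel_delayed_work_sync(&bc->bc_pmem_update_work);
	destroy_workqueue(bc->bc_pmem_update_workqueue);
	bc->bc_pmem_update_workqueue = NULL;
}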
void cache_invalidate_clean_blocks(struct bittern_cache *bc)
{
	int did_work = 0;

	ASSERT(bc != NULL);
	ASSERT_BITTERN_CACHE(bc);

	BT_TRACE(BT_LEVEL_TRACE2, bc, NULL, NULL, NULL, NULL,
		 "entr: bc_min_invalid_count=%d, bc_invalid_blocks=%d, %d/%d",
		 bc->bc_invalidator_conf_min_invalid_count,
		 atomic_read(&bc->bc_invalid_entries),
		 bc->bc_invalidator_work_count,
		 bc->bc_invalidator_no_work_count);

	while (cache_invalidator_has_work_schmitt(bc)) {
		struct cache_block *cache_block;
		int ret;

		ret = cache_get_clean(bc, &cache_block);
		BT_TRACE(BT_LEVEL_TRACE1, bc, NULL, cache_block, NULL, NULL,
			 "kthread_should_stop=%d, has_work=%d, pending=%d",
			 kthread_should_stop(),
			 cache_invalidator_has_work(bc),
			 atomic_read(&bc->bc_pending_invalidate_requests));
		if (ret == CACHE_GET_RET_HIT_IDLE) {
			/* found a clean block, start async invalidation */
			ASSERT(cache_block != NULL);
			cache_invalidate_block_io_start(bc, cache_block);
			atomic_inc(&bc->bc_invalidations_invalidator);
			did_work = 1;
		} else {
			/* no blocks, bail out */
			break;
		}
	}
	if (did_work)
		bc->bc_invalidator_work_count++;
	else
		bc->bc_invalidator_no_work_count++;

	BT_TRACE(BT_LEVEL_TRACE2, bc, NULL, NULL, NULL, NULL,
		 "exit: bc_min_invalid_count=%d, bc_invalid_blocks=%d, %d/%d",
		 bc->bc_invalidator_conf_min_invalid_count,
		 atomic_read(&bc->bc_invalid_entries),
		 bc->bc_invalidator_work_count,
		 bc->bc_invalidator_no_work_count);
}
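/*
 * cache_invalidator_has_work() and cache_invalidator_has_work_schmitt()
 * are not shown in this listing. Judging from the fields traced above,
 * a plausible shape is a Schmitt-trigger pair: start invalidating once
 * the count of invalid (free) entries drops below
 * bc_invalidator_conf_min_invalid_count, and keep going until it is back
 * above that minimum plus a margin, so the thread does not oscillate
 * around a single threshold. The names and margin below are illustrative
 * assumptions only.
 */
static inline int invalidator_below_low_watermark_sketch(struct bittern_cache *bc)
{
	return atomic_read(&bc->bc_invalid_entries) <
	       bc->bc_invalidator_conf_min_invalid_count;
}

static inline int invalidator_below_high_watermark_sketch(struct bittern_cache *bc)
{
	/* hypothetical hysteresis margin above the configured minimum */
	return atomic_read(&bc->bc_invalid_entries) <
	       bc->bc_invalidator_conf_min_invalid_count + 64;
}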
void sm_pwrite_miss_copy_to_device_end(struct bittern_cache *bc,
				       struct work_item *wi,
				       int err)
{
	struct bio *bio = wi->wi_original_bio;
	struct cache_block *cache_block = wi->wi_cache_block;

	M_ASSERT_FIXME(err == 0);

	M_ASSERT(bio != NULL);
	ASSERT(bio_is_request_single_cache_block(bio));
	ASSERT(cache_block->bcb_sector ==
	       bio_sector_to_cache_block_sector(bio));
	ASSERT(bio == wi->wi_original_bio);
	ASSERT(cache_block->bcb_state ==
	       S_CLEAN_P_WRITE_MISS_CPT_DEVICE_END);
	ASSERT(wi->wi_original_cache_block == NULL);

	BT_TRACE(BT_LEVEL_TRACE2, bc, wi, cache_block, bio, NULL,
		 "copy-to-device-endio");

	atomic_dec(&bc->bc_pending_cached_device_requests);

	/*
	 * for writethrough we commit to cache and then we are done
	 */
	cache_state_transition3(bc,
				cache_block,
				TS_P_WRITE_MISS_WT,
				S_CLEAN_P_WRITE_MISS_CPT_DEVICE_END,
				S_CLEAN_P_WRITE_MISS_CPT_CACHE_END);

	pmem_data_put_page_write(bc,
				 cache_block,
				 &wi->wi_pmem_ctx,
				 wi, /*callback context */
				 cache_put_page_write_callback,
				 S_CLEAN);
}
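/*
 * Completion side of a block invalidation: move the cache block back to
 * the invalid (free) state, account for the completed request, release
 * the work_item, and wake up both deferred requests and the invalidator
 * thread.
 */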
/*! \todo does this really belong here and not in cache_getput ? */
void cache_invalidate_block_io_end(struct bittern_cache *bc,
				   struct work_item *wi,
				   struct cache_block *cache_block)
{
	ASSERT(cache_block != NULL);
	ASSERT_BITTERN_CACHE(bc);
	ASSERT_WORK_ITEM(wi, bc);
	ASSERT_CACHE_BLOCK(cache_block, bc);
	ASSERT(wi->wi_cache_block == cache_block);
	ASSERT(wi->wi_original_bio == NULL);
	ASSERT(wi->wi_cloned_bio == NULL);
	ASSERT(wi->wi_io_xid != 0);

	BT_TRACE(BT_LEVEL_TRACE1, bc, wi, cache_block, NULL, NULL,
		 "invalidate done");
	ASSERT(cache_block->bcb_state == S_CLEAN_INVALIDATE_END ||
	       cache_block->bcb_state == S_DIRTY_INVALIDATE_END);
	ASSERT(is_sector_number_valid(cache_block->bcb_sector));

	if (cache_block->bcb_state == S_DIRTY_INVALIDATE_END)
		cache_move_to_invalid(bc, cache_block, 1);
	else
		cache_move_to_invalid(bc, cache_block, 0);

	cache_timer_add(&bc->bc_timer_invalidations, wi->wi_ts_started);

	work_item_free(bc, wi);

	atomic_inc(&bc->bc_completed_requests);
	atomic_inc(&bc->bc_completed_invalidations);
	atomic_dec(&bc->bc_pending_invalidate_requests);

	/*
	 * wakeup possible waiters
	 */
	wakeup_deferred(bc);
	wake_up_interruptible(&bc->bc_invalidator_wait);
}
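/*
 * Start an asynchronous invalidation of a clean or dirty block: record the
 * initial state transition under the entries and block locks, allocate and
 * set up the work_item and its pmem context, account for the pending
 * request, and hand the block to the cache state machine.
 */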
/*! \todo does this really belong here and not in cache_getput ? */
void cache_invalidate_block_io_start(struct bittern_cache *bc,
				     struct cache_block *cache_block)
{
	unsigned long flags;
	unsigned long cache_flags;
	struct work_item *wi;
	int val;
	int ret;

	BT_TRACE(BT_LEVEL_TRACE1, bc, NULL, cache_block, NULL, NULL, "enter");
	ASSERT_BITTERN_CACHE(bc);
	ASSERT_CACHE_BLOCK(cache_block, bc);

	ASSERT(cache_block->bcb_state == S_CLEAN ||
	       cache_block->bcb_state == S_DIRTY);
	ASSERT(cache_block->bcb_cache_transition ==
	       TS_NONE);
	ASSERT(is_sector_number_valid(cache_block->bcb_sector));
	BT_TRACE(BT_LEVEL_TRACE1, bc, NULL, cache_block, NULL, NULL,
		 "invalidating clean block id #%d", cache_block->bcb_block_id);

	spin_lock_irqsave(&bc->bc_entries_lock, flags);
	spin_lock_irqsave(&cache_block->bcb_spinlock, cache_flags);

	/*
	 * S_CLEAN -> S_CLEAN_INVALIDATE_START or
	 * S_DIRTY -> S_DIRTY_INVALIDATE_START
	 */
	if (cache_block->bcb_state == S_CLEAN)
		cache_state_transition_initial(bc, cache_block,
				TS_CLEAN_INVALIDATION_WTWB,
				S_CLEAN_INVALIDATE_START);
	else
		cache_state_transition_initial(bc, cache_block,
				TS_DIRTY_INVALIDATION_WB,
				S_DIRTY_INVALIDATE_START);

	spin_unlock_irqrestore(&cache_block->bcb_spinlock, cache_flags);
	spin_unlock_irqrestore(&bc->bc_entries_lock, flags);

	/*
	 * allocate work_item and initialize it
	 */
	wi = work_item_allocate(bc,
				cache_block,
				NULL,
				(WI_FLAG_BIO_NOT_CLONED |
				 WI_FLAG_XID_USE_CACHE_BLOCK));
	M_ASSERT_FIXME(wi != NULL);
	ASSERT_WORK_ITEM(wi, bc);
	ASSERT(wi->wi_io_xid != 0);
	ASSERT(wi->wi_io_xid == cache_block->bcb_xid);
	ASSERT(wi->wi_original_bio == NULL);
	ASSERT(wi->wi_cloned_bio == NULL);
	ASSERT(wi->wi_cache == bc);

	ret = pmem_context_setup(bc,
				 bc->bc_kmem_threads,
				 cache_block,
				 NULL,
				 &wi->wi_pmem_ctx);
	M_ASSERT_FIXME(ret == 0);

	wi->wi_ts_started = current_kernel_time_nsec();

	val = atomic_inc_return(&bc->bc_pending_invalidate_requests);
	atomic_set_if_higher(&bc->bc_highest_pending_invalidate_requests, val);

	/*
	 * kick off the state machine to invalidate this block.
	 * cache_invalidate_block_io_end() is called once the invalidation
	 * completes.
	 */
	work_item_add_pending_io(bc,
				 wi,
				 "invalidate",
				 cache_block->bcb_sector,
				 WRITE);
	ASSERT(wi->wi_cache_block == cache_block);
	cache_state_machine(bc, wi, 0);
}
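/*
 * atomic_set_if_higher() is a Bittern helper used above to maintain the
 * high-water mark of pending invalidation requests; its definition is not
 * part of this listing. A straightforward compare-and-swap sketch of what
 * such a helper could look like (an assumption, not Bittern's code):
 */
static inline void atomic_set_if_higher_sketch(atomic_t *v, int newval)
{
	int cur = atomic_read(v);

	while (newval > cur) {
		int prev = atomic_cmpxchg(v, cur, newval);

		if (prev == cur)
			break;		/* installed the new maximum */
		cur = prev;		/* lost the race, retry against winner */
	}
}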
void sm_pwrite_miss_copy_from_device_end(struct bittern_cache *bc,
					 struct work_item *wi,
					 int err)
{
	struct bio *bio = wi->wi_original_bio;
	struct cache_block *cache_block = wi->wi_cache_block;
	uint128_t hash_data;
	char *cache_vaddr;
	struct page *cache_page;

	M_ASSERT_FIXME(err == 0);

	M_ASSERT(bio != NULL);
	ASSERT((wi->wi_flags & WI_FLAG_BIO_CLONED) != 0);
	ASSERT(wi->wi_original_bio != NULL);
	ASSERT(bio_is_request_single_cache_block(bio));
	ASSERT(cache_block->bcb_sector ==
	       bio_sector_to_cache_block_sector(bio));
	ASSERT(cache_block->bcb_state ==
	       S_CLEAN_P_WRITE_MISS_CPF_DEVICE_END ||
	       cache_block->bcb_state ==
	       S_DIRTY_P_WRITE_MISS_CPF_DEVICE_END);
	ASSERT(wi->wi_original_cache_block == NULL);

	BT_TRACE(BT_LEVEL_TRACE2, bc, wi, cache_block, bio, NULL, "endio");

	cache_vaddr = pmem_context_data_vaddr(&wi->wi_pmem_ctx);
	cache_page = pmem_context_data_page(&wi->wi_pmem_ctx);

	atomic_dec(&bc->bc_pending_cached_device_requests);

	/*
	 * we can check the original hash
	 */
	cache_track_hash_check_buffer(bc, cache_block, cache_vaddr);

	/*
	 * copy to cache from bio, aka userland writes
	 */
	bio_copy_to_cache(wi, bio, &hash_data);

	/* update the block hash and the tracked hash */
	cache_block->bcb_hash_data = hash_data;
	cache_track_hash_set(bc, cache_block, cache_block->bcb_hash_data);

	ASSERT(wi->wi_original_cache_block == NULL);

	if (cache_block->bcb_state ==
	    S_CLEAN_P_WRITE_MISS_CPF_DEVICE_END) {
		int val;

		BT_TRACE(BT_LEVEL_TRACE2, bc, wi, cache_block, bio,
			 wi->wi_cloned_bio, "copy-to-device");

		atomic_inc(&bc->bc_write_cached_device_requests);
		val = atomic_inc_return(&bc->bc_pending_cached_device_requests);
		atomic_set_if_higher(
				&bc->bc_highest_pending_cached_device_requests,
				val);

		ASSERT_BITTERN_CACHE(bc);
		ASSERT_CACHE_BLOCK(cache_block, bc);
		ASSERT_WORK_ITEM(wi, bc);

		cache_state_transition3(bc,
					cache_block,
					TS_P_WRITE_MISS_WT,
					S_CLEAN_P_WRITE_MISS_CPF_DEVICE_END,
					S_CLEAN_P_WRITE_MISS_CPT_DEVICE_END);

		/*
		 * we are in the first state -- process context
		 */
		M_ASSERT(!in_irq() && !in_softirq());
		wi->wi_ts_workqueue = current_kernel_time_nsec();
		cached_dev_do_make_request(bc,
					   wi,
					   WRITE, /* datadir */
					   false); /* do not set original bio */

	} else {

		BT_TRACE(BT_LEVEL_TRACE2, bc, wi, cache_block, bio,
			 wi->wi_cloned_bio, "copy-to-cache");
		/*
		 * for writeback we commit to cache and then we are done
		 */
		cache_state_transition3(bc,
					cache_block,
					TS_P_WRITE_MISS_WB,
					S_DIRTY_P_WRITE_MISS_CPF_DEVICE_END,
					S_DIRTY_P_WRITE_MISS_CPT_CACHE_END);

		pmem_data_put_page_write(bc,
					 cache_block,
					 &wi->wi_pmem_ctx,
					 wi, /*callback context */
					 cache_put_page_write_callback,
					 S_DIRTY);

	}
}
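/*
 * Final step of the partial-write-miss path: the merged data has been
 * committed to the cache, so move the block back to S_CLEAN (writethrough)
 * or S_DIRTY (writeback), update timers and request counters, free the
 * work_item and complete the original bio.
 */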
void sm_pwrite_miss_copy_to_cache_end(struct bittern_cache *bc,
				      struct work_item *wi,
				      int err)
{
	struct bio *bio = wi->wi_original_bio;
	struct cache_block *cache_block = wi->wi_cache_block;
	enum cache_state original_state = cache_block->bcb_state;
	unsigned long cache_flags;

	M_ASSERT_FIXME(err == 0);

	M_ASSERT(bio != NULL);

	ASSERT((wi->wi_flags & WI_FLAG_BIO_CLONED) != 0);
	ASSERT(wi->wi_original_bio != NULL);
	ASSERT(bio_is_request_single_cache_block(bio));
	ASSERT(cache_block->bcb_sector ==
	       bio_sector_to_cache_block_sector(bio));
	ASSERT(bio == wi->wi_original_bio);
	ASSERT(cache_block->bcb_state ==
	       S_CLEAN_P_WRITE_MISS_CPT_CACHE_END ||
	       cache_block->bcb_state ==
	       S_DIRTY_P_WRITE_MISS_CPT_CACHE_END);
	ASSERT(wi->wi_original_cache_block == NULL);

	BT_TRACE(BT_LEVEL_TRACE2, bc, wi, cache_block, bio, NULL,
		 "copy-to-cache-end");

	ASSERT(wi->wi_original_cache_block == NULL);

	ASSERT_CACHE_STATE(cache_block);
	ASSERT_CACHE_BLOCK(cache_block, bc);

	if (cache_block->bcb_state == S_CLEAN_P_WRITE_MISS_CPT_CACHE_END) {
		spin_lock_irqsave(&cache_block->bcb_spinlock, cache_flags);
		cache_state_transition_final(bc,
					     cache_block,
					     TS_NONE,
					     S_CLEAN);
		spin_unlock_irqrestore(&cache_block->bcb_spinlock, cache_flags);
	} else {
		ASSERT(cache_block->bcb_state ==
		       S_DIRTY_P_WRITE_MISS_CPT_CACHE_END);
		spin_lock_irqsave(&cache_block->bcb_spinlock, cache_flags);
		cache_state_transition_final(bc,
					     cache_block,
					     TS_NONE,
					     S_DIRTY);
		spin_unlock_irqrestore(&cache_block->bcb_spinlock, cache_flags);
	}

	cache_put_update_age(bc, cache_block, 1);

	cache_timer_add(&bc->bc_timer_writes, wi->wi_ts_started);
	cache_timer_add(&bc->bc_timer_write_misses, wi->wi_ts_started);
	if (original_state == S_CLEAN_P_WRITE_MISS_CPT_CACHE_END) {
		cache_timer_add(&bc->bc_timer_write_clean_misses,
				wi->wi_ts_started);
	} else {
		ASSERT(original_state == S_DIRTY_P_WRITE_MISS_CPT_CACHE_END);
		cache_timer_add(&bc->bc_timer_write_dirty_misses,
				wi->wi_ts_started);
	}

	work_item_free(bc, wi);

	atomic_dec(&bc->bc_pending_requests);
	if (bio_data_dir(bio) == WRITE) {
		atomic_dec(&bc->bc_pending_write_requests);
		atomic_inc(&bc->bc_completed_write_requests);
	} else {
		atomic_dec(&bc->bc_pending_read_requests);
		atomic_inc(&bc->bc_completed_read_requests);
	}
	atomic_inc(&bc->bc_completed_requests);
	/*
	 * wakeup possible waiters
	 */
	wakeup_deferred(bc);
	bio_endio(bio, 0);
}
Example no. 10
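/*
 * First step of the partial-write-miss path: read the full cache block
 * from the cached device so the partial write can later be merged into it,
 * moving to the writethrough or writeback CPF_DEVICE_END state before
 * issuing the read.
 */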
void sm_pwrite_miss_copy_from_device_start(struct bittern_cache *bc,
					   struct work_item *wi)
{
	struct bio *bio = wi->wi_original_bio;
	struct cache_block *cache_block = wi->wi_cache_block;
	int val;
	struct page *cache_page;

	M_ASSERT(bio != NULL);
	ASSERT((wi->wi_flags & WI_FLAG_BIO_CLONED) != 0);
	ASSERT(wi->wi_original_bio != NULL);
	ASSERT(bio_is_request_single_cache_block(bio));
	ASSERT(cache_block->bcb_sector ==
	       bio_sector_to_cache_block_sector(bio));
	ASSERT(cache_block->bcb_state ==
	       S_CLEAN_P_WRITE_MISS_CPF_DEVICE_START ||
	       cache_block->bcb_state ==
	       S_DIRTY_P_WRITE_MISS_CPF_DEVICE_START);
	ASSERT(wi->wi_original_cache_block == NULL);

	pmem_data_get_page_write(bc,
				 cache_block,
				 &wi->wi_pmem_ctx);

	cache_page = pmem_context_data_page(&wi->wi_pmem_ctx);

	atomic_inc(&bc->bc_read_cached_device_requests);
	val = atomic_inc_return(&bc->bc_pending_cached_device_requests);
	atomic_set_if_higher(&bc->bc_highest_pending_cached_device_requests,
			     val);

	BT_TRACE(BT_LEVEL_TRACE2, bc, wi, cache_block, bio, wi->wi_cloned_bio,
		 "copy-from-device");
	ASSERT_BITTERN_CACHE(bc);
	ASSERT_CACHE_BLOCK(cache_block, bc);

	if (cache_block->bcb_state == S_CLEAN_P_WRITE_MISS_CPF_DEVICE_START) {
		cache_state_transition3(bc,
					cache_block,
					TS_P_WRITE_MISS_WT,
					S_CLEAN_P_WRITE_MISS_CPF_DEVICE_START,
					S_CLEAN_P_WRITE_MISS_CPF_DEVICE_END);
	} else {
		ASSERT(cache_block->bcb_state ==
		       S_DIRTY_P_WRITE_MISS_CPF_DEVICE_START);
		cache_state_transition3(bc,
					cache_block,
					TS_P_WRITE_MISS_WB,
					S_DIRTY_P_WRITE_MISS_CPF_DEVICE_START,
					S_DIRTY_P_WRITE_MISS_CPF_DEVICE_END);
	}

	/*
	 * we are in the first state -- process context
	 */
	M_ASSERT(!in_irq() && !in_softirq());
	wi->wi_ts_workqueue = current_kernel_time_nsec();
	cached_dev_do_make_request(bc,
				   wi,
				   READ, /* datadir */
				   false); /* do not set original bio */
}