static void event_lpm_allow_sleep(UNUSED_ATTR void *context) { lpm_allow_bt_device_sleep(); }
/*******************************************************************************
**
** Function        bt_hc_worker_thread
**
** Description     Main worker thread
**
** Returns         void *
**
*******************************************************************************/
static void *bt_hc_worker_thread(void *arg)
{
    uint16_t events;
    HC_BT_HDR *p_msg, *p_next_msg;

    ALOGI("bt_hc_worker_thread started");
    prctl(PR_SET_NAME, (unsigned long)"bt_hc_worker", 0, 0, 0);
    tx_cmd_pkts_pending = FALSE;

    raise_priority_a2dp(TASK_HIGH_HCI_WORKER);

    while (lib_running)
    {
        /* Wait until at least one event bit has been posted by another thread */
        pthread_mutex_lock(&hc_cb.mutex);
        while (ready_events == 0)
        {
            pthread_cond_wait(&hc_cb.cond, &hc_cb.mutex);
        }
        events = ready_events;
        ready_events = 0;
        pthread_mutex_unlock(&hc_cb.mutex);

        if ((events & HC_EVENT_RX) && (p_hci_if->rcv != NULL))
        {
            p_hci_if->rcv();

            if ((tx_cmd_pkts_pending == TRUE) && (num_hci_cmd_pkts > 0))
            {
                /* Got HCI Cmd Credits from Controller.
                 * Prepare to send prior pending Cmd packets in the
                 * following HC_EVENT_TX session.
                 */
                events |= HC_EVENT_TX;
            }
        }

        if (events & HC_EVENT_PRELOAD)
        {
            p_userial_if->open(USERIAL_PORT_1);

            /* Calling vendor-specific part */
            if (bt_vnd_if)
            {
                bt_vnd_if->op(BT_VND_OP_FW_CFG, NULL);
            }
            else
            {
                if (bt_hc_cbacks)
                    bt_hc_cbacks->preload_cb(NULL, BT_HC_PRELOAD_FAIL);
            }
        }

        if (events & HC_EVENT_POSTLOAD)
        {
            /* Start with SCO related H/W configuration, if SCO configuration
             * is required. Then follow with read requests to get the
             * ACL data length for both BR/EDR and LE.
             */
            int result = -1;

            /* Calling vendor-specific part */
            if (bt_vnd_if)
                result = bt_vnd_if->op(BT_VND_OP_SCO_CFG, NULL);

            if (result == -1)
                p_hci_if->get_acl_max_len();
        }

        if (events & HC_EVENT_TX)
        {
            /*
             * We will go through every packet in the tx queue.
             * Fine to clear tx_cmd_pkts_pending.
             */
            tx_cmd_pkts_pending = FALSE;
            HC_BT_HDR *sending_msg_que[64];
            int sending_msg_count = 0;
            int sending_hci_cmd_pkts_count = 0;

            utils_lock();
            p_next_msg = tx_q.p_first;
            while (p_next_msg && sending_msg_count <
                   (int)(sizeof(sending_msg_que)/sizeof(sending_msg_que[0])))
            {
                if ((p_next_msg->event & MSG_EVT_MASK) == MSG_STACK_TO_HC_HCI_CMD)
                {
                    /*
                     * If we have used up the controller's outstanding HCI command
                     * credits (normally 1), skip all HCI command packets in
                     * the queue.
                     * The pending command packets will be sent once the controller
                     * gives us credits back through a CommandCompleteEvent or
                     * CommandStatusEvent.
                     */
                    if ((tx_cmd_pkts_pending == TRUE) ||
                        (sending_hci_cmd_pkts_count >= num_hci_cmd_pkts))
                    {
                        tx_cmd_pkts_pending = TRUE;
                        p_next_msg = utils_getnext(p_next_msg);
                        continue;
                    }
                    sending_hci_cmd_pkts_count++;
                }

                p_msg = p_next_msg;
                p_next_msg = utils_getnext(p_msg);
                utils_remove_from_queue_unlocked(&tx_q, p_msg);
                sending_msg_que[sending_msg_count++] = p_msg;
            }
            utils_unlock();

            int i;
            for (i = 0; i < sending_msg_count; i++)
                p_hci_if->send(sending_msg_que[i]);

            if (tx_cmd_pkts_pending == TRUE)
                BTHCDBG("Used up Tx Cmd credits");
        }

        /* Low power mode control events */
        if (events & HC_EVENT_LPM_ENABLE)
        {
            lpm_enable(TRUE);
        }

        if (events & HC_EVENT_LPM_DISABLE)
        {
            lpm_enable(FALSE);
        }

        if (events & HC_EVENT_LPM_IDLE_TIMEOUT)
        {
            lpm_wake_deassert();
        }

        if (events & HC_EVENT_LPM_ALLOW_SLEEP)
        {
            lpm_allow_bt_device_sleep();
        }

        if (events & HC_EVENT_LPM_WAKE_DEVICE)
        {
            lpm_wake_assert();
        }

        if (events & HC_EVENT_EPILOG)
        {
            /* Calling vendor-specific part */
            if (bt_vnd_if)
                bt_vnd_if->op(BT_VND_OP_EPILOG, NULL);
            else
                break;  // equivalent to HC_EVENT_EXIT
        }

        if (events & HC_EVENT_EXIT)
            break;
    }

    ALOGI("bt_hc_worker_thread exiting");
    lib_running = 0;

    pthread_exit(NULL);

    return NULL;    // compiler friendly
}