/*******************************************************************************
**
** Function         GKI_getpoolbuf
**
** Description      Called by an application to get a free buffer from
**                  a specific buffer pool.
**
**                  Note: If there are no more buffers available from the pool,
**                        the public buffers are searched for an available buffer.
**
** Parameters       pool_id - (input) pool ID to get a buffer out of.
**
** Returns          A pointer to the buffer, or NULL if none available
**
*******************************************************************************/
void *GKI_getpoolbuf (UINT8 pool_id)
{
    FREE_QUEUE_T  *Q;
    BUFFER_HDR_T  *p_hdr;
    tGKI_COM_CB   *p_cb = &gki_cb.com;

    if (pool_id >= GKI_NUM_TOTAL_BUF_POOLS)
    {
        GKI_exception(GKI_ERROR_GETPOOLBUF_BAD_QID, "getpoolbuf bad pool");
        return (NULL);
    }

    /* Make sure the buffers aren't disturbed til finished with allocation */
    GKI_disable();

    Q = &p_cb->freeq[pool_id];
    if (Q->cur_cnt < Q->total)
    {
// btla-specific ++
#ifdef GKI_USE_DEFERED_ALLOC_BUF_POOLS
        if (Q->p_first == 0 && gki_alloc_free_queue(pool_id) != TRUE)
        {
            /* release the GKI lock taken by GKI_disable() above before
            ** returning on the allocation failure path */
            GKI_enable();
            return NULL;
        }
#endif
// btla-specific --
        p_hdr = Q->p_first;
        Q->p_first = p_hdr->p_next;

        if (!Q->p_first)
            Q->p_last = NULL;

        if (++Q->cur_cnt > Q->max_cnt)
            Q->max_cnt = Q->cur_cnt;

        GKI_enable();

        p_hdr->task_id = GKI_get_taskid();

        p_hdr->status  = BUF_STATUS_UNLINKED;
        p_hdr->p_next  = NULL;
        p_hdr->Type    = 0;

        return ((void *) ((UINT8 *)p_hdr + BUFFER_HDR_SIZE));
    }

    /* If here, no buffers in the specified pool */
    GKI_enable();

    /* try for free buffers in public pools */
    return (GKI_getbuf(p_cb->freeq[pool_id].size));
}
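/*
** Illustrative usage sketch (not part of the original sources): how a caller
** would typically pair GKI_getpoolbuf() with GKI_freebuf(). The pool ID
** EXAMPLE_POOL_ID and the payload write are hypothetical; real code would use
** one of the pool IDs defined by the stack's buffer configuration.
*/
#include "gki.h"

#define EXAMPLE_POOL_ID     3           /* hypothetical pool ID for illustration */

static void example_use_pool_buffer (void)
{
    /* Ask for a buffer from the specific pool; falls back to the public pools */
    UINT8 *p_payload = (UINT8 *) GKI_getpoolbuf (EXAMPLE_POOL_ID);

    if (p_payload == NULL)
        return;                         /* no buffer available anywhere */

    /* The returned pointer addresses the payload area just past the hidden
    ** BUFFER_HDR_T header, so it can be written directly */
    p_payload[0] = 0x42;

    /* Every successful allocation must eventually be returned to its pool */
    GKI_freebuf (p_payload);
}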
/******************************************************************************* ** ** Function GKI_read_mbox ** ** Description Called by applications to read a buffer from one of ** the task mailboxes. A task can only read its own mailbox. ** ** Parameters: mbox - (input) mailbox ID to read (0, 1, 2, or 3) ** ** Returns NULL if the mailbox was empty, else the address of a buffer ** *******************************************************************************/ void *GKI_read_mbox (UINT8 mbox) { UINT8 task_id = GKI_get_taskid(); void *p_buf = NULL; BUFFER_HDR_T *p_hdr; if ((task_id >= GKI_MAX_TASKS) || (mbox >= NUM_TASK_MBOX)) return (NULL); GKI_disable(); if (gki_cb.com.OSTaskQFirst[task_id][mbox]) { p_hdr = gki_cb.com.OSTaskQFirst[task_id][mbox]; gki_cb.com.OSTaskQFirst[task_id][mbox] = p_hdr->p_next; p_hdr->p_next = NULL; p_hdr->status = BUF_STATUS_UNLINKED; p_buf = (UINT8 *)p_hdr + BUFFER_HDR_SIZE; } GKI_enable(); return (p_buf); }
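/*
** Illustrative usage sketch (not part of the original sources): a task's event
** loop waiting on its mailbox 0 and draining it with GKI_read_mbox(). The loop
** body and message handling are hypothetical; only the GKI calls and the
** TASK_MBOX_0 / TASK_MBOX_0_EVT_MASK constants come from the GKI headers.
*/
#include "gki.h"

static void example_task_main_loop (void)
{
    UINT16  event;
    void    *p_msg;

    for (;;)
    {
        /* Block until any event for this task arrives */
        event = GKI_wait (0xFFFF, 0);

        if (event & TASK_MBOX_0_EVT_MASK)
        {
            /* A task may only read its own mailboxes */
            while ((p_msg = GKI_read_mbox (TASK_MBOX_0)) != NULL)
            {
                /* ... dispatch the message here ... */
                GKI_freebuf (p_msg);
            }
        }
    }
}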
/*******************************************************************************
**
** Function         GKI_stop_timer
**
** Description      An application can call this function to stop one of
**                  its four general purpose timers. There is no harm in
**                  stopping a timer that is already stopped.
**
** Parameters       tnum            - (input) timer number to be stopped (TIMER_0,
**                                    TIMER_1, TIMER_2, or TIMER_3)
** Returns          void
**
*******************************************************************************/
void GKI_stop_timer (UINT8 tnum)
{
    UINT8  task_id = GKI_get_taskid();

    GKI_disable();

    switch (tnum)
    {
#if (GKI_NUM_TIMERS > 0)
        case TIMER_0:
            gki_cb.com.OSTaskTmr0R[task_id] = 0;
            gki_cb.com.OSTaskTmr0 [task_id] = 0;
            break;
#endif

#if (GKI_NUM_TIMERS > 1)
        case TIMER_1:
            gki_cb.com.OSTaskTmr1R[task_id] = 0;
            gki_cb.com.OSTaskTmr1 [task_id] = 0;
            break;
#endif

#if (GKI_NUM_TIMERS > 2)
        case TIMER_2:
            gki_cb.com.OSTaskTmr2R[task_id] = 0;
            gki_cb.com.OSTaskTmr2 [task_id] = 0;
            break;
#endif

#if (GKI_NUM_TIMERS > 3)
        case TIMER_3:
            gki_cb.com.OSTaskTmr3R[task_id] = 0;
            gki_cb.com.OSTaskTmr3 [task_id] = 0;
            break;
#endif
    }

    if (gki_timers_is_timer_running() == FALSE)
    {
        if (gki_cb.com.p_tick_cb)
        {
#if (defined(GKI_DELAY_STOP_SYS_TICK) && (GKI_DELAY_STOP_SYS_TICK > 0))
            /* if inactivity delay timer is not running */
            if ((gki_cb.com.system_tick_running) && (gki_cb.com.OSTicksTilStop == 0))
            {
                /* set inactivity delay timer */
                /* when timer expires, system tick will be stopped */
                gki_cb.com.OSTicksTilStop = GKI_DELAY_STOP_SYS_TICK;
            }
#else
            gki_cb.com.system_tick_running = FALSE;
            gki_cb.com.p_tick_cb(FALSE); /* stop system tick */
#endif
        }
    }

    GKI_enable();
}
/******************************************************************************* ** ** Function bta_av_co_audio_get_sbc_config ** ** Description Retrieves the SBC codec configuration. If the codec in use ** is not SBC, return the default SBC codec configuration. ** ** Returns TRUE if codec is SBC, FALSE otherwise ** *******************************************************************************/ BOOLEAN bta_av_co_audio_get_sbc_config(tA2D_SBC_CIE *p_sbc_config, UINT16 *p_minmtu) { BOOLEAN result = FALSE; UINT8 index, jndex; tBTA_AV_CO_PEER *p_peer; tBTA_AV_CO_SINK *p_sink; APPL_TRACE_EVENT1("bta_av_co_cb.codec_cfg.id : codec 0x%x", bta_av_co_cb.codec_cfg.id); /* Minimum MTU is by default very large */ *p_minmtu = 0xFFFF; GKI_disable(); if (bta_av_co_cb.codec_cfg.id == BTIF_AV_CODEC_SBC) { if (A2D_ParsSbcInfo(p_sbc_config, bta_av_co_cb.codec_cfg.info, FALSE) == A2D_SUCCESS) { for (index = 0; index < BTA_AV_CO_NUM_ELEMENTS(bta_av_co_cb.peers); index++) { p_peer = &bta_av_co_cb.peers[index]; if (p_peer->opened) { if (p_peer->mtu < *p_minmtu) { *p_minmtu = p_peer->mtu; } for (jndex = 0; jndex < p_peer->num_sup_snks; jndex++) { p_sink = &p_peer->snks[jndex]; if (p_sink->codec_type == A2D_MEDIA_CT_SBC) { /* Update the bitpool boundaries of the current config */ p_sbc_config->min_bitpool = BTA_AV_CO_MAX(p_sink->codec_caps[BTA_AV_CO_SBC_MIN_BITPOOL_OFF], p_sbc_config->min_bitpool); p_sbc_config->max_bitpool = BTA_AV_CO_MIN(p_sink->codec_caps[BTA_AV_CO_SBC_MAX_BITPOOL_OFF], p_sbc_config->max_bitpool); APPL_TRACE_EVENT2("bta_av_co_audio_get_sbc_config : sink bitpool min %d, max %d", p_sbc_config->min_bitpool, p_sbc_config->max_bitpool); break; } } } } result = TRUE; } } if (!result) { /* Not SBC, still return the default values */ *p_sbc_config = btif_av_sbc_default_config; } GKI_enable(); return result; }
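/*
** Illustrative usage sketch (not part of the original sources): querying the
** current SBC configuration and the smallest peer MTU before building media
** packets. Assumes the surrounding bta_av_co / A2DP headers (tA2D_SBC_CIE and
** the APPL_TRACE_* macros) are in scope; the caller and the use made of the
** returned values are hypothetical.
*/
static void example_query_sbc_config (void)
{
    tA2D_SBC_CIE  sbc_config;
    UINT16        min_mtu;

    if (bta_av_co_audio_get_sbc_config (&sbc_config, &min_mtu))
    {
        /* Codec in use is SBC: bitpool range already clamped to the sinks */
        APPL_TRACE_DEBUG2 ("SBC bitpool %d..%d",
                           sbc_config.min_bitpool, sbc_config.max_bitpool);
    }
    else
    {
        /* Not SBC: sbc_config holds the default SBC parameters instead */
        APPL_TRACE_DEBUG0 ("codec is not SBC, defaults returned");
    }

    /* min_mtu is usable either way as an upper bound for outgoing frames */
}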
/*******************************************************************************
**
** Function         GKI_remove_from_queue
**
** Description      Dequeue a buffer from the middle of the queue
**
** Parameters:      p_q     - (input) pointer to a queue.
**                  p_buf   - (input) address of the buffer to be removed.
**
** Returns          NULL if the buffer is not in the queue, else the buffer
**
*******************************************************************************/
void *GKI_remove_from_queue (BUFFER_Q *p_q, void *p_buf)
{
    BUFFER_HDR_T  *p_prev;
    BUFFER_HDR_T  *p_buf_hdr;

    GKI_disable();

    if (p_buf == p_q->p_first)
    {
        GKI_enable();
        return (GKI_dequeue (p_q));
    }

    p_buf_hdr = (BUFFER_HDR_T *)((UINT8 *)p_buf - BUFFER_HDR_SIZE);
    p_prev    = (BUFFER_HDR_T *)((UINT8 *)p_q->p_first - BUFFER_HDR_SIZE);

    for ( ; p_prev; p_prev = p_prev->p_next)
    {
        /* If the previous points to this one, move the pointers around */
        if (p_prev->p_next == p_buf_hdr)
        {
            p_prev->p_next = p_buf_hdr->p_next;

            /* If we are removing the last buffer in the queue, update p_last;
            ** (p_prev + 1) is the user data area just past the previous header */
            if (p_buf == p_q->p_last)
                p_q->p_last = p_prev + 1;

            /* One less in the queue */
            p_q->count--;

            /* The buffer is now unlinked */
            p_buf_hdr->p_next = NULL;
            p_buf_hdr->status = BUF_STATUS_UNLINKED;

            GKI_enable();
            return (p_buf);
        }
    }

    GKI_enable();
    return (NULL);
}
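/*
** Illustrative usage sketch (not part of the original sources): build a small
** BUFFER_Q with GKI_init_q()/GKI_enqueue(), then pull a specific buffer out of
** the middle with GKI_remove_from_queue(). The buffer sizes are hypothetical.
*/
#include "gki.h"

static void example_remove_from_middle (void)
{
    BUFFER_Q  q;
    void      *p_a, *p_b, *p_c;

    GKI_init_q (&q);

    /* Three buffers from any public pool large enough for 32 bytes */
    p_a = GKI_getbuf (32);
    p_b = GKI_getbuf (32);
    p_c = GKI_getbuf (32);

    if (!p_a || !p_b || !p_c)
        return;                             /* allocation failed; example ends */

    GKI_enqueue (&q, p_a);
    GKI_enqueue (&q, p_b);
    GKI_enqueue (&q, p_c);

    /* Unlink the middle buffer; it comes back unlinked and can be freed */
    if (GKI_remove_from_queue (&q, p_b) == p_b)
        GKI_freebuf (p_b);

    /* Drain and free whatever is left on the queue */
    while (!GKI_queue_is_empty (&q))
        GKI_freebuf (GKI_dequeue (&q));
}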
/******************************************************************************* ** ** Function GAP_ConnReadData ** ** Description Normally not GKI aware application will call this function ** after receiving GAP_EVT_RXDATA event. ** ** Parameters: handle - Handle of the connection returned in the Open ** p_data - Data area ** max_len - Byte count requested ** p_len - Byte count received ** ** Returns BT_PASS - data read ** GAP_ERR_BAD_HANDLE - invalid handle ** GAP_NO_DATA_AVAIL - no data available ** *******************************************************************************/ UINT16 GAP_ConnReadData (UINT16 gap_handle, UINT8 *p_data, UINT16 max_len, UINT16 *p_len) { tGAP_CCB *p_ccb = gap_find_ccb_by_handle (gap_handle); BT_HDR *p_buf; UINT16 copy_len; if (!p_ccb) return (GAP_ERR_BAD_HANDLE); *p_len = 0; p_buf = (BT_HDR *)GKI_getfirst (&p_ccb->rx_queue); if (!p_buf) return (GAP_NO_DATA_AVAIL); GKI_disable(); while (max_len && p_buf) { copy_len = (p_buf->len > max_len)?max_len:p_buf->len; max_len -= copy_len; *p_len += copy_len; if (p_data) { memcpy (p_data, (UINT8 *)(p_buf + 1) + p_buf->offset, copy_len); p_data += copy_len; } if (p_buf->len > copy_len) { p_buf->offset += copy_len; p_buf->len -= copy_len; break; } else { if (max_len) { p_buf = (BT_HDR *)GKI_getnext (p_buf); } GKI_freebuf (GKI_dequeue (&p_ccb->rx_queue)); } } p_ccb->rx_queue_size -= *p_len; GKI_enable(); GAP_TRACE_EVENT ("GAP_ConnReadData - rx_queue_size left=%d, *p_len=%d", p_ccb->rx_queue_size, *p_len); return (BT_PASS); }
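/*
** Illustrative usage sketch (not part of the original sources): draining a GAP
** connection after the data-available event (named GAP_EVT_RXDATA in the
** description above). Assumes gap_api.h is in scope; the local buffer size and
** the processing of the received bytes are hypothetical.
*/
static void example_gap_read_all (UINT16 gap_handle)
{
    UINT8   data[256];
    UINT16  len;

    /* Keep reading until the receive queue is empty */
    while (GAP_ConnReadData (gap_handle, data, sizeof (data), &len) == BT_PASS)
    {
        /* ... consume 'len' bytes from 'data' here ... */
    }

    /* GAP_NO_DATA_AVAIL (or GAP_ERR_BAD_HANDLE) terminates the loop */
}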
/******************************************************************************* ** ** Function GKI_freebuf ** ** Description Called by an application to return a buffer to the free pool. ** ** Parameters p_buf - (input) address of the beginning of a buffer. ** ** Returns void ** *******************************************************************************/ void GKI_freebuf (void *p_buf) { FREE_QUEUE_T *Q; BUFFER_HDR_T *p_hdr; #if (GKI_ENABLE_BUF_CORRUPTION_CHECK == TRUE) if (!p_buf || gki_chk_buf_damage(p_buf)) { GKI_exception(GKI_ERROR_BUF_CORRUPTED, "Free - Buf Corrupted"); return; } #endif p_hdr = (BUFFER_HDR_T *) ((UINT8 *)p_buf - BUFFER_HDR_SIZE); #if GKI_BUFFER_DEBUG LOGD("GKI_freebuf() freeing, %x, %x, func:%s(line=%d)", p_buf, p_hdr, p_hdr->_function, p_hdr->_line); #endif if (p_hdr->status != BUF_STATUS_UNLINKED) { GKI_exception(GKI_ERROR_FREEBUF_BUF_LINKED, "Freeing Linked Buf"); return; } if (p_hdr->q_id >= GKI_NUM_TOTAL_BUF_POOLS) { GKI_exception(GKI_ERROR_FREEBUF_BAD_QID, "Bad Buf QId"); return; } GKI_disable(); /* ** Release the buffer */ Q = &gki_cb.com.freeq[p_hdr->q_id]; if (Q->p_last) Q->p_last->p_next = p_hdr; else Q->p_first = p_hdr; Q->p_last = p_hdr; p_hdr->p_next = NULL; p_hdr->status = BUF_STATUS_FREE; p_hdr->task_id = GKI_INVALID_TASK; if (Q->cur_cnt > 0) Q->cur_cnt--; GKI_enable(); return; }
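/*
** Illustrative usage sketch (not part of the original sources): GKI_freebuf()
** refuses buffers that are still linked on a queue or mailbox, so a buffer
** must be dequeued (which marks it BUF_STATUS_UNLINKED) before it is freed.
** The queue and payload size below are hypothetical.
*/
#include "gki.h"

static void example_free_after_dequeue (void)
{
    BUFFER_Q  q;
    void      *p_buf;

    GKI_init_q (&q);

    p_buf = GKI_getbuf (64);
    if (p_buf == NULL)
        return;

    GKI_enqueue (&q, p_buf);            /* buffer is now BUF_STATUS_QUEUED */

    /* Freeing p_buf while it is still linked would raise
    ** GKI_ERROR_FREEBUF_BUF_LINKED; dequeue it first */
    p_buf = GKI_dequeue (&q);
    if (p_buf != NULL)
        GKI_freebuf (p_buf);
}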
/******************************************************************************* ** ** Function: NfcAdaptation::Initialize() ** ** Description: class initializer ** ** Returns: none ** *******************************************************************************/ void NfcAdaptation::Initialize () { const char* func = "NfcAdaptation::Initialize"; ALOGD("%s: enter", func); ALOGE("%s: ver=%s nfa=%s", func, nfca_version_string, nfa_version_string); unsigned long num; if ( !GetStrValue ( NAME_NFA_STORAGE, bcm_nfc_location, sizeof ( bcm_nfc_location ) ) ) { memset (bcm_nfc_location, 0, sizeof(bcm_nfc_location)); strncpy (bcm_nfc_location, "/data/nfc", 9); } if ( GetNumValue ( NAME_PROTOCOL_TRACE_LEVEL, &num, sizeof ( num ) ) ) ScrProtocolTraceFlag = num; if ( GetStrValue ( NAME_NFA_DM_CFG, (char*)nfa_dm_cfg, sizeof ( nfa_dm_cfg ) ) ) p_nfa_dm_cfg = ( tNFA_DM_CFG * ) &nfa_dm_cfg[0]; if ( GetNumValue ( NAME_NFA_MAX_EE_SUPPORTED, &num, sizeof ( num ) ) ) { nfa_ee_max_ee_cfg = num; ALOGD("%s: Overriding NFA_EE_MAX_EE_SUPPORTED to use %d", func, nfa_ee_max_ee_cfg); } initializeGlobalAppLogLevel (); verify_stack_non_volatile_store (); if ( GetNumValue ( NAME_PRESERVE_STORAGE, (char*)&num, sizeof ( num ) ) && (num == 1) ) ALOGD ("%s: preserve stack NV store", __FUNCTION__); else { delete_stack_non_volatile_store (FALSE); } GKI_init (); GKI_enable (); GKI_create_task ((TASKPTR)NFCA_TASK, BTU_TASK, (INT8*)"NFCA_TASK", 0, 0, (pthread_cond_t*)NULL, NULL); { AutoThreadMutex guard(mCondVar); GKI_create_task ((TASKPTR)Thread, MMI_TASK, (INT8*)"NFCA_THREAD", 0, 0, (pthread_cond_t*)NULL, NULL); mCondVar.wait(); } mHalDeviceContext = NULL; mHalCallback = NULL; memset (&mHalEntryFuncs, 0, sizeof(mHalEntryFuncs)); InitializeHalDeviceContext (); ALOGD ("%s: exit", func); }
void GKI_exception (UINT16 code, char *msg) { UINT8 task_id; int i = 0; FREE_QUEUE_T *Q; tGKI_COM_CB *p_cb = &gki_cb.com; ALOGE( "GKI_exception(): Task State Table"); for(task_id = 0; task_id < GKI_MAX_TASKS; task_id++) { ALOGE( "TASK ID [%d] task name [%s] state [%d]", task_id, gki_cb.com.OSTName[task_id], gki_cb.com.OSRdyTbl[task_id]); } ALOGE("GKI_exception %d %s", code, msg); ALOGE( "********************************************************************"); ALOGE( "* GKI_exception(): %d %s", code, msg); ALOGE( "********************************************************************"); #if 0//(GKI_DEBUG == TRUE) GKI_disable(); if (gki_cb.com.ExceptionCnt < GKI_MAX_EXCEPTION) { EXCEPTION_T *pExp; pExp = &gki_cb.com.Exception[gki_cb.com.ExceptionCnt++]; pExp->type = code; pExp->taskid = GKI_get_taskid(); strncpy((char *)pExp->msg, msg, GKI_MAX_EXCEPTION_MSGLEN - 1); } GKI_enable(); #endif if (code == GKI_ERROR_OUT_OF_BUFFERS) { for(i=0; i<p_cb->curr_total_no_of_pools; i++) { Q = &p_cb->freeq[p_cb->pool_list[i]]; if (Q !=NULL) ALOGE("GKI_exception Buffer current cnt:%x, Total:%x", Q->cur_cnt, Q->total); } } GKI_TRACE("GKI_exception %d %s done", code, msg); return; }
/*******************************************************************************
**
** Function         GKI_send_msg
**
** Description      Called by applications to send a buffer to a task
**
** Returns          Nothing
**
*******************************************************************************/
void GKI_send_msg (UINT8 task_id, UINT8 mbox, void *msg)
{
    BUFFER_HDR_T    *p_hdr;
    tGKI_COM_CB     *p_cb = &gki_cb.com;

    /* If task non-existent or not started, drop buffer */
    if ((task_id >= GKI_MAX_TASKS) || (mbox >= NUM_TASK_MBOX) || (p_cb->OSRdyTbl[task_id] == TASK_DEAD))
    {
        GKI_exception(GKI_ERROR_SEND_MSG_BAD_DEST, "Sending to unknown dest");
        GKI_freebuf (msg);
        return;
    }

#if (GKI_ENABLE_BUF_CORRUPTION_CHECK == TRUE)
    if (gki_chk_buf_damage(msg))
    {
        GKI_exception(GKI_ERROR_BUF_CORRUPTED, "Send - Buffer corrupted");
        return;
    }
#endif

    p_hdr = (BUFFER_HDR_T *) ((UINT8 *) msg - BUFFER_HDR_SIZE);

    if (p_hdr->status != BUF_STATUS_UNLINKED)
    {
        GKI_exception(GKI_ERROR_SEND_MSG_BUF_LINKED, "Send - buffer linked");
        return;
    }

    GKI_disable();

    if (p_cb->OSTaskQFirst[task_id][mbox])
        p_cb->OSTaskQLast[task_id][mbox]->p_next = p_hdr;
    else
        p_cb->OSTaskQFirst[task_id][mbox] = p_hdr;

    p_cb->OSTaskQLast[task_id][mbox] = p_hdr;

    p_hdr->p_next  = NULL;
    p_hdr->status  = BUF_STATUS_QUEUED;
    p_hdr->task_id = task_id;

    GKI_enable();

    GKI_send_event(task_id, (UINT16)EVENT_MASK(mbox));

    return;
}
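/*
** Illustrative usage sketch (not part of the original sources): allocating a
** message buffer and posting it to another task's mailbox 0 with
** GKI_send_msg(). The destination task ID, event code and payload layout are
** hypothetical; BT_HDR and the mailbox constants come from the stack headers.
*/
#include "gki.h"
#include "bt_types.h"

#define EXAMPLE_DEST_TASK   2           /* hypothetical destination task ID */
#define EXAMPLE_MSG_EVT     0x1234      /* hypothetical message event code  */

static void example_post_message (void)
{
    BT_HDR *p_msg = (BT_HDR *) GKI_getbuf (sizeof (BT_HDR) + 16);

    if (p_msg == NULL)
        return;

    p_msg->event          = EXAMPLE_MSG_EVT;
    p_msg->len            = 0;
    p_msg->offset         = 0;
    p_msg->layer_specific = 0;

    /* Ownership of the buffer passes to the receiving task; on a bad
    ** destination GKI_send_msg() frees the buffer itself */
    GKI_send_msg (EXAMPLE_DEST_TASK, TASK_MBOX_0, p_msg);
}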
/******************************************************************************* ** ** Function bta_av_co_audio_codec_reset ** ** Description Reset the current codec configuration ** ** Returns void ** *******************************************************************************/ void bta_av_co_audio_codec_reset(void) { GKI_disable(); FUNC_TRACE(); /* Reset the current configuration to SBC */ bta_av_co_cb.codec_cfg.id = BTIF_AV_CODEC_SBC; if (A2D_BldSbcInfo(A2D_MEDIA_TYPE_AUDIO, (tA2D_SBC_CIE *)&btif_av_sbc_default_config, bta_av_co_cb.codec_cfg.info) != A2D_SUCCESS) { APPL_TRACE_ERROR0("bta_av_co_audio_codec_reset A2D_BldSbcInfo failed"); } GKI_enable(); }
/******************************************************************************* ** ** Function GKI_exit_task ** ** Description This function is called to stop a GKI task. ** ** Parameters: task_id - (input) the id of the task that has to be stopped ** ** Returns void ** ** NOTE This function is NOT called by the Broadcom stack and ** profiles. If you want to use it in your own implementation, ** put specific code here to kill a task. ** *******************************************************************************/ void GKI_exit_task (UINT8 task_id) { GKI_disable(); gki_cb.com.OSRdyTbl[task_id] = TASK_DEAD; /* Destroy mutex and condition variable objects */ pthread_mutex_destroy(&gki_cb.os.thread_evt_mutex[task_id]); pthread_cond_destroy (&gki_cb.os.thread_evt_cond[task_id]); pthread_mutex_destroy(&gki_cb.os.thread_timeout_mutex[task_id]); pthread_cond_destroy (&gki_cb.os.thread_timeout_cond[task_id]); GKI_enable(); ALOGI("GKI_exit_task %d done", task_id); return; }
/******************************************************************************* ** ** Function GKI_exit_task ** ** Description This function is called to stop a GKI task. ** ** Parameters: task_id - (input) the id of the task that has to be stopped ** ** Returns void ** ** NOTE This function is NOT called by the Broadcom stack and ** profiles. If you want to use it in your own implementation, ** put specific code here to kill a task. ** *******************************************************************************/ void GKI_exit_task (UINT8 task_id) { GKI_disable(); gki_cb.com.OSRdyTbl[task_id] = TASK_DEAD; /* Destroy mutex and condition variable objects */ pthread_mutex_destroy(&gki_cb.os.thread_evt_mutex[task_id]); pthread_cond_destroy (&gki_cb.os.thread_evt_cond[task_id]); pthread_mutex_destroy(&gki_cb.os.thread_timeout_mutex[task_id]); pthread_cond_destroy (&gki_cb.os.thread_timeout_cond[task_id]); GKI_enable(); //GKI_send_event(task_id, EVENT_MASK(GKI_SHUTDOWN_EVT)); GKI_INFO("GKI_exit_task %d done", task_id); return; }
/*******************************************************************************
**
** Function         GKI_enqueue
**
** Description      Enqueue a buffer at the tail of the queue
**
** Parameters:      p_q  -  (input) pointer to a queue.
**                  p_buf - (input) address of the buffer to enqueue
**
** Returns          void
**
*******************************************************************************/
void GKI_enqueue (BUFFER_Q *p_q, void *p_buf)
{
    BUFFER_HDR_T    *p_hdr;

#if (GKI_ENABLE_BUF_CORRUPTION_CHECK == TRUE)
    if (gki_chk_buf_damage(p_buf))
    {
        GKI_exception(GKI_ERROR_BUF_CORRUPTED, "Enqueue - Buffer corrupted");
        return;
    }
#endif

    p_hdr = (BUFFER_HDR_T *) ((UINT8 *) p_buf - BUFFER_HDR_SIZE);

    if (p_hdr->status != BUF_STATUS_UNLINKED)
    {
        GKI_exception(GKI_ERROR_ENQUEUE_BUF_LINKED, "Enqueue - buf already linked");
        return;
    }

    GKI_disable();

    /* Since the queue is exposed (C vs C++), keep the pointers in exposed format */
    if (p_q->p_last)
    {
        BUFFER_HDR_T *p_last_hdr = (BUFFER_HDR_T *)((UINT8 *)p_q->p_last - BUFFER_HDR_SIZE);
        p_last_hdr->p_next = p_hdr;
    }
    else
        p_q->p_first = p_buf;

    p_q->p_last = p_buf;
    p_q->count++;

    p_hdr->p_next = NULL;
    p_hdr->status = BUF_STATUS_QUEUED;

    GKI_enable();

    return;
}
void GKI_exception (UINT16 code, char *msg) { UINT8 task_id; int i = 0; GKI_TRACE_ERROR_0( "GKI_exception(): Task State Table"); for(task_id = 0; task_id < GKI_MAX_TASKS; task_id++) { GKI_TRACE_ERROR_3( "TASK ID [%d] task name [%s] state [%d]", task_id, gki_cb.com.OSTName[task_id], gki_cb.com.OSRdyTbl[task_id]); } GKI_TRACE_ERROR_2("GKI_exception %d %s", code, msg); GKI_TRACE_ERROR_0( "********************************************************************"); GKI_TRACE_ERROR_2( "* GKI_exception(): %d %s", code, msg); GKI_TRACE_ERROR_0( "********************************************************************"); #if (GKI_DEBUG == TRUE) GKI_disable(); if (gki_cb.com.ExceptionCnt < GKI_MAX_EXCEPTION) { EXCEPTION_T *pExp; pExp = &gki_cb.com.Exception[gki_cb.com.ExceptionCnt++]; pExp->type = code; pExp->taskid = GKI_get_taskid(); strncpy((char *)pExp->msg, msg, GKI_MAX_EXCEPTION_MSGLEN - 1); } GKI_enable(); #endif GKI_TRACE_ERROR_2("GKI_exception %d %s done", code, msg); return; }
/*******************************************************************************
**
** Function         GKI_enqueue_head
**
** Description      Enqueue a buffer at the head of the queue
**
** Parameters:      p_q  -  (input) pointer to a queue.
**                  p_buf - (input) address of the buffer to enqueue
**
** Returns          void
**
*******************************************************************************/
void GKI_enqueue_head (BUFFER_Q *p_q, void *p_buf)
{
    BUFFER_HDR_T    *p_hdr;

#if (GKI_ENABLE_BUF_CORRUPTION_CHECK == TRUE)
    if (gki_chk_buf_damage(p_buf))
    {
        GKI_exception(GKI_ERROR_BUF_CORRUPTED, "Enqueue - Buffer corrupted");
        return;
    }
#endif

    p_hdr = (BUFFER_HDR_T *) ((UINT8 *) p_buf - BUFFER_HDR_SIZE);

    if (p_hdr->status != BUF_STATUS_UNLINKED)
    {
        GKI_exception(GKI_ERROR_ENQUEUE_BUF_LINKED, "Enqueue head - buf already linked");
        return;
    }

    GKI_disable();

    if (p_q->p_first)
    {
        p_hdr->p_next = (BUFFER_HDR_T *)((UINT8 *)p_q->p_first - BUFFER_HDR_SIZE);
        p_q->p_first = p_buf;
    }
    else
    {
        p_q->p_first  = p_buf;
        p_q->p_last   = p_buf;
        p_hdr->p_next = NULL;
    }
    p_q->count++;

    p_hdr->status = BUF_STATUS_QUEUED;

    GKI_enable();

    return;
}
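/*
** Illustrative usage sketch (not part of the original sources): tail versus
** head insertion on a BUFFER_Q. After the two calls below the queue order is
** p_urgent, then p_normal. The buffer sizes are hypothetical.
*/
#include "gki.h"

static void example_enqueue_ordering (void)
{
    BUFFER_Q  q;
    void      *p_normal, *p_urgent;

    GKI_init_q (&q);

    p_normal = GKI_getbuf (32);
    p_urgent = GKI_getbuf (32);

    if (!p_normal || !p_urgent)
        return;                             /* allocation failed; example ends */

    GKI_enqueue      (&q, p_normal);        /* appended at the tail             */
    GKI_enqueue_head (&q, p_urgent);        /* jumps ahead of everything queued */

    /* GKI_dequeue() now returns p_urgent first, then p_normal */
    GKI_freebuf (GKI_dequeue (&q));
    GKI_freebuf (GKI_dequeue (&q));
}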
/** Callback from Java thread after alarm from AlarmService fires. */ static void bt_alarm_cb(void *data) { GKI_disable(); alarm_service.timer_last_expired_us = GKI_now_us(); UINT32 ticks_taken = 0; UINT32 ticks_scheduled = alarm_service.ticks_scheduled; if (alarm_service.timer_last_expired_us > alarm_service.timer_started_us) { ticks_taken = GKI_MS_TO_TICKS((alarm_service.timer_last_expired_us - alarm_service.timer_started_us) / 1000); } else { // this could happen on some platform ALOGE("%s now_us %lld less than %lld", __func__, alarm_service.timer_last_expired_us, alarm_service.timer_started_us); } GKI_enable(); GKI_timer_update(ticks_taken > ticks_scheduled ? ticks_taken : ticks_scheduled); }
/*******************************************************************************
**
** Function         GKI_delete_pool
**
** Description      Called by applications to delete a buffer pool. The function
**                  calls the operating-system-specific function to free the
**                  actual memory. An exception is generated if an error is
**                  detected.
**
** Parameters:      pool_id - (input) Id of the pool being deleted.
**
** Returns          void
**
*******************************************************************************/
void GKI_delete_pool (UINT8 pool_id)
{
    FREE_QUEUE_T    *Q;
    tGKI_COM_CB     *p_cb = &gki_cb.com;

    if ((pool_id >= GKI_NUM_TOTAL_BUF_POOLS) || (!p_cb->pool_start[pool_id]))
        return;

    GKI_disable();
    Q = &p_cb->freeq[pool_id];

    if (!Q->cur_cnt)
    {
        Q->size    = 0;
        Q->total   = 0;
        Q->cur_cnt = 0;
        Q->max_cnt = 0;
        Q->p_first = NULL;
        Q->p_last  = NULL;

        GKI_os_free (p_cb->pool_start[pool_id]);

        p_cb->pool_start[pool_id] = NULL;
        p_cb->pool_end[pool_id]   = NULL;
        p_cb->pool_size[pool_id]  = 0;

        gki_remove_from_pool_list(pool_id);
        p_cb->curr_total_no_of_pools--;
    }
    else
        GKI_exception(GKI_ERROR_DELETE_POOL_BAD_QID, "Deleting bad pool");

    GKI_enable();

    return;
}
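/*
** Illustrative usage sketch (not part of the original sources): creating a
** dynamic buffer pool with GKI_create_pool() and tearing it down again with
** GKI_delete_pool() once every buffer has been returned. The size, count and
** permission values are hypothetical.
*/
#include "gki.h"

static void example_dynamic_pool (void)
{
    UINT8  pool_id;
    void   *p_buf;

    /* 10 buffers of 128 bytes, visible to all tasks; GKI allocates the memory */
    pool_id = GKI_create_pool (128, 10, GKI_PUBLIC_POOL, NULL);

    if (pool_id >= GKI_NUM_TOTAL_BUF_POOLS)
        return;                             /* pool creation failed */

    p_buf = GKI_getpoolbuf (pool_id);
    if (p_buf != NULL)
        GKI_freebuf (p_buf);                /* pool must be drained before deletion */

    /* GKI_delete_pool() only releases the memory if no buffers are outstanding;
    ** otherwise it raises GKI_ERROR_DELETE_POOL_BAD_QID */
    GKI_delete_pool (pool_id);
}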
/******************************************************************************* ** ** Function GKI_timer_update ** ** Description This function is called by an OS to drive the GKI's timers. ** It is typically called at every system tick to ** update the timers for all tasks, and check for timeouts. ** ** Note: It has been designed to also allow for variable tick updates ** so that systems with strict power savings requirements can ** have the update occur at variable intervals. ** ** Parameters: ticks_since_last_update - (input) This is the number of TICKS that have ** occurred since the last time GKI_timer_update was called. ** ** Returns void ** *******************************************************************************/ void GKI_timer_update (INT32 ticks_since_last_update) { UINT8 task_id; long next_expiration; /* Holds the next soonest expiration time after this update */ /* Increment the number of ticks used for time stamps */ gki_cb.com.OSTicks += ticks_since_last_update; /* If any timers are running in any tasks, decrement the remaining time til * the timer updates need to take place (next expiration occurs) */ gki_cb.com.OSTicksTilExp -= ticks_since_last_update; /* Don't allow timer interrupt nesting */ if (gki_cb.com.timer_nesting) return; gki_cb.com.timer_nesting = 1; #if (defined(GKI_DELAY_STOP_SYS_TICK) && (GKI_DELAY_STOP_SYS_TICK > 0)) /* if inactivity delay timer is set and expired */ if (gki_cb.com.OSTicksTilStop) { if( gki_cb.com.OSTicksTilStop <= (UINT32)ticks_since_last_update ) { if(gki_cb.com.p_tick_cb) { gki_cb.com.system_tick_running = FALSE; (gki_cb.com.p_tick_cb) (FALSE); /* stop system tick */ } gki_cb.com.OSTicksTilStop = 0; /* clear inactivity delay timer */ gki_cb.com.timer_nesting = 0; return; } else gki_cb.com.OSTicksTilStop -= ticks_since_last_update; } #endif /* No need to update the ticks if no timeout has occurred */ if (gki_cb.com.OSTicksTilExp > 0) { gki_cb.com.timer_nesting = 0; return; } GKI_disable(); next_expiration = GKI_NO_NEW_TMRS_STARTED; /* If here then gki_cb.com.OSTicksTilExp <= 0. If negative, then increase gki_cb.com.OSNumOrigTicks to account for the difference so timer updates below are decremented by the full number of ticks. 
gki_cb.com.OSNumOrigTicks is reset at the bottom of this function so changing this value only affects the timer updates below */ gki_cb.com.OSNumOrigTicks -= gki_cb.com.OSTicksTilExp; /* Check for OS Task Timers */ for (task_id = 0; task_id < GKI_MAX_TASKS; task_id++) { if (gki_cb.com.OSWaitTmr[task_id] > 0) /* If timer is running */ { gki_cb.com.OSWaitTmr[task_id] -= gki_cb.com.OSNumOrigTicks; if (gki_cb.com.OSWaitTmr[task_id] <= 0) { /* Timer Expired */ gki_cb.com.OSRdyTbl[task_id] = TASK_READY; } } #if (GKI_NUM_TIMERS > 0) /* If any timer is running, decrement */ if (gki_cb.com.OSTaskTmr0[task_id] > 0) { gki_cb.com.OSTaskTmr0[task_id] -= gki_cb.com.OSNumOrigTicks; if (gki_cb.com.OSTaskTmr0[task_id] <= 0) { /* Set Timer 0 Expired event mask and reload timer */ #if (defined(GKI_TIMER_UPDATES_FROM_ISR) && GKI_TIMER_UPDATES_FROM_ISR == TRUE) GKI_isend_event (task_id, TIMER_0_EVT_MASK); #else GKI_send_event (task_id, TIMER_0_EVT_MASK); #endif gki_cb.com.OSTaskTmr0[task_id] = gki_cb.com.OSTaskTmr0R[task_id]; } } /* Check to see if this timer is the next one to expire */ if (gki_cb.com.OSTaskTmr0[task_id] > 0 && gki_cb.com.OSTaskTmr0[task_id] < next_expiration) next_expiration = gki_cb.com.OSTaskTmr0[task_id]; #endif #if (GKI_NUM_TIMERS > 1) /* If any timer is running, decrement */ if (gki_cb.com.OSTaskTmr1[task_id] > 0) { gki_cb.com.OSTaskTmr1[task_id] -= gki_cb.com.OSNumOrigTicks; if (gki_cb.com.OSTaskTmr1[task_id] <= 0) { /* Set Timer 1 Expired event mask and reload timer */ #if (defined(GKI_TIMER_UPDATES_FROM_ISR) && GKI_TIMER_UPDATES_FROM_ISR == TRUE) GKI_isend_event (task_id, TIMER_1_EVT_MASK); #else GKI_send_event (task_id, TIMER_1_EVT_MASK); #endif gki_cb.com.OSTaskTmr1[task_id] = gki_cb.com.OSTaskTmr1R[task_id]; } } /* Check to see if this timer is the next one to expire */ if (gki_cb.com.OSTaskTmr1[task_id] > 0 && gki_cb.com.OSTaskTmr1[task_id] < next_expiration) next_expiration = gki_cb.com.OSTaskTmr1[task_id]; #endif #if (GKI_NUM_TIMERS > 2) /* If any timer is running, decrement */ if (gki_cb.com.OSTaskTmr2[task_id] > 0) { gki_cb.com.OSTaskTmr2[task_id] -= gki_cb.com.OSNumOrigTicks; if (gki_cb.com.OSTaskTmr2[task_id] <= 0) { /* Set Timer 2 Expired event mask and reload timer */ #if (defined(GKI_TIMER_UPDATES_FROM_ISR) && GKI_TIMER_UPDATES_FROM_ISR == TRUE) GKI_isend_event (task_id, TIMER_2_EVT_MASK); #else GKI_send_event (task_id, TIMER_2_EVT_MASK); #endif gki_cb.com.OSTaskTmr2[task_id] = gki_cb.com.OSTaskTmr2R[task_id]; } } /* Check to see if this timer is the next one to expire */ if (gki_cb.com.OSTaskTmr2[task_id] > 0 && gki_cb.com.OSTaskTmr2[task_id] < next_expiration) next_expiration = gki_cb.com.OSTaskTmr2[task_id]; #endif #if (GKI_NUM_TIMERS > 3) /* If any timer is running, decrement */ if (gki_cb.com.OSTaskTmr3[task_id] > 0) { gki_cb.com.OSTaskTmr3[task_id] -= gki_cb.com.OSNumOrigTicks; if (gki_cb.com.OSTaskTmr3[task_id] <= 0) { /* Set Timer 3 Expired event mask and reload timer */ #if (defined(GKI_TIMER_UPDATES_FROM_ISR) && GKI_TIMER_UPDATES_FROM_ISR == TRUE) GKI_isend_event (task_id, TIMER_3_EVT_MASK); #else GKI_send_event (task_id, TIMER_3_EVT_MASK); #endif gki_cb.com.OSTaskTmr3[task_id] = gki_cb.com.OSTaskTmr3R[task_id]; } } /* Check to see if this timer is the next one to expire */ if (gki_cb.com.OSTaskTmr3[task_id] > 0 && gki_cb.com.OSTaskTmr3[task_id] < next_expiration) next_expiration = gki_cb.com.OSTaskTmr3[task_id]; #endif } /* Set the next timer experation value if there is one to start */ if (next_expiration < GKI_NO_NEW_TMRS_STARTED) { gki_cb.com.OSTicksTilExp 
= gki_cb.com.OSNumOrigTicks = next_expiration; } else { gki_cb.com.OSTicksTilExp = gki_cb.com.OSNumOrigTicks = 0; } gki_cb.com.timer_nesting = 0; GKI_enable(); return; }
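/*
** Illustrative usage sketch (not part of the original sources): how a port's
** periodic tick source would drive the GKI timers. The hook name and the
** one-tick period are hypothetical; GKI_timer_update() simply receives the
** number of ticks elapsed since it was last called.
*/
#include "gki.h"

/* Called by the platform once per GKI tick (for example from a timer thread
** or an ISR trampoline, depending on the port) */
static void example_system_tick_hook (void)
{
    GKI_timer_update (1);
}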
void *GKI_getbuf (UINT16 size) #endif { UINT8 i; FREE_QUEUE_T *Q; BUFFER_HDR_T *p_hdr; tGKI_COM_CB *p_cb = &gki_cb.com; #if GKI_BUFFER_DEBUG UINT8 x; #endif if (size == 0) { GKI_exception (GKI_ERROR_BUF_SIZE_ZERO, "getbuf: Size is zero"); return (NULL); } #if GKI_BUFFER_DEBUG LOGD("GKI_getbuf() requesting %d func:%s(line=%d)", size, _function_, _line_); #endif /* Find the first buffer pool that is public that can hold the desired size */ for (i=0; i < p_cb->curr_total_no_of_pools; i++) { if ( size <= p_cb->freeq[p_cb->pool_list[i]].size ) break; } if(i == p_cb->curr_total_no_of_pools) { GKI_exception (GKI_ERROR_BUF_SIZE_TOOBIG, "getbuf: Size is too big"); return (NULL); } /* Make sure the buffers aren't disturbed til finished with allocation */ GKI_disable(); /* search the public buffer pools that are big enough to hold the size * until a free buffer is found */ for ( ; i < p_cb->curr_total_no_of_pools; i++) { /* Only look at PUBLIC buffer pools (bypass RESTRICTED pools) */ if (((UINT16)1 << p_cb->pool_list[i]) & p_cb->pool_access_mask) continue; Q = &p_cb->freeq[p_cb->pool_list[i]]; if(Q->cur_cnt < Q->total) { #ifdef GKI_USE_DEFERED_ALLOC_BUF_POOLS if(Q->p_first == 0 && gki_alloc_free_queue(i) != TRUE) { GKI_TRACE_ERROR_0("GKI_getbuf() out of buffer"); GKI_enable(); return NULL; } #endif if(Q->p_first == 0) { /* gki_alloc_free_queue() failed to alloc memory */ GKI_TRACE_ERROR_0("GKI_getbuf() fail alloc free queue"); GKI_enable(); return NULL; } p_hdr = Q->p_first; Q->p_first = p_hdr->p_next; if (!Q->p_first) Q->p_last = NULL; if(++Q->cur_cnt > Q->max_cnt) Q->max_cnt = Q->cur_cnt; GKI_enable(); p_hdr->task_id = GKI_get_taskid(); p_hdr->status = BUF_STATUS_UNLINKED; p_hdr->p_next = NULL; p_hdr->Type = 0; #if GKI_BUFFER_DEBUG LOGD("GKI_getbuf() allocated, %x, %x (%d of %d used) %d", (UINT8*)p_hdr + BUFFER_HDR_SIZE, p_hdr, Q->cur_cnt, Q->total, p_cb->freeq[i].total); strncpy(p_hdr->_function, _function_, _GKI_MAX_FUNCTION_NAME_LEN); p_hdr->_function[_GKI_MAX_FUNCTION_NAME_LEN] = '\0'; p_hdr->_line = _line_; #endif return ((void *) ((UINT8 *)p_hdr + BUFFER_HDR_SIZE)); } } GKI_TRACE_ERROR_0("GKI_getbuf() unable to allocate buffer!!!!!"); #if GKI_BUFFER_DEBUG LOGD("GKI_getbuf() unable to allocate buffer!!!!!"); LOGD("******************** GKI Memory Pool Dump ********************"); p_cb = &gki_cb.com; LOGD("Dumping total of %d buffer pools", p_cb->curr_total_no_of_pools); for (i=0 ; i < p_cb->curr_total_no_of_pools; i++) { p_hdr = (BUFFER_HDR_T *)p_cb->pool_start[i]; LOGD("pool %d has a total of %d buffers (start=%p)", i, p_cb->freeq[i].total, p_hdr); for (x=0; p_hdr && x < p_cb->freeq[i].total; x++) { if (p_hdr->status != BUF_STATUS_FREE) { LOGD("pool:%d, buf[%d]:%x, hdr:%x status=%d func:%s(line=%d)", i, x, (UINT8*)p_hdr + BUFFER_HDR_SIZE, p_hdr, p_hdr->status, p_hdr->_function, p_hdr->_line); } p_hdr = (BUFFER_HDR_T *)((UINT8 *)p_hdr + p_cb->pool_size[i]); } } LOGD("**************************************************************"); #endif GKI_TRACE_ERROR_0("Failed to allocate GKI buffer"); GKI_enable(); return (NULL); }
/******************************************************************************* ** ** Function GKI_start_timer ** ** Description An application can call this function to start one of ** it's four general purpose timers. Any of the four timers ** can be 1-shot or continuous. If a timer is already running, ** it will be reset to the new parameters. ** ** Parameters tnum - (input) timer number to be started (TIMER_0, ** TIMER_1, TIMER_2, or TIMER_3) ** ticks - (input) the number of system ticks til the ** timer expires. ** is_continuous - (input) TRUE if timer restarts automatically, ** else FALSE if it is a 'one-shot'. ** ** Returns void ** *******************************************************************************/ void GKI_start_timer (UINT8 tnum, INT32 ticks, BOOLEAN is_continuous) { INT32 reload; INT32 orig_ticks; UINT8 task_id = GKI_get_taskid(); BOOLEAN bad_timer = FALSE; if (ticks <= 0) ticks = 1; orig_ticks = ticks; /* save the ticks in case adjustment is necessary */ /* If continuous timer, set reload, else set it to 0 */ if (is_continuous) reload = ticks; else reload = 0; GKI_disable(); if(gki_timers_is_timer_running() == FALSE) { #if (defined(GKI_DELAY_STOP_SYS_TICK) && (GKI_DELAY_STOP_SYS_TICK > 0)) /* if inactivity delay timer is not running, start system tick */ if(gki_cb.com.OSTicksTilStop == 0) { #endif if(gki_cb.com.p_tick_cb) { /* start system tick */ gki_cb.com.system_tick_running = TRUE; (gki_cb.com.p_tick_cb) (TRUE); } #if (defined(GKI_DELAY_STOP_SYS_TICK) && (GKI_DELAY_STOP_SYS_TICK > 0)) } else { /* clear inactivity delay timer */ gki_cb.com.OSTicksTilStop = 0; } #endif } /* Add the time since the last task timer update. ** Note that this works when no timers are active since ** both OSNumOrigTicks and OSTicksTilExp are 0. */ if (GKI_MAX_INT32 - (gki_cb.com.OSNumOrigTicks - gki_cb.com.OSTicksTilExp) > ticks) { ticks += gki_cb.com.OSNumOrigTicks - gki_cb.com.OSTicksTilExp; } else ticks = GKI_MAX_INT32; switch (tnum) { #if (GKI_NUM_TIMERS > 0) case TIMER_0: gki_cb.com.OSTaskTmr0R[task_id] = reload; gki_cb.com.OSTaskTmr0 [task_id] = ticks; break; #endif #if (GKI_NUM_TIMERS > 1) case TIMER_1: gki_cb.com.OSTaskTmr1R[task_id] = reload; gki_cb.com.OSTaskTmr1 [task_id] = ticks; break; #endif #if (GKI_NUM_TIMERS > 2) case TIMER_2: gki_cb.com.OSTaskTmr2R[task_id] = reload; gki_cb.com.OSTaskTmr2 [task_id] = ticks; break; #endif #if (GKI_NUM_TIMERS > 3) case TIMER_3: gki_cb.com.OSTaskTmr3R[task_id] = reload; gki_cb.com.OSTaskTmr3 [task_id] = ticks; break; #endif default: bad_timer = TRUE; /* Timer number is bad, so do not use */ } /* Update the expiration timeout if a legitimate timer */ if (!bad_timer) { /* Only update the timeout value if it is less than any other newly started timers */ gki_adjust_timer_count (orig_ticks); } GKI_enable(); }
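/*
** Illustrative usage sketch (not part of the original sources): running a
** periodic 100 ms timer on the calling task's TIMER_0 and stopping it again.
** The event handling shown is hypothetical; GKI delivers TIMER_0_EVT_MASK to
** the task each time the timer expires and reloads.
*/
#include "gki.h"

static void example_periodic_timer (void)
{
    UINT16 event;

    /* Continuous timer: reloads itself every 100 ms worth of ticks */
    GKI_start_timer (TIMER_0, GKI_MS_TO_TICKS (100), TRUE);

    /* Wait for (at least) one expiration of this task's timer 0 */
    event = GKI_wait (TIMER_0_EVT_MASK, 0);

    if (event & TIMER_0_EVT_MASK)
    {
        /* ... periodic work here ... */
    }

    /* One-shot timers stop themselves; continuous ones must be stopped */
    GKI_stop_timer (TIMER_0);
}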
void *GKI_getpoolbuf (UINT8 pool_id)
#endif
{
    FREE_QUEUE_T  *Q;
    BUFFER_HDR_T  *p_hdr;
    tGKI_COM_CB   *p_cb = &gki_cb.com;

    if (pool_id >= GKI_NUM_TOTAL_BUF_POOLS)
        return (NULL);

#if GKI_BUFFER_DEBUG
    LOGD("GKI_getpoolbuf() requesting from %d func:%s(line=%d)", pool_id, _function_, _line_);
#endif
    /* Make sure the buffers aren't disturbed til finished with allocation */
    GKI_disable();

    Q = &p_cb->freeq[pool_id];
    if (Q->cur_cnt < Q->total)
    {
#ifdef GKI_USE_DEFERED_ALLOC_BUF_POOLS
        if (Q->p_first == 0 && gki_alloc_free_queue(pool_id) != TRUE)
        {
            /* release the GKI lock taken above before returning on failure */
            GKI_enable();
            return NULL;
        }
#endif
        if (Q->p_first == 0)
        {
            /* gki_alloc_free_queue() failed to alloc memory */
            GKI_TRACE_ERROR_0("GKI_getpoolbuf() fail alloc free queue");
            GKI_enable();
            return NULL;
        }

        p_hdr = Q->p_first;
        Q->p_first = p_hdr->p_next;

        if (!Q->p_first)
            Q->p_last = NULL;

        if (++Q->cur_cnt > Q->max_cnt)
            Q->max_cnt = Q->cur_cnt;

        GKI_enable();

        p_hdr->task_id = GKI_get_taskid();

        p_hdr->status  = BUF_STATUS_UNLINKED;
        p_hdr->p_next  = NULL;
        p_hdr->Type    = 0;

#if GKI_BUFFER_DEBUG
        LOGD("GKI_getpoolbuf() allocated, %x, %x (%d of %d used) %d", (UINT8*)p_hdr + BUFFER_HDR_SIZE, p_hdr, Q->cur_cnt, Q->total, p_cb->freeq[pool_id].total);

        strncpy(p_hdr->_function, _function_, _GKI_MAX_FUNCTION_NAME_LEN);
        p_hdr->_function[_GKI_MAX_FUNCTION_NAME_LEN] = '\0';
        p_hdr->_line = _line_;
#endif
        return ((void *) ((UINT8 *)p_hdr + BUFFER_HDR_SIZE));
    }

    /* If here, no buffers in the specified pool */
    GKI_enable();

#if GKI_BUFFER_DEBUG
    /* try for free buffers in public pools */
    return (GKI_getbuf_debug(p_cb->freeq[pool_id].size, _function_, _line_));
#else
    /* try for free buffers in public pools */
    return (GKI_getbuf(p_cb->freeq[pool_id].size));
#endif
}
/******************************************************************************* ** ** Function GKI_timer_update ** ** Description This function is called by an OS to drive the GKI's timers. ** It is typically called at every system tick to ** update the timers for all tasks, and check for timeouts. ** ** Note: It has been designed to also allow for variable tick updates ** so that systems with strict power savings requirements can ** have the update occur at variable intervals. ** ** Parameters: ticks_since_last_update - (input) This is the number of TICKS that have ** occurred since the last time GKI_timer_update was called. ** ** Returns void ** *******************************************************************************/ void GKI_timer_update (INT32 ticks_since_last_update) { UINT8 task_id; long next_expiration; /* Holds the next soonest expiration time after this update */ /* Increment the number of ticks used for time stamps */ gki_cb.com.OSTicks += ticks_since_last_update; /* If any timers are running in any tasks, decrement the remaining time til * the timer updates need to take place (next expiration occurs) */ gki_cb.com.OSTicksTilExp -= ticks_since_last_update; /* Don't allow timer interrupt nesting */ if (gki_cb.com.timer_nesting) return; gki_cb.com.timer_nesting = 1; /* No need to update the ticks if no timeout has occurred */ if (gki_cb.com.OSTicksTilExp > 0) { // When using alarms from AlarmService we should // always have work to be done here. ALOGE("%s no work to be done when expected work", __func__); gki_cb.com.timer_nesting = 0; return; } next_expiration = GKI_NO_NEW_TMRS_STARTED; /* If here then gki_cb.com.OSTicksTilExp <= 0. If negative, then increase gki_cb.com.OSNumOrigTicks to account for the difference so timer updates below are decremented by the full number of ticks. gki_cb.com.OSNumOrigTicks is reset at the bottom of this function so changing this value only affects the timer updates below */ gki_cb.com.OSNumOrigTicks -= gki_cb.com.OSTicksTilExp; /* Protect this section because if a GKI_timer_stop happens between: * - gki_cb.com.OSTaskTmr0[task_id] -= gki_cb.com.OSNumOrigTicks; * - gki_cb.com.OSTaskTmr0[task_id] = gki_cb.com.OSTaskTmr0R[task_id]; * then the timer may appear stopped while it is about to be reloaded. 
*/ GKI_disable(); /* Check for OS Task Timers */ for (task_id = 0; task_id < GKI_MAX_TASKS; task_id++) { if (gki_cb.com.OSWaitTmr[task_id] > 0) /* If timer is running */ { gki_cb.com.OSWaitTmr[task_id] -= gki_cb.com.OSNumOrigTicks; if (gki_cb.com.OSWaitTmr[task_id] <= 0) { /* Timer Expired */ gki_cb.com.OSRdyTbl[task_id] = TASK_READY; } } #if (GKI_NUM_TIMERS > 0) /* If any timer is running, decrement */ if (gki_cb.com.OSTaskTmr0[task_id] > 0) { gki_cb.com.OSTaskTmr0[task_id] -= gki_cb.com.OSNumOrigTicks; if (gki_cb.com.OSTaskTmr0[task_id] <= 0) { /* Reload timer and set Timer 0 Expired event mask */ gki_cb.com.OSTaskTmr0[task_id] = gki_cb.com.OSTaskTmr0R[task_id]; GKI_send_event (task_id, TIMER_0_EVT_MASK); } } /* Check to see if this timer is the next one to expire */ if (gki_cb.com.OSTaskTmr0[task_id] > 0 && gki_cb.com.OSTaskTmr0[task_id] < next_expiration) next_expiration = gki_cb.com.OSTaskTmr0[task_id]; #endif #if (GKI_NUM_TIMERS > 1) /* If any timer is running, decrement */ if (gki_cb.com.OSTaskTmr1[task_id] > 0) { gki_cb.com.OSTaskTmr1[task_id] -= gki_cb.com.OSNumOrigTicks; if (gki_cb.com.OSTaskTmr1[task_id] <= 0) { /* Reload timer and set Timer 1 Expired event mask */ gki_cb.com.OSTaskTmr1[task_id] = gki_cb.com.OSTaskTmr1R[task_id]; GKI_send_event (task_id, TIMER_1_EVT_MASK); } } /* Check to see if this timer is the next one to expire */ if (gki_cb.com.OSTaskTmr1[task_id] > 0 && gki_cb.com.OSTaskTmr1[task_id] < next_expiration) next_expiration = gki_cb.com.OSTaskTmr1[task_id]; #endif #if (GKI_NUM_TIMERS > 2) /* If any timer is running, decrement */ if (gki_cb.com.OSTaskTmr2[task_id] > 0) { gki_cb.com.OSTaskTmr2[task_id] -= gki_cb.com.OSNumOrigTicks; if (gki_cb.com.OSTaskTmr2[task_id] <= 0) { /* Reload timer and set Timer 2 Expired event mask */ gki_cb.com.OSTaskTmr2[task_id] = gki_cb.com.OSTaskTmr2R[task_id]; GKI_send_event (task_id, TIMER_2_EVT_MASK); } } /* Check to see if this timer is the next one to expire */ if (gki_cb.com.OSTaskTmr2[task_id] > 0 && gki_cb.com.OSTaskTmr2[task_id] < next_expiration) next_expiration = gki_cb.com.OSTaskTmr2[task_id]; #endif #if (GKI_NUM_TIMERS > 3) /* If any timer is running, decrement */ if (gki_cb.com.OSTaskTmr3[task_id] > 0) { gki_cb.com.OSTaskTmr3[task_id] -= gki_cb.com.OSNumOrigTicks; if (gki_cb.com.OSTaskTmr3[task_id] <= 0) { /* Reload timer and set Timer 3 Expired event mask */ gki_cb.com.OSTaskTmr3[task_id] = gki_cb.com.OSTaskTmr3R[task_id]; GKI_send_event (task_id, TIMER_3_EVT_MASK); } } /* Check to see if this timer is the next one to expire */ if (gki_cb.com.OSTaskTmr3[task_id] > 0 && gki_cb.com.OSTaskTmr3[task_id] < next_expiration) next_expiration = gki_cb.com.OSTaskTmr3[task_id]; #endif } /* Set the next timer experation value if there is one to start */ if (next_expiration < GKI_NO_NEW_TMRS_STARTED) { gki_cb.com.OSTicksTilExp = gki_cb.com.OSNumOrigTicks = next_expiration; } else { gki_cb.com.OSTicksTilExp = gki_cb.com.OSNumOrigTicks = 0; } // Set alarm service for next alarm. alarm_service_reschedule(); GKI_enable(); gki_cb.com.timer_nesting = 0; return; }
/******************************************************************************* ** ** Function GKI_start_timer ** ** Description An application can call this function to start one of ** it's four general purpose timers. Any of the four timers ** can be 1-shot or continuous. If a timer is already running, ** it will be reset to the new parameters. ** ** Parameters tnum - (input) timer number to be started (TIMER_0, ** TIMER_1, TIMER_2, or TIMER_3) ** ticks - (input) the number of system ticks til the ** timer expires. ** is_continuous - (input) TRUE if timer restarts automatically, ** else FALSE if it is a 'one-shot'. ** ** Returns void ** *******************************************************************************/ void GKI_start_timer (UINT8 tnum, INT32 ticks, BOOLEAN is_continuous) { INT32 reload; INT32 orig_ticks; UINT8 task_id = GKI_get_taskid(); BOOLEAN bad_timer = FALSE; if (ticks <= 0) ticks = 1; orig_ticks = ticks; /* save the ticks in case adjustment is necessary */ /* If continuous timer, set reload, else set it to 0 */ if (is_continuous) reload = ticks; else reload = 0; GKI_disable(); /* Add the time since the last task timer update. ** Note that this works when no timers are active since ** both OSNumOrigTicks and OSTicksTilExp are 0. */ if (INT32_MAX - (gki_cb.com.OSNumOrigTicks - gki_cb.com.OSTicksTilExp) > ticks) { ticks += gki_cb.com.OSNumOrigTicks - gki_cb.com.OSTicksTilExp; } else ticks = INT32_MAX; switch (tnum) { #if (GKI_NUM_TIMERS > 0) case TIMER_0: gki_cb.com.OSTaskTmr0R[task_id] = reload; gki_cb.com.OSTaskTmr0 [task_id] = ticks; break; #endif #if (GKI_NUM_TIMERS > 1) case TIMER_1: gki_cb.com.OSTaskTmr1R[task_id] = reload; gki_cb.com.OSTaskTmr1 [task_id] = ticks; break; #endif #if (GKI_NUM_TIMERS > 2) case TIMER_2: gki_cb.com.OSTaskTmr2R[task_id] = reload; gki_cb.com.OSTaskTmr2 [task_id] = ticks; break; #endif #if (GKI_NUM_TIMERS > 3) case TIMER_3: gki_cb.com.OSTaskTmr3R[task_id] = reload; gki_cb.com.OSTaskTmr3 [task_id] = ticks; break; #endif default: bad_timer = TRUE; /* Timer number is bad, so do not use */ } /* Update the expiration timeout if a legitimate timer */ if (!bad_timer) { /* Only update the timeout value if it is less than any other newly started timers */ gki_adjust_timer_count (orig_ticks); } GKI_enable(); }
/******************************************************************************* ** ** Function bta_av_co_audio_setconfig ** ** Description This callout function is executed by AV to set the codec and ** content protection configuration of the audio stream. ** ** ** Returns void ** *******************************************************************************/ BTA_API void bta_av_co_audio_setconfig(tBTA_AV_HNDL hndl, tBTA_AV_CODEC codec_type, UINT8 *p_codec_info, UINT8 seid, BD_ADDR addr, UINT8 num_protect, UINT8 *p_protect_info) { tBTA_AV_CO_PEER *p_peer; UINT8 status = A2D_SUCCESS; UINT8 category = A2D_SUCCESS; BOOLEAN recfg_needed = FALSE; FUNC_TRACE(); APPL_TRACE_DEBUG6("bta_av_co_audio_setconfig p_codec_info[%x:%x:%x:%x:%x:%x]", p_codec_info[1], p_codec_info[2], p_codec_info[3], p_codec_info[4], p_codec_info[5], p_codec_info[6]); APPL_TRACE_DEBUG4("num_protect:0x%02x protect_info:0x%02x%02x%02x", num_protect, p_protect_info[0], p_protect_info[1], p_protect_info[2]); /* Retrieve the peer info */ p_peer = bta_av_co_get_peer(hndl); if (p_peer == NULL) { APPL_TRACE_ERROR0("bta_av_co_audio_setconfig could not find peer entry"); /* Call call-in rejecting the configuration */ bta_av_ci_setconfig(hndl, A2D_BUSY, AVDT_ASC_CODEC, 0, NULL, FALSE); return; } /* Sanity check: should not be opened at this point */ if (p_peer->opened) { APPL_TRACE_ERROR0("bta_av_co_audio_setconfig peer already in use"); } #if defined(BTA_AV_CO_CP_SCMS_T) && (BTA_AV_CO_CP_SCMS_T == TRUE) if (num_protect != 0) { /* If CP is supported */ if ((num_protect != 1) || (bta_av_co_cp_is_scmst(p_protect_info) == FALSE)) { APPL_TRACE_ERROR0("bta_av_co_audio_setconfig wrong CP configuration"); status = A2D_BAD_CP_TYPE; category = AVDT_ASC_PROTECT; } } #else /* Do not support content protection for the time being */ if (num_protect != 0) { APPL_TRACE_ERROR0("bta_av_co_audio_setconfig wrong CP configuration"); status = A2D_BAD_CP_TYPE; category = AVDT_ASC_PROTECT; } #endif if (status == A2D_SUCCESS) { /* Check if codec configuration is supported */ if (bta_av_co_audio_media_supports_config(codec_type, p_codec_info)) { /* Protect access to bta_av_co_cb.codec_cfg */ GKI_disable(); /* Check if the configuration matches the current codec config */ switch (bta_av_co_cb.codec_cfg.id) { case BTIF_AV_CODEC_SBC: if ((codec_type != BTA_AV_CODEC_SBC) || memcmp(p_codec_info, bta_av_co_cb.codec_cfg.info, 5)) { recfg_needed = TRUE; } else if ((num_protect == 1) && (!bta_av_co_cb.cp.active)) { recfg_needed = TRUE; } /* if remote side requests a restricted notify sinks preferred bitpool range as all other params are already checked for validify */ APPL_TRACE_EVENT2("remote peer setconfig bitpool range [%d:%d]", p_codec_info[BTA_AV_CO_SBC_MIN_BITPOOL_OFF], p_codec_info[BTA_AV_CO_SBC_MAX_BITPOOL_OFF] ); bta_av_co_cb.codec_cfg_setconfig.id = BTIF_AV_CODEC_SBC; memcpy(bta_av_co_cb.codec_cfg_setconfig.info, p_codec_info, AVDT_CODEC_SIZE); break; default: APPL_TRACE_ERROR1("bta_av_co_audio_setconfig unsupported cid %d", bta_av_co_cb.codec_cfg.id); recfg_needed = TRUE; break; } /* Protect access to bta_av_co_cb.codec_cfg */ GKI_enable(); } else { category = AVDT_ASC_CODEC; status = A2D_WRONG_CODEC; } } if (status != A2D_SUCCESS) { APPL_TRACE_DEBUG2("bta_av_co_audio_setconfig reject s=%d c=%d", status, category); /* Call call-in rejecting the configuration */ bta_av_ci_setconfig(hndl, status, category, 0, NULL, FALSE); } else { /* Mark that this is an acceptor peer */ p_peer->acp = TRUE; p_peer->recfg_needed = recfg_needed; 
APPL_TRACE_DEBUG1("bta_av_co_audio_setconfig accept reconf=%d", recfg_needed); /* Call call-in accepting the configuration */ bta_av_ci_setconfig(hndl, A2D_SUCCESS, A2D_SUCCESS, 0, NULL, recfg_needed); } }
/******************************************************************************* ** ** Function bta_av_co_audio_getconfig ** ** Description This callout function is executed by AV to retrieve the ** desired codec and content protection configuration for the ** audio stream. ** ** ** Returns Stream codec and content protection configuration info. ** *******************************************************************************/ BTA_API UINT8 bta_av_co_audio_getconfig(tBTA_AV_HNDL hndl, tBTA_AV_CODEC codec_type, UINT8 *p_codec_info, UINT8 *p_sep_info_idx, UINT8 seid, UINT8 *p_num_protect, UINT8 *p_protect_info) { UINT8 result = A2D_FAIL; BOOLEAN supported; tBTA_AV_CO_PEER *p_peer; tBTA_AV_CO_SINK *p_sink; UINT8 codec_cfg[AVDT_CODEC_SIZE]; UINT8 index; FUNC_TRACE(); APPL_TRACE_DEBUG3("bta_av_co_audio_getconfig handle:0x%x codec_type:%d seid:%d", hndl, codec_type, seid); APPL_TRACE_DEBUG4("num_protect:0x%02x protect_info:0x%02x%02x%02x", *p_num_protect, p_protect_info[0], p_protect_info[1], p_protect_info[2]); /* Retrieve the peer info */ p_peer = bta_av_co_get_peer(hndl); if (p_peer == NULL) { APPL_TRACE_ERROR0("bta_av_co_audio_getconfig could not find peer entry"); return A2D_FAIL; } APPL_TRACE_DEBUG4("bta_av_co_audio_getconfig peer(o=%d,n_snks=%d,n_rx_snks=%d,n_sup_snks=%d)", p_peer->opened, p_peer->num_snks, p_peer->num_rx_snks, p_peer->num_sup_snks); /* Increment the number of received sinks capabilities */ p_peer->num_rx_snks++; /* Check if this is a supported configuration */ supported = FALSE; switch (codec_type) { case BTA_AV_CODEC_SBC: supported = TRUE; break; default: break; } if (supported) { /* If there is room for a new one */ if (p_peer->num_sup_snks < BTA_AV_CO_NUM_ELEMENTS(p_peer->snks)) { p_sink = &p_peer->snks[p_peer->num_sup_snks++]; APPL_TRACE_DEBUG6("bta_av_co_audio_getconfig saved caps[%x:%x:%x:%x:%x:%x]", p_codec_info[1], p_codec_info[2], p_codec_info[3], p_codec_info[4], p_codec_info[5], p_codec_info[6]); memcpy(p_sink->codec_caps, p_codec_info, AVDT_CODEC_SIZE); p_sink->codec_type = codec_type; p_sink->sep_info_idx = *p_sep_info_idx; p_sink->seid = seid; p_sink->num_protect = *p_num_protect; memcpy(p_sink->protect_info, p_protect_info, BTA_AV_CP_INFO_LEN); } else { APPL_TRACE_ERROR0("bta_av_co_audio_getconfig no more room for SNK info"); } } /* If last SNK get capabilities or all supported codec capa retrieved */ if ((p_peer->num_rx_snks == p_peer->num_snks) || (p_peer->num_sup_snks == BTA_AV_CO_NUM_ELEMENTS(p_peer->snks))) { APPL_TRACE_DEBUG0("bta_av_co_audio_getconfig last sink reached"); /* Protect access to bta_av_co_cb.codec_cfg */ GKI_disable(); /* Find a sink that matches the codec config */ if (bta_av_co_audio_peer_supports_codec(p_peer, &index)) { /* stop fetching caps once we retrieved a supported codec */ if (p_peer->acp) { *p_sep_info_idx = p_peer->num_seps; APPL_TRACE_EVENT0("no need to fetch more SEPs"); } p_sink = &p_peer->snks[index]; /* Build the codec configuration for this sink */ if (bta_av_co_audio_codec_build_config(p_sink->codec_caps, codec_cfg)) { APPL_TRACE_DEBUG6("bta_av_co_audio_getconfig reconfig p_codec_info[%x:%x:%x:%x:%x:%x]", codec_cfg[1], codec_cfg[2], codec_cfg[3], codec_cfg[4], codec_cfg[5], codec_cfg[6]); /* Save the new configuration */ p_peer->p_snk = p_sink; memcpy(p_peer->codec_cfg, codec_cfg, AVDT_CODEC_SIZE); /* By default, no content protection */ *p_num_protect = 0; #if defined(BTA_AV_CO_CP_SCMS_T) && (BTA_AV_CO_CP_SCMS_T == TRUE) /* Check if this sink supports SCMS */ if (bta_av_co_audio_sink_has_scmst(p_sink)) { 
p_peer->cp_active = TRUE; bta_av_co_cb.cp.active = TRUE; *p_num_protect = BTA_AV_CP_INFO_LEN; memcpy(p_protect_info, bta_av_co_cp_scmst, BTA_AV_CP_INFO_LEN); } else { p_peer->cp_active = FALSE; bta_av_co_cb.cp.active = FALSE; } #endif /* If acceptor -> reconfig otherwise reply for configuration */ if (p_peer->acp) { if (p_peer->recfg_needed) { APPL_TRACE_DEBUG1("bta_av_co_audio_getconfig call BTA_AvReconfig(x%x)", hndl); BTA_AvReconfig(hndl, TRUE, p_sink->sep_info_idx, p_peer->codec_cfg, *p_num_protect, (UINT8 *)bta_av_co_cp_scmst); } } else { *p_sep_info_idx = p_sink->sep_info_idx; memcpy(p_codec_info, p_peer->codec_cfg, AVDT_CODEC_SIZE); } result = A2D_SUCCESS; } } /* Protect access to bta_av_co_cb.codec_cfg */ GKI_enable(); } return result; }
/*******************************************************************************
**
** Function         GKI_getbuf
**
** Description      Called by an application to get a free buffer which
**                  is of size greater or equal to the requested size.
**
**                  Note: This routine only takes buffers from public pools.
**                        It will not use any buffers from pools
**                        marked GKI_RESTRICTED_POOL.
**
** Parameters       size - (input) number of bytes needed.
**
** Returns          A pointer to the buffer, or NULL if none available
**
*******************************************************************************/
void *GKI_getbuf (UINT16 size)
{
    UINT8         i;
    FREE_QUEUE_T  *Q;
    BUFFER_HDR_T  *p_hdr;
    tGKI_COM_CB   *p_cb = &gki_cb.com;

    if (size == 0)
    {
        GKI_exception (GKI_ERROR_BUF_SIZE_ZERO, "getbuf: Size is zero");
        return (NULL);
    }

    /* Find the first buffer pool that is public that can hold the desired size */
    for (i=0; i < p_cb->curr_total_no_of_pools; i++)
    {
        if ( size <= p_cb->freeq[p_cb->pool_list[i]].size )
            break;
    }

    if (i == p_cb->curr_total_no_of_pools)
    {
        GKI_exception (GKI_ERROR_BUF_SIZE_TOOBIG, "getbuf: Size is too big");
        return (NULL);
    }

    /* Make sure the buffers aren't disturbed til finished with allocation */
    GKI_disable();

    /* search the public buffer pools that are big enough to hold the size
     * until a free buffer is found */
    for ( ; i < p_cb->curr_total_no_of_pools; i++)
    {
        /* Only look at PUBLIC buffer pools (bypass RESTRICTED pools) */
        if (((UINT16)1 << p_cb->pool_list[i]) & p_cb->pool_access_mask)
            continue;

        Q = &p_cb->freeq[p_cb->pool_list[i]];
        if (Q->cur_cnt < Q->total)
        {
// btla-specific ++
#ifdef GKI_USE_DEFERED_ALLOC_BUF_POOLS
            if (Q->p_first == 0 && gki_alloc_free_queue(i) != TRUE)
            {
                /* release the GKI lock taken above before returning on failure */
                GKI_enable();
                return NULL;
            }
#endif
// btla-specific --
            p_hdr = Q->p_first;
            Q->p_first = p_hdr->p_next;

            if (!Q->p_first)
                Q->p_last = NULL;

            if (++Q->cur_cnt > Q->max_cnt)
                Q->max_cnt = Q->cur_cnt;

            GKI_enable();

            p_hdr->task_id = GKI_get_taskid();

            p_hdr->status  = BUF_STATUS_UNLINKED;
            p_hdr->p_next  = NULL;
            p_hdr->Type    = 0;
            return ((void *) ((UINT8 *)p_hdr + BUFFER_HDR_SIZE));
        }
    }

    GKI_enable();

    GKI_exception (GKI_ERROR_OUT_OF_BUFFERS, "getbuf: out of buffers");

    return (NULL);
}
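/*
** Illustrative usage sketch (not part of the original sources): size-based
** allocation from the public pools with GKI_getbuf(). The requested size and
** the use made of the buffer are hypothetical.
*/
#include <string.h>
#include "gki.h"

static void example_size_based_alloc (void)
{
    /* GKI picks the smallest public pool whose buffers can hold 200 bytes */
    UINT8 *p = (UINT8 *) GKI_getbuf (200);

    if (p == NULL)
        return;                 /* all suitable pools exhausted */

    /* The actual capacity may exceed the request; it can be queried */
    memset (p, 0, GKI_get_buf_size (p));

    GKI_freebuf (p);
}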