/*******************************************************************************
**
** Function         gatt_init
**
** Description      This function enables the GATT profile on the device.
**                  It clears out the control blocks, and registers with L2CAP.
**
** Returns          void
**
*******************************************************************************/
void gatt_init (void)
{
    tL2CAP_FIXED_CHNL_REG  fixed_reg;

    GATT_TRACE_DEBUG("gatt_init()");

    memset (&gatt_cb, 0, sizeof(tGATT_CB));
    memset (&fixed_reg, 0, sizeof(tL2CAP_FIXED_CHNL_REG));

#if defined(GATT_INITIAL_TRACE_LEVEL)
    gatt_cb.trace_level = GATT_INITIAL_TRACE_LEVEL;
#else
    gatt_cb.trace_level = BT_TRACE_LEVEL_NONE;    /* No traces */
#endif
    gatt_cb.def_mtu_size = GATT_DEF_BLE_MTU_SIZE;
    GKI_init_q (&gatt_cb.sign_op_queue);
    GKI_init_q (&gatt_cb.srv_chg_clt_q);
    GKI_init_q (&gatt_cb.pending_new_srv_start_q);

    /* First, register fixed L2CAP channel for ATT over BLE */
    fixed_reg.fixed_chnl_opts.mode         = L2CAP_FCR_BASIC_MODE;
    fixed_reg.fixed_chnl_opts.max_transmit = 0xFF;
    fixed_reg.fixed_chnl_opts.rtrans_tout  = 2000;
    fixed_reg.fixed_chnl_opts.mon_tout     = 12000;
    fixed_reg.fixed_chnl_opts.mps          = 670;
    fixed_reg.fixed_chnl_opts.tx_win_sz    = 1;

    fixed_reg.pL2CA_FixedConn_Cb = gatt_le_connect_cback;
    fixed_reg.pL2CA_FixedData_Cb = gatt_le_data_ind;
    fixed_reg.pL2CA_FixedCong_Cb = gatt_le_cong_cback;    /* congestion callback */
    fixed_reg.default_idle_tout  = 0xffff;                /* 0xffff default idle timeout */

    L2CA_RegisterFixedChannel (L2CAP_ATT_CID, &fixed_reg);

    /* Now, register with L2CAP for ATT PSM over BR/EDR */
    if (!L2CA_Register (BT_PSM_ATT, (tL2CAP_APPL_INFO *) &dyn_info))
    {
        GATT_TRACE_ERROR ("ATT Dynamic Registration failed");
    }

    BTM_SetSecurityLevel(TRUE, "", BTM_SEC_SERVICE_ATT, BTM_SEC_NONE, BT_PSM_ATT, 0, 0);
    BTM_SetSecurityLevel(FALSE, "", BTM_SEC_SERVICE_ATT, BTM_SEC_NONE, BT_PSM_ATT, 0, 0);

    gatt_cb.hdl_cfg.gatt_start_hdl = GATT_GATT_START_HANDLE;
    gatt_cb.hdl_cfg.gap_start_hdl  = GATT_GAP_START_HANDLE;
    gatt_cb.hdl_cfg.app_start_hdl  = GATT_APP_START_HANDLE;
    gatt_profile_db_init();
}
/*******************************************************************************
**
** Function         bta_pan_conn_state_cback
**
** Description      Connection state callback from PAN profile
**
**
** Returns          void
**
*******************************************************************************/
static void bta_pan_conn_state_cback(UINT16 handle, BD_ADDR bd_addr, tPAN_RESULT state,
                                     BOOLEAN is_role_change, UINT8 src_role, UINT8 dst_role)
{
    tBTA_PAN_CONN  *p_buf;
    tBTA_PAN_SCB   *p_scb;

    if ((p_buf = (tBTA_PAN_CONN *) GKI_getbuf(sizeof(tBTA_PAN_CONN))) != NULL)
    {
        if ((state == PAN_SUCCESS) && !is_role_change)
        {
            p_buf->hdr.event = BTA_PAN_CONN_OPEN_EVT;
            if ((p_scb = bta_pan_scb_by_handle(handle)) == NULL)
            {
                /* allocate an scb */
                p_scb = bta_pan_scb_alloc();
            }
            /* we have exceeded the maximum number of connections */
            if (!p_scb)
            {
                PAN_Disconnect (handle);
                GKI_freebuf (p_buf);    /* release the unused event buffer */
                return;
            }

            p_scb->handle = handle;
            p_scb->local_role = src_role;
            p_scb->peer_role = dst_role;
            p_scb->pan_flow_enable = TRUE;
            bdcpy(p_scb->bd_addr, bd_addr);
            GKI_init_q(&p_scb->data_queue);

            if (src_role == PAN_ROLE_CLIENT)
                p_scb->app_id = bta_pan_cb.app_id[0];
            else if (src_role == PAN_ROLE_GN_SERVER)
                p_scb->app_id = bta_pan_cb.app_id[1];
            else if (src_role == PAN_ROLE_NAP_SERVER)
                p_scb->app_id = bta_pan_cb.app_id[2];
        }
        else if ((state != PAN_SUCCESS) && !is_role_change)
        {
            p_buf->hdr.event = BTA_PAN_CONN_CLOSE_EVT;
        }
        else
        {
            /* role change: no event is sent up; release the unused event buffer */
            GKI_freebuf (p_buf);
            return;
        }

        p_buf->result = state;
        p_buf->hdr.layer_specific = handle;

        bta_sys_sendmsg(p_buf);
    }
}
/*******************************************************************************
**
** Function         AVDT_WriteReqOpt
**
** Description      Send a media packet to the peer device.  The stream must
**                  be started before this function is called.  Also, this
**                  function can only be called if the stream is a SRC.
**
**                  When AVDTP has sent the media packet and is ready for the
**                  next packet, an AVDT_WRITE_CFM_EVT is sent to the
**                  application via the control callback.  The application must
**                  wait for the AVDT_WRITE_CFM_EVT before it makes the next
**                  call to AVDT_WriteReq().  If the application calls
**                  AVDT_WriteReq() before it receives the event the packet
**                  will not be sent.  The application may make its first call
**                  to AVDT_WriteReq() after it receives an AVDT_START_CFM_EVT
**                  or AVDT_START_IND_EVT.
**
**                  The application passes the packet using the BT_HDR structure.
**                  This structure is described in section 2.1.  The offset
**                  field must be equal to or greater than AVDT_MEDIA_OFFSET
**                  (if NO_RTP is specified, L2CAP_MIN_OFFSET can be used).
**                  This allows enough space in the buffer for the L2CAP and
**                  AVDTP headers.
**
**                  The memory pointed to by p_pkt must be a GKI buffer
**                  allocated by the application.  This buffer will be freed
**                  by the protocol stack; the application must not free
**                  this buffer.
**
**                  The opt parameter allows passing specific options like:
**                  - NO_RTP : do not add the RTP header to the buffer
**
** Returns          AVDT_SUCCESS if successful, otherwise error.
**
*******************************************************************************/
UINT16 AVDT_WriteReqOpt(UINT8 handle, BT_HDR *p_pkt, UINT32 time_stamp, UINT8 m_pt,
                        tAVDT_DATA_OPT_MASK opt)
{
    tAVDT_SCB      *p_scb;
    tAVDT_SCB_EVT  evt;
    UINT16         result = AVDT_SUCCESS;

    BTTRC_AVDT_API0(AVDT_TRACE_API_WRITE_REQ);

    /* map handle to scb */
    if ((p_scb = avdt_scb_by_hdl(handle)) == NULL)
    {
        result = AVDT_BAD_HANDLE;
    }
    else
    {
        evt.apiwrite.p_buf      = p_pkt;
        evt.apiwrite.time_stamp = time_stamp;
        evt.apiwrite.m_pt       = m_pt;
        evt.apiwrite.opt        = opt;
#if AVDT_MULTIPLEXING == TRUE
        GKI_init_q (&evt.apiwrite.frag_q);
#endif
        avdt_scb_event(p_scb, AVDT_SCB_API_WRITE_REQ_EVT, &evt);
    }

    return result;
}
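A minimal caller sketch for the sequence the comment above describes. The handle, codec_frame, frame_len, time_stamp and m_pt variables are assumed to come from the application's stream setup and encoder, and the buffer size passed to GKI_getbuf and the AVDT_DATA_OPT_NONE option value are assumptions for illustration, not taken from this file:

/* Hedged sketch: allocate a GKI buffer, reserve AVDT_MEDIA_OFFSET for the
** L2CAP/AVDTP/RTP headers, copy the media payload, and hand the buffer to AVDTP. */
BT_HDR *p_pkt = (BT_HDR *) GKI_getbuf((UINT16)(sizeof(BT_HDR) + AVDT_MEDIA_OFFSET + frame_len));

if (p_pkt != NULL)
{
    UINT8 *p;

    p_pkt->offset = AVDT_MEDIA_OFFSET;      /* room for the protocol headers */
    p_pkt->len    = frame_len;
    p = (UINT8 *)(p_pkt + 1) + p_pkt->offset;
    memcpy(p, codec_frame, frame_len);

    if (AVDT_WriteReqOpt(handle, p_pkt, time_stamp, m_pt, AVDT_DATA_OPT_NONE) != AVDT_SUCCESS)
    {
        /* the AVDT_BAD_HANDLE path above does not consume the buffer, so release it here */
        GKI_freebuf(p_pkt);
    }
    /* on success the stack owns p_pkt; wait for AVDT_WRITE_CFM_EVT before the next write */
}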
/*******************************************************************************
**
** Function         AVDT_WriteDataReq
**
** Description      Send a media packet to the peer device.  The stream must
**                  be started before this function is called.  Also, this
**                  function can only be called if the stream is a SRC.
**
**                  When AVDTP has sent the media packet and is ready for the
**                  next packet, an AVDT_WRITE_CFM_EVT is sent to the
**                  application via the control callback.  The application must
**                  wait for the AVDT_WRITE_CFM_EVT before it makes the next
**                  call to AVDT_WriteDataReq().  If the application calls
**                  AVDT_WriteDataReq() before it receives the event the packet
**                  will not be sent.  The application may make its first call
**                  to AVDT_WriteDataReq() after it receives an
**                  AVDT_START_CFM_EVT or AVDT_START_IND_EVT.
**
** Returns          AVDT_SUCCESS if successful, otherwise error.
**
*******************************************************************************/
extern UINT16 AVDT_WriteDataReq(UINT8 handle, UINT8 *p_data, UINT32 data_len,
                                UINT32 time_stamp, UINT8 m_pt, UINT8 marker)
{
    tAVDT_SCB      *p_scb;
    tAVDT_SCB_EVT  evt;
    UINT16         result = AVDT_SUCCESS;

    do
    {
        /* check length of media frame */
        if (data_len > AVDT_MAX_MEDIA_SIZE)
        {
            result = AVDT_BAD_PARAMS;
            break;
        }

        /* map handle to scb */
        if ((p_scb = avdt_scb_by_hdl(handle)) == NULL)
        {
            result = AVDT_BAD_HANDLE;
            break;
        }

        AVDT_TRACE_WARNING("mux_tsid_media:%d\n", p_scb->curr_cfg.mux_tsid_media);

        if (p_scb->p_pkt != NULL || p_scb->p_ccb == NULL ||
            !GKI_queue_is_empty(&p_scb->frag_q) || p_scb->frag_off != 0 ||
            p_scb->curr_cfg.mux_tsid_media == 0)
        {
            result = AVDT_ERR_BAD_STATE;
            AVDT_TRACE_WARNING("p_scb->p_pkt=%p, p_scb->p_ccb=%p, IsQueueEmpty=%x, p_scb->frag_off=%x\n",
                               p_scb->p_pkt, p_scb->p_ccb,
                               GKI_queue_is_empty(&p_scb->frag_q), p_scb->frag_off);
            break;
        }

        /* a NULL buffer indicates that the fragments queue frag_q is used */
        evt.apiwrite.p_buf = 0;

        /* create queue of media fragments */
        GKI_init_q (&evt.apiwrite.frag_q);

        /* compose fragments from the media payload and put them into the queue */
        avdt_scb_queue_frags(p_scb, &p_data, &data_len, &evt.apiwrite.frag_q);

        if (GKI_queue_is_empty(&evt.apiwrite.frag_q))
        {
            AVDT_TRACE_WARNING("AVDT_WriteDataReq out of GKI buffers");
            result = AVDT_ERR_RESOURCE;
            break;
        }

        evt.apiwrite.data_len = data_len;
        evt.apiwrite.p_data   = p_data;

        /* process the fragments queue */
        evt.apiwrite.time_stamp = time_stamp;
        evt.apiwrite.m_pt       = m_pt | (marker << 7);
        avdt_scb_event(p_scb, AVDT_SCB_API_WRITE_REQ_EVT, &evt);
    } while (0);

#if (BT_USE_TRACES == TRUE)
    if (result != AVDT_SUCCESS)
    {
        AVDT_TRACE_WARNING("*** AVDT_WriteDataReq failed result=%d\n", result);
    }
#endif
    return result;
}
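A similarly hedged sketch of the raw-payload variant. Unlike AVDT_WriteReqOpt(), no BT_HDR is allocated by the caller; the stack builds its own fragment buffers, so the application keeps ownership of p_data. The handle, codec_frame, frame_len, time_stamp and m_pt variables are illustrative assumptions:

/* Hedged sketch: marker is the RTP marker bit, carried as the top bit of m_pt
** (see the m_pt | (marker << 7) composition above). */
UINT16 rc = AVDT_WriteDataReq(handle, codec_frame, frame_len, time_stamp, m_pt, 1);

if (rc == AVDT_SUCCESS)
{
    /* wait for AVDT_WRITE_CFM_EVT via the control callback before calling again */
}
else if (rc == AVDT_ERR_RESOURCE)
{
    /* out of GKI buffers (see the empty frag_q check above); codec_frame is
    ** still owned by the application and the write may be retried later */
}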
/*******************************************************************************
**
** Function         avdt_scb_alloc
**
** Description      Allocate a stream control block.
**
**
** Returns          pointer to the scb, or NULL if none could be allocated.
**
*******************************************************************************/
tAVDT_SCB *avdt_scb_alloc(tAVDT_CS *p_cs)
{
    tAVDT_SCB   *p_scb = &avdt_cb.scb[0];
    int         i;

    /* find available scb */
    for (i = 0; i < AVDT_NUM_SEPS; i++, p_scb++)
    {
        if (!p_scb->allocated)
        {
            memset(p_scb, 0, sizeof(tAVDT_SCB));
            p_scb->allocated = TRUE;
            p_scb->p_ccb = NULL;

            /* initialize sink as activated */
            if (p_cs->tsep == AVDT_TSEP_SNK)
            {
                p_scb->sink_activated = TRUE;
            }

            memcpy(&p_scb->cs, p_cs, sizeof(tAVDT_CS));
#if AVDT_MULTIPLEXING == TRUE
            /* initialize fragments queue */
            GKI_init_q(&p_scb->frag_q);

            if (p_cs->cfg.psc_mask & AVDT_PSC_MUX)
            {
                p_scb->cs.cfg.mux_tcid_media = avdt_ad_type_to_tcid(AVDT_CHAN_MEDIA, p_scb);
#if AVDT_REPORTING == TRUE
                if (p_cs->cfg.psc_mask & AVDT_PSC_REPORT)
                {
                    p_scb->cs.cfg.mux_tcid_report = avdt_ad_type_to_tcid(AVDT_CHAN_REPORT, p_scb);
                }
#endif
            }
#endif
            p_scb->timer_entry.param = (UINT32) p_scb;
            AVDT_TRACE_DEBUG("avdt_scb_alloc hdl=%d, psc_mask:0x%x", i+1, p_cs->cfg.psc_mask);
            break;
        }
    }

    if (i == AVDT_NUM_SEPS)
    {
        /* out of scbs */
        p_scb = NULL;
        AVDT_TRACE_WARNING("Out of scbs");
    }

    return p_scb;
}
static void init_rfc_slots()
{
    int i;

    memset(rfc_slots, 0, sizeof(rfc_slot_t)*MAX_RFC_CHANNEL);
    for (i = 0; i < MAX_RFC_CHANNEL; i++)
    {
        rfc_slots[i].scn = -1;
        rfc_slots[i].sdp_handle = 0;
        rfc_slots[i].fd = rfc_slots[i].app_fd = -1;
        GKI_init_q(&rfc_slots[i].incoming_que);
    }
    BTA_JvEnable(jv_dm_cback);
    init_slot_lock(&slot_lock);
}
static void init_l2c_slots()
{
    int i;

    memset(l2c_slots, 0, sizeof(l2c_slot_t)*MAX_L2C_SOCK_CHANNEL);
    for (i = 0; i < MAX_L2C_SOCK_CHANNEL; i++)
    {
        l2c_slots[i].psm = -1;
        l2c_slots[i].f.client = FALSE;
        l2c_slots[i].put_size_set = FALSE;
        l2c_slots[i].sdp_handle = 0;
        l2c_slots[i].l2c_handle = INVALID_L2C_HANDLE;
        l2c_slots[i].fd = l2c_slots[i].app_fd = -1;
        l2c_slots[i].id = BASE_L2C_SLOT_ID + i;
        GKI_init_q(&l2c_slots[i].incoming_que);
    }
    BTA_JvRegisterL2cCback(jv_dm_cback);
    init_slot_lock(&slot_lock);
}
/*******************************************************************************
**
** Function         bta_pan_conn_close
**
** Description      process connection close event
**
**
**
** Returns          void
**
*******************************************************************************/
void bta_pan_conn_close(tBTA_PAN_SCB *p_scb, tBTA_PAN_DATA *p_data)
{
    tBTA_PAN_CLOSE  data;
    BT_HDR          *p_buf;

    data.handle = p_data->hdr.layer_specific;

    bta_sys_conn_close(BTA_ID_PAN, p_scb->app_id, p_scb->bd_addr);

    /* free all queued up data buffers */
    while ((p_buf = (BT_HDR *)GKI_dequeue(&p_scb->data_queue)) != NULL)
        GKI_freebuf(p_buf);

    GKI_init_q(&p_scb->data_queue);

    bta_pan_co_close(p_scb->handle, p_scb->app_id);

    bta_pan_scb_dealloc(p_scb);

    bta_pan_cb.p_cback(BTA_PAN_CLOSE_EVT, (tBTA_PAN *)&data);
}
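For reference, the GKI queue lifecycle these routines rely on can be sketched as below. The local names and the buffer size passed to GKI_getbuf are illustrative assumptions, not taken from the stack:

/* Hedged sketch: initialize a queue, enqueue one GKI buffer, then drain and
** free on teardown, mirroring the pattern in bta_pan_conn_close() above. */
BUFFER_Q  q;
BT_HDR    *p_buf;

GKI_init_q(&q);                                    /* start with an empty queue, as in init_rfc_slots() */

if ((p_buf = (BT_HDR *) GKI_getbuf(sizeof(BT_HDR))) != NULL)
    GKI_enqueue(&q, p_buf);                        /* the queue stores the buffer pointer */

/* teardown: dequeue and free every remaining buffer */
while ((p_buf = (BT_HDR *) GKI_dequeue(&q)) != NULL)
    GKI_freebuf(p_buf);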