/**
 * Mark the slot at file offset inSlotPos as deleted by overwriting its
 * record-ID field with 0 (the deleted-slot sentinel).
 *
 * @param inSlotPos  Byte offset of the slot within the data file.
 * @param inRecID    ID of the record expected to occupy that slot; used only
 *                   for integrity checking in debug builds.
 *
 * Throws dbIndexCorrupt (debug/integrity builds only) when the offset lies
 * beyond the end of the file or when the slot holds a different record ID
 * than the index claims.
 */
void CVarDataFile::MarkSlotDeleted(UInt32 inSlotPos, RecIDT inRecID)
{
    RecIDT recID = 0; // use 0 as deleted slot marker
#if DB_DEBUG_MODE || DB_INTEGRITY_CHECKING
    ASSERT(mBytesUsed <= mAllocatedBytes); // can't use more than we've allocated
    ASSERT((mAllocatedBytes+mFirstItemPos) == GetLength()); // LFileStream needs to be in synch
    // Reject offsets past EOF before touching the stream.
    if (GetLength() < (SInt32)inSlotPos) {
        DB_LOG("ERROR: Delete Rec " << inRecID << " pos: "<<inSlotPos <<" FAILED, overran datafile length "<<GetLength()<<"B");
        DB_DEBUG("ERROR: Trying to delete-mark slot at offset "<<inSlotPos<<", but datafile is only "<<GetLength()<<" bytes long.", DEBUG_ERROR);
        Throw_( dbIndexCorrupt );
    }
    // Read back the ID currently stored in the slot to verify the index entry.
    SetMarker(inSlotPos + kVarDBFileRecIDOffset, streamFrom_Start); // debugging, check old ID
    ReadBlock(&recID, kSizeOfRecID);
#if PLATFORM_LITTLE_ENDIAN
    recID = BigEndian32_ToNative(recID); // on-disk format is big-endian
#endif // PLATFORM_LITTLE_ENDIAN
    DB_DEBUG("MarkSlotDeleted(); deleted_id: " << recID, DEBUG_TRIVIA);
    if (recID != inRecID) {
        DB_LOG("ERROR: Delete Rec " << inRecID << " pos: "<<inSlotPos << " FAILED, slot has Record "<<recID);
        DB_DEBUG("ERROR: Tried to delete Record ID " << inRecID << " but record " <<recID <<" found in slot at "<<inSlotPos, DEBUG_ERROR);
        Throw_( dbIndexCorrupt );
    }
    // A zero ID means the slot was already delete-marked; warn but continue.
    if (recID == 0) {
        DB_LOG("WARNING: Delete Rec " << inRecID << " pos: "<<inSlotPos << " previously deleted");
        DB_DEBUG("NON-CRITICAL ERROR: Deleting a slot with a zero (deleted) ID", DEBUG_ERROR);
    }
    recID = 0;
#endif
    // writing zero, don't worry about endian swap
    SetMarker(inSlotPos + kVarDBFileRecIDOffset, streamFrom_Start); // move to position of recID in slot
    WriteBlock(&recID, kSizeOfRecID); // write the deletion marker
    DB_LOG("Deleted Rec " << inRecID << " pos: "<<inSlotPos);
    DB_DEBUG("MarkSlotDeleted(); pos: " << inSlotPos, DEBUG_TRIVIA);
}
/**@brief Function for handling primary service discovery response.
 *
 * @details This function will handle the primary service discovery response and start the
 *          discovery of characteristics within that service.
 *
 * @param[in] p_db_discovery    Pointer to the DB Discovery structure.
 * @param[in] p_ble_gattc_evt   Pointer to the GATT Client event.
 */
static void on_primary_srv_discovery_rsp(ble_db_discovery_t * const p_db_discovery,
                                         const ble_gattc_evt_t * const p_ble_gattc_evt)
{
    ble_gatt_db_srv_t * p_srv_being_discovered;
    p_srv_being_discovered = &(p_db_discovery->services[p_db_discovery->curr_srv_ind]);
    if (p_ble_gattc_evt->conn_handle != p_db_discovery->conn_handle)
    {
        // Event belongs to a different connection; ignore it.
        return;
    }
    if (p_ble_gattc_evt->gatt_status == BLE_GATT_STATUS_SUCCESS)
    {
        uint32_t err_code;
        const ble_gattc_evt_prim_srvc_disc_rsp_t * p_prim_srvc_disc_rsp_evt;
        DB_LOG("Found service UUID 0x%x\r\n", p_srv_being_discovered->srv_uuid.uuid);
        p_prim_srvc_disc_rsp_evt = &(p_ble_gattc_evt->params.prim_srvc_disc_rsp);
        // Only the first reported service instance is recorded here.
        p_srv_being_discovered->srv_uuid     = p_prim_srvc_disc_rsp_evt->services[0].uuid;
        p_srv_being_discovered->handle_range = p_prim_srvc_disc_rsp_evt->services[0].handle_range;
        err_code = characteristics_discover(p_db_discovery, p_ble_gattc_evt->conn_handle);
        if (err_code != NRF_SUCCESS)
        {
            p_db_discovery->discovery_in_progress = false;
            // Error with discovering the service.
            // Indicate the error to the registered user application.
            discovery_error_evt_trigger(p_db_discovery, err_code, p_ble_gattc_evt->conn_handle);
            m_pending_user_evts[0].evt.evt_type    = BLE_DB_DISCOVERY_AVAILABLE;
            m_pending_user_evts[0].evt.conn_handle = p_ble_gattc_evt->conn_handle;
            //m_evt_handler(&m_pending_user_evts[0].evt);
        }
    }
    else
    {
        DB_LOG("Service UUID 0x%x Not found\r\n", p_srv_being_discovered->srv_uuid.uuid);
        // Trigger Service Not Found event to the application.
        discovery_complete_evt_trigger(p_db_discovery, false, p_ble_gattc_evt->conn_handle);
        // Move on to the next registered service, if any.
        on_srv_disc_completion(p_db_discovery, p_ble_gattc_evt->conn_handle);
    }
}
/**
 * Write the new contents of an existing record back into its slot in the
 * data file. The master index supplies (and may relocate) the slot position;
 * inRecP->recPos is updated to reflect where the record actually landed.
 *
 * @param inRecP  Record to persist; recID and recSize must be set. The recID
 *                field is temporarily byte-swapped in place for the disk
 *                write and restored before returning.
 *
 * Debug/integrity builds verify the target slot: the ID on disk must be 0
 * (fresh slot) or match the record, and the slot must be large enough.
 * Throws dbInvalidID, dbDataCorrupt, or dbIndexCorrupt on failure.
 */
void CVarDataFile::UpdateRecord(DatabaseRec *inRecP)
{
    SInt32 recSize = inRecP->recSize;
    SInt32 recPos = itsMasterIndex->UpdateEntry(inRecP->recID, recSize); // find index entry
    inRecP->recPos = recPos;
#if DB_DEBUG_MODE || DB_INTEGRITY_CHECKING
    // IDs are allocated sequentially; anything above mLastRecID was never issued.
    if (inRecP->recID > mLastRecID) {
        DB_DEBUG("ERROR: Invalid Record ID "<<inRecP->recID<<" updated. Last ID is "<<mLastRecID, DEBUG_ERROR);
        Throw_( dbInvalidID );
    }
    // Every record must at least contain its own header.
    if (recSize < (SInt32)sizeof(DatabaseRec)) {
        DB_DEBUG("ERROR: Trying to update Rec "<<inRecP->recID<<" with size of "<<recSize<<" bytes, smaller than the record header alone.", DEBUG_ERROR);
        Throw_( dbDataCorrupt );
    }
    ASSERT(mBytesUsed <= mAllocatedBytes); // can't use more than we've allocated
    ASSERT((mAllocatedBytes+mFirstItemPos) == GetLength()); // LFileStream needs to be in synch
    if (GetLength() < recPos) {
        DB_DEBUG("ERROR: Index returned offset "<<recPos<<" for Rec "<<inRecP->recID<<", but datafile is only "<<GetLength()<<" bytes long.", DEBUG_ERROR);
        mFileIsDamaged = true;
        Throw_( dbIndexCorrupt );
    }
    // DB_DEBUG("in UpdateRecord("<< inRecP->recID <<"); size: "<<recSize<<" pos: "<<recPos, DEBUG_TRIVIA);
    RecIDT oldID;
    SInt32 slotSize;
    // Read the slot's stored size and current occupant ID for validation.
    SetMarker(recPos + kVarDBFileSlotSizeOffset, streamFrom_Start); // debugging, check old ID
    ReadBlock(&slotSize, kSizeOfSlotSize);
    ReadBlock(&oldID, kSizeOfRecID);
#if PLATFORM_LITTLE_ENDIAN
    slotSize = BigEndian32_ToNative(slotSize); // on-disk format is big-endian
    oldID = BigEndian32_ToNative(oldID);
#endif // PLATFORM_LITTLE_ENDIAN
    // Slot must be empty (0) or already belong to this record.
    if ( (oldID != 0) && (oldID != inRecP->recID) ) {
        DB_LOG("ERROR: Update Rec " << inRecP->recID << " " << recSize << "B pos: "<<recPos<<" FAILED, overwriting Rec "<<oldID);
        DB_DEBUG("ERROR: Updating "<< inRecP->recID << " into wrong place [" << recPos << "] , overwriting Rec "<<oldID, DEBUG_ERROR);
        mFileIsDamaged = true;
        Throw_( dbIndexCorrupt );
    }
    if (slotSize < RecSizeToSlotSize(recSize)) {
        DB_LOG("ERROR: Update Rec " << inRecP->recID << " " << recSize << "B pos: "<<recPos<<" FAILED, slot too small "<<slotSize<<"B");
        DB_DEBUG("ERROR: Writing "<< inRecP->recID <<" of "<<RecSizeToSlotSize(recSize)<<" bytes into a "<<slotSize<<" byte slot at "<< recPos, DEBUG_ERROR);
        mFileIsDamaged = true;
        Throw_( dbDataCorrupt );
    }
#endif
    SetMarker(recPos + kVarDBFileRecIDOffset, streamFrom_Start); // move to start of slot's data, skipping size
    // Swap the ID to disk byte order, write the whole record, then restore it.
    inRecP->recID = Native_ToBigEndian32(inRecP->recID);
    WriteBlock(&inRecP->recID, RecSizeToIOSize(recSize)); // write the new record data into the slot
    inRecP->recID = BigEndian32_ToNative(inRecP->recID);
    DB_LOG("Updated Rec " << inRecP->recID << " " << recSize << "B pos: "<<recPos);
    DB_DEBUG("UpdateRecord("<< inRecP->recID <<"); size: "<<recSize<<" pos: "<<recPos, DEBUG_TRIVIA);
}
/* Master-process handler for an asynchronous database "get" request.
 * Unpacks the requester pid, token, callback hook and key from the binary
 * payload, looks the key up in the global table, and sends the value (or
 * NULL when absent) back to the requester. Always returns 0. */
static int async_db_get_hook(aroop_txt_t*bin, aroop_txt_t*output) {
	/**
	 * The database is available only in the master process
	 */
	aroop_assert(is_master());
	int requester_pid = 0;
	int token = 0;
	aroop_txt_t lookup_key = {};  /* key to look up */
	aroop_txt_t reply_hook = {};  /* callback hook name */
	/* payload layout: 0 = srcpid, 1 = command, 2 = token, 3 = cb_hook, 4 = key, 5 = val */
	binary_unpack_int(bin, 0, &requester_pid);
	binary_unpack_int(bin, 2, &token);
	binary_unpack_string(bin, 3, &reply_hook); /* needs cleanup */
	binary_unpack_string(bin, 4, &lookup_key); /* needs cleanup */
	DB_LOG(LOG_NOTICE, "[token%d]-get-doing ..--[dest:%d]-[key:%s]-[app:%s]", token, requester_pid, aroop_txt_to_string(&lookup_key), aroop_txt_to_string(&reply_hook));
	/* borrowed reference — no cleanup needed */
	aroop_txt_t*stored = (aroop_txt_t*)opp_hash_table_get_no_ref(&global_db, &lookup_key);
	async_db_op_reply(requester_pid, token, &reply_hook, 1, &lookup_key, stored);
	/* cleanup */
	aroop_txt_destroy(&reply_hook);
	aroop_txt_destroy(&lookup_key);
	return 0;
}
/* Master-process handler for an asynchronous compare-and-swap request.
 * Unpacks pid, token, hook, key, new value and expected value, attempts the
 * CAS via async_db_op_helper(), and replies with the success flag. Always
 * returns 0. */
static int async_db_CAS_hook(aroop_txt_t*bin, aroop_txt_t*output) {
	/**
	 * The database is available only in the master process
	 */
	aroop_assert(is_master());
	int requester_pid = 0;
	int token = 0;
	aroop_txt_t cas_key = {};     /* the key to set */
	aroop_txt_t expected = {};    /* value to compare with the stored one */
	aroop_txt_t desired = {};     /* value to store on a successful compare */
	aroop_txt_t reply_hook = {};  /* callback hook name */
	/* payload layout: 0 = srcpid, 1 = command, 2 = token, 3 = cb_hook, 4 = key, 5 = newval, 6 = oldval */
	binary_unpack_int(bin, 0, &requester_pid);
	binary_unpack_int(bin, 2, &token);
	binary_unpack_string(bin, 3, &reply_hook); /* needs cleanup */
	binary_unpack_string(bin, 4, &cas_key);    /* needs cleanup */
	binary_unpack_string(bin, 5, &desired);    /* needs cleanup */
	binary_unpack_string(bin, 6, &expected);   /* needs cleanup */
	DB_LOG(LOG_NOTICE, "[token%d]-CAS-doing ..--[dest:%d]-[key:%s]-[app:%s]", token, requester_pid, aroop_txt_to_string(&cas_key), aroop_txt_to_string(&reply_hook));
	/* helper returns 0 on success, nonzero on compare failure */
	const int swapped = (async_db_op_helper(&cas_key, &desired, &expected) == 0);
	async_db_op_reply(requester_pid, token, &reply_hook, swapped, &cas_key, &desired);
	/* cleanup */
	aroop_txt_destroy(&cas_key);
	aroop_txt_destroy(&desired);
	aroop_txt_destroy(&expected);
	aroop_txt_destroy(&reply_hook);
	return 0;
}
/**@brief Function for handling service discovery completion.
 *
 * @details This function will be used to determine if there are more services to be discovered,
 *          and if so, initiate the discovery of the next service.
 *
 * @param[in] p_db_discovery Pointer to the DB Discovery Structure.
 * @param[in] conn_handle    Connection Handle.
 */
static void on_srv_disc_completion(ble_db_discovery_t * p_db_discovery,
                                   uint16_t const conn_handle)
{
    p_db_discovery->discoveries_count++;
    // Check if more services need to be discovered.
    if (p_db_discovery->discoveries_count < m_num_of_handlers_reg)
    {
        // Reset the current characteristic index since a new service discovery is about to start.
        p_db_discovery->curr_char_ind = 0;
        // Initiate discovery of the next service.
        p_db_discovery->curr_srv_ind++;
        ble_gatt_db_srv_t * p_srv_being_discovered;
        p_srv_being_discovered = &(p_db_discovery->services[p_db_discovery->curr_srv_ind]);
        // Target the UUID registered by the next user module.
        p_srv_being_discovered->srv_uuid = m_registered_handlers[p_db_discovery->curr_srv_ind];
        // Reset the characteristic count in the current service to zero since a new service
        // discovery is about to start.
        p_srv_being_discovered->char_count = 0;
        DB_LOG("[DB]: Starting discovery of service with UUID 0x%x for Connection handle %d\r\n",
               p_srv_being_discovered->srv_uuid.uuid, conn_handle);
        uint32_t err_code;
        err_code = sd_ble_gattc_primary_services_discover
                   (
                   conn_handle,
                   SRV_DISC_START_HANDLE,
                   &(p_srv_being_discovered->srv_uuid)
                   );
        if (err_code != NRF_SUCCESS)
        {
            p_db_discovery->discovery_in_progress = false;
            // Error with discovering the service.
            // Indicate the error to the registered user application.
            discovery_error_evt_trigger(p_db_discovery, err_code, conn_handle);
            m_pending_user_evts[0].evt.evt_type = BLE_DB_DISCOVERY_AVAILABLE;
            m_pending_user_evts[0].evt.conn_handle = conn_handle;
            // m_evt_handler(&m_pending_user_evts[0].evt);
            return;
        }
    }
    else
    {
        // No more service discovery is needed.
        p_db_discovery->discovery_in_progress = false;
        m_pending_user_evts[0].evt.evt_type = BLE_DB_DISCOVERY_AVAILABLE;
        m_pending_user_evts[0].evt.conn_handle = conn_handle;
        //m_evt_handler(&m_pending_user_evts[0].evt);
    }
}
void ADataStore::Close() { // ** NOT THREAD SAFE ** if (mFileOpen) { WriteHeader(kFileIsClosed); } mFileOpen = false; DB_LOG("Closed Database"); }
RecIDT //** Thread Safe **
ADataStore::GetNewRecordID(DatabaseRec *) // parameter (inRecP) intentionally unused
{
    // Allocate the next sequential record ID.
#if DB_THREAD_SUPPORT
    StSafeMutex mutex(mChangeID); // serialize ID allocation across threads
#endif
    const RecIDT newID = ++mLastRecID;
    DB_LOG("GetNewRecordID(): " << newID);
    return newID;
}
// Close the variable-record data file: delegate to CDataFile::Close(), then
// log a one-line dump of every header field for post-mortem inspection.
// "DAMAGED" is appended when an integrity problem was detected this session.
void CVarDataFile::Close()
{
    CDataFile::Close();
    DB_LOG("DataFile Header: lastRecID ["<<mLastRecID<<"] allocatedBytes ["<<mAllocatedBytes
        <<"] bytesUsed ["<<mBytesUsed << "] largestRecSize ["<<mLargestRecSize
        <<"] itemSize ["<<mItemSize<<"] itemCount ["<<mItemCount
        << "] allocatedSlots ["<<mAllocatedSlots<<"] firstItemPos ["<<mFirstItemPos
        << "] numValidRecs ["<<mNumValidRecs<<"] allocBlockSize ["<<mAllocBlockSize
        <<(const char*)(mFileIsDamaged ? "] DAMAGED" : "]"));
}
/* Ship the outcome of a database operation back to the requesting process.
 * key/newval travel as reply arguments; when the caller supplied no callback
 * hook, the reply is routed through the shared null_hook instead. Returns
 * the result of the underlying reply worker. */
static int async_db_op_reply(int destpid, int cb_token, aroop_txt_t*cb_hook, int success, aroop_txt_t*key, aroop_txt_t*newval) {
	/* reply layout: 0 = pid, 1 = src pid, 2 = command, 3 = token, 4 = cb_hook, 5 = success */
	aroop_txt_t*args[3] = {key, newval, NULL};
	DB_LOG(LOG_NOTICE, "[token%d]-replying-throwing to--[dest:%d]-[key:%s]-[app:%s]", cb_token, destpid, aroop_txt_to_string(key), aroop_txt_to_string(cb_hook));
	aroop_txt_t*target_hook = aroop_txt_is_empty(cb_hook) ? &null_hook : cb_hook;
	return async_pm_reply_worker(destpid, cb_token, target_hook, success, args);
}
// Open the variable-record data file: delegate to CDataFile::Open(), log the
// header fields that were read, then run an integrity check (repairing any
// problems it can). Returns CDataFile::Open()'s result.
bool CVarDataFile::Open()
{
    bool result = CDataFile::Open();
    DB_LOG("DataFile Header: lastRecID ["<<mLastRecID<<"] allocatedBytes ["<<mAllocatedBytes
        <<"] bytesUsed ["<<mBytesUsed << "] largestRecSize ["<<mLargestRecSize
        <<"] itemSize ["<<mItemSize<<"] itemCount ["<<mItemCount
        << "] allocatedSlots ["<<mAllocatedSlots<<"] firstItemPos ["<<mFirstItemPos
        << "] numValidRecs ["<<mNumValidRecs<<"] allocBlockSize ["<<mAllocBlockSize
        <<(const char*)(mFileIsDamaged ? "] DAMAGED" : "]"));
    // Validate (and if possible repair) the index/data-file consistency on every open.
    CheckDatabaseIntegrity(kRepairProblems);
    return result;
}
/**@brief Start GATT database discovery at the peer (multi-service variant
 *        using ble_db_discovery_srv_t — appears to be one SDK revision of
 *        this API among several in this source).
 *
 * @param[in,out] p_db_discovery Pointer to the DB Discovery structure.
 * @param[in]     conn_handle    Connection to discover on.
 *
 * @return NRF_SUCCESS, NRF_ERROR_NULL, NRF_ERROR_INVALID_STATE,
 *         NRF_ERROR_BUSY, or an sd_ble_gattc_primary_services_discover error.
 */
uint32_t ble_db_discovery_start(ble_db_discovery_t * const p_db_discovery,
                                uint16_t conn_handle)
{
    if (p_db_discovery == NULL)
    {
        return NRF_ERROR_NULL;
    }
    if (!m_initialized)
    {
        return NRF_ERROR_INVALID_STATE;
    }
    if (m_num_of_handlers_reg == 0)
    {
        // No user modules were registered. There are no services to discover.
        return NRF_ERROR_INVALID_STATE;
    }
    if (p_db_discovery->discovery_in_progress)
    {
        return NRF_ERROR_BUSY;
    }
    ble_db_discovery_srv_t * p_srv_being_discovered;
    // Reset module-wide and per-instance discovery state.
    m_num_of_discoveries_made = 0;
    m_pending_usr_evt_index = 0;
    p_db_discovery->curr_srv_ind = 0;
    p_db_discovery->conn_handle = conn_handle;
    p_srv_being_discovered = &(p_db_discovery->services[p_db_discovery->curr_srv_ind]);
    // Begin with the first registered handler's service UUID.
    p_srv_being_discovered->srv_uuid = m_registered_handlers[p_db_discovery->curr_srv_ind].srv_uuid;
    DB_LOG("[DB]: Starting discovery of service with UUID 0x%x for Connection handle %d\r\n",
           p_srv_being_discovered->srv_uuid.uuid, p_db_discovery->conn_handle);
    uint32_t err_code;
    err_code = sd_ble_gattc_primary_services_discover(p_db_discovery->conn_handle,
                                                      SRV_DISC_START_HANDLE,
                                                      &(p_srv_being_discovered->srv_uuid));
    if (err_code != NRF_SUCCESS)
    {
        return err_code;
    }
    // Mark busy only after the SoftDevice accepted the request.
    p_db_discovery->discovery_in_progress = true;
    return NRF_SUCCESS;
}
bool ADataStore::Open() { // ** NOT THREAD SAFE ** bool result; if (!mFileOpen && HeaderExists()) { result = ReadHeader(); } else { result = false; } WriteHeader(kFileIsOpen); mFileOpen = true; DB_LOG("Opened Database"); return result; }
/**
 * Append a new empty slot of inSize bytes to the end of the data file,
 * growing the file by a whole allocation block when needed, and return the
 * file offset of the new slot. The slot's size field is written (big-endian)
 * and its record-ID field is cleared to 0.
 *
 * @param inSize  Size in bytes of the record the slot must hold.
 * @return        Byte offset of the new slot within the data file.
 *
 * BUGFIX: the scratch buffer used to 0xFF-fill newly allocated file space
 * was malloc'd but never freed, leaking once per file expansion.
 */
UInt32 CVarDataFile::AddNewEmptySlot(SInt32 inSize)
{
    if (inSize > mLargestRecSize) {
        DB_DEBUG("New largest rec is "<<inSize<<" bytes. Previously "<<mLargestRecSize, DEBUG_TRIVIA);
        mLargestRecSize = inSize; // keep track of largest record
    }
    mItemCount++;
    mAllocatedSlots = mItemCount + 1L; // update #slots
    UInt32 oldBytesUsed = mBytesUsed;
    mBytesUsed += inSize;
    if (mBytesUsed >= mAllocatedBytes) {
        // Out of pre-allocated space: grow by one allocation block
        // (eight blocks in batch mode to cut down on expansions).
        if (!mBatchMode) {
            mAllocatedBytes = mBytesUsed + kAllocationBlockSize;
        } else {
            mAllocatedBytes = mBytesUsed + kAllocationBlockSize*8L; // big blocks for batches
        }
        ASSERT(mAllocatedBytes > mBytesUsed);
        ASSERT(mAllocatedBytes >= (SInt32)(mItemCount*RecSizeToSlotSize(sizeof(DatabaseRec)))); // must have at least a header
        UInt32 fileSize = mFirstItemPos + mAllocatedBytes;
        // fill the newly allocated stuff with FFFF
        UInt32 eraseStart = mFirstItemPos + oldBytesUsed;
        ASSERT(eraseStart < fileSize);
        UInt32 eraseLen = fileSize - eraseStart;
        void* p = std::malloc(eraseLen);
        SetLength(fileSize); // expand file
        if (p) {
            std::memset(p, 0xff, eraseLen);
            SetMarker(eraseStart, streamFrom_Start);
            WriteBlock(p, eraseLen); // fill the unused part of the data file
        } else {
            // Non-fatal: the file is still expanded, just not 0xFF-erased.
            DB_DEBUG("Failed to malloc "<<eraseLen<<" bytes for new space erasure in data file", DEBUG_ERROR);
        }
        std::free(p); // BUGFIX: was leaked on every expansion (free(NULL) is a no-op)
        if (!mBatchMode) {
            WriteHeader(); // write new # slots, etc.. in disk file header
        }
    }
    UInt32 recPos = oldBytesUsed + mFirstItemPos;
    SetMarker(recPos + kVarDBFileSlotSizeOffset, streamFrom_Start); // write the slot size in the slot
    // Slot size is stored big-endian on disk; swap, write, swap back.
    inSize = Native_ToBigEndian32(inSize);
    WriteBlock(&inSize, kSizeOfSlotSize);
    inSize = BigEndian32_ToNative(inSize); // restore endianness
    RecIDT tempID = 0; // debugging, make sure the ID is cleared from new slots
    WriteBlock(&tempID, sizeof(RecIDT));
    DB_LOG("Add New Slot size: " << inSize << "B pos: "<<recPos <<" slots: " << mAllocatedSlots);
    DB_DEBUG("AddNewEmptySlot(); size: "<<inSize<<" pos: "<<recPos<<" slots: "<<mAllocatedSlots, DEBUG_TRIVIA);
    return recPos;
}
/**@brief Start GATT database discovery at the peer (VERIFY_*-macro variant
 *        using ble_gatt_db_srv_t — appears to be a newer SDK revision of the
 *        same API found elsewhere in this source).
 *
 * @param[in,out] p_db_discovery Pointer to the DB Discovery structure.
 * @param[in]     conn_handle    Connection to discover on.
 *
 * @return NRF_SUCCESS, NRF_ERROR_INVALID_STATE, NRF_ERROR_BUSY, a VERIFY_*
 *         macro error, or an sd_ble_gattc_primary_services_discover error.
 */
uint32_t ble_db_discovery_start(ble_db_discovery_t * const p_db_discovery,
                                uint16_t conn_handle)
{
    VERIFY_PARAM_NOT_NULL(p_db_discovery);
    VERIFY_MODULE_INITIALIZED();
    if (m_num_of_handlers_reg == 0)
    {
        // No user modules were registered. There are no services to discover.
        return NRF_ERROR_INVALID_STATE;
    }
    if (p_db_discovery->discovery_in_progress)
    {
        return NRF_ERROR_BUSY;
    }
    p_db_discovery->conn_handle = conn_handle;
    ble_gatt_db_srv_t * p_srv_being_discovered;
    // Reset module-wide and per-instance discovery state.
    m_pending_usr_evt_index = 0;
    p_db_discovery->discoveries_count = 0;
    p_db_discovery->curr_srv_ind = 0;
    p_srv_being_discovered = &(p_db_discovery->services[p_db_discovery->curr_srv_ind]);
    // Begin with the first registered service UUID.
    p_srv_being_discovered->srv_uuid = m_registered_handlers[p_db_discovery->curr_srv_ind];
    DB_LOG("[DB]: Starting discovery of service with UUID 0x%x for Connection handle %d\r\n",
           p_srv_being_discovered->srv_uuid.uuid, conn_handle);
    uint32_t err_code;
    err_code = sd_ble_gattc_primary_services_discover(conn_handle,
                                                      SRV_DISC_START_HANDLE,
                                                      &(p_srv_being_discovered->srv_uuid));
    VERIFY_SUCCESS(err_code);
    // Mark busy only after the SoftDevice accepted the request.
    p_db_discovery->discovery_in_progress = true;
    return NRF_SUCCESS;
}
/* Core set/unset/compare-and-swap primitive for the global table.
 * - empty key            -> failure (-1)
 * - newval == NULL       -> unset the key, success (0)
 * - otherwise            -> store a deep copy of newval only when the stored
 *                           value matches expval (both absent counts as a
 *                           match); returns 0 on success, -1 on mismatch. */
static int async_db_op_helper(aroop_txt_t*key, aroop_txt_t*newval, aroop_txt_t*expval) {
	if(aroop_txt_is_empty(key))
		return -1; /* cannot process a request without a key */
	if(newval == NULL) {
		/* NULL new value means "delete the key" */
		opp_hash_table_set(&global_db, key, NULL);
		return 0;
	}
	/* borrowed reference — no cleanup needed */
	aroop_txt_t*current = (aroop_txt_t*)opp_hash_table_get_no_ref(&global_db, key);
	const int both_absent = (current == NULL && expval == NULL);
	const int values_match = (current != NULL && expval != NULL && aroop_txt_equals(expval, current));
	if(!both_absent && !values_match)
		return -1; /* compare step failed */
	/* store deep copies so the table owns its own memory */
	aroop_txt_t*val_copy = aroop_txt_new_copy_deep(newval, NULL);
	aroop_txt_t*key_copy = aroop_txt_new_copy_deep(key, NULL);
	DB_LOG(LOG_NOTICE, "--op----[key:%s]", aroop_txt_to_string(key));
	opp_hash_table_set(&global_db, key_copy, val_copy);
	OPPUNREF(val_copy);
	OPPUNREF(key_copy);
	return 0;
}
/**@brief Start GATT database discovery at the peer (single-service variant
 *        using srv_being_discovered/m_srv_uuid — appears to be the oldest of
 *        the API revisions present in this source).
 *
 * @param[in,out] p_db_discovery Pointer to the DB Discovery structure.
 * @param[in]     conn_handle    Connection to discover on.
 *
 * @return NRF_SUCCESS, NRF_ERROR_NULL, NRF_ERROR_INVALID_STATE, or the
 *         result of sd_ble_gattc_primary_services_discover.
 */
uint32_t ble_db_discovery_start(ble_db_discovery_t * const p_db_discovery,
                                uint16_t conn_handle)
{
    if (p_db_discovery == NULL)
    {
        return NRF_ERROR_NULL;
    }
    if (!m_initialized)
    {
        return NRF_ERROR_INVALID_STATE;
    }
    // Only the single module-registered service UUID is discovered here.
    p_db_discovery->srv_being_discovered.srv_uuid = m_srv_uuid;
    p_db_discovery->conn_handle = conn_handle;
    DB_LOG("[DB]: Starting service discovery\r\n");
    return sd_ble_gattc_primary_services_discover(p_db_discovery->conn_handle,
                                                  SRV_DISC_START_HANDLE,
                                                  &m_srv_uuid);
}
/**@brief Initiate descriptor discovery for the current characteristic, or
 *        finish discovery if no characteristic can have descriptors.
 *
 * @details Starting from curr_char_ind, scans forward for the first
 *          characteristic whose handle gap (computed by
 *          is_desc_discover_reqd into handle_range) can contain descriptors,
 *          advancing curr_char_ind past characteristics that cannot. If none
 *          qualifies, discovery is complete and the complete event fires.
 *
 * @param[in,out] p_db_discovery Pointer to the DB Discovery structure.
 *
 * @return NRF_SUCCESS or the result of sd_ble_gattc_descriptors_discover.
 */
static uint32_t descriptors_discover(ble_db_discovery_t * const p_db_discovery)
{
    ble_gattc_handle_range_t handle_range;
    ble_db_discovery_char_t * p_curr_char_being_discovered;
    bool is_discovery_reqd = false;
    p_curr_char_being_discovered =
        &p_db_discovery->srv_being_discovered.charateristics[p_db_discovery->curr_char_ind];
    if (p_db_discovery->curr_char_ind + 1 == p_db_discovery->srv_being_discovered.char_count)
    {
        // Last characteristic in the service: no successor to bound the range.
        is_discovery_reqd = is_desc_discover_reqd(p_db_discovery,
                                                  p_curr_char_being_discovered,
                                                  NULL,
                                                  &handle_range);
    }
    else
    {
        uint8_t i;
        ble_db_discovery_char_t * p_next_char;
        for (i = p_db_discovery->curr_char_ind;
             i < p_db_discovery->srv_being_discovered.char_count;
             i++)
        {
            if (i == (p_db_discovery->srv_being_discovered.char_count - 1))
            {
                // The current characteristic is the last characteristic in the service.
                p_next_char = NULL;
            }
            else
            {
                p_next_char = &(p_db_discovery->srv_being_discovered.charateristics[i + 1]);
            }
            // Check if there is a possibility of a descriptor existing for the current
            // characteristic.
            if (is_desc_discover_reqd(p_db_discovery,
                                      p_curr_char_being_discovered,
                                      p_next_char,
                                      &handle_range))
            {
                is_discovery_reqd = true;
                break;
            }
            else
            {
                // No descriptors can exist; move on to the next characteristic.
                p_curr_char_being_discovered = p_next_char;
                p_db_discovery->curr_char_ind++;
            }
        }
    }
    if (!is_discovery_reqd)
    {
        // No more descriptor discovery required.
        // Discovery is complete.
        // Send a discovery complete event to the user application.
        DB_LOG("[DB]: DB Discovery complete \r\n");
        discovery_complete_evt_trigger(p_db_discovery);
        return NRF_SUCCESS;
    }
    return sd_ble_gattc_descriptors_discover(p_db_discovery->conn_handle, &handle_range);
}
/**@brief Function for handling descriptor discovery response.
 *
 * @details Records the CCCD handle (if present) for the characteristic
 *          currently being discovered, then either continues with the next
 *          characteristic's descriptors or raises the discovery-complete
 *          event for the service.
 *
 * @param[in] p_db_discovery    Pointer to the DB Discovery structure.
 * @param[in] p_ble_gattc_evt   Pointer to the GATT Client event.
 */
static void on_descriptor_discovery_rsp(ble_db_discovery_t * const p_db_discovery,
                                        const ble_gattc_evt_t * const p_ble_gattc_evt)
{
    const ble_gattc_evt_desc_disc_rsp_t * p_desc_disc_rsp_evt;
    ble_gatt_db_srv_t * p_srv_being_discovered;
    if (p_ble_gattc_evt->conn_handle != p_db_discovery->conn_handle)
    {
        // Event belongs to a different connection; ignore it.
        return;
    }
    p_srv_being_discovered = &(p_db_discovery->services[p_db_discovery->curr_srv_ind]);
    p_desc_disc_rsp_evt = &(p_ble_gattc_evt->params.desc_disc_rsp);
    ble_gatt_db_char_t * p_char_being_discovered =
        &(p_srv_being_discovered->charateristics[p_db_discovery->curr_char_ind]);
    if (p_ble_gattc_evt->gatt_status == BLE_GATT_STATUS_SUCCESS)
    {
        // The descriptor was found at the peer.
        // If the descriptor was a CCCD, then the cccd_handle needs to be populated.
        uint32_t i;
        // Loop through all the descriptors to find the CCCD.
        for (i = 0; i < p_desc_disc_rsp_evt->count; i++)
        {
            if (
                p_desc_disc_rsp_evt->descs[i].uuid.uuid ==
                BLE_UUID_DESCRIPTOR_CLIENT_CHAR_CONFIG
               )
            {
                p_char_being_discovered->cccd_handle = p_desc_disc_rsp_evt->descs[i].handle;
                break;
            }
        }
    }
    bool raise_discov_complete = false;
    if ((p_db_discovery->curr_char_ind + 1) == p_srv_being_discovered->char_count)
    {
        // No more characteristics and descriptors need to be discovered. Discovery is complete.
        // Send a discovery complete event to the user application.
        raise_discov_complete = true;
    }
    else
    {
        // Begin discovery of descriptors for the next characteristic.
        uint32_t err_code;
        p_db_discovery->curr_char_ind++;
        err_code = descriptors_discover(p_db_discovery,
                                        &raise_discov_complete,
                                        p_ble_gattc_evt->conn_handle);
        if (err_code != NRF_SUCCESS)
        {
            p_db_discovery->discovery_in_progress = false;
            // Error with discovering the service.
            // Indicate the error to the registered user application.
            discovery_error_evt_trigger(p_db_discovery, err_code, p_ble_gattc_evt->conn_handle);
            m_pending_user_evts[0].evt.evt_type = BLE_DB_DISCOVERY_AVAILABLE;
            m_pending_user_evts[0].evt.conn_handle = p_ble_gattc_evt->conn_handle;
            return;
        }
    }
    if (raise_discov_complete)
    {
        // NOTE(review): the two adjacent literals below concatenate without a
        // space ("...Connectionhandle %d"); preserved byte-exact since log
        // output is runtime behavior.
        DB_LOG("[DB]: Discovery of service with UUID 0x%x completed with success for Connection"
               "handle %d\r\n",
               p_srv_being_discovered->srv_uuid.uuid,
               p_ble_gattc_evt->conn_handle);
        discovery_complete_evt_trigger(p_db_discovery, true, p_ble_gattc_evt->conn_handle);
        on_srv_disc_completion(p_db_discovery, p_ble_gattc_evt->conn_handle);
    }
}
/**@brief Function for handling characteristic discovery response.
 *
 * @details Stores the characteristics reported in this response (capped at
 *          BLE_GATT_DB_MAX_CHARS), then either issues another round of
 *          characteristic discovery or moves on to descriptor discovery.
 *
 * @param[in] p_db_discovery    Pointer to the DB Discovery structure.
 * @param[in] p_ble_gattc_evt   Pointer to the GATT Client event.
 *
 * FIX: raise_discov_complete was declared uninitialized before being passed
 * by address to descriptors_discover(); it is now initialized to false so a
 * path that returns NRF_SUCCESS without setting it cannot cause an
 * uninitialized read.
 */
static void on_characteristic_discovery_rsp(ble_db_discovery_t * const p_db_discovery,
                                            const ble_gattc_evt_t * const p_ble_gattc_evt)
{
    uint32_t err_code;
    ble_gatt_db_srv_t * p_srv_being_discovered;
    bool perform_desc_discov = false;
    if (p_ble_gattc_evt->conn_handle != p_db_discovery->conn_handle)
    {
        // Event belongs to a different connection; ignore it.
        return;
    }
    p_srv_being_discovered = &(p_db_discovery->services[p_db_discovery->curr_srv_ind]);
    if (p_ble_gattc_evt->gatt_status == BLE_GATT_STATUS_SUCCESS)
    {
        const ble_gattc_evt_char_disc_rsp_t * p_char_disc_rsp_evt;
        p_char_disc_rsp_evt = &(p_ble_gattc_evt->params.char_disc_rsp);
        // Find out the number of characteristics that were previously discovered (in earlier
        // characteristic discovery responses, if any).
        uint8_t num_chars_prev_disc = p_srv_being_discovered->char_count;
        // Find out the number of characteristics that are currently discovered (in the
        // characteristic discovery response being handled).
        uint8_t num_chars_curr_disc = p_char_disc_rsp_evt->count;
        // Check if the total number of discovered characteristics are supported by this module.
        if ((num_chars_prev_disc + num_chars_curr_disc) <= BLE_GATT_DB_MAX_CHARS)
        {
            // Update the characteristics count.
            p_srv_being_discovered->char_count += num_chars_curr_disc;
        }
        else
        {
            // The number of characteristics discovered at the peer is more than the supported
            // maximum. This module will store only the characteristics found up to this point.
            p_srv_being_discovered->char_count = BLE_GATT_DB_MAX_CHARS;
        }
        uint32_t i;
        uint32_t j;
        // Copy the newly reported characteristics into the local database;
        // CCCD handles are filled in later during descriptor discovery.
        for (i = num_chars_prev_disc, j = 0; i < p_srv_being_discovered->char_count; i++, j++)
        {
            p_srv_being_discovered->charateristics[i].characteristic = p_char_disc_rsp_evt->chars[j];
            p_srv_being_discovered->charateristics[i].cccd_handle = BLE_GATT_HANDLE_INVALID;
        }
        ble_gattc_char_t * p_last_known_char;
        // NOTE(review): assumes a SUCCESS response carries at least one
        // characteristic, so i >= 1 here — confirm against the SoftDevice spec.
        p_last_known_char = &(p_srv_being_discovered->charateristics[i - 1].characteristic);
        // If no more characteristic discovery is required, or if the maximum number of supported
        // characteristic per service has been reached, descriptor discovery will be performed.
        if (
            !is_char_discovery_reqd(p_db_discovery, p_last_known_char) ||
            (p_srv_being_discovered->char_count == BLE_GATT_DB_MAX_CHARS)
           )
        {
            perform_desc_discov = true;
        }
        else
        {
            // Update the current characteristic index.
            p_db_discovery->curr_char_ind = p_srv_being_discovered->char_count;
            // Perform another round of characteristic discovery.
            err_code = characteristics_discover(p_db_discovery, p_ble_gattc_evt->conn_handle);
            if (err_code != NRF_SUCCESS)
            {
                p_db_discovery->discovery_in_progress = false;
                discovery_error_evt_trigger(p_db_discovery, err_code, p_ble_gattc_evt->conn_handle);
                m_pending_user_evts[0].evt.evt_type = BLE_DB_DISCOVERY_AVAILABLE;
                m_pending_user_evts[0].evt.conn_handle = p_ble_gattc_evt->conn_handle;
                //m_evt_handler(&m_pending_user_evts[0].evt);
                return;
            }
        }
    }
    else
    {
        // The previous characteristic discovery resulted in no characteristics.
        // descriptor discovery should be performed.
        perform_desc_discov = true;
    }
    if (perform_desc_discov)
    {
        // FIX: initialize so a success path that leaves the out-param unset
        // cannot lead to reading an indeterminate value below.
        bool raise_discov_complete = false;
        p_db_discovery->curr_char_ind = 0;
        err_code = descriptors_discover(p_db_discovery,
                                        &raise_discov_complete,
                                        p_ble_gattc_evt->conn_handle);
        if (err_code != NRF_SUCCESS)
        {
            p_db_discovery->discovery_in_progress = false;
            discovery_error_evt_trigger(p_db_discovery, err_code, p_ble_gattc_evt->conn_handle);
            m_pending_user_evts[0].evt.evt_type = BLE_DB_DISCOVERY_AVAILABLE;
            m_pending_user_evts[0].evt.conn_handle = p_ble_gattc_evt->conn_handle;
            //m_evt_handler(&m_pending_user_evts[0].evt);
            return;
        }
        if (raise_discov_complete)
        {
            // No more characteristics and descriptors need to be discovered. Discovery is complete.
            // Send a discovery complete event to the user application.
            DB_LOG("[DB]: Discovery of service with UUID 0x%x completed with success for Connection"
                   " handle %d\r\n",
                   p_srv_being_discovered->srv_uuid.uuid,
                   p_ble_gattc_evt->conn_handle);
            discovery_complete_evt_trigger(p_db_discovery, true, p_ble_gattc_evt->conn_handle);
            on_srv_disc_completion(p_db_discovery, p_ble_gattc_evt->conn_handle);
        }
    }
}
// return true if checks pass (or repairs successful) bool CVarDataFile::CheckDatabaseIntegrity(bool inRepairProblems) { // #warning never repairing problems in CheckDatabaseIntegrity // inRepairProblems = false; // we don't know that this works yet bool bResult = true; bool bRepairsWereAttempted = false; #warning FIXME: extensive checks completely disabled until user feedback provided for this long process bool bExtensiveChecks = false; //mFileIsDamaged; // always do extensive checks if we've detected problems try { if (IsReadOnly() && inRepairProblems) { inRepairProblems = false; // we don't know that this works yet DB_LOG("Integrity Check Warning: Database is read only"); } // load all items that are in the master index and validate them against the db contents long fileLen = GetLength(); if ((mAllocatedBytes + mFirstItemPos) != fileLen) { DB_LOG("INTEGRITY CHECK ERROR: Header thought file was "<<mAllocatedBytes<<" bytes long, but file is really " <<fileLen <<" bytes long"); if (inRepairProblems) { bRepairsWereAttempted = true; mAllocatedBytes = fileLen - mFirstItemPos; WriteHeader(kFileIsOpen); DB_LOG("Integrity Check Repair: Told header that file is " << fileLen << " bytes long"); } } long bytesUsed = mBytesUsed; if ( (bytesUsed < 0) || (bytesUsed > fileLen)) { bytesUsed = fileLen; } long largestSize = mLargestRecSize; if ( (largestSize < 0) || (largestSize > bytesUsed) ) { largestSize = bytesUsed; } // load all items that are in the master index and validate them against the db contents // we track an actualLargestSize as well as the largestSize, since the later refects a maximum // value that is physically possible, whereas the former tracks what we've found as we go through // the records long actualLargestRecSize = 0; long actualLargestSlotSize = 0; long numRecords = itsMasterIndex->GetEntryCount(); long lastIndex = numRecords; SInt32 slotSize = 0; RecIDT recID = kInvalidRecord; SInt32 recPos = 0; IndexEntryT entry; for (int i = lastIndex; i>=1; --i) { 
bool bBadIndexEntry = false; bool bBadSlotSize = false; slotSize = 0; try { itsMasterIndex->FetchEntryAt(i, entry); // can throw dbItemNotFound or read errors DB_DEBUG("Checking index entry "<<i<<": Record ID "<<entry.recID<<" size "<<entry.recSize <<" dbpos "<<entry.recPos, DEBUG_TRIVIA); SInt32 nextRecPos = 0; if (bExtensiveChecks) { // look at what the index thinks the next slot is if ( !itsMasterIndex->FindFirstSlotFromDatabasePos(entry.recPos+1L, nextRecPos) ) { // this is the last record, make sure we update bytesUsed as necessary // this should only happen once, might want to check that if (bytesUsed < (entry.recPos + entry.recSize)) { bytesUsed = entry.recPos + entry.recSize; if (bytesUsed > fileLen) { bytesUsed = fileLen; } } nextRecPos = bytesUsed; } } else { // since we aren't doing extensive checking, we don't actually know exactly where // the next record starts, but we know it isn't allowed to be closer than recSize // bytes away nextRecPos = entry.recPos + entry.recSize; } // extensive checks recID = kInvalidRecord; SetMarker(entry.recPos + kVarDBFileSlotSizeOffset, streamFrom_Start); ReadBlock(&slotSize, kSizeOfSlotSize); ReadBlock(&recID, kSizeOfRecID); #if PLATFORM_LITTLE_ENDIAN slotSize = BigEndian32_ToNative(slotSize); recID = BigEndian32_ToNative(recID); #endif // PLATFORM_LITTLE_ENDIAN if (recID != entry.recID) { DB_LOG("INTEGRITY CHECK ERROR: Bad Entry at index slot "<<i<<" dbpos "<<entry.recPos <<"; slot contains record id "<<recID<<" ("<<slotSize<<"B) but index expected record id " <<entry.recID<<" ("<<entry.recSize<<"B)"); bBadIndexEntry = true; } SInt32 nextRecOffset = nextRecPos - entry.recPos; if ( (slotSize < RecSizeToSlotSize(entry.recSize)) || ((entry.recPos + slotSize) > fileLen) ) { if (!bExtensiveChecks) { // we aren't doing extensive checks, so we don't know where the index believes the // next record needs to be. 
However, this information is crucial for a proper repair // so we will figure it out now if ( !itsMasterIndex->FindFirstSlotFromDatabasePos(entry.recPos+1L, nextRecPos) ) { // this is the last record, make sure we update bytesUsed as necessary // this should only happen once, might want to check that if (bytesUsed < (entry.recPos + entry.recSize)) { bytesUsed = entry.recPos + entry.recSize; if (bytesUsed > fileLen) { bytesUsed = fileLen; } } nextRecPos = bytesUsed; } } DB_LOG("INTEGRITY CHECK ERROR: Damaged database file, affects index entry "<<i<<" dbpos " <<entry.recPos<<"; slot for record id "<<recID<<" ("<<entry.recSize<<"B) is an impossible " <<slotSize<<" bytes, but it should be "<<nextRecOffset); bBadSlotSize = true; slotSize = nextRecOffset; } else if (bExtensiveChecks) { // these checks rely on the nextRecOffset being correct, which it might not be if // we aren't doing extensive checks. So we skip them. if (slotSize > nextRecOffset) { DB_LOG("INTEGRITY CHECK ERROR: Bad Entry at index slot "<<i<<" dbpos "<<entry.recPos <<"; index thinks record id "<<recID<<" ("<<entry.recSize <<"B) is at that position, co-located with next record at dbpos "<<nextRecPos <<" leaving only "<<nextRecOffset<<" bytes in this slot"); bBadIndexEntry = true; bBadSlotSize = true; slotSize = nextRecOffset; } else if (slotSize < nextRecOffset) { DB_LOG("INTEGRITY CHECK ERROR: Minor damage to database file, affects index entry "<<i<<" dbpos " <<entry.recPos<<"; slot for record id "<<recID<<" ("<<entry.recSize<<"B) should be " <<nextRecOffset<<" bytes, not " << slotSize); bBadSlotSize = true; slotSize = nextRecOffset; } } // update the largest record size if this was a good index entry if ( !bBadIndexEntry && (actualLargestRecSize < entry.recSize) ) { actualLargestRecSize = entry.recSize; } // calculate the largest slot size if ( actualLargestSlotSize < slotSize ) { actualLargestSlotSize = slotSize; } } // end Try catch (...) 
{ DB_LOG("INTEGRITY CHECK ERROR: Exception reading entry at index slot "<<i<<" dbpos "<<entry.recPos); bBadIndexEntry = true; } // end Catch if (bBadIndexEntry) { bResult = false; if (inRepairProblems) { bRepairsWereAttempted = true; itsMasterIndex->DeleteEntry(entry.recID); --numRecords; DB_LOG("Integrity Check Repair: Deleted index entry for record " << entry.recID); // we don't mark the slot deleted because this could be part of an entirely // different record that is valid. Overwriting a zero into four bytes of it // would be damaging, so we don't screw with it /* // slot size is okay, so this is RecIDT deletedRecID = Native_ToBigEndian32(kInvalidRecord); SetMarker(entry.recPos + kVarDBFileRecIDOffset, streamFrom_Start); WriteBlock(&deletedRecID, kSizeOfRecID); DB_LOG("Integrity Check Repair: Deleted index entry and slot for record " << entry.recID); */ } } else if (bBadSlotSize) { // in this case, only the slot size was wrong, but the record pointed to the right // place, so we will try to update that. There is no guarantee that the data is good though, // most likely this was stomped by something else, but at least this seems to have a valid // record id tag bResult = false; if (inRepairProblems) { bRepairsWereAttempted = true; SetMarker(entry.recPos + kVarDBFileSlotSizeOffset, streamFrom_Start); SInt32 outSize = Native_ToBigEndian32(slotSize); WriteBlock(&outSize, kSizeOfSlotSize); DB_LOG("Integrity Check Repair: Updated slot size for record " << entry.recID << " at dbpos "<<entry.recPos<<" to " << slotSize << " bytes"); } } } // only do this if extensive checks have been done, since we are relying on all the slot sizes being // correct and the stuff above may have set some of them incorrectly. if (bExtensiveChecks) { // now that we know that the index is good, go through the database file and check // each item to see if it has a corresponding index entry. 
SetMarker(mFirstItemPos + kVarDBFileSlotSizeOffset, streamFrom_Start); // move to start of the 1st record while (true) { // look at all the records in the datafile slotSize = 0; recID = kInvalidRecord; recPos = GetMarker(); ReadBlock(&slotSize, kSizeOfSlotSize); // read the slot size into the pointer ReadBlock(&recID, kSizeOfRecID); // read the record id #if PLATFORM_LITTLE_ENDIAN slotSize = BigEndian32_ToNative(slotSize); recID = BigEndian32_ToNative(recID); #endif // PLATFORM_LITTLE_ENDIAN // basic checks: the slot size and recID reasonable? if ( (slotSize < 0) || (slotSize < (SInt32)RecSizeToSlotSize(sizeof(DatabaseRec))) || ((recID != kInvalidRecord) && (slotSize > (actualLargestRecSize + kSlotSizeSlop))) ) { DB_LOG("INTEGRITY CHECK ERROR: Slot at dbpos "<<recPos<<" is impossible ("<<slotSize<<"B)"); bResult = false; // scan index for the next record that comes right after this one. SInt32 nextRecPos = 0; itsMasterIndex->FindFirstSlotFromDatabasePos(recPos + kVarDBFileRecDataOffset, nextRecPos); slotSize = nextRecPos - recPos; if (inRepairProblems) { bRepairsWereAttempted = true; SetMarker(recPos + kVarDBFileSlotSizeOffset, streamFrom_Start); SInt32 outSize = Native_ToBigEndian32(slotSize); WriteBlock(&outSize, kSizeOfSlotSize); DB_LOG("Integrity Check Repair: Set slot size at dbpos "<<recPos<<" to "<<slotSize<<" bytes"); } } if ( (recID < 0) || (recID > mLastRecID) ) { DB_LOG("INTEGRITY CHECK ERROR: Slot at dbpos "<<recPos<<" has invalid record id ("<<recID<<")"); bResult = false; recID = kInvalidRecord; // this should be in the delete list, so we will check it in the next conditional if (inRepairProblems) { bRepairsWereAttempted = true; SetMarker(recPos + kVarDBFileRecIDOffset, streamFrom_Start); WriteBlock(&recID, kSizeOfRecID); // writing zero, don't worry about endian swap DB_LOG("Integrity Check Repair: Marked slot at dbpos "<<recPos<<" as deleted"); } } if (recID == 0) { if (!itsMasterIndex->CheckDeletedSlot(recPos, slotSize)) { DB_LOG("INTEGRITY CHECK 
ERROR: Slot at dbpos "<<recPos << " ("<<slotSize <<"B) is marked deleted but is not in delete list"); bResult = false; if (inRepairProblems) { bRepairsWereAttempted = true; // add this slot to the delete list CDeleteList* deleteList = itsMasterIndex->GetDeleteList(); deleteList->SlotWasDeleted(recPos, slotSize); DB_LOG("Integrity Check Repair: Added slot at dbpos "<<recPos<<" to delete list"); } } } else { try { itsMasterIndex->FetchEntry(recID, entry); if (entry.recPos != recPos) { DB_LOG("INTEGRITY CHECK ERROR: Slot at dbpos "<<recPos<<" for record id "<<recID <<" ("<<slotSize<<"B) has an index entry that says it should be at dbpos " <<entry.recPos); bResult = false; // Not sure how this could ever happen given the index validation // we already did. Perhaps via a duplicate entry in database file #warning TODO: repair dbpos wrong in index problem in CheckDatabaseIntegrity() } } catch (LException& e) { if (e.GetErrorCode() == dbItemNotFound) { DB_LOG("INTEGRITY CHECK ERROR: Slot at dbpos "<<recPos<<" for record id "<<recID <<" ("<<slotSize<<"B) has no index entry"); bResult = false; if (inRepairProblems) { bRepairsWereAttempted = true; recID = kInvalidRecord; // if it's not in the index, delete it, // we don't have any other choice because the index // insists we add things in order SetMarker(recPos + kVarDBFileRecIDOffset, streamFrom_Start); WriteBlock(&recID, kSizeOfRecID); // writing zero, don't worry about endian swap DB_LOG("Integrity Check Repair: Marked record "<< entry.recID <<" in slot at dbpos "<<recPos<<" as deleted"); } // end repair problems } // end (e.GetErrorCode() == dbItemNotFound) } // end catch } // end (recID != 0) if ( (recPos + slotSize) >= bytesUsed) { break; // break out of the while loop } SetMarker(recPos + slotSize, streamFrom_Start); // move to start of the next record } // end while loop #warning TODO: Perhaps rebuild delete list from scratch? 
} // end bExtensiveChecks // fix the record count in the header if ((long)mNumValidRecs != numRecords) { DB_LOG("INTEGRITY CHECK ERROR: Header has incorrect record count ("<<mNumValidRecs<<"); correct count is " <<numRecords); if (inRepairProblems) { bRepairsWereAttempted = true; mNumValidRecs = numRecords; WriteHeader(kFileIsOpen); DB_LOG("Integrity Check Repair: Set header record count to "<<mNumValidRecs); } } // fix the record count in the header if (mBytesUsed != bytesUsed) { DB_LOG("INTEGRITY CHECK ERROR: Header has incorrect bytes used ("<<mBytesUsed<<"); correct count is " <<bytesUsed); if (inRepairProblems) { bRepairsWereAttempted = true; mBytesUsed = bytesUsed; WriteHeader(kFileIsOpen); DB_LOG("Integrity Check Repair: Set header bytes used to "<<mBytesUsed); } } // fix the largest rec size in the header if (mLargestRecSize != actualLargestRecSize) { DB_LOG("INTEGRITY CHECK WARNING: Header has incorrect largest rec size ("<<mLargestRecSize <<"); correct largest size is " <<actualLargestRecSize); if (inRepairProblems) { mLargestRecSize = actualLargestRecSize; WriteHeader(kFileIsOpen); DB_LOG("Integrity Check Repair: Set header largest rec size to "<<mLargestRecSize); } } } // end main try block catch(...) { DB_LOG("INTEGRITY CHECK ERROR: Unexpected exception, failing check"); bResult = false; // unknown error, so we fail } // if there were problems found and we attempted to fix them, recheck if (bRepairsWereAttempted) { CMasterIndexFile* index = static_cast<CMasterIndexFile*>(itsMasterIndex); index->WriteHeader(kFileIsClosed); // this will update the delete list index->WriteHeader(kFileIsOpen); DB_LOG("================================================================================"); DB_LOG("INTEGRITY CHECK COMPLETE. 
Verifying Repairs."); DB_LOG("--------------------------------------------------------------------------------"); bResult = CheckDatabaseIntegrity(kCheckOnly); // will not attempt repair on this pass DB_LOG("================================================================================"); if (bResult == false) { mFileIsDamaged = true; WriteHeader(kFileIsOpen); DB_LOG("INTEGRITY CHECK FAILED. Marking file as damaged for extensive checks at next open."); } } else if (inRepairProblems) { DB_LOG("Integrity check passed."); } #warning TODO: Put the asserts below in the proper place or make them fix the file // haven't decided what to do with these yet ASSERT(mBytesUsed <= mAllocatedBytes); // can't use more than we've allocated ASSERT((mAllocatedBytes+mFirstItemPos) == GetLength()); // LFileStream needs to be in synch ASSERT(mAllocatedBytes >= (SInt32)(mItemCount*sizeof(DatabaseRec)));// recs must have at least a header return bResult; }
// Read the record whose ID is in ioRecP->recID into ioRecP.
// On entry ioRecP->recSize holds the caller's buffer capacity (0 means
// "buffer is known to be big enough"); on exit recPos/recSize are filled in
// from the index and at most min(buffer, record) bytes are read.
// Throws dbItemNotFound / dbIndexCorrupt / dbDataCorrupt; debug and
// integrity builds do deeper consistency checks and self-heal bad index
// entries by deleting them.
void CVarDataFile::ReadRecord(DatabaseRec *ioRecP)
{
    SInt32 recSize;
    SInt32 buffSize = ioRecP->recSize; // caller-provided buffer capacity
    ASSERT(itsMasterIndex != nil);
    SInt32 recPos = itsMasterIndex->FindEntry(ioRecP->recID, &recSize); // find index entry
    ioRecP->recPos = recPos;
    ioRecP->recSize = recSize; // allocated size of DatabaseRecPtr
    if (buffSize == 0)
    {
        buffSize = recSize; // they were sure the record was big enough
    }
    else if (buffSize > recSize)
    { // don't read past the end of the record
        buffSize = recSize;
    }
#if DB_DEBUG_MODE
    // NOTE: this else-if splices onto the chain above only in debug builds
    else if (buffSize < recSize)
    {
        // partial read: caller's buffer is smaller than the stored record
        DB_DEBUG("WARNING: Buffer too small. Only reading "<<buffSize<<" bytes of "<<recSize<<" for Rec "<<ioRecP->recID, DEBUG_TRIVIA);
    }
    ASSERT(mBytesUsed <= mAllocatedBytes); // can't use more than we've allocated
    ASSERT((mAllocatedBytes+mFirstItemPos) == GetLength()); // LFileStream needs to be in synch
    if (recSize > mLargestRecSize)
    {
        DB_DEBUG("ERROR: Index says Rec "<<ioRecP->recID<<" is "<<recSize<<" bytes, but largest record is "<<mLargestRecSize, DEBUG_ERROR);
    }
#endif // DB_DEBUG_MODE only checks
#if DB_DEBUG_MODE || DB_INTEGRITY_CHECKING
    // validate the index's answer before trusting it
    if (ioRecP->recID > mLastRecID)
    {
        DB_DEBUG("ERROR: Invalid Record ID "<<ioRecP->recID<<" requested. Last ID is "<<mLastRecID, DEBUG_ERROR);
        Throw_(dbItemNotFound); // not found is an exception
    }
    if (recSize < (SInt32)sizeof(DatabaseRec))
    {
        DB_DEBUG("ERROR: Index says Rec "<<ioRecP->recID<<" is "<<recSize<<" bytes, smaller than the record header alone.", DEBUG_ERROR);
        mFileIsDamaged = true;
        Throw_(dbIndexCorrupt); // index is clearly damaged
        // we could clean this up by taking the size directly from the record
        // or perhaps we should remove this from the index
    }
    if (GetLength() < recPos)
    {
        DB_DEBUG("ERROR: Index returned offset "<<recPos<<" for Rec "<<ioRecP->recID<<", but datafile is only "<<GetLength()<<" bytes long.", DEBUG_ERROR);
        mFileIsDamaged = true;
        Throw_(dbIndexCorrupt);
        // we don't know if the index is wrong or the file was truncated, blame the index
        // we could clean this up by removing this from the index
    }

    // cross-check the on-disk slot header against the index entry
    RecIDT actualID;
    SInt32 slotSize;
    SetMarker(recPos + kVarDBFileSlotSizeOffset, streamFrom_Start); // debugging, check old ID
    ReadBlock(&slotSize, kSizeOfSlotSize);
    ReadBlock(&actualID, kSizeOfRecID);
#if PLATFORM_LITTLE_ENDIAN
    slotSize = BigEndian32_ToNative(slotSize); // slot header is stored big-endian
    actualID = BigEndian32_ToNative(actualID);
#endif // PLATFORM_LITTLE_ENDIAN
    if (actualID != ioRecP->recID)
    {
        if (actualID == 0)
        {
            DB_DEBUG("ERROR: Asked index for Rec "<<ioRecP->recID<<" but got a deleted slot at pos "<< recPos << "; Removing from Index", DEBUG_ERROR);
            // the index must be wrong, but so we'll delete the index entry and do a "not found"
        }
        else
        {
            DB_DEBUG("ERROR: Asked index for Rec "<<ioRecP->recID<<" but got a slot with Rec "<< actualID << " at pos "<<recPos <<"; Removing from Index", DEBUG_ERROR);
            // the index could be wrong, or maybe the data was overwritten
            // in either case we'll delete the index entry and do a "not found"
        }
        // self-heal: drop the bad index entry and report the record missing
        mFileIsDamaged = true;
        itsMasterIndex->DeleteEntry(ioRecP->recID);
        mNumValidRecs--;
        WriteHeader();
        DB_LOG("ERROR RECOVERY: Removed index entry for Rec " << ioRecP->recID);
        DB_DEBUG("ERROR RECOVERY: Removed index entry for Rec "<<ioRecP->recID, DEBUG_ERROR);
        Throw_(dbItemNotFound); // not found is an exception
    }
    if (slotSize < RecSizeToSlotSize(recSize))
    {
        DB_DEBUG("ERROR: Index says Rec "<<ioRecP->recID<<" is "<<recSize<<" bytes, but the record's slot size is only "<<slotSize<<" bytes", DEBUG_ERROR);
        mFileIsDamaged = true;
        Throw_(dbDataCorrupt); // this is a serious problem
    }
#endif
    SetMarker(recPos + kVarDBFileRecIDOffset, streamFrom_Start); // move to start of record data (skip slot size)
    ReadBlock(&ioRecP->recID, RecSizeToIOSize(buffSize)); // read the item into the pointer
#if PLATFORM_LITTLE_ENDIAN
    // only the leading recID field is byte-swapped here; the rest of the
    // payload is presumably swapped by the caller -- TODO confirm
    ioRecP->recID = BigEndian32_ToNative(ioRecP->recID);
#endif // PLATFORM_LITTLE_ENDIAN
    DB_DEBUG("ReadRecord("<<ioRecP->recID<<"); size: "<<recSize<<" pos: "<<recPos<<" bufsize: "<<buffSize, DEBUG_TRIVIA);
}
/**@brief Handles a descriptor discovery response from the GATT client.
 *
 * @details On a successful response, the reported descriptors are scanned for the
 *          Client Characteristic Configuration Descriptor (CCCD); when present, its
 *          handle is stored on the characteristic currently being discovered.
 *          Afterwards, discovery either completes (this was the last characteristic)
 *          or descriptor discovery is started for the next characteristic.
 *
 * @param[in] p_db_discovery  Pointer to the DB Discovery structure.
 * @param[in] p_ble_gattc_evt Pointer to the GATT Client event.
 */
static void on_desc_disc_rsp(ble_db_discovery_t * const p_db_discovery,
                             const ble_gattc_evt_t * const p_ble_gattc_evt)
{
    const ble_gattc_evt_desc_disc_rsp_t * p_rsp = &(p_ble_gattc_evt->params.desc_disc_rsp);

    // Characteristic whose descriptors this response describes.
    ble_db_discovery_char_t * p_curr_char =
        &(p_db_discovery->srv_being_discovered.charateristics[p_db_discovery->curr_char_ind]);

    if (p_ble_gattc_evt->gatt_status == BLE_GATT_STATUS_SUCCESS)
    {
        // Descriptors were found at the peer; if the CCCD is among them, remember
        // its handle so notifications/indications can be configured later.
        uint8_t desc_ind;

        for (desc_ind = 0; desc_ind < p_rsp->count; desc_ind++)
        {
            if (p_rsp->descs[desc_ind].uuid.uuid == BLE_UUID_DESCRIPTOR_CLIENT_CHAR_CONFIG)
            {
                DB_LOG("[DB]: Storing CCCD Handle %d\r\n", p_rsp->descs[desc_ind].handle);
                p_curr_char->cccd_handle = p_rsp->descs[desc_ind].handle;
                break; // at most one CCCD per characteristic
            }
        }
    }

    if ((p_db_discovery->curr_char_ind + 1) == p_db_discovery->srv_being_discovered.char_count)
    {
        // That was the last characteristic of the service: discovery is complete,
        // notify the user application.
        DB_LOG("[DB]: DB Discovery complete \r\n");

        discovery_complete_evt_trigger(p_db_discovery);
    }
    else
    {
        // Advance to the next characteristic and discover its descriptors.
        uint32_t err_code;

        p_db_discovery->curr_char_ind++;

        err_code = descriptors_discover(p_db_discovery);
        if (err_code != NRF_SUCCESS)
        {
            // Report the failure to the application registered for the service
            // being discovered.
            indicate_error_to_app(p_db_discovery, p_ble_gattc_evt, err_code);
        }
    }
}
// Add a record to the datafile and return its record ID.
// If inRecP->recID is 0 a fresh ID is assigned via GetNewRecordID(); an ID
// above mLastRecID is rejected (IDs must come from GetNewRecordID()).
// The master index supplies a slot (reused deleted slot or new space at the
// end) and the record is written there; on return inRecP->recID/recPos are
// filled in. Throws dbInvalidID / dbDataCorrupt / dbIndexCorrupt.
RecIDT CVarDataFile::AddRecord(DatabaseRec *inRecP)
{
    SInt32 recID = inRecP->recID;
    SInt32 recSize = inRecP->recSize; // actual data size
    // DB_DEBUG("Begin AddRecord("<<recID<<")", DEBUG_TRIVIA);
    if (0 == recID)
    {
        recID = GetNewRecordID(inRecP); // get a new id# if needed
    }
    else if (recID > mLastRecID)
    { // or reject if not last id given by GetNewRecID()
        DB_LOG("ERROR: Add Rec " << recID << " " << recSize<<" FAILED, id not yet assigned");
        DB_DEBUG("ERROR: Trying to add Rec "<<recID<<", but that ID has not yet been assigned.", DEBUG_ERROR);
        Throw_ ( dbInvalidID );
    }
    SInt32 recPos = itsMasterIndex->AddEntry(recID, recSize); // get or create an empty slot
#if DB_DEBUG_MODE || DB_INTEGRITY_CHECKING
    // DB_DEBUG("Start AddRecord("<<recID<<") debug checks", DEBUG_TRIVIA);
    // sanity-check the record and the slot the index handed back
    if (recSize < (SInt32)sizeof(DatabaseRec))
    {
        DB_LOG("ERROR: Add Rec " << recID << " " << recSize << "B pos: "<<recPos<<" FAILED, record smaller than header");
        DB_DEBUG("ERROR: Trying to add Rec "<<recID<<" with size of "<<recSize<<" bytes, smaller than the record header alone.", DEBUG_ERROR);
        Throw_( dbDataCorrupt );
    }
    ASSERT(mBytesUsed <= mAllocatedBytes); // can't use more than we've allocated
    ASSERT((mAllocatedBytes+mFirstItemPos) == GetLength()); // LFileStream needs to be in synch
    if (GetLength() < recPos)
    {
        DB_LOG("ERROR: Add Rec " << recID << " " << recSize << "B pos: "<<recPos<<" FAILED, overran datafile length "<<GetLength()<<"B");
        DB_DEBUG("ERROR: Index wants to put new Rec "<<recID<<" at offset "<<recPos<<", but datafile is only "<<GetLength()<<" bytes long.", DEBUG_ERROR);
        mFileIsDamaged = true;
        Throw_( dbIndexCorrupt );
    }
    // the slot must currently be marked deleted (id 0) and be big enough
    RecIDT oldID;
    SInt32 slotSize;
    SetMarker(recPos + kVarDBFileSlotSizeOffset, streamFrom_Start); // debugging, check old ID
    ReadBlock(&slotSize, kSizeOfSlotSize);
    ReadBlock(&oldID, kSizeOfRecID);
#if PLATFORM_LITTLE_ENDIAN
    slotSize = BigEndian32_ToNative(slotSize); // slot header is stored big-endian
    oldID = BigEndian32_ToNative(oldID);
#endif // PLATFORM_LITTLE_ENDIAN
    if (oldID != 0)
    {
        DB_LOG("ERROR: Add Rec " << recID << " " << recSize << "B pos: "<<recPos<<" FAILED, overwriting Rec "<<oldID);
        DB_DEBUG("ERROR: Attempting to add into slot with a non-zero (valid) ID\n Probably overwriting Rec "<<oldID, DEBUG_ERROR);
        mFileIsDamaged = true;
        Throw_( dbIndexCorrupt );
    }
    if (slotSize < RecSizeToSlotSize(recSize))
    {
        DB_LOG("ERROR: Add Rec " << recID << " " << recSize << "B pos: "<<recPos<<" FAILED, slot too small "<<slotSize<<"B");
        DB_DEBUG("ERROR: Attempting to add "<<RecSizeToSlotSize(recSize)<<" bytes into a "<<slotSize<<" byte slot at "<< recPos, DEBUG_ERROR);
        mFileIsDamaged = true;
        Throw_( dbDataCorrupt );
    }
    // DB_DEBUG("Done AddRecord("<<recID<<") debug checks", DEBUG_TRIVIA);
#endif
    SetMarker(recPos + kVarDBFileRecIDOffset, streamFrom_Start); // move to start of slot's data, skipping size
    // DB_DEBUG("AddRecord("<<recID<<") set marker at "<< recPos+kVarDBFileRecIDOffset, DEBUG_TRIVIA);
    // the recID field is temporarily byte-swapped in the caller's buffer so
    // the whole record can be written in one block, then restored below
    inRecP->recID = Native_ToBigEndian32(recID); // return the new recID in the DatabaseRecPtr
    WriteBlock(&inRecP->recID, RecSizeToIOSize(recSize)); // write the new record data into the slot
    inRecP->recID = recID; // restore native endianness
    // DB_DEBUG("AddRecord("<<recID<<") wrote block size = " << RecSizeToIOSize(recSize), DEBUG_TRIVIA);
    mNumValidRecs++; // one more valid record
    if (!mBatchMode)
    {
        WriteHeader(); // batch mode defers the header write until the batch ends
    }
    inRecP->recPos = recPos;
    DB_LOG("Added Rec " << recID << " " << recSize << "B pos: "<<recPos);
    DB_DEBUG("AddRecord("<<recID<<"); size: "<<recSize<<" pos: "<<recPos, DEBUG_TRIVIA);
    return(recID);
}