void test_no_lock() { GstTask *t; gboolean ret; //xmlfile = "test_no_lock"; std_log(LOG_FILENAME_LINE, "Test Started test_no_lock"); t = gst_task_create (task_func, NULL); fail_if (t == NULL); TEST_ASSERT_FAIL /* stop should be possible without lock */ gst_task_stop (t); /* pause should give a warning */ ASSERT_WARNING (ret = gst_task_pause (t)); //b failing fail_unless (ret == FALSE); TEST_ASSERT_FAIL /* start should give a warning */ ASSERT_WARNING (ret = gst_task_start (t)); fail_unless (ret == FALSE); TEST_ASSERT_FAIL /* stop should be possible without lock */ gst_task_stop (t); gst_object_unref (t); std_log(LOG_FILENAME_LINE, "Test Successful"); create_xml(0); }
/*
 * Handle an MLME-PURGE.request: remove the data request identified by
 * purge_request->msdu_handle from the indirect-TX pending queues (if found),
 * release its buffers, and always send a PURGE.confirm carrying the result.
 */
void ftdf_process_purge_request(ftdf_purge_request_t *purge_request)
{
    ftdf_handle_t msdu_handle = purge_request->msdu_handle;
    /* Default status: the handle was not found in any pending queue. */
    ftdf_status_t status = FTDF_INVALID_HANDLE;
    int n;

    for (n = 0; n < FTDF_NR_OF_REQ_BUFFERS; n++) {
        ftdf_msg_buffer_t *request =
            ftdf_dequeue_by_handle(msdu_handle, &ftdf_tx_pending_list[n].queue);

        if (request) {
            ftdf_data_request_t *data_request = (ftdf_data_request_t*) request;

            if (data_request->indirect_tx == FTDF_TRUE) {
                /* Cancel the transaction-persistence timer armed when the
                 * request was queued. */
                ftdf_remove_tx_pending_timer(request);
#if FTDF_FP_BIT_MODE == FTDF_FP_BIT_MODE_AUTO
                /* Automatic frame-pending bit mode: invalidate the FP address
                 * table entry for this destination address. */
                if (ftdf_tx_pending_list[n].addr_mode == FTDF_SHORT_ADDRESS) {
                    uint8_t entry, shortAddrIdx;
                    ftdf_boolean_t found = ftdf_fppr_lookup_short_address(
                            ftdf_tx_pending_list[n].addr.short_address, &entry,
                            &shortAddrIdx);
                    ASSERT_WARNING(found);
                    ftdf_fppr_set_short_address_valid(entry, shortAddrIdx, FTDF_FALSE);
                } else if (ftdf_tx_pending_list[n].addr_mode == FTDF_EXTENDED_ADDRESS) {
                    uint8_t entry;
                    ftdf_boolean_t found = ftdf_fppr_lookup_ext_address(
                            ftdf_tx_pending_list[n].addr.ext_address, &entry);
                    ASSERT_WARNING(found);
                    ftdf_fppr_set_ext_address_valid(entry, FTDF_FALSE);
                } else {
                    /* A queued indirect request must have a short or extended
                     * destination address. */
                    ASSERT_WARNING(0);
                }
#endif /* FTDF_FP_BIT_MODE == FTDF_FP_BIT_MODE_AUTO */
                if (ftdf_is_queue_empty(&ftdf_tx_pending_list[n].queue)) {
                    /* Last pending frame for this address: free the queue slot. */
                    ftdf_tx_pending_list[n].addr_mode = FTDF_NO_ADDRESS;
                }
            }

            /* Release the MSDU payload buffer and the request message buffer. */
            FTDF_REL_DATA_BUFFER(data_request->msdu);
            FTDF_REL_MSG_BUFFER((ftdf_msg_buffer_t*) data_request);
            status = FTDF_SUCCESS;
            break;
        }
    }

    /* Always confirm the purge, whether or not the handle was found. */
    ftdf_purge_confirm_t *purge_confirm =
        (ftdf_purge_confirm_t*) FTDF_GET_MSG_BUFFER(sizeof(ftdf_purge_confirm_t));

    purge_confirm->msg_id = FTDF_PURGE_CONFIRM;
    purge_confirm->msdu_handle = msdu_handle;
    purge_confirm->status = status;

    FTDF_REL_MSG_BUFFER((ftdf_msg_buffer_t*) purge_request);
    FTDF_RCV_MSG((ftdf_msg_buffer_t*) purge_confirm);
}
// Rewind the underlying file and re-read its two-int header, checking that
// the stored example count and dimension still match what init() recorded,
// then position the cursor before the first example.
void XFileDataSet::reset()
{
  ASSERT_ERROR_MESSAGE( example, "Example is NULL, maybe you forgot to call init()?" );

  file->rewind();

  int headerValue;
  file->read(&headerValue, sizeof(int), 1);   // stored number of examples
  ASSERT_WARNING( headerValue == nExamples );
  file->read(&headerValue, sizeof(int), 1);   // stored example dimension
  ASSERT_WARNING( headerValue == dim );

  currentExampleIndex = -1;
}
void XFileDataSet::init() { file->rewind(); file->read(&nExamples, sizeof(int), 1); file->read(&dim, sizeof(int), 1); ASSERT_WARNING( nExamples > 0 ); ASSERT_WARNING( dim > 0 ); // Allocate example. DataSet::init(); currentExampleIndex = -1; }
/* Verify that a GstTask with an explicitly set lock can be started, signals
 * the test thread once it runs, and can be joined; also checks that trying
 * to replace the lock while the task is running emits a warning. */
void test_lock_start()
{
  GstTask *t;
  gboolean ret;

  //xmlfile = "test_lock_start";
  std_log(LOG_FILENAME_LINE, "Test Started test_lock_start");

  t = gst_task_create (task_func, NULL);
  fail_if (t == NULL);
  TEST_ASSERT_FAIL

  gst_task_set_lock (t, &task_mutex);

  task_cond = g_cond_new ();
  task_lock = g_mutex_new ();

  /* hold task_lock so the task cannot signal before we start waiting */
  g_mutex_lock (task_lock);
  GST_DEBUG ("starting");
  ret = gst_task_start (t);
  fail_unless (ret == TRUE);
  TEST_ASSERT_FAIL

  /* wait for it to spin up */
  GST_DEBUG ("waiting");
  g_cond_wait (task_cond, task_lock);
  GST_DEBUG ("done waiting");
  g_mutex_unlock (task_lock);

  /* cannot set mutex now */
  ASSERT_WARNING (gst_task_set_lock (t, &task_mutex));//b failing

  GST_DEBUG ("joining");
  ret = gst_task_join (t);
  fail_unless (ret == TRUE);
  TEST_ASSERT_FAIL

  gst_object_unref (t);

  std_log(LOG_FILENAME_LINE, "Test Successful");
  create_xml(0);
}
// Re-read all gear inputs and rebuild the derived dithering state: cluster
// size, spot function type, per-channel screen angles, the threshold matrix
// and the cached polar coordinates.
void Gear_ClusteredDither::internalInit()
{
  // Clamp all inputs to sane ranges before use.
  _clusterSize = CLAMP((int)_CLUSTER_SIZE_IN->type()->value(), 2, 512);
  _spotType = (eSpotType)CLAMP((int)_SPOT_TYPE_IN->type()->value(), (int)SQUARE, (int)LINE);

  // Working tile width is three cluster sizes.
  _width = _clusterSize * 3;

  // Force the polar-coordinate cache to be rebuilt for the new geometry.
  _sizeX = _sizeY = 0;

  // Screen angles (clamped to 0..360 degrees, converted to radians),
  // one per RGB channel.
  _angle[0] = DEG2RAD(CLAMP((int)_ANGLE_RED_IN->type()->value(), 0, 360));
  _angle[1] = DEG2RAD(CLAMP((int)_ANGLE_GREEN_IN->type()->value(), 0, 360));
  _angle[2] = DEG2RAD(CLAMP((int)_ANGLE_BLUE_IN->type()->value(), 0, 360));

  updateThreshold();
  updatePolarCoordinates();

  updateAngle(0);
  updateAngle(1);
  updateAngle(2);

  // Both tables must have been allocated by the update calls above.
  ASSERT_WARNING(_threshold);
  ASSERT_WARNING(_order);
}
int addr_dict_split::load() { proc_data* p_data = proc_data::instance(); FILE * fp = fopen(_fullpath, "r"); ASSERT_WARNING(fp != NULL,"open query dict failed. path[%s]", _fullpath); char line[SIZE_LEN_1024]; char * ptr = NULL; uint32_t query_sign[2]; while (fgets(line, 1024, fp)) { if('\0' == line[0]){ continue; } //line[strlen(line) - 1] = '\0'; ptr = im_chomp(line); if (ptr == NULL || *ptr == '\0'|| *ptr == '#') continue; std::vector<std::string> tmp_vec; SplitString(ptr, '\t', &tmp_vec, SPLIT_MODE_ALL); if (tmp_vec.size() < 2) { continue; } std::string id = *(tmp_vec.begin()); tmp_vec.erase(tmp_vec.begin()); for (auto iit = tmp_vec.begin(); iit != tmp_vec.end(); iit++) { std::shared_ptr<std::string> ss(new std::string(trim(iit->c_str()))); auto iii = _addr_set.find(ss); if (iii != _addr_set.end()) { p_data->_address_index->idle()->insert(std::make_pair(*iii, id)); } else { _addr_set.insert(ss); p_data->_address_index->idle()->insert(std::make_pair(ss, id)); } } } fclose(fp); struct stat st; stat(_fullpath, &st); _last_load = st.st_mtime; p_data->_address_index->idle_2_current(); return 0; }
// Recompute the per-pixel (rx, ry) lookup coordinates for one color channel
// by rotating the cached polar coordinates (_r, _theta) by that channel's
// screen angle and wrapping the result into the range [0, _width-1].
void Gear_ClusteredDither::updateAngle(int channel)
{
  ASSERT_ERROR(channel >= 0 && channel < SIZE_RGB);

  double angle = _angle[channel];

  _rChannel[channel].resize(_sizeX, _sizeY);

  // Walk the cached polar tables and the output table in lockstep.
  Array2DType<double>::iterator rIt = _r.begin(), thetaIt = _theta.begin();
  Array2DType<std::pair<int, int> >::iterator rChannelIt = _rChannel[channel].begin();

  for (int y=0; y<_sizeY; ++y)
  {
    for (int x=0; x<_sizeX; ++x, ++rIt, ++thetaIt, ++rChannelIt)
    {
      // Rotate by the channel's screen angle, then convert the polar
      // coordinate back to cartesian.
      double theta_c = *thetaIt + angle;
      int rx = (int)rint(*rIt * fastcos(theta_c) );
      int ry = (int)rint(*rIt * fastsin(theta_c) );

      /* Make sure rx and ry are positive and within
       * the range 0 .. width-1 (incl). Can't use %
       * operator, since its definition on negative
       * numbers is not helpful. Can't use ABS(),
       * since that would cause reflection about the
       * x- and y-axes. Relies on integer division
       * rounding towards zero. */
      rx -= ((rx - isNeg(rx)*(_width-1)) / _width) * _width;
      ry -= ((ry - isNeg(ry)*(_width-1)) / _width) * _width;

      ASSERT_WARNING(rx >= 0 && rx <= _width-1);
      ASSERT_WARNING(ry >= 0 && ry <= _width-1);

      rChannelIt->first = rx;
      rChannelIt->second = ry;
    }
  }
}
//------------------------------------- // ~Load //------------------------------------- void XContainer::Load(const std::string &path) { LPD3DXBUFFER adjacency_buffer; if (FAILED(D3DXLoadMeshFromX( path.c_str(), D3DXMESH_SYSTEMMEM, DirectX9Holder::device_, &adjacency_buffer, &material_buffer_, NULL, &material_count_, &mesh_))) { std::string warning; warning = path; warning += ": このファイルが見つかりません"; ASSERT_WARNING(warning.c_str()); ASSERT_ERROR("モデル読み込みに失敗"); return; } if (FAILED(mesh_->OptimizeInplace( D3DXMESHOPT_COMPACT | D3DXMESHOPT_ATTRSORT | D3DXMESHOPT_VERTEXCACHE, (DWORD*)adjacency_buffer->GetBufferPointer(), NULL, NULL, NULL))) { ASSERT_ERROR("モデルのオプティマイズに失敗"); return; } D3DVERTEXELEMENT9 elements[] = { { 0, 0, D3DDECLTYPE_FLOAT3, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_POSITION, 0 }, { 0, 12, D3DDECLTYPE_FLOAT3, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_NORMAL, 0 }, { 0, 24, D3DDECLTYPE_FLOAT2, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_TEXCOORD, 0 }, D3DDECL_END() }; LPD3DXMESH old_mesh = mesh_; if (FAILED(old_mesh->CloneMesh( D3DXMESH_MANAGED, elements, DirectX9Holder::device_, &mesh_))) { ASSERT_ERROR("モデルのコンバートに失敗"); return; } SAFE_RELEASE(old_mesh); }
int strategy_conf::dump() { FILE * fp = fopen(_dumppath, "w"); ASSERT_WARNING(fp != NULL, "finance_dict dump_data failed, open file [%s] error", _dumppath); for (auto &ii: _cfg) { fprintf(fp, "%s:%s\n", ii.first.c_str(), ii.second.c_str()); } fclose(fp); return 0; }
static __RETAINED_CODE void configure_cache(void) { bool flush = false; GLOBAL_INT_DISABLE(); if (dg_configCACHEABLE_QSPI_AREA_LEN != -1) { uint32_t cache_len; /* dg_configCACHEABLE_QSPI_AREA_LEN must be 64KB-aligned */ ASSERT_WARNING((dg_configCACHEABLE_QSPI_AREA_LEN & 0xFFFF) == 0); /* * dg_configCACHEABLE_QSPI_AREA_LEN shouldn't set any bits that do not fit in * CACHE_CTRL2_REG.CACHE_LEN (9 bits wide) after shifting out the lower 16 bits */ ASSERT_WARNING((dg_configCACHEABLE_QSPI_AREA_LEN & 0x1FF0000) == dg_configCACHEABLE_QSPI_AREA_LEN); /* * set cacheable area * * setting CACHE_CTRL2_REG.CACHE_LEN to N, actually sets the size of the cacheable * area to (N + 1) * 64KB * special cases: * N == 0 --> no caching * N == 1 --> 128KB are cached, i.e. no way to cache only 64KB */ cache_len = dg_configCACHEABLE_QSPI_AREA_LEN >> 16; /* cannot cache only 64KB! */ ASSERT_WARNING(cache_len != 1); if (cache_len > 1) { cache_len--; } REG_SETF(CACHE, CACHE_CTRL2_REG, CACHE_LEN, cache_len); }
/* Task body that first signals the test thread that it is running and then
 * tries to join its own task -- a join from inside the task has to fail
 * (return FALSE) and emit a warning. */
static void task_func2 (void *data)
{
  GstTask *self = *((GstTask **) data);
  gboolean joined;

  /* tell the waiting test thread we are up */
  g_mutex_lock (&task_lock);
  GST_DEBUG ("signal");
  g_cond_signal (&task_cond);
  g_mutex_unlock (&task_lock);

  ASSERT_WARNING (joined = gst_task_join (self));
  fail_unless (joined == FALSE);
}
//------------------------------------- // ~Thread() //------------------------------------- MyThread::~MyThread() { BOOL result; if (thread_) { WaitForSingleObject(thread_, INFINITE); result = CloseHandle(thread_); if (!result){ ASSERT_ERROR("スレッドの破棄に失敗"); } } else { ASSERT_WARNING("スレッドは動いていません"); } }
static sys_clk_t ClkGet(void) { sys_clk_t clk = sysclk_RC16; uint32_t hw_clk = hw_cpm_get_sysclk(); switch (hw_clk) { case SYS_CLK_IS_RC16: clk = sysclk_RC16; break; case SYS_CLK_IS_XTAL16M: if (dg_configEXT_CRYSTAL_FREQ == EXT_CRYSTAL_IS_16M) { clk = sysclk_XTAL16M; } else { clk = sysclk_XTAL32M; } break; case SYS_CLK_IS_PLL: if (hw_cpm_get_pll_divider_status() == 1) { clk = sysclk_PLL48; } else { clk = sysclk_PLL96; } break; case SYS_CLK_IS_LP: // fall-through default: ASSERT_WARNING(0); break; } return clk; }
/* Verify that TBBMALLOC_SET_SOFT_HEAP_LIMIT really disables caching: with a
 * tiny soft limit (1) every large allocation must be requested from the OS,
 * and after multi-threaded stress the process memory footprint must return
 * exactly to its initial value. */
void TestHeapLimit()
{
    if(!isMallocInitialized()) doInitialization();
    // tiny limit to stop caching
    int res = scalable_allocation_mode(TBBMALLOC_SET_SOFT_HEAP_LIMIT, 1);
    ASSERT(res == TBBMALLOC_OK, NULL);
    // provoke bootstrap heap initialization before recording memory size
    scalable_free(scalable_malloc(8));
    size_t n, sizeBefore = getMemSize();

    // Try to provoke call to OS for memory to check that
    // requests are not fulfilled from caches.
    // Single call is not enough here because of backend fragmentation.
    for (n = minLargeObjectSize; n < 10*1024*1024; n += 16*1024) {
        void *p = scalable_malloc(n);
        // footprint grew -> the allocator went to the OS for this size
        bool leave = (sizeBefore != getMemSize());
        scalable_free(p);
        if (leave)
            break;
        ASSERT(sizeBefore == getMemSize(), "No caching expected");
    }
    ASSERT(n < 10*1024*1024, "scalable_malloc doesn't provoke OS request for memory, "
           "is some internal cache still used?");

    // estimate number of objects in single bootstrap block
    int objInBootstrapHeapBlock = (slabSize-2*estimatedCacheLineSize)/sizeof(TLSData);
    // When we have more threads than objects in bootstrap heap block,
    // additional block can be allocated from a region that is different
    // from the original region. Thus even after all caches cleaned,
    // we unable to reach sizeBefore.
    ASSERT_WARNING(MaxThread<=objInBootstrapHeapBlock, "The test might fail for larger thread number, "
                   "as bootstrap heap is not released till size checking.");

    // Stress the limit from MaxThread down to MinThread concurrent threads.
    for( int p=MaxThread; p>=MinThread; --p ) {
        RunTestHeapLimit::initBarrier( p );
        NativeParallelFor( p, RunTestHeapLimit(sizeBefore) );
    }
    // it's try to match limit as well as set limit, so call here
    res = scalable_allocation_mode(TBBMALLOC_SET_SOFT_HEAP_LIMIT, 1);
    ASSERT(res == TBBMALLOC_OK, NULL);
    size_t m = getMemSize();
    // footprint must be back to the pre-test value
    ASSERT(sizeBefore == m, NULL);
    // restore default
    res = scalable_allocation_mode(TBBMALLOC_SET_SOFT_HEAP_LIMIT, 0);
    ASSERT(res == TBBMALLOC_OK, NULL);
}
/*
 * Interrupt handler used by hw_i2c_prepare_dma_ex() to handle ABORT for DMA writes.
 * Unlike intr_write_buffer_dma_handler() it handles only TX_ABORT -- presumably
 * for transfers set up without a STOP condition (see the "_no_stop" name;
 * confirm against hw_i2c_prepare_dma_ex()).
 */
static void intr_write_buffer_dma_no_stop_handler(HW_I2C_ID id, uint16_t mask)
{
        /* Must provide a valid (> 0) mask */
        ASSERT_WARNING(mask != 0);

        if (mask & HW_I2C_INT_TX_ABORT) {
                /* disable I2C DMA */
                IBA(id)->I2C_DMA_CR_REG = 0;

                /* signal the DMA write completion path with failure status */
                dma_tx_reply(id, false);

                /* clear abort */
                hw_i2c_reset_int_tx_abort(id);

                return;
        }
}
/*
 * Interrupt handler used by hw_i2c_prepare_dma_ex() to handle STOP and ABORT for DMA writes
 */
static void intr_write_buffer_dma_handler(HW_I2C_ID id, uint16_t mask)
{
        struct i2c *i2c = get_i2c(id);
        struct tx_state *txs = &i2c->tx_state;

        /* Must provide a valid (> 0) mask */
        ASSERT_WARNING(mask != 0);

        if (mask & HW_I2C_INT_TX_ABORT) {
                /* disable I2C DMA */
                IBA(id)->I2C_DMA_CR_REG = 0;

                /* signal the DMA write completion path with failure status */
                dma_tx_reply(id, false);

                /* clear abort */
                hw_i2c_reset_int_tx_abort(id);

                return;
        }

        if (mask & HW_I2C_INT_STOP_DETECTED) {
                if (IBA(id)->I2C_DMA_CR_REG != 0) {
                        hw_i2c_reset_int_stop_detected(id);

                        /*
                         * A STOP while DMA is still enabled is caused by a NACK from the slave.
                         * While servicing the STOP_DETECTED interrupt we don't need to call the
                         * reply callback. This will be done when servicing the TX_ABORT interrupt
                         * that will follow.
                         */
                        return;
                }

                /* success only if every byte of the buffer was transferred */
                dma_tx_reply(id, txs->num == txs->len);

                hw_i2c_reset_int_stop_detected(id);

                return;
        }

        /* */
}
// Watchdog body: repeatedly yields and samples the per-track progress
// counters (TaskTracks) until every track reaches PairsPerTrack.  Asserts
// if no track makes progress for stall_threshold rounds (deadlock or heavy
// oversubscription) and warns when progress stays uneven for too long.
void operator() ( ) {
    int track_snapshot[nTracks];
    int stall_count = 0, uneven_progress_count = 0, last_progress_mask = 0;
    for(int i=0; i<nTracks; ++i)
        track_snapshot[i]=0;
    bool completed;
    do {
        // Yield repeatedly for at least 1 usec
        TimedYield( 1E-6 );
        int overall_progress = 0, progress_mask = 0;
        const int all_progressed = (1<<nTracks) - 1;
        completed = true;
        for(int i=0; i<nTracks; ++i) {
            int ti = TaskTracks[i];
            // progress of track i since the previous sample
            int pi = ti-track_snapshot[i];
            if( pi ) progress_mask |= 1<<i;
            overall_progress += pi;
            completed = completed && ti==PairsPerTrack;
            track_snapshot[i]=ti;
        }
        // The constants in the next asserts are subjective and may need correction.
        if( overall_progress ) stall_count=0;
        else {
            ++stall_count;
            // no progress; consider it dead.
            ASSERT(stall_count < stall_threshold, "no progress on enqueued tasks; deadlock, or the machine is heavily oversubscribed?");
        }
        // Reset the unevenness counter whenever all tracks advanced together
        // or the set of advancing tracks changed since the last round.
        if( progress_mask==all_progressed || progress_mask^last_progress_mask ) {
            uneven_progress_count = 0;
            last_progress_mask = progress_mask;
        }
        else if ( overall_progress > 2 ) {
            ++uneven_progress_count;
            // The threshold of 32 is 4x bigger than what was observed on a 8-core machine with oversubscription.
            ASSERT_WARNING(uneven_progress_count < 32, "some enqueued tasks seem stalling; no simultaneous progress, or the machine is oversubscribed? Investigate if repeated");
        }
    } while( !completed );
}
// Per-thread test body: create an observer with this thread's flag bit,
// initialize a scheduler with m_numThreads, run two rounds of Fibonacci
// task trees under observation, and -- when the leaving-control mode is
// active -- dismiss workers one by one, checking each actually leaves.
void operator()( int i ) const {
    theLocalState->m_isMaster = true;
    // Threads up to MaxFlagIndex get a unique observation flag bit; the
    // rest observe with no flag.
    uintptr_t f = i <= MaxFlagIndex ? 1<<i : 0;
    MyObserver o(f);
    if ( theTestMode & tmSynchronized )
        theMasterBarrier.wait();
    // when mode is local observation but not synchronized and when num threads == default
    if ( theTestMode & tmAutoinitialization )
        o.observe(true); // test autoinitialization can be done by observer
    // when mode is local synchronized observation and when num threads == default
    if ( theTestMode & tmLeavingControl )
        o.test_leaving();
    // Observer in enabled state must outlive the scheduler to ensure that
    // all exit notifications are called.
    tbb::task_scheduler_init init(m_numThreads);
    // when local & non-autoinitialized observation mode
    if ( theTestMode & tmLocalObservation )
        o.observe(true);
    for ( int j = 0; j < 2; ++j ) {
        tbb::task &t = *new( tbb::task::allocate_root() ) FibTask(m_numThreads, f, o);
        tbb::task::spawn_root_and_wait(t);
        thePrevMode = theTestMode;
    }
    if( o.is_leaving_test() ) {
        REMARK( "Testing on_scheduler_leaving()\n");
        ASSERT(o.m_workerEntries > 0, "Unbelievable");
        // TODO: start from 0?
        for ( int j = o.m_workerExits; j < o.m_workerEntries; j++ ) {
            REMARK( "Round %d: entries %d, exits %d\n", j, (int)o.m_workerEntries, (int)o.m_workerExits );

            // Exactly j workers must have left so far -- one per dismissal.
            ASSERT_WARNING(o.m_workerExits == j, "Workers unexpectedly leaved arena");

            o.dismiss_one();
            double n_seconds = 5;
            // Wait (up to 5s) for the dismissed worker's exit notification.
            (Harness::TimedWaitWhileEq(n_seconds))(o.m_workerExits, j);
            ASSERT( n_seconds >= 0, "Time out while waiting for a worker to leave arena");
            __TBB_Yield();
        }
    }
}
int strategy_conf::load() { FILE * fp = fopen(_fullpath, "r"); ASSERT_WARNING(fp != NULL,"open query dict failed. path[%s]", _fullpath); char line[SIZE_LEN_1024]; char * ptr = NULL; _cfg.clear(); std::vector<std::string> tmp_vec; while (fgets(line, 1024, fp)) { if('\0' == line[0]){ continue; } ptr = im_chomp(line); if (ptr == NULL || *ptr == '\0'|| *ptr == '#') continue; SplitString(ptr, "=", &tmp_vec, SPLIT_MODE_ONE | SPLIT_MODE_TRIM); if (tmp_vec.size() != 2) continue; _cfg.insert(std::make_pair(trim(tmp_vec[0].c_str()), trim(tmp_vec[1].c_str()))); } fclose(fp); do_parse(); struct stat st; stat(_fullpath, &st); _last_load = st.st_mtime; return 0; }
// Run one training episode over the given dataset and bump the episode
// counter.  The counter is logged before the increment, so the first
// episode is reported as episode # 0.
void DataSetTrainer::trainEpisode(DataSet* data)
{
  // NOTE(review): ">= 0" accepts an empty dataset; if that is unintended
  // the check should probably be "> 0" -- confirm with callers.
  ASSERT_WARNING(data->nExamples >= 0);
  MESSAGE("Training episode # %d.", nEpisodes);
  _doTrainEpisode(data);
  nEpisodes++;
}
/*
 * Decide whether the system may sleep and, when deep sleep is possible,
 * program the BLE core wake-up timing (TWIRQ_SET/TWIRQ_RESET/TWEXT) and put
 * the BLE core and radio to sleep.
 *
 * Returns the sleep mode the caller (the ARM core) should enter:
 *   mode_active   - sleep currently not allowed
 *   mode_idle     - only processor idle is allowed
 *   mode_sleeping - the BLE core has been put to (or already is in) deep sleep
 */
sleep_mode_t rwip_sleep(void)
{
    sleep_mode_t proc_sleep = mode_active;
    uint32_t twirq_set_value;
    uint32_t twirq_reset_value;
    uint32_t twext_value;

#if (DEEP_SLEEP)
    uint32_t sleep_duration = jump_table_struct[max_sleep_duration_external_wakeup_pos];//MAX_SLEEP_DURATION_EXTERNAL_WAKEUP;
#endif //DEEP_SLEEP

#ifndef DEVELOPMENT_DEBUG
    uint32_t sleep_lp_cycles;
#endif

    DBG_SWDIAG(SLEEP, ALGO, 0);

#if (BLE_APP_PRESENT)
    if ( app_ble_ext_wakeup_get() || (rwip_env.ext_wakeup_enable == 2) ) // sleep forever!
        sleep_duration = 0;
#else
# if (!EXTERNAL_WAKEUP) // sleep_duration will remain as it was set above....
    if (rwip_env.ext_wakeup_enable == 2)
        sleep_duration = 0;
# endif
#endif

    /* Single-pass do/while(0): any failed check breaks out with the mode
     * computed so far. */
    do
    {
        /************************************************************************
         **************            CHECK STARTUP FLAG              **************
         ************************************************************************/
        POWER_PROFILE_INIT;

        // Do not allow sleep if system is in startup period
        if (check_sys_startup_period())
            break;

        /************************************************************************
         **************            CHECK KERNEL EVENTS             **************
         ************************************************************************/
        // Check if some kernel processing is ongoing
        if (!ke_sleep_check())
            break;

        // Processor sleep can be enabled
        proc_sleep = mode_idle;

        DBG_SWDIAG(SLEEP, ALGO, 1);

#if (DEEP_SLEEP)
        /************************************************************************
         **************             CHECK ENABLE FLAG              **************
         ************************************************************************/
        // Check sleep enable flag
        if(!rwip_env.sleep_enable)
            break;

        /************************************************************************
         **************           CHECK RADIO POWER DOWN           **************
         ************************************************************************/
        // Check if BLE + Radio are still sleeping
        if(GetBits16(SYS_STAT_REG, RAD_IS_DOWN))
        {
            // If BLE + Radio are in sleep return the appropriate mode for ARM
            proc_sleep = mode_sleeping;
            break;
        }

        /************************************************************************
         **************              CHECK RW FLAGS                **************
         ************************************************************************/
        // First check if no pending procedure prevents us from going to sleep
        if (rwip_prevent_sleep_get() != 0)
            break;

        DBG_SWDIAG(SLEEP, ALGO, 2);

        /************************************************************************
         **************           CHECK EXT WAKEUP FLAG            **************
         ************************************************************************/
        /* If external wakeup is enabled, sleep duration can be set to maximum, otherwise
         * the system must be woken-up periodically to poll incoming packets from HCI */
        if((BLE_APP_PRESENT == 0) || (BLE_INTEGRATED_HOST_GTL == 1 )) // No need for periodic wakeup if we have full-hosted system
        {
            if(!rwip_env.ext_wakeup_enable)
                sleep_duration = jump_table_struct[max_sleep_duration_periodic_wakeup_pos]; // MAX_SLEEP_DURATION_PERIODIC_WAKEUP;
        }

        /************************************************************************
         *                                                                      *
         *                   CHECK DURATION UNTIL NEXT EVENT                    *
         *                                                                      *
         ************************************************************************/
        // If there's any timer pending, compute the time to wake-up to serve it
        if (ke_env.queue_timer.first != NULL)
            sleep_duration = jump_table_struct[max_sleep_duration_external_wakeup_pos];

#ifdef USE_POWER_OPTIMIZATIONS
        // Store sleep_duration calculated so far. Check below if sleep would be allowed.
        // If not, there's no reason to verify / ensure the available time for SLP...
        uint32_t tmp_dur = sleep_duration;
#endif

        /************************************************************************
         **************            CHECK KERNEL TIMERS             **************
         ************************************************************************/
        // Compute the duration up to the next software timer expires
        if (!ke_timer_sleep_check(&sleep_duration, rwip_env.wakeup_delay))
            break;

        DBG_SWDIAG(SLEEP, ALGO, 3);

#if (BLE_EMB_PRESENT)
        /************************************************************************
         **************                 CHECK BLE                  **************
         ************************************************************************/
        // Compute the duration up to the next BLE event
        if (!lld_sleep_check(&sleep_duration, rwip_env.wakeup_delay))
            break;
#endif // BLE_EMB_PRESENT

        DBG_SWDIAG(SLEEP, ALGO, 4);

#if (BT_EMB_PRESENT)
        /************************************************************************
         **************                 CHECK BT                   **************
         ************************************************************************/
        // Compute the duration up to the next BT active slot
        if (!ld_sleep_check(&sleep_duration, rwip_env.wakeup_delay))
            break;
#endif // BT_EMB_PRESENT

        DBG_SWDIAG(SLEEP, ALGO, 5);

#if (HCIC_ITF)
        /************************************************************************
         **************                 CHECK HCI                  **************
         ************************************************************************/
        if((BLE_APP_PRESENT == 0) || (BLE_INTEGRATED_HOST_GTL == 1 ))
        {
            // Try to switch off HCI
            if (!hci_enter_sleep())
                break;
        }
#endif // HCIC_ITF

#if (GTL_ITF)
        /************************************************************************
         **************                 CHECK TL                   **************
         ************************************************************************/
        if((BLE_APP_PRESENT == 0) || (BLE_INTEGRATED_HOST_GTL == 1 ))
        {
            // Try to switch off Transport Layer
            if (!gtl_enter_sleep())
                break;
        }
#endif // GTL_ITF

        DBG_SWDIAG(SLEEP, ALGO, 6);

#ifdef USE_POWER_OPTIMIZATIONS
        /************************************************************************
         ******     BLOCK UNTIL THERE'S TIME FOR sleep() AND SLP ISR       ******
         ************************************************************************/
        uint32_t xtal16m_settling_cycles;
        bool rcx_duration_corr = false;

        // Restore sleep_duration
        sleep_duration = tmp_dur;

        /*
         * Wait until there's enough time for SLP to restore clocks when the chip wakes up.
         * Then check again if sleep is possible.
         */
        if ( ((lp_clk_sel == LP_CLK_RCX20) && (CFG_LP_CLK == LP_CLK_FROM_OTP)) || (CFG_LP_CLK == LP_CLK_RCX20) )
        {
            xtal16m_settling_cycles = lld_sleep_us_2_lpcycles_sel_func(XTAL16M_SETTLING_IN_USEC);

            // Busy-wait out of the "forbidden" fine-time window of the slot.
            while ( (ble_finetimecnt_get() < 550) && (ble_finetimecnt_get() > 200) );

            // If we are close to the end of this slot then the actual sleep entry will
            // occur during the next one. But the sleep_duration will have been calculated
            // based on the current slot...
            if (ble_finetimecnt_get() <= 200)
                rcx_duration_corr = true;
        }
        else if ( ((lp_clk_sel == LP_CLK_XTAL32) && (CFG_LP_CLK == LP_CLK_FROM_OTP)) || (CFG_LP_CLK == LP_CLK_XTAL32) )
        {
            while (ble_finetimecnt_get() < 300);
        }

        /************************************************************************
         *                                                                      *
         *                   CHECK DURATION UNTIL NEXT EVENT                    *
         *                        (this is the 2nd check)                       *
         *                                                                      *
         ************************************************************************/
        bool sleep_check = false;

        do
        {
            /************************************************************************
             **************          CHECK KERNEL TIMERS (2)           **************
             ************************************************************************/
            // Compute the duration up to the next software timer expires
            if (!ke_timer_sleep_check(&sleep_duration, rwip_env.wakeup_delay))
                break;

            DBG_SWDIAG(SLEEP, ALGO, 3);

#if (BLE_EMB_PRESENT)
            /************************************************************************
             **************               CHECK BLE (2)                **************
             ************************************************************************/
            // Compute the duration up to the next BLE event
            if (!lld_sleep_check(&sleep_duration, rwip_env.wakeup_delay))
                break;
#endif // BLE_EMB_PRESENT

            sleep_check = true;
        } while(0);

        if (!sleep_check)
        {
            // The 2nd check failed: re-initialize the serial interface that
            // was switched off above before aborting the sleep attempt.
            if((BLE_APP_PRESENT == 0) || (BLE_INTEGRATED_HOST_GTL == 1 ))
            {
#if BLE_HOST_PRESENT
                gtl_eif_init();
#else
                hci_eif_init();
#endif
            }

            // sleep is aborted and serial i/f communication is restored
            break;
        }

        if (sleep_duration && rcx_duration_corr)
            sleep_duration--;

        DBG_SWDIAG(SLEEP, ALGO, 4);
#endif

        POWER_PROFILE_CHECKS_COMPLETED;

        /************************************************************************
         **************           PROGRAM CORE DEEP SLEEP          **************
         ************************************************************************/
        if ( ((lp_clk_sel == LP_CLK_RCX20) && (CFG_LP_CLK == LP_CLK_FROM_OTP)) || (CFG_LP_CLK == LP_CLK_RCX20) )
        {
#if !defined(USE_POWER_OPTIMIZATIONS)
            twirq_set_value = lld_sleep_us_2_lpcycles_sel_func(XTAL_TRIMMING_TIME_USEC);
            twirq_reset_value = TWIRQ_RESET_VALUE;

            // TWEXT setting
            twext_value = TWEXT_VALUE_RCX;
#else
            // Calculate the time we need to wake-up before "time 0" to do XTAL16 settling,
            // call periph_init() and power-up the BLE core.
            uint32_t lpcycles = lld_sleep_us_2_lpcycles_sel_func(LP_ISR_TIME_USEC);

            // Set TWIRQ_SET taking into account that some LP cycles are needed for the power up FSM.
            twirq_set_value = RCX_POWER_UP_TIME + lpcycles;
            if (sleep_env.slp_state == ARCH_DEEP_SLEEP_ON)
                twirq_set_value += RCX_OTP_COPY_OVERHEAD;

            // BOOST mode + RCX is not supported
            if (GetBits16(ANA_STATUS_REG, BOOST_SELECTED) == 1)
                ASSERT_WARNING(0);

            // Program LP deassertion to occur when the XTAL16M has settled
            twirq_reset_value = lpcycles - xtal16m_settling_cycles;

            // TWEXT setting
            twext_value = lpcycles;
#endif
        }
        else if ( ((lp_clk_sel == LP_CLK_XTAL32) && (CFG_LP_CLK == LP_CLK_FROM_OTP)) || (CFG_LP_CLK == LP_CLK_XTAL32) )
        {
#if !defined(USE_POWER_OPTIMIZATIONS)
            twirq_set_value = XTAL_TRIMMING_TIME;
            twirq_reset_value = TWIRQ_RESET_VALUE;
            twext_value = TWEXT_VALUE_XTAL32;
#else
            // The time we need to wake-up before "time 0" to do XTAL16 settling,
            // call periph_init() and power-up the BLE core is LP_ISR_TIME_XTAL32_CYCLES in this case.

            // Set TWIRQ_SET taking into account that some LP cycles are needed for the power up FSM.
            twirq_set_value = XTAL32_POWER_UP_TIME + LP_ISR_TIME_XTAL32_CYCLES;
            if (sleep_env.slp_state == ARCH_DEEP_SLEEP_ON)
                twirq_set_value += XTAL32_OTP_COPY_OVERHEAD;

            // Adjust TWIRQ_SET in case of BOOST mode, if needed
            if (set_boost_low_vbat1v_overhead == APPLY_OVERHEAD)
                twirq_set_value += BOOST_POWER_UP_OVERHEAD;
            set_boost_low_vbat1v_overhead = NOT_MEASURED;

            // Program LP deassertion to occur when the XTAL16M has settled
            twirq_reset_value = LP_ISR_TIME_XTAL32_CYCLES - XTAL16M_SETTLING_IN_XTAL32_CYCLES;

            // TWEXT setting
            twext_value = LP_ISR_TIME_XTAL32_CYCLES;
#endif
        }

        //Prepare BLE_ENBPRESET_REG for next sleep cycle
        SetBits32(BLE_ENBPRESET_REG, TWIRQ_RESET, twirq_reset_value);   // TWIRQ_RESET
        SetBits32(BLE_ENBPRESET_REG, TWIRQ_SET, twirq_set_value);       // TWIRQ_SET
        SetBits32(BLE_ENBPRESET_REG, TWEXT, twext_value);               // TWEXT

        //Everything ready for sleep!
        proc_sleep = mode_sleeping;

#ifdef USE_POWER_OPTIMIZATIONS
        // Eliminate any additional delays.
        if (sleep_duration)
            sleep_duration += SLEEP_DURATION_CORR;
        POWER_PROFILE_SLEEP_TIMES;
#endif

#if (BT_EMB_PRESENT)
        // Put BT core into deep sleep
        ld_sleep_enter(rwip_slot_2_lpcycles(sleep_duration), rwip_env.ext_wakeup_enable);
#elif (BLE_EMB_PRESENT)
        // Put BT core into deep sleep
        if ( ((lp_clk_sel == LP_CLK_XTAL32) && (CFG_LP_CLK == LP_CLK_FROM_OTP)) || (CFG_LP_CLK == LP_CLK_XTAL32) )
            sleep_lp_cycles = rwip_slot_2_lpcycles(sleep_duration);
        else if ( ((lp_clk_sel == LP_CLK_RCX20) && (CFG_LP_CLK == LP_CLK_FROM_OTP)) || (CFG_LP_CLK == LP_CLK_RCX20) )
            sleep_lp_cycles = rwip_slot_2_lpcycles_rcx(sleep_duration);
        lld_sleep_enter(sleep_lp_cycles, rwip_env.ext_wakeup_enable);
#endif //BT_EMB_PRESENT / BT_EMB_PRESENT

        DBG_SWDIAG(SLEEP, SLEEP, 1);

        /************************************************************************
         **************               SWITCH OFF RF                **************
         ************************************************************************/
        POWER_PROFILE_REMAINING_TIME;

        rwip_rf.sleep();

#ifdef USE_POWER_OPTIMIZATIONS
        // We may lower the clock now while we are waiting the BLE to go to sleep...
        bool slow_system_clk = false;
#if (BLE_APP_PRESENT)
        if ( app_use_lower_clocks_check() )
#endif
        {
            // It will save some power if you lower the clock while waiting for STAT...
            SetBits16(CLK_AMBA_REG, PCLK_DIV, 3); // lowest is 2MHz (div 8, source is @16MHz)
            SetBits16(CLK_AMBA_REG, HCLK_DIV, 3);

            slow_system_clk = true;
        }
#endif

        while(!ble_deep_sleep_stat_getf()); //check and wait till you may disable the radio. 32.768KHz XTAL must be running!
        //(debug note: use BLE_CNTL2_REG:MON_LP_CLK bit to check (write 0, should be set to 1 by the BLE))
        while ( !(GetWord32(BLE_CNTL2_REG) & RADIO_PWRDN_ALLOW) ) {};

#ifdef USE_POWER_OPTIMIZATIONS
        if (slow_system_clk)
        {
            // and restore clock rates (refer to a couple of lines above)
            use_highest_amba_clocks();
        }
#endif

        ble_regs_push();    // push the ble ret.vars to retention memory
        // smpc_regs_push();   // push smpc ble ret.vars to retention memory

        //BLE CLK must be turned off when DEEP_SLEEP_STAT is set
        SetBits16(CLK_RADIO_REG, BLE_ENABLE, 0);
#endif // DEEP_SLEEP
    } while(0);

    return proc_sleep;
}
/**
 * @brief Processes an MCPS-DATA.request.
 *
 * Handling depends on the request:
 *  - TSCH mode: the request is scheduled into a TSCH slot (direct short-address
 *    transfers only); anything else gets an FTDF_INVALID_PARAMETER confirm.
 *  - Indirect transfers: the request is queued on a per-destination pending list
 *    (reusing an existing queue for the destination if one exists, otherwise
 *    claiming a free one), a transaction-persistence expiry timer is armed and,
 *    when FTDF_FP_BIT_MODE_AUTO is configured, the destination is registered in
 *    the frame-pending (FP) address table. Overflow/parameter failures are
 *    reported via ftdf_send_data_confirm().
 *  - Direct transfers: the frame header, security header and optional IEs are
 *    built into the TX FIFO and transmission is started with ftdf_send_frame();
 *    if another request is in progress the request is queued on ftdf_req_queue.
 *
 * BUGFIX: the extended-address FP-table registration previously passed
 * dst_addr.short_address to ftdf_fppr_set_ext_address(); it must use
 * dst_addr.ext_address (the purge path looks the entry up by ext_address).
 *
 * @param[in] data_request  The data request to process. Ownership stays with the
 *                          MAC; the buffer is released via the confirm path.
 */
void ftdf_process_data_request(ftdf_data_request_t *data_request)
{
#ifndef FTDF_NO_TSCH
    if (ftdf_pib.tsch_enabled && ftdf_tsch_slot_link->request != data_request) {
        ftdf_status_t status;

        /* In TSCH mode only direct transfers to a short address can be scheduled. */
        if ((data_request->dst_addr_mode == FTDF_SHORT_ADDRESS) && !data_request->indirect_tx) {
            status = ftdf_schedule_tsch((ftdf_msg_buffer_t*) data_request);

            if (status == FTDF_SUCCESS) {
                return;
            }
        } else {
            status = FTDF_INVALID_PARAMETER;
        }

        ftdf_send_data_confirm(data_request, status, 0, 0, 0, NULL);
        return;
    }
#endif /* FTDF_NO_TSCH */

    int queue;
    ftdf_address_mode_t dst_addr_mode = data_request->dst_addr_mode;
    ftdf_pan_id_t dst_pan_id = data_request->dst_pan_id;
    ftdf_address_t dst_addr = data_request->dst_addr;

    /* Search for an existing indirect queue for this destination.
     * On exit: queue < FTDF_NR_OF_REQ_BUFFERS iff a matching queue was found. */
    for (queue = 0; queue < FTDF_NR_OF_REQ_BUFFERS; queue++) {
        if (dst_addr_mode == FTDF_SHORT_ADDRESS) {
            if ((ftdf_tx_pending_list[queue].addr_mode == dst_addr_mode) &&
                (ftdf_tx_pending_list[queue].addr.short_address == dst_addr.short_address)) {
                break;
            }
        } else if (dst_addr_mode == FTDF_EXTENDED_ADDRESS) {
            if ((ftdf_tx_pending_list[queue].addr_mode == dst_addr_mode) &&
                (ftdf_tx_pending_list[queue].addr.ext_address == dst_addr.ext_address)) {
                break;
            }
        }
    }

    if (data_request->indirect_tx) {
        ftdf_status_t status = FTDF_SUCCESS;

        if (queue < FTDF_NR_OF_REQ_BUFFERS) {
            /* Queue request in existing queue */
            status = ftdf_queue_req_head((ftdf_msg_buffer_t*) data_request,
                                         &ftdf_tx_pending_list[queue].queue);

            if (status == FTDF_SUCCESS) {
                /* Expire the transaction if it is not polled in time. */
                ftdf_add_tx_pending_timer((ftdf_msg_buffer_t*) data_request,
                                          queue,
                                          (ftdf_pib.transaction_persistence_time *
                                           FTDF_BASE_SUPERFRAME_DURATION),
                                          ftdf_send_transaction_expired);
                return;
            }
        }

        /* Indirect transfers require an addressed destination. */
        if ((dst_addr_mode != FTDF_EXTENDED_ADDRESS) && (dst_addr_mode != FTDF_SHORT_ADDRESS)) {
            status = FTDF_INVALID_PARAMETER;
        }

        if (status != FTDF_SUCCESS) {
            /* Queueing of indirect transfer was not successful */
            ftdf_send_data_confirm(data_request, status, 0, 0, 0, NULL);
            return;
        }

#if FTDF_FP_BIT_MODE == FTDF_FP_BIT_MODE_AUTO
        /* Reserve a frame-pending table slot before claiming a queue, so the FP
         * bit is advertised for this destination once the request is queued. */
        uint8_t entry, short_addr_idx;

        if (dst_addr_mode == FTDF_SHORT_ADDRESS) {
            if (ftdf_fppr_get_free_short_address(&entry, &short_addr_idx) == FTDF_FALSE) {
                goto transaction_overflow;
            }
        } else if (dst_addr_mode == FTDF_EXTENDED_ADDRESS) {
            if (ftdf_fppr_get_free_ext_address(&entry) == FTDF_FALSE) {
                goto transaction_overflow;
            }
        } else {
            /* Unreachable: invalid address modes were rejected above. */
            status = FTDF_INVALID_PARAMETER;
        }
#endif

        /* Search for an empty indirect queue */
        for (queue = 0; queue < FTDF_NR_OF_REQ_BUFFERS; queue++) {
            if (ftdf_tx_pending_list[queue].addr_mode == FTDF_NO_ADDRESS) {
                ftdf_tx_pending_list[queue].addr_mode = dst_addr_mode;
                ftdf_tx_pending_list[queue].pan_id = dst_pan_id;
                ftdf_tx_pending_list[queue].addr = dst_addr;

                status = ftdf_queue_req_head((ftdf_msg_buffer_t*) data_request,
                                             &ftdf_tx_pending_list[queue].queue);

                if (status == FTDF_SUCCESS) {
#if FTDF_FP_BIT_MODE == FTDF_FP_BIT_MODE_AUTO
                    if (dst_addr_mode == FTDF_SHORT_ADDRESS) {
                        ftdf_fppr_set_short_address(entry, short_addr_idx,
                                                    dst_addr.short_address);
                        ftdf_fppr_set_short_address_valid(entry, short_addr_idx, FTDF_TRUE);
                    } else if (dst_addr_mode == FTDF_EXTENDED_ADDRESS) {
                        /* BUGFIX: was dst_addr.short_address — wrong union member;
                         * the purge path matches entries via the extended address. */
                        ftdf_fppr_set_ext_address(entry, dst_addr.ext_address);
                        ftdf_fppr_set_ext_address_valid(entry, FTDF_TRUE);
                    } else {
                        ASSERT_WARNING(0);
                    }
#endif /* FTDF_FP_BIT_MODE == FTDF_FP_BIT_MODE_AUTO */
                    ftdf_add_tx_pending_timer((ftdf_msg_buffer_t*) data_request,
                                              queue,
                                              (ftdf_pib.transaction_persistence_time *
                                               FTDF_BASE_SUPERFRAME_DURATION),
                                              ftdf_send_transaction_expired);
                    return;
                } else {
                    break;
                }
            }
        }

        /* Did not find an existing or an empty queue */
#if FTDF_FP_BIT_MODE == FTDF_FP_BIT_MODE_AUTO
transaction_overflow:
#endif
        ftdf_send_data_confirm(data_request, FTDF_TRANSACTION_OVERFLOW, 0, 0, 0, NULL);
        return;
    }

    /* Direct transfer: take ownership of the current-request slot, or queue
     * behind the transmission that is already in progress. */
    if (ftdf_req_current == NULL) {
        ftdf_req_current = (ftdf_msg_buffer_t*) data_request;
    } else {
        if (ftdf_queue_req_head((ftdf_msg_buffer_t*) data_request,
                                &ftdf_req_queue) == FTDF_TRANSACTION_OVERFLOW) {
            ftdf_send_data_confirm(data_request, FTDF_TRANSACTION_OVERFLOW, 0, 0, 0, NULL);
        }
        return;
    }

    ftdf_frame_header_t *frame_header = &ftdf_fh;
    ftdf_security_header *security_header = &ftdf_sh;

    frame_header->frame_type = data_request->send_multi_purpose ?
        FTDF_MULTIPURPOSE_FRAME : FTDF_DATA_FRAME;

    /* The earlier search left queue < FTDF_NR_OF_REQ_BUFFERS when indirect
     * frames are still pending for this destination -> set the FP bit. */
    ftdf_boolean_t frame_pending;

    if (queue < FTDF_NR_OF_REQ_BUFFERS) {
        frame_pending = FTDF_TRUE;
    } else {
        frame_pending = FTDF_FALSE;
    }

    frame_header->options =
        (data_request->security_level > 0 ? FTDF_OPT_SECURITY_ENABLED : 0) |
        (data_request->ack_tx ? FTDF_OPT_ACK_REQUESTED : 0) |
        (frame_pending ? FTDF_OPT_FRAME_PENDING : 0) |
        ((data_request->frame_control_options & FTDF_PAN_ID_PRESENT) ?
            FTDF_OPT_PAN_ID_PRESENT : 0) |
        ((data_request->frame_control_options & FTDF_IES_INCLUDED) ?
            FTDF_OPT_IES_PRESENT : 0) |
        ((data_request->frame_control_options & FTDF_SEQ_NR_SUPPRESSED) ?
            FTDF_OPT_SEQ_NR_SUPPRESSED : 0);

    /* LE (CSL) and TSCH modes use the enhanced frame version. */
    if (ftdf_pib.le_enabled || ftdf_pib.tsch_enabled) {
        frame_header->options |= FTDF_OPT_ENHANCED;
    }

    frame_header->src_addr_mode = data_request->src_addr_mode;
    frame_header->src_pan_id = ftdf_pib.pan_id;
    frame_header->dst_addr_mode = data_request->dst_addr_mode;
    frame_header->dst_pan_id = data_request->dst_pan_id;
    frame_header->dst_addr = data_request->dst_addr;

    security_header->security_level = data_request->security_level;
    security_header->key_id_mode = data_request->key_id_mode;
    security_header->key_index = data_request->key_index;
    security_header->key_source = data_request->key_source;
    security_header->frame_counter = ftdf_pib.frame_counter;
    security_header->frame_counter_mode = ftdf_pib.frame_counter_mode;

#ifndef FTDF_NO_TSCH
    if (ftdf_pib.tsch_enabled) {
        frame_header->sn = ftdf_process_tsch_sn((ftdf_msg_buffer_t*)data_request,
                                                ftdf_pib.dsn,
                                                &data_request->requestSN);
    } else
#endif /* FTDF_NO_TSCH */
    {
        frame_header->sn = ftdf_pib.dsn;
    }

    ftdf_octet_t *tx_ptr = (ftdf_octet_t*) &FTDF->FTDF_TX_FIFO_0_0_REG +
        (FTDF_BUFFER_LENGTH * FTDF_TX_DATA_BUFFER);

    /* Skip PHY header (= MAC length) */
    tx_ptr++;

    ftdf_data_length_t msdu_length = data_request->msdu_length;

    tx_ptr = ftdf_add_frame_header(tx_ptr, frame_header, msdu_length);
    tx_ptr = ftdf_add_security_header(tx_ptr, security_header);

#if !defined(FTDF_NO_CSL) || !defined(FTDF_NO_TSCH)
    if (data_request->frame_control_options & FTDF_IES_INCLUDED) {
        tx_ptr = ftdf_add_ies(tx_ptr,
                              data_request->header_ie_list,
                              data_request->payload_ie_list,
                              data_request->msdu_length);
    }
#endif /* !FTDF_NO_CSL || !FTDF_NO_TSCH */

    ftdf_status_t status = ftdf_send_frame(ftdf_pib.current_channel,
                                           frame_header,
                                           security_header,
                                           tx_ptr,
                                           data_request->msdu_length,
                                           data_request->msdu);

    if (status != FTDF_SUCCESS) {
        /* Transmission could not be started; report and free the slot. */
        ftdf_send_data_confirm(data_request, status, 0, 0, 0, NULL);
        ftdf_req_current = NULL;
        return;
    }

    ftdf_nr_of_retries = 0;

    /* Only bump the DSN if this frame actually consumed it (TSCH may have
     * assigned a different sequence number). */
    if (frame_header->sn == ftdf_pib.dsn) {
        ftdf_pib.dsn++;
    }
}
/**
 * Apply trim values from OTP.
 *
 * @brief Writes the trim values located in the OTP to the corresponding system registers.
 *
 * Each TCS entry is stored as four 32-bit words: <address, ~address, value, ~value>.
 * An entry is valid when the inverted words match; on mismatch the LDO core voltage
 * is stepped (wrapping through 4 levels) and the same entry is re-read, up to 32
 * retries before a HW reset. On newer chip revisions an empty entry triggers a
 * second, backward scan of the TCS area (entries may have been appended from the
 * end); older revisions simply skip empty entries.
 *
 * @param[out] tcs_array The valid <address, value> pairs are placed in this buffer.
 * @param[out] valid_entries The number of valid pairs.
 *
 * @return True if at least one trim value has been applied, else false.
 *
 */
static bool apply_trim_values_from_otp(uint32_t *tcs_array, uint32_t *valid_entries)
{
        uint32_t address;
        uint32_t inverted_address;
        uint32_t value;
        uint32_t inverted_value;
        uint32_t *p;                    // read cursor into the OTP TCS section
        int i;
        int index = 0;                  // number of valid pairs written so far
        int vdd = 0;                    // current LDO_CORE_SETVDD level (0..3)
        int retries = 0;                // re-read attempts for the current entry
        bool forward_reading = true;    // false once the backward scan has started
        bool res = false;

        p = (uint32_t *)(OTP_HEADER_BASE_ADDR_IN_OTP + TCS_SECTION_OFFSET);

        for (i = 0; i < TCS_SECTION_LENGTH; i++) {
                /* Re-read the 4-word entry until it passes the inverted-word check
                 * (or it is empty, or the retry budget is exhausted). */
                do {
                        address = *p;
                        p++;
                        inverted_address = *p;
                        p++;
                        value = *p;
                        p++;
                        inverted_value = *p;
                        p++;

                        /* Empty entry: nothing to validate. */
                        if ((address == 0) && (value == 0)) {
                                break;
                        }

                        // Check validity
                        if ((address != ~inverted_address) || (value != ~inverted_value)) {
                                // Change LDO core voltage level and retry
                                vdd++;
                                vdd &= 0x3;
                                REG_SETF(CRG_TOP, LDO_CTRL1_REG, LDO_CORE_SETVDD, vdd);

                                // Wait for the voltage to settle...
                                SysTick->CTRL = 0;
                                SysTick->LOAD = 500;    // 500 * (62.5 * 4) = 125usec
                                SysTick->VAL = 0;
                                SysTick->CTRL = 0x5;    // Start using system clock

                                while ((SysTick->CTRL & SysTick_CTRL_COUNTFLAG_Msk) == 0) {}

                                // Adjust the read pointer (back to the start of this entry)
                                p -= 4;
                        }

                        retries++;

                        if (retries == 32) {
                                // Unrecoverable problem! Assert in development mode
                                ASSERT_WARNING(0);

                                // Unrecoverable problem! Issue a HW reset.
                                hw_cpm_reset_system();
                        }
                } while ((address != ~inverted_address) || (value != ~inverted_value));

                retries = 0;

                // Read the complete TCS area but skip empty entries.
                if ((address == 0) && (value == 0)) {
                        /* Newer revisions: an empty entry switches to a single backward
                         * scan from the end of the section; a second empty entry ends it. */
                        if ((BLACK_ORCA_TARGET_IC >= BLACK_ORCA_IC_VERSION(A, E))
                                        || ((dg_configUSE_AUTO_CHIP_DETECTION == 1)
                                                        && (CHIP_IS_AE || CHIP_IS_BA))) {
                                if (!forward_reading) {
                                        break;
                                }

                                forward_reading = false;
                                /* Jump to the last 4-word entry of the TCS section. */
                                p = (uint32_t *)(OTP_HEADER_BASE_ADDR_IN_OTP + TCS_SECTION_OFFSET);
                                p += (TCS_SECTION_LENGTH - 1) * 4;
                        } else {
                                (void)forward_reading;
                        }
                        continue;
                }

                /* Backward scan: the do-loop advanced p by one entry (+4 words);
                 * step back two entries (-8) so the next read is the previous one. */
                if (!forward_reading) {
                        p -= 8;
                }

                /* Apply the trim value and record the pair for the caller. */
                sys_tcs_store_pair(address, value);

                tcs_array[(index * 2) + 0] = address;
                tcs_array[(index * 2) + 1] = value;
                *valid_entries = index + 1;
                index++;

                res = true;
        }

        return res;
}
/**
 ****************************************************************************************
 * @brief Initialisation of ble core, pwr and clk
 *
 * The Hclk and Pclk are set
 *
 * Sequence: enable BLE/RFCU clocks, power up the radio, select and start the
 * low-power clock (XTAL32K or RCX20K, per OTP/CFG_LP_CLK), then release the BLE
 * LP reset and program the BLE core clock selection registers.
 ****************************************************************************************
 */
void init_pwr_and_clk_ble(void)
{
    /* Enable BLE and RF control unit clocks (undivided BLE clock). */
    SetBits16(CLK_RADIO_REG, BLE_DIV, 0);
    SetBits16(CLK_RADIO_REG, BLE_ENABLE, 1);
    SetBits16(CLK_RADIO_REG, RFCU_DIV, 1);
    SetBits16(CLK_RADIO_REG, RFCU_ENABLE, 1);

    /*
     * Power up BLE core & reset BLE Timers
     */
    /* RC32K is enabled as a temporary LP clock source while switching. */
    SetBits16(CLK_32K_REG, RC32K_ENABLE, 1);
    SetBits16(SYS_CTRL_REG, CLK32_SOURCE, 0);
    SetBits16(CLK_RADIO_REG, BLE_LP_RESET, 1);
    SetBits16(PMU_CTRL_REG, RADIO_SLEEP, 0);
    while (!(GetWord16(SYS_STAT_REG) & RAD_IS_UP)); // Just wait for radio to truely wake up

    select_lp_clk();

    if ( ((lp_clk_sel == LP_CLK_XTAL32) && (CFG_LP_CLK == LP_CLK_FROM_OTP)) || (CFG_LP_CLK == LP_CLK_XTAL32) )
    {
        SetBits16(CLK_32K_REG,  XTAL32K_ENABLE, 1); // Enable XTAL32KHz

        // Disable XTAL32 amplitude regulation in BOOST mode
        if (GetBits16(ANA_STATUS_REG, BOOST_SELECTED) == 0x1)
            SetBits16(CLK_32K_REG,  XTAL32K_DISABLE_AMPREG, 1);
        else
            SetBits16(CLK_32K_REG,  XTAL32K_DISABLE_AMPREG, 0);
        /* XTAL32K bias/current settings (values per vendor reference). */
        SetBits16(CLK_32K_REG,  XTAL32K_CUR, 5);
        SetBits16(CLK_32K_REG,  XTAL32K_RBIAS, 3);
        SetBits16(SYS_CTRL_REG, CLK32_SOURCE, 1); // Select XTAL32K as LP clock
    }
    else if ( ((lp_clk_sel == LP_CLK_RCX20) && (CFG_LP_CLK == LP_CLK_FROM_OTP)) || (CFG_LP_CLK == LP_CLK_RCX20) )
    {
        /* Configure and start the RCX20K oscillator as the LP clock. */
        SetBits16(CLK_RCX20K_REG, RCX20K_NTC, 0xB);
        SetBits16(CLK_RCX20K_REG, RCX20K_BIAS, 1);
        SetBits16(CLK_RCX20K_REG, RCX20K_TRIM, 0);
        SetBits16(CLK_RCX20K_REG, RCX20K_LOWF, 1);

        SetBits16(CLK_RCX20K_REG, RCX20K_ENABLE, 1);

        SetBits16(CLK_RCX20K_REG, RCX20K_SELECT, 1);

        SetBits16(SYS_CTRL_REG, CLK32_SOURCE, 0);

        SetBits16(CLK_32K_REG, XTAL32K_ENABLE, 0);  // Disable Xtal32KHz
    }
    else
        ASSERT_WARNING(0);  /* No valid LP clock configuration */

    SetBits16(CLK_32K_REG, RC32K_ENABLE, 0);    // Disable RC32KHz

    /* Release the BLE low-power reset now that the LP clock is running. */
    SetBits16(CLK_RADIO_REG, BLE_LP_RESET, 0);

    /* NOTE(review): DCDC_CTRL3_REG write in BOOST mode — value per vendor
     * reference; exact semantics not visible here. */
    if (GetBits16(ANA_STATUS_REG, BOOST_SELECTED) == 0x1)
        SetWord16(DCDC_CTRL3_REG, 0x5);

    /*
     * Just make sure that BLE core is stopped (if already running)
     */
    SetBits32(BLE_RWBTLECNTL_REG, RWBLE_EN, 0);

    /*
     * Since BLE is stopped (and powered), set CLK_SEL
     */
    SetBits32(BLE_CNTL2_REG, BLE_CLK_SEL, 16);
    SetBits32(BLE_CNTL2_REG, BLE_RSSI_SEL, 1);
}
/**
 * @brief FreeRTOS tickless-idle hook: puts the system to sleep for up to
 *        xExpectedIdleTime ticks.
 *
 * Falls back to a plain WFI when the LP clock is not yet available, handles a
 * pending RCX calibration update, then enters a critical section and — if the
 * kernel still agrees — sleeps via pm_sleep_enter() (with a wake-up deadline
 * only when a task is actually waiting on a timeout).
 *
 * @param[in] xExpectedIdleTime  Expected idle duration in OS ticks.
 */
void prvSystemSleep( TickType_t xExpectedIdleTime )
{
        uint32_t ulSleepTime;
        eSleepModeStatus eSleepStatus;

        /* A simple WFI() is executed in any of the cases below:
         * 1. the system has just booted and the dg_configINITIAL_SLEEP_DELAY_TIME has not yet
         *    passed
         * 2. the XTAL32K is used as the LP clock, the system has just woke up after clockless
         *    sleep and the LP clock has not yet settled.
         */
        if( !cm_lp_clk_is_avail() ) {
                __WFI();                // Wait for an interrupt...
                return;
        }

        if (dg_configUSE_LP_CLK == LP_CLK_RCX) {
                // Update if a previous calibration was running and is finished.
                if (cm_rcx_calibration_is_on) {
                        if (cm_calibrate_rcx_update()) {
                                /* Calibration result consumed this idle slot; skip sleeping. */
                                return;
                        }
                }
        }

        /*
         * Calculate the sleep time
         */
        ulSleepTime = pm_conv_ticks_2_prescaled_lpcycles(xExpectedIdleTime);

        /* Enter a critical section that will not effect interrupts bringing the MCU
         * out of sleep mode.
         */
        taskDISABLE_INTERRUPTS();

        DBG_CONFIGURE_LOW(CMN_TIMING_DEBUG, CMNDBG_CRITICAL_SECTION);
        DBG_SET_HIGH(CPM_USE_TIMING_DEBUG, CPMDBG_SLEEP_ENTER);

        /* Ensure it is still ok to enter the sleep mode. */
        eSleepStatus = eTaskConfirmSleepModeStatus();

        if( eSleepStatus == eAbortSleep ) {
                DBG_SET_LOW(CPM_USE_TIMING_DEBUG, CPMDBG_SLEEP_ENTER);

                /* A task has been moved out of the Blocked state since this macro was
                 * executed, or a context switch is being held pending. Do not enter a
                 * sleep state. Restart the tick and exit the critical section. */
                taskENABLE_INTERRUPTS();
        }
        else {
#if (dg_configIMAGE_SETUP == DEVELOPMENT_MODE)
                uint32_t primask;
#endif

                if( eSleepStatus == eNoTasksWaitingTimeout ) {
                        /* It is not necessary to configure an interrupt to bring the
                         * microcontroller out of its low power state at a fixed time in the
                         * future.
                         * Enter the low power state.
                         */
                        pm_sleep_enter( 0 );
                }
                else {
                        /* Configure an interrupt to bring the microcontroller out of its low
                         * power state at the time the kernel next needs to execute.
                         * Enter the low power state.
                         */
                        pm_sleep_enter( ulSleepTime );
                }

#if (dg_configIMAGE_SETUP == DEVELOPMENT_MODE)
                /* If the code stops at this point then the interrupts were enabled while they
                 * shouldn't be so.
                 */
                primask = __get_PRIMASK();
                ASSERT_WARNING(primask == 1);
#endif

                /* Wake-up! */
                pm_system_wake_up();
        }
}
/**
 * @brief System Initialization and creation of the BLE task
 *
 * One-shot boot task: brings up system/LP clocks, the watchdog, hardware and
 * adapters (NVMS, BLE adapter, BLE manager), sets the sleep policy, spawns the
 * BLE application task, and finally deletes itself.
 *
 * @param[in] pvParameters  Unused FreeRTOS task parameter.
 */
static void system_init( void *pvParameters )
{
        /* Prepare clocks. Note: cm_cpu_clk_set() and cm_sys_clk_set() can be called only from a
         * task since they will suspend the task until the XTAL16M has settled and, maybe, the PLL
         * is locked.
         */
        cm_sys_clk_init(sysclk_XTAL16M);
        cm_apb_set_clock_divider(apb_div1);
        cm_ahb_set_clock_divider(ahb_div1);
        cm_lp_clk_init();

        /*
         * Initialize platform watchdog
         */
        sys_watchdog_init();

#if dg_configUSE_WDOG
        // Register the Idle task first.
        // NOTE(review): idle_task_wdog_id is presumably a file-scope variable — not declared here.
        idle_task_wdog_id = sys_watchdog_register(false);
        ASSERT_WARNING(idle_task_wdog_id != -1);
        sys_watchdog_configure_idle_id(idle_task_wdog_id);
#endif

        /* Set system clock */
        cm_sys_clk_set(sysclk_XTAL16M);

        /* Prepare the hardware to run this demo. */
        prvSetupHardware();

        /* init resources */
        resource_init();

        /* init GPADC adapter */
        GPADC_INIT();

        /* Set the desired sleep mode. */
        pm_set_wakeup_mode(true);
        pm_set_sleep_mode(pm_mode_extended_sleep);

        /* Initialize NVMS adapter - has to be done before BLE starts */
        ad_nvms_init();

        /* Initialize BLE Adapter */
        ad_ble_init();

        /* Initialize BLE Manager */
        ble_mgr_init();

        /* Start the BLE Peripheral application task. */
        /* NOTE(review): `handle` is presumably a file-scope task handle — not declared here. */
        OS_TASK_CREATE("BLE Peripheral",                /* The text name assigned to the task, for
                                                           debug only; not used by the kernel. */
                       ble_att_perm_test_task,          /* The function that implements the task. */
                       NULL,                            /* The parameter passed to the task. */
                       200 * OS_STACK_WORD_SIZE,        /* The number of bytes to allocate to the
                                                           stack of the task. */
                       mainBLE_ATT_PERM_TEST_TASK_PRIORITY,/* The priority assigned to the task. */
                       handle);                         /* The task handle. */
        OS_ASSERT(handle);

        /* the work of the SysInit task is done */
        OS_TASK_DELETE(OS_GET_CURRENT_TASK());
}
/**
 * Spin-waits (with yields) until g_ExceptionCaught is observed set, giving up
 * after c_Timeout iterations. Asserts (warning) if the timeout was reached
 * before the exception was caught.
 */
inline void WaitForException () {
    int spins = 0;
    for ( ++spins;
          spins < c_Timeout && !__TBB_load_with_acquire(g_ExceptionCaught);
          ++spins )
    {
        __TBB_Yield();
    }
    ASSERT_WARNING( spins < c_Timeout, "WaitForException failed" );
}