/*
 * Register the memory range described by 'region' with the multi-heap
 * allocator and store the resulting handle in region->heap.
 * Logs the new heap's address (early log, safe before full init) when
 * registration returns a non-NULL handle.
 */
static void register_heap(heap_t *region)
{
    region->heap = multi_heap_register((void *)region->start, region->end - region->start);
    if (region->heap == NULL) {
        return;
    }
    ESP_EARLY_LOGD(TAG, "New heap initialised at %p", region->heap);
}
/*
 * Prepare the requested RTC slow clock source.
 *
 * For RTC_SLOW_FREQ_32K_XTAL the external 32 kHz crystal oscillator is
 * enabled and this function busy-waits until the oscillator is confirmed
 * running; other sources require no start-up wait here.
 *
 * Fixes vs. original: added the missing ';' after the first ESP_EARLY_LOGD
 * call and the missing function-closing brace (both compile errors in the
 * original text).
 */
static void select_rtc_slow_clk(rtc_slow_freq_t slow_clk)
{
    if (slow_clk == RTC_SLOW_FREQ_32K_XTAL) {
        /* 32k XTAL oscillator needs to be enabled and running before it can
         * be used. Hardware doesn't have a direct way of checking if the
         * oscillator is running. Here we use rtc_clk_cal function to count
         * the number of main XTAL cycles in the given number of 32k XTAL
         * oscillator cycles. If the 32k XTAL has not started up, calibration
         * will time out, returning 0. */
        rtc_clk_32k_enable(true);
        uint32_t cal_val = 0;
        uint32_t wait = 0;
        // increment of 'wait' counter equivalent to 3 seconds
        const uint32_t warning_timeout = 3 /* sec */ * 32768 /* Hz */ / (2 * XTAL_32K_DETECT_CYCLES);
        ESP_EARLY_LOGD(TAG, "waiting for 32k oscillator to start up");
        do {
            ++wait;
            cal_val = rtc_clk_cal(RTC_CAL_32K_XTAL, XTAL_32K_DETECT_CYCLES);
            /* Warn roughly every 3 seconds while the oscillator has not started. */
            if (wait % warning_timeout == 0) {
                ESP_EARLY_LOGW(TAG, "still waiting for 32k oscillator to start up");
            }
        } while (cal_val == 0); /* 0 => calibration timed out: oscillator not running yet */
        ESP_EARLY_LOGD(TAG, "32k oscillator ready, wait=%d", wait);
    }
}
/*
 * Default RMT driver interrupt handler.
 *
 * Interrupt status bits 0..23 carry three events per channel
 * (bit = channel * 3 + event): 0 = TX end, 1 = RX end, 2 = error.
 * Bits 24..31 are per-channel TX-threshold events used to refill the
 * channel's RMT RAM while a long transmission is in progress.
 *
 * Fixes vs. original: restored the missing function-closing brace
 * (extraction artifact / compile error) and removed an empty 'else { }'
 * branch after the ringbuffer-full check. All register accesses, their
 * ordering, and every log string are unchanged.
 */
static void IRAM_ATTR rmt_driver_isr_default(void* arg)
{
    uint32_t intr_st = RMT.int_st.val;
    uint32_t i = 0;
    uint8_t channel;
    portBASE_TYPE HPTaskAwoken = 0;
    for (i = 0; i < 32; i++) {
        if (i < 24) {
            if (intr_st & BIT(i)) {
                channel = i / 3;
                rmt_obj_t* p_rmt = p_rmt_obj[channel];
                switch (i % 3) {
                //TX END
                case 0:
                    ESP_EARLY_LOGD(RMT_TAG, "RMT INTR : TX END");
                    xSemaphoreGiveFromISR(p_rmt->tx_sem, &HPTaskAwoken);
                    if (HPTaskAwoken == pdTRUE) {
                        portYIELD_FROM_ISR();
                    }
                    /* Reset transmit bookkeeping; tx_data == NULL marks "no pending data". */
                    p_rmt->tx_data = NULL;
                    p_rmt->tx_len_rem = 0;
                    p_rmt->tx_offset = 0;
                    p_rmt->tx_sub_len = 0;
                    break;
                //RX_END
                case 1:
                    ESP_EARLY_LOGD(RMT_TAG, "RMT INTR : RX END");
                    RMT.conf_ch[channel].conf1.rx_en = 0;
                    int item_len = rmt_get_mem_len(channel);
                    //change memory owner to protect data.
                    RMT.conf_ch[channel].conf1.mem_owner = RMT_MEM_OWNER_TX;
                    if (p_rmt->rx_buf) {
                        /* item_len counts rmt items; * 4 converts to bytes for the ringbuffer. */
                        BaseType_t res = xRingbufferSendFromISR(p_rmt->rx_buf, (void*) RMTMEM.chan[channel].data32, item_len * 4, &HPTaskAwoken);
                        if (res == pdFALSE) {
                            ESP_EARLY_LOGE(RMT_TAG, "RMT RX BUFFER FULL");
                        }
                        if (HPTaskAwoken == pdTRUE) {
                            portYIELD_FROM_ISR();
                        }
                    } else {
                        ESP_EARLY_LOGE(RMT_TAG, "RMT RX BUFFER ERROR\n");
                    }
                    /* Reset the write pointer and hand the memory block back to the receiver. */
                    RMT.conf_ch[channel].conf1.mem_wr_rst = 1;
                    RMT.conf_ch[channel].conf1.mem_owner = RMT_MEM_OWNER_RX;
                    RMT.conf_ch[channel].conf1.rx_en = 1;
                    break;
                //ERR
                case 2:
                    ESP_EARLY_LOGE(RMT_TAG, "RMT[%d] ERR", channel);
                    ESP_EARLY_LOGE(RMT_TAG, "status: 0x%08x", RMT.status_ch[channel]);
                    /* Mask this error interrupt so a stuck error condition cannot storm. */
                    RMT.int_ena.val &= (~(BIT(i)));
                    break;
                default:
                    break;
                }
                RMT.int_clr.val = BIT(i);
            }
        } else {
            /* Bits 24..31: TX threshold event for channel (i - 24). */
            if (intr_st & (BIT(i))) {
                channel = i - 24;
                rmt_obj_t* p_rmt = p_rmt_obj[channel];
                RMT.int_clr.val = BIT(i);
                ESP_EARLY_LOGD(RMT_TAG, "RMT CH[%d]: EVT INTR", channel);
                if (p_rmt->tx_data == NULL) {
                    //skip
                } else {
                    const rmt_item32_t* pdata = p_rmt->tx_data;
                    int len_rem = p_rmt->tx_len_rem;
                    if (len_rem >= p_rmt->tx_sub_len) {
                        /* A full sub-block of data remains: copy it into RMT RAM. */
                        rmt_fill_memory(channel, pdata, p_rmt->tx_sub_len, p_rmt->tx_offset);
                        p_rmt->tx_data += p_rmt->tx_sub_len;
                        p_rmt->tx_len_rem -= p_rmt->tx_sub_len;
                    } else if (len_rem == 0) {
                        /* Nothing left: write a zero item to terminate the sequence. */
                        RMTMEM.chan[channel].data32[p_rmt->tx_offset].val = 0;
                    } else {
                        /* Final partial chunk: copy it, then terminate with a zero item. */
                        rmt_fill_memory(channel, pdata, len_rem, p_rmt->tx_offset);
                        RMTMEM.chan[channel].data32[p_rmt->tx_offset + len_rem].val = 0;
                        p_rmt->tx_data += len_rem;
                        p_rmt->tx_len_rem -= len_rem;
                    }
                    /* Ping-pong between the two halves (offsets 0 and tx_sub_len) of the RAM block. */
                    if (p_rmt->tx_offset == 0) {
                        p_rmt->tx_offset = p_rmt->tx_sub_len;
                    } else {
                        p_rmt->tx_offset = 0;
                    }
                }
            }
        }
    }
}