Example #1
0
/* One-shot initialization task for DPORT access bookkeeping on core 0
 * (PRO CPU). Runs once at startup and then deletes itself.
 *
 * arg: unused FreeRTOS task parameter.
 */
static void dport_access_init_core0(void *arg)
{
    int core_id = xPortGetCoreID();

    /* This task must have been pinned to core 0. */
    assert(core_id == 0);

    /* Initialize the spinlock that serializes DPORT register access
       between the two cores. */
    vPortCPUInitializeMutex(&g_dport_mux);

    /* Route the FROM_CPU_INTR2 software-interrupt source to this core's
       DPORT interrupt line while the line is masked, then unmask it.
       NOTE(review): presumably ETS_FROM_CPU_INTR2_SOURCE is the source
       reserved for DPORT cross-core signalling — confirm against the
       core-1 counterpart of this function. */
    ESP_INTR_DISABLE(ETS_DPORT_INUM);
    intr_matrix_set(core_id, ETS_FROM_CPU_INTR2_SOURCE, ETS_DPORT_INUM);
    ESP_INTR_ENABLE(ETS_DPORT_INUM);

    /* Reset this core's DPORT access bookkeeping and mark it running. */
    dport_access_ref[core_id] = 0;
    dport_access_start[core_id] = 0;
    dport_access_end[core_id] = 0;
    dport_core_state[core_id] = DPORT_CORE_STATE_RUNNING;

    /* Initialization is complete; the one-shot task removes itself. */
    vTaskDelete(NULL);
}
Example #2
0
/*
Initialize the heap allocator. We pass it a bunch of region descriptors, but we need to modify those first to accommodate for
the data as loaded by the bootloader.
ToDo: The regions are different when stuff like trace memory, BT, ... is used. Modify the regions struct on the fly for this.
Same with loading of apps. Same with using SPI RAM.
*/
void heap_caps_init()
{
    /* Copy the soc_memory_regions data to the stack, so we can
       manipulate it. */
    soc_memory_region_t regions[soc_memory_region_count];
    memcpy(regions, soc_memory_regions, sizeof(soc_memory_region_t)*soc_memory_region_count);

    //Disable the bits of memory where this code is loaded.
    disable_mem_region(regions, (intptr_t)&_data_start, (intptr_t)&_heap_start);           //DRAM used by bss/data static variables
    disable_mem_region(regions, (intptr_t)&_init_start, (intptr_t)&_iram_text_end);        //IRAM used by code

    // Disable all regions reserved on this SoC
    for (int i = 0; i < soc_reserved_region_count; i++) {
        disable_mem_region(regions, soc_reserved_regions[i].start,
                           soc_reserved_regions[i].end);
    }

    //The heap allocator will treat every region given to it as separate. In order to get bigger ranges of contiguous memory,
    //it's useful to coalesce adjacent regions that have the same type.

    for (int i = 1; i < soc_memory_region_count; i++) {
        soc_memory_region_t *a = &regions[i - 1];
        soc_memory_region_t *b = &regions[i];
        if (b->start == a->start + a->size && b->type == a->type ) {
            /* Merge a into b: extend b backwards over a's range and mark
               a as dead with type -1 so later loops skip it. */
            a->type = -1;
            b->start = a->start;
            b->size += a->size;
        }
    }

    /* Count the heaps left after merging */
    num_registered_heaps = 0;
    for (int i = 0; i < soc_memory_region_count; i++) {
        if (regions[i].type != -1) {
            num_registered_heaps++;
        }
    }

    /* Start by allocating the registered heap data on the stack.

       Once we have a heap to copy it to, we will copy it to a heap buffer.
    */
    heap_t temp_heaps[num_registered_heaps];
    size_t heap_idx = 0;

    ESP_EARLY_LOGI(TAG, "Initializing. RAM available for dynamic allocation:");
    for (int i = 0; i < soc_memory_region_count; i++) {
        soc_memory_region_t *region = &regions[i];

        /* Skip regions merged away above. Doing this check FIRST also
           avoids forming &soc_memory_types[-1] for dead regions, which is
           out-of-bounds pointer arithmetic (undefined behavior even when
           the pointer is never dereferenced). */
        if (region->type == -1) {
            continue;
        }
        const soc_memory_type_desc_t *type = &soc_memory_types[region->type];
        heap_t *heap = &temp_heaps[heap_idx];
        heap_idx++;
        assert(heap_idx <= num_registered_heaps);

        heap->type = region->type;
        heap->start = region->start;
        heap->end = region->start + region->size;
        memcpy(heap->caps, type->caps, sizeof(heap->caps));
        vPortCPUInitializeMutex(&heap->heap_mux);

        ESP_EARLY_LOGI(TAG, "At %08X len %08X (%d KiB): %s",
                       region->start, region->size, region->size / 1024, type->name);

        if (type->startup_stack) {
            /* Will be registered when OS scheduler starts */
            heap->heap = NULL;
        } else {
            register_heap(heap);
        }
    }

    assert(heap_idx == num_registered_heaps);

    /* Allocate the permanent heap data that we'll use for runtime */
    registered_heaps = NULL;
    for (int i = 0; i < num_registered_heaps; i++) {
        if (heap_caps_match(&temp_heaps[i], MALLOC_CAP_8BIT)) {
            /* use the first DRAM heap which can fit the data */
            registered_heaps = multi_heap_malloc(temp_heaps[i].heap, sizeof(heap_t) * num_registered_heaps);
            if (registered_heaps != NULL) {
                break;
            }
        }
    }
    assert(registered_heaps != NULL); /* if NULL, there's not enough free startup heap space */

    memcpy(registered_heaps, temp_heaps, sizeof(heap_t)*num_registered_heaps);

    /* Now the heap_mux fields live on the heap, assign them */
    for (int i = 0; i < num_registered_heaps; i++) {
        if (registered_heaps[i].heap != NULL) {
            multi_heap_set_lock(registered_heaps[i].heap, &registered_heaps[i].heap_mux);
        }
    }
}