Esempio n. 1
0
//! This test first starts the secondary CPUs in order from 1 through (core count - 1). When each
//! secondary CPU starts, it executes this function. The first thing this function does for secondary
//! CPUs is to enable interrupts for the CPU in the GIC.
//!
//! When the last CPU enters this function, it will start a loop of sending software interrupts
//! to all cores in sequence by sending the first SGI to core 0. As each core handles the SGI,
//! it will print a message and send an SGI to the next CPU in sequence.
//!
//! @param arg Unused; present to match the cpu_start_secondary() entry-point signature.
void multicore_entry(void * arg)
{
    (void)arg;  // entry-point signature requires the parameter; it is not used

    uint32_t cpu_id = cpu_get_current();
    int cpuCount = cpu_get_cores();

    if (cpuCount == 1)
    {
        printf("This chip only has one CPU!\n");
        return;
    }

    if (cpu_id != 0)
    {
        // Enable interrupts on this secondary CPU in the GIC.
        gic_init_cpu();
    }

    // primary cpu
    if (cpu_id == 0)
    {
        // Flag is cleared by the last CPU's SGI handler to signal completion.
        // NOTE(review): isTestDone must be declared volatile (or atomic), otherwise
        // the compiler may fold the wait loop below into an infinite loop — verify
        // its declaration, which is outside this file view.
        isTestDone = 1;

        // Register the SGI handler before any core can raise the interrupt.
        register_interrupt_routine(SW_INTERRUPT_3, SGI3_ISR);

        printf("Running the GIC Multicore Test \n");
        printf("Starting and sending SGIs to secondary CPUs for \"hello world\" \n\n");

        // Start the first secondary CPU; each secondary starts the next in turn.
        cpu_start_secondary(1, &multicore_entry, 0);

        // cpu0 waits until the test is done, i.e. until the last CPU completes its SGI.
        while (isTestDone);

        // Put the secondary cores back into reset. Iterate over the real core
        // count instead of hard-coding cores 1..3, so parts with fewer cores
        // are handled correctly.
        for (int core = 1; core < cpuCount; ++core)
        {
            cpu_disable(core);
        }

        printf("\nEnd of test\n");
    }
    // other cpus
    else
    {
        printf("secondary main cpu: %d\n", cpu_id);

        if (cpu_id == (cpuCount - 1))
        {
            // Last core started: kick off the SGI chain by signalling CPU 0
            // (target-list value 1 == bit 0 == CPU 0).
            gic_send_sgi(SW_INTERRUPT_3, 1, kGicSgiFilter_UseTargetList);
        }
        else
        {
            // Start the next secondary core in sequence.
            cpu_start_secondary(cpu_id + 1, &multicore_entry, 0);
        }

        // Spin here; all further work happens in the SGI handler.
        while (1);
    }
}
Esempio n. 2
0
//! Common startup entry executed by every core.
//!
//! Looks up the per-core startup record for the calling core and, when an
//! entry function has been registered there, invokes it with its saved
//! argument. Cores with no registered entry simply return.
static void common_cpu_entry(void)
{
    uint32_t coreNumber = cpu_get_current();
    core_startup_info_t * startup = &s_core_info[coreNumber];

    // Nothing registered for this core — nothing to do.
    if (!startup->entry)
    {
        return;
    }

    startup->entry(startup->arg);
}
Esempio n. 3
0
//! Initialize this FlexCAN controller: bit timing, RX FIFO, interrupts and filters.
//!
//! Idempotent — subsequent calls after the first are no-ops.
//!
//! @param baudrate Desired bus bitrate; mapped to FlexCAN timing via mapToFlexcanBitrate().
void CAN_Ctrl::init(uint32_t baudrate){
	if(initialized){
		return;
	}
	initialized=true;

	can_init(can, CAN_LAST_MB);

	// Program the bit timing for the requested baudrate.
	imx_flexcan canmodule;
	can_set_can_attributes(&canmodule,mapToFlexcanBitrate(baudrate),can);
	can_update_bitrate(&canmodule);

	// Clear any pending message-buffer interrupt flags.
	for(int i=0;i<CAN_NUMBER_OF_BUFFERS;i++){
		can_mb_int_ack(can,i);
	}

	// Configure the RX FIFO.
	HW_FLEXCAN_MCR_SET(can->instance,BM_FLEXCAN_MCR_FEN); //set RFEN
	HW_FLEXCAN_MCR_SET(can->instance,BM_FLEXCAN_MCR_BCC);//set IRMQ
	HW_FLEXCAN_MCR_CLR(can->instance,BM_FLEXCAN_MCR_IDAM); // filter format A
	REG_SET(REG_RXFGMASK(can->base),~0);
	// Configure RFFN (number of RX FIFO filters).
	REG_SET(REG_CTRL2(can->base),0xF <<REG_CTRL2_RFFN_SHIFT);

	// Configure other flags.
	HW_FLEXCAN_MCR_SET(can->instance,BM_FLEXCAN_MCR_SRX_DIS); //No self recv
	REG_SET(REG_CTRL2(can->base),REG_CTRL2_RRS_MASK);

	// Configure the interrupt for this controller instance.
	irq_hdlr_t handler;
	uint32_t irqid;

	if(this==&CANs[0]){
		irqid=IMX_INT_FLEXCAN1;
		handler=&CAN1_IRQHandler;
	}else if(this==&CANs[1]){
		irqid=IMX_INT_FLEXCAN2;
		handler=&CAN2_IRQHandler;
	}else{
		// BUG FIX: previously an unknown instance fell through with irqid and
		// handler uninitialized (undefined behavior when registering the IRQ).
		// Bail out instead of registering garbage.
		return;
	}
	register_interrupt_routine(irqid,handler);
	enable_interrupt(irqid,cpu_get_current(),128);
	gic_set_irq_security(irqid,false);

	setupFilters();

	can_exit_freeze(can);
	can_enable_mb_interrupt(can,FIFO_FLAG_DATARDY);
}
Esempio n. 4
0
//! SGI3 interrupt handler for the multicore test.
//!
//! Prints a greeting from the current CPU, then forwards the SGI to the next
//! CPU in sequence. The final CPU instead clears isTestDone, which releases
//! CPU 0 from its wait loop.
void SGI3_ISR(void)
{
    uint32_t cpu_id = cpu_get_current();
    uint32_t lastCpu = (uint32_t)(cpu_get_cores() - 1);

    printf("Hello from CPU %d\n", cpu_id);

    if (cpu_id == lastCpu)
    {
        // End of the chain: signal CPU 0 that the test is complete.
        isTestDone = 0;
    }
    else
    {
        // Forward the SGI to the next core; the target list is a CPU bitmask.
        gic_send_sgi(SW_INTERRUPT_3, (1 << (cpu_id + 1)), kGicSgiFilter_UseTargetList);
    }
}
Esempio n. 5
0
File: sched.c Progetto: giszo/urubu
// =====================================================================================================================
//! Timer IRQ handler that performs the preemptive context switch.
//!
//! Saves the interrupted thread's kernel stack pointer, selects the next
//! thread, updates the per-CPU TSS and address space, then switches to the
//! new thread. Control does not come back here via the normal IRQ return
//! path (see the EOI comment below); the trailing return satisfies the
//! compiler only.
int sched_irq(int irq, void* data, struct irq_context* ctx)
{
    // We need to send EOI on the PIC here because this function will not return to the common IRQ handler path ...
    pic_send_eoi(0);

    struct cpu* curr_cpu = cpu_get_current();
    struct thread* curr = curr_cpu->thread_current;
    struct amd64_thread* t_arch;

    // Save the kernel stack pointer of the previous thread. The irq_context
    // lives on that thread's kernel stack, so its address is the value to
    // restore later. curr may be NULL (e.g. before the first thread runs).
    if (curr)
    {
        t_arch = (struct amd64_thread*)curr->arch_data;
        t_arch->rsp = (uint64_t)ctx;
    }

    // Select the next thread to run.
    struct thread* next = sched_next(curr_cpu, curr);

    // Update the TSS structure of the current CPU so that ring transitions
    // land on the new thread's kernel stack (rsp0 = top of that stack).
    struct amd64_cpu* arch_cpu = (struct amd64_cpu*)curr_cpu->arch_data;
    arch_cpu->tss.rsp0 = (ptr_t)next->kernel_stack + next->kernel_stack_size;

    // Switch to the address space of the new process; threads without an
    // associated process keep the current address space.
    if (next->proc)
    {
	struct amd64_process* proc_arch = (struct amd64_process*)next->proc->arch_data;
	cpu_set_cr3(proc_arch->cr3);
    }

    // Switch to the next thread. This call does not return here.
    t_arch = (struct amd64_thread*)next->arch_data;
    sched_arch_switch_to(t_arch->rsp);

    return 0;
}
Esempio n. 6
0
//! Queue an async call on the current CPU's work queue for deferred
//! (second-stage) interrupt processing, then release the CPU reference.
void cpu_interrupt_schedule_stage2(struct async_call *call)
{
	struct cpu *cur = cpu_get_current();
	workqueue_insert(&cur->work, call);
	cpu_put_current(cur);
}
//! Bring up the memory system for the calling core: MMU, branch prediction,
//! and L1 caches on every core; the SCU and shared L2 cache on core 0 only.
//!
//! NOTE(review): each subsystem follows a strict disable -> invalidate ->
//! enable sequence; do not reorder these calls.
void init_memory_system(){

	// Secondary cores join SMP coherency and enable maintenance-operation
	// broadcast before touching their MMU/caches.
	if(cpu_get_current() !=0){
		scu_join_smp();
		scu_enable_maintenance_broadcast();
	}


	/****************************************
	 * MMU
	 */
	// Disable MMU
	mmu_disable();

	// Initiate MMU - initiate the peripherals
	mmu_init();

	// Enable MMU
	mmu_enable();


	/****************************************
	 * Branch prediction
	 */
	// Disable branch prediction
	arm_branch_prediction_disable();

	// Invalidate branch prediction array
	arm_branch_target_cache_invalidate();

	// Branch Prediction Enable
	arm_branch_prediction_enable();

	/****************************************
	 * Data Cache
	 */
	// Disable L1 Data Caches
	arm_dcache_disable();

	// Invalidate Data cache
	arm_dcache_invalidate();

	// Enable Data cache
	arm_dcache_enable();

	/****************************************
	 * Instruction Cache
	 */
	// Disable L1 Instruction cache
	arm_icache_disable();

	// Invalidate Instruction cache
	arm_icache_invalidate();

	// Enable Instruction cache
	arm_icache_enable();

	/****************************************
	 * L2 Cache
	 */
	// The L2 cache and SCU are shared resources; only core 0 sets them up.
	if(cpu_get_current() == 0){
		// Disable L2
		_l2c310_cache_disable();

		// Set up L2 cache
		_l2c310_cache_setup();

		// Invalidate L2 cache
		_l2c310_cache_invalidate();

		// Enable L2 cache
		_l2c310_cache_enable();

		// Enable the SCU, then have core 0 join SMP coherency as well.
		scu_enable();
		scu_join_smp();
	}



}