void
sol_interrupt_scheduler_process(msg_t *msg)
{
    unsigned int state;

    switch (msg->type) {
#ifdef USE_GPIO
    case GPIO: {
        struct gpio_interrupt_data *int_data = (void *)msg->content.ptr;

        state = irq_disable();
        int_data->base.pending = false;
        irq_restore(state);

        if (int_data->base.deleted)
            interrupt_scheduler_handler_free(int_data);
        else
            int_data->cb((void *)int_data->data);
        break;
    }
#endif
#ifdef USE_UART
    case UART_RX: {
        struct uart_interrupt_data *int_data = (void *)msg->content.ptr;
        uint16_t start, end, len;

        state = irq_disable();
        start = int_data->buf_next_read;
        end = int_data->buf_next_write;
        len = int_data->buf_len;
        int_data->base.pending = false;
        irq_restore(state);

        int_data->base.in_cb = true;
        while (!int_data->base.deleted) {
            if (start == end)
                break;
            int_data->rx_cb((void *)int_data->data, int_data->buf[start]);
            start = (start + 1) % len;
        }
        int_data->base.in_cb = false;
        if (int_data->base.deleted)
            interrupt_scheduler_handler_free(int_data);
        else
            int_data->buf_next_read = start;
        break;
    }
#endif
#ifdef NETWORK
    case GNRC_NETAPI_MSG_TYPE_RCV:
    case GNRC_NETAPI_MSG_TYPE_SND:
    case GNRC_NETAPI_MSG_TYPE_SET:
    case GNRC_NETAPI_MSG_TYPE_GET:
        sol_network_msg_dispatch(msg);
        break;
#endif
    }
}
Example #2
int eint_irq_disable(unsigned int group, unsigned int number)
{
	u32 reg_value = 0;
	volatile u32 *reg_addr = NULL;

	if ((group == 1) || (group == 2) || (group == 7))
	{
		if (group == 7)
		{
			group = 3;  /* GPIO G */
		}

		/* clear the pin's external-interrupt enable bit */
		reg_addr = SUNXI_PIO_EINT_EN(group);
		reg_value = *reg_addr;
		reg_value &= ~(0x01 << number);
		*reg_addr = reg_value;

		/* clear the pin's trigger configuration */
		reg_addr = SUNXI_PIO_EINT_CFG(group);
		reg_value = *reg_addr;
		reg_value &= ~(0x0f << (4 * number));
		*reg_addr = reg_value;
	//	*((volatile unsigned int *)( SUNXI_EINT_GPIO_B_INT_CFG)) = reg_value ;

		/* then disable the corresponding GPIO EINT line in the GIC */
		switch (group)
		{
			case 1:
				irq_disable(AW_IRQ_EINTA);
				printf("disable EINT_A\n");
				break;
			case 2:
				irq_disable(AW_IRQ_EINTB);
				printf("disable EINT_B\n");
				break;
			case 3:
				irq_disable(AW_IRQ_EINTG);
				printf("disable EINT_G\n");
				break;
			default:
				break;
		}
		return 0;
	}
	else
	{
		printf("eint_irq_disable_err: group has no EINT capability\n");
		return -1;
	}
}
Example #3
void kmain()
{
	irq_disable();
	/*
	 * The first thing to do is bring up the whole memory manager.
	 */
	mm_init();
	arch_early_init();
	ioremap_init();
	irq_init();
	sched_init();
	timer_init();
	/*
	 * At this point the memory manager and the scheduler are ready, so we
	 * can enable interrupts, which the drivers may use.
	 */
	irq_enable();

	/* Bring up the platform drivers */
	arch_setup();

	/* Request a mode if a framebuffer exists */
	fb_set_mode();
	/* Start the console on top of the framebuffer */
	fb_console_init();
	kernel_info();

#if 1
	irq_disable();
	semaphore_init(&sem, 1);
	create_task("a", 4);
	create_task("b", 5);
	create_task("c", 6);
	create_task("d", 7);
	create_task("b", 8);
	create_task("b", 9);
	irq_enable();
	/* Sit back and wait for the context switches */
#endif
	/* Since we want to print while debugging the driver, initialize it now */
	//bcm2835_emmc_init();
	for (;;) {
		led_blink();
		//printk("-");
	}
}
Example #4
bool x86_rtc_set_periodic(uint8_t hz, uint32_t msg_content, kernel_pid_t target_pid, bool allow_replace)
{
    if (!valid) {
        return false;
    }

    unsigned old_status = irq_disable();
    bool result;
    if (target_pid == KERNEL_PID_UNDEF || hz == RTC_REG_A_HZ_OFF) {
        result = true;
        periodic_pid = KERNEL_PID_UNDEF;

        uint8_t old_divider = x86_cmos_read(RTC_REG_A) & ~RTC_REG_A_HZ_MASK;
        x86_cmos_write(RTC_REG_A, old_divider | RTC_REG_A_HZ_OFF);
        x86_cmos_write(RTC_REG_B, x86_cmos_read(RTC_REG_B) & ~RTC_REG_B_INT_PERIODIC);
    }
    else {
        result = allow_replace || periodic_pid == KERNEL_PID_UNDEF;
        if (result) {
            periodic_msg_content = msg_content;
            periodic_pid = target_pid;

            uint8_t old_divider = x86_cmos_read(RTC_REG_A) & ~RTC_REG_A_HZ_MASK;
            x86_cmos_write(RTC_REG_A, old_divider | hz);
            x86_cmos_write(RTC_REG_B, x86_cmos_read(RTC_REG_B) | RTC_REG_B_INT_PERIODIC);
        }
    }
    rtc_irq_handler(0);
    irq_restore(old_status);
    return result;
}
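A minimal usage sketch of the periodic API above (hedged: RTC_REG_A_HZ_2, the 0x1234 content tag, and the receive loop are illustrative assumptions, not taken from the original source):

/* Subscribe the calling thread to periodic RTC messages and block on them.
 * RTC_REG_A_HZ_2 is assumed to be one of the divider constants accepted as
 * the hz argument; 0x1234 is an arbitrary message content tag, assumed to be
 * forwarded by rtc_irq_handler() as msg.content.value. */
static void rtc_tick_loop(void)
{
    if (!x86_rtc_set_periodic(RTC_REG_A_HZ_2, 0x1234, sched_active_pid, true)) {
        return; /* RTC not valid, or another thread already registered */
    }
    while (1) {
        msg_t msg;
        msg_receive(&msg);              /* blocks until the RTC IRQ sends a message */
        if (msg.content.value == 0x1234) {
            /* handle one periodic tick here */
        }
    }
}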
Example #5
//////////////////////////////////////////////////////////////////////
// KIRK function
//////////////////////////////////////////////////////////////////////
int pspKirkProc(void *dst,u32 dsize,void *src,u32 ssize,u32 cmd)
{
	u32 intval;
	u32 sts;

	// enable decrypter HW
	intval = irq_disable();
	(*(vu32 *)0xbc100050) |= 0x80;
	irq_resume(intval);

	// clear cache
//	sceKernelDcacheWritebackInvalidateAll();
	pspClearDcache();

#if 0
// memory address mask
	
  r2 = 0x1fffffff;
  r3 = r18 & r2; // arg1
  r2 = r16 & r2; // arg3
#endif

//Kprintf("irq_disable\n");

//	intval = irq_disable();

	// go KIRK
	*(u32 *)(0xbde00010) = (u32)cmd;
	*(u32 *)(0xbde0002c) = ((u32)src)&0x1fffffff;
	*(u32 *)(0xbde00030) = ((u32)dst)&0x1fffffff;
#if 0
	*(u32 *)(0xbde00020) = 0x33; // for IRQ mode
#endif

//Kprintf("go kirk\n");

	*(u32 *)(0xbde0000c) = 0x01;  // EXECUTE

//	irq_resume(intval);

//Kprintf("wait sts\n");

	do
	{
		sts = *(vu32 *)0xbde0001c;
	}while( (sts & 0x11)==0);


	*(u32 *)(0xbde00028) = sts;
	if(sts & 0x10)
	{
		*(u32 *)(0xbde0000c) = 0x02;
		do
		{
			sts = *(vu32 *)0xbde0001c;
		}while( (sts & 0x02)==0);
		*(u32 *)(0xbde00028) = sts;
		asm("sync"::);
		return -1;
	}

	/* success path (the original snippet is truncated here); a typical
	 * completion simply syncs and reports success */
	asm("sync"::);
	return 0;
}
Example #6
/*
 * Used for releasing the IRQ lists after any scheme is run.
 * Also removes all the enqueued work items and sets the
 * current active scheme to INT_NONE.
 */
void release_irq(struct metrics_device_list *pmetrics_device_elem)
{
    /* Disable the IRQ */
    irq_disable(pmetrics_device_elem);

    if (pmetrics_device_elem->irq_process.wq) {
        LOG_DBG("Wait for the WQ to get flushed");
        /* Flush the WQ and wait till all BH's are executed */
        flush_workqueue(pmetrics_device_elem->irq_process.wq);
        LOG_DBG("Destroy the recently flushed WQ");
        /* Destroy the WQ */
        destroy_workqueue(pmetrics_device_elem->irq_process.wq);
        pmetrics_device_elem->irq_process.wq = NULL;
    }
    /* Note Mutex lock and unlock not required
     * even though we are editing the IRQ track list
     * since no more ISR's and BH's are pending
     */
    /* clean up and free all IRQ linked list nodes */
    deallocate_irq_trk(pmetrics_device_elem);
    /*Dealloc the work list if it exists */
    dealloc_wk_list(&pmetrics_device_elem->irq_process);

    /* Now we can Set IRQ type to INT_NONE */
    pmetrics_device_elem->metrics_device->public_dev.irq_active.
      irq_type = INT_NONE;
    pmetrics_device_elem->metrics_device->public_dev.irq_active.
      num_irqs = 0;
    /* Will only be read by ISR */
    pmetrics_device_elem->irq_process.irq_type = INT_NONE;
}
Example #7
/*
************************************************************************************************************
*
*                                             function
*
*    name          :
*
*    parameters    :
*
*    return        :
*
*    note          :
*
*
************************************************************************************************************
*/
int sunxi_usb_exit(void)
{
	//int i;
	if(sunxi_udc_source.dma_send_channal)
	{
		sunxi_dma_release(sunxi_udc_source.dma_send_channal);
	}
	if(sunxi_udc_source.dma_recv_channal)
	{
		sunxi_dma_release(sunxi_udc_source.dma_recv_channal);
	}

	if(sunxi_ubuf.rx_base_buffer)
	{
		free(sunxi_ubuf.rx_base_buffer);
	}
	USBC_close_otg(sunxi_udc_source.usbc_hd);

	irq_disable(AW_IRQ_USB_OTG);
	irq_free_handler(AW_IRQ_USB_OTG);

	usb_close_clock();

	sunxi_udev_active->state_exit();

	memset(&sunxi_ubuf, 0, sizeof(sunxi_ubuf_t));

	return 0;
}
Example #8
/**
 * @brief Temporary implementation for pure virtual functions.
 */
void __cxa_pure_virtual(void)
{
	fprintf(stderr, "FATAL: Virtual function not implemented (%s at %i)!",
			__FILE__, __LINE__);
	irq_disable();
	for(;;);
}
Example #9
static inline void lptmr_stop(uint8_t dev)
{
    /* Disable IRQs to avoid race with ISR */
    unsigned int mask = irq_disable();
    LPTMR_Type *hw = lptmr_config[dev].dev;
    if (!(hw->CSR & LPTMR_CSR_TEN_MASK)) {
        /* Timer is already stopped, restore the IRQ state before returning */
        irq_restore(mask);
        return;
    }
    /* Update state */
    /* Latch counter value */
    hw->CNR = 0;
    lptmr[dev].cnr += hw->CNR;
    uint16_t timeout = hw->CMR - hw->CNR;
    /* Disable timer */
    hw->CSR = 0;
    if (timeout > LPTMR_RELOAD_OVERHEAD) {
        /* Compensate for the delay in reloading */
        lptmr[dev].cmr = timeout - LPTMR_RELOAD_OVERHEAD;
    }
    else {
        lptmr[dev].cmr = timeout;
    }
    /* Clear any pending IRQ */
    NVIC_ClearPendingIRQ(lptmr_config[dev].irqn);
    irq_restore(mask);
}
Example #10
static inline void lptmr_start(uint8_t dev)
{
    LPTMR_Type *hw = lptmr_config[dev].dev;
    if (hw->CSR & LPTMR_CSR_TEN_MASK) {
        /* Timer is running */
        return;
    }
    /* Disable IRQs to avoid race with ISR */
    unsigned int mask = irq_disable();
    /* ensure hardware is reset */
    hw->CSR = 0;
    if (lptmr[dev].running) {
        /* set target */
        hw->CMR = lptmr[dev].cmr;
        /* enable interrupt and start timer */
        hw->CSR = LPTMR_CSR_TEN_MASK | LPTMR_CSR_TFC_MASK | LPTMR_CSR_TIE_MASK;
    }
    else {
        /* no target */
        hw->CMR = 0;
        /* Disable interrupt, enable timer */
        hw->CSR = LPTMR_CSR_TEN_MASK | LPTMR_CSR_TFC_MASK;
    }
    /* compensate for the reload delay when starting the timer */
    lptmr[dev].cnr += LPTMR_RELOAD_OVERHEAD;
    irq_restore(mask);
}
Example #11
static inline int lptmr_set_absolute(uint8_t dev, uint16_t target)
{
    LPTMR_Type *hw = lptmr_config[dev].dev;
    /* Disable IRQs to minimize jitter */
    unsigned int mask = irq_disable();
    lptmr[dev].running = 1;
    if (!(hw->CSR & LPTMR_CSR_TEN_MASK)) {
        /* Timer is stopped, only update target */
        uint16_t timeout = target - lptmr[dev].cnr;
        if (timeout > LPTMR_RELOAD_OVERHEAD) {
            /* Compensate for the reload delay */
            lptmr[dev].cmr = timeout - LPTMR_RELOAD_OVERHEAD;
        }
        else {
            lptmr[dev].cmr = 0;
        }
    }
    else if (hw->CSR & LPTMR_CSR_TCF_MASK) {
        /* TCF is set, safe to update CMR live */
        hw->CMR = target - lptmr[dev].cnr;
        /* cppcheck-suppress selfAssignment
         * Clear IRQ flags */
        hw->CSR = hw->CSR;
        /* Enable timer and IRQ */
        hw->CSR = LPTMR_CSR_TEN_MASK | LPTMR_CSR_TFC_MASK | LPTMR_CSR_TIE_MASK;
    }
    else {
        uint16_t timeout = target - lptmr_read(dev);
        lptmr_reload_or_spin(dev, timeout);
    }
    irq_restore(mask);
    return 1;
}
Example #12
/** read-modify-write a register; bits set in 'mask' are preserved from the
 *  current register value, bits cleared in 'mask' are taken from 'val' */
void sx1276_rmw(uint8_t addr, uint8_t mask, uint8_t val)
{
	uint8_t ret;

	/** disable IRQ, and get current IRQ status */
	irq_state_t irq_sta = irq_disable();

	/** read register */
	HAL_SX1276_NSS_L();

	hal_spi_wr( (~0x80) & addr );
	ret = hal_spi_wr( 0x00 );

	HAL_SX1276_NSS_H();

	/** modify the data */
	ret = (ret & mask) | (val & ~mask);

	/** write data back */
	HAL_SX1276_NSS_L();

	hal_spi_wr( 0x80 | addr );
	hal_spi_wr( ret );

	HAL_SX1276_NSS_H();

	/** restore previous IRQ status */
	irq_restore(irq_sta);
}
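The mask convention above is easy to get backwards: bits set in mask are kept from the current register value, and bits cleared in mask are replaced from val. A small hedged sketch (REG_OPMODE and the helper name are illustrative only, not from the original source):

/* Replace only the two least significant bits of a register, keeping the rest.
 * 0x01 is used here purely as an example register address. */
#define REG_OPMODE  0x01

static void sx1276_set_mode_bits(uint8_t mode)
{
	/* keep bits 7..2 (mask = 0xFC), take bits 1..0 from the new value */
	sx1276_rmw(REG_OPMODE, 0xFC, mode & 0x03);
}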
Example #13
void scheduler_add_process_from_elf_data(scheduler_t* scheduler, uint32_t length, uint8_t* data)
{
    irq_disable();
    __disable_interrupts();

    node_t* node = (node_t*) malloc(sizeof(node_t));
    node_initialize(node);

    process_t* process = (process_t*) malloc(sizeof(process_t));
    process->id = scheduler->nextProcessId;
    process->masterTable = mmu_create_master_table();
    mmu_init_process(process);
    node->member = process;

    uint32_t entryPoint = 0;
    loader_load_elf_from_data(process, length, data, &entryPoint);
    __context_init_with_entrypoint(process, entryPoint);


    scheduler->nextProcessId++;
    list_append(node, scheduler->processes);

    irq_enable();
    __enable_interrupts();
}
Example #14
// FSM
void disk_sched_irq_handler(){
	irq_disable();
	disk_job_str* current = disk_queue;
	assert(current);
	ctx_s* owner = current->ctx;
	_dprint("DISK_IRQ\n");

	switch(disk_state){
		case DS_IDLE:
			fprintf(stderr, "WARNING - %s:%d - UNEXPECTED INTERRUPT IN IDLE STATE.\n", __FILE__, __LINE__);
			irq_enable();
			return;
		case DS_SEEK:{
			_dprint("SEEK DONE\n");
			// the seek has just finished.
			// What next? > Depends on the job type
			switch(current->type){
				case DJT_READ:
					// start the read
					disk_state = DS_READ;
					read_sector(current->cyl, current->sect, current->buffer);
					irq_enable();
					return;
				case DJT_WRITE:
					// start the write
					disk_state = DS_WRITE;
					write_sector(current->cyl, current->sect, current->buffer);
					irq_enable();
					return;
				case DJT_FORMAT:
					// start the format
					disk_state = DS_FORMAT;
					mFormat(current->cyl, current->sect, 1, 0x0);
					irq_enable();
					return;
				default:
					// ???
					irq_enable();
					return;
			}
		}
		case DS_READ: // the data is now in MASTERBUFFER; copy it into the job's buffer
			_dprint("READ DONE\n");
			assert(current->buffer);
			memcpy(current->buffer, MASTERBUFFER, secSize);
			/* fall through */
		case DS_WRITE:
		case DS_FORMAT:
			//disk_delete_job(current);
			disk_queue = current->next;

			// back to idle
			disk_state = DS_IDLE;
			disk_ctx->status = CTXS_ACTIVABLE;
			// reactivate the owner context.
			owner->status = CTXS_ACTIVABLE;
			_dprint("JOB DONE\n");
			irq_enable();
			return;
		default:
			irq_enable();
			return;
	}
}
Example #15
bool x86_rtc_set_update(uint32_t msg_content, kernel_pid_t target_pid, bool allow_replace)
{
    if (!valid) {
        return false;
    }

    unsigned old_status = irq_disable();
    bool result;
    if (target_pid == KERNEL_PID_UNDEF) {
        result = true;
        update_pid = KERNEL_PID_UNDEF;

        x86_cmos_write(RTC_REG_B, x86_cmos_read(RTC_REG_B) & ~RTC_REG_B_INT_UPDATE);
    }
    else {
        result = allow_replace || update_pid == KERNEL_PID_UNDEF;
        if (result) {
            update_msg_content = msg_content;
            update_pid = target_pid;

            x86_cmos_write(RTC_REG_B, x86_cmos_read(RTC_REG_B) | RTC_REG_B_INT_UPDATE);
        }
    }
    rtc_irq_handler(0);
    irq_restore(old_status);
    return result;
}
Example #16
/**
 * Deallocate a block of data.
 */
void free(void *ptr)
{
    unsigned old_state = irq_disable();

    tlsf_free(gheap, ptr);
    irq_restore(old_state);
}
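The allocation side of the same heap needs identical IRQ protection; a minimal sketch, assuming the same global gheap pool and the stock tlsf_malloc() call (not taken from the original source):

/**
 * Allocate a block of data (sketch mirroring free() above).
 */
void *malloc(size_t bytes)
{
    unsigned old_state = irq_disable();

    void *result = tlsf_malloc(gheap, bytes);
    irq_restore(old_state);
    return result;
}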
Example #17
int msg_reply(msg_t *m, msg_t *reply)
{
    unsigned state = irq_disable();

    thread_t *target = (thread_t*) sched_threads[m->sender_pid];
    assert(target != NULL);

    if (target->status != STATUS_REPLY_BLOCKED) {
        DEBUG("msg_reply(): %" PRIkernel_pid ": Target \"%" PRIkernel_pid
              "\" not waiting for reply.", sched_active_thread->pid, target->pid);
        irq_restore(state);
        return -1;
    }

    DEBUG("msg_reply(): %" PRIkernel_pid ": Direct msg copy.\n",
          sched_active_thread->pid);
    /* copy msg to target */
    msg_t *target_message = (msg_t*) target->wait_data;
    *target_message = *reply;
    sched_set_status(target, STATUS_PENDING);
    uint16_t target_prio = target->priority;
    irq_restore(state);
    sched_switch(target_prio);

    return 1;
}
Example #18
/**
 * @brief Atomic generic store
 *
 * @param[in]  size       width of the data, in bytes
 * @param[in]  dest       destination address to store to
 * @param[in]  src        source address
 * @param[in]  memorder   memory ordering, ignored in this implementation
 */
void __atomic_store_c(size_t size, void *dest, const void *src, int memorder)
{
    (void) memorder;
    unsigned int mask = irq_disable();
    memcpy(dest, src, size);
    irq_restore(mask);
}
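A matching generic load can use the same disable/copy/restore pattern; a minimal sketch, assuming the __atomic_load_c signature mirrors the store above (size, source, destination, memory order):

/**
 * @brief Atomic generic load (sketch; signature assumed to mirror the store)
 */
void __atomic_load_c(size_t size, const void *src, void *dest, int memorder)
{
    (void) memorder;
    unsigned int mask = irq_disable();
    memcpy(dest, src, size);
    irq_restore(mask);
}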
Example #19
/*
************************************************************************************************************
*
*                                             function
*
*    name          :
*
*    parameters    :
*
*    return        :
*
*    note          :
*
*
************************************************************************************************************
*/
int sunxi_dma_disable_int(uint hdma)
{
	sw_dma_channal_set_t     *dma_channal = (sw_dma_channal_set_t *)hdma;
	sunxi_dma_int_set    *dma_status  = (sunxi_dma_int_set *)SUNXI_DMA_BASE;
	uint  channal_count;

	if(!dma_channal->used)
	{
		return -1;
	}

	channal_count = dma_channal->channalNo;
	dma_status->irq_en &= ~(0x2 << (channal_count*2));
	//disable global int
	if(dma_int_count > 0)
	{
		dma_int_count --;
	}
	if(!dma_int_count)
	{
		irq_disable(AW_IRQ_DMA);
	}

	return 0;
}
Example #20
// Stops the specified timer:
void timer_stop(int timer)
{
	// Disables the timer interrupt:
	irq_disable(TIMER_IRQ(timer));
	// Stops the timer:
	timers[timer].memspace[REG_32(TIMER_TCLR)] &= ~TIMER_ST;
}
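The corresponding start routine would mirror the two steps in reverse order; a minimal sketch, assuming the same timers[] register layout and an irq_enable() counterpart that takes the IRQ line (not from the original source):

// Starts the specified timer (sketch):
void timer_start(int timer)
{
	// Starts the timer:
	timers[timer].memspace[REG_32(TIMER_TCLR)] |= TIMER_ST;
	// Enables the timer interrupt:
	irq_enable(TIMER_IRQ(timer));
}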
Example #21
void cpu_switch_context_exit(void)
{
#ifdef NATIVE_AUTO_EXIT
    if (sched_num_threads <= 1) {
        DEBUG("cpu_switch_context_exit: last task has ended. exiting.\n");
        real_exit(EXIT_SUCCESS);
    }
#endif

    if (_native_in_isr == 0) {
        irq_disable();
        _native_in_isr = 1;
        native_isr_context.uc_stack.ss_sp = __isr_stack;
        native_isr_context.uc_stack.ss_size = SIGSTKSZ;
        native_isr_context.uc_stack.ss_flags = 0;
        makecontext(&native_isr_context, isr_cpu_switch_context_exit, 0);
        if (setcontext(&native_isr_context) == -1) {
            err(EXIT_FAILURE, "cpu_switch_context_exit: setcontext");
        }
        errx(EXIT_FAILURE, "1 this should have never been reached!!");
    }
    else {
        isr_cpu_switch_context_exit();
    }
    errx(EXIT_FAILURE, "3 this should have never been reached!!");
}
Example #22
/* Sleep for specified period.  This is useful for synchronising the
   CPU clock MCK to the timer clock, especially since the fastest
   timer clock is MCK / 2.  */
void
tc_clock_sync (tc_t tc, tc_period_t period)
{
    uint32_t id;

    tc_config_1 (tc, TC_MODE_DELAY_ONESHOT, period, period, 1);

    id = ID_TC0 + TC_CHANNEL (tc);

    irq_config (id, 7, tc_clock_sync_handler);
            
    irq_enable (id);

    /* Enable interrupt when have compare on A.  */
    tc->base->TC_IER = TC_IER_CPAS;

    tc_start (tc);
    
    /* Stop CPU clock until interrupt.  FIXME, should disable other
       interrupts first. */
    mcu_cpu_idle ();

    /* Disable interrupt when have compare on A.  */
    tc->base->TC_IDR = TC_IDR_CPAS;

    irq_disable (id);

    tc_stop (tc);
}
Example #23
/*
 * The function first disables the IRQ scheme given in irq_active, then
 * deallocates the IRQ track linked list and the work list, and finally
 * resets the active IRQ scheme to INT_NONE.
 * Also re-initializes the irq track linked list.
 * NOTE: Always call this function with IRQ MUTEX locked, otherwise it fails.
 */
static int disable_active_irq(struct metrics_device_list
    *pmetrics_device_elem, enum nvme_irq_type  irq_active)
{

#ifdef DEBUG
    /* If mutex is not locked then exit here */
    if (!mutex_is_locked(&pmetrics_device_elem->irq_process.irq_track_mtx)) {
        LOG_ERR("Mutex should have been locked before this...");
        /* Mutex is not locked so exiting */
        return -EINVAL;
    }
#endif

    /* Disable the IRQ */
    irq_disable(pmetrics_device_elem);

    /* clean up and free all IRQ linked list nodes */
    deallocate_irq_trk(pmetrics_device_elem);

    /* Dealloc the work list if it exists */
    dealloc_wk_list(&pmetrics_device_elem->irq_process);

    /* Now we can Set IRQ type to INT_NONE */
    pmetrics_device_elem->metrics_device->public_dev.irq_active.
        irq_type = INT_NONE;
    pmetrics_device_elem->metrics_device->public_dev.irq_active.
        num_irqs = 0;
    /* Will only be read by ISR */
    pmetrics_device_elem->irq_process.irq_type = INT_NONE;
    return SUCCESS;
}
Example #24
int create_ctx(int stack_size, func_t f, void* args) {

	irq_disable();

	//Allocate memory to hold a new context -> MALLOC RESERVES A MEMORY AREA OF SIZE sizeof(struct ctx_s)
	struct ctx_s* ctx_new = malloc(sizeof(struct ctx_s));

	assert(ctx_new);

	//Initialize the new context
	init_ctx(ctx_new, stack_size, f, args);

	//Add it to the context list

	//If the context list exists, insert the new context (ctx_new) right after the list head
	if (ctxs) {
		ctx_new -> ctx_next = ctxs -> ctx_next;
		ctxs -> ctx_next = ctx_new;
	}
	//If the context list does not exist yet, this context becomes the first one in the list -> point to it!!
	else {
		ctxs = ctx_new;
		ctxs -> ctx_next = ctx_new;
	}

	irq_enable();

	return 0;

}
Example #25
/* Does the actual shutdown stuff for a proper shutdown */
void arch_shutdown() {
	/* Run dtors */
	arch_atexit();
	arch_dtors();

	dbglog(DBG_CRITICAL, "arch: shutting down kernel\n");

	/* Turn off UBC breakpoints, if any */
	// ubc_disable_all();
	
	/* Do auto-shutdown... or use the "light weight" version underneath */
#if 1
	arch_auto_shutdown();
#else
	/* Ensure that interrupts are disabled */
	irq_disable();
	irq_enable_exc();

	/* Make sure that PVR and Maple are shut down */
	pvr_shutdown();
	maple_shutdown();

	/* Shut down any other hardware things */
	hardware_shutdown();
#endif

	if (__kos_init_flags & INIT_MALLOCSTATS) {
		malloc_stats();
	}

	/* Shut down IRQs */
	// irq_shutdown();
}
Example #26
void yield(void) {

	//Isolate the critical section of code
	irq_disable();

	//If the current structure exists, switch to a following context that has not finished
	if (struct_reference) {
		struct ctx_s* tmp;
		//Look for the next unfinished context that is not ourselves
		while (struct_reference -> ctx_next -> ctx_state == CTX_TERMINATED && struct_reference -> ctx_next != struct_reference) {
			tmp = struct_reference -> ctx_next;
			struct_reference -> ctx_next = struct_reference -> ctx_next -> ctx_next;
			free(tmp -> ctx_ptr_malloc);
			free(tmp);
		}
		//Switch to the next context if it has not finished yet
		if (struct_reference -> ctx_next -> ctx_state != CTX_TERMINATED) {
			switch_to_ctx(struct_reference -> ctx_next);
		}
		//Otherwise we only point to ourselves -> EXIT
		else {
			exit(EXIT_SUCCESS);
		}
	}
	//If the current structure does not exist, switch directly to the context pointed to by ctxs
	else {
		if (ctxs != NULL) {
			switch_to_ctx(ctxs);
		}
	}

}
Example #27
static bool_t mach_cleanup(void)
{
    /* stop timer 0 ~ 4 */
    writel(S5PV210_TCON, 0x0);

    /* stop system timer */
    writel(S5PV210_SYSTIMER_TCON, 0x0);

    /* disable irq */
    irq_disable();

    /* disable fiq */
    fiq_disable();

    /* disable icache */
    icache_disable();

    /* disable dcache */
    dcache_disable();

    /* disable mmu */
    mmu_disable();

    /* disable vic */
    vic_disable();

    return TRUE;
}
Example #28
void _xtimer_set64(xtimer_t *timer, uint32_t offset, uint32_t long_offset)
{
    DEBUG(" _xtimer_set64() offset=%" PRIu32 " long_offset=%" PRIu32 "\n", offset, long_offset);
    if (!long_offset) {
        /* timer fits into the short timer */
        xtimer_set(timer, (uint32_t) offset);
    }
    else {
        int state = irq_disable();
        if (_is_set(timer)) {
            _remove(timer);
        }

        _xtimer_now64(&timer->target, &timer->long_target);
        timer->target += offset;
        timer->long_target += long_offset;
        if (timer->target < offset) {
            timer->long_target++;
        }

        _add_timer_to_long_list(&long_list_head, timer);
        irq_restore(state);
        DEBUG("xtimer_set64(): added longterm timer (long_target=%" PRIu32 " target=%" PRIu32 ")\n",
                timer->long_target, timer->target);
    }
}
Example #29
void cmd_execute(ether_header_t * ether, ip_header_t * ip, udp_header_t * udp, command_t * command)
{
    if (!running) {
	tool_ip = ntohl(ip->src);
	tool_port = ntohs(udp->src);
	memcpy(tool_mac, ether->src, 6);
	our_ip = ntohl(ip->dest);

	make_ip(ntohl(ip->src), ntohl(ip->dest), UDP_H_LEN + COMMAND_LEN, 17, (ip_header_t *)(pkt_buf + ETHER_H_LEN));
	make_udp(ntohs(udp->src), ntohs(udp->dest),(unsigned char *) command, COMMAND_LEN, (ip_header_t *)(pkt_buf + ETHER_H_LEN), (udp_header_t *)(pkt_buf + ETHER_H_LEN + IP_H_LEN));
	eth_txts(pkt_buf, ETHER_H_LEN + IP_H_LEN + UDP_H_LEN + COMMAND_LEN);

#if 0
	printf("executing %p ...", ntohl(command->address));
	
	if (ntohl(command->size)&1)
	    *(unsigned int *)0x8c004004 = 0xdeadbeef; /* enable console */
	else
	    *(unsigned int *)0x8c004004 = 0xfeedface; /* disable console */

	irq_disable();
	disable_cache();
	go(ntohl(command->address));
#endif
    }
}
Example #30
int main(int argc, char *argv[])
{
	if(argc != 3)
	{
		printf("%d", argc);
		printf("\nUsage: go 0x50000000 LoadAddress FileName\n\n");
		return -1;
	}

	hport = htons(69);
	eport = htons(4321);
	
	eth_init();

	irq_disable();
	
	GPNCON |= 2 << 14;
	EINT0CON0 |= 1 << 12;
	EINT0MASK &= ~(1 << 7);//set up the NIC interrupt
	
	irq_request(INT_EINT1, do_net);
	
	irq_enable();

	tftp_down((void *)strtoul(argv[1], NULL, 16), argv[2]);	/* parse the load address as hex */

	return 0;
}