Exemple #1
0
/*
 * GDB JIT interface hook: the debugger sets a breakpoint on this
 * function, and the JIT calls it after updating the JIT descriptor so
 * GDB can rescan the registered code objects.
 *
 * The empty volatile asm prevents the compiler from eliminating or
 * merging this otherwise-empty function (identical-code folding would
 * break GDB's breakpoint).
 *
 * Fixes: removed the stray ';' after the function body (not valid at
 * file scope in strict ISO C) and declared the empty parameter list as
 * (void).
 */
void LJ_NOINLINE __jit_debug_register_code(void)
{
  __asm__ __volatile__("");
}
Exemple #2
0
/*
 * TWI (I2C) slave callback: invoked once per data byte received from the
 * bus master.  Acknowledges the byte so the transfer continues, then
 * hands the byte to the LED data buffer.
 *
 * NOTE(review): the named asm label appears to exist only as a
 * debugging/tracing anchor; it assumes this function is emitted exactly
 * once (no inlining/cloning), otherwise the duplicate label would fail
 * to assemble — confirm build flags.
 */
void twi__slave__on_data_byte_received(const uint8_t value) {
    __asm__ __volatile__("twi__slave__on_data_byte_received:");
    /* ACK this byte and keep receiving (true = ack, false = not last). */
    twi__continue(true, false);
    /* Queue the received byte for the LED display. */
    data__leds__put(value);
    // TODO: check that space remains for just 1 byte and invoke twi__continue(false, false);
}
Exemple #3
0
/*
 * panic - report a fatal error and halt the machine.
 *
 * => message = human-readable description of the failure
 *    file    = source file where the panic was raised
 *    line    = source line where the panic was raised
 *
 * Never returns: interrupts are disabled and the CPU spins forever.
 *
 * Fix: 'file' and 'line' were accepted but silently ignored; the source
 * location is now appended to the report.
 */
void panic(const char *message, const char *file, uint32_t line)
{
    __asm__ __volatile__ ("cli"); // Disable interrupts.
    printf("\n!!! %s !!!", message);
    printf(" at %s:%u", file, (unsigned)line);
    for(;;)
        ; // Halt: spin with interrupts off.
}
Exemple #4
0
/*
 * To the best of our knowledge Windows compatible x86 hardware expects
 * the following on reboot:
 *
 * 1) If the FADT has the ACPI reboot register flag set, try it
 * 2) If still alive, write to the keyboard controller
 * 3) If still alive, write to the ACPI reboot register again
 * 4) If still alive, write to the keyboard controller again
 * 5) If still alive, call the EFI runtime service to reboot
 * 6) If no EFI runtime service, call the BIOS to do a reboot
 *
 * We default to following the same pattern. We also have
 * two other reboot methods: 'triple fault' and 'PCI', which
 * can be triggered via the reboot= kernel boot option or
 * via quirks.
 *
 * This means that this function can never return, it can misbehave
 * by not rebooting properly and hanging.
 */
/*
 * To the best of our knowledge Windows compatible x86 hardware expects
 * the following on reboot:
 *
 * 1) If the FADT has the ACPI reboot register flag set, try it
 * 2) If still alive, write to the keyboard controller
 * 3) If still alive, write to the ACPI reboot register again
 * 4) If still alive, write to the keyboard controller again
 * 5) If still alive, call the EFI runtime service to reboot
 * 6) If no EFI runtime service, call the BIOS to do a reboot
 *
 * We default to following the same pattern. We also have
 * two other reboot methods: 'triple fault' and 'PCI', which
 * can be triggered via the reboot= kernel boot option or
 * via quirks.
 *
 * This means that this function can never return, it can misbehave
 * by not rebooting properly and hanging.
 */
static void native_machine_emergency_restart(void)
{
	int i;
	int attempt = 0;			/* have we already retried via ACPI? */
	int orig_reboot_type = reboot_type;	/* method requested before overrides */
	unsigned short mode;

	if (reboot_emergency)
		emergency_vmx_disable_all();

	tboot_shutdown(TB_SHUTDOWN_REBOOT);

	/* Tell the BIOS if we want cold or warm reboot */
	/* 0x1234 in the BIOS reset-flag word at phys 0x472 requests a warm boot */
	mode = reboot_mode == REBOOT_WARM ? 0x1234 : 0;
	*((unsigned short *)__va(0x472)) = mode;

	/*
	 * If an EFI capsule has been registered with the firmware then
	 * override the reboot= parameter.
	 */
	if (efi_capsule_pending(NULL)) {
		pr_info("EFI capsule is pending, forcing EFI reboot.\n");
		reboot_type = BOOT_EFI;
	}

	/*
	 * State machine: each method that fails to actually reset the box
	 * reassigns reboot_type to the next method to try, forever.
	 */
	for (;;) {
		/* Could also try the reset bit in the Hammer NB */
		switch (reboot_type) {
		case BOOT_ACPI:
			acpi_reboot();
			reboot_type = BOOT_KBD;
			break;

		case BOOT_KBD:
			mach_reboot_fixups(); /* For board specific fixups */

			for (i = 0; i < 10; i++) {
				kb_wait();
				udelay(50);
				outb(0xfe, 0x64); /* Pulse reset low */
				udelay(50);
			}
			/* Retry ACPI once before moving on (step 3 above). */
			if (attempt == 0 && orig_reboot_type == BOOT_ACPI) {
				attempt = 1;
				reboot_type = BOOT_ACPI;
			} else {
				reboot_type = BOOT_EFI;
			}
			break;

		case BOOT_EFI:
			efi_reboot(reboot_mode, NULL);
			reboot_type = BOOT_BIOS;
			break;

		case BOOT_BIOS:
			machine_real_restart(MRR_BIOS);

			/* We're probably dead after this, but... */
			reboot_type = BOOT_CF9_SAFE;
			break;

		case BOOT_CF9_FORCE:
			port_cf9_safe = true;
			/* Fall through */

		case BOOT_CF9_SAFE:
			if (port_cf9_safe) {
				/* 0xcf9 is the PCI reset control register */
				u8 reboot_code = reboot_mode == REBOOT_WARM ?  0x06 : 0x0E;
				u8 cf9 = inb(0xcf9) & ~reboot_code;
				outb(cf9|2, 0xcf9); /* Request hard reset */
				udelay(50);
				/* Actually do the reset */
				outb(cf9|reboot_code, 0xcf9);
				udelay(50);
			}
			reboot_type = BOOT_TRIPLE;
			break;

		case BOOT_TRIPLE:
			/* Empty IDT + trap => double fault => triple fault/reset */
			load_idt(&no_idt);
			__asm__ __volatile__("int3");

			/* We're probably dead after this, but... */
			reboot_type = BOOT_KBD;
			break;
		}
	}
}
Exemple #5
0
/*
 * Enter the kernel debugger by raising a breakpoint trap (x86 "int3");
 * the debugger's trap handler takes over from there.
 */
void
kdp_ml_enter_debugger(void)
{
	__asm__ __volatile__("int3");
}
Exemple #6
0
/*
 * Windows compatible x86 hardware expects the following on reboot:
 *
 * 1) If the FADT has the ACPI reboot register flag set, try it
 * 2) If still alive, write to the keyboard controller
 * 3) If still alive, write to the ACPI reboot register again
 * 4) If still alive, write to the keyboard controller again
 *
 * If the machine is still alive at this stage, it gives up. We default to
 * following the same pattern, except that if we're still alive after (4) we'll
 * try to force a triple fault and then cycle between hitting the keyboard
 * controller and doing that
 */
/*
 * Windows compatible x86 hardware expects the following on reboot:
 *
 * 1) If the FADT has the ACPI reboot register flag set, try it
 * 2) If still alive, write to the keyboard controller
 * 3) If still alive, write to the ACPI reboot register again
 * 4) If still alive, write to the keyboard controller again
 *
 * If the machine is still alive at this stage, it gives up. We default to
 * following the same pattern, except that if we're still alive after (4) we'll
 * try to force a triple fault and then cycle between hitting the keyboard
 * controller and doing that
 */
static void native_machine_emergency_restart(void)
{
    int i;
    int attempt = 0;                    /* have we already retried via ACPI? */
    int orig_reboot_type = reboot_type; /* method requested before overrides */

    if (reboot_emergency)
        emergency_vmx_disable_all();

    tboot_shutdown(TB_SHUTDOWN_REBOOT);

    /* Tell the BIOS if we want cold or warm reboot */
    /* (0x472 is the BIOS reset-flag word in low memory) */
    *((unsigned short *)__va(0x472)) = reboot_mode;

    /*
     * State machine: each method that fails to actually reset the box
     * reassigns reboot_type to the next method to try, forever.
     */
    for (;;) {
        /* Could also try the reset bit in the Hammer NB */
        switch (reboot_type) {
        case BOOT_KBD:
            mach_reboot_fixups(); /* for board specific fixups */

            for (i = 0; i < 10; i++) {
                kb_wait();
                udelay(50);
                outb(0xfe, 0x64); /* pulse reset low */
                udelay(50);
            }
            /* Give ACPI one retry before falling back to a triple fault. */
            if (attempt == 0 && orig_reboot_type == BOOT_ACPI) {
                attempt = 1;
                reboot_type = BOOT_ACPI;
            } else {
                reboot_type = BOOT_TRIPLE;
            }
            break;

        case BOOT_TRIPLE:
            /* Empty IDT + trap => double fault => triple fault/reset */
            load_idt(&no_idt);
            __asm__ __volatile__("int3");

            reboot_type = BOOT_KBD;
            break;

#ifdef CONFIG_X86_32
        case BOOT_BIOS:
            machine_real_restart(MRR_BIOS);

            reboot_type = BOOT_KBD;
            break;
#endif

        case BOOT_ACPI:
            acpi_reboot();
            reboot_type = BOOT_KBD;
            break;

        case BOOT_EFI:
            if (efi_enabled)
                efi.reset_system(reboot_mode ?
                                 EFI_RESET_WARM :
                                 EFI_RESET_COLD,
                                 EFI_SUCCESS, 0, NULL);
            reboot_type = BOOT_KBD;
            break;

        case BOOT_CF9:
            port_cf9_safe = true;
        /* fall through */

        case BOOT_CF9_COND:
            if (port_cf9_safe) {
                /* 0xcf9 is the PCI reset control register */
                u8 cf9 = inb(0xcf9) & ~6;
                outb(cf9|2, 0xcf9); /* Request hard reset */
                udelay(50);
                outb(cf9|6, 0xcf9); /* Actually do the reset */
                udelay(50);
            }
            reboot_type = BOOT_KBD;
            break;
        }
    }
}
Exemple #7
0
/*
 * Entire program written as one giant i386 inline-asm block (AT&T
 * syntax).  What the assembly does, read literally:
 *
 *   - read(0, _inbuf.0, 1048576): slurp up to 1 MiB from stdin and
 *     NUL-terminate it.
 *   - Scan the buffer for '|' (ASCII 124) delimiters.  Inside a
 *     '|'-delimited field, each 'o' (111) shifts a 1 bit into %edx and
 *     each ' ' (32) shifts a 0 bit, so a field spells one byte in
 *     binary; the byte is appended to _outbuf.1 at the closing '|'.
 *   - At end of input: write(1, _outbuf.1, n) and exit(0).
 *
 * NOTE(review): this asm clobbers registers and calls libc without
 * declaring any constraints; it only works because it is the whole body
 * of main() and control never returns to compiled code — do not copy
 * this pattern elsewhere.
 */
int main()
{
    __asm__ __volatile__(
        "	pushl	%ebp\n"
        "	movl	%esp, %ebp\n"
        "	pushl	%ebx\n"
        "	subl	$20, %esp\n"
        "	movl	$1048576, %eax\n"
        "	movl	%eax, 8(%esp)\n"
        "	movl	$_inbuf.0, %ebx\n"
        "	movl	%ebx, 4(%esp)\n"
        "	movl	$0, (%esp)\n"
        "	call	read\n"
        "	movb	$0, _inbuf.0(%eax)\n"
        "	movl	$_outbuf.1, %ecx\n"
        "	.p2align 4,,15\n"
        "L10:\n"
        "	movzbl	(%ebx), %eax\n"
        "	testb	%al, %al\n"
        "	je	L14\n"
        "	cmpb	$124, %al\n"
        "	je	L14\n"
        "	.p2align 4,,15\n"
        "L17:\n"
        "	incl	%ebx\n"
        "	movzbl	(%ebx), %eax\n"
        "	testb	%al, %al\n"
        "	je	L14\n"
        "	cmpb	$124, %al\n"
        "	jne	L17\n"
        "L14:\n"
        "	incl	%ebx\n"
        "	cmpb	$124, %al\n"
        "	jne	L11\n"
        "	xorl	%edx, %edx\n"
        "	movzbl	(%ebx), %eax\n"
        "	cmpb	$124, %al\n"
        "	je	L29\n"
        "	.p2align 4,,15\n"
        "L26:\n"
        "	cmpb	$32, %al\n"
        "	je	L30\n"
        "	cmpb	$111, %al\n"
        "	je	L31\n"
        "L21:\n"
        "	incl	%ebx\n"
        "	movzbl	(%ebx), %eax\n"
        "	cmpb	$124, %al\n"
        "	jne	L26\n"
        "L29:\n"
        "	incl	%ebx\n"
        "	movb	%dl, (%ecx)\n"
        "	incl	%ecx\n"
        "	jmp	L10\n"
        "	.p2align 4,,7\n"
        "L31:\n"
        "	addl	%edx, %edx\n"
        "	orl	$1, %edx\n"
        "	jmp	L21\n"
        "	.p2align 4,,7\n"
        "L30:\n"
        "	addl	%edx, %edx\n"
        "	jmp	L21\n"
        "L11:\n"
        "	subl	$_outbuf.1, %ecx\n"
        "	movl	%ecx, 8(%esp)\n"
        "	movl	$_outbuf.1, %edx\n"
        "	movl	%edx, 4(%esp)\n"
        "	movl	$1, (%esp)\n"
        "	call	write\n"
        "	movl	$0, (%esp)\n"
        "	call	exit\n"
        ".lcomm _inbuf.0,131072\n"
        ".lcomm _outbuf.1,65536\n"
    );
}
Exemple #8
0
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
			      unsigned long vector, int write_acc)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	siginfo_t info;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * NOTE2: This is done so that, when updating the vmalloc
	 * mappings we don't have to walk all processes pgdirs and
	 * add the high mappings all at once. Instead we do it as they
	 * are used. However vmalloc'ed page entries have the PAGE_GLOBAL
	 * bit set so sometimes the TLB can use a lingering entry.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection error.
	 */

	if (address >= VMALLOC_START &&
	    (vector != 0x300 && vector != 0x400) &&
	    !user_mode(regs))
		goto vmalloc_fault;

	/* If exceptions were enabled, we can reenable them here */
	if (user_mode(regs)) {
		/* Exception was in userspace: reenable interrupts */
		local_irq_enable();
		flags |= FAULT_FLAG_USER;
	} else {
		/* If exception was in a syscall, then IRQ's may have
		 * been enabled or disabled.  If they were enabled,
		 * reenable them.
		 */
		if (regs->sr && (SPR_SR_IEE | SPR_SR_TEE))
			local_irq_enable();
	}

	mm = tsk->mm;
	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */

	if (in_interrupt() || !mm)
		goto no_context;

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (user_mode(regs)) {
		/*
		 * accessing the stack below usp is always a bug.
		 * we get page-aligned addresses so we can only check
		 * if we're within a page from usp, but that might be
		 * enough to catch brutal errors at least.
		 */
		if (address + PAGE_SIZE < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	info.si_code = SEGV_ACCERR;

	/* first do some preliminary protection checks */

	if (write_acc) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		/* not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/* are we trying to execute nonexecutable area */
	if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC))
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */

	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*RGD modeled on Cris */
		if (fault & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			 /* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */

bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:

	/* User mode accesses just cause a SIGSEGV */

	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);

		printk("%s%s[%d]: segfault at %lx pc %p sp %p\n",
                       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
                       tsk->comm, task_pid_nr(tsk), address,
                       (void *)regs->pc, (void *)regs->sp);
		return;
	}

no_context:

	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 *  when it acesses user-memory. When it fails in one
	 *  of those points, we find it in a table and do a jump
	 *  to some fixup code that loads an appropriate error
	 *  code)
	 */

	{
		const struct exception_table_entry *entry;

		__asm__ __volatile__("l.nop 42");

		if ((entry = search_exception_tables(regs->pc)) != NULL) {
			/* Adjust the instruction pointer in the stackframe */
			regs->pc = entry->fixup;
			return;
		}
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	if ((unsigned long)(address) < PAGE_SIZE)
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel access");
	printk(" at virtual address 0x%08lx\n", address);

	die("Oops", regs, write_acc);

	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */

out_of_memory:
	__asm__ __volatile__("l.nop 42");
	__asm__ __volatile__("l.nop 1");

	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd
		 * since the latter might be unavailable if this
		 * code is executed in a misfortunately run irq
		 * (like inside schedule() between switch_mm and
		 *  switch_to...).
		 */

		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

/*
		phx_warn("do_page_fault(): vmalloc_fault will not work, "
			 "since current_pgd assign a proper value somewhere\n"
			 "anyhow we don't need this at the moment\n");

		phx_mmu("vmalloc_fault");
*/
		pgd = (pgd_t *)current_pgd[smp_processor_id()] + offset;
		pgd_k = init_mm.pgd + offset;

		/* Since we're two-level, we don't need to do both
		 * set_pgd and set_pmd (they do the same thing). If
		 * we go three-level at some point, do the right thing
		 * with pgd_present and set_pgd here.
		 *
		 * Also, since the vmalloc area is global, we don't
		 * need to copy individual PTE's, it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);

		if (!pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		set_pmd(pmd, *pmd_k);

		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}
Exemple #9
0
/*
 * First code of the flash header: unconditionally branch to the real
 * entry point (_start).  __naked: no compiler prologue/epilogue, so the
 * branch is the only instruction emitted; __flash_header_start
 * presumably places it in the flash-header section — confirm the linker
 * script.
 */
void __naked __flash_header_start go(void)
{
	__asm__ __volatile__("b _start\n");
}
Exemple #10
0
/*--------------------------------------------------------------------------*
 *                  ORDONNANCEUR preemptif optimise                         *
 *                                                                          *
 *             !! Cette fonction doit s'exécuter en mode IRQ !!             *
 *  !! Pas d'appel direct ! Utiliser schedule pour provoquer une            *
 *  commutation !!                                                          *
 *--------------------------------------------------------------------------*/
void __attribute__((naked)) scheduler(void) {
	register CONTEXTE *p;
	register unsigned int sp asm("sp");  /* Pointeur de pile */

	/* Sauvegarder le contexte complet sur la pile IRQ */
	__asm__ __volatile__(
		/* Sauvegarde registres mode system */
		"stmfd sp,{r0-r12,sp,lr}^\t\n"
		/* Attendre un cycle */
		"nop\t\n"
		/* Ajustement pointeur de pile */
		"sub sp, sp, #60\t\n"
		/* Sauvegarde lr IRQ */
		"stmfd sp!,{lr}\t\n"
		/* Sauvegarde de spsr_irq */
		"mrs r0, SPSR\t\n"
		"stmfd sp!,{r0}\t\n"
	);

	/* Réinitialiser le timer si nécessaire */
	if (_ack_timer) {
		/* Acquiter l'événement de comparaison du Timer pour pouvoir */
		/* obtenir le déclencement d'une prochaine interruption */
		timerComparedOccured(TIMER1);
	}
	else {
	  _ack_timer = 1;
	}

	/* memoriser le pointeur de pile */
	_contexte[_tache_c].sp_irq = sp;
	/* recherche du suivant */
	_tache_c = suivant();
	/* Incrémenter le compteur d'activations  */
	compteurs[_tache_c]++;
	/* p pointe sur la nouvelle tache courante*/
	p = &_contexte[_tache_c];

	/* tache prete ? */
	if (p->status == PRET) {
		/* Charger sp_irq initial */
		sp = p->sp_irq;
		/* Passer en mode système */
		_set_arm_mode_(ARMMODE_SYS);
		/* Charger sp_sys initial */
		sp = p->sp_ini;
		/* status tache -> execution */
		p->status = EXEC;
		/* autoriser les interuptions */
		_irq_enable_();
		/* lancement de la tâche */
		p->tache_adr();
	}
	else {
		/* tache deja en execution, restaurer sp_irq */
		sp = p->sp_irq;
	}

	/* Restaurer le contexte complet depuis la pile IRQ */
	__asm__ __volatile__(
		/* Restaurer spsr_irq */
		"ldmfd sp!,{r0}\t\n"
		"msr SPSR, r0\t\n"
		/* et lr_irq */
		"ldmfd sp!,{lr}\t\n"
		/* Restaurer registres mode system */
		"ldmfd sp,{r0-r12,sp,lr}^\t\n"
		/* Attendre un cycle */
		"nop\t\n"
		/* Ajuster pointeur de pile irq */
		"add sp, sp, #60\t\n"
		/* Retour d'exception */
		"subs pc, lr, #4\t\n"
	); 
}
Exemple #11
0
/*
 * semaphore_get - return the current count of semaphore 'aSemaphore'.
 *
 * NOTE(review): interrupts are disabled with "cli" but never re-enabled
 * in this function — presumably the caller (or the KERNEL_CALL return
 * path) restores the interrupt flag; confirm.  The index is not
 * bounds-checked against the gSemaphores table.
 */
uint_t KERNEL_CALL
semaphore_get(uint_t aSemaphore)
{
    __asm__ __volatile__ ("cli"); /* mask maskable interrupts (x86) */
    return gSemaphores[aSemaphore];
}
Exemple #12
0
/*
 * decompress_kernel - PowerPC boot-wrapper entry point.
 *
 * Sets up the console, reports where the image was loaded/relocated,
 * optionally lets the user edit the kernel command line, gunzips the
 * kernel to 0x400000, moves the initrd out of the way of the bootinfo
 * records if they would overlap, and builds the bi_rec list.
 *
 * => load_addr = address the wrapper was loaded at
 *    num_words = image size in words (used only for address reporting)
 *    cksum     = unused here
 * <= address of the first bi_rec to hand to the kernel
 */
struct bi_record *
decompress_kernel(unsigned long load_addr, int num_words, unsigned long cksum)
{
#ifdef INTERACTIVE_CONSOLE
    int timer = 0;
    char ch;
#endif
    char *cp;
    struct bi_record *rec;
    unsigned long initrd_loc = 0, TotalMemory = 0;

#if defined(CONFIG_SERIAL_8250_CONSOLE) || defined(CONFIG_SERIAL_MPSC_CONSOLE)
    com_port = serial_init(0, NULL);
#endif

#if defined(CONFIG_44x) && defined(PPC44x_EMAC0_MR0)
    /* Reset MAL */
    mtdcr(DCRN_MALCR(DCRN_MAL_BASE), MALCR_MMSR);
    /* Wait for reset */
    while (mfdcr(DCRN_MALCR(DCRN_MAL_BASE)) & MALCR_MMSR) {};
    /* Reset EMAC */
    *(volatile unsigned long *)PPC44x_EMAC0_MR0 = 0x20000000;
    __asm__ __volatile__("eieio"); /* order the MMIO store before continuing */
#endif

    /*
     * Call get_mem_size(), which is memory controller dependent,
     * and we must have the correct file linked in here.
     */
    TotalMemory = get_mem_size();

    /* assume the chunk below 8M is free */
    end_avail = (char *)0x00800000;

    /*
     * Reveal where we were loaded at and where we
     * were relocated to.
     */
    puts("loaded at:     ");
    puthex(load_addr);
    puts(" ");
    puthex((unsigned long)(load_addr + (4*num_words)));
    puts("\n");
    if ( (unsigned long)load_addr != (unsigned long)&start )
    {
        puts("relocated to:  ");
        puthex((unsigned long)&start);
        puts(" ");
        puthex((unsigned long)((unsigned long)&start + (4*num_words)));
        puts("\n");
    }

    /*
     * We link ourself to 0x00800000.  When we run, we relocate
     * ourselves there.  So we just need __image_begin for the
     * start. -- Tom
     */
    zimage_start = (char *)(unsigned long)(&__image_begin);
    zimage_size = (unsigned long)(&__image_end) -
                  (unsigned long)(&__image_begin);

    initrd_size = (unsigned long)(&__ramdisk_end) -
                  (unsigned long)(&__ramdisk_begin);

    /*
     * The zImage and initrd will be between start and _end, so they've
     * already been moved once.  We're good to go now. -- Tom
     */
    avail_ram = (char *)PAGE_ALIGN((unsigned long)_end);
    puts("zimage at:     ");
    puthex((unsigned long)zimage_start);
    puts(" ");
    puthex((unsigned long)(zimage_size+zimage_start));
    puts("\n");

    if ( initrd_size ) {
        puts("initrd at:     ");
        puthex((unsigned long)(&__ramdisk_begin));
        puts(" ");
        puthex((unsigned long)(&__ramdisk_end));
        puts("\n");
    }

    avail_ram = (char *)0x00400000;
    end_avail = (char *)0x00800000;
    puts("avail ram:     ");
    puthex((unsigned long)avail_ram);
    puts(" ");
    puthex((unsigned long)end_avail);
    puts("\n");

    if (keyb_present)
        CRT_tstc();  /* Forces keyboard to be initialized */
#ifdef CONFIG_GEMINI
    /*
     * If cmd_line is empty and cmd_preset is not, copy cmd_preset
     * to cmd_line.  This way we can override cmd_preset with the
     * command line from Smon.
     */

    if ( (cmd_line[0] == '\0') && (cmd_preset[0] != '\0'))
        memcpy (cmd_line, cmd_preset, sizeof(cmd_preset));
#endif

    /* Display standard Linux/PPC boot prompt for kernel args */
    puts("\nLinux/PPC load: ");
    cp = cmd_line;
    /* Seed the editable command line with the preset, then echo it.
     * NOTE(review): this unconditionally overwrites cmd_line with
     * cmd_preset (even outside CONFIG_GEMINI) — confirm intended. */
    memcpy (cmd_line, cmd_preset, sizeof(cmd_preset));
    while ( *cp ) putc(*cp++);

#ifdef INTERACTIVE_CONSOLE
    /*
     * If they have a console, allow them to edit the command line.
     * Otherwise, don't bother wasting the five seconds.
     */
    while (timer++ < 5*1000) {
        if (tstc()) {
            while ((ch = getc()) != '\n' && ch != '\r') {
                /* Test for backspace/delete */
                if (ch == '\b' || ch == '\177') {
                    if (cp != cmd_line) {
                        cp--;
                        puts("\b \b");
                    }
                    /* Test for ^x/^u (and wipe the line) */
                } else if (ch == '\030' || ch == '\025') {
                    while (cp != cmd_line) {
                        cp--;
                        puts("\b \b");
                    }
                } else {
                    *cp++ = ch;
                    putc(ch);
                }
            }
            break;  /* Exit 'timer' loop */
        }
        udelay(1000);  /* 1 msec */
    }
    *cp = 0;
#endif
    puts("\n");

    puts("Uncompressing Linux...");
    gunzip(NULL, 0x400000, zimage_start, &zimage_size);
    puts("done.\n");

    /* get the bi_rec address */
    rec = bootinfo_addr(zimage_size);

    /* We need to make sure that the initrd and bi_recs do not
     * overlap. */
    if ( initrd_size ) {
        unsigned long rec_loc = (unsigned long) rec;
        initrd_loc = (unsigned long)(&__ramdisk_begin);
        /* If the bi_recs are in the middle of the current
         * initrd, move the initrd to the next MB
         * boundary. */
        if ((rec_loc > initrd_loc) &&
                ((initrd_loc + initrd_size) > rec_loc)) {
            initrd_loc = _ALIGN((unsigned long)(zimage_size)
                                + (2 << 20) - 1, (2 << 20));
            memmove((void *)initrd_loc, &__ramdisk_begin,
                    initrd_size);
            puts("initrd moved:  ");
            puthex(initrd_loc);
            puts(" ");
            puthex(initrd_loc + initrd_size);
            puts("\n");
        }
    }

    bootinfo_init(rec);
    if ( TotalMemory )
        bootinfo_append(BI_MEMSIZE, sizeof(int), (void*)&TotalMemory);

    bootinfo_append(BI_CMD_LINE, strlen(cmd_line)+1, (void*)cmd_line);

    /* add a bi_rec for the initrd if it exists */
    if (initrd_size) {
        unsigned long initrd[2];

        initrd[0] = initrd_loc;
        initrd[1] = initrd_size;

        bootinfo_append(BI_INITRD, sizeof(initrd), &initrd);
    }
    puts("Now booting the kernel\n");
    serial_close(com_port);

    return rec;
}
Exemple #13
0
/*
 * disable_irq - mask ARM IRQs by setting the I bit (0x80) in the CPSR.
 *
 * Fix: the original issued three separate asm statements that passed a
 * value between them in r1 with no clobber list.  Independent asm
 * statements give the compiler license to use r1, reorder, or insert
 * code between them, so the sequence only worked by luck.  Combined
 * into one asm block with an explicit clobber list ("cc" because
 * msr cpsr also writes the flags; "memory" so the compiler does not
 * move memory accesses across the IRQ-state change).
 */
void disable_irq(void){
	__asm__ __volatile__(
		"mrs	r1, cpsr\n\t"		/* r1 = current program status */
		"orr	r1, r1, #0x80\n\t"	/* set the I (IRQ-disable) bit */
		"msr	cpsr, r1"		/* write back: IRQs now masked */
		: : : "r1", "cc", "memory");
}
Exemple #14
0
/*
 * enable_irq - unmask ARM IRQs by clearing the I bit (0x80) in the CPSR.
 *
 * Fixes: (1) the original used three separate asm statements passing a
 * value in r1 with no clobber list — the compiler may use r1 or reorder
 * code between independent asm statements; combined into one asm block
 * with explicit clobbers.  (2) The old comment on the bic line claimed
 * it "initializes r1 to 0x80"; bic actually clears the 0x80 bit.
 */
void enable_irq(void){
	__asm__ __volatile__(
		"mrs	r1, cpsr\n\t"		/* r1 = current program status */
		"bic	r1, r1, #0x80\n\t"	/* clear the I (IRQ-disable) bit */
		"msr	cpsr, r1"		/* write back: IRQs now enabled */
		: : : "r1", "cc", "memory");
}
void update_pulse_array ( void )
{
	// These declarations must be placed in the order specified in order to
	// ensure that they are assigned to the registers specified.
	uint16_t current_width ;		// R17:16
	uint16_t max_delta ;			// R19:18
	uint8_t current_width_frac ;	// R20
	uint8_t max_delta_frac ;		// R21
	uint16_t target_width ;
	uint8_t counter ;
	pulse_def_t * ptr_pulse ;
	
	for (
		counter = 0, ptr_pulse = & pulse_array [ 0 ] ;
		counter < 32 ;
		++ counter, ++ ptr_pulse )
	{
		// Copy pulse data to local scalar variables
		target_width = ptr_pulse -> target_width ;
		current_width = ptr_pulse -> current_width ;
		current_width_frac = ptr_pulse -> current_width_frac ;
		max_delta = ptr_pulse -> max_delta ;
		max_delta_frac = ptr_pulse -> max_delta_frac ;
        
		// If target_width is 0 or 65535, that indicates that the channel
		// should be continuous 0 or 65535.  In this case, just set the
		// current_width to 0 or 65535 and continue to the next pulse.
		if ( target_width == 0 || target_width == 65535 )
		{
			current_width = target_width ;
		}
		// For all other target widths, move the current_width closer
		// to target_width by the value in max_delta.
		else
		{
			// Add the offset to the pulse width.  The offset is in us, so
			// convert to cycles by multiplying by 14.7456:
			//   off_cyc = offs * 14.7456 * 4 / 4
			//           = ( offs * 59 ) >> 2
			target_width += ( (int16_t) pulse_offset_array [ counter ] * 59 ) >> 2 ;
			
			// Clip target width to min/max range
			if ( target_width < MIN_PULSE_WIDTH_CYCLES )
			{
				target_width = MIN_PULSE_WIDTH_CYCLES ;                     
			}
			else if ( target_width > MAX_PULSE_WIDTH_CYCLES )
			{
				target_width = MAX_PULSE_WIDTH_CYCLES ;
			}
			// If current width is 0 or 65535, meaning that the pulse is just
			// being started, then set current width to target width.
			if ( current_width == 0 || current_width == 65535 )
			{
				current_width = target_width ;
			}
			// Move current width towards target_width
    		if ( target_width > current_width )
    		{
    			if ( ( target_width - current_width ) < max_delta )
	   			{
    				current_width = target_width ;
    				current_width_frac = 0 ;
    			}
    			else
    			{
	    			// current_width += max_delta
	    			// Add max_delta to current_width, using 24-bit addition with
	    			// current_width_frac and max_delta_frac being the LSB.
#if defined(__CODEVISIONAVR__)
#line 529 "pulsedge.c"
	__asm__ __volatile__("add	R20, R21");
	__asm__ __volatile__("adc	R16, R18");
	__asm__ __volatile__("adc	R17, R19");
#line 534 "pulsedge.c"
#elif defined(__GNUC__)
#line 535 "pulsedge.c"
/* With gcc we can do this in a cleaner way: */
__asm__ __volatile__("add\t%0,%2\n\t"
		     "adc\t%A1,%A3\n\t"
		     "adc\t%B1,%B3"
		     : "=r" (current_width_frac), "=r" (current_width)
		     : "r" (max_delta_frac), "r" (max_delta),
		       "0" (current_width_frac), "1" (current_width));
#endif
#line 543 "pulsedge.c"
	    		}
    		}
    		else // current_width >= target_width
    		{
    			if ( ( current_width - target_width ) < max_delta )
    			{
    				current_width = target_width ;
    				current_width_frac = 0 ;
    			}
    			else
    			{
	    			// current_width -= max_delta
	    			// Subtract max_delta from current_width, using 24-bit math with
	    			// current_width_frac and max_delta_frac being the LSB.
#if defined(__CODEVISIONAVR__)
#line 558 "pulsedge.c"
	__asm__ __volatile__("sub	R20, R21");
	__asm__ __volatile__("sbc	R16, R18");
	__asm__ __volatile__("sbc	R17, R19");
#line 563 "pulsedge.c"
#elif defined(__GNUC__)
#line 564 "pulsedge.c"
/* With gcc we can do this in a cleaner way: */
__asm__ __volatile__("sub\t%0,%2\n\t"
		     "sbc\t%A1,%A3\n\t"
		     "sbc\t%B1,%B3"
		     : "=r" (current_width_frac), "=r" (current_width)
		     : "r" (max_delta_frac), "r" (max_delta),
		       "0" (current_width_frac), "1" (current_width));
#endif
#line 572 "pulsedge.c"
	    		}
    		}
    	}
    			
   		// Write current_width back to pulse_array
   		ptr_pulse -> current_width = current_width ;
   		ptr_pulse -> current_width_frac = current_width_frac ;
	}
Exemple #16
0
/*
 * image_proc - interactive image-processing menu loop.
 *
 * Repeatedly prompts on stdout, reads a one-character command from
 * stdin (only the last character before the newline counts), and
 * dispatches to the smoothing / sharpening / stretch / rotate routines.
 * Returns 0 when the user selects 'e' (the BMP header is freed then).
 *
 * Fix: invalid sub-menu input previously looped back via inline-asm
 * labels ("2:", "3:") and "jmp 2b"/"jmp 3b".  Branches hidden inside
 * inline asm are invisible to the compiler — it may cache values in
 * registers across the label, reorder code, or (since this function is
 * 'inline') duplicate the label and break assembly.  Replaced with
 * ordinary C labels and goto, which express the identical control flow.
 */
inline int
image_proc(u8 ** index, u8 * image_buf, u8 * color_buf, int width,
	   int height, BMP_HEADER * p_header)
{
	char interact = 0;
	int char_test = 0;

	while (1) {
		printf("\n\nDIP has already held your image, what's next?\n"
		       "type:\n"
		       "[s] -> smooth\n"
		       "[h] -> sharp\n"
		       "[l] -> enlarge / shrink\n"
		       "[r] -> rotate\n" "[e] -> exit\n\n");
		while ((char_test = getchar()) != '\n' && char_test != EOF) {
			interact = char_test;

			/*
			 * so significant here
			 * we need just a char, if user inputs such as 'sel',
			 * so the final char which installed in the var is
			 * 'l', not 's'
			 */
			while ((char_test = getchar()) != '\n'
			       && char_test != EOF) ;
			break;
		}

		switch (interact) {
		case 's':
			printf("which algorithm u wanna use?\n"
			       "type:\n"
			       "[k] -> k_near_average\n"
			       "[a] -> average_filter \t[1, 1, 1, 1, 1, 1, 1, 1, 1]\n"
			       "[m] -> median_filter\n");
read_smooth_choice:	/* was asm label "2:" */
			while ((char_test = getchar()) != '\n'
			       && char_test != EOF) {
				interact = char_test;
				while ((char_test = getchar()) != '\n'
				       && char_test != EOF) ;
				break;
			}

			switch (interact) {
			case 'k':
				smooth_avr_k(index, height, width);
				break;
			case 'a':
				smooth_avr_filter(index, height, width);
				break;
			case 'm':
				smooth_median_filter(index, height, width);
				break;
			default:
				printf("\nhey bro, type \"k\" or \"9\"\n");
				goto read_smooth_choice; /* was "jmp 2b" */
			}
			if (write_bmp(p_header, color_buf, image_buf)) {
				printf("Sorry, Failure!\n");
			}
			printf("Well Done!\n");
			break;
		case 'h':
			printf("which algorithm u wanna use?\n"
			       "type:\n"
			       "[l] -> laplacian \t[0, 1, 0; 1, -4, 1; 0, 1, 0]\n"
			       "[f] -> high pass filter [-1, -1, -1; -1, 9, -1; -1, -1, -1]\n"
			       "[a] -> ladder \t\t[-1, 1; -1, 1]\n");
read_sharp_choice:	/* was asm label "3:" */
			while ((char_test = getchar()) != '\n'
			       && char_test != EOF) {
				interact = char_test;
				while ((char_test = getchar()) != '\n'
				       && char_test != EOF) ;
				break;
			}

			switch (interact) {
			case 'a':
				sharp_ladder(index, height, width);
				break;
			case 'l':
				sharp_laplacian(index, height, width);
				break;
			case 'f':
				sharp_hpass_filter(index, height, width);
				break;
			default:
				printf("\nhey bro, type \"l\" or \"f\"\n");
				goto read_sharp_choice; /* was "jmp 3b" */
			}
			if (write_bmp(p_header, color_buf, image_buf)) {
				printf("Sorry, Failure!\n");
			}
			printf("Well Done!\n");
			break;
		case 'l':
			stretch(index, p_header, color_buf, width, height);
			break;
		case 'r':
			rotate(index, p_header, color_buf, width, height);
			break;
		case 'e':
			{
				int i = 0;
				/* NOTE(review): 'bp' is not declared in this
				 * function — presumably a file-scope global
				 * byte pointer; confirm. */
				for (; i < 10; i++) {
					printf("%x %c\n", *bp, *bp);
					bp++;
				}
				free(p_header);
				/* to avoid accessing freed memory */
				p_header = NULL;
			}
			return 0;
		default:
			printf("\nHey bro, please follow the rule\n");
			break;
		}
	}
}
Exemple #17
0
/*
 * Load the x86 interrupt descriptor table register from the global
 * 'idtp' descriptor ("lidt").  Must execute at privilege level 0.
 */
inline void idt_load() 
{
    __asm__ __volatile__ ("lidt (idtp)");
}
Exemple #18
0
/* lock_gate
   Allow multiple threads to read during a critical section, but only
   allow one writer. To avoid deadlocks, it keeps track of the exclusive
   owner - so that a thread can enter a gate as a reader and later
   upgrade to a writer, or start off a writer and call a function that
   requires a read or write lock. This function will block and spin if
   write access is requested and other threads are reading/writing, or
   read access is requested and another thread is writing.
   => gate = lock structure to modify
      flags = set either LOCK_READ or LOCK_WRITE, also set LOCK_SELFDESTRUCT
              to mark a gate as defunct, causing other threads to fail if
              they try to access it
   <= success or a failure code
*/
kresult lock_gate(rw_gate *gate, unsigned int flags)
{
#ifndef UNIPROC
   kresult err = success;
   unsigned int caller;

#ifdef LOCK_TIME_CHECK
   unsigned long long ticks = x86_read_cyclecount();
#endif

   /* sanity checks */
   if(!gate) return e_failure;
   if(!cpu_table) return success; /* only one processor running */
   
#ifdef LOCK_DEBUG
   if(cpu_table)
   {
      LOCK_DEBUG("[lock:%i] -> lock_gate(%p, %x) by thread %p\n", CPU_ID, gate, flags, cpu_table[CPU_ID].current);
   }
   else
   {
      LOCK_DEBUG("[lock:%i] -> lock_gate(%p, %x) during boot\n", CPU_ID, gate, flags);
   }
#endif
   
   /* cpu_table[CPU_ID].current cannot be lower than the kernel virtual base 
      so it won't collide with the processor's CPU_ID, which is used to
      identify the owner if no thread is running */
   if(cpu_table[CPU_ID].current)
      caller = (unsigned int)cpu_table[CPU_ID].current;
   else
      caller = (CPU_ID) + 1; /* zero means no owner, CPU_IDs start at zero... */
   
   while(1)
   {
      lock_spin(&(gate->spinlock));
      
      /* we're in - is the gate claimed? */
      if(gate->owner)
      {
         /* it's in use - but is it another thread? */
         if(gate->owner != caller)
         {
            /* another thread has it :( perform checks */
            
            /* this lock is defunct? */
            if(gate->flags & LOCK_SELFDESTRUCT)
            {
               err = e_failure;
               goto exit_lock_gate;
            }
            
            /* if we're not trying to write and the owner isn't
               writing and a writer isn't waiting, then it's safe to progress */
            if(!(flags & LOCK_WRITE) && !(gate->flags & LOCK_WRITE) && !(gate->flags & LOCK_WRITEWAITING))
               goto exit_lock_gate;

            /* if we're trying to write then set a write-wait flag.
               when this flag is set, stop allowing new reading threads.
               this should prevent writer starvation */
            if(flags & LOCK_WRITE)
               gate->flags |= LOCK_WRITEWAITING;
         }
         else
         {
            /* if the gate's owned by this thread, then carry on */
            /* NOTE(review): the stored gate->flags are not updated here, so a
               re-entrant caller requesting LOCK_WRITE while holding a read
               lock does not actually upgrade the gate - confirm this is the
               intended semantics */
            gate->refcount++; /* keep track of the number of times we're entering */
            goto exit_lock_gate;
         }
      }
      else
      {
         /* no one owns this gate, so make our mark */
         gate->owner = caller;
         gate->flags = flags;
         gate->refcount = 1; /* first in */
         goto exit_lock_gate;
      }
      
      unlock_spin(&(gate->spinlock));
      
      /* small window of opportunity for the other thread to
         release the gate :-/ */
      /* hint to newer processors that this is a spin-wait loop or
         NOP for older processors */
      __asm__ __volatile__("pause");
      
#ifdef LOCK_TIME_CHECK
      if((x86_read_cyclecount() - ticks) > LOCK_TIMEOUT)
      {
         /* prevent other cores from trashing the output debug while we dump this info */
         lock_spin(&lock_time_check_lock);
         
         KOOPS_DEBUG("[lock:%i] OMGWTF waited too long for gate %p to become available (flags %x)\n"
                     "         lock is owned by %p", CPU_ID, gate, flags, gate->owner);
         if(gate->owner > KERNEL_SPACE_BASE)
         {
            thread *t = (thread *)(gate->owner);
            KOOPS_DEBUG(" (thread %i process %i on cpu %i)", t->tid, t->proc->pid, t->cpu);
         }
         KOOPS_DEBUG("\n");
         debug_stacktrace();
         
         unlock_spin(&lock_time_check_lock);
         
         debug_panic("deadlock in kernel: we can't go on together with suspicious minds");
      }
#endif
   }

exit_lock_gate:
   /* release the gate so others can inspect it */
   unlock_spin(&(gate->spinlock));
   
   LOCK_DEBUG("[lock:%i] locked %p with %x\n", CPU_ID, gate, flags);
   
   return err;
#else
   /* uniprocessor build: no other core can race us, so locking is a
      no-op. The function must still return a value, though - previously
      the whole body was compiled out and control fell off the end of a
      non-void function, which is undefined behaviour. */
   (void)gate;
   (void)flags;
   return success;
#endif
}
Exemple #19
0
/** @brief Run the application.
 * @note Registers are not initialized.
 */
/** @brief Run the application.
 * @note Registers are not initialized.
 */
static void run_app()
{
  /* AVR: clear the Z pointer (r31:r30 = 0x0000) and do an indirect jump,
   * i.e. transfer control to flash address 0 - presumably the application
   * reset vector below this bootloader; confirm against the linker map.
   * This never returns. */
  __asm__ __volatile__ ("ldi r30,0\n");
  __asm__ __volatile__ ("ldi r31,0\n");
  __asm__ __volatile__ ("ijmp\n");
}
/* Reprogram the Blackfin PLL, clock dividers and external memory
 * controller (SDRAM or DDR, depending on the part).  The sequence is
 * strictly ordered: DMA is quiesced first, wakeup sources are limited
 * so the mandatory post-PLL-write IDLE resumes correctly, external
 * memory is put into self-refresh around the PLL change, and the
 * controller is reinitialised afterwards.  Do not reorder. */
void init_clocks(void)
{
	/* Kill any active DMAs as they may trigger external memory accesses
	 * in the middle of reprogramming things, and that'll screw us up.
	 * For example, any automatic DMAs left by U-Boot for splash screens.
	 */
	size_t i;
	for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
		struct dma_register *dma = dma_io_base_addr[i];
		dma->cfg = 0;
	}

	do_sync();

#ifdef SIC_IWR0
	/* allow only the PLL-wakeup interrupt (bit 0) to exit IDLE */
	bfin_write_SIC_IWR0(IWR_ENABLE(0));
# ifdef SIC_IWR1
	/* BF52x system reset does not properly reset SIC_IWR1 which
	 * will screw up the bootrom as it relies on MDMA0/1 waking it
	 * up from IDLE instructions.  See this report for more info:
	 * http://blackfin.uclinux.org/gf/tracker/4323
	 */
	if (ANOMALY_05000435)
		bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
	else
		bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
# endif
# ifdef SIC_IWR2
	bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
# endif
#else
	bfin_write_SIC_IWR(IWR_ENABLE(0));
#endif
	do_sync();
#ifdef EBIU_SDGCTL
	/* SDRAM parts: request self-refresh before touching the PLL */
	bfin_write_EBIU_SDGCTL(bfin_read_EBIU_SDGCTL() | SRFS);
	do_sync();
#endif

#ifdef CLKBUFOE
	/* enable the CLKIN buffer output; IDLE lets the change take effect */
	bfin_write16(VR_CTL, bfin_read_VR_CTL() | CLKBUFOE);
	do_sync();
	__asm__ __volatile__("IDLE;");
#endif
	bfin_write_PLL_LOCKCNT(0x300);
	do_sync();
	/* write the new PLL multiplier, then IDLE until the PLL relocks */
	bfin_write16(PLL_CTL, PLL_CTL_VAL);
	__asm__ __volatile__("IDLE;");
	bfin_write_PLL_DIV(CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV);
#ifdef EBIU_SDGCTL
	/* SDRAM: restore refresh rate and controller config for new SCLK */
	bfin_write_EBIU_SDRRC(mem_SDRRC);
	bfin_write_EBIU_SDGCTL(mem_SDGCTL);
#else
	/* DDR parts: drop self-refresh request, reset and reprogram */
	bfin_write_EBIU_RSTCTL(bfin_read_EBIU_RSTCTL() & ~(SRREQ));
	do_sync();
	bfin_write_EBIU_RSTCTL(bfin_read_EBIU_RSTCTL() | 0x1);
	bfin_write_EBIU_DDRCTL0(mem_DDRCTL0);
	bfin_write_EBIU_DDRCTL1(mem_DDRCTL1);
	bfin_write_EBIU_DDRCTL2(mem_DDRCTL2);
#ifdef CONFIG_MEM_EBIU_DDRQUE
	bfin_write_EBIU_DDRQUE(CONFIG_MEM_EBIU_DDRQUE);
#endif
#endif
	do_sync();
	/* dummy external-memory read to make sure the controller is alive */
	bfin_read16(0);
}
Exemple #21
0
static void do_encoder(uint32_t enc)
{
  uint32_t i;
  int32_t lspeed, rspeed;
  int32_t diff;

  /* reset odometer counters */
  rodo = 0;
  lodo = 0;

  /* set pwms from speed */
  rspeed = clamp(speed / 2, 0, 128);
  lspeed = rspeed;
  set_pwm0(128 + (ldir == 1 ? lspeed : -lspeed));
  set_pwm1(128 + (rdir == 1 ? rspeed : -rspeed));

  /* wait until dont */
  i = 0;
  while (i < enc)
  {
    i += (lodo + rodo) / 2;

    diff = (int32_t)(lenc - renc);

    if (diff > 0)
    {
      /* left faster than right */
      if ((lspeed > speed) || (rspeed > 244))
	lspeed -= CONFIG_SPEED_INTEGRATOR;
      else
	rspeed += CONFIG_SPEED_INTEGRATOR;
    }
    else if (diff < 0)
    {
      /* right faster than left */
      if ((rspeed > speed) || (lspeed > 244))
	rspeed -= CONFIG_SPEED_INTEGRATOR;
      else
	lspeed += CONFIG_SPEED_INTEGRATOR;
    }

    /* reset the odometer */
    rodo = 0;
    lodo = 0;

    /* set motor pwms */
    lspeed = clamp(lspeed / 2, 0, 128);
    rspeed = clamp(rspeed / 2, 0, 128);
    set_pwm0(128 + (ldir == 1 ? lspeed : -lspeed));
    set_pwm1(128 + (rdir == 1 ? rspeed : -rspeed));

    /* small delay */
    {
      volatile int i;
      for (i = 0; i < 1000; ++i) __asm__ __volatile__ ("nop\n\t");
    }
  }

  /* stop the motor */
  set_pwm0(0);
  set_pwm1(0);
}
Exemple #22
0
/*
 * To the best of our knowledge Windows compatible x86 hardware expects
 * the following on reboot:
 *
 * 1) If the FADT has the ACPI reboot register flag set, try it
 * 2) If still alive, write to the keyboard controller
 * 3) If still alive, write to the ACPI reboot register again
 * 4) If still alive, write to the keyboard controller again
 * 5) If still alive, call the EFI runtime service to reboot
 * 6) If no EFI runtime service, call the BIOS to do a reboot
 *
 * We default to following the same pattern. We also have
 * two other reboot methods: 'triple fault' and 'PCI', which
 * can be triggered via the reboot= kernel boot option or
 * via quirks.
 *
 * This means that this function can never return, it can misbehave
 * by not rebooting properly and hanging.
 */
/* Walk the chain of reboot methods described in the comment above,
 * falling through to the next method whenever the current one fails to
 * reset the machine.  Loops forever; this function never returns. */
static void native_machine_emergency_restart(void)
{
	int i;
	int attempt = 0;
	int orig_reboot_type = reboot_type;
	unsigned short mode;

	if (reboot_emergency)
		emergency_vmx_disable_all();

	tboot_shutdown(TB_SHUTDOWN_REBOOT);

	/* Tell the BIOS if we want cold or warm reboot */
	/* 0x472 is the BIOS reset-flag word in low memory; 0x1234 = warm */
	mode = reboot_mode == REBOOT_WARM ? 0x1234 : 0;
	*((unsigned short *)__va(0x472)) = mode;

	for (;;) {
		/* Could also try the reset bit in the Hammer NB */
		switch (reboot_type) {
		case BOOT_ACPI:
			acpi_reboot();
			reboot_type = BOOT_KBD;
			break;

		case BOOT_KBD:
			mach_reboot_fixups(); /* For board specific fixups */

			/* writing 0xfe to the i8042 command port pulses the
			 * CPU reset line; retry a few times with settling
			 * delays in case the controller is busy */
			for (i = 0; i < 10; i++) {
				kb_wait();
				udelay(50);
				outb(0xfe, 0x64); /* Pulse reset low */
				udelay(50);
			}
			/* per the protocol above: retry ACPI once after the
			 * first keyboard attempt, then move on to EFI */
			if (attempt == 0 && orig_reboot_type == BOOT_ACPI) {
				attempt = 1;
				reboot_type = BOOT_ACPI;
			} else {
				reboot_type = BOOT_EFI;
			}
			break;

		case BOOT_EFI:
			if (efi_enabled(EFI_RUNTIME_SERVICES))
				efi.reset_system(reboot_mode == REBOOT_WARM ?
						 EFI_RESET_WARM :
						 EFI_RESET_COLD,
						 EFI_SUCCESS, 0, NULL);
			reboot_type = BOOT_BIOS;
			break;

		case BOOT_BIOS:
			machine_real_restart(MRR_BIOS);

			/* We're probably dead after this, but... */
			reboot_type = BOOT_CF9_SAFE;
			break;

		case BOOT_CF9_FORCE:
			port_cf9_safe = true;
			/* Fall through */

		case BOOT_CF9_SAFE:
			if (port_cf9_safe) {
				u8 reboot_code = reboot_mode == REBOOT_WARM ?  0x06 : 0x0E;
				u8 cf9 = inb(0xcf9) & ~reboot_code;
				outb(cf9|2, 0xcf9); /* Request hard reset */
				udelay(50);
				/* Actually do the reset */
				outb(cf9|reboot_code, 0xcf9);
				udelay(50);
			}
			reboot_type = BOOT_TRIPLE;
			break;

		case BOOT_TRIPLE:
			/* empty IDT + int3 => triple fault => CPU reset */
			load_idt(&no_idt);
			__asm__ __volatile__("int3");

			/* We're probably dead after this, but... */
			reboot_type = BOOT_KBD;
			break;
		}
	}
}
/* Test/benchmark helper: a noinline sink for four int arguments.  The
 * empty volatile asm is an optimisation barrier that keeps the call and
 * its arguments from being optimised away.  The original version fell
 * off the end of a non-void function, which is undefined behaviour if
 * the caller uses the result; return 0 so the value is well defined. */
volatile int __attribute__((noinline)) foo4 (int a0, int a1, int a2, int a3) {
    __asm__ __volatile__("");
    (void)a0; (void)a1; (void)a2; (void)a3;
    return 0;
}
Exemple #24
0
/* Read the x86 time-stamp counter.
 *
 * RDTSC returns the 64-bit counter split across EDX:EAX.  The original
 * version issued the instruction without output constraints and fell
 * off the end of the function, relying by accident on the return-value
 * registers - undefined behaviour in C.  Capture the halves explicitly
 * and combine them. */
static unsigned long long get_rdtsc()
{
    unsigned int lo, hi;
    __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
    return ((unsigned long long)hi << 32) | lo;
}
Exemple #25
0
/* TWI (I2C) slave callback: a master has addressed us for a write.
 * ACKs the address phase and resets the LED write position so the
 * incoming bytes start at the first LED. */
void twi__slave__on_data_reception_started(void) {
    /* named asm label only - makes the callback easy to find when
       disassembling/debugging; emits no instructions */
    __asm__ __volatile__("twi__slave__on_data_reception_started:");
    twi__continue(true, false);
    data__leds__put_position_reset();
}
Exemple #26
0
/* Receive one 32-bit Manchester-encoded frame (24 data bits + 8-bit CRC)
 * into *data by software bit-banging on 'pinstat'.
 *
 * The implementation is cycle-counted: every delay constant compensates
 * for the instructions executed around it, so the statements must not be
 * reordered and the CRC is interleaved with reception to keep the timing
 * budget.  Do not "clean up" the gotos or the magic delays.
 *
 * Returns COMM_SUCCESS, CRC_ERROR, 2 if the start bit never arrived, or
 * 0x20/0x40/0x60 + bit-index codes identifying where an expected edge
 * was missed. */
uint8_t manchester_receive(data_t* data)  // Function call 4 clk, function overhead at start 5 clk
{
	// Grand total from the edge 15 us + 1.125 us = 16.125 us.
	uint8_t time_tmp;
	uint8_t remainder = CRC_INITIAL_REMAINDER; // 0 clk

	PULLUP_OFF();

	(*data).abcd = 0;  // 8 clk = 1 us.

	//if(pinstat)
	//	return 1;

	// Wait for high transition
	// Already consumed 17.125 us. For +13 us time window, it needs to come within (25+13) - 17.125 us = 20.875 us = 167 clk

	// Require two successive high readings.
	// LOOP: SBIS 2 + SBIC 2 + SUBI 1 + BRCC 2 = 7 clk /round.
	time_tmp = 24;	//167/7 would be 23.85, round to 24.

	while(time_tmp--)
	{
		if(pinstat && pinstat)
			goto OK1;

	}

	// start bit never came: release the bus and report timeout
	PULLUP_ON();
	return 2;

	OK1:

	// Now we are exactly aligned at first '1', which is discarded.

	_delay_us(10.125 + 1.5); // Compensation for CRC delay, see below.
	                         // +1.5 = measured correction.

	for(uint8_t i = 0; i < 32; i++)
	{
		(*data).abcd <<= 1;  // 20 clk = 2.5 us

		// Align at 35.0 us from previous data bit. 37.5 us is
		// halfway between the optional edge and the next data bit.
		// Sample the value between 35.0 us and 40.0 us.
		// The expected edge is at 50 us, but allow it some window
		// due to clock differences.

		del_us(35.0 - 2.5 - 10.125 - 2.5 + 1.0); // CRC calculation uses 10.125 us - see
		                             // below.
									 // -2.5 = measured correction.


		// majority-vote the pin over 8 samples to reject glitches
		uint8_t n_low_readings = 0;
		for(uint8_t i = 8; i>0; --i)  // 5 clk per round (regardless of pin value.)
		{
			if(!pinstat)
				n_low_readings++;
		} // 40 clk = 5 us.


		// Num of zeroes: 0  1  2   3  4   5  6  7
		//                H I G H | ?  ? | L  O  W

		if(n_low_readings < 3) 
		{	// High -- expect low. 
			del_us(10-WINDOW_BEFORE);
			// time windows of +/-10 us = 20 us = 160 clock cycles starts here.
			// LOOP: SBIS 2 + SBIC 2 + SUBI 1 + BRCC 2 = 7 clk /round.
			time_tmp = 	((WINDOW_BEFORE+WINDOW_AFTER)*8)/7;
			while(time_tmp--)
			{ // Require two successive low readings.
				if((!pinstat) && (!pinstat))
					goto OK2;
			}
			PULLUP_ON();
			return 0x40+i;

			OK2:
			// keep both branches cycle-balanced
			__asm__ __volatile__ ("nop");

		}
		else if(n_low_readings > 4) // low -- expect high
		{
			del_us(10-WINDOW_BEFORE);
			// time windows of +/-10 us = 20 us = 160 clock cycles starts here.
			// LOOP: SBIS 2 + SBIC 2 + SUBI 1 + BRCC 2 = 7 clk /round.
			time_tmp = 	((WINDOW_BEFORE+WINDOW_AFTER)*8)/7;
			while(time_tmp--)
			{ // Require two successive high readings.
				if(pinstat && pinstat)
					goto OK3;
			}
			PULLUP_ON();
			return 0x60+i;

			OK3:

			(*data).d |= 1; // 1 clk = 0.125 us

		}
		else
		{
			// 3 or 4 low readings: ambiguous level, give up
			PULLUP_ON();
			return 0x20+i;
		}

		// Here, we are aligned perfectly again.


		// At the same time, calculate CRC8. Calculate every time 8 bits have been received,
		// but of course skip the last octet which is the CRC.

		// Consume a constant amount of time here, which can be then subtracted from
		// the delay at the beginning of the loop.

		if(i==7 || i==15 || i==23)  // 6 cycles used when not true, 3...7 if true.
		{
			// We have our latest full byte in data.d
			remainder ^= (*data).d;        // 3 cycles
			CALC_CRC(remainder);
			// Total 3+48+24 = 75 cycles = 9.375 us.
		}
		else
		{
			_delay_us(9.375);
		}
		// In total, 10.125 us was spent for the if + CRC.

	}

	PULLUP_ON();

	// the last received octet ((*data).d) is the transmitted CRC;
	// it must match the running remainder
	if(remainder != (*data).d)
		return CRC_ERROR;

	return COMM_SUCCESS;
}
Exemple #27
0
/* TWI (I2C) slave callback: a master is reading from us.  Replies with
 * the fixed byte 0x55 and then NACKs so the master stops after one byte. */
void twi__slave__on_data_byte_requested(void) {
    /* named asm label only - a disassembly/debugging landmark, no code */
    __asm__ __volatile__("twi__slave__on_data_byte_requested:");
    twi__data__set(0x55);
    twi__continue(false, false);
}
Exemple #28
0
/* Test helper: a noinline no-op sink for six ints and a pointer.  The
 * empty volatile asm acts as an optimisation barrier so calls to bar()
 * and the computation of its arguments are not optimised away. */
void __attribute__((noinline)) bar (int a, int b, int c, int d, int e, int f, int *g)
{
  __asm__ __volatile__ ("");
}
Exemple #29
0
/*
 * Interactively rotate 'image' (width x height, one byte per pixel) by a
 * user-supplied angle in degrees and write the result out as a BMP.
 * The user picks the resampling method: 'a' = nearest neighbour,
 * 'b' = bilinear interpolation.
 *
 * Fixes over the previous version:
 *  - char_test is an int: getchar() returns int, and EOF cannot be
 *    reliably detected in a plain char (CERT FIO34-C)
 *  - flag_rotate is initialised, so an empty input line can no longer
 *    cause the switch to read an indeterminate value
 *  - the scanf() result is checked (malformed input => angle 0)
 *  - the "__asm__(\"5:\")" / "jmp 5b" retry hack is replaced by a plain
 *    C label and goto; jumping across compiler-generated code with
 *    inline asm is unsafe under optimisation and non-portable
 */
void
rotate(u8** image, BMP_HEADER* p_header, u8* color_buf, int width, int height)
{
	int	char_test;
	int	i, j, flag_rotate = 0;
	float	angle = 0.0f, x, y;

	printf("Give a rotate angle (whatever >0 or <0)\n");
	if (scanf("%f", &angle) != 1)
		angle = 0.0f;	/* bad input: fall back to no rotation */
	/* drain the rest of the input line */
	while ((char_test = getchar()) != '\n' && char_test != EOF) ;

	printf("Choose one algorithm\n"
		"[a] -> adjacent pixel\n"
		"[b] -> bilinear interposition\n");
retry:
	while ((char_test = getchar()) != '\n' && char_test != EOF) {
		flag_rotate = char_test;

		/* not *++bp, bp is a global-var */
		*bp	 = '\n';
	}

	switch (flag_rotate) {
		case 'b':
			flag_rotate = 1;
			break;
		case 'a':
			flag_rotate = 0;
			break;
		default:
			printf("Hey bro, type 'a' or 'b'\n");
			goto retry;	/* re-prompt until 'a' or 'b' */
	}

	float	radian	=	RADIAN(angle);
	float	sina	=	sin(radian);
	float	cosa	=	cos(radian);

	/*
	 * Corners of the source image, with (x1, y1) as the origin:
	 *   (x2,y2) top-left, (x4,y4) top-right,
	 *   (x1,y1) bottom-left, (x3,y3) bottom-right.
	 * Below are their positions after an anti-clockwise rotation.
	 */
	float	x1	=	0;
	float	x2	=	0 - height * sina;
	float	x3	=	0 + width * cosa;
	float	x4	=	0 + width * cosa - height * sina;

	float	y1	=	0;
	float	y2	=	0 + height * cosa;
	float	y3	=	0 + width * sina;
	float	y4	=	0 + width * sina + height * cosa;

	/*
	 * make sure that the image always locates in the positive x/y
	 * quadrant, so work out how far the rotated corners stray
	 */
	float	x_offset =	MAX(MAX(-x2, -x3), -x4);
	float	y_offset =	MAX(MAX(-y2, -y3), -y4);
	x_offset	=	x_offset > 0 ? x_offset : 0;
	y_offset	=	y_offset > 0 ? y_offset : 0;

	int	width_new	=	ceil(MAX(fabs(x1 - x4), fabs(x2 - x3)));
	int	height_new	=	ceil(MAX(fabs(y1 - y4), fabs(y2 - y3)));

	u8*	image_new[height_new];

	/* pad the row width up to the BMP 4-byte boundary */
	width_pad(&width_new);

	/* everything for making a new image has done, now calloc */
	calloc_buf(image_new, width_new, height_new);

	/* inverse mapping: walk every destination pixel and sample the
	 * source, so the output has no holes */
	for (i = 0; i < height_new; i++) {
		for (j = 0; j < width_new; j++) {
			/* it's image new here, so clock-wise */
			x	=	(  (j - x_offset) * cosa + (i - y_offset) * sina);
			y	=	(- (j - x_offset) * sina + (i - y_offset) * cosa);

			/*
			 * only pixels that map back inside the source image
			 * get a value; the rest stay at the calloc'd zero
			 */
			if (y >= 0 && y < height && x >= 0 && x < width) {
				image_new[i][j] = flag_rotate ? \
						  bilinear_interposition(image, x, y, width, height) : \
						  image[(int)y][(int)x];
			}
		}
	}

	if (write_bmp_calloc(image_new, color_buf, width_new, height_new, p_header)) {
		printf("Sorry, Failure!\n");
		exit(EXIT_FAILURE);
	}

	free_buf(image_new, width_new, height_new);
	printf("\nRotate Done!\tWhat's more?\n");
}
Exemple #30
0
/* Probe the host CPU once at start-up and fill in the global VEX
 * architecture descriptors (va, vai) plus the per-arch VG_(machine_*)
 * globals.  On x86 the probe uses CPUID; on ppc32/ppc64 optional
 * instruction sets are detected by executing candidate instructions
 * under a SIGILL handler.  Returns True if the CPU is supported,
 * False otherwise.  Must be called exactly once. */
Bool VG_(machine_get_hwcaps)( void )
{
   vg_assert(hwcaps_done == False);
   hwcaps_done = True;

   // Whack default settings into vai, so that we only need to fill in
   // any interesting bits.
   LibVEX_default_VexArchInfo(&vai);

#if defined(VGA_x86)
   { Bool have_sse1, have_sse2;
     UInt eax, ebx, ecx, edx;

     if (!VG_(has_cpuid)())
        /* we can't do cpuid at all.  Give up. */
        return False;

     VG_(cpuid)(0, &eax, &ebx, &ecx, &edx);
     if (eax < 1)
        /* we can't ask for cpuid(x) for x > 0.  Give up. */
        return False;

     /* get capabilities bits into edx */
     VG_(cpuid)(1, &eax, &ebx, &ecx, &edx);

     /* CPUID leaf 1, EDX feature bits 25/26 */
     have_sse1 = (edx & (1<<25)) != 0; /* True => have sse insns */
     have_sse2 = (edx & (1<<26)) != 0; /* True => have sse2 insns */

     if (have_sse2 && have_sse1) {
        va          = VexArchX86;
        vai.hwcaps  = VEX_HWCAPS_X86_SSE1;
        vai.hwcaps |= VEX_HWCAPS_X86_SSE2;
        VG_(machine_x86_have_mxcsr) = 1;
        return True;
     }

     if (have_sse1) {
        va          = VexArchX86;
        vai.hwcaps  = VEX_HWCAPS_X86_SSE1;
        VG_(machine_x86_have_mxcsr) = 1;
        return True;
     }

     va         = VexArchX86;
     vai.hwcaps = 0; /*baseline - no sse at all*/
     VG_(machine_x86_have_mxcsr) = 0;
     return True;
   }

#elif defined(VGA_amd64)
   /* SSE2 is architecturally guaranteed on amd64, so no probing needed */
   vg_assert(VG_(has_cpuid)());
   va         = VexArchAMD64;
   vai.hwcaps = 0; /*baseline - SSE2 */
   return True;

#elif defined(VGA_ppc32)
   { /* ppc32 doesn't seem to have a sane way to find out what insn
        sets the CPU supports.  So we have to arse around with
        SIGILLs.  Yuck. */
     vki_sigset_t         saved_set, tmp_set;
     struct vki_sigaction saved_act, tmp_act;

     volatile Bool have_F, have_V, have_FX, have_GX;
     Int r;

     VG_(sigemptyset)(&tmp_set);
     VG_(sigaddset)(&tmp_set, VKI_SIGILL);

     r = VG_(sigprocmask)(VKI_SIG_UNBLOCK, &tmp_set, &saved_set);
     vg_assert(r == 0);

     r = VG_(sigaction)(VKI_SIGILL, NULL, &saved_act);
     vg_assert(r == 0);
     tmp_act = saved_act;

     /* NODEFER: signal handler does not return (from the kernel's point of
        view), hence if it is to successfully catch a signal more than once,
        we need the NODEFER flag. */
     tmp_act.sa_flags &= ~VKI_SA_RESETHAND;
     tmp_act.sa_flags &= ~VKI_SA_SIGINFO;
     tmp_act.sa_flags |=  VKI_SA_NODEFER;

     /* standard FP insns */
     /* probe pattern: assume present, execute one instruction; the
        SIGILL handler longjmps back and we mark it absent */
     have_F = True;
     tmp_act.ksa_handler = handler_sigill;
     r = VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);
     vg_assert(r == 0);
     if (__builtin_setjmp(env_sigill)) {
        have_F = False;
     } else {
        __asm__ __volatile__(".long 0xFC000090"); /*fmr 0,0 */
     }

     /* Altivec insns */
     have_V = True;
     tmp_act.ksa_handler = handler_sigill;
     r = VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);
     vg_assert(r == 0);
     if (__builtin_setjmp(env_sigill)) {
        have_V = False;
     } else {
        /* Unfortunately some older assemblers don't speak Altivec (or
           choose not to), so to be safe we directly emit the 32-bit
           word corresponding to "vor 0,0,0".  This fixes a build
           problem that happens on Debian 3.1 (ppc32), and probably
           various other places. */
        __asm__ __volatile__(".long 0x10000484"); /*vor 0,0,0*/
     }

     /* General-Purpose optional (fsqrt, fsqrts) */
     have_FX = True;
     tmp_act.ksa_handler = handler_sigill;
     r = VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);
     vg_assert(r == 0);
     if (__builtin_setjmp(env_sigill)) {
        have_FX = False;
     } else {
        __asm__ __volatile__(".long 0xFC00002C"); /*fsqrt 0,0 */
     }

     /* Graphics optional (stfiwx, fres, frsqrte, fsel) */
     have_GX = True;
     tmp_act.ksa_handler = handler_sigill;
     r = VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);
     vg_assert(r == 0);
     if (__builtin_setjmp(env_sigill)) {
        have_GX = False;
     } else {
        __asm__ __volatile__(".long 0xFC000034"); /* frsqrte 0,0 */
     }

     /* restore the original SIGILL disposition and signal mask */
     r = VG_(sigaction)(VKI_SIGILL, &saved_act, NULL);
     vg_assert(r == 0);
     r = VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_set, NULL);
     vg_assert(r == 0);
     /*
        VG_(printf)("F %d V %d FX %d GX %d\n", 
                    (Int)have_F, (Int)have_V, (Int)have_FX, (Int)have_GX);
     */
     /* Make FP a prerequisite for VMX (bogusly so), and for FX and GX. */
     if (have_V && !have_F)
        have_V = False;
     if (have_FX && !have_F)
        have_FX = False;
     if (have_GX && !have_F)
        have_GX = False;

     VG_(machine_ppc32_has_FP)  = have_F ? 1 : 0;
     VG_(machine_ppc32_has_VMX) = have_V ? 1 : 0;

     va = VexArchPPC32;

     vai.hwcaps = 0;
     if (have_F)  vai.hwcaps |= VEX_HWCAPS_PPC32_F;
     if (have_V)  vai.hwcaps |= VEX_HWCAPS_PPC32_V;
     if (have_FX) vai.hwcaps |= VEX_HWCAPS_PPC32_FX;
     if (have_GX) vai.hwcaps |= VEX_HWCAPS_PPC32_GX;

     /* But we're not done yet: VG_(machine_ppc32_set_clszB) must be
        called before we're ready to go. */
     return True;
   }

#elif defined(VGA_ppc64)
   { /* Same idiocy as for ppc32 - arse around with SIGILLs. */
     vki_sigset_t         saved_set, tmp_set;
     struct vki_sigaction saved_act, tmp_act;

     volatile Bool have_F, have_V, have_FX, have_GX;

     VG_(sigemptyset)(&tmp_set);
     VG_(sigaddset)(&tmp_set, VKI_SIGILL);

     VG_(sigprocmask)(VKI_SIG_UNBLOCK, &tmp_set, &saved_set);

     VG_(sigaction)(VKI_SIGILL, NULL, &saved_act);
     tmp_act = saved_act;

     /* NODEFER: signal handler does not return (from the kernel's point of
        view), hence if it is to successfully catch a signal more than once,
        we need the NODEFER flag. */
     tmp_act.sa_flags &= ~VKI_SA_RESETHAND;
     tmp_act.sa_flags &= ~VKI_SA_SIGINFO;
     tmp_act.sa_flags |=  VKI_SA_NODEFER;

     /* standard FP insns */
     have_F = True;
     tmp_act.ksa_handler = handler_sigill;
     VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);
     if (__builtin_setjmp(env_sigill)) {
        have_F = False;
     } else {
        __asm__ __volatile__("fmr 0,0");
     }

     /* Altivec insns */
     have_V = True;
     tmp_act.ksa_handler = handler_sigill;
     VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);
     if (__builtin_setjmp(env_sigill)) {
        have_V = False;
     } else {
        __asm__ __volatile__(".long 0x10000484"); /*vor 0,0,0*/
     }

     /* General-Purpose optional (fsqrt, fsqrts) */
     have_FX = True;
     tmp_act.ksa_handler = handler_sigill;
     VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);
     if (__builtin_setjmp(env_sigill)) {
        have_FX = False;
     } else {
        __asm__ __volatile__(".long 0xFC00002C"); /*fsqrt 0,0*/
     }

     /* Graphics optional (stfiwx, fres, frsqrte, fsel) */
     have_GX = True;
     tmp_act.ksa_handler = handler_sigill;
     VG_(sigaction)(VKI_SIGILL, &tmp_act, NULL);
     if (__builtin_setjmp(env_sigill)) {
        have_GX = False;
     } else {
        __asm__ __volatile__(".long 0xFC000034"); /*frsqrte 0,0*/
     }

     /* restore the original SIGILL disposition and signal mask */
     VG_(sigaction)(VKI_SIGILL, &saved_act, NULL);
     VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_set, NULL);
     /*
     if (0)
        VG_(printf)("F %d V %d FX %d GX %d\n", 
                    (Int)have_F, (Int)have_V, (Int)have_FX, (Int)have_GX);
     */
     /* on ppc64, if we don't even have FP, just give up. */
     if (!have_F)
        return False;

     VG_(machine_ppc64_has_VMX) = have_V ? 1 : 0;

     va = VexArchPPC64;

     vai.hwcaps = 0;
     if (have_V)  vai.hwcaps |= VEX_HWCAPS_PPC64_V;
     if (have_FX) vai.hwcaps |= VEX_HWCAPS_PPC64_FX;
     if (have_GX) vai.hwcaps |= VEX_HWCAPS_PPC64_GX;

     /* But we're not done yet: VG_(machine_ppc64_set_clszB) must be
        called before we're ready to go. */
     return True;
   }

#else
#  error "Unknown arch"
#endif
}