/*--------------------------------------------
| Name:        _tiny_elfloader
| Description:
| Parameters:  none
| Return Type: none
| Comments:
| See:
----------------------------------------------*/
unsigned long _tiny_elfloader(unsigned long flash_base, unsigned long base) {
   Elf32_Ehdr ehdr;
   Elf32_Phdr phdr[MAX_PHDR];
   unsigned long offset = 0;
   int phx = 0;
   int len = 0;
   int ch;   /* must be an int: _elf_getc() returns a negative value on error */
   unsigned char *addr;
   unsigned long addr_offset = 0;
   unsigned long highest_address = 0;
   unsigned long lowest_address = 0xFFFFFFFF;
   const char *SHORT_DATA = "error: short data reading elf file\r\n";
   //
   int cb = 0;

   boot_handler_t boot_handler = (boot_handler_t)0x00000000;

#ifdef DEBUG
   _elf_printf("read elf file at 0x%x\r\n", (unsigned int)flash_base);
#endif

   //ALREADY DONE
   //_at91sam9261_remap_internal_ram();

   // Read the header
   _elf_printf("read elf header information:\r\n");
   if (_elf_read(flash_base, (unsigned char *)&ehdr, sizeof(ehdr)) != sizeof(ehdr)) {
#ifdef DEBUG
      _elf_printf("error: can't read elf header\r\n");
#endif
      return 0;
   }
   offset += sizeof(ehdr);
   //
#ifdef DEBUG
   _elf_printf("type: %d, machine: %d, version: %d\r\nentry: 0x%x, PHoff: 0x%x/%d/%d, SHoff: 0x%x/%d/%d\r\n\r\n",
               ehdr.e_type, ehdr.e_machine, ehdr.e_version, ehdr.e_entry,
               ehdr.e_phoff, ehdr.e_phentsize, ehdr.e_phnum,
               ehdr.e_shoff, ehdr.e_shentsize, ehdr.e_shnum);
#endif
   //
   if (ehdr.e_type != ET_EXEC) {
#ifdef DEBUG
      _elf_printf("error: only absolute elf images supported\r\n");
#endif
      return 0;
   }
   //
   if (ehdr.e_phnum > MAX_PHDR) {
#ifdef DEBUG
      _elf_printf("error: too many firmware headers\r\n");
#endif
      return 0;
   }

#ifdef DEBUG
   _elf_printf("jump to offset 0x%x wait some seconds... ", ehdr.e_phoff);
#endif

   //jump to offset
#ifdef USE_OPTIMIZED_READELF
   //optimized code
#if !defined(__GNUC__)
   _elf_lseek(flash_base, ehdr.e_phoff, 0);
   offset += ehdr.e_phoff;
#endif
#else
   //not optimized: original code from redboot
   while (offset < ehdr.e_phoff) {
      if (_elf_getc(flash_base) < 0) {
#ifdef DEBUG
         _elf_printf(SHORT_DATA);
#endif
         return 0;
      }
      offset++;
   }
#endif
   //
#ifdef DEBUG
   _elf_printf("done\r\n");
#endif

#ifdef DEBUG
   _elf_printf("read elf section header\r\n");
#endif
   //
   for (phx = 0; phx < ehdr.e_phnum; phx++) {
      if (_elf_read(flash_base, (unsigned char *)&phdr[phx], sizeof(phdr[0])) != sizeof(phdr[0])) {
#ifdef DEBUG
         _elf_printf("error: can't read ELF program header\r\n");
#endif
         return 0;
      }
#ifdef DEBUG
      _elf_printf("section header: type: %d, off: 0x%x\r\nva: 0x%x, pa: 0x%x, len: %d/%d, flags: %d\r\n",
                  phdr[phx].p_type, phdr[phx].p_offset, phdr[phx].p_vaddr, phdr[phx].p_paddr,
                  phdr[phx].p_filesz, phdr[phx].p_memsz, phdr[phx].p_flags);
#endif
      offset += sizeof(phdr[0]);
   }

   if (base) {
      // Set address offset based on lowest address in file.
      addr_offset = 0xFFFFFFFF;
      for (phx = 0; phx < ehdr.e_phnum; phx++) {
#ifdef CYGOPT_REDBOOT_ELF_VIRTUAL_ADDRESS
         if ((phdr[phx].p_type == PT_LOAD) && (phdr[phx].p_vaddr < addr_offset)) {
            addr_offset = phdr[phx].p_vaddr;
#else
         if ((phdr[phx].p_type == PT_LOAD) && (phdr[phx].p_paddr < addr_offset)) {
            addr_offset = phdr[phx].p_paddr;
#endif
         }
      }
      addr_offset = (unsigned long)base - addr_offset;
   } else {
      addr_offset = 0;
   }

   //phlb modif
#if !defined(__GNUC__)
   _elf_lseek(flash_base, sizeof(ehdr), 0);
#endif

#ifdef DEBUG
   _elf_printf("copy firmware in ram started:\r\n");
#endif

   for (phx = 0; phx < ehdr.e_phnum; phx++) {
      //
      if (phdr[phx].p_type == PT_LOAD) {
         // Loadable segment
#ifdef CYGOPT_REDBOOT_ELF_VIRTUAL_ADDRESS
         addr = (unsigned char *)phdr[phx].p_vaddr;
#else
         addr = (unsigned char *)phdr[phx].p_paddr;
#endif
         //
         len = phdr[phx].p_filesz;
         if ((unsigned long)addr < lowest_address) {
            lowest_address = (unsigned long)addr;
         }
         //
         addr += addr_offset;
         if (offset > phdr[phx].p_offset) {
            /*
            if ((phdr[phx].p_offset + len) < offset) {
               printf("Can't load ELF file - program headers out of order\r\n");
               return 0;
            }
            */
            /*addr += offset - phdr[phx].p_offset;*/
         } else {
            while (offset < phdr[phx].p_offset) {
               if (_elf_getc(flash_base) < 0) {
#ifdef DEBUG
                  _elf_printf(SHORT_DATA);
#endif
                  return 0;
               }
               offset++;
            }
         }

#ifdef DEBUG
         _elf_printf("program header: type: %d, off: 0x%x, va: 0x%x, pa:0x%x, len: %d/%d, flags: %d\r\n",
                     phdr[phx].p_type, phdr[phx].p_offset, phdr[phx].p_vaddr, phdr[phx].p_paddr,
                     phdr[phx].p_filesz, phdr[phx].p_memsz, phdr[phx].p_flags);
#endif

         // Copy data into memory
#ifndef USE_OPTIMIZED_READELF
         while (len-- > 0) {
            if ((ch = _elf_getc(flash_base)) < 0) {
#ifdef DEBUG
               _elf_printf(SHORT_DATA);
#endif
               return 0;
            }
#ifdef CYGSEM_REDBOOT_VALIDATE_USER_RAM_LOADS
            if (valid_address(addr))
#endif
               *addr = ch;   //original code
#ifdef DEBUG
            if (!(((unsigned long)offset) % (80 * 1 * 1024)))
               _elf_printf(".");
#endif
            addr++;
            offset++;
            if ((unsigned long)(addr - addr_offset) > highest_address) {
               highest_address = (unsigned long)(addr - addr_offset);
            }
         }
#endif

#ifdef USE_OPTIMIZED_READELF
         //
#if defined(__GNUC__)
         _elf_lseek(flash_base, phdr[phx].p_offset, 0);
         _elf_printf("offset:%d addr:0x%x fl_offset:%d\r\n", offset, addr, elf_flash_offset);
#endif
         cb = 0;
         while ((len - cb)) {
            unsigned char elf_buffer[4096] = {0};
            int sz = 0;
            if ((len - cb) >= sizeof(elf_buffer))
               sz = sizeof(elf_buffer);
            else
               sz = (len - cb);
            //cb += read(fd,elf_buffer,sz);
            cb += _elf_read(flash_base, addr, sz);
            /*
            #ifdef DEBUG
               lseek(fd_bin,(unsigned long)addr,SEEK_SET);
               write(fd_bin,elf_buffer,sz);
            #endif
            */
#ifdef CYGSEM_REDBOOT_VALIDATE_USER_RAM_LOADS
            if (valid_address(addr))
#endif
               //memcpy(addr,elf_buffer,sz);
               //
               addr += sz;
            offset += sz;
            if ((unsigned long)(addr - addr_offset) > highest_address) {
               highest_address = (unsigned long)(addr - addr_offset);
            }
         }
#endif
      }
   }

   // Save load base/top and entry
   if (base) {
      load_address     = base;
      load_address_end = base + (highest_address - lowest_address);
      entry_address    = base + (ehdr.e_entry - lowest_address);
   } else {
      load_address     = lowest_address;
      load_address_end = highest_address;
      entry_address    = ehdr.e_entry;
   }

   // nak everything to stop the transfer, since redboot
   // usually doesn't read all the way to the end of the
   // elf files.
#ifdef DEBUG
   _elf_printf("\r\ncopy firmware in ram done\r\n");
   if (addr_offset)
      _elf_printf("address offset = 0x%x\n", addr_offset);
   _elf_printf("firmware entry point: 0x%x, address range: 0x%x-0x%x\r\n", entry_address, load_address, load_address_end);
   _elf_printf("ready to rumble? ;)\r\nboot on firmware\r\n");
#endif

   boot_handler = (boot_handler_t)entry_address;
   //boot!!!!
   boot_handler();

   __asm__("nop");
   __asm__("nop");
   __asm__("nop");
   __asm__("nop");
   __asm__("nop");
   __asm__("nop");

   for (;;)
      ;

   return 1;
}
/*! @brief Sets up the clock out of RESET * */ void clock_initialise(void) { #if (CLOCK_MODE == CLOCK_MODE_NONE) // No clock setup #else // XTAL/EXTAL Pins SIM->SCGC5 |= SIM_SCGC5_PORTA_MASK; PORTA->PCR[3] = PORT_PCR_MUX(0); PORTA->PCR[4] = PORT_PCR_MUX(0); // Configure the Crystal Oscillator OSC0->CR = OSC_CR_ERCLKEN_M|OSC_CR_EREFSTEN_M|OSC_CR_SCP_M; // Fast Internal Clock divider MCG->SC = MCG_SC_FCRDIV_M; // Out of reset MCG is in FEI mode // ============================================================= SIM->CLKDIV1 = SIM_CLKDIV1_OUTDIV1(3) | SIM_CLKDIV1_OUTDIV2(7) | SIM_CLKDIV1_OUTDIV3(3) | SIM_CLKDIV1_OUTDIV4(7); // Switch from FEI -> FEI/FBI/FEE/FBE // ============================================================= // Set up crystal or external clock source MCG->C2 = MCG_C2_LOCRE0_M | // LOCRE0 = 0,1 -> Loss of clock reset enable MCG_C2_RANGE0_M | // RANGE0 = 0,1,2 -> Oscillator low/high/very high clock range MCG_C2_HGO0_M | // HGO0 = 0,1 -> Oscillator low power/high gain MCG_C2_EREFS0_M | // EREFS0 = 0,1 -> Select external clock/crystal oscillator MCG_C2_IRCS_M; // IRCS = 0,1 -> Select slow/fast internal clock for internal reference #if ((CLOCK_MODE == CLOCK_MODE_FEI) || (CLOCK_MODE == CLOCK_MODE_FBI) || (CLOCK_MODE == CLOCK_MODE_BLPI) ) // Transition via FBI //===================================== #define BYPASS (1) // CLKS value used while FLL locks MCG->C1 = MCG_C1_CLKS(BYPASS) | // CLKS = X -> External reference source while PLL locks MCG_C1_FRDIV_M | // FRDIV = N -> XTAL/2^n ~ 31.25 kHz MCG_C1_IREFS_M | // IREFS = 0,1 -> External/Slow IRC for FLL source MCG_C1_IRCLKEN_M | // IRCLKEN = 0,1 -> IRCLK disable/enable MCG_C1_IREFSTEN_M; // IREFSTEN = 0,1 -> Internal reference enabled in STOP mode // Wait for S_IREFST to indicate FLL Reference has switched do { __asm__("nop"); } while ((MCG->S & MCG_S_IREFST_MASK) != (MCG_C1_IREFS_V<<MCG_S_IREFST_SHIFT)); // Wait for S_CLKST to indicating that OUTCLK has switched to bypass PLL/FLL do { __asm__("nop"); } while ((MCG->S & MCG_S_CLKST_MASK) != MCG_S_CLKST(BYPASS)); // Set FLL Parameters MCG->C4 = (MCG->C4&~(MCG_C4_DMX32_MASK|MCG_C4_DRST_DRS_MASK))|MCG_C4_DMX32_M|MCG_C4_DRST_DRS_M; #endif #if ((CLOCK_MODE == CLOCK_MODE_FBE) || (CLOCK_MODE == CLOCK_MODE_FEE) || (CLOCK_MODE == CLOCK_MODE_PLBE) || (CLOCK_MODE == CLOCK_MODE_PBE) || (CLOCK_MODE == CLOCK_MODE_PEE)) // Transition via FBE //===================================== #define BYPASS (2) // CLKS value used while PLL locks MCG->C1 = MCG_C1_CLKS(BYPASS) | // CLKS = 2 -> External reference source while PLL locks MCG_C1_FRDIV_M | // FRDIV = N -> XTAL/2^n ~ 31.25 kHz MCG_C1_IREFS_M | // IREFS = 0,1 -> External/Slow IRC for FLL source MCG_C1_IRCLKEN_M | // IRCLKEN = 0,1 -> IRCLK disable/enable MCG_C1_IREFSTEN_M; // IREFSTEN = 0,1 -> Internal reference enabled in STOP mode #if (MCG_C2_EREFS_V != 0) // Wait for oscillator stable (if used) do { __asm__("nop"); } while ((MCG->S & MCG_S_OSCINIT0_MASK) == 0); #endif // Wait for S_IREFST to indicate FLL Reference has switched do { __asm__("nop"); } while ((MCG->S & MCG_S_IREFST_MASK) != (MCG_C1_IREFS_V<<MCG_S_IREFST_SHIFT)); // Wait for S_CLKST to indicating that OUTCLK has switched to bypass PLL/FLL do { __asm__("nop"); } while ((MCG->S & MCG_S_CLKST_MASK) != MCG_S_CLKST(BYPASS)); // Set FLL Parameters MCG->C4 = (MCG->C4&~(MCG_C4_DMX32_MASK|MCG_C4_DRST_DRS_MASK))|MCG_C4_DMX32_M|MCG_C4_DRST_DRS_M; #endif // Select FEI/FBI/FEE/FBE clock mode MCG->C1 = MCG_C1_CLKS_M | // CLKS = 0,1,2 -> Select FLL/IRCSCLK/ERCLK MCG_C1_FRDIV_M | // FRDIV = N -> XTAL/2^n ~ 
31.25 kHz MCG_C1_IREFS_M | // IREFS = 0,1 -> External/Slow IRC for FLL source MCG_C1_IRCLKEN_M | // IRCLKEN = 0,1 -> IRCLK disable/enable MCG_C1_IREFSTEN_M; // IREFSTEN = 0,1 -> Internal reference enabled in STOP mode // Wait for mode change do { __asm__("nop"); } while ((MCG->S & MCG_S_IREFST_MASK) != (MCG_C1_IREFS_V<<MCG_S_IREFST_SHIFT)); #if defined (MCG_C6_PLLS_V) && (MCG_C1_CLKS_V == 0) // FLL or PLL #define MCG_S_CLKST_M MCG_S_CLKST(MCG_C6_PLLS_V?3:0) #else #define MCG_S_CLKST_M MCG_S_CLKST(MCG_C1_CLKS_V) #endif // Wait for S_CLKST to indicating that OUTCLK has switched do { __asm__("nop"); } while ((MCG->S & MCG_S_CLKST_MASK) != MCG_S_CLKST_M); // Set the SIM _CLKDIV dividers SIM->CLKDIV1 = SIM_CLKDIV1_OUTDIV1_M | SIM_CLKDIV1_OUTDIV2_M | SIM_CLKDIV1_OUTDIV3_M | SIM_CLKDIV1_OUTDIV4_M; #if (CLOCK_MODE == CLOCK_MODE_BLPE) || (CLOCK_MODE == CLOCK_MODE_BLPI) // Select BLPE/BLPI clock mode MCG->C2 = MCG_C2_LOCRE0_M | // LOCRE0 = 0,1 -> Loss of clock reset MCG_C2_RANGE0_M | // RANGE0 = 0,1,2 -> Oscillator low/high/very high clock range MCG_C2_HGO0_M | // HGO0 = 0,1 -> Oscillator low power/high gain MCG_C2_EREFS0_M | // EREFS0 = 0,1 -> Select external clock/crystal oscillator MCG_C2_LP_M | // LP = 0,1 -> Select FLL enabled/disabled in bypass mode MCG_C2_IRCS_M; // IRCS = 0,1 -> Select slow/fast internal clock for internal reference #endif // (CLOCK_MODE == CLOCK_MODE_BLPE) || (CLOCK_MODE == CLOCK_MODE_BLPI) #endif // (CLOCK_MODE == CLOCK_MODE_NONE) /*! * SOPT1 Clock multiplexing */ #if defined(SIM_SOPT1_OSC32KSEL_MASK) && defined(SIM_SOPT1_OSC32KSEL_M) // ERCLK32K source SIM->SOPT1 = (SIM->SOPT1&~SIM_SOPT1_OSC32KSEL_MASK)|SIM_SOPT1_OSC32KSEL_M; #endif /*! * SOPT2 Clock multiplexing */ #if defined(SIM_SOPT2_SDHCSRC_MASK) && defined(SIM_SOPT2_SDHCSRC_M) // SDHC clock SIM->SOPT2 = (SIM->SOPT2&~SIM_SOPT2_SDHCSRC_MASK)|SIM_SOPT2_SDHCSRC_M; #endif #if defined(SIM_SOPT2_TIMESRC_MASK) && defined(SIM_SOPT2_TIMESRC_M) // Ethernet time-stamp clock SIM->SOPT2 = (SIM->SOPT2&~SIM_SOPT2_TIMESRC_MASK)|SIM_SOPT2_TIMESRC_M; #endif #if defined(SIM_SOPT2_RMIISRC_MASK) && defined(SIM_SOPT2_RMIISRC_M) // RMII clock SIM->SOPT2 = (SIM->SOPT2&~SIM_SOPT2_RMIISRC_MASK)|SIM_SOPT2_RMIISRC_M; #endif #ifdef SIM_SCGC4_USBOTG_MASK // !! WARNING !! The USB interface must be disabled for clock changes to have effect !! WARNING !! SIM->SCGC4 &= ~SIM_SCGC4_USBOTG_MASK; #endif #if defined(SIM_SOPT2_USBSRC_MASK) && defined(SIM_SOPT2_USBSRC_M) // USB clock (48MHz req.) SIM->SOPT2 = (SIM->SOPT2&~SIM_SOPT2_USBSRC_MASK)|SIM_SOPT2_USBSRC_M; #endif #if defined(SIM_SOPT2_USBFSRC_MASK) && defined(SIM_SOPT2_USBFSRC_M) // USB clock (48MHz req.) 
SIM->SOPT2 = (SIM->SOPT2&~SIM_SOPT2_USBFSRC_MASK)|SIM_SOPT2_USBFSRC_M; #endif #if defined(SIM_SOPT2_PLLFLLSEL_MASK) && defined(SIM_SOPT2_PLLFLLSEL_M) // Peripheral clock SIM->SOPT2 = (SIM->SOPT2&~SIM_SOPT2_PLLFLLSEL_MASK)|SIM_SOPT2_PLLFLLSEL_M; #endif #if defined(SIM_SOPT2_UART0SRC_MASK) && defined(SIM_SOPT2_UART0SRC_M) // UART0 clock SIM->SOPT2 = (SIM->SOPT2&~SIM_SOPT2_UART0SRC_MASK)|SIM_SOPT2_UART0SRC_M; #endif #if defined(SIM_SOPT2_TPMSRC_MASK) && defined(SIM_SOPT2_TPMSRC_M) // TPM clock SIM->SOPT2 = (SIM->SOPT2&~SIM_SOPT2_TPMSRC_MASK)|SIM_SOPT2_TPMSRC_M; #endif #if defined(SIM_SOPT2_CLKOUTSEL_MASK) && defined(SIM_SOPT2_CLKOUTSEL_M) SIM->SOPT2 = (SIM->SOPT2&~SIM_SOPT2_CLKOUTSEL_MASK)|SIM_SOPT2_CLKOUTSEL_M; #endif #if defined(SIM_SOPT2_RTCCLKOUTSEL_MASK) && defined(SIM_SOPT2_RTCCLKOUTSEL_M) SIM->SOPT2 = (SIM->SOPT2&~SIM_SOPT2_RTCCLKOUTSEL_MASK)|SIM_SOPT2_RTCCLKOUTSEL_M; #endif #if defined(SIM_CLKDIV2_USBDIV_MASK) && defined(SIM_CLKDIV2_USBFRAC_MASK) && defined(SIM_CLKDIV2_USB_M) SIM->CLKDIV2 = (SIM->CLKDIV2&~(SIM_CLKDIV2_USBDIV_MASK|SIM_CLKDIV2_USBFRAC_MASK)) | SIM_CLKDIV2_USB_M; #endif SystemCoreClockUpdate(); }
void test10(void)
{
    __asm__("int3");
    __builtin_unreachable();  // No warning about falling off the end of a noreturn function.
}
/* Return the current stack pointer; offsets are added to / subtracted from this value. */
long sp(void)
{
    long esp;
    __asm__("movl %%esp, %0" : "=r"(esp));
    return esp;
}
int eth_init(bd_t *bis) { int i; scc_enet_t *pram_ptr; volatile immap_t *immr = (immap_t *)CFG_IMMR; #if defined(CONFIG_FADS) *((uint *) BCSR4) &= ~(BCSR4_ETHLOOP|BCSR4_MODEM_EN); *((uint *) BCSR4) |= BCSR4_TFPLDL|BCSR4_TPSQEL|BCSR4_DATA_VOICE; *((uint *) BCSR1) &= ~BCSR1_ETHEN; #endif pram_ptr = (scc_enet_t *)&(immr->im_cpm.cp_dparam[PROFF_ENET]); rxIdx = 0; txIdx = 0; /* assign static pointer to BD area */ rtx = (RTXBD *) (immr->im_cpm.cp_dpmem + BD_OFFSET); /* Configure port A pins for Txd and Rxd. */ immr->im_ioport.iop_papar |= (PA_ENET_RXD | PA_ENET_TXD); immr->im_ioport.iop_padir &= ~(PA_ENET_RXD | PA_ENET_TXD); immr->im_ioport.iop_paodr &= ~PA_ENET_TXD; /* Configure port C pins to enable CLSN and RENA. */ immr->im_ioport.iop_pcpar &= ~(PC_ENET_CLSN | PC_ENET_RENA); immr->im_ioport.iop_pcdir &= ~(PC_ENET_CLSN | PC_ENET_RENA); immr->im_ioport.iop_pcso |= (PC_ENET_CLSN | PC_ENET_RENA); /* Configure port A for TCLK and RCLK. */ immr->im_ioport.iop_papar |= (PA_ENET_TCLK | PA_ENET_RCLK); immr->im_ioport.iop_padir &= ~(PA_ENET_TCLK | PA_ENET_RCLK); /* * Configure Serial Interface clock routing -- see section 16.7.5.3 * First, clear all SCC bits to zero, then set the ones we want. */ immr->im_cpm.cp_sicr &= ~SICR_ENET_MASK; immr->im_cpm.cp_sicr |= SICR_ENET_CLKRT; /* * Initialize SDCR -- see section 16.9.23.7 * SDMA configuration register */ immr->im_siu_conf.sc_sdcr = 0x01; /* * Setup SCC Ethernet Parameter RAM */ pram_ptr->sen_genscc.scc_rfcr = 0x18; /* Normal Operation and Mot byte ordering */ pram_ptr->sen_genscc.scc_tfcr = 0x18; /* Mot byte ordering, Normal access */ pram_ptr->sen_genscc.scc_mrblr = DBUF_LENGTH; /* max. ET package len 1520 */ pram_ptr->sen_genscc.scc_rbase = (unsigned int)(&rtx->rxbd[0]); /* Set RXBD tbl start at Dual Port */ pram_ptr->sen_genscc.scc_tbase = (unsigned int)(&rtx->txbd[0]); /* Set TXBD tbl start at Dual Port */ /* * Setup Receiver Buffer Descriptors (13.14.24.18) * Settings: * Empty, Wrap */ for (i = 0; i < PKTBUFSRX; i++) { rtx->rxbd[i].cbd_sc = BD_ENET_RX_EMPTY; rtx->rxbd[i].cbd_datlen = 0; /* Reset */ rtx->rxbd[i].cbd_bufaddr = (uint)NetRxPackets[i]; } rtx->rxbd[PKTBUFSRX - 1].cbd_sc |= BD_ENET_RX_WRAP; /* * Setup Ethernet Transmitter Buffer Descriptors (13.14.24.19) * Settings: * Add PADs to Short FRAMES, Wrap, Last, Tx CRC */ for (i = 0; i < TX_BUF_CNT; i++) { rtx->txbd[i].cbd_sc = (BD_ENET_TX_PAD | BD_ENET_TX_LAST | BD_ENET_TX_TC); rtx->txbd[i].cbd_datlen = 0; /* Reset */ rtx->txbd[i].cbd_bufaddr = (uint)&txbuf[i][0]; } rtx->txbd[TX_BUF_CNT - 1].cbd_sc |= BD_ENET_TX_WRAP; /* * Enter Command: Initialize Rx Params for SCC */ do { /* Spin until ready to issue command */ __asm__ ("eieio"); } while (immr->im_cpm.cp_cpcr & CPM_CR_FLG); /* Issue command */ immr->im_cpm.cp_cpcr = ((CPM_CR_INIT_RX << 8) | (CPM_CR_ENET << 4) | CPM_CR_FLG); do { /* Spin until command processed */ __asm__ ("eieio"); } while (immr->im_cpm.cp_cpcr & CPM_CR_FLG); /* * Ethernet Specific Parameter RAM * see table 13-16, pg. 660, * pg. 
681 (example with suggested settings) */ pram_ptr->sen_cpres = ~(0x0); /* Preset CRC */ pram_ptr->sen_cmask = 0xdebb20e3; /* Constant Mask for CRC */ pram_ptr->sen_crcec = 0x0; /* Error Counter CRC (unused) */ pram_ptr->sen_alec = 0x0; /* Alignment Error Counter (unused) */ pram_ptr->sen_disfc = 0x0; /* Discard Frame Counter (unused) */ pram_ptr->sen_pads = 0x8888; /* Short Frame PAD Characters */ pram_ptr->sen_retlim = 15; /* Retry Limit Threshold */ pram_ptr->sen_maxflr = 1518; /* MAX Frame Length Register */ pram_ptr->sen_minflr = 64; /* MIN Frame Length Register */ pram_ptr->sen_maxd1 = DBUF_LENGTH; /* MAX DMA1 Length Register */ pram_ptr->sen_maxd2 = DBUF_LENGTH; /* MAX DMA2 Length Register */ pram_ptr->sen_gaddr1 = 0x0; /* Group Address Filter 1 (unused) */ pram_ptr->sen_gaddr2 = 0x0; /* Group Address Filter 2 (unused) */ pram_ptr->sen_gaddr3 = 0x0; /* Group Address Filter 3 (unused) */ pram_ptr->sen_gaddr4 = 0x0; /* Group Address Filter 4 (unused) */ #define ea bis->bi_enetaddr pram_ptr->sen_paddrh = (ea[5] << 8) + ea[4]; pram_ptr->sen_paddrm = (ea[3] << 8) + ea[2]; pram_ptr->sen_paddrl = (ea[1] << 8) + ea[0]; #undef ea pram_ptr->sen_pper = 0x0; /* Persistence (unused) */ pram_ptr->sen_iaddr1 = 0x0; /* Individual Address Filter 1 (unused) */ pram_ptr->sen_iaddr2 = 0x0; /* Individual Address Filter 2 (unused) */ pram_ptr->sen_iaddr3 = 0x0; /* Individual Address Filter 3 (unused) */ pram_ptr->sen_iaddr4 = 0x0; /* Individual Address Filter 4 (unused) */ pram_ptr->sen_taddrh = 0x0; /* Tmp Address (MSB) (unused) */ pram_ptr->sen_taddrm = 0x0; /* Tmp Address (unused) */ pram_ptr->sen_taddrl = 0x0; /* Tmp Address (LSB) (unused) */ /* * Enter Command: Initialize Tx Params for SCC */ do { /* Spin until ready to issue command */ __asm__ ("eieio"); } while (immr->im_cpm.cp_cpcr & CPM_CR_FLG); /* Issue command */ immr->im_cpm.cp_cpcr = ((CPM_CR_INIT_TX << 8) | (CPM_CR_ENET << 4) | CPM_CR_FLG); do { /* Spin until command processed */ __asm__ ("eieio"); } while (immr->im_cpm.cp_cpcr & CPM_CR_FLG); /* * Clear Events in SCCE -- Clear bits by writing 1's */ immr->im_cpm.cp_scc[SCC_ENET].scc_scce = ~(0x0); /* * Initialize GSMR High 32-Bits * Settings: Normal Mode */ immr->im_cpm.cp_scc[SCC_ENET].scc_gsmrh = 0; /* * Initialize GSMR Low 32-Bits, but do not Enable Transmit/Receive * Settings: * TCI = Invert * TPL = 48 bits * TPP = Repeating 10's * MODE = Ethernet */ immr->im_cpm.cp_scc[SCC_ENET].scc_gsmrl = ( SCC_GSMRL_TCI | \ SCC_GSMRL_TPL_48 | \ SCC_GSMRL_TPP_10 | \ SCC_GSMRL_MODE_ENET); /* * Initialize the DSR -- see section 13.14.4 (pg. 513) v0.4 */ immr->im_cpm.cp_scc[SCC_ENET].scc_dsr = 0xd555; /* * Initialize the PSMR * Settings: * CRC = 32-Bit CCITT * NIB = Begin searching for SFD 22 bits after RENA * BRO = Reject broadcast packets * PROMISCOUS = Catch all packetsregardless of dest. MAC adress */ immr->im_cpm.cp_scc[SCC_ENET].scc_pmsr = (SCC_PMSR_ENCRC | SCC_PMSR_NIB22 /* | SCC_PMSR_BRO | SCC_PMSR_PRO */); /* * Configure Ethernet TENA Signal */ #if (defined(PC_ENET_TENA) && !defined(PB_ENET_TENA)) immr->im_ioport.iop_pcpar |= PC_ENET_TENA; immr->im_ioport.iop_pcdir &= ~PC_ENET_TENA; #elif (defined(PB_ENET_TENA) && !defined(PC_ENET_TENA)) immr->im_cpm.cp_pbpar |= PB_ENET_TENA; immr->im_cpm.cp_pbdir |= PB_ENET_TENA; #else #error Configuration Error: exactly ONE of PB_ENET_TENA, PC_ENET_TENA must be defined #endif /* * Set the ENT/ENR bits in the GSMR Low -- Enable Transmit/Receive */ immr->im_cpm.cp_scc[SCC_ENET].scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT); return 1; }
void can_setup(void)
{
    /* Enable peripheral clocks. */
    rcc_peripheral_enable_clock(&RCC_APB2ENR, RCC_APB2ENR_AFIOEN);
    rcc_peripheral_enable_clock(&RCC_APB2ENR, RCC_APB2ENR_IOPAEN);
    rcc_peripheral_enable_clock(&RCC_APB1ENR, RCC_APB1ENR_CANEN);

    /* Configure CAN pin: RX */
    gpio_set_mode(GPIOA, GPIO_MODE_INPUT, GPIO_CNF_INPUT_PULL_UPDOWN, GPIO_CAN_RX);
    gpio_set(GPIOA, GPIO_CAN_RX);

    /* Configure CAN pin: TX */
    gpio_set_mode(GPIOA, GPIO_MODE_OUTPUT_50_MHZ, GPIO_CNF_OUTPUT_ALTFN_PUSHPULL, GPIO_CAN_TX);

    /* NVIC configuration */
    nvic_enable_irq(NVIC_USB_LP_CAN_RX0_IRQ);
    nvic_set_priority(NVIC_USB_LP_CAN_RX0_IRQ, 1);

    /* CAN register init */
    can_reset(CAN1);

    /* CAN cell init */
    if (can_init(CAN1,
                 false,            /* TTCM: Time triggered comm mode? */
                 true,             /* ABOM: Automatic bus-off management? */
                 false,            /* AWUM: Automatic wakeup mode? */
                 false,            /* NART: No automatic retransmission? */
                 false,            /* RFLM: Receive FIFO locked mode? */
                 false,            /* TXFP: Transmit FIFO priority? */
                 CAN_BTR_SJW_1TQ,
                 CAN_BTR_TS1_3TQ,
                 CAN_BTR_TS2_4TQ,
                 12,               /* BRP+1: Baud rate prescaler */
                 false,
                 false))
    {
        ON(LED_RED);
        OFF(LED_GREEN);
        OFF(LED_BLUE);
        OFF(LED_ORANGE);

        /* Die because we failed to initialize. */
        while (1)
            __asm__("nop");
    }

    /* CAN filter init */
    can_filter_id_mask_32bit_init(CAN1,
                                  0,      /* Filter ID */
                                  0,      /* CAN ID */
                                  0,      /* CAN ID mask */
                                  0,      /* FIFO assignment (here: FIFO0) */
                                  true);  /* Enable the filter. */

    /* transmit struct init */
    can_tx_msg.id = 0x0;
    can_tx_msg.rtr = false;
#ifdef CAN__USE_EXT_ID
    can_tx_msg.ide = true;
#else
    can_tx_msg.ide = false;
#endif
    can_tx_msg.dlc = 1;

    can_enable_irq(CAN1, CAN_IER_FMPIE0);
}
void ffi_closure_eabi (unsigned arg1, unsigned arg2, unsigned arg3,
                       unsigned arg4, unsigned arg5, unsigned arg6)
{
  /* This function is called by a trampoline.  The trampoline stows a
     pointer to the ffi_closure object in gr7.  We must save this
     pointer in a place that will persist while we do our work.  */
  register ffi_closure *creg __asm__ ("gr7");
  ffi_closure *closure = creg;

  /* Arguments that don't fit in registers are found on the stack
     at a fixed offset above the current frame pointer.  */
  register char *frame_pointer __asm__ ("fp");
  char *stack_args = frame_pointer + 16;

  /* Lay the register arguments down in a continuous chunk of memory.  */
  unsigned register_args[6] =
    { arg1, arg2, arg3, arg4, arg5, arg6 };

  ffi_cif *cif = closure->cif;
  ffi_type **arg_types = cif->arg_types;
  void **avalue = alloca (cif->nargs * sizeof(void *));
  char *ptr = (char *) register_args;
  int i;

  /* Find the address of each argument.  */
  for (i = 0; i < cif->nargs; i++)
    {
      switch (arg_types[i]->type)
        {
        case FFI_TYPE_SINT8:
        case FFI_TYPE_UINT8:
          avalue[i] = ptr + 3;
          break;
        case FFI_TYPE_SINT16:
        case FFI_TYPE_UINT16:
          avalue[i] = ptr + 2;
          break;
        case FFI_TYPE_SINT32:
        case FFI_TYPE_UINT32:
        case FFI_TYPE_FLOAT:
          avalue[i] = ptr;
          break;
        case FFI_TYPE_STRUCT:
          avalue[i] = *(void**)ptr;
          break;
        default:
          /* This is an 8-byte value.  */
          avalue[i] = ptr;
          ptr += 4;
          break;
        }
      ptr += 4;

      /* If we've handled more arguments than fit in registers,
         start looking at the those passed on the stack.  */
      if (ptr == ((char *)register_args + (6*4)))
        ptr = stack_args;
    }

  /* Invoke the closure.  */
  if (cif->rtype->type == FFI_TYPE_STRUCT)
    {
      /* The caller allocates space for the return structure, and
         passes a pointer to this space in gr3.  Use this value directly
         as the return value.  */
      register void *return_struct_ptr __asm__("gr3");
      (closure->fun) (cif, return_struct_ptr, avalue, closure->user_data);
    }
  else
    {
      /* (This branch is truncated in the source.  For non-struct returns
         the closure would typically be invoked with a local return
         buffer; the completion below is an assumption.)  */
      long long rvalue;
      (closure->fun) (cif, &rvalue, avalue, closure->user_data);
    }
}
void test(void)
{
    /* Empty assembler fragment: */
    __asm__("");
}
uint64_t force_ldm_stalls(chain_t **C, int element_size, int access_size, int mem_refs, // number of pointers/elements to chase uint64_t max_nelems, // max number of available elements/pointers int it_n, // seed to calculate the first pointer to chase, used to avoid repeating // pointers during consecutive calls unsigned long *time_diff_ns) { uint64_t j, i; int nchains = SEED_IN; uint64_t sumv[MAX_NUM_CHAINS]; uint64_t nextp[MAX_NUM_CHAINS]; char *buf; uint64_t buf_size = 16384; int count = 0; uint64_t start; uint64_t it_limit; struct timespec time_start, time_end; assert(nchains < MAX_NUM_CHAINS); if (mem_refs <= 0) return 0; buf = (char*) malloc(buf_size); assert(buf != NULL); if (max_nelems > mem_refs) { it_limit = max_nelems / mem_refs; } else { it_limit = 1; } it_n = it_n % it_limit; start = it_n * mem_refs; if ((start + mem_refs) > max_nelems) { start = 0; } /* chase the pointers */ if (nchains == 1) { clock_gettime(CLOCK_MONOTONIC, &time_start); sumv[0] = 0; // chase pointers until the 'mem_refs' count, the pointer chasing will restart from beginning if 'mem_refs' // is greater than 'nelems' for (count = 0, i = start; count < mem_refs; i = element(C[0], i)->val, ++count) { __asm__(""); sumv[0] += element(C[0], i)->val; if (access_size > element_size) { read_element(C[0], i, buf, buf_size); } } clock_gettime(CLOCK_MONOTONIC, &time_end); } // else { // for (j=0; j < nchains; j++) { // sumv[j] = 0; // nextp[j] = 0; // } // for (; 0 != element(C[0], nextp[0])->val; ) { // for (j=0; j < nchains; j++) { // sumv[j] += element(C[j], nextp[j])->val; // if (access_size > element_size) { // read_element(C[j], nextp[j], buf, buf_size); // } // nextp[j] = element(C[j], nextp[j])->val; // } // } // } *time_diff_ns = ((time_end.tv_sec * 1000000000) + time_end.tv_nsec) - ((time_start.tv_sec * 1000000000) + time_start.tv_nsec); free(buf); return sumv[0]; }
void main(void)
{
    __asm__("mov %eax, %cr0;");
}
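/*
 * Hedged aside (not from the snippet above, which moves whatever happens to be
 * in EAX into CR0): on 32-bit x86 with GCC, control-register updates are
 * normally done as a read-modify-write with explicit operands, e.g.:
 */
static inline void cr0_set_bits(unsigned long mask)
{
    unsigned long cr0;
    __asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0));   /* read current CR0 */
    cr0 |= mask;                                          /* set the requested bits */
    __asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0));   /* write it back */
}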
unsigned int get_esp()
{
    unsigned int esp;
    __asm__("movl %%esp, %0" : "=r"(esp));
    return esp;
}
/** * * @brief Initialize the tickless idle feature * * This routine initializes the tickless idle feature by calculating the * necessary hardware-specific parameters. * * Note that the maximum number of ticks that can elapse during a "tickless idle" * is limited by <default_load_value>. The larger the value (the lower the * tick frequency), the fewer elapsed ticks during a "tickless idle". * Conversely, the smaller the value (the higher the tick frequency), the * more elapsed ticks during a "tickless idle". * * @return N/A */ static void sysTickTicklessIdleInit(void) { /* enable counter, disable interrupt and set clock src to system clock */ union __stcsr stcsr = {.bit = {1, 0, 1, 0, 0, 0} }; volatile uint32_t dummy; /* used to help determine the 'skew time' */ /* store the default reload value (which has already been set) */ default_load_value = sysTickReloadGet(); /* calculate the max number of ticks with this 24-bit H/W counter */ max_system_ticks = 0x00ffffff / default_load_value; /* determine the associated load value */ max_load_value = max_system_ticks * default_load_value; /* * Calculate the skew from switching the timer in and out of idle mode. * The following sequence is emulated: * 1. Stop the timer. * 2. Read the current counter value. * 3. Calculate the new/remaining counter reload value. * 4. Load the new counter value. * 5. Set the timer mode to periodic/one-shot. * 6. Start the timer. * * The timer must be running for this to work, so enable the * systick counter without generating interrupts, using the processor *clock. * Note that the reload value has already been set by the caller. */ __scs.systick.stcsr.val |= stcsr.val; __asm__(" isb"); /* ensure the timer is started before reading */ timer_idle_skew = sysTickCurrentGet(); /* start of skew time */ __scs.systick.stcsr.val |= stcsr.val; /* normally sysTickStop() */ dummy = sysTickCurrentGet(); /* emulate sysTickReloadSet() */ /* emulate calculation of the new counter reload value */ if ((dummy == 1) || (dummy == default_load_value)) { dummy = max_system_ticks - 1; dummy += max_load_value - default_load_value; } else { dummy = dummy - 1; dummy += dummy * default_load_value; } /* _sysTickStart() without interrupts */ __scs.systick.stcsr.val |= stcsr.val; timer_mode = TIMER_MODE_PERIODIC; /* skew time calculation for down counter (assumes no rollover) */ timer_idle_skew -= sysTickCurrentGet(); /* restore the previous sysTick state */ sysTickStop(); sysTickReloadSet(default_load_value); }
/** * * @brief System clock tick handler * * This routine handles the system clock tick interrupt. A TICK_EVENT event * is pushed onto the microkernel stack. * * The symbol for this routine is either _timer_int_handler (for normal * system operation) or _real_timer_int_handler (when GDB_INFO is enabled). * * @return N/A */ void _TIMER_INT_HANDLER(void *unused) { ARG_UNUSED(unused); #ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT extern void _sys_k_event_logger_interrupt(void); _sys_k_event_logger_interrupt(); #endif #ifdef CONFIG_INT_LATENCY_BENCHMARK uint32_t value = __scs.systick.val; uint32_t delta = __scs.systick.reload - value; if (_hw_irq_to_c_handler_latency > delta) { /* keep the lowest value observed */ _hw_irq_to_c_handler_latency = delta; } #endif #ifdef CONFIG_SYS_POWER_MANAGEMENT int32_t numIdleTicks; /* * All interrupts are disabled when handling idle wakeup. * For tickless idle, this ensures that the calculation and programming * of * the device for the next timer deadline is not interrupted. * For non-tickless idle, this ensures that the clearing of the kernel * idle * state is not interrupted. * In each case, _sys_power_save_idle_exit is called with interrupts * disabled. */ __asm__(" cpsid i"); /* PRIMASK = 1 */ #ifdef CONFIG_TICKLESS_IDLE /* * If this a wakeup from a completed tickless idle or after * _timer_idle_exit has processed a partial idle, return * to the normal tick cycle. */ if (timer_mode == TIMER_MODE_ONE_SHOT) { sysTickStop(); sysTickReloadSet(default_load_value); sysTickStart(); timer_mode = TIMER_MODE_PERIODIC; } /* set the number of elapsed ticks and announce them to the kernel */ if (idle_mode == IDLE_TICKLESS) { /* tickless idle completed without interruption */ idle_mode = IDLE_NOT_TICKLESS; _sys_idle_elapsed_ticks = idle_original_ticks + 1; /* actual # of idle ticks */ _sys_clock_tick_announce(); } else { /* * Increment the tick because _timer_idle_exit does not * account for the tick due to the timer interrupt itself. * Also, if not in tickless mode, _sys_idle_elapsed_ticks will be 0. */ _sys_idle_elapsed_ticks++; /* * If we transition from 0 elapsed ticks to 1 we need to * announce the * tick event to the microkernel. Other cases will be covered by * _timer_idle_exit. */ if (_sys_idle_elapsed_ticks == 1) { _sys_clock_tick_announce(); } } /* accumulate total counter value */ clock_accumulated_count += default_load_value * _sys_idle_elapsed_ticks; #else /* !CONFIG_TICKLESS_IDLE */ /* * No tickless idle: * Update the total tick count and announce this tick to the kernel. */ clock_accumulated_count += sys_clock_hw_cycles_per_tick; _sys_clock_tick_announce(); #endif /* CONFIG_TICKLESS_IDLE */ numIdleTicks = _NanoIdleValGet(); /* get # of idle ticks requested */ if (numIdleTicks) { _NanoIdleValClear(); /* clear kernel idle setting */ /* * Complete idle processing. * Note that for tickless idle, nothing will be done in * _timer_idle_exit. */ _sys_power_save_idle_exit(numIdleTicks); } __asm__(" cpsie i"); /* re-enable interrupts (PRIMASK = 0) */ #else /* !CONFIG_SYS_POWER_MANAGEMENT */ /* accumulate total counter value */ clock_accumulated_count += sys_clock_hw_cycles_per_tick; /* * one more tick has occurred -- don't need to do anything special since * timer is already configured to interrupt on the following tick */ _sys_clock_tick_announce(); #endif /* CONFIG_SYS_POWER_MANAGEMENT */ extern void _ExcExit(void); _ExcExit(); }
void command(char *cmd)
{
    if (strcmp(cmd, "clear") == 0)   // Clear the screen
    {
        mysys_clear_screen();
        return;
    }
    if (strcmp(cmd, "panic") == 0)   // Scare the kernel
    {
        mysys_panic("As you wish!");
        return;
    }
    if (strcmp(cmd, "reboot") == 0)  // Reboot
    {
        // I have not yet found in the documentation why this causes a reboot.
        // In fact, any single one of these commands triggers a reboot.
        outb(0xfe, 0x64);
        outb(0x01, 0x92);
    }
    if (strcmp(cmd, "fat") == 0)     // Start the FAT browser
    {
        // fat_main();
        return;
    }
    if (strcmp(cmd, "dbg") == 0)     // Invoke the Bochs debugger
    {
        outw(0x8A00, 0x8A00);
        outw(0x8AE0, 0x8A00);
        return;
    }
    if (strcmp(cmd, "cli") == 0)     // Disable interrupts, stop processes
    {
        __asm__("cli");
        return;
    }
    if (strcmp(cmd, "sti") == 0)     // Enable interrupts
    {
        __asm__("sti");
        return;
    }
    if (strcmp(cmd, "beep") == 0)    // Step on its tail (beep)
    {
        make_sound();
        return;
    }
    if (strcmp(cmd, "cpu") == 0)     // Identify the processor
    {
        // Processor identification (see [1])
        ulong eax, ebx, ecx, edx;
        ulong maxeax, maxexeax;      // Max. indexes for Basic and Extended CPUID information
        ulong vendor[3];             // Vendor name

        __asm__ volatile("movl $0x0, %%eax\n"
                         "cpuid"
                         : "=a"(maxeax), "=b"(vendor[0]), "=c"(vendor[2]), "=d"(vendor[1]) :);
        __asm__ volatile("movl $0x80000000, %%eax\n"
                         "cpuid"
                         : "=a"(maxexeax) :);

        _printf("Maximum CPUID indexes: %p/%p", (char*)maxeax, (char*)maxexeax);
        _puts("\nVendor: ");
        _nputs((char*)&vendor, 12);
        _puts("\nBrand String: ");

        if ((maxexeax & 0x80000000) && maxexeax >= 0x80000004)   // If the processor supports the Brand String
        {
            ulong BrandString[12];
            __asm__ volatile("movl $0x80000002, %%eax\n"
                             "cpuid"
                             : "=a"(BrandString[0]), "=b"(BrandString[1]), "=c"(BrandString[2]), "=d"(BrandString[3]) :);
            __asm__ volatile("movl $0x80000003, %%eax\n"
                             "cpuid"
                             : "=a"(BrandString[4]), "=b"(BrandString[5]), "=c"(BrandString[6]), "=d"(BrandString[7]) :);
            __asm__ volatile("movl $0x80000004, %%eax\n"
                             "cpuid"
                             : "=a"(BrandString[8]), "=b"(BrandString[9]), "=c"(BrandString[10]), "=d"(BrandString[11]) :);
            _puts((char*)&BrandString);
        }
        /* (The rest of the "cpu" handler and of command() is truncated in the source.) */
    }
}
noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *op, struct pt_regs *regs)
{
    __asm__ ("");  /* keep the compiler from optimizing this away as a pure function */
}
void thread_iter(int dram_refs, int nvm_refs, int interleave_dram, int interleave_nvm) { long it_n; unsigned long time_dram, time_nvm, total_time_dram_ns, total_time_nvm_ns; uint64_t seed; uint64_t j; chain_t *C_dram[MAX_NUM_CHAINS]; chain_t *C_nvm[MAX_NUM_CHAINS]; int missing_dram_refs, missing_nvm_refs; int dram_stalls, nvm_stalls; struct timespec task_time_start, task_time_end; unsigned long task_time_diff_ns; #ifndef NDEBUG pid_t tid = (pid_t) syscall(SYS_gettid); #endif assert(NELEMS < UINT64_MAX); for (j=0; j < NCHAINS; j++) { seed = SEED_IN + j*j; C_dram[j] = alloc_chain(seed, NELEMS, 64LLU, 0, 0); C_nvm[j] = alloc_chain(seed, NELEMS, 64LLU, 0, 1); __asm__(""); } bind_cpu(thread_self()); // cache must be trashed after bind_cpu() call trash_cache(NELEMS); total_time_dram_ns = 0; total_time_nvm_ns = 0; missing_dram_refs = dram_refs; missing_nvm_refs = nvm_refs; #ifndef NDEBUG printf("DRAM accesses to be made: %ld\n", dram_refs); printf("NVM accesses to be made: %ld\n", nvm_refs); #endif //delay_cycles(8000000000); //printf("STARTING MEASURES\n"); clock_gettime(CLOCK_MONOTONIC, &task_time_start); for (it_n = 0; (missing_dram_refs > 0) || (missing_nvm_refs > 0); ++it_n) { __asm__(""); // calculate the number o memory accesses to be made on each memory type if (missing_dram_refs > interleave_dram) { missing_dram_refs -= interleave_dram; dram_stalls = interleave_dram; } else { dram_stalls = missing_dram_refs; missing_dram_refs = 0; } if (missing_nvm_refs > interleave_nvm) { missing_nvm_refs -= interleave_nvm; nvm_stalls = interleave_nvm; } else { nvm_stalls = missing_nvm_refs; missing_nvm_refs = 0; } time_dram = 0; time_nvm = 0; // do memory accesses interleaved by dividing the number of accesses in smaller amount // as configured by user force_ldm_stalls((chain_t **)&C_dram, 64LLU, 8, dram_stalls, NELEMS, it_n, &time_dram); force_ldm_stalls((chain_t **)&C_nvm, 64LLU, 8, nvm_stalls, NELEMS, it_n, &time_nvm); total_time_dram_ns += time_dram; total_time_nvm_ns += time_nvm; #ifndef NDEBUG printf("%ld DRAM accesses took: %ld ns\n", dram_stalls, time_dram); printf("%ld NVM accesses took: %ld ns\n", nvm_stalls, time_nvm); #endif } clock_gettime(CLOCK_MONOTONIC, &task_time_end); task_time_diff_ns = ((task_time_end.tv_sec * 1000000000) + task_time_end.tv_nsec) - ((task_time_start.tv_sec * 1000000000) + task_time_start.tv_nsec); // the memory latency is the total time divided by the number of accesses for each memory type if (dram_refs > 0) total_time_dram_ns /= dram_refs; else total_time_dram_ns = 0; if (nvm_refs > 0) total_time_nvm_ns /= nvm_refs; else total_time_nvm_ns = 0; printf("DRAM latency: %ld ns\n", total_time_dram_ns); printf("NVM latency: %ld ns\n", total_time_nvm_ns); printf("Measure time: %.3lf ms\n", (double)task_time_diff_ns/1000000.0); printf("Expected time: %.3ld ms\n", ((total_time_dram_ns * dram_refs) + (total_time_nvm_ns * nvm_refs)) / 1000000); for (j=0; j < NCHAINS; j++) { free(C_dram[j]); free(C_nvm[j]); } }
noinline void __naked _mcount(unsigned long parent_ip)
{
    __asm__ ("");  /* keep the compiler from optimizing this away as a pure function */
}
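/*
 * Hedged aside (not from any of the sources above): the empty __asm__("")
 * statements in ftrace_stub()/_mcount() only stop the compiler from treating
 * the functions as pure.  When code must also keep loads and stores from being
 * reordered or cached across a point, the usual GCC/Clang form adds volatile
 * and a "memory" clobber, as in this minimal sketch:
 */
static inline void compiler_barrier(void)
{
    __asm__ __volatile__("" ::: "memory");  /* compiler-level barrier only, no CPU fence */
}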
static void scc_init (int scc_index) { bd_t *bd = gd->bd; static int proff[] = { PROFF_SCC1, PROFF_SCC2, PROFF_SCC3, PROFF_SCC4 }; static unsigned int cpm_cr[] = { CPM_CR_CH_SCC1, CPM_CR_CH_SCC2, CPM_CR_CH_SCC3, CPM_CR_CH_SCC4 }; int i; scc_enet_t *pram_ptr; volatile immap_t *immr = (immap_t *) CFG_IMMR; immr->im_cpm.cp_scc[scc_index].scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); #if defined(CONFIG_FADS) #if defined(CONFIG_MPC860T) || defined(CONFIG_MPC86xADS) /* The FADS860T and MPC86xADS don't use the MODEM_EN or DATA_VOICE signals. */ *((uint *) BCSR4) &= ~BCSR4_ETHLOOP; *((uint *) BCSR4) |= BCSR4_TFPLDL | BCSR4_TPSQEL; *((uint *) BCSR1) &= ~BCSR1_ETHEN; #else *((uint *) BCSR4) &= ~(BCSR4_ETHLOOP | BCSR4_MODEM_EN); *((uint *) BCSR4) |= BCSR4_TFPLDL | BCSR4_TPSQEL | BCSR4_DATA_VOICE; *((uint *) BCSR1) &= ~BCSR1_ETHEN; #endif #endif pram_ptr = (scc_enet_t *) & (immr->im_cpm.cp_dparam[proff[scc_index]]); rxIdx = 0; txIdx = 0; #ifdef CFG_ALLOC_DPRAM rtx = (RTXBD *) (immr->im_cpm.cp_dpmem + dpram_alloc_align (sizeof (RTXBD), 8)); #else rtx = (RTXBD *) (immr->im_cpm.cp_dpmem + CPM_SCC_BASE); #endif #if 0 #if (defined(PA_ENET_RXD) && defined(PA_ENET_TXD)) /* Configure port A pins for Txd and Rxd. */ immr->im_ioport.iop_papar |= (PA_ENET_RXD | PA_ENET_TXD); immr->im_ioport.iop_padir &= ~(PA_ENET_RXD | PA_ENET_TXD); immr->im_ioport.iop_paodr &= ~PA_ENET_TXD; #elif (defined(PB_ENET_RXD) && defined(PB_ENET_TXD)) /* Configure port B pins for Txd and Rxd. */ immr->im_cpm.cp_pbpar |= (PB_ENET_RXD | PB_ENET_TXD); immr->im_cpm.cp_pbdir &= ~(PB_ENET_RXD | PB_ENET_TXD); immr->im_cpm.cp_pbodr &= ~PB_ENET_TXD; #else #error Configuration Error: exactly ONE of PA_ENET_[RT]XD, PB_ENET_[RT]XD must be defined #endif #if defined(PC_ENET_LBK) /* Configure port C pins to disable External Loopback */ immr->im_ioport.iop_pcpar &= ~PC_ENET_LBK; immr->im_ioport.iop_pcdir |= PC_ENET_LBK; immr->im_ioport.iop_pcso &= ~PC_ENET_LBK; immr->im_ioport.iop_pcdat &= ~PC_ENET_LBK; /* Disable Loopback */ #endif /* PC_ENET_LBK */ /* Configure port C pins to enable CLSN and RENA. */ immr->im_ioport.iop_pcpar &= ~(PC_ENET_CLSN | PC_ENET_RENA); immr->im_ioport.iop_pcdir &= ~(PC_ENET_CLSN | PC_ENET_RENA); immr->im_ioport.iop_pcso |= (PC_ENET_CLSN | PC_ENET_RENA); /* Configure port A for TCLK and RCLK. */ immr->im_ioport.iop_papar |= (PA_ENET_TCLK | PA_ENET_RCLK); immr->im_ioport.iop_padir &= ~(PA_ENET_TCLK | PA_ENET_RCLK); /* * Configure Serial Interface clock routing -- see section 16.7.5.3 * First, clear all SCC bits to zero, then set the ones we want. */ immr->im_cpm.cp_sicr &= ~SICR_ENET_MASK; immr->im_cpm.cp_sicr |= SICR_ENET_CLKRT; #else /* * SCC2 receive clock is BRG2 * SCC2 transmit clock is BRG3 */ immr->im_cpm.cp_brgc2 = 0x0001000C; immr->im_cpm.cp_brgc3 = 0x0001000C; immr->im_cpm.cp_sicr &= ~0x00003F00; immr->im_cpm.cp_sicr |= 0x00000a00; #endif /* 0 */ /* * Initialize SDCR -- see section 16.9.23.7 * SDMA configuration register */ immr->im_siu_conf.sc_sdcr = 0x01; /* * Setup SCC Ethernet Parameter RAM */ pram_ptr->sen_genscc.scc_rfcr = 0x18; /* Normal Operation and Mot byte ordering */ pram_ptr->sen_genscc.scc_tfcr = 0x18; /* Mot byte ordering, Normal access */ pram_ptr->sen_genscc.scc_mrblr = DBUF_LENGTH; /* max. 
ET package len 1520 */ pram_ptr->sen_genscc.scc_rbase = (unsigned int) (&rtx->rxbd[0]); /* Set RXBD tbl start at Dual Port */ pram_ptr->sen_genscc.scc_tbase = (unsigned int) (&rtx->txbd[0]); /* Set TXBD tbl start at Dual Port */ /* * Setup Receiver Buffer Descriptors (13.14.24.18) * Settings: * Empty, Wrap */ for (i = 0; i < PKTBUFSRX; i++) { rtx->rxbd[i].cbd_sc = BD_ENET_RX_EMPTY; rtx->rxbd[i].cbd_datlen = 0; /* Reset */ rtx->rxbd[i].cbd_bufaddr = (uint) NetRxPackets[i]; } rtx->rxbd[PKTBUFSRX - 1].cbd_sc |= BD_ENET_RX_WRAP; /* * Setup Ethernet Transmitter Buffer Descriptors (13.14.24.19) * Settings: * Add PADs to Short FRAMES, Wrap, Last, Tx CRC */ for (i = 0; i < TX_BUF_CNT; i++) { rtx->txbd[i].cbd_sc = (BD_ENET_TX_PAD | BD_ENET_TX_LAST | BD_ENET_TX_TC); rtx->txbd[i].cbd_datlen = 0; /* Reset */ rtx->txbd[i].cbd_bufaddr = (uint) (&txbuf[0]); } rtx->txbd[TX_BUF_CNT - 1].cbd_sc |= BD_ENET_TX_WRAP; /* * Enter Command: Initialize Rx Params for SCC */ do { /* Spin until ready to issue command */ __asm__ ("eieio"); } while (immr->im_cpm.cp_cpcr & CPM_CR_FLG); /* Issue command */ immr->im_cpm.cp_cpcr = ((CPM_CR_INIT_RX << 8) | (cpm_cr[scc_index] << 4) | CPM_CR_FLG); do { /* Spin until command processed */ __asm__ ("eieio"); } while (immr->im_cpm.cp_cpcr & CPM_CR_FLG); /* * Ethernet Specific Parameter RAM * see table 13-16, pg. 660, * pg. 681 (example with suggested settings) */ pram_ptr->sen_cpres = ~(0x0); /* Preset CRC */ pram_ptr->sen_cmask = 0xdebb20e3; /* Constant Mask for CRC */ pram_ptr->sen_crcec = 0x0; /* Error Counter CRC (unused) */ pram_ptr->sen_alec = 0x0; /* Alignment Error Counter (unused) */ pram_ptr->sen_disfc = 0x0; /* Discard Frame Counter (unused) */ pram_ptr->sen_pads = 0x8888; /* Short Frame PAD Characters */ pram_ptr->sen_retlim = 15; /* Retry Limit Threshold */ pram_ptr->sen_maxflr = 1518; /* MAX Frame Length Register */ pram_ptr->sen_minflr = 64; /* MIN Frame Length Register */ pram_ptr->sen_maxd1 = DBUF_LENGTH; /* MAX DMA1 Length Register */ pram_ptr->sen_maxd2 = DBUF_LENGTH; /* MAX DMA2 Length Register */ pram_ptr->sen_gaddr1 = 0x0; /* Group Address Filter 1 (unused) */ pram_ptr->sen_gaddr2 = 0x0; /* Group Address Filter 2 (unused) */ pram_ptr->sen_gaddr3 = 0x0; /* Group Address Filter 3 (unused) */ pram_ptr->sen_gaddr4 = 0x0; /* Group Address Filter 4 (unused) */ #define ea bd->bi_enetaddr pram_ptr->sen_paddrh = (ea[5] << 8) + ea[4]; pram_ptr->sen_paddrm = (ea[3] << 8) + ea[2]; pram_ptr->sen_paddrl = (ea[1] << 8) + ea[0]; #undef ea pram_ptr->sen_pper = 0x0; /* Persistence (unused) */ pram_ptr->sen_iaddr1 = 0x0; /* Individual Address Filter 1 (unused) */ pram_ptr->sen_iaddr2 = 0x0; /* Individual Address Filter 2 (unused) */ pram_ptr->sen_iaddr3 = 0x0; /* Individual Address Filter 3 (unused) */ pram_ptr->sen_iaddr4 = 0x0; /* Individual Address Filter 4 (unused) */ pram_ptr->sen_taddrh = 0x0; /* Tmp Address (MSB) (unused) */ pram_ptr->sen_taddrm = 0x0; /* Tmp Address (unused) */ pram_ptr->sen_taddrl = 0x0; /* Tmp Address (LSB) (unused) */ /* * Enter Command: Initialize Tx Params for SCC */ do { /* Spin until ready to issue command */ __asm__ ("eieio"); } while (immr->im_cpm.cp_cpcr & CPM_CR_FLG); /* Issue command */ immr->im_cpm.cp_cpcr = ((CPM_CR_INIT_TX << 8) | (cpm_cr[scc_index] << 4) | CPM_CR_FLG); do { /* Spin until command processed */ __asm__ ("eieio"); } while (immr->im_cpm.cp_cpcr & CPM_CR_FLG); /* * Mask all Events in SCCM - we use polling mode */ immr->im_cpm.cp_scc[scc_index].scc_sccm = 0; /* * Clear Events in SCCE -- Clear bits by writing 1's */ 
immr->im_cpm.cp_scc[scc_index].scc_scce = ~(0x0); /* * Initialize GSMR High 32-Bits * Settings: Normal Mode */ immr->im_cpm.cp_scc[scc_index].scc_gsmrh = 0; /* * Initialize GSMR Low 32-Bits, but do not Enable Transmit/Receive * Settings: * TCI = Invert * TPL = 48 bits * TPP = Repeating 10's * LOOP = Loopback * MODE = Ethernet */ immr->im_cpm.cp_scc[scc_index].scc_gsmrl = (SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 | SCC_GSMRL_DIAG_LOOP | SCC_GSMRL_MODE_ENET); /* * Initialize the DSR -- see section 13.14.4 (pg. 513) v0.4 */ immr->im_cpm.cp_scc[scc_index].scc_dsr = 0xd555; /* * Initialize the PSMR * Settings: * CRC = 32-Bit CCITT * NIB = Begin searching for SFD 22 bits after RENA * LPB = Loopback Enable (Needed when FDE is set) */ immr->im_cpm.cp_scc[scc_index].scc_psmr = SCC_PSMR_ENCRC | SCC_PSMR_NIB22 | SCC_PSMR_LPB; #if 0 /* * Configure Ethernet TENA Signal */ #if (defined(PC_ENET_TENA) && !defined(PB_ENET_TENA)) immr->im_ioport.iop_pcpar |= PC_ENET_TENA; immr->im_ioport.iop_pcdir &= ~PC_ENET_TENA; #elif (defined(PB_ENET_TENA) && !defined(PC_ENET_TENA)) immr->im_cpm.cp_pbpar |= PB_ENET_TENA; immr->im_cpm.cp_pbdir |= PB_ENET_TENA; #else #error Configuration Error: exactly ONE of PB_ENET_TENA, PC_ENET_TENA must be defined #endif #if defined(CONFIG_ADS) && defined(CONFIG_MPC860) /* * Port C is used to control the PHY,MC68160. */ immr->im_ioport.iop_pcdir |= (PC_ENET_ETHLOOP | PC_ENET_TPFLDL | PC_ENET_TPSQEL); immr->im_ioport.iop_pcdat |= PC_ENET_TPFLDL; immr->im_ioport.iop_pcdat &= ~(PC_ENET_ETHLOOP | PC_ENET_TPSQEL); *((uint *) BCSR1) &= ~BCSR1_ETHEN; #endif /* MPC860ADS */ #if defined(CONFIG_AMX860) /* * Port B is used to control the PHY,MC68160. */ immr->im_cpm.cp_pbdir |= (PB_ENET_ETHLOOP | PB_ENET_TPFLDL | PB_ENET_TPSQEL); immr->im_cpm.cp_pbdat |= PB_ENET_TPFLDL; immr->im_cpm.cp_pbdat &= ~(PB_ENET_ETHLOOP | PB_ENET_TPSQEL); immr->im_ioport.iop_pddir |= PD_ENET_ETH_EN; immr->im_ioport.iop_pddat &= ~PD_ENET_ETH_EN; #endif /* AMX860 */ #endif /* 0 */ #ifdef CONFIG_RPXCLASSIC *((uchar *) BCSR0) &= ~BCSR0_ETHLPBK; *((uchar *) BCSR0) |= (BCSR0_ETHEN | BCSR0_COLTEST | BCSR0_FULLDPLX); #endif #ifdef CONFIG_RPXLITE *((uchar *) BCSR0) |= BCSR0_ETHEN; #endif #ifdef CONFIG_MBX board_ether_init (); #endif /* * Set the ENT/ENR bits in the GSMR Low -- Enable Transmit/Receive */ immr->im_cpm.cp_scc[scc_index].scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT); /* * Work around transmit problem with first eth packet */ #if defined (CONFIG_FADS) udelay (10000); /* wait 10 ms */ #elif defined (CONFIG_AMX860) || defined(CONFIG_RPXCLASSIC) udelay (100000); /* wait 100 ms */ #endif }
int readSpdData (u8 * spdData) { DECLARE_GLOBAL_DATA_PTR; volatile i2c8220_t *pi2cReg; volatile pcfg8220_t *pcfg; u8 slvAdr = DRAM_SPD; u8 Tmp; int Length = SPD_SIZE; int i = 0; /* Enable Port Configuration for SDA and SDL signals */ pcfg = (volatile pcfg8220_t *) (MMAP_PCFG); __asm__ ("sync"); pcfg->pcfg3 &= ~CFG_I2C_PORT3_CONFIG; __asm__ ("sync"); /* Points the structure to I2c mbar memory offset */ pi2cReg = (volatile i2c8220_t *) (MMAP_I2C); /* Clear FDR, ADR, SR and CR reg */ pi2cReg->adr = 0; pi2cReg->fdr = 0; pi2cReg->cr = 0; pi2cReg->sr = 0; /* Set for fix XLB Bus Frequency */ switch (gd->bus_clk) { case 60000000: pi2cReg->fdr = 0x15; break; case 70000000: pi2cReg->fdr = 0x16; break; case 80000000: pi2cReg->fdr = 0x3a; break; case 90000000: pi2cReg->fdr = 0x17; break; case 100000000: pi2cReg->fdr = 0x3b; break; case 110000000: pi2cReg->fdr = 0x18; break; case 120000000: pi2cReg->fdr = 0x19; break; case 130000000: pi2cReg->fdr = 0x1a; break; } pi2cReg->adr = CFG_I2C_SLAVE<<1; pi2cReg->cr = I2C_CTL_EN; /* Set Enable */ /* The I2C bus should be in Idle state. If the bus is busy, clear the STA bit in control register */ if (spd_status (pi2cReg, I2C_STA_BB, 0) != OK) { if ((pi2cReg->cr & I2C_CTL_STA) == I2C_CTL_STA) pi2cReg->cr &= ~I2C_CTL_STA; /* Check again if it is still busy, return error if found */ if (spd_status (pi2cReg, I2C_STA_BB, 1) == OK) return ERROR; } pi2cReg->cr |= I2C_CTL_TX; /* Enable the I2c for TX, Ack */ pi2cReg->cr |= I2C_CTL_STA; /* Generate start signal */ if (spd_status (pi2cReg, I2C_STA_BB, 1) != OK) return ERROR; /* Write slave address */ pi2cReg->sr &= ~I2C_STA_IF; /* Clear Interrupt */ pi2cReg->dr = slvAdr; /* Write a byte */ if (spd_status (pi2cReg, I2C_STA_CF, 1) != OK) { /* Transfer not complete? */ spd_stop (pi2cReg); return ERROR; } if (spd_status (pi2cReg, I2C_STA_IF, 1) != OK) { spd_stop (pi2cReg); return ERROR; } /* Issue the offset to start */ pi2cReg->sr &= ~I2C_STA_IF; /* Clear Interrupt */ pi2cReg->dr = 0; /* Write a byte */ if (spd_status (pi2cReg, I2C_STA_CF, 1) != OK) { /* Transfer not complete? */ spd_stop (pi2cReg); return ERROR; } if (spd_status (pi2cReg, I2C_STA_IF, 1) != OK) { spd_stop (pi2cReg); return ERROR; } /* Set repeat start */ pi2cReg->cr |= I2C_CTL_RSTA; /* Repeat Start */ pi2cReg->sr &= ~I2C_STA_IF; /* Clear Interrupt */ pi2cReg->dr = slvAdr | 1; /* Write a byte */ if (spd_status (pi2cReg, I2C_STA_CF, 1) != OK) { /* Transfer not complete? */ spd_stop (pi2cReg); return ERROR; } if (spd_status (pi2cReg, I2C_STA_IF, 1) != OK) { spd_stop (pi2cReg); return ERROR; } if (((pi2cReg->sr & 0x07) == 0x07) || (pi2cReg->sr & 0x01)) return ERROR; pi2cReg->cr &= ~I2C_CTL_TX; /* Set receive mode */ if (((pi2cReg->sr & 0x07) == 0x07) || (pi2cReg->sr & 0x01)) return ERROR; /* Dummy Read */ if (spd_readbyte (pi2cReg, &Tmp, &i) != OK) { spd_stop (pi2cReg); return ERROR; } i = 0; while (Length) { if (Length == 2) pi2cReg->cr |= I2C_CTL_TXAK; if (Length == 1) pi2cReg->cr &= ~I2C_CTL_STA; if (spd_readbyte (pi2cReg, spdData, &Length) != OK) { return spd_stop (pi2cReg); } i++; Length--; spdData++; } /* Stop the service */ spd_stop (pi2cReg); return OK; }
long get_esp()
{
    long esp;
    __asm__("movl %%esp, %0" : "=r"(esp));
    return esp;
}
int main(void) { for (u32 i = 0; i < 600000; i++) __asm__("nop"); led_setup(); // Debug pins (CC1111 TX/RX) RCC_AHB1ENR |= RCC_AHB1ENR_IOPCEN; gpio_mode_setup(GPIOC, GPIO_MODE_OUTPUT, GPIO_PUPD_NONE, GPIO10|GPIO11); gpio_clear(GPIOC, GPIO10|GPIO11); rcc_clock_setup_hse_3v3(&hse_16_368MHz_in_130_944MHz_out_3v3); debug_setup(); printf("\n\n# Firmware info - git: " GIT_VERSION ", built: " __DATE__ " " __TIME__ "\n"); swift_nap_setup(); swift_nap_reset(); led_toggle(LED_GREEN); led_toggle(LED_RED); u64 cw_power; float cw_freq; while(1) { printf("#PLOT_DATA_START\n"); // Load CW ram cw_schedule_load(timing_count() + 1000); while (!(cw_get_load_done())); printf("# Finished loading cw ram\n"); // Do CW detection // cw_start(-4e6,4e6,8e6/(SPECTRUM_LEN-1)); // cw_start(0.5e6,0.7e6,0.2e6/(SPECTRUM_LEN-1)); float cf = (1.575542*1e9-1.575420*1e9); float span = 200e3; cw_start(cf-span/2,cf+span/2,span/(SPECTRUM_LEN-1)); while (!(cw_get_running_done())); printf("# Finished doing cw detection\n"); for (u16 si=0;si<SPECTRUM_LEN;si++) { for (u32 dly = 0; dly < 50000; dly++) __asm__("nop"); cw_get_spectrum_point(&cw_freq,&cw_power,si); if (~((cw_power == 0) && (cw_freq == 0))) { // printf("%+7.2f %lu # %d\n",cw_freq,(long unsigned int)cw_power,(unsigned int)si); printf("%+7.1f %lu\n",cw_freq,(long unsigned int)cw_power); // printf("%+4.2f %lu\n",cw_freq,(long unsigned int)cw_power); } u32 err = swift_nap_read_error_blocking(); if (err) { printf("Error: 0x%08X\n", (unsigned int)err); while(1); } } printf("#PLOT_DATA_END\n"); } while (1); return 0; }
void machine_restart(void)
{
    printk(KERN_INFO "*** MACHINE RESTART ***\n");
    __asm__("l.nop 1");
}
// ----------------------------------------------------------------------------
// Type II command: WRITE-DATA
// ----------------------------------------------------------------------------
void MB8877::cmd_writedata(char cmd)
{
    unsigned char BYTE,        // the byte to serve
                  blocksize,   // # bytes / sector
                  nsectors,    // # sectors to write
                  last_sector; // first sector NOT to be written

#ifdef FDC_DEBUG
    fdcdisplay((char*)" II WRITE_DATA");
#endif

    // Calculate the number of sectors we will have to write
    nsectors = 1;
    if (fdc.cmdtype == FDC_CMD_WR_MSEC)
        nsectors = (fdc.track < 80 ? FDC_SECTORS_0 : FDC_SECTORS_1) - reg[SECTOR];
    blocksize = (fdc.track < 80) ? FDC_SIZE_SECTOR_0 : FDC_SIZE_SECTOR_1;

    reg[STATUS] = FDC_ST_BUSY | FDC_ST_HEADENG;

    // Make some comparison: is it the desired side ? Is reg[SECTOR] Ok ?
    if (((reg[CMD] & FDC_FLAG_VERIFICATION) && (reg[CMD] & 0x08) != fdc.side) ||
        (reg[SECTOR] > nsectors))
    {
        reg[STATUS] |= FDC_ST_RECNFND;
        return;                              // Exit with record not found status
    }

    // Main loop: we'll service the sector byte per byte.
    // Transfer each byte to the Data register and generate a DRQ.
    // (The end sector is computed up front; the original condition
    //  "reg[SECTOR] < reg[SECTOR]+nsectors" was always true.)
    last_sector = reg[SECTOR] + nsectors;
    for (; reg[SECTOR] < last_sector; reg[SECTOR]++)
    {
        // Write the Data Address Mark on the first byte of the current sector
        BYTE = (reg[CMD] & FDC_FLAG_DAM) ? 0x01 : 0x00;
        if (disk.write(&BYTE, 1) == -1)      // Write error
        {
            reg[STATUS] |= FDC_ST_WRITEFAULT;
            return;
        }

        fdc.position = 0;
        do
        {
            qx1bus = 0xff;
            PORTC &= 0xf0;
            PORTC |= BUS_SELECT_DATA;                // Prepare bus to get data
            attachInterrupt(0, read_qx1, FALLING);   // Prepare interrupt
            digitalWrite(FDC_DRQ, LOW);              // Fire DRQ Interrupt
            __asm__("nop\n\t" "nop\n\t");            // Waits ~ 125 nsec
            if (qx1bus == 0xff)
            {
                reg[STATUS] |= FDC_ST_LOSTDATA;      // QX1 did not load DATA in time; exits
                return;
            }
            BYTE = qx1bus;                           // byte delivered by the QX1 on the data bus
            if (disk.write(&BYTE, 1) == -1)          // Write error
            {
                reg[STATUS] |= FDC_ST_WRITEFAULT;
                return;
            }
        } while (fdc.position++ < blocksize);
    }

    nsectors = (fdc.track < 80 ? FDC_SECTORS_0 : FDC_SECTORS_1);
    if (reg[SECTOR] > nsectors)
        reg[SECTOR] = nsectors;
}
/*
 * Similar to machine_power_off, but don't shut off power.  Add code
 * here to freeze the system for e.g. post-mortem debug purpose when
 * possible.  This halt has nothing to do with the idle halt.
 */
void machine_halt(void)
{
    printk(KERN_INFO "*** MACHINE HALT ***\n");
    __asm__("l.nop 1");
}
static void swd_search(ucl_swd_t * s, ucl_uint node, ucl_uint cnt) { #if 0 && defined(__GNUC__) && defined(__i386__) register const unsigned char *p1 __asm__("%edi"); register const unsigned char *p2 __asm__("%esi"); register const unsigned char *px __asm__("%edx"); #else const unsigned char *p1; const unsigned char *p2; const unsigned char *px; #endif ucl_uint m_len = s->m_len; const unsigned char *b = s->b; const unsigned char *bp = s->b + s->bp; const unsigned char *bx = s->b + s->bp + s->look; unsigned char scan_end1; assert(s->m_len > 0); scan_end1 = bp[m_len - 1]; for (; cnt-- > 0; node = s->succ3[node]) { p1 = bp; p2 = b + node; px = bx; assert(m_len < s->look); if ( #if 1 p2[m_len - 1] == scan_end1 && p2[m_len] == p1[m_len] && #endif p2[0] == p1[0] && p2[1] == p1[1]) { ucl_uint i; assert(ucl_memcmp(bp, &b[node], 3) == 0); #if 0 && defined(UCL_UNALIGNED_OK_4) p1 += 3; p2 += 3; while (p1 < px && *(const ucl_uint32p) p1 == *(const ucl_uint32p) p2) p1 += 4, p2 += 4; while (p1 < px && *p1 == *p2) p1 += 1, p2 += 1; #else p1 += 2; p2 += 2; do { } while (++p1 < px && *p1 == *++p2); #endif i = p1 - bp; #ifdef UCL_DEBUG if (ucl_memcmp(bp, &b[node], i) != 0) printf("%5ld %5ld %02x%02x %02x%02x\n", (long) s->bp, (long) node, bp[0], bp[1], b[node], b[node + 1]); #endif assert(ucl_memcmp(bp, &b[node], i) == 0); #if defined(SWD_BEST_OFF) if (i < SWD_BEST_OFF) { if (s->best_pos[i] == 0) s->best_pos[i] = node + 1; } #endif if (i > m_len) { s->m_len = m_len = i; s->m_pos = node; if (m_len == s->look) return; if (m_len >= s->nice_length) return; if (m_len > (ucl_uint) s->best3[node]) return; scan_end1 = bp[m_len - 1]; } } } }
/* If or when software power-off is implemented, add code here. */
void machine_power_off(void)
{
    printk(KERN_INFO "*** MACHINE POWER OFF ***\n");
    __asm__("l.nop 1");
}
/* Override library _fpreset() with asm fninit */
void _fpreset(void)
{
    __asm__("fninit");
}
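/*
 * Hedged usage sketch (not part of the original source): the override above
 * re-initialises the x87 FPU with fninit.  A caller would typically invoke it
 * after external code has changed the FPU control word, e.g.:
 */
#include <float.h>                /* declares _fpreset() on MSVC/MinGW toolchains */

static void restore_fpu_defaults(void)
{
    _fpreset();                   /* reset precision/rounding control to power-up defaults */
}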
void Default_Handler(void)
{
    while (1) {
        __asm__("halt");
    }
}
unsigned long get_sp(void)
{
    unsigned long esp;
    __asm__("movl %%esp, %0" : "=r"(esp));
    return esp;
}
// reboots MakeSuremote
void MakeSurePWR::reboot()
{
    __asm__("jmp 0x1E000");
}