/*
 * Read the v2 RNG control state via hypervisor call, retrying on
 * transient errors.  HV_EBUSY is retried up to N2RNG_BUSY_LIMIT times
 * with a 1us pause; HV_EWOULDBLOCK up to N2RNG_BLOCK_LIMIT times,
 * sleeping the tick count the hypervisor suggests.  Returns the final
 * hypervisor error code (HV_EOK on success).
 */
static unsigned long n2rng_generic_read_control_v2(unsigned long ra,
						   unsigned long unit)
{
	unsigned long err, state, ticks, wd_delta, wd_stat;
	int busies_seen = 0, blocks_seen = 0;

	for (;;) {
		err = sun4v_rng_ctl_read_v2(ra, unit, &state, &ticks,
					    &wd_delta, &wd_stat);
		if (err == HV_EOK)
			return err;

		if (err == HV_EBUSY) {
			if (++busies_seen >= N2RNG_BUSY_LIMIT)
				return err;
			udelay(1);
			continue;
		}

		if (err != HV_EWOULDBLOCK)
			return err;

		if (++blocks_seen >= N2RNG_BLOCK_LIMIT)
			return err;
		/* Hypervisor told us how long to back off. */
		__delay(ticks);
	}
}
/*
 * Save each SPU's user-visible state into crash_spu_info[] and then stop
 * the SPU by clearing the master run-control bit in MFC_SR1, so the
 * crash/kexec path gets a stable snapshot.
 */
static void crash_kexec_stop_spus(void)
{
	struct spu *spu;
	int i;
	u64 tmp;

	for (i = 0; i < CRASH_NUM_SPUS; i++) {
		/* Slot may be empty if fewer SPUs were registered. */
		if (!crash_spu_info[i].spu)
			continue;

		spu = crash_spu_info[i].spu;

		/* Snapshot problem-state registers before touching SR1. */
		crash_spu_info[i].saved_spu_runcntl_RW =
			in_be32(&spu->problem->spu_runcntl_RW);
		crash_spu_info[i].saved_spu_status_R =
			in_be32(&spu->problem->spu_status_R);
		crash_spu_info[i].saved_spu_npc_RW =
			in_be32(&spu->problem->spu_npc_RW);

		crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu);
		crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu);

		/* Save SR1, then clear the run-control bit to halt the SPU. */
		tmp = spu_mfc_sr1_get(spu);
		crash_spu_info[i].saved_mfc_sr1_RW = tmp;

		tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
		spu_mfc_sr1_set(spu, tmp);

		/* Brief pause after stopping -- presumably lets the SPU
		 * settle before the next one is handled; TODO confirm. */
		__delay(200);
	}
}
unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) { /* * Experiments with ethernet and slip connections show that buff * is aligned on either a 2-byte or 4-byte boundary. */ const unsigned char *endMarker = buff + len; const unsigned char *marker = endMarker - (len % 16); #if 0 if((int)buff & 0x3) printk("unaligned buff %p\n", buff); __delay(900); /* extra delay of 90 us to test performance hit */ #endif BITON; while (buff < marker) { sum += *((unsigned short *)buff)++; sum += *((unsigned short *)buff)++; sum += *((unsigned short *)buff)++; sum += *((unsigned short *)buff)++; sum += *((unsigned short *)buff)++; sum += *((unsigned short *)buff)++; sum += *((unsigned short *)buff)++; sum += *((unsigned short *)buff)++; } marker = endMarker - (len % 2); while(buff < marker) { sum += *((unsigned short *)buff)++; } if(endMarker - buff > 0) { sum += *buff; /* add extra byte seperately */ } BITOFF; return(sum); }
/*
 * Debug slow path for spin_lock: spin with trylock for a bounded number
 * of iterations, and if the lock never comes free, report a suspected
 * lockup and fall through to the arch lock so forward progress is still
 * possible if the lock is eventually released.
 */
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 spin_limit = (loops_per_jiffy * HZ) >> 4;
	u64 attempt;

	for (attempt = 0; attempt < spin_limit; attempt++) {
		if (arch_spin_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}

	/* lockup suspected: */
	spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

	/*
	 * The trylock above was causing a livelock. Give the lower level arch
	 * specific lock code a chance to acquire the lock. We have already
	 * printed a warning/backtrace at this point. The non-debug arch
	 * specific code might actually succeed in acquiring the lock. If it is
	 * not successful, the end-result is the same - there is no forward
	 * progress.
	 */
	arch_spin_lock(&lock->raw_lock);
}
/*
 * Debug slow path for spin_lock (vendor variant with AEE reporting).
 * Spins forever with trylock; each time a full round of attempts fails,
 * prints how long we have been spinning, and on the first failure only,
 * dumps lock state, backtraces, held locks, and raises an AEE warning.
 */
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * LOOP_HZ;
	int print_once = 1;	/* heavy diagnostics emitted only once */
	char aee_str[40];
	unsigned long long t1;

	t1 = sched_clock();	/* start timestamp for the spin-time report */
	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		printk("spin time: %llu ns(start:%llu ns, lpj:%lu, HZ:%d)",
			sched_clock() - t1, t1, loops_per_jiffy, (int)LOOP_HZ);
		if (print_once) {
			print_once = 0;
			spin_dump(lock, "lockup");
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
			debug_show_all_locks();
			/* Report offending task name via AEE (vendor crash
			 * reporting facility). */
			snprintf(aee_str, 40, "Spinlock lockup:%s\n",
				current->comm);
			aee_kernel_warning_api(__FILE__, __LINE__,
				DB_OPT_DUMMY_DUMP | DB_OPT_FTRACE, aee_str,
				"spinlock debugger\n");
		}
	}
}
/*
 * Debug slow path for spin_lock: spin forever with trylock, and emit a
 * one-time lockup report (plus backtraces on SMP) if a full round of
 * attempts ever fails.
 */
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 max_spins = loops_per_jiffy * HZ;
	u64 n;
	int warned = 0;

	for (;;) {
		for (n = 0; n < max_spins; n++) {
			if (__raw_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}

		/* lockup suspected: */
		if (!warned) {
			warned = 1;
			printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
		}
	}
}
static int n2rng_generic_read_diag_data(struct n2rng *np, unsigned long unit, unsigned long data_ra, unsigned long data_len) { unsigned long ticks, hv_err; int block = 0; while (1) { hv_err = n2rng_read_diag_data_one(np, unit, data_ra, data_len, &ticks); if (hv_err == HV_EOK) return 0; if (hv_err == HV_EWOULDBLOCK) { if (++block >= N2RNG_BLOCK_LIMIT) return -EWOULDBLOCK; __delay(ticks); } else if (hv_err == HV_ENOACCESS) { return -EPERM; } else if (hv_err == HV_EIO) { return -EIO; } else return -ENODEV; } }
static int n2rng_generic_write_control(struct n2rng *np, unsigned long control_ra, unsigned long unit, unsigned long state) { unsigned long hv_err, ticks; int block = 0, busy = 0; while (1) { hv_err = n2rng_write_ctl_one(np, unit, state, control_ra, np->wd_timeo, &ticks); if (hv_err == HV_EOK) return 0; if (hv_err == HV_EWOULDBLOCK) { if (++block >= N2RNG_BLOCK_LIMIT) return -EWOULDBLOCK; __delay(ticks); } else if (hv_err == HV_EBUSY) { if (++busy >= N2RNG_BUSY_LIMIT) return -EBUSY; udelay(1); } else return -ENODEV; } }
/*
 * Calibrate loops_per_sec (BogoMIPS) against the jiffies clock:
 * first a coarse doubling pass to find a delay spanning at least one
 * tick, then a binary refinement of LPS_PREC bits.
 */
void calibrate_delay(void)
{
	int ticks;
	int loopbit;
	int lps_precision = LPS_PREC;

	loops_per_sec = (1<<12);	/* starting guess for the coarse pass */
	printk("Calibrating delay loop.. ");
	/* Coarse pass: double until __delay() spans at least one tick. */
	while (loops_per_sec <<= 1) {
		/* wait for "start of" clock tick */
		ticks = jiffies;
		while (ticks == jiffies)
			/* nothing */;
		/* Go .. */
		ticks = jiffies;
		__delay(loops_per_sec);
		ticks = jiffies - ticks;
		if (ticks)
			break;
	}

	/*
	 * Do a binary approximation to get loops_per_second set to equal
	 * one clock (up to lps_precision bits)
	 */
	loops_per_sec >>= 1;
	loopbit = loops_per_sec;
	while (lps_precision-- && (loopbit >>= 1)) {
		/* Tentatively set this bit; clear it if we overshoot. */
		loops_per_sec |= loopbit;
		ticks = jiffies;
		while (ticks == jiffies);
		ticks = jiffies;
		__delay(loops_per_sec);
		if (jiffies != ticks)	/* longer than 1 tick */
			loops_per_sec &= ~loopbit;
	}

	/*
	 * finally, adjust loops per second in terms of seconds instead of
	 * clocks
	 */
	loops_per_sec *= HZ;
	/* Round the value and print it */
	printk("ok - %lu.%02lu BogoMIPS\n",
		(loops_per_sec+2500)/500000,
		((loops_per_sec+2500)/5000) % 100);
#if defined(__SMP__) && defined(__i386__)
	/* Per-tick loop budget used by the SMP code -- divisor 400 is
	 * inherited as-is; rationale not visible here. */
	smp_loops_per_tick = loops_per_sec / 400;
#endif
}
/*
 * Scale a fixed-point delay constant by this CPU's calibrated
 * loops_per_sec and busy-wait that many loops.  The mull leaves the
 * high 32 bits of (xloops * loops_per_sec) in %edx, which the "=d"
 * output binds back to xloops -- i.e. a 32x32->64 multiply keeping
 * only the top half.
 */
inline void __const_udelay(unsigned long xloops)
{
	int d0;	/* dummy to clobber/receive %eax */

	__asm__("mull %0"
		:"=d" (xloops), "=&a" (d0)
		:"1" (xloops), "0" (current_cpu_data.loops_per_sec));
	__delay(xloops);
}
/*
 * Busy-wait delay.
 *
 * NOTE(review): the 'us' parameter is never used -- the loop count is
 * always cpu_clock / 5, so every call delays the same fixed amount
 * regardless of the requested duration.  This looks like a bug; the
 * intended scaling (presumably something like a per-microsecond loop
 * count multiplied by 'us') cannot be confirmed from this file alone.
 */
void udelay(uint us)
{
	uint loops;

	loops = cpu_clock / 5;	/* fixed count; does not scale with 'us' */
	__delay(loops);
}
/* We used to multiply by HZ after shifting down by 32 bits
 * but that runs into problems for higher values of HZ and
 * slow cpus.
 */
/*
 * n is a fixed-point fraction of a jiffy; multiply by this CPU's
 * calibrated udelay_val (pre-scaled by HZ/4, with n scaled up by 4 to
 * compensate -- see the comment above about overflow), keep the high
 * 32 bits, and spin at least one loop.
 */
void __const_udelay(unsigned long n)
{
	n *= 4;	/* compensate for the HZ/4 factor below */

	n *= (cpu_data(raw_smp_processor_id()).udelay_val * (HZ/4));
	n >>= 32;	/* take the integer part of the fixed-point product */

	__delay(n + 1);	/* +1 guarantees a non-zero delay */
}
/*
 * Scale a fixed-point delay constant by this CPU's loops_per_jiffy.
 * xloops is pre-multiplied by 4 and loops_per_jiffy by HZ/4 (keeping
 * the intermediate product in range); the mull keeps the high 32 bits
 * of the 32x32 product in %edx, bound back to xloops.  ++xloops makes
 * the delay at least one loop.
 */
inline void __const_udelay(unsigned long xloops)
{
	int d0;	/* dummy to clobber/receive %eax */

	xloops *= 4;
	__asm__("mull %0"
		:"=d" (xloops), "=&a" (d0)
		:"1" (xloops), "0" (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4)));
	__delay(++xloops);
}
/*
 * Drive the RTC chip-enable (CE) line on GPIO4: high when @val is
 * non-zero, low otherwise.  Optionally waits afterwards when RTC_DELAY
 * is configured (presumably chip-select settle time -- TODO confirm).
 *
 * Fix: the original declared this "void static"; storage-class
 * specifiers other than first are obsolescent (C11 6.11.5), so use the
 * conventional "static void" ordering.
 */
static void rtc_set_ce(u32 val)
{
#ifdef DEBUG
	printk("rtc_set_ce(%d)\n", val);
#endif
	reg_set32(GIU_PIO0, GPIO_4_CE, val ? SET_32_BIT : 0);
#ifdef RTC_DELAY
	__delay(100000);
#endif
}
/*
 * calibrating delay loop...
 */
/*
 * Calibrate loops_per_jiffy against the jiffies clock: a coarse
 * doubling pass followed by an 8-bit binary refinement.  Skipped
 * entirely when the system tick rate is unknown (hz == 0).
 */
void calibrate_delay(void)
{
	u32_t ticks, loopbit;
	s32_t lps_precision = 8;	/* refinement bits */
	u32_t hz = get_system_hz();

	if (hz > 0) {
		loops_per_jiffy = (1<<12);	/* starting guess */
		/* Coarse pass: double until __delay() spans >= 1 tick. */
		while ((loops_per_jiffy <<= 1) != 0) {
			/* wait for "start of" clock tick */
			ticks = jiffies;
			while (ticks == jiffies);
			/* go ... */
			ticks = jiffies;
			__delay(loops_per_jiffy);
			ticks = jiffies - ticks;
			if (ticks)
				break;
		}

		/* Binary refinement: try each lower bit, clear on overshoot. */
		loops_per_jiffy >>= 1;
		loopbit = loops_per_jiffy;
		while (lps_precision-- && (loopbit >>= 1)) {
			loops_per_jiffy |= loopbit;
			ticks = jiffies;
			while (ticks == jiffies);
			ticks = jiffies;
			__delay(loops_per_jiffy);
			/* longer than 1 tick */
			if (jiffies != ticks)
				loops_per_jiffy &= ~loopbit;
		}
	}
/* NOTE(review): the function body appears truncated here -- its closing
 * brace is missing in this chunk of the source. */
/*
 * Application-processor init: brings up IPC with the firmware, memory
 * and driver configuration, then dispatches to barcode/SN upgrade
 * clients, falling back to OTA init and the normal jump policy.
 * Always returns 0.
 */
int ap_init(void)
{
	RET_CODE ret = ERR_FAILURE;

	/* NOTE(review): raw spin delay at boot; units are __delay loops,
	 * purpose (settle time?) not documented -- confirm. */
	__delay(0x10000);
	/* Make sure we read the firmware function table from RAM, not
	 * a stale cache line. */
	hal_dcache_invalidate((void *)&g_ipcfw_f, sizeof(ipc_fw_fun_set_t));
	attach_ipcfw_fun_set_concerto(&g_ipcfw_f);
	ap_ipc_init(32); // ap ipc fifo create
	mem_cfg(MEMCFG_T_NORMAL);
	drv_init_concerto();
	OS_PRINTF("$$$ %s,%d\n\n", __FUNCTION__, __LINE__);

	/* Barcode upgrade path first; on success jump to the barcode app. */
	ret = barcode_client_process();
	if (ret == SUCCESS) {
		jump_barcode_app(UPG_TOOL_BLOCK_ID);
	}

	/* Serial-number client next; success jumps to the app, otherwise
	 * fall through to OTA init and the normal jump policy. */
	ret = sn_client_process();
	if (ret == SUCCESS) {
		jump_app(UPG_TOOL_BLOCK_ID);
	} else {
#if 0
		drv_dev_t * p_pic_dev;
		pic_info_t pic_info = {0};
		pdec_ins_t picdec_ins = {0};

		load_logo();
		mtos_printk("PDEC_OUT_ADDR 0x%x\n", PDEC_OUT_ADDR);
		p_pic_dev = (drv_dev_t *)dev_find_identifier(NULL,
				DEV_IDT_TYPE, SYS_DEV_TYPE_PDEC);
		MT_ASSERT(p_pic_dev != NULL);
		if (pdec_getinfo(p_pic_dev, g_p_logo_buf, g_logo_size,
				&pic_info, &picdec_ins) == SUCCESS
			&& pic_info.image_format != IMAGE_FORMAT_UNKNOWN) {
			OS_PRINTF("LOGO is picture, pic size [%d,%d] ,format %d\n",
				pic_info.src_width, pic_info.src_height,
				pic_info.image_format);
			show_logo(PDEC_OUT_ADDR, g_p_logo_buf, g_logo_size,
				DRAW_ON_OSD);
		} else
#endif
		ota_dm_init();
		load_jump_policy();
	}
	return 0;
}
/*
 * Initialize the Ricoh RX-5C348 RTC behind the CSI1 serial interface:
 * reset the peripheral bus and CSI1, route GPIO3/GPIO4 pins, force the
 * chip into 24-hour mode if needed, and install the RTC accessor
 * function pointers.  Always returns 0.
 */
static int __init rtc_ricoh_rx5c348_init(void)
{
	unsigned char data;

	/* CSI1 reset */
	io_set16(PIB_RESET, 0x40, 0xffff);
	__delay(10000);	/* hold reset briefly before releasing */
	io_set16(PIB_RESET, 0x40, 0x0000);

	/* set GPIO3 , GPIO4 */
	reg_set32(GIU_FUNCSEL0, (GPIO_4_CE | GPIO_3_INTR), SET_32_BIT);
	/* clear GPIO25 , GPIO26 , GPIO27 */
	reg_set32(GIU_FUNCSEL0, GPIO_CSI1_PIN, CLR_32_BIT);
	/* make GPIO4 output */
	reg_set32(GIU_DIR0, GPIO_4_CE, SET_32_BIT);
	/* make GPIO3 input */
	reg_set32(GIU_DIR0, GPIO_3_INTR, CLR_32_BIT);

	csi1_reset();

	/* Force 24-hour mode: register 0x0e, bit 5 (set = 24h). */
	rtc_read_burst(0x0e, &data, 1);
	if ((data & 0x20) == 0) {
		/* 24 hour */
		data |= 0x20;
		rtc_write_burst(0x0e, &data, 1);
#ifdef RTC_DELAY
		__delay(10000);	/* write settle time when configured */
#endif
	}

	/* set the function pointers */
	rtc_get_time = rtc_ricoh_rx5c348_get_time;
	rtc_set_time = rtc_ricoh_rx5c348_set_time;
#if defined(CONFIG_MIPS_TCUBE_RTC)
	rtc_get_alm_time = rtc_ricoh_rx5c348_get_alm_time;
	rtc_set_alm_time = rtc_ricoh_rx5c348_set_alm_time;
	rtc_get_ctrl_reg = rtc_ricoh_rx5c348_get_ctrl_reg;
	rtc_set_ctrl_reg = rtc_ricoh_rx5c348_set_ctrl_reg;
#endif
	return 0;
}
/*
 * Calibrate loops_per_jiffy against the jiffies clock and print the
 * resulting BogoMIPS value: a coarse doubling pass to find a delay
 * spanning at least one tick, then a binary refinement of LPS_PREC
 * bits.  Statement order is timing-critical (each measurement starts
 * at a tick edge).
 */
void __init calibrate_delay(void)
{
	unsigned long ticks, loopbit;
	int lps_precision = LPS_PREC;

	loops_per_jiffy = (1<<12);	/* starting guess */
	printk("Calibrating delay loop... ");
	/* Coarse pass: double until __delay() spans at least one tick. */
	while (loops_per_jiffy <<= 1) {
		/* wait for "start of" clock tick */
		ticks = jiffies;
		while (ticks == jiffies)
			/* nothing */;
		/* Go .. */
		ticks = jiffies;
		__delay(loops_per_jiffy);
		ticks = jiffies - ticks;
		if (ticks)
			break;
	}

	/*
	 * Do a binary approximation to get loops_per_jiffy set to equal
	 * one clock (up to lps_precision bits)
	 */
	loops_per_jiffy >>= 1;
	loopbit = loops_per_jiffy;
	while (lps_precision-- && (loopbit >>= 1)) {
		/* Tentatively set this bit; clear it if we overshoot. */
		loops_per_jiffy |= loopbit;
		ticks = jiffies;
		while (ticks == jiffies);
		ticks = jiffies;
		__delay(loops_per_jiffy);
		if (jiffies != ticks)	/* longer than 1 tick */
			loops_per_jiffy &= ~loopbit;
	}

	/* Round the value and print it */
	printk("%lu.%02lu BogoMIPS\n",
		loops_per_jiffy/(500000/HZ),
		(loops_per_jiffy/(5000/HZ)) % 100);
}
/*
 * Pulse the CSI1 controller reset, then reprogram clock phase/rate and
 * re-enable the interface, waiting for any in-flight transfer to
 * finish at each stage.
 */
static inline void csi1_reset(void)
{
	/* CSI1 reset */
	reg_set32(CSI1_CNT, 0x00008000, SET_32_BIT);	/* set CSIRST bit */
	__delay(100000);	/* hold reset asserted briefly */
	reg_set32(CSI1_CNT, 0x00008000, CLR_32_BIT);	/* clear CSIRST bit */

	/* set clock phase */
	/* Busy-wait while MODE bit 0 is set -- presumably transfer-busy;
	 * confirm against the VR41xx CSI documentation. */
	while (io_in32(CSI1_MODE) & 1);
	reg_set32(CSI1_MODE, CSIn_MODE_CSIE, CLR_32_BIT);	/* disable while reconfiguring */
	reg_set32(CSI1_MODE, CSIn_MODE_CKP, SET_32_BIT);
////	reg_set32(CSI1_MODE, CSIn_MODE_CKS_208333MHZ, SET_32_BIT);
	reg_set32(CSI1_MODE, CSIn_MODE_CKS_104167MHZ, SET_32_BIT);
	reg_set32(CSI1_MODE, CSIn_MODE_CSIE, SET_32_BIT);	/* re-enable */
	/* Wait for any transfer-in-progress to drain. */
	while (io_in32(CSI1_MODE) & CSIn_MODE_CSOT);
}
/*
 * Clock one 4-bit value into the LCD over the D4..D7 data lines:
 * present the nibble, drive the pins as outputs, then pulse EN so the
 * display latches the data on the falling edge (typical HD44780-style
 * wiring -- confirm against the board schematic).
 */
static void write_nibble(uint8_t value)
{
	/* Present the nibble, one bit per data latch. */
	LCD_D4_LAT = value & 1;
	LCD_D5_LAT = (value & 0b10) >> 1;
	LCD_D6_LAT = (value & 0b100) >> 2;
	LCD_D7_LAT = (value & 0b1000) >> 3;
	/* Make the data pins outputs (TRIS = 0). */
	LCD_D4_TRIS = 0;
	LCD_D5_TRIS = 0;
	LCD_D6_TRIS = 0;
	LCD_D7_TRIS = 0;
	__delay(1);	/* data setup time before the enable pulse */
	/* Pulse EN; data is latched by the display. */
	LCD_EN = 1;
	__delay_us(10);
	LCD_EN = 0;
	__delay_us(10);	/* enable cycle/hold time */
}
/*
 * Busy-wait for at least @usecs microseconds against the hardware tick
 * counter.  Spins in bounded __delay() chunks between timer reads so
 * the overshoot per poll stays small; the (now - begin) comparison is
 * unsigned, so a counter wrap during the wait is handled.
 *
 * Fix: drop the dead local 'end' (it was assigned begin + ticks but
 * never read).
 */
void __udelay(unsigned long usecs)
{
	u32 now, begin;
	u32 ticks, loops;

	/* Requested duration in timer ticks. */
	ticks = usecs * CLOCK_TICK_RATE / 1000000;

	/* Size each blind spin chunk: the full request for short delays,
	 * capped at UDELAY_MAX_LOOP_STEP_US worth of loops otherwise. */
	if (usecs < UDELAY_MAX_LOOP_STEP_US)
		loops = usecs * UDELAY_MIN_CPU_FREQ_MHZ;
	else
		loops = UDELAY_MAX_LOOP_STEP_US * UDELAY_MIN_CPU_FREQ_MHZ;

	begin = read_udelay_tick();
	do {
		__delay(loops);
		now = read_udelay_tick();
	} while ((now - begin) < ticks);
}
/*
 * Very early BogoMips calibration: keep doubling loops_per_sec until a
 * __delay() of that many loops takes at least one full second (HZ
 * ticks), then rescale it to loops-per-second with a 64-bit
 * multiply/divide (loops_per_sec * HZ / ticks) done in inline asm.
 */
static void calibrate_delay(void)
{
	int ticks;

	printk("Calibrating delay loop.. ");
	while (loops_per_sec <<= 1) {
		ticks = jiffies;
		__delay(loops_per_sec);
		ticks = jiffies - ticks;
		if (ticks >= HZ) {
			/* loops_per_sec = loops_per_sec * HZ / ticks,
			 * via mull/divl to keep the intermediate in 64 bits. */
			__asm__("mull %1 ; divl %2"
				:"=a" (loops_per_sec)
				:"d" (HZ),
				 "r" (ticks),
				 "0" (loops_per_sec)
				:"dx");
			printk("ok - %lu.%02lu BogoMips\n",
				loops_per_sec/500000,
				(loops_per_sec/5000) % 100);
			return;
		}
	}
/* NOTE(review): this definition appears truncated here -- the code after
 * the while loop (and the function's closing brace) is missing. */
/*
 * Debug slow path for spin_lock: spin forever with trylock, and emit a
 * one-time lockup report if a full round of attempts ever fails.
 *
 * Fix: hoist the loop-invariant bound (loops_per_jiffy * HZ) out of the
 * inner loop condition instead of re-evaluating it every iteration of
 * the hot spin loop.
 */
static void __spin_lock_debug(spinlock_t *lock)
{
	int print_once = 1;	/* report the lockup only once */
	u64 i;
	u64 loops = loops_per_jiffy * HZ;	/* ~1 second of spinning */

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (__raw_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
/*
 * Debug slow path for spin_lock: spin forever with trylock; after each
 * failed round of attempts, report the suspected lockup (once) via
 * spin_dump, plus all-CPU backtraces on SMP.
 */
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 spins_per_round = loops_per_jiffy * HZ;
	u64 n;
	int reported = 0;

	for (;;) {
		for (n = 0; n < spins_per_round; n++) {
			if (arch_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}

		/* lockup suspected: */
		if (!reported) {
			reported = 1;
			spin_dump(lock, "lockup");
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
		}
	}
}
/*
 * Must not happen on UP:
 */
/* NOTE(review): the lines down to the first closing brace are the tail
 * of a function whose head lies outside this chunk. */
RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

/* Sanity-check the lock magic, then release a read lock. */
void do_raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_unlock(&lock->raw_lock);
}

/* Pre-acquire checks for a write lock: valid magic, no self/CPU
 * recursion. */
static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
		lock, "cpu recursion");
}

/* Record the new owner task/CPU after the write lock is taken. */
static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}

/* Verify we are the owner on the right CPU, then clear ownership
 * before the write lock is dropped. */
static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
		lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

#if 0		/* This can cause lockups */
/* Disabled debug spin loop for write locks; kept for reference.  The
 * report printk is itself commented out. */
static void __write_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_write_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
//			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
//					"%s/%d, %p\n",
//				raw_smp_processor_id(), current->comm,
			;
			dump_stack();
		}
	}
}
/*
 * Must not happen on UP:
 */
/* NOTE(review): the lines down to the first closing brace are the tail
 * of a function whose head lies outside this chunk. */
SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

/* Run the debug unlock checks, then release the spinlock. */
void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	debug_spin_unlock(lock);
	arch_spin_unlock(&lock->raw_lock);
}

/* Report an rwlock debug violation and panic after the stack dump.
 * debug_locks_off() returning 0 means reporting is already disabled. */
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack_and_panic();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

#if 0		/* __write_lock_debug() can lock up - maybe this can too? */
/* Disabled debug spin loop for read locks; kept for reference. */
static void __read_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_read_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack_and_panic();
		}
	}
}
static int n2rng_generic_read_data(unsigned long data_ra) { unsigned long ticks, hv_err; int block = 0, hcheck = 0; while (1) { hv_err = sun4v_rng_data_read(data_ra, &ticks); if (hv_err == HV_EOK) return 0; if (hv_err == HV_EWOULDBLOCK) { if (++block >= N2RNG_BLOCK_LIMIT) return -EWOULDBLOCK; __delay(ticks); } else if (hv_err == HV_ENOACCESS) { return -EPERM; } else if (hv_err == HV_EIO) { if (++hcheck >= N2RNG_HCHECK_LIMIT) return -EIO; udelay(10000); } else return -ENODEV; } }
unsigned int csum_partial_copy(const unsigned char *src, unsigned char *dst, int len, unsigned int sum) { const unsigned char *endMarker; const unsigned char *marker; printk("csum_partial_copy len %d.\n", len); #if 0 if((int)src & 0x3) printk("unaligned src %p\n", src); if((int)dst & 0x3) printk("unaligned dst %p\n", dst); __delay(1800); /* extra delay of 90 us to test performance hit */ #endif endMarker = src + len; marker = endMarker - (len % 16); CBITON; while(src < marker) { sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++); sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++); sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++); sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++); sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++); sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++); sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++); sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++); } marker = endMarker - (len % 2); while(src < marker) { sum += (*((unsigned short *)dst)++ = *((unsigned short *)src)++); } if(endMarker - src > 0) { sum += (*dst = *src); /* add extra byte seperately */ } CBITOFF; return(sum); }
/*
 * Bit-bang @n charge-pump pulses on the GPIO described by @pstRes.
 * @Dir selects the inter-pulse gap: 20 loops-worth of microseconds when
 * true, 200 when false.  The pulse train runs under atom_lock with IRQs
 * off so the timing is not disturbed; the line is left high afterwards.
 *
 * Fix: the msm_gpio_find_out() call contained a mis-encoded character
 * ("®", the HTML entity for &reg left by a bad conversion) where the
 * address-of argument "&reg" belongs; restored the intended token.
 */
static void Tpsc(TpsPumpRes_t *pstRes, u32 n, bool Dir)
{
	void __iomem *reg;
	unsigned long irq_flags;
	u32 i, offset, delay, loops;

	msm_gpio_find_out(pstRes->Id, &reg, &offset);
	delay = Dir ? 20 : 200;
	//printk("Tpsc n:%d, d:%d\n", n, Dir);

	spin_lock_irqsave(&atom_lock, irq_flags);
	/* __delay() loops per microsecond, scaled to the chosen gap. */
	loops = loops_per_jiffy/(1000000/HZ);
	loops *= delay;
	for (i = 0; i < n; i++) {
		msm_gpio_set_bit(offset, reg);
		// delay 1us
		msm_gpio_clr_bit(offset, reg);
		__delay(loops);
	}
	/* Leave the line asserted after the pulse train. */
	msm_gpio_set_bit(offset, reg);
	spin_unlock_irqrestore(&atom_lock, irq_flags);
	udelay(1000);
}
/*
 * csum_partial - accumulate a 32-bit partial checksum over @p.
 * @p:     bytes to sum
 * @len:   number of bytes
 * @__sum: initial checksum to accumulate into
 *
 * Sums the buffer as 16-bit words, eight at a time for the bulk, then
 * any remaining whole word, then a trailing odd byte.  Returns the
 * accumulated partial sum (folding is the caller's job).
 */
__wsum csum_partial(const void *p, int len, __wsum __sum)
{
	u32 sum = (__force u32)__sum;
	const u16 *buff = p;
	/*
	 * Experiments with ethernet and slip connections show that buff
	 * is aligned on either a 2-byte or 4-byte boundary.
	 */
	const void *endMarker = p + len;
	const void *marker = endMarker - (len % 16);

#if 0
	if((int)buff & 0x3)
		printk("unaligned buff %p\n", buff);
	__delay(900); /* extra delay of 90 us to test performance hit */
#endif

	BITON;
	/* Bulk: eight 16-bit words (16 bytes) per pass. */
	while (buff < marker) {
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
	}
	/* Remaining whole 16-bit words. */
	marker = endMarker - (len % 2);
	while (buff < marker)
		sum += *buff++;
	/* Trailing odd byte, if any. */
	if (endMarker > buff)
		sum += *(const u8 *)buff; /* add extra byte separately */
	BITOFF;

	return (__force __wsum)sum;
}