/*
 * Flush a stream's buffer.
 * For input-buffered streams the buffered bytes are discarded (the
 * lseek() rewind of the underlying fd is not implemented yet); for
 * output-buffered streams any pending bytes are written out.
 * Returns 0 on success, -1 on write error (sets ERRORINDICATOR).
 */
int fflush(FILE *stream)
{
    if (stream == 0) {
        /* Flushing every open stream is not supported yet. */
        panicf("not yet implemented (fflush(0))");
        /* int res; FILE *f; __fflush_stdin(); __fflush_stdout(); __fflush_stderr(); for(res=0, f=__stdio_root; f; f=f->next) if(fflush(f)) res=-1; return res; */
    }

    // if (stream->flags&NOBUF) return 0;

    if (stream->flags & BUFINPUT) {
        int pending = stream->bm - stream->bs;
        if (pending) {
            /* Would need to seek the fd back over the unread bytes. */
            panicf("not yet implemented (fflush lseek)");
            // lseek(stream->fd,tmp,SEEK_CUR);
        }
        stream->bs = 0;
        stream->bm = 0;
        return 0;
    }

    /* Output buffering: push out everything accumulated so far. */
    if (stream->bm != 0
        && write(stream->fd, stream->buf, stream->bm) != (int)stream->bm) {
        stream->flags |= ERRORINDICATOR;
        return -1;
    }
    stream->bm = 0;
    return 0;
}
static void handle_out_ep(int ep) { struct usb_ctrlrequest *req = (void*)AS3525_UNCACHED_ADDR(&setup_desc->data1); int ep_sts = USB_OEP_STS(ep) & ~USB_OEP_STS_MASK(ep); if (ep > 3) panicf("out_ep > 3!?"); USB_OEP_STS(ep) = ep_sts; /* ACK */ if (ep_sts & USB_EP_STAT_BNA) { /* Buffer was not set up */ int ctrl = USB_OEP_CTRL(ep); logf("ep%d OUT, status %x ctrl %x (BNA)\n", ep, ep_sts, ctrl); panicf("ep%d OUT 0x%x 0x%x (BNA)", ep, ep_sts, ctrl); ep_sts &= ~USB_EP_STAT_BNA; } if (ep_sts & USB_EP_STAT_OUT_RCVD) { struct usb_dev_dma_desc *uc_desc = endpoints[ep][1].uc_desc; int dma_sts = uc_desc->status; int dma_len = dma_sts & 0xffff; if (!(dma_sts & USB_DMA_DESC_ZERO_LEN)) { logf("EP%d OUT token, st:%08x len:%d frm:%x data=%s epstate=%d\n", ep, dma_sts & 0xf8000000, dma_len, (dma_sts >> 16) & 0x7ff, make_hex(uc_desc->data_ptr, dma_len), endpoints[ep][1].state); /* * If parts of the just dmaed range are in cache, dump them now. */ discard_dcache_range(uc_desc->data_ptr, dma_len); } else{
/* inline since branch is chosen at compile time */
/* Enter or leave USB slave (mass-storage) mode.
 * Entering resets and disables local storage and hands it to USB;
 * leaving re-enables storage and remounts all partitions, panicking
 * if the disk cannot be brought back. */
static inline void usb_slave_mode(bool on)
{
    if (on) {
        DEBUGF("Entering USB slave mode\n");
        storage_soft_reset();
        storage_init();
        storage_enable(false);
        usb_enable(true);
        cpu_idle_mode(true);
        return;
    }

    DEBUGF("Leaving USB slave mode\n");
    cpu_idle_mode(false);

    /* Let the ISDx00 settle */
    sleep(HZ*1);

    usb_enable(false);

    int rc = storage_init();
    if (rc)
        panicf("storage: %d", rc);

    rc = disk_mount_all();
    if (rc <= 0) /* no partition */
        panicf("mount: %d", rc);
}
/* Advance the EP0 control-transfer state machine after a completed
 * transfer. 'is_ack' is true when the completed stage was the status
 * (ACK) stage, false when it was the data stage. Any stage arriving
 * in a state that does not expect it is fatal. */
static void handle_ep0_complete(bool is_ack)
{
    switch (ep0_state) {
    case EP0_WAIT_SETUP:
        panicf("usb-drv: EP0 completion while waiting for SETUP");
        /* not reached: panicf does not return */
    case EP0_WAIT_ACK:
        if (!is_ack)
            panicf("usb-drv: EP0 data completion while waiting for ACK");
        /* everything is done, prepare next setup */
        prepare_setup_ep0();
        break;
    case EP0_WAIT_DATA:
        if (is_ack)
            panicf("usb-drv: EP0 ACK while waiting for data completion");
        /* everything is done, prepare next setup */
        prepare_setup_ep0();
        break;
    case EP0_WAIT_DATA_ACK:
        /* one of the two remaining stages finished; wait for the other */
        ep0_state = is_ack ? EP0_WAIT_DATA : EP0_WAIT_ACK;
        break;
    default:
        panicf("usb-drv: invalid EP0 state");
    }
    logf("usb-drv: EP0 state updated to %d", ep0_state);
}
static bool sim_kernel_init(void) { sim_irq_mtx = SDL_CreateMutex(); if (sim_irq_mtx == NULL) { panicf("Cannot create sim_handler_mtx\n"); return false; } sim_thread_cond = SDL_CreateCond(); if (sim_thread_cond == NULL) { panicf("Cannot create sim_thread_cond\n"); return false; } #ifndef HAVE_SDL_THREADS wfi_cond = SDL_CreateCond(); if (wfi_cond == NULL) { panicf("Cannot create wfi\n"); return false; } wfi_mutex = SDL_CreateMutex(); if (wfi_mutex == NULL) { panicf("Cannot create wfi mutex\n"); return false; } #endif return true; }
/* Transmit 'size' bytes from 'str' on UART1.
 * Blocks (busy-waits) until any previous transfer has drained, copies
 * the data into the send ring buffer, primes the hardware TX FIFO and
 * lets the UART1 interrupt feed the rest.
 * Panics if 'size' exceeds the ring buffer capacity.
 * NOTE(review): not reentrant — assumes a single caller at a time. */
void uart1_puts(const char *str, int size)
{
    if(size>SEND_RING_SIZE)
        panicf("Too much data passed to uart1_puts");

    /* Wait for the previous transfer to finish */
    while(uart1_send_count>0);

    memcpy(uart1_send_buffer_ring, str, size);

    /* Disable interrupt while modifying the pointers */
    bitclr16(&IO_INTC_EINT0, INTR_EINT0_UART1);

    uart1_send_count=size;
    uart1_send_read=0;

    /* prime the hardware buffer */
    /* 0x3f masks the FIFO fill level; keep feeding while below 0x20
     * and there is data left */
    while(((IO_UART1_TFCR & 0x3f) < 0x20) && (uart1_send_count > 0))
    {
        IO_UART1_DTRR=uart1_send_buffer_ring[uart1_send_read++];
        uart1_send_count--;
    }

    /* Enable interrupt */
    bitset16(&IO_INTC_EINT0, INTR_EINT0_UART1);
}
void tick_start(unsigned int interval_in_ms) { unsigned long count; int prescale; count = CPU_FREQ/2 * interval_in_ms / 1000 / 16; if(count > 0x10000) { panicf("Error! The tick interval is too long (%d ms)\n", interval_in_ms); return; } prescale = cpu_frequency / CPU_FREQ; /* Note: The prescaler is later adjusted on-the-fly on CPU frequency changes within timer.c */ /* We are using timer 0 */ TRR0 = (unsigned short)(count - 1); /* The reference count */ TCN0 = 0; /* reset the timer */ TMR0 = 0x001d | ((unsigned short)(prescale - 1) << 8); /* restart, CLK/16, enabled, prescaler */ TER0 = 0xff; /* Clear all events */ ICR1 = 0x8c; /* Interrupt on level 3.0 */ IMR &= ~0x200; }
/* Register an event handler for event 'id'.
 * 'oneshot' marks the handler for removal after first delivery;
 * 'user_data' is stored only when 'user_data_valid' is set.
 * Returns false when an identical registration already exists,
 * true when a free slot was claimed; panics when the table is full. */
static bool do_add_event(unsigned short id, bool oneshot, bool user_data_valid,
                         void *handler, void *user_data)
{
    int slot;

    /* Reject an exact duplicate registration. */
    for (slot = 0; slot < MAX_SYS_EVENTS; slot++)
    {
        if (events[slot].handler.callback == handler
            && events[slot].id == id
            && (!user_data_valid
                || (user_data == events[slot].handler.user_data)))
            return false;
    }

    /* Claim the first empty slot. */
    for (slot = 0; slot < MAX_SYS_EVENTS; slot++)
    {
        if (events[slot].handler.callback != NULL)
            continue;

        events[slot].id = id;
        events[slot].oneshot = oneshot;
        if ((events[slot].has_user_data = user_data_valid))
            events[slot].handler.user_data = user_data;
        /* set the callback last so the slot becomes live fully filled */
        events[slot].handler.callback = handler;
        return true;
    }

    panicf("event line full");
    return false;
}
/* * setup a hrtimer to send a signal to our process every tick */ void tick_start(unsigned int interval_in_ms) { int ret = 0; timer_t timerid; struct itimerspec ts; sigevent_t sigev; /* initializing in the declaration causes some weird warnings */ memset(&sigev, 0, sizeof(sigevent_t)); sigev.sigev_notify = SIGEV_THREAD, sigev.sigev_notify_function = timer_signal, ts.it_value.tv_sec = ts.it_interval.tv_sec = 0; ts.it_value.tv_nsec = ts.it_interval.tv_nsec = interval_in_ms*1000*1000; /* add the timer */ ret |= timer_create(CLOCK_REALTIME, &sigev, &timerid); ret |= timer_settime(timerid, 0, &ts, NULL); /* Grab the mutex already now and leave it to this thread. We don't * care about race conditions when signaling the condition (because * they are not critical), but a mutex is necessary due to the API */ pthread_mutex_lock(&wfi_mtx); if (ret != 0) panicf("%s(): %s\n", __func__, strerror(errno)); }
/* Append one 8-byte check chunk ("<letter><6 hex digits>,") to file
 * 'name', continuing the chunk sequence from the file's current size.
 * Returns the result of close() on success, -1 on open/close failure;
 * panics if the write itself fails. */
int dbg_append(char* name)
{
    char tmp[CHUNKSIZE+1];
    int fd, rc, size;
    int x;

    fd = open(name, O_RDONLY);
    if (fd < 0) {
        DEBUGF("Failed opening file\n");
        return -1;
    }

    size = lseek(fd, 0, SEEK_END);
    DEBUGF("File is %d bytes\n", size);

    /* next chunk index continues where the file ends */
    x = size / CHUNKSIZE;
    LDEBUGF("Check base is %x (%d)\n", x, size);

    if (close(fd) < 0)
        return -1;

    fd = open(name, O_RDWR|O_APPEND);
    if (fd < 0) {
        DEBUGF("Failed opening file\n");
        return -1;
    }

    sprintf(tmp, "%c%06x,", name[1], x++);
    rc = write(fd, tmp, 8);
    if (rc < 0)
        panicf("Failed writing data\n");

    return close(fd);
}
/* Switch the target's communication medium (TM_ETHERNET or TM_SERIAL)
 * and update the context's MTU and write function accordingly.
 * For Ethernet (non-Windows) the remote MAC is negotiated first.
 * Returns 0 on success, -1 on failure or unknown medium. */
int target_set_medium(target_context_t *tc, const char *medium)
{
    char cmdbuf[CMDBUF_LENGTH];

    msgf("setting medium to %s\n", medium);

#ifndef WIN32
    /* if medium is Ethernet, negotiate remote MAC before switching */
    if (!strcmp(medium, TM_ETHERNET) && target_negotiate_mac(tc) == -1)
        return -1;
#endif

    snprintf(cmdbuf, sizeof cmdbuf, "medium %s", medium);
    target_write_command(tc, cmdbuf);
    if (target_confirm_response(tc) == -1)
        return -1;

    if (!strcmp(medium, TM_ETHERNET)) {
        tc->mtu = ETHERNET_MTU;
#ifndef WIN32
        tc->write = target_write_ethernet;
#endif
    }
    else if (!strcmp(medium, TM_SERIAL)) {
        tc->mtu = SERIAL_MTU;
        tc->write = target_write_serial;
    }
    else {
        panicf("unknown medium: %s", medium);
        return -1;
    }

    tc->medium = medium;
    return 0;
}
/* Query the target for its MAC address and store the parsed value in
 * tc->remote_mac. Returns 0 on success, -1 on failure; panics when the
 * target's reply cannot be parsed. */
static int target_get_mac(target_context_t *tc)
{
    char buf[LINE_LENGTH];

    /* read MAC address from target */
    msg("getting target MAC\n");
    target_write_command(tc, "mac");
    target_gets(tc, buf, sizeof buf);
    if (target_confirm_response(tc) == -1)
        return -1;

    /* and parse it */
    msgf("target-provided remote MAC: %s", buf);
    if (parsemac(tc->remote_mac, buf)) {
        panicf("can't parse target-provided remote MAC: %s", buf);
        return -1;
    }

    if (opt_verbose) {
        msg("target-provided remote MAC (parsed): ");
        printmac(stdout, tc->remote_mac);
        msg("\n");
    }

    return 0;
}
void system_init(void) { SDL_sem *s; /* fake stack, OS manages size (and growth) */ stackbegin = stackend = (uintptr_t*)&s; #if (CONFIG_PLATFORM & PLATFORM_MAEMO) /* Make glib thread safe */ g_thread_init(NULL); g_type_init(); #endif if (SDL_Init(SDL_INIT_TIMER)) panicf("%s", SDL_GetError()); s = SDL_CreateSemaphore(0); /* 0-count so it blocks */ evt_thread = SDL_CreateThread(sdl_event_thread, s); /* wait for sdl_event_thread to run so that it can initialize the surfaces * and video subsystem needed for SDL events */ SDL_SemWait(s); /* cleanup */ SDL_DestroySemaphore(s); }
/* Reserve the LRADC channel and delay unit used for touchscreen
 * measurements and leave the touchscreen disabled until needed.
 * Panics if either resource is unavailable. */
void imx233_touchscreen_init(void)
{
    /* both acquisitions are non-blocking; negative means none free */
    touch_chan = imx233_lradc_acquire_channel(TIMEOUT_NOBLOCK);
    touch_delay = imx233_lradc_acquire_delay(TIMEOUT_NOBLOCK);

    if (touch_chan < 0 || touch_delay < 0)
        panicf("Cannot acquire channel and delays for touchscreen measurement");

    imx233_touchscreen_enable(false);
}
// doesn't check in use ! void arbiter_reserve(struct channel_arbiter_t *a, unsigned channel) { // assume semaphore has a free slot immediately if(semaphore_wait(&a->sema, TIMEOUT_NOBLOCK) != OBJ_WAIT_SUCCEEDED) panicf("arbiter_reserve failed on semaphore_wait !"); mutex_lock(&a->mutex); a->free_bm &= ~(1 << channel); mutex_unlock(&a->mutex); }
/* Drop one reference to the DMA controller; when the last user
 * releases it, disable the controller and gate its clock off.
 * An unbalanced release (negative refcount) is fatal. */
void dma_release(void)
{
    --dma_used;

    if (dma_used == 0)
    {
        DMAC_CONFIGURATION &= ~(1<<0);
        CGU_PERI &= ~CGU_DMA_CLOCK_ENABLE;
    }

    if (dma_used < 0)
        panicf("dma_used < 0!");
}
/* Drop one reference to the DMA controller; when the last user
 * releases it, disable the controller and gate its clock off.
 * An unbalanced release (negative refcount) is fatal. */
void dma_release(void)
{
    --dma_used;

    if (dma_used == 0)
    {
        bitclr32(&DMAC_CONFIGURATION, 1<<0);
        bitclr32(&CGU_PERI, CGU_DMA_CLOCK_ENABLE);
    }

    if (dma_used < 0)
        panicf("dma_used < 0!");
}
/* Scan one physical segment that holds randomly-ordered cached page
 * writes and register it in the write cache table.
 * Reads the first sector's spare area to find the logical segment id;
 * bails out if the segment carries none. Allocates (or reuses) a write
 * cache entry for that logical segment and records this bank/segment
 * as its random-write location. On non-V1 FTLs it then walks every
 * remaining page, reading each spare area to rebuild the
 * logical-page -> physical-page map. */
static void read_random_writes_cache(int bank, int phys_segment)
{
    int page = 0;
    short log_segment;
    unsigned char spare_buf[16];

    /* Identify the logical segment from the first sector's spare. */
    nand_read_raw(bank, phys_segment_to_page_addr(phys_segment, page),
                  SECTOR_SIZE, /* offset to first sector's spare */
                  16, spare_buf);

    log_segment = get_log_segment_id(phys_segment, spare_buf);
    if (log_segment == -1)
        return;

    /* Find which cache this is related to */
    int cache_no = find_write_cache(log_segment);
    if (cache_no == -1)
    {
        if (write_caches_in_use < MAX_WRITE_CACHES)
        {
            cache_no = write_caches_in_use;
            write_caches_in_use++;
        }
        else
        {
            /* panicf does not return, so cache_no stays valid below */
            panicf("Max NAND write caches reached");
        }
    }

    write_caches[cache_no].log_segment = log_segment;
    write_caches[cache_no].random_bank = bank;
    write_caches[cache_no].random_phys_segment = phys_segment;

#ifndef FTL_V1
    /* Loop over each page in the phys segment (from page 1 onwards).
       Read spare for 1st sector, store location of page in array. */
    for (page = 1;
         page < (nand_data->pages_per_block * nand_data->planes);
         page++)
    {
        unsigned short cached_page;

        nand_read_raw(bank, phys_segment_to_page_addr(phys_segment, page),
                      SECTOR_SIZE, /* offset to first sector's spare */
                      16, spare_buf);

        cached_page = get_cached_page_id(spare_buf);

        /* 0xFFFF marks an unused/erased page */
        if (cached_page != 0xFFFF)
            write_caches[cache_no].page_map[cached_page] = page;
    }
#endif /* !FTL_V1 */
}
int usb_drv_recv(int ep, void *ptr, int len) { struct usb_dev_dma_desc *uc_desc = endpoints[ep][1].uc_desc; ep &= 0x7f; logf("usb_drv_recv(%d,%x,%d)\n", ep, (int)ptr, len); if (len > USB_DMA_DESC_RXTX_BYTES) panicf("usb_recv: len=%d > %d", len, USB_DMA_DESC_RXTX_BYTES); if ((int)ptr & 31) { logf("addr %08x not aligned!\n", (int)ptr); } endpoints[ep][1].state |= EP_STATE_BUSY; endpoints[ep][1].len = len; endpoints[ep][1].rc = -1; /* remove data buffer from cache */ discard_dcache_range(ptr, len); /* DMA setup */ uc_desc->status = USB_DMA_DESC_BS_HST_RDY | USB_DMA_DESC_LAST | len; if (len == 0) { uc_desc->status |= USB_DMA_DESC_ZERO_LEN; uc_desc->data_ptr = 0; } else { uc_desc->data_ptr = AS3525_PHYSICAL_ADDR(ptr); } USB_OEP_DESC_PTR(ep) = AS3525_PHYSICAL_ADDR((int)&dmadescs[ep][1]); USB_OEP_STS(ep) = USB_EP_STAT_OUT_RCVD; /* clear status */ /* Make sure receive DMA is on */ if (!(USB_DEV_CTRL & USB_DEV_CTRL_RDE)){ USB_DEV_CTRL |= USB_DEV_CTRL_RDE; if (!(USB_DEV_CTRL & USB_DEV_CTRL_RDE)) logf("failed to enable RDE!\n"); } USB_OEP_CTRL(ep) |= USB_EP_CTRL_CNAK; /* Go! */ if (USB_OEP_CTRL(ep) & USB_EP_CTRL_NAK) { int i = 0; while (USB_OEP_CTRL(ep) & USB_EP_CTRL_NAK) { USB_OEP_CTRL(ep) |= USB_EP_CTRL_CNAK; /* Go! */ i++; } logf("ep%d CNAK needed %d retries CTRL=%x\n", ep, i, (int)USB_OEP_CTRL(ep)); } return 0; }
/* Verify that file 'name' contains the expected sequence of 8-byte
 * check chunks ("<letter><6 hex digits>,"), starting the scan at a
 * random 8-byte-aligned position. The 'size' parameter is ignored and
 * overwritten with the real file size (kept for interface
 * compatibility). Returns close()'s result on success, -1 on failure;
 * panics on a read error. */
int dbg_chkfile(char* name, int size)
{
    char text[81920]; /* NOTE(review): large stack buffer, kept from original */
    int i;
    int x = 0;
    int pos = 0;
    int block = 0;
    int fd = open(name, O_RDONLY);

    if (fd < 0) {
        DEBUGF("Failed opening file\n");
        return -1;
    }
    size = lseek(fd, 0, SEEK_END);
    DEBUGF("File is %d bytes\n", size);

    /* random start position */
    if (size)
        pos = ((int)rand() % size) & ~7;
    lseek(fd, pos, SEEK_SET);

    x = pos / CHUNKSIZE;
    LDEBUGF("Check base is %x (%d)\n", x, pos);

    while (1) {
        int rc = read(fd, text, sizeof text);
        DEBUGF("read %d bytes\n", rc);
        if (rc < 0) {
            panicf("Failed reading data\n");
        }
        else {
            char tmp[CHUNKSIZE+1];
            if (!rc)
                break;
            for (i = 0; i < rc/CHUNKSIZE; i++) {
                sprintf(tmp, "%c%06x,", name[1], x++);
                if (strncmp(text+i*CHUNKSIZE, tmp, CHUNKSIZE)) {
                    int idx = pos + block*sizeof(text) + i*CHUNKSIZE;
                    DEBUGF("Mismatch in byte 0x%x (byte 0x%x of sector 0x%x)."
                           "\nExpected %.8s found %.8s\n",
                           idx, idx % SECTOR_SIZE, idx / SECTOR_SIZE,
                           tmp, text+i*CHUNKSIZE);
                    DEBUGF("i=%x, idx=%x\n", i, idx);
                    dbg_dump_buffer(text+i*CHUNKSIZE - 0x20, 0x40, idx - 0x20);
                    close(fd); /* fix: don't leak the fd on mismatch */
                    return -1;
                }
            }
        }
        block++;
    }
    return close(fd);
}
int ata_bbt_read_sectors(uint32_t sector, uint32_t count, void* buffer) { if (ata_last_phys != sector - 1 && ata_last_phys > sector - 64) ata_soft_reset(); int rc = ata_rw_sectors_internal(sector, count, buffer, false); if (rc) rc = ata_rw_sectors_internal(sector, count, buffer, false); ata_last_phys = sector + count - 1; ata_last_offset = 0; if (IS_ERR(rc)) panicf("ATA: Error %08X while reading BBT (sector %d, count %d)\n", (unsigned int)rc, (unsigned int)sector, (unsigned int)count); return rc; }
/*
 * Return a pointer to an incore font structure.
 * If the requested font isn't loaded/compiled-in,
 * decrement the font number and try again.
 * Panics when no usable font exists at all.
 */
struct font* font_get(int font)
{
    for (;;) {
        struct font* pf = sysfonts[font];
        if (pf && pf->height)
            return pf;
        if (--font < 0)
            panicf("No font!");
    }
}
/* Flush the TX FIFO(s) selected by 'nums' via GRSTCTL and busy-wait
 * (bounded) for the flush bit to self-clear; panics on timeout. */
static void flush_tx_fifos(int nums)
{
    unsigned int spins = 0;

    GRSTCTL = (nums << GRSTCTL_txfnum_bitp) | GRSTCTL_txfflsh_flush;

    /* bounded busy-wait for the hardware to clear the flush bit */
    while ((GRSTCTL & GRSTCTL_txfflsh_flush) && spins < 0x300)
        spins++;

    if (GRSTCTL & GRSTCTL_txfflsh_flush)
        panicf("usb-drv: hang of flush tx fifos (%x)", nums);

    /* wait 3 phy clocks */
    udelay(1);
}
int imx233_lradc_acquire_channel(int timeout) { int w = semaphore_wait(&free_bm_sema, timeout); if(w == OBJ_WAIT_TIMEDOUT) return w; mutex_lock(&free_bm_mutex); int chan = find_first_set_bit(free_bm); if(chan >= HW_LRADC_NUM_CHANNELS) panicf("imx233_lradc_acquire_channel cannot find a free channel !"); free_bm &= ~(1 << chan); mutex_unlock(&free_bm_mutex); return chan; }
int arbiter_acquire(struct channel_arbiter_t *a, int timeout) { int w = semaphore_wait(&a->sema, timeout); if(w == OBJ_WAIT_TIMEDOUT) return w; mutex_lock(&a->mutex); int chan = find_first_set_bit(a->free_bm); if(chan >= a->count) panicf("arbiter_acquire cannot find a free channel !"); a->free_bm &= ~(1 << chan); mutex_unlock(&a->mutex); return chan; }
/* Program the LCDIF data bus width. Only 8, 16, 18 and 24 bits are
 * supported by this chip; anything else is fatal. */
void imx233_lcdif_set_lcd_databus_width(unsigned width)
{
    if (width == 8)
        BF_WR_V(LCDIF_CTRL, LCD_DATABUS_WIDTH, 8_BIT);
    else if (width == 16)
        BF_WR_V(LCDIF_CTRL, LCD_DATABUS_WIDTH, 16_BIT);
    else if (width == 18)
        BF_WR_V(LCDIF_CTRL, LCD_DATABUS_WIDTH, 18_BIT);
    else if (width == 24)
        BF_WR_V(LCDIF_CTRL, LCD_DATABUS_WIDTH, 24_BIT);
    else
        panicf("this chip cannot handle a lcd bus width of %d", width);
}
/* Simulated storage: write 'count' blocks from 'buf' starting at block
 * 'start' into the backing file. Writing block 0 is rejected as fatal.
 * Returns 0 on success, -1 on seek failure; panics on a write error. */
int storage_write_sectors(unsigned long start, int count, void* buf)
{
    if (count > 1)
        DEBUGF("[Writing %d blocks: 0x%lx to 0x%lx]\n",
               count, start, start+count-1);
    else
        DEBUGF("[Writing block 0x%lx]\n", start);

    /* refuse to clobber the first sector */
    if (start == 0)
        panicf("Writing on sector 0!\n");

    if (fseek(file, start*BLOCK_SIZE, SEEK_SET) != 0) {
        perror("fseek");
        return -1;
    }

    if (!fwrite(buf, BLOCK_SIZE, count, file)) {
        DEBUGF("ata_write_sectors(0x%lx, 0x%x, %p)\n", start, count, buf);
        perror("fwrite");
        panicf("Disk error\n");
    }

    return 0;
}
/* Scan one physical segment that holds sequential (in-place) cached
 * page writes and register it in the write cache table.
 * Reads the first sector's spare area to find the logical segment id;
 * bails out if the segment carries none. Allocates (or reuses) a write
 * cache entry for that logical segment, then walks the pages in order
 * until it finds the first page no longer tagged with a logical
 * segment, which gives the number of pages already used. */
static void read_inplace_writes_cache(int bank, int phys_segment)
{
    int page = 0;
    short log_segment;
    unsigned char spare_buf[16];

    /* Identify the logical segment from the first sector's spare. */
    nand_read_raw(bank, phys_segment_to_page_addr(phys_segment, page),
                  SECTOR_SIZE, /* offset to first sector's spare */
                  16, spare_buf);

    log_segment = get_log_segment_id(phys_segment, spare_buf);
    if (log_segment == -1)
        return;

    /* Find which cache this is related to */
    int cache_no = find_write_cache(log_segment);
    if (cache_no == -1)
    {
        if (write_caches_in_use < MAX_WRITE_CACHES)
        {
            cache_no = write_caches_in_use;
            write_caches_in_use++;
        }
        else
        {
            /* panicf does not return, so cache_no stays valid below */
            panicf("Max NAND write caches reached");
        }
    }

    write_caches[cache_no].log_segment = log_segment;

    /* Find how many pages have been written to the new segment */
    while (log_segment != -1
           && page < (nand_data->pages_per_block * nand_data->planes) - 1)
    {
        page++;
        nand_read_raw(bank, phys_segment_to_page_addr(phys_segment, page),
                      SECTOR_SIZE, 16, spare_buf);
        log_segment = get_log_segment_id(phys_segment, spare_buf);
    }

    if (page != 0)
    {
        write_caches[cache_no].inplace_bank = bank;
        write_caches[cache_no].inplace_phys_segment = phys_segment;
        write_caches[cache_no].inplace_pages_used = page;
    }
}
// Convert a DataValue received from the other (non-Qt) side of the
// binding into a QVariant. 'var' is an opaque pointer to the QVariant
// to fill; value->data is reinterpreted according to value->dataType.
// Unknown types are fatal.
void unpackDataValue(DataValue *value, QVariant_ *var)
{
    QVariant *qvar = reinterpret_cast<QVariant *>(var);
    switch (value->dataType) {
    case DTString:
        // value->len bytes of UTF-8 at the pointed-to buffer
        *qvar = QString::fromUtf8(*(char **)value->data, value->len);
        break;
    case DTBool:
        *qvar = bool(*(char *)(value->data) != 0);
        break;
    case DTInt64:
        *qvar = *(qint64*)(value->data);
        break;
    case DTInt32:
        *qvar = *(qint32*)(value->data);
        break;
    case DTUint64:
        *qvar = *(quint64*)(value->data);
        break;
    case DTUint32:
        *qvar = *(quint32*)(value->data);
        break;
    case DTFloat64:
        *qvar = *(double*)(value->data);
        break;
    case DTFloat32:
        *qvar = *(float*)(value->data);
        break;
    case DTColor:
        *qvar = QColor::fromRgba(*(QRgb*)(value->data));
        break;
    case DTVariantList:
        // Takes ownership: copy the list into the variant, then delete
        // the heap-allocated original.
        *qvar = **(QVariantList**)(value->data);
        delete *(QVariantList**)(value->data);
        break;
    case DTObject:
        qvar->setValue(*(QObject**)(value->data));
        break;
    case DTInvalid:
        // null would be more natural, but an invalid variant means
        // it has proper semantics when dealing with non-qml qt code.
        //qvar->setValue(QJSValue(QJSValue::NullValue));
        qvar->clear();
        break;
    default:
        panicf("unknown data type: %d", value->dataType);
        break;
    }
}
/* IN-endpoint interrupt handler: acknowledge and process the status
 * bits for endpoint 'ep'. Handles buffer-not-available (fatal after
 * logging) and transfer-done completion (wakes the synchronous waiter
 * or notifies the USB core for async transfers). Any status bit left
 * unhandled afterwards is fatal. */
static void handle_in_ep(int ep)
{
    int ep_sts = USB_IEP_STS(ep) & ~USB_IEP_STS_MASK(ep);

    if (ep > 3)
        panicf("in_ep > 3?!");

    USB_IEP_STS(ep) = ep_sts; /* ack */

    if (ep_sts & USB_EP_STAT_BNA) { /* Buffer was not set up */
        int ctrl = USB_IEP_CTRL(ep);
        logf("ep%d IN, status %x ctrl %x (BNA)\n", ep, ep_sts, ctrl);
        panicf("ep%d IN 0x%x 0x%x (BNA)", ep, ep_sts, ctrl);
    }

    if (ep_sts & USB_EP_STAT_TDC) {
        /* transfer done: clear busy state before notifying anyone */
        endpoints[ep][0].state &= ~EP_STATE_BUSY;
        endpoints[ep][0].rc = 0;
        logf("EP%d %x %stx done len %x stat %08x\n",
             ep, ep_sts, endpoints[ep][0].state & EP_STATE_ASYNC ? "async " :"",
             endpoints[ep][0].len, endpoints[ep][0].uc_desc->status);
        if (endpoints[ep][0].state & EP_STATE_ASYNC) {
            /* async transfer: report completion to the USB core */
            endpoints[ep][0].state &= ~EP_STATE_ASYNC;
            usb_core_transfer_complete(ep, USB_DIR_IN, 0,
                                       endpoints[ep][0].len);
        } else {
            /* synchronous transfer: wake the blocked sender */
            semaphore_release(&endpoints[ep][0].complete);
        }
        ep_sts &= ~USB_EP_STAT_TDC;
    }

    if (ep_sts) {
        /* any remaining status bit is unexpected */
        logf("ep%d IN, hwstat %lx, epstat %x\n",
             ep, USB_IEP_STS(ep), endpoints[ep][0].state);
        panicf("ep%d IN 0x%x", ep, ep_sts);
    }
}