void start(const char *str)
{
    cache_clear();
    compare_counter = 0;
    the_str = str;
    the_start = nano();
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
    struct Scsi_Host *instance = cmd->device->host;
    struct WD33C93_hostdata *hdata = shost_priv(instance);
    unsigned char flags = 0x01;
    unsigned long addr = virt_to_bus(cmd->SCp.ptr);

    /* setup dma direction */
    if (!dir_in)
        flags |= 0x04;

    /* remember direction */
    hdata->dma_dir = dir_in;

    if (dir_in) {
        /* invalidate any cache */
        cache_clear(addr, cmd->SCp.this_residual);
    } else {
        /* push any dirty cache */
        cache_push(addr, cmd->SCp.this_residual);
    }

    /* start DMA */
    m147_pcc->dma_bcr = cmd->SCp.this_residual | (1 << 24);
    m147_pcc->dma_dadr = addr;
    m147_pcc->dma_cntrl = flags;

    /* return success */
    return 0;
}
static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
{
    unsigned char flags = 0x01;
    unsigned long addr = virt_to_bus(cmd->SCp.ptr);

    /* setup dma direction */
    if (!dir_in)
        flags |= 0x04;

    /* remember direction */
    HDATA(mvme147_host)->dma_dir = dir_in;

    if (dir_in)
        /* invalidate any cache */
        cache_clear (addr, cmd->SCp.this_residual);
    else
        /* push any dirty cache */
        cache_push (addr, cmd->SCp.this_residual);

    /* start DMA */
    m147_pcc->dma_bcr = cmd->SCp.this_residual | (1 << 24);
    m147_pcc->dma_dadr = addr;
    m147_pcc->dma_cntrl = flags;

    /* return success */
    return 0;
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
    unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
    unsigned long addr = virt_to_bus(cmd->SCp.ptr);

    /*
     * if the physical address has the wrong alignment, or if
     * physical address is bad, or if it is a write and at the
     * end of a physical memory chunk, then allocate a bounce
     * buffer
     */
    if (addr & A3000_XFER_MASK) {
        HDATA(a3000_host)->dma_bounce_len =
            (cmd->SCp.this_residual + 511) & ~0x1ff;
        HDATA(a3000_host)->dma_bounce_buffer =
            kmalloc (HDATA(a3000_host)->dma_bounce_len, GFP_KERNEL);

        /* can't allocate memory; use PIO */
        if (!HDATA(a3000_host)->dma_bounce_buffer) {
            HDATA(a3000_host)->dma_bounce_len = 0;
            return 1;
        }

        if (!dir_in) {
            /* copy to bounce buffer for a write */
            memcpy (HDATA(a3000_host)->dma_bounce_buffer,
                    cmd->SCp.ptr, cmd->SCp.this_residual);
        }

        addr = virt_to_bus(HDATA(a3000_host)->dma_bounce_buffer);
    }

    /* setup dma direction */
    if (!dir_in)
        cntr |= CNTR_DDIR;

    /* remember direction */
    HDATA(a3000_host)->dma_dir = dir_in;

    DMA(a3000_host)->CNTR = cntr;

    /* setup DMA *physical* address */
    DMA(a3000_host)->ACR = addr;

    if (dir_in)
        /* invalidate any cache */
        cache_clear (addr, cmd->SCp.this_residual);
    else
        /* push any dirty cache */
        cache_push (addr, cmd->SCp.this_residual);

    /* start DMA */
    mb();    /* make sure setup is completed */
    DMA(a3000_host)->ST_DMA = 1;
    mb();    /* make sure DMA has started before next IO */

    /* return success */
    return 0;
}
int cache_routine(void)
{
    if (ccacher.on) {
        /* ccacher.on is re-checked below, apparently guarding against
           a concurrent toggle from another thread */
        if (!ccacher.selidx_moved && ccacher.on) {
            // cache next image
            start_cache_next_image();
            return 0;
        }
        if (!ccacher.on)
            return 0;
        start_cache(*cache_selidx);
        ccacher.selidx_moved = false;
    } else {
        // clean up all remaining cache now
        cache_clear();
    }
    xrKernelDelayThread(100000);
    return 0;
}
static int pbap_setpath(struct obex_session *os, obex_object_t *obj,
                        void *user_data)
{
    struct pbap_session *pbap = user_data;
    const char *name;
    uint8_t *nonhdr;
    char *fullname;
    int err;

    if (OBEX_ObjectGetNonHdrData(obj, &nonhdr) != 2) {
        error("Set path failed: flag and constants not found!");
        return -EBADMSG;
    }

    name = obex_get_name(os);

    DBG("name %s folder %s nonhdr 0x%x%x", name, pbap->folder,
                    nonhdr[0], nonhdr[1]);

    fullname = phonebook_set_folder(pbap->folder, name, nonhdr[0], &err);
    if (err < 0)
        return err;

    g_free(pbap->folder);
    pbap->folder = fullname;

    /*
     * FIXME: Define a criterion to mark the cache as invalid
     */
    pbap->cache.valid = FALSE;
    pbap->cache.index = 0;
    cache_clear(&pbap->cache);

    return 0;
}
void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
#if MKIV
    struct blz1230_dma_registers *dregs =
        (struct blz1230_dma_registers *) (esp->dregs);
#else
    struct blz1230II_dma_registers *dregs =
        (struct blz1230II_dma_registers *) (esp->dregs);
#endif

    cache_clear(addr, length);

    addr >>= 1;
    addr &= ~(BLZ1230_DMA_WRITE);

    /* First set latch */
    dregs->dma_latch = (addr >> 24) & 0xff;

    /* Then pump the address to the DMA address register */
#if MKIV
    dregs->dma_addr = (addr >> 24) & 0xff;
#endif
    dregs->dma_addr = (addr >> 16) & 0xff;
    dregs->dma_addr = (addr >>  8) & 0xff;
    dregs->dma_addr = (addr      ) & 0xff;
}
void free_block_list()
{
#ifdef __linux__
    reset_firewall();
#endif
    cache_clear(block_list, 0); // Remove all items
}
bool http::clear_cache( sim_t* sim,
                        const std::string& name,
                        const std::string& value )
{
  assert( name == "http_clear_cache" ); ( void )name;

  if ( value != "0" && ! sim -> parent )
    cache_clear();

  return true;
}
void free_block_list()
{
#ifdef __linux__
    if (mode != NO_FIREWALL_MODE)
        reset_firewall();
#endif
    cache_clear(block_list, 0); // Remove all items
}
void Java_eu_jm0_wiringX_wiringX_GC(JNIEnv *env, jclass c)
{
    // call original function
    wiringXGC();

    // free handle on logger
    deregisterLogConsumer();

    // clear object cache
    cache_clear(env);
}
void cache_set_forward(bool forward)
{
    cache_lock();

    if (ccacher.isforward != forward) {
        cache_clear();
        ccacher.first_run = true;
    }

    ccacher.isforward = forward;
    cache_unlock();
}
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
    struct cyberII_dma_registers *dregs =
        (struct cyberII_dma_registers *) esp->dregs;

    cache_clear(addr, length);

    addr &= ~(1);
    dregs->dma_addr0 = (addr >> 24) & 0xff;
    dregs->dma_addr1 = (addr >> 16) & 0xff;
    dregs->dma_addr2 = (addr >>  8) & 0xff;
    dregs->dma_addr3 = (addr      ) & 0xff;
}
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
    struct blz2060_dma_registers *dregs =
        (struct blz2060_dma_registers *) (esp->dregs);

    cache_clear(addr, length);

    addr >>= 1;
    addr &= ~(BLZ2060_DMA_WRITE);
    dregs->dma_addr3 = (addr      ) & 0xff;
    dregs->dma_addr2 = (addr >>  8) & 0xff;
    dregs->dma_addr1 = (addr >> 16) & 0xff;
    dregs->dma_addr0 = (addr >> 24) & 0xff;
}
void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
                                size_t size, enum dma_data_direction dir)
{
    switch (dir) {
    case DMA_TO_DEVICE:
        cache_push(handle, size);
        break;
    case DMA_FROM_DEVICE:
        cache_clear(handle, size);
        break;
    default:
        if (printk_ratelimit())
            printk("dma_sync_single_for_device: unsupported dir %u\n",
                   dir);
        break;
    }
}
int main (int argc, char *argv[])
{
    GV.datapath = coi_getdatapath(argv[0]);
    gboolean f = coip(argc, argv);
    if (!f) {
        return 0;
    }

    srand(time(NULL));
    cache_army();
    cache_images(GV.IMAGES);
    cache_langfile(GV.langfile);
    init_map();
    gui_init(argc, argv);
    cache_clear();
    return 0;
}
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
    struct fastlane_dma_registers *dregs =
        (struct fastlane_dma_registers *) (esp->dregs);
    unsigned long *t;

    cache_clear(addr, length);

    dma_clear(esp);

    t = (unsigned long *)((addr & 0x00ffffff) + esp->edev);

    dregs->clear_strobe = 0;
    *t = addr;

    ctrl_data = (ctrl_data & FASTLANE_DMA_MASK) | FASTLANE_DMA_ENABLE;
    dregs->ctrl_reg = ctrl_data;
}
static void pbap_disconnect(struct obex_session *os, void *user_data)
{
    struct pbap_session *pbap = user_data;

    manager_unregister_session(os);

    if (pbap->obj)
        pbap->obj->session = NULL;

    if (pbap->params) {
        g_free(pbap->params->searchval);
        g_free(pbap->params);
    }

    cache_clear(&pbap->cache);
    g_free(pbap->folder);
    g_free(pbap);
}
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
{
    struct cyber_dma_registers *dregs =
        (struct cyber_dma_registers *) esp->dregs;

    cache_clear(addr, length);

    addr &= ~(1);
    dregs->dma_addr0 = (addr >> 24) & 0xff;
    dregs->dma_addr1 = (addr >> 16) & 0xff;
    dregs->dma_addr2 = (addr >>  8) & 0xff;
    dregs->dma_addr3 = (addr      ) & 0xff;
    ctrl_data &= ~(CYBER_DMA_WRITE);

    /* Check if physical address is outside Z2 space and of
     * block length/block aligned in memory. If this is the
     * case, enable 32 bit transfer. In all other cases, fall back
     * to 16 bit transfer.
     * Obviously 32 bit transfer should be enabled if the DMA address
     * and length are 32 bit aligned. However, this leads to some
     * strange behavior. Even 64 bit aligned addr/length fails.
     * Until I've found a reason for this, 32 bit transfer is only
     * used for full-block transfers (1kB).
     *   -jskov
     */
#if 0
    if ((addr & 0x3fc) || length & 0x3ff ||
        ((addr > 0x200000) && (addr < 0xff0000)))
        ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */
    else
        ctrl_data |= CYBER_DMA_Z3;	/* CHIP/Z3, do 32 bit DMA */
#else
    ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */
#endif
    dregs->ctrl_reg = ctrl_data;
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
    struct Scsi_Host *instance = cmd->device->host;
    struct a3000_hostdata *hdata = shost_priv(instance);
    struct WD33C93_hostdata *wh = &hdata->wh;
    struct a3000_scsiregs *regs = hdata->regs;
    unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
    unsigned long addr = virt_to_bus(cmd->SCp.ptr);

    /*
     * if the physical address has the wrong alignment, or if
     * physical address is bad, or if it is a write and at the
     * end of a physical memory chunk, then allocate a bounce
     * buffer
     */
    if (addr & A3000_XFER_MASK) {
        wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
        wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len, GFP_KERNEL);

        /* can't allocate memory; use PIO */
        if (!wh->dma_bounce_buffer) {
            wh->dma_bounce_len = 0;
            return 1;
        }

        if (!dir_in) {
            /* copy to bounce buffer for a write */
            memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
                   cmd->SCp.this_residual);
        }

        addr = virt_to_bus(wh->dma_bounce_buffer);
    }

    /* setup dma direction */
    if (!dir_in)
        cntr |= CNTR_DDIR;

    /* remember direction */
    wh->dma_dir = dir_in;

    regs->CNTR = cntr;

    /* setup DMA *physical* address */
    regs->ACR = addr;

    if (dir_in) {
        /* invalidate any cache */
        cache_clear(addr, cmd->SCp.this_residual);
    } else {
        /* push any dirty cache */
        cache_push(addr, cmd->SCp.this_residual);
    }

    /* start DMA */
    mb();    /* make sure setup is completed */
    regs->ST_DMA = 1;
    mb();    /* make sure DMA has started before next IO */

    /* return success */
    return 0;
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
    struct Scsi_Host *instance = cmd->device->host;
    struct a2091_hostdata *hdata = shost_priv(instance);
    struct WD33C93_hostdata *wh = &hdata->wh;
    struct a2091_scsiregs *regs = hdata->regs;
    unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
    unsigned long addr = virt_to_bus(cmd->SCp.ptr);

    /* use a bounce buffer if the physical address is bad for DMA */
    if (addr & A2091_XFER_MASK) {
        wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
        wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len, GFP_KERNEL);

        /* can't allocate memory; use PIO */
        if (!wh->dma_bounce_buffer) {
            wh->dma_bounce_len = 0;
            return 1;
        }

        addr = virt_to_bus(wh->dma_bounce_buffer);

        /* the bounce buffer itself may be unsuitable for DMA */
        if (addr & A2091_XFER_MASK) {
            kfree(wh->dma_bounce_buffer);
            wh->dma_bounce_buffer = NULL;
            wh->dma_bounce_len = 0;
            return 1;
        }

        if (!dir_in) {
            /* copy to bounce buffer for a write */
            memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
                   cmd->SCp.this_residual);
        }
    }

    /* setup dma direction */
    if (!dir_in)
        cntr |= CNTR_DDIR;

    /* remember direction */
    wh->dma_dir = dir_in;

    regs->CNTR = cntr;

    /* setup DMA *physical* address */
    regs->ACR = addr;

    if (dir_in) {
        /* invalidate any cache */
        cache_clear(addr, cmd->SCp.this_residual);
    } else {
        /* push any dirty cache */
        cache_push(addr, cmd->SCp.this_residual);
    }

    /* start DMA */
    regs->ST_DMA = 1;

    return 0;
}
static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
{
    unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
    unsigned long addr = VTOP(cmd->SCp.ptr);

    /*
     * if the physical address has the wrong alignment, or if
     * physical address is bad, or if it is a write and at the
     * end of a physical memory chunk, then allocate a bounce
     * buffer
     */
    if (addr & A3000_XFER_MASK ||
        (!dir_in && mm_end_of_chunk (addr, cmd->SCp.this_residual))) {
        HDATA(a3000_host)->dma_bounce_len =
            (cmd->SCp.this_residual + 511) & ~0x1ff;
        HDATA(a3000_host)->dma_bounce_buffer =
            scsi_malloc (HDATA(a3000_host)->dma_bounce_len);

        /* can't allocate memory; use PIO */
        if (!HDATA(a3000_host)->dma_bounce_buffer) {
            HDATA(a3000_host)->dma_bounce_len = 0;
            return 1;
        }

        if (!dir_in) {
            /* copy to bounce buffer for a write */
            if (cmd->use_sg) {
                memcpy (HDATA(a3000_host)->dma_bounce_buffer,
                        cmd->SCp.ptr, cmd->SCp.this_residual);
            } else
                memcpy (HDATA(a3000_host)->dma_bounce_buffer,
                        cmd->request_buffer, cmd->request_bufflen);
        }

        addr = VTOP(HDATA(a3000_host)->dma_bounce_buffer);
    }

    /* setup dma direction */
    if (!dir_in)
        cntr |= CNTR_DDIR;

    /* remember direction */
    HDATA(a3000_host)->dma_dir = dir_in;

    DMA(a3000_host)->CNTR = cntr;

    /* setup DMA *physical* address */
    DMA(a3000_host)->ACR = addr;

    if (dir_in)
        /* invalidate any cache */
        cache_clear (addr, cmd->SCp.this_residual);
    else
        /* push any dirty cache */
        cache_push (addr, cmd->SCp.this_residual);

    /* start DMA */
    DMA(a3000_host)->ST_DMA = 1;

    /* return success */
    return 0;
}
void clear_block_list()
{
    cache_clear(block_list, 3600); // Clear items older than 1 hour
}
static int pbap_get(struct obex_session *os, void *user_data)
{
    struct pbap_session *pbap = user_data;
    const char *type = obex_get_type(os);
    const char *name = obex_get_name(os);
    struct apparam_field *params;
    const uint8_t *buffer;
    char *path;
    ssize_t rsize;
    int ret;

    DBG("name %s type %s pbap %p", name, type, pbap);

    if (type == NULL)
        return -EBADR;

    rsize = obex_get_apparam(os, &buffer);
    if (rsize < 0) {
        if (g_ascii_strcasecmp(type, VCARDENTRY_TYPE) != 0)
            return -EBADR;
        rsize = 0;
    }

    params = parse_aparam(buffer, rsize);
    if (params == NULL)
        return -EBADR;

    if (pbap->params) {
        g_free(pbap->params->searchval);
        g_free(pbap->params);
    }

    pbap->params = params;

    if (g_ascii_strcasecmp(type, PHONEBOOK_TYPE) == 0) {
        /* Always contains the absolute path */
        if (g_path_is_absolute(name))
            path = g_strdup(name);
        else
            path = g_build_filename("/", name, NULL);
    } else if (g_ascii_strcasecmp(type, VCARDLISTING_TYPE) == 0) {
        /* Always relative */
        if (!name || strlen(name) == 0) {
            /* Current folder */
            path = g_strdup(pbap->folder);
        } else {
            /* Current folder + relative path */
            path = g_build_filename(pbap->folder, name, NULL);

            /* clear cache */
            pbap->cache.valid = FALSE;
            pbap->cache.index = 0;
            cache_clear(&pbap->cache);
        }
    } else if (g_ascii_strcasecmp(type, VCARDENTRY_TYPE) == 0) {
        /* File name only */
        path = g_strdup(name);
    } else
        return -EBADR;

    if (path == NULL)
        return -EBADR;

    ret = obex_get_stream_start(os, path);

    g_free(path);

    return ret;
}
int main(int argc, char **argv)
{
    //rcpDebugEnable();

    // initialize shared memory
    rcp_init(RCP_PROC_DNS);

    RcpPkt *pkt;
    pkt = malloc(sizeof(RcpPkt) + RCP_PKT_DATA_LEN);
    if (pkt == NULL) {
        fprintf(stderr, "Error: process %s, cannot allocate memory, exiting...\n",
            rcpGetProcName());
        exit(1);
    }

    struct stat s;
    if (stat("/opt/rcp/var/log/dnsproxy_at_startup", &s) == 0)
        proxy_disabled = 1;

    // open sockets if necessary
    if (shm->config.dns_server) {
        if (proxy_disabled) {
            rcpLog(muxsock, RCP_PROC_DNS, RLOG_WARNING, RLOG_FC_DNS,
                "an external DNS proxy is already running on the system, RCP DNS proxy will be disabled");
        }
        else {
            client_sock = rx_open(DNS_SERVER_PORT); // the socket open to clients listens on the server port
            if (client_sock == 0) {
                fprintf(stderr, "Error: process %s, cannot open sockets, exiting...\n",
                    rcpGetProcName());
                exit(1);
            }
        }
    }

    // set the static cache entries
    cache_update_static();

    // drop privileges
    rcpDropPriv();

    // initialize request list
    rq_list_init();

    // receive loop
    int reconnect_timer = 0;
    struct timeval ts;
    ts.tv_sec = 1; // 1 second
    ts.tv_usec = 0;

    // use this timer to speed up the rx loop when the system is busy
    uint32_t rcptic = rcpTic();

    while (1) {
        // reconnect mux socket if connection failed
        if (reconnect_timer >= 10) {
            // a regular reconnect will fail, this process runs with dropped privileges;
            // end the process and restart it again
            break;
        }

        // set descriptors
        fd_set fds;
        FD_ZERO(&fds);
        int maxfd = 0;
        if (muxsock != 0) {
            FD_SET(muxsock, &fds);
            maxfd = (muxsock > maxfd) ? muxsock : maxfd;
        }
        if (client_sock != 0) {
            FD_SET(client_sock, &fds);
            maxfd = (client_sock > maxfd) ? client_sock : maxfd;
        }

        // set all server sockets
        DnsReq *rq = rq_active();
        while (rq) {
            if (rq->sock != 0) {
                FD_SET(rq->sock, &fds);
                maxfd = (rq->sock > maxfd) ? rq->sock : maxfd;
            }
            rq = rq->next;
        }

        // wait for data
        errno = 0;
        int nready = select(maxfd + 1, &fds, (fd_set *) 0, (fd_set *) 0, &ts);

        if (nready < 0) {
            fprintf(stderr, "Error: process %s, select nready %d, errno %d\n",
                rcpGetProcName(), nready, errno);
        }
        else if (nready == 0) {
            // watchdog
            pstats->wproc++;

            // muxsocket reconnect timeout
            if (reconnect_timer > 0)
                reconnect_timer++;

            // age cache entries
            cache_timer();

            // age request queue entries
            rq_timer();

            // reset rate-limit counter
            rate_limit = 0;

            // reload ts
            rcptic++;
            if (rcpTic() > rcptic) {
                // speed up the clock
                ts.tv_sec = 0;
                ts.tv_usec = 800000; // 0.8 seconds
                pstats->select_speedup++;
            }
            else {
                ts.tv_sec = 1; // 1 second
                ts.tv_usec = 0;
            }
        }
        // cli data
        else if (muxsock != 0 && FD_ISSET(muxsock, &fds)) {
            errno = 0;
            int nread = recv(muxsock, pkt, sizeof(RcpPkt), 0);
            if (nread < sizeof(RcpPkt) || errno != 0) {
                // fprintf(stderr, "Error: process %s, muxsocket nread %d, errno %d, disconnecting...\n",
                //     rcpGetProcName(), nread, errno);
                close(muxsock);
                reconnect_timer = 1;
                muxsock = 0;
                continue;
            }
            // read the packet data
            if (pkt->data_len != 0) {
                nread += recv(muxsock, (unsigned char *) pkt + sizeof(RcpPkt),
                    pkt->data_len, 0);
            }
            ASSERT(nread == sizeof(RcpPkt) + pkt->data_len);

            // process the cli packet
            if (pkt->type == RCP_PKT_TYPE_CLI && pkt->destination == RCP_PROC_DNS) {
                processCli(pkt);
                // forward the packet back to rcp
                send(muxsock, pkt, sizeof(RcpPkt) + pkt->data_len, 0);
            }
            // dns updates packet
            else if (pkt->type == RCP_PKT_TYPE_UPDATEDNS) {
                rcpLog(muxsock, RCP_PROC_DNS, RLOG_DEBUG, RLOG_FC_IPC,
                    "processing DNS updates packet");
                cache_update_static();
            }
            else
                ASSERT(0);
        }
        // DNS packets from clients
        else if (client_sock != 0 && FD_ISSET(client_sock, &fds)) {
            rx_packet(client_sock);
        }
        // DNS packets from servers
        else {
            DnsReq *rq = rq_active();
            while (rq) {
                if (rq->sock != 0 && FD_ISSET(rq->sock, &fds)) {
                    rx_packet(rq->sock);
                    break;
                }
                rq = rq->next;
            }
        }

        if (force_restart) {
            rcpLog(muxsock, RCP_PROC_DNS, RLOG_NOTICE, RLOG_FC_IPC,
                "process %s exiting for configuration purposes", rcpGetProcName());
            if (pstats->wmonitor != 0)
                pstats->wproc = pstats->wmonitor; // trigger a restart in the next monitoring cycle
            pstats->no_logging = 1;
            break; // exit from while(1)
        }
        if (force_shutdown)
            break;
    }

    fflush(0);
    sleep(1);

    // close sockets
    if (muxsock != 0)
        close(muxsock);
    if (client_sock != 0)
        close(client_sock);

    // remove cli memory
    cliRemoveFunctions();

    // clear request memory
    rq_clear_inactive();

    // remove cache memory
    cache_clear();

    // remove packet memory
    if (pkt)
        free(pkt);

    return 0;
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
    struct Scsi_Host *instance = cmd->device->host;
    struct a2091_hostdata *hdata = shost_priv(instance);
    struct WD33C93_hostdata *wh = &hdata->wh;
    struct a2091_scsiregs *regs = hdata->regs;
    unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
    unsigned long addr = virt_to_bus(cmd->SCp.ptr);

    /* don't allow DMA if the physical address is bad */
    if (addr & A2091_XFER_MASK) {
        wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
        wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len, GFP_KERNEL);

        /* can't allocate memory; use PIO */
        if (!wh->dma_bounce_buffer) {
            wh->dma_bounce_len = 0;
            return 1;
        }

        /* get the physical address of the bounce buffer */
        addr = virt_to_bus(wh->dma_bounce_buffer);

        /* the bounce buffer may not be in the first 16M of physmem */
        if (addr & A2091_XFER_MASK) {
            /* we could use chipmem... maybe later */
            kfree(wh->dma_bounce_buffer);
            wh->dma_bounce_buffer = NULL;
            wh->dma_bounce_len = 0;
            return 1;
        }

        if (!dir_in) {
            /* copy to bounce buffer for a write */
            memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
                   cmd->SCp.this_residual);
        }
    }

    /* setup dma direction */
    if (!dir_in)
        cntr |= CNTR_DDIR;

    /* remember direction */
    wh->dma_dir = dir_in;

    regs->CNTR = cntr;

    /* setup DMA *physical* address */
    regs->ACR = addr;

    if (dir_in) {
        /* invalidate any cache */
        cache_clear(addr, cmd->SCp.this_residual);
    } else {
        /* push any dirty cache */
        cache_push(addr, cmd->SCp.this_residual);
    }

    /* start DMA */
    regs->ST_DMA = 1;

    /* return success */
    return 0;
}
void free_block_list()
{
    cache_clear(block_list, 0); // Remove all items
}