// could consider having an API to allow these to dynamically change // MTRRs are for physical, static ranges. PAT are linear, more granular, and // more dynamic void setup_default_mtrrs(barrier_t* smp_barrier) { // disable interrupts int8_t state = 0; disable_irqsave(&state); // barrier - if we're meant to do this for all cores, we'll be // passed a pointer to an initialized barrier if (smp_barrier) waiton_barrier(smp_barrier); // disable caching cr0: set CD and clear NW lcr0((rcr0() | CR0_CD) & ~CR0_NW); // flush caches cache_flush(); // flush tlb tlb_flush_global(); // disable MTRRs, and sets default type to WB (06) #ifndef CONFIG_NOMTRRS write_msr(IA32_MTRR_DEF_TYPE, 0x00000006); // Now we can actually safely adjust the MTRRs // MTRR for IO Holes (note these are 64 bit values we are writing) // 0x000a0000 - 0x000c0000 : VGA - WC 0x01 write_msr(IA32_MTRR_PHYSBASE0, PTE_ADDR(VGAPHYSMEM) | 0x01); // if we need to have a full 64bit val, use the UINT64 macro write_msr(IA32_MTRR_PHYSMASK0, 0x0000000ffffe0800); // 0x000c0000 - 0x00100000 : IO devices (and ROM BIOS) - UC 0x00 write_msr(IA32_MTRR_PHYSBASE1, PTE_ADDR(DEVPHYSMEM) | 0x00); write_msr(IA32_MTRR_PHYSMASK1, 0x0000000ffffc0800); // APIC/IOAPIC holes /* Going to skip them, since we set their mode using PAT when we * map them in */ // make sure all other MTRR ranges are disabled (should be unnecessary) write_msr(IA32_MTRR_PHYSMASK2, 0); write_msr(IA32_MTRR_PHYSMASK3, 0); write_msr(IA32_MTRR_PHYSMASK4, 0); write_msr(IA32_MTRR_PHYSMASK5, 0); write_msr(IA32_MTRR_PHYSMASK6, 0); write_msr(IA32_MTRR_PHYSMASK7, 0); // keeps default type to WB (06), turns MTRRs on, and turns off fixed ranges write_msr(IA32_MTRR_DEF_TYPE, 0x00000806); #endif // reflush caches and TLB cache_flush(); tlb_flush_global(); // turn on caching lcr0(rcr0() & ~(CR0_CD | CR0_NW)); // barrier if (smp_barrier) waiton_barrier(smp_barrier); // enable interrupts enable_irqsave(&state); }
/*
 * Removes some or all tags from some or all contigs.
 * If the contig list or tag list is blank it implies all contigs or all tags.
 *
 * When tag_list is non-empty the active tag set is installed via
 * SetActiveTags() and the type names are hashed so the per-contig worker can
 * test membership cheaply; h == NULL means "delete all tag types".
 *
 * Returns  0 on success
 *         -1 on failure
 */
int delete_tags(GapIO *io, int ncontigs, contig_list_t *contigs,
		char *tag_list, int verbose)
{
    HashTable *h = NULL;
    int ret = 0;

    /* Hash tag types */
    if (tag_list && *tag_list) {
	int i;

	if (SetActiveTags(tag_list) == -1) {
	    return -1;
	}
	h = HashTableCreate(32, 0);
	for (i = 0; i < number_of_active_tags; i++) {
	    HashData hd;
	    hd.i = 0;
	    /* NOTE(review): key length is hard-coded to 4 — presumably tag
	     * type names are fixed 4-byte codes; confirm against
	     * active_tag_types' definition. */
	    HashTableAdd(h, active_tag_types[i], 4, hd, NULL);
	}
    }

    /* Iterate over contig list or all contigs */
    if (verbose)
	vfuncheader("Delete Tags");

    if (ncontigs) {
	/* Explicit contig list supplied by the caller. */
	int i;

	for (i = 0; i < ncontigs; i++) {
	    /* NOTE(review): cache_search() result is not NULL-checked
	     * before c->name is used. */
	    contig_t *c = cache_search(io, GT_Contig, contigs[i].contig);
	    vmessage("Scanning contig %d of %d (%s)\n",
		     i+1, ncontigs, c->name);
	    ret |= delete_tag_single_contig(io, contigs[i].contig, h, verbose);
	    UpdateTextOutput();
	    /* Flush after each contig so progress is durable. */
	    cache_flush(io);
	}
    } else {
	/* No list: walk every contig in stored order. */
	int i;
	tg_rec *order = ArrayBase(tg_rec, io->contig_order);

	for (i = 0; i < NumContigs(io); i++) {
	    contig_t *c = cache_search(io, GT_Contig, order[i]);
	    vmessage("Scanning contig %d of %d (%s)\n",
		     i+1, NumContigs(io), c->name);
	    ret |= delete_tag_single_contig(io, order[i], h, verbose);
	    UpdateTextOutput();
	    cache_flush(io);
	}
    }

    /* Reset the global active-tag state and free the lookup hash. */
    SetActiveTags("");

    if (h)
	HashTableDestroy(h, 0);

    return ret;
}
struct cached_block* get_free_cache_buff(){ if(INODE_DEBUG) printf("get_free_cache_buff()\n"); int idx = bitmap_scan_and_flip(cache_bitmap, 0, 1, FREE); if(idx != BITMAP_ERROR){ struct cached_block* newblock = buffcache + idx; if(INODE_DEBUG) printf("get_free_cache_buff(): idx %d buffcache %x newblock %x buffcache+BUFF_CACHE_SIZE %x\n", idx, buffcache, newblock, buffcache+N_BUFFERS); memset(newblock, 0, sizeof(struct cached_block)); list_push_front(&buffcachelist, &newblock->elem); return newblock; } else{ struct cached_block* last_block = list_entry(list_back(&buffcache), struct cached_block, elem); cache_flush(NULL, last_block->sector); list_remove(&last_block->elem); traverse_buffcachelist(); list_push_front(&buffcachelist, &last_block->elem); memset(last_block->data, 0, BLOCK_SECTOR_SIZE); last_block->inode=NULL; last_block->sector=0; if(INODE_DEBUG) printf("get_free_cache_buff(): idx %d buffcache %x lastblock %x buffcache+BUFF_CACHE_SIZE %x\n", idx, buffcache, last_block, buffcache+N_BUFFERS); return last_block; } }
//$02 cache auto GSU::op_cache() { if(regs.cbr != (regs.r[15] & 0xfff0)) { regs.cbr = regs.r[15] & 0xfff0; cache_flush(); } regs.reset(); }
/*
 * cleanup_before_linux() is called just before we boot the Linux kernel.
 * It prepares the processor: interrupts are masked, the LCD panel is shut
 * down (when configured), and the I/D caches are disabled and flushed so
 * the kernel starts from a known CPU state.
 *
 * Always returns 0.
 */
int cleanup_before_linux (void)
{
	/*
	 * this function is called just before we call linux
	 * it prepares the processor for linux
	 *
	 * we turn off caches etc ...
	 */

	disable_interrupts ();

#ifdef CONFIG_LCD
	{
		extern void lcd_disable(void);
		extern void lcd_panel_disable(void);

		lcd_disable(); /* proper disable of lcd & panel */
		lcd_panel_disable();
	}
#endif

	/* turn off I/D-cache */
	icache_disable();
	dcache_disable();
	/* flush I/D-cache */
	cache_flush();

	/* Workaround to enable L2CC during kernel decompressing.
	 * NOTE(review): fixup_before_linux is expanded as a bare statement —
	 * it is expected to be a function-like macro or expression. */
#ifdef fixup_before_linux
	fixup_before_linux;
#endif
	return 0;
}
/*
 * cleanup_before_linux() is called just before we boot the Linux kernel.
 * It prepares the processor: interrupts off, optional board-specific
 * cleanup hook, then I/D caches disabled and flushed.
 *
 * Always returns 0.
 *
 * (The introductory comment was previously duplicated verbatim inside the
 * body; the duplicate has been removed — no code change.)
 */
int cleanup_before_linux (void)
{
	/*
	 * this function is called just before we call linux
	 * it prepares the processor for linux
	 *
	 * we turn off caches etc ...
	 */

	disable_interrupts ();

	/* Board-specific hook, e.g. to quiesce peripherals. */
#ifdef CONFIG_BOARD_CLEANUP_BEFORE_LINUX
	board_cleanup_before_linux();
#endif

	/* turn off I/D-cache */
	icache_disable();
	dcache_disable();
	/* flush I/D-cache */
	cache_flush();

	return 0;
}
//$02 cache void SuperFX::op_cache() { if(regs.cbr != (regs.r[15] & 0xfff0)) { regs.cbr = regs.r[15] & 0xfff0; cache_flush(); } regs.reset(); }
/*
 * cleanup_before_linux() is called just before we boot the Linux kernel.
 * The work needed is CPU/board dependent, selected at compile time:
 *  - IMPA7/EP7312/ARMADILLO: mask interrupts, disable+flush caches, and on
 *    ARM7 rev D raise the clock to 73 MHz (see start.S).
 *  - NETARM/S3C4510B/LPC2292: only interrupts need masking.
 *  - IntegratorAP/CM720T: nothing (yet).
 * Any other CPU type is a build error.
 *
 * Always returns 0.
 */
int cleanup_before_linux (void)
{
	/*
	 * this function is called just before we call linux
	 * it prepares the processor for linux
	 *
	 * we turn off caches etc ...
	 * and we set the CPU-speed to 73 MHz - see start.S for details
	 */

#if defined(CONFIG_IMPA7) || defined(CONFIG_EP7312) || defined(CONFIG_ARMADILLO)
	disable_interrupts ();
	/* turn off I-cache */
	icache_disable();
	dcache_disable();
	/* flush I-cache */
	cache_flush();
#ifdef CONFIG_ARM7_REVD
	/* go to high speed */
	IO_SYSCON3 = (IO_SYSCON3 & ~CLKCTL) | CLKCTL_73;
#endif
#elif defined(CONFIG_NETARM) || defined(CONFIG_S3C4510B) || defined(CONFIG_LPC2292)
	disable_interrupts ();
	/* Nothing more needed */
#elif defined(CONFIG_INTEGRATOR) && defined(CONFIG_ARCH_INTEGRATOR)
	/* No cleanup before linux for IntegratorAP/CM720T as yet */
#else
#error No cleanup_before_linux() defined for this CPU type
#endif
	return 0;
}
/*
 * Force every entry out of a cache_detail.
 *
 * Temporarily raises flush_time to LONG_MAX so that every entry,
 * however recent, qualifies for flushing, triggers the flush, then
 * drops flush_time back to 1.  The statement order is load-bearing:
 * flush_time must be raised before cache_flush() runs.
 */
void cache_purge(struct cache_detail *detail)
{
	detail->flush_time = LONG_MAX;
	detail->nextcheck = get_seconds();
	cache_flush();
	detail->flush_time = 1;
}
/*
 * Entry point: reads per-thread memory-access trace files, stages them
 * through a cache backed by a swap file, then builds and writes a
 * thread-to-thread communication matrix.
 *
 * Exits via error() on any file-open failure.
 */
int main(int argc, char **argv)
{
	FILE *swp;           /* swap file backing the cache */
	struct cache *c;
	struct matrix *m;    /* ntraces x ntraces communication counts */
	FILE *matrix_shared; /* output file for the matrix */

	readargs(argc, argv);

	if ((swp = fopen(swapfile, "w+")) == NULL)
		error("cannot open swap file");

	c = cache_create(&access_info, swp, CACHE_SIZE);

	/* Read traces, one file per thread. */
	for (int i = 0; i < ntraces; i++)
	{
		FILE *trace;

		if ((trace = fopen(tracefiles[i], "r")) == NULL)
			error("cannot open trace file");

		trace_read(c, trace, i);

		fclose(trace);
		fprintf(stderr, "\nFechado arquivo de trace da thread %d\n\n", i);
	}

	/* Flush remaining trace data to the swap file. */
	cache_flush(c);

	/* Create communication matrix from the staged accesses.
	 * NOTE(review): the matrix is sized QTD_THREADS x QTD_THREADS but
	 * iterated with ntraces below — confirm ntraces <= QTD_THREADS. */
	fseek(swp, 0, SEEK_SET);
	m = matrix_create(QTD_THREADS, QTD_THREADS);
	fprintf(stderr, "\nMatriz criada\n");
	matrix_generate(swp, m);

	if ((matrix_shared = fopen(outfile, "w")) == NULL)
		error("cannot open output file");
	fprintf(stderr, "\nGravar matriz no arquivo\n");

	/* Write the matrix as "i;j;count" lines. */
	for (int i = 0; i < ntraces; i++)
	{
		for(int j = 0; j < ntraces; j++)
			fprintf(matrix_shared, "%d;%d;%d\n", i, j, (int) matrix_get(m, i, j));
	}

	/* House keeping. */
	fclose(matrix_shared);
	fclose(swp);
	fprintf(stderr, "\n\n FIM!\n");
	return (EXIT_SUCCESS);
}
/*
 * cleanup_before_linux() is called just before we boot the Linux kernel.
 * Masks interrupts, disables and flushes the L1 I/D caches, and (unless
 * CONFIG_L2_OFF) cycles the L2 cache through disable/flush/enable, with
 * a DSB-style barrier in between so all maintenance completes.
 *
 * Always returns 0.
 */
int cleanup_before_linux(void)
{
	unsigned int i;

	/*
	 * this function is called just before we call linux
	 * it prepares the processor for linux
	 *
	 * we turn off caches etc ...
	 */
	disable_interrupts();

	/* turn off I/D-cache */
	icache_disable();
	dcache_disable();

	/* flush I/D-cache */
	cache_flush();

#ifndef CONFIG_L2_OFF
	/* turn off L2 cache */
	l2_cache_disable();
	/* flush L2 cache as well */
	v7_flush_dcache_all(get_device_type());
#endif
	i = 0;
	/* mem barrier (DSB via CP15 c7,c10,4) to sync up things */
	asm("mcr p15, 0, %0, c7, c10, 4": :"r"(i));

#ifndef CONFIG_L2_OFF
	l2_cache_enable();
#endif

	return 0;
}
/* Closes INODE and writes it to disk.
   If this was the last reference to INODE, frees its memory.
   If INODE was also a removed inode, frees its blocks. */
void
inode_close (struct inode *inode)
{
  /* Ignore null pointer. */
  if (inode == NULL)
    return;

  /* Release resources if this was the last opener. */
  if (--inode->open_cnt == 0)
    {
      /* Remove from inode list and release lock. */
      list_remove (&inode->elem);

      /* Deallocate blocks if removed. */
      if (inode->removed) /* inode is marked for deletion */
        {
          /* Drop (not flush) any cached sectors, then free the data
             blocks and the on-disk inode sector itself. */
          cache_remove(inode,SECTOR_NONE);
          inode_deallocate_data_blocks(inode);
          free_map_release (inode->sector, 1);
          //free_map_release (inode->data.start, bytes_to_sectors (inode->data.length));
          free(inode);
          return;
        }
      /* Not removed: persist dirty cached sectors before evicting them,
         then free the in-memory inode. */
      cache_flush(inode,SECTOR_NONE);
      cache_remove(inode,SECTOR_NONE);
      free (inode);
    }
}
THREADABLE_FUNCTION_END //start the master thread, locking all the other threads void thread_master_start(int narg,char **arg,void(*main_function)(int narg,char **arg)) { //initialize reducing buffers glb_single_reduction_buf=(float*)malloc(nthreads*sizeof(float)); glb_double_reduction_buf=(double*)malloc(nthreads*sizeof(double)); glb_quadruple_reduction_buf=(float_128*)malloc(nthreads*sizeof(float_128)); //lock the pool thread_pool_locked=true; cache_flush(); //control the proper working of all the threads... thread_sanity_check(); //launch the main function main_function(narg,arg); //free global reduction buffers free(glb_single_reduction_buf); free(glb_double_reduction_buf); free(glb_quadruple_reduction_buf); //exit the thread pool thread_pool_stop(); }
/*
 * Destroys a cache: flushes any pending data first (while the cache's
 * bookkeeping is still intact), then frees the hash table, the feature
 * arrays, and finally the cache structure itself.
 */
void cache_destroy(struct cache *c)
{
	cache_flush(c);
	hashtable_destroy(c->h);
	free(c->features);
	free(c->feature_offset);
	free(c);
}
/* Shuts down the file system module, writing any unwritten data
   to disk.  The buffer cache is flushed before the free map is
   closed so no dirty sectors are lost. */
void
filesys_done (void)
{
  /* Start of Project 4 */
  cache_flush ();
  /* End of Project 4 */
  free_map_close ();
}
int arm_hook (void *_org, void *dst, void **trampo) { char *org = (char*)_org; if (!org) return 0; if (mprotect ((void*)((uint32_t)org & ~(PAGE_SIZE - 1)), 8, PROT_EXEC | PROT_WRITE | PROT_READ) != 0) { LOGD ("mprotect error : %s", strerror (errno)); return 0; } char *tr = alloc_trampo (); if (tr == 0) return 0; memcpy (tr, org, 8); /*提取原函数的前两个指令*/ emit_arm_jmp (tr + 8, org + 8); cache_flush ((uint32_t)tr, (uint32_t)(tr + 16)); *trampo = tr; emit_arm_jmp (org, dst); /*修改原函数头*/ cache_flush ((uint32_t)org, (uint32_t)(org + 8)); return 1; }
/* Evict the cached range [offset, offset+size) of a file from the
 * global file cache.  Files without a cache are left untouched. */
void file_data_flush(struct file *file, long long offset, int size)
{
	if (!file->cache)
		return;

	struct file_cache_id id={offset,size,file->name_id,0};
	cache_flush(file_cache,&id);
	dbg(1,"Flushing "LONGLONG_FMT" %d bytes\n",offset,size);
}
/*
 * Benchmark driver: times scalar (checkthemall) vs AVX2
 * (avxcheckthemall) membership probes against a multiply-hash table,
 * for table sizes from 1K up to 2^32 slots.  The cache is deliberately
 * flushed between timing runs so each measurement starts cold.
 */
int main() {
  hashset_t H;
  /* K independent multipliers for the multiply-shift hash family. */
  for(int k = 0; k < K; ++k)
    H.multiplier[k] = getrand64();
  H.vmultiplier= _mm256_loadu_si256((__m256i const * )H.multiplier);
  uint64_t howmany = 100;
  uint64_t * keys = malloc(sizeof(uint64_t) * howmany);
  for(uint64_t k = 0; k < howmany; ++k)
    keys[k] = getrand64();
  /* Double the table size each round, up to 2^32 slots. */
  for(H.size = 1024; H.size < (UINT64_C(1) << 32) ; H.size *=2) {
    H.sizemask = _mm256_set1_epi64x(H.size-1);
    printf("alloc size = %f MB \n", H.size * sizeof(uint64_t) / (1024 * 1024.0));
    H.data = calloc(H.size , sizeof(uint64_t));
    /* Seed every other small value into the table.
     * NOTE(review): j==0 is stored but calloc'd slots are also 0, so
     * slot 0's occupancy is ambiguous — presumably accounted for by
     * expected(); confirm. */
    for(int j = 0; j < howmany; j += 2)
      H.data[hash(H.multiplier[0],j) & (H.size - 1)] = j;
    int answer = expected(&H,howmany,keys);
    /* Each RDTSC_BEST run: check result == answer, flushing caches
     * between the 50 trials. */
    RDTSC_BEST(checkthemall(&H,howmany,keys), answer, cache_flush(&H,howmany,keys), 50,howmany);
    RDTSC_BEST(avxcheckthemall(&H,howmany,keys), answer, cache_flush(&H,howmany,keys), 50,howmany);
    free(H.data);
  }
  free(keys);
}
//unlock the thread pool void thread_pool_unlock() { THREAD_BARRIER_FORCE(); #ifdef THREAD_DEBUG GET_THREAD_ID(); if(rank==0 && VERBOSITY_LV3) printf("thread %d unlocking the pool\n",thread_id); #endif thread_pool_locked=false; cache_flush(); }
/* Invalidate every page of a cache.  Dirty pages are written back via
 * cache_flush() first, then each entry is reset to its pristine state
 * (free sector, no accesses, clean).  A NULL cache is a no-op. */
void cache_invalidate (CACHE* cache)
{
	unsigned int page;

	if (cache == NULL)
		return;

	/* Persist dirty data before dropping the entries. */
	cache_flush(cache);

	for (page = 0; page < cache->numberOfPages; page++)
	{
		cache->cacheEntries[page].sector = CACHE_FREE;
		cache->cacheEntries[page].last_access = 0;
		cache->cacheEntries[page].count = 0;
		cache->cacheEntries[page].dirty = false;
	}
}
/* Walk every taskstat delta in the cache and hand it to the output
 * backend, then flush the cache.  cache_walk(NULL) begins the walk;
 * each subsequent call yields the next entry until it returns NULL. */
static void print_tasks(void)
{
	struct taskstat_delta *delta = NULL;

	while ((delta = cache_walk(delta)) != NULL)
		output->print_data(delta);

	cache_flush();
}
/*
 * cleanup_before_linux() is called just before we boot the Linux kernel
 * on i.MX: clears the manufacturing-mode flag and shuts down the video
 * pipeline where configured, masks interrupts, then flushes and disables
 * the L1 caches, handles L2, issues a barrier, and turns the MMU off.
 *
 * Always returns 0.
 */
int cleanup_before_linux(void)
{
	unsigned int i;

#ifdef CONFIG_CMD_IMX_DOWNLOAD_MODE
	/* Clear the "enter manufacturing/download mode" marker in memory. */
	extern void clear_mfgmode_mem(void);
	clear_mfgmode_mem();
#endif
#ifdef CONFIG_VIDEO_MX5
	/* Tear down the IPU background sync channel used for the display. */
	ipu_disable_channel(MEM_BG_SYNC);
	ipu_uninit_channel(MEM_BG_SYNC);
#endif

	/*
	 * this function is called just before we call linux
	 * it prepares the processor for linux
	 *
	 * we turn off caches etc ...
	 */
	disable_interrupts();

	/* flush cache (while still enabled, so dirty lines reach memory) */
	cache_flush();

	/* turn off I/D-cache */
	icache_disable();
	/* invalidate D-cache */
	dcache_disable();

#ifndef CONFIG_L2_OFF
	/* turn off L2 cache */
	l2_cache_disable();
	/* invalidate L2 cache also */
	v7_flush_dcache_all(get_device_type());
#endif
	i = 0;
	/* mem barrier (DSB via CP15 c7,c10,4) to sync up things */
	asm("mcr p15, 0, %0, c7, c10, 4": :"r"(i));

	/* turn off MMU */
	MMU_OFF();

#ifndef CONFIG_L2_OFF
	l2_cache_enable();
#endif

	return 0;
}
/*
 * Builds a per-method hook trampoline: a small block whose first three
 * words hold the common handler (func), the saved original-function
 * pointer, and the method object, followed by hand-assembled ARM code
 * (code[]) that saves the register file, calls the handler with
 * (method, args, saved-regs), then resumes the original function.
 *
 * Returns the trampoline pointer, or 0 on failure.
 */
static char *specific_hook (void *org, void *method_obj, void *func)
{
	/* Trampoline layout (byte offsets):
	 * ______________
	 * |__common_func_ 0
	 * |___old_func___ 4
	 * |__monomethod__ 8
	 * |_____code[]___ 12
	 */
	char *p = alloc_specific_trampo ();

	/* Fixme: hand-updating this trampoline machine code every time is
	 * painful — is there a more convenient, efficient, stable way? */
	unsigned char code[96] = {
		0x0F, 0x00, 0x2D, 0xE9, 0xFF, 0xFF, 0x2D, 0xE9, 0x50, 0x00, 0x4D, 0xE2,
		0x34, 0x00, 0x8D, 0xE5, 0x3C, 0xE0, 0x8D, 0xE5, 0x40, 0x10, 0x4D, 0xE2,
		0x30, 0x40, 0xA0, 0xE3, 0x04, 0x40, 0x4F, 0xE0, 0x08, 0x00, 0x94, 0xE5,
		0x0D, 0x20, 0xA0, 0xE1, 0x0F, 0xE0, 0xA0, 0xE1, 0x00, 0xF0, 0x94, 0xE5,
		0xFF, 0x1F, 0xBD, 0xE8, 0x04, 0xE0, 0x9D, 0xE5, 0x1C, 0xD0, 0x8D, 0xE2,
		0x01, 0x80, 0x2D, 0xE9, 0x58, 0x00, 0xA0, 0xE3, 0x00, 0x00, 0x4F, 0xE0,
		0x04, 0x00, 0x90, 0xE5, 0x04, 0x00, 0x8D, 0xE5, 0x01, 0x80, 0xBD, 0xE8,
		0x00, 0xF0, 0x20, 0xE3, 0x00, 0xF0, 0x20, 0xE3, 0x00, 0xF0, 0x20, 0xE3
	};
	/*
	 * Disassembly of code[]:
	 * stmfd sp!, {r0-r3}
	 * stmfd sp!, {r0-r12, sp, lr, pc}
	 * sub r0, sp, #80    // sp before the call                |
	 * str r0, [sp, #52]  // saved reg.sp := original sp       |-> all in
	 * str lr, [sp, #60]  // saved reg.pc := lr                |   support of
	 *                    //                                   |   stack backtraces
	 * sub r1, sp, #64    // arg 2: argument array
	 * ldr r4, =48
	 * sub r4, pc, r4
	 * ldr r0, [r4, #8]   // arg 1: method ptr
	 * mov r2, sp         // arg 3: saved register block, for backtraces
	 * mov lr, pc
	 * ldr pc, [r4]
	 * ldmfd sp!, {r0-r12}
	 * ldr lr, [sp, #4]   // restore lr, but not sp or pc
	 * add sp, sp, #28    // drop {sp, lr, pc} and {r0-r3} from the stack
	 * stmfd sp!, {r0, pc}
	 * ldr r0, =88
	 * sub r0, pc, r0
	 * ldr r0, [r0, #4]
	 * str r0, [sp, #4]   // set the old_func pointer as the PC to resume at
	 * ldmfd sp!, {r0, pc}
	 */
	memcpy (p + 12, code, sizeof (code));
	memcpy (p, &func, 4);
	memcpy (p + 8, &method_obj, 4);
	if (arm_hook (org, p + 12, (void**)(p + 4)) == 0) {
		return 0;
	}
	/* Instruction cache must see the freshly written trampoline. */
	cache_flush ((uint32_t)p, (uint32_t)(p + SP_BLOCK_SIZE));
	return p;
}
/* Flush one stream of the ALSA output: empties its cache and resampler
 * and resets the played counter, all under the output mutex so the
 * audio callback cannot observe a half-flushed stream. */
void output_alsa_flush_stream(struct output *h, struct output_stream *s)
{
	pthread_mutex_lock(&h->mutex);

	/* Flush the cache */
	cache_flush(s->cache);
	resample_flush(s->res);

	/* Must unlock input callback in cache after a flush */
	if(s->is_playing)
		cache_unlock(s->cache);

	s->played = 0;

	pthread_mutex_unlock(&h->mutex);
}
/* Destroy a cache and release all of its memory.  Dirty pages are
 * flushed first; buffers are then freed in reverse allocation order
 * (per-page buffers, entry table, cache struct).  NULL is a no-op. */
void cache_destructor (CACHE* cache)
{
	unsigned int page;

	if (cache == NULL)
		return;

	/* Clear out cache before destroying it */
	cache_flush(cache);

	/* Free memory in reverse allocation order */
	for (page = 0; page < cache->numberOfPages; page++)
		mem_free (cache->cacheEntries[page].cache);
	mem_free (cache->cacheEntries);
	mem_free (cache);
}
/*
 * cleanup_before_linux() is called just before we boot the Linux kernel:
 * interrupts are masked and the I/D caches are disabled then flushed so
 * the kernel starts from a known CPU state.
 *
 * Always returns 0.
 */
int cleanup_before_linux (void)
{
	/*
	 * this function is called just before we call linux
	 * it prepares the processor for linux
	 *
	 * we turn off caches etc ...
	 */

	disable_interrupts ();

	/* turn off I/D-cache */
	icache_disable();
	dcache_disable();
	/* flush I/D-cache */
	cache_flush();

	return 0;
}
/* * A client can match many different netgroups and it's tough to know * beforehand whether it will. If the concatenated string of netgroup * m_hostnames is >512 bytes, then enable the "use_ipaddr" mode. This * makes mountd change how it matches a client ip address when a mount * request comes in. It's more efficient at handling netgroups at the * expense of larger kernel caches. */ static void check_useipaddr(void) { nfs_client *clp; int old_use_ipaddr = use_ipaddr; unsigned int len = 0; /* add length of m_hostname + 1 for the comma */ for (clp = clientlist[MCL_NETGROUP]; clp; clp = clp->m_next) len += (strlen(clp->m_hostname) + 1); if (len > (NFSCLNT_IDMAX / 2)) use_ipaddr = 1; else use_ipaddr = 0; if (use_ipaddr != old_use_ipaddr) cache_flush(1); }
// Device reset: returns the TMS57002 to its power-on execution state.
// The status word keeps its SU mode bits cleared and is parked in the
// idle state; program counter, host interface index, delay-memory base
// registers and the mode bits of ST0/ST1 are cleared, and the
// instruction cache is flushed since cached decode state is no longer
// valid for the reset program counter.
void tms57002_device::device_reset()
{
	sti = (sti & ~(SU_MASK|S_READ|S_WRITE|S_BRANCH|S_HOST)) | (SU_ST0|S_IDLE);
	pc = 0;
	ca = 0;
	hidx = 0;
	id = 0;
	ba0 = 0;
	ba1 = 0;
	st0 &= ~(ST0_INCS | ST0_DIRI | ST0_FI | ST0_SIM | ST0_PLRI |
			ST0_PBCI | ST0_DIRO | ST0_FO | ST0_SOM | ST0_PLRO |
			ST0_PBCO | ST0_CNS);
	st1 &= ~(ST1_AOV | ST1_SFAI | ST1_SFAO | ST1_MOVM | ST1_MOV |
			ST1_SFMA | ST1_SFMO | ST1_RND | ST1_CRM | ST1_DBP);

	xba = 0; // Not sure but makes sense

	cache_flush();
}
/*
 * Syncs the gekko device: writes any dirty sectors held in the disc
 * cache back to the device.
 *
 * Returns EXT2_ET_OK on success, EXT2_ET_BLOCK_BITMAP_WRITE (with
 * errno = EIO) if the cache flush fails.
 * NOTE(review): the read-only case returns -1, not an EXT2_ET_* code —
 * callers treating the return strictly as errcode_t should confirm
 * this is intended.
 */
static errcode_t device_gekko_io_sync(io_channel dev)
{
    gekko_fd *fd = DEV_FD(dev);
    //ext2_log_trace("dev %p\n", dev);

    // Check that the device can be written to
    if(!(dev->flags & EXT2_FLAG_RW))
        return -1;

    // Flush any sectors in the disc cache (if required)
    if (fd->cache) {
        if (!cache_flush(fd->cache)) {
            errno = EIO;
            return EXT2_ET_BLOCK_BITMAP_WRITE;
        }
    }

    return EXT2_ET_OK;
}
/*
 * Purge all nodes from the cache.  All reference counts must be zero.
 *
 * Shakes every priority level (0..CACHE_MAX_PRIORITY) with the "purge"
 * flag set.  In debug builds, any node that survives (i.e. is still
 * referenced) is flushed to disk and reported before aborting.
 */
void
cache_purge(
	struct cache *		cache)
{
	int			i;

	for (i = 0; i <= CACHE_MAX_PRIORITY; i++)
		cache_shake(cache, i, 1);

#ifdef CACHE_DEBUG
	if (cache->c_count != 0) {
		/* flush referenced nodes to disk */
		cache_flush(cache);
		fprintf(stderr, "%s: shake on cache %p left %u nodes!?\n",
				__FUNCTION__, cache, cache->c_count);
		cache_abort();
	}
#endif
}