static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access, struct hwmem_region *region) { bool write = access & HWMEM_ACCESS_WRITE; bool read = access & HWMEM_ACCESS_READ; if (!write && !read) return; if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) { struct cach_range region_range; region_2_range(region, buf->size, ®ion_range); if (read || (write && buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_WB)) /* Perform defered invalidates */ invalidate_cpu_cache(buf, ®ion_range); if (read || (write && buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW)) expand_range(&buf->range_in_cpu_cache, ®ion_range); if (write && buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_WB) { struct cach_range dirty_range_addition; if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW) dirty_range_addition = region_range; else intersect_range(&buf->range_in_cpu_cache, ®ion_range, &dirty_range_addition); expand_range(&buf->range_dirty_in_cpu_cache, &dirty_range_addition); } } if (buf->cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE) { if (write) buf->in_cpu_write_buf = true; } }
/*
 * Console "apply" command:
 *   apply <name> <range> cmd ...
 *
 * Expands <range> into up to 256 items, then for each item substitutes it
 * for <name> in the remaining arguments and runs the resulting command via
 * noit_console_state_do().
 *
 * Returns 0 if every sub-command succeeded, -1 on usage/range errors, on
 * allocation failure, or if any sub-command failed.
 *
 * Ownership: expand_range() allocates both the expanded array and each of
 * its strings on success; all are freed here.  apply_replace() allocates
 * each substituted argument; freed here per iteration.
 */
int
noit_console_generic_apply(noit_console_closure_t ncct,
                           int argc, char **argv,
                           noit_console_state_t *dstate,
                           void *closure) {
  int i, j, count;
  char *name, *range;
  char **nargv, **expanded = NULL;
  const char *err;
  int problems = 0;
  if(argc < 3) {
    nc_printf(ncct, "apply <name> <range> cmd ...\n");
    return -1;
  }
  name = argv[0];
  range = argv[1];
  argc -= 2;
  argv += 2;

  count = expand_range(range, &expanded, 256, &err);
  if(!count) {
    nc_printf(ncct, "apply error: '%s' range produced nothing [%s]\n",
              range, err ? err : "unknown error");
    assert(expanded == NULL);
    return -1;
  }
  if(count < 0) {
    /* NOTE(review): assumes expand_range leaves expanded unallocated when
     * it reports a too-large range — verify against its contract. */
    nc_printf(ncct, "apply error: '%s' range would produce %d items.\n",
              range, count);
    return -1;
  }
  nargv = malloc(argc * sizeof(*nargv));
  if(!nargv) {
    /* Fix: original dereferenced nargv without checking malloc, and leaked
     * the expanded strings on failure. */
    nc_printf(ncct, "apply error: out of memory\n");
    for(i=0; i<count; i++) free(expanded[i]);
    free(expanded);
    return -1;
  }
  for(i=0; i<count; i++) {
    for(j=0; j<argc; j++) nargv[j] = apply_replace(argv[j], name, expanded[i]);
    if(noit_console_state_do(ncct, argc, nargv)) problems = -1;
    for(j=0; j<argc; j++) free(nargv[j]);
    free(expanded[i]);
  }
  free(nargv);
  free(expanded);
  return problems;
}
static void sync_buf_post_cpu(struct cach_buf *buf, enum hwmem_access next_access, struct hwmem_region *next_region) { bool write = next_access & HWMEM_ACCESS_WRITE; bool read = next_access & HWMEM_ACCESS_READ; struct cach_range region_range; if (!write && !read) return; region_2_range(next_region, buf->size, ®ion_range); if (write) { if (speculative_data_prefetch()) { /* Defer invalidate */ struct cach_range intersection; intersect_range(&buf->range_in_cpu_cache, ®ion_range, &intersection); expand_range(&buf->range_invalid_in_cpu_cache, &intersection); clean_cpu_cache(buf, ®ion_range); } else { flush_cpu_cache(buf, ®ion_range); } } if (read) clean_cpu_cache(buf, ®ion_range); if (buf->in_cpu_write_buf) { drain_cpu_write_buf(); buf->in_cpu_write_buf = false; } }