/*
 * Flush (clean + invalidate) the part of the buffer's CPU-cache range that
 * overlaps the requested range, then update the bookkeeping ranges.
 */
static void flush_cpu_cache(struct cach_buf *buf, struct cach_range *range)
{
	struct cach_range intersection;

	intersect_range(&buf->range_in_cpu_cache, range, &intersection);
	if (is_non_empty_range(&intersection)) {
		bool flushed_everything;

		expand_range_2_edge(&intersection, &buf->range_in_cpu_cache);

		flush_cpu_dcache(
			offset_2_vaddr(buf, intersection.start),
			offset_2_paddr(buf, intersection.start),
			range_length(&intersection),
			buf->cache_settings & HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
			&flushed_everything);

		if (flushed_everything) {
			if (!speculative_data_prefetch())
				null_range(&buf->range_in_cpu_cache);
			null_range(&buf->range_dirty_in_cpu_cache);
			null_range(&buf->range_invalid_in_cpu_cache);
		} else {
			if (!speculative_data_prefetch())
				shrink_range(&buf->range_in_cpu_cache,
							&intersection);
			shrink_range(&buf->range_dirty_in_cpu_cache,
							&intersection);
			shrink_range(&buf->range_invalid_in_cpu_cache,
							&intersection);
		}
	}
}

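/*
 * The range helpers used throughout this file are assumed rather than shown
 * here. Below is a minimal, hypothetical sketch of their semantics, treating
 * struct cach_range as a half-open [start, end) byte interval; the field
 * names and the U32_MAX empty-range sentinel are assumptions, not the
 * driver's actual definitions. expand_range_2_edge(), which grows an
 * intersection out to the nearest edge of the tracked range so that
 * shrink_range() stays well defined, is omitted.
 */
struct cach_range {
	u32 start;	/* Inclusive */
	u32 end;	/* Exclusive */
};

static bool is_non_empty_range(struct cach_range *range)
{
	return range->end > range->start;
}

static u32 range_length(struct cach_range *range)
{
	return range->end - range->start;
}

static void null_range(struct cach_range *range)
{
	/* Empty range; the sentinel makes a later expand_range() behave. */
	range->start = U32_MAX;
	range->end = 0;
}

static void expand_range(struct cach_range *range,
					struct cach_range *range_2_add)
{
	range->start = min(range->start, range_2_add->start);
	range->end = max(range->end, range_2_add->end);
}

static void intersect_range(struct cach_range *range_1,
		struct cach_range *range_2, struct cach_range *intersection)
{
	intersection->start = max(range_1->start, range_2->start);
	intersection->end = min(range_1->end, range_2->end);
	if (!is_non_empty_range(intersection))
		null_range(intersection);
}

static void shrink_range(struct cach_range *range,
				struct cach_range *range_2_remove)
{
	/*
	 * Assumes range_2_remove covers one edge of range, which is what
	 * expand_range_2_edge() arranges before the cache operations.
	 */
	if (range_2_remove->start <= range->start)
		range->start = max(range->start, range_2_remove->end);
	else
		range->end = min(range->end, range_2_remove->start);
	if (!is_non_empty_range(range))
		null_range(range);
}
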
/*
 * Clean (write back without invalidating) the dirty part of the buffer
 * that overlaps the requested range.
 */
static void clean_cpu_cache(struct cach_buf *buf, struct cach_range *range)
{
	struct cach_range intersection;

	intersect_range(&buf->range_dirty_in_cpu_cache, range, &intersection);
	if (is_non_empty_range(&intersection)) {
		bool cleaned_everything;

		expand_range_2_edge(&intersection,
					&buf->range_dirty_in_cpu_cache);

		clean_cpu_dcache(
			offset_2_vaddr(buf, intersection.start),
			offset_2_paddr(buf, intersection.start),
			range_length(&intersection),
			buf->cache_settings & HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
			&cleaned_everything);

		if (cleaned_everything)
			null_range(&buf->range_dirty_in_cpu_cache);
		else
			shrink_range(&buf->range_dirty_in_cpu_cache,
							&intersection);
	}
}

/*
 * Perform the invalidates that were deferred for the part of the buffer
 * that overlaps the requested range.
 */
static void invalidate_cpu_cache(struct cach_buf *buf,
						struct cach_range *range)
{
	struct cach_range intersection;

	intersect_range(&buf->range_invalid_in_cpu_cache, range,
							&intersection);
	if (is_non_empty_range(&intersection)) {
		bool flushed_everything;

		expand_range_2_edge(&intersection,
					&buf->range_invalid_in_cpu_cache);

		/*
		 * The cache handler never uses invalidate to discard data in
		 * the cache, so we can use flush instead, which is
		 * considerably faster for large buffers.
		 */
		flush_cpu_dcache(
			offset_2_vaddr(buf, intersection.start),
			offset_2_paddr(buf, intersection.start),
			range_length(&intersection),
			buf->cache_settings & HWMEM_ALLOC_HINT_INNER_CACHE_ONLY,
			&flushed_everything);

		if (flushed_everything) {
			null_range(&buf->range_invalid_in_cpu_cache);
			null_range(&buf->range_dirty_in_cpu_cache);
		} else {
			/*
			 * No need to shrink range_in_cpu_cache as invalidate
			 * is only used when we can't keep track of what's in
			 * the CPU cache.
			 */
			shrink_range(&buf->range_invalid_in_cpu_cache,
							&intersection);
		}
	}
}

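/*
 * Why invalidates are deferred at all: on cores that speculatively prefetch
 * data (e.g. ARMv7), lines can re-enter the cache at any time, so an
 * invalidate is only meaningful immediately before the next CPU access.
 * speculative_data_prefetch() is assumed to report this property; a minimal
 * sketch keyed off the kernel's CPU config (an assumption, not necessarily
 * the driver's actual test):
 */
static inline bool speculative_data_prefetch(void)
{
#ifdef CONFIG_CPU_V7
	return true;
#else
	return false;
#endif
}
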
/*
 * Prepare the buffer for a CPU access: perform deferred invalidates and
 * record which parts of the buffer may now be cached and dirty.
 */
static void sync_buf_pre_cpu(struct cach_buf *buf, enum hwmem_access access,
						struct hwmem_region *region)
{
	bool write = access & HWMEM_ACCESS_WRITE;
	bool read = access & HWMEM_ACCESS_READ;

	if (!write && !read)
		return;

	if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHED) {
		struct cach_range region_range;

		region_2_range(region, buf->size, &region_range);

		if (read || (write && buf->cache_settings &
						HWMEM_ALLOC_HINT_CACHE_WB))
			/* Perform deferred invalidates */
			invalidate_cpu_cache(buf, &region_range);
		if (read || (write && buf->cache_settings &
						HWMEM_ALLOC_HINT_CACHE_AOW))
			expand_range(&buf->range_in_cpu_cache, &region_range);
		if (write &&
		    buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_WB) {
			struct cach_range dirty_range_addition;

			if (buf->cache_settings & HWMEM_ALLOC_HINT_CACHE_AOW)
				dirty_range_addition = region_range;
			else
				intersect_range(&buf->range_in_cpu_cache,
					&region_range, &dirty_range_addition);

			expand_range(&buf->range_dirty_in_cpu_cache,
						&dirty_range_addition);
		}
	}
	if (buf->cache_settings & HWMEM_ALLOC_HINT_WRITE_COMBINE) {
		if (write)
			buf->in_cpu_write_buf = true;
	}
}

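/*
 * region_2_range() is assumed to convert a struct hwmem_region, which
 * describes equally sized blocks within the buffer, into the flat byte
 * range the access actually touches, clamped to the buffer size. A
 * hypothetical sketch; the hwmem_region field names (offset, count, start,
 * end, size) and the exact formula are assumptions:
 */
static void region_2_range(struct hwmem_region *region, u32 buf_size,
						struct cach_range *range)
{
	/* First touched byte in the first block... */
	range->start = region->offset + region->start * region->size;
	/* ...through the last touched byte in the last block. */
	range->end = region->offset +
			(region->count - 1) * region->size + region->end;

	/* Never operate outside the buffer. */
	range->end = min(range->end, buf_size);
}
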
/*
 * Make the buffer coherent for the next non-CPU access: clean, flush or
 * schedule deferred invalidates as required, and drain the CPU write
 * buffer if it may hold data for this buffer.
 */
static void sync_buf_post_cpu(struct cach_buf *buf,
		enum hwmem_access next_access, struct hwmem_region *next_region)
{
	bool write = next_access & HWMEM_ACCESS_WRITE;
	bool read = next_access & HWMEM_ACCESS_READ;
	struct cach_range region_range;

	if (!write && !read)
		return;

	region_2_range(next_region, buf->size, &region_range);

	if (write) {
		if (speculative_data_prefetch()) {
			/* Defer invalidate */
			struct cach_range intersection;

			intersect_range(&buf->range_in_cpu_cache,
						&region_range, &intersection);

			expand_range(&buf->range_invalid_in_cpu_cache,
								&intersection);

			clean_cpu_cache(buf, &region_range);
		} else {
			flush_cpu_cache(buf, &region_range);
		}
	}
	if (read)
		clean_cpu_cache(buf, &region_range);

	if (buf->in_cpu_write_buf) {
		drain_cpu_write_buf();

		buf->in_cpu_write_buf = false;
	}
}

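/*
 * Hedged usage sketch (illustrative only, not part of this file): the pre
 * hook brackets a CPU access, and the post hook is run when the buffer is
 * handed back to hardware, with the device's upcoming access passed as
 * next_access/next_region. The function name below is hypothetical.
 */
static void example_cpu_write_then_device_read(struct cach_buf *buf,
					struct hwmem_region *region)
{
	/* The CPU is about to write this region. */
	sync_buf_pre_cpu(buf, HWMEM_ACCESS_WRITE, region);

	/* ... CPU fills in the region ... */

	/* The device will now read it; make its view of memory coherent. */
	sync_buf_post_cpu(buf, HWMEM_ACCESS_READ, region);
}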