void flush_logs(void)
{
	int i;

	for (i = 0; i < idx; i++) {
		if (logs[i].handle)
			/*
			 * clflush_cache_range() takes (vaddr, size); the
			 * original call was missing the length argument.
			 * logs[i].size is an assumed field holding the
			 * length of the logged range.
			 */
			clflush_cache_range(logs[i].handle, logs[i].size);
	}
}
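/*
 * The snippet above is not self-contained; a minimal sketch of the
 * declarations it relies on, assuming each log entry records the mapped
 * address alongside its length. struct log_entry, MAX_LOGS, logs[], idx,
 * and the .size field are all hypothetical names introduced here.
 */
#include <asm/cacheflush.h>	/* clflush_cache_range() on x86 */

#define MAX_LOGS 64

struct log_entry {
	void *handle;		/* start of the range to flush */
	unsigned int size;	/* length of the range in bytes */
};

static struct log_entry logs[MAX_LOGS];
static int idx;			/* number of populated entries */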
/* Read function in ISP memory management. */
static int load_and_flush(void *virt, void *data, unsigned int bytes)
{
	unsigned int ptr;
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	ptr = (unsigned int)virt;

	bo = hmm_bo_device_search_in_range(&bo_device, ptr);
	ret = hmm_check_bo(bo, ptr);
	if (ret)
		return ret;

	des = (char *)data;
	while (bytes) {
		idx = (ptr - bo->vm_node->start) >> PAGE_SHIFT;
		offset = (ptr - bo->vm_node->start) - (idx << PAGE_SHIFT);

		src = (char *)kmap(bo->page_obj[idx].page);
		if (!src) {
			v4l2_err(&atomisp_dev,
				 "kmap buffer object page failed: pg_idx = %d\n",
				 idx);
			return -EINVAL;
		}

		src += offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		ptr += len;	/* update ptr for next loop */

		if (des) {
#ifdef USE_SSSE3
			_ssse3_memcpy(des, src, len);
#else
			memcpy(des, src, len);
#endif
			des += len;
		}

		clflush_cache_range(src, len);

		kunmap(bo->page_obj[idx].page);
	}

	return 0;
}
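/*
 * A sketch of how load_and_flush() can be exposed, assuming the convention
 * visible in its body that a NULL destination skips the copy and turns the
 * call into a pure flush. hmm_load and hmm_flush are illustrative wrapper
 * names, not confirmed by the snippet above.
 */
int hmm_load(void *virt, void *data, unsigned int bytes)
{
	/* Copy ISP memory into a kernel buffer, flushing as we go. */
	return load_and_flush(virt, data, bytes);
}

int hmm_flush(void *virt, unsigned int bytes)
{
	/* NULL destination: walk the pages only to clflush them. */
	return load_and_flush(virt, NULL, bytes);
}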
/*
 * Use non-temporal mov instructions to avoid allocating cache lines on
 * the copy, which makes the subsequent flushing less expensive.
 */
void memcpy_nt(void *dst, void *src, size_t len)
{
	int i;
	long long t1, t2, t3, t4;
	unsigned char *from, *to;
	size_t remain = len & 63;

	from = src;
	to = dst;
	i = len / 64;

	for (; i > 0; i--) {
		__asm__ __volatile__(
			" mov (%4), %0\n"
			" mov 8(%4), %1\n"
			" mov 16(%4), %2\n"
			" mov 24(%4), %3\n"
			" movnti %0, (%5)\n"
			" movnti %1, 8(%5)\n"
			" movnti %2, 16(%5)\n"
			" movnti %3, 24(%5)\n"
			" mov 32(%4), %0\n"
			" mov 40(%4), %1\n"
			" mov 48(%4), %2\n"
			" mov 56(%4), %3\n"
			" movnti %0, 32(%5)\n"
			" movnti %1, 40(%5)\n"
			" movnti %2, 48(%5)\n"
			" movnti %3, 56(%5)\n"
			: "=r"(t1), "=r"(t2), "=r"(t3), "=r"(t4)
			: "r"(from), "r"(to)
			: "memory");
		from += 64;
		to += 64;
	}

	/*
	 * Now do the tail of the block: the non-temporal path only handles
	 * whole 64-byte chunks, so the remainder goes through the cache and
	 * must be flushed explicitly.
	 */
	if (remain) {
		memcpy(to, from, remain);
		clflush_cache_range(to, remain);
	}

	/* Order the non-temporal stores before anything that follows. */
	__asm__ __volatile__("mfence\n" : : );
}
/*
 * Portable variant of memcpy_nt: a plain cached copy followed by an
 * explicit flush of the destination range.
 */
void memcpy_nt(void *dest, const void *src, size_t n)
{
	memcpy(dest, src, n);
	clflush_cache_range(dest, n);
}
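/*
 * Usage sketch for either memcpy_nt() variant: persisting one record into
 * a write-back-mapped persistent region. struct record, persist_record(),
 * and pmem_base are hypothetical names introduced here for illustration.
 */
struct record {
	u64 seq;
	char payload[56];	/* sizeof(struct record) == 64 */
};

static void persist_record(void *pmem_base, const struct record *rec)
{
	/*
	 * The 64-byte bulk of the copy bypasses the cache via movnti in
	 * the asm variant; any trailing bytes (none here, since the record
	 * is exactly one cache line) are copied normally and clflushed
	 * inside memcpy_nt(). The trailing mfence orders the stores before
	 * any subsequent "record valid" flag update by the caller.
	 */
	memcpy_nt(pmem_base, (void *)rec, sizeof(*rec));
}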
static long hv_cdev_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	hv_cdev_private *priv;
	u64 i;

	PINFO("%s:\n", __func__);

	priv = filp->private_data;

	switch (cmd) {
	case HV_MMLS_SIZE:
		return put_user(priv->dev_size, (u64 __user *)arg);
	case HV_MMLS_FLUSH_RANGE:
	{
		struct hv_mmls_range range;

		if (copy_from_user(&range, (struct hv_mmls_range __user *)arg,
				   sizeof(struct hv_mmls_range)))
			return -EFAULT;

		if (range.offset >= priv->dev_size)
			return -EINVAL;
		if (range.offset + range.size > priv->dev_size)
			range.size = priv->dev_size - range.offset;

		if (mutex_lock_interruptible(&priv->cmutex))
			return -ERESTARTSYS;
		if (use_static_buff) {
			/* parm: virtual start address, size */
			clflush_cache_range(priv->buff + range.offset,
					    range.size);
		} else {
			clflush_cache_range(priv->mmls_iomem + range.offset,
					    range.size);
		}
		mutex_unlock(&priv->cmutex);
		break;
	}
	case HV_MMLS_DUMP_MEM:
	{
		struct hv_mmls_range range;

		if (copy_from_user(&range, (struct hv_mmls_range __user *)arg,
				   sizeof(struct hv_mmls_range)))
			return -EFAULT;

		if (range.offset >= priv->dev_size)
			return -EINVAL;
		if (range.offset + range.size > priv->dev_size)
			range.size = priv->dev_size - range.offset;

		PDEBUG("mmls_iomem=%p, offset=%llu, size=%llu\n",
		       priv->mmls_iomem, range.offset, range.size);

		if (mutex_lock_interruptible(&priv->cmutex))
			return -ERESTARTSYS;
		/*
		 * Dump the requested window. The original loop ran from
		 * offset up to size, which truncates the dump whenever
		 * offset > 0; iterate over [offset, offset + size) instead,
		 * using a u64 index to match the range fields.
		 */
		for (i = range.offset; i < range.offset + range.size; i++)
			pr_info("mmls_iomem[%llu]=0x%02X\n", i,
				ioread8(priv->mmls_iomem + i));
		mutex_unlock(&priv->cmutex);
		break;
	}
	default:
		return -ENOTTY;
	}

	return 0;
}
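/*
 * User-space usage sketch for the flush ioctl above. The device node path,
 * the _IO request encodings, and the struct layout are assumptions mirrored
 * from the kernel side; the driver's real UAPI header is authoritative.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct hv_mmls_range {
	uint64_t offset;	/* byte offset into the device memory */
	uint64_t size;		/* length of the range to flush */
};

/* Hypothetical encodings, chosen only to make the example compile. */
#define HV_MMLS_SIZE		_IOR('h', 1, uint64_t)
#define HV_MMLS_FLUSH_RANGE	_IOW('h', 2, struct hv_mmls_range)

int main(void)
{
	struct hv_mmls_range range = { .offset = 0, .size = 4096 };
	uint64_t dev_size;
	int fd;

	fd = open("/dev/hv_mmls", O_RDWR);	/* hypothetical node */
	if (fd < 0)
		return 1;

	if (ioctl(fd, HV_MMLS_SIZE, &dev_size) == 0)
		printf("device size: %llu bytes\n",
		       (unsigned long long)dev_size);

	/* Flush the first page of the shared region back to memory. */
	if (ioctl(fd, HV_MMLS_FLUSH_RANGE, &range))
		perror("HV_MMLS_FLUSH_RANGE");

	close(fd);
	return 0;
}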