/*
 * Test driver: exercises do_memcpy() between a pmem-mapped file and an
 * anonymous mapping, once with src above dest and once with dest above src.
 *
 * usage: pmem_memcpy file srcoff destoff length
 */
int
main(int argc, char *argv[])
{
	int fd;
	char *dest;
	char *src;
	struct stat stbuf;

	START(argc, argv, "pmem_memcpy");

	if (argc != 5)
		FATAL("usage: %s file srcoff destoff length", argv[0]);

	fd = OPEN(argv[1], O_RDWR);
	/* NOTE(review): atoi() gives no error reporting; offsets are assumed
	 * to be well-formed decimal integers — confirm test harness input. */
	int dest_off = atoi(argv[2]);
	int src_off = atoi(argv[3]);
	size_t bytes = strtoul(argv[4], NULL, 0);

	FSTAT(fd, &stbuf);

	/* src > dst */
	dest = pmem_map(fd);
	if (dest == NULL)
		FATAL("!could not map file: %s", argv[1]);

	/* anonymous scratch mapping placed just above the pmem mapping */
	src = MMAP(dest + stbuf.st_size, stbuf.st_size,
		PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0);
	/*
	 * Its very unlikely that src would not be > dest. pmem_map
	 * chooses the first unused address >= 1TB, large
	 * enough to hold the give range, and 1GB aligned. If the
	 * addresses did not get swapped to allow src > dst, log error
	 * and allow test to continue.
	 */
	if (src <= dest) {
		swap_mappings(&dest, &src, stbuf.st_size, fd);
		if (src <= dest)
			ERR("cannot map files in memory order");
	}

	/* zero both regions so any missed copy is detectable */
	memset(dest, 0, (2 * bytes));
	memset(src, 0, (2 * bytes));

	do_memcpy(fd, dest, dest_off, src, src_off, bytes, argv[1]);

	/* dest > src */
	swap_mappings(&dest, &src, stbuf.st_size, fd);
	if (dest <= src) {
		ERR("cannot map files in memory order");
	}

	do_memcpy(fd, dest, dest_off, src, src_off, bytes, argv[1]);

	MUNMAP(dest, stbuf.st_size);
	MUNMAP(src, stbuf.st_size);

	CLOSE(fd);

	DONE(NULL);
}
/*
 * Copy 'count' elements of 'size' bytes each from a strided source
 * array into a tightly packed destination.  When the source is already
 * packed (stride == size) the whole region is moved in one call.
 */
static void copy_strided_array( GLubyte *dest,
				const GLubyte *src,
				GLuint size,
				GLuint stride,
				GLuint count )
{
   if (size == stride) {
      /* contiguous: one bulk copy covers everything */
      do_memcpy(dest, src, count * size);
      return;
   }

   {
      const GLubyte *in = src;
      GLubyte *out = dest;
      GLuint elt, b;

      for (elt = 0; elt < count; elt++) {
	 /* copy one element byte-by-byte, then skip the stride padding */
	 for (b = 0; b < size; b++)
	    out[b] = in[b];
	 out += size;
	 in += stride;
      }
   }
}
/*
 * Wrapper around do_memcpy() that measures the copy with rdtsc and
 * prints the achieved rate in clocks per byte.
 *
 * Returns whatever do_memcpy() returns (memcpy semantics: dest).
 */
static void * timed_memcpy( void *dest, const void *src, size_t n )
{
   void *ret;
   unsigned t1, t2;
   double rate;

   /*
    * Bug fix: the original cast pointers to 'unsigned', which truncates
    * on LP64 targets and makes the alignment test examine only the low
    * 32 bits.  Cast through size_t (pointer-sized on common ABIs;
    * strictly, uintptr_t is the portable type) so all address bits
    * participate in the 64-byte alignment check.
    */
   if ((((size_t) src) & 63) || (((size_t) dest) & 63))
      _mesa_printf("Warning - non-aligned texture copy!\n");

   t1 = fastrdtsc();
   ret = do_memcpy(dest, src, n);
   t2 = fastrdtsc();

   rate = time_diff(t1, t2);
   rate /= (double) n;		/* clocks per byte copied */
   _mesa_printf("timed_memcpy: %u %u --> %f clocks/byte\n", t1, t2, rate);
   return ret;
}