static void intel_wpos_line( intelContextPtr intel,
                             intelVertexPtr v0,
                             intelVertexPtr v1 )
{
   /* Duplicate the leading bytes of each vertex into its window-position
    * slot before handing the line off to the rasterizer.
    */
   const GLuint wpos_off = intel->wpos_offset;
   const GLuint wpos_sz = intel->wpos_size;
   intelVertexPtr verts[2] = { v0, v1 };
   int i;

   for (i = 0; i < 2; i++)
      __memcpy((char *) verts[i] + wpos_off, verts[i], wpos_sz);

   intel_draw_line( intel, v0, v1 );
}
static void intel_wpos_triangle(struct intel_context *intel,
                                intelVertexPtr v0, intelVertexPtr v1,
                                intelVertexPtr v2)
{
   /* Duplicate the leading bytes of each vertex into its window-position
    * slot, then emit the triangle.
    */
   const GLuint wpos_off = intel->wpos_offset;
   const GLuint wpos_sz = intel->wpos_size;
   intelVertexPtr verts[3] = { v0, v1, v2 };
   int i;

   for (i = 0; i < 3; i++)
      __memcpy((char *) verts[i] + wpos_off, verts[i], wpos_sz);

   intel_draw_triangle(intel, v0, v1, v2);
}
/* Upload an image from mesa's internal copy. */
static void i810UploadTexLevel( i810ContextPtr imesa,
				i810TextureObjectPtr t, int hwlevel )
{
   /* Copy one mipmap level from Mesa's backing store into the card's
    * texture buffer at the level's precomputed offset.
    */
   const struct gl_texture_image *image = t->image[hwlevel].image;
   int j;
   GLuint texelBytes;

   /* Nothing to upload until Mesa actually has data for this level. */
   if (!image || !image->Data)
      return;

   texelBytes = _mesa_get_format_bytes(image->TexFormat);

   if (image->Width * texelBytes == t->Pitch) {
      /* Fast path: rows are exactly pitch-wide, so the whole level is
       * contiguous and can be uploaded in a single copy. */
      GLubyte *dst = (GLubyte *)(t->BufAddr + t->image[hwlevel].offset);
      GLubyte *src = (GLubyte *)image->Data;

      memcpy( dst, src, t->Pitch * image->Height );
   }
   else {
      /* Rows are narrower than the hardware pitch: copy row by row,
       * advancing dst by the pitch and src by the packed row width. */
      switch (texelBytes) {
      case 1:
	 {
	    GLubyte *dst = (GLubyte *)(t->BufAddr + t->image[hwlevel].offset);
	    GLubyte *src = (GLubyte *)image->Data;

	    for (j = 0 ; j < image->Height ; j++, dst += t->Pitch) {
	       __memcpy(dst, src, image->Width );
	       src += image->Width;
	    }
	 }
	 break;

      case 2:
	 {
	    /* dst advances by Pitch/2 because it is a GLushort pointer. */
	    GLushort *dst = (GLushort *)(t->BufAddr + t->image[hwlevel].offset);
	    GLushort *src = (GLushort *)image->Data;

	    for (j = 0 ; j < image->Height ; j++, dst += (t->Pitch/2)) {
	       __memcpy(dst, src, image->Width * 2 );
	       src += image->Width;
	    }
	 }
	 break;

      default:
	 /* 24/32-bit texels are not handled by this driver path. */
	 fprintf(stderr, "%s: Not supported texel size %d\n",
		 __FUNCTION__, texelBytes);
      }
   }
}
int main(void)
{
	/* Copy the first four characters of s1 into a properly sized,
	 * NUL-terminated destination and print both strings.
	 *
	 * BUGFIX: the original `char* s1 = "...", s2;` declared s2 as a
	 * single char (the * binds to s1 only); copying through
	 * `(void*)s2` dereferenced an uninitialized value (UB), and the
	 * 4-byte copy had no terminator for the later "%s".
	 */
	const char *s1 = "Hello World";
	char s2[5] = {0};	/* 4 copied bytes + NUL terminator */

	__memcpy(s2, s1, 4);

	printf("%s\n", s1);	/* "Hello World" */
	printf("%s\n", s2);	/* "Hell" */

	return 0;
}
static void memcpy_func()
{
	/* Compare the source and the dst memory area.
	 *
	 * Verifies that __memcpy copies exactly j bytes correctly for
	 * every prefix length j of buffers sized 1..3999.
	 *
	 * BUGFIX: the source buffer was never initialized, so the
	 * comparison below read indeterminate (freshly malloc'd) bytes,
	 * which is undefined behavior and a meaningless test.
	 * NOTE(review): malloc results are still unchecked -- the cmocka
	 * range asserts would fault on NULL anyway.
	 */
	char* src;
	char* dst;
	for(int i = 1; i < 4000; i++) {
		src = malloc(i);
		dst = malloc(i);

		/* The two areas must not overlap or the test is meaningless. */
		assert_not_in_range(dst, src, src + i);
		assert_not_in_range(src, dst, dst + i);

		/* Fill the source with a deterministic, position-dependent
		 * pattern so every byte comparison is well-defined. */
		for(int k = 0; k < i; k++)
			src[k] = (char)(k ^ (k >> 3));

		for(int j = 1; j <= i; j++) {
			__memcpy(dst, src, j);
			for(int k = 0; k < j; k++) {
				//printf("memcpy: %d, %d, %d\n", i, j, k);
				assert_int_equal(dst[k], src[k]);
			}
		}

		free(src);
		src = NULL;
		free(dst);
		dst = NULL;
	}
}
static ssize_t pcap_mmap_write_pcap_pkt(int fd, struct pcap_pkthdr *hdr,
					uint8_t *packet, size_t len)
{
	/* Append one pcap record (header + payload) to the memory-mapped
	 * file under the global lock, growing the mapping by 1.5x whenever
	 * the record would not fit.  Returns the number of bytes appended.
	 */
	int ret;
	spinlock_lock(&lock);
	if ((unsigned long) (pcurr - pstart) + sizeof(*hdr) + len > map_size) {
		size_t map_size_old = map_size;
		off_t offset = (pcurr - pstart);	/* preserve write position */

		/* Grow by half, rounded up to a page boundary. */
		map_size = PAGE_ALIGN(map_size_old * 3 / 2);

		/* Extend the backing file: seek to the new end and write a
		 * single byte so the enlarged mapping is file-backed. */
		ret = lseek(fd, map_size, SEEK_SET);
		if (ret < 0)
			panic("Cannot lseek pcap file!\n");
		ret = write_or_die(fd, "", 1);
		if (ret != 1)
			panic("Cannot write file!\n");

		/* mremap may move the mapping, hence pcurr is rebuilt from
		 * the saved offset below. */
		pstart = mremap(pstart, map_size_old, map_size, MREMAP_MAYMOVE);
		if (pstart == MAP_FAILED)
			puke_and_die(EXIT_FAILURE, "mmap of file failed!");
		ret = madvise(pstart, map_size, MADV_SEQUENTIAL);
		if (ret < 0)
			panic("Failed to give kernel mmap advise!\n");

		pcurr = pstart + offset;
	}

	__memcpy_small(pcurr, hdr, sizeof(*hdr));
	pcurr += sizeof(*hdr);
	__memcpy(pcurr, packet, len);
	pcurr += len;
	spinlock_unlock(&lock);

	return sizeof(*hdr) + len;
}
/*
 * KASAN-checked memcpy: validate both regions against shadow memory,
 * then delegate the actual copy to the uninstrumented __memcpy.
 */
void *memcpy(void *dest, const void *src, size_t len)
{
	/* false = read access (source), true = write access (dest);
	 * source is checked first so reports blame the read. */
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}
/*
 * ASAN-instrumented memcpy: report any poisoned byte in either region
 * before performing the real copy via __memcpy.
 */
void *memcpy(void *dest, const void *src, size_t len)
{
	__asan_loadN((unsigned long)src, len);		/* source is read */
	__asan_storeN((unsigned long)dest, len);	/* destination is written */

	return __memcpy(dest, src, len);
}
bool MemoryStream::write(const void* buffer, uint toWrite) { Assert(buffer != NULL); if (buffer == NULL || toWrite == 0) return false; if (m_pos + toWrite > m_bufSize) { if (m_growBytes == 0) return false; growFile(m_pos + toWrite); } Assert(m_pos + toWrite <= m_bufSize); // growFile success ? __memcpy((uchar*)m_buffer + m_pos, (uchar*)buffer, toWrite); m_pos += toWrite; if (m_pos > m_fileSize) m_fileSize = m_pos; return true; }
int recv_fds(int sock, int *fds, int nr_fds, struct fd_opts *opts)
{
	/* Receive nr_fds file descriptors over an AF_UNIX socket, in
	 * chunks of at most CR_SCM_MAX_FD delivered as SCM_RIGHTS control
	 * messages.  Optionally receives per-fd options into opts.
	 * Returns 0 on success, negative error otherwise.
	 */
	struct scm_fdset fdset;
	struct cmsghdr *cmsg;
	int *cmsg_data;
	int ret;
	int i, min_fd;

	/* NOTE(review): cmsg_data is computed once and reused for every
	 * chunk -- assumes scm_fdset_init_chunk never relocates the
	 * control buffer inside fdset; confirm against scm.c. */
	cmsg_data = scm_fdset_init(&fdset, NULL, 0, opts != NULL);
	for (i = 0; i < nr_fds; i += min_fd) {
		min_fd = min(CR_SCM_MAX_FD, nr_fds - i);
		scm_fdset_init_chunk(&fdset, min_fd, opts != NULL);

		ret = __sys(recvmsg)(sock, &fdset.hdr, 0);
		if (ret <= 0)
			return ret ? : -1;	/* map a 0-byte read to -1 */

		cmsg = CMSG_FIRSTHDR(&fdset.hdr);
		if (!cmsg || cmsg->cmsg_type != SCM_RIGHTS)
			return -EINVAL;
		if (fdset.hdr.msg_flags & MSG_CTRUNC)
			return -ENFILE;

		/* Recompute how many descriptors actually arrived. */
		min_fd = (cmsg->cmsg_len - sizeof(struct cmsghdr)) / sizeof(int);
		/*
		 * In case if kernel screwed the recipient, most probably
		 * the caller stack frame will be overwriten, just scream
		 * and exit.
		 *
		 * FIXME Need to sanitize util.h to be able to include it
		 * into files which do not have glibc and a couple of
		 * sys_write_ helpers. Meawhile opencoded BUG_ON here.
		 */
		BUG_ON(min_fd > CR_SCM_MAX_FD);
		if (unlikely(min_fd <= 0))
			return -1;
		__memcpy(&fds[i], cmsg_data, sizeof(int) * min_fd);

#ifdef SCM_FDSET_HAS_OPTS
		if (opts)
			__memcpy(opts + i, fdset.opts, sizeof(struct fd_opts) * min_fd);
#endif
	}

	return 0;
}
/* x86 memcpy: pick the copy routine at build time. */
__visible void *memcpy(void *to, const void *from, size_t n)
{
#ifdef CONFIG_X86_USE_3DNOW
	/* Kernels built for 3DNow!-capable CPUs use the MMX/3DNow! copy. */
	return __memcpy3d(to, from, n);
#else
	/* Everyone else gets the generic x86 string-op implementation. */
	return __memcpy(to, from, n);
#endif
}
static void intel_wpos_line( intelContextPtr intel,
                             intelVertexPtr v0,
                             intelVertexPtr v1 )
{
   /* Copy each vertex header into its window-position slot, then flip
    * the copied Y coordinate from GL (bottom-up) to window (top-down)
    * orientation using the drawable height.
    */
   const GLuint wpos_off = intel->wpos_offset;
   const GLuint wpos_sz = intel->wpos_size;
   const GLfloat win_h = intel->driDrawable->h;
   intelVertexPtr verts[2] = { v0, v1 };
   int i;

   for (i = 0; i < 2; i++) {
      GLfloat *wpos = (GLfloat *)((char *) verts[i] + wpos_off);

      __memcpy(wpos, verts[i], wpos_sz);
      wpos[1] = win_h - wpos[1];
   }

   intel_draw_line( intel, v0, v1 );
}
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
                            const void *data, GLuint bytes, GLuint flags)
{
   /* Append a block of pre-built commands/data to the batch buffer.
    * The batch is dword-based, so the payload must be a whole number
    * of 32-bit words.
    */
   assert(bytes % 4 == 0);

   intel_batchbuffer_require_space(batch, bytes, flags);
   __memcpy(batch->ptr, data, bytes);
   batch->ptr += bytes;
}
/* Early boot entry: set up RAM from the load image, run the BSP, and
 * reset when (if ever) it returns.  Never returns normally.
 */
void cmain (void)
{
	/*
	 * init variable sections
	 */
	/* Copy initialized data sections from their load (ROM) addresses
	 * to their runtime (RAM) addresses; symbols come from the linker
	 * script. */
	__memcpy (__sdata2_start, __sdata2_load, __sdata2_end - __sdata2_start);
	__memcpy (__sdata_start , __sdata_load , __sdata_end - __sdata_start);
	__memcpy (__data_start , __data_load , __data_end - __data_start);
	/* Zero the BSS sections. */
	__bzero (__sbss2_start , __sbss2_end - __sbss2_start);
	__bzero (__sbss_start , __sbss_end - __sbss_start);
	__bzero (__bss_start , __bss_end - __bss_start);

	/* printk( "start of BSP\n"); */
	boot_card(0);
	/* printk( "end of BSP\n"); */

	/* NOTE(review): looks like a fast system reset via system control
	 * port A (0x92, bit 0) -- confirm argument order of __outb for
	 * this board. */
	__outb (0x92, 0x01);

	/* Hang forever if the reset did not take effect. */
	while (1) ;
}
/* The system memcpy (at least on ubuntu 5.10) suffers a ~4x slowdown
 * when copying to agp (writecombined) memory from a source that is not
 * 64-byte aligned.  The x86 __memcpy is immune to this but is slightly
 * (10%-ish) slower than the system memcpy otherwise.  sse_memcpy has a
 * slight cliff at 64/32 bytes yet isn't much faster than x86_memcpy
 * for agp copies.
 *
 * TODO: switch dynamically.
 */
static void *
do_memcpy(void *dest, const void *src, size_t n)
{
	/* One combined test: any low-6 bit set in either address means at
	 * least one of the two is not 64-byte aligned. */
	if ((((unsigned long) src | (unsigned long) dest) & 63) != 0)
		return __memcpy(dest, src, n);

	return memcpy(dest, src, n);
}
static void intel_wpos_point(struct intel_context *intel, intelVertexPtr v0)
{
   /* Mirror the vertex header into its window-position slot, then draw. */
   char *wpos_dst = (char *) v0 + intel->wpos_offset;

   __memcpy(wpos_dst, v0, intel->wpos_size);
   intel_draw_point(intel, v0);
}
/*
 * memcpy - copy exactly @n bytes from @src into @dst.
 * @dst: destination array, at least @n bytes, must not overlap @src
 * @src: source data, at least @n bytes
 * @n:   number of bytes to copy
 *
 * Returns @dst.  No terminating NUL is looked for or appended; for
 * overlapping regions use memmove instead.
 */
void *memcpy(void *dst, const void *src, size_t n)
{
#ifdef __HAVE_ARCH_MEMCPY
	/* The architecture supplies an optimized implementation. */
	return __memcpy(dst, src, n);
#else
	unsigned char *to = dst;
	const unsigned char *from = src;

	for (; n > 0; n--)
		*to++ = *from++;

	return dst;
#endif /* __HAVE_ARCH_MEMCPY */
}
/*
 * [0 ... m] [m+1 ... num-1]
 */
/* Merge two adjacent sorted runs of base into sorted order, via a
 * scratch buffer of num elements.  swapf is accepted for signature
 * symmetry with callers but is unused here. */
static void __merge(void *base, size_t num, size_t size, size_t m, CMP_FUNC cmpf, SWAP_FUNC swapf)
{
	unsigned int i=0, j=m+1, c;
	void *ptmp, *pold;

	/* pold keeps the scratch start; ptmp is advanced as elements are
	 * appended. */
	pold = ptmp = __malloc(num*size);
	__assert(base && ptmp && m <= num);

//	printf("----------> m: %d\n", m);
	/* Take the smaller head element of either run; <= keeps the merge
	 * stable (left run wins ties). */
	while (i<=m && j<num) {
		c = cmpf((char*)base+i*size, (char*)base+j*size)<=0 ? i++ : j++;
		__memcpy(ptmp, (char*)base+c*size, size);
		ptmp = (char*)ptmp+size;
	}
	/* Exactly one run can have a leftover tail; copy it wholesale. */
	if (i<=m)
		__memcpy(ptmp, (char*)base+i*size, (m-i+1)*size);
	if (j<num)
		__memcpy(ptmp, (char*)base+j*size, (num-j)*size);
	/* Copy the merged result back over the input range. */
	__memcpy(base, pold, num*size);
//	__dump(base, num, size);
	__free(pold);
}
bool MemoryStream::read(IN OUT void* buffer, uint toRead)
{
    // Copy up to toRead bytes from the stream into buffer, clamping at
    // the logical end of file.  Returns false on bad input or when the
    // stream position is already at (or past) EOF.
    Assert(buffer != NULL);
    if (buffer == NULL || toRead == 0)
        return false;

    if (m_pos >= m_fileSize)
        return false;   // eof

    // Clamp the request to the bytes actually remaining.
    uint bytesOfRead = (m_pos + toRead > m_fileSize)
                           ? (uint)(m_fileSize - m_pos)
                           : toRead;

    __memcpy((uchar*)buffer, (uchar*)m_buffer + m_pos, bytesOfRead);
    m_pos += bytesOfRead;

    return true;
}
void * memcpy(void *dst, const void *src, size_t len) #endif /* __GNUC__ && __GNUC__ < 3 */ { void * result = dst; assert( (len == 0) || (dst != NULL && src != NULL && (int)len > 0) ); #if defined(CHECK_FOR_NULL_POINTERS) { if(dst == NULL || src == NULL) { __set_errno(EFAULT); goto out; } } #endif /* CHECK_FOR_NULL_POINTERS */ if(len > 0 && dst != src) { char * to = dst; const char * from = src; /* The two memory regions may not overlap. */ assert((to) >= (from)+len || (from) >= (to )+len); #if 0 { while(len-- > 0) (*to++) = (*from++); } #else { __memcpy((unsigned char *)to,(unsigned char *)from,len); } #endif } /* out: */ return(result); }
/***** Get inode using directory and name */
static struct dentry *msdos_lookup(struct inode *dir, struct dentry *dentry,
				   struct nameidata *nd)
{
	struct super_block *sb = dir->i_sb;
	struct fat_slot_info sinfo;
	struct inode *inode;
	int err;
	/* Debug aid: log a NUL-terminated, truncated copy of the name. */
	char szBuffer[20];
	int len = min((int)dentry->d_name.len, 19);
	memset(szBuffer, 0, 20);
	__memcpy(szBuffer, dentry->d_name.name, len);
	printk (KERN_INFO "myfat: msdos_lookup, dir is %s\n", szBuffer);

	lock_super(sb);

	err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo);
	if (err) {
		if (err == -ENOENT) {
			/* Missing entry: fall through to splice a negative
			 * dentry (inode == NULL). */
			inode = NULL;
			goto out;
		}
		goto error;
	}

	inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
	// by rzq: hold the reference count
	brelse(sinfo.bh);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto error;
	}
out:
	unlock_super(sb);
	dentry->d_op = &msdos_dentry_operations;
	dentry = d_splice_alias(inode, dentry);
	/* d_splice_alias may return a different dentry; re-apply ops. */
	if (dentry)
		dentry->d_op = &msdos_dentry_operations;
	return dentry;

error:
	unlock_super(sb);
	return ERR_PTR(err);
}
static ssize_t pcap_sg_write_pcap_pkt(int fd, struct pcap_pkthdr *hdr,
				      uint8_t *packet, size_t len)
{
	/* Buffer one pcap record (header + payload) into the next iovec
	 * slot, flushing all IOVSIZ slots to disk with writev() when the
	 * array is full.  Returns the record size buffered.
	 *
	 * NOTE(review): iov[c].iov_base is assumed to be preallocated
	 * large enough for sizeof(*hdr) + len -- confirm at the iov
	 * allocation site.
	 */
	ssize_t ret;
	spinlock_lock(&lock);
	if (unlikely(c == IOVSIZ)) {
		/* All slots filled: flush the batch, then start over. */
		ret = writev(fd, iov, IOVSIZ);
		if (ret < 0)
			panic("writev I/O error!\n");
		c = 0;
	}

	iov[c].iov_len = 0;
	/* Pack header followed by payload into the current slot. */
	__memcpy_small(iov[c].iov_base, hdr, sizeof(*hdr));
	iov[c].iov_len += sizeof(*hdr);
	__memcpy(iov[c].iov_base + iov[c].iov_len, packet, len);
	iov[c].iov_len += len;
	ret = iov[c].iov_len;
	c++;
	spinlock_unlock(&lock);

	return ret;
}
static ssize_t pcap_mmap_read_pcap_pkt(int fd, struct pcap_pkthdr *hdr, uint8_t *packet, size_t len) { spinlock_lock(&lock); if (unlikely((unsigned long) (pcurr + sizeof(*hdr) - pstart) > map_size)) { spinlock_unlock(&lock); return -ENOMEM; } __memcpy_small(hdr, pcurr, sizeof(*hdr)); pcurr += sizeof(*hdr); if (unlikely((unsigned long) (pcurr + hdr->len - pstart) > map_size)) { spinlock_unlock(&lock); return -ENOMEM; } __memcpy(packet, pcurr, hdr->len); pcurr += hdr->len; spinlock_unlock(&lock); if (unlikely(hdr->len == 0)) return -EINVAL; /* Bogus packet */ return sizeof(*hdr) + hdr->len; }
int send_fds(int sock, struct sockaddr_un *saddr, int len, int *fds, int nr_fds, void *data, unsigned ch_size)
{
	/* Send nr_fds descriptors over an AF_UNIX socket as SCM_RIGHTS
	 * control messages, in chunks of at most CR_SCM_MAX_FD, with an
	 * optional per-chunk payload of ch_size bytes per fd taken from
	 * data.  Returns 0 on success, -1 on a failed/short send.
	 */
	struct scm_fdset fdset;
	int *cmsg_data;
	int i, min_fd, ret;

	/* cmsg_data points into fdset's control buffer and is reused for
	 * every chunk. */
	cmsg_data = scm_fdset_init(&fdset, saddr, len);
	for (i = 0; i < nr_fds; i += min_fd) {
		min_fd = min(CR_SCM_MAX_FD, nr_fds - i);
		scm_fdset_init_chunk(&fdset, min_fd, data, ch_size);
		__memcpy(cmsg_data, &fds[i], sizeof(int) * min_fd);

		ret = __sys(sendmsg)(sock, &fdset.hdr, 0);
		if (ret <= 0)
			return ret ? : -1;	/* map a 0-byte send to -1 */

		/* void-pointer arithmetic: GCC extension (byte-sized). */
		if (data)
			data += min_fd * ch_size;
	}

	return 0;
}
static void memcpy_sse_func()
{
	/* Exercise the SSE __memcpy variant: for buffers sized 1..3999,
	 * copy every prefix length and verify the bytes byte-for-byte.
	 *
	 * BUGFIX: the source buffer was never initialized, so the
	 * comparison below read indeterminate (freshly malloc'd) bytes,
	 * which is undefined behavior and a meaningless test.
	 * NOTE(review): malloc results are still unchecked -- the cmocka
	 * range asserts would fault on NULL anyway.
	 */
	char* src;
	char* dst;
	for(int i = 1; i < 4000; i++) {
		src = malloc(i);
		dst = malloc(i);

		/* The two areas must not overlap or the test is meaningless. */
		assert_not_in_range(dst, src, src + i);
		assert_not_in_range(src, dst, dst + i);

		/* Deterministic, position-dependent source contents. */
		for(int k = 0; k < i; k++)
			src[k] = (char)(k ^ (k >> 3));

		for(int j = 1; j <= i; j++) {
			__memcpy(dst, src, j);
			for(int k = 0; k < j; k++) {
				assert_int_equal(dst[k], src[k]);
			}
		}

		free(src);
		src = NULL;
		free(dst);
		dst = NULL;
	}
}
/* If netsniff-ngs in device is on a tap, it can efficiently filter out
 * some interesting packets and give them to the out device for testing
 * or debugging for instance. */
static void enter_mode_rx_to_tx(struct mode *mode)
{
	int rx_sock, ifindex_in, ifindex_out;
	unsigned int size_in, size_out, it_in = 0, it_out = 0;
	unsigned long fcnt = 0;
	uint8_t *in, *out;
	short ifflags = 0;
	struct frame_map *hdr_in, *hdr_out;
	struct ring tx_ring;
	struct ring rx_ring;
	struct pollfd rx_poll;
	struct sock_fprog bpf_ops;

	/* Sanity: distinct, up-and-running ingress/egress devices. */
	if (!strncmp(mode->device_in, mode->device_out, strlen(mode->device_in)))
		panic("Ingress/egress devices must be different!\n");
	if (!device_up_and_running(mode->device_out))
		panic("Egress device not up and running!\n");
	if (!device_up_and_running(mode->device_in))
		panic("Ingress device not up and running!\n");

	set_memcpy();
	rx_sock = pf_socket();
	tx_sock = pf_socket();

	memset(&tx_ring, 0, sizeof(tx_ring));
	memset(&rx_ring, 0, sizeof(rx_ring));
	memset(&rx_poll, 0, sizeof(rx_poll));
	memset(&bpf_ops, 0, sizeof(bpf_ops));

	ifindex_in = device_ifindex(mode->device_in);
	size_in = ring_size(mode->device_in, mode->reserve_size);
	ifindex_out = device_ifindex(mode->device_out);
	size_out = ring_size(mode->device_out, mode->reserve_size);

	/* The BPF filter is attached to the RX side only. */
	enable_kernel_bpf_jit_compiler();
	bpf_parse_rules(mode->filter, &bpf_ops);
	bpf_attach_to_sock(rx_sock, &bpf_ops);

	/* Bring up the mmap'ed PF_PACKET RX ring. */
	setup_rx_ring_layout(rx_sock, &rx_ring, size_in, mode->jumbo_support);
	create_rx_ring(rx_sock, &rx_ring);
	mmap_rx_ring(rx_sock, &rx_ring);
	alloc_rx_ring_frames(&rx_ring);
	bind_rx_ring(rx_sock, &rx_ring, ifindex_in);
	prepare_polling(rx_sock, &rx_poll);

	/* Bring up the TX ring on the egress device. */
	set_packet_loss_discard(tx_sock);
	setup_tx_ring_layout(tx_sock, &tx_ring, size_out, mode->jumbo_support);
	create_tx_ring(tx_sock, &tx_ring);
	mmap_tx_ring(tx_sock, &tx_ring);
	alloc_tx_ring_frames(&tx_ring);
	bind_tx_ring(tx_sock, &tx_ring, ifindex_out);

	mt_init_by_seed_time();
	dissector_init_all(mode->print_mode);

	if (mode->promiscuous == true) {
		ifflags = enter_promiscuous_mode(mode->device_in);
		printf("PROMISC\n");
	}

	/* Arm the periodic timer that lets the kernel pull TX frames. */
	if (mode->kpull)
		interval = mode->kpull;

	itimer.it_interval.tv_sec = 0;
	itimer.it_interval.tv_usec = interval;
	itimer.it_value.tv_sec = 0;
	itimer.it_value.tv_usec = interval;
	setitimer(ITIMER_REAL, &itimer, NULL);

	printf("BPF:\n");
	bpf_dump_all(&bpf_ops);
	printf("MD: RXTX %luus\n\n", interval);
	printf("Running! Hang up with ^C!\n\n");

	while (likely(sigint == 0)) {
		/* Drain every frame the kernel has handed to user space. */
		while (user_may_pull_from_rx(rx_ring.frames[it_in].iov_base)) {
			hdr_in = rx_ring.frames[it_in].iov_base;
			in = ((uint8_t *) hdr_in) + hdr_in->tp_h.tp_mac;
			fcnt++;

			if (mode->packet_type != PACKET_ALL)
				if (mode->packet_type != hdr_in->s_ll.sll_pkttype)
					goto next;

			hdr_out = tx_ring.frames[it_out].iov_base;
			out = ((uint8_t *) hdr_out) + TPACKET_HDRLEN -
			      sizeof(struct sockaddr_ll);

			/* If we cannot pull, look for a different slot. */
			for (; !user_may_pull_from_tx(tx_ring.frames[it_out].iov_base) &&
			       likely(!sigint);) {
				if (mode->randomize)
					next_rnd_slot(&it_out, &tx_ring);
				else
					next_slot(&it_out, &tx_ring);
				hdr_out = tx_ring.frames[it_out].iov_base;
				out = ((uint8_t *) hdr_out) + TPACKET_HDRLEN -
				      sizeof(struct sockaddr_ll);
			}

			/* Clone the TPACKET header and payload into the TX
			 * slot, then hand the frame back to the kernel. */
			tpacket_hdr_clone(&hdr_out->tp_h, &hdr_in->tp_h);
			__memcpy(out, in, hdr_in->tp_h.tp_len);
			kernel_may_pull_from_tx(&hdr_out->tp_h);
			if (mode->randomize)
				next_rnd_slot(&it_out, &tx_ring);
			else
				next_slot(&it_out, &tx_ring);

			/* Should actually be avoided ... */
			show_frame_hdr(hdr_in, mode->print_mode,
				       RING_MODE_INGRESS);
			dissector_entry_point(in, hdr_in->tp_h.tp_snaplen,
					      mode->link_type);

			if (frame_cnt_max != 0 && fcnt >= frame_cnt_max) {
				sigint = 1;
				break;
			}
next:
			/* Return the RX slot to the kernel and advance. */
			kernel_may_pull_from_rx(&hdr_in->tp_h);
			next_slot(&it_in, &rx_ring);

			if (unlikely(sigint == 1))
				goto out;
		}

		poll(&rx_poll, 1, -1);
		poll_error_maybe_die(rx_sock, &rx_poll);
	}
out:
	sock_print_net_stats(rx_sock);

	dissector_cleanup_all();
	destroy_tx_ring(tx_sock, &tx_ring);
	destroy_rx_ring(rx_sock, &rx_ring);

	if (mode->promiscuous == true)
		leave_promiscuous_mode(mode->device_in, ifflags);

	close(tx_sock);
	close(rx_sock);
}
int send_fds(int sock, struct sockaddr_un *saddr, int len, int *fds, int nr_fds, bool with_flags)
{
	/* Send nr_fds descriptors over an AF_UNIX socket as SCM_RIGHTS
	 * messages in chunks of at most CR_SCM_MAX_FD.  When with_flags
	 * is set (and the kernel supports it), each fd's close-on-exec
	 * flag and file-owner info is captured alongside.
	 * Returns 0 on success, -1 on any failure.
	 */
	struct scm_fdset fdset;
	int *cmsg_data;
	int i, min_fd, ret;

	/* cmsg_data points into fdset's control buffer; reused per chunk. */
	cmsg_data = scm_fdset_init(&fdset, saddr, len, with_flags);
	for (i = 0; i < nr_fds; i += min_fd) {
		min_fd = min(CR_SCM_MAX_FD, nr_fds - i);
		scm_fdset_init_chunk(&fdset, min_fd, with_flags);

		__memcpy(cmsg_data, &fds[i], sizeof(int) * min_fd);

#ifdef SCM_FDSET_HAS_OPTS
		if (with_flags) {
			int j;

			/* Capture per-fd options for this chunk. */
			for (j = 0; j < min_fd; j++) {
				int flags, fd = fds[i + j];
				struct fd_opts *p = fdset.opts + j;
				struct f_owner_ex owner_ex;
				uint32_t v[2];

				flags = __sys(fcntl)(fd, F_GETFD, 0);
				if (flags < 0) {
					pr_err("fcntl(%d, F_GETFD) -> %d\n", fd, flags);
					return -1;
				}

				p->flags = (char)flags;

				ret = __sys(fcntl)(fd, F_GETOWN_EX, (long)&owner_ex);
				if (ret) {
					pr_err("fcntl(%d, F_GETOWN_EX) -> %d\n", fd, ret);
					return -1;
				}

				/*
				 * Simple case -- nothing is changed.
				 */
				if (owner_ex.pid == 0) {
					p->fown.pid = 0;
					continue;
				}

				/* Owner is set: also record the uid/euid that
				 * installed it. */
				ret = __sys(fcntl)(fd, F_GETOWNER_UIDS, (long)&v);
				if (ret) {
					pr_err("fcntl(%d, F_GETOWNER_UIDS) -> %d\n", fd, ret);
					return -1;
				}

				p->fown.uid = v[0];
				p->fown.euid = v[1];
				p->fown.pid_type = owner_ex.type;
				p->fown.pid = owner_ex.pid;
			}
		}
#endif

		ret = __sys(sendmsg)(sock, &fdset.hdr, 0);
		if (ret <= 0)
			return ret ? : -1;	/* map a 0-byte send to -1 */
	}

	return 0;
}
/*
 * readseg - read @count bytes at @offset from the kernel image into the
 * virtual address @va; may copy more than asked.
 */
static void readseg(uintptr_t va, uint32_t count, uint32_t offset)
{
	const uint8_t *src = (uint8_t *) ELFHDR + offset;

	__memcpy((void *) va, (void *) src, count);
}
/*
 * memcpy - thin shim that forwards straight to the platform's
 * optimized __memcpy routine.
 */
void * memcpy(void * dest, const void * src, size_t n)
{
	void *copied = __memcpy(dest, src, n);

	return copied;
}
/* Overlay the delay-load Import Address Table: bulk-copy every thunk
 * entry from the source table over the destination IAT in one shot.
 * NOTE(review): the entry count comes from CountOfImports(pitdDst);
 * assumes source and destination tables have matching lengths --
 * confirm at call sites.
 */
static inline void WINAPI OverlayIAT(PImgThunkData pitdDst, PCImgThunkData pitdSrc)
{
    __memcpy(pitdDst, pitdSrc, CountOfImports(pitdDst) * sizeof IMAGE_THUNK_DATA);
}