//Read one byte from CPU address space, charging one CPU cycle and emulating
//DPCM DMA cycle stealing first when pcmcycles is pending.
//NOTE(review): 'n' is static but always assigned before use; static looks
//unnecessary -- confirm it is not relied upon elsewhere.
static INLINE u8 memread(u32 addr)
{
	static int n;

	//handle dpcm cycle stealing
	if(nes->cpu.pcmcycles) {
		n = nes->cpu.pcmcycles - 1;
		nes->cpu.pcmcycles = 0;		//clear before recursing so re-entry skips this path
		if(addr == 0x4016 || addr == 0x4017) {
			//controller ports: one extra re-read, then burn remaining cycles
			if(n--)
				memread(addr);
			while(n--)
				cpu_tick();
		}
		else {
			//other addresses are simply re-read once per stolen cycle
			while(n--)
				memread(addr);
		}
		apu_dpcm_fetch();
	}

	//increment cycle counter, check irq lines
	cpu_tick();

	//read data from address
	return(cpu_read(addr));
}
//Reset the CPU.  'hard' selects power-on behavior (clear registers and RAM)
//versus a soft reset (mask interrupts, drop SP by 3).
void cpu_reset(int hard)
{
	int i;

	//map the 2KB internal RAM (two 1KB halves), mirrored, into the CPU pages
	for(i=0;i<8;i+=2) {
		nes->cpu.readpages[i + 0] = nes->cpu.writepages[i + 0] = (u8*)nes->cpu.ram;
		nes->cpu.readpages[i + 1] = nes->cpu.writepages[i + 1] = (u8*)nes->cpu.ram + 0x400;
	}
	nes->cpu.pcmcycles = 0;
	nes->cpu.badopcode = 0;
	if(hard) {
		//power-on register state
		A = X = Y = 0;
		SP = 0xFD;
		P = 0x24;
		expand_flags();
		memset(nes->cpu.ram,0,0x800);
	}
	else {
		//soft reset: interrupts disabled, stack pointer decremented by 3
		FLAG_I = 1;
		SP -= 3;
	}
	//fetch the reset vector (memread, so these fetches consume cycles)
	PC = memread(0xFFFC);
	PC |= memread(0xFFFD) << 8;
	log_printf("vectors:\n");
	log_printf(" nmi: $%04X\n",cpu_read(0xFFFA) | (cpu_read(0xFFFB) << 8));
	log_printf(" irq: $%04X\n",cpu_read(0xFFFE) | (cpu_read(0xFFFF) << 8));
	log_printf(" reset: $%04X\n",cpu_read(0xFFFC) | (cpu_read(0xFFFD) << 8));
}
//indirect x static INLINE void AM_INX() { TMPREG = memread(PC++); memread(TMPREG); TMPREG += X; EFFADDR = memread(TMPREG++); EFFADDR |= memread(TMPREG) << 8; }
//absolute y addressing static INLINE void AM_ABY() { EFFADDR = memread(PC++); EFFADDR |= memread(PC++) << 8; tmpi = (EFFADDR & 0xFF) + Y; memread((EFFADDR & 0xFF00) | (u8)tmpi); EFFADDR += Y; }
//indirect addressing static INLINE void AM_IND() { TMPADDR = memread(PC++); TMPADDR |= memread(PC++) << 8; EFFADDR = memread(TMPADDR); TMPADDR = (TMPADDR & 0xFF00) | ((TMPADDR + 1) & 0xFF); EFFADDR |= memread(TMPADDR) << 8; }
//indirect y static INLINE void AM_INY() { TMPREG = memread(PC++); EFFADDR = memread(TMPREG++); EFFADDR |= memread(TMPREG) << 8; tmpi = (EFFADDR & 0xFF) + Y; memread((EFFADDR & 0xFF00) | (u8)tmpi); EFFADDR += Y; }
//absolute y addressing (for reading only) static INLINE void AM_AYR() { EFFADDR = memread(PC++); EFFADDR |= memread(PC++) << 8; tmpi = (EFFADDR & 0xFF) + Y; if(tmpi >= 0x100) { memread((EFFADDR & 0xFF00) | (u8)tmpi); } EFFADDR += Y; }
/* 9P read handler: dispatch to the proc- or data-backed reader and reply. */
void fsread(Req *r)
{
	Fid *fid = r->fid;
	PD *pd = fid->file->aux;
	void *data = r->ofcall.data;
	int64_t offset = r->ifcall.offset;
	int32_t count = r->ifcall.count;
	char *err;

	if(pd->isproc)
		err = memread(pd->p, fid->file, data, &count, offset);
	else
		err = dataread(pd->d, data, &count, offset);
	if(err == nil)
		r->ofcall.count = count;	/* report bytes actually produced */
	respond(r, err);
}
// Byte accessor: fetch a single byte of virtual memory.
// Throws EXCEPTION_OUT_OF_RANGE when the underlying read fails.
uint8 cVirtualMemoryAccesser::operator [] (addressNumericValue address) const
{
    uint8 value;
    bool ok = memread(address, &value, sizeof(value), NULL);
    if (!ok)
        XSTL_THROW(cException, EXCEPTION_OUT_OF_RANGE);
    return value;
}
/* Read one CMOS register through the memory-mapped window. */
static uint8_t cmos_read_mmio(uint8_t reg)
{
    uint32_t offset = (uint32_t)reg_base + (uint32_t)reg;
    uint8_t value;

    memread(base + offset, &value, 1);
    return value;
}
/*
 * Read and validate the MP Configuration Table Header at 'addr'.
 * Returns a malloc'd copy of the full table on success, NULL on failure;
 * the caller owns and must free the returned buffer.
 */
static mpcth_t
biosmptable_check_mpcth(off_t addr)
{
	mpcth_t mpcth;
	void *tmp;
	u_int8_t *cp, sum;
	int idx, table_length;

	/* mpcth must be in the first 1MB */
	if ((u_int32_t)addr >= 1024 * 1024) {
		warnx("bad mpcth address (0x%jx)\n", (intmax_t)addr);
		return (NULL);
	}

	mpcth = malloc(sizeof(*mpcth));
	if (mpcth == NULL) {
		warnx("unable to malloc space for MP Configuration Table Header");
		return (NULL);
	}
	if (!memread(addr, mpcth, sizeof(*mpcth)))
		goto bad;

	/* Compare signature and validate checksum. */
	if (strncmp(mpcth->signature, MPCTH_SIG, strlen(MPCTH_SIG)) != 0) {
		warnx("bad mpcth signature");
		goto bad;
	}
	table_length = mpcth->base_table_length;

	/* A table shorter than its own header is corrupt. */
	if (table_length < (int)sizeof(*mpcth)) {
		warnx("bad mpcth length (%d)", table_length);
		goto bad;
	}

	/*
	 * BUG FIX: the original assigned realloc() straight back to 'mpcth'
	 * and returned on failure, leaking the original allocation.  Use a
	 * temporary so the buffer can still be freed on the error path.
	 */
	tmp = realloc(mpcth, table_length);
	if (tmp == NULL) {
		warnx("unable to realloc space for mpcth (len %u)", table_length);
		goto bad;
	}
	mpcth = tmp;
	if (!memread(addr, mpcth, table_length))
		goto bad;

	/* All bytes of the table must sum to zero (mod 256). */
	cp = (u_int8_t *)mpcth;
	sum = 0;
	for (idx = 0; idx < mpcth->base_table_length; idx++)
		sum += *(cp + idx);
	if (sum != 0) {
		warnx("bad mpcth checksum (%d)", sum);
		goto bad;
	}

	return (mpcth);
bad:
	free(mpcth);
	return (NULL);
}
// Read one value_t from the target process's memory into *buf.
// Logs a prefixed error line on failure; returns memread's status code.
static inline i32 read_memory (pid_t pid, ptr_t mem_addr, value_t *buf, const char *pfx)
{
	const i32 rc = memread(pid, mem_addr, buf, sizeof(value_t));
	if (rc) {
		cerr << pfx << " READ ERROR PID[" << pid << "] ("
		     << hex << mem_addr << dec << ")!" << endl;
	}
	return rc;
}
// Read a 32-bit unsigned integer from virtual memory at 'address'.
// Throws EXCEPTION_OUT_OF_RANGE when the underlying read fails.
uint cVirtualMemoryAccesser::readUint(addressNumericValue address) const
{
    // If this changes, the implementation below must be changed as well.
    ASSERT(sizeof(uint) == sizeof(uint32));
    uint8 buf[4];
    if (!memread(address, &buf, sizeof(buf), NULL))
        XSTL_THROW(cException, EXCEPTION_OUT_OF_RANGE);
    // Byte-order handling lives in readUint32().
    return readUint32(buf);
}
/* 32-bit read: addresses below 64K go through x86 I/O port space,
 * anything above is treated as MMIO. */
static uint32_t qpci_pc_io_readl(QPCIBus *bus, void *addr)
{
    uintptr_t port = (uintptr_t)addr;
    uint32_t value;

    if (port >= 0x10000) {
        memread(port, &value, sizeof(value));
    } else {
        value = inl(port);
    }
    return value;
}
/* stdio-style read callback over an in-memory stream: try to satisfy the
 * whole size*nmemb request; on a short tail, hand over whatever bytes
 * remain and park the stream at its end.  Returns bytes delivered. */
size_t VorbisAudioIO_memreadFunc(void * outData, size_t size, size_t nmemb, void * inData) {
	struct memreadContext * context = inData;
	size_t requested = size * nmemb;

	if (memread(context, requested, outData)) {
		/* full request satisfied */
		return requested;
	}
	if (context->position >= context->length) {
		/* already at end of stream */
		return 0;
	}
	/* partial read: copy the remaining tail and advance to the end */
	size_t remaining = context->length - context->position;
	memcpy(outData, context->data + context->position, remaining);
	context->position = context->length;
	return remaining;
}
/* Execute one instruction on machine 'm'; sets the halt bit when exec
 * reports a stop condition.  No-op when the machine is already halted. */
void step(Mach *m)
{
	Word inst;
	Inst ip;

	/* disassemble first so ip is populated even on the halted early-out */
	disasm(&ip, m, m->pc);
	if (m->halt)
		return;
	if (ip.mode & AMEM)
		m->sym[ip.addr] = 0xffffffff;	/* NOTE(review): confirm meaning of this sentinel in sym[] */
	/* printf("%04x %s", m->pc, ip.str); */
	inst = memread(m, m->pc++);
	if (exec(m, inst))
		m->halt |= 0x1;
}
/*
 * Find the MP Floating Pointer Structure. See the MP spec section 4.1.
 * Searched in order: the EBDA (located via the segment pointer at 0x40E),
 * then the BIOS ROM region.
 */
static mpfps_t
biosmptable_find_mpfps(void)
{
	uint16_t ebda_seg;
	mpfps_t mpfps;

	/* EBDA is the 1 KB addressed by the 16 bit pointer at 0x40E. */
	if (!memread(PTOV(0x40E), &ebda_seg, sizeof(ebda_seg)))
		return (NULL);

	mpfps = biosmptable_search_mpfps(PTOV(ebda_seg << 4), 0x400);
	if (mpfps == NULL) {
		/* Check the BIOS. */
		mpfps = biosmptable_search_mpfps(PTOV(0xf0000), 0x10000);
	}
	return (mpfps);
}
/*
 * Scan [base, base+length) on 16-byte boundaries for a valid MP Floating
 * Pointer Structure.  Returns a malloc'd copy (caller frees) on success,
 * NULL when the region holds no valid structure or memory reads fail.
 */
static mpfps_t biosmptable_search_mpfps(off_t base, int length)
{
	mpfps_t mpfps;
	u_int8_t *cp, sum;
	int ofs, idx;

	mpfps = malloc(sizeof(*mpfps));
	if (mpfps == NULL) {
		warnx("unable to malloc space for MP Floating Pointer Structure");
		return (NULL);
	}

	/* search on 16-byte boundaries */
	for (ofs = 0; ofs < length; ofs += 16) {
		if (!memread(base + ofs, mpfps, sizeof(*mpfps)))
			break;	/* read failure ends the search */

		/* compare signature, validate checksum */
		if (!strncmp(mpfps->signature, MPFPS_SIG, strlen(MPFPS_SIG))) {
			cp = (u_int8_t *)mpfps;
			sum = 0;
			/* mpfps is 16 bytes, or one "paragraph" */
			if (mpfps->length != 1) {
				warnx("bad mpfps length (%d)", mpfps->length);
				continue;
			}
			for (idx = 0; idx < mpfps->length * 16; idx++)
				sum += *(cp + idx);
			if (sum != 0) {
				warnx("bad mpfps checksum (%d)\n", sum);
				continue;
			}
			/* found it -- ownership passes to the caller */
			return (mpfps);
		}
	}
	free(mpfps);
	return (NULL);
}
//zero-page addressing static INLINE void AM_ZPG() { EFFADDR = memread(PC++); }
//method for timing a memory access (memory read) of addresses void Test2(struct args_st *arguments, size_t *timings) { size_t slot = 0; struct perf_event_attr pe; int perf_event_fd, addr; long long cpu_cycles; int i = 0; //init the perf_event_attr before calling the syscall memset(&pe, 0, sizeof(struct perf_event_attr)); pe.type = PERF_TYPE_HARDWARE; pe.size = sizeof(struct perf_event_attr); pe.config = PERF_COUNT_HW_CPU_CYCLES; //we are going to count the number of CPU cycles pe.disabled = 1; pe.exclude_kernel = 1; pe.exclude_hv = 1; perf_event_fd = perf_event_open(&pe, 0, -1, -1, 0); if(perf_event_fd == -1) { fprintf(stderr, "Error opening leader %llx\n", pe.config); exit(EXIT_FAILURE); } //Check overhead printf("[+] Computing overhead\n"); long long overhead = 0, tmp = 0; int N = 10000; for(i = 0; i < N; i++) { ioctl(perf_event_fd, PERF_EVENT_IOC_RESET, 0); ioctl(perf_event_fd, PERF_EVENT_IOC_ENABLE, 0); ioctl(perf_event_fd, PERF_EVENT_IOC_DISABLE, 0); read(perf_event_fd, &cpu_cycles, sizeof(long long)); overhead += cpu_cycles; } overhead /= N; //Start probing addresses of the shared library from base_address to end_address //in order to test the timing variations char *probe; i = 0; printf("[+] Probing memread\n"); sched_yield(); for(probe = arguments->base_address; probe < arguments->end_address; probe += arguments->stride) { clearcache(arguments->base_address, arguments->end_address); //start counting the cpu cycles ioctl(perf_event_fd, PERF_EVENT_IOC_RESET, 0); ioctl(perf_event_fd, PERF_EVENT_IOC_ENABLE, 0); //do a memory read memread(probe); //stop counting the cpu cycles ioctl(perf_event_fd, PERF_EVENT_IOC_DISABLE, 0); //read the cpu cycles counter read(perf_event_fd, &cpu_cycles, sizeof(long long)); //store it timings[i++] = cpu_cycles - overhead; } sched_yield(); }
/**
 * Each thread copies memory from a remote memory buffer.
 * Runs NUM_COPY rounds over this thread's NUM_NODES-1 copy entries,
 * dispatching on data->mode; returns the MEMREAD accumulator (cast to
 * void*) so the reads cannot be optimized away.
 */
void *buf_copy_func(void *arg)
{
	int i, j;
	size_t size = 0;	/* NOTE(review): shadowed by the inner 'int size' below and otherwise unused */
	struct buf_copy_data *data = (struct buf_copy_data *) arg;
	long memread_sum = 0;
	int start_entry_idx = 0;

	bind2node_id(data->node_id);
	data->copy_size = 0;

	if (use_remote) {
		/*
		 * We make sure threads on different nodes start to access memory
		 * from different nodes. In the current implementation, a thread
		 * always starts to copy data from the NUMA node whose node id is
		 * right behind its own node id. For example, the order of nodes
		 * visited by a memcpy thread on node 0 is 1, 2, 3; the order by
		 * a thread on node 1 is 2, 3, 0; etc.
		 */
		for (i = 0; i < NUM_NODES - 1; i++) {
			if (data->mode == MEMCPY_PULL || data->mode == MEMCPY_R2R || data->mode == MEMREAD) {
				if (data->copy_entries[i].from.node_id == (data->node_id + 1) % NUM_NODES)
					break;
			} else if (data->mode == MEMCPY_PUSH || data->mode == MEMWRITE) {
				if (data->copy_entries[i].to.node_id == (data->node_id + 1) % NUM_NODES)
					break;
			}
		}
		assert(i != NUM_NODES - 1);	/* a matching start entry must exist */
		start_entry_idx = i;
	}

	for (j = 0; j < NUM_COPY; j++)
		for (i = 0; i < NUM_NODES - 1; i++) {
			struct buf_copy_data::copy_entry *entry =
				&data->copy_entries[(i + start_entry_idx) % (NUM_NODES - 1)];
			int size = entry->to.size;	/* shadows the outer 'size' */
			if (data->mode == MEMCPY_PULL) {
				/* pull: destination local, source remote (when use_remote) */
				if (use_remote) {
					assert(entry->to.node_id == data->node_id);
					assert(entry->from.node_id != data->node_id);
				} else {
					assert(entry->to.node_id == data->node_id);
					assert(entry->from.node_id == data->node_id);
				}
				assert(size == entry->from.size);
				memcpy(entry->to.addr, entry->from.addr, size);
			} else if (data->mode == MEMCPY_PUSH) {
				/* push: source local, destination remote (when use_remote) */
				if (use_remote) {
					assert(entry->to.node_id != data->node_id);
					assert(entry->from.node_id == data->node_id);
				} else {
					assert(entry->to.node_id == data->node_id);
					assert(entry->from.node_id == data->node_id);
				}
				assert(size == entry->from.size);
				memcpy(entry->to.addr, entry->from.addr, size);
			} else if (data->mode == MEMCPY_R2R) {
				/* remote-to-remote: both ends off-node (when use_remote) */
				if (use_remote) {
					assert(entry->to.node_id != data->node_id);
					assert(entry->from.node_id != data->node_id);
				} else {
					assert(entry->to.node_id == data->node_id);
					assert(entry->from.node_id == data->node_id);
				}
				assert(size == entry->from.size);
				memcpy(entry->to.addr, entry->from.addr, size);
			} else if (data->mode == MEMREAD) {
				/* read-only pass; sum the result so the reads are observable */
				assert(entry->to.addr == NULL);
				if (use_remote)
					assert(entry->from.node_id != data->node_id);
				else
					assert(entry->from.node_id == data->node_id);
				memread_sum += memread(entry->from);
			} else if (data->mode == MEMWRITE) {
				/* write-only pass */
				if (use_remote)
					assert(entry->to.node_id != data->node_id);
				else
					assert(entry->to.node_id == data->node_id);
				assert(entry->from.addr == NULL);
				memwrite(entry->to);
			}
			data->copy_size += size;
		}

	return (void *) memread_sum;
}
/*
 * Submit one SCSI command on the device's request queue and wait for the
 * ISR.  Optional data-out/data-in buffers are chained around the response
 * header.  Returns the raw virtio response byte; the full response is
 * copied to *resp_out when non-NULL.
 */
static uint8_t virtio_scsi_do_command(QVirtIOSCSI *vs, const uint8_t *cdb,
                                      const uint8_t *data_in,
                                      size_t data_in_len,
                                      uint8_t *data_out, size_t data_out_len,
                                      struct virtio_scsi_cmd_resp *resp_out)
{
    QVirtQueue *vq;
    struct virtio_scsi_cmd_req req = { { 0 } };
    struct virtio_scsi_cmd_resp resp = { .response = 0xff, .status = 0xff };
    uint64_t req_addr, resp_addr, data_in_addr = 0, data_out_addr = 0;
    uint8_t response;
    uint32_t free_head;

    vq = vs->vq[2];    /* first request queue */

    req.lun[0] = 1; /* Select LUN */
    req.lun[1] = 1; /* Select target 1 */
    memcpy(req.cdb, cdb, VIRTIO_SCSI_CDB_SIZE);

    /* XXX: Fix endian if any multi-byte field in req/resp is used */

    /* Add request header */
    req_addr = qvirtio_scsi_alloc(vs, sizeof(req), &req);
    free_head = qvirtqueue_add(vq, req_addr, sizeof(req), false, true);

    if (data_out_len) {
        data_out_addr = qvirtio_scsi_alloc(vs, data_out_len, data_out);
        qvirtqueue_add(vq, data_out_addr, data_out_len, false, true);
    }

    /* Add response header */
    resp_addr = qvirtio_scsi_alloc(vs, sizeof(resp), &resp);
    qvirtqueue_add(vq, resp_addr, sizeof(resp), true, !!data_in_len);

    if (data_in_len) {
        data_in_addr = qvirtio_scsi_alloc(vs, data_in_len, data_in);
        qvirtqueue_add(vq, data_in_addr, data_in_len, true, false);
    }

    qvirtqueue_kick(vs->dev, vq, free_head);
    qvirtio_wait_queue_isr(vs->dev, vq, QVIRTIO_SCSI_TIMEOUT_US);

    response = readb(resp_addr +
                     offsetof(struct virtio_scsi_cmd_resp, response));

    if (resp_out) {
        memread(resp_addr, resp_out, sizeof(*resp_out));
    }

    /* NOTE(review): unused data buffers leave addr 0 here -- assumes
     * guest_free(…, 0) is a no-op; confirm. */
    guest_free(vs->qs->alloc, req_addr);
    guest_free(vs->qs->alloc, resp_addr);
    guest_free(vs->qs->alloc, data_in_addr);
    guest_free(vs->qs->alloc, data_out_addr);
    return response;
}

/* Bring up a virtio-scsi PCI device on a null-co backing drive and clear
 * the initial POWER ON unit attention.
 * NOTE(review): the 'slot' parameter is unused in this visible body. */
static QVirtIOSCSI *qvirtio_scsi_pci_init(int slot)
{
    const uint8_t test_unit_ready_cdb[VIRTIO_SCSI_CDB_SIZE] = {};
    QVirtIOSCSI *vs;
    QVirtioPCIDevice *dev;
    struct virtio_scsi_cmd_resp resp;
    int i;

    vs = g_new0(QVirtIOSCSI, 1);
    vs->qs = qvirtio_scsi_start("-drive file=blkdebug::null-co://,"
                                "if=none,id=dr1,format=raw,file.align=4k "
                                "-device scsi-hd,drive=dr1,lun=0,scsi-id=1");
    dev = qvirtio_pci_device_find(vs->qs->pcibus, VIRTIO_ID_SCSI);
    vs->dev = (QVirtioDevice *)dev;
    g_assert(dev != NULL);
    g_assert_cmphex(vs->dev->device_type, ==, VIRTIO_ID_SCSI);

    qvirtio_pci_device_enable(dev);
    qvirtio_reset(vs->dev);
    qvirtio_set_acknowledge(vs->dev);
    qvirtio_set_driver(vs->dev);

    vs->num_queues = qvirtio_config_readl(vs->dev, 0);
    g_assert_cmpint(vs->num_queues, <, MAX_NUM_QUEUES);

    /* set up control/event queues plus the request queues */
    for (i = 0; i < vs->num_queues + 2; i++) {
        vs->vq[i] = qvirtqueue_setup(vs->dev, vs->qs->alloc, i);
    }

    /* Clear the POWER ON OCCURRED unit attention */
    g_assert_cmpint(virtio_scsi_do_command(vs, test_unit_ready_cdb, NULL, 0,
                                           NULL, 0, &resp), ==, 0);
    g_assert_cmpint(resp.status, ==, CHECK_CONDITION);
    g_assert_cmpint(resp.sense[0], ==, 0x70); /* Fixed format sense buffer */
    g_assert_cmpint(resp.sense[2], ==, UNIT_ATTENTION);
    g_assert_cmpint(resp.sense[12], ==, 0x29); /* POWER ON */
    g_assert_cmpint(resp.sense[13], ==, 0x00);

    return vs;
}

/* Tests only initialization so far. TODO: Replace with functional tests */
static void pci_nop(void)
{
    QOSState *qs;

    qs = qvirtio_scsi_start(NULL);
    qvirtio_scsi_stop(qs);
}
// Parse a NatNet model-definition packet into a ModelDef and pass it to
// processModelDef().  All fields are consumed with the overloaded
// memread(dst, ptr, end, label) helpers, which advance 'ptr'.
void OptiTrackNatNetClient::decodeModelDef(const sPacket& data)
{
    const int major = natNetVersion[0];
    //const int minor = natNetVersion[1];
    ModelDef model;
    sofa::helper::vector<PointCloudDef> pointClouds;
    sofa::helper::vector<RigidDef> rigids;
    sofa::helper::vector<SkeletonDef> skeletons;
    const unsigned char *ptr = data.Data.cData;
    const unsigned char *end = ptr + data.nDataBytes;
    int nDatasets = 0;
    memread(nDatasets,ptr,end,"nDatasets");
    for(int i=0; i < nDatasets; i++)
    {
        int type = 0;
        memread(type,ptr,end,"type");
        switch(type)
        {
        case 0: // point cloud
        {
            PointCloudDef pdef;
            memread(pdef.name,ptr,end,"name");
            memread(pdef.nMarkers,ptr,end,"nMarkers");
            if (pdef.nMarkers <= 0) pdef.markers = NULL;
            else
            {
                pdef.markers = new PointCloudDef::Marker[pdef.nMarkers];
                for (int j=0; j<pdef.nMarkers; ++j)
                {
                    memread(pdef.markers[j].name,ptr,end,"markers.name");
                }
            }
            pointClouds.push_back(pdef);
            break;
        }
        case 1: // rigid
        {
            RigidDef rdef;
            // the name field only exists from NatNet major version 2 on
            if(major >= 2) memread(rdef.name,ptr,end,"rigid.name");
            else rdef.name = NULL;
            memread(rdef.ID,ptr,end,"rigid.ID");
            memread(rdef.parentID,ptr,end,"rigid.parentID");
            memread(rdef.offset,ptr,end,"rigid.offset");
            rigids.push_back(rdef);
            break;
        }
        case 2: // skeleton
        {
            SkeletonDef sdef;
            memread(sdef.name,ptr,end,"skeleton.name");
            memread(sdef.nRigids,ptr,end,"skeleton.nRigids");
            if (sdef.nRigids <= 0) sdef.rigids = NULL;
            else
            {
                sdef.rigids = new RigidDef[sdef.nRigids];
                for (int j=0; j<sdef.nRigids; ++j)
                {
                    RigidDef& rdef = sdef.rigids[j];
                    memread(rdef.name,ptr,end,"skeleton.rigid.name");
                    memread(rdef.ID,ptr,end,"skeleton.rigid.ID");
                    memread(rdef.parentID,ptr,end,"skeleton.rigid.parentID");
                    memread(rdef.offset,ptr,end,"skeleton.rigid.offset");
                }
            }
            skeletons.push_back(sdef);
            break;
        }
        default:
        {
            serr << "decodeModelDef: unknown type " << type << sendl;
        }
        }
    }
    // ModelDef only borrows the local vectors' storage below; it must not
    // outlive this function (processModelDef is called before they die).
    model.nPointClouds = pointClouds.size();
    model.pointClouds = (pointClouds.size() > 0) ? &(pointClouds[0]) : NULL;
    model.nRigids = rigids.size();
    model.rigids = (rigids.size() > 0) ? &(rigids[0]) : NULL;
    model.nSkeletons = skeletons.size();
    model.skeletons = (skeletons.size() > 0) ? &(skeletons[0]) : NULL;

    // Apply scale factor
    if (this->scale.isSet())
    {
        const double scale = this->scale.getValue();
        for (int iR = 0; iR < model.nRigids; ++iR)
        {
            model.rigids[iR].offset *= scale;
        }
        for (int iS = 0; iS < model.nSkeletons; ++iS)
        {
            for (int iR = 0; iR < model.skeletons[iS].nRigids; ++iR)
            {
                model.skeletons[iS].rigids[iR].offset *= scale;
            }
        }
    }

    processModelDef(&model);

    // free the per-entry arrays allocated above
    // NOTE(review): 'name' buffers filled by memread are not freed here --
    // confirm whether the name-reading overload allocates.
    for (int i=0; i<model.nPointClouds; ++i)
    {
        if (model.pointClouds[i].markers) delete[] model.pointClouds[i].markers;
    }
    for (int i=0; i<model.nSkeletons; ++i)
    {
        if (model.skeletons[i].rigids) delete[] model.skeletons[i].rigids;
    }
}
//NOP variant that still performs the read of its effective address, so the
//read's side effects (cycle counting in memread) stay accurate.
static INLINE void OP_NOPR()
{
	memread(EFFADDR);
}
/*
 * Submit one SCSI command on the request queue and wait for its used-ring
 * element.  Optional data-out/data-in buffers are chained around the
 * response header.  Returns the raw virtio response byte; the full
 * response is copied to *resp_out when non-NULL.
 */
static uint8_t virtio_scsi_do_command(QVirtioSCSIQueues *vs,
                                      const uint8_t *cdb,
                                      const uint8_t *data_in,
                                      size_t data_in_len,
                                      uint8_t *data_out,
                                      size_t data_out_len,
                                      struct virtio_scsi_cmd_resp *resp_out)
{
    QVirtQueue *vq;
    struct virtio_scsi_cmd_req req = { { 0 } };
    struct virtio_scsi_cmd_resp resp = { .response = 0xff, .status = 0xff };
    uint64_t req_addr, resp_addr, data_in_addr = 0, data_out_addr = 0;
    uint8_t response;
    uint32_t free_head;

    vq = vs->vq[2];    /* first request queue */

    req.lun[0] = 1; /* Select LUN */
    req.lun[1] = 1; /* Select target 1 */
    memcpy(req.cdb, cdb, VIRTIO_SCSI_CDB_SIZE);

    /* XXX: Fix endian if any multi-byte field in req/resp is used */

    /* Add request header */
    req_addr = qvirtio_scsi_alloc(vs, sizeof(req), &req);
    free_head = qvirtqueue_add(vq, req_addr, sizeof(req), false, true);

    if (data_out_len) {
        data_out_addr = qvirtio_scsi_alloc(vs, data_out_len, data_out);
        qvirtqueue_add(vq, data_out_addr, data_out_len, false, true);
    }

    /* Add response header */
    resp_addr = qvirtio_scsi_alloc(vs, sizeof(resp), &resp);
    qvirtqueue_add(vq, resp_addr, sizeof(resp), true, !!data_in_len);

    if (data_in_len) {
        data_in_addr = qvirtio_scsi_alloc(vs, data_in_len, data_in);
        qvirtqueue_add(vq, data_in_addr, data_in_len, true, false);
    }

    qvirtqueue_kick(vs->dev, vq, free_head);
    qvirtio_wait_used_elem(vs->dev, vq, free_head, NULL,
                           QVIRTIO_SCSI_TIMEOUT_US);

    response = readb(resp_addr +
                     offsetof(struct virtio_scsi_cmd_resp, response));

    if (resp_out) {
        memread(resp_addr, resp_out, sizeof(*resp_out));
    }

    /* NOTE(review): unused data buffers leave addr 0 here -- assumes
     * guest_free(…, 0) is a no-op; confirm. */
    guest_free(alloc, req_addr);
    guest_free(alloc, resp_addr);
    guest_free(alloc, data_in_addr);
    guest_free(alloc, data_out_addr);
    return response;
}

/* Set up all of the device's virtqueues and clear the initial POWER ON
 * unit attention with a TEST UNIT READY command. */
static QVirtioSCSIQueues *qvirtio_scsi_init(QVirtioDevice *dev)
{
    QVirtioSCSIQueues *vs;
    const uint8_t test_unit_ready_cdb[VIRTIO_SCSI_CDB_SIZE] = {};
    struct virtio_scsi_cmd_resp resp;
    int i;

    vs = g_new0(QVirtioSCSIQueues, 1);
    vs->dev = dev;
    vs->num_queues = qvirtio_config_readl(dev, 0);
    g_assert_cmpint(vs->num_queues, <, MAX_NUM_QUEUES);

    /* control/event queues plus the request queues */
    for (i = 0; i < vs->num_queues + 2; i++) {
        vs->vq[i] = qvirtqueue_setup(dev, alloc, i);
    }

    /* Clear the POWER ON OCCURRED unit attention */
    g_assert_cmpint(virtio_scsi_do_command(vs, test_unit_ready_cdb, NULL, 0,
                                           NULL, 0, &resp), ==, 0);
    g_assert_cmpint(resp.status, ==, CHECK_CONDITION);
    g_assert_cmpint(resp.sense[0], ==, 0x70); /* Fixed format sense buffer */
    g_assert_cmpint(resp.sense[2], ==, UNIT_ATTENTION);
    g_assert_cmpint(resp.sense[12], ==, 0x29); /* POWER ON */
    g_assert_cmpint(resp.sense[13], ==, 0x00);

    return vs;
}
/* Sequential reader over a 9P reply message: copy 'len' bytes from the
 * message at the current read offset into 'addr', then advance the offset. */
static void v9fs_memread(P9Req *req, void *addr, size_t len)
{
    memread(req->r_msg + req->r_off, addr, len);
    req->r_off += len;
}
/* Exercise basic virtio-blk request layouts: 3-descriptor write/read,
 * WRITE_ZEROES, DISCARD, and (with ANY_LAYOUT) a 2-descriptor write/read.
 * Request buffer layout throughout: 16-byte header, 512 data bytes at
 * offset 16, 1 status byte at offset 528. */
static void test_basic(QVirtioDevice *dev, QGuestAllocator *alloc,
                       QVirtQueue *vq)
{
    QVirtioBlkReq req;
    uint64_t req_addr;
    uint64_t capacity;
    uint32_t features;
    uint32_t free_head;
    uint8_t status;
    char *data;

    capacity = qvirtio_config_readq(dev, 0);
    g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512);

    features = qvirtio_get_features(dev);
    /* negotiate without indirect descriptors, event idx, or SCSI passthrough */
    features = features & ~(QVIRTIO_F_BAD_FEATURE |
                            (1u << VIRTIO_RING_F_INDIRECT_DESC) |
                            (1u << VIRTIO_RING_F_EVENT_IDX) |
                            (1u << VIRTIO_BLK_F_SCSI));
    qvirtio_set_features(dev, features);

    qvirtio_set_driver_ok(dev);

    /* Write and read with 3 descriptor layout */
    /* Write request */
    req.type = VIRTIO_BLK_T_OUT;
    req.ioprio = 1;
    req.sector = 0;
    req.data = g_malloc0(512);
    strcpy(req.data, "TEST");

    req_addr = virtio_blk_request(alloc, dev, &req, 512);

    g_free(req.data);

    free_head = qvirtqueue_add(vq, req_addr, 16, false, true);
    qvirtqueue_add(vq, req_addr + 16, 512, false, true);
    qvirtqueue_add(vq, req_addr + 528, 1, true, false);

    qvirtqueue_kick(dev, vq, free_head);

    qvirtio_wait_used_elem(dev, vq, free_head, NULL, QVIRTIO_BLK_TIMEOUT_US);
    status = readb(req_addr + 528);
    g_assert_cmpint(status, ==, 0);

    guest_free(alloc, req_addr);

    /* Read request */
    req.type = VIRTIO_BLK_T_IN;
    req.ioprio = 1;
    req.sector = 0;
    req.data = g_malloc0(512);

    req_addr = virtio_blk_request(alloc, dev, &req, 512);

    g_free(req.data);

    free_head = qvirtqueue_add(vq, req_addr, 16, false, true);
    qvirtqueue_add(vq, req_addr + 16, 512, true, true);
    qvirtqueue_add(vq, req_addr + 528, 1, true, false);

    qvirtqueue_kick(dev, vq, free_head);

    qvirtio_wait_used_elem(dev, vq, free_head, NULL, QVIRTIO_BLK_TIMEOUT_US);
    status = readb(req_addr + 528);
    g_assert_cmpint(status, ==, 0);

    /* the read must return what the write stored */
    data = g_malloc0(512);
    memread(req_addr + 16, data, 512);
    g_assert_cmpstr(data, ==, "TEST");
    g_free(data);

    guest_free(alloc, req_addr);

    if (features & (1u << VIRTIO_BLK_F_WRITE_ZEROES)) {
        struct virtio_blk_discard_write_zeroes dwz_hdr;
        void *expected;

        /*
         * WRITE_ZEROES request on the same sector of previous test where
         * we wrote "TEST".
         */
        req.type = VIRTIO_BLK_T_WRITE_ZEROES;
        req.data = (char *) &dwz_hdr;
        dwz_hdr.sector = 0;
        dwz_hdr.num_sectors = 1;
        dwz_hdr.flags = 0;

        virtio_blk_fix_dwz_hdr(dev, &dwz_hdr);

        req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr));

        free_head = qvirtqueue_add(vq, req_addr, 16, false, true);
        qvirtqueue_add(vq, req_addr + 16, sizeof(dwz_hdr), false, true);
        qvirtqueue_add(vq, req_addr + 16 + sizeof(dwz_hdr), 1, true, false);

        qvirtqueue_kick(dev, vq, free_head);

        qvirtio_wait_used_elem(dev, vq, free_head, NULL,
                               QVIRTIO_BLK_TIMEOUT_US);
        status = readb(req_addr + 16 + sizeof(dwz_hdr));
        g_assert_cmpint(status, ==, 0);

        guest_free(alloc, req_addr);

        /* Read request to check if the sector contains all zeroes */
        req.type = VIRTIO_BLK_T_IN;
        req.ioprio = 1;
        req.sector = 0;
        req.data = g_malloc0(512);

        req_addr = virtio_blk_request(alloc, dev, &req, 512);

        g_free(req.data);

        free_head = qvirtqueue_add(vq, req_addr, 16, false, true);
        qvirtqueue_add(vq, req_addr + 16, 512, true, true);
        qvirtqueue_add(vq, req_addr + 528, 1, true, false);

        qvirtqueue_kick(dev, vq, free_head);

        qvirtio_wait_used_elem(dev, vq, free_head, NULL,
                               QVIRTIO_BLK_TIMEOUT_US);
        status = readb(req_addr + 528);
        g_assert_cmpint(status, ==, 0);

        data = g_malloc(512);
        expected = g_malloc0(512);
        memread(req_addr + 16, data, 512);
        g_assert_cmpmem(data, 512, expected, 512);
        g_free(expected);
        g_free(data);

        guest_free(alloc, req_addr);
    }

    if (features & (1u << VIRTIO_BLK_F_DISCARD)) {
        struct virtio_blk_discard_write_zeroes dwz_hdr;

        req.type = VIRTIO_BLK_T_DISCARD;
        req.data = (char *) &dwz_hdr;
        dwz_hdr.sector = 0;
        dwz_hdr.num_sectors = 1;
        dwz_hdr.flags = 0;

        virtio_blk_fix_dwz_hdr(dev, &dwz_hdr);

        req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr));

        free_head = qvirtqueue_add(vq, req_addr, 16, false, true);
        qvirtqueue_add(vq, req_addr + 16, sizeof(dwz_hdr), false, true);
        qvirtqueue_add(vq, req_addr + 16 + sizeof(dwz_hdr), 1, true, false);

        qvirtqueue_kick(dev, vq, free_head);
        qvirtio_wait_used_elem(dev, vq, free_head, NULL,
                               QVIRTIO_BLK_TIMEOUT_US);
        status = readb(req_addr + 16 + sizeof(dwz_hdr));
        g_assert_cmpint(status, ==, 0);

        guest_free(alloc, req_addr);
    }

    if (features & (1u << VIRTIO_F_ANY_LAYOUT)) {
        /* Write and read with 2 descriptor layout */
        /* Write request */
        req.type = VIRTIO_BLK_T_OUT;
        req.ioprio = 1;
        req.sector = 1;
        req.data = g_malloc0(512);
        strcpy(req.data, "TEST");

        req_addr = virtio_blk_request(alloc, dev, &req, 512);

        g_free(req.data);

        /* header and data in a single descriptor */
        free_head = qvirtqueue_add(vq, req_addr, 528, false, true);
        qvirtqueue_add(vq, req_addr + 528, 1, true, false);

        qvirtqueue_kick(dev, vq, free_head);

        qvirtio_wait_used_elem(dev, vq, free_head, NULL,
                               QVIRTIO_BLK_TIMEOUT_US);
        status = readb(req_addr + 528);
        g_assert_cmpint(status, ==, 0);

        guest_free(alloc, req_addr);

        /* Read request */
        req.type = VIRTIO_BLK_T_IN;
        req.ioprio = 1;
        req.sector = 1;
        req.data = g_malloc0(512);

        req_addr = virtio_blk_request(alloc, dev, &req, 512);

        g_free(req.data);

        free_head = qvirtqueue_add(vq, req_addr, 16, false, true);
        /* data and status share a single 513-byte descriptor */
        qvirtqueue_add(vq, req_addr + 16, 513, true, false);

        qvirtqueue_kick(dev, vq, free_head);

        qvirtio_wait_used_elem(dev, vq, free_head, NULL,
                               QVIRTIO_BLK_TIMEOUT_US);
        status = readb(req_addr + 528);
        g_assert_cmpint(status, ==, 0);

        data = g_malloc0(512);
        memread(req_addr + 16, data, 512);
        g_assert_cmpstr(data, ==, "TEST");
        g_free(data);

        guest_free(alloc, req_addr);
    }
//pop data from stack static INLINE u8 pop() { SP++; return(memread(SP | 0x100)); }
static huffman_node* read_code_table_from_memory(const unsigned char* bufin, unsigned int bufinlen, unsigned int *pindex, uint32_t *pDataBytes) { huffman_node *root = new_nonleaf_node(0, NULL, NULL); uint32_t count; /* Read the number of entries. (it is stored in network byte order). */ if(memread(bufin, bufinlen, pindex, &count, sizeof(count))) { free_huffman_tree(root); return NULL; } count = ntohl(count); /* Read the number of data bytes this encoding represents. */ if(memread(bufin, bufinlen, pindex, pDataBytes, sizeof(*pDataBytes))) { free_huffman_tree(root); return NULL; } *pDataBytes = ntohl(*pDataBytes); /* Read the entries. */ while(count-- > 0) { unsigned int curbit; unsigned char symbol; unsigned char numbits; unsigned char numbytes; unsigned char *bytes; huffman_node *p = root; if(memread(bufin, bufinlen, pindex, &symbol, sizeof(symbol))) { free_huffman_tree(root); return NULL; } if(memread(bufin, bufinlen, pindex, &numbits, sizeof(numbits))) { free_huffman_tree(root); return NULL; } numbytes = (unsigned char)numbytes_from_numbits(numbits); bytes = (unsigned char*)malloc(numbytes); if(memread(bufin, bufinlen, pindex, bytes, numbytes)) { free(bytes); free_huffman_tree(root); return NULL; } /* * Add the entry to the Huffman tree. The value * of the current bit is used switch between * zero and one child nodes in the tree. New nodes * are added as needed in the tree. */ for(curbit = 0; curbit < numbits; ++curbit) { if(get_bit(bytes, curbit)) { if(p->one == NULL) { p->one = curbit == (unsigned char)(numbits - 1) ? new_leaf_node(symbol) : new_nonleaf_node(0, NULL, NULL); p->one->parent = p; } p = p->one; } else { if(p->zero == NULL) { p->zero = curbit == (unsigned char)(numbits - 1) ? new_leaf_node(symbol) : new_nonleaf_node(0, NULL, NULL); p->zero->parent = p; } p = p->zero; } } free(bytes); } return root; }
// Parse a NatNet frame packet, copy all marker positions into this
// component's Data fields (trackedMarkers / otherMarkers), and hand the
// frame to processFrame().  Fields are consumed with the overloaded
// memread(dst[, count], ptr, end, label) helpers that advance 'ptr'.
void OptiTrackNatNetClient::decodeFrame(const sPacket& data)
{
    const int major = natNetVersion[0];
    const int minor = natNetVersion[1];

    FrameData frame;
    int nTrackedMarkers = 0;
    int nOtherMarkers = 0;
    const unsigned char *ptr = data.Data.cData;
    const unsigned char *end = ptr + data.nDataBytes;
    memread(frame.frameNumber,ptr,end,"frameNumber");
    memread(frame.nPointClouds,ptr,end,"nPointClouds");
    if (frame.nPointClouds <= 0) frame.pointClouds = NULL;
    else
    {
        frame.pointClouds = new PointCloudData[frame.nPointClouds];
        for (int iP = 0; iP < frame.nPointClouds; ++iP)
        {
            PointCloudData& pdata = frame.pointClouds[iP];
            memread(pdata.name,ptr,end,"pointCloud.name");
            memread(pdata.nMarkers,ptr,end,"pointCloud.nMarkers");
            nTrackedMarkers += pdata.nMarkers;
            memread(pdata.markersPos, pdata.nMarkers, ptr, end, "pointCloud.markersPos");
        }
    }
    memread(frame.nOtherMarkers,ptr,end,"nOtherMarkers");
    nOtherMarkers += frame.nOtherMarkers;
    memread(frame.otherMarkersPos, frame.nOtherMarkers, ptr,end,"otherMarkersPos");
    memread(frame.nRigids,ptr,end,"nRigids");
    if (frame.nRigids <= 0) frame.rigids = NULL;
    else
    {
        frame.rigids = new RigidData[frame.nRigids];
        for (int iR = 0; iR < frame.nRigids; ++iR)
        {
            RigidData& rdata = frame.rigids[iR];
            memread(rdata.ID,ptr,end,"rigid.ID");
            memread(rdata.pos,ptr,end,"rigid.pos");
            memread(rdata.rot,ptr,end,"rigid.rot");
            memread(rdata.nMarkers, ptr,end,"rigid.nMarkers");
            nTrackedMarkers += rdata.nMarkers;
            memread(rdata.markersPos, rdata.nMarkers, ptr,end,"rigid.markersPos");
            // per-marker IDs/sizes and error only exist from NatNet 2.x on
            if (major < 2)
            {
                rdata.markersID = NULL;
                rdata.markersSize = NULL;
                rdata.meanError = -1.0f;
            }
            else
            {
                memread(rdata.markersID, rdata.nMarkers, ptr,end,"rigid.markersID");
                memread(rdata.markersSize, rdata.nMarkers, ptr,end,"rigid.markersSize");
                memread(rdata.meanError, ptr,end,"rigid.meanError");
            }
        }
    }
    // skeleton blocks only exist from NatNet 2.1 on
    if (major <= 1 || (major == 2 && minor <= 0))
    {
        frame.nSkeletons = 0;
        frame.skeletons = NULL;
    }
    else
    {
        memread(frame.nSkeletons,ptr,end,"nSkeletons");
        if (frame.nSkeletons <= 0) frame.skeletons = NULL;
        else
        {
            frame.skeletons = new SkeletonData[frame.nSkeletons];
            for (int iS = 0; iS < frame.nSkeletons; ++iS)
            {
                SkeletonData& sdata = frame.skeletons[iS];
                memread(sdata.ID,ptr,end,"skeleton.ID");
                memread(sdata.nRigids, ptr,end,"skeleton.nRigids");
                if (sdata.nRigids <= 0) sdata.rigids = NULL;
                else
                {
                    sdata.rigids = new RigidData[sdata.nRigids];
                    for (int iR = 0; iR < sdata.nRigids; ++iR)
                    {
                        RigidData& rdata = sdata.rigids[iR];
                        memread(rdata.ID,ptr,end,"rigid.ID");
                        memread(rdata.pos,ptr,end,"rigid.pos");
                        memread(rdata.rot,ptr,end,"rigid.rot");
                        memread(rdata.nMarkers, ptr,end,"rigid.nMarkers");
                        nTrackedMarkers += rdata.nMarkers;
                        memread(rdata.markersPos, rdata.nMarkers, ptr,end,"rigid.markersPos");
                        memread(rdata.markersID, rdata.nMarkers, ptr,end,"rigid.markersID");
                        memread(rdata.markersSize, rdata.nMarkers, ptr,end,"rigid.markersSize");
                        memread(rdata.meanError, ptr,end,"rigid.meanError");
                    }
                }
            }
        }
    }
    memread(frame.latency, ptr,end,"latency");
    if (ptr != end)
    {
        // serr << "decodeFrame: extra " << end-ptr << " bytes at end of message" << sendl;
    }

    // Copy markers to stored Data
    // After these blocks the frame's marker pointers alias the Data vectors'
    // storage -- NOTE(review): &(markers[m0]) on an empty vector is taken
    // even when the count is 0; confirm the accessor tolerates that.
    {
        sofa::helper::WriteAccessor<sofa::core::objectmodel::Data<sofa::helper::vector<sofa::defaulttype::Vec3f> > > markers = this->otherMarkers;
        markers.resize(nOtherMarkers);
        int m0 = 0;
        for (int i = 0; i < frame.nOtherMarkers; ++i)
            markers[m0+i] = frame.otherMarkersPos[i];
        frame.otherMarkersPos = &(markers[m0]);
        m0 += frame.nOtherMarkers;
    }
    {
        sofa::helper::WriteAccessor<sofa::core::objectmodel::Data<sofa::helper::vector<sofa::defaulttype::Vec3f> > > markers = this->trackedMarkers;
        markers.resize(nTrackedMarkers);
        int m0 = 0;
        for (int iP = 0; iP < frame.nPointClouds; ++iP)
        {
            for (int i = 0; i < frame.pointClouds[iP].nMarkers; ++i)
                markers[m0+i] = frame.pointClouds[iP].markersPos[i];
            frame.pointClouds[iP].markersPos = &(markers[m0]);
            m0 += frame.pointClouds[iP].nMarkers;
        }
        for (int iR = 0; iR < frame.nRigids; ++iR)
        {
            for (int i = 0; i < frame.rigids[iR].nMarkers; ++i)
                markers[m0+i] = frame.rigids[iR].markersPos[i];
            frame.rigids[iR].markersPos = &(markers[m0]);
            m0 += frame.rigids[iR].nMarkers;
        }
        for (int iS = 0; iS < frame.nSkeletons; ++iS)
        {
            for (int iR = 0; iR < frame.skeletons[iS].nRigids; ++iR)
            {
                for (int i = 0; i < frame.skeletons[iS].rigids[iR].nMarkers; ++i)
                    markers[m0+i] = frame.skeletons[iS].rigids[iR].markersPos[i];
                frame.skeletons[iS].rigids[iR].markersPos = &(markers[m0]);
                m0 += frame.skeletons[iS].rigids[iR].nMarkers;
            }
        }
    }

    // Apply scale factor
    if (this->scale.isSet())
    {
        const double scale = this->scale.getValue();
        {
            sofa::helper::WriteAccessor<sofa::core::objectmodel::Data<sofa::helper::vector<sofa::defaulttype::Vec3f> > > markers = this->trackedMarkers;
            for (unsigned int i=0; i<markers.size(); ++i)
                markers[i] *= scale;
        }
        {
            sofa::helper::WriteAccessor<sofa::core::objectmodel::Data<sofa::helper::vector<sofa::defaulttype::Vec3f> > > markers = this->otherMarkers;
            for (unsigned int i=0; i<markers.size(); ++i)
                markers[i] *= scale;
        }
        for (int iR = 0; iR < frame.nRigids; ++iR)
            frame.rigids[iR].pos *= scale;
        for (int iS = 0; iS < frame.nSkeletons; ++iS)
            for (int iR = 0; iR < frame.skeletons[iS].nRigids; ++iR)
                frame.skeletons[iS].rigids[iR].pos *= scale;
    }

    processFrame(&frame);

    // free the per-frame arrays allocated above
    if (frame.pointClouds)
    {
        delete[] frame.pointClouds;
    }
    if (frame.rigids)
    {
        delete[] frame.rigids;
    }
    if (frame.skeletons)
    {
        for (int i=0; i<frame.nSkeletons; ++i)
        {
            if (frame.skeletons[i].rigids)
                delete[] frame.skeletons[i].rigids;
        }
        delete[] frame.skeletons;
    }
}