/*
 * Walk the guest's 32-bit (non-PAE) two-level page tables and respond
 * on @conn with a JSON list of strings, one per present 4 KiB PTE:
 * "vaddr (pte_paddr) (flags)".  Paging-disabled or PAE guests get an
 * empty list.  Returns the status from respond_with_json().
 */
static int process_memory_info(QemuHttpConnection *conn)
{
    int l1, l2;
    int st;
    uint32_t pgd, pde, pte;
    int buf_size;
    char buf[128];
    CPUState *env = first_cpu;
    QList *res = qlist_new();

    /* No paging, or PAE layout we do not decode: answer with []. */
    if (!(env->cr[0] & CR0_PG_MASK))
        goto out;
    if (env->cr[4] & CR4_PAE_MASK)
        goto out;

    /* CR3 holds the page-directory base, 4 KiB aligned. */
    pgd = env->cr[3] & ~0xfff;
    for (l1 = 0; l1 < 1024; ++l1) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (pde & PG_PRESENT_MASK) {
            /* 4kb pages (skip 4 MiB PSE mappings) */
            if (!(pde & PG_PSE_MASK) || !(env->cr[4] & CR4_PSE_MASK)) {
                for (l2 = 0; l2 < 1024; ++l2) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4,
                                             &pte, 4);
                    pte = le32_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        buf_size = snprintf(buf, sizeof(buf),
                                "%02X (%02X) (%c%c%c%c%c%c%c%c%c)",
                                (l1 << 22) + (l2 << 12),
                                (pde & ~0xfff) + l2 * 4,
                                pte & PG_NX_MASK ? 'X' : '-',
                                pte & PG_GLOBAL_MASK ? 'G' : '-',
                                pte & PG_PSE_MASK ? 'P' : '-',
                                pte & PG_DIRTY_MASK ? 'D' : '-',
                                pte & PG_ACCESSED_MASK ? 'A' : '-',
                                pte & PG_PCD_MASK ? 'C' : '-',
                                pte & PG_PWT_MASK ? 'T' : '-',
                                pte & PG_USER_MASK ? 'U' : '-',
                                pte & PG_RW_MASK ? 'W' : '-');
                        /* BUG FIX: previously did buf[buf_size] = 0,
                         * but snprintf() returns the would-be length,
                         * which can be >= sizeof(buf) on truncation —
                         * an out-of-bounds write.  snprintf already
                         * NUL-terminates, so just skip on error. */
                        if (buf_size < 0)
                            continue;
                        qlist_append(res, qstring_from_str(buf));
                    }
                }
            }
        }
    }
out:
    st = respond_with_json(conn, QOBJECT(res));
    qobject_decref(QOBJECT(res));
    return st;
}
/*
 * Fetch the next DMA descriptor for channel @ch from guest memory and
 * latch its four words (DDADR/DSADR/DTADR/DCMD) into the channel state.
 * NOTE(review): the descriptor words are not byte-swapped after the
 * read — presumably guest and host byte order match here; confirm.
 */
static inline void pxa2xx_dma_descriptor_fetch(PXA2xxDMAState *s, int ch)
{
    uint32_t desc[4];
    /* Descriptor addresses are 16-byte aligned; low bits carry flags. */
    hwaddr daddr = s->chan[ch].descr & ~0xf;

    /* Branch enabled and compare status set: take the alternate
       descriptor 32 bytes further on. */
    if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
        daddr += 32;

    cpu_physical_memory_read(daddr, desc, 16);
    s->chan[ch].descr = desc[DDADR];
    s->chan[ch].src = desc[DSADR];
    s->chan[ch].dest = desc[DTADR];
    s->chan[ch].cmd = desc[DCMD];

    /* Flow-controlled transfers ignore the low two address bits. */
    if (s->chan[ch].cmd & DCMD_FLOWSRC)
        s->chan[ch].src &= ~3;
    if (s->chan[ch].cmd & DCMD_FLOWTRG)
        s->chan[ch].dest &= ~3;

    /* Compare and fly-by modes are not implemented; just warn. */
    if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
        printf("%s: unsupported mode in channel %i\n",
               __FUNCTION__, ch);

    if (s->chan[ch].cmd & DCMD_STARTIRQEN)
        s->chan[ch].state |= DCSR_STARTINTR;
}
/* Load new Frame Descriptors from DMA */
static void pxa2xx_descriptor_load(PXA2xxLCDState *s)
{
    PXAFrameDescriptor desc;
    hwaddr descptr;
    int i;

    for (i = 0; i < PXA_LCDDMA_CHANS; i ++) {
        s->dma_ch[i].source = 0;

        if (!s->dma_ch[i].up)
            continue;                     /* channel disabled */

        if (s->dma_ch[i].branch & FBR_BRA) {
            /* Pending branch: fetch from the branch address instead,
               optionally raising the branch-start interrupt. */
            descptr = s->dma_ch[i].branch & FBR_SRCADDR;
            if (s->dma_ch[i].branch & FBR_BINT)
                pxa2xx_dma_bs_set(s, i);
            s->dma_ch[i].branch &= ~FBR_BRA;
        } else
            descptr = s->dma_ch[i].descriptor;

        /* Only accept descriptors that lie entirely within SDRAM or
           internal SRAM; anything else is silently skipped. */
        if (!((descptr >= PXA2XX_SDRAM_BASE &&
               descptr + sizeof(desc) <= PXA2XX_SDRAM_BASE + ram_size) ||
              (descptr >= PXA2XX_INTERNAL_BASE &&
               descptr + sizeof(desc) <= PXA2XX_INTERNAL_BASE +
                                         PXA2XX_INTERNAL_SIZE))) {
            continue;
        }

        cpu_physical_memory_read(descptr, &desc, sizeof(desc));
        /* Descriptor fields are little-endian in guest memory. */
        s->dma_ch[i].descriptor = le32_to_cpu(desc.fdaddr);
        s->dma_ch[i].source = le32_to_cpu(desc.fsaddr);
        s->dma_ch[i].id = le32_to_cpu(desc.fidr);
        s->dma_ch[i].command = le32_to_cpu(desc.ldcmd);
    }
}
/* Load new Frame Descriptors from DMA */
static void pxa2xx_descriptor_load(PXA2xxLCDState *s)
{
    PXAFrameDescriptor desc;
    target_phys_addr_t descptr;
    int i;

    for (i = 0; i < PXA_LCDDMA_CHANS; i ++) {
        s->dma_ch[i].source = 0;

        if (!s->dma_ch[i].up)
            continue;                     /* channel disabled */

        if (s->dma_ch[i].branch & FBR_BRA) {
            /* Pending branch: fetch from the branch address instead,
               optionally raising the branch-start interrupt. */
            descptr = s->dma_ch[i].branch & FBR_SRCADDR;
            if (s->dma_ch[i].branch & FBR_BINT)
                pxa2xx_dma_bs_set(s, i);
            s->dma_ch[i].branch &= ~FBR_BRA;
        } else
            descptr = s->dma_ch[i].descriptor;

        /* Only accept descriptors that lie entirely within SDRAM. */
        if (!(descptr >= PXA2XX_SDRAM_BASE &&
              descptr + sizeof(desc) <= PXA2XX_SDRAM_BASE + ram_size))
            continue;

        cpu_physical_memory_read(descptr, (void *)&desc, sizeof(desc));
        /* tswap32: convert fields from target to host byte order. */
        s->dma_ch[i].descriptor = tswap32(desc.fdaddr);
        s->dma_ch[i].source = tswap32(desc.fsaddr);
        s->dma_ch[i].id = tswap32(desc.fidr);
        s->dma_ch[i].command = tswap32(desc.ldcmd);
    }
}
/*******************************************************************
UT_array* memfrs_scan_phymem( uint64_t start_addr, uint64_t end_addr, const char* pattern, int length )

Scan for specific pattern in the VM's physical memory

INPUT:    uint64_t start_addr,  The start address
          uint64_t end_addr,    the end address
          const char* pattern   pattern to search, support only ascii string
          int length            length of the pattern in bytes
OUTPUT:   UT_array*,            An UT_array that contains the address of found pattern,
                                or NULL on invalid arguments / allocation failure
*******************************************************************/
UT_array* memfrs_scan_phymem( uint64_t start_addr, uint64_t end_addr, const char* pattern , int length )
{
    uint64_t i;
    UT_array *match_addr;

    if(start_addr >= end_addr){
        printf("end_addr is not less than start_addr\n");
        return NULL;
    }
    /* BUG FIX: a non-positive length previously flowed into malloc()
     * and memcmp() unchecked. */
    if(length <= 0){
        printf("invalid pattern length\n");
        return NULL;
    }

    uint8_t* buf = (uint8_t*)malloc(length);
    if(buf == NULL){
        printf("Cannot allocate memory for memfrs_scan_phymem()\n");
        return NULL;
    }

    utarray_new( match_addr, &adr_icd);

    printf("Scan for pattern %s\n", pattern);

    /* BUG FIX: guard the window computation — if length > end_addr + 1
     * the old "end_addr - length + 1" wrapped around and the loop
     * scanned a bogus, near-2^64 range. */
    if ((uint64_t)length - 1 <= end_addr) {
        /* Slide a 'length'-byte window across [start_addr, end_addr]. */
        for(i = start_addr; i < end_addr - length + 1; i++)
        {
            cpu_physical_memory_read(i, buf, length);
            if(memcmp(buf, pattern, length)==0)
            {
                /* BUG FIX: use PRIx64 — "%lx" is wrong for uint64_t on
                 * 32-bit hosts. */
                printf("pattern found %"PRIx64"\n", i);
                utarray_push_back(match_addr, &i);
            }
        }
    }

    free(buf);   /* BUG FIX: scratch buffer was previously leaked */
    return match_addr;
}
/* Fetch one 32-bit word of MMC FIFO data from guest memory at the
   system-address register plus @pos. */
static uint32_t mmc_fifo_pop(S5pc1xxMMCState *s, uint32_t pos)
{
    uint32_t word = 0;

    cpu_physical_memory_read(s->sysad + pos, (uint8_t *)&word, sizeof(word));
    return word;
}
/* Load channel @c's group descriptor from the guest address held in
   the RW_GROUP register into current_g. */
static void channel_load_g(struct fs_dma_ctrl *ctrl, int c)
{
    target_phys_addr_t addr = channel_reg(ctrl, c, RW_GROUP);

    /* Load and decode. FIXME: handle endianness. */
    cpu_physical_memory_read (addr,
                              (void *) &ctrl->channels[c].current_g,
                              sizeof ctrl->channels[c].current_g);
}
/* Load channel @c's data descriptor from the saved-data pointer into
   current_d, and mirror the descriptor address into RW_DATA. */
static void channel_load_d(struct fs_dma_ctrl *ctrl, int c)
{
    hwaddr addr = channel_reg(ctrl, c, RW_SAVED_DATA);

    /* Load and decode. FIXME: handle endianness. */
    D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
    cpu_physical_memory_read (addr,
                              (void *) &ctrl->channels[c].current_d,
                              sizeof ctrl->channels[c].current_d);

    D(dump_d(c, &ctrl->channels[c].current_d));
    ctrl->channels[c].regs[RW_DATA] = addr;
}
/* Get an array of words from main memory: read @num little-endian
   16-bit words starting at @addr (offset by the controller-local
   memory base) into @buf, converting each to host order.  Always
   reports success. */
static inline int get_words(OHCIState *ohci,
                            uint32_t addr, uint16_t *buf, int num)
{
    uint32_t phys = ohci->localmem_base + addr;
    int n;

    for (n = 0; n < num; n++) {
        cpu_physical_memory_read(phys, &buf[n], sizeof(buf[n]));
        buf[n] = le16_to_cpu(buf[n]);
        phys += sizeof(buf[n]);
    }

    return 1;
}
/*
 * PDC-driven SPI transfer: clock out up to *tx_len bytes from guest
 * memory at @tx while capturing received bytes into guest memory at
 * @rx (up to *rx_len).  Chip-select is asserted for the duration and
 * released when @last_transfer is set.  Returns 0 on success, -1 for
 * a receive-only request (not supported).
 */
static int pdc_start_transfer(void *opaque,
                              target_phys_addr_t tx, unsigned int *tx_len,
                              target_phys_addr_t rx, unsigned int *rx_len,
                              int last_transfer)
{
    SPIState *s = opaque;
    unsigned int i;
    unsigned int tlen;

    DPRINTF("pdc: start transfer, last trans %d\n", last_transfer);
#if 1
    if (tx_len == NULL) {
        /* Receive-only requests are rejected. */
        DPRINTF("ignore only read request\n");
        return -1;
    }
#endif

    tlen = *tx_len;
    if (rx_len != NULL) {
        /* Clock enough bytes to satisfy the longer of the two queues. */
        tlen = *rx_len > tlen ? *rx_len : tlen;
    }

    /* suppose that transfer 8 bit,
       TODO: fix this, extract right value from csr */
    s->spi_control->set_chipselect(s->spi_control->opaque, 1);
    for (i = 0; i < tlen; ++i) {
        DPRINTF("pdc: transfering\n");
        uint8_t tmp = 0;
        if (tx_len != NULL && *tx_len > 0) {
            cpu_physical_memory_read(tx, &tmp, 1);
            ++tx;
            --*tx_len;
        }
        /* Full duplex: each byte shifted out clocks one byte in. */
        tmp = s->spi_control->txrx_callback(s->spi_control->opaque, tmp, 8);
        s->rdr = tmp;   /* latch last received byte in the data register */
        if (rx_len != NULL && *rx_len > 0) {
            cpu_physical_memory_write(rx, &tmp, 1);
            ++rx;
            --*rx_len;
        }
    }
    if (last_transfer) {
        s->spi_control->set_chipselect(s->spi_control->opaque, 0);
    }
    return 0;
}
/* Load channel @c's data descriptor from the saved-data pointer into
   current_d, and expose the descriptor's buffer pointer through the
   RW_SAVED_DATA_BUF register. */
static void channel_load_d(struct fs_dma_ctrl *ctrl, int c)
{
    target_phys_addr_t addr = channel_reg(ctrl, c, RW_SAVED_DATA);

    /* Load and decode. FIXME: handle endianness. */
    D(printf("%s addr=%x\n", __func__, addr));
    cpu_physical_memory_read (addr,
                              (void *) &ctrl->channels[c].current_d,
                              sizeof ctrl->channels[c].current_d);

    D(dump_d(c, &ctrl->channels[c].current_d));
    ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
        (uint32_t)ctrl->channels[c].current_d.buf;
}
/*
 * Handle a framebuffer-setup message on the VideoCore mailbox: read the
 * fb-info structure from the guest address carried in @_msg, compute
 * pitch/size, choose the hard-coded framebuffer base, resize the
 * console, write the completed structure back, and ack the channel.
 */
static void bcm2708_vc_fb(struct bcm2708_vc *_vc, int _chan, uint32_t _msg)
{
    /* Strip the top flag bits (0xc << 28) from the message to recover
       the DMA address of the fb-info structure. */
    target_phys_addr_t dma = _msg &~ (0xc << 28);

    cpu_physical_memory_read(dma, &_vc->fb, sizeof(_vc->fb));

    // TODO: much better calculations.
    int pitch;
    switch(_vc->fb.bpp) {
    case 8:
        pitch = _vc->fb.xres;
        _vc->fb_bpp = BPP_8;
        break;
    case 16:
        pitch = _vc->fb.xres << 1;
        _vc->fb_bpp = BPP_16_565;
        break;
    case 32:
        pitch = _vc->fb.xres << 2;
        _vc->fb_bpp = BPP_32;
        break;
    default:
        /* Unknown depth: fall back to 16bpp RGB565. */
        pitch = _vc->fb.xres << 1;
        _vc->fb_bpp = BPP_16_565;
        break;
    }

    target_phys_addr_t fbsz = pitch*_vc->fb.yres;
    target_phys_addr_t addr = 128*1024*1024; // Currently hard-coded in kernel?

    _vc->fb_invalidate = 1;   /* force a full redraw */
    _vc->fb.pitch = pitch;
    _vc->fb.base = addr;
    _vc->fb.screen_size = fbsz;

    qemu_console_resize(_vc->disp, _vc->fb.xres, _vc->fb.yres);

#ifdef DEBUG_FB
    printf("fb mapped to 0x%08x (%p).\n", addr, _vc->disp);
#endif

    /* Report the chosen layout back to the guest, then ack. */
    cpu_physical_memory_write(dma, &_vc->fb, sizeof(_vc->fb));
    bcm2708_vc_send(_vc, _chan, 0);
}
/*
 * Copy the NVRAM image staged by the guest firmware (GFW) out to the
 * host-side NVRAM backing file.  The firmware leaves an
 * {signature, addr} record at NVRAM_START; when the signature is
 * valid, NVRAM_SIZE bytes are read from that address and written to
 * the file returned by kvm_ia64_nvram_init().
 * Returns 0 on success, -1 on any failure.
 */
int kvm_ia64_copy_from_GFW_to_nvram(void)
{
    struct nvram_save_addr nvram_addr_buf;
    uint8_t *nvram_buf;
    int nvram_fd;                 /* BUG FIX: was unsigned long */
    unsigned long type = WRITE_TO_NVRAM;
    int ret = -1;

    nvram_buf = malloc(NVRAM_SIZE);
    if (!nvram_buf)
        goto out_free;            /* free(NULL) is a no-op */

    cpu_physical_memory_read(NVRAM_START, (uint8_t *)&nvram_addr_buf,
                             sizeof(struct nvram_save_addr));
    if (nvram_addr_buf.signature != NVRAM_VALID_SIG) {
        goto out_free;
    }

    cpu_physical_memory_read(nvram_addr_buf.addr, nvram_buf, NVRAM_SIZE);

    nvram_fd = kvm_ia64_nvram_init(type);
    if (nvram_fd == -1)
        goto out_free;            /* BUG FIX: used to goto out and
                                     call close(-1) on an invalid fd */

    lseek(nvram_fd, 0, SEEK_SET);
    if (write(nvram_fd, nvram_buf, NVRAM_SIZE) != NVRAM_SIZE)
        goto out;
    ret = 0;
out:
    close(nvram_fd);
out_free:
    free(nvram_buf);
    return ret;
}
/*
 * Synchronize the guest-resident VAPIC page with the emulated APIC.
 * SYNC_FROM_VAPIC pulls the guest-visible TPR in; SYNC_TO_VAPIC /
 * SYNC_ISR_IRR_TO_VAPIC recompute the isr/irr summary bytes (the
 * actual write-back is currently commented out — FIXME below).
 */
static void apic_sync_vapic(APICCommonState *s, int sync_type)
{
    VAPICState vapic_state;
    //size_t length;
    //off_t start;
    int vector;

    if (!s->vapic_paddr) {
        return;   /* guest never registered a VAPIC page */
    }

    if (sync_type & SYNC_FROM_VAPIC) {
        /* NOTE(review): the extra leading NULL argument does not match
           the usual cpu_physical_memory_read(addr, buf, len) signature;
           presumably this tree uses a 4-argument variant — confirm,
           otherwise the address argument is effectively 0. */
        cpu_physical_memory_read(NULL, s->vapic_paddr, &vapic_state,
                                 sizeof(vapic_state));
        s->tpr = vapic_state.tpr;
    }
    if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) {
        //start = offsetof(VAPICState, isr);
        //length = offsetof(VAPICState, enabled) - offsetof(VAPICState, isr);

        if (sync_type & SYNC_TO_VAPIC) {
            /* Full sync may only run on the owning vCPU thread. */
            assert(qemu_cpu_is_self(CPU(s->cpu)));

            vapic_state.tpr = s->tpr;
            vapic_state.enabled = 1;
            //start = 0;
            //length = sizeof(VAPICState);
        }

        /* Highest in-service vector; only its priority class (top
           nibble) is published. */
        vector = get_highest_priority_int(s->isr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.isr = vector & 0xf0;

        vapic_state.zero = 0;

        /* Highest pending-request vector. */
        vector = get_highest_priority_int(s->irr);
        if (vector < 0) {
            vector = 0;
        }
        vapic_state.irr = vector & 0xff;

        //cpu_physical_memory_write_rom(&address_space_memory,
        //                              s->vapic_paddr + start,
        //                              ((void *)&vapic_state) + start, length);
        // FIXME qq
    }
}
/* Load channel @c's context descriptor (pointed to by RW_GROUP_DOWN)
   into current_c and refresh the saved-data registers from it. */
static void channel_load_c(struct fs_dma_ctrl *ctrl, int c)
{
    target_phys_addr_t addr = channel_reg(ctrl, c, RW_GROUP_DOWN);

    /* Load and decode. FIXME: handle endianness. */
    cpu_physical_memory_read (addr,
                              (void *) &ctrl->channels[c].current_c,
                              sizeof ctrl->channels[c].current_c);

    D(dump_c(c, &ctrl->channels[c].current_c));
    /* I guess this should update the current pos. */
    ctrl->channels[c].regs[RW_SAVED_DATA] =
        (uint32_t)ctrl->channels[c].current_c.saved_data;
    ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
        (uint32_t)ctrl->channels[c].current_c.saved_data_buf;
}
/* Copy @num_sectors 512-byte sectors from guest memory at @dst_address
   to the backing block device starting at @sector_number.  Returns 0
   on success or the first negative bdrv_write() error. */
static int goldfish_mmc_bdrv_write(struct goldfish_mmc_state *s, int64_t sector_number,
                                   target_phys_addr_t dst_address, int num_sectors)
{
    int i;

    for (i = 0; i < num_sectors; i++) {
        int err;

        /* Stage one sector through the device bounce buffer. */
        cpu_physical_memory_read(dst_address, s->buf, 512);
        err = bdrv_write(s->bs, sector_number, s->buf, 1);
        if (err < 0)
            return err;

        dst_address += 512;
        sector_number += 1;
    }
    return 0;
}
/*
 * Monitor command: dump "value|taint" pairs for @len bytes of guest
 * physical memory starting at @addr, 16 pairs per line.
 */
void do_show_memory_taint_map(struct Monitor *mon, const struct QDict *qdict)
{
    uint64_t target_addr = qdict_get_int(qdict, "addr");
    uint64_t target_length = qdict_get_int(qdict, "len");
    uint64_t i;   /* BUG FIX: was int, a signed/unsigned mismatch
                     against the uint64_t loop bound */
    uint8_t* buf = (uint8_t*)malloc(target_length);

    /* BUG FIX: the allocation was previously checked only AFTER the
     * buffer had already been passed to cpu_physical_memory_read(). */
    if(buf == NULL){
        monitor_printf(mon, "Cannot allocate memory for do_show_memory_taint_map()\n");
        return;
    }

    cpu_physical_memory_read(target_addr, buf, target_length);

    /* BUG FIX: "%ld" is the wrong format for uint64_t on 32-bit
     * hosts; use PRIu64. */
    monitor_printf(mon, "Taint addr %"PRIx64" length %"PRIu64"\n",
                   target_addr, target_length);

    for(i = 0 ; i < target_length ; i++) {
        monitor_printf(mon, "%02x|%02x, ",
                       buf[i], dift_get_memory_dirty(target_addr + i));
        if((i & 0xf) == 0xf)
            monitor_printf(mon, "\n");
    }

    free(buf);   /* BUG FIX: buffer was previously leaked */
}
/* Copy @num_sectors 512-byte sectors from guest memory at @dst_address
   to the backing block device, one sector at a time.  Returns 0 on
   success or the first negative bdrv_write() error.
   NOTE(review): this tree's cpu_physical_memory_read() takes two extra
   trailing arguments; printData presumably selects whether the
   transfer is traced for the matched pid/tid — confirm. */
static int goldfish_mmc_bdrv_write(struct goldfish_mmc_state *s, int64_t sector_number,
                                   hwaddr dst_address, int num_sectors)
{
    int ret;
    int printData = matchMeInPidTid(cpu_single_env);

    while (num_sectors > 0) {
        cpu_physical_memory_read(dst_address, s->buf, 512,
                                 printData, "goldfish_mmc_bdrv_write");
        ret = bdrv_write(s->bs, sector_number, s->buf, 1);
        if (ret < 0)
            return ret;

        dst_address += 512;
        num_sectors -= 1;
        sector_number += 1;
    }
    return 0;
}
/* Audio backend callback: @free_b bytes may be written to the output
   voice.  DMA up to that much from the guest buffer at R_D_ADDR, then
   pulse the DMA-read IRQ once the whole request has been drained. */
static void ac97_out_cb(void *opaque, int free_b)
{
    MilkymistAC97State *s = opaque;
    uint8_t buf[4096];
    uint32_t remaining = s->regs[R_D_REMAINING];
    int temp = audio_MIN(remaining, free_b);
    uint32_t addr = s->regs[R_D_ADDR];
    int transferred = 0;

    trace_milkymist_ac97_out_cb(free_b, remaining);

    /* prevent from raising an IRQ */
    if (temp == 0) {
        return;
    }

    while (temp) {
        int copied, to_copy;

        /* Stage at most one bounce-buffer's worth per iteration. */
        to_copy = audio_MIN(temp, sizeof(buf));
        cpu_physical_memory_read(addr, buf, to_copy);
        copied = AUD_write(s->voice_out, buf, to_copy);
        if (!copied) {
            break;   /* backend cannot accept more right now */
        }
        temp -= copied;
        addr += copied;
        transferred += copied;
    }

    trace_milkymist_ac97_out_cb_transferred(transferred);

    /* Commit progress back to the guest-visible registers. */
    s->regs[R_D_ADDR] = addr;
    s->regs[R_D_REMAINING] -= transferred;

    if ((s->regs[R_D_CTRL] & CTRL_EN) && (s->regs[R_D_REMAINING] == 0)) {
        trace_milkymist_ac97_pulse_irq_dmar();
        qemu_irq_pulse(s->dmar_irq);
    }
}
/*
 * Main PXA2xx DMA engine loop: service every running channel until no
 * more progress can be made.  Re-entrant calls only increment
 * s->running and return; the outermost invocation keeps looping until
 * the count drops back to zero.
 */
static void pxa2xx_dma_run(PXA2xxDMAState *s)
{
    int c, srcinc, destinc;
    uint32_t n, size;
    uint32_t width;
    uint32_t length;
    uint8_t buffer[32];
    PXA2xxDMAChannel *ch;

    /* Nested call: record the request and let the outer frame run. */
    if (s->running ++)
        return;

    while (s->running) {
        s->running = 1;
        for (c = 0; c < s->channels; c ++) {
            ch = &s->chan[c];

            while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
                /* Test for pending requests */
                if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request)
                    break;
                length = ch->cmd & DCMD_LEN;
                size = DCMD_SIZE(ch->cmd);
                width = DCMD_WIDTH(ch->cmd);

                /* Addresses advance only when the command asks for it. */
                srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
                destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;

                while (length) {
                    size = MIN(length, size);

                    /* Stage one burst through the bounce buffer... */
                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_read(ch->src, buffer + n, width);
                        ch->src += srcinc;
                    }

                    /* ...then write it out to the destination. */
                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_write(ch->dest, buffer + n, width);
                        ch->dest += destinc;
                    }

                    length -= size;

                    /* Flow-controlled transfer ran out of requests:
                       raise end-of-receive and honour the stop/jump
                       options before leaving the burst loop. */
                    if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
                        !ch->request) {
                        ch->state |= DCSR_EORINT;
                        if (ch->state & DCSR_EORSTOPEN)
                            ch->state |= DCSR_STOPINTR;
                        if ((ch->state & DCSR_EORJMPEN) &&
                            !(ch->state & DCSR_NODESCFETCH))
                            pxa2xx_dma_descriptor_fetch(s, c);
                        break;
                    }
                }

                /* Write the residual length back into the command. */
                ch->cmd = (ch->cmd & ~DCMD_LEN) | length;

                /* Is the transfer complete now? */
                if (!length) {
                    if (ch->cmd & DCMD_ENDIRQEN)
                        ch->state |= DCSR_ENDINTR;

                    if ((ch->state & DCSR_NODESCFETCH) ||
                        (ch->descr & DDADR_STOP) ||
                        (ch->state & DCSR_EORSTOPEN)) {
                        ch->state |= DCSR_STOPINTR;
                        ch->state &= ~DCSR_RUN;

                        break;
                    }

                    ch->state |= DCSR_STOPINTR;
                    break;
                }
            }
        }

        s->running --;
    }
}
/* MMIO write handler for the goldfish pipe device register bank. */
static void pipe_dev_write(void *opaque, target_phys_addr_t offset, uint32_t value)
{
    PipeDevice *s = (PipeDevice *)opaque;

    switch (offset) {
    case PIPE_REG_COMMAND:
        DR("%s: command=%d (0x%x)", __FUNCTION__, value, value);
        pipeDevice_doCommand(s, value);
        break;

    case PIPE_REG_SIZE:
        DR("%s: size=%d (0x%x)", __FUNCTION__, value, value);
        s->size = value;
        break;

    case PIPE_REG_ADDRESS:
        DR("%s: address=%d (0x%x)", __FUNCTION__, value, value);
        s->address = value;
        break;

    case PIPE_REG_CHANNEL:
        DR("%s: channel=%d (0x%x)", __FUNCTION__, value, value);
        s->channel = value;
        break;

    case PIPE_REG_PARAMS_ADDR_HIGH:
        /* The 64-bit parameter-block address arrives as two 32-bit
           register writes. */
        s->params_addr = (s->params_addr & ~(0xFFFFFFFFULL << 32) ) |
                          ((uint64_t)value << 32);
        break;

    case PIPE_REG_PARAMS_ADDR_LOW:
        s->params_addr = (s->params_addr & ~(0xFFFFFFFFULL) ) | value;
        break;

    case PIPE_REG_ACCESS_PARAMS:
    {
        /* Batched access: load the whole access_params block from
           guest memory, run the command, write the status back. */
        struct access_params aps;
        uint32_t cmd;

        if (s->params_addr == 0)
            break;

        cpu_physical_memory_read(s->params_addr, (void*)&aps,
                                 sizeof(struct access_params));

        s->channel = aps.channel;
        s->size = aps.size;
        s->address = aps.address;
        cmd = aps.cmd;
        /* Only buffer read/write commands may use this fast path. */
        if ((cmd != PIPE_CMD_READ_BUFFER) && (cmd != PIPE_CMD_WRITE_BUFFER))
            break;

        pipeDevice_doCommand(s, cmd);
        aps.result = s->status;
        cpu_physical_memory_write(s->params_addr, (void*)&aps,
                                  sizeof(struct access_params));
    }
    break;

    default:
        D("%s: offset=%d (0x%x) value=%d (0x%x)\n", __FUNCTION__,
          offset, offset, value, value);
        break;
    }
}
/*
 * Execute one NAND controller command.  The *_BATCH variants first
 * refresh the controller registers from a batch_data block in guest
 * memory.  Returns the number of bytes processed, or 0 on any bounds
 * or permission failure.
 */
static
#endif
uint32_t nand_dev_do_cmd(nand_dev_controller_state *s, uint32_t cmd)
{
    uint32_t size;
    uint64_t addr;
    nand_dev *dev;

    if (cmd == NAND_CMD_WRITE_BATCH || cmd == NAND_CMD_READ_BATCH ||
        cmd == NAND_CMD_ERASE_BATCH) {
        /* Batch mode: the guest stages dev/addr/size/data in a
           batch_data block whose address sits in the batch registers. */
        struct batch_data bd;
        uint64_t bd_addr = ((uint64_t)s->batch_addr_high << 32) |
                           s->batch_addr_low;
        cpu_physical_memory_read(bd_addr, (void*)&bd, sizeof(struct batch_data));
        s->dev = bd.dev;
        s->addr_low = bd.addr_low;
        s->addr_high = bd.addr_high;
        s->transfer_size = bd.transfer_size;
        s->data = bd.data;
    }
    addr = s->addr_low | ((uint64_t)s->addr_high << 32);
    size = s->transfer_size;
    if(s->dev >= nand_dev_count)
        return 0;                         /* no such backing device */
    dev = nand_devs + s->dev;

    switch(cmd) {
    case NAND_CMD_GET_DEV_NAME:
        /* Copy (a truncation of) the device name to guest memory. */
        if(size > dev->devname_len)
            size = dev->devname_len;
#ifdef TARGET_I386
        if (kvm_enabled())
            cpu_synchronize_state(cpu_single_env, 0);
#endif
        cpu_memory_rw_debug(cpu_single_env, s->data,
                            (uint8_t*)dev->devname, size, 1);
        return size;
    case NAND_CMD_READ_BATCH:
    case NAND_CMD_READ:
        /* Clamp the transfer to the device size before copying out. */
        if(addr >= dev->max_size)
            return 0;
        if(size > dev->max_size - addr)
            size = dev->max_size - addr;
        if(dev->fd >= 0)
            return nand_dev_read_file(dev, s->data, addr, size);
#ifdef TARGET_I386
        if (kvm_enabled())
            cpu_synchronize_state(cpu_single_env, 0);
#endif
        cpu_memory_rw_debug(cpu_single_env,s->data, &dev->data[addr], size, 1);
        return size;
    case NAND_CMD_WRITE_BATCH:
    case NAND_CMD_WRITE:
        if(dev->flags & NAND_DEV_FLAG_READ_ONLY)
            return 0;
        if(addr >= dev->max_size)
            return 0;
        if(size > dev->max_size - addr)
            size = dev->max_size - addr;
        if(dev->fd >= 0)
            return nand_dev_write_file(dev, s->data, addr, size);
#ifdef TARGET_I386
        if (kvm_enabled())
            cpu_synchronize_state(cpu_single_env, 0);
#endif
        cpu_memory_rw_debug(cpu_single_env,s->data, &dev->data[addr], size, 0);
        return size;
    case NAND_CMD_ERASE_BATCH:
    case NAND_CMD_ERASE:
        if(dev->flags & NAND_DEV_FLAG_READ_ONLY)
            return 0;
        if(addr >= dev->max_size)
            return 0;
        if(size > dev->max_size - addr)
            size = dev->max_size - addr;
        if(dev->fd >= 0)
            return nand_dev_erase_file(dev, addr, size);
        /* In-memory device: erased NAND reads back as all-ones. */
        memset(&dev->data[addr], 0xff, size);
        return size;
    case NAND_CMD_BLOCK_BAD_GET: // no bad block support
        return 0;
    case NAND_CMD_BLOCK_BAD_SET:
        if(dev->flags & NAND_DEV_FLAG_READ_ONLY)
            return 0;
        return 0;   /* accepted but ignored: no bad-block bookkeeping */
    default:
        cpu_abort(cpu_single_env, "nand_dev_do_cmd: Bad command %x\n", cmd);
        return 0;
    }
}
/*
 * DIAGNOSE 0x308: re-IPL / IPL-parameter-block hypercall.  @r1/@r3
 * name the registers carrying the parameter-block address and the
 * subcode; subcodes 5/6 (set/store IPLB) report a return code in
 * register r1+1.
 */
void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3)
{
    uint64_t addr = env->regs[r1];
    uint64_t subcode = env->regs[r3];
    IplParameterBlock *iplb;

    /* Privileged operation: reject problem-state callers. */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, ILEN_LATER_INC);
        return;
    }

    if ((subcode & ~0x0ffffULL) || (subcode > 6)) {
        program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
        return;
    }

    switch (subcode) {
    case 0:
        modified_clear_reset(s390_env_get_cpu(env));
        if (tcg_enabled()) {
            cpu_loop_exit(CPU(s390_env_get_cpu(env)));
        }
        break;
    case 1:
        load_normal_reset(s390_env_get_cpu(env));
        if (tcg_enabled()) {
            cpu_loop_exit(CPU(s390_env_get_cpu(env)));
        }
        break;
    case 3:
        s390_reipl_request();
        if (tcg_enabled()) {
            cpu_loop_exit(CPU(s390_env_get_cpu(env)));
        }
        break;
    case 5:
        /* Set IPLB: r1 must be even and addr 4K-aligned. */
        if ((r1 & 1) || (addr & 0x0fffULL)) {
            program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
            return;
        }
        if (!address_space_access_valid(&address_space_memory, addr,
                                        sizeof(IplParameterBlock), false)) {
            program_interrupt(env, PGM_ADDRESSING, ILEN_LATER_INC);
            return;
        }
        iplb = g_malloc0(sizeof(struct IplParameterBlock));
        cpu_physical_memory_read(addr, iplb, sizeof(struct IplParameterBlock));
        if (!s390_ipl_update_diag308(iplb)) {
            env->regs[r1 + 1] = DIAG_308_RC_OK;
        } else {
            env->regs[r1 + 1] = DIAG_308_RC_INVALID;
        }
        g_free(iplb);
        return;
    case 6:
        /* Store IPLB: copy the current block out to guest memory. */
        if ((r1 & 1) || (addr & 0x0fffULL)) {
            program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
            return;
        }
        if (!address_space_access_valid(&address_space_memory, addr,
                                        sizeof(IplParameterBlock), true)) {
            program_interrupt(env, PGM_ADDRESSING, ILEN_LATER_INC);
            return;
        }
        iplb = s390_ipl_get_iplb();
        if (iplb) {
            cpu_physical_memory_write(addr, iplb,
                                      sizeof(struct IplParameterBlock));
            env->regs[r1 + 1] = DIAG_308_RC_OK;
        } else {
            env->regs[r1 + 1] = DIAG_308_RC_NO_CONF;
        }
        return;
    default:
        hw_error("Unhandled diag308 subcode %" PRIx64, subcode);
        break;
    }
}
/*
 * Parse and execute one line of the qtest control protocol:
 * IRQ interception, port I/O (in/out b/w/l), guest physical memory
 * access (read/write b/w/l/q and bulk hex read/write), and virtual
 * clock manipulation.  Replies "OK ..." or "FAIL/ERR ..." on @chr.
 */
static void qtest_process_command(CharDriverState *chr, gchar **words)
{
    const gchar *command;

    g_assert(words);

    command = words[0];
    /* Echo the raw request into the qtest log, if enabled. */
    if (qtest_log_fp) {
        qemu_timeval tv;
        int i;

        qtest_get_time(&tv);
        fprintf(qtest_log_fp, "[R +" FMT_timeval "]",
                (long) tv.tv_sec, (long) tv.tv_usec);
        for (i = 0; words[i]; i++) {
            fprintf(qtest_log_fp, " %s", words[i]);
        }
        fprintf(qtest_log_fp, "\n");
    }

    g_assert(command);
    if (strcmp(words[0], "irq_intercept_out") == 0
        || strcmp(words[0], "irq_intercept_in") == 0) {
        DeviceState *dev;

        g_assert(words[1]);
        dev = DEVICE(object_resolve_path(words[1], NULL));
        if (!dev) {
            qtest_send_prefix(chr);
            qtest_send(chr, "FAIL Unknown device\n");
            return;
        }

        /* Only one device may be intercepted at a time; re-requesting
           the same device is idempotent. */
        if (irq_intercept_dev) {
            qtest_send_prefix(chr);
            if (irq_intercept_dev != dev) {
                qtest_send(chr, "FAIL IRQ intercept already enabled\n");
            } else {
                qtest_send(chr, "OK\n");
            }
            return;
        }

        /* words[0][14] is 'o' for "..._out", 'i' for "..._in". */
        if (words[0][14] == 'o') {
            qemu_irq_intercept_out(&dev->gpio_out, qtest_irq_handler,
                                   dev->num_gpio_out);
        } else {
            qemu_irq_intercept_in(dev->gpio_in, qtest_irq_handler,
                                  dev->num_gpio_in);
        }
        irq_intercept_dev = dev;
        qtest_send_prefix(chr);
        qtest_send(chr, "OK\n");
    } else if (strcmp(words[0], "outb") == 0 ||
               strcmp(words[0], "outw") == 0 ||
               strcmp(words[0], "outl") == 0) {
        uint16_t addr;
        uint32_t value;

        g_assert(words[1] && words[2]);
        addr = strtoul(words[1], NULL, 0);
        value = strtoul(words[2], NULL, 0);

        /* Dispatch on the size suffix (words[0][3]). */
        if (words[0][3] == 'b') {
            cpu_outb(addr, value);
        } else if (words[0][3] == 'w') {
            cpu_outw(addr, value);
        } else if (words[0][3] == 'l') {
            cpu_outl(addr, value);
        }
        qtest_send_prefix(chr);
        qtest_send(chr, "OK\n");
    } else if (strcmp(words[0], "inb") == 0 ||
               strcmp(words[0], "inw") == 0 ||
               strcmp(words[0], "inl") == 0) {
        uint16_t addr;
        uint32_t value = -1U;

        g_assert(words[1]);
        addr = strtoul(words[1], NULL, 0);

        if (words[0][2] == 'b') {
            value = cpu_inb(addr);
        } else if (words[0][2] == 'w') {
            value = cpu_inw(addr);
        } else if (words[0][2] == 'l') {
            value = cpu_inl(addr);
        }
        qtest_send_prefix(chr);
        qtest_send(chr, "OK 0x%04x\n", value);
    } else if (strcmp(words[0], "writeb") == 0 ||
               strcmp(words[0], "writew") == 0 ||
               strcmp(words[0], "writel") == 0 ||
               strcmp(words[0], "writeq") == 0) {
        uint64_t addr;
        uint64_t value;

        g_assert(words[1] && words[2]);
        addr = strtoull(words[1], NULL, 0);
        value = strtoull(words[2], NULL, 0);

        /* Values are byte-swapped to target order before the write. */
        if (words[0][5] == 'b') {
            uint8_t data = value;
            cpu_physical_memory_write(addr, &data, 1);
        } else if (words[0][5] == 'w') {
            uint16_t data = value;
            tswap16s(&data);
            cpu_physical_memory_write(addr, &data, 2);
        } else if (words[0][5] == 'l') {
            uint32_t data = value;
            tswap32s(&data);
            cpu_physical_memory_write(addr, &data, 4);
        } else if (words[0][5] == 'q') {
            uint64_t data = value;
            tswap64s(&data);
            cpu_physical_memory_write(addr, &data, 8);
        }
        qtest_send_prefix(chr);
        qtest_send(chr, "OK\n");
    } else if (strcmp(words[0], "readb") == 0 ||
               strcmp(words[0], "readw") == 0 ||
               strcmp(words[0], "readl") == 0 ||
               strcmp(words[0], "readq") == 0) {
        uint64_t addr;
        uint64_t value = UINT64_C(-1);

        g_assert(words[1]);
        addr = strtoull(words[1], NULL, 0);

        if (words[0][4] == 'b') {
            uint8_t data;
            cpu_physical_memory_read(addr, &data, 1);
            value = data;
        } else if (words[0][4] == 'w') {
            uint16_t data;
            cpu_physical_memory_read(addr, &data, 2);
            value = tswap16(data);
        } else if (words[0][4] == 'l') {
            uint32_t data;
            cpu_physical_memory_read(addr, &data, 4);
            value = tswap32(data);
        } else if (words[0][4] == 'q') {
            cpu_physical_memory_read(addr, &value, 8);
            tswap64s(&value);
        }
        qtest_send_prefix(chr);
        qtest_send(chr, "OK 0x%016" PRIx64 "\n", value);
    } else if (strcmp(words[0], "read") == 0) {
        uint64_t addr, len, i;
        uint8_t *data;

        g_assert(words[1] && words[2]);
        addr = strtoull(words[1], NULL, 0);
        len = strtoull(words[2], NULL, 0);

        data = g_malloc(len);
        cpu_physical_memory_read(addr, data, len);

        /* Reply with the whole range as one hex string. */
        qtest_send_prefix(chr);
        qtest_send(chr, "OK 0x");
        for (i = 0; i < len; i++) {
            qtest_send(chr, "%02x", data[i]);
        }
        qtest_send(chr, "\n");

        g_free(data);
    } else if (strcmp(words[0], "write") == 0) {
        uint64_t addr, len, i;
        uint8_t *data;
        size_t data_len;

        g_assert(words[1] && words[2] && words[3]);
        addr = strtoull(words[1], NULL, 0);
        len = strtoull(words[2], NULL, 0);

        /* Argument is "0x<hex digits>"; shorter than 3 chars cannot
           even hold the prefix plus one digit. */
        data_len = strlen(words[3]);
        if (data_len < 3) {
            qtest_send(chr, "ERR invalid argument size\n");
            return;
        }

        data = g_malloc(len);
        /* Decode hex pairs after the "0x" prefix; zero-fill any tail
           beyond the supplied digits. */
        for (i = 0; i < len; i++) {
            if ((i * 2 + 4) <= data_len) {
                data[i] = hex2nib(words[3][i * 2 + 2]) << 4;
                data[i] |= hex2nib(words[3][i * 2 + 3]);
            } else {
                data[i] = 0;
            }
        }
        cpu_physical_memory_write(addr, data, len);
        g_free(data);

        qtest_send_prefix(chr);
        qtest_send(chr, "OK\n");
    } else if (qtest_enabled() && strcmp(words[0], "clock_step") == 0) {
        int64_t ns;

        /* Without an argument, step to the next pending deadline. */
        if (words[1]) {
            ns = strtoll(words[1], NULL, 0);
        } else {
            ns = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        }
        qtest_clock_warp(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ns);
        qtest_send_prefix(chr);
        qtest_send(chr, "OK %"PRIi64"\n",
                   (int64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
    } else if (qtest_enabled() && strcmp(words[0], "clock_set") == 0) {
        int64_t ns;

        g_assert(words[1]);
        ns = strtoll(words[1], NULL, 0);
        qtest_clock_warp(ns);
        qtest_send_prefix(chr);
        qtest_send(chr, "OK %"PRIi64"\n",
                   (int64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
    } else {
        qtest_send_prefix(chr);
        qtest_send(chr, "FAIL Unknown command `%s'\n", words[0]);
    }
}
/* DMA the guest-side audio buffer described by @b (address/length)
   into its host shadow copy b->data. */
static void goldfish_audio_buff_read( struct goldfish_audio_buff* b )
{
    cpu_physical_memory_read(b->address, b->data, b->length);
}
static void pxa2xx_update_display(void *opaque) { PXA2xxLCDState *s = (PXA2xxLCDState *) opaque; hwaddr fbptr; int miny, maxy; int ch; if (!(s->control[0] & LCCR0_ENB)) return; pxa2xx_descriptor_load(s); pxa2xx_lcdc_resize(s); miny = s->yres; maxy = 0; s->transp = s->dma_ch[2].up || s->dma_ch[3].up; /* Note: With overlay planes the order depends on LCCR0 bit 25. */ for (ch = 0; ch < PXA_LCDDMA_CHANS; ch ++) if (s->dma_ch[ch].up) { if (!s->dma_ch[ch].source) { pxa2xx_dma_ber_set(s, ch); continue; } fbptr = s->dma_ch[ch].source; if (!((fbptr >= PXA2XX_SDRAM_BASE && fbptr <= PXA2XX_SDRAM_BASE + ram_size) || (fbptr >= PXA2XX_INTERNAL_BASE && fbptr <= PXA2XX_INTERNAL_BASE + PXA2XX_INTERNAL_SIZE))) { pxa2xx_dma_ber_set(s, ch); continue; } if (s->dma_ch[ch].command & LDCMD_PAL) { cpu_physical_memory_read(fbptr, s->dma_ch[ch].pbuffer, MAX(LDCMD_LENGTH(s->dma_ch[ch].command), sizeof(s->dma_ch[ch].pbuffer))); pxa2xx_palette_parse(s, ch, s->bpp); } else { /* Do we need to reparse palette */ if (LCCR4_PALFOR(s->control[4]) != s->pal_for) pxa2xx_palette_parse(s, ch, s->bpp); /* ACK frame start */ pxa2xx_dma_sof_set(s, ch); s->dma_ch[ch].redraw(s, fbptr, &miny, &maxy); s->invalidated = 0; /* ACK frame completed */ pxa2xx_dma_eof_set(s, ch); } } if (s->control[0] & LCCR0_DIS) { /* ACK last frame completed */ s->control[0] &= ~LCCR0_ENB; s->status[0] |= LCSR0_LDD; } if (miny >= 0) { switch (s->orientation) { case 0: dpy_gfx_update(s->con, 0, miny, s->xres, maxy - miny + 1); break; case 90: dpy_gfx_update(s->con, miny, 0, maxy - miny + 1, s->xres); break; case 180: maxy = s->yres - maxy - 1; miny = s->yres - miny - 1; dpy_gfx_update(s->con, 0, maxy, s->xres, miny - maxy + 1); break; case 270: maxy = s->yres - maxy - 1; miny = s->yres - miny - 1; dpy_gfx_update(s->con, maxy, 0, miny - maxy + 1, s->xres); break; } } pxa2xx_lcdc_int_update(s); qemu_irq_raise(s->vsync_cb); }
/*
 * Push data for output channel @c: walk the current data descriptor,
 * feed chunks of at most 2 KiB to the attached client (pushing the
 * context metadata once per context), and advance through the
 * descriptor chain until end-of-list.  Returns 1 if work was done,
 * 0 if the channel was already at EOL.
 */
static int channel_out_run(struct fs_dma_ctrl *ctrl, int c)
{
    uint32_t len;
    uint32_t saved_data_buf;
    unsigned char buf[2 * 1024];
    struct dma_context_metadata meta;
    bool send_context = true;

    if (ctrl->channels[c].eol)
        return 0;

    do {
        bool out_eop;
        D(printf("ch=%d buf=%x after=%x\n",
                 c,
                 (uint32_t)ctrl->channels[c].current_d.buf,
                 (uint32_t)ctrl->channels[c].current_d.after));

        /* Metadata goes out once at the start and again after each
           out_eop boundary. */
        if (send_context) {
            if (ctrl->channels[c].client->client.metadata_push) {
                meta.metadata = ctrl->channels[c].current_d.md;
                ctrl->channels[c].client->client.metadata_push(
                        ctrl->channels[c].client->client.opaque,
                        &meta);
            }
            send_context = false;
        }

        channel_load_d(ctrl, c);
        saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
        /* Remaining bytes in the current descriptor's buffer. */
        len = (uint32_t)(unsigned long)
            ctrl->channels[c].current_d.after;
        len -= saved_data_buf;

        if (len > sizeof buf)
            len = sizeof buf;
        cpu_physical_memory_read (saved_data_buf, buf, len);

        /* End-of-packet only when this chunk exhausts the buffer AND
           the descriptor carries the out_eop flag. */
        out_eop = ((saved_data_buf + len) ==
                   ctrl->channels[c].current_d.after) &&
            ctrl->channels[c].current_d.out_eop;

        D(printf("channel %d pushes %x %u bytes eop=%u\n", c,
                 saved_data_buf, len, out_eop));

        if (ctrl->channels[c].client->client.push)
            ctrl->channels[c].client->client.push(
                    ctrl->channels[c].client->client.opaque,
                    buf, len, out_eop);
        else
            printf("WARNING: DMA ch%d dataloss,"
                   " no attached client.\n", c);

        saved_data_buf += len;

        if (saved_data_buf == (uint32_t)(unsigned long)
                ctrl->channels[c].current_d.after) {
            /* Done. Step to next. */
            if (ctrl->channels[c].current_d.out_eop) {
                send_context = true;
            }
            if (ctrl->channels[c].current_d.intr) {
                /* data intr. */
                D(printf("signal intr %d eol=%d\n",
                         len, ctrl->channels[c].current_d.eol));
                ctrl->channels[c].regs[R_INTR] |= (1 << 2);
                channel_update_irq(ctrl, c);
            }
            channel_store_d(ctrl, c);
            if (ctrl->channels[c].current_d.eol) {
                D(printf("channel %d EOL\n", c));
                ctrl->channels[c].eol = 1;

                /* Mark the context as disabled. */
                ctrl->channels[c].current_c.dis = 1;
                channel_store_c(ctrl, c);

                channel_stop(ctrl, c);
            } else {
                ctrl->channels[c].regs[RW_SAVED_DATA] =
                    (uint32_t)(unsigned long)ctrl->
                        channels[c].current_d.next;
                /* Load new descriptor. */
                channel_load_d(ctrl, c);
                saved_data_buf = (uint32_t)(unsigned long)
                    ctrl->channels[c].current_d.buf;
            }

            ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
            D(dump_d(c, &ctrl->channels[c].current_d));
        }
        ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
    } while (!ctrl->channels[c].eol);
    return 1;
}
/* Read the Host Controller Communications Area from guest memory at
   @addr (offset by the controller-local memory base) into @hcca.
   Always reports success. */
static inline int ohci_read_hcca(OHCIState *ohci,
                                 uint32_t addr, struct ohci_hcca *hcca)
{
    cpu_physical_memory_read(addr + ohci->localmem_base, hcca, sizeof(*hcca));
    return 1;
}
/*
 * Output-channel runner: when the buffer pointer has caught up with
 * the descriptor's 'after' limit, finish the descriptor (interrupts,
 * EOL handling, step to the next one); otherwise push one chunk of at
 * most 2 KiB from guest memory to the attached client.
 */
static void channel_out_run(struct fs_dma_ctrl *ctrl, int c)
{
    uint32_t len;
    uint32_t saved_data_buf;
    unsigned char buf[2 * 1024];

    if (ctrl->channels[c].eol == 1)
        return;

    saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);

    D(printf("buf=%x after=%x saved_data_buf=%x\n",
             (uint32_t)ctrl->channels[c].current_d.buf,
             (uint32_t)ctrl->channels[c].current_d.after,
             saved_data_buf));

    if (saved_data_buf == (uint32_t)ctrl->channels[c].current_d.after) {
        /* Done. Step to next. */
        if (ctrl->channels[c].current_d.out_eop) {
            /* TODO: signal eop to the client. */
            D(printf("signal eop\n"));
        }
        if (ctrl->channels[c].current_d.intr) {
            /* TODO: signal eop to the client. */
            /* data intr. */
            D(printf("signal intr\n"));
            ctrl->channels[c].regs[R_INTR] |= (1 << 2);
            channel_update_irq(ctrl, c);
        }
        if (ctrl->channels[c].current_d.eol) {
            D(printf("channel %d EOL\n", c));
            ctrl->channels[c].eol = 1;
            channel_stop(ctrl, c);
        } else {
            ctrl->channels[c].regs[RW_SAVED_DATA] =
                (uint32_t) ctrl->channels[c].current_d.next;
            /* Load new descriptor. */
            channel_load_d(ctrl, c);
        }

        channel_store_d(ctrl, c);
        D(dump_d(c, &ctrl->channels[c].current_d));
        return;
    }

    /* Remaining bytes in the current buffer, clamped to the bounce
       buffer size. */
    len = (uint32_t) ctrl->channels[c].current_d.after;
    len -= saved_data_buf;

    if (len > sizeof buf)
        len = sizeof buf;
    cpu_physical_memory_read (saved_data_buf, buf, len);

    D(printf("channel %d pushes %x %u bytes\n", c,
             saved_data_buf, len));

    /* TODO: Push content. */
    if (ctrl->channels[c].client->client.push)
        ctrl->channels[c].client->client.push(
                ctrl->channels[c].client->client.opaque, buf, len);
    else
        printf("WARNING: DMA ch%d dataloss, no attached client.\n", c);

    ctrl->channels[c].regs[RW_SAVED_DATA_BUF] += len;
}
/* PCI DMA read callback: fetch @len bytes of guest physical memory at
   @addr into @buf.  @dma_opaque and @do_bswap are accepted for the
   callback signature but unused — no byte-swapping is performed. */
static void pci_physical_memory_read(void *dma_opaque, target_phys_addr_t addr,
                                     uint8_t *buf, int len, int do_bswap)
{
    cpu_physical_memory_read(addr, buf, len);
}