/*
 * Insert one key into the ring buffer.
 *
 * With @overwrite set, a full buffer silently drops its oldest key to
 * make room; otherwise a full buffer rejects the insert.  Keys of size
 * 1, 2 and 4 bytes take a direct-store fast path; any other size goes
 * through vmm_memcpy().
 *
 * Returns TRUE when the key was stored, FALSE on bad arguments or on a
 * full buffer in non-overwrite mode.
 */
bool vmm_ringbuf_enqueue(struct vmm_ringbuf *rb, void *srckey, bool overwrite)
{
	u32 next_read, next_write;
	bool full, stored = FALSE;

	if (!rb || !srckey) {
		return FALSE;
	}

	vmm_spin_lock(&rb->lock);

	/* Successor of each cursor, wrapped modulo key_count. */
	next_read = rb->read_pos + 1;
	if (next_read >= rb->key_count) {
		next_read -= rb->key_count;
	}
	next_write = rb->write_pos + 1;
	if (next_write >= rb->key_count) {
		next_write -= rb->key_count;
	}

	/* Full when advancing the write cursor would catch the read cursor. */
	full = (rb->read_pos == next_write);

	if (overwrite) {
		if (full) {
			/* Drop the oldest key to free exactly one slot. */
			rb->read_pos = next_read;
			rb->avail_count--;
		}
		stored = TRUE;
	} else if (!full) {
		stored = TRUE;
	}

	if (stored) {
		void *slot = rb->keys + (rb->write_pos * rb->key_size);

		/* Fast paths for common key sizes, memcpy otherwise. */
		switch (rb->key_size) {
		case 1:
			*(u8 *)slot = *(u8 *)srckey;
			break;
		case 2:
			*(u16 *)slot = *(u16 *)srckey;
			break;
		case 4:
			*(u32 *)slot = *(u32 *)srckey;
			break;
		default:
			vmm_memcpy(slot, srckey, rb->key_size);
			break;
		}

		rb->write_pos = next_write;
		rb->avail_count++;
	}

	vmm_spin_unlock(&rb->lock);

	return stored;
}
/*
 * Handle a guest write to the SCU register window.
 *
 * @src_mask flags the bits NOT being written, so the effective value is
 * @src with those bits cleared.  Only the Control register (0x00) holds
 * state; Invalidate All (0x0c) is accepted and ignored.
 *
 * Returns VMM_OK on a recognized offset, VMM_EFAIL otherwise.
 */
static int arm11mpcore_scu_write(struct arm11mpcore_priv_state *s,
				 u32 offset, u32 src_mask, u32 src)
{
	int rc = VMM_OK;

	if (!s) {
		return VMM_EFAIL;
	}

	/* Strip the masked-out bits before decoding the register. */
	src &= ~src_mask;

	vmm_spin_lock(&s->lock);

	switch (offset) {
	case 0x00:
		/* Control: only the enable bit is writable. */
		s->scu_control = src & 1;
		break;
	case 0x0c:
		/* Invalidate All: write-only, no effect in emulation. */
		break;
	default:
		rc = VMM_EFAIL;
		break;
	}

	vmm_spin_unlock(&s->lock);

	return rc;
}
/*
 * Handle a guest read from the SCU register window.
 *
 * Stores the register value into @dst.  The Configuration register is
 * synthesized from the configured CPU count (SMP bitmap in bits [7:4],
 * num_cpu - 1 in bits [1:0]); CPU Status and Invalidate All read as 0.
 *
 * Returns VMM_OK on a recognized offset, VMM_EFAIL otherwise.
 */
static int arm11mpcore_scu_read(struct arm11mpcore_priv_state *s,
				u32 offset, u32 *dst)
{
	int rc = VMM_OK;

	if (!s || !dst) {
		return VMM_EFAIL;
	}

	vmm_spin_lock(&s->lock);

	switch (offset) {
	case 0x00:
		/* Control */
		*dst = s->scu_control;
		break;
	case 0x04:
		/* Configuration */
		*dst = (((1 << s->num_cpu) - 1) << 4) | (s->num_cpu - 1);
		break;
	case 0x08:
		/* CPU Status */
		*dst = 0;
		break;
	case 0x0c:
		/* Invalidate all. */
		*dst = 0;
		break;
	default:
		rc = VMM_EFAIL;
		break;
	}

	vmm_spin_unlock(&s->lock);

	return rc;
}
static int pl031_reg_read(struct pl031_state *s, u32 offset, u32 *dst) { int rc = VMM_OK; vmm_spin_lock(&s->lock); if (offset >= 0xFE0 && offset < 0x1000) { *dst = (u32)pl031_id[(offset - 0xFE0) >> 2]; } else {
/*
 * Process IRQ asserted via device emulation framework.
 *
 * The level of this input line is latched as the 16bpp mux control
 * setting used later by the display-update path.
 */
static void pl110_mux_in_irq_handle(u32 irq, int cpu, int level, void *opaque)
{
	struct pl110_state *state = opaque;

	vmm_spin_lock(&state->lock);
	state->mux_ctrl = level;
	vmm_spin_unlock(&state->lock);
}
/* Lock-taking wrapper around __pl110_enabled(). */
static int pl110_enabled(struct pl110_state *s)
{
	int enabled;

	vmm_spin_lock(&s->lock);
	enabled = __pl110_enabled(s);
	vmm_spin_unlock(&s->lock);

	return enabled;
}
static int pl061_emulator_read(struct vmm_emudev *edev, physical_addr_t offset, void *dst, u32 dst_len) { int rc = VMM_OK; u32 regval = 0x0; struct pl061_state *s = edev->priv; vmm_spin_lock(&s->lock); if (offset >= 0xfd0 && offset < 0x1000) { regval = *((u32 *)&s->id[(offset - 0xfd0) >> 2]); } else if (offset < 0x400) {
/*
 * TRUE when the ring buffer holds no keys.
 * A NULL @rb is reported as empty.
 */
bool vmm_ringbuf_isempty(struct vmm_ringbuf *rb)
{
	bool empty;

	if (!rb) {
		return TRUE;
	}

	vmm_spin_lock(&rb->lock);
	/* Coincident cursors mean no pending keys. */
	empty = (rb->read_pos == rb->write_pos);
	vmm_spin_unlock(&rb->lock);

	return empty;
}
/*
 * Number of keys currently queued in the ring buffer.
 * A NULL @rb reports zero.
 */
u32 vmm_ringbuf_avail(struct vmm_ringbuf *rb)
{
	u32 count;

	if (!rb) {
		return 0;
	}

	vmm_spin_lock(&rb->lock);
	count = rb->avail_count;
	vmm_spin_unlock(&rb->lock);

	return count;
}
bool vmm_ringbuf_dequeue(struct vmm_ringbuf *rb, void *dstkey) { u32 read_pos_mod; bool isempty; if (!rb || !dstkey) { return FALSE; } vmm_spin_lock(&rb->lock); isempty = (rb->read_pos == rb->write_pos); if (!isempty) { switch(rb->key_size) { case 1: *((u8 *)dstkey) = *((u8 *)(rb->keys + (rb->read_pos * rb->key_size))); break; case 2: *((u16 *)dstkey) = *((u16 *)(rb->keys + (rb->read_pos * rb->key_size))); break; case 4: *((u32 *)dstkey) = *((u32 *)(rb->keys + (rb->read_pos * rb->key_size))); break; default: vmm_memcpy(dstkey, rb->keys + (rb->read_pos * rb->key_size), rb->key_size); break; }; read_pos_mod = (rb->read_pos + 1); if (rb->key_count <= read_pos_mod) { read_pos_mod -= rb->key_count; } rb->read_pos = read_pos_mod; rb->avail_count--; } vmm_spin_unlock(&rb->lock); return !isempty; }
/* Resize virtual display. */
static void pl110_resize(struct pl110_state *s, int width, int height)
{
	bool resize_needed = FALSE;

	vmm_spin_lock(&s->lock);
	/* Only propagate a real geometry change while the LCD is enabled. */
	if ((width != s->cols || height != s->rows) && __pl110_enabled(s)) {
		resize_needed = TRUE;
	}
	s->cols = width;
	s->rows = height;
	vmm_spin_unlock(&s->lock);

	/* The surface resize callout is made outside the state lock. */
	if (resize_needed) {
		vmm_vdisplay_surface_gfx_resize(s->vdis, width, height);
	}
}
/*
 * TRUE when the ring buffer cannot take another key without an
 * overwrite.  A NULL @rb is reported as not full.
 */
bool vmm_ringbuf_isfull(struct vmm_ringbuf *rb)
{
	u32 next_write;
	bool full;

	if (!rb) {
		return FALSE;
	}

	vmm_spin_lock(&rb->lock);
	next_write = rb->write_pos + 1;
	if (next_write >= rb->key_count) {
		next_write -= rb->key_count;
	}
	/* One slot is kept unused so that full and empty are distinct. */
	full = (rb->read_pos == next_write);
	vmm_spin_unlock(&rb->lock);

	return full;
}
/*
 * Peek at the key @index slots past the current read position without
 * consuming it.  Returns FALSE on bad arguments or @index >= key_count.
 *
 * NOTE(review): @index is bounded only by key_count, not avail_count,
 * so peeking past the queued entries returns stale slot contents --
 * confirm callers stay within vmm_ringbuf_avail().
 */
bool vmm_ringbuf_getkey(struct vmm_ringbuf *rb, u32 index, void *dstkey)
{
	void *slot;

	if (!rb || !dstkey) {
		return FALSE;
	}
	if (index >= rb->key_count) {
		return FALSE;
	}

	vmm_spin_lock(&rb->lock);

	/* Translate to an absolute slot index, wrapped modulo key_count. */
	index += rb->read_pos;
	if (index >= rb->key_count) {
		index -= rb->key_count;
	}
	slot = rb->keys + (index * rb->key_size);

	/* Fast paths for common key sizes, memcpy otherwise. */
	switch (rb->key_size) {
	case 1:
		*(u8 *)dstkey = *(u8 *)slot;
		break;
	case 2:
		*(u16 *)dstkey = *(u16 *)slot;
		break;
	case 4:
		*(u32 *)dstkey = *(u32 *)slot;
		break;
	default:
		vmm_memcpy(dstkey, slot, rb->key_size);
		break;
	}

	vmm_spin_unlock(&rb->lock);

	return TRUE;
}
/*
 * Redraw the guest framebuffer onto the host surface.
 *
 * Snapshots the draw configuration (draw function, palette, geometry,
 * framebuffer base) under the state lock, then performs the actual
 * surface update outside the lock.  Does nothing when the LCD is
 * disabled or the surface reports an unusable color depth.
 */
static void pl110_display_update(struct vmm_vdisplay *vdis,
				 struct vmm_surface *sf)
{
	drawfn fn;
	drawfn *fntable;
	u32 *palette;
	physical_addr_t gphys;
	int cols, rows, first, last;
	int dest_width, src_width, bpp_offset;
	struct pl110_state *s = vmm_vdisplay_priv(vdis);

	if (!pl110_enabled(s)) {
		return;
	}

	/* Pick the draw-function table, host pixel size in bytes, and
	 * palette matching the host surface depth. */
	switch (vmm_surface_bits_per_pixel(sf)) {
	case 0:
		return;
	case 8:
		fntable = pl110_draw_fn_8;
		dest_width = 1;
		palette = s->palette8;
		break;
	case 15:
		fntable = pl110_draw_fn_15;
		dest_width = 2;
		palette = s->palette15;
		break;
	case 16:
		fntable = pl110_draw_fn_16;
		dest_width = 2;
		palette = s->palette16;
		break;
	case 24:
		fntable = pl110_draw_fn_24;
		dest_width = 3;
		palette = s->palette32;
		break;
	case 32:
		fntable = pl110_draw_fn_32;
		dest_width = 4;
		palette = s->palette32;
		break;
	default:
		vmm_printf("%s: Bad color depth\n", __func__);
		return;
	};

	vmm_spin_lock(&s->lock);

	/* BGR vs RGB selects the base half of the draw-function table.
	 * NOTE(review): offset 24 presumably spans the BGR variants of
	 * all bpp modes in fntable -- confirm against the table layout. */
	if (s->cr & PL110_CR_BGR) {
		bpp_offset = 0;
	} else {
		bpp_offset = 24;
	}

	if ((s->version != PL111) && (s->bpp == BPP_16)) {
		/* The PL110's native 16 bit mode is 5551; however
		 * most boards with a PL110 implement an external
		 * mux which allows bits to be reshuffled to give
		 * 565 format. The mux is typically controlled by
		 * an external system register.
		 * This is controlled by a GPIO input pin
		 * so boards can wire it up to their register.
		 *
		 * The PL111 straightforwardly implements both
		 * 5551 and 565 under control of the bpp field
		 * in the LCDControl register.
		 */
		switch (s->mux_ctrl) {
		case 3: /* 565 BGR */
			bpp_offset = (BPP_16_565 - BPP_16);
			break;
		case 1: /* 5551 */
			break;
		case 0: /* 888; also if we have loaded vmstate from an old version */
		case 2: /* 565 RGB */
		default: /* treat as 565 but honour BGR bit */
			bpp_offset += (BPP_16_565 - BPP_16);
			break;
		};
	}

	/* Byte/pixel ordering bits select the table variant:
	 * +8 for big-endian byte order, +16 for big-endian pixel order. */
	if (s->cr & PL110_CR_BEBO) {
		fn = fntable[s->bpp + 8 + bpp_offset];
	} else if (s->cr & PL110_CR_BEPO) {
		fn = fntable[s->bpp + 16 + bpp_offset];
	} else {
		fn = fntable[s->bpp + bpp_offset];
	}

	/* Guest scanline width in bytes, derived from bits per pixel. */
	src_width = s->cols;
	switch (s->bpp) {
	case BPP_1:
		src_width >>= 3;
		break;
	case BPP_2:
		src_width >>= 2;
		break;
	case BPP_4:
		src_width >>= 1;
		break;
	case BPP_8:
		break;
	case BPP_16:
	case BPP_16_565:
	case BPP_12:
		src_width <<= 1;
		break;
	case BPP_32:
		src_width <<= 2;
		break;
	};
	/* Host scanline width in bytes. */
	dest_width *= s->cols;
	gphys = s->upbase;
	cols = s->cols;
	rows = s->rows;

	vmm_spin_unlock(&s->lock);

	/* Redraw dirty scanlines; vmm_surface_update() reports the
	 * first/last changed rows so only that band is pushed out. */
	first = 0;
	vmm_surface_update(sf, s->guest, gphys, cols, rows,
			   src_width, dest_width, 0, fn, palette,
			   &first, &last);
	if (first >= 0) {
		vmm_vdisplay_surface_gfx_update(vdis, 0, first, cols,
						last - first + 1);
	}
}