static int heap_info(struct vmm_chardev *cdev, bool is_normal,
		     virtual_addr_t heap_va, u64 heap_sz,
		     u64 heap_hksz, u64 heap_freesz)
{
	int rc;
	physical_addr_t heap_pa;
	u64 pre, heap_usesz;

	if (is_normal) {
		heap_usesz = heap_sz - heap_hksz - heap_freesz;
	} else {
		heap_usesz = heap_sz - heap_freesz;
	}

	if ((rc = vmm_host_va2pa(heap_va, &heap_pa))) {
		vmm_cprintf(cdev, "Error: Failed to get heap base PA\n");
		return rc;
	}

	vmm_cprintf(cdev, "Base Virtual Addr  : ");
	if (sizeof(virtual_addr_t) == sizeof(u64)) {
		vmm_cprintf(cdev, "0x%016llx\n", heap_va);
	} else {
		vmm_cprintf(cdev, "0x%08x\n", heap_va);
	}

	vmm_cprintf(cdev, "Base Physical Addr : ");
	if (sizeof(physical_addr_t) == sizeof(u64)) {
		vmm_cprintf(cdev, "0x%016llx\n", heap_pa);
	} else {
		vmm_cprintf(cdev, "0x%08x\n", heap_pa);
	}

	pre = 1000; /* Division correct up to 3 decimal points */

	vmm_cprintf(cdev, "House-Keeping Size : ");
	heap_hksz = (heap_hksz * pre) >> 10;
	vmm_cprintf(cdev, "%"PRId64".%03"PRId64" KB\n",
		    udiv64(heap_hksz, pre), umod64(heap_hksz, pre));

	vmm_cprintf(cdev, "Used Space Size    : ");
	heap_usesz = (heap_usesz * pre) >> 10;
	vmm_cprintf(cdev, "%"PRId64".%03"PRId64" KB\n",
		    udiv64(heap_usesz, pre), umod64(heap_usesz, pre));

	vmm_cprintf(cdev, "Free Space Size    : ");
	heap_freesz = (heap_freesz * pre) >> 10;
	vmm_cprintf(cdev, "%"PRId64".%03"PRId64" KB\n",
		    udiv64(heap_freesz, pre), umod64(heap_freesz, pre));

	vmm_cprintf(cdev, "Total Size         : ");
	heap_sz = (heap_sz * pre) >> 10;
	vmm_cprintf(cdev, "%"PRId64".%03"PRId64" KB\n",
		    udiv64(heap_sz, pre), umod64(heap_sz, pre));

	return VMM_OK;
}
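/*
 * A minimal standalone sketch of the fixed-point trick used by heap_info()
 * above: scaling a byte count by 1000 before the divide-by-1024 shift yields
 * kilobytes with three decimal places using only integer math. Plain C99 is
 * assumed, the helper name print_kb() is hypothetical, and the native / and
 * % operators stand in for Xvisor's udiv64()/umod64().
 */
#include <stdint.h>
#include <stdio.h>

static void print_kb(const char *label, uint64_t bytes)
{
	/* (bytes * 1000) >> 10 == KB value scaled by 1000 */
	uint64_t scaled = (bytes * 1000) >> 10;

	printf("%s: %llu.%03llu KB\n", label,
	       (unsigned long long)(scaled / 1000),
	       (unsigned long long)(scaled % 1000));
}

int main(void)
{
	print_kb("Used Space Size ", 1536);	/* prints 1.500 KB */
	print_kb("Total Size      ", 1 << 20);	/* prints 1024.000 KB */
	return 0;
}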
static int cmd_host_cpu_stats(struct vmm_chardev *cdev)
{
	int rc;
	char str[16];
	u32 c, p, khz, util;
	unsigned long hwid;

	vmm_cprintf(cdev, "----------------------------------------"
			  "----------------------------------------\n");
	vmm_cprintf(cdev, " %4s %14s %15s %13s %12s %16s\n",
		    "CPU#", "HWID", "Speed (MHz)", "Util. (%)",
		    "IRQs (%)", "Active VCPUs");
	vmm_cprintf(cdev, "----------------------------------------"
			  "----------------------------------------\n");

	for_each_online_cpu(c) {
		vmm_cprintf(cdev, " %4d", c);

		rc = vmm_smp_map_hwid(c, &hwid);
		if (rc)
			return rc;
		vmm_snprintf(str, sizeof(str), "0x%lx", hwid);
		vmm_cprintf(cdev, " %14s", str);

		khz = vmm_delay_estimate_cpu_khz(c);
		vmm_cprintf(cdev, " %11d.%03d",
			    udiv32(khz, 1000), umod32(khz, 1000));

		util = udiv64(vmm_scheduler_idle_time(c) * 1000,
			      vmm_scheduler_get_sample_period(c));
		util = (util > 1000) ? 1000 : util;
		util = 1000 - util;
		vmm_cprintf(cdev, " %11d.%01d",
			    udiv32(util, 10), umod32(util, 10));

		util = udiv64(vmm_scheduler_irq_time(c) * 1000,
			      vmm_scheduler_get_sample_period(c));
		util = (util > 1000) ? 1000 : util;
		vmm_cprintf(cdev, " %10d.%01d",
			    udiv32(util, 10), umod32(util, 10));

		util = 1;
		for (p = VMM_VCPU_MIN_PRIORITY;
		     p <= VMM_VCPU_MAX_PRIORITY; p++) {
			util += vmm_scheduler_ready_count(c, p);
		}
		vmm_cprintf(cdev, " %15d ", util);

		vmm_cprintf(cdev, "\n");
	}

	vmm_cprintf(cdev, "----------------------------------------"
			  "----------------------------------------\n");

	return VMM_OK;
}
static long _ipu_pixel_clk_div_round_rate(struct clk_hw *hw,
					  unsigned long rate,
					  unsigned long *parent_clk_rate)
{
	u64 div, final_rate;
	u64 remainder;
	u64 parent_rate = (unsigned long long)(*parent_clk_rate) * 16;

	/*
	 * Calculate divider
	 * Fractional part is 4 bits,
	 * so simply multiply by 2^4 to get fractional part.
	 */
	div = parent_rate;
	div = do_udiv64(div, rate, &remainder);

	/* Round the divider value */
	if (remainder > (rate / 2))
		div++;

	if (div < 0x10)		/* Min DI disp clock divider is 1 */
		div = 0x10;

	if (div & ~0xFEF)
		div &= 0xFF8;
	else {
		/* Round up divider if it gets us closer to desired pix clk */
		if ((div & 0xC) == 0xC) {
			div += 0x10;
			div &= ~0xF;
		}
	}

	final_rate = parent_rate;
	final_rate = udiv64(final_rate, div);

	return final_rate;
}
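/*
 * A small sketch of the rounded fixed-point division performed by
 * _ipu_pixel_clk_div_round_rate() above: the parent rate is scaled by 16
 * (4 fractional bits), divided, and the remainder decides whether the
 * divider rounds up. Plain C99 is assumed, div_4_4() is a hypothetical
 * helper name, and / and % replace do_udiv64().
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t div_4_4(uint64_t parent_rate, uint64_t rate)
{
	uint64_t scaled = parent_rate * 16;	/* 4.4 fixed point */
	uint64_t div = scaled / rate;
	uint64_t rem = scaled % rate;

	if (rem > rate / 2)	/* round to nearest */
		div++;
	if (div < 0x10)		/* minimum divider is 1.0 */
		div = 0x10;
	return div;
}

int main(void)
{
	/* 264 MHz parent, 66 MHz target -> divider 4.0 (0x40) */
	printf("div = 0x%llx\n",
	       (unsigned long long)div_4_4(264000000ULL, 66000000ULL));
	return 0;
}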
int arch_guest_init(struct vmm_guest *guest)
{
	struct riscv_guest_priv *priv;

	if (!guest->reset_count) {
		guest->arch_priv = vmm_malloc(sizeof(struct riscv_guest_priv));
		if (!guest->arch_priv) {
			return VMM_ENOMEM;
		}
		priv = riscv_guest_priv(guest);

		priv->time_offset = vmm_manager_guest_reset_timestamp(guest);
		priv->time_offset =
			priv->time_offset * vmm_timer_clocksource_frequency();
		priv->time_offset = udiv64(priv->time_offset, 1000000000ULL);

		priv->pgtbl = cpu_mmu_pgtbl_alloc(PGTBL_STAGE2);
		if (!priv->pgtbl) {
			vmm_free(guest->arch_priv);
			guest->arch_priv = NULL;
			return VMM_ENOMEM;
		}
	}

	return VMM_OK;
}
static int cmd_host_pagepool_info(struct vmm_chardev *cdev)
{
	int i;
	u32 entry_count = 0;
	u32 hugepage_count = 0;
	u32 page_count = 0;
	u32 page_avail_count = 0;
	virtual_size_t space = 0;
	u64 pre, sz;

	for (i = 0; i < VMM_PAGEPOOL_MAX; i++) {
		space += vmm_pagepool_space(i);
		entry_count += vmm_pagepool_entry_count(i);
		hugepage_count += vmm_pagepool_hugepage_count(i);
		page_count += vmm_pagepool_page_count(i);
		page_avail_count += vmm_pagepool_page_avail_count(i);
	}

	vmm_cprintf(cdev, "Entry Count      : %d (0x%08x)\n",
		    entry_count, entry_count);
	vmm_cprintf(cdev, "Hugepage Count   : %d (0x%08x)\n",
		    hugepage_count, hugepage_count);
	vmm_cprintf(cdev, "Avail Page Count : %d (0x%08x)\n",
		    page_avail_count, page_avail_count);
	vmm_cprintf(cdev, "Total Page Count : %d (0x%08x)\n",
		    page_count, page_count);

	sz = space;
	pre = 1000;
	sz = (sz * pre) >> 10;
	vmm_cprintf(cdev, "Total Space      : %"PRId64".%03"PRId64" KB\n",
		    udiv64(sz, pre), umod64(sz, pre));

	return VMM_OK;
}
static void __cpuinit twd_caliberate_freq(virtual_addr_t base,
					  virtual_addr_t ref_counter_addr,
					  u32 ref_counter_freq)
{
	u32 i, count, ref_count;
	u64 tmp;

	/* enable, no interrupt or reload */
	vmm_writel(0x1, (void *)(base + TWD_TIMER_CONTROL));

	/* read reference counter */
	ref_count = vmm_readl((void *)ref_counter_addr);

	/* maximum value */
	vmm_writel(0xFFFFFFFFU, (void *)(base + TWD_TIMER_COUNTER));

	/* wait some arbitrary amount of time */
	for (i = 0; i < 1000000; i++);

	/* read counter */
	count = vmm_readl((void *)(base + TWD_TIMER_COUNTER));
	count = 0xFFFFFFFFU - count;

	/* take reference counter difference */
	ref_count = vmm_readl((void *)ref_counter_addr) - ref_count;

	/* disable */
	vmm_writel(0x0, (void *)(base + TWD_TIMER_CONTROL));

	/* determine frequency */
	tmp = (u64)count * (u64)ref_counter_freq;
	twd_freq_hz = udiv64(tmp, ref_count);
}
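/*
 * The calibration arithmetic above in one line: if the TWD counter advanced
 * `count` ticks while a reference counter of known frequency advanced
 * `ref_count` ticks, the TWD frequency follows by ratio. A standalone
 * sketch assuming plain C99; calibrate_hz() is a hypothetical helper name.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t calibrate_hz(uint32_t count, uint32_t ref_count,
			     uint32_t ref_freq_hz)
{
	/* freq = count * ref_freq / ref_count; widen to 64 bits first */
	return (uint32_t)(((uint64_t)count * ref_freq_hz) / ref_count);
}

int main(void)
{
	/* 2,500,000 TWD ticks against 24,000 ticks of a 24 kHz reference
	 * (i.e. one second of wall time) -> 2.5 MHz */
	printf("%u Hz\n", calibrate_hz(2500000, 24000, 24000));
	return 0;
}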
static struct rbd *__rbd_create(struct vmm_device *dev,
				const char *name,
				physical_addr_t pa,
				physical_size_t sz)
{
	struct rbd *d;

	if (!name) {
		return NULL;
	}

	d = vmm_zalloc(sizeof(struct rbd));
	if (!d) {
		goto free_nothing;
	}
	d->addr = pa;
	d->size = sz;

	d->bdev = vmm_blockdev_alloc();
	if (!d->bdev) {
		goto free_rbd;
	}

	/* Setup block device instance */
	strncpy(d->bdev->name, name, VMM_FIELD_NAME_SIZE);
	strncpy(d->bdev->desc, "RAM backed block device",
		VMM_FIELD_DESC_SIZE);
	d->bdev->dev = dev;
	d->bdev->flags = VMM_BLOCKDEV_RW;
	d->bdev->start_lba = 0;
	d->bdev->num_blocks = udiv64(d->size, RBD_BLOCK_SIZE);
	d->bdev->block_size = RBD_BLOCK_SIZE;

	/* Setup request queue for block device instance */
	d->bdev->rq->make_request = rbd_make_request;
	d->bdev->rq->abort_request = rbd_abort_request;
	d->bdev->rq->priv = d;

	/* Register block device instance */
	if (vmm_blockdev_register(d->bdev)) {
		goto free_bdev;
	}

	/* Reserve RAM space */
	if (vmm_host_ram_reserve(d->addr, d->size)) {
		goto unreg_bdev;
	}

	return d;

unreg_bdev:
	vmm_blockdev_unregister(d->bdev);
free_bdev:
	vmm_blockdev_free(d->bdev);
free_rbd:
	vmm_free(d);
free_nothing:
	return NULL;
}
static u32 pl031_get_count(struct pl031_state *s)
{
	/* This assumes qemu_get_clock_ns returns the time since
	 * the machine was created.
	 */
	return s->tick_offset +
	       (u32)udiv64(vmm_timer_timestamp() - s->tick_tstamp,
			   1000000000);
}
static u32 ns_to_micros(u64 count)
{
	if (count > ((u64)0xffffffff * 1000)) {
		count = (u64)0xffffffff * 1000;
	}

	return (u32)udiv64(count, 1000);
}
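/*
 * The clamp-then-divide pattern of ns_to_micros() above, standalone:
 * saturate the nanosecond count so that the microsecond result still fits
 * in 32 bits, then divide. Plain C99 assumed; ns_to_us_sat() is a
 * hypothetical name.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ns_to_us_sat(uint64_t ns)
{
	const uint64_t max_ns = (uint64_t)0xffffffff * 1000;

	if (ns > max_ns)
		ns = max_ns;	/* saturate instead of silently truncating */
	return (uint32_t)(ns / 1000);
}

int main(void)
{
	printf("%u\n", ns_to_us_sat(1500ULL));	/* 1 */
	printf("%u\n", ns_to_us_sat(~0ULL));	/* 4294967295 (saturated) */
	return 0;
}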
static void cmd_host_cpu_stats(struct vmm_chardev *cdev)
{
	u32 c, p, khz, util;

	vmm_cprintf(cdev, "----------------------------------------"
			  "-------------------------\n");
	vmm_cprintf(cdev, " %4s %15s %13s %12s %16s\n",
		    "CPU#", "Speed (MHz)", "Util. (%)",
		    "IRQs (%)", "Active VCPUs");
	vmm_cprintf(cdev, "----------------------------------------"
			  "-------------------------\n");

	for_each_online_cpu(c) {
		vmm_cprintf(cdev, " %4d", c);

		khz = vmm_delay_estimate_cpu_khz(c);
		vmm_cprintf(cdev, " %11d.%03d",
			    udiv32(khz, 1000), umod32(khz, 1000));

		util = udiv64(vmm_scheduler_idle_time(c) * 1000,
			      vmm_scheduler_get_sample_period(c));
		util = (util > 1000) ? 1000 : util;
		util = 1000 - util;
		vmm_cprintf(cdev, " %11d.%01d",
			    udiv32(util, 10), umod32(util, 10));

		util = udiv64(vmm_scheduler_irq_time(c) * 1000,
			      vmm_scheduler_get_sample_period(c));
		util = (util > 1000) ? 1000 : util;
		vmm_cprintf(cdev, " %10d.%01d",
			    udiv32(util, 10), umod32(util, 10));

		util = 1;
		for (p = VMM_VCPU_MIN_PRIORITY;
		     p <= VMM_VCPU_MAX_PRIORITY; p++) {
			util += vmm_scheduler_ready_count(c, p);
		}
		vmm_cprintf(cdev, " %15d ", util);

		vmm_cprintf(cdev, "\n");
	}

	vmm_cprintf(cdev, "----------------------------------------"
			  "-------------------------\n");
}
static int imx_pwm_config_v2(struct pwm_chip *chip,
			     struct pwm_device *pwm,
			     int duty_ns, int period_ns)
{
	struct imx_chip *imx = to_imx_chip(chip);
	unsigned long long c;
	unsigned long period_cycles, duty_cycles, prescale;
	u32 cr;

	c = clk_get_rate(imx->clk_per);
	c = c * period_ns;
	c = udiv64(c, 1000000000);
	period_cycles = c;

	prescale = period_cycles / 0x10000 + 1;

	period_cycles = udiv32(period_cycles, prescale);
	c = (unsigned long long)period_cycles * duty_ns;
	c = udiv64(c, period_ns);
	duty_cycles = c;

	/*
	 * according to imx pwm RM, the real period value should be
	 * PERIOD value in PWMPR plus 2.
	 */
	if (period_cycles > 2)
		period_cycles -= 2;
	else
		period_cycles = 0;

	writel(duty_cycles, imx->mmio_base + MX3_PWMSAR);
	writel(period_cycles, imx->mmio_base + MX3_PWMPR);

	cr = MX3_PWMCR_PRESCALER(prescale) |
	     MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
	     MX3_PWMCR_DBGEN | MX3_PWMCR_CLKSRC_IPG_HIGH;

	if (test_bit(PWMF_ENABLED, &pwm->flags))
		cr |= MX3_PWMCR_EN;

	writel(cr, imx->mmio_base + MX3_PWMCR);

	return 0;
}
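/*
 * The two unit conversions at the heart of imx_pwm_config_v2() above,
 * sketched standalone: period_ns -> clock cycles, then duty_ns -> cycles as
 * a proportion of the period. Plain C99 is assumed, ns_to_cycles() is a
 * hypothetical helper name, and / replaces udiv64().
 */
#include <stdint.h>
#include <stdio.h>

static unsigned long ns_to_cycles(unsigned long clk_hz, int ns)
{
	/* cycles = clk_hz * ns / 1e9, computed in 64 bits */
	return (unsigned long)(((uint64_t)clk_hz * ns) / 1000000000ULL);
}

int main(void)
{
	unsigned long period = ns_to_cycles(66000000, 1000000); /* 66000 */
	unsigned long duty = (unsigned long)
		(((uint64_t)period * 250000) / 1000000); /* 25% -> 16500 */

	printf("period=%lu duty=%lu\n", period, duty);
	return 0;
}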
static int printi(char **out, u32 *out_len, struct vmm_chardev *cdev,
		  long long i, int b, int sg, int width, int flags,
		  int letbase)
{
	char print_buf[PRINT_BUF_LEN];
	char *s;
	int t, neg = 0, pc = 0;
	unsigned long long u = i;

	if (sg && b == 10 && i < 0) {
		neg = 1;
		u = -i;
	}

	s = print_buf + PRINT_BUF_LEN - 1;
	*s = '\0';

	if (!u) {
		*--s = '0';
	} else {
		while (u) {
			t = umod64(u, b);
			if (t >= 10)
				t += letbase - '0' - 10;
			*--s = t + '0';
			u = udiv64(u, b);
		}
	}

	if (flags & PAD_ALTERNATE) {
		if ((b == 16) && (letbase == 'A')) {
			*--s = 'X';
		} else if ((b == 16) && (letbase == 'a')) {
			*--s = 'x';
		}
		*--s = '0';
	}

	if (neg) {
		if (width && (flags & PAD_ZERO)) {
			printc(out, out_len, cdev, '-');
			++pc;
			--width;
		} else {
			*--s = '-';
		}
	}

	return pc + prints(out, out_len, cdev, s, width, flags);
}
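/*
 * The digit-extraction loop shared by both printi() variants in this
 * collection, as a standalone 64-bit itoa sketch: digits come out
 * least-significant first via repeated modulo/divide, so the buffer is
 * filled backwards. Plain C99 assumed; u64_to_str() is a hypothetical
 * name, and % and / replace umod64()/udiv64().
 */
#include <stdint.h>
#include <stdio.h>

static char *u64_to_str(uint64_t u, int base, char letbase, char *end)
{
	char *s = end;

	*s = '\0';
	if (!u)
		*--s = '0';
	while (u) {
		int t = (int)(u % base);
		/* digits above 9 map into the a-z or A-Z range */
		*--s = (t < 10) ? ('0' + t) : (letbase + t - 10);
		u /= base;
	}
	return s;
}

int main(void)
{
	char buf[65];	/* worst case: 64 binary digits plus NUL */

	printf("%s\n", u64_to_str(48879, 16, 'a', buf + 64)); /* beef */
	printf("%s\n", u64_to_str(255, 2, 'a', buf + 64));    /* 11111111 */
	return 0;
}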
/* Must be called with sp805->lock held */
static u32 _sp805_reg_value(struct sp805_state *sp805)
{
	u64 load = vmm_timer_timestamp();

	if (!_sp805_enabled(sp805)) {
		/* Interrupt disabled: counter is disabled */
		return sp805->freezed_value;
	}

	if (likely(load > sp805->timestamp)) {
		load = load - sp805->timestamp;
	} else {
		load = sp805->timestamp + ~load + 1;
	}

	return udiv64(load, 1000);
}
static int __init bcm2836_early_init(struct vmm_devtree_node *node)
{
	int rc = VMM_OK;
	void *base;
	u32 prescaler, cntfreq;
	virtual_addr_t base_va;
	struct vmm_devtree_node *np;

	np = vmm_devtree_find_compatible(NULL, NULL, "brcm,bcm2836-l1-intc");
	if (!np) {
		return VMM_ENODEV;
	}

	rc = vmm_devtree_regmap(np, &base_va, 0);
	if (rc) {
		goto done;
	}
	base = (void *)base_va;

	cntfreq = generic_timer_reg_read(GENERIC_TIMER_REG_FREQ);
	switch (cntfreq) {
	case 19200000:
		prescaler = 0x80000000;
		break;
	case 1000000:
		prescaler = 0x06AAAAAB;
		break;
	default:
		prescaler = (u32)udiv64((u64)0x80000000 * (u64)cntfreq,
					(u64)19200000);
		break;
	}
	if (!prescaler) {
		rc = VMM_EINVALID;
		goto done_unmap;
	}

	vmm_writel(prescaler, base + LOCAL_TIMER_PRESCALER);

done_unmap:
	vmm_devtree_regunmap(np, base_va, 0);
done:
	vmm_devtree_dref_node(np);
	return rc;
}
struct mempool *mempool_ram_create(u32 entity_size,
				   u32 page_count,
				   u32 mem_flags)
{
	u32 e;
	virtual_addr_t va;
	struct mempool *mp;

	if (!entity_size ||
	    ((VMM_PAGE_SIZE * page_count) < entity_size)) {
		return NULL;
	}

	mp = vmm_zalloc(sizeof(struct mempool));
	if (!mp) {
		return NULL;
	}

	mp->type = MEMPOOL_TYPE_RAM;
	mp->entity_size = entity_size;
	mp->entity_count =
		udiv64((VMM_PAGE_SIZE * page_count), entity_size);

	mp->f = fifo_alloc(sizeof(virtual_addr_t), mp->entity_count);
	if (!mp->f) {
		vmm_free(mp);
		return NULL;
	}

	mp->entity_base = vmm_host_alloc_pages(page_count, mem_flags);
	if (!mp->entity_base) {
		fifo_free(mp->f);
		vmm_free(mp);
		return NULL;
	}
	mp->d.ram.page_count = page_count;
	mp->d.ram.mem_flags = mem_flags;

	for (e = 0; e < mp->entity_count; e++) {
		va = mp->entity_base + e * entity_size;
		fifo_enqueue(mp->f, &va, FALSE);
	}

	return mp;
}
struct mempool *mempool_raw_create(u32 entity_size,
				   physical_addr_t phys,
				   virtual_size_t size,
				   u32 mem_flags)
{
	u32 e;
	virtual_addr_t va;
	struct mempool *mp;

	if (!entity_size || (size < entity_size)) {
		return NULL;
	}

	mp = vmm_zalloc(sizeof(struct mempool));
	if (!mp) {
		return NULL;
	}

	mp->type = MEMPOOL_TYPE_RAW;
	mp->entity_size = entity_size;
	mp->entity_count = udiv64(size, entity_size);

	mp->f = fifo_alloc(sizeof(virtual_addr_t), mp->entity_count);
	if (!mp->f) {
		vmm_free(mp);
		return NULL;
	}

	mp->entity_base = vmm_host_memmap(phys, size, mem_flags);
	if (!mp->entity_base) {
		fifo_free(mp->f);
		vmm_free(mp);
		return NULL;
	}
	mp->d.raw.phys = phys;
	mp->d.raw.size = size;
	mp->d.raw.mem_flags = mem_flags;

	for (e = 0; e < mp->entity_count; e++) {
		va = mp->entity_base + e * entity_size;
		fifo_enqueue(mp->f, &va, FALSE);
	}

	return mp;
}
void generic_timer_vcpu_context_restore(void *vcpu_ptr, void *context)
{
	struct vmm_vcpu *vcpu = vcpu_ptr;
	struct generic_timer_context *cntx = context;

	if (!cntx) {
		return;
	}

	vmm_timer_event_stop(&cntx->phys_ev);
	vmm_timer_event_stop(&cntx->virt_ev);

	if (!cntx->cntvoff) {
		cntx->cntvoff = vmm_manager_guest_reset_timestamp(vcpu->guest);
		cntx->cntvoff = cntx->cntvoff * generic_timer_hz;
		cntx->cntvoff = udiv64(cntx->cntvoff, 1000000000ULL);
	}
}
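/*
 * Both arch_guest_init() (RISC-V, earlier in this collection) and this ARM
 * context-restore hook convert a reset timestamp in nanoseconds into timer
 * ticks: ticks = ns * freq / 1e9. A standalone sketch assuming plain C99;
 * ns_to_ticks() is a hypothetical helper name. As in the originals, the
 * product is formed before the divide, so very large timestamps can
 * overflow 64 bits.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t ns_to_ticks(uint64_t ns, uint64_t freq_hz)
{
	/* scale first, divide last, to keep sub-second precision */
	return (ns * freq_hz) / 1000000000ULL;
}

int main(void)
{
	/* 1.5 s at a 24 MHz counter -> 36,000,000 ticks */
	printf("%llu\n",
	       (unsigned long long)ns_to_ticks(1500000000ULL, 24000000ULL));
	return 0;
}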
static int printi(char **out, struct vmm_chardev *cdev, long long i,
		  int b, int sg, int width, int pad, int letbase)
{
	char print_buf[PRINT_BUF_LEN];
	char *s;
	int t, neg = 0, pc = 0;
	unsigned long long u = i;

	if (i == 0) {
		print_buf[0] = '0';
		print_buf[1] = '\0';
		return prints(out, cdev, print_buf, width, pad);
	}

	if (sg && b == 10 && i < 0) {
		neg = 1;
		u = -i;
	}

	s = print_buf + PRINT_BUF_LEN - 1;
	*s = '\0';

	while (u) {
		t = umod64(u, b);
		if (t >= 10)
			t += letbase - '0' - 10;
		*--s = t + '0';
		u = udiv64(u, b);
	}

	if (neg) {
		if (width && (pad & PAD_ZERO)) {
			printc(out, cdev, '-');
			++pc;
			--width;
		} else {
			*--s = '-';
		}
	}

	return pc + prints(out, cdev, s, width, pad);
}
static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *parent_rate)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long m, n;
	u64 ret;

	if (!rate || rate >= *parent_rate)
		return *parent_rate;

	if (fd->approximation)
		fd->approximation(hw, rate, parent_rate, &m, &n);
	else
		clk_fd_general_approximation(hw, rate, parent_rate, &m, &n);

	ret = (u64)*parent_rate * m;
	ret = udiv64(ret, n);

	return ret;
}
static unsigned long _ipu_pixel_clk_div_recalc_rate(struct clk_hw *hw,
						    unsigned long parent_rate)
{
	struct clk_di_div *di_div = to_clk_di_div(hw);
	struct ipu_soc *ipu = ipu_get_soc(di_div->ipu_id);
	u32 div;
	u64 final_rate = (unsigned long long)parent_rate * 16;

	_ipu_get(ipu);
	div = ipu_di_read(ipu, di_div->di_id, DI_BS_CLKGEN0);
	_ipu_put(ipu);

	pr_debug("ipu_di%d read BS_CLKGEN0 div:%d, final_rate:%lld, prate:%ld\n",
		 di_div->di_id, div, final_rate, parent_rate);

	if (div == 0)
		return 0;

	final_rate = udiv64(final_rate, div);

	return (unsigned long)final_rate;
}
static void clk_fd_general_approximation(struct clk_hw *hw,
					 unsigned long rate,
					 unsigned long *parent_rate,
					 unsigned long *m, unsigned long *n)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long scale;

	/*
	 * Get rate closer to *parent_rate to guarantee there is no overflow
	 * for m and n. In the result it will be the nearest rate left shifted
	 * by (scale - fd->nwidth) bits.
	 */
	scale = fls_long((unsigned long)udiv64(*parent_rate, rate) - 1);
	if (scale > fd->nwidth)
		rate <<= scale - fd->nwidth;

	rational_best_approximation(rate, *parent_rate,
				    GENMASK(fd->mwidth - 1, 0),
				    GENMASK(fd->nwidth - 1, 0),
				    m, n);
}
static int cmd_profile_count_iterator(void *data, const char *name,
				      unsigned long addr)
{
	struct count_record *ptr = data;
	u32 index = kallsyms_get_symbol_pos(addr, NULL, NULL);
	u32 count = vmm_profiler_get_function_count(addr);
	u64 time = vmm_profiler_get_function_total_time(addr);

	ptr += index;

	/* It would be nice to have the strncpy variant */
	strcpy(ptr->function_name, name);
	ptr->function_name[39] = 0;
	ptr->count = count;
	ptr->total_time = time;
	if (count) {
		ptr->time_per_call = udiv64(time, (u64)count);
	}

	return VMM_OK;
}
u64 generic_timer_wakeup_timeout(void)
{
	u32 vtval = 0, ptval = 0;
	u64 nsecs = 0;

	if (generic_timer_hz == 0) {
		return 0;
	}

	if (generic_timer_reg_read(GENERIC_TIMER_REG_PHYS_CTRL) &
	    GENERIC_TIMER_CTRL_ENABLE) {
		ptval = generic_timer_reg_read(GENERIC_TIMER_REG_PHYS_TVAL);
	}

	if (generic_timer_reg_read(GENERIC_TIMER_REG_VIRT_CTRL) &
	    GENERIC_TIMER_CTRL_ENABLE) {
		vtval = generic_timer_reg_read(GENERIC_TIMER_REG_VIRT_TVAL);
	}

	if ((ptval > 0) && (vtval > 0)) {
		nsecs = (ptval > vtval) ? vtval : ptval;
	} else {
		nsecs = (ptval > vtval) ? ptval : vtval;
	}

	if (nsecs) {
		if (generic_timer_hz == 100000000) {
			nsecs = nsecs * 10;
		} else {
			nsecs = udiv64((nsecs * 1000000000),
				       (u64)generic_timer_hz);
		}
	}

	return nsecs;
}
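/*
 * The tick-to-nanosecond conversion at the end of
 * generic_timer_wakeup_timeout() above, standalone: the general formula is
 * ns = ticks * 1e9 / hz, with a multiply-by-10 shortcut when hz is exactly
 * 100 MHz. Plain C99 assumed; ticks_to_ns() is a hypothetical helper name.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t ticks_to_ns(uint64_t ticks, uint64_t hz)
{
	if (hz == 100000000ULL)
		return ticks * 10;	/* 1e9 / 1e8 == 10, skip the divide */
	return (ticks * 1000000000ULL) / hz;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)ticks_to_ns(240, 24000000ULL));
	/* prints 10000 (240 ticks at 24 MHz = 10 us) */
	printf("%llu\n", (unsigned long long)ticks_to_ns(5, 100000000ULL));
	/* prints 50 via the fast path */
	return 0;
}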
static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long flags = 0;
	unsigned long m, n;
	u32 val;
	u64 ret;

	if (fd->lock)
		spin_lock_irqsave(fd->lock, flags);
#if 0
	else
		__acquire(fd->lock);
#endif

	val = clk_readl(fd->reg);

	if (fd->lock)
		spin_unlock_irqrestore(fd->lock, flags);
#if 0
	else
		__release(fd->lock);
#endif

	m = (val & fd->mmask) >> fd->mshift;
	n = (val & fd->nmask) >> fd->nshift;

	if (!n || !m)
		return parent_rate;

	ret = (u64)parent_rate * m;
	ret = udiv64(ret, n);

	return ret;
}
int netstack_send_echo(u8 *ripaddr, u16 size, u16 seqno,
		       struct netstack_echo_reply *reply)
{
	u64 ts;
	int s, i, err;
	char buf[64];
	size_t fromlen, off, len = sizeof(struct icmp_echo_hdr) + size;
	ip_addr_t to_addr, from_addr;
	struct sockaddr_in sock;
	struct ip_hdr *iphdr;
	struct icmp_echo_hdr *iecho;

	LWIP_ASSERT("ping_size is too big\n", len <= 0xffff);

	/* Prepare target address */
	IP4_ADDR(&to_addr, ripaddr[0], ripaddr[1], ripaddr[2], ripaddr[3]);

	/* Open RAW socket */
	if ((s = lwip_socket(AF_INET, SOCK_RAW, IP_PROTO_ICMP)) < 0) {
		vmm_printf("%s: failed to open ICMP socket\n", __func__);
		return VMM_EFAIL;
	}

	/* Set socket option */
	i = PING_RCV_TIMEO;
	lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &i, sizeof(i));

	/* Prepare socket address */
	sock.sin_len = sizeof(sock);
	sock.sin_family = AF_INET;
	inet_addr_from_ipaddr(&sock.sin_addr, &to_addr);

	/* Prepare ECHO request */
	iecho = (struct icmp_echo_hdr *)vmm_zalloc(len);
	if (!iecho) {
		return VMM_ENOMEM;
	}
	ICMPH_TYPE_SET(iecho, ICMP_ECHO);
	ICMPH_CODE_SET(iecho, 0);
	iecho->chksum = 0;
	iecho->id = PING_ID;
	iecho->seqno = htons(seqno);
	for (i = 0; i < size; i++) {
		((char *)iecho)[sizeof(struct icmp_echo_hdr) + i] = (char)i;
	}
	iecho->chksum = inet_chksum(iecho, len);

	/* Send ECHO request */
	err = lwip_sendto(s, iecho, len, 0,
			  (struct sockaddr *)&sock, sizeof(sock));
	vmm_free(iecho);
	if (!err) {
		return VMM_EFAIL;
	}

	/* Get reference timestamp */
	ts = vmm_timer_timestamp();

	/* Wait for ECHO reply */
	err = VMM_EFAIL;
	off = lwip_recvfrom(s, buf, sizeof(buf), 0,
			    (struct sockaddr *)&sock, (socklen_t *)&fromlen);
	if (off >= (sizeof(struct ip_hdr) + sizeof(struct icmp_echo_hdr))) {
		inet_addr_to_ipaddr(&from_addr, &sock.sin_addr);
		iphdr = (struct ip_hdr *)buf;
		iecho = (struct icmp_echo_hdr *)(buf + (IPH_HL(iphdr) * 4));
		if ((iecho->id == PING_ID) &&
		    (iecho->seqno == htons(seqno))) {
			reply->ripaddr[0] = ip4_addr1(&from_addr);
			reply->ripaddr[1] = ip4_addr2(&from_addr);
			reply->ripaddr[2] = ip4_addr3(&from_addr);
			reply->ripaddr[3] = ip4_addr4(&from_addr);
			reply->ttl = IPH_TTL(iphdr);
			reply->len = len;
			reply->seqno = seqno;
			reply->rtt =
				udiv64(vmm_timer_timestamp() - ts, 1000);
			err = VMM_OK;
		}
	}
	while (off < len) {
		off = lwip_recvfrom(s, buf, sizeof(buf), 0,
				    (struct sockaddr *)&sock,
				    (socklen_t *)&fromlen);
	}

	/* Close RAW socket */
	lwip_close(s);

	return err;
}
int netstack_send_echo(u8 *ripaddr, u16 size, u16 seqno,
		       struct netstack_echo_reply *reply)
{
	int i, rc;
	u64 timeout = PING_DELAY_NS;
	struct pbuf *p;
	struct icmp_echo_hdr *iecho;
	size_t len = sizeof(struct icmp_echo_hdr) + size;

	LWIP_ASSERT("ping_size <= 0xffff", len <= 0xffff);

	/* Lock ping context for atomicity */
	vmm_mutex_lock(&lns.ping_lock);

	/* Alloc ping pbuf */
	p = pbuf_alloc(PBUF_IP, (u16_t)len, PBUF_RAM);
	if (!p) {
		vmm_mutex_unlock(&lns.ping_lock);
		return VMM_ENOMEM;
	}
	if ((p->len != p->tot_len) || (p->next != NULL)) {
		pbuf_free(p);
		vmm_mutex_unlock(&lns.ping_lock);
		return VMM_EFAIL;
	}

	/* Prepare ECHO request */
	iecho = (struct icmp_echo_hdr *)p->payload;
	ICMPH_TYPE_SET(iecho, ICMP_ECHO);
	ICMPH_CODE_SET(iecho, 0);
	iecho->chksum = 0;
	iecho->id = PING_ID;
	iecho->seqno = htons(seqno);
	for (i = 0; i < size; i++) {
		((char *)iecho)[sizeof(struct icmp_echo_hdr) + i] = (char)i;
	}
	iecho->chksum = inet_chksum(iecho, len);

	/* Prepare target address */
	IP4_ADDR(&lns.ping_addr, ripaddr[0], ripaddr[1],
		 ripaddr[2], ripaddr[3]);

	/* Save ping info */
	lns.ping_seq_num = seqno;
	lns.ping_reply = reply;
	lns.ping_recv_tstamp = 0;
	lns.ping_send_tstamp = vmm_timer_timestamp();
	lns.ping_recv_tstamp = lns.ping_send_tstamp + PING_DELAY_NS;

	/* Send ping packet */
	raw_sendto(lns.ping_pcb, p, &lns.ping_addr);

	/* Wait for ping to complete with timeout */
	timeout = lns.ping_recv_tstamp - lns.ping_send_tstamp;
	rc = vmm_completion_wait_timeout(&lns.ping_done, &timeout);
	timeout = lns.ping_recv_tstamp - lns.ping_send_tstamp;
	lns.ping_reply->rtt = udiv64(timeout, 1000);

	/* Free ping pbuf */
	pbuf_free(p);

	/* Clear ping reply pointer */
	lns.ping_reply = NULL;

	/* Unlock ping context */
	vmm_mutex_unlock(&lns.ping_lock);

	return rc;
}
u64 vmm_blockdev_rw(struct vmm_blockdev *bdev,
		    enum vmm_request_type type,
		    u8 *buf, u64 off, u64 len)
{
	u8 *tbuf;
	u64 tmp, first_lba, first_off, first_len;
	u64 middle_lba, middle_len;
	u64 last_lba, last_len;

	BUG_ON(!vmm_scheduler_orphan_context());

	if (!buf || !bdev || !len) {
		return 0;
	}

	if ((type != VMM_REQUEST_READ) && (type != VMM_REQUEST_WRITE)) {
		return 0;
	}

	if ((type == VMM_REQUEST_WRITE) &&
	    !(bdev->flags & VMM_BLOCKDEV_RW)) {
		return 0;
	}

	tmp = bdev->num_blocks * bdev->block_size;
	if ((off >= tmp) || ((off + len) > tmp)) {
		return 0;
	}

	first_lba = udiv64(off, bdev->block_size);
	first_off = off - first_lba * bdev->block_size;
	if (first_off) {
		first_len = bdev->block_size - first_off;
		first_len = (first_len < len) ? first_len : len;
	} else {
		if (len < bdev->block_size) {
			first_len = len;
		} else {
			first_len = 0;
		}
	}
	off += first_len;
	len -= first_len;

	middle_lba = udiv64(off, bdev->block_size);
	middle_len = udiv64(len, bdev->block_size) * bdev->block_size;
	off += middle_len;
	len -= middle_len;

	last_lba = udiv64(off, bdev->block_size);
	last_len = len;

	if (first_len || last_len) {
		tbuf = vmm_malloc(bdev->block_size);
		if (!tbuf) {
			return 0;
		}
	}

	tmp = 0;

	if (first_len) {
		if (blockdev_rw_blocks(bdev, VMM_REQUEST_READ,
				       tbuf, first_lba, 1)) {
			goto done;
		}
		if (type == VMM_REQUEST_WRITE) {
			memcpy(&tbuf[first_off], buf, first_len);
			if (blockdev_rw_blocks(bdev, VMM_REQUEST_WRITE,
					       tbuf, first_lba, 1)) {
				goto done;
			}
		} else {
			memcpy(buf, &tbuf[first_off], first_len);
		}
		buf += first_len;
		tmp += first_len;
	}

	if (middle_len) {
		if (blockdev_rw_blocks(bdev, type, buf, middle_lba,
				       udiv64(middle_len, bdev->block_size))) {
			goto done;
		}
		buf += middle_len;
		tmp += middle_len;
	}

	if (last_len) {
		if (blockdev_rw_blocks(bdev, VMM_REQUEST_READ,
				       tbuf, last_lba, 1)) {
			goto done;
		}
		if (type == VMM_REQUEST_WRITE) {
			memcpy(&tbuf[0], buf, last_len);
			if (blockdev_rw_blocks(bdev, VMM_REQUEST_WRITE,
					       tbuf, last_lba, 1)) {
				goto done;
			}
		} else {
			memcpy(buf, &tbuf[0], last_len);
		}
		tmp += last_len;
	}

done:
	if (first_len || last_len) {
		vmm_free(tbuf);
	}

	return tmp;
}
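/*
 * The geometry computed at the top of vmm_blockdev_rw() above, isolated: an
 * arbitrary (off, len) byte range is split into a partial head block, a run
 * of whole middle blocks, and a partial tail. A standalone sketch assuming
 * plain C99; it slightly simplifies the head rule (a short block-aligned
 * range falls to the tail here, whereas the original handles it as head),
 * but the resulting covered ranges are equivalent.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t bs = 512;	/* block size */
	uint64_t off = 700, len = 2000;

	uint64_t first_lba = off / bs;
	uint64_t first_off = off - first_lba * bs;
	uint64_t first_len = first_off ? bs - first_off : 0;

	if (first_len > len)
		first_len = len;
	off += first_len;
	len -= first_len;

	uint64_t middle_lba = off / bs;
	uint64_t middle_len = (len / bs) * bs;	/* whole blocks only */

	off += middle_len;
	len -= middle_len;

	printf("head: lba=%llu off=%llu len=%llu\n",
	       (unsigned long long)first_lba, (unsigned long long)first_off,
	       (unsigned long long)first_len);
	printf("middle: lba=%llu len=%llu\n",
	       (unsigned long long)middle_lba,
	       (unsigned long long)middle_len);
	printf("tail: lba=%llu len=%llu\n",
	       (unsigned long long)(off / bs), (unsigned long long)len);
	return 0;
}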
static PTREE foldUInt64( CGOP op, PTREE left, signed_64 v2 )
{
    signed_64 test;
    signed_64 v1;
    float_handle t0, t1, t2;

    v1 = left->u.int64_constant;
    switch( op ) {
    case CO_PLUS:
        U64Add( &v1, &v2, &left->u.int64_constant );
        break;
    case CO_MINUS:
        U64Sub( &v1, &v2, &left->u.int64_constant );
        break;
    case CO_TIMES:
        t0 = BFCnvU64F( v1 );
        t1 = BFCnvU64F( v2 );
        t2 = BFMul( t0, t1 );
        test = BFCnvF64( t2 );
        BFFree( t0 );
        BFFree( t1 );
        BFFree( t2 );
        U64Mul( &v1, &v2, &left->u.int64_constant );
        if( 0 != U64Cmp( &test, &left->u.int64_constant ) ) {
            CErr1( ANSI_ARITHMETIC_OVERFLOW );
        }
        break;
    case CO_DIVIDE: {
        signed_64 rem;
        udiv64( &v1, &v2, &left->u.int64_constant, &rem );
    } break;
    case CO_PERCENT: {
        signed_64 div;
        udiv64( &v1, &v2, &div, &left->u.int64_constant );
    } break;
    case CO_AND:
        left->u.int64_constant.u._32[0] = v1.u._32[0] & v2.u._32[0];
        left->u.int64_constant.u._32[1] = v1.u._32[1] & v2.u._32[1];
        break;
    case CO_OR:
        left->u.int64_constant.u._32[0] = v1.u._32[0] | v2.u._32[0];
        left->u.int64_constant.u._32[1] = v1.u._32[1] | v2.u._32[1];
        break;
    case CO_XOR:
        left->u.int64_constant.u._32[0] = v1.u._32[0] ^ v2.u._32[0];
        left->u.int64_constant.u._32[1] = v1.u._32[1] ^ v2.u._32[1];
        break;
    case CO_RSHIFT:
        U64ShiftR( &v1, v2.u._32[ I64LO32 ], &left->u.int64_constant );
        break;
    case CO_LSHIFT:
        U64ShiftL( &v1, v2.u._32[ I64LO32 ], &left->u.int64_constant );
        break;
    case CO_EQ:
        left = makeBooleanConst( left, 0 == U64Cmp( &v1, &v2 ) );
        return( left );
    case CO_NE:
        left = makeBooleanConst( left, 0 != U64Cmp( &v1, &v2 ) );
        return( left );
    case CO_GT:
        left = makeBooleanConst( left, 0 < U64Cmp( &v1, &v2 ) );
        return( left );
    case CO_LE:
        left = makeBooleanConst( left, 0 >= U64Cmp( &v1, &v2 ) );
        return( left );
    case CO_LT:
        left = makeBooleanConst( left, 0 > U64Cmp( &v1, &v2 ) );
        return( left );
    case CO_GE:
        left = makeBooleanConst( left, 0 <= U64Cmp( &v1, &v2 ) );
        return( left );
    case CO_AND_AND:
        left = makeBooleanConst( left, !Zero64( &v1 ) && !Zero64( &v2 ) );
        return( left );
    case CO_OR_OR:
        left = makeBooleanConst( left, !Zero64( &v1 ) || !Zero64( &v2 ) );
        return( left );
    case CO_COMMA:
        left->u.int64_constant = v2;
        break;
    default:
        return( NULL );
    }
    left->op = PT_INT_CONSTANT;
    return( left );
}
static int cmd_ping_exec(struct vmm_chardev *cdev, int argc, char **argv)
{
	u16 sent, rcvd, count = 1, size = 56;
	struct netstack_echo_reply reply;
	char ip_addr_str[20];
	u32 rtt_usecs, rtt_msecs;
	u64 min_rtt = -1, max_rtt = 0, avg_rtt = 0;
	u8 ipaddr[4];

	if ((argc < 2) || (argc > 4)) {
		cmd_ping_usage(cdev);
		return VMM_EFAIL;
	}
	if (argc > 2) {
		count = atoi(argv[2]);
	}
	if (argc > 3) {
		size = atoi(argv[3]);
	}

	str2ipaddr(ipaddr, argv[1]);

	vmm_cprintf(cdev, "PING (%s) %d(%d) bytes of data.\n",
		    argv[1], size, (size + IP4_HLEN + ICMP_HLEN));

	netstack_prefetch_arp_mapping(ipaddr);

	for (sent = 0, rcvd = 0; sent < count; sent++) {
		if (!netstack_send_echo(ipaddr, size, sent, &reply)) {
			if (reply.rtt < min_rtt)
				min_rtt = reply.rtt;
			if (reply.rtt > max_rtt)
				max_rtt = reply.rtt;
			avg_rtt += reply.rtt;
			rtt_msecs = udiv64(reply.rtt, 1000);
			rtt_usecs = umod64(reply.rtt, 1000);
			ip4addr_to_str(ip_addr_str,
				       (const u8 *)&reply.ripaddr);
			vmm_cprintf(cdev, "%d bytes from %s: seq=%d "
				    "ttl=%d time=%d.%03dms\n",
				    reply.len, ip_addr_str, reply.seqno,
				    reply.ttl, rtt_msecs, rtt_usecs);
			rcvd++;
		}
	}

	if (min_rtt == -1) {
		min_rtt = 0;
	}
	if (rcvd) {
		avg_rtt = udiv64(avg_rtt, rcvd);
	} else {
		avg_rtt = 0;
	}

	vmm_cprintf(cdev, "\n----- %s ping statistics -----\n", argv[1]);
	vmm_cprintf(cdev, "%d packets transmitted, %d packets received\n",
		    sent, rcvd);
	vmm_cprintf(cdev, "round-trip min/avg/max = ");
	rtt_msecs = udiv64(min_rtt, 1000);
	rtt_usecs = umod64(min_rtt, 1000);
	vmm_cprintf(cdev, "%d.%03d/", rtt_msecs, rtt_usecs);
	rtt_msecs = udiv64(avg_rtt, 1000);
	rtt_usecs = umod64(avg_rtt, 1000);
	vmm_cprintf(cdev, "%d.%03d/", rtt_msecs, rtt_usecs);
	rtt_msecs = udiv64(max_rtt, 1000);
	rtt_usecs = umod64(max_rtt, 1000);
	vmm_cprintf(cdev, "%d.%03d ms\n", rtt_msecs, rtt_usecs);

	return VMM_OK;
}
/**
 * vsscanf - Unformat a buffer into a list of arguments
 * @buf: input buffer
 * @fmt: format of buffer
 * @args: arguments
 */
int vsscanf(const char *buf, const char *fmt, va_list args)
{
	const char *str = buf;
	char *next;
	char digit;
	int num = 0;
	u8 qualifier;
	unsigned int base;
	union {
		long long s;
		unsigned long long u;
	} val;
	s16 field_width;
	bool is_sign;

	while (*fmt) {
		/* skip any white space in format */
		/* white space in format matches any amount of
		 * white space, including none, in the input.
		 */
		if (isspace(*fmt)) {
			fmt = skip_spaces(++fmt);
			str = skip_spaces(str);
		}

		/* anything that is not a conversion must match exactly */
		if (*fmt != '%' && *fmt) {
			if (*fmt++ != *str++)
				break;
			continue;
		}

		if (!*fmt)
			break;
		++fmt;

		/* skip this conversion.
		 * advance both strings to next white space
		 */
		if (*fmt == '*') {
			if (!*str)
				break;
			while (!isspace(*fmt) && *fmt != '%' && *fmt)
				fmt++;
			while (!isspace(*str) && *str)
				str++;
			continue;
		}

		/* get field width */
		field_width = -1;
		if (isdigit(*fmt)) {
			field_width = skip_atoi(&fmt);
			if (field_width <= 0)
				break;
		}

		/* get conversion qualifier */
		qualifier = -1;
		if (*fmt == 'h' || _tolower(*fmt) == 'l' ||
		    _tolower(*fmt) == 'z') {
			qualifier = *fmt++;
			if (unlikely(qualifier == *fmt)) {
				if (qualifier == 'h') {
					qualifier = 'H';
					fmt++;
				} else if (qualifier == 'l') {
					qualifier = 'L';
					fmt++;
				}
			}
		}

		if (!*fmt)
			break;

		if (*fmt == 'n') {
			/* return number of characters read so far */
			*va_arg(args, int *) = str - buf;
			++fmt;
			continue;
		}

		if (!*str)
			break;

		base = 10;
		is_sign = false;

		switch (*fmt++) {
		case 'c':
		{
			char *s = (char *)va_arg(args, char *);
			if (field_width == -1)
				field_width = 1;
			do {
				*s++ = *str++;
			} while (--field_width > 0 && *str);
			num++;
		}
		continue;
		case 's':
		{
			char *s = (char *)va_arg(args, char *);
			if (field_width == -1)
				field_width = SHRT_MAX;
			/* first, skip leading white space in buffer */
			str = skip_spaces(str);

			/* now copy until next white space */
			while (*str && !isspace(*str) && field_width--)
				*s++ = *str++;
			*s = '\0';
			num++;
		}
		continue;
		case 'o':
			base = 8;
			break;
		case 'x':
		case 'X':
			base = 16;
			break;
		case 'i':
			base = 0;
			/* fall through */
		case 'd':
			is_sign = true;
			/* fall through */
		case 'u':
			break;
		case '%':
			/* looking for '%' in str */
			if (*str++ != '%')
				return num;
			continue;
		default:
			/* invalid format; stop here */
			return num;
		}

		/* have some sort of integer conversion.
		 * first, skip white space in buffer.
		 */
		str = skip_spaces(str);

		digit = *str;
		if (is_sign && digit == '-')
			digit = *(str + 1);

		if (!digit ||
		    (base == 16 && !isxdigit(digit)) ||
		    (base == 10 && !isdigit(digit)) ||
		    (base == 8 && (!isdigit(digit) || digit > '7')) ||
		    (base == 0 && !isdigit(digit)))
			break;

		if (is_sign)
			val.s = qualifier != 'L' ?
				strtol(str, &next, base) :
				strtoll(str, &next, base);
		else
			val.u = qualifier != 'L' ?
				strtoul(str, &next, base) :
				strtoull(str, &next, base);

		if (field_width > 0 && next - str > field_width) {
			if (base == 0)
				_parse_integer_fixup_radix(str, &base);
			while (next - str > field_width) {
				if (is_sign)
					val.s = sdiv64(val.s, base);
				else
					val.u = udiv64(val.u, base);
				--next;
			}
		}

		switch (qualifier) {
		case 'H':	/* that's 'hh' in format */
			if (is_sign)
				*va_arg(args, signed char *) = val.s;
			else
				*va_arg(args, unsigned char *) = val.u;
			break;
		case 'h':
			if (is_sign)
				*va_arg(args, short *) = val.s;
			else
				*va_arg(args, unsigned short *) = val.u;
			break;
		case 'l':
			if (is_sign)
				*va_arg(args, long *) = val.s;
			else
				*va_arg(args, unsigned long *) = val.u;
			break;
		case 'L':
			if (is_sign)
				*va_arg(args, long long *) = val.s;
			else
				*va_arg(args, unsigned long long *) = val.u;
			break;
		case 'Z':
		case 'z':
			*va_arg(args, size_t *) = val.u;
			break;
		default:
			if (is_sign)
				*va_arg(args, int *) = val.s;
			else
				*va_arg(args, unsigned int *) = val.u;
			break;
		}
		num++;

		if (!next)
			break;
		str = next;
	}

	return num;
}
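/*
 * The field-width fixup near the end of vsscanf() above, standalone: when
 * the converter consumed more digits than the requested field width allows,
 * dropping one trailing digit is one division by the numeric base. Plain
 * C99 assumed; trim_digits() is a hypothetical helper name, and / replaces
 * udiv64().
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t trim_digits(uint64_t val, unsigned int base,
			    int ndigits, int field_width)
{
	while (ndigits-- > field_width)
		val /= base;	/* discard the least-significant digit */
	return val;
}

int main(void)
{
	/* "%3u" against "12345": keep only the first three digits */
	printf("%llu\n",
	       (unsigned long long)trim_digits(12345, 10, 5, 3)); /* 123 */
	return 0;
}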