/*
 * Tear down an active profiling session: clear the enabled flag, stop
 * VIRQ sample delivery and, in the primary domain only, hand the
 * hardware counters back to Xen.  The hypercall results are ignored --
 * shutdown is best effort.
 */
static void xenoprof_shutdown(void)
{
	xenoprof_enabled = 0;

	HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL);

	if (is_primary) {
		HYPERVISOR_xenoprof_op(XENOPROF_release_counters, NULL);
		active_defined = 0;
	}

	unbind_virq();
}
/*
 * Probe Xen for xenoprof support and, on success, map the shared
 * sample buffers into this domain and hand the xenoprof operations
 * table to the oprofile core.
 *
 * @ops: oprofile operations table filled in on success.
 * Returns 0 on success, or the hypercall error / -ENOMEM if the
 * shared buffer cannot be mapped.
 */
int __init oprofile_arch_init(struct oprofile_operations * ops)
{
	struct xenoprof_init init;
	struct xenoprof_buf * buf;
	int vm_size;
	int npages;
	int ret;
	int i;

	init.max_samples = 16;
	ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);

	if (!ret) {
		pgprot_t prot = __pgprot(_KERNPG_TABLE);

		num_events = init.num_events;
		is_primary = init.is_primary;
		nbuf = init.nbuf;

		/* just in case - make sure we do not overflow event list
		   (i.e. counter_config list) */
		if (num_events > OP_MAX_COUNTER)
			num_events = OP_MAX_COUNTER;

		/* Round the total buffer size up to whole pages. */
		npages = (init.bufsize * nbuf - 1) / PAGE_SIZE + 1;
		vm_size = npages * PAGE_SIZE;

		shared_buffer = (char *)vm_map_xen_pages(init.buf_maddr,
							 vm_size, prot);
		if (!shared_buffer) {
			ret = -ENOMEM;
			goto out;
		}

		/* Index the per-VCPU sample buffers by their vcpu_id. */
		for (i=0; i< nbuf; i++) {
			buf = (struct xenoprof_buf*)
				&shared_buffer[i * init.bufsize];
			BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
			xenoprof_buf[buf->vcpu_id] = buf;
		}

		/* cpu_type is detected by Xen */
		/* Terminator is written first; strncpy with SIZE-1 never
		 * touches the last byte, so cpu_type stays NUL-terminated. */
		cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
		strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
		xenoprof_ops.cpu_type = cpu_type;

		init_driverfs();
		using_xenoprof = 1;
		*ops = xenoprof_ops;

		/* No overflow IRQ bound yet on any CPU. */
		for (i=0; i<NR_CPUS; i++)
			ovf_irq[i] = -1;

		active_defined = 0;
	}
 out:
	printk(KERN_INFO "oprofile_arch_init: ret %d, events %d, "
	       "is_primary %d\n", ret, num_events, is_primary);
	return ret;
}
/*
 * Probe Xen for xenoprof support and, on success, install the
 * xenoprof operations table into the oprofile core.
 *
 * @ops: oprofile operations table filled in on success.
 * Returns 0 on success or the (negative) XENOPROF_init hypercall error.
 */
int __init xenoprofile_init(struct oprofile_operations * ops)
{
	/* Zero-initialize: the final printk reads init.num_events even
	 * when the XENOPROF_init hypercall failed and left the struct
	 * unwritten, which would print uninitialized stack memory. */
	struct xenoprof_init init = { 0 };
	unsigned int i;
	int ret;

	ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
	if (!ret) {
		xenoprof_arch_init_counter(&init);
		xenoprof_is_primary = init.is_primary;

		/* cpu_type is detected by Xen */
		/* Terminator written first; strncpy with SIZE-1 never
		 * touches the last byte, so cpu_type stays NUL-terminated. */
		cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
		strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
		xenoprof_ops.cpu_type = cpu_type;

		init_driverfs();
		using_xenoprof = 1;
		*ops = xenoprof_ops;

		/* No overflow IRQ bound yet on any CPU. */
		for (i=0; i<NR_CPUS; i++)
			ovf_irq[i] = -1;

		active_defined = 0;
	}

	printk(KERN_INFO "%s: ret %d, events %d, xenoprof_is_primary %d\n",
	       __func__, ret, init.num_events, xenoprof_is_primary);

	return ret;
}
/*
 * Start sampling.  Only the primary domain actually issues the
 * hypercall; all other domains report success without doing anything.
 */
static int xenoprof_start(void)
{
	if (!is_primary)
		return 0;
	return HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
}
/*
 * Replace Xen's list of actively profiled domains with the one handed
 * down by the oprofile core.  Only the primary domain may do this.
 * dom0 is always (re)added even if it is absent from the list.
 *
 * @active_domains: domain ids requested by userspace.
 * @adomains:       number of entries in @active_domains.
 * Returns 0 on success or a negative errno; on any failure the active
 * list in Xen is reset again and active_defined is cleared.
 */
static int xenoprof_set_active(int * active_domains, unsigned int adomains)
{
	int ret = 0;
	int i;
	int set_dom0 = 0;
	domid_t domid;

	if (!xenoprof_is_primary)
		return 0;

	if (adomains > MAX_OPROF_DOMAINS)
		return -E2BIG;

	ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
	if (ret)
		return ret;

	for (i=0; i<adomains; i++) {
		domid = active_domains[i];
		/* Reject ids that do not survive narrowing to domid_t. */
		if (domid != active_domains[i]) {
			ret = -EINVAL;
			goto out;
		}
		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
		if (ret)
			goto out;
		if (active_domains[i] == 0)
			set_dom0 = 1;
	}
	/* dom0 must always be active but may not be in the list */
	if (!set_dom0) {
		domid = 0;
		ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
	}

out:
	/* On failure, leave Xen with an empty active list rather than a
	 * partially applied one. */
	if (ret)
		WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list,
					       NULL));
	active_defined = !ret;
	return ret;
}
/*
 * Tear down an active profiling session: disable sample delivery,
 * release the hardware counters (primary domain only), unbind the
 * VIRQ and drop all buffer mappings, including those of passively
 * profiled domains.
 */
static void xenoprof_shutdown(void)
{
	xenoprof_enabled = 0;

	WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL));

	if (xenoprof_is_primary) {
		WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_release_counters,
					       NULL));
		active_defined = 0;
	}

	unbind_virq();

	xenoprof_arch_unmap_shared_buffer(&shared_buffer);
	if (xenoprof_is_primary)
		unmap_passive_list();
	release_buffer_array(xenoprof_buf, nbuf);
}
/*
 * Driver exit path: undo what xenoprofile_init() set up.  Removes the
 * driverfs entries, unmaps the shared sample buffer and, in the
 * primary domain, tells Xen to shut xenoprof down completely.
 */
void xenoprofile_exit(void)
{
	if (using_xenoprof)
		exit_driverfs();

	xenoprof_arch_unmap_shared_buffer(&shared_buffer);
	if (xenoprof_is_primary) {
		unmap_passive_list();
		WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL));
	}
}
static int xenoprof_set_passive(int * p_domains, unsigned int pdoms) { int ret; unsigned int i, j; struct xenoprof_buf *buf; if (!xenoprof_is_primary) return 0; if (pdoms > MAX_OPROF_DOMAINS) return -E2BIG; ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL); if (ret) return ret; unmap_passive_list(); for (i = 0; i < pdoms; i++) { passive_domains[i].domain_id = p_domains[i]; passive_domains[i].max_samples = 2048; ret = xenoprof_arch_set_passive(&passive_domains[i], &p_shared_buffer[i]); if (ret) goto out; p_xenoprof_buf[i] = get_buffer_array(passive_domains[i].nbuf); if (!p_xenoprof_buf[i]) { ++i; ret = -ENOMEM; goto out; } for (j = 0; j < passive_domains[i].nbuf; j++) { buf = (struct xenoprof_buf *) &p_shared_buffer[i].buffer[ j * passive_domains[i].bufsize]; BUG_ON(buf->vcpu_id >= passive_domains[i].nbuf); p_xenoprof_buf[i][buf->vcpu_id] = buf; } } pdomains = pdoms; return 0; out: for (j = 0; j < i; j++) { xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]); release_buffer_array(p_xenoprof_buf[i], passive_domains[i].nbuf); } return ret; }
/*
 * Arch exit hook: remove the driverfs entries, unmap the shared
 * sample buffer and, in the primary domain, tell Xen to shut
 * xenoprof down.
 */
void __exit oprofile_arch_exit(void)
{
	if (using_xenoprof)
		exit_driverfs();

	if (shared_buffer) {
		vunmap(shared_buffer);
		shared_buffer = NULL;
	}
	if (is_primary)
		HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL);
}
/*
 * Prepare a profiling session: map the sample buffers, bind the
 * xenoprof VIRQ and, for the primary domain, program the hardware
 * counters in Xen.  Called from the oprofile core's setup hook.
 *
 * Returns 0 on success or a negative errno; on failure the VIRQ is
 * unbound and the buffer array is released again.
 */
static int xenoprof_setup(void)
{
	int ret;

	if ( (ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)) )
		return ret;

	if ( (ret = bind_virq()) ) {
		release_buffer_array(xenoprof_buf, nbuf);
		return ret;
	}

	if (xenoprof_is_primary) {
		/* Define dom0 as an active domain if not done yet */
		if (!active_defined) {
			domid_t domid;
			ret = HYPERVISOR_xenoprof_op(
				XENOPROF_reset_active_list, NULL);
			if (ret)
				goto err;
			domid = 0;
			ret = HYPERVISOR_xenoprof_op(
				XENOPROF_set_active, &domid);
			if (ret)
				goto err;
			active_defined = 1;
		}

		if (oprofile_backtrace_depth > 0) {
			ret = HYPERVISOR_xenoprof_op(XENOPROF_set_backtrace,
						     &oprofile_backtrace_depth);
			/* Backtraces are optional: on failure fall back
			 * to sampling without them. */
			if (ret)
				oprofile_backtrace_depth = 0;
		}

		ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
		if (ret)
			goto err;

		xenoprof_arch_counter();
		ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
		if (ret)
			goto err;
	}

	ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
	if (ret)
		goto err;

	xenoprof_enabled = 1;
	return 0;
err:
	unbind_virq();
	release_buffer_array(xenoprof_buf, nbuf);
	return ret;
}
/*
 * Prepare a profiling session (older variant): bind the xenoprof VIRQ
 * and, for the primary domain, push each configured counter down to
 * Xen before enabling sample delivery.
 *
 * Returns 0 on success or a negative errno; on failure the VIRQ is
 * unbound again.
 */
static int xenoprof_setup(void)
{
	int ret;
	int i;

	ret = bind_virq();
	if (ret)
		return ret;

	if (is_primary) {
		struct xenoprof_counter counter;

		/* Define dom0 as an active domain if not done yet */
		if (!active_defined) {
			domid_t domid;
			ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list,
						     NULL);
			if (ret)
				goto err;
			domid = 0;
			ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active,
						     &domid);
			if (ret)
				goto err;
			active_defined = 1;
		}

		ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
		if (ret)
			goto err;

		/* Mirror counter_config[] into Xen, one hypercall per
		 * event.  NOTE(review): the XENOPROF_counter result is
		 * ignored here -- confirm that is intentional. */
		for (i=0; i<num_events; i++) {
			counter.ind       = i;
			counter.count     = (uint64_t)counter_config[i].count;
			counter.enabled   = (uint32_t)counter_config[i].enabled;
			counter.event     = (uint32_t)counter_config[i].event;
			counter.kernel    = (uint32_t)counter_config[i].kernel;
			counter.user      = (uint32_t)counter_config[i].user;
			counter.unit_mask = (uint64_t)counter_config[i].unit_mask;
			HYPERVISOR_xenoprof_op(XENOPROF_counter, &counter);
		}

		ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
		if (ret)
			goto err;
	}

	ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
	if (ret)
		goto err;

	xenoprof_enabled = 1;
	return 0;
err:
	unbind_virq();
	return ret;
}
/*
 * Stop sampling.  Secondary domains have nothing to do; the hypercall
 * result is ignored (stop is best effort).
 */
static void xenoprof_stop(void)
{
	if (!is_primary)
		return;
	HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL);
}
/*
 * Stop sampling: the primary domain tells Xen to stop, then the
 * arch-specific stop hook runs in every domain.
 */
static void xenoprof_stop(void)
{
	int err;

	if (xenoprof_is_primary) {
		err = HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL);
		WARN_ON(err);
	}
	xenoprof_arch_stop();
}