/*
 * Architecture-level oprofile init for ARM (XScale-only variant).
 * Selects the CPU model, initialises it and fills in the generic
 * oprofile operations table.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	struct op_arm_model_spec *spec = NULL;
	int ret = -ENODEV;

#ifdef CONFIG_CPU_XSCALE
	spec = &op_xscale_spec;
#endif

	if (spec) {
		init_MUTEX(&op_arm_sem);

		/*
		 * BUG FIX: the original left ret at -ENODEV even after a
		 * fully successful setup, so the caller always saw failure.
		 * Capture the model's own return value instead (this also
		 * propagates the model's real error code rather than
		 * clobbering it with -ENODEV).
		 */
		ret = spec->init();
		if (ret < 0)
			return ret;

		op_arm_model = spec;
		init_driverfs();
		ops->create_files = op_arm_create_files;
		ops->setup = op_arm_setup;
		/* No dedicated shutdown hook; stopping the counters suffices. */
		ops->shutdown = op_arm_stop;
		ops->start = op_arm_start;
		ops->stop = op_arm_stop;
		ops->cpu_type = op_arm_model->name;
		ops->backtrace = arm_backtrace;
		printk(KERN_INFO "oprofile: using %s\n", spec->name);
	}

	return ret;
}
/*
 * Architecture-level oprofile init for SH: choose the timer-based
 * profiling model at compile time, initialise it, then wire up the
 * generic oprofile operations table.
 *
 * Returns 0 on success, a negative errno from the model init otherwise.
 */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	struct op_sh_model_spec *model;
	int err;

	/* Exactly one model is selected at build time. */
#if defined(CONFIG_OPROFILE_TMU)
	model = &op_sh7109_spec;
#else
	model = &op_shtimer_spec;
#endif

	err = model->init();
	if (err < 0)
		return err;

	op_sh_model = model;
	init_driverfs();

	/* Timer-based model exposes no per-counter files. */
	ops->create_files = NULL;
	ops->setup       = op_sh_setup;
	ops->shutdown    = op_sh_stop;
	ops->start       = op_sh_start;
	ops->stop        = op_sh_stop;
	ops->cpu_type    = op_sh_model->name;
	ops->backtrace   = sh_backtrace;

	printk(KERN_INFO "oprofile: using %s\n", model->name);
	return err;
}
/*
 * Xen-paravirtualised oprofile init: ask the hypervisor for the
 * profiling configuration, map the shared sample buffers into this
 * domain, and install the xenoprof operations table.
 *
 * Returns 0 on success, or the (negative) hypercall/mapping error.
 */
int __init oprofile_arch_init(struct oprofile_operations * ops) {
	struct xenoprof_init init;
	struct xenoprof_buf * buf;
	int vm_size;
	int npages;
	int ret;
	int i;

	init.max_samples = 16;
	ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
	if (!ret) {
		pgprot_t prot = __pgprot(_KERNPG_TABLE);

		num_events = init.num_events;
		is_primary = init.is_primary;
		nbuf = init.nbuf;

		/* just in case - make sure we do not overflow event list
		   (i.e. counter_config list) */
		if (num_events > OP_MAX_COUNTER)
			num_events = OP_MAX_COUNTER;

		/* Round the total buffer size up to whole pages. */
		npages = (init.bufsize * nbuf - 1) / PAGE_SIZE + 1;
		vm_size = npages * PAGE_SIZE;

		/* Map the hypervisor-provided sample buffers into our
		 * address space; buf_maddr is a machine address. */
		shared_buffer = (char *)vm_map_xen_pages(init.buf_maddr,
							 vm_size, prot);
		if (!shared_buffer) {
			ret = -ENOMEM;
			goto out;
		}

		/* Index each per-VCPU buffer by the VCPU id it reports. */
		for (i=0; i< nbuf; i++) {
			buf = (struct xenoprof_buf*)
				&shared_buffer[i * init.bufsize];
			BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
			xenoprof_buf[buf->vcpu_id] = buf;
		}

		/* cpu_type is detected by Xen */
		/* Pre-terminate so the bounded strncpy below can never
		 * leave the string unterminated. */
		cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
		strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
		xenoprof_ops.cpu_type = cpu_type;

		init_driverfs();
		using_xenoprof = 1;
		*ops = xenoprof_ops;

		/* -1 marks "no overflow IRQ bound yet" for each CPU. */
		for (i=0; i<NR_CPUS; i++)
			ovf_irq[i] = -1;

		active_defined = 0;
	}
 out:
	printk(KERN_INFO "oprofile_arch_init: ret %d, events %d, "
	       "is_primary %d\n", ret, num_events, is_primary);
	return ret;
}
/*
 * Xen-paravirtualised oprofile init (arch-independent front end):
 * query the hypervisor, set up counters via the arch helper, and
 * install the xenoprof operations table.
 *
 * Returns 0 on success, or the (negative) hypercall error.
 */
int __init xenoprofile_init(struct oprofile_operations * ops)
{
	/*
	 * BUG FIX: zero-initialise so the printk below never reads an
	 * indeterminate init.num_events when the hypercall fails (the
	 * original declared init uninitialised and printed it
	 * unconditionally).
	 */
	struct xenoprof_init init = { 0 };
	unsigned int i;
	int ret;

	ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
	if (!ret) {
		xenoprof_arch_init_counter(&init);
		xenoprof_is_primary = init.is_primary;

		/* cpu_type is detected by Xen */
		/* Pre-terminate so the bounded strncpy stays NUL-terminated. */
		cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
		strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
		xenoprof_ops.cpu_type = cpu_type;

		init_driverfs();
		using_xenoprof = 1;
		*ops = xenoprof_ops;

		/* -1 marks "no overflow IRQ bound yet" for each CPU. */
		for (i=0; i<NR_CPUS; i++)
			ovf_irq[i] = -1;

		active_defined = 0;
	}

	printk(KERN_INFO "%s: ret %d, events %d, xenoprof_is_primary %d\n",
	       __func__, ret, init.num_events, xenoprof_is_primary);
	return ret;
}
/*
 * Generic perf-events-backed oprofile init: register the driverfs
 * entries, allocate per-counter config and per-cpu perf_event arrays,
 * then install the perf-based operations table.
 *
 * On any failure, oprofile_perf_exit() unwinds whatever was set up.
 * Returns 0 on success, a negative errno otherwise.
 */
int __init oprofile_perf_init(struct oprofile_operations *ops)
{
	int cpu;
	int err;

	err = init_driverfs();
	if (err)
		return err;

	memset(&perf_events, 0, sizeof(perf_events));

	num_counters = perf_num_counters();
	if (num_counters <= 0) {
		pr_info("oprofile: no performance counters\n");
		err = -ENODEV;
		goto done;
	}

	counter_config = kcalloc(num_counters,
				 sizeof(struct op_counter_config),
				 GFP_KERNEL);
	if (!counter_config) {
		pr_info("oprofile: failed to allocate %d "
				"counters\n", num_counters);
		err = -ENOMEM;
		num_counters = 0;
		goto done;
	}

	for_each_possible_cpu(cpu) {
		perf_events[cpu] = kcalloc(num_counters,
					   sizeof(struct perf_event *),
					   GFP_KERNEL);
		if (perf_events[cpu])
			continue;
		pr_info("oprofile: failed to allocate %d perf events "
				"for cpu %d\n", num_counters, cpu);
		err = -ENOMEM;
		goto done;
	}

	ops->create_files = oprofile_perf_create_files;
	ops->setup        = oprofile_perf_setup;
	ops->start        = oprofile_perf_start;
	ops->stop         = oprofile_perf_stop;
	ops->shutdown     = oprofile_perf_stop;
	ops->cpu_type     = op_name_from_perf_id();

	if (!ops->cpu_type)
		err = -ENODEV;
	else
		pr_info("oprofile: using %s\n", ops->cpu_type);

done:
	/* Common unwind: tears down everything allocated above. */
	if (err)
		oprofile_perf_exit();
	return err;
}
/*
 * Architecture-level oprofile init for ARM: pick the best available
 * PMU model (later #ifdefs win), initialise it, allocate its counter
 * configuration and fill in the generic operations table.
 *
 * Returns 0 on success, -ENODEV if no model is configured, or the
 * model's own (negative) init error.
 */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	struct op_arm_model_spec *model = NULL;
	int err;

	/* comes first, so that it can be overrided by a better
	 * implementation */
	ops->backtrace = arm_backtrace;

#ifdef CONFIG_OPROFILE_OMAP_GPTIMER
	model = &op_omap_gptimer_spec;
#endif

#ifdef CONFIG_CPU_XSCALE
	model = &op_xscale_spec;
#endif

#ifdef CONFIG_OPROFILE_ARMV6
	model = &op_armv6_spec;
#endif

#ifdef CONFIG_OPROFILE_MPCORE
	model = &op_mpcore_spec;
#endif

#ifdef CONFIG_OPROFILE_ARMV7
	model = &op_armv7_spec;
#endif

	if (!model)
		return -ENODEV;

	err = model->init();
	if (err < 0)
		return err;

	counter_config = kcalloc(model->num_counters,
				 sizeof(struct op_counter_config),
				 GFP_KERNEL);
	if (!counter_config)
		return -ENOMEM;

	op_arm_model = model;
	init_driverfs();

	ops->create_files = op_arm_create_files;
	ops->setup        = op_arm_setup;
	ops->shutdown     = op_arm_stop;
	ops->start        = op_arm_start;
	ops->stop         = op_arm_stop;
	ops->cpu_type     = op_arm_model->name;

	printk(KERN_INFO "oprofile: using %s\n", model->name);
	return err;
}
/*
 * Architecture-level oprofile init for ARM (incl. Marvell PJ4):
 * the last matching #ifdef selects the PMU model; initialise it,
 * allocate its counter configuration and hook up the operations.
 *
 * Returns 0 on success, -ENODEV if no model is configured, or the
 * model's own (negative) init error.
 */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	struct op_arm_model_spec *model = NULL;
	int err = -ENODEV;

	/* Default backtracer; a model could in principle replace it. */
	ops->backtrace = arm_backtrace;

#ifdef CONFIG_CPU_XSCALE
	model = &op_xscale_spec;
#endif

#ifdef CONFIG_OPROFILE_ARMV6
	model = &op_armv6_spec;
#endif

#ifdef CONFIG_OPROFILE_MPCORE
	model = &op_mpcore_spec;
#endif

#ifdef CONFIG_OPROFILE_ARMV7
	model = &op_armv7_spec;
#endif

#ifdef CONFIG_OPROFILE_MRVL_PJ4
	model = &op_mrvl_pj4_spec;
#endif

	if (model) {
		err = model->init();
		if (err < 0)
			return err;

		counter_config = kcalloc(model->num_counters,
					 sizeof(struct op_counter_config),
					 GFP_KERNEL);
		if (!counter_config)
			return -ENOMEM;

		op_arm_model = model;
		init_driverfs();

		ops->create_files = op_arm_create_files;
		ops->setup        = op_arm_setup;
		ops->shutdown     = op_arm_stop;
		ops->start        = op_arm_start;
		ops->stop         = op_arm_stop;
		ops->cpu_type     = op_arm_model->name;

		printk(KERN_INFO "oprofile: using %s\n", model->name);
	}

	return err;
}
/*
 * perf-events-backed oprofile init for ARM: allocate the counter
 * configuration and per-cpu perf_event arrays, register driverfs
 * entries and install the operations table.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	int cpu, ret = 0;

	perf_num_counters = armpmu_get_max_events();

	counter_config = kcalloc(perf_num_counters,
			sizeof(struct op_counter_config), GFP_KERNEL);

	if (!counter_config) {
		pr_info("oprofile: failed to allocate %d "
				"counters\n", perf_num_counters);
		return -ENOMEM;
	}

	ret = init_driverfs();
	if (ret) {
		kfree(counter_config);
		counter_config = NULL;
		return ret;
	}

	for_each_possible_cpu(cpu) {
		perf_events[cpu] = kcalloc(perf_num_counters,
				sizeof(struct perf_event *), GFP_KERNEL);
		if (!perf_events[cpu]) {
			pr_info("oprofile: failed to allocate %d perf events "
					"for cpu %d\n",
					perf_num_counters, cpu);
			/* Unwind earlier per-cpu arrays (kfree(NULL) is a
			 * no-op for any gaps in sparse cpu numbering). */
			while (--cpu >= 0) {
				kfree(perf_events[cpu]);
				perf_events[cpu] = NULL;
			}
			/*
			 * BUG FIX: the original leaked counter_config on
			 * this path and left it dangling non-NULL.
			 */
			kfree(counter_config);
			counter_config = NULL;
			return -ENOMEM;
		}
	}

	ops->backtrace = arm_backtrace;
	ops->create_files = op_arm_create_files;
	ops->setup = op_arm_setup;
	ops->start = op_arm_start;
	ops->stop = op_arm_stop;
	/* No dedicated shutdown hook; stopping the counters suffices. */
	ops->shutdown = op_arm_stop;
	ops->cpu_type = op_name_from_perf_id(armpmu_get_pmu_id());

	if (!ops->cpu_type)
		ret = -ENODEV;
	else
		pr_info("oprofile: using %s\n", ops->cpu_type);

	return ret;
}
/*
 * Initialise the given PMU model and wire it into the oprofile
 * operations table.
 *
 * Returns 0 on success, or the model's (negative) init error.
 */
int __init pmu_init(struct oprofile_operations *ops, struct op_arm_model_spec *spec)
{
	int ret;

	init_MUTEX(&pmu_sem);

	/*
	 * BUG FIX: propagate the model's real error code instead of
	 * collapsing every failure to -ENODEV (consistent with the
	 * other arch init paths in this file).
	 */
	ret = spec->init();
	if (ret < 0)
		return ret;

	pmu_model = spec;
	init_driverfs();
	ops->create_files = pmu_create_files;
	ops->setup = pmu_setup;
	/* No dedicated shutdown hook; stopping the counters suffices. */
	ops->shutdown = pmu_stop;
	ops->start = pmu_start;
	ops->stop = pmu_stop;
	ops->cpu_type = pmu_model->name;
	printk(KERN_INFO "oprofile: using %s PMU\n", spec->name);
	return 0;
}
/*
 * NMI-based oprofile init for x86: detect vendor/family, select the
 * matching counter model and CPU-type string, then install the
 * NMI-driven operations table.
 *
 * Returns 0 on success, -ENODEV for unsupported CPUs or missing APIC.
 */
int __init nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type = NULL;

	/* NMI profiling requires a local APIC. */
	if (!cpu_has_apic)
		return -ENODEV;

	if (vendor == X86_VENDOR_AMD) {
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */
		if (family == 6) {
			model = &op_athlon_spec;
			cpu_type = "i386/athlon";
		} else if (family == 0xf) {
			model = &op_athlon_spec;
			/* Actually it could be i386/hammer too, but give
			   user space an consistent name. */
			cpu_type = "x86-64/hammer";
		} else {
			return -ENODEV;
		}
	} else if (vendor == X86_VENDOR_INTEL) {
		if (family == 0xf) {
			/* Pentium IV */
			if (!p4_init(&cpu_type))
				return -ENODEV;
		} else if (family == 6) {
			/* A P6-class processor */
			if (!ppro_init(&cpu_type))
				return -ENODEV;
		} else {
			return -ENODEV;
		}
	} else {
		return -ENODEV;
	}

	init_driverfs();
	using_nmi = 1;
	ops->create_files = nmi_create_files;
	ops->setup = nmi_setup;
	ops->shutdown = nmi_shutdown;
	ops->start = nmi_start;
	ops->stop = nmi_stop;
	ops->cpu_type = cpu_type;
	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}