/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
static inline int cpu_needs_post_dma_flush(struct device *dev)
{
	/* Hardware-coherent devices never need the extra flush. */
	if (plat_device_is_coherent(dev))
		return 0;

	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
		return 1;
	default:
		return 0;
	}
}
/* * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively * fill random cachelines with stale data at any time, requiring an extra * flush post-DMA. * * Warning on the terminology - Linux calls an uncached area coherent; MIPS * terminology calls memory areas with hardware maintained coherency coherent. * * Note that the R14000 and R16000 should also be checked for in this condition. * However this function is only called on non-I/O-coherent systems and only the * R10000 and R12000 are used in such systems, the SGI IP28 Indigo² rsp. * SGI IP32 aka O2. */ static inline bool cpu_needs_post_dma_flush(struct device *dev) { switch (boot_cpu_type()) { case CPU_R10000: case CPU_R12000: case CPU_BMIPS5000: return true; default: /* * Presence of MAARs suggests that the CPU supports * speculatively prefetching data, and therefore requires * the post-DMA flush/invalidate. */ return cpu_has_maar; } }
/*
 * Select and initialise the oprofile performance-monitoring model for the
 * boot CPU, then populate the oprofile operations table.
 *
 * Returns 0 on success, -ENODEV when the CPU has no supported PMU model,
 * or the error code from the model's own init() hook.
 */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	struct op_mips_model *lmodel = NULL;
	int res;

	switch (boot_cpu_type()) {
	case CPU_5KC:
	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_20KC:
	case CPU_24K:
	case CPU_25KF:
	case CPU_34K:
	case CPU_1004K:
	case CPU_74K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_I6400:
	case CPU_M5150:
	case CPU_LOONGSON1:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
	case CPU_XLR:
		lmodel = &op_model_mipsxx_ops;
		break;

	case CPU_LOONGSON2:
		lmodel = &op_model_loongson2_ops;
		break;
	case CPU_LOONGSON3:
		lmodel = &op_model_loongson3_ops;
		break;
	}	/* fixed: stray ';' after the switch's closing brace removed */

	/*
	 * Always set the backtrace. This allows unsupported CPU types to still
	 * use timer-based oprofile.
	 */
	ops->backtrace = op_mips_backtrace;

	if (!lmodel)
		return -ENODEV;

	res = lmodel->init();
	if (res)
		return res;

	model = lmodel;

	ops->create_files	= op_mips_create_files;
	ops->setup		= op_mips_setup;
	/*
	 * Fixed: this assignment was commented out, leaving the shutdown
	 * callback unset so the model was never torn down on unload.
	 */
	ops->shutdown		= op_mips_shutdown;
	ops->start		= op_mips_start;
	ops->stop		= op_mips_stop;
	ops->cpu_type		= lmodel->cpu_type;

	printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
	       lmodel->cpu_type);

	return 0;
}
/*
 * True when @dev is not hardware I/O-coherent and the boot CPU is an
 * R10000 or R12000, which can speculatively fill cachelines with stale
 * data and therefore need explicit cache maintenance around DMA.
 */
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	/*
	 * Fixed: the original joined the two CPU-type comparisons with
	 * '&&'; boot_cpu_type() cannot equal both CPU_R10000 and
	 * CPU_R12000 at once, so the predicate was always false.
	 */
	return !plat_device_is_coherent(dev) &&
	       (boot_cpu_type() == CPU_R10000 ||
		boot_cpu_type() == CPU_R12000);
}