Example no. 1
0
/*
 * Copy an instruction and adjust the displacement if the instruction
 * uses the %rip-relative addressing mode.
 * Returns the length of the copied instruction, or 0 if recovery or
 * the copy fails.
 * The displacement fixup is only applicable to 64-bit x86.
 */
int __copy_instruction(u8 *dest, u8 *src)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	int length;
	unsigned long recovered_insn =
		recover_probed_instruction(buf, (unsigned long)src);

	if (!recovered_insn)
		return 0;
	kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
	insn_get_length(&insn);
	length = insn.length;

	/* Another subsystem has inserted a breakpoint, so recovery failed */
	if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
		return 0;
	pax_open_kernel();
	memcpy(dest, insn.kaddr, length);
	pax_close_kernel();

#ifdef CONFIG_X86_64
	if (insn_rip_relative(&insn)) {
		s64 newdisp;
		u8 *disp;
		kernel_insn_init(&insn, dest, length);
		insn_get_displacement(&insn);
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode.  Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run.  The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(&insn);
		pax_open_kernel();
		*(s32 *) disp = (s32) newdisp;
		pax_close_kernel();
	}
#endif
	return length;
}
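A note on the (s64)(s32)newdisp != newdisp guard above: truncating to 32 bits and widening back is the identity exactly when the value already fits a signed 32-bit displacement, so the test catches copies placed too far from the original instruction. A minimal user-space sketch of the same check (hypothetical values, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Mirrors the kernel's test: does v survive an s64 -> s32 -> s64
 * round trip? If yes, it fits a signed 32-bit displacement. */
static int fits_s32(int64_t v)
{
	return (int64_t)(int32_t)v == v;
}

int main(void)
{
	printf("%d\n", fits_s32(0x7fffffffLL));  /* 1: INT32_MAX fits   */
	printf("%d\n", fits_s32(0x80000000LL));  /* 0: one past the max */
	printf("%d\n", fits_s32(-2147483648LL)); /* 1: INT32_MIN fits   */
	return 0;
}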
Example no. 2
0
void od_unregister_powersave_bias_handler(void)
{
	pax_open_kernel();
	*(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
	pax_close_kernel();
	od_set_powersave_bias(0);
}
Example no. 3
0
static void jit_fill_hole(void *area, unsigned int size)
{
	/* fill whole space with int3 instructions */
	pax_open_kernel();
	memset(area, 0xcc, size);
	pax_close_kernel();
}
Example no. 4
0
void __init exynos_pm_init(void)
{
	const struct of_device_id *match;
	struct device_node *np;
	u32 tmp;

	np = of_find_matching_node_and_match(NULL, exynos_pmu_of_device_ids, &match);
	if (!np) {
		pr_err("Failed to find PMU node\n");
		return;
	}

	if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
		pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
		return;
	}

	pm_data = (const struct exynos_pm_data *) match->data;

	/* Disable all wakeup sources */
	tmp = pmu_raw_readl(S5P_WAKEUP_MASK);
	tmp |= pm_data->wake_disable_mask;
	pmu_raw_writel(tmp, S5P_WAKEUP_MASK);

	pax_open_kernel();
	*(void **)&exynos_pm_syscore_ops.suspend	= pm_data->pm_suspend;
	*(void **)&exynos_pm_syscore_ops.resume	= pm_data->pm_resume;
	pax_close_kernel();

	register_syscore_ops(&exynos_pm_syscore_ops);
	suspend_set_ops(&exynos_suspend_ops);
}
Example no. 5
0
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		pax_open_kernel();
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
		pax_close_kernel();
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}
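The pattern here is copy-then-override: radeon takes TTM's entire vm_operations_struct once, swaps in its own .fault handler, and leaves every other hook untouched (the copy happens under pax_open_kernel() because radeon_ttm_vm_ops is write-protected). A reduced user-space sketch of the idiom, with made-up names:

#include <stdio.h>
#include <stddef.h>

/* Stand-ins for vm_operations_struct and the TTM defaults. */
struct vm_ops_demo {
	int (*fault)(void);
	void (*close)(void);
};

static int  ttm_fault_demo(void)    { return 1; }
static void ttm_close_demo(void)    { }
static int  radeon_fault_demo(void) { return 2; }

static const struct vm_ops_demo ttm_vm_ops_demo = {
	.fault = ttm_fault_demo,
	.close = ttm_close_demo,
};

int main(void)
{
	struct vm_ops_demo radeon_vm_ops_demo;

	/* Copy the provider's ops wholesale, then override only the
	 * hook we want to interpose on, as radeon_mmap does. */
	radeon_vm_ops_demo = ttm_vm_ops_demo;
	radeon_vm_ops_demo.fault = radeon_fault_demo;

	printf("%d\n", radeon_vm_ops_demo.fault()); /* 2: interposed */
	radeon_vm_ops_demo.close();                 /* TTM's default */
	return 0;
}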
Example no. 6
0
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *plocation, location;

	DEBUGP("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
		location = (uint32_t)plocation;
		if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
			plocation = ktla_ktva((void *)plocation);
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			pax_open_kernel();
			*plocation += sym->st_value;
			pax_close_kernel();
			break;
		case R_386_PC32:
			/* Add the value, subtract its position */
			pax_open_kernel();
			*plocation += sym->st_value - location;
			pax_close_kernel();
			break;
		default:
			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
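The two relocation types reduce to simple arithmetic: R_386_32 adds the resolved symbol address to whatever addend already sits at the location, and R_386_PC32 additionally subtracts the location's own address so the stored word is PC-relative. A small numeric check with made-up addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t location = 0xc1000010; /* address being patched      */
	uint32_t st_value = 0xc1002000; /* resolved symbol address    */
	uint32_t addend   = 0xfffffffc; /* -4, typical for call rel32 */

	/* R_386_32:   *plocation += sym->st_value (in-place addend 0) */
	printf("R_386_32:   0x%x\n", 0u + st_value);

	/* R_386_PC32: *plocation += sym->st_value - location */
	printf("R_386_PC32: 0x%x\n", addend + (st_value - location));
	/* 0x1fec: the target is 0x1ff0 bytes ahead, minus the -4 the
	 * in-place addend contributes for the rel32 encoding. */
	return 0;
}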
Example no. 7
0
void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	pax_open_kernel();
	*(void **)&od_ops.powersave_bias_target = f;
	pax_close_kernel();
	od_set_powersave_bias(powersave_bias);
}
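Examples 2 and 7 form an unregister/register pair, and both use the idiom that recurs throughout this listing: od_ops is const-qualified and lives in write-protected memory under PaX, so the handler is swapped by casting the qualifier away between pax_open_kernel() and pax_close_kernel(). A user-space sketch of just the cast (the table is left writable here so the sketch actually runs; all names are made up):

#include <stdio.h>

struct ops_demo {
	int (*powersave_bias_target)(int);
};

static int generic_target_demo(int x) { return x; }
static int custom_target_demo(int x)  { return x * 2; }

/* In the kernel this table would be const and write-protected;
 * it is writable here so the demo runs in user space. */
static struct ops_demo od_ops_demo = {
	.powersave_bias_target = generic_target_demo,
};

int main(void)
{
	/* The (void **) cast mirrors how the kernel code writes through
	 * a const-qualified member; there, the store is only legal
	 * between pax_open_kernel() and pax_close_kernel(). */
	*(void **)&od_ops_demo.powersave_bias_target =
		(void *)custom_target_demo;

	printf("%d\n", od_ops_demo.powersave_bias_target(21)); /* 42 */
	return 0;
}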
Example no. 8
0
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	pax_open_kernel();
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
	pax_close_kernel();
}
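Compare this with the x86 version in example 3: x86 can fill byte-by-byte with 0xcc (int3) because any byte offset is a valid instruction start, while ARM instructions are word-sized, so the hole is filled one 32-bit undefined instruction at a time. A user-space sketch of the word-fill loop (the UDF value below is one encoding from ARM's permanently-undefined space, standing in for the kernel's ARM_INST_UDF constant):

#include <stdio.h>
#include <stdint.h>

#define DEMO_UDF 0xe7f000f0u /* stand-in for ARM_INST_UDF */

/* Same loop shape as jit_fill_hole above; assumes `area` is
 * 4-byte aligned, as the BPF JIT guarantees. */
static void fill_demo(void *area, unsigned int size)
{
	uint32_t *ptr;

	for (ptr = area; size >= sizeof(uint32_t); size -= sizeof(uint32_t))
		*ptr++ = DEMO_UDF;
}

int main(void)
{
	uint32_t buf[4] = { 0 };

	fill_demo(buf, sizeof(buf));
	printf("0x%08x ... 0x%08x\n", buf[0], buf[3]);
	return 0;
}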
Example no. 9
0
static int __init load_scm_model_init(struct platform_device *sdev)
{
	u8 data;
	int result;

	if (!quirks->ec_read_only) {
		/* allow userland to write to the sysfs files */
		pax_open_kernel();
		*(void **)&dev_attr_bluetooth.store = store_bluetooth;
		*(void **)&dev_attr_wlan.store = store_wlan;
		*(void **)&dev_attr_threeg.store = store_threeg;
		*(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
		*(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
		*(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
		pax_close_kernel();
	}

	/* disable hardware control by fn key */
	result = ec_read(MSI_STANDARD_EC_SCM_LOAD_ADDRESS, &data);
	if (result < 0)
		return result;

	result = ec_write(MSI_STANDARD_EC_SCM_LOAD_ADDRESS,
		data | MSI_STANDARD_EC_SCM_LOAD_MASK);
	if (result < 0)
		return result;

	/* initialize rfkill */
	result = rfkill_init(sdev);
	if (result < 0)
		goto fail_rfkill;

	/* setup input device */
	result = msi_laptop_input_setup();
	if (result)
		goto fail_input;

	result = i8042_install_filter(msi_laptop_i8042_filter);
	if (result) {
		pr_err("Unable to install key filter\n");
		goto fail_filter;
	}

	return 0;

fail_filter:
	msi_laptop_input_destroy();

fail_input:
	rfkill_cleanup();

fail_rfkill:

	return result;

}
Example no. 10
0
static int __init abyss_init (void)
{
	pax_open_kernel();
	memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));

	*(void **)&abyss_netdev_ops.ndo_open = abyss_open;
	*(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
	pax_close_kernel();

	return pci_register_driver(&abyss_driver);
}
Example no. 11
0
static void swap_ex(void *a, void *b, int size)
{
	struct exception_table_entry t, *x = a, *y = b;

	t = *x;

	pax_open_kernel();
	*x = *y;
	*y = t;
	pax_close_kernel();
}
Example no. 12
0
static int notifier_chain_register(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = (struct notifier_block **)&((*nl)->next);
	}
	pax_open_kernel();
	*(const void **)&n->next = *nl;
	rcu_assign_pointer(*nl, n);
	pax_close_kernel();
	return 0;
}
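The walk above keeps the chain sorted by descending priority: the loop advances past every existing block whose priority is greater than or equal to the new one, then splices the new block in front of the rest. The same insertion logic, minus the RCU and write-protection handling, in a runnable sketch:

#include <stdio.h>
#include <stddef.h>

/* Reduced notifier block: higher priority runs first. */
struct nb_demo {
	int priority;
	struct nb_demo *next;
};

static void register_demo(struct nb_demo **nl, struct nb_demo *n)
{
	/* Same walk as notifier_chain_register above. */
	while (*nl != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = &(*nl)->next;
	}
	n->next = *nl;
	*nl = n;
}

int main(void)
{
	struct nb_demo a = { .priority = 10 }, b = { .priority = 30 },
		       c = { .priority = 20 };
	struct nb_demo *head = NULL, *p;

	register_demo(&head, &a);
	register_demo(&head, &b);
	register_demo(&head, &c);

	for (p = head; p != NULL; p = p->next)
		printf("%d ", p->priority); /* prints: 30 20 10 */
	printf("\n");
	return 0;
}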
Example no. 13
0
static int __init init_nls_euc_jp(void)
{
	p_nls = load_nls("cp932");

	if (p_nls) {
		pax_open_kernel();
		*(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
		*(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
		pax_close_kernel();
		return register_nls(&table);
	}

	return -EINVAL;
}
Example no. 14
0
static int notifier_chain_unregister(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if ((*nl) == n) {
			pax_open_kernel();
			rcu_assign_pointer(*nl, n->next);
			pax_close_kernel();
			return 0;
		}
		nl = (struct notifier_block **)&((*nl)->next);
	}
	return -ENOENT;
}
Example no. 15
0
static nokprobe_inline void
__synthesize_relative_insn(void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)ktla_ktva(from);

	pax_open_kernel();
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
	pax_close_kernel();
}
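The raddr computation encodes the standard x86 rel32 branch format: one opcode byte (0xe8 for call, 0xe9 for jmp) followed by a 32-bit displacement measured from the end of the 5-byte instruction, i.e. from `from + 5`. A worked example with hypothetical addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical branch site and target. */
	uint64_t from = 0xffffffff81000000ULL;
	uint64_t to   = 0xffffffff81000100ULL;

	/* Same arithmetic as insn->raddr above. */
	int32_t raddr = (int32_t)(to - (from + 5));

	printf("raddr = %d (0x%08x)\n", raddr, (uint32_t)raddr);
	/* 251 (0xfb): the CPU resumes at (from + 5) + raddr == to. */
	return 0;
}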
Example no. 16
0
static __init struct clk *__socfpga_pll_init(struct device_node *node,
	const struct clk_ops *ops)
{
	u32 reg;
	struct clk *clk;
	struct socfpga_pll *pll_clk;
	const char *clk_name = node->name;
	const char *parent_name[SOCFPGA_MAX_PARENTS];
	struct clk_init_data init;
	struct device_node *clkmgr_np;
	int rc;

	of_property_read_u32(node, "reg", &reg);

	pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
	if (WARN_ON(!pll_clk))
		return NULL;

	clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
	clk_mgr_base_addr = of_iomap(clkmgr_np, 0);
	BUG_ON(!clk_mgr_base_addr);
	pll_clk->hw.reg = clk_mgr_base_addr + reg;

	of_property_read_string(node, "clock-output-names", &clk_name);

	init.name = clk_name;
	init.ops = ops;
	init.flags = 0;

	init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS);
	init.parent_names = parent_name;
	pll_clk->hw.hw.init = &init;

	pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
	pax_open_kernel();
	*(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
	*(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
	pax_close_kernel();

	clk = clk_register(NULL, &pll_clk->hw.hw);
	if (WARN_ON(IS_ERR(clk))) {
		kfree(pll_clk);
		return NULL;
	}
	rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
	return clk;
}
Example no. 17
0
/**
 * register_security - registers a security framework with the kernel
 * @ops: a pointer to the struct security_operations that is to be registered
 *
 * This function allows a security module to register itself with the
 * kernel security subsystem.  Some rudimentary checking is done on the @ops
 * value passed to this function. You'll need to check first if your LSM
 * is allowed to register its @ops by calling security_module_enable(@ops).
 *
 * If there is already a security module registered with the kernel,
 * an error will be returned.  Otherwise %0 is returned on success.
 */
int __init register_security(struct security_operations *ops)
{
	if (verify(ops)) {
		printk(KERN_DEBUG "%s could not verify "
		       "security_operations structure.\n", __func__);
		return -EINVAL;
	}

	if (security_ops != &default_security_ops)
		return -EAGAIN;

	pax_open_kernel();
	security_ops = ops;
	pax_close_kernel();

	return 0;
}
Example no. 18
0
int omapdss_register_display(struct omap_dss_device *dssdev)
{
	struct omap_dss_driver *drv = dssdev->driver;
	int id;

	/*
	 * Note: this presumes all the displays are either using DT or non-DT,
 * which normally should be the case. This also presumes that either all
 * displays have a DT alias, or none has.
	 */

	if (dssdev->dev->of_node) {
		id = of_alias_get_id(dssdev->dev->of_node, "display");

		if (id < 0)
			id = disp_num_counter++;
	} else {
		id = disp_num_counter++;
	}

	snprintf(dssdev->alias, sizeof(dssdev->alias), "display%d", id);

	/* Use 'label' property for name, if it exists */
	if (dssdev->dev->of_node)
		of_property_read_string(dssdev->dev->of_node, "label",
			&dssdev->name);

	if (dssdev->name == NULL)
		dssdev->name = dssdev->alias;

	pax_open_kernel();
	if (drv && drv->get_resolution == NULL)
		*(void **)&drv->get_resolution = omapdss_default_get_resolution;
	if (drv && drv->get_recommended_bpp == NULL)
		*(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
	if (drv && drv->get_timings == NULL)
		*(void **)&drv->get_timings = omapdss_default_get_timings;
	pax_close_kernel();

	mutex_lock(&panel_list_mutex);
	list_add_tail(&dssdev->panel_list, &panel_list);
	mutex_unlock(&panel_list_mutex);
	return 0;
}
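The block between pax_open_kernel() and pax_close_kernel() fills in defaults only for the hooks a driver left NULL, so a driver-supplied implementation always wins. Stripped of the write-protection handling, the pattern is just this (demo names are made up):

#include <stdio.h>
#include <stddef.h>

struct drv_demo {
	int (*get_resolution)(void);
};

static int default_get_resolution_demo(void) { return 1024; }

int main(void)
{
	struct drv_demo drv = { .get_resolution = NULL };

	/* Install the default only when the driver left the hook unset,
	 * as omapdss_register_display does for its three hooks. */
	if (drv.get_resolution == NULL)
		drv.get_resolution = default_get_resolution_demo;

	printf("%d\n", drv.get_resolution()); /* 1024 */
	return 0;
}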
Example no. 19
0
static int __init setup_pax_nouderef(char *str)
{
	unsigned int cpu;

#ifdef CONFIG_PAX_KERNEXEC
	unsigned long cr0;

	pax_open_kernel(cr0);
#endif

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_DS].b = 0x00cf9300;

#ifdef CONFIG_PAX_KERNEXEC
	pax_close_kernel(cr0);
#endif

	return 1;
}
Example no. 20
0
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));

	pax_open_kernel();
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	pax_close_kernel();

	return (void *)vaddr;
}
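The index arithmetic gives every CPU a private window of KM_TYPE_NR fixmap slots: `type + KM_TYPE_NR * smp_processor_id()` maps CPU n's slots to the disjoint range [n*KM_TYPE_NR, (n+1)*KM_TYPE_NR), so nested atomic kmaps on different CPUs never share a pte slot. A quick check of the indexing (KM_TYPE_NR is arch-specific; 20 is used here purely for illustration):

#include <stdio.h>

#define KM_TYPE_NR_DEMO 20 /* per-CPU slot count; arch-specific in reality */

/* Mirrors: idx = type + KM_TYPE_NR * smp_processor_id() */
static int slot_demo(int type, int cpu)
{
	return type + KM_TYPE_NR_DEMO * cpu;
}

int main(void)
{
	printf("cpu0, type0 -> slot %d\n", slot_demo(0, 0)); /* 0  */
	printf("cpu0, type1 -> slot %d\n", slot_demo(1, 0)); /* 1  */
	printf("cpu1, type0 -> slot %d\n", slot_demo(0, 1)); /* 20 */
	return 0;
}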
Example no. 21
0
void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
{
	int i;

	spin_lock_init(&pnp_bios_lock);
	pnp_bios_callpoint.offset = header->fields.pm16offset;
	pnp_bios_callpoint.segment = PNP_CS16;

	pax_open_kernel();

	for_each_possible_cpu(i) {
		struct desc_struct *gdt = get_cpu_gdt_table(i);
		if (!gdt)
			continue;
		set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_CS32],
			 (unsigned long)&pnp_bios_callfunc);
		set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_CS16],
			 (unsigned long)__va(header->fields.pm16cseg));
		set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
			 (unsigned long)__va(header->fields.pm16dseg));
	}

	pax_close_kernel();
}
Example no. 22
0
static void __init acpi_cpufreq_boost_init(void)
{
	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
		msrs = msrs_alloc();

		if (!msrs)
			return;

		pax_open_kernel();
		*(bool *)&acpi_cpufreq_driver.boost_supported = true;
		*(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
		pax_close_kernel();

		cpu_notifier_register_begin();

		/* Force all MSRs to the same value */
		boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
			       cpu_online_mask);

		__register_cpu_notifier(&boost_nb);

		cpu_notifier_register_done();
	}
}
Example no. 23
0
static int max8973_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct max8973_regulator_platform_data *pdata;
	struct regulator_config config = { };
	struct regulator_dev *rdev;
	struct max8973_chip *max;
	int ret;

	pdata = client->dev.platform_data;
	if (!pdata) {
		dev_err(&client->dev, "No Platform data");
		return -EIO;
	}

	max = devm_kzalloc(&client->dev, sizeof(*max), GFP_KERNEL);
	if (!max) {
		dev_err(&client->dev, "Memory allocation for max failed\n");
		return -ENOMEM;
	}

	max->regmap = devm_regmap_init_i2c(client, &max8973_regmap_config);
	if (IS_ERR(max->regmap)) {
		ret = PTR_ERR(max->regmap);
		dev_err(&client->dev, "regmap init failed, err %d\n", ret);
		return ret;
	}

	i2c_set_clientdata(client, max);
	max->dev = &client->dev;
	max->desc.name = id->name;
	max->desc.id = 0;
	max->desc.ops = &max8973_dcdc_ops;
	max->desc.type = REGULATOR_VOLTAGE;
	max->desc.owner = THIS_MODULE;
	max->desc.min_uV = MAX8973_MIN_VOLATGE;
	max->desc.uV_step = MAX8973_VOLATGE_STEP;
	max->desc.n_voltages = MAX8973_BUCK_N_VOLTAGE;

	if (!pdata->enable_ext_control) {
		max->desc.enable_reg = MAX8973_VOUT;
		max->desc.enable_mask = MAX8973_VOUT_ENABLE;
		pax_open_kernel();
		*(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
		*(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
		*(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
		pax_close_kernel();
	}

	max->enable_external_control = pdata->enable_ext_control;
	max->dvs_gpio = pdata->dvs_gpio;
	max->curr_gpio_val = pdata->dvs_def_state;
	max->curr_vout_reg = MAX8973_VOUT + pdata->dvs_def_state;
	max->lru_index[0] = max->curr_vout_reg;
	max->valid_dvs_gpio = false;

	if (gpio_is_valid(max->dvs_gpio)) {
		int gpio_flags;
		int i;

		gpio_flags = (pdata->dvs_def_state) ?
				GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
		ret = devm_gpio_request_one(&client->dev, max->dvs_gpio,
				gpio_flags, "max8973-dvs");
		if (ret) {
			dev_err(&client->dev,
				"gpio_request for gpio %d failed, err = %d\n",
				max->dvs_gpio, ret);
			return ret;
		}
		max->valid_dvs_gpio = true;

		/*
		 * Initialize the LRU index with the vout_reg ids.
		 * Index 0 is the most recently used and is set to
		 * max->curr_vout_reg.
		 */
		for (i = 0; i < MAX8973_MAX_VOUT_REG; ++i)
			max->lru_index[i] = i;
		max->lru_index[0] = max->curr_vout_reg;
		max->lru_index[max->curr_vout_reg] = 0;
	}

	ret = max8973_init_dcdc(max, pdata);
	if (ret < 0) {
		dev_err(max->dev, "Max8973 Init failed, err = %d\n", ret);
		return ret;
	}

	config.dev = &client->dev;
	config.init_data = pdata->reg_init_data;
	config.driver_data = max;
	config.of_node = client->dev.of_node;
	config.regmap = max->regmap;

	/* Register the regulators */
	rdev = regulator_register(&max->desc, &config);
	if (IS_ERR(rdev)) {
		ret = PTR_ERR(rdev);
		dev_err(max->dev, "regulator register failed, err %d\n", ret);
		return ret;
	}

	max->rdev = rdev;
	return 0;
}
Example no. 24
0
static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
    int status;

    status = zt5550_hc_config(pdev);
    if(status != 0) {
        return status;
    }
    dbg("returned from zt5550_hc_config");

    memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
    zt5550_hpc.ops = &zt5550_hpc_ops;
    if(!poll) {
        zt5550_hpc.irq = hc_dev->irq;
        zt5550_hpc.irq_flags = IRQF_SHARED;
        zt5550_hpc.dev_id = hc_dev;

        pax_open_kernel();
        *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
        *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
        *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
        pax_close_kernel();
    } else {
        info("using ENUM# polling mode");
    }

    status = cpci_hp_register_controller(&zt5550_hpc);
    if(status != 0) {
        err("could not register cPCI hotplug controller");
        goto init_hc_error;
    }
    dbg("registered controller");

    /* Look for first device matching cPCI bus's bridge vendor and device IDs */
    if(!(bus0_dev = pci_get_device(PCI_VENDOR_ID_DEC,
                                   PCI_DEVICE_ID_DEC_21154, NULL))) {
        status = -ENODEV;
        goto init_register_error;
    }
    bus0 = bus0_dev->subordinate;
    pci_dev_put(bus0_dev);

    status = cpci_hp_register_bus(bus0, 0x0a, 0x0f);
    if(status != 0) {
        err("could not register cPCI hotplug bus");
        goto init_register_error;
    }
    dbg("registered bus");

    status = cpci_hp_start();
    if(status != 0) {
        err("could not started cPCI hotplug system");
        cpci_hp_unregister_bus(bus0);
        goto init_register_error;
    }
    dbg("started cpci hp system");

    return 0;
init_register_error:
    cpci_hp_unregister_controller(&zt5550_hpc);
init_hc_error:
    err("status = %d", status);
    zt5550_hc_cleanup();
    return status;

}
Example no. 25
0
static int sdhci_s3c_probe(struct platform_device *pdev)
{
	struct s3c_sdhci_platdata *pdata;
	struct sdhci_s3c_drv_data *drv_data;
	struct device *dev = &pdev->dev;
	struct sdhci_host *host;
	struct sdhci_s3c *sc;
	struct resource *res;
	int ret, irq, ptr, clks;

	if (!pdev->dev.platform_data && !pdev->dev.of_node) {
		dev_err(dev, "no device data specified\n");
		return -ENOENT;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "no irq specified\n");
		return irq;
	}

	host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c));
	if (IS_ERR(host)) {
		dev_err(dev, "sdhci_alloc_host() failed\n");
		return PTR_ERR(host);
	}
	sc = sdhci_priv(host);

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		ret = -ENOMEM;
		goto err_pdata_io_clk;
	}

	if (pdev->dev.of_node) {
		ret = sdhci_s3c_parse_dt(&pdev->dev, host, pdata);
		if (ret)
			goto err_pdata_io_clk;
	} else {
		memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
		sc->ext_cd_gpio = -1; /* invalid gpio number */
	}

	drv_data = sdhci_s3c_get_driver_data(pdev);

	sc->host = host;
	sc->pdev = pdev;
	sc->pdata = pdata;

	platform_set_drvdata(pdev, host);

	sc->clk_io = devm_clk_get(dev, "hsmmc");
	if (IS_ERR(sc->clk_io)) {
		dev_err(dev, "failed to get io clock\n");
		ret = PTR_ERR(sc->clk_io);
		goto err_pdata_io_clk;
	}

	/* enable the local io clock and keep it running for the moment. */
	clk_prepare_enable(sc->clk_io);

	for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
		struct clk *clk;
		char name[14];

		snprintf(name, 14, "mmc_busclk.%d", ptr);
		clk = devm_clk_get(dev, name);
		if (IS_ERR(clk))
			continue;

		clks++;
		sc->clk_bus[ptr] = clk;

		/*
		 * Save the current clock index so the overriding
		 * functions know later which bus clock is in use.
		 */
		sc->cur_clk = ptr;

		dev_info(dev, "clock source %d: %s (%ld Hz)\n",
			 ptr, name, clk_get_rate(clk));
	}

	if (clks == 0) {
		dev_err(dev, "failed to find any bus clocks\n");
		ret = -ENOENT;
		goto err_no_busclks;
	}

#ifndef CONFIG_PM_RUNTIME
	clk_prepare_enable(sc->clk_bus[sc->cur_clk]);
#endif

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->ioaddr)) {
		ret = PTR_ERR(host->ioaddr);
		goto err_req_regs;
	}

	/* Ensure we have minimal gpio selected CMD/CLK/Detect */
	if (pdata->cfg_gpio)
		pdata->cfg_gpio(pdev, pdata->max_width);

	host->hw_name = "samsung-hsmmc";
	host->ops = &sdhci_s3c_ops;
	host->quirks = 0;
	host->irq = irq;

	/* Setup quirks for the controller */
	host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
	host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
	if (drv_data)
		host->quirks |= drv_data->sdhci_quirks;

#ifndef CONFIG_MMC_SDHCI_S3C_DMA

	/* we currently see overruns on errors, so disable the SDMA
	 * support as well. */
	host->quirks |= SDHCI_QUIRK_BROKEN_DMA;

#endif /* CONFIG_MMC_SDHCI_S3C_DMA */

	/* It seems we do not get a DATA transfer complete on non-busy
	 * transfers, not sure if this is a problem with this specific
	 * SDHCI block, or a missing configuration that needs to be set. */
	host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;

	/* This host supports the Auto CMD12 */
	host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;

	/* Samsung SoCs need BROKEN_ADMA_ZEROLEN_DESC */
	host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC;

	if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
	    pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
		host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;

	if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
		host->mmc->caps = MMC_CAP_NONREMOVABLE;

	switch (pdata->max_width) {
	case 8:
		host->mmc->caps |= MMC_CAP_8_BIT_DATA;
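		/* fall through: 8-bit data implies 4-bit support */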
	case 4:
		host->mmc->caps |= MMC_CAP_4_BIT_DATA;
		break;
	}

	if (pdata->pm_caps)
		host->mmc->pm_caps |= pdata->pm_caps;

	host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
			 SDHCI_QUIRK_32BIT_DMA_SIZE);

	/* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
	host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;

	/*
	 * If the controller does not have an internal clock divider,
	 * we can use the overriding functions instead of the defaults.
	 */
	if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
		pax_open_kernel();
		*(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
		*(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
		*(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
		pax_close_kernel();
	}

	/* It supports additional host capabilities if needed */
	if (pdata->host_caps)
		host->mmc->caps |= pdata->host_caps;

	if (pdata->host_caps2)
		host->mmc->caps2 |= pdata->host_caps2;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_suspend_ignore_children(&pdev->dev, 1);

	ret = sdhci_add_host(host);
	if (ret) {
		dev_err(dev, "sdhci_add_host() failed\n");
		pm_runtime_forbid(&pdev->dev);
		pm_runtime_get_noresume(&pdev->dev);
		goto err_req_regs;
	}

	/* The following two methods of card detection might call
	   sdhci_s3c_notify_change() immediately, so they can be called
	   only after sdhci_add_host(). Setup errors are ignored. */
	if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_init)
		pdata->ext_cd_init(&sdhci_s3c_notify_change);
	if (pdata->cd_type == S3C_SDHCI_CD_GPIO &&
	    gpio_is_valid(pdata->ext_cd_gpio))
		sdhci_s3c_setup_card_detect_gpio(sc);

#ifdef CONFIG_PM_RUNTIME
	if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL)
		clk_disable_unprepare(sc->clk_io);
#endif
	return 0;

 err_req_regs:
#ifndef CONFIG_PM_RUNTIME
	clk_disable_unprepare(sc->clk_bus[sc->cur_clk]);
#endif

 err_no_busclks:
	clk_disable_unprepare(sc->clk_io);

 err_pdata_io_clk:
	sdhci_free_host(host);

	return ret;
}
Example no. 26
0
void reset_security_ops(void)
{
	pax_open_kernel();
	security_ops = &default_security_ops;
	pax_close_kernel();
}
Example no. 27
0
int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type = NULL;
	int ret = 0;

	if (!cpu_has_apic)
		return -ENODEV;

	if (force_cpu_type == timer)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */

		switch (family) {
		case 6:
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			/*
			 * Actually it could be i386/hammer too, but
			 * give user space a consistent name.
			 */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			cpu_type = "x86-64/family11h";
			break;
		case 0x12:
			cpu_type = "x86-64/family12h";
			break;
		case 0x14:
			cpu_type = "x86-64/family14h";
			break;
		case 0x15:
			cpu_type = "x86-64/family15h";
			break;
		default:
			return -ENODEV;
		}
		model = &op_amd_spec;
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
			/* Pentium IV */
		case 0xf:
			p4_init(&cpu_type);
			break;

			/* A P6-class processor */
		case 6:
			ppro_init(&cpu_type);
			break;

		default:
			break;
		}

		if (cpu_type)
			break;

		if (!cpu_has_arch_perfmon)
			return -ENODEV;

		/* use arch perfmon as fallback */
		cpu_type = "i386/arch_perfmon";
		model = &op_arch_perfmon_spec;
		break;

	default:
		return -ENODEV;
	}

	/* default values, can be overwritten by model */
	ops->create_files	= nmi_create_files;
	ops->setup		= nmi_setup;
	ops->shutdown		= nmi_shutdown;
	ops->start		= nmi_start;
	ops->stop		= nmi_stop;
	ops->cpu_type		= cpu_type;

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	if (!model->num_virt_counters) {
		pax_open_kernel();
		*(unsigned int *)&model->num_virt_counters = model->num_counters;
		pax_close_kernel();
	}

	mux_init(ops);

	init_suspend_resume();

	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}
Example no. 28
0
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free;
	}

	perf = per_cpu_ptr(acpi_perf_data, cpu);
	data->acpi_perf_cpu = cpu;
	policy->driver_data = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
		pax_open_kernel();
		*(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
		pax_close_kernel();
	}

	result = acpi_processor_register_performance(perf, cpu);
	if (result)
		goto err_free_mask;

	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(data->freqdomain_cpus,
			     topology_sibling_cpumask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	data->freq_table = kzalloc(sizeof(*data->freq_table) *
		    (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 uS\n");
	}

	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].driver_data = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_table_validate_and_show(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * The core will not set policy->cur, because
		 * cpufreq_driver->get is NULL, so we need to set it here.
		 * However, we have to guess it, because the current speed is
		 * unknown and not detectable via IO ports.
		 */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pax_open_kernel();
		*(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		pax_close_kernel();
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
	policy->driver_data = NULL;

	return result;
}
Example no. 29
0
static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
				u16 arg4, u16 arg5, u16 arg6, u16 arg7,
				void *ts1_base, u32 ts1_size,
				void *ts2_base, u32 ts2_size)
{
	unsigned long flags;
	u16 status;
	struct desc_struct save_desc_40;
	int cpu;

	/*
	 * PnP BIOSes are generally not terribly re-entrant.
	 * Also, don't rely on them to save everything correctly.
	 */
	if (pnp_bios_is_utter_crap)
		return PNP_FUNCTION_NOT_SUPPORTED;

	cpu = get_cpu();
	save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];

	pax_open_kernel();
	get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
	pax_close_kernel();

	/* On some boxes IRQs during PnP BIOS calls are deadly.  */
	spin_lock_irqsave(&pnp_bios_lock, flags);

	/* The lock prevents us from bouncing between CPUs here */
	if (ts1_size)
		Q2_SET_SEL(smp_processor_id(), PNP_TS1, ts1_base, ts1_size);
	if (ts2_size)
		Q2_SET_SEL(smp_processor_id(), PNP_TS2, ts2_base, ts2_size);

	__asm__ __volatile__("pushl %%ebp\n\t"
			     "pushl %%edi\n\t"
			     "pushl %%esi\n\t"
			     "pushl %%ds\n\t"
			     "pushl %%es\n\t"
			     "pushl %%fs\n\t"
			     "pushl %%gs\n\t"
			     "pushfl\n\t"
			     "movl %%esp, pnp_bios_fault_esp\n\t"
			     "movl $1f, pnp_bios_fault_eip\n\t"
			     "lcall %5,%6\n\t"
			     "1:popfl\n\t"
			     "popl %%gs\n\t"
			     "popl %%fs\n\t"
			     "popl %%es\n\t"
			     "popl %%ds\n\t"
			     "popl %%esi\n\t"
			     "popl %%edi\n\t"
			     "popl %%ebp\n\t":"=a"(status)
			     :"0"((func) | (((u32) arg1) << 16)),
			     "b"((arg2) | (((u32) arg3) << 16)),
			     "c"((arg4) | (((u32) arg5) << 16)),
			     "d"((arg6) | (((u32) arg7) << 16)),
			     "i"(PNP_CS32), "i"(0)
			     :"memory");
	spin_unlock_irqrestore(&pnp_bios_lock, flags);

	pax_open_kernel();
	get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
	pax_close_kernel();

	put_cpu();

	/* If we get here and this is set then the PnP BIOS faulted on us. */
	if (pnp_bios_is_utter_crap) {
		printk(KERN_ERR
		       "PnPBIOS: Warning! Your PnP BIOS caused a fatal error. Attempting to continue\n");
		printk(KERN_ERR
		       "PnPBIOS: You may need to reboot with the \"pnpbios=off\" option to operate stably\n");
		printk(KERN_ERR
		       "PnPBIOS: Check with your vendor for an updated BIOS\n");
	}

	return status;
}
Example no. 30
0
int apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;

#ifdef CONFIG_PAX_KERNEXEC
	unsigned long cr0;
#endif

	DEBUGP("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
			(int)ELF64_R_TYPE(rel[i].r_info),
			sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:

#ifdef CONFIG_PAX_KERNEXEC
			pax_open_kernel(cr0);
#endif

			*(u64 *)loc = val;

#ifdef CONFIG_PAX_KERNEXEC
			pax_close_kernel(cr0);
#endif

			break;
		case R_X86_64_32:

#ifdef CONFIG_PAX_KERNEXEC
			pax_open_kernel(cr0);
#endif

			*(u32 *)loc = val;

#ifdef CONFIG_PAX_KERNEXEC
			pax_close_kernel(cr0);
#endif

			if (val != *(u32 *)loc)
				goto overflow;
			break;
		case R_X86_64_32S:

#ifdef CONFIG_PAX_KERNEXEC
			pax_open_kernel(cr0);
#endif

			*(s32 *)loc = val;

#ifdef CONFIG_PAX_KERNEXEC
			pax_close_kernel(cr0);
#endif

			if ((s64)val != *(s32 *)loc)
				goto overflow;
			break;
		case R_X86_64_PC32:
			val -= (u64)loc;

#ifdef CONFIG_PAX_KERNEXEC
			pax_open_kernel(cr0);
#endif

			*(u32 *)loc = val;

#ifdef CONFIG_PAX_KERNEXEC
			pax_close_kernel(cr0);
#endif

#if 0
			if ((s64)val != *(s32 *)loc)
				goto overflow;
#endif
			break;
		default:
			printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

overflow:
	printk(KERN_ERR "overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}
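The R_X86_64_32 and R_X86_64_32S checks differ in how the CPU widens the stored 32-bit word: zero-extension for 32, sign-extension for 32S. Kernel addresses in the top 2GB (the -mcmodel=kernel layout) survive sign-extension but not zero-extension, which is exactly what the overflow message hints at. A runnable check of both predicates with made-up addresses:

#include <stdio.h>
#include <stdint.h>

/* R_X86_64_32: the value must survive zero-extension from 32 bits. */
static int fits_u32(uint64_t val)
{
	return (uint64_t)(uint32_t)val == val;
}

/* R_X86_64_32S: the value must survive sign-extension from 32 bits. */
static int fits_s32(uint64_t val)
{
	return (int64_t)(int32_t)val == (int64_t)val;
}

int main(void)
{
	uint64_t kaddr = 0xffffffff81000000ULL; /* top-2GB kernel text  */
	uint64_t low   = 0x00000000c0000000ULL; /* low positive address */

	printf("kernel addr: u32=%d s32=%d\n", fits_u32(kaddr), fits_s32(kaddr));
	/* u32=0 s32=1: why modules must be built with -mcmodel=kernel */
	printf("low addr:    u32=%d s32=%d\n", fits_u32(low), fits_s32(low));
	/* u32=1 s32=0: 0xc0000000 sign-extends to 0xffffffffc0000000 */
	return 0;
}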