/* * keypad controller should be initialized in the following sequence * only, otherwise it might get into FSM stuck state. * * - Initialize keypad control parameters, like no. of rows, columns, * timing values etc., * - configure rows and column gpios pull up/down. * - set irq edge type. * - enable the keypad controller. */ static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev) { const struct pm8xxx_keypad_platform_data *pdata = dev_get_platdata(&pdev->dev); const struct matrix_keymap_data *keymap_data; struct pmic8xxx_kp *kp; int rc; u8 ctrl_val; struct pm_gpio kypd_drv = { .direction = PM_GPIO_DIR_OUT, .output_buffer = PM_GPIO_OUT_BUF_OPEN_DRAIN, .output_value = 0, .pull = PM_GPIO_PULL_NO, .vin_sel = PM_GPIO_VIN_S4, .out_strength = PM_GPIO_STRENGTH_LOW, .function = PM_GPIO_FUNC_1, .inv_int_pol = 1, }; struct pm_gpio kypd_sns = { .direction = PM_GPIO_DIR_IN, .pull = PM_GPIO_PULL_UP_31P5, .vin_sel = PM_GPIO_VIN_S4, .out_strength = PM_GPIO_STRENGTH_NO, .function = PM_GPIO_FUNC_NORMAL, .inv_int_pol = 1, }; if (!pdata || !pdata->num_cols || !pdata->num_rows || pdata->num_cols > PM8XXX_MAX_COLS || pdata->num_rows > PM8XXX_MAX_ROWS || pdata->num_cols < PM8XXX_MIN_COLS) { dev_err(&pdev->dev, "invalid platform data\n"); return -EINVAL; } if (!pdata->scan_delay_ms || pdata->scan_delay_ms > MAX_SCAN_DELAY || pdata->scan_delay_ms < MIN_SCAN_DELAY || !is_power_of_2(pdata->scan_delay_ms)) { dev_err(&pdev->dev, "invalid keypad scan time supplied\n"); return -EINVAL; } if (!pdata->row_hold_ns || pdata->row_hold_ns > MAX_ROW_HOLD_DELAY || pdata->row_hold_ns < MIN_ROW_HOLD_DELAY || ((pdata->row_hold_ns % MIN_ROW_HOLD_DELAY) != 0)) { dev_err(&pdev->dev, "invalid keypad row hold time supplied\n"); return -EINVAL; } if (!pdata->debounce_ms || ((pdata->debounce_ms % 5) != 0) || pdata->debounce_ms > MAX_DEBOUNCE_TIME || pdata->debounce_ms < MIN_DEBOUNCE_TIME) { dev_err(&pdev->dev, "invalid debounce time supplied\n"); return -EINVAL; } keymap_data = pdata->keymap_data; if (!keymap_data) { dev_err(&pdev->dev, "no keymap data supplied\n"); return -EINVAL; } kp = kzalloc(sizeof(*kp), GFP_KERNEL); if (!kp) return -ENOMEM; platform_set_drvdata(pdev, kp); kp->pdata = pdata; kp->dev = &pdev->dev; kp->input = input_allocate_device(); if (!kp->input) { dev_err(&pdev->dev, "unable to allocate input device\n"); rc = -ENOMEM; goto err_alloc_device; } kp->key_sense_irq = platform_get_irq(pdev, 0); if (kp->key_sense_irq < 0) { dev_err(&pdev->dev, "unable to get keypad sense irq\n"); rc = -ENXIO; goto err_get_irq; } kp->key_stuck_irq = platform_get_irq(pdev, 1); if (kp->key_stuck_irq < 0) { dev_err(&pdev->dev, "unable to get keypad stuck irq\n"); rc = -ENXIO; goto err_get_irq; } kp->input->name = pdata->input_name ? : "PMIC8XXX keypad"; kp->input->phys = pdata->input_phys_device ? 
: "pmic8xxx_keypad/input0"; kp->input->dev.parent = &pdev->dev; kp->input->id.bustype = BUS_I2C; kp->input->id.version = 0x0001; kp->input->id.product = 0x0001; kp->input->id.vendor = 0x0001; kp->input->evbit[0] = BIT_MASK(EV_KEY); if (pdata->rep) __set_bit(EV_REP, kp->input->evbit); kp->input->keycode = kp->keycodes; kp->input->keycodemax = PM8XXX_MATRIX_MAX_SIZE; kp->input->keycodesize = sizeof(kp->keycodes); kp->input->open = pmic8xxx_kp_open; kp->input->close = pmic8xxx_kp_close; matrix_keypad_build_keymap(keymap_data, PM8XXX_ROW_SHIFT, kp->input->keycode, kp->input->keybit); input_set_capability(kp->input, EV_MSC, MSC_SCAN); input_set_drvdata(kp->input, kp); /* initialize keypad state */ memset(kp->keystate, 0xff, sizeof(kp->keystate)); memset(kp->stuckstate, 0xff, sizeof(kp->stuckstate)); rc = pmic8xxx_kpd_init(kp); if (rc < 0) { dev_err(&pdev->dev, "unable to initialize keypad controller\n"); goto err_get_irq; } rc = pmic8xxx_kp_config_gpio(pdata->cols_gpio_start, pdata->num_cols, kp, &kypd_sns); if (rc < 0) { dev_err(&pdev->dev, "unable to configure keypad sense lines\n"); goto err_gpio_config; } rc = pmic8xxx_kp_config_gpio(pdata->rows_gpio_start, pdata->num_rows, kp, &kypd_drv); if (rc < 0) { dev_err(&pdev->dev, "unable to configure keypad drive lines\n"); goto err_gpio_config; } rc = request_any_context_irq(kp->key_sense_irq, pmic8xxx_kp_irq, IRQF_TRIGGER_RISING, "pmic-keypad", kp); if (rc < 0) { dev_err(&pdev->dev, "failed to request keypad sense irq\n"); goto err_get_irq; } rc = request_any_context_irq(kp->key_stuck_irq, pmic8xxx_kp_stuck_irq, IRQF_TRIGGER_RISING, "pmic-keypad-stuck", kp); if (rc < 0) { dev_err(&pdev->dev, "failed to request keypad stuck irq\n"); goto err_req_stuck_irq; } rc = pmic8xxx_kp_read_u8(kp, &ctrl_val, KEYP_CTRL); if (rc < 0) { dev_err(&pdev->dev, "failed to read KEYP_CTRL register\n"); goto err_pmic_reg_read; } kp->ctrl_reg = ctrl_val; rc = input_register_device(kp->input); if (rc < 0) { dev_err(&pdev->dev, "unable to register keypad input device\n"); goto err_pmic_reg_read; } device_init_wakeup(&pdev->dev, pdata->wakeup); return 0; err_pmic_reg_read: free_irq(kp->key_stuck_irq, kp); err_req_stuck_irq: free_irq(kp->key_sense_irq, kp); err_gpio_config: err_get_irq: input_free_device(kp->input); err_alloc_device: platform_set_drvdata(pdev, NULL); kfree(kp); return rc; } static int __devexit pmic8xxx_kp_remove(struct platform_device *pdev) { struct pmic8xxx_kp *kp = platform_get_drvdata(pdev); device_init_wakeup(&pdev->dev, 0); free_irq(kp->key_stuck_irq, kp); free_irq(kp->key_sense_irq, kp); input_unregister_device(kp->input); kfree(kp); platform_set_drvdata(pdev, NULL); return 0; } #ifdef CONFIG_PM_SLEEP static int pmic8xxx_kp_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct pmic8xxx_kp *kp = platform_get_drvdata(pdev); struct input_dev *input_dev = kp->input; if (device_may_wakeup(dev)) { enable_irq_wake(kp->key_sense_irq); } else { mutex_lock(&input_dev->mutex); if (input_dev->users) pmic8xxx_kp_disable(kp); mutex_unlock(&input_dev->mutex); } return 0; } static int pmic8xxx_kp_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct pmic8xxx_kp *kp = platform_get_drvdata(pdev); struct input_dev *input_dev = kp->input; if (device_may_wakeup(dev)) { disable_irq_wake(kp->key_sense_irq); } else { mutex_lock(&input_dev->mutex); if (input_dev->users) pmic8xxx_kp_enable(kp); mutex_unlock(&input_dev->mutex); } return 0; } #endif static SIMPLE_DEV_PM_OPS(pm8xxx_kp_pm_ops, 
pmic8xxx_kp_suspend, pmic8xxx_kp_resume); static struct platform_driver pmic8xxx_kp_driver = { .probe = pmic8xxx_kp_probe, .remove = __devexit_p(pmic8xxx_kp_remove), .driver = { .name = PM8XXX_KEYPAD_DEV_NAME, .owner = THIS_MODULE, .pm = &pm8xxx_kp_pm_ops, }, }; static int __init pmic8xxx_kp_init(void) { return platform_driver_register(&pmic8xxx_kp_driver); } module_init(pmic8xxx_kp_init); static void __exit pmic8xxx_kp_exit(void) { platform_driver_unregister(&pmic8xxx_kp_driver); } module_exit(pmic8xxx_kp_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("PMIC8XXX keypad driver"); MODULE_VERSION("1.0"); MODULE_ALIAS("platform:pmic8xxx_keypad"); MODULE_AUTHOR("Trilok Soni <*****@*****.**>");
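/*
 * Hedged illustration (not from the original source): board code would hand
 * this driver a pm8xxx_keypad_platform_data block roughly like the sketch
 * below.  All numeric values are made-up examples; the probe above only
 * checks that num_rows/num_cols fall within the PM8XXX limits, that
 * scan_delay_ms is a power of two inside the allowed range, that row_hold_ns
 * is a multiple of the minimum row-hold step, and that debounce_ms is a
 * multiple of 5.
 */
static const unsigned int example_keymap[] = {
	KEY(0, 0, KEY_VOLUMEUP),	/* row 0, column 0 */
	KEY(0, 1, KEY_VOLUMEDOWN),	/* row 0, column 1 */
	KEY(1, 0, KEY_CAMERA),		/* row 1, column 0 */
};

static const struct matrix_keymap_data example_keymap_data = {
	.keymap		= example_keymap,
	.keymap_size	= ARRAY_SIZE(example_keymap),
};

static const struct pm8xxx_keypad_platform_data example_keypad_pdata = {
	.input_name	= "example-keypad",
	.keymap_data	= &example_keymap_data,
	.num_rows	= 5,		/* assumed <= PM8XXX_MAX_ROWS */
	.num_cols	= 5,		/* assumed within the MIN/MAX_COLS range */
	.scan_delay_ms	= 32,		/* must be a power of two */
	.row_hold_ns	= 91500,	/* assumed multiple of MIN_ROW_HOLD_DELAY */
	.debounce_ms	= 15,		/* must be a multiple of 5 */
	.wakeup		= 1,
	.rep		= 1,
};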
static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { unsigned char *b = skb_tail_pointer(skb); struct tcf_defact *d = to_defact(a); struct tc_defact opt = { .index = d->tcf_index, .refcnt = d->tcf_refcnt - ref, .bindcnt = d->tcf_bindcnt - bind, .action = d->tcf_action, }; struct tcf_t t; if (nla_put(skb, TCA_DEF_PARMS, sizeof(opt), &opt) || nla_put_string(skb, TCA_DEF_DATA, d->tcfd_defdata)) goto nla_put_failure; tcf_tm_dump(&t, &d->tcf_tm); if (nla_put_64bit(skb, TCA_DEF_TM, sizeof(t), &t, TCA_DEF_PAD)) goto nla_put_failure; return skb->len; nla_put_failure: nlmsg_trim(skb, b); return -1; } static int tcf_simp_walker(struct net *net, struct sk_buff *skb, struct netlink_callback *cb, int type, const struct tc_action_ops *ops) { struct tc_action_net *tn = net_generic(net, simp_net_id); return tcf_generic_walker(tn, skb, cb, type, ops); } static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index) { struct tc_action_net *tn = net_generic(net, simp_net_id); return tcf_hash_search(tn, a, index); } static struct tc_action_ops act_simp_ops = { .kind = "simple", .type = TCA_ACT_SIMP, .owner = THIS_MODULE, .act = tcf_simp, .dump = tcf_simp_dump, .cleanup = tcf_simp_release, .init = tcf_simp_init, .walk = tcf_simp_walker, .lookup = tcf_simp_search, .size = sizeof(struct tcf_defact), }; static __net_init int simp_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, simp_net_id); return tc_action_net_init(tn, &act_simp_ops, SIMP_TAB_MASK); } static void __net_exit simp_exit_net(struct net *net) { struct tc_action_net *tn = net_generic(net, simp_net_id); tc_action_net_exit(tn); } static struct pernet_operations simp_net_ops = { .init = simp_init_net, .exit = simp_exit_net, .id = &simp_net_id, .size = sizeof(struct tc_action_net), }; MODULE_AUTHOR("Jamal Hadi Salim(2005)"); MODULE_DESCRIPTION("Simple example action"); MODULE_LICENSE("GPL"); static int __init simp_init_module(void) { int ret = tcf_register_action(&act_simp_ops, &simp_net_ops); if (!ret) pr_info("Simple TC action Loaded\n"); return ret; } static void __exit simp_cleanup_module(void) { tcf_unregister_action(&act_simp_ops, &simp_net_ops); } module_init(simp_init_module); module_exit(simp_cleanup_module);
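/*
 * Usage note (illustrative, not part of act_simple.c): once registered,
 * the "simple" action is attached from user space with iproute2; the
 * kernel's own documentation uses a rule along these lines (exact syntax
 * may differ between iproute2 versions):
 *
 *	tc filter add dev lo parent ffff: protocol ip prio 10 u32 \
 *		match ip src 127.0.0.1/32 flowid 1:1 action simple sdata "example"
 *
 * tcf_simp_dump() above is what reports the configured index, refcnt,
 * bindcnt and sdata string back for "tc actions ls action simple".
 */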
int exec_netbsd(const char *file, physaddr_t loadaddr, int boothowto, int floppy, void (*callback)(void)) { u_long boot_argv[BOOT_NARGS]; u_long marks[MARK_MAX]; struct btinfo_symtab btinfo_symtab; u_long extmem; u_long basemem; #ifdef DEBUG printf("exec: file=%s loadaddr=0x%lx\n", file ? file : "NULL", loadaddr); #endif BI_ALLOC(32); /* ??? */ BI_ADD(&btinfo_console, BTINFO_CONSOLE, sizeof(struct btinfo_console)); howto = boothowto; if (common_load_kernel(file, &basemem, &extmem, loadaddr, floppy, marks)) goto out; boot_argv[0] = boothowto; boot_argv[1] = 0; boot_argv[2] = vtophys(bootinfo); /* old cyl offset */ boot_argv[3] = marks[MARK_END]; boot_argv[4] = extmem; boot_argv[5] = basemem; /* pull in any modules if necessary */ if (boot_modules_enabled) { module_init(file); if (btinfo_modulelist) { BI_ADD(btinfo_modulelist, BTINFO_MODULELIST, btinfo_modulelist_size); } } userconf_init(); if (btinfo_userconfcommands != NULL) BI_ADD(btinfo_userconfcommands, BTINFO_USERCONFCOMMANDS, btinfo_userconfcommands_size); #ifdef DEBUG printf("Start @ 0x%lx [%ld=0x%lx-0x%lx]...\n", marks[MARK_ENTRY], marks[MARK_NSYM], marks[MARK_SYM], marks[MARK_END]); #endif btinfo_symtab.nsym = marks[MARK_NSYM]; btinfo_symtab.ssym = marks[MARK_SYM]; btinfo_symtab.esym = marks[MARK_END]; BI_ADD(&btinfo_symtab, BTINFO_SYMTAB, sizeof(struct btinfo_symtab)); /* set new video mode if necessary */ vbe_commit(); BI_ADD(&btinfo_framebuffer, BTINFO_FRAMEBUFFER, sizeof(struct btinfo_framebuffer)); if (callback != NULL) (*callback)(); startprog(marks[MARK_ENTRY], BOOT_NARGS, boot_argv, x86_trunc_page(basemem*1024)); panic("exec returned"); out: BI_FREE(); bootinfo = 0; return -1; }
/* Interrupt handler */ static irqreturn_t omap4_keypad_interrupt(int irq, void *dev_id) { struct omap4_keypad *keypad_data = dev_id; struct input_dev *input_dev = keypad_data->input; unsigned char key_state[ARRAY_SIZE(keypad_data->key_state)]; unsigned int col, row, code, changed; u32 *new_state = (u32 *) key_state; /* */ wake_lock_timeout(&keypad_data->wlock, 1 * HZ); *new_state = __raw_readl(keypad_data->base + OMAP4_KBD_FULLCODE31_0); *(new_state + 1) = __raw_readl(keypad_data->base + OMAP4_KBD_FULLCODE63_32); // if(debug_mask) { printk("========================================================\n"); printk("%s: [%#x][%#x]\n", __func__, *new_state, *(new_state+1)); printk("========================================================\n"); } // for (col = 0; col < keypad_data->cols; col++) { changed = key_state[col] ^ keypad_data->key_state[col]; if (!changed) continue; for (row = 0; row < keypad_data->rows; row++) { if (changed & (1 << row)) { code = MATRIX_SCAN_CODE(row, col, keypad_data->row_shift); // if(debug_mask) { printk("%s: [changed][col][row][code] = [%#x][%d][%d][%d]\n", __func__, changed, col, row, code); printk("========================================================\n"); } // // #ifdef CONFIG_MACH_LGE_COSMO if( keypad_data->keymap[code] && !atcmd_keylock) { #else if( keypad_data->keymap[code] ) { #endif // input_event(input_dev, EV_MSC, MSC_SCAN, code); input_report_key(input_dev, keypad_data->keymap[code], (bool)(key_state[col] & (1 << row))); #ifdef CONFIG_MACH_LGE_U2 /* */ printk("[omap4-keypad] %s KEY %s\n", (keypad_data->keymap[code] == KEY_VOLUMEUP) ? "Vol_UP" : ((keypad_data->keymap[code] == KEY_VOLUMEDOWN) ? "Vol_DOWN" : "HOME"), (key_state[col] & (1 << row)) ? "PRESS" : "RELEASE" ); #else printk("[omap4-keypad] %s KEY %s\n", (keypad_data->keymap[code] == KEY_VOLUMEUP) ? "Vol_UP" : ((keypad_data->keymap[code] == KEY_VOLUMEDOWN) ? "Vol_DOWN" : "CAPTURE"), (key_state[col] & (1 << row)) ? 
"PRESS" : "RELEASE" ); #endif #ifdef CONFIG_INPUT_LGE_GKPD gkpd_report_key(keypad_data->keymap[code], (bool)(key_state[col] & (1 << row))); #endif break; } /* */ #ifdef CONFIG_KEYBOARD_OMAP4_SAFEMODE if (keypad_data->keymap[code] == KEY_VOLUMEUP) { safemode_key = !!(key_state[col] & (1 << row)); } #endif } } } input_sync(input_dev); memcpy(keypad_data->key_state, key_state, sizeof(keypad_data->key_state)); /* clear pending interrupts */ __raw_writel(__raw_readl(keypad_data->base + OMAP4_KBD_IRQSTATUS), keypad_data->base + OMAP4_KBD_IRQSTATUS); printk("#################################### %s is finished!!!!!\n", __func__); return IRQ_HANDLED; } static int omap4_keypad_open(struct input_dev *input) { struct omap4_keypad *keypad_data = input_get_drvdata(input); #ifdef KBD_DEBUG printk("omap4-keypad: omap4_keypad_open \n"); #endif pm_runtime_get_sync(input->dev.parent); disable_irq(keypad_data->irq); __raw_writel(OMAP4_DEF_CTRL_NOSOFTMODE | (OMAP4_VAL_PVT << OMAP4_DEF_CTRL_PTV), keypad_data->base + OMAP4_KBD_CTRL); __raw_writel(OMAP4_VAL_DEBOUNCINGTIME, keypad_data->base + OMAP4_KBD_DEBOUNCINGTIME); /* Enable event IRQ*/ __raw_writel(OMAP4_DEF_IRQENABLE_EVENTEN, keypad_data->base + OMAP4_KBD_IRQENABLE); /* Enable event wkup*/ __raw_writel(OMAP4_DEF_WUP_EVENT_ENA, keypad_data->base + OMAP4_KBD_WAKEUPENABLE); /* clear pending interrupts */ __raw_writel(__raw_readl(keypad_data->base + OMAP4_KBD_IRQSTATUS), keypad_data->base + OMAP4_KBD_IRQSTATUS); enable_irq(keypad_data->irq); return 0; } static void omap4_keypad_close(struct input_dev *input) { struct omap4_keypad *keypad_data = input_get_drvdata(input); disable_irq(keypad_data->irq); /* Disable interrupts */ __raw_writel(OMAP4_VAL_IRQDISABLE, keypad_data->base + OMAP4_KBD_IRQENABLE); /* clear pending interrupts */ __raw_writel(__raw_readl(keypad_data->base + OMAP4_KBD_IRQSTATUS), keypad_data->base + OMAP4_KBD_IRQSTATUS); enable_irq(keypad_data->irq); #ifdef KBD_DEBUG printk("omap4-keypad: omap4_keypad_close \n"); #endif pm_runtime_put_sync(input->dev.parent); } static int __devinit omap4_keypad_probe(struct platform_device *pdev) { const struct omap4_keypad_platform_data *pdata; struct omap4_keypad *keypad_data; struct input_dev *input_dev; struct resource *res; resource_size_t size; unsigned int row_shift, max_keys; int irq; int error; /* platform data */ pdata = pdev->dev.platform_data; if (!pdata) { dev_err(&pdev->dev, "no platform data defined\n"); return -EINVAL; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "no base address specified\n"); return -EINVAL; } irq = platform_get_irq(pdev, 0); if (!irq) { dev_err(&pdev->dev, "no keyboard irq assigned\n"); return -EINVAL; } if (!pdata->keymap_data) { dev_err(&pdev->dev, "no keymap data defined\n"); return -EINVAL; } row_shift = get_count_order(pdata->cols); max_keys = pdata->rows << row_shift; keypad_data = kzalloc(sizeof(struct omap4_keypad) + max_keys * sizeof(keypad_data->keymap[0]), GFP_KERNEL); if (!keypad_data) { dev_err(&pdev->dev, "keypad_data memory allocation failed\n"); return -ENOMEM; } size = resource_size(res); res = request_mem_region(res->start, size, pdev->name); if (!res) { dev_err(&pdev->dev, "can't request mem region\n"); error = -EBUSY; goto err_free_keypad; } keypad_data->base = ioremap(res->start, resource_size(res)); if (!keypad_data->base) { dev_err(&pdev->dev, "can't ioremap mem resource\n"); error = -ENOMEM; goto err_release_mem; } keypad_data->irq = irq; keypad_data->row_shift = row_shift; keypad_data->rows = pdata->rows; 
keypad_data->cols = pdata->cols; keypad_data->keypad_pad_wkup = pdata->keypad_pad_wkup; /* input device allocation */ keypad_data->input = input_dev = input_allocate_device(); if (!input_dev) { error = -ENOMEM; goto err_unmap; } input_dev->name = pdev->name; input_dev->dev.parent = &pdev->dev; input_dev->id.bustype = BUS_HOST; input_dev->id.vendor = 0x0001; input_dev->id.product = 0x0001; input_dev->id.version = 0x0001; input_dev->open = omap4_keypad_open; input_dev->close = omap4_keypad_close; input_dev->keycode = keypad_data->keymap; input_dev->keycodesize = sizeof(keypad_data->keymap[0]); input_dev->keycodemax = max_keys; __set_bit(EV_KEY, input_dev->evbit); __set_bit(EV_REP, input_dev->evbit); input_set_capability(input_dev, EV_MSC, MSC_SCAN); input_set_drvdata(input_dev, keypad_data); matrix_keypad_build_keymap(pdata->keymap_data, row_shift, input_dev->keycode, input_dev->keybit); /* */ #if defined(CONFIG_MHL_INPUT_RCP) hdmi_common_register_keys(input_dev); #endif /* */ #if defined(CONFIG_SND_OMAP_SOC_LGE_JACK) __set_bit(KEY_HOOK, input_dev->keybit); #endif /* */ wake_lock_init(&keypad_data->wlock, WAKE_LOCK_SUSPEND, "omap4-keypad"); /* * Set irq level detection for mpu. Edge event are missed * in gic if the mpu is in low power and keypad event * is a wakeup. */ error = request_irq(keypad_data->irq, omap4_keypad_interrupt, IRQF_TRIGGER_HIGH, "omap4-keypad", keypad_data); if (error) { dev_err(&pdev->dev, "failed to register interrupt\n"); goto err_free_input; } enable_irq_wake(OMAP44XX_IRQ_KBD_CTL); pm_runtime_enable(&pdev->dev); error = input_register_device(keypad_data->input); if (error < 0) { dev_err(&pdev->dev, "failed to register input device\n"); goto err_pm_disable; } platform_set_drvdata(pdev, keypad_data); /* */ #ifdef CONFIG_KEYBOARD_OMAP4_SAFEMODE error = device_create_file(&pdev->dev, &dev_attr_key_saving); if (error < 0) { dev_warn(&pdev->dev, "failed to create sysfs for key_saving\n"); } #endif // error = device_create_file(&pdev->dev, &dev_attr_keypad_debug); if (error < 0) { dev_warn(&pdev->dev, "failed to create sysfs for keypad_debug\n"); } // // #ifdef CONFIG_MACH_LGE_COSMO error = device_create_file(&pdev->dev, &dev_attr_keylock); if (error) { printk( "keypad: keylock create file: Fail\n"); device_remove_file(&pdev->dev, &dev_attr_keylock); } #endif // /* */ #ifdef CONFIG_MACH_LGE lge_input_set(input_dev); #endif return 0; err_pm_disable: pm_runtime_disable(&pdev->dev); free_irq(keypad_data->irq, keypad_data); /* */ wake_lock_destroy(&keypad_data->wlock); err_free_input: input_free_device(input_dev); err_unmap: iounmap(keypad_data->base); err_release_mem: release_mem_region(res->start, size); err_free_keypad: kfree(keypad_data); return error; } static int __devexit omap4_keypad_remove(struct platform_device *pdev) { struct omap4_keypad *keypad_data = platform_get_drvdata(pdev); struct resource *res; // device_remove_file(&pdev->dev, &dev_attr_keypad_debug); // // #ifdef CONFIG_MACH_LGE_COSMO device_remove_file(&pdev->dev, &dev_attr_keylock); #endif // /* */ #ifdef CONFIG_KEYBOARD_OMAP4_SAFEMODE device_remove_file(&pdev->dev, &dev_attr_key_saving); #endif free_irq(keypad_data->irq, keypad_data); /* */ wake_lock_destroy(&keypad_data->wlock); pm_runtime_disable(&pdev->dev); input_unregister_device(keypad_data->input); iounmap(keypad_data->base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); kfree(keypad_data); platform_set_drvdata(pdev, NULL); return 0; } static int omap4_keypad_suspend(struct device *dev) { 
struct platform_device *pdev = to_platform_device(dev); struct omap4_keypad *keypad_data = platform_get_drvdata(pdev); if (keypad_data->keypad_pad_wkup) keypad_data->keypad_pad_wkup(1); return 0; } static int omap4_keypad_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct omap4_keypad *keypad_data = platform_get_drvdata(pdev); if (keypad_data->keypad_pad_wkup) keypad_data->keypad_pad_wkup(0); return 0; } static const struct dev_pm_ops omap4_keypad_pm_ops = { .suspend = omap4_keypad_suspend, .resume = omap4_keypad_resume, }; static struct platform_driver omap4_keypad_driver = { .probe = omap4_keypad_probe, .remove = __devexit_p(omap4_keypad_remove), .driver = { .name = "omap4-keypad", .owner = THIS_MODULE, .pm = &omap4_keypad_pm_ops, }, }; static int __init omap4_keypad_init(void) { return platform_driver_register(&omap4_keypad_driver); } module_init(omap4_keypad_init); static void __exit omap4_keypad_exit(void) { platform_driver_unregister(&omap4_keypad_driver); }
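/*
 * Hedged sketch (not from the original file): the interrupt handler above
 * maps a (row, col) position to a keymap slot with
 * MATRIX_SCAN_CODE(row, col, keypad_data->row_shift), where row_shift is
 * get_count_order(pdata->cols) from probe.  The macro boils down to the
 * arithmetic below, which is why the keymap is allocated with
 * pdata->rows << row_shift entries.
 */
static inline unsigned int example_scan_code(unsigned int row,
					     unsigned int col,
					     unsigned int row_shift)
{
	/* same arithmetic as MATRIX_SCAN_CODE(row, col, row_shift) */
	return (row << row_shift) + col;
}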
static void ipt_logfn(unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const char *prefix) { struct ipt_log_info loginfo = { .level = 0, .logflags = IPT_LOG_MASK, .prefix = "" }; ipt_log_packet(hooknum, skb, in, out, &loginfo, KERN_WARNING, prefix); } static int ipt_log_checkentry(const char *tablename, const struct ipt_entry *e, void *targinfo, unsigned int targinfosize, unsigned int hook_mask) { const struct ipt_log_info *loginfo = targinfo; if (targinfosize != IPT_ALIGN(sizeof(struct ipt_log_info))) { DEBUGP("LOG: targinfosize %u != %u\n", targinfosize, IPT_ALIGN(sizeof(struct ipt_log_info))); return 0; } if (loginfo->level >= 8) { DEBUGP("LOG: level %u >= 8\n", loginfo->level); return 0; } if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') { DEBUGP("LOG: prefix term %i\n", loginfo->prefix[sizeof(loginfo->prefix)-1]); return 0; } return 1; } static struct ipt_target ipt_log_reg = { .name = "LOG", .target = ipt_log_target, .checkentry = ipt_log_checkentry, .me = THIS_MODULE, }; static int __init init(void) { if (ipt_register_target(&ipt_log_reg)) return -EINVAL; if (nflog) nf_log_register(PF_INET, &ipt_logfn); return 0; } static void __exit fini(void) { if (nflog) nf_log_unregister(PF_INET, &ipt_logfn); ipt_unregister_target(&ipt_log_reg); } module_init(init); module_exit(fini);
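/*
 * Usage note (illustrative, not part of ipt_LOG.c): ipt_log_checkentry()
 * above enforces that the rule's log level stays below 8 and that the
 * prefix string is NUL-terminated, which corresponds to iptables options
 * along the lines of
 *
 *	iptables -A INPUT -p icmp -j LOG --log-level 4 --log-prefix "icmp: "
 *
 * The nflog path registered in init() additionally lets other netfilter
 * code log packets through ipt_logfn() without an explicit LOG rule.
 */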
static irqreturn_t powerbutton_irq(int irq, void *_pwr) { struct twl6030_pwr_button *pwr = _pwr; int hw_state; int pwr_val; static int prev_hw_state = 0xFFFF; static int push_release_flag; hw_state = twl6030_readb(pwr, TWL6030_MODULE_ID0, STS_HW_CONDITIONS); pwr_val = !(hw_state & PWR_PWRON_IRQ); printk("%s: power button status %d\n", __func__, pwr_val); //[email protected] => [START] keylock command #if defined(CONFIG_MACH_LGE_COSMO) || defined(CONFIG_MACH_LGE_CX2) if ((prev_hw_state != pwr_val) && (prev_hw_state != 0xFFFF) && (!atcmd_keylock)) { #else if ((prev_hw_state != pwr_val) && (prev_hw_state != 0xFFFF)) { #endif //[email protected] <= [END] push_release_flag = 0; input_report_key(pwr->input_dev, pwr->report_key, pwr_val); input_sync(pwr->input_dev); //[email protected] => [START] keylock command #if defined(CONFIG_MACH_LGE_COSMO) || defined(CONFIG_MACH_LGE_CX2) } else if ((!push_release_flag) && (!atcmd_keylock)) { #else } else if (!push_release_flag) { #endif //[email protected] <= [END] push_release_flag = 1; input_report_key(pwr->input_dev, pwr->report_key, !pwr_val); input_sync(pwr->input_dev); msleep(20); input_report_key(pwr->input_dev, pwr->report_key, pwr_val); input_sync(pwr->input_dev); } else push_release_flag = 0; prev_hw_state = pwr_val; return IRQ_HANDLED; } static int __devinit twl6030_pwrbutton_probe(struct platform_device *pdev) { struct twl6030_pwr_button *pwr_button; int irq = platform_get_irq(pdev, 0); int err = -ENODEV; pr_info("%s: Enter\n", __func__); pwr_button = kzalloc(sizeof(struct twl6030_pwr_button), GFP_KERNEL); if (!pwr_button) return -ENOMEM; pwr_button->input_dev = input_allocate_device(); if (!pwr_button->input_dev) { dev_dbg(&pdev->dev, "Can't allocate power button\n"); goto input_error; } __set_bit(EV_KEY, pwr_button->input_dev->evbit); pwr_button->report_key = KEY_POWER; pwr_button->dev = &pdev->dev; pwr_button->input_dev->evbit[0] = BIT_MASK(EV_KEY); pwr_button->input_dev->keybit[BIT_WORD(pwr_button->report_key)] = BIT_MASK(pwr_button->report_key); pwr_button->input_dev->name = "twl6030_pwrbutton"; pwr_button->input_dev->phys = "twl6030_pwrbutton/input0"; pwr_button->input_dev->dev.parent = &pdev->dev; err = request_threaded_irq(irq, NULL, powerbutton_irq, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, "twl6030_pwrbutton", pwr_button); if (err < 0) { dev_dbg(&pdev->dev, "Can't get IRQ for pwrbutton: %d\n", err); goto free_input_dev; } err = input_register_device(pwr_button->input_dev); if (err) { dev_dbg(&pdev->dev, "Can't register power button: %d\n", err); goto free_irq; } twl6030_interrupt_unmask(0x01, REG_INT_MSK_LINE_A); twl6030_interrupt_unmask(0x01, REG_INT_MSK_STS_A); platform_set_drvdata(pdev, pwr_button); return 0; free_irq: free_irq(irq, NULL); free_input_dev: input_free_device(pwr_button->input_dev); input_error: kfree(pwr_button); return err; } static int __devexit twl6030_pwrbutton_remove(struct platform_device *pdev) { struct input_dev *pwr = platform_get_drvdata(pdev); int irq = platform_get_irq(pdev, 0); free_irq(irq, pwr); input_unregister_device(pwr); return 0; } struct platform_driver twl6030_pwrbutton_driver = { .probe = twl6030_pwrbutton_probe, .remove = __devexit_p(twl6030_pwrbutton_remove), .driver = { .name = "twl6030_pwrbutton", .owner = THIS_MODULE, }, }; static int __init twl6030_pwrbutton_init(void) { return platform_driver_register(&twl6030_pwrbutton_driver); } module_init(twl6030_pwrbutton_init); static void __exit twl6030_pwrbutton_exit(void) { platform_driver_unregister(&twl6030_pwrbutton_driver); }
/* Returns 1 if ok, 0 if error in module and -1 if module wasn't found */ static int module_load_name(const char *path, const char *rootmodule, const char *submodule, int silent) { void (*module_init) (void); void (*module_deinit) (void); GModule *gmodule; MODULE_REC *module; MODULE_FILE_REC *rec; gpointer value1, value2; char *initfunc, *deinitfunc; int found; gmodule = module_open(path, &found); if (gmodule == NULL) { if (!silent || found) { module_error(MODULE_ERROR_LOAD, g_module_error(), rootmodule, submodule); } return found ? 0 : -1; } /* get the module's init() and deinit() functions */ initfunc = module_get_func(rootmodule, submodule, "init"); deinitfunc = module_get_func(rootmodule, submodule, "deinit"); found = g_module_symbol(gmodule, initfunc, &value1) && g_module_symbol(gmodule, deinitfunc, &value2); g_free(initfunc); g_free(deinitfunc); module_init = value1; module_deinit = value2; if (!found) { module_error(MODULE_ERROR_INVALID, NULL, rootmodule, submodule); g_module_close(gmodule); return 0; } /* Call the module's init() function - it should register itself with module_register() function, abort if it doesn't. */ module_init(); module = module_find(rootmodule); rec = module == NULL ? NULL : strcmp(rootmodule, submodule) == 0 ? module_file_find(module, "core") : module_file_find(module, submodule); if (rec == NULL) { rec = module_register_full(rootmodule, submodule, NULL); rec->gmodule = gmodule; module_file_unload(rec); module_error(MODULE_ERROR_INVALID, NULL, rootmodule, submodule); return 0; } rec->module_deinit = module_deinit; rec->gmodule = gmodule; rec->initialized = TRUE; settings_check_module(rec->defined_module_name); signal_emit("module loaded", 2, rec->root, rec); return 1; }
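/*
 * Hedged sketch (not part of the loader above): module_load_name() resolves
 * an "init" and a "deinit" symbol via module_get_func() and requires init()
 * to have registered the module (see the module_register() comment in the
 * code).  A minimal module body might therefore look roughly like the
 * following; the exported names and the exact module_register() arguments
 * are assumptions based on that comment, not taken from the source.
 */
void example_core_init(void)
{
	/* make module_find()/module_file_find() in the loader succeed */
	module_register("example", "core");
}

void example_core_deinit(void)
{
	/* tear down whatever init() created */
}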
static int mxc_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { int new_margin; int bootr; static struct watchdog_info ident = { .identity = "MXC Watchdog", .options = WDIOF_SETTIMEOUT, .firmware_version = 0, }; switch (cmd) { default: return -ENOIOCTLCMD; case WDIOC_GETSUPPORT: return copy_to_user((struct watchdog_info __user *)arg, &ident, sizeof(ident)); case WDIOC_GETSTATUS: return put_user(0, (int __user *)arg); case WDIOC_GETBOOTSTATUS: bootr = mxc_wdt_get_bootreason(wdt_base_reg); return put_user(bootr, (int __user *)arg); case WDIOC_KEEPALIVE: mxc_wdt_ping(wdt_base_reg); return 0; case WDIOC_SETTIMEOUT: if (get_user(new_margin, (int __user *)arg)) return -EFAULT; mxc_wdt_adjust_timeout(new_margin); mxc_wdt_disable(wdt_base_reg); mxc_wdt_set_timeout(wdt_base_reg); mxc_wdt_enable(wdt_base_reg); mxc_wdt_ping(wdt_base_reg); return 0; case WDIOC_GETTIMEOUT: mxc_wdt_ping(wdt_base_reg); new_margin = mxc_wdt_get_timeout(wdt_base_reg); return put_user(new_margin, (int __user *)arg); } } static struct file_operations mxc_wdt_fops = { .owner = THIS_MODULE, .write = mxc_wdt_write, .ioctl = mxc_wdt_ioctl, .open = mxc_wdt_open, .release = mxc_wdt_release, }; static struct miscdevice mxc_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &mxc_wdt_fops }; static int __init mxc_wdt_probe(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct resource *res, *mem; int ret; /* reserve static register mappings */ res = platform_get_resource(pdev, IORESOURCE_MEM, dev_num); if (!res) return -ENOENT; mem = request_mem_region(res->start, res->end - res->start + 1, pdev->name); if (mem == NULL) return -EBUSY; dev_set_drvdata(dev, mem); wdt_base_reg = IO_ADDRESS(res->start); mxc_wdt_disable(wdt_base_reg); mxc_wdt_adjust_timeout(timer_margin); mxc_wdt_users = 0; mxc_wdt_miscdev.dev = dev; ret = misc_register(&mxc_wdt_miscdev); if (ret) goto fail; pr_info("MXC Watchdog # %d Timer: initial timeout %d sec\n", dev_num, timer_margin); return 0; fail: release_resource(mem); pr_info("MXC Watchdog Probe failed\n"); return ret; } static void mxc_wdt_shutdown(struct device *dev) { struct resource *res = dev_get_drvdata(dev); mxc_wdt_disable(res->start); pr_info("MXC Watchdog # %d shutdown\n", dev_num); } static int __exit mxc_wdt_remove(struct device *dev) { struct resource *mem = dev_get_drvdata(dev); misc_deregister(&mxc_wdt_miscdev); release_resource(mem); pr_info("MXC Watchdog # %d removed\n", dev_num); return 0; } #ifdef CONFIG_PM /* REVISIT ... not clear this is the best way to handle system suspend; and * it's very inappropriate for selective device suspend (e.g. suspending this * through sysfs rather than by stopping the watchdog daemon). Also, this * may not play well enough with NOWAYOUT... 
*/ static int mxc_wdt_suspend(struct device *dev, u32 state, u32 level) { struct resource *res = dev_get_drvdata(dev); if (level == SUSPEND_POWER_DOWN && mxc_wdt_users) mxc_wdt_disable(res->start); return 0; } static int mxc_wdt_resume(struct device *dev, u32 level) { struct resource *res = dev_get_drvdata(dev); if (level == RESUME_POWER_ON && mxc_wdt_users) { mxc_wdt_enable(res->start); mxc_wdt_ping(res->start); } return 0; } #else #define mxc_wdt_suspend NULL #define mxc_wdt_resume NULL #endif static struct device_driver mxc_wdt_driver = { .name = "mxc_wdt", .bus = &platform_bus_type, .probe = mxc_wdt_probe, .shutdown = mxc_wdt_shutdown, .remove = __exit_p(mxc_wdt_remove), .suspend = mxc_wdt_suspend, .resume = mxc_wdt_resume, }; static int __init mxc_wdt_init(void) { pr_info("MXC WatchDog Driver %s\n", DVR_VER); if ((timer_margin < TIMER_MARGIN_MIN) || (timer_margin > TIMER_MARGIN_MAX)) { pr_info("MXC watchdog error. wrong timer_margin %d\n", timer_margin); pr_info(" Range: %d to %d seconds\n", TIMER_MARGIN_MIN, TIMER_MARGIN_MAX); return -EINVAL; } return driver_register(&mxc_wdt_driver); } static void __exit mxc_wdt_exit(void) { driver_unregister(&mxc_wdt_driver); pr_info("MXC WatchDog Driver removed\n"); } module_init(mxc_wdt_init); module_exit(mxc_wdt_exit); MODULE_AUTHOR("Freescale Semiconductor, Inc."); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
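/*
 * Hedged user-space sketch (not part of this driver): mxc_wdt_ioctl() above
 * implements the standard /dev/watchdog character-device interface, so a
 * watchdog daemon would typically drive it roughly as below.  The device
 * path comes from the miscdevice name registered above; the 60-second
 * timeout is only an example value.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

static void example_watchdog_loop(void)
{
	int timeout = 60;
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return;

	/* exercises the WDIOC_SETTIMEOUT case in mxc_wdt_ioctl() */
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);

	for (;;) {
		/* exercises the WDIOC_KEEPALIVE case (mxc_wdt_ping) */
		ioctl(fd, WDIOC_KEEPALIVE, 0);
		sleep(timeout / 2);
	}
}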
} # 61 "/scratch/jmg3/rodinia_3.0/openmp/srad/srad_v1/resize.c" output[j*output_rows+i] = input[j2*input_rows+i2]; # 62 "/scratch/jmg3/rodinia_3.0/openmp/srad/srad_v1/resize.c" } # 63 "/scratch/jmg3/rodinia_3.0/openmp/srad/srad_v1/resize.c" } # 64 "/scratch/jmg3/rodinia_3.0/openmp/srad/srad_v1/resize.c" # 65 "/scratch/jmg3/rodinia_3.0/openmp/srad/srad_v1/resize.c" } # 66 "/scratch/jmg3/rodinia_3.0/openmp/srad/srad_v1/resize.c" # 67 "/scratch/jmg3/rodinia_3.0/openmp/srad/srad_v1/resize.c" } static int module_init() { init_module(10788734578353475988UL, 2, 1, 0, 1, 1, 0, 1, 0, 0, 0, &____alias_loc_id_0, (unsigned)12, (unsigned)0, (unsigned)0, (10788734578353475988UL + 1UL), (10788734578353475988UL + 2UL), (10788734578353475988UL + 3UL), (10788734578353475988UL + 4UL), (10788734578353475988UL + 5UL), (10788734578353475988UL + 6UL), (10788734578353475988UL + 7UL), (10788734578353475988UL + 8UL), (10788734578353475988UL + 9UL), (10788734578353475988UL + 10UL), (10788734578353475988UL + 11UL), (10788734578353475988UL + 171UL), "resize", 0, "_Z6resizePfiiS_iii", "_Z10resize_npmPfiiS_iii", 0, 7, (10788734578353475988UL + 168UL), 0UL, 0UL, (10788734578353475988UL + 171UL), 0UL, 0UL, 0UL, 0UL, 0, "resize", &(____chimes_does_checkpoint_resize_npm), (10788734578353475988UL + 1UL), (10788734578353475988UL + 168UL), (10788734578353475988UL + 4UL), (10788734578353475988UL + 171UL), "resize", "_Z6resizePfiiS_iii", 0, 0); return 0; } static const int __libchimes_module_init = module_init();
static int choke_dump(struct Qdisc *sch, struct sk_buff *skb) { struct choke_sched_data *q = qdisc_priv(sch); struct nlattr *opts = NULL; struct tc_red_qopt opt = { .limit = q->limit, .flags = q->flags, .qth_min = q->parms.qth_min >> q->parms.Wlog, .qth_max = q->parms.qth_max >> q->parms.Wlog, .Wlog = q->parms.Wlog, .Plog = q->parms.Plog, .Scell_log = q->parms.Scell_log, }; opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) || nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P)) goto nla_put_failure; return nla_nest_end(skb, opts); nla_put_failure: nla_nest_cancel(skb, opts); return -EMSGSIZE; } static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct choke_sched_data *q = qdisc_priv(sch); struct tc_choke_xstats st = { .early = q->stats.prob_drop + q->stats.forced_drop, .marked = q->stats.prob_mark + q->stats.forced_mark, .pdrop = q->stats.pdrop, .other = q->stats.other, .matched = q->stats.matched, }; return gnet_stats_copy_app(d, &st, sizeof(st)); } static void choke_destroy(struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); tcf_destroy_chain(&q->filter_list); choke_free(q->tab); } static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg) { return NULL; } static unsigned long choke_get(struct Qdisc *sch, u32 classid) { return 0; } static void choke_put(struct Qdisc *q, unsigned long cl) { } static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent, u32 classid) { return 0; } static struct tcf_proto __rcu **choke_find_tcf(struct Qdisc *sch, unsigned long cl) { struct choke_sched_data *q = qdisc_priv(sch); if (cl) return NULL; return &q->filter_list; } static int choke_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { tcm->tcm_handle |= TC_H_MIN(cl); return 0; } static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg) { if (!arg->stop) { if (arg->fn(sch, 1, arg) < 0) { arg->stop = 1; return; } arg->count++; } } static const struct Qdisc_class_ops choke_class_ops = { .leaf = choke_leaf, .get = choke_get, .put = choke_put, .tcf_chain = choke_find_tcf, .bind_tcf = choke_bind, .unbind_tcf = choke_put, .dump = choke_dump_class, .walk = choke_walk, }; static struct sk_buff *choke_peek_head(struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); return (q->head != q->tail) ? q->tab[q->head] : NULL; } static struct Qdisc_ops choke_qdisc_ops __read_mostly = { .id = "choke", .priv_size = sizeof(struct choke_sched_data), .enqueue = choke_enqueue, .dequeue = choke_dequeue, .peek = choke_peek_head, .drop = choke_drop, .init = choke_init, .destroy = choke_destroy, .reset = choke_reset, .change = choke_change, .dump = choke_dump, .dump_stats = choke_dump_stats, .owner = THIS_MODULE, }; static int __init choke_module_init(void) { return register_qdisc(&choke_qdisc_ops); } static void __exit choke_module_exit(void) { unregister_qdisc(&choke_qdisc_ops); } module_init(choke_module_init) module_exit(choke_module_exit) MODULE_LICENSE("GPL");
static int __devinit sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { static const struct ata_port_info info = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = 0x1f, .mwdma_mask = 0x07, .udma_mask = ATA_UDMA6, .port_ops = &sil680_port_ops }; static const struct ata_port_info info_slow = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = 0x1f, .mwdma_mask = 0x07, .udma_mask = ATA_UDMA5, .port_ops = &sil680_port_ops }; const struct ata_port_info *ppi[] = { &info, NULL }; static int printed_version; struct ata_host *host; void __iomem *mmio_base; int rc, try_mmio; if (!printed_version++) dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); rc = pcim_enable_device(pdev); if (rc) return rc; switch (sil680_init_chip(pdev, &try_mmio)) { case 0: ppi[0] = &info_slow; break; case 0x30: return -ENODEV; } if (!try_mmio) goto use_ioports; /* Try to acquire MMIO resources and fallback to PIO if * that fails */ rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME); if (rc) goto use_ioports; /* Allocate host and set it up */ host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); if (!host) return -ENOMEM; host->iomap = pcim_iomap_table(pdev); /* Setup DMA masks */ rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); if (rc) return rc; rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); if (rc) return rc; pci_set_master(pdev); /* Get MMIO base and initialize port addresses */ mmio_base = host->iomap[SIL680_MMIO_BAR]; host->ports[0]->ioaddr.bmdma_addr = mmio_base + 0x00; host->ports[0]->ioaddr.cmd_addr = mmio_base + 0x80; host->ports[0]->ioaddr.ctl_addr = mmio_base + 0x8a; host->ports[0]->ioaddr.altstatus_addr = mmio_base + 0x8a; ata_sff_std_ports(&host->ports[0]->ioaddr); host->ports[1]->ioaddr.bmdma_addr = mmio_base + 0x08; host->ports[1]->ioaddr.cmd_addr = mmio_base + 0xc0; host->ports[1]->ioaddr.ctl_addr = mmio_base + 0xca; host->ports[1]->ioaddr.altstatus_addr = mmio_base + 0xca; ata_sff_std_ports(&host->ports[1]->ioaddr); /* Register & activate */ return ata_host_activate(host, pdev->irq, ata_sff_interrupt, IRQF_SHARED, &sil680_sht); use_ioports: return ata_pci_sff_init_one(pdev, ppi, &sil680_sht, NULL); } #ifdef CONFIG_PM static int sil680_reinit_one(struct pci_dev *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); int try_mmio, rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; sil680_init_chip(pdev, &try_mmio); ata_host_resume(host); return 0; } #endif static const struct pci_device_id sil680[] = { { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), }, { }, }; static struct pci_driver sil680_pci_driver = { .name = DRV_NAME, .id_table = sil680, .probe = sil680_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = sil680_reinit_one, #endif }; static int __init sil680_init(void) { return pci_register_driver(&sil680_pci_driver); } static void __exit sil680_exit(void) { pci_unregister_driver(&sil680_pci_driver); } MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for SI680 PATA"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, sil680); MODULE_VERSION(DRV_VERSION); module_init(sil680_init); module_exit(sil680_exit);
static void esmtpparent()
{
	unsigned i;
	fd_set	fdc, fds;
	time_t	current_time;

	libmail_changeuidgid(MAILUID, MAILGID);
	module_init(&terminated_child);

	if ((info=(struct esmtpchildinfo *)malloc(sizeof(*info)*
		module_nchildren)) == 0)
		clog_msg_errno();

	for (i=0; i<module_nchildren; i++)
	{
		info[i].pid= -1;
		info[i].cmdpipe= -1;
		info[i].host=0;
		info[i].pendel=0;
	}

	if (pipe(completionpipe) < 0)
		clog_msg_errno();

	if ((childresultpipe=fdopen(completionpipe[0], "r")) == 0)
		clog_msg_errno();

	FD_ZERO(&fdc);
	FD_SET(0, &fdc);
	FD_SET(completionpipe[0], &fdc);

	mybuf_init(&courierdbuf, 0);
	mybuf_init(&childbuf, completionpipe[0]);

	module_blockset();
	time(&current_time);

	for (;;)
	{
		time_t	wait_time;
		struct timeval	tv;

		wait_time=0;
		for (i=0; i<module_nchildren; i++)
		{
			if (!ESMTP_IDLE(&info[i]))
				continue;

			if (info[i].termtime <= current_time)
			{
				close(info[i].cmdpipe);
				info[i].cmdpipe= -1;
				continue;
			}

			if (wait_time == 0 || info[i].termtime < wait_time)
				wait_time=info[i].termtime;
		}

		if (wait_time)
		{
			tv.tv_sec= wait_time - current_time;
			tv.tv_usec=0;
		}

		fds=fdc;

		module_blockclr();
		while (select(completionpipe[0]+1, &fds, (fd_set *)0,
			(fd_set *)0,
			(wait_time ? &tv:(struct timeval *)0)) < 0)
		{
			if (errno != EINTR)
				clog_msg_errno();
		}
		module_blockset();
		time(&current_time);

		if (FD_ISSET(completionpipe[0], &fds))
		{
			char	*line;

			do
			{
				pid_t	p;

				line=module_getline( &call_mybuf_get,
					&childbuf);

				if (parse_ack(line, &i, &p) ||
					i >= module_nchildren ||
					(p == info[i].pid &&
						!ESMTP_BUSY(&info[i])))
				{
					clog_msg_start_err();
					clog_msg_str("INVALID message from child process.");
					clog_msg_send();
					_exit(0);
				}

				if (p != info[i].pid)
					continue;

				info[i].isbusy=0;
				info[i].termtime=current_time + esmtpkeepalive;
				if (info[i].pendel)
				{
					free(info[i].pendel);
					info[i].pendel=0;
				}
				module_completed(i, module_delids[i]);
			} while (mybuf_more(&childbuf));
		}

		if (!FD_ISSET(0, &fds))
			continue;

		do
		{
			char	**cols;
			const char *hostp;
			size_t	hostplen;
			time_t	misctime;
			unsigned j;
			char	*line;

			line=module_getline( &call_mybuf_get, &courierdbuf);

			if (!line)
			{
				module_restore();

				/*
				** If all processes are idle, wait for them
				** to finish normally.  Otherwise, kill
				** the processes.
				*/

				for (j=0; j<module_nchildren; j++)
					if (ESMTP_BUSY(&info[j]))
						break;

				if (j < module_nchildren)
				{
					for (j=0; j<module_nchildren; j++)
						if (info[j].pid > 0)
							kill(info[j].pid,
								SIGTERM);
				}
				else
				{
					int	waitstat;

					for (j=0; j<module_nchildren; j++)
					{
						if (info[j].cmdpipe > 0)
						{
							close(info[j].cmdpipe);
							info[j].cmdpipe= -1;
						}
					}

					while (wait(&waitstat) != -1 ||
						errno == EINTR)
						;
				}
				_exit(0);
			}

			cols=module_parsecols(line);
			if (!cols)
				_exit(0);

			hostp=MODULEDEL_HOST(cols);
			for (hostplen=0; hostp[hostplen] &&
				hostp[hostplen] != '\t'; hostplen++)
				;

			for (i=0; i<module_nchildren; i++)
			{
				if (!ESMTP_IDLE(&info[i]))
					continue;
				if (memcmp(info[i].host, hostp, hostplen) == 0
					&& info[i].host[hostplen] == 0)
					break;
			}

			if (i < module_nchildren)	/* Reuse a process */
			{
				send_child(i, line, cols);
				continue;
			}

			for (i=0; i<module_nchildren; i++)
				if (ESMTP_NOCHILD(&info[i]))
					break;

			if (i < module_nchildren)	/* We can fork */
			{
				start_child(i, line, cols);
				send_child(i, line, cols);
				continue;
			}

			/*
			** Find a process that's been idled the longest,
			** and reuse that one.
*/ misctime=0; j=0; for (i=0; i<module_nchildren; i++) { if (ESMTP_IDLE(&info[i]) && (misctime == 0 || misctime > info[i].termtime)) { j=i; misctime=info[i].termtime; } } if (misctime) { if (info[j].pendel) { clog_msg_start_err(); clog_msg_str("INTERNAL ERROR: unexpected scheduled delivery."); clog_msg_send(); _exit(1); } info[j].pendel=strcpy( courier_malloc(strlen(line)+1), line); close(info[j].cmdpipe); info[j].cmdpipe= -1; continue; } /* The ONLY remaining possibility is something in ** the TERMINATING stage, without another delivery ** already scheduled for that slot. */ for (i=0; i<module_nchildren; i++) { if (ESMTP_TERMINATING(&info[i]) && info[i].pendel == 0) break; } if (i < module_nchildren) { info[i].pendel=strcpy( courier_malloc(strlen(line)+1), line); continue; } clog_msg_start_err(); clog_msg_str("INTERNAL ERROR: unexpected delivery."); clog_msg_send(); _exit(1); } while (mybuf_more(&courierdbuf)); } }
static int hvfb_probe(struct hv_device *hdev, const struct hv_vmbus_device_id *dev_id) { struct fb_info *info; struct hvfb_par *par; int ret; info = framebuffer_alloc(sizeof(struct hvfb_par), &hdev->device); if (!info) { pr_err("No memory for framebuffer info\n"); return -ENOMEM; } par = info->par; par->info = info; par->fb_ready = false; init_completion(&par->wait); INIT_DELAYED_WORK(&par->dwork, hvfb_update_work); /* Connect to VSP */ hv_set_drvdata(hdev, info); ret = synthvid_connect_vsp(hdev); if (ret) { pr_err("Unable to connect to VSP\n"); goto error1; } ret = hvfb_getmem(info); if (ret) { pr_err("No memory for framebuffer\n"); goto error2; } hvfb_get_option(info); pr_info("Screen resolution: %dx%d, Color depth: %d\n", screen_width, screen_height, screen_depth); /* Set up fb_info */ info->flags = FBINFO_DEFAULT; info->var.xres_virtual = info->var.xres = screen_width; info->var.yres_virtual = info->var.yres = screen_height; info->var.bits_per_pixel = screen_depth; if (info->var.bits_per_pixel == 16) { info->var.red = (struct fb_bitfield){11, 5, 0}; info->var.green = (struct fb_bitfield){5, 6, 0}; info->var.blue = (struct fb_bitfield){0, 5, 0}; info->var.transp = (struct fb_bitfield){0, 0, 0}; } else { info->var.red = (struct fb_bitfield){16, 8, 0}; info->var.green = (struct fb_bitfield){8, 8, 0}; info->var.blue = (struct fb_bitfield){0, 8, 0}; info->var.transp = (struct fb_bitfield){24, 8, 0}; } info->var.activate = FB_ACTIVATE_NOW; info->var.height = -1; info->var.width = -1; info->var.vmode = FB_VMODE_NONINTERLACED; strcpy(info->fix.id, KBUILD_MODNAME); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = FB_VISUAL_TRUECOLOR; info->fix.line_length = screen_width * screen_depth / 8; info->fix.accel = FB_ACCEL_NONE; info->fbops = &hvfb_ops; info->pseudo_palette = par->pseudo_palette; /* Send config to host */ ret = synthvid_send_config(hdev); if (ret) goto error; ret = register_framebuffer(info); if (ret) { pr_err("Unable to register framebuffer\n"); goto error; } par->fb_ready = true; return 0; error: hvfb_putmem(info); error2: vmbus_close(hdev->channel); error1: cancel_delayed_work_sync(&par->dwork); hv_set_drvdata(hdev, NULL); framebuffer_release(info); return ret; } static int hvfb_remove(struct hv_device *hdev) { struct fb_info *info = hv_get_drvdata(hdev); struct hvfb_par *par = info->par; par->update = false; par->fb_ready = false; unregister_framebuffer(info); cancel_delayed_work_sync(&par->dwork); vmbus_close(hdev->channel); hv_set_drvdata(hdev, NULL); hvfb_putmem(info); framebuffer_release(info); return 0; } static DEFINE_PCI_DEVICE_TABLE(pci_stub_id_table) = { { .vendor = PCI_VENDOR_ID_MICROSOFT, .device = PCI_DEVICE_ID_HYPERV_VIDEO, }, { /* end of list */ } }; static const struct hv_vmbus_device_id id_table[] = { /* Synthetic Video Device GUID */ {HV_SYNTHVID_GUID}, {} }; MODULE_DEVICE_TABLE(pci, pci_stub_id_table); MODULE_DEVICE_TABLE(vmbus, id_table); static struct hv_driver hvfb_drv = { .name = KBUILD_MODNAME, .id_table = id_table, .probe = hvfb_probe, .remove = hvfb_remove, }; static int hvfb_pci_stub_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { return 0; } static void hvfb_pci_stub_remove(struct pci_dev *pdev) { } static struct pci_driver hvfb_pci_stub_driver = { .name = KBUILD_MODNAME, .id_table = pci_stub_id_table, .probe = hvfb_pci_stub_probe, .remove = hvfb_pci_stub_remove, }; static int __init hvfb_drv_init(void) { int ret; ret = vmbus_driver_register(&hvfb_drv); if (ret != 0) return ret; ret = pci_register_driver(&hvfb_pci_stub_driver); 
if (ret != 0) { vmbus_driver_unregister(&hvfb_drv); return ret; } return 0; } static void __exit hvfb_drv_exit(void) { pci_unregister_driver(&hvfb_pci_stub_driver); vmbus_driver_unregister(&hvfb_drv); } module_init(hvfb_drv_init); module_exit(hvfb_drv_exit); MODULE_LICENSE("GPL"); MODULE_VERSION(HV_DRV_VERSION); MODULE_DESCRIPTION("Microsoft Hyper-V Synthetic Video Frame Buffer Driver");
static int pm8058_kp_config_drv(int gpio_start, int num_gpios) { int rc; struct pm8058_gpio kypd_drv = { .direction = PM_GPIO_DIR_OUT, .output_buffer = PM_GPIO_OUT_BUF_OPEN_DRAIN, .output_value = 0, .pull = PM_GPIO_PULL_NO, .vin_sel = 2, .out_strength = PM_GPIO_STRENGTH_LOW, .function = PM_GPIO_FUNC_1, .inv_int_pol = 1, }; if (gpio_start < 0 || num_gpios < 0 || num_gpios > PM8058_GPIOS) return -EINVAL; while (num_gpios--) { rc = pm8058_gpio_config(gpio_start++, &kypd_drv); if (rc) { pr_err("%s: FAIL pm8058_gpio_config(): rc=%d.\n", __func__, rc); return rc; } } return 0; } static int pm8058_kp_config_sns(int gpio_start, int num_gpios) { int rc; struct pm8058_gpio kypd_sns = { .direction = PM_GPIO_DIR_IN, .pull = PM_GPIO_PULL_UP_31P5, .vin_sel = 2, .out_strength = PM_GPIO_STRENGTH_NO, .function = PM_GPIO_FUNC_NORMAL, .inv_int_pol = 1, }; if (gpio_start < 0 || num_gpios < 0 || num_gpios > PM8058_GPIOS) return -EINVAL; while (num_gpios--) { rc = pm8058_gpio_config(gpio_start++, &kypd_sns); if (rc) { pr_err("%s: FAIL pm8058_gpio_config(): rc=%d.\n", __func__, rc); return rc; } } return 0; } /* * keypad controller should be initialized in the following sequence * only, otherwise it might get into FSM stuck state. * * - Initialize keypad control parameters, like no. of rows, columns, * timing values etc., * - configure rows and column gpios pull up/down. * - set irq edge type. * - enable the keypad controller. */ static int __devinit pmic8058_kp_probe(struct platform_device *pdev) { struct pmic8058_keypad_data *pdata = pdev->dev.platform_data; const struct matrix_keymap_data *keymap_data; struct pmic8058_kp *kp; int rc; unsigned short *keycodes; u8 ctrl_val; struct pm8058_chip *pm_chip; pm_chip = dev_get_drvdata(pdev->dev.parent); if (pm_chip == NULL) { dev_err(&pdev->dev, "no parent data passed in\n"); return -EFAULT; } if (!pdata || !pdata->num_cols || !pdata->num_rows || pdata->num_cols > PM8058_MAX_COLS || pdata->num_rows > PM8058_MAX_ROWS || pdata->num_cols < PM8058_MIN_COLS || pdata->num_rows < PM8058_MIN_ROWS) { dev_err(&pdev->dev, "invalid platform data\n"); return -EINVAL; } if (pdata->rows_gpio_start < 0 || pdata->cols_gpio_start < 0) { dev_err(&pdev->dev, "invalid gpio_start platform data\n"); return -EINVAL; } if (!pdata->scan_delay_ms || pdata->scan_delay_ms > MAX_SCAN_DELAY || pdata->scan_delay_ms < MIN_SCAN_DELAY || !is_power_of_2(pdata->scan_delay_ms)) { dev_err(&pdev->dev, "invalid keypad scan time supplied\n"); return -EINVAL; } if (!pdata->row_hold_ns || pdata->row_hold_ns > MAX_ROW_HOLD_DELAY || pdata->row_hold_ns < MIN_ROW_HOLD_DELAY || ((pdata->row_hold_ns % MIN_ROW_HOLD_DELAY) != 0)) { dev_err(&pdev->dev, "invalid keypad row hold time supplied\n"); return -EINVAL; } if (pm8058_rev(pm_chip) == PM_8058_REV_1p0) { if (!pdata->debounce_ms || !is_power_of_2(pdata->debounce_ms[0]) || pdata->debounce_ms[0] > MAX_DEBOUNCE_A0_TIME || pdata->debounce_ms[0] < MIN_DEBOUNCE_A0_TIME) { dev_err(&pdev->dev, "invalid debounce time supplied\n"); return -EINVAL; } } else { if (!pdata->debounce_ms || ((pdata->debounce_ms[1] % 5) != 0) || pdata->debounce_ms[1] > MAX_DEBOUNCE_B0_TIME || pdata->debounce_ms[1] < MIN_DEBOUNCE_B0_TIME) { dev_err(&pdev->dev, "invalid debounce time supplied\n"); return -EINVAL; } } keymap_data = pdata->keymap_data; if (!keymap_data) { dev_err(&pdev->dev, "no keymap data supplied\n"); return -EINVAL; } kp = kzalloc(sizeof(*kp), GFP_KERNEL); if (!kp) return -ENOMEM; keycodes = kzalloc(PM8058_MATRIX_MAX_SIZE * sizeof(*keycodes), GFP_KERNEL); if (!keycodes) { rc = 
-ENOMEM; goto err_alloc_mem; } platform_set_drvdata(pdev, kp); mutex_init(&kp->mutex); kp->pdata = pdata; kp->dev = &pdev->dev; kp->keycodes = keycodes; kp->pm_chip = pm_chip; if (pm8058_rev(pm_chip) == PM_8058_REV_1p0) kp->flags |= KEYF_FIX_LAST_ROW; kp->input = input_allocate_device(); if (!kp->input) { dev_err(&pdev->dev, "unable to allocate input device\n"); rc = -ENOMEM; goto err_alloc_device; } /* Enable runtime PM ops, start in ACTIVE mode */ rc = pm_runtime_set_active(&pdev->dev); if (rc < 0) dev_dbg(&pdev->dev, "unable to set runtime pm state\n"); pm_runtime_enable(&pdev->dev); kp->key_sense_irq = platform_get_irq(pdev, 0); if (kp->key_sense_irq < 0) { dev_err(&pdev->dev, "unable to get keypad sense irq\n"); rc = -ENXIO; goto err_get_irq; } kp->key_stuck_irq = platform_get_irq(pdev, 1); if (kp->key_stuck_irq < 0) { dev_err(&pdev->dev, "unable to get keypad stuck irq\n"); rc = -ENXIO; goto err_get_irq; } if (pdata->input_name) kp->input->name = pdata->input_name; else kp->input->name = "PMIC8058 keypad"; if (pdata->input_phys_device) kp->input->phys = pdata->input_phys_device; else kp->input->phys = "pmic8058_keypad/input0"; kp->input->dev.parent = &pdev->dev; kp->input->id.bustype = BUS_HOST; kp->input->id.version = 0x0001; kp->input->id.product = 0x0001; kp->input->id.vendor = 0x0001; kp->input->evbit[0] = BIT_MASK(EV_KEY); if (pdata->rep) __set_bit(EV_REP, kp->input->evbit); kp->input->keycode = keycodes; kp->input->keycodemax = PM8058_MATRIX_MAX_SIZE; kp->input->keycodesize = sizeof(*keycodes); matrix_keypad_build_keymap(keymap_data, PM8058_ROW_SHIFT, kp->input->keycode, kp->input->keybit); input_set_capability(kp->input, EV_MSC, MSC_SCAN); input_set_drvdata(kp->input, kp); rc = input_register_device(kp->input); if (rc < 0) { dev_err(&pdev->dev, "unable to register keypad input device\n"); goto err_get_irq; } /* initialize keypad state */ memset(kp->keystate, 0xff, sizeof(kp->keystate)); memset(kp->stuckstate, 0xff, sizeof(kp->stuckstate)); rc = pmic8058_kpd_init(kp); if (rc < 0) { dev_err(&pdev->dev, "unable to initialize keypad controller\n"); goto err_kpd_init; } rc = pm8058_kp_config_sns(pdata->cols_gpio_start, pdata->num_cols); if (rc < 0) { dev_err(&pdev->dev, "unable to configure keypad sense lines\n"); goto err_gpio_config; } rc = pm8058_kp_config_drv(pdata->rows_gpio_start, pdata->num_rows); if (rc < 0) { dev_err(&pdev->dev, "unable to configure keypad drive lines\n"); goto err_gpio_config; } rc = request_threaded_irq(kp->key_sense_irq, NULL, pmic8058_kp_irq, IRQF_TRIGGER_RISING, "pmic-keypad", kp); if (rc < 0) { dev_err(&pdev->dev, "failed to request keypad sense irq\n"); goto err_req_sense_irq; } rc = request_threaded_irq(kp->key_stuck_irq, NULL, pmic8058_kp_stuck_irq, IRQF_TRIGGER_RISING, "pmic-keypad-stuck", kp); if (rc < 0) { dev_err(&pdev->dev, "failed to request keypad stuck irq\n"); goto err_req_stuck_irq; } rc = pmic8058_kp_read_u8(kp, &ctrl_val, KEYP_CTRL); ctrl_val |= KEYP_CTRL_KEYP_EN; rc = pmic8058_kp_write_u8(kp, ctrl_val, KEYP_CTRL); kp->ctrl_reg = ctrl_val; __dump_kp_regs(kp, "probe"); rc = device_create_file(&pdev->dev, &dev_attr_disable_kp); if (rc < 0) goto err_create_file; device_init_wakeup(&pdev->dev, pdata->wakeup); return 0; err_create_file: free_irq(kp->key_stuck_irq, NULL); err_req_stuck_irq: free_irq(kp->key_sense_irq, NULL); err_req_sense_irq: err_gpio_config: err_kpd_init: input_unregister_device(kp->input); kp->input = NULL; err_get_irq: pm_runtime_set_suspended(&pdev->dev); pm_runtime_disable(&pdev->dev); input_free_device(kp->input); 
err_alloc_device: kfree(keycodes); err_alloc_mem: kfree(kp); return rc; } static int __devexit pmic8058_kp_remove(struct platform_device *pdev) { struct pmic8058_kp *kp = platform_get_drvdata(pdev); pm_runtime_set_suspended(&pdev->dev); pm_runtime_disable(&pdev->dev); device_remove_file(&pdev->dev, &dev_attr_disable_kp); device_init_wakeup(&pdev->dev, 0); free_irq(kp->key_stuck_irq, NULL); free_irq(kp->key_sense_irq, NULL); input_unregister_device(kp->input); platform_set_drvdata(pdev, NULL); kfree(kp->input->keycode); kfree(kp); return 0; } #ifdef CONFIG_PM static int pmic8058_kp_suspend(struct device *dev) { struct pmic8058_kp *kp = dev_get_drvdata(dev); if (device_may_wakeup(dev) && !pmic8058_kp_disabled(kp)) { enable_irq_wake(kp->key_sense_irq); } else { mutex_lock(&kp->mutex); pmic8058_kp_disable(kp); mutex_unlock(&kp->mutex); } return 0; } static int pmic8058_kp_resume(struct device *dev) { struct pmic8058_kp *kp = dev_get_drvdata(dev); if (device_may_wakeup(dev) && !pmic8058_kp_disabled(kp)) { disable_irq_wake(kp->key_sense_irq); } else { mutex_lock(&kp->mutex); pmic8058_kp_enable(kp); mutex_unlock(&kp->mutex); } return 0; } static struct dev_pm_ops pm8058_kp_pm_ops = { .suspend = pmic8058_kp_suspend, .resume = pmic8058_kp_resume, }; #endif static struct platform_driver pmic8058_kp_driver = { .probe = pmic8058_kp_probe, .remove = __devexit_p(pmic8058_kp_remove), .driver = { .name = "pm8058-keypad", .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &pm8058_kp_pm_ops, #endif }, }; static int __init pmic8058_kp_init(void) { return platform_driver_register(&pmic8058_kp_driver); } module_init(pmic8058_kp_init); static void __exit pmic8058_kp_exit(void) { platform_driver_unregister(&pmic8058_kp_driver); }
static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
			int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_nat *p = a->priv;
	struct tc_nat opt = {
		.old_addr = p->old_addr,
		.new_addr = p->new_addr,
		.mask     = p->mask,
		.flags    = p->flags,

		.index    = p->tcf_index,
		.action   = p->tcf_action,
		.refcnt   = p->tcf_refcnt - ref,
		.bindcnt  = p->tcf_bindcnt - bind,
	};
	struct tcf_t t;

	/* opt lives on the stack, so there is nothing to kfree() on exit */
	NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
	NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tc_action_ops act_nat_ops = {
	.kind		= "nat",
	.hinfo		= &nat_hash_info,
	.type		= TCA_ACT_NAT,
	.capab		= TCA_CAP_NONE,
	.owner		= THIS_MODULE,
	.act		= tcf_nat,
	.dump		= tcf_nat_dump,
	.cleanup	= tcf_nat_cleanup,
	.lookup		= tcf_hash_search,
	.init		= tcf_nat_init,
	.walk		= tcf_generic_walker
};

MODULE_DESCRIPTION("Stateless NAT actions");
MODULE_LICENSE("GPL");

static int __init nat_init_module(void)
{
	return tcf_register_action(&act_nat_ops);
}

static void __exit nat_cleanup_module(void)
{
	tcf_unregister_action(&act_nat_ops);
}

module_init(nat_init_module);
module_exit(nat_cleanup_module);
static long wb_smsc_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int new_timeout; union { struct watchdog_info __user *ident; int __user *i; } uarg; static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 0, .identity = "SMsC 37B787 Watchdog", }; uarg.i = (int __user *)arg; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(uarg.ident, &ident, sizeof(ident)) ? -EFAULT : 0; case WDIOC_GETSTATUS: return put_user(wb_smsc_wdt_status(), uarg.i); case WDIOC_GETBOOTSTATUS: return put_user(0, uarg.i); case WDIOC_SETOPTIONS: { int options, retval = -EINVAL; if (get_user(options, uarg.i)) return -EFAULT; if (options & WDIOS_DISABLECARD) { wb_smsc_wdt_disable(); retval = 0; } if (options & WDIOS_ENABLECARD) { wb_smsc_wdt_enable(); retval = 0; } return retval; } case WDIOC_KEEPALIVE: wb_smsc_wdt_reset_timer(); return 0; case WDIOC_SETTIMEOUT: if (get_user(new_timeout, uarg.i)) return -EFAULT; /* the API states this is given in secs */ if (unit == UNIT_MINUTE) new_timeout /= 60; if (new_timeout < 0 || new_timeout > MAX_TIMEOUT) return -EINVAL; timeout = new_timeout; wb_smsc_wdt_set_timeout(timeout); /* fall through and return the new timeout... */ case WDIOC_GETTIMEOUT: new_timeout = timeout; if (unit == UNIT_MINUTE) new_timeout *= 60; return put_user(new_timeout, uarg.i); default: return -ENOTTY; } } /* -- Notifier funtions -----------------------------------------*/ static int wb_smsc_wdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) { /* set timeout to 0, to avoid possible race-condition */ timeout = 0; wb_smsc_wdt_disable(); } return NOTIFY_DONE; } /* -- Module's structures ---------------------------------------*/ static const struct file_operations wb_smsc_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = wb_smsc_wdt_write, .unlocked_ioctl = wb_smsc_wdt_ioctl, .open = wb_smsc_wdt_open, .release = wb_smsc_wdt_release, }; static struct notifier_block wb_smsc_wdt_notifier = { .notifier_call = wb_smsc_wdt_notify_sys, }; static struct miscdevice wb_smsc_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &wb_smsc_wdt_fops, }; /* -- Module init functions -------------------------------------*/ /* module's "constructor" */ static int __init wb_smsc_wdt_init(void) { int ret; printk(KERN_INFO "SMsC 37B787 watchdog component driver " VERSION " initialising...\n"); if (!request_region(IOPORT, IOPORT_SIZE, "SMsC 37B787 watchdog")) { printk(KERN_ERR MODNAME "Unable to register IO port %#x\n", IOPORT); ret = -EBUSY; goto out_pnp; } /* set new maximum, if it's too big */ if (timeout > MAX_TIMEOUT) timeout = MAX_TIMEOUT; /* init the watchdog timer */ wb_smsc_wdt_initialize(); ret = register_reboot_notifier(&wb_smsc_wdt_notifier); if (ret) { printk(KERN_ERR MODNAME "Unable to register reboot notifier err = %d\n", ret); goto out_io; } ret = misc_register(&wb_smsc_wdt_miscdev); if (ret) { printk(KERN_ERR MODNAME "Unable to register miscdev on minor %d\n", WATCHDOG_MINOR); goto out_rbt; } /* output info */ printk(KERN_INFO MODNAME "Timeout set to %d %s.\n", timeout, (unit == UNIT_SECOND) ? 
"second(s)" : "minute(s)"); printk(KERN_INFO MODNAME "Watchdog initialized and sleeping (nowayout=%d)...\n", nowayout); out_clean: return ret; out_rbt: unregister_reboot_notifier(&wb_smsc_wdt_notifier); out_io: release_region(IOPORT, IOPORT_SIZE); out_pnp: goto out_clean; } /* module's "destructor" */ static void __exit wb_smsc_wdt_exit(void) { /* Stop the timer before we leave */ if (!nowayout) { wb_smsc_wdt_shutdown(); printk(KERN_INFO MODNAME "Watchdog disabled.\n"); } misc_deregister(&wb_smsc_wdt_miscdev); unregister_reboot_notifier(&wb_smsc_wdt_notifier); release_region(IOPORT, IOPORT_SIZE); printk(KERN_INFO "SMsC 37B787 watchdog component driver removed.\n"); } module_init(wb_smsc_wdt_init); module_exit(wb_smsc_wdt_exit); MODULE_AUTHOR("Sven Anders <*****@*****.**>"); MODULE_DESCRIPTION("Driver for SMsC 37B787 watchdog component (Version " VERSION ")"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); #ifdef SMSC_SUPPORT_MINUTES module_param(unit, int, 0); MODULE_PARM_DESC(unit, "set unit to use, 0=seconds or 1=minutes, default is 0"); #endif module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "range is 1-255 units, default is 60"); module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
int module_load(const char *modules_path)
{
	DIR *dir = opendir(modules_path);
	struct dirent *ent = NULL;
	void *module_handle = NULL;
	void (*module_init)(void) = NULL;
	char file_path[256];

	if (dir == NULL) {
		log_error("%s", "Error loading modules!\n");
		return -1;
	}

	while ((ent = readdir(dir)) != NULL) {
		module_t *module = NULL;

		if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0)
			continue;

		/* modules live in <modules_path><name>/<prefix><name>.so */
		snprintf(file_path, sizeof(file_path), "%s%s/%s%s.so",
			 modules_path, ent->d_name, MODULES_PREFIX, ent->d_name);
		if (!fs_file_exists(file_path)) {
			log_error("%s%s%s", "Error loading mod_", ent->d_name,
				  ".so. No such file!\n");
			continue;
		}

		module_handle = dlopen(file_path, RTLD_LAZY);
		if (module_handle == NULL) {
			log_error("%s%s%s", "Error loading mod_", ent->d_name,
				  ".so. Invalid module!\n");
			continue;
		}

		module_init = dlsym(module_handle, "init");
		if (module_init == NULL) {
			log_error("%s%s%s", "Module mod_", ent->d_name,
				  ".so doesn't have an init function. Ignoring...!\n");
			dlclose(module_handle);
			continue;
		}

		log_message("%s%s\n", "Loading module: mod_", ent->d_name);

		/* grow the module table; its elements are module_t pointers */
		num_modules++;
		modules = util_alloc(modules, num_modules * sizeof(*modules));
		if (modules != NULL) {
			module_init();
			module = malloc(sizeof(*module));
			if (module != NULL) {
				size_t name_len = strlen(ent->d_name) +
						  strlen(MODULES_PREFIX) + 1;
				module->name = malloc(name_len);
				if (module->name != NULL) {
					snprintf(module->name, name_len, "%s%s",
						 MODULES_PREFIX, ent->d_name);
					module->handle = module_handle;
					modules[num_modules - 1] = module;
					continue;
				}
				free(module);
			}
		}

		log_message("%s%s\n",
			    "Error allocating memory for module mod_", ent->d_name);
		dlclose(module_handle);
		num_modules--;
		closedir(dir);
		return -1;
	}

	closedir(dir);
	return 0;
}
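/*
 * A minimal module that the loader above would accept. The only hard
 * requirement visible in module_load() is an exported symbol named
 * "init" taking no arguments; the "mod_" prefix and the directory
 * layout (<modules_path><name>/mod_<name>.so) are inferred from the
 * log strings and the path format string, so treat them as assumptions.
 *
 *   cc -shared -fPIC -o example/mod_example.so mod_example.c
 */
#include <stdio.h>

void init(void)
{
	puts("mod_example: initialized");
}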
int main(void)
{
	if (module_init() != 0)
		goto out;	/* init failed: skip module_exit() */

	module_exit();
out:
	return 0;
}
static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id) { static const struct ata_port_info info_82c700 = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = 0x1f, .mwdma_mask = 0x07, .port_ops = &optidma_port_ops }; static const struct ata_port_info info_82c700_udma = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = 0x1f, .mwdma_mask = 0x07, .udma_mask = 0x07, .port_ops = &optiplus_port_ops }; const struct ata_port_info *ppi[] = { &info_82c700, NULL }; static int printed_version; int rc; if (!printed_version++) dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); rc = pcim_enable_device(dev); if (rc) return rc; /* Fixed location chipset magic */ inw(0x1F1); inw(0x1F1); pci_clock = inb(0x1F5) & 1; /* 0 = 33Mhz, 1 = 25Mhz */ if (optiplus_with_udma(dev)) ppi[0] = &info_82c700_udma; return ata_pci_sff_init_one(dev, ppi, &optidma_sht, NULL); } static const struct pci_device_id optidma[] = { { PCI_VDEVICE(OPTI, 0xD568), }, /* Opti 82C700 */ { }, }; static struct pci_driver optidma_pci_driver = { .name = DRV_NAME, .id_table = optidma, .probe = optidma_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = ata_pci_device_resume, #endif }; static int __init optidma_init(void) { return pci_register_driver(&optidma_pci_driver); } static void __exit optidma_exit(void) { pci_unregister_driver(&optidma_pci_driver); } MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Opti Firestar/Firestar Plus"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, optidma); MODULE_VERSION(DRV_VERSION); module_init(optidma_init); module_exit(optidma_exit);
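/*
 * Aside, for readers unfamiliar with libata mode masks: pio_mask,
 * mwdma_mask and udma_mask above are bitmaps with bit N meaning
 * "mode N supported", so 0x1f is PIO0-4 and 0x07 is MWDMA0-2 or
 * UDMA0-2. The stand-alone sketch below just decodes such masks;
 * it is illustrative only and not part of the driver.
 */
#include <stdio.h>

static void print_modes(const char *name, unsigned int mask)
{
	printf("%s mask 0x%02x:", name, mask);
	for (int i = 0; i < 8; i++)
		if (mask & (1u << i))
			printf(" %s%d", name, i);
	printf("\n");
}

int main(void)
{
	print_modes("PIO", 0x1f);	/* PIO0..PIO4 */
	print_modes("MWDMA", 0x07);	/* MWDMA0..2  */
	print_modes("UDMA", 0x07);	/* UDMA0..2 (up to UDMA/33) */
	return 0;
}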
/* * This function is called at the initialization time of every * file. It is used to select which io component will be * active for a given file. */ int mca_io_base_file_select(ompi_file_t *file, mca_base_component_t *preferred) { int err; char *str; opal_list_t *selectable; opal_list_item_t *item; avail_io_t *avail, selected; /* Announce */ opal_output_verbose(10, ompi_io_base_framework.framework_output, "io:base:file_select: new file: %s", file->f_filename); /* Initialize all the relevant pointers, since they're used as sentinel values */ file->f_io_version = MCA_IO_BASE_V_NONE; file->f_io_selected_data = NULL; /* Compute the intersection of all of my available components with the components from all the other processes in this file */ /* JMS CONTINUE HERE */ /* See if a preferred component was provided. If so, try to select it. If we don't succeed, fall through and do a normal selection. */ err = OMPI_ERROR; if (NULL != preferred) { str = &(preferred->mca_component_name[0]); opal_output_verbose(10, ompi_io_base_framework.framework_output, "io:base:file_select: Checking preferred module: %s", str); selectable = check_components(&ompi_io_base_framework.framework_components, file, &str, 1); /* If we didn't get a preferred module, then call again without a preferred module. This makes the logic below dramatically simpler. */ if (NULL == selectable) { return mca_io_base_file_select(file, NULL); } /* We only fall through here if we were able to select one of the preferred modules */ } /* Nope -- a specific [set of] component[s] was not requested. Go check them all. */ else { opal_output_verbose(10, ompi_io_base_framework.framework_output, "io:base:file_select: Checking all available modules"); selectable = check_components(&ompi_io_base_framework.framework_components, file, NULL, 0); } /* Upon return from the above, the modules list will contain the list of modules that returned (priority >= 0). If we have no io modules available, it's an error */ if (NULL == selectable) { /* There's no modules available. Doh! */ /* show_help */ return OMPI_ERROR; } /* Do some kind of collective operation to find a module that everyone has available */ #if 1 /* For the moment, just take the top module off the list */ /* MSC actually take the buttom */ item = opal_list_remove_last(selectable); avail = (avail_io_t *) item; selected = *avail; OBJ_RELEASE(avail); #else /* JMS CONTINUE HERE */ #endif /* Everything left in the selectable list is therefore unwanted, and we call their unquery() method (because they all had query() invoked, but will never have init() invoked in this scope). 
     */
    for (item = opal_list_remove_first(selectable);
         item != NULL;
         item = opal_list_remove_first(selectable)) {
        avail = (avail_io_t *) item;
        unquery(avail, file);
        OBJ_RELEASE(item);
    }
    OBJ_RELEASE(selectable);

    /* Save the pointers of the selected module on the ompi_file_t */

    file->f_io_version = selected.ai_version;
    file->f_io_selected_component = selected.ai_component;
    file->f_io_selected_module = selected.ai_module;
    file->f_io_selected_data = selected.ai_module_data;

    if (!strcmp(selected.ai_component.v2_0_0.io_version.mca_component_name,
                "ompio")) {
        int ret;
        /* Return the specific error from the failed open/find call rather
           than the stale generic value left in err. */
        if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_fs_base_framework, 0))) {
            return ret;
        }
        if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_fcoll_base_framework, 0))) {
            return ret;
        }
        if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_fbtl_base_framework, 0))) {
            return ret;
        }
        if (OMPI_SUCCESS != (ret = mca_base_framework_open(&ompi_sharedfp_base_framework, 0))) {
            return ret;
        }
        if (OMPI_SUCCESS != (ret = mca_fs_base_find_available(OPAL_ENABLE_PROGRESS_THREADS,
                                                              OMPI_ENABLE_THREAD_MULTIPLE))) {
            return ret;
        }
        if (OMPI_SUCCESS != (ret = mca_fcoll_base_find_available(OPAL_ENABLE_PROGRESS_THREADS,
                                                                 OMPI_ENABLE_THREAD_MULTIPLE))) {
            return ret;
        }
        if (OMPI_SUCCESS != (ret = mca_fbtl_base_find_available(OPAL_ENABLE_PROGRESS_THREADS,
                                                                OMPI_ENABLE_THREAD_MULTIPLE))) {
            return ret;
        }
        if (OMPI_SUCCESS != (ret = mca_sharedfp_base_find_available(OPAL_ENABLE_PROGRESS_THREADS,
                                                                    OMPI_ENABLE_THREAD_MULTIPLE))) {
            return ret;
        }
    }

    /* Finally -- initialize the selected module */

    if (OMPI_SUCCESS != (err = module_init(file))) {
        return err;
    }

    /* Announce the winner */

    opal_output_verbose(10, ompi_io_base_framework.framework_output,
                        "io:base:file_select: Selected io module %s",
                        selected.ai_component.v2_0_0.io_version.mca_component_name);

    return OMPI_SUCCESS;
}
int register_dvb(struct tm6000_core *dev) { int ret = -1; struct tm6000_dvb *dvb = dev->dvb; mutex_init(&dvb->mutex); dvb->streams = 0; /* attach the frontend */ ret = tm6000_dvb_attach_frontend(dev); if (ret < 0) { printk(KERN_ERR "tm6000: couldn't attach the frontend!\n"); goto err; } ret = dvb_register_adapter(&dvb->adapter, "Trident TVMaster 6000 DVB-T", THIS_MODULE, &dev->udev->dev, adapter_nr); dvb->adapter.priv = dev; if (dvb->frontend) { switch (dev->tuner_type) { case TUNER_XC2028: { struct xc2028_config cfg = { .i2c_adap = &dev->i2c_adap, .i2c_addr = dev->tuner_addr, }; dvb->frontend->callback = tm6000_tuner_callback; ret = dvb_register_frontend(&dvb->adapter, dvb->frontend); if (ret < 0) { printk(KERN_ERR "tm6000: couldn't register frontend\n"); goto adapter_err; } if (!dvb_attach(xc2028_attach, dvb->frontend, &cfg)) { printk(KERN_ERR "tm6000: couldn't register " "frontend (xc3028)\n"); ret = -EINVAL; goto frontend_err; } printk(KERN_INFO "tm6000: XC2028/3028 asked to be " "attached to frontend!\n"); break; } case TUNER_XC5000: { struct xc5000_config cfg = { .i2c_address = dev->tuner_addr, }; dvb->frontend->callback = tm6000_xc5000_callback; ret = dvb_register_frontend(&dvb->adapter, dvb->frontend); if (ret < 0) { printk(KERN_ERR "tm6000: couldn't register frontend\n"); goto adapter_err; } if (!dvb_attach(xc5000_attach, dvb->frontend, &dev->i2c_adap, &cfg)) { printk(KERN_ERR "tm6000: couldn't register " "frontend (xc5000)\n"); ret = -EINVAL; goto frontend_err; } printk(KERN_INFO "tm6000: XC5000 asked to be " "attached to frontend!\n"); break; } } } else printk(KERN_ERR "tm6000: no frontend found\n"); dvb->demux.dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING | DMX_MEMORY_BASED_FILTERING; dvb->demux.priv = dev; dvb->demux.filternum = 8; dvb->demux.feednum = 8; dvb->demux.start_feed = tm6000_start_feed; dvb->demux.stop_feed = tm6000_stop_feed; dvb->demux.write_to_decoder = NULL; ret = dvb_dmx_init(&dvb->demux); if (ret < 0) { printk("tm6000: dvb_dmx_init failed (errno = %d)\n", ret); goto frontend_err; } dvb->dmxdev.filternum = dev->dvb->demux.filternum; dvb->dmxdev.demux = &dev->dvb->demux.dmx; dvb->dmxdev.capabilities = 0; ret = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter); if (ret < 0) { printk("tm6000: dvb_dmxdev_init failed (errno = %d)\n", ret); goto dvb_dmx_err; } return 0; dvb_dmx_err: dvb_dmx_release(&dvb->demux); frontend_err: if (dvb->frontend) { dvb_frontend_detach(dvb->frontend); dvb_unregister_frontend(dvb->frontend); } adapter_err: dvb_unregister_adapter(&dvb->adapter); err: return ret; } void unregister_dvb(struct tm6000_core *dev) { struct tm6000_dvb *dvb = dev->dvb; if (dvb->bulk_urb != NULL) { struct urb *bulk_urb = dvb->bulk_urb; kfree(bulk_urb->transfer_buffer); bulk_urb->transfer_buffer = NULL; usb_unlink_urb(bulk_urb); usb_free_urb(bulk_urb); } /* mutex_lock(&tm6000_driver.open_close_mutex); */ if (dvb->frontend) { dvb_frontend_detach(dvb->frontend); dvb_unregister_frontend(dvb->frontend); } dvb_dmxdev_release(&dvb->dmxdev); dvb_dmx_release(&dvb->demux); dvb_unregister_adapter(&dvb->adapter); mutex_destroy(&dvb->mutex); /* mutex_unlock(&tm6000_driver.open_close_mutex); */ } static int dvb_init(struct tm6000_core *dev) { struct tm6000_dvb *dvb; int rc; if (!dev) return 0; if (!dev->caps.has_dvb) return 0; dvb = kzalloc(sizeof(struct tm6000_dvb), GFP_KERNEL); if (!dvb) { printk(KERN_INFO "Cannot allocate memory\n"); return -ENOMEM; } dev->dvb = dvb; rc = register_dvb(dev); if (rc < 0) { kfree(dvb); dev->dvb = NULL; return 0; } return 0; } static 
int dvb_fini(struct tm6000_core *dev) { if (!dev) return 0; if (!dev->caps.has_dvb) return 0; if (dev->dvb) { unregister_dvb(dev); kfree(dev->dvb); dev->dvb = NULL; } return 0; } static struct tm6000_ops dvb_ops = { .type = TM6000_DVB, .name = "TM6000 dvb Extension", .init = dvb_init, .fini = dvb_fini, }; static int __init tm6000_dvb_register(void) { return tm6000_register_extension(&dvb_ops); } static void __exit tm6000_dvb_unregister(void) { tm6000_unregister_extension(&dvb_ops); } module_init(tm6000_dvb_register); module_exit(tm6000_dvb_unregister);
static int choke_dump(struct Qdisc *sch, struct sk_buff *skb) { struct choke_sched_data *q = qdisc_priv(sch); struct nlattr *opts = NULL; struct tc_red_qopt opt = { .limit = q->limit, .flags = q->flags, .qth_min = q->parms.qth_min >> q->parms.Wlog, .qth_max = q->parms.qth_max >> q->parms.Wlog, .Wlog = q->parms.Wlog, .Plog = q->parms.Plog, .Scell_log = q->parms.Scell_log, }; opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) || nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P)) goto nla_put_failure; return nla_nest_end(skb, opts); nla_put_failure: nla_nest_cancel(skb, opts); return -EMSGSIZE; } static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct choke_sched_data *q = qdisc_priv(sch); struct tc_choke_xstats st = { .early = q->stats.prob_drop + q->stats.forced_drop, .marked = q->stats.prob_mark + q->stats.forced_mark, .pdrop = q->stats.pdrop, .other = q->stats.other, .matched = q->stats.matched, }; return gnet_stats_copy_app(d, &st, sizeof(st)); } static void choke_destroy(struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); tcf_destroy_chain(&q->filter_list); choke_free(q->tab); } static struct sk_buff *choke_peek_head(struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); return (q->head != q->tail) ? q->tab[q->head] : NULL; } static struct Qdisc_ops choke_qdisc_ops __read_mostly = { .id = "choke", .priv_size = sizeof(struct choke_sched_data), .enqueue = choke_enqueue, .dequeue = choke_dequeue, .peek = choke_peek_head, .drop = choke_drop, .init = choke_init, .destroy = choke_destroy, .reset = choke_reset, .change = choke_change, .dump = choke_dump, .dump_stats = choke_dump_stats, .owner = THIS_MODULE, }; static int __init choke_module_init(void) { return register_qdisc(&choke_qdisc_ops); } static void __exit choke_module_exit(void) { unregister_qdisc(&choke_qdisc_ops); } module_init(choke_module_init) module_exit(choke_module_exit) MODULE_LICENSE("GPL");
static int btuart_config(struct pcmcia_device *link) { btuart_info_t *info = link->priv; int i; int try; /* First pass: look for a config entry that looks normal. Two tries: without IO aliases, then with aliases */ for (try = 0; try < 2; try++) if (!pcmcia_loop_config(link, btuart_check_config, &try)) goto found_port; /* Second pass: try to find an entry that isn't picky about its base address, then try to grab any standard serial port address, and finally try to get any free port. */ if (!pcmcia_loop_config(link, btuart_check_config_notpicky, NULL)) goto found_port; BT_ERR("No usable port range found"); goto failed; found_port: i = pcmcia_request_irq(link, btuart_interrupt); if (i != 0) goto failed; i = pcmcia_enable_device(link); if (i != 0) goto failed; if (btuart_open(info) != 0) goto failed; return 0; failed: btuart_release(link); return -ENODEV; } static void btuart_release(struct pcmcia_device *link) { btuart_info_t *info = link->priv; btuart_close(info); pcmcia_disable_device(link); } static const struct pcmcia_device_id btuart_ids[] = { /* don't use this driver. Use serial_cs + hci_uart instead */ PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, btuart_ids); static struct pcmcia_driver btuart_driver = { .owner = THIS_MODULE, .name = "btuart_cs", .probe = btuart_probe, .remove = btuart_detach, .id_table = btuart_ids, }; static int __init init_btuart_cs(void) { return pcmcia_register_driver(&btuart_driver); } static void __exit exit_btuart_cs(void) { pcmcia_unregister_driver(&btuart_driver); } module_init(init_btuart_cs); module_exit(exit_btuart_cs);
int exec_multiboot(const char *file, char *args) { struct multiboot_info *mbi; struct multiboot_module *mbm; struct bi_modulelist_entry *bim; int i, len; u_long marks[MARK_MAX]; u_long extmem; u_long basemem; char *cmdline; mbi = alloc(sizeof(struct multiboot_info)); mbi->mi_flags = MULTIBOOT_INFO_HAS_MEMORY; if (common_load_kernel(file, &basemem, &extmem, 0, 0, marks)) goto out; mbi->mi_mem_upper = extmem; mbi->mi_mem_lower = basemem; if (args) { mbi->mi_flags |= MULTIBOOT_INFO_HAS_CMDLINE; len = strlen(file) + 1 + strlen(args) + 1; cmdline = alloc(len); snprintf(cmdline, len, "%s %s", file, args); mbi->mi_cmdline = (char *) vtophys(cmdline); } /* pull in any modules if necessary */ if (boot_modules_enabled) { module_init(file); if (btinfo_modulelist) { mbm = alloc(sizeof(struct multiboot_module) * btinfo_modulelist->num); bim = (struct bi_modulelist_entry *) (((char *) btinfo_modulelist) + sizeof(struct btinfo_modulelist)); for (i = 0; i < btinfo_modulelist->num; i++) { mbm[i].mmo_start = bim->base; mbm[i].mmo_end = bim->base + bim->len; mbm[i].mmo_string = (char *)vtophys(bim->path); mbm[i].mmo_reserved = 0; bim++; } mbi->mi_flags |= MULTIBOOT_INFO_HAS_MODS; mbi->mi_mods_count = btinfo_modulelist->num; mbi->mi_mods_addr = vtophys(mbm); } } #ifdef DEBUG printf("Start @ 0x%lx [%ld=0x%lx-0x%lx]...\n", marks[MARK_ENTRY], marks[MARK_NSYM], marks[MARK_SYM], marks[MARK_END]); #endif #if 0 if (btinfo_symtab.nsym) { mbi->mi_flags |= MULTIBOOT_INFO_HAS_ELF_SYMS; mbi->mi_elfshdr_addr = marks[MARK_SYM]; btinfo_symtab.nsym = marks[MARK_NSYM]; btinfo_symtab.ssym = marks[MARK_SYM]; btinfo_symtab.esym = marks[MARK_END]; #endif multiboot(marks[MARK_ENTRY], vtophys(mbi), x86_trunc_page(mbi->mi_mem_lower*1024)); panic("exec returned"); out: dealloc(mbi, 0); return -1; } void x86_progress(const char *fmt, ...) { va_list ap; if ((howto & AB_SILENT) != 0) return; va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); }
/* * Perform the MPPE rekey algorithm, from RFC 3078, sec. 7.3. * Well, not what's written there, but rather what they meant. */ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) { struct scatterlist sg_in[1], sg_out[1]; struct blkcipher_desc desc = { .tfm = state->arc4 }; get_new_key_from_sha(state); if (!initial_key) { crypto_blkcipher_setkey(state->arc4, state->sha1_digest, state->keylen); sg_init_table(sg_in, 1); sg_init_table(sg_out, 1); setup_sg(sg_in, state->sha1_digest, state->keylen); setup_sg(sg_out, state->session_key, state->keylen); if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, state->keylen) != 0) { printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); } } else { memcpy(state->session_key, state->sha1_digest, state->keylen); } if (state->keylen == 8) { /* See RFC 3078 */ state->session_key[0] = 0xd1; state->session_key[1] = 0x26; state->session_key[2] = 0x9e; } crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen); } /* * Allocate space for a (de)compressor. */ static void *mppe_alloc(unsigned char *options, int optlen) { struct ppp_mppe_state *state; unsigned int digestsize; if (optlen != CILEN_MPPE + sizeof(state->master_key) || options[0] != CI_MPPE || options[1] != CILEN_MPPE) goto out; state = kzalloc(sizeof(*state), GFP_KERNEL); if (state == NULL) goto out; state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(state->arc4)) { state->arc4 = NULL; goto out_free; } state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(state->sha1)) { state->sha1 = NULL; goto out_free; } digestsize = crypto_hash_digestsize(state->sha1); if (digestsize < MPPE_MAX_KEY_LEN) goto out_free; state->sha1_digest = kmalloc(digestsize, GFP_KERNEL); if (!state->sha1_digest) goto out_free; /* Save keys. */ memcpy(state->master_key, &options[CILEN_MPPE], sizeof(state->master_key)); memcpy(state->session_key, state->master_key, sizeof(state->master_key)); /* * We defer initial key generation until mppe_init(), as mppe_alloc() * is called frequently during negotiation. */ return (void *)state; out_free: if (state->sha1_digest) kfree(state->sha1_digest); if (state->sha1) crypto_free_hash(state->sha1); if (state->arc4) crypto_free_blkcipher(state->arc4); kfree(state); out: return NULL; } /* * Deallocate space for a (de)compressor. */ static void mppe_free(void *arg) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; if (state) { if (state->sha1_digest) kfree(state->sha1_digest); if (state->sha1) crypto_free_hash(state->sha1); if (state->arc4) crypto_free_blkcipher(state->arc4); kfree(state); } } /* * Initialize (de)compressor state. */ static int mppe_init(void *arg, unsigned char *options, int optlen, int unit, int debug, const char *debugstr) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; unsigned char mppe_opts; if (optlen != CILEN_MPPE || options[0] != CI_MPPE || options[1] != CILEN_MPPE) return 0; MPPE_CI_TO_OPTS(&options[2], mppe_opts); if (mppe_opts & MPPE_OPT_128) state->keylen = 16; else if (mppe_opts & MPPE_OPT_40) state->keylen = 8; else { printk(KERN_WARNING "%s[%d]: unknown key length\n", debugstr, unit); return 0; } if (mppe_opts & MPPE_OPT_STATEFUL) state->stateful = 1; /* Generate the initial session key. */ mppe_rekey(state, 1); if (debug) { int i; char mkey[sizeof(state->master_key) * 2 + 1]; char skey[sizeof(state->session_key) * 2 + 1]; printk(KERN_DEBUG "%s[%d]: initialized with %d-bit %s mode\n", debugstr, unit, (state->keylen == 16) ? 
128 : 40, (state->stateful) ? "stateful" : "stateless"); for (i = 0; i < sizeof(state->master_key); i++) sprintf(mkey + i * 2, "%02x", state->master_key[i]); for (i = 0; i < sizeof(state->session_key); i++) sprintf(skey + i * 2, "%02x", state->session_key[i]); printk(KERN_DEBUG "%s[%d]: keys: master: %s initial session: %s\n", debugstr, unit, mkey, skey); } /* * Initialize the coherency count. The initial value is not specified * in RFC 3078, but we can make a reasonable assumption that it will * start at 0. Setting it to the max here makes the comp/decomp code * do the right thing (determined through experiment). */ state->ccount = MPPE_CCOUNT_SPACE - 1; /* * Note that even though we have initialized the key table, we don't * set the FLUSHED bit. This is contrary to RFC 3078, sec. 3.1. */ state->bits = MPPE_BIT_ENCRYPTED; state->unit = unit; state->debug = debug; return 1; } static int mppe_comp_init(void *arg, unsigned char *options, int optlen, int unit, int hdrlen, int debug) { /* ARGSUSED */ return mppe_init(arg, options, optlen, unit, debug, "mppe_comp_init"); } /* * We received a CCP Reset-Request (actually, we are sending a Reset-Ack), * tell the compressor to rekey. Note that we MUST NOT rekey for * every CCP Reset-Request; we only rekey on the next xmit packet. * We might get multiple CCP Reset-Requests if our CCP Reset-Ack is lost. * So, rekeying for every CCP Reset-Request is broken as the peer will not * know how many times we've rekeyed. (If we rekey and THEN get another * CCP Reset-Request, we must rekey again.) */ static void mppe_comp_reset(void *arg) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; state->bits |= MPPE_BIT_FLUSHED; } /* * Compress (encrypt) a packet. * It's strange to call this a compressor, since the output is always * MPPE_OVHD + 2 bytes larger than the input. */ static int mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, int isize, int osize) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; struct blkcipher_desc desc = { .tfm = state->arc4 }; int proto; struct scatterlist sg_in[1], sg_out[1]; /* * Check that the protocol is in the range we handle. */ proto = PPP_PROTOCOL(ibuf); if (proto < 0x0021 || proto > 0x00fa) return 0; /* Make sure we have enough room to generate an encrypted packet. */ if (osize < isize + MPPE_OVHD + 2) { /* Drop the packet if we should encrypt it, but can't. */ printk(KERN_DEBUG "mppe_compress[%d]: osize too small! " "(have: %d need: %d)\n", state->unit, osize, osize + MPPE_OVHD + 2); return -1; } osize = isize + MPPE_OVHD + 2; /* * Copy over the PPP header and set control bits. 
*/ obuf[0] = PPP_ADDRESS(ibuf); obuf[1] = PPP_CONTROL(ibuf); put_unaligned_be16(PPP_COMP, obuf + 2); obuf += PPP_HDRLEN; state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; if (state->debug >= 7) printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit, state->ccount); put_unaligned_be16(state->ccount, obuf); if (!state->stateful || /* stateless mode */ ((state->ccount & 0xff) == 0xff) || /* "flag" packet */ (state->bits & MPPE_BIT_FLUSHED)) { /* CCP Reset-Request */ /* We must rekey */ if (state->debug && state->stateful) printk(KERN_DEBUG "mppe_compress[%d]: rekeying\n", state->unit); mppe_rekey(state, 0); state->bits |= MPPE_BIT_FLUSHED; } obuf[0] |= state->bits; state->bits &= ~MPPE_BIT_FLUSHED; /* reset for next xmit */ obuf += MPPE_OVHD; ibuf += 2; /* skip to proto field */ isize -= 2; /* Encrypt packet */ sg_init_table(sg_in, 1); sg_init_table(sg_out, 1); setup_sg(sg_in, ibuf, isize); setup_sg(sg_out, obuf, osize); if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) { printk(KERN_DEBUG "crypto_cypher_encrypt failed\n"); return -1; } state->stats.unc_bytes += isize; state->stats.unc_packets++; state->stats.comp_bytes += osize; state->stats.comp_packets++; return osize; } /* * Since every frame grows by MPPE_OVHD + 2 bytes, this is always going * to look bad ... and the longer the link is up the worse it will get. */ static void mppe_comp_stats(void *arg, struct compstat *stats) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; *stats = state->stats; } static int mppe_decomp_init(void *arg, unsigned char *options, int optlen, int unit, int hdrlen, int mru, int debug) { /* ARGSUSED */ return mppe_init(arg, options, optlen, unit, debug, "mppe_decomp_init"); } /* * We received a CCP Reset-Ack. Just ignore it. */ static void mppe_decomp_reset(void *arg) { /* ARGSUSED */ return; } /* * Decompress (decrypt) an MPPE packet. */ static int mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, int osize) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; struct blkcipher_desc desc = { .tfm = state->arc4 }; unsigned ccount; int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; int sanity = 0; struct scatterlist sg_in[1], sg_out[1]; if (isize <= PPP_HDRLEN + MPPE_OVHD) { if (state->debug) printk(KERN_DEBUG "mppe_decompress[%d]: short pkt (%d)\n", state->unit, isize); return DECOMP_ERROR; } /* * Make sure we have enough room to decrypt the packet. * Note that for our test we only subtract 1 byte whereas in * mppe_compress() we added 2 bytes (+MPPE_OVHD); * this is to account for possible PFC. */ if (osize < isize - MPPE_OVHD - 1) { printk(KERN_DEBUG "mppe_decompress[%d]: osize too small! 
" "(have: %d need: %d)\n", state->unit, osize, isize - MPPE_OVHD - 1); return DECOMP_ERROR; } osize = isize - MPPE_OVHD - 2; /* assume no PFC */ ccount = MPPE_CCOUNT(ibuf); if (state->debug >= 7) printk(KERN_DEBUG "mppe_decompress[%d]: ccount %d\n", state->unit, ccount); /* sanity checks -- terminate with extreme prejudice */ if (!(MPPE_BITS(ibuf) & MPPE_BIT_ENCRYPTED)) { printk(KERN_DEBUG "mppe_decompress[%d]: ENCRYPTED bit not set!\n", state->unit); state->sanity_errors += 100; sanity = 1; } if (!state->stateful && !flushed) { printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in " "stateless mode!\n", state->unit); state->sanity_errors += 100; sanity = 1; } if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) { printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on " "flag packet!\n", state->unit); state->sanity_errors += 100; sanity = 1; } if (sanity) { if (state->sanity_errors < SANITY_MAX) return DECOMP_ERROR; else /* * Take LCP down if the peer is sending too many bogons. * We don't want to do this for a single or just a few * instances since it could just be due to packet corruption. */ return DECOMP_FATALERROR; } /* * Check the coherency count. */ if (!state->stateful) { /* RFC 3078, sec 8.1. Rekey for every packet. */ while (state->ccount != ccount) { mppe_rekey(state, 0); state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; } } else { /* RFC 3078, sec 8.2. */ if (!state->discard) { /* normal state */ state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; if (ccount != state->ccount) { /* * (ccount > state->ccount) * Packet loss detected, enter the discard state. * Signal the peer to rekey (by sending a CCP Reset-Request). */ state->discard = 1; return DECOMP_ERROR; } } else { /* discard state */ if (!flushed) { /* ccp.c will be silent (no additional CCP Reset-Requests). */ return DECOMP_ERROR; } else { /* Rekey for every missed "flag" packet. */ while ((ccount & ~0xff) != (state->ccount & ~0xff)) { mppe_rekey(state, 0); state->ccount = (state->ccount + 256) % MPPE_CCOUNT_SPACE; } /* reset */ state->discard = 0; state->ccount = ccount; /* * Another problem with RFC 3078 here. It implies that the * peer need not send a Reset-Ack packet. But RFC 1962 * requires it. Hopefully, M$ does send a Reset-Ack; even * though it isn't required for MPPE synchronization, it is * required to reset CCP state. */ } } if (flushed) mppe_rekey(state, 0); } /* * Fill in the first part of the PPP header. The protocol field * comes from the decrypted data. */ obuf[0] = PPP_ADDRESS(ibuf); /* +1 */ obuf[1] = PPP_CONTROL(ibuf); /* +1 */ obuf += 2; ibuf += PPP_HDRLEN + MPPE_OVHD; isize -= PPP_HDRLEN + MPPE_OVHD; /* -6 */ /* net osize: isize-4 */ /* * Decrypt the first byte in order to check if it is * a compressed or uncompressed protocol field. */ sg_init_table(sg_in, 1); sg_init_table(sg_out, 1); setup_sg(sg_in, ibuf, 1); setup_sg(sg_out, obuf, 1); if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) { printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); return DECOMP_ERROR; } /* * Do PFC decompression. * This would be nicer if we were given the actual sk_buff * instead of a char *. */ if ((obuf[0] & 0x01) != 0) { obuf[1] = obuf[0]; obuf[0] = 0; obuf++; osize++; } /* And finally, decrypt the rest of the packet. 
*/ setup_sg(sg_in, ibuf + 1, isize - 1); setup_sg(sg_out, obuf + 1, osize - 1); if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) { printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); return DECOMP_ERROR; } state->stats.unc_bytes += osize; state->stats.unc_packets++; state->stats.comp_bytes += isize; state->stats.comp_packets++; /* good packet credit */ state->sanity_errors >>= 1; return osize; } /* * Incompressible data has arrived (this should never happen!). * We should probably drop the link if the protocol is in the range * of what should be encrypted. At the least, we should drop this * packet. (How to do this?) */ static void mppe_incomp(void *arg, unsigned char *ibuf, int icnt) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; if (state->debug && (PPP_PROTOCOL(ibuf) >= 0x0021 && PPP_PROTOCOL(ibuf) <= 0x00fa)) printk(KERN_DEBUG "mppe_incomp[%d]: incompressible (unencrypted) data! " "(proto %04x)\n", state->unit, PPP_PROTOCOL(ibuf)); state->stats.inc_bytes += icnt; state->stats.inc_packets++; state->stats.unc_bytes += icnt; state->stats.unc_packets++; } /************************************************************* * Module interface table *************************************************************/ /* * Procedures exported to if_ppp.c. */ static struct compressor ppp_mppe = { .compress_proto = CI_MPPE, .comp_alloc = mppe_alloc, .comp_free = mppe_free, .comp_init = mppe_comp_init, .comp_reset = mppe_comp_reset, .compress = mppe_compress, .comp_stat = mppe_comp_stats, .decomp_alloc = mppe_alloc, .decomp_free = mppe_free, .decomp_init = mppe_decomp_init, .decomp_reset = mppe_decomp_reset, .decompress = mppe_decompress, .incomp = mppe_incomp, .decomp_stat = mppe_comp_stats, .owner = THIS_MODULE, .comp_extra = MPPE_PAD, }; /* * ppp_mppe_init() * * Prior to allowing load, try to load the arc4 and sha1 crypto * libraries. The actual use will be allocated later, but * this way the module will fail to insmod if they aren't available. */ static int __init ppp_mppe_init(void) { int answer; if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) && crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC))) return -ENODEV; sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL); if (!sha_pad) return -ENOMEM; sha_pad_init(sha_pad); answer = ppp_register_compressor(&ppp_mppe); if (answer == 0) printk(KERN_INFO "PPP MPPE Compression module registered\n"); else kfree(sha_pad); return answer; } static void __exit ppp_mppe_cleanup(void) { ppp_unregister_compressor(&ppp_mppe); kfree(sha_pad); } module_init(ppp_mppe_init); module_exit(ppp_mppe_cleanup);
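/*
 * Illustrative sketch of the coherency-count catch-up performed in
 * mppe_decompress() for stateless mode (RFC 3078, sec. 8.1): the
 * receiver rekeys once per missed count until it matches the peer.
 * This is a stand-alone demo, not driver code; rekey() is a stand-in
 * for mppe_rekey(state, 0) and CCOUNT_SPACE mirrors the driver's
 * 12-bit MPPE_CCOUNT_SPACE.
 */
#include <stdio.h>

#define CCOUNT_SPACE 0x1000	/* 12-bit coherency counter wraps here */

static unsigned int rekeys;

static void rekey(void)		/* stand-in for mppe_rekey(state, 0) */
{
	rekeys++;
}

int main(void)
{
	unsigned int local = 0x0ffe;	/* our count */
	unsigned int received = 0x0002;	/* peer's count, wrapped past 0xfff */

	while (local != received) {	/* same loop shape as the stateless path */
		rekey();
		local = (local + 1) % CCOUNT_SPACE;
	}
	printf("in sync after %u rekeys\n", rekeys);	/* prints 4 */
	return 0;
}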
static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = "SC520", }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_SETOPTIONS: { int new_options, retval = -EINVAL; if (get_user(new_options, p)) return -EFAULT; if (new_options & WDIOS_DISABLECARD) { wdt_turnoff(); retval = 0; } if (new_options & WDIOS_ENABLECARD) { wdt_startup(); retval = 0; } return retval; } case WDIOC_KEEPALIVE: wdt_keepalive(); return 0; case WDIOC_SETTIMEOUT: { int new_timeout; if (get_user(new_timeout, p)) return -EFAULT; if (wdt_set_heartbeat(new_timeout)) return -EINVAL; wdt_keepalive(); /* Fall through */ } case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: return -ENOTTY; } } static const struct file_operations wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = fop_write, .open = fop_open, .release = fop_close, .unlocked_ioctl = fop_ioctl, }; static struct miscdevice wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &wdt_fops, }; /* * Notifier for system down */ static int wdt_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) wdt_turnoff(); return NOTIFY_DONE; } /* * The WDT needs to learn about soft shutdowns in order to * turn the timebomb registers off. */ static struct notifier_block wdt_notifier = { .notifier_call = wdt_notify_sys, }; static void __exit sc520_wdt_unload(void) { if (!nowayout) wdt_turnoff(); /* Deregister */ misc_deregister(&wdt_miscdev); unregister_reboot_notifier(&wdt_notifier); iounmap(wdtmrctl); } static int __init sc520_wdt_init(void) { int rc = -EBUSY; /* Check that the timeout value is within it's range ; if not reset to the default */ if (wdt_set_heartbeat(timeout)) { wdt_set_heartbeat(WATCHDOG_TIMEOUT); pr_info("timeout value must be 1 <= timeout <= 3600, using %d\n", WATCHDOG_TIMEOUT); } wdtmrctl = ioremap(MMCR_BASE + OFFS_WDTMRCTL, 2); if (!wdtmrctl) { pr_err("Unable to remap memory\n"); rc = -ENOMEM; goto err_out_region2; } rc = register_reboot_notifier(&wdt_notifier); if (rc) { pr_err("cannot register reboot notifier (err=%d)\n", rc); goto err_out_ioremap; } rc = misc_register(&wdt_miscdev); if (rc) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, rc); goto err_out_notifier; } pr_info("WDT driver for SC520 initialised. timeout=%d sec (nowayout=%d)\n", timeout, nowayout); return 0; err_out_notifier: unregister_reboot_notifier(&wdt_notifier); err_out_ioremap: iounmap(wdtmrctl); err_out_region2: return rc; } module_init(sc520_wdt_init); module_exit(sc520_wdt_unload); MODULE_AUTHOR("Scott and Bill Jennings"); MODULE_DESCRIPTION( "Driver for watchdog timer in AMD \"Elan\" SC520 uProcessor"); MODULE_LICENSE("GPL");
/* CHAOS functions */ static void xt_chaos_total(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_chaos_tginfo *info = par->targinfo; const struct iphdr *iph = ip_hdr(skb); const int thoff = 4 * iph->ihl; const int fragoff = ntohs(iph->frag_off) & IP_OFFSET; typeof(xt_tarpit) destiny; bool ret; #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 22) int hotdrop = false; #else bool hotdrop = false; #endif #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 27) ret = xm_tcp->match(skb, par->in, par->out, xm_tcp, &tcp_params, fragoff, thoff, &hotdrop); #elif LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 34) { struct xt_match_param local_par = { .in = par->in, .out = par->out, .match = xm_tcp, .matchinfo = &tcp_params, .fragoff = fragoff, .thoff = thoff, .hotdrop = &hotdrop, }; ret = xm_tcp->match(skb, &local_par); } #else { struct xt_action_param local_par; local_par.in = par->in, local_par.out = par->out, local_par.match = xm_tcp; local_par.matchinfo = &tcp_params; local_par.fragoff = fragoff; local_par.thoff = thoff; local_par.hotdrop = false; ret = xm_tcp->match(skb, &local_par); hotdrop = local_par.hotdrop; } #endif if (!ret || hotdrop || (unsigned int)net_random() > delude_percentage) return; destiny = (info->variant == XTCHAOS_TARPIT) ? xt_tarpit : xt_delude; #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18) destiny->target(&skb, par->in, par->out, par->hooknum, destiny, NULL, NULL); #elif LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 23) destiny->target(&skb, par->in, par->out, par->hooknum, destiny, NULL); #elif LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 27) destiny->target(skb, par->in, par->out, par->hooknum, destiny, NULL); #elif LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 34) { struct xt_target_param local_par = { .in = par->in, .out = par->out, .hooknum = par->hooknum, .target = destiny, .targinfo = par->targinfo, .family = par->family, }; destiny->target(skb, &local_par); } #else { struct xt_action_param local_par; local_par.in = par->in; local_par.out = par->out; local_par.hooknum = par->hooknum; local_par.target = destiny; local_par.targinfo = par->targinfo; local_par.family = par->family; destiny->target(skb, &local_par); } #endif } static unsigned int chaos_tg(struct sk_buff **pskb, const struct xt_action_param *par) { /* * Equivalent to: * -A chaos -m statistic --mode random --probability \ * $reject_percentage -j REJECT --reject-with host-unreach; * -A chaos -p tcp -m statistic --mode random --probability \ * $delude_percentage -j DELUDE; * -A chaos -j DROP; */ const struct xt_chaos_tginfo *info = par->targinfo; struct sk_buff *skb = *pskb; const struct iphdr *iph = ip_hdr(skb); if ((unsigned int)net_random() <= reject_percentage) { #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18) return xt_reject->target(pskb, par->in, par->out, par->hooknum, xt_reject, &reject_params, NULL); #elif LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 23) return xt_reject->target(pskb, par->in, par->out, par->hooknum, xt_reject, &reject_params); #elif LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 27) return xt_reject->target(skb, par->in, par->out, par->hooknum, xt_reject, &reject_params); #elif LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 34) struct xt_target_param local_par = { .in = par->in, .out = par->out, .hooknum = par->hooknum, .target = xt_reject, .targinfo = &reject_params, }; return xt_reject->target(skb, &local_par); #else struct xt_action_param local_par; local_par.in = par->in; local_par.out = par->out; local_par.hooknum = par->hooknum; local_par.target = xt_reject; 
local_par.targinfo = &reject_params; return xt_reject->target(skb, &local_par); #endif } /* TARPIT/DELUDE may not be called from the OUTPUT chain */ if (iph->protocol == IPPROTO_TCP && info->variant != XTCHAOS_NORMAL && par->hooknum != NF_INET_LOCAL_OUT) xt_chaos_total(skb, par); return NF_DROP; } static int chaos_tg_check(const struct xt_tgchk_param *par) { const struct xt_chaos_tginfo *info = par->targinfo; if (info->variant == XTCHAOS_DELUDE && !have_delude) { printk(KERN_WARNING PFX "Error: Cannot use --delude when " "DELUDE module not available\n"); return -EINVAL; } if (info->variant == XTCHAOS_TARPIT && !have_tarpit) { printk(KERN_WARNING PFX "Error: Cannot use --tarpit when " "TARPIT module not available\n"); return -EINVAL; } return 0; } static struct xt_target chaos_tg_reg = { .name = "CHAOS", .revision = 0, .family = NFPROTO_IPV4, .table = "filter", .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD) | (1 << NF_INET_LOCAL_OUT), .target = chaos_tg, .checkentry = chaos_tg_check, .targetsize = sizeof(struct xt_chaos_tginfo), .me = THIS_MODULE, }; static int __init chaos_tg_init(void) { int ret = -EINVAL; xm_tcp = xt_request_find_match(NFPROTO_IPV4, "tcp", 0); if (xm_tcp == NULL) { printk(KERN_WARNING PFX "Error: Could not find or load " "\"tcp\" match\n"); return -EINVAL; } xt_reject = xt_request_find_target(NFPROTO_IPV4, "REJECT", 0); if (xt_reject == NULL) { printk(KERN_WARNING PFX "Error: Could not find or load " "\"REJECT\" target\n"); goto out2; } xt_tarpit = xt_request_find_target(NFPROTO_IPV4, "TARPIT", 0); have_tarpit = xt_tarpit != NULL; if (!have_tarpit) printk(KERN_WARNING PFX "Warning: Could not find or load " "\"TARPIT\" target\n"); xt_delude = xt_request_find_target(NFPROTO_IPV4, "DELUDE", 0); have_delude = xt_delude != NULL; if (!have_delude) printk(KERN_WARNING PFX "Warning: Could not find or load " "\"DELUDE\" target\n"); if ((ret = xt_register_target(&chaos_tg_reg)) != 0) { printk(KERN_WARNING PFX "xt_register_target returned " "error %d\n", ret); goto out3; } return 0; out3: if (have_delude) module_put(xt_delude->me); if (have_tarpit) module_put(xt_tarpit->me); module_put(xt_reject->me); out2: module_put(xm_tcp->me); return ret; } static void __exit chaos_tg_exit(void) { xt_unregister_target(&chaos_tg_reg); module_put(xm_tcp->me); module_put(xt_reject->me); if (have_delude) module_put(xt_delude->me); if (have_tarpit) module_put(xt_tarpit->me); } module_init(chaos_tg_init); module_exit(chaos_tg_exit); MODULE_DESCRIPTION("Xtables: Network scan slowdown with non-deterministic results"); MODULE_AUTHOR("Jan Engelhardt <*****@*****.**>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_CHAOS");
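/*
 * The reject_percentage / delude_percentage values used above are
 * compared against a full 32-bit random number, so a probability has
 * to be scaled into that range. The helper below shows the arithmetic;
 * it is a stand-alone illustration, and the exact constants used by
 * the module may differ.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static uint32_t prob_to_threshold(double probability)
{
	/* P(random <= threshold) ~= probability for a uniform 32-bit random */
	return (uint32_t)(probability * (double)UINT32_MAX);
}

int main(void)
{
	printf("1%%  -> 0x%08" PRIx32 "\n", prob_to_threshold(0.01));
	printf("50%% -> 0x%08" PRIx32 "\n", prob_to_threshold(0.50));
	return 0;
}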