/*
 * Open /dev/watchdog: claim exclusive access, start the timer.
 * Returns -EBUSY if another process already holds the device.
 */
static int xwdt_open(struct inode *inode, struct file *file)
{
	/* Exclusive access: only a single opener at a time. */
	if (test_and_set_bit(0, &driver_open))
		return -EBUSY;

	/* With nowayout set, the module must never be unloaded again. */
	if (xdev.nowayout)
		__module_get(THIS_MODULE);

	xwdt_start();
	pr_info("Started...\n");

	return nonseekable_open(inode, file);
}
/*
 * Open the watchdog device: single opener, pet the dog once, then
 * arm the hardware timer.
 */
static int bfin_wdt_open(struct inode *inode, struct file *file)
{
	stampit();

	/* Only one process may hold the watchdog open. */
	if (test_and_set_bit(0, &open_check))
		return -EBUSY;

	/* nowayout: pin the module so the dog can never be stopped. */
	if (nowayout)
		__module_get(THIS_MODULE);

	bfin_wdt_keepalive();
	bfin_wdt_start();

	return nonseekable_open(inode, file);
}
/*
 * Open the watchdog: exclusive access, reset the magic-close state,
 * and start the hardware timer.
 */
static int rk29_wdt_open(struct inode *inode, struct file *file)
{
	DBG("%s\n", __func__);

	/* Single opener only. */
	if (test_and_set_bit(0, &open_lock))
		return -EBUSY;

	/* nowayout: the module must stay loaded once the dog runs. */
	if (nowayout)
		__module_get(THIS_MODULE);

	/* Fresh open: no magic-close character seen yet. */
	expect_close = 0;

	/* start the timer */
	rk29_wdt_start();

	return nonseekable_open(inode, file);
}
/*
 * Allow only a single thread to walk the dog
 */
static int sbwdog_open(struct inode *inode, struct file *file)
{
	nonseekable_open(inode, file);

	/* Exclusive access gate. */
	if (test_and_set_bit(0, &sbwdog_gate))
		return -EBUSY;

	/* Pin the module for as long as the dog is armed. */
	__module_get(THIS_MODULE);

	/*
	 * Arm the user watchdog with the configured timeout and
	 * enable it.
	 */
	sbwdog_set(user_dog, timeout);
	__raw_writeb(1, user_dog);

	return 0;
}
/*
 * Probe a memory map as plain RAM and build an mtd_info describing it.
 * Returns a newly allocated mtd_info on success, NULL on allocation
 * failure.  Takes a reference on this module so it stays loaded while
 * the mtd device exists.
 */
static struct mtd_info *map_ram_probe(struct map_info *map)
{
	struct mtd_info *mtd;

	/* Check the first byte is RAM */
	/* NOTE(review): the read-back probe below is compiled out —
	 * presumably because writing test patterns would clobber data
	 * already present in (e.g. battery-backed) RAM; confirm before
	 * re-enabling. */
#if 0
	map_write8(map, 0x55, 0);
	if (map_read8(map, 0) != 0x55)
		return NULL;

	map_write8(map, 0xAA, 0);
	if (map_read8(map, 0) != 0xAA)
		return NULL;

	/* Check the last byte is RAM */
	map_write8(map, 0x55, map->size-1);
	if (map_read8(map, map->size-1) != 0x55)
		return NULL;

	map_write8(map, 0xAA, map->size-1);
	if (map_read8(map, map->size-1) != 0xAA)
		return NULL;
#endif
	/* OK. It seems to be RAM. */

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;

	map->fldrv = &mapram_chipdrv;
	mtd->priv = map;
	mtd->name = map->name;
	mtd->type = MTD_RAM;
	mtd->size = map->size;
	mtd->erase = mapram_erase;
	mtd->read = mapram_read;
	mtd->write = mapram_write;
	mtd->sync = mapram_nop;
	mtd->flags = MTD_CAP_RAM;
	mtd->writesize = 1;

	/* Shrink the erasesize until it divides the map size evenly. */
	mtd->erasesize = PAGE_SIZE;
	while(mtd->size & (mtd->erasesize - 1))
		mtd->erasesize >>= 1;

	__module_get(THIS_MODULE);
	return mtd;
}
static int ux500_wdt_open(struct inode *inode, struct file *file) { if (!timeout) return -ENODEV; if (test_and_set_bit(WDT_FLAGS_OPEN, &wdt_flags)) return -EBUSY; if (!test_and_clear_bit(WDT_FLAGS_ORPHAN, &wdt_flags)) __module_get(THIS_MODULE); ux500_wdt_ops->enable(wdog_id); wdt_en = true; return nonseekable_open(inode, file); }
/*
 * Open the watchdog: exclusive access, then start and immediately
 * pet the hardware timer.
 */
static int indydog_open(struct inode *inode, struct file *file)
{
	/* Only one opener allowed. */
	if (test_and_set_bit(0, &indydog_alive))
		return -EBUSY;

	/* nowayout: pin the module once the dog is running. */
	if (nowayout)
		__module_get(THIS_MODULE);

	/* Arm the timer and give it a first kick. */
	indydog_start();
	indydog_ping();

	printk(KERN_INFO "Started watchdog timer.\n");

	return nonseekable_open(inode, file);
}
/*
 * Open the watchdog.  Fails with -EBUSY if already open or if the
 * underlying timer is already enabled in another mode.
 */
static int txx9wdt_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(0, &txx9wdt_alive))
		return -EBUSY;

	/*
	 * The timer block is shared; refuse to attach if it is already
	 * counting, rolling back the open flag we just claimed.
	 */
	if (__raw_readl(&txx9wdt_reg->tcr) & TXx9_TMTCR_TCE) {
		clear_bit(0, &txx9wdt_alive);
		return -EBUSY;
	}

	/* nowayout: the module must stay loaded. */
	if (nowayout)
		__module_get(THIS_MODULE);

	txx9wdt_start();
	return nonseekable_open(inode, file);
}
/*
 * Allow only one person to hold it open
 */
static int ralink_open(struct inode *inode, struct file *file)
{
	/* NOTE(review): plain check-then-set on RaWdgAlive is racy if
	 * two opens run concurrently; test_and_set_bit() would close
	 * the window — confirm the flag's type/locking first. */
	if (RaWdgAlive)
		return -EBUSY;

#ifdef CONFIG_WATCHDOG_NOWAYOUT
	/* nowayout: pin the module so the dog can never be stopped. */
	if (nowayout)
		__module_get(THIS_MODULE);
#endif

	/* Activate timer */
	RaWdgStart();
	RaWdgAlive = 1;

	return nonseekable_open(inode, file);
}
/*
 * Open the watchdog: exclusive access, then reset the counter and
 * program the timeout.
 */
static int wdt_open(struct inode *inode, struct file *file)
{
	/* Allow only one person to hold it open */
	/* NOTE(review): plain check-then-set on 'access' is racy under
	 * concurrent opens; test_and_set_bit() would be atomic —
	 * confirm against this driver's locking model. */
	if (access)
		return -EBUSY;

	/* nowayout: pin the module once the dog runs. */
	if (nowayout)
		__module_get(THIS_MODULE);

	/* Activate timer */
	wdt_reset_counter();
	wdt_set_timeout();
	printk(KERN_INFO NAME ": enabling watchdog timer\n");
	access = 1;
	return 0;
}
/*
 * Allocate an mii_bus wired to the bit-banging read/write helpers.
 * Takes a reference on the controller ops' owning module; returns
 * NULL on allocation failure.
 */
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
{
	struct mii_bus *bus = kzalloc(sizeof(*bus), GFP_KERNEL);

	if (!bus)
		return NULL;

	/* Keep the ops provider loaded while the bus exists. */
	__module_get(ctrl->ops->owner);

	bus->read = mdiobb_read;
	bus->write = mdiobb_write;
	bus->priv = ctrl;

	return bus;
}
/*
 * ubi32_wdt_open()
 *	Open the watchdog device: exclusive access, then start the
 *	hardware timer under the driver spinlock.
 */
static int ubi32_wdt_open(struct inode *inode, struct file *file)
{
	unsigned long irqflags;

	/* Single opener only. */
	if (test_and_set_bit(0, &open_check))
		return -EBUSY;

	/* nowayout: pin the module for good. */
	if (nowayout)
		__module_get(THIS_MODULE);

	spin_lock_irqsave(&ubi32_wdt_spinlock, irqflags);
	ubi32_wdt_start();
	spin_unlock_irqrestore(&ubi32_wdt_spinlock, irqflags);

	return nonseekable_open(inode, file);
}
/*
 * Create and register a sysfs trigger with the given id.
 * Returns 0 on success, -EINVAL if the id is already in use,
 * -ENOMEM on allocation failure, or the iio_trigger_register()
 * error.  On success the trigger is put on the global list and a
 * module reference is taken (dropped when the trigger is removed).
 */
static int iio_sysfs_trigger_probe(int id)
{
	struct iio_sysfs_trig *t;
	int ret;
	bool foundit = false;

	mutex_lock(&iio_syfs_trig_list_mut);

	/* Reject duplicate ids. */
	list_for_each_entry(t, &iio_sysfs_trig_list, l)
		if (id == t->id) {
			foundit = true;
			break;
		}
	if (foundit) {
		ret = -EINVAL;
		goto out1;
	}

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		ret = -ENOMEM;
		goto out1;
	}
	t->id = id;

	t->trig = iio_allocate_trigger("sysfstrig%d", id);
	if (!t->trig) {
		ret = -ENOMEM;
		goto free_t;
	}

	t->trig->dev.groups = iio_sysfs_trigger_attr_groups;
	t->trig->owner = THIS_MODULE;
	t->trig->dev.parent = &iio_sysfs_trig_dev;

	ret = iio_trigger_register(t->trig);
	if (ret)
		goto out2;

	/* Publish the trigger and pin the module while it is listed. */
	list_add(&t->l, &iio_sysfs_trig_list);
	__module_get(THIS_MODULE);
	mutex_unlock(&iio_syfs_trig_list_mut);
	return 0;

/* Unwind in reverse order of acquisition. */
out2:
	iio_put_trigger(t->trig);
free_t:
	kfree(t);
out1:
	mutex_unlock(&iio_syfs_trig_list_mut);
	return ret;
}
/*
 * Allow only one person to hold it open
 */
static int epx_c3_open(struct inode *inode, struct file *file)
{
	/* NOTE(review): non-atomic check-then-set on epx_c3_alive is
	 * racy under concurrent opens; test_and_set_bit() would be
	 * atomic — confirm the flag's type first. */
	if (epx_c3_alive)
		return -EBUSY;

	/* nowayout: pin the module once the dog runs. */
	if (nowayout)
		__module_get(THIS_MODULE);

	/* Activate timer */
	epx_c3_start();
	epx_c3_pet();

	epx_c3_alive = 1;
	printk(KERN_INFO "Started watchdog timer.\n");

	return nonseekable_open(inode, file);
}
/*
 * Allow only one person to hold it open
 */
static int epx_c3_open(struct inode *inode, struct file *file)
{
	/* Single opener only; the flag is cleared on release. */
	if (epx_c3_alive)
		return -EBUSY;

	/* nowayout: the module may never be unloaded again. */
	if (nowayout)
		__module_get(THIS_MODULE);

	/* Activate timer */
	epx_c3_start();
	epx_c3_pet();

	epx_c3_alive = 1;

	return nonseekable_open(inode, file);
}
/*
 * Open the watchdog: exclusive access, then start and pet the timer.
 */
static int epx_c3_open(struct inode *inode, struct file *file)
{
	/* NOTE(review): non-atomic check-then-set on epx_c3_alive is
	 * racy under concurrent opens; test_and_set_bit() would be
	 * atomic — confirm the flag's type first. */
	if (epx_c3_alive)
		return -EBUSY;

	/* nowayout: pin the module once the dog runs. */
	if (nowayout)
		__module_get(THIS_MODULE);

	/* Arm the timer and give it a first kick. */
	epx_c3_start();
	epx_c3_pet();

	epx_c3_alive = 1;
	pr_info("Started watchdog timer\n");

	return nonseekable_open(inode, file);
}
/*
 * Open the watchdog: look up the device state, enforce a single
 * opener, stash the state on the file, and start the timer.
 */
static int mpcore_wdt_open(struct inode *inode, struct file *file)
{
	struct mpcore_wdt *wdt = platform_get_drvdata(mpcore_wdt_dev);

	/* Only one process may hold the watchdog open. */
	if (test_and_set_bit(0, &wdt->timer_alive))
		return -EBUSY;

	/* nowayout: the module must stay loaded. */
	if (nowayout)
		__module_get(THIS_MODULE);

	/* Later fops find the device state via the file. */
	file->private_data = wdt;
	mpcore_wdt_start(wdt);

	return nonseekable_open(inode, file);
}
/*
 * Allocate an mii_bus wired to the bit-banging read/write/reset
 * helpers.  Takes a reference on the controller ops' owning module;
 * returns NULL on allocation failure.
 */
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
{
	struct mii_bus *bus = mdiobus_alloc();

	if (!bus)
		return NULL;

	/* Keep the ops provider loaded while the bus exists. */
	__module_get(ctrl->ops->owner);

	bus->read = mdiobb_read;
	bus->write = mdiobb_write;
	bus->reset = mdiobb_reset;
	bus->priv = ctrl;

	return bus;
}
static int wb_smsc_wdt_open(struct inode *inode, struct file *file) { /* /dev/watchdog can only be opened once */ if (test_and_set_bit(0, &timer_enabled)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); /* Reload and activate timer */ wb_smsc_wdt_enable(); printk(KERN_INFO MODNAME "Watchdog enabled. Timeout set to %d %s.\n", timeout, (unit == UNIT_SECOND) ? "second(s)" : "minute(s)"); return nonseekable_open(inode, file); }
/*
 * Open the watchdog: exclusive access, then kick the hardware timer.
 */
static int pc87413_open(struct inode *inode, struct file *file)
{
	/* /dev/watchdog can only be opened once */
	if (test_and_set_bit(0, &timer_enabled))
		return -EBUSY;

	/* nowayout: pin the module so the dog can never be stopped. */
	if (nowayout)
		__module_get(THIS_MODULE);

	/* Reload and activate timer */
	pc87413_refresh();

	pr_info("Watchdog enabled. Timeout set to %d minute(s).\n", timeout);

	return nonseekable_open(inode, file);
}
/* * Allow only one person to hold it open */ static int pnx833x_wdt_open(struct inode *inode, struct file *file) { if (test_and_set_bit(0, &pnx833x_wdt_alive)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); /* Activate timer */ if (!start_enabled) pnx833x_wdt_start(); pnx833x_wdt_ping(); printk(KERN_INFO "Started watchdog timer.\n"); return nonseekable_open(inode, file); }
/*
 * Open the watchdog: exclusive access, then reload and enable the
 * hardware timer.
 */
static int wb_smsc_wdt_open(struct inode *inode, struct file *file)
{
	/* Single opener only. */
	if (test_and_set_bit(0, &timer_enabled))
		return -EBUSY;

	/* nowayout: the module must stay loaded once the dog runs. */
	if (nowayout)
		__module_get(THIS_MODULE);

	/* Reload and arm the timer. */
	wb_smsc_wdt_enable();

	pr_info("Watchdog enabled. Timeout set to %d %s\n",
		timeout, (unit == UNIT_SECOND) ? "second(s)" : "minute(s)");

	return nonseekable_open(inode, file);
}
/*
 * This is the callback kernel thread.
 *
 * Runs until signalled with no remaining users, servicing NFSv4
 * callback RPCs from the server.  Holds a module reference for the
 * thread's lifetime (dropped by module_put_and_exit() on exit) and
 * signals start/stop via nfs_callback_info completions.
 */
static void nfs_callback_svc(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	int err;

	__module_get(THIS_MODULE);
	lock_kernel();
	nfs_callback_info.pid = current->pid;
	daemonize("nfsv4-svc");
	/* Process request with signals blocked, but allow SIGKILL. */
	allow_signal(SIGKILL);

	/* Tell the starter we are up and registered. */
	complete(&nfs_callback_info.started);

	for(;;) {
		/* A signal only stops us once the last user is gone. */
		if (signalled()) {
			if (nfs_callback_info.users == 0)
				break;
			flush_signals(current);
		}
		/*
		 * Listen for a request on the socket
		 */
		err = svc_recv(serv, rqstp, MAX_SCHEDULE_TIMEOUT);
		/* Transient conditions: just go around again. */
		if (err == -EAGAIN || err == -EINTR)
			continue;
		if (err < 0) {
			printk(KERN_WARNING
				"%s: terminating on error %d\n",
				__FUNCTION__, -err);
			break;
		}
		dprintk("%s: request from %u.%u.%u.%u\n", __FUNCTION__,
			NIPQUAD(rqstp->rq_addr.sin_addr.s_addr));
		svc_process(serv, rqstp);
	}

	/* Tear down: clear thread state and wake anyone waiting. */
	flush_signals(current);
	svc_exit_thread(rqstp);
	nfs_callback_info.pid = 0;
	complete(&nfs_callback_info.stopped);
	unlock_kernel();
	module_put_and_exit(0);
}
/*
 * Register an mtd device in the first free slot of the global table.
 * Returns 0 on success; returns 1 (note: not a -errno) when the table
 * is full.  On success a module reference is taken so we cannot be
 * unloaded while the device is registered.
 */
int add_mtd_device(struct mtd_info *mtd)
{
	int i;

	BUG_ON(mtd->writesize == 0);
	mutex_lock(&mtd_table_mutex);

	/* Find the first free slot in the static device table. */
	for (i=0; i < MAX_MTD_DEVICES; i++)
		if (!mtd_table[i]) {
			struct mtd_notifier *not;

			mtd_table[i] = mtd;
			mtd->index = i;
			mtd->usecount = 0;

			/* Some chips always power up locked. Unlock them now */
			if ((mtd->flags & MTD_WRITEABLE) &&
			    (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
				if (mtd->unlock(mtd, 0, mtd->size))
					printk(KERN_WARNING
					       "%s: unlock failed, "
					       "writes may not work\n",
					       mtd->name);
			}

			DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name);
			/* No need to get a refcount on the module containing
			   the notifier, since we hold the mtd_table_mutex */
			list_for_each_entry(not, &mtd_notifiers, list)
				not->add(mtd);

			mutex_unlock(&mtd_table_mutex);
			/* We _know_ we aren't being removed, because
			   our caller is still holding us here. So none
			   of this try_ nonsense, and no bitching about it
			   either. :) */
			__module_get(THIS_MODULE);
			return 0;
		}

	mutex_unlock(&mtd_table_mutex);
	return 1;
}
/*
 * Construct a counter functional block named 'name': allocate the
 * block and its private state, create its procfs entry, and register
 * it in the fblock namespace.  Returns the new block, or NULL on any
 * failure (all partially acquired resources are unwound).  On success
 * a module reference is taken for the lifetime of the block.
 */
static struct fblock *fb_counter_ctor(char *name)
{
	int ret = 0;
	struct fblock *fb;
	struct fb_counter_priv *fb_priv;
	struct proc_dir_entry *fb_proc;

	fb = alloc_fblock(GFP_ATOMIC);
	if (!fb)
		return NULL;

	fb_priv = kzalloc(sizeof(*fb_priv), GFP_ATOMIC);
	if (!fb_priv)
		goto err;

	/* Counters start at zero with both ports unbound. */
	seqlock_init(&fb_priv->lock);
	fb_priv->port[0] = IDP_UNKNOWN;
	fb_priv->port[1] = IDP_UNKNOWN;
	fb_priv->packets = 0;
	fb_priv->bytes = 0;

	ret = init_fblock(fb, name, fb_priv);
	if (ret)
		goto err2;

	fb->netfb_rx = fb_counter_netrx;
	fb->event_rx = fb_counter_event;

	fb_proc = proc_create_data(fb->name, 0444, fblock_proc_dir,
				   &fb_counter_proc_fops, (void *)(long) fb);
	if (!fb_proc)
		goto err3;

	ret = register_fblock_namespace(fb);
	if (ret)
		goto err4;

	__module_get(THIS_MODULE);
	return fb;

/* Unwind in reverse order of acquisition. */
err4:
	remove_proc_entry(fb->name, fblock_proc_dir);
err3:
	cleanup_fblock_ctor(fb);
err2:
	kfree(fb_priv);
err:
	kfree_fblock(fb);
	return NULL;
}
static int dw_wdt_open(struct inode *inode, struct file *filp) { if (test_and_set_bit(0, &dw_wdt.in_use)) return -EBUSY; __module_get(THIS_MODULE); spin_lock(&dw_wdt.lock); if (!dw_wdt_is_enabled()) { dw_wdt_set_top(DW_WDT_MAX_TOP); writel(WDOG_CONTROL_REG_WDT_EN_MASK, dw_wdt.regs + WDOG_CONTROL_REG_OFFSET); } dw_wdt_set_next_heartbeat(); spin_unlock(&dw_wdt.lock); return nonseekable_open(inode, filp); }
static int mpc83xx_wdt_open(struct inode *inode, struct file *file) { u32 tmp = SWCRR_SWEN; if (test_and_set_bit(0, &wdt_is_open)) return -EBUSY; /* Once we start the watchdog we can't stop it */ __module_get(THIS_MODULE); /* Good, fire up the show */ if (prescale) tmp |= SWCRR_SWPR; if (reset) tmp |= SWCRR_SWRI; tmp |= timeout << 16; out_be32(&wd_base->swcrr, tmp); return nonseekable_open(inode, file); }
static int harddog_open(struct inode *inode, struct file *file) { int err; char *sock = NULL; lock_kernel(); if(timer_alive) return -EBUSY; #ifdef CONFIG_HARDDOG_NOWAYOUT __module_get(THIS_MODULE); #endif #ifdef CONFIG_MCONSOLE sock = mconsole_notify_socket(); #endif err = start_watchdog(&harddog_in_fd, &harddog_out_fd, sock); if(err) return(err); timer_alive = 1; unlock_kernel(); return nonseekable_open(inode, file); }
static int mixcomwd_open(struct inode *inode, struct file *file) { if(test_and_set_bit(0,&mixcomwd_opened)) { return -EBUSY; } mixcomwd_ping(); if (nowayout) { /* * fops_get() code via open() has already done * a try_module_get() so it is safe to do the * __module_get(). */ __module_get(THIS_MODULE); } else { if(mixcomwd_timer_alive) { del_timer(&mixcomwd_timer); mixcomwd_timer_alive=0; } } return 0; }
/*
 * Wait for a client to connect to 'server_sock'.  On success,
 * *client_sock holds the accepted socket (with the server's protocol
 * ops and a reference on their owning module) and 0 is returned; on
 * failure a negative errno is returned and *client_sock is NULL.
 */
int sock_tcpsrv_waitfor_cli(struct socket *server_sock,
			    struct socket **client_sock, int flags)
{
	struct sock *sk = server_sock->sk;
	int err;

	/* Allocate a bare socket to receive the accepted connection. */
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       client_sock);
	if (err < 0)
		return err;

	/* Wait (subject to flags) for an incoming connection. */
	err = server_sock->ops->accept(server_sock, *client_sock, flags);
	if (err < 0) {
		sock_release(*client_sock);
		*client_sock = NULL;
		return err;
	}

	/* Inherit the protocol ops and pin their owning module. */
	(*client_sock)->ops = server_sock->ops;
	__module_get((*client_sock)->ops->owner);

	return err;
}