static int generic_acquire_start(struct fp_dev *dev, int action) { struct fp_img_dev *imgdev = dev->priv; int r; fp_dbg("action %d", action); imgdev->action = action; imgdev->action_state = IMG_ACQUIRE_STATE_ACTIVATING; r = dev_activate(imgdev, IMGDEV_STATE_AWAIT_FINGER_ON); if (r < 0) fp_err("activation failed with error %d", r); return r; }
/*
 * Graft @qdisc onto @dev and return the qdisc it replaces (the caller
 * owns the returned qdisc and is responsible for destroying it).
 * The device's TX path is quiesced around the swap: dev_deactivate()
 * before, dev_activate() after, but only if the device is up.
 * The swap itself is done under qdisc_tree_lock + dev->queue_lock.
 */
static struct Qdisc * dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc;

	/* Stop packet transmission while we swap schedulers. */
	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	write_lock(&qdisc_tree_lock);
	spin_lock_bh(&dev->queue_lock);
	/* NOTE(review): "TCQ_F_INGRES" is the identifier as declared in
	 * this tree (historic spelling) — do not "fix" it here without
	 * changing the header too. */
	if (qdisc && qdisc->flags&TCQ_F_INGRES) {
		/* Ingress qdisc. */
		oqdisc = dev->qdisc_ingress;
		/* Prune old scheduler */
		if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
			/* delete: reset the old ingress qdisc and leave the
			 * slot empty.
			 * NOTE(review): on this path the new @qdisc is
			 * discarded rather than installed — looks like a
			 * toggle semantic; confirm this is intended. */
			qdisc_reset(oqdisc);
			dev->qdisc_ingress = NULL;
		} else {
			/* new: install the new ingress qdisc. */
			dev->qdisc_ingress = qdisc;
		}

	} else {
		/* Egress qdisc. */
		oqdisc = dev->qdisc_sleeping;

		/* Prune old scheduler */
		if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
			qdisc_reset(oqdisc);

		/* ... and graft new one */
		if (qdisc == NULL)
			qdisc = &noop_qdisc;
		dev->qdisc_sleeping = qdisc;
		/* The running qdisc stays noop until dev_activate()
		 * promotes qdisc_sleeping. */
		dev->qdisc = &noop_qdisc;
	}

	spin_unlock_bh(&dev->queue_lock);
	write_unlock(&qdisc_tree_lock);

	/* Resume transmission with the new scheduler in place. */
	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return oqdisc;
}
/* Process one queued linkwatch event for @dev: acknowledge the pending
 * bit, apply RFC 2863 operstate policy, and (for devices that are up)
 * start or stop the TX scheduler to match carrier state, notifying
 * userspace of the change.  Drops the reference taken when the event
 * was queued. */
static void linkwatch_do_dev(struct net_device *dev)
{
	/*
	 * Order the prior read of dev->state against clearing the bit:
	 * the bit may be set again as soon as it is cleared below.
	 */
	smp_mb__before_clear_bit();
	clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

	rfc2863_policy(dev);

	/* Devices that are down need no qdisc or notification work. */
	if (!(dev->flags & IFF_UP)) {
		dev_put(dev);
		return;
	}

	if (netif_carrier_ok(dev))
		dev_activate(dev);
	else
		dev_deactivate(dev);

	netdev_state_change(dev);
	dev_put(dev);
}
/* Process one queued linkwatch event for @dev and drop the reference
 * taken when the event was queued. */
static void linkwatch_do_dev(struct net_device *dev)
{
	/* Make sure any prior read of dev->state is complete before we
	 * clear the pending bit below: the bit can be set again (and
	 * the device re-queued) as soon as it is cleared. */
	smp_mb__before_clear_bit();
	/* We are about to handle this device, so new events can be
	 * accepted from here on. */
	clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

	rfc2863_policy(dev);
	if (dev->flags & IFF_UP) {
		/* Match the TX scheduler to the carrier state and tell
		 * userspace about the change. */
		if (netif_carrier_ok(dev))
			dev_activate(dev);
		else
			dev_deactivate(dev);
		netdev_state_change(dev);
	}
	dev_put(dev);
}
/* Handle one pending linkwatch event for @dev: acknowledge the pending
 * bit, run the RFC 2863 operstate policy, and — for devices that are
 * both up and present — bring the TX scheduler in line with carrier
 * state and notify userspace.  Releases the queue's device reference. */
static void linkwatch_do_dev(struct net_device *dev)
{
	/*
	 * Order the earlier read of dev->state against the clear below;
	 * the bit may be rewritten the moment it is cleared.
	 */
	smp_mb__before_atomic();

	/* From this point new events for this device are accepted. */
	clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

	rfc2863_policy(dev);

	/* Nothing more to do for devices that are down or not present. */
	if (!(dev->flags & IFF_UP) || !netif_device_present(dev)) {
		dev_put(dev);
		return;
	}

	if (netif_carrier_ok(dev))
		dev_activate(dev);
	else
		dev_deactivate(dev);

	netdev_state_change(dev);
	dev_put(dev);
}
/*
 * Install (@intercept != 0) or remove (@intercept == 0) netmap's generic
 * qdisc on every TX queue of the interface backing @gna, so netmap can
 * intercept transmitted packets.  No-op unless gna->txqdisc is set.
 *
 * Returns 0 on success.  NOTE(review): the allocation failure path
 * returns positive ENOMEM while the qdisc-creation failure path returns
 * -1 — callers appear to treat any non-zero value as failure; confirm.
 */
static int nm_os_catch_qdisc(struct netmap_generic_adapter *gna, int intercept)
{
	struct netmap_adapter *na = &gna->up.up;
	struct ifnet *ifp = netmap_generic_getifp(gna);
	struct nm_generic_qdisc *qdiscopt = NULL;
	struct Qdisc *fqdisc = NULL;      /* first qdisc created; becomes ifp->qdisc */
	struct nlattr *nla = NULL;        /* netlink attribute carrying per-queue options */
	struct netdev_queue *txq;
	unsigned int i;

	/* Qdisc-based interception not requested for this adapter. */
	if (!gna->txqdisc) {
		return 0;
	}

	if (intercept) {
		/* Build a netlink attribute wrapping nm_generic_qdisc,
		 * passed to each qdisc's change() op below. */
		nla = kmalloc(nla_attr_size(sizeof(*qdiscopt)), GFP_KERNEL);
		if (!nla) {
			D("Failed to allocate netlink attribute");
			return ENOMEM;
		}
		nla->nla_type = RTM_NEWQDISC;
		nla->nla_len = nla_attr_size(sizeof(*qdiscopt));
		qdiscopt = (struct nm_generic_qdisc *)nla_data(nla);
		memset(qdiscopt, 0, sizeof(*qdiscopt));
		qdiscopt->limit = na->num_tx_desc;
	}

	/* Quiesce the TX path while qdiscs are being swapped. */
	if (ifp->flags & IFF_UP) {
		dev_deactivate(ifp);
	}

	/* Replace the current qdiscs with our own. */
	for (i = 0; i < ifp->real_num_tx_queues; i++) {
		struct Qdisc *nqdisc = NULL;
		struct Qdisc *oqdisc;
		int err;

		txq = netdev_get_tx_queue(ifp, i);

		if (intercept) {
			/* This takes a refcount to netmap module, alloc the
			 * qdisc and calls the init() op with NULL netlink
			 * attribute. */
			nqdisc = qdisc_create_dflt(
#ifndef NETMAP_LINUX_QDISC_CREATE_DFLT_3ARGS
					ifp,
#endif  /* NETMAP_LINUX_QDISC_CREATE_DFLT_3ARGS */
					txq, &generic_qdisc_ops,
					TC_H_UNSPEC);
			if (!nqdisc) {
				D("Failed to create qdisc");
				goto qdisc_create;
			}
			/* Remember the first created qdisc. */
			fqdisc = fqdisc ?: nqdisc;

			/* Call the change() op passing a valid netlink
			 * attribute. This is used to set the queue idx. */
			qdiscopt->qidx = i;
			err = nqdisc->ops->change(nqdisc, nla);
			if (err) {
				D("Failed to init qdisc");
				goto qdisc_create;
			}
		}

		/* nqdisc is NULL when removing: the device falls back to
		 * its default scheduler. */
		oqdisc = dev_graft_qdisc(txq, nqdisc);
		/* We can call this also with
		 * odisc == &noop_qdisc, since the noop
		 * qdisc has the TCQ_F_BUILTIN flag set,
		 * and so qdisc_destroy will skip it. */
		qdisc_destroy(oqdisc);
	}

	kfree(nla);

	/* Drop the device-level qdisc reference and point it at the
	 * first of our per-queue qdiscs (or noop when removing). */
	if (ifp->qdisc) {
		qdisc_destroy(ifp->qdisc);
	}
	if (intercept) {
		atomic_inc(&fqdisc->refcnt);
		ifp->qdisc = fqdisc;
	} else {
		ifp->qdisc = &noop_qdisc;
	}

	/* Resume transmission with the new qdiscs installed. */
	if (ifp->flags & IFF_UP) {
		dev_activate(ifp);
	}

	return 0;

qdisc_create:
	if (nla) {
		kfree(nla);
	}

	/* Roll back: recurse with intercept == 0 to restore the default
	 * qdiscs on any queues already grafted and re-activate the
	 * device. */
	nm_os_catch_qdisc(gna, 0);

	return -1;
}
/*
 * Drain the pending linkwatch event list.  With @urgent_only set, only
 * devices with urgent events are processed; the rest are re-queued.
 * Each processed device gets its pending bit cleared, the RFC 2863
 * policy applied, and (if up) its qdisc activated/deactivated to match
 * carrier state.
 */
static void __linkwatch_run_queue(int urgent_only)
{
	struct net_device *next;

	/*
	 * Limit the number of linkwatch events to one
	 * per second so that a runaway driver does not
	 * cause a storm of messages on the netlink
	 * socket. This limit does not apply to up events
	 * while the device qdisc is down.
	 */
	if (!urgent_only)
		linkwatch_nextevent = jiffies + HZ;
	/* Limit wrap-around effect on delay. */
	else if (time_after(linkwatch_nextevent, jiffies + HZ))
		linkwatch_nextevent = jiffies;

	clear_bit(LW_URGENT, &linkwatch_flags);

	/* Splice the whole event list out under the lock; new events
	 * queued from here on start a fresh list. */
	spin_lock_irq(&lweventlist_lock);
	next = lweventlist;
	lweventlist = NULL;
	spin_unlock_irq(&lweventlist_lock);

	while (next) {
		struct net_device *dev = next;

		next = dev->link_watch_next;

		/* Non-urgent events are deferred in urgent-only mode. */
		if (urgent_only && !linkwatch_urgent_event(dev)) {
			linkwatch_add_event(dev);
			continue;
		}

		/*
		 * Make sure the above read is complete since it can be
		 * rewritten as soon as we clear the bit below.
		 */
		smp_mb__before_clear_bit();

		/* We are about to handle this device,
		 * so new events can be accepted */
		clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

		rfc2863_policy(dev);
		if (dev->flags & IFF_UP) {
			if (netif_carrier_ok(dev)) {
				/* An up device should never be running the
				 * noop scheduler. */
				WARN_ON(dev->qdisc_sleeping == &noop_qdisc);
				dev_activate(dev);
			} else
				dev_deactivate(dev);

			netdev_state_change(dev);
		}

		/* Drop the reference taken when the event was queued. */
		dev_put(dev);
	}

	/* Events re-queued above (or raced in) need another pass. */
	if (lweventlist)
		linkwatch_schedule_work(0);
}
static int cmd_reg(struct cmd *cmd) { int a=0; char *p; struct device *d; struct db_device *dd; char sum[64]; unsigned int n; int type; char *pass; int port; char *bcast; int did = cmd->device_id; char buf[CMD_MAX]; NEXT_ARG(p); type = atoi(p); NEXT_ARG(p); pass = p; NEXT_ARG(p); port = atoi(p); NEXT_ARG(p); bcast = p; /* authenticate the passwd based on id and type */ n = (unsigned int)cmd->device_id ^ (unsigned int)type; cksum(&n, sizeof n, sum); if (strcmp(pass, sum) != 0) { trace_warn("authenticate fail\n"); return 1; } if (port <= 0) return 1; dd = md_find_device(did); if( dd ) { /* mark it online even if it's disabled. * needed for the manager to identify online devs. */ dd->online = 1; if( !dd->enabled ) { return ERR_DEV_DISABLED; } } d = get_device(did); if( !d ) { d = dev_create(did); } d->type = type; d->addr = *cmd->saddr; d->addr.sin_port = htons(port); d->fileaddr = d->addr; d->fileaddr.sin_port = htons(port+1); if (strcmp("none", bcast) == 0) d->bcast.sin_addr.s_addr = 0; else { d->bcast.sin_addr.s_addr = inet_addr(bcast); d->bcast.sin_port = htons(BRCAST_PORT); } dev_update_data(d); if (dev_register(d) != 0) { /* existing dev ok */ } dev_activate(d); if( is_ptc(d) ) { /* re-set ptc if it's restarted. */ ptc_go_current(); } if( d->type == DEVTYPE_CHAIR ) { manage_notify_chair(d); } get_client_info(buf, d); REP_ADD(cmd, "OK"); REP_ADD(cmd, buf); REP_END(cmd); return 0; }