/*
 * Attempt to execute stuff queued on a slow thread.  Return true if we managed
 * it, false if there was nothing to do.
 */
static noinline bool slow_work_execute(int id)
{
	struct slow_work *work = NULL;
	unsigned vsmax;
	bool very_slow;

	vsmax = slow_work_calc_vsmax();

	/* see if we can schedule a new thread to be started if we're not
	 * keeping up with the work */
	if (!waitqueue_active(&slow_work_thread_wq) &&
	    (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) &&
	    atomic_read(&slow_work_thread_count) < slow_work_max_threads &&
	    !slow_work_may_not_start_new_thread)
		slow_work_enqueue(&slow_work_new_thread);

	/* find something to execute */
	spin_lock_irq(&slow_work_queue_lock);
	if (!list_empty(&vslow_work_queue) &&
	    atomic_read(&vslow_work_executing_count) < vsmax) {
		work = list_entry(vslow_work_queue.next,
				  struct slow_work, link);
		if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
			BUG();
		list_del_init(&work->link);
		atomic_inc(&vslow_work_executing_count);
		very_slow = true;
	} else if (!list_empty(&slow_work_queue)) {
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0;

	if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			 NETIF_F_RXCSUM)))
		return 0;

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	dev->features = features;

	if (dev->flags & IFF_UP) {
		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);
		err = startup_gfar(dev);
	} else {
		gfar_mac_reset(priv);
	}

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}
static int event_buffer_open(struct inode *inode, struct file *file)
{
	int err = -EPERM;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (test_and_set_bit_lock(0, &buffer_opened))
		return -EBUSY;

	/* Register as a user of dcookies
	 * to ensure they persist for the lifetime of
	 * the open event file
	 */
	err = -EINVAL;
	file->private_data = dcookie_register();
	if (!file->private_data)
		goto out;

	if ((err = oprofile_setup()))
		goto fail;

	/* NB: the actual start happens from userspace
	 * echo 1 >/dev/oprofile/enable
	 */

	return nonseekable_open(inode, file);

fail:
	dcookie_unregister(file->private_data);
out:
	__clear_bit_unlock(0, &buffer_opened);
	return err;
}
static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
			   __u16 *val)
{
	struct usb_device *dev = mcs->port->serial->dev;
	struct usb_ctrlrequest *dr = mcs->dr;
	unsigned char *buffer = mcs->ctrl_buf;
	int ret;

	if (test_and_set_bit_lock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags))
		return -EBUSY;

	dr->bRequestType = MCS_RD_RTYPE;
	dr->bRequest = MCS_RDREQ;
	dr->wValue = cpu_to_le16(Wval);	/* 0 */
	dr->wIndex = cpu_to_le16(reg);
	dr->wLength = cpu_to_le16(2);

	usb_fill_control_urb(mcs->control_urb, dev, usb_rcvctrlpipe(dev, 0),
			     (unsigned char *)dr, buffer, 2,
			     mos7840_control_callback, mcs);
	mcs->control_urb->transfer_buffer_length = 2;
	ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
	if (ret)
		clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags);

	return ret;
}
bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
			     struct sk_buff *clone, unsigned int type)
{
	struct mv88e6xxx_chip *chip = ds->priv;
	struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
	__be16 *seq_ptr;
	u8 *hdr;

	if (!(skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP))
		return false;

	hdr = mv88e6xxx_should_tstamp(chip, port, clone, type);
	if (!hdr)
		return false;

	seq_ptr = (__be16 *)(hdr + OFF_PTP_SEQUENCE_ID);

	if (test_and_set_bit_lock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS,
				  &ps->state))
		return false;

	ps->tx_skb = clone;
	ps->tx_tstamp_start = jiffies;
	ps->tx_seq_id = be16_to_cpup(seq_ptr);

	ptp_schedule_worker(chip->ptp_clock, 0);
	return true;
}
/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned).  Note that this function
 *    assumes that any type of request can be queued! if this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the drivers responsibility to readd
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (!rq_is_sync(rq) && max_depth > 1) {
		switch (max_depth) {
		case 2:
			max_depth = 1;
			break;
		case 3:
			max_depth = 2;
			break;
		default:
			max_depth -= 2;
		}
		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
			return 1;
	}

	do {
		tag = find_first_zero_bit(bqt->tag_map, max_depth);
		if (tag >= max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
static void mos7840_led_activity(struct usb_serial_port *port)
{
	struct moschip_port *mos7840_port = usb_get_serial_port_data(port);

	if (test_and_set_bit_lock(MOS7840_FLAG_LED_BUSY, &mos7840_port->flags))
		return;

	mos7840_set_led_async(mos7840_port, 0x0301, MODEM_CONTROL_REGISTER);
	mod_timer(&mos7840_port->led_timer1,
		  jiffies + msecs_to_jiffies(LED_ON_MS));
}
/**
 * usb_serial_generic_write_start - kick off an URB write
 * @port:	Pointer to the &struct usb_serial_port data
 *
 * Returns zero on success, or a negative errno value
 */
static int usb_serial_generic_write_start(struct usb_serial_port *port)
{
	struct urb *urb;
	int count, result;
	unsigned long flags;
	int i;

	if (test_and_set_bit_lock(USB_SERIAL_WRITE_BUSY, &port->flags))
		return 0;
retry:
	spin_lock_irqsave(&port->lock, flags);
	if (!port->write_urbs_free || !kfifo_len(&port->write_fifo)) {
		clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
		spin_unlock_irqrestore(&port->lock, flags);
		return 0;
	}
	i = (int)find_first_bit(&port->write_urbs_free,
				ARRAY_SIZE(port->write_urbs));
	spin_unlock_irqrestore(&port->lock, flags);

	urb = port->write_urbs[i];
	count = port->serial->type->prepare_write_buffer(port,
							 urb->transfer_buffer,
							 port->bulk_out_size);
	urb->transfer_buffer_length = count;
	usb_serial_debug_data(debug, &port->dev, __func__, count,
			      urb->transfer_buffer);
	spin_lock_irqsave(&port->lock, flags);
	port->tx_bytes += count;
	spin_unlock_irqrestore(&port->lock, flags);

	clear_bit(i, &port->write_urbs_free);
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result) {
		dev_err(&port->dev, "%s - error submitting urb: %d\n",
			__func__, result);
		set_bit(i, &port->write_urbs_free);
		spin_lock_irqsave(&port->lock, flags);
		port->tx_bytes -= count;
		spin_unlock_irqrestore(&port->lock, flags);

		clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
		return result;
	}

	/* Try sending off another urb, unless in irq context (in which case
	 * there will be no free urb). */
	if (!in_irq())
		goto retry;

	clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);

	return 0;
}
static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}
static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
{
	int rc = 0;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
		if (rc == 0) {
			/* for atari only */
			ide_get_lock(ide_intr, hwif);
		}
	}
	return rc;
}
struct platform_device *
reserve_pmu(enum arm_pmu_type device)
{
	struct platform_device *pdev;

	if (test_and_set_bit_lock(device, &pmu_lock)) {
		pdev = ERR_PTR(-EBUSY);
	} else if (pmu_devices[device] == NULL) {
		clear_bit_unlock(device, &pmu_lock);
		pdev = ERR_PTR(-ENODEV);
	} else {
		pdev = pmu_devices[device];
	}

	return pdev;
}
/**
 * usb_serial_generic_write_start - start writing buffered data
 * @port: usb-serial port
 * @mem_flags: flags to use for memory allocations
 *
 * Serialised using USB_SERIAL_WRITE_BUSY flag.
 *
 * Return: Zero on success or if busy, otherwise a negative errno value.
 */
int usb_serial_generic_write_start(struct usb_serial_port *port,
				   gfp_t mem_flags)
{
	struct urb *urb;
	int count, result;
	unsigned long flags;
	int i;

	if (test_and_set_bit_lock(USB_SERIAL_WRITE_BUSY, &port->flags))
		return 0;
retry:
	spin_lock_irqsave(&port->lock, flags);
	if (!port->write_urbs_free || !kfifo_len(&port->write_fifo)) {
		clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
		spin_unlock_irqrestore(&port->lock, flags);
		return 0;
	}
	i = (int)find_first_bit(&port->write_urbs_free,
				ARRAY_SIZE(port->write_urbs));
	spin_unlock_irqrestore(&port->lock, flags);

	urb = port->write_urbs[i];
	count = port->serial->type->prepare_write_buffer(port,
							 urb->transfer_buffer,
							 port->bulk_out_size);
	urb->transfer_buffer_length = count;
	usb_serial_debug_data(&port->dev, __func__, count, urb->transfer_buffer);
	spin_lock_irqsave(&port->lock, flags);
	port->tx_bytes += count;
	spin_unlock_irqrestore(&port->lock, flags);

	clear_bit(i, &port->write_urbs_free);
	result = usb_submit_urb(urb, mem_flags);
	if (result) {
		dev_err_console(port, "%s - error submitting urb: %d\n",
				__func__, result);
		set_bit(i, &port->write_urbs_free);
		spin_lock_irqsave(&port->lock, flags);
		port->tx_bytes -= count;
		spin_unlock_irqrestore(&port->lock, flags);

		clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
		return result;
	}

	goto retry;	/* try sending off another urb */
}
/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in motion.
 */
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0, i;

	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->rx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->tx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	/* Change the sizes */
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;

	/* Rebuild the rings with the new size */
	if (dev->flags & IFF_UP)
		err = startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}
/* tipc_bearer_reset_all - reset all links on all bearers
 */
void tipc_bearer_reset_all(struct net *net)
{
	struct tipc_bearer *b;
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		b = bearer_get(net, i);
		if (b)
			clear_bit_unlock(0, &b->up);
	}
	for (i = 0; i < MAX_BEARERS; i++) {
		b = bearer_get(net, i);
		if (b)
			tipc_reset_bearer(net, b);
	}
	for (i = 0; i < MAX_BEARERS; i++) {
		b = bearer_get(net, i);
		if (b)
			test_and_set_bit_lock(0, &b->up);
	}
}
static int escvp_get_rdlen(struct escvp_port *vport)
{
	struct usb_device *dev = vport->port->serial->dev;
	struct usb_ctrlrequest *dr = vport->dr;
	unsigned char *buffer = vport->ctrl_buf;
	int ret;

	if (test_and_set_bit_lock(ESCVP_FLAG_CTRL_BUSY, &vport->flags))
		return -EBUSY;

	dr->bRequestType = ESCVP_RTYPE;
	dr->bRequest = ESCVP_GET_RDATALEN_REQ;
	dr->wValue = cpu_to_le16(0x00);
	dr->wIndex = cpu_to_le16(0x00);
	dr->wLength = cpu_to_le16(0x02);

	usb_fill_control_urb(vport->control_urb, dev,
			     usb_rcvctrlpipe(dev, ENDPOINT),
			     (unsigned char *)dr, buffer, 2,
			     escvp_get_rdlen_callback, vport);
	vport->control_urb->transfer_buffer_length = 2;
	ret = usb_submit_urb(vport->control_urb, GFP_ATOMIC);
	if (ret)
		clear_bit_unlock(ESCVP_FLAG_CTRL_BUSY, &vport->flags);

	return ret;
}
/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned).  Note that this function
 *    assumes that any type of request can be queued! if this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the drivers responsibility to readd
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 */
	do {
		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
		if (tag >= bqt->max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blkdev_dequeue_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	bqt->busy++;
	return 0;
}
int reserve_pmu(enum arm_pmu_type type)
{
	return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0;
}
/**
 * tipc_enable_bearer - enable bearer with the given name
 */
static int tipc_enable_bearer(struct net *net, const char *name,
			      u32 disc_domain, u32 priority,
			      struct nlattr *attr[])
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bearer *b;
	struct tipc_media *m;
	struct tipc_bearer_names b_names;
	struct sk_buff *skb;
	char addr_string[16];
	u32 bearer_id;
	u32 with_this_prio;
	u32 i;
	int res = -EINVAL;

	if (!tn->own_addr) {
		pr_warn("Bearer <%s> rejected, not supported in standalone mode\n",
			name);
		return -ENOPROTOOPT;
	}
	if (!bearer_name_validate(name, &b_names)) {
		pr_warn("Bearer <%s> rejected, illegal name\n", name);
		return -EINVAL;
	}
	if (tipc_addr_domain_valid(disc_domain) &&
	    (disc_domain != tn->own_addr)) {
		if (tipc_in_scope(disc_domain, tn->own_addr)) {
			disc_domain = tn->own_addr & TIPC_ZONE_CLUSTER_MASK;
			res = 0;   /* accept any node in own cluster */
		} else if (in_own_cluster_exact(net, disc_domain))
			res = 0;   /* accept specified node in own cluster */
	}
	if (res) {
		pr_warn("Bearer <%s> rejected, illegal discovery domain\n",
			name);
		return -EINVAL;
	}
	if ((priority > TIPC_MAX_LINK_PRI) &&
	    (priority != TIPC_MEDIA_LINK_PRI)) {
		pr_warn("Bearer <%s> rejected, illegal priority\n", name);
		return -EINVAL;
	}

	m = tipc_media_find(b_names.media_name);
	if (!m) {
		pr_warn("Bearer <%s> rejected, media <%s> not registered\n",
			name, b_names.media_name);
		return -EINVAL;
	}

	if (priority == TIPC_MEDIA_LINK_PRI)
		priority = m->priority;

restart:
	bearer_id = MAX_BEARERS;
	with_this_prio = 1;
	for (i = MAX_BEARERS; i-- != 0; ) {
		b = rtnl_dereference(tn->bearer_list[i]);
		if (!b) {
			bearer_id = i;
			continue;
		}
		if (!strcmp(name, b->name)) {
			pr_warn("Bearer <%s> rejected, already enabled\n",
				name);
			return -EINVAL;
		}
		if ((b->priority == priority) &&
		    (++with_this_prio > 2)) {
			if (priority-- == 0) {
				pr_warn("Bearer <%s> rejected, duplicate priority\n",
					name);
				return -EINVAL;
			}
			pr_warn("Bearer <%s> priority adjustment required %u->%u\n",
				name, priority + 1, priority);
			goto restart;
		}
	}
	if (bearer_id >= MAX_BEARERS) {
		pr_warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
			name, MAX_BEARERS);
		return -EINVAL;
	}

	b = kzalloc(sizeof(*b), GFP_ATOMIC);
	if (!b)
		return -ENOMEM;

	strcpy(b->name, name);
	b->media = m;
	res = m->enable_media(net, b, attr);
	if (res) {
		pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
			name, -res);
		return -EINVAL;
	}

	b->identity = bearer_id;
	b->tolerance = m->tolerance;
	b->window = m->window;
	b->domain = disc_domain;
	b->net_plane = bearer_id + 'A';
	b->priority = priority;
	test_and_set_bit_lock(0, &b->up);

	res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
	if (res) {
		bearer_disable(net, b);
		pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
			name);
		return -EINVAL;
	}

	rcu_assign_pointer(tn->bearer_list[bearer_id], b);
	if (skb)
		tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);

	if (tipc_mon_create(net, bearer_id))
		return -ENOMEM;

	pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
		name, tipc_addr_string_fill(addr_string, disc_domain),
		priority);

	return res;
}
/*
 * Probe all of a vlserver's addresses to find out the best route and to
 * query its capabilities.
 */
static bool afs_do_probe_vlserver(struct afs_net *net,
				  struct afs_vlserver *server,
				  struct key *key,
				  unsigned int server_index,
				  struct afs_error *_e)
{
	struct afs_addr_cursor ac = {
		.index = 0,
	};
	struct afs_call *call;
	bool in_progress = false;

	_enter("%s", server->name);

	read_lock(&server->lock);
	ac.alist = rcu_dereference_protected(server->addresses,
					     lockdep_is_held(&server->lock));
	read_unlock(&server->lock);

	atomic_set(&server->probe_outstanding, ac.alist->nr_addrs);
	memset(&server->probe, 0, sizeof(server->probe));
	server->probe.rtt = UINT_MAX;

	for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) {
		call = afs_vl_get_capabilities(net, &ac, key, server,
					       server_index);
		if (!IS_ERR(call)) {
			afs_put_call(call);
			in_progress = true;
		} else {
			afs_prioritise_error(_e, PTR_ERR(call), ac.abort_code);
		}
	}

	if (!in_progress)
		afs_vl_probe_done(server);
	return in_progress;
}

/*
 * Send off probes to all unprobed servers.
 */
int afs_send_vl_probes(struct afs_net *net, struct key *key,
		       struct afs_vlserver_list *vllist)
{
	struct afs_vlserver *server;
	struct afs_error e;
	bool in_progress = false;
	int i;

	e.error = 0;
	e.responded = false;
	for (i = 0; i < vllist->nr_servers; i++) {
		server = vllist->servers[i].server;
		if (test_bit(AFS_VLSERVER_FL_PROBED, &server->flags))
			continue;

		if (!test_and_set_bit_lock(AFS_VLSERVER_FL_PROBING,
					   &server->flags) &&
		    afs_do_probe_vlserver(net, server, key, i, &e))
			in_progress = true;
	}

	return in_progress ? 0 : e.error;
}

/*
 * Wait for the first as-yet untried server to respond.
 */
int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist,
			   unsigned long untried)
{
	struct wait_queue_entry *waits;
	struct afs_vlserver *server;
	unsigned int rtt = UINT_MAX;
	bool have_responders = false;
	int pref = -1, i;

	_enter("%u,%lx", vllist->nr_servers, untried);

	/* Only wait for servers that have a probe outstanding. */
	for (i = 0; i < vllist->nr_servers; i++) {
		if (test_bit(i, &untried)) {
			server = vllist->servers[i].server;
			if (!test_bit(AFS_VLSERVER_FL_PROBING, &server->flags))
				__clear_bit(i, &untried);
			if (server->probe.responded)
				have_responders = true;
		}
	}
	if (have_responders || !untried)
		return 0;

	waits = kmalloc(array_size(vllist->nr_servers, sizeof(*waits)),
			GFP_KERNEL);
	if (!waits)
		return -ENOMEM;

	for (i = 0; i < vllist->nr_servers; i++) {
		if (test_bit(i, &untried)) {
			server = vllist->servers[i].server;
			init_waitqueue_entry(&waits[i], current);
			add_wait_queue(&server->probe_wq, &waits[i]);
		}
	}

	for (;;) {
		bool still_probing = false;

		set_current_state(TASK_INTERRUPTIBLE);
		for (i = 0; i < vllist->nr_servers; i++) {
			if (test_bit(i, &untried)) {
				server = vllist->servers[i].server;
				if (server->probe.responded)
					goto stop;
				if (test_bit(AFS_VLSERVER_FL_PROBING,
					     &server->flags))
					still_probing = true;
			}
		}

		if (!still_probing || signal_pending(current))
			goto stop;
		schedule();
	}

stop:
	set_current_state(TASK_RUNNING);

	for (i = 0; i < vllist->nr_servers; i++) {
		if (test_bit(i, &untried)) {
			server = vllist->servers[i].server;
			if (server->probe.responded &&
			    server->probe.rtt < rtt) {
				pref = i;
				rtt = server->probe.rtt;
			}

			remove_wait_queue(&server->probe_wq, &waits[i]);
		}
	}

	kfree(waits);

	if (pref == -1 && signal_pending(current))
		return -ERESTARTSYS;

	if (pref >= 0)
		vllist->preferred = pref;

	_leave(" = 0 [%u]", pref);
	return 0;
}
/**
 * tipc_enable_bearer - enable bearer with the given name
 */
static int tipc_enable_bearer(struct net *net, const char *name,
			      u32 disc_domain, u32 prio,
			      struct nlattr *attr[])
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_bearer_names b_names;
	int with_this_prio = 1;
	struct tipc_bearer *b;
	struct tipc_media *m;
	struct sk_buff *skb;
	int bearer_id = 0;
	int res = -EINVAL;
	char *errstr = "";

	if (!bearer_name_validate(name, &b_names)) {
		errstr = "illegal name";
		goto rejected;
	}

	if (prio > TIPC_MAX_LINK_PRI && prio != TIPC_MEDIA_LINK_PRI) {
		errstr = "illegal priority";
		goto rejected;
	}

	m = tipc_media_find(b_names.media_name);
	if (!m) {
		errstr = "media not registered";
		goto rejected;
	}

	if (prio == TIPC_MEDIA_LINK_PRI)
		prio = m->priority;

	/* Check new bearer vs existing ones and find free bearer id if any */
	while (bearer_id < MAX_BEARERS) {
		b = rtnl_dereference(tn->bearer_list[bearer_id]);
		if (!b)
			break;
		if (!strcmp(name, b->name)) {
			errstr = "already enabled";
			goto rejected;
		}
		bearer_id++;
		if (b->priority != prio)
			continue;
		if (++with_this_prio <= 2)
			continue;
		pr_warn("Bearer <%s>: already 2 bearers with priority %u\n",
			name, prio);
		if (prio == TIPC_MIN_LINK_PRI) {
			errstr = "cannot adjust to lower";
			goto rejected;
		}
		pr_warn("Bearer <%s>: trying with adjusted priority\n", name);
		prio--;
		bearer_id = 0;
		with_this_prio = 1;
	}

	if (bearer_id >= MAX_BEARERS) {
		errstr = "max 3 bearers permitted";
		goto rejected;
	}

	b = kzalloc(sizeof(*b), GFP_ATOMIC);
	if (!b)
		return -ENOMEM;

	strcpy(b->name, name);
	b->media = m;
	res = m->enable_media(net, b, attr);
	if (res) {
		kfree(b);
		errstr = "failed to enable media";
		goto rejected;
	}

	b->identity = bearer_id;
	b->tolerance = m->tolerance;
	b->window = m->window;
	b->domain = disc_domain;
	b->net_plane = bearer_id + 'A';
	b->priority = prio;
	test_and_set_bit_lock(0, &b->up);

	res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
	if (res) {
		bearer_disable(net, b);
		kfree(b);
		errstr = "failed to create discoverer";
		goto rejected;
	}

	rcu_assign_pointer(tn->bearer_list[bearer_id], b);
	if (skb)
		tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);

	if (tipc_mon_create(net, bearer_id)) {
		bearer_disable(net, b);
		return -ENOMEM;
	}

	pr_info("Enabled bearer <%s>, priority %u\n", name, prio);

	return res;

rejected:
	pr_warn("Enabling of bearer <%s> rejected, %s\n", name, errstr);
	return res;
}
/* Change the coalescing values.
 * Both cvals->*_usecs and cvals->*_frames have to be > 0
 * in order for coalescing to be active
 */
static int gfar_scoalesce(struct net_device *dev,
			  struct ethtool_coalesce *cvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i, err = 0;

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
		return -EOPNOTSUPP;

	if (NULL == priv->phydev)
		return -ENODEV;

	/* Check the bounds of the values */
	if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	/* Check the bounds of the values */
	if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
		netdev_info(dev, "Coalescing is limited to %d microseconds\n",
			    GFAR_MAX_COAL_USECS);
		return -EINVAL;
	}

	if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
		netdev_info(dev, "Coalescing is limited to %d frames\n",
			    GFAR_MAX_COAL_FRAMES);
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	/* Set up rx coalescing */
	if ((cvals->rx_coalesce_usecs == 0) ||
	    (cvals->rx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_rx_queues; i++)
			priv->rx_queue[i]->rxcoalescing = 1;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rxic = mk_ic_value(
			cvals->rx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
	}

	/* Set up tx coalescing */
	if ((cvals->tx_coalesce_usecs == 0) ||
	    (cvals->tx_max_coalesced_frames == 0)) {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 0;
	} else {
		for (i = 0; i < priv->num_tx_queues; i++)
			priv->tx_queue[i]->txcoalescing = 1;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->txic = mk_ic_value(
			cvals->tx_max_coalesced_frames,
			gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
	}

	if (dev->flags & IFF_UP) {
		stop_gfar(dev);
		err = startup_gfar(dev);
	} else {
		gfar_mac_reset(priv);
	}

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}