/**
 * usb_serial_generic_write_start - kick off an URB write
 * @port: Pointer to the &struct usb_serial_port data
 *
 * Returns zero on success, or a negative errno value
 */
static int usb_serial_generic_write_start(struct usb_serial_port *port)
{
	struct urb *urb;
	int count, result;
	unsigned long flags;
	int i;

	if (test_and_set_bit_lock(USB_SERIAL_WRITE_BUSY, &port->flags))
		return 0;
retry:
	spin_lock_irqsave(&port->lock, flags);
	if (!port->write_urbs_free || !kfifo_len(&port->write_fifo)) {
		clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
		spin_unlock_irqrestore(&port->lock, flags);
		return 0;
	}
	i = (int)find_first_bit(&port->write_urbs_free,
					ARRAY_SIZE(port->write_urbs));
	spin_unlock_irqrestore(&port->lock, flags);

	urb = port->write_urbs[i];
	count = port->serial->type->prepare_write_buffer(port,
						urb->transfer_buffer,
						port->bulk_out_size);
	urb->transfer_buffer_length = count;
	usb_serial_debug_data(debug, &port->dev, __func__, count,
						urb->transfer_buffer);
	spin_lock_irqsave(&port->lock, flags);
	port->tx_bytes += count;
	spin_unlock_irqrestore(&port->lock, flags);

	clear_bit(i, &port->write_urbs_free);
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result) {
		dev_err(&port->dev, "%s - error submitting urb: %d\n",
						__func__, result);
		set_bit(i, &port->write_urbs_free);
		spin_lock_irqsave(&port->lock, flags);
		port->tx_bytes -= count;
		spin_unlock_irqrestore(&port->lock, flags);

		clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
		return result;
	}

	/* Try sending off another urb, unless in irq context (in which case
	 * there will be no free urb). */
	if (!in_irq())
		goto retry;

	clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);

	return 0;
}
/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_atomic();
	wake_up_page(page, PG_locked);
}
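/*
 * Editor's sketch (not kernel source): the general bit-lock protocol that
 * unlock_page() above instantiates. test_and_set_bit_lock() acquires the
 * bit with acquire semantics, clear_bit_unlock() releases it with release
 * semantics, and the barrier before the wakeup enforces the clear_bit vs.
 * waitqueue-read ordering described in the comment above. Names prefixed
 * with "example_" are hypothetical.
 */
static void example_bit_lock(unsigned long *flags, int bit)
{
	/* spin until we observe the bit clear and atomically set it */
	while (test_and_set_bit_lock(bit, flags))
		cpu_relax();
}

static void example_bit_unlock(unsigned long *flags, int bit)
{
	clear_bit_unlock(bit, flags);	/* release the "lock" bit */
	smp_mb__after_atomic();		/* order clear vs. waitqueue read */
	wake_up_bit(flags, bit);	/* wake anyone in wait_on_bit() */
}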
static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
			   __u16 *val)
{
	struct usb_device *dev = mcs->port->serial->dev;
	struct usb_ctrlrequest *dr = mcs->dr;
	unsigned char *buffer = mcs->ctrl_buf;
	int ret;

	if (test_and_set_bit_lock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags))
		return -EBUSY;

	dr->bRequestType = MCS_RD_RTYPE;
	dr->bRequest = MCS_RDREQ;
	dr->wValue = cpu_to_le16(Wval);	/* 0 */
	dr->wIndex = cpu_to_le16(reg);
	dr->wLength = cpu_to_le16(2);

	usb_fill_control_urb(mcs->control_urb, dev, usb_rcvctrlpipe(dev, 0),
			     (unsigned char *)dr, buffer, 2,
			     mos7840_control_callback, mcs);
	mcs->control_urb->transfer_buffer_length = 2;
	ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
	if (ret)
		clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags);

	return ret;
}
/**
 * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp
 * @pf: Board private structure
 *
 * Read the value of the Tx timestamp from the registers, convert it into a
 * value consumable by the stack, and store that result into the shhwtstamps
 * struct before returning it up the stack.
 **/
void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct i40e_hw *hw = &pf->hw;
	u32 hi, lo;
	u64 ns;

	if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
		return;

	/* don't attempt to timestamp if we don't have an skb */
	if (!pf->ptp_tx_skb)
		return;

	lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
	hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);

	ns = (((u64)hi) << 32) | lo;

	i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns);
	skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
	dev_kfree_skb_any(pf->ptp_tx_skb);
	pf->ptp_tx_skb = NULL;
	clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
}
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}
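/*
 * Editor's sketch: the allocation side that pairs with put_tag() above,
 * assuming nullb_queue carries a queue_depth field as in the null_blk
 * driver. find_first_zero_bit() is only a hint; test_and_set_bit_lock()
 * is the atomic acquire, so we retry if another CPU won the race for
 * the same tag.
 */
static unsigned int example_get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;	/* no free tag */
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}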
static void escvp_get_rdlen_callback(struct urb *urb)
{
	__u16 *data;
	__u16 rdlen = 0x0000;
	int len;
	struct escvp_port *vport;
	struct device *dev = &urb->dev->dev;
	int status = urb->status;

	vport = urb->context;

	switch (status) {
	case 0:
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		dev_dbg(dev, "%s - urb shutting down with status: %d\n",
			__func__, status);
		goto out;
	default:
		dev_dbg(dev, "%s - nonzero urb status received: %d\n",
			__func__, status);
		goto out;
	}

	data = urb->transfer_buffer;
	rdlen = data[0];
	len = (int)rdlen;
out:
	clear_bit_unlock(ESCVP_FLAG_CTRL_BUSY, &vport->flags);
	if (rdlen != 0x0000)
		escvp_get_rd(vport, rdlen);
}
/**
 * blk_queue_end_tag - end tag operations for a request
 * @q: the request queue for the device
 * @rq: the request that has completed
 *
 * Description:
 *   Typically called when end_that_request_first() returns %0, meaning
 *   all transfers have been done for a request. It's important to call
 *   this function before end_that_request_last(), as that will put the
 *   request back on the free list thus corrupting the internal tag list.
 *
 * Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag = rq->tag;

	BUG_ON(tag == -1);

	if (unlikely(tag >= bqt->real_max_depth))
		/*
		 * This can happen after tag depth has been reduced.
		 * FIXME: how about a warning or info message here?
		 */
		return;

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __func__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __func__, tag);
		return;
	}
	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
}
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	unsigned long *bitlock = &NFS_I(inode)->flags;
	struct nfs_pageio_descriptor pgio;
	int err;

	/* Stop dirtying of new pages while we sync */
	err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
			nfs_wait_bit_killable, TASK_KILLABLE);
	if (err)
		goto out_err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
	nfs_pageio_complete(&pgio);

	clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
	smp_mb__after_clear_bit();
	wake_up_bit(bitlock, NFS_INO_FLUSHING);

	if (err < 0)
		goto out_err;
	err = pgio.pg_error;
	if (err < 0)
		goto out_err;
	return 0;
out_err:
	return err;
}
/**
 * i40e_ptp_tx_hang - Detect error case when Tx timestamp register is hung
 * @pf: The PF private data structure
 *
 * This watchdog task is run periodically to make sure that we clear the Tx
 * timestamp logic if we don't obtain a timestamp in a reasonable amount of
 * time. This is unexpected in the normal case, but if it occurs it would
 * permanently prevent timestamping of future packets.
 **/
void i40e_ptp_tx_hang(struct i40e_pf *pf)
{
	struct sk_buff *skb;

	if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
		return;

	/* Nothing to do if we're not already waiting for a timestamp */
	if (!test_bit(__I40E_PTP_TX_IN_PROGRESS, pf->state))
		return;

	/* We already have a handler routine which is run when we are notified
	 * of a Tx timestamp in the hardware. If we don't get an interrupt
	 * within a second it is reasonable to assume that we never will.
	 */
	if (time_is_before_jiffies(pf->ptp_tx_start + HZ)) {
		skb = pf->ptp_tx_skb;
		pf->ptp_tx_skb = NULL;
		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);

		/* Free the skb after we clear the bitlock */
		dev_kfree_skb_any(skb);
		pf->tx_hwtstamp_timeouts++;
	}
}
/**
 * igb_ptp_tx_work - poll for a Tx timestamp
 * @work: pointer to work struct
 *
 * This work function polls the TSYNCTXCTL valid bit to determine when a
 * timestamp has been taken for the current stored skb.
 **/
static void igb_ptp_tx_work(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work, struct igb_adapter,
						   ptp_tx_work);
	struct e1000_hw *hw = &adapter->hw;
	u32 tsynctxctl;

	if (!adapter->ptp_tx_skb)
		return;

	if (time_is_before_jiffies(adapter->ptp_tx_start +
				   IGB_PTP_TX_TIMEOUT)) {
		dev_kfree_skb_any(adapter->ptp_tx_skb);
		adapter->ptp_tx_skb = NULL;
		clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
		adapter->tx_hwtstamp_timeouts++;
		dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
		return;
	}

	tsynctxctl = rd32(E1000_TSYNCTXCTL);
	if (tsynctxctl & E1000_TSYNCTXCTL_VALID)
		igb_ptp_tx_hwtstamp(adapter);
	else
		/* reschedule to check later */
		schedule_work(&adapter->ptp_tx_work);
}
/**
 * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
 * @adapter: Board private structure.
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we allow
 * only one such packet into the queue.
 **/
static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;
	int adjust = 0;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);

	/* adjust timestamp for the TX latency based on link speed */
	if (adapter->hw.mac.type == e1000_i210) {
		switch (adapter->link_speed) {
		case SPEED_10:
			adjust = IGB_I210_TX_LATENCY_10;
			break;
		case SPEED_100:
			adjust = IGB_I210_TX_LATENCY_100;
			break;
		case SPEED_1000:
			adjust = IGB_I210_TX_LATENCY_1000;
			break;
		}
	}

	shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust);

	skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
	dev_kfree_skb_any(adapter->ptp_tx_skb);
	adapter->ptp_tx_skb = NULL;
	clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
}
/**
 * igb_ptp_stop - Disable PTP device and stop the overflow check.
 * @adapter: Board private structure.
 *
 * This function stops the PTP support and cancels the delayed work.
 **/
void igb_ptp_stop(struct igb_adapter *adapter)
{
	switch (adapter->hw.mac.type) {
	case e1000_82576:
	case e1000_82580:
	case e1000_i354:
	case e1000_i350:
		cancel_delayed_work_sync(&adapter->ptp_overflow_work);
		break;
	case e1000_i210:
	case e1000_i211:
		/* No delayed work to cancel. */
		break;
	default:
		return;
	}

	cancel_work_sync(&adapter->ptp_tx_work);
	if (adapter->ptp_tx_skb) {
		dev_kfree_skb_any(adapter->ptp_tx_skb);
		adapter->ptp_tx_skb = NULL;
		clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
	}

	if (adapter->ptp_clock) {
		ptp_clock_unregister(adapter->ptp_clock);
		dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
			 adapter->netdev->name);
		adapter->flags &= ~IGB_FLAG_PTP;
	}
}
/**
 * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp
 * @pf: Board private structure
 *
 * Read the value of the Tx timestamp from the registers, convert it into a
 * value consumable by the stack, and store that result into the shhwtstamps
 * struct before returning it up the stack.
 **/
void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb = pf->ptp_tx_skb;
	struct i40e_hw *hw = &pf->hw;
	u32 hi, lo;
	u64 ns;

	if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx)
		return;

	/* don't attempt to timestamp if we don't have an skb */
	if (!pf->ptp_tx_skb)
		return;

	lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
	hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);

	ns = (((u64)hi) << 32) | lo;
	i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns);

	/* Clear the bit lock as soon as possible after reading the register,
	 * and prior to notifying the stack via skb_tstamp_tx(). Otherwise
	 * applications might wake up and attempt to request another transmit
	 * timestamp prior to the bit lock being cleared.
	 */
	pf->ptp_tx_skb = NULL;
	clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);

	/* Notify the stack and free the skb after we've unlocked */
	skb_tstamp_tx(skb, &shhwtstamps);
	dev_kfree_skb_any(skb);
}
/**
 * blk_queue_end_tag - end tag operations for a request
 * @q: the request queue for the device
 * @rq: the request that has completed
 *
 * Description:
 *   Typically called when end_that_request_first() returns %0, meaning
 *   all transfers have been done for a request. It's important to call
 *   this function before end_that_request_last(), as that will put the
 *   request back on the free list thus corrupting the internal tag list.
 *
 * Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned tag = rq->tag; /* negative tags invalid */

	BUG_ON(tag >= bqt->real_max_depth);

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __func__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __func__, tag);
		return;
	}
	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
}
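/*
 * Editor's sketch: the acquire side that blk_queue_end_tag() above pairs
 * with. In the legacy block tagging code, blk_queue_start_tag() claims a
 * tag in roughly this way; test_and_set_bit_lock() provides the acquire
 * barrier that makes the tag_map bit act as a lock for tag_index[tag].
 * This is a simplified excerpt with a hypothetical name, not the full
 * function.
 */
static int example_claim_tag(struct blk_queue_tag *bqt, unsigned int max_depth,
			     unsigned int *out_tag)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(bqt->tag_map, max_depth);
		if (tag >= max_depth)
			return -EBUSY;	/* all tags busy */
	} while (test_and_set_bit_lock(tag, bqt->tag_map));

	*out_tag = tag;
	return 0;
}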
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0;

	if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			 NETIF_F_RXCSUM)))
		return 0;

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	dev->features = features;

	if (dev->flags & IFF_UP) {
		/* Now we take down the rings to rebuild them */
		stop_gfar(dev);
		err = startup_gfar(dev);
	} else {
		gfar_mac_reset(priv);
	}

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}
/**
 * usb_serial_generic_write_start - start writing buffered data
 * @port: usb-serial port
 * @mem_flags: flags to use for memory allocations
 *
 * Serialised using USB_SERIAL_WRITE_BUSY flag.
 *
 * Return: Zero on success or if busy, otherwise a negative errno value.
 */
int usb_serial_generic_write_start(struct usb_serial_port *port,
					gfp_t mem_flags)
{
	struct urb *urb;
	int count, result;
	unsigned long flags;
	int i;

	if (test_and_set_bit_lock(USB_SERIAL_WRITE_BUSY, &port->flags))
		return 0;
retry:
	spin_lock_irqsave(&port->lock, flags);
	if (!port->write_urbs_free || !kfifo_len(&port->write_fifo)) {
		clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
		spin_unlock_irqrestore(&port->lock, flags);
		return 0;
	}
	i = (int)find_first_bit(&port->write_urbs_free,
					ARRAY_SIZE(port->write_urbs));
	spin_unlock_irqrestore(&port->lock, flags);

	urb = port->write_urbs[i];
	count = port->serial->type->prepare_write_buffer(port,
						urb->transfer_buffer,
						port->bulk_out_size);
	urb->transfer_buffer_length = count;
	usb_serial_debug_data(&port->dev, __func__, count,
						urb->transfer_buffer);
	spin_lock_irqsave(&port->lock, flags);
	port->tx_bytes += count;
	spin_unlock_irqrestore(&port->lock, flags);

	clear_bit(i, &port->write_urbs_free);
	result = usb_submit_urb(urb, mem_flags);
	if (result) {
		dev_err_console(port, "%s - error submitting urb: %d\n",
						__func__, result);
		set_bit(i, &port->write_urbs_free);
		spin_lock_irqsave(&port->lock, flags);
		port->tx_bytes -= count;
		spin_unlock_irqrestore(&port->lock, flags);

		clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
		return result;
	}

	goto retry;	/* try sending off another urb */
}
int release_pmu(struct platform_device *pdev)
{
	if (WARN_ON(pdev != pmu_devices[pdev->id]))
		return -EINVAL;
	clear_bit_unlock(pdev->id, &pmu_lock);
	return 0;
}
static inline void ide_unlock_host(struct ide_host *host)
{
	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		/* for atari only */
		ide_release_lock();
		clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
	}
}
int release_pmu(enum arm_pmu_type type)
{
	if (WARN_ON(!pmu_devices[type]))
		return -EINVAL;
	clear_bit_unlock(type, &pmu_lock);
	return 0;
}
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}
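/*
 * Editor's sketch: the waiter side that gfs2_clear_glop_pending() above
 * pairs with. A waiter would sleep with wait_on_bit() until the unlock
 * path clears the bit and calls wake_up_bit(); the function name here is
 * hypothetical.
 */
static void example_wait_glop_pending(struct gfs2_inode *ip)
{
	/* sleep until GIF_GLOP_PENDING is clear; woken by wake_up_bit() */
	wait_on_bit(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
}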
static bool afs_vl_probe_done(struct afs_vlserver *server)
{
	if (!atomic_dec_and_test(&server->probe_outstanding))
		return false;

	wake_up_var(&server->probe_outstanding);
	clear_bit_unlock(AFS_VLSERVER_FL_PROBING, &server->flags);
	wake_up_bit(&server->flags, AFS_VLSERVER_FL_PROBING);
	return true;
}
/*
 * When raise() is called it will be passed a pointer to the
 * backtrace_mask. Architectures that call nmi_cpu_backtrace()
 * directly from their raise() functions may rely on the mask
 * they are passed being updated as a side effect of this call.
 */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask))
{
	int i, this_cpu = get_cpu();

	if (test_and_set_bit(0, &backtrace_flag)) {
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		put_cpu();
		return;
	}

	cpumask_copy(to_cpumask(backtrace_mask), mask);
	if (exclude_self)
		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));

	/*
	 * Don't try to send an NMI to this cpu; it may work on some
	 * architectures, but on others it may not, and we'll get
	 * information at least as useful just by doing a dump_stack() here.
	 * Note that nmi_cpu_backtrace(NULL) will clear the cpu bit.
	 */
	if (cpumask_test_cpu(this_cpu, to_cpumask(backtrace_mask)))
		nmi_cpu_backtrace(NULL);

	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
		pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",
			this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
		raise(to_cpumask(backtrace_mask));
	}

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
		touch_softlockup_watchdog();
	}

	/*
	 * Force flush any remote buffers that might be stuck in IRQ context
	 * and therefore could not run their irq_work.
	 */
	printk_nmi_flush();

	clear_bit_unlock(0, &backtrace_flag);
	put_cpu();
}
/**
 * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
 * @adapter: Board private structure.
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we allow
 * only one such packet into the queue.
 **/
static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
	dev_kfree_skb_any(adapter->ptp_tx_skb);
	adapter->ptp_tx_skb = NULL;
	clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
}
/**
 * igb_ptp_suspend - Disable PTP work items and prepare for suspend
 * @adapter: Board private structure
 *
 * This function stops the overflow check work and PTP Tx timestamp work, and
 * will prepare the device for OS suspend.
 */
void igb_ptp_suspend(struct igb_adapter *adapter)
{
	if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
		return;

	if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
		cancel_delayed_work_sync(&adapter->ptp_overflow_work);

	cancel_work_sync(&adapter->ptp_tx_work);
	if (adapter->ptp_tx_skb) {
		dev_kfree_skb_any(adapter->ptp_tx_skb);
		adapter->ptp_tx_skb = NULL;
		clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
	}
}
/**
 * bearer_disable - disable a bearer and clean up its resources
 * @net: the applicable net namespace
 * @b: the bearer to disable
 *
 * Note: This routine assumes caller holds RTNL lock.
 */
static void bearer_disable(struct net *net, struct tipc_bearer *b)
{
	struct tipc_net *tn = tipc_net(net);
	int bearer_id = b->identity;

	pr_info("Disabling bearer <%s>\n", b->name);
	clear_bit_unlock(0, &b->up);
	tipc_node_delete_links(net, bearer_id);
	b->media->disable_media(b);
	RCU_INIT_POINTER(b->media_ptr, NULL);
	if (b->disc)
		tipc_disc_delete(b->disc);
	RCU_INIT_POINTER(tn->bearer_list[bearer_id], NULL);
	kfree_rcu(b, rcu);
	tipc_mon_delete(net, bearer_id);
}
struct platform_device *
reserve_pmu(enum arm_pmu_type device)
{
	struct platform_device *pdev;

	if (test_and_set_bit_lock(device, &pmu_lock)) {
		pdev = ERR_PTR(-EBUSY);
	} else if (pmu_devices[device] == NULL) {
		clear_bit_unlock(device, &pmu_lock);
		pdev = ERR_PTR(-ENODEV);
	} else {
		pdev = pmu_devices[device];
	}

	return pdev;
}
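/*
 * Editor's sketch: how a caller might pair reserve_pmu() above with the
 * release_pmu() variant shown earlier. example_use_pmu() is a hypothetical
 * function; the IS_ERR()/PTR_ERR() handling is the standard kernel idiom
 * for ERR_PTR() return values.
 */
static int example_use_pmu(enum arm_pmu_type type)
{
	struct platform_device *pdev;

	pdev = reserve_pmu(type);	/* takes the per-type bit lock */
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);	/* -EBUSY or -ENODEV */

	/* ... program the PMU via pdev ... */

	return release_pmu(type);	/* drops the bit lock */
}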
/**
 * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp
 * @pf: Board private structure
 *
 * Read the value of the Tx timestamp from the registers, convert it into a
 * value consumable by the stack, and store that result into the shhwtstamps
 * struct before returning it up the stack.
 **/
void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct i40e_hw *hw = &pf->hw;
	u32 hi, lo;
	u64 ns;

	lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
	hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);

	ns = (((u64)hi) << 32) | lo;

	i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns);
	skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
	dev_kfree_skb_any(pf->ptp_tx_skb);
	pf->ptp_tx_skb = NULL;
	clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
}
/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in motion.
 */
static int gfar_sringparam(struct net_device *dev,
			   struct ethtool_ringparam *rvals)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err = 0, i;

	if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->rx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
		return -EINVAL;

	if (!is_power_of_2(rvals->tx_pending)) {
		netdev_err(dev, "Ring sizes must be a power of 2\n");
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	/* Change the sizes */
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;

	/* Rebuild the rings with the new size */
	if (dev->flags & IFF_UP)
		err = startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return err;
}
/**
 * i40e_ptp_stop - Disable the driver/hardware support and unregister the PHC
 * @pf: Board private structure
 *
 * This function handles the cleanup work required from the initialization by
 * clearing out the important information and unregistering the PHC.
 **/
void i40e_ptp_stop(struct i40e_pf *pf)
{
	pf->flags &= ~I40E_FLAG_PTP;
	pf->ptp_tx = false;
	pf->ptp_rx = false;

	if (pf->ptp_tx_skb) {
		dev_kfree_skb_any(pf->ptp_tx_skb);
		pf->ptp_tx_skb = NULL;
		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
	}

	if (pf->ptp_clock) {
		ptp_clock_unregister(pf->ptp_clock);
		pf->ptp_clock = NULL;
		dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__,
			 pf->vsi[pf->lan_vsi]->netdev->name);
	}
}
/**
 * fscache_object_lookup_negative - Note negative cookie lookup
 * @object: Object pointing to cookie to mark
 *
 * Note negative lookup, permitting those waiting to read data from an already
 * existing backing object to continue as there's no data for them to read.
 */
void fscache_object_lookup_negative(struct fscache_object *object)
{
	struct fscache_cookie *cookie = object->cookie;

	_enter("{OBJ%x,%s}", object->debug_id, object->state->name);

	if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
		fscache_stat(&fscache_n_object_lookups_negative);

		/* Allow write requests to begin stacking up and read requests
		 * to begin returning ENODATA.
		 */
		set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
		clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

		_debug("wake up lookup %p", &cookie->flags);
		clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
	}
	_leave("");
}