static base_jd_event_code kbase_fence_trigger(kbase_jd_atom *katom, int result)
{
        struct sync_pt *pt;
        struct sync_timeline *timeline;

        if (!list_is_singular(&katom->fence->pt_list_head)) {
                /* Not exactly one item in the list - so it didn't (directly) come from us */
                return BASE_JD_EVENT_JOB_CANCELLED;
        }

        pt = list_first_entry(&katom->fence->pt_list_head, struct sync_pt, pt_list);
        timeline = pt->parent;

        if (!kbase_sync_timeline_is_ours(timeline)) {
                /* Fence has a sync_pt which isn't ours! */
                return BASE_JD_EVENT_JOB_CANCELLED;
        }

        kbase_sync_signal_pt(pt, result);
        sync_timeline_signal(timeline);

        return (result < 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE;
}
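All of these examples hinge on the same one-line helper. For reference, in the kernel's include/linux/list.h it is defined as:

/**
 * list_is_singular - tests whether a list has just one entry.
 * @head: the list to test.
 */
static inline int list_is_singular(const struct list_head *head)
{
        return !list_empty(head) && (head->next == head->prev);
}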
/** @brief Update the LRU ordering to lower the eviction rate */
void fss_dbd_lru_update(struct dbd *dbd)
{
        struct list_head *next;

        /* Bump the access frequency counter */
        ke_atomic_inc(&dbd->access_counter);
        if ((ke_atomic_read(&dbd->access_counter) & FSS_DBD_LEVEL_EXCHANGE_MASK) == 0) {
                ke_spin_lock(&cache_desc()->sort_lock);

                /* Only one node in the list - nothing to swap */
                if (unlikely(list_is_singular(&cache_desc()->sort_list)))
                        goto no_update;

                /* Swap positions with the next DBD */
                next = dbd->list.next;
                list_del(&dbd->list);
                list_add(&dbd->list, next);
no_update:
                ke_spin_unlock(&cache_desc()->sort_lock);
        }
        return;
}
static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
        struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_address_space *vm = &ppgtt->base;

        if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
            (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
                ppgtt->base.cleanup(&ppgtt->base);
                return;
        }

        /*
         * Make sure vmas are unbound before we take down the drm_mm
         *
         * FIXME: Proper refcounting should take care of this, this shouldn't be
         * needed at all.
         */
        if (!list_empty(&vm->active_list)) {
                struct i915_vma *vma;

                list_for_each_entry(vma, &vm->active_list, mm_list)
                        if (WARN_ON(list_empty(&vma->vma_link) ||
                                    list_is_singular(&vma->vma_link)))
                                break;

                i915_gem_evict_vm(&ppgtt->base, true);
        } else {
static int unix_sock_modesw(struct globals *globals,
                            struct alfred_modeswitch_v0 *modeswitch,
                            int client_sock)
{
        int len, ret = -1;

        len = ntohs(modeswitch->header.length);
        if (len < (int)(sizeof(*modeswitch) - sizeof(modeswitch->header)))
                goto err;

        switch (modeswitch->mode) {
        case ALFRED_MODESWITCH_SLAVE:
                if (!list_is_singular(&globals->interfaces))
                        goto err;

                globals->opmode = OPMODE_SLAVE;
                break;
        case ALFRED_MODESWITCH_MASTER:
                globals->opmode = OPMODE_MASTER;
                break;
        default:
                goto err;
        }

        ret = 0;
err:
        close(client_sock);
        return ret;
}
static int micvcons_open(struct tty_struct *tty, struct file *filp)
{
        micvcons_port_t *port = &mic_data.dd_ports[tty->index];
        int ret = 0;
        mic_ctx_t *mic_ctx = get_per_dev_ctx(tty->index);

        tty->driver_data = port;

        mutex_lock(&port->dp_mutex);
        spin_lock_bh(&port->dp_lock);

        if ((filp->f_flags & O_ACCMODE) != O_RDONLY) {
                if (port->dp_writer) {
                        ret = -EBUSY;
                        goto exit_locked;
                }
                port->dp_writer = filp;
                port->dp_bytes = 0;
        }

        if ((filp->f_flags & O_ACCMODE) != O_WRONLY) {
                if (port->dp_reader) {
                        ret = -EBUSY;
                        goto exit_locked;
                }
                port->dp_reader = filp;
                port->dp_canread = 1;
        }

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)
        tty->low_latency = 0;
#else
        tty->port->low_latency = 0;
#endif

        if (!port->dp_tty)
                port->dp_tty = tty;
        if (!port->dp_vcons)
                port->dp_vcons = &mic_ctx->bi_vcons;
        if (tty->count == 1) {
                ret = micvcons_start(mic_ctx);
                if (ret != 0)
                        goto exit_locked;
                spin_lock(&timer_list_lock);
                list_add_tail_rcu(&port->list_member, &timer_list_head);
                if (list_is_singular(&timer_list_head)) {
                        restart_timer_flag = MICVCONS_TIMER_RESTART;
                        mod_timer(&vcons_timer, jiffies +
                                  msecs_to_jiffies(MICVCONS_SHORT_TIMEOUT));
                }
                spin_unlock(&timer_list_lock);
        }

exit_locked:
        spin_unlock_bh(&port->dp_lock);
        mutex_unlock(&port->dp_mutex);
        return ret;
}
static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
{
        struct dentry *dentry;

        spin_lock(&inode->i_lock);
        /* Directory should have only one entry. */
        BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
        dentry = list_entry(inode->i_dentry.next, struct dentry, d_u.d_alias);
        spin_unlock(&inode->i_lock);
        return dentry;
}
SYSCALL_DEFINE(sched_yield, void)
{
        if (!list_is_singular(&pri_runq[current->prio])) {
                list_del(&current->rq);
                list_add_tail(&current->rq, &pri_runq[current->prio]);
        }
        schedule();
        return 0;
}
int v4l2_fh_is_singular(struct v4l2_fh *fh)
{
        unsigned long flags;
        int is_singular;

        if (fh == NULL || fh->vdev == NULL)
                return 0;
        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        is_singular = list_is_singular(&fh->list);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
        return is_singular;
}
static void cx18_mdl_send_to_dvb(struct cx18_stream *s, struct cx18_mdl *mdl)
{
        struct cx18_buffer *buf;

        if (s->dvb == NULL || !s->dvb->enabled || mdl->bytesused == 0)
                return;

        /* We ignore mdl and buf readpos accounting here - it doesn't matter */

        /* The likely case */
        if (list_is_singular(&mdl->buf_list)) {
                buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
                                       list);
                if (buf->bytesused)
                        dvb_dmx_swfilter(&s->dvb->demux,
                                         buf->buf, buf->bytesused);
                return;
        }
static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct at_xdmac_desc *desc = txd_to_at_desc(tx);
        struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
        dma_cookie_t cookie;
        unsigned long irqflags;

        spin_lock_irqsave(&atchan->lock, irqflags);
        cookie = dma_cookie_assign(tx);

        dev_vdbg(chan2dev(tx->chan),
                 "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
                 __func__, atchan, desc);
        list_add_tail(&desc->xfer_node, &atchan->xfers_list);
        if (list_is_singular(&atchan->xfers_list))
                at_xdmac_start_xfer(atchan, desc);

        spin_unlock_irqrestore(&atchan->lock, irqflags);
        return cookie;
}
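The guard in at_xdmac_tx_submit() is a common driver idiom: if the descriptor just queued is the sole entry, the engine was idle and has to be kicked; otherwise the completion path will chain into the new work. A minimal user-space sketch of the idiom follows, with a hypothetical start_hw() standing in for the register programming a real driver would do:

#include <stdio.h>
#include <stddef.h>

/* Minimal circular doubly-linked list, mirroring the kernel's list_head. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

static int list_empty(const struct list_head *h) { return h->next == h; }

static int list_is_singular(const struct list_head *h)
{
        return !list_empty(h) && (h->next == h->prev);
}

struct desc { struct list_head node; int id; };

static struct list_head xfers_list;

/* Hypothetical hardware kick; a real driver would program registers here. */
static void start_hw(struct desc *d) { printf("start xfer %d\n", d->id); }

/* The idiom: enqueue, then kick the engine only if the queue was idle.
 * A completion handler (not shown) would pop entries and restart as needed. */
static void submit(struct desc *d)
{
        list_add_tail(&d->node, &xfers_list);
        if (list_is_singular(&xfers_list))
                start_hw(d);
}

int main(void)
{
        struct desc a = { .id = 1 }, b = { .id = 2 };

        INIT_LIST_HEAD(&xfers_list);
        submit(&a);     /* queue was empty: prints "start xfer 1" */
        submit(&b);     /* queue busy: hardware is left alone */
        return 0;
}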
static int expand_single_string(struct token *tok, struct token *next,
                                struct string **string_ret)
{
        int ret;
        LIST_HEAD(string_list);

        ret = expand_params_and_word_split(tok, next, &string_list);
        if (ret)
                goto out_free_string_list;
        ret = glue_strings(&string_list);
        if (ret)
                goto out_free_string_list;
        if (!mysh_filename_expansion_disabled) {
                ret = do_filename_expansion(&string_list);
                if (ret)
                        goto out_free_string_list;
        }
        if (list_empty(&string_list))
                *string_ret = NULL;
        else if (list_is_singular(&string_list))
                *string_ret = list_entry(string_list.next, struct string, list);
        else {
static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
{
        struct vivid_buffer *vid_out_buf = NULL;
        struct vivid_buffer *vbi_out_buf = NULL;

        dprintk(dev, 1, "Video Output Thread Tick\n");

        /* Drop a certain percentage of buffers. */
        if (dev->perc_dropped_buffers &&
            prandom_u32_max(100) < dev->perc_dropped_buffers)
                return;

        spin_lock(&dev->slock);
        /*
         * Only dequeue buffer if there is at least one more pending.
         * This makes video loopback possible.
         */
        if (!list_empty(&dev->vid_out_active) &&
            !list_is_singular(&dev->vid_out_active)) {
                vid_out_buf = list_entry(dev->vid_out_active.next,
                                         struct vivid_buffer, list);
                list_del(&vid_out_buf->list);
        }
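The paired guard in vivid (and in the stk handler further down) is the complementary idiom: consume entries but never drain the list completely, so the last buffer stays available for reuse. A sketch of that shape, reusing the minimal list helpers from the previous example; list_del() is added here and pop_keep_one() is a hypothetical name:

/* Unlink an entry from the minimal list above. */
static void list_del(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
}

/*
 * Dequeue the oldest pending entry, but never the last one: the final
 * buffer stays queued so a consumer (e.g. video loopback) always has a
 * frame to read. Returns NULL when zero or one entries remain.
 */
static struct desc *pop_keep_one(struct list_head *head)
{
        struct desc *d;

        if (list_empty(head) || list_is_singular(head))
                return NULL;
        d = (struct desc *)((char *)head->next - offsetof(struct desc, node));
        list_del(&d->node);
        return d;
}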
/* Generic interrupt line interrupt handler */
static irqreturn_t iio_interrupt_handler(int irq, void *_int_info)
{
        struct iio_interrupt *int_info = _int_info;
        struct iio_dev *dev_info = int_info->dev_info;
        struct iio_event_handler_list *p;
        s64 time_ns;
        unsigned long flags;

        spin_lock_irqsave(&int_info->ev_list_lock, flags);
        if (list_empty(&int_info->ev_list)) {
                spin_unlock_irqrestore(&int_info->ev_list_lock, flags);
                return IRQ_NONE;
        }

        time_ns = iio_get_time_ns();
        /* detect single element list */
        if (list_is_singular(&int_info->ev_list)) {
                disable_irq_nosync(irq);
                p = list_first_entry(&int_info->ev_list,
                                     struct iio_event_handler_list, list);
                /* single event handler - maybe shared */
                p->handler(dev_info, 1, time_ns, !(p->refcount > 1));
        } else
/*
 * This function is called when an URB transfer completes (isochronous
 * pipe). The processing happens in interrupt time, so it has to be fast,
 * must not crash, and must not stall. Neat.
 */
static void stk_isoc_handler(struct urb *urb)
{
        int i;
        int ret;
        int framelen;
        unsigned long flags;
        unsigned char *fill = NULL;
        unsigned char *iso_buf = NULL;
        struct stk_camera *dev;
        struct stk_sio_buffer *fb;

        dev = (struct stk_camera *)urb->context;

        if (dev == NULL) {
                STK_ERROR("isoc_handler called with NULL device !\n");
                return;
        }

        if (urb->status == -ENOENT || urb->status == -ECONNRESET
                || urb->status == -ESHUTDOWN) {
                atomic_dec(&dev->urbs_used);
                return;
        }

        spin_lock_irqsave(&dev->spinlock, flags);

        if (urb->status != -EINPROGRESS && urb->status != 0) {
                STK_ERROR("isoc_handler: urb->status == %d\n", urb->status);
                goto resubmit;
        }

        if (list_empty(&dev->sio_avail)) {
                /* FIXME Stop streaming after a while */
                (void)(printk_ratelimit() &&
                       STK_ERROR("isoc_handler without available buffer!\n"));
                goto resubmit;
        }
        fb = list_first_entry(&dev->sio_avail, struct stk_sio_buffer, list);
        fill = fb->buffer + fb->v4lbuf.bytesused;

        for (i = 0; i < urb->number_of_packets; i++) {
                if (urb->iso_frame_desc[i].status != 0) {
                        if (urb->iso_frame_desc[i].status != -EXDEV)
                                STK_ERROR("Frame %d has error %d\n", i,
                                          urb->iso_frame_desc[i].status);
                        continue;
                }
                framelen = urb->iso_frame_desc[i].actual_length;
                iso_buf = urb->transfer_buffer + urb->iso_frame_desc[i].offset;

                if (framelen <= 4)
                        continue; /* no data */

                /*
                 * We found something informational from there.
                 * The isoc frames have two types of headers:
                 * type1: 00 xx 00 00 or 20 xx 00 00
                 * type2: 80 xx 00 00 00 00 00 00 or a0 xx 00 00 00 00 00 00
                 * xx is a sequencer which has never been seen over 0x3f.
                 * IMHO the data written down looks like Bayer; I see
                 * similarities after every 640 bytes.
                 */
                if (*iso_buf & 0x80) {
                        framelen -= 8;
                        iso_buf += 8;
                        /* This marks a new frame */
                        if (fb->v4lbuf.bytesused != 0
                                && fb->v4lbuf.bytesused != dev->frame_size) {
                                (void)(printk_ratelimit() &&
                                       STK_ERROR("frame %d, bytesused=%d, "
                                                 "skipping\n", i,
                                                 fb->v4lbuf.bytesused));
                                fb->v4lbuf.bytesused = 0;
                                fill = fb->buffer;
                        } else if (fb->v4lbuf.bytesused == dev->frame_size) {
                                if (list_is_singular(&dev->sio_avail)) {
                                        /* Always reuse the last buffer */
                                        fb->v4lbuf.bytesused = 0;
                                        fill = fb->buffer;
                                } else {
                                        list_move_tail(dev->sio_avail.next,
                                                       &dev->sio_full);
                                        wake_up(&dev->wait_frame);
                                        fb = list_first_entry(&dev->sio_avail,
                                                struct stk_sio_buffer, list);
                                        fb->v4lbuf.bytesused = 0;
                                        fill = fb->buffer;
                                }
                        }
                } else {