static int camera_v4l2_close(struct file *filep)
{
    int rc = 0;
    int ret = 0;
    struct v4l2_event event;
    struct msm_video_device *pvdev = video_drvdata(filep);
    struct camera_v4l2_private *sp = fh_to_private(filep->private_data);

    // atomic_sub_return(1, &pvdev->opened);
    if (atomic_read(&pvdev->opened) == 0) {
        camera_pack_event(filep, MSM_CAMERA_SET_PARM,
            MSM_CAMERA_PRIV_DEL_STREAM, -1, &event);
        /* Do not wait, imaging server may have crashed */
        ret = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
        if (ret < 0) {
            pr_err("%s:%d camera_v4l2_close_1 failed\n",
                __func__, __LINE__);
        }

        camera_pack_event(filep, MSM_CAMERA_DEL_SESSION, 0, -1, &event);
        /* Do not wait, imaging server may have crashed */
        msm_post_event(&event, -1);
        msm_delete_command_ack_q(pvdev->vdev->num, 0);

        /* This should take care of both normal close
         * and application crashes */
        msm_destroy_session(pvdev->vdev->num);
        pm_relax(&pvdev->vdev->dev);
        atomic_set(&pvdev->stream_cnt, 0);
    } else {
        camera_pack_event(filep, MSM_CAMERA_SET_PARM,
            MSM_CAMERA_PRIV_DEL_STREAM, -1, &event);
        /* Do not wait, imaging server may have crashed */
        ret = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
        if (ret < 0) {
            pr_err("%s:%d camera_v4l2_close_2 failed\n",
                __func__, __LINE__);
        }

        msm_delete_command_ack_q(pvdev->vdev->num, sp->stream_id);
        msm_delete_stream(pvdev->vdev->num, sp->stream_id);
    }

    camera_v4l2_vb2_q_release(filep);
    camera_v4l2_fh_release(filep);
    return rc;
}
static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
    int r = atomic_sub_return(1, &ubufs->refcount);

    if (unlikely(!r))
        wake_up(&ubufs->wait);
    return r;
}
static ssize_t irq_proc_write(struct file *filp, const char __user *bufp,
                              size_t len, loff_t *ppos)
{
#ifndef AUTO_IRQ_RESPONSE
    struct proc_dir_entry *ent = PDE(filp->f_path.dentry->d_inode);
    unsigned int irq = (unsigned int)ent->data;
    struct irq_proc *ip = lambo86_irq_proc[irq];
    int pending;

    glirq_printf("");

    if (len < sizeof(int)) {
        return -EINVAL;
    }

    if (ip) {
        /* note: this takes the numeric value of the user pointer itself;
         * nothing is copied from user space */
        pending = (int)bufp;
        pending = atomic_sub_return(pending, &ip->count);
        if (pending == 0) {
            enable_irq(ip->irq);
            if (filp->f_flags & O_NONBLOCK) {
                return -EWOULDBLOCK;
            }
        }
    }
    return sizeof(pending);
#else
    return 0;
#endif
}
static void ged_monitor_3D_fence_work_cb(struct work_struct *psWork)
{
    GED_MONITOR_3D_FENCE *psMonitor;

    if (atomic_sub_return(1, &g_i32Count) < 1)
    {
        if (0 == ged_monitor_3D_fence_disable)
        {
            //unsigned int uiFreqLevelID;
            //if (mtk_get_bottom_gpu_freq(&uiFreqLevelID))
            {
                //if (uiFreqLevelID > 0)
                {
                    mtk_set_bottom_gpu_freq(0);
#ifdef CONFIG_GPU_TRACEPOINTS
                    if (ged_monitor_3D_fence_systrace)
                    {
                        unsigned long long t = cpu_clock(smp_processor_id());
                        trace_gpu_sched_switch("Smart Boost", t, 0, 0, 1);
                    }
#endif
                }
            }
        }
    }

    if (ged_monitor_3D_fence_debug > 0)
    {
        GED_LOGI("[-]3D fences count = %d\n", atomic_read(&g_i32Count));
    }

    psMonitor = GED_CONTAINER_OF(psWork, GED_MONITOR_3D_FENCE, sWork);
    sync_fence_put(psMonitor->psSyncFence);
    ged_free(psMonitor, sizeof(GED_MONITOR_3D_FENCE));
}
static struct base_jd_udata kbase_event_process(struct kbase_context *kctx,
        struct kbase_jd_atom *katom)
{
    struct base_jd_udata data;

    lockdep_assert_held(&kctx->jctx.lock);

    KBASE_DEBUG_ASSERT(kctx != NULL);
    KBASE_DEBUG_ASSERT(katom != NULL);
    KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);

    data = katom->udata;

    KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx,
            atomic_sub_return(1, &kctx->timeline.jd_atoms_in_flight));

#if defined(CONFIG_MALI_MIPE_ENABLED)
    kbase_tlstream_tl_nret_atom_ctx(katom, kctx);
    kbase_tlstream_tl_del_atom(katom);
#endif

    katom->status = KBASE_JD_ATOM_STATE_UNUSED;

    wake_up(&katom->completed);

    return data;
}
void hiusb_stop_hcd(void)
{
    if (atomic_sub_return(1, &dev_open_cnt) == 0) {
        int reg;

        reg = readl(PERI_CRG104);
        reg |= (USB_PHY0_SRST_REQ | USB_PHY0_SRST_TREQ | USB_PHY1_SRST_TREQ);
        writel(reg, PERI_CRG104);
        udelay(100);

        /* close clock */
        reg = readl(PERI_CRG104);
        reg &= ~(USB_PHY0_REFCLK_SEL | USB_PHY0_REF_CKEN);
        writel(reg, PERI_CRG104);
        udelay(300);

        /* close clock */
        reg = readl(PERI_CRG103);
        reg &= ~(USB2_BUS_CKEN | USB2_OHCI48M_CKEN | USB2_OHCI12M_CKEN |
            USB2_HST_PHY_CKEN | USB2_UTMI0_CKEN | USB2_UTMI1_CKEN);
        writel(reg, PERI_CRG103);
        udelay(200);
    }
}
static int camera_v4l2_close(struct file *filep)
{
    int rc = 0;
    struct v4l2_event event;
    struct msm_video_device *pvdev = video_drvdata(filep);
    struct camera_v4l2_private *sp = fh_to_private(filep->private_data);

    BUG_ON(!pvdev);

    atomic_sub_return(1, &pvdev->opened);
    if (atomic_read(&pvdev->opened) == 0) {
        if (1 == cam_wakelock_init && !wake_lock_active(&cam_wakelock)) {
            hw_camera_log_info("%s: start camera wake_lock_timeout!\n",
                __func__);
            /* wake lock 500ms for camera exit */
            wake_lock_timeout(&cam_wakelock, HZ/2);
        } else {
            hw_camera_log_info("%s: do not need wake_lock now, cam_wakelock_init = %d\n",
                __func__, cam_wakelock_init);
        }

        camera_pack_event(filep, MSM_CAMERA_SET_PARM,
            MSM_CAMERA_PRIV_DEL_STREAM, -1, &event);
        /* Do not wait, imaging server may have crashed */
        msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
        camera_pack_event(filep, MSM_CAMERA_DEL_SESSION, 0, -1, &event);
        /* Do not wait, imaging server may have crashed */
        msm_post_event(&event, -1);
        msm_delete_command_ack_q(pvdev->vdev->num, 0);

        /* This should take care of both normal close
         * and application crashes */
        msm_destroy_session(pvdev->vdev->num);
        pm_relax(&pvdev->vdev->dev);
        atomic_set(&pvdev->stream_cnt, 0);
    } else {
        camera_pack_event(filep, MSM_CAMERA_SET_PARM,
            MSM_CAMERA_PRIV_DEL_STREAM, -1, &event);
        /* Do not wait, imaging server may have crashed */
        msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
        msm_delete_command_ack_q(pvdev->vdev->num, sp->stream_id);
        msm_delete_stream(pvdev->vdev->num, sp->stream_id);
    }

    camera_v4l2_vb2_q_release(filep);
    camera_v4l2_fh_release(filep);
    return rc;
}
void Buffer::release()
{
    if (atomic_sub_return(1, &_refcount) == 0) {
        delete this;
    }
}
inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
    return atomic_sub_return(i, v);
#elif defined(PLATFORM_WINDOWS)
    return InterlockedAdd(v, -i);
#endif
}
static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv)
{
    if (atomic_sub_return(1, &priv->hw_usecnt) == -1) {
        if (priv->dot_clk)
            clk_disable(priv->dot_clk);
        pm_runtime_put(priv->dev);
    }
}
static void w1_therm_remove_slave(struct w1_slave *sl)
{
    int refcnt = atomic_sub_return(1, THERM_REFCNT(sl->family_data));

    while (refcnt) {
        msleep(1000);
        refcnt = atomic_read(THERM_REFCNT(sl->family_data));
    }
    kfree(sl->family_data);
    sl->family_data = NULL;
}
int IUnknown::release(void)
{
    int ref = atomic_sub_return(1, &m_internal->ref);

    if (ref == 0) {
        destroy();
    }
    return ref;
}
void recursive_mutex_unlock(recursive_mutex_t *mutex)
{
    /* Decrease reference count */
    int count = atomic_sub_return(1, &mutex->count);

    /* If count reached 0, no one is locking anymore */
    if (count == 0) {
        /* So release spin lock & lock */
        mutex->owner = NULL;
        spin_unlock(&mutex->lock);
    }
}
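The unlock path above implies a matching acquire path: the spin lock stays held for as long as the recursive mutex is owned, and count records the owner's nesting depth. A minimal sketch of that counterpart, assuming the owner field stores the locking task (the function name and the use of `current` are illustrative, not taken from the original tree):

void recursive_mutex_lock(recursive_mutex_t *mutex)
{
    if (mutex->owner == current) {
        /* Nested acquire by the current owner: just bump the depth count */
        atomic_add(1, &mutex->count);
        return;
    }

    /* First acquire: take the underlying spin lock, then claim ownership */
    spin_lock(&mutex->lock);
    mutex->owner = current;
    atomic_set(&mutex->count, 1);
}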
inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
{
#ifdef PLATFORM_LINUX
    return atomic_sub_return(i, v);
#elif defined(PLATFORM_WINDOWS)
    return InterlockedAdd(v, -i);
#elif defined(PLATFORM_FREEBSD)
    atomic_subtract_int(v, i);
    return atomic_load_acq_32(v);
#endif
}
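Note that the PLATFORM_FREEBSD branch above does the subtraction and the read-back as two separate atomic operations, so the value it returns may already include a concurrent update from another thread rather than the result of this caller's own subtraction. A hedged alternative sketch using FreeBSD's atomic_fetchadd_int(), which returns the previous value from a single read-modify-write (assuming ATOMIC_T resolves to a volatile u_int on that platform; the helper name is illustrative):

#if defined(PLATFORM_FREEBSD)
/* Single-RMW variant: fetch the old value, then derive this caller's result */
inline int ATOMIC_SUB_RETURN_RMW(ATOMIC_T *v, int i)
{
    return (int)atomic_fetchadd_int(v, (u_int)(-i)) - i;
}
#endif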
/* *
 * inode_ref_dec - decrement ref_count
 * invoked by vop_ref_dec
 * calls vop_reclaim if the ref_count hits zero
 * */
int inode_ref_dec(struct inode *node)
{
    assert(inode_ref_count(node) > 0);
    int ref_count, ret;
    if ((ref_count = atomic_sub_return(&(node->ref_count), 1)) == 0) {
        if ((ret = vop_reclaim(node)) != 0 && ret != -E_BUSY) {
            kprintf("vfs: warning: vop_reclaim: %e.\n", ret);
        }
    }
    return ref_count;
}
/* *
 * inode_open_dec - decrement the open_count
 * invoked by vop_open_dec
 * calls vop_close if the open_count hits zero
 * */
int inode_open_dec(struct inode *node)
{
    assert(inode_open_count(node) > 0);
    int open_count, ret;
    if ((open_count = atomic_sub_return(&(node->open_count), 1)) == 0) {
        if ((ret = vop_close(node)) != 0) {
            kprintf("vfs: warning: vop_close: %e.\n", ret);
        }
    }
    return open_count;
}
static int camera_v4l2_close(struct file *filep)
{
    int rc = 0;
    struct v4l2_event event;
    struct msm_video_device *pvdev = video_drvdata(filep);
    struct camera_v4l2_private *sp = fh_to_private(filep->private_data);

    BUG_ON(!pvdev);

    atomic_sub_return(1, &pvdev->opened);
    if (atomic_read(&pvdev->opened) == 0) {
        camera_pack_event(filep, MSM_CAMERA_SET_PARM,
            MSM_CAMERA_PRIV_DEL_STREAM, -1, &event);
        /* Do not wait, imaging server may have crashed */
        msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
        camera_pack_event(filep, MSM_CAMERA_DEL_SESSION, 0, -1, &event);
        /* Do not wait, imaging server may have crashed */
#if 1 // wait for signal
        msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
#else
        msm_post_event(&event, -1);
#endif
        msm_delete_command_ack_q(pvdev->vdev->num, 0);

        /* This should take care of both normal close
         * and application crashes */
        msm_destroy_session(pvdev->vdev->num);
    } else {
        camera_pack_event(filep, MSM_CAMERA_SET_PARM,
            MSM_CAMERA_PRIV_DEL_STREAM, -1, &event);
        /* Do not wait, imaging server may have crashed */
        msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
        msm_delete_command_ack_q(pvdev->vdev->num, sp->stream_id);
        msm_delete_stream(pvdev->vdev->num, sp->stream_id);
    }

    camera_v4l2_vb2_q_release(filep);
    camera_v4l2_fh_release(filep);
    return rc;
}
void nvhost_module_idle_mult(struct nvhost_module *mod, int refs)
{
    bool kick = false;

    mutex_lock(&mod->lock);
    if (atomic_sub_return(refs, &mod->refcount) == 0) {
        BUG_ON(!mod->powered);
        schedule_delayed_work(&mod->powerdown, ACM_TIMEOUT);
        kick = true;
    }
    mutex_unlock(&mod->lock);

    if (kick)
        wake_up(&mod->idle);
}
static void interface_free(struct turbotap_sock_fd *turbotap_sf)
{
    int i;
    int r = atomic_sub_return(1, &turbotap_sf->refcount);

    if (unlikely(r < 0))
        printk(KERN_ERR "Error: in reference count\n");

    i = find_interface_index(turbotap_sf);
    if (likely(interfaces_count_dec())) {
        turbotap_interfaces.turbotap_sf[i] = NULL;
    }
    kfree(turbotap_sf);
}
void ipc_queue_reset(struct ipc_link_context *context)
{
    unsigned long flags;
    struct ipc_tx_queue *frame;
    int qcount;

    spin_lock_irqsave(&context->tx_q_update_lock, flags);

    qcount = atomic_read(&context->tx_q_count);
    while (qcount != 0) {
        frame = list_first_entry(&context->tx_q,
            struct ipc_tx_queue, node);
        list_del(&frame->node);
        ipc_queue_delete_frame(frame);
        qcount = atomic_sub_return(1, &context->tx_q_count);
    }

    spin_unlock_irqrestore(&context->tx_q_update_lock, flags);
}
static void skb_release_data(struct sk_buff *skb)
{
    /*
     * dataref packs two counters: the low bits count references to the
     * whole skb->data, the bits above SKB_DATAREF_SHIFT count references
     * to the payload only. An skb that has released its header (nohdr)
     * holds one of each, so both are dropped here.
     */
    if (!skb->cloned ||
        !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
                           &skb_shinfo(skb)->dataref)) {
        if (skb_shinfo(skb)->nr_frags) {
            int i;
            for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                put_page(skb_shinfo(skb)->frags[i].page);
        }

        if (skb_shinfo(skb)->frag_list)
            skb_drop_fraglist(skb);

        kfree(skb->head);
    }
}
STATIC base_jd_udata kbase_event_process(kbase_context *kctx, kbase_jd_atom *katom)
{
    base_jd_udata data;

    KBASE_DEBUG_ASSERT(kctx != NULL);
    KBASE_DEBUG_ASSERT(katom != NULL);
    KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);

    data = katom->udata;

    KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx,
            atomic_sub_return(1, &kctx->timeline.jd_atoms_in_flight));

    katom->status = KBASE_JD_ATOM_STATE_UNUSED;

    wake_up(&katom->completed);

    return data;
}
struct ipc_tx_queue *ipc_queue_get_frame(struct ipc_link_context *context)
{
    unsigned long flags;
    struct ipc_tx_queue *frame;
    int qcount;

    spin_lock_irqsave(&context->tx_q_update_lock, flags);

    frame = list_first_entry(&context->tx_q, struct ipc_tx_queue, node);
    list_del(&frame->node);
    qcount = atomic_sub_return(1, &context->tx_q_count);
    context->tx_q_free += frame->len;

    spin_unlock_irqrestore(&context->tx_q_update_lock, flags);

    dev_dbg(&context->sdev->dev,
        "link %d: get tx frame %d, new count %d, new free %d\n",
        context->link->id, frame->counter, qcount, context->tx_q_free);

    return frame;
}
static void ged_monitor_3D_fence_work_cb(struct work_struct *psWork)
{
    GED_MONITOR_3D_FENCE *psMonitor;

#ifdef GED_DEBUG_MONITOR_3D_FENCE
    ged_log_buf_print(ghLogBuf_GED, "ged_monitor_3D_fence_work_cb");
#endif

    if (atomic_sub_return(1, &g_i32Count) < 1)
    {
#ifdef GED_DEBUG_MONITOR_3D_FENCE
        ged_log_buf_print(ghLogBuf_GED, "mtk_set_bottom_gpu_freq(0)");
#endif
        mtk_set_bottom_gpu_freq(0);
    }

    psMonitor = GED_CONTAINER_OF(psWork, GED_MONITOR_3D_FENCE, sWork);
    sync_fence_put(psMonitor->psSyncFence);
    ged_free(psMonitor, sizeof(GED_MONITOR_3D_FENCE));
}
void nvhost_module_idle_mult(struct nvhost_module *mod, int refs)
{
    bool kick = false;

    mutex_lock(&mod->lock);
    if (atomic_sub_return(refs, &mod->refcount) == 0) {
        BUG_ON(!mod->powered);
        schedule_delayed_work(&mod->powerdown,
            msecs_to_jiffies(mod->desc->powerdown_delay));
        kick = true;
    }
    mutex_unlock(&mod->lock);

    if (kick) {
        wake_up(&mod->idle);

        if (mod->desc->idle)
            mod->desc->idle(mod);
    }
}
unsigned long mce_intel_adjust_timer(unsigned long interval)
{
    int r;

    if (interval < CMCI_POLL_INTERVAL)
        return interval;

    switch (__this_cpu_read(cmci_storm_state)) {
    case CMCI_STORM_ACTIVE:
        /*
         * We switch back to interrupt mode once the poll timer has
         * silenced itself. That means no events recorded and the
         * timer interval is back to our poll interval.
         */
        __this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
        r = atomic_sub_return(1, &cmci_storm_on_cpus);
        if (r == 0)
            pr_notice("CMCI storm subsided: switching to interrupt mode\n");
        /* FALLTHROUGH */

    case CMCI_STORM_SUBSIDED:
        /*
         * We wait for all cpus to go back to SUBSIDED
         * state. When that happens we switch back to
         * interrupt mode.
         */
        if (!atomic_read(&cmci_storm_on_cpus)) {
            __this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
            cmci_reenable();
            cmci_recheck();
        }
        return CMCI_POLL_INTERVAL;
    default:
        /*
         * We have shiny weather. Let the poll do whatever it
         * thinks.
         */
        return interval;
    }
}
static int rdma_request(struct p9_client *client, struct p9_req_t *req)
{
    struct p9_trans_rdma *rdma = client->trans;
    struct ib_send_wr wr, *bad_wr;
    struct ib_sge sge;
    int err = 0;
    unsigned long flags;
    struct p9_rdma_context *c = NULL;
    struct p9_rdma_context *rpl_context = NULL;

    /* When an error occurs between posting the recv and the send,
     * there will be a receive context posted without a pending request.
     * Since there is no way to "un-post" it, we remember it and skip
     * post_recv() for the next request.
     * So here,
     * see if we are this `next request' and need to absorb an excess rc.
     * If yes, then drop and free our own, and do not recv_post().
     */
    if (unlikely(atomic_read(&rdma->excess_rc) > 0)) {
        if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) {
            /* Got one ! */
            kfree(req->rc);
            req->rc = NULL;
            goto dont_need_post_recv;
        } else {
            /* We raced and lost. */
            atomic_inc(&rdma->excess_rc);
        }
    }

    /* Allocate an fcall for the reply */
    rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS);
    if (!rpl_context) {
        err = -ENOMEM;
        goto recv_error;
    }
    rpl_context->rc = req->rc;

    /*
     * Post a receive buffer for this request. We need to ensure
     * there is a reply buffer available for every outstanding
     * request. A flushed request can result in no reply for an
     * outstanding request, so we must keep a count to avoid
     * overflowing the RQ.
     */
    if (down_interruptible(&rdma->rq_sem)) {
        err = -EINTR;
        goto recv_error;
    }

    err = post_recv(client, rpl_context);
    if (err) {
        p9_debug(P9_DEBUG_FCALL, "POST RECV failed\n");
        goto recv_error;
    }
    /* remove posted receive buffer from request structure */
    req->rc = NULL;

dont_need_post_recv:
    /* Post the request */
    c = kmalloc(sizeof *c, GFP_NOFS);
    if (!c) {
        err = -ENOMEM;
        goto send_error;
    }
    c->req = req;

    c->busa = ib_dma_map_single(rdma->cm_id->device,
                    c->req->tc->sdata, c->req->tc->size,
                    DMA_TO_DEVICE);
    if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
        err = -EIO;
        goto send_error;
    }

    sge.addr = c->busa;
    sge.length = c->req->tc->size;
    sge.lkey = rdma->lkey;

    wr.next = NULL;
    c->wc_op = IB_WC_SEND;
    wr.wr_id = (unsigned long) c;
    wr.opcode = IB_WR_SEND;
    wr.send_flags = IB_SEND_SIGNALED;
    wr.sg_list = &sge;
    wr.num_sge = 1;

    if (down_interruptible(&rdma->sq_sem)) {
        err = -EINTR;
        goto send_error;
    }

    /* Mark request as `sent' *before* we actually send it,
     * because doing it after could erase the REQ_STATUS_RCVD
     * status in case of a very fast reply.
     */
    req->status = REQ_STATUS_SENT;
    err = ib_post_send(rdma->qp, &wr, &bad_wr);
    if (err)
        goto send_error;

    /* Success */
    return 0;

 /* Handle errors that happened during or while preparing the send: */
send_error:
    req->status = REQ_STATUS_ERROR;
    kfree(c);
    p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err);

    /* Ach.
     * We did recv_post(), but not send. We have one recv_post in excess.
     */
    atomic_inc(&rdma->excess_rc);
    return err;

 /* Handle errors that happened during or while preparing post_recv(): */
recv_error:
    kfree(rpl_context);
    spin_lock_irqsave(&rdma->req_lock, flags);
    if (rdma->state < P9_RDMA_CLOSING) {
        rdma->state = P9_RDMA_CLOSING;
        spin_unlock_irqrestore(&rdma->req_lock, flags);
        rdma_disconnect(rdma->cm_id);
    } else
        spin_unlock_irqrestore(&rdma->req_lock, flags);
    return err;
}
static int vpe_proc_general(struct msm_vpe_cmd *cmd)
{
    int rc = 0;
    uint32_t *cmdp = NULL;
    struct msm_queue_cmd *qcmd = NULL;
    struct msm_vpe_buf_info *vpe_buf;
    int turbo_mode = 0;
    struct msm_sync *sync = (struct msm_sync *)vpe_ctrl->syncdata;

    CDBG("vpe_proc_general: cmdID = %s, length = %d\n",
        vpe_general_cmd[cmd->id], cmd->length);

    switch (cmd->id) {
    case VPE_ENABLE:
        cmdp = kmalloc(cmd->length, GFP_ATOMIC);
        if (!cmdp) {
            rc = -ENOMEM;
            goto vpe_proc_general_done;
        }
        if (copy_from_user(cmdp, (void __user *)(cmd->value),
            cmd->length)) {
            rc = -EFAULT;
            goto vpe_proc_general_done;
        }
        /* read the flag from the copied buffer, not the user pointer */
        turbo_mode = *((int *)cmdp);
        rc = turbo_mode ?
            vpe_enable(VPE_TURBO_MODE_CLOCK_RATE) :
            vpe_enable(VPE_NORMAL_MODE_CLOCK_RATE);
        break;
    case VPE_DISABLE:
        rc = vpe_disable();
        break;
    case VPE_RESET:
    case VPE_ABORT:
        rc = vpe_reset();
        break;
    case VPE_START:
        rc = vpe_start();
        break;
    case VPE_INPUT_PLANE_CFG:
        cmdp = kmalloc(cmd->length, GFP_ATOMIC);
        if (!cmdp) {
            rc = -ENOMEM;
            goto vpe_proc_general_done;
        }
        if (copy_from_user(cmdp, (void __user *)(cmd->value),
            cmd->length)) {
            rc = -EFAULT;
            goto vpe_proc_general_done;
        }
        vpe_input_plane_config(cmdp);
        break;
    case VPE_OPERATION_MODE_CFG:
        CDBG("cmd->length = %d\n", cmd->length);
        if (cmd->length != VPE_OPERATION_MODE_CFG_LEN_ZSL) {
            rc = -EINVAL;
            goto vpe_proc_general_done;
        }
        cmdp = kmalloc(VPE_OPERATION_MODE_CFG_LEN_ZSL, GFP_ATOMIC);
        if (!cmdp) {
            rc = -ENOMEM;
            goto vpe_proc_general_done;
        }
        if (copy_from_user(cmdp, (void __user *)(cmd->value),
            VPE_OPERATION_MODE_CFG_LEN_ZSL)) {
            rc = -EFAULT;
            goto vpe_proc_general_done;
        }
        rc = vpe_operation_config(cmdp);
        CDBG("rc = %d\n", rc);
        break;
    case VPE_OUTPUT_PLANE_CFG:
        cmdp = kmalloc(cmd->length, GFP_ATOMIC);
        if (!cmdp) {
            rc = -ENOMEM;
            goto vpe_proc_general_done;
        }
        if (copy_from_user(cmdp, (void __user *)(cmd->value),
            cmd->length)) {
            rc = -EFAULT;
            goto vpe_proc_general_done;
        }
        vpe_output_plane_config(cmdp);
        break;
    case VPE_SCALE_CFG_TYPE:
        cmdp = kmalloc(cmd->length, GFP_ATOMIC);
        if (!cmdp) {
            rc = -ENOMEM;
            goto vpe_proc_general_done;
        }
        if (copy_from_user(cmdp, (void __user *)(cmd->value),
            cmd->length)) {
            rc = -EFAULT;
            goto vpe_proc_general_done;
        }
        vpe_update_scale_coef(cmdp);
        break;
    case VPE_CMD_DIS_OFFSET_CFG: {
        struct msm_vfe_resp *vdata;
        /* first get the dis offset and frame id. */
        cmdp = kmalloc(cmd->length, GFP_ATOMIC);
        if (!cmdp) {
            rc = -ENOMEM;
            goto vpe_proc_general_done;
        }
        if (copy_from_user(cmdp, (void __user *)(cmd->value),
            cmd->length)) {
            rc = -EFAULT;
            goto vpe_proc_general_done;
        }
        /* get the offset. */
        vpe_ctrl->dis_offset = *(struct dis_offset_type *)cmdp;
        qcmd = msm_dequeue_vpe(&sync->vpe_q, list_vpe_frame);
        if (!qcmd) {
            pr_err("%s: no video frame.\n", __func__);
            kfree(cmdp);
            return -EAGAIN;
        }
        vdata = (struct msm_vfe_resp *)(qcmd->command);
        vpe_buf = &vdata->vpe_bf;
        vpe_update_scaler_with_dis(&(vpe_buf->vpe_crop),
            &(vpe_ctrl->dis_offset));

        msm_send_frame_to_vpe(vpe_buf->y_phy, vpe_buf->cbcr_phy,
            &(vpe_buf->ts), OUTPUT_TYPE_V);

        if (!qcmd || !atomic_read(&qcmd->on_heap)) {
            kfree(cmdp);
            return -EAGAIN;
        }
        if (!atomic_sub_return(1, &qcmd->on_heap))
            kfree(qcmd);
        break;
    }
    default:
        break;
    }

vpe_proc_general_done:
    kfree(cmdp);
    return rc;
}
inline void successed()
{
    if (atomic_sub_return(2, &failed_count) <= 0)
        atomic_set(&failed_count, 0);
}
inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
{
    return atomic_sub_return(i, v);
}