/*
 * a note on stream states used:
 * we use following states in the compressed core
 * SNDRV_PCM_STATE_OPEN: When stream has been opened.
 * SNDRV_PCM_STATE_SETUP: When stream has been initialized. This is done by
 *	calling SNDRV_COMPRESS_SET_PARAMS. Running streams will come to this
 *	state at stop by calling SNDRV_COMPRESS_STOP, or at end of drain.
 * SNDRV_PCM_STATE_PREPARED: When a stream has been written to (for
 *	playback only). User after setting up stream writes the data buffer
 *	before starting the stream.
 * SNDRV_PCM_STATE_RUNNING: When stream has been started and is
 *	decoding/encoding and rendering/capturing data.
 * SNDRV_PCM_STATE_DRAINING: When stream is draining current data. This is done
 *	by calling SNDRV_COMPRESS_DRAIN.
 * SNDRV_PCM_STATE_PAUSED: When stream is paused. This is done by calling
 *	SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling
 *	SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively.
 */
/*
 * Open handler for the compress device node: resolves the device from the
 * minor number, allocates the per-open file/runtime state and calls the
 * driver's open op.  On every exit path the card reference taken by
 * snd_lookup_minor_data() is dropped via snd_card_unref().
 */
static int snd_compr_open(struct inode *inode, struct file *f)
{
	struct snd_compr *compr;
	struct snd_compr_file *data;
	struct snd_compr_runtime *runtime;
	enum snd_compr_direction dirn;
	int maj = imajor(inode);
	int ret;

	/* Direction is derived from the open mode: write-only = playback,
	 * read-only = capture.  O_RDWR is rejected. */
	if ((f->f_flags & O_ACCMODE) == O_WRONLY)
		dirn = SND_COMPRESS_PLAYBACK;
	else if ((f->f_flags & O_ACCMODE) == O_RDONLY)
		dirn = SND_COMPRESS_CAPTURE;
	else
		return -EINVAL;

	/* Only the ALSA major is supported; the lookup also grabs a card ref. */
	if (maj == snd_major)
		compr = snd_lookup_minor_data(iminor(inode),
					SNDRV_DEVICE_TYPE_COMPRESS);
	else
		return -EBADFD;

	if (compr == NULL) {
		pr_err("no device data!!!\n");
		return -ENODEV;
	}

	/* A device is created for one direction only; reject a mismatch. */
	if (dirn != compr->direction) {
		pr_err("this device doesn't support this direction\n");
		snd_card_unref(compr->card);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		snd_card_unref(compr->card);
		return -ENOMEM;
	}
	data->stream.ops = compr->ops;
	data->stream.direction = dirn;
	data->stream.private_data = compr->private_data;
	data->stream.device = compr;
	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (!runtime) {
		kfree(data);
		snd_card_unref(compr->card);
		return -ENOMEM;
	}
	/* Fresh stream starts in OPEN; later ioctls move it through the
	 * state machine described in the comment above. */
	runtime->state = SNDRV_PCM_STATE_OPEN;
	init_waitqueue_head(&runtime->sleep);
	data->stream.runtime = runtime;
	f->private_data = (void *)data;
	/* Driver open is serialized against other ops on this device. */
	mutex_lock(&compr->lock);
	ret = compr->ops->open(&data->stream);
	mutex_unlock(&compr->lock);
	if (ret) {
		/* Driver refused the open: undo our allocations.  f->private_data
		 * is left pointing at freed memory, but the VFS discards the file
		 * on a failed open so it is never dereferenced. */
		kfree(runtime);
		kfree(data);
	}
	snd_card_unref(compr->card);
	return ret;
}
/*
 * felica_write - write handler for the FeliCa character device.
 *
 * Copies up to TRANSMIT_BUFFER_MAX_SIZE bytes from userspace into the
 * global transmit_buf and pushes them to the UART under felica_mutex.
 *
 * Returns the number of bytes written by felica_uart_write() on success,
 * or a negative errno on failure.
 *
 * Fixes vs. previous version:
 *  - parameter errors now return -EINVAL instead of a bare -1 (which the
 *    VFS would report to userspace as -EPERM);
 *  - copy_from_user() failure now returns -EFAULT: its non-zero return is
 *    the count of bytes NOT copied, and returning that positive value from
 *    a write handler would be misread by callers as bytes written;
 *  - the error message for that failure named the wrong function.
 */
static ssize_t felica_write(struct file *fp, const char *buf, size_t count, loff_t *f_pos)
{
	int rc = 0;
	int writecount = 0;

#ifdef FEATURE_DEBUG_LOW
	FELICA_DEBUG_MSG("[FELICA] felica_write - start \n");
#endif

	/* Check input parameters */
	if (NULL == fp) {
		FELICA_DEBUG_MSG("[FELICA] ERROR - fp \n");
		return -EINVAL;
	}

	if (NULL == buf) {
		FELICA_DEBUG_MSG("[FELICA] ERROR - buf \n");
		return -EINVAL;
	}

	/* Reject requests larger than the fixed kernel-side staging buffer. */
	if (count > TRANSMIT_BUFFER_MAX_SIZE) {
		FELICA_DEBUG_MSG("[FELICA] ERROR - count \n");
		return -EINVAL;
	}

	/* Clear transmit buffer before using */
	memset(transmit_buf, 0, sizeof(transmit_buf));

	/* Copy user memory to kernel memory */
	rc = copy_from_user(transmit_buf, buf, count);
	if (rc) {
		/* rc is the number of bytes left uncopied, not an errno. */
		FELICA_DEBUG_MSG("[FELICA] ERROR - copy_from_user \n");
		return -EFAULT;
	}

	/* Display low data for debugging */
#ifdef RXTX_LOG_ENABLE
	{
		int i = 0;
		char *ptr = NULL;

		ptr = transmit_buf;
		if (NULL != ptr) {
			FELICA_DEBUG_MSG("===== WRITE FELICA LOW DATA =====\n");
			for (i = 0; i < count; i++) {
				FELICA_DEBUG_MSG(" %02x", *ptr++);
				if (0 == (i + 1) % 10) {
					FELICA_DEBUG_MSG("\n");
				}
			}
			FELICA_DEBUG_MSG("\n");
		}
	}
#endif

	/* Send transmit data to UART transmit buffer; serialized against
	 * concurrent readers/writers of the UART. */
	mutex_lock(&felica_mutex);
	writecount = felica_uart_write(transmit_buf, count);
	mutex_unlock(&felica_mutex);

#ifdef FEATURE_DEBUG_LOW
	FELICA_DEBUG_MSG("[FELICA] writecount : %d \n", writecount);
#endif

#ifdef FELICA_FN_DEVICE_TEST
	FELICA_DEBUG_MSG("[FELICA] felica_write - result_write_uart(%d) \n", result_write_uart);
	/* Test hook: latch the last UART result, sticking at -1 on failure. */
	if (result_write_uart != -1) {
		if (writecount == 0)
			result_write_uart = -1;
		else
			result_write_uart = writecount;
	}
	return result_write_uart;
#else
	return writecount;
#endif
}
/*
 * Release the minor number held by @idev back to the UIO IDR.
 * minor_lock serializes allocation and removal of minors.
 */
static void uio_free_minor(struct uio_device *idev)
{
	mutex_lock(&minor_lock);
	idr_remove(&uio_idr, idev->minor);
	mutex_unlock(&minor_lock);
}
/*
 * vb2 wait_finish callback: re-acquire the queue lock that was dropped by
 * the matching wait_prepare before vb2 slept.
 */
static void uvc_wait_finish(struct vb2_queue *vq)
{
	struct uvc_video_queue *q = vb2_get_drv_priv(vq);

	mutex_lock(&q->mutex);
}
/*
 * ag71xx_probe - platform probe for the Atheros AG71xx ethernet MAC.
 *
 * Maps the three register windows (mac_base, mac_base2, mii_ctrl), hooks
 * the interrupt, fills in the net_device ops and registers the device.
 * Unwinds everything in reverse order on failure.
 *
 * Fixes vs. previous version:
 *  - after ioremapping mac_base2 the code tested ag->mac_base (already
 *    known non-NULL) instead of ag->mac_base2, so a failed second mapping
 *    went undetected and would oops later;
 *  - the error path freed the net_device with kfree(); a device obtained
 *    from alloc_etherdev() must be released with free_netdev().
 */
static int __init ag71xx_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct resource *res;
	struct ag71xx *ag;
	struct ag71xx_platform_data *pdata;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data specified\n");
		err = -ENXIO;
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(*ag));
	if (!dev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		goto err_out;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	ag = netdev_priv(dev);
	ag->pdev = pdev;
	ag->dev = dev;
	ag->mii_bus = &ag71xx_mdio_bus->mii_bus;
	ag->msg_enable = netif_msg_init(ag71xx_debug,
					AG71XX_DEFAULT_MSG_ENABLE);
	spin_lock_init(&ag->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac_base");
	if (!res) {
		dev_err(&pdev->dev, "no mac_base resource found\n");
		err = -ENXIO;
		goto err_out;
	}

	ag->mac_base = ioremap_nocache(res->start, res->end - res->start + 1);
	if (!ag->mac_base) {
		dev_err(&pdev->dev, "unable to ioremap mac_base\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac_base2");
	if (!res) {
		dev_err(&pdev->dev, "no mac_base2 resource found\n");
		err = -ENXIO;
		goto err_unmap_base1;
	}

	ag->mac_base2 = ioremap_nocache(res->start, res->end - res->start + 1);
	if (!ag->mac_base2) {	/* was: !ag->mac_base — wrong pointer checked */
		dev_err(&pdev->dev, "unable to ioremap mac_base2\n");
		err = -ENOMEM;
		goto err_unmap_base1;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mii_ctrl");
	if (!res) {
		dev_err(&pdev->dev, "no mii_ctrl resource found\n");
		err = -ENXIO;
		goto err_unmap_base2;
	}

	ag->mii_ctrl = ioremap_nocache(res->start, res->end - res->start + 1);
	if (!ag->mii_ctrl) {
		dev_err(&pdev->dev, "unable to ioremap mii_ctrl\n");
		err = -ENOMEM;
		goto err_unmap_base2;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = request_irq(dev->irq, ag71xx_interrupt,
			  IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
			  dev->name, dev);
	if (err) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
		goto err_unmap_mii_ctrl;
	}

	dev->base_addr = (unsigned long)ag->mac_base;
	dev->open = ag71xx_open;
	dev->stop = ag71xx_stop;
	dev->hard_start_xmit = ag71xx_hard_start_xmit;
	dev->set_multicast_list = ag71xx_set_multicast_list;
	dev->do_ioctl = ag71xx_do_ioctl;
	dev->ethtool_ops = &ag71xx_ethtool_ops;

	netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	if (is_valid_ether_addr(pdata->mac_addr))
		memcpy(dev->dev_addr, pdata->mac_addr, ETH_ALEN);
	else {
		/* No valid address supplied: fabricate a locally unique one
		 * (de:ad:<random>:<random>:<random>:<pdev id>). */
		dev->dev_addr[0] = 0xde;
		dev->dev_addr[1] = 0xad;
		get_random_bytes(&dev->dev_addr[2], 3);
		dev->dev_addr[5] = pdev->id & 0xff;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register net device\n");
		goto err_free_irq;
	}

	printk(KERN_INFO "%s: Atheros AG71xx at 0x%08lx, irq %d\n",
	       dev->name, dev->base_addr, dev->irq);

	ag71xx_dump_regs(ag);

	ag71xx_hw_init(ag);

	ag71xx_dump_regs(ag);

	/* Reset the mdio bus explicitly */
	if (ag->mii_bus) {
		mutex_lock(&ag->mii_bus->mdio_lock);
		ag->mii_bus->reset(ag->mii_bus);
		mutex_unlock(&ag->mii_bus->mdio_lock);
	}

	err = ag71xx_phy_connect(ag);
	if (err)
		goto err_unregister_netdev;

	platform_set_drvdata(pdev, dev);

	return 0;

err_unregister_netdev:
	unregister_netdev(dev);
err_free_irq:
	free_irq(dev->irq, dev);
err_unmap_mii_ctrl:
	iounmap(ag->mii_ctrl);
err_unmap_base2:
	iounmap(ag->mac_base2);
err_unmap_base1:
	iounmap(ag->mac_base);
err_free_dev:
	free_netdev(dev);	/* was kfree(dev): netdevs need free_netdev() */
err_out:
	platform_set_drvdata(pdev, NULL);
	return err;
}
/*
 * intelfb_create - allocate and map the fbdev framebuffer for i915.
 *
 * Allocates a GEM object sized for the requested mode, pins it into the
 * GTT, wraps it in a drm_framebuffer and exposes it through a freshly
 * allocated fb_info.  Cleanup on error unwinds pin and GEM reference;
 * struct_mutex is held from the pin until success or the unref path.
 *
 * NOTE(review): on error paths after framebuffer_alloc()/fb_alloc_cmap()/
 * alloc_apertures() succeed, info, its cmap and apertures are not released
 * here — presumably the caller's teardown handles the helper's fbdev, but
 * that is not visible in this file; verify against the caller.
 */
static int intelfb_create(struct intel_fbdev *ifbdev,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = ifbdev->helper.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd mode_cmd;
	struct drm_i915_gem_object *obj;
	struct device *device = &dev->pdev->dev;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.bpp = sizes->surface_bpp;
	/* Pitch is rounded to 64 bytes as required by the display engine. */
	mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
	mode_cmd.depth = sizes->surface_depth;

	size = mode_cmd.pitch * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);
	obj = i915_gem_alloc_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&dev->struct_mutex);

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
	if (ret) {
		DRM_ERROR("failed to pin fb: %d\n", ret);
		goto out_unref;
	}

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = ifbdev;

	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
	if (ret)
		goto out_unpin;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
	ifbdev->helper.fbdev = info;

	strcpy(info->fix.id, "inteldrmfb");

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		/* fb_alloc_cmap's own code is discarded; report -ENOMEM. */
		ret = -ENOMEM;
		goto out_unpin;
	}
	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	info->apertures->ranges[0].base = dev->mode_config.fb_base;
	info->apertures->ranges[0].size =
		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	/* CPU access goes through the GTT aperture at the object's offset. */
	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
	info->fix.smem_len = size;

	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_unpin;
	}
	info->screen_size = size;

//	memset(info->screen_base, 0, size);

	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
		      fb->width, fb->height,
		      obj->gtt_offset, obj);

	mutex_unlock(&dev->struct_mutex);
	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_unpin:
	i915_gem_object_unpin(obj);
out_unref:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}
/*
 * irq_chip bus_lock callback: take the codec's IRQ mutex so that a series
 * of mask/unmask register updates is applied atomically.
 */
static void wcd9xxx_irq_lock(struct irq_data *data)
{
	struct wcd9xxx_core_resource *res = irq_data_get_irq_chip_data(data);

	mutex_lock(&res->irq_lock);
}
/*
 * bq27x00_battery_get_property - power_supply get_property callback.
 *
 * Refreshes the cached gauge readings if they are older than 5 seconds
 * (synchronously, under di->lock), then answers the requested property
 * from the cache.  Returns 0 on success, -ENODEV when the gauge is not
 * responding (cache.flags < 0) and the query is not PROP_PRESENT, or
 * -EINVAL for unknown properties.
 */
static int bq27x00_battery_get_property(struct power_supply *psy,
					enum power_supply_property psp,
					union power_supply_propval *val)
{
	int ret = 0;
	struct bq27x00_device_info *di = power_supply_get_drvdata(psy);

	mutex_lock(&di->lock);
	if (time_is_before_jiffies(di->last_update + 5 * HZ)) {
		/* Cache is stale: cancel the periodic poll and run it now. */
		cancel_delayed_work_sync(&di->work);
		bq27x00_battery_poll(&di->work.work);
	}
	mutex_unlock(&di->lock);

	/* Negative flags means the last register read failed; only PRESENT
	 * may still be answered (as "not present"). */
	if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0)
		return -ENODEV;

	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		ret = bq27x00_battery_status(di, val);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		ret = bq27x00_battery_voltage(di, val);
		break;
	case POWER_SUPPLY_PROP_PRESENT:
		val->intval = di->cache.flags < 0 ? 0 : 1;
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		ret = bq27x00_battery_current(di, val);
		break;
	case POWER_SUPPLY_PROP_CAPACITY:
		ret = bq27x00_simple_value(di->cache.capacity, val);
		break;
	case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
		ret = bq27x00_battery_capacity_level(di, val);
		break;
	case POWER_SUPPLY_PROP_TEMP:
		ret = bq27x00_simple_value(di->cache.temperature, val);
		/* Cache holds tenths of Kelvin; convert to tenths of Celsius
		 * (273.15 K == 2731 deci-Kelvin). */
		if (ret == 0)
			val->intval -= 2731;
		break;
	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
		ret = bq27x00_simple_value(di->cache.time_to_empty, val);
		break;
	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
		ret = bq27x00_simple_value(di->cache.time_to_empty_avg, val);
		break;
	case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
		ret = bq27x00_simple_value(di->cache.time_to_full, val);
		break;
	case POWER_SUPPLY_PROP_TECHNOLOGY:
		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
		break;
	case POWER_SUPPLY_PROP_CHARGE_NOW:
		/* Nominal available capacity is read from the chip on demand,
		 * not cached. */
		ret = bq27x00_simple_value(bq27x00_battery_read_nac(di), val);
		break;
	case POWER_SUPPLY_PROP_CHARGE_FULL:
		ret = bq27x00_simple_value(di->cache.charge_full, val);
		break;
	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
		ret = bq27x00_simple_value(di->charge_design_full, val);
		break;
	case POWER_SUPPLY_PROP_CYCLE_COUNT:
		ret = bq27x00_simple_value(di->cache.cycle_count, val);
		break;
	case POWER_SUPPLY_PROP_ENERGY_NOW:
		ret = bq27x00_simple_value(di->cache.energy, val);
		break;
	case POWER_SUPPLY_PROP_POWER_AVG:
		ret = bq27x00_simple_value(di->cache.power_avg, val);
		break;
	case POWER_SUPPLY_PROP_HEALTH:
		ret = bq27x00_simple_value(di->cache.health, val);
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
/*
 * Abort a pending NCP request with error @err, serialized against the
 * receive path by the current-request mutex.
 */
static inline void ncp_abort_request(struct ncp_server *server,
				     struct ncp_request_reply *req, int err)
{
	struct mutex *creq_mutex = &server->rcv.creq_mutex;

	mutex_lock(creq_mutex);
	__ncp_abort_request(server, req, err);
	mutex_unlock(creq_mutex);
}
/*
 * clnt_vc_call - perform one RPC over a connection-oriented (TCP) client.
 *
 * Serializes the call header, procedure number, credentials and arguments,
 * ships the record, then loops decoding replies until one matches our
 * transaction id.  On auth failure the credentials are refreshed and the
 * whole call is retried (up to 2 times).  In reentrant builds the fd is
 * protected by a per-fd lock taken with all signals blocked; every return
 * path releases it via release_fd_lock().
 */
static enum clnt_stat
clnt_vc_call(
	CLIENT *h,
	rpcproc_t proc,
	xdrproc_t xdr_args,
	const char *args_ptr,
	xdrproc_t xdr_results,
	caddr_t results_ptr,
	struct timeval timeout
)
{
	struct ct_data *ct;
	XDR *xdrs;
	struct rpc_msg reply_msg;
	u_int32_t x_id;
	u_int32_t *msg_x_id;
	bool_t shipnow;
	int refreshes = 2;
#ifdef _REENTRANT
	sigset_t mask, newmask;
#endif

	_DIAGASSERT(h != NULL);

	ct = (struct ct_data *) h->cl_private;

#ifdef _REENTRANT
	/* Block signals, then wait for exclusive use of this fd. */
	__clnt_sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
	vc_fd_locks[ct->ct_fd] = __rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
#endif

	xdrs = &(ct->ct_xdrs);
	msg_x_id = &ct->ct_u.ct_mcalli;

	/* A per-call timeout overrides the default unless the user pinned
	 * one with CLSET_TIMEOUT (ct_waitset). */
	if (!ct->ct_waitset) {
		if (time_not_ok(&timeout) == FALSE)
			ct->ct_wait = timeout;
	}

	/* Batched (no-reply, zero-timeout) calls are buffered, not shipped. */
	shipnow =
	    (xdr_results == NULL && timeout.tv_sec == 0
	    && timeout.tv_usec == 0) ? FALSE : TRUE;

call_again:
	xdrs->x_op = XDR_ENCODE;
	ct->ct_error.re_status = RPC_SUCCESS;
	/* xid lives pre-serialized in the stashed call header; decrement it
	 * for each new call. */
	x_id = ntohl(--(*msg_x_id));

	if ((! XDR_PUTBYTES(xdrs, ct->ct_u.ct_mcallc, ct->ct_mpos)) ||
	    (! XDR_PUTINT32(xdrs, (int32_t *)&proc)) ||
	    (! AUTH_MARSHALL(h->cl_auth, xdrs)) ||
	    (! (*xdr_args)(xdrs, __UNCONST(args_ptr)))) {
		if (ct->ct_error.re_status == RPC_SUCCESS)
			ct->ct_error.re_status = RPC_CANTENCODEARGS;
		(void)xdrrec_endofrecord(xdrs, TRUE);
		release_fd_lock(ct->ct_fd, mask);
		return (ct->ct_error.re_status);
	}
	if (! xdrrec_endofrecord(xdrs, shipnow)) {
		release_fd_lock(ct->ct_fd, mask);
		return (ct->ct_error.re_status = RPC_CANTSEND);
	}
	if (! shipnow) {
		release_fd_lock(ct->ct_fd, mask);
		return (RPC_SUCCESS);
	}
	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		release_fd_lock(ct->ct_fd, mask);
		return(ct->ct_error.re_status = RPC_TIMEDOUT);
	}

	/*
	 * Keep receiving until we get a valid transaction id
	 */
	xdrs->x_op = XDR_DECODE;
	for (;;) {
		reply_msg.acpted_rply.ar_verf = _null_auth;
		reply_msg.acpted_rply.ar_results.where = NULL;
		reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
		if (! xdrrec_skiprecord(xdrs)) {
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
		/* now decode and validate the response header */
		if (! xdr_replymsg(xdrs, &reply_msg)) {
			/* Garbled record with no error recorded: skip it and
			 * keep waiting for our reply. */
			if (ct->ct_error.re_status == RPC_SUCCESS)
				continue;
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
		if (reply_msg.rm_xid == x_id)
			break;
	}

	/*
	 * process header
	 */
	_seterr_reply(&reply_msg, &(ct->ct_error));
	if (ct->ct_error.re_status == RPC_SUCCESS) {
		if (! AUTH_VALIDATE(h->cl_auth,
		    &reply_msg.acpted_rply.ar_verf)) {
			ct->ct_error.re_status = RPC_AUTHERROR;
			ct->ct_error.re_why = AUTH_INVALIDRESP;
		} else if (! (*xdr_results)(xdrs, results_ptr)) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				ct->ct_error.re_status = RPC_CANTDECODERES;
		}
		/* free verifier ... */
		if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
			xdrs->x_op = XDR_FREE;
			(void)xdr_opaque_auth(xdrs,
			    &(reply_msg.acpted_rply.ar_verf));
		}
	} /* end successful completion */
	else {
		/* maybe our credentials need to be refreshed ... */
		if (refreshes-- && AUTH_REFRESH(h->cl_auth))
			goto call_again;
	} /* end of unsuccessful completion */
	release_fd_lock(ct->ct_fd, mask);
	return (ct->ct_error.re_status);
}
/*
 * clnt_vc_control - clnt_control() backend for TCP clients.
 *
 * Get/set handle options (timeouts, addresses, xid, prog/vers numbers).
 * The xid/vers/prog accessors poke directly into the pre-serialized call
 * header at fixed XDR offsets (see the in-line comments).  Returns TRUE on
 * success.  The per-fd lock is held for the duration and released on every
 * return path via release_fd_lock().
 */
static bool_t
clnt_vc_control(
	CLIENT *cl,
	u_int request,
	char *info
)
{
	struct ct_data *ct;
	void *infop = info;
#ifdef _REENTRANT
	sigset_t mask;
#endif
	sigset_t newmask;

	_DIAGASSERT(cl != NULL);

	ct = (struct ct_data *)cl->cl_private;

	/* Block signals and acquire exclusive use of this fd. */
	__clnt_sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
#ifdef _REENTRANT
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
	vc_fd_locks[ct->ct_fd] = __rpc_lock_value;
#endif
	mutex_unlock(&clnt_fd_lock);

	/* Requests that take no argument are handled first. */
	switch (request) {
	case CLSET_FD_CLOSE:
		ct->ct_closeit = TRUE;
		release_fd_lock(ct->ct_fd, mask);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		ct->ct_closeit = FALSE;
		release_fd_lock(ct->ct_fd, mask);
		return (TRUE);
	default:
		break;
	}

	/* for other requests which use info */
	if (info == NULL) {
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)(void *)info)) {
			release_fd_lock(ct->ct_fd, mask);
			return (FALSE);
		}
		ct->ct_wait = *(struct timeval *)infop;
		/* Pin the timeout so clnt_vc_call() stops honoring per-call
		 * timeouts. */
		ct->ct_waitset = TRUE;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)infop = ct->ct_wait;
		break;
	case CLGET_SERVER_ADDR:
		(void) memcpy(info, ct->ct_addr.buf, (size_t)ct->ct_addr.len);
		break;
	case CLGET_FD:
		*(int *)(void *)info = ct->ct_fd;
		break;
	case CLGET_SVC_ADDR:
		/* The caller should not free this memory area */
		*(struct netbuf *)(void *)info = ct->ct_addr;
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	case CLGET_XID:
		/*
		 * use the knowledge that xid is the
		 * first element in the call structure
		 * This will get the xid of the PREVIOUS call
		 */
		ntohlp(info, &ct->ct_u.ct_mcalli);
		break;
	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		/* increment by 1 as clnt_vc_call() decrements once */
		htonlp(&ct->ct_u.ct_mcalli, info, 1);
		break;
	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * begining of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		ntohlp(info, ct->ct_u.ct_mcallc + 4 * BYTES_PER_XDR_UNIT);
		break;

	case CLSET_VERS:
		htonlp(ct->ct_u.ct_mcallc + 4 * BYTES_PER_XDR_UNIT, info, 0);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * begining of the RPC header. MUST be changed if the
		 * call_struct is changed
		 */
		ntohlp(info, ct->ct_u.ct_mcallc + 3 * BYTES_PER_XDR_UNIT);
		break;

	case CLSET_PROG:
		htonlp(ct->ct_u.ct_mcallc + 3 * BYTES_PER_XDR_UNIT, info, 0);
		break;

	default:
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	}
	release_fd_lock(ct->ct_fd, mask);
	return (TRUE);
}
/*
 * Create a client handle for a connection.
 * Default options are set, which the user can change using clnt_control()'s.
 * The rpc/vc package does buffering similar to stdio, so the client
 * must pick send and receive buffer sizes, 0 => use the default.
 * NB: fd is copied into a private area.
 * NB: The rpch->cl_auth is set null authentication. Caller may wish to
 * set this something more useful.
 *
 * fd should be an open socket
 */
CLIENT *
clnt_vc_create(
	int fd,
	const struct netbuf *raddr,
	rpcprog_t prog,
	rpcvers_t vers,
	u_int sendsz,
	u_int recvsz
)
{
	CLIENT *h;
	struct ct_data *ct = NULL;
	struct rpc_msg call_msg;
#ifdef _REENTRANT
	sigset_t mask;
#endif
	sigset_t newmask;
	struct sockaddr_storage ss;
	socklen_t slen;
	struct __rpc_sockinfo si;

	_DIAGASSERT(raddr != NULL);

	h = mem_alloc(sizeof(*h));
	if (h == NULL) {
		warnx("clnt_vc_create: out of memory");
		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
		rpc_createerr.cf_error.re_errno = errno;
		goto fooy;
	}
	ct = mem_alloc(sizeof(*ct));
	if (ct == NULL) {
		warnx("clnt_vc_create: out of memory");
		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
		rpc_createerr.cf_error.re_errno = errno;
		goto fooy;
	}

	__clnt_sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
#ifdef _REENTRANT
	mutex_lock(&clnt_fd_lock);
	/* First client created in this process: lazily allocate the per-fd
	 * lock array and condition variables used by call/control/destroy. */
	if (vc_fd_locks == NULL) {
		size_t cv_allocsz, fd_allocsz;
		int dtbsize = __rpc_dtbsize();

		fd_allocsz = dtbsize * sizeof (int);
		vc_fd_locks = mem_alloc(fd_allocsz);
		if (vc_fd_locks == NULL) {
			goto blooy;
		} else
			memset(vc_fd_locks, '\0', fd_allocsz);

		_DIAGASSERT(vc_cv == NULL);
		cv_allocsz = dtbsize * sizeof (cond_t);
		vc_cv = mem_alloc(cv_allocsz);
		if (vc_cv == NULL) {
			mem_free(vc_fd_locks, fd_allocsz);
			vc_fd_locks = NULL;
			goto blooy;
		} else {
			int i;

			for (i = 0; i < dtbsize; i++)
				cond_init(&vc_cv[i], 0, (void *) 0);
		}
	} else
		_DIAGASSERT(vc_cv != NULL);
#endif

	/*
	 * XXX - fvdl connecting while holding a mutex?
	 */
	/* If the socket is not yet connected, connect it to raddr now. */
	slen = sizeof ss;
	if (getpeername(fd, (struct sockaddr *)(void *)&ss, &slen) < 0) {
		if (errno != ENOTCONN) {
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = errno;
			goto blooy;
		}
		if (connect(fd, (struct sockaddr *)raddr->buf, raddr->len) < 0){
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = errno;
			goto blooy;
		}
	}
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	if (!__rpc_fd2sockinfo(fd, &si))
		goto fooy;
	ct->ct_closeit = FALSE;

	/*
	 * Set up private data struct
	 */
	/* NOTE(review): only ct_wait.tv_usec is initialized here; tv_sec
	 * appears to rely on a later CLSET_TIMEOUT / clnt_vc_call() default —
	 * verify against callers before changing. */
	ct->ct_fd = fd;
	ct->ct_wait.tv_usec = 0;
	ct->ct_waitset = FALSE;
	ct->ct_addr.buf = malloc((size_t)raddr->maxlen);
	if (ct->ct_addr.buf == NULL)
		goto fooy;
	memcpy(ct->ct_addr.buf, raddr->buf, (size_t)raddr->len);
	ct->ct_addr.len = raddr->len;
	ct->ct_addr.maxlen = raddr->maxlen;

	/*
	 * Initialize call message
	 */
	call_msg.rm_xid = __RPC_GETXID();
	call_msg.rm_direction = CALL;
	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
	call_msg.rm_call.cb_prog = (u_int32_t)prog;
	call_msg.rm_call.cb_vers = (u_int32_t)vers;

	/*
	 * pre-serialize the static part of the call msg and stash it away
	 */
	xdrmem_create(&(ct->ct_xdrs), ct->ct_u.ct_mcallc, MCALL_MSG_SIZE,
	    XDR_ENCODE);
	if (! xdr_callhdr(&(ct->ct_xdrs), &call_msg)) {
		if (ct->ct_closeit) {
			(void)close(fd);
		}
		goto fooy;
	}
	ct->ct_mpos = XDR_GETPOS(&(ct->ct_xdrs));
	XDR_DESTROY(&(ct->ct_xdrs));

	/*
	 * Create a client handle which uses xdrrec for serialization
	 * and authnone for authentication.
	 */
	h->cl_ops = clnt_vc_ops();
	h->cl_private = ct;
	h->cl_auth = authnone_create();
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	xdrrec_create(&(ct->ct_xdrs), sendsz, recvsz,
	    h->cl_private, read_vc, write_vc);
	return (h);

blooy:
	/* Failure while holding clnt_fd_lock with signals blocked. */
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
fooy:
	/*
	 * Something goofed, free stuff and barf
	 */
	if (ct)
		mem_free(ct, sizeof(struct ct_data));
	if (h)
		mem_free(h, sizeof(CLIENT));
	return (NULL);
}
/*
 * Si47xx_dev_init - one-time driver initialization.
 *
 * Records the device/client/platform-data globals, resets the power and
 * seek state, and (when RDS_INTERRUPT_ON_ALWAYS is configured) allocates
 * the RDS circular buffers and the single-threaded workqueue that drains
 * the chip's RDS FIFO.  Returns 0 on success or -ENOMEM, with partial
 * allocations rolled back.
 */
int Si47xx_dev_init(struct Si47xx_device_t *si47xx_dev)
{
	int ret = 0;

	Si47xx_dev = si47xx_dev;
	Si47xx_dev->client = si47xx_dev->client;
	pSi47xxdata = si47xx_dev->pdata;
	si47xx_irq = Si47xx_dev->client->irq;

	debug("Si47xx_dev_init called");

	mutex_lock(&Si47xx_dev->lock);

	Si47xx_dev->state.power_state = RADIO_POWERDOWN;
	Si47xx_dev->state.seek_state = RADIO_SEEK_OFF;
	Si47xx_dev->valid_client_state = eTRUE;
	Si47xx_dev->valid = eFALSE;

#ifdef RDS_INTERRUPT_ON_ALWAYS
	/*Creating Circular Buffer */
	/*Single RDS_Block_Data buffer size is 4x16 bits */
	RDS_Block_Data_buffer = kzalloc(RDS_BUFFER_LENGTH * 8, GFP_KERNEL);
	if (!RDS_Block_Data_buffer) {
		dev_err(Si47xx_dev->dev, "Not sufficient memory for creating "
				"RDS_Block_Data_buffer");
		ret = -ENOMEM;
		goto EXIT;
	}

	/*Single RDS_Block_Error buffer size is 4x8 bits */
	RDS_Block_Error_buffer = kzalloc(RDS_BUFFER_LENGTH * 4, GFP_KERNEL);
	if (!RDS_Block_Error_buffer) {
		dev_err(Si47xx_dev->dev, "Not sufficient memory for creating "
				"RDS_Block_Error_buffer");
		ret = -ENOMEM;
		/* Roll back the data buffer allocated above. */
		kfree(RDS_Block_Data_buffer);
		goto EXIT;
	}

	/*Initialising read and write indices */
	RDS_Buffer_Index_read = 0;
	RDS_Buffer_Index_write = 0;

	/*Creating work-queue */
	Si47xx_wq = create_singlethread_workqueue("Si47xx_wq");
	if (!Si47xx_wq) {
		dev_err(Si47xx_dev->dev,
			"Not sufficient memory for Si47xx_wq, work-queue");
		ret = -ENOMEM;
		/* Roll back both RDS buffers. */
		kfree(RDS_Block_Error_buffer);
		kfree(RDS_Block_Data_buffer);
		goto EXIT;
	}

	/*Initialising work_queue */
	INIT_WORK(&Si47xx_work, Si47xx_work_func);

	RDS_Data_Available = 0;
	RDS_Data_Lost = 0;
	RDS_Groups_Available_till_now = 0;
EXIT:
#endif

	mutex_unlock(&(Si47xx_dev->lock));
	debug("Si47xx_dev_init call over");
	return ret;
}
void Si47xx_work_func(struct work_struct *work) { struct radio_data_t rds_data; int i = 0; u8 RdsFifoUsed; #ifdef RDS_TESTING u8 group_type; #endif debug_rds("%s", __func__); mutex_lock(&(Si47xx_dev->lock)); if (Si47xx_dev->valid == eFALSE) { dev_err(Si47xx_dev->dev, "Si47xx_dev_RDS_data_get called when DS is invalid"); return; } if (RDS_Data_Lost > 1) debug_rds("No_of_RDS_groups_Lost till now : %d", RDS_Data_Lost); fmRdsStatus(1, 0, &rds_data, &RdsFifoUsed); /* RDSR bit and RDS Block data, so reading the RDS registers */ do { /* Writing into RDS_Block_Data_buffer */ i = 0; RDS_Block_Data_buffer[i++ + 4 * RDS_Buffer_Index_write] = rds_data.rdsa; RDS_Block_Data_buffer[i++ + 4 * RDS_Buffer_Index_write] = rds_data.rdsb; RDS_Block_Data_buffer[i++ + 4 * RDS_Buffer_Index_write] = rds_data.rdsc; RDS_Block_Data_buffer[i++ + 4 * RDS_Buffer_Index_write] = rds_data.rdsd; /*Writing into RDS_Block_Error_buffer */ i = 0; RDS_Block_Error_buffer[i++ + 4 * RDS_Buffer_Index_write] = rds_data.blera; RDS_Block_Error_buffer[i++ + 4 * RDS_Buffer_Index_write] = rds_data.blerb; RDS_Block_Error_buffer[i++ + 4 * RDS_Buffer_Index_write] = rds_data.blerc; RDS_Block_Error_buffer[i++ + 4 * RDS_Buffer_Index_write] = rds_data.blerd; fmRdsStatus(1, 0, &rds_data, &RdsFifoUsed); } while (RdsFifoUsed != 0); #ifdef RDS_TESTING if (RDS_Block_Error_buffer [0 + 4 * RDS_Buffer_Index_write] < 3) { debug_rds("PI Code is %d", RDS_Block_Data_buffer[0 + 4 * RDS_Buffer_Index_write]); } if (RDS_Block_Error_buffer [1 + 4 * RDS_Buffer_Index_write] < 2) { group_type = RDS_Block_Data_buffer[1 + 4 * RDS_Buffer_Index_write] >> 11; if (group_type & 0x01) { debug_rds("PI Code is %d", RDS_Block_Data_buffer[2 + 4 * RDS_Buffer_Index_write]); } if (group_type == GROUP_TYPE_2A || group_type == GROUP_TYPE_2B) { if (RDS_Block_Error_buffer [2 + 4 * RDS_Buffer_Index_write] < 3) { debug_rds("Update RT with RDSC"); } else { debug_rds("RDS_Block_Error_buffer" " of Block C is greater than 3"); } } }
/**
 * regcache_mark_dirty: Mark the register cache as dirty
 *
 * @map: map to mark
 *
 * Mark the register cache as dirty, for example due to the device
 * having been powered down for suspend. If the cache is not marked
 * as dirty then the cache sync will be suppressed.
 */
void regcache_mark_dirty(struct regmap *map)
{
	/* The flag is written under the regmap lock so it is consistent
	 * with any concurrent register access or cache sync. */
	mutex_lock(&map->lock);
	map->cache_dirty = true;
	mutex_unlock(&map->lock);
}
/*
 * ncpdgram_rcv_proc - receive worker for datagram (IPX/UDP style) NCP.
 *
 * Drains the socket: peeks at each packet header, answers server
 * watchdogs, discards unexpected packets, and matches POSITIVE_ACK /
 * REPLY packets against the current outstanding request under
 * rcv.creq_mutex.  Verified replies complete the request and kick the
 * next queued one.
 */
void ncpdgram_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);
	struct socket* sock;

	sock = server->ncp_sock;

	while (1) {
		struct ncp_reply_header reply;
		int result;

		/* Peek first so a short/bogus packet can be classified before
		 * it is consumed. */
		result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
		if (result < 0) {
			break;
		}
		if (result >= sizeof(reply)) {
			struct ncp_request_reply *req;

			if (reply.type == NCP_WATCHDOG) {
				unsigned char buf[10];

				if (server->connection != get_conn_number(&reply)) {
					goto drop;
				}
				result = _recv(sock, buf, sizeof(buf), MSG_DONTWAIT);
				if (result < 0) {
					DPRINTK("recv failed with %d\n", result);
					continue;
				}
				if (result < 10) {
					DPRINTK("too short (%u) watchdog packet\n", result);
					continue;
				}
				if (buf[9] != '?') {
					DPRINTK("bad signature (%02X) in watchdog packet\n", buf[9]);
					continue;
				}
				/* Answer the keep-alive: echo with 'Y'. */
				buf[9] = 'Y';
				_send(sock, buf, sizeof(buf));
				continue;
			}
			if (reply.type != NCP_POSITIVE_ACK && reply.type != NCP_REPLY) {
				/* Not for us; consume into the scratch buffer
				 * and hand to the unexpected-packet handler. */
				result = _recv(sock, server->unexpected_packet.data, sizeof(server->unexpected_packet.data), MSG_DONTWAIT);
				if (result < 0) {
					continue;
				}
				info_server(server, 0, server->unexpected_packet.data, result);
				continue;
			}
			mutex_lock(&server->rcv.creq_mutex);
			req = server->rcv.creq;
			/* ALLOC_SLOT replies carry no sequence yet; everything
			 * else must match our sequence and connection. */
			if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence && server->connection == get_conn_number(&reply)))) {
				if (reply.type == NCP_POSITIVE_ACK) {
					/* Server is alive but slow: rearm the
					 * RPC timeout at its maximum. */
					server->timeout_retries = server->m.retry_count;
					server->timeout_last = NCP_MAX_RPC_TIMEOUT;
					mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT);
				} else if (reply.type == NCP_REPLY) {
					result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
					if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
						if (result < 8 + 8) {
							result = -EIO;
						} else {
							unsigned int hdrl;

							result -= 8;
							/* Header length differs
							 * between UDP and IPX. */
							hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
							if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) {
								printk(KERN_INFO "ncpfs: Signature violation\n");
								result = -EIO;
							}
						}
					}
#endif
					del_timer(&server->timeout_tm);
					server->rcv.creq = NULL;
					ncp_finish_request(server, req, result);
					__ncp_next_request(server);
					mutex_unlock(&server->rcv.creq_mutex);
					continue;
				}
			}
			mutex_unlock(&server->rcv.creq_mutex);
		}
drop:;
		/* Consume (and discard) the packet that was only peeked. */
		_recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
	}
}
/*
 * Create (or recreate) the window, D3D11 device and swap chain.
 *
 * Any previous device state is torn down first, the d3d11.dll entry point
 * is lazily resolved under d3d11_mutex, the device/swap chain pair is
 * created, frame latency is capped at 1, and the graphics context provider
 * is wired up.
 *
 * Fix vs. previous version: d3d11_dll is an HMODULE returned by
 * LoadLibrary(); on the error path it was released with CloseHandle(),
 * which is invalid for module handles — it must be FreeLibrary().
 */
void D3DDisplayWindowProvider::create(DisplayWindowSite *new_site, const DisplayWindowDescription &description)
{
	site = new_site;

	// Drop every resource tied to a previous device before recreating.
	if (device)
		D3DShareList::device_destroyed(device);
	info_queue.clear();
	debug.clear();
	back_buffer_rtv.clear();
	fake_front_buffer.clear();
	back_buffer.clear();
	swap_chain.clear();
	device_context.clear();
	device.clear();

	window.create(site, description);

	use_fake_front_buffer = description.is_update_supported();

	D3D_FEATURE_LEVEL request_levels[] = { D3D_FEATURE_LEVEL_11_0, D3D_FEATURE_LEVEL_10_1, D3D_FEATURE_LEVEL_10_0 };

	DXGI_SWAP_CHAIN_DESC swap_chain_description;
	swap_chain_description.BufferCount = description.get_flipping_buffers();
	swap_chain_description.BufferDesc.Width = 0;
	swap_chain_description.BufferDesc.Height = 0;
	swap_chain_description.BufferDesc.RefreshRate.Numerator = 60;
	swap_chain_description.BufferDesc.RefreshRate.Denominator = 1;
	swap_chain_description.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM; // DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
	swap_chain_description.BufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
	swap_chain_description.BufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
	swap_chain_description.SampleDesc.Count = 1;
	swap_chain_description.SampleDesc.Quality = 0;
	swap_chain_description.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
	swap_chain_description.OutputWindow = window.get_hwnd();
	swap_chain_description.Windowed = TRUE; // Seems the documentation wants us to call IDXGISwapChain::SetFullscreenState afterwards
	swap_chain_description.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
	swap_chain_description.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;

	bool debug_mode = false; // To do: fetch this from DisplayWindowDescription using same method as clanGL (or maybe promote debug flag to clanDisplay?)

	UINT device_flags = 0;
	if (debug_mode)
		device_flags |= D3D11_CREATE_DEVICE_DEBUG;

	std::unique_lock<std::recursive_mutex> mutex_lock(d3d11_mutex);
	if (d3d11_dll == 0)
	{
		d3d11_dll = LoadLibrary(L"d3d11.dll");
		if (d3d11_dll == 0)
			throw Exception("Unable to load d3d11.dll");

		try
		{
			d3d11_createdeviceandswapchain = reinterpret_cast<FuncD3D11CreateDeviceAndSwapChain>(GetProcAddress(d3d11_dll, "D3D11CreateDeviceAndSwapChain"));
			if (d3d11_createdeviceandswapchain == 0)
				throw Exception("D3D11CreateDeviceAndSwapChain function not found!");
		}
		catch (...)
		{
			// HMODULEs from LoadLibrary must be released with
			// FreeLibrary, not CloseHandle.
			FreeLibrary(d3d11_dll);
			d3d11_dll = 0;
			d3d11_createdeviceandswapchain = 0;
			throw;
		}
	}

	HRESULT result = d3d11_createdeviceandswapchain(
		0,
		D3D_DRIVER_TYPE_HARDWARE,
		0,
		device_flags,
		request_levels, 3,
		D3D11_SDK_VERSION,
		&swap_chain_description,
		swap_chain.output_variable(),
		device.output_variable(),
		&feature_level,
		device_context.output_variable());
	D3DTarget::throw_if_failed("D3D11CreateDeviceAndSwapChain failed", result);

	if (debug_mode)
	{
		result = device->QueryInterface(__uuidof(ID3D11Debug), (void**)debug.output_variable());
		if (FAILED(result))
			debug.clear(); // No debug info available.  Should this throw an exception instead?
		result = device->QueryInterface(__uuidof(ID3D11InfoQueue), (void**)info_queue.output_variable());
		if (FAILED(result))
			info_queue.clear(); // No debug messages available.
	}

	// Disable mouse lag (no, 3 frames rendered ahead is NOT a good default Microsoft):
	ComPtr<IDXGIDevice1> dxgi_device;
	result = swap_chain->GetDevice(__uuidof(IDXGIDevice1), (void**)dxgi_device.output_variable());
	D3DTarget::throw_if_failed("Unable to retrieve IDXGIDevice1 from swap chain", result);
	dxgi_device->SetMaximumFrameLatency(1);

	create_swap_chain_buffers();

	// Prevent DXGI from responding to an alt-enter sequence.
	ComPtr<IDXGIAdapter> dxgi_adapter;
	result = dxgi_device->GetParent(__uuidof(IDXGIAdapter), (void **)dxgi_adapter.output_variable());
	if (SUCCEEDED(result))
	{
		ComPtr<IDXGIFactory> dxgi_factory;
		result = dxgi_adapter->GetParent(__uuidof(IDXGIFactory), (void **)dxgi_factory.output_variable());
		if (SUCCEEDED(result))
		{
			dxgi_factory->MakeWindowAssociation(window.get_hwnd(), DXGI_MWA_NO_ALT_ENTER);
		}
	}

	gc = GraphicContext(new D3DGraphicContextProvider(this, description));

	if (description.is_fullscreen())
		swap_chain->SetFullscreenState(TRUE, 0);

	D3DGraphicContextProvider *d3d_gc = static_cast<D3DGraphicContextProvider*>(gc.get_provider());
	d3d_gc->standard_programs = StandardPrograms(gc);
}
/*
 * crystalcove_chgr_usb_set_property - power_supply set_property callback.
 * @psy: the USB charger power_supply this driver registered
 * @psp: which property userspace/the framework wants to set
 * @val: the new value (only ->intval is used here)
 *
 * Most properties are simply cached in the driver's chgr_info so that
 * the matching get_property callback can report them back.  The two
 * ENABLE_* properties additionally program the hardware via
 * crystalcove_chgr_enable_charging().
 *
 * Returns 0 on success, a negative errno from the charging-enable path,
 * or -EINVAL for an unsupported property.
 */
static int crystalcove_chgr_usb_set_property(struct power_supply *psy,
				enum power_supply_property psp,
				const union power_supply_propval *val)
{
	struct chgr_info *info = container_of(psy, struct chgr_info, psy_usb);
	int ret = 0;

	/* Serialize against the get_property path and other writers. */
	mutex_lock(&info->lock);
	switch (psp) {
	case POWER_SUPPLY_PROP_PRESENT:
		info->present = val->intval;
		break;
	case POWER_SUPPLY_PROP_ONLINE:
		info->online = val->intval;
		break;
	case POWER_SUPPLY_PROP_ENABLE_CHARGING:
		ret = crystalcove_chgr_enable_charging(info, val->intval);
		if (ret < 0)
			dev_warn(&info->pdev->dev, "crystalcove enable charging failed\n");
		/*
		 * NOTE(review): the requested state is cached even when the
		 * hardware call failed — confirm this is intentional.
		 */
		info->is_charging_enabled = val->intval;
		break;
	case POWER_SUPPLY_PROP_ENABLE_CHARGER:
		ret = crystalcove_chgr_enable_charging(info, val->intval);
		if (ret < 0)
			dev_warn(&info->pdev->dev, "crystalcove enable charger failed\n");
		/* Same caveat as above: cached regardless of ret. */
		info->is_charger_enabled = val->intval;
		break;
	case POWER_SUPPLY_PROP_CHARGE_CURRENT:
		info->cc = val->intval;
		break;
	case POWER_SUPPLY_PROP_INLMT:
		/* Input current limit from the charging framework. */
		info->inlmt = val->intval;
		break;
	case POWER_SUPPLY_PROP_CHARGE_VOLTAGE:
		info->cv = val->intval;
		break;
	case POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT:
		info->max_cc = val->intval;
		break;
	case POWER_SUPPLY_PROP_MAX_CHARGE_VOLTAGE:
		info->max_cv = val->intval;
		break;
	case POWER_SUPPLY_PROP_CHARGE_TERM_CUR:
		/* Charge-termination current. */
		info->iterm = val->intval;
		break;
	case POWER_SUPPLY_PROP_CABLE_TYPE:
		/* Cable type also determines the advertised supply type. */
		info->cable_type = val->intval;
		info->psy_usb.type = get_power_supply_type(info->cable_type);
		break;
	case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
		/* Thermal-throttling state index. */
		info->cntl_state = val->intval;
		break;
	case POWER_SUPPLY_PROP_MAX_TEMP:
		info->max_temp = val->intval;
		break;
	case POWER_SUPPLY_PROP_MIN_TEMP:
		info->min_temp = val->intval;
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&info->lock);
	return ret;
}
/*
 * wcd9xxx_nested_irq_lock - take the core resource's nested-IRQ mutex.
 * @wcd9xxx_res: codec core resource owning the lock
 *
 * Thin wrapper; callers must pair it with wcd9xxx_nested_irq_unlock().
 */
void wcd9xxx_nested_irq_lock(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	mutex_lock(&wcd9xxx_res->nested_irq_lock);
}
static int crystalcove_chgr_usb_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct chgr_info *info = container_of(psy, struct chgr_info, psy_usb); int ret = 0; mutex_lock(&info->lock); switch (psp) { case POWER_SUPPLY_PROP_PRESENT: ret = crystal_cove_vbus_on_status(); if (ret < 0) goto psy_get_prop_fail; val->intval = ret; break; case POWER_SUPPLY_PROP_ONLINE: val->intval = info->online; break; case POWER_SUPPLY_PROP_HEALTH: val->intval = get_charger_health(info); break; case POWER_SUPPLY_PROP_MAX_CHARGE_CURRENT: val->intval = info->max_cc; break; case POWER_SUPPLY_PROP_MAX_CHARGE_VOLTAGE: val->intval = info->max_cv; break; case POWER_SUPPLY_PROP_CHARGE_CURRENT: val->intval = info->cc; break; case POWER_SUPPLY_PROP_CHARGE_VOLTAGE: val->intval = info->cv; break; case POWER_SUPPLY_PROP_INLMT: val->intval = info->inlmt; break; case POWER_SUPPLY_PROP_CHARGE_TERM_CUR: val->intval = info->iterm; break; case POWER_SUPPLY_PROP_CABLE_TYPE: val->intval = info->cable_type; break; case POWER_SUPPLY_PROP_ENABLE_CHARGING: val->intval = info->is_charging_enabled; break; case POWER_SUPPLY_PROP_ENABLE_CHARGER: val->intval = info->is_charger_enabled; break; case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT: val->intval = info->cntl_state; break; case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX: val->intval = info->pdata->num_throttle_states; break; case POWER_SUPPLY_PROP_MAX_TEMP: val->intval = info->max_temp; break; case POWER_SUPPLY_PROP_MIN_TEMP: val->intval = info->min_temp; break; default: mutex_unlock(&info->lock); return -EINVAL; } psy_get_prop_fail: mutex_unlock(&info->lock); return ret; }
/*
 * omapdss_unregister_display - remove a display device from the panel list.
 * @dssdev: the display device to unlink
 *
 * Removal is done under panel_list_mutex so concurrent walkers of the
 * panel list never see a half-unlinked entry.
 */
void omapdss_unregister_display(struct omap_dss_device *dssdev)
{
	mutex_lock(&panel_list_mutex);
	list_del(&dssdev->panel_list);
	mutex_unlock(&panel_list_mutex);
}
/*
 * scsi_disk_lock - pin and lock the SCSI device behind a block device.
 * @bdev: block device whose privdata is the scsi_dev
 *
 * Bumps the device's use count before taking its mutex so the device
 * cannot be released while the caller holds the lock.  Callers are
 * expected to undo both (unlock + use-count decrement) when done.
 */
static void scsi_disk_lock(struct block_dev *bdev)
{
	struct scsi_dev *sdev = bdev->privdata;
	scsi_dev_use_inc(sdev);
	mutex_lock(&sdev->m);
}
/*
 * uvc_free_buffers - release all videobuf2 buffers of a UVC queue.
 * @queue: the video queue whose vb2 queue is torn down
 *
 * vb2_queue_release() is called under the queue mutex to serialize
 * against concurrent queue operations.
 */
void uvc_free_buffers(struct uvc_video_queue *queue)
{
	mutex_lock(&queue->mutex);
	vb2_queue_release(&queue->queue);
	mutex_unlock(&queue->mutex);
}
/*
 * unbounded_buffer_done - mark one worker as finished with the buffer.
 * @b: the buffer whose active-worker count is decremented
 *
 * NOTE(review): this only decrements under worker_mutex; it does not
 * signal any condition variable, so whoever waits for workers == 0
 * presumably polls or is woken elsewhere — verify against the callers
 * (e.g. the termination loop in crawl()).
 */
void unbounded_buffer_done(unbounded_buffer_t *b)
{
	mutex_lock(&b->worker_mutex);
	b->workers--;
	mutex_unlock(&b->worker_mutex);
}
/*
 * felica_read - read data received on the FeliCa UART into a user buffer.
 * @fp:    file pointer (validated, otherwise unused)
 * @buf:   user-space destination buffer
 * @count: number of bytes requested; capped at RECEIVE_BUFFER_MAX_SIZE
 * @pos:   file position (validated, otherwise unused)
 *
 * Reads up to @count bytes from the UART under felica_mutex and copies
 * the bytes actually read to user space.
 *
 * Returns the number of bytes read, 0 when no data is buffered, or a
 * negative errno on failure.
 */
static ssize_t felica_read(struct file *fp, char *buf, size_t count, loff_t *pos)
{
	int rc = 0;
	int readcount = 0;

#ifdef FEATURE_DEBUG_LOW
	FELICA_DEBUG_MSG("[FELICA] felica_read - start \n");
#endif

	/*
	 * Check input parameters.  Fix: return -EINVAL instead of -1;
	 * -1 is -EPERM and misled userspace about the failure cause.
	 */
	if (NULL == fp) {
		FELICA_DEBUG_MSG("[FELICA] ERROR - fp \n");
		return -EINVAL;
	}

	if (NULL == buf) {
		FELICA_DEBUG_MSG("[FELICA] ERROR - buf \n");
		return -EINVAL;
	}

	if (count > RECEIVE_BUFFER_MAX_SIZE) {
		FELICA_DEBUG_MSG("[FELICA] ERROR - count \n");
		return -EINVAL;
	}

	if (NULL == pos) {
		FELICA_DEBUG_MSG("[FELICA] ERROR - pos \n");
		return -EINVAL;
	}

	memset(receive_buf, 0, sizeof(receive_buf));

	/* Copy UART receive data to the driver's receive buffer. */
	mutex_lock(&felica_mutex);
	readcount = felica_uart_read(receive_buf, count);
	mutex_unlock(&felica_mutex);

	if (0 >= readcount) {
		FELICA_DEBUG_MSG("[FELICA] ERROR - No data in data buffer \n");
		return 0;
	}

	/* Display raw data for debugging. */
#ifdef RXTX_LOG_ENABLE
	{
		int i = 0;
		char *ptr = receive_buf;

		FELICA_DEBUG_MSG("===== READ FELICA LOW DATA =====\n");
		/* Fix: dump only the bytes actually read, not all of count. */
		for (i = 0; i < readcount; i++) {
			FELICA_DEBUG_MSG(" %02x", *ptr++);
			if (0 == (i + 1) % 10) {
				FELICA_DEBUG_MSG("\n");
			}
		}
		FELICA_DEBUG_MSG("\n");
	}
#endif

	/*
	 * Copy the received bytes to user memory.  Fixes:
	 * - copy only the readcount valid bytes (the tail of receive_buf
	 *   is just the memset padding);
	 * - on failure return -EFAULT, not the positive remainder from
	 *   copy_to_user, which a read() caller would interpret as a
	 *   successful byte count.
	 */
	rc = copy_to_user(buf, receive_buf, readcount);
	if (rc) {
		FELICA_DEBUG_MSG("[FELICA] ERROR - copy_to_user \n");
		return -EFAULT;
	}

#ifdef FEATURE_DEBUG_LOW
	FELICA_DEBUG_MSG("[FELICA] felica_read - end \n");
#endif

#ifdef FELICA_FN_DEVICE_TEST
	/* Record the result for the device self-test hook. */
	if (result_read_uart != -1)
		result_read_uart = readcount;
#ifdef FEATURE_DEBUG_LOW
	FELICA_DEBUG_MSG("[FELICA] felica_read - result_read_uart(%d) \n", result_read_uart);
#endif
	return result_read_uart;
#else
	return readcount;
#endif
}
/*
 * crawl - breadth-first web crawl driven by worker thread pools.
 * @start_url:        seed URL; duplicated onto the URL queue
 * @download_workers: number of downloader threads to spawn
 * @parse_workers:    number of parser threads to spawn
 * @queue_size:       capacity of the bounded URL queue
 * @_fetch_fn:        callback that fetches a URL and returns its page body
 * @_edge_fn:         callback invoked for each discovered (from, to) link
 *
 * Sets up the URL queue, page queue and visited-URL set, spawns the
 * workers, then loops until both queues are empty AND no worker is
 * actively processing an item.  At that point it sets the queues' done
 * flags, wakes every blocked worker, joins all threads and tears the
 * structures down.
 *
 * Returns 0.
 */
int crawl(char *start_url, int download_workers, int parse_workers, int queue_size, char *(*_fetch_fn)(char *url), void (*_edge_fn)(char *from, char *to))
{
	int i;
	bounded_buffer_t url_queue;
	unbounded_buffer_t page_queue;
	hashset_t url_set;

	bounded_buffer_init(&url_queue, queue_size);
	unbounded_buffer_init(&page_queue);
	hashset_init(&url_set, HASHSET_BUCKETS);

	/* Seed the crawl; the queue owns its own copy of the URL. */
	bounded_buffer_put(&url_queue, (void *)str_duplicate(start_url));

	/* done_cond is the main thread's wakeup for re-checking termination. */
	mutex_t done_mutex;
	cond_t done_cond;
	mutex_init(&done_mutex);
	cond_init(&done_cond);

	/* All workers share one read-only argument struct. */
	struct input_args in_args;
	in_args.url_queue = &url_queue;
	in_args.page_queue = &page_queue;
	in_args.url_set = &url_set;
	in_args.fetch = _fetch_fn;
	in_args.edge = _edge_fn;
	in_args.done_mutex = &done_mutex;
	in_args.done_cond = &done_cond;

	thread_t downloaders[download_workers];
	thread_t parsers[parse_workers];
	for (i = 0; i < download_workers; i++)
		thread_create(&downloaders[i], downloader, (void *)&in_args);
	for (i = 0; i < parse_workers; i++)
		thread_create(&parsers[i], parser, (void *)&in_args);

	/*
	 * Termination check.  All five locks are taken (in a fixed order:
	 * done_mutex, then url_queue's, then page_queue's) so the
	 * "both queues empty and idle" condition is read atomically.
	 * NOTE(review): workers must broadcast done_cond after finishing
	 * an item, and must take these locks in the same order, or this
	 * loop can miss a wakeup / deadlock — verify in downloader/parser.
	 */
	while (1) {
		mutex_lock(&done_mutex);
		mutex_lock(&url_queue.mutex);
		mutex_lock(&url_queue.worker_mutex);
		mutex_lock(&page_queue.mutex);
		mutex_lock(&page_queue.worker_mutex);
		if (url_queue.count == 0 && url_queue.workers == 0 && page_queue.count == 0 && page_queue.workers == 0) {
			/* Crawl finished: flag done and wake every blocked worker. */
			url_queue.done = 1;
			page_queue.done = 1;
			cond_broadcast(&url_queue.empty);
			cond_broadcast(&url_queue.fill);
			cond_broadcast(&page_queue.fill);
			mutex_unlock(&url_queue.mutex);
			mutex_unlock(&url_queue.worker_mutex);
			mutex_unlock(&page_queue.mutex);
			mutex_unlock(&page_queue.worker_mutex);
			mutex_unlock(&done_mutex);
			break;
		} else {
			/* Release queue locks, then sleep until a worker signals. */
			mutex_unlock(&url_queue.mutex);
			mutex_unlock(&url_queue.worker_mutex);
			mutex_unlock(&page_queue.mutex);
			mutex_unlock(&page_queue.worker_mutex);
			cond_wait(&done_cond, &done_mutex);
			mutex_unlock(&done_mutex);
		}
	}

	for (i = 0; i < download_workers; i++)
		thread_join(downloaders[i], NULL);
	for (i = 0; i < parse_workers; i++)
		thread_join(parsers[i], NULL);

	bounded_buffer_destroy(&url_queue);
	unbounded_buffer_destroy(&page_queue);
	hashset_destroy(&url_set);
	return 0;
}
/* * Description : MFC calls this function using available method(int available()) of FileOutputStream class * Input : None * Output : Return the number of byte that can be read. */ static long felica_ioctl (struct file *fp, unsigned int cmd, unsigned long arg) { int numofreceiveddata = 0; int rc = 0; int *uarg = (int *)arg; #ifdef FEATURE_DEBUG_LOW FELICA_DEBUG_MSG("[FELICA] felica_ioctl - start \n"); #endif /* Check input parameters */ if(NULL == fp) { FELICA_DEBUG_MSG("[FELICA] ERROR - fp \n"); return -1; } if(IOCTL_FELICA_MAGIC != _IOC_TYPE(cmd)) { FELICA_DEBUG_MSG("[FELICA] ERROR - IO cmd type \n"); return -1; } if(IOCTL_FELICA_CMD_AVAILABLE != _IOC_NR(cmd)) { FELICA_DEBUG_MSG("[FELICA] ERROR - IO cmd number \n"); return -1; } if(0 != _IOC_SIZE(cmd)) { FELICA_DEBUG_MSG("[FELICA] ERROR - IO cmd size \n"); return -1; } mutex_lock(&felica_mutex); rc = felica_uart_ioctrl(&numofreceiveddata); mutex_unlock(&felica_mutex); if (rc) { FELICA_DEBUG_MSG("[FELICA] ERROR - felica_uart_ioctrl \n"); return rc; } //mdelay(20); rc = copy_to_user(uarg, &numofreceiveddata, sizeof(int)); if(rc) { FELICA_DEBUG_MSG("[FELICA] ERROR - open_hs_uart \n"); return rc; } #ifdef FEATURE_DEBUG_LOW FELICA_DEBUG_MSG("[FELICA] felica_ioctl - end \n"); #endif #ifdef FELICA_FN_DEVICE_TEST return result_available_uart; #else return rc; #endif }
static long ssp_temphumidity_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct ssp_data *data = container_of(file->private_data, struct ssp_data, shtc1_device); void __user *argp = (void __user *)arg; int retries = 2; int length; int ret = 0; if (data->bulk_buffer == NULL){ pr_err("[SSP] %s, buffer is null\n", __func__); return -EINVAL; } length = data->bulk_buffer->len; mutex_lock(&data->bulk_temp_read_lock); switch (cmd) { case IOCTL_READ_COMPLETE: /* free */ if(data->bulk_buffer) { kfree(data->bulk_buffer); data->bulk_buffer = NULL; } length = 1; break; case IOCTL_READ_ADC_BATT_DATA: while (retries--) { ret = copy_to_user(argp, data->bulk_buffer->batt, data->bulk_buffer->len*2); if (likely(!ret)) break; } if (unlikely(ret)) { pr_err("[SSP] read bluk adc1 data err(%d)", ret); goto ioctl_error; } break; case IOCTL_READ_ADC_CHG_DATA: while (retries--) { ret = copy_to_user(argp, data->bulk_buffer->chg, data->bulk_buffer->len*2); if (likely(!ret)) break; } if (unlikely(ret)) { pr_err("[SSP] read bluk adc1 data err(%d)", ret); goto ioctl_error; } break; case IOCTL_READ_THM_SHTC1_DATA: while (retries--) { ret = copy_to_user(argp, data->bulk_buffer->temp, data->bulk_buffer->len*2); if (likely(!ret)) break; } if (unlikely(ret)) { pr_err("[SSP] read bluk adc1 data err(%d)", ret); goto ioctl_error; } break; case IOCTL_READ_HUM_SHTC1_DATA: while (retries--) { ret = copy_to_user(argp, data->bulk_buffer->humidity, data->bulk_buffer->len*2); if (likely(!ret)) break; } if (unlikely(ret)) { pr_err("[SSP] read bluk adc1 data err(%d)", ret); goto ioctl_error; } break; case IOCTL_READ_THM_BARO_DATA: while (retries--) { ret = copy_to_user(argp, data->bulk_buffer->baro, data->bulk_buffer->len*2); if (likely(!ret)) break; } if (unlikely(ret)) { pr_err("[SSP] read bluk adc1 data err(%d)", ret); goto ioctl_error; } break; case IOCTL_READ_THM_GYRO_DATA: while (retries--) { ret = copy_to_user(argp, data->bulk_buffer->gyro, data->bulk_buffer->len*2); if (likely(!ret)) 
break; } if (unlikely(ret)) { pr_err("[SSP] read bluk adc1 data err(%d)", ret); goto ioctl_error; } break; default: pr_err("[SSP] temp ioctl cmd err(%d)", cmd); ret = EINVAL; goto ioctl_error; } mutex_unlock(&data->bulk_temp_read_lock); return length; ioctl_error: mutex_unlock(&data->bulk_temp_read_lock); return -ret; }
/*
 * init_display - program the SSD1306 power-on register sequence.
 *
 * Init sequence taken from the Adafruit SSD1306 Arduino library.
 * The panel height (yres 64 vs. 32) selects the multiplex ratio, the
 * COM pin configuration and the default contrast (gamma) value; every
 * other command is fixed.
 */
static int init_display(struct fbtft_par *par)
{
	const int tall_panel = (par->info->var.yres == 64);

	fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);

	par->fbtftops.reset(par);

	/* Seed the contrast curve if the user supplied none. */
	if (par->gamma.curves[0] == 0) {
		mutex_lock(&par->gamma.lock);
		par->gamma.curves[0] = tall_panel ? 0xCF : 0x8F;
		mutex_unlock(&par->gamma.lock);
	}

	write_reg(par, 0xAE);			/* Display OFF */

	write_reg(par, 0xD5);			/* Clock divide ratio / osc frequency */
	write_reg(par, 0x80);

	write_reg(par, 0xA8);			/* Multiplex ratio = yres - 1 */
	write_reg(par, tall_panel ? 0x3F : 0x1F);

	write_reg(par, 0xD3);			/* Display offset = 0 */
	write_reg(par, 0x0);

	write_reg(par, 0x40 | 0x0);		/* Display start line = 0 */

	write_reg(par, 0x8D);			/* Charge pump setting */
	write_reg(par, 0x14);			/* A[2]=1: pump enabled while display on */

	write_reg(par, 0x20);			/* Memory addressing mode */
	write_reg(par, 0x01);			/* ... vertical */

	write_reg(par, 0xA0 | 0x1);		/* Segment re-map: column 127 -> SEG0 */

	write_reg(par, 0xC8);			/* COM scan: COM[N-1] -> COM0 (remapped) */

	write_reg(par, 0xDA);			/* COM pins hardware configuration */
	/* A[4]=1: alternative layout (64 rows); A[4]=0: sequential (32 rows) */
	write_reg(par, tall_panel ? 0x12 : 0x02);

	write_reg(par, 0xD9);			/* Pre-charge period */
	write_reg(par, 0xF1);

	write_reg(par, 0xDB);			/* VCOMH deselect level */
	/* according to the datasheet, this value is out of bounds */
	write_reg(par, 0x40);

	write_reg(par, 0xA4);			/* Resume display from RAM contents */
	write_reg(par, 0xA6);			/* Normal (non-inverted) polarity */
	write_reg(par, 0xAF);			/* Display ON */

	return 0;
}
/*
 * snd_compr_ioctl - dispatch compressed-audio ioctls to their handlers.
 * @f:   the opened compress device file (private_data set in snd_compr_open)
 * @cmd: SNDRV_COMPRESS_* ioctl command
 * @arg: command-specific user pointer / value
 *
 * Dispatches on _IOC_NR(cmd) under the device lock, so every stream
 * operation is serialized per device.  Unknown commands fall through
 * with the initial -ENOTTY.
 */
static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;
	int retval = -ENOTTY;

	/* private_data is set at open; NULL here indicates a kernel bug. */
	if (snd_BUG_ON(!data))
		return -EFAULT;
	stream = &data->stream;
	mutex_lock(&stream->device->lock);
	switch (_IOC_NR(cmd)) {
	case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
		/* Protocol version query: plain int copy-out. */
		retval = put_user(SNDRV_COMPRESS_VERSION, (int __user *)arg) ? -EFAULT : 0;
		break;
	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
		retval = snd_compr_get_caps(stream, arg);
		break;
#ifndef COMPR_CODEC_CAPS_OVERFLOW
	/* Compiled out when codec caps would overflow the ioctl size. */
	case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
		retval = snd_compr_get_codec_caps(stream, arg);
		break;
#endif
	case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
		retval = snd_compr_set_params(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
		retval = snd_compr_get_params(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
		retval = snd_compr_set_metadata(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
		retval = snd_compr_get_metadata(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
		retval = snd_compr_tstamp(stream, arg);
		break;
	case _IOC_NR(SNDRV_COMPRESS_AVAIL):
		retval = snd_compr_ioctl_avail(stream, arg);
		break;
	/* Stream state transitions (see the state diagram atop this file). */
	case _IOC_NR(SNDRV_COMPRESS_PAUSE):
		retval = snd_compr_pause(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_RESUME):
		retval = snd_compr_resume(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_START):
		retval = snd_compr_start(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_STOP):
		retval = snd_compr_stop(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_DRAIN):
		retval = snd_compr_drain(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
		retval = snd_compr_partial_drain(stream);
		break;
	case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
		retval = snd_compr_next_track(stream);
		break;
	}
	mutex_unlock(&stream->device->lock);
	return retval;
}