Example #1
0
ret_t
cherokee_buffer_ensure_size (cherokee_buffer_t *buf, size_t size)
{
	char *pbuf;

	/* Maybe it is already big enough.
	 * Note: if buf->size == 0 and size == 0, then buf->buf can be NULL.
	 */
	if (size <= buf->size)
		return ret_ok;

	/* If it is a new buffer, take memory and return
	 */
	if (buf->buf == NULL) {
		buf->buf = (char *) malloc (size);
		if (unlikely (buf->buf == NULL))
			return ret_nomem;
		buf->size = size;
		return ret_ok;
	}

	/* It already has memory, but it needs more.
	 */
	pbuf = (char *) realloc(buf->buf, size);
	if (unlikely (pbuf == NULL)) {
		return ret_nomem;
	}

	buf->buf = pbuf;
	buf->size = size;

	return ret_ok;
}
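
Every snippet in this collection relies on an unlikely() (and sometimes likely()) branch-prediction hint. Its definition is project-specific and not shown in any example; the following is a minimal sketch of the usual GCC/Clang form, given as an assumption rather than any of these projects' actual headers:

/* Branch-prediction hint macros, as commonly defined for GCC/Clang.
 * This is a sketch of the typical definition, not taken verbatim from
 * any of the projects quoted below. */
#if defined(__GNUC__)
# define likely(x)   __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
#else
# define likely(x)   (x)
# define unlikely(x) (x)
#endif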
Example #2
0
bool zmq::pipe_t::read (msg_t *msg_)
{
    if (unlikely (!in_active))
        return false;
    if (unlikely (state != active && state != waiting_for_delimiter))
        return false;

    if (!inpipe->read (msg_)) {
        in_active = false;
        return false;
    }

    //  If delimiter was read, start termination process of the pipe.
    if (msg_->is_delimiter ()) {
        process_delimiter ();
        return false;
    }

    if (!(msg_->flags () & msg_t::more))
        msgs_read++;

    if (lwm > 0 && msgs_read % lwm == 0)
        send_activate_write (peer, msgs_read);

    return true;
}
Example #3
0
int zmq::msg_t::copy (msg_t &src_)
{
    //  Check the validity of the source.
    if (unlikely (!src_.check ())) {
        errno = EFAULT;
        return -1;
    }

    int rc = close ();
    if (unlikely (rc < 0))
        return rc;

    if (src_.u.base.type == type_lmsg) {

        //  One reference is added to shared messages. Non-shared messages
        //  are turned into shared messages and reference count is set to 2.
        if (src_.u.lmsg.flags & msg_t::shared)
            src_.u.lmsg.content->refcnt.add (1);
        else {
            src_.u.lmsg.flags |= msg_t::shared;
            src_.u.lmsg.content->refcnt.set (2);
        }
    }

    *this = src_;

    return 0;

}
Example #4
0
static inline void tc_class_free(struct tc_device *n, struct tc_class *c) {
    if(c == n->classes) {
        if(likely(c->next))
            n->classes = c->next;
        else
            n->classes = c->prev;
    }

    if(c == n->last_class) {
        if(unlikely(c->next))
            n->last_class = c->next;
        else
            n->last_class = c->prev;
    }

    if(c->next) c->next->prev = c->prev;
    if(c->prev) c->prev->next = c->next;

    debug(D_TC_LOOP, "Removing from device '%s' class '%s', parentid '%s', leafid '%s', unused=%d", n->id, c->id, c->parentid?c->parentid:"", c->leafid?c->leafid:"", c->unupdated);

    if(unlikely(tc_class_index_del(n, c) != c))
        error("plugin_tc: INTERNAL ERROR: attempt remove class '%s' from device '%s': removed a different calls", c->id, n->id);

    freez(c->id);
    freez(c->name);
    freez(c->leafid);
    freez(c->parentid);
    freez(c);
}
Example #5
0
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data;
	struct cpufreq_policy *policy;
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(!policy))
		return 0;

	data = policy->driver_data;
	if (unlikely(!data || !data->freq_table))
		return 0;

	cached_freq = data->freq_table[to_perf_data(data)->state].frequency;
	freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data);
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}
Example #6
0
File: lmem.c Project: guodawei/lua
void *luaM_growaux_ (lua_State *L, void *block, int nelems, int *psize,
                     int size_elems, int limit, const char *what) {
  void *newblock;
  int size = *psize;
  if (nelems + 1 <= size)  /* does one extra element still fit? */
    return block;  /* nothing to be done */
  if (size >= limit / 2) {  /* cannot double it? */
    if (unlikely(size >= limit))  /* cannot grow even a little? */
      luaG_runerror(L, "too many %s (limit is %d)", what, limit);
    size = limit;  /* still have at least one free place */
  }
  else {
    size *= 2;
    if (size < MINSIZEARRAY)
      size = MINSIZEARRAY;  /* minimum size */
  }
  lua_assert(nelems + 1 <= size && size <= limit);
  /* 'limit' ensures that multiplication will not overflow */
  newblock = luaM_realloc_(L, block, cast_sizet(*psize) * size_elems,
                                     cast_sizet(size) * size_elems);
  if (unlikely(newblock == NULL))
    luaM_error(L);
  *psize = size;  /* update only when everything else is OK */
  return newblock;
}
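
luaM_growaux_ is normally reached through a wrapper macro (luaM_growvector in Lua's lmem.h) rather than called directly. Below is a hedged sketch of that wrapper and a typical call site; treat the exact macro text and all call-site names (ls->buff, ls->nbuff, ls->sizebuff) as assumptions for illustration:

/* Sketch of the wrapper macro around luaM_growaux_, modeled on Lua's
 * lmem.h (the exact upstream form may differ slightly). */
#define luaM_growvector(L,v,nelems,size,t,limit,e) \
	((v) = cast(t *, luaM_growaux_(L, v, nelems, &(size), sizeof(t), \
	                               limit, e)))

/* Hypothetical call site: grow a char buffer before appending. */
luaM_growvector(L, ls->buff, ls->nbuff, ls->sizebuff, char, MAX_INT, "buffer");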
Example #7
0
STATIC int
xfs_vn_symlink(
	struct inode	*dir,
	struct dentry	*dentry,
	const char	*symname)
{
	struct inode	*inode;
	struct xfs_inode *cip = NULL;
	struct xfs_name	name;
	int		error;
	umode_t		mode;

	mode = S_IFLNK |
		(irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
	xfs_dentry_to_name(&name, dentry, mode);

	error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
	if (unlikely(error))
		goto out;

	inode = VFS_I(cip);

	error = xfs_init_security(inode, dir, &dentry->d_name);
	if (unlikely(error))
		goto out_cleanup_inode;

	d_instantiate(dentry, inode);
	return 0;

 out_cleanup_inode:
	xfs_cleanup_inode(dir, inode, dentry);
 out:
	return -error;
}
Example #8
0
static int __init msm_uim_probe(struct platform_device *pdev)
{
	struct msm_port *msm_port;
	struct resource *resource;
	struct uart_port *port;
	int irq;

	if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
		return -ENXIO;

	pr_info("msm_uim: detected port #%d\n", pdev->id);

	port = get_port_from_line(pdev->id);
	port->dev = &pdev->dev;
	msm_port = UART_TO_MSM(port);

	msm_port->uim = true;

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(!resource))
		return -ENXIO;
	port->mapbase = resource->start;

	irq = platform_get_irq(pdev, 0);
	if (unlikely(irq < 0))
		return -ENXIO;
	port->irq = irq;

	platform_set_drvdata(pdev, port);

	return uart_add_one_port(&msm_uart_driver, port);
}
Example #9
0
static void s3c_out_epn(struct s3c_udc *dev, u32 ep_idx)
{
	struct s3c_ep *ep = &dev->ep[ep_idx];
	struct s3c_request *req;

	if (unlikely(!(ep->desc))) {
		/* Throw packet away.. */
		printk("%s: No descriptor?!?\n", __FUNCTION__);
		return;
	}

	if (list_empty(&ep->queue))
		req = 0;
	else
		req = list_entry(ep->queue.next,
				struct s3c_request, queue);

	if (unlikely(!req)) {
		DEBUG_OUT_EP("%s: NULL REQ on OUT EP-%d\n", __FUNCTION__, ep_idx);
		return;

	} else {
		read_fifo(ep, req);
	}

}
Example #10
0
static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}
Example #11
0
/**
 * hwmon_device_register - register w/ hwmon
 * @dev: the device to register
 *
 * hwmon_device_unregister() must be called when the device is no
 * longer needed.
 *
 * Returns the pointer to the new device.
 */
struct device *hwmon_device_register(struct device *dev)
{
	struct device *hwdev;
	int id, err;

again:
	if (unlikely(idr_pre_get(&hwmon_idr, GFP_KERNEL) == 0))
		return ERR_PTR(-ENOMEM);

	spin_lock(&idr_lock);
	err = idr_get_new(&hwmon_idr, NULL, &id);
	spin_unlock(&idr_lock);

	if (unlikely(err == -EAGAIN))
		goto again;
	else if (unlikely(err))
		return ERR_PTR(err);

	id = id & MAX_ID_MASK;
	hwdev = device_create(hwmon_class, dev, MKDEV(0, 0), NULL,
			      HWMON_ID_FORMAT, id);

	if (IS_ERR(hwdev)) {
		spin_lock(&idr_lock);
		idr_remove(&hwmon_idr, id);
		spin_unlock(&idr_lock);
	}

	return hwdev;
}
Example #12
0
static cairo_status_t
twin_font_face_set_properties_from_toy (cairo_font_face_t *twin_face,
					cairo_toy_font_face_t *toy_face)
{
    cairo_status_t status;
    twin_face_properties_t *props;

    props = malloc (sizeof (twin_face_properties_t));
    if (unlikely (props == NULL))
	return _cairo_error (CAIRO_STATUS_NO_MEMORY);

    props->stretch  = TWIN_STRETCH_NORMAL;
    props->monospace = FALSE;
    props->smallcaps = FALSE;

    props->slant = toy_face->slant;
    props->weight = toy_face->weight == CAIRO_FONT_WEIGHT_NORMAL ?
		    TWIN_WEIGHT_NORMAL : TWIN_WEIGHT_BOLD;
    face_props_parse (props, toy_face->family);

    status = cairo_font_face_set_user_data (twin_face,
					    &twin_properties_key,
					    props, free);
    if (unlikely (status))
	goto FREE_PROPS;

    return CAIRO_STATUS_SUCCESS;

FREE_PROPS:
    free (props);
    return status;
}
Example #13
0
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
    struct irq_chip *chip = irq_desc_get_chip(desc);

    raw_spin_lock(&desc->lock);

    desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
    /*
     * If we're currently running this IRQ, or it's disabled,
     * we shouldn't process the IRQ. Mark it pending, handle
     * the necessary masking and go out
     */
    if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
                 irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
        if (!irq_check_poll(desc)) {
            desc->istate |= IRQS_PENDING;
            goto out_eoi;
        }
    }
    kstat_incr_irqs_this_cpu(irq, desc);

    do {
        if (unlikely(!desc->action))
            goto out_eoi;

        handle_irq_event(desc);

    } while ((desc->istate & IRQS_PENDING) &&
             !irqd_irq_disabled(&desc->irq_data));

out_eoi:
    chip->irq_eoi(&desc->irq_data);
    raw_spin_unlock(&desc->lock);
}
Example #14
0
/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
    raw_spin_lock(&desc->lock);
    mask_ack_irq(desc);

    if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
        if (!irq_check_poll(desc))
            goto out_unlock;

    desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
    kstat_incr_irqs_this_cpu(irq, desc);

    /*
     * If it's disabled or no action is available,
     * keep it masked and get out of here.
     */
    if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
        desc->istate |= IRQS_PENDING;
        goto out_unlock;
    }

    handle_irq_event(desc);

    cond_unmask_irq(desc);

out_unlock:
    raw_spin_unlock(&desc->lock);
}
Example #15
0
static int drv_ps_liteon_ltr553_set_default_config(i2c_dev_t* drv)
{
    int     ret = 0;
    uint8_t value = 0;

    value = LTR553_SET_BITSLICE(value, LTR553_PS_MEAS_RATE_REG_MEAS_RATE, PMR_RATE_100);
    ret = sensor_i2c_write(drv, LTR553_PS_MEAS_RATE,
                            &value, I2C_DATA_LEN, I2C_OP_RETRIES);
    if (unlikely(ret)) {
        return ret;
    }

    value = 0;
    value = LTR553_SET_BITSLICE(value, LTR553_PS_LED_REG_PLUSE_FREQ, LPMF_PERIOD_60K);
    value = LTR553_SET_BITSLICE(value, LTR553_PS_LED_REG_CURRENT_DUTY, LCD_PER_100);
    value = LTR553_SET_BITSLICE(value, LTR553_PS_LED_REG_CURRENT, LC_LEVEL_100);
    ret = sensor_i2c_write(drv, LTR553_PS_MEAS_RATE,
                            &value, I2C_DATA_LEN, I2C_OP_RETRIES);
    if (unlikely(ret)) {
        return ret;
    }

    ret = sensor_i2c_read(drv, LTR553_PS_CONTR, &value, I2C_DATA_LEN, I2C_OP_RETRIES);
    if (unlikely(ret)) {
        return ret;
    }
    value = LTR553_SET_BITSLICE(value, LTR553_PS_CONTR_REG_PS_GAIN, PG_GAIN_X16);
    ret = sensor_i2c_write(drv, LTR553_PS_CONTR,
                            &value, I2C_DATA_LEN, I2C_OP_RETRIES);
    if (unlikely(ret)) {
        return ret;
    }

    return 0;
}
Example #16
0
static ssize_t secvib_read(struct file *file,
		char __user *buf, size_t count, loff_t *ppos)
{

	int ret;
	struct secvib_data *secvib =
		container_of(file->private_data, struct secvib_data, miscdev);
	const size_t bufsize = (secvib->dev_name_size > (size_t)(*ppos))
		? min(count, secvib->dev_name_size - (size_t)(*ppos)) : 0;

	pr_debug("secvib: %s[%d]\n", __func__, __LINE__);

	/* End of buffer, exit */
	if (unlikely(bufsize == 0))
		return 0;

	mutex_lock(&secvib->lock);
	ret = copy_to_user(buf, secvib->dev_name + (*ppos), bufsize);
	if (unlikely(ret != 0)) {
		pr_err("secvib: read failed\n");
		mutex_unlock(&secvib->lock);
		return 0;
	}

	/* Update file position and return copied buffer size */
	*ppos += bufsize;

	mutex_unlock(&secvib->lock);

	return bufsize;

}
Example #17
0
static int drv_ps_liteon_ltr553_read(void *buf, size_t len)
{
    int ret = 0;
    size_t size;
    uint8_t reg_data[2] = {0};
    proximity_data_t* pdata = (proximity_data_t*)buf;
    int wait_time = 0;

    if(buf == NULL){
        return -1;
    }

    size = sizeof(proximity_data_t);
    if(len < size){
        return -1;
    }

    ret = sensor_i2c_read(&ltr553_ctx, LTR553_PS_DATA_L, &reg_data[0], I2C_DATA_LEN, I2C_OP_RETRIES);
    if (unlikely(ret)) {
        return -1;
    }
    ret = sensor_i2c_read(&ltr553_ctx, LTR553_PS_DATA_H, &reg_data[1], I2C_DATA_LEN, I2C_OP_RETRIES);
    if (unlikely(ret)) {
        return -1;
    }

    pdata->present = (uint32_t)(reg_data[1] << 8 | reg_data[0]);

    pdata->timestamp = aos_now_ms();

    return (int)size;
}
Example #18
0
/**
 * Find the data portion of an IO Event section from event log.
 * @elog: RTAS error/event log.
 *
 * Return:
 * 	pointer to a valid IO event section data. NULL if not found.
 */
static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog)
{
	struct pseries_errorlog *sect;

	/* We should only ever get called for io-event interrupts, but if
	 * we do get called for another type then something went wrong so
	 * make some noise about it.
	 * RTAS_TYPE_IO only exists in extended event log version 6 or later.
	 * No need to check event log version.
	 */
	if (unlikely(rtas_error_type(elog) != RTAS_TYPE_IO)) {
		printk_once(KERN_WARNING"io_event_irq: Unexpected event type %d",
			    rtas_error_type(elog));
		return NULL;
	}

	sect = get_pseries_errorlog(elog, PSERIES_ELOG_SECT_ID_IO_EVENT);
	if (unlikely(!sect)) {
		printk_once(KERN_WARNING "io_event_irq: RTAS extended event "
			    "log does not contain an IO Event section. "
			    "Could be a bug in system firmware!\n");
		return NULL;
	}
	return (struct pseries_io_event *) &sect->data;
}
Example #19
0
/*
 * could be called frequently for query (@nr_to_scan == 0).
 * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
 */
static int enc_pools_shrink(int nr_to_scan, unsigned int gfp_mask)
{
        if (unlikely(nr_to_scan != 0)) {
                cfs_spin_lock(&page_pools.epp_lock);
                nr_to_scan = min(nr_to_scan, (int) page_pools.epp_free_pages -
                                             PTLRPC_MAX_BRW_PAGES);
                if (nr_to_scan > 0) {
                        enc_pools_release_free_pages(nr_to_scan);
                        CDEBUG(D_SEC, "released %d pages, %ld left\n",
                               nr_to_scan, page_pools.epp_free_pages);

                        page_pools.epp_st_shrinks++;
                        page_pools.epp_last_shrink = cfs_time_current_sec();
                }
                cfs_spin_unlock(&page_pools.epp_lock);
        }

        /*
         * if no pool access for a long time, we consider it's fully idle.
         * a little race here is fine.
         */
        if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
                     CACHE_QUIESCENT_PERIOD)) {
                cfs_spin_lock(&page_pools.epp_lock);
                page_pools.epp_idle_idx = IDLE_IDX_MAX;
                cfs_spin_unlock(&page_pools.epp_lock);
        }

        LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
        return max((int) page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
               (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
}
Example #20
0
/*****************************************************************************
 * DirInit: Init the directory access with a directory stream
 *****************************************************************************/
int DirInit (stream_t *access, DIR *dir)
{
    access_sys_t *sys = vlc_malloc(VLC_OBJECT(access), sizeof (*sys));
    if (unlikely(sys == NULL))
        goto error;

    if (!strcmp(access->psz_name, "fd"))
    {
        if (unlikely(asprintf(&sys->base_uri, "fd://%s",
                              access->psz_location) == -1))
            sys->base_uri = NULL;
    }
    else
        sys->base_uri = vlc_path2uri(access->psz_filepath, "file");
    if (unlikely(sys->base_uri == NULL))
        goto error;

    sys->dir = dir;

    access->p_sys = sys;
    access->pf_readdir = DirRead;
    access->pf_control = access_vaDirectoryControlHelper;
    return VLC_SUCCESS;

error:
    closedir(dir);
    return VLC_ENOMEM;
}
Example #21
0
static void PcapCallbackLoop(char *user, struct pcap_pkthdr *h, u_char *pkt)
{
    SCEnter();

    PcapThreadVars *ptv = (PcapThreadVars *)user;
    Packet *p = PacketGetFromQueueOrAlloc();
    struct timeval current_time;

    if (unlikely(p == NULL)) {
        SCReturn;
    }

    PKT_SET_SRC(p, PKT_SRC_WIRE);
    p->ts.tv_sec = h->ts.tv_sec;
    p->ts.tv_usec = h->ts.tv_usec;
    SCLogDebug("p->ts.tv_sec %"PRIuMAX"", (uintmax_t)p->ts.tv_sec);
    p->datalink = ptv->datalink;

    ptv->pkts++;
    ptv->bytes += h->caplen;
    (void) SC_ATOMIC_ADD(ptv->livedev->pkts, 1);
    p->livedev = ptv->livedev;

    if (unlikely(PacketCopyData(p, pkt, h->caplen))) {
        TmqhOutputPacketpool(ptv->tv, p);
        SCReturn;
    }

    switch (ptv->checksum_mode) {
        case CHECKSUM_VALIDATION_AUTO:
            if (ptv->livedev->ignore_checksum) {
                p->flags |= PKT_IGNORE_CHECKSUM;
            } else if (ChecksumAutoModeCheck(ptv->pkts,
                        SC_ATOMIC_GET(ptv->livedev->pkts),
                        SC_ATOMIC_GET(ptv->livedev->invalid_checksums))) {
                ptv->livedev->ignore_checksum = 1;
                p->flags |= PKT_IGNORE_CHECKSUM;
            }
            break;
        case CHECKSUM_VALIDATION_DISABLE:
            p->flags |= PKT_IGNORE_CHECKSUM;
            break;
        default:
            break;
    }

    if (TmThreadsSlotProcessPkt(ptv->tv, ptv->slot, p) != TM_ECODE_OK) {
        pcap_breakloop(ptv->pcap_handle);
        ptv->cb_result = TM_ECODE_FAILED;
    }

    /* Trigger one dump of stats every second */
    TimeGet(&current_time);
    if (current_time.tv_sec != ptv->last_stats_dump) {
        PcapDumpCounters(ptv);
        ptv->last_stats_dump = current_time.tv_sec;
    }

    SCReturn;
}
Example #22
0
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
Example #23
0
static inline void tc_device_classes_cleanup(struct tc_device *d) {
    static int cleanup_every = 999;

    if(unlikely(cleanup_every > 0)) {
        cleanup_every = (int) config_get_number("plugin:tc", "cleanup unused classes every", 120);
        if(cleanup_every < 0) cleanup_every = -cleanup_every;
    }

    d->name_updated = 0;
    d->family_updated = 0;

    struct tc_class *c = d->classes;
    while(c) {
        if(unlikely(cleanup_every && c->unupdated >= cleanup_every)) {
            struct tc_class *nc = c->next;
            tc_class_free(d, c);
            c = nc;
        }
        else {
            c->updated = 0;
            c->name_updated = 0;

            c = c->next;
        }
    }
}
Example #24
0
static int drv_als_ps_liteon_ltr553_validate_id(i2c_dev_t* drv, uint8_t part_id, uint8_t manufac_id)
{
    int     ret = 0;
    uint8_t part_id_value = 0;
    uint8_t manufac_id_value = 0;

    if(drv == NULL){
        return -1;
    }

    ret = sensor_i2c_read(drv, LTR553_PART_ID, &part_id_value, I2C_DATA_LEN, I2C_OP_RETRIES);
    if(unlikely(ret)) {
        return ret;
    }

    ret = sensor_i2c_read(drv, LTR553_MANUFAC_ID, &manufac_id_value, I2C_DATA_LEN, I2C_OP_RETRIES);
    if(unlikely(ret)) {
        return ret;
    }

    if (part_id_value != part_id || manufac_id_value != manufac_id) {
        return -1;
    }

    return 0;
}
Example #25
0
/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
{
	if (unlikely(!vport))
		goto error;

	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 * (No one comes after us, since we tell handle_bridge() that we took
	 * the packet.) */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;

	skb_push(skb, ETH_HLEN);

	if (unlikely(compute_ip_summed(skb, false)))
		goto error;

	vlan_copy_skb_tci(skb);

	ovs_vport_receive(vport, skb);
	return;

error:
	kfree_skb(skb);
}
Example #26
0
static int drv_ps_liteon_ltr553_set_power_mode(i2c_dev_t* drv, dev_power_mode_e mode)
{
    int     ret = 0;
    uint8_t dev_mode = 0;
    uint8_t value = 0;

    ret = sensor_i2c_read(drv, LTR553_PS_CONTR, &value, I2C_DATA_LEN, I2C_OP_RETRIES);
    if (unlikely(ret)) {
        return ret;
    }

    switch(mode){
        case DEV_POWER_OFF:
        case DEV_SLEEP:
            dev_mode = LTR553_SET_BITSLICE(value, LTR553_PS_CONTR_REG_PS_MODE, PM_MODE_STANDBY);
            break;
        case DEV_POWER_ON:
            dev_mode = LTR553_SET_BITSLICE(value, LTR553_PS_CONTR_REG_PS_MODE, PM_MODE_ACTIVE);
            break;
        default:
            return -1;
    }

    ret = sensor_i2c_write(drv, LTR553_PS_CONTR, &dev_mode, I2C_DATA_LEN, I2C_OP_RETRIES);
    if(unlikely(ret)) {
        return ret;
    }

    return 0;
}
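
The LTR553 helpers above are naturally used together. The following is a hedged sketch of a bring-up routine built only from the functions shown in Examples #15, #24 and #26 and the ltr553_ctx handle from Example #17; the function name drv_ps_liteon_ltr553_open and the two *_VAL constants are assumptions, not taken from the driver:

/* Hypothetical bring-up sequence: validate the chip ID, apply the
 * default PS configuration, then power the sensor on.
 * LTR553_PART_ID_VAL and LTR553_MANUFAC_ID_VAL are illustrative. */
static int drv_ps_liteon_ltr553_open(void)
{
    int ret;

    ret = drv_als_ps_liteon_ltr553_validate_id(&ltr553_ctx,
                                               LTR553_PART_ID_VAL,
                                               LTR553_MANUFAC_ID_VAL);
    if (unlikely(ret)) {
        return ret;
    }

    ret = drv_ps_liteon_ltr553_set_default_config(&ltr553_ctx);
    if (unlikely(ret)) {
        return ret;
    }

    return drv_ps_liteon_ltr553_set_power_mode(&ltr553_ctx, DEV_POWER_ON);
}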
Example #27
0
bool zmq::pipe_t::check_read ()
{
    if (unlikely (!in_active))
        return false;
    if (unlikely (state != active && state != waiting_for_delimiter))
        return false;

    //  Check if there's an item in the pipe.
    if (!inpipe->check_read ()) {
        in_active = false;
        return false;
    }

    //  If the next item in the pipe is message delimiter,
    //  initiate termination process.
    if (inpipe->probe (is_delimiter)) {
        msg_t msg;
        bool ok = inpipe->read (&msg);
        zmq_assert (ok);
        process_delimiter ();
        return false;
    }

    return true;
}
Example #28
0
static int drv_als_liteon_ltr553_set_default_config(i2c_dev_t* drv)
{
    int     ret = 0;
    uint8_t value = 0;

    value = LTR553_SET_BITSLICE(value, LTR553_ALS_MEAS_RATE_REG_INTEG_TIME, AIT_TIME_100);
    value = LTR553_SET_BITSLICE(value, LTR553_ALS_MEAS_RATE_REG_MEAS_RATE, AMR_RATE_100);
    ret = sensor_i2c_write(drv, LTR553_ALS_MEAS_RATE,
                            &value, I2C_DATA_LEN, I2C_OP_RETRIES);
    if (unlikely(ret)) {
        return ret;
    }

    ret = sensor_i2c_read(drv, LTR553_ALS_CONTR, &value, I2C_DATA_LEN, I2C_OP_RETRIES);
    if (unlikely(ret)) {
        return ret;
    }
    value = LTR553_SET_BITSLICE(value, LTR553_ALS_CONTR_REG_ALS_GAIN, AG_GAIN_1X);
    ret = sensor_i2c_write(drv, LTR553_ALS_CONTR,
                            &value, I2C_DATA_LEN, I2C_OP_RETRIES);
    if (unlikely(ret)) {
        return ret;
    }

    return 0;
}
Example #29
0
static ssize_t
cmos_nvram_read(struct file *filp, struct kobject *kobj,
		struct bin_attribute *attr,
		char *buf, loff_t off, size_t count)
{
	int	retval;

	if (unlikely(off >= attr->size))
		return 0;
	if (unlikely(off < 0))
		return -EINVAL;
	if ((off + count) > attr->size)
		count = attr->size - off;

	off += NVRAM_OFFSET;
	spin_lock_irq(&rtc_lock);
	for (retval = 0; count; count--, off++, retval++) {
		if (off < 128)
			*buf++ = CMOS_READ(off);
		else if (can_bank2)
			*buf++ = cmos_read_bank2(off);
		else
			break;
	}
	spin_unlock_irq(&rtc_lock);

	return retval;
}
Example #30
0
ret_t
cherokee_buffer_add_va_fixed (cherokee_buffer_t *buf, const char *format, ...)
{
	int len;
	int size = buf->size - buf->len;	/* final '\0' is always available */
	va_list ap;

	/* Test for minimum buffer size.
	 */
	if (size < 1)
		return ret_error;

	/* Format the string into the buffer.
	 * NOTE: len does NOT include '\0', size includes '\0' (len + 1)
	 */
	va_start (ap, format);
	len = vsnprintf (buf->buf + buf->len, size, format, ap);
	va_end (ap);

	if (unlikely (len < 0))
		return ret_error;

	/* Don't expand buffer if there is not enough space.
	 */
	if (unlikely (len >= size))
		return ret_error;

	buf->len += len;
	return ret_ok;
}
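
For context, a hedged sketch of how the two Cherokee buffer helpers (Examples #1 and #30) might be combined by a caller; the wrapper function itself is hypothetical:

/* Hypothetical caller: make room, then append a short formatted line.
 * Only cherokee_buffer_ensure_size(), cherokee_buffer_add_va_fixed()
 * and the buf/len/size fields come from Examples #1 and #30. */
static ret_t
append_status_line (cherokee_buffer_t *buf, int code)
{
	ret_t ret;

	ret = cherokee_buffer_ensure_size (buf, buf->len + 64);
	if (unlikely (ret != ret_ok))
		return ret;

	return cherokee_buffer_add_va_fixed (buf, "HTTP/1.1 %d\r\n", code);
}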