static int
hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
{
	struct ip_set_hash *h;
	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
	u8 hbits;
	size_t hsize;

	if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
		return -IPSET_ERR_INVALID_FAMILY;

	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_HASHSIZE]) {
		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
		if (hashsize < IPSET_MIMINAL_HASHSIZE)
			hashsize = IPSET_MIMINAL_HASHSIZE;
	}

	if (tb[IPSET_ATTR_MAXELEM])
		maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->maxelem = maxelem;
	get_random_bytes(&h->initval, sizeof(h->initval));
	h->timeout = IPSET_NO_TIMEOUT;

	hbits = htable_bits(hashsize);
	hsize = htable_size(hbits);
	if (hsize == 0) {
		kfree(h);
		return -ENOMEM;
	}
	h->table = ip_set_alloc(hsize);
	if (!h->table) {
		kfree(h);
		return -ENOMEM;
	}
	h->table->htable_bits = hbits;

	set->data = h;

	if (tb[IPSET_ATTR_TIMEOUT]) {
		h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);

		set->variant = set->family == NFPROTO_IPV4
			? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant;

		if (set->family == NFPROTO_IPV4)
			hash_ipportip4_gc_init(set);
		else
			hash_ipportip6_gc_init(set);
	} else {
		set->variant = set->family == NFPROTO_IPV4
			? &hash_ipportip4_variant : &hash_ipportip6_variant;
	}

	pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
		 set->name, jhash_size(h->table->htable_bits),
		 h->table->htable_bits, h->maxelem, set->data, h->table);

	return 0;
}
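A note on the sizing logic above: htable_bits() rounds the requested hashsize up to a power of two and returns the exponent, and htable_size() converts that back into a byte count while guarding against overflow (returning 0); that guard is why the hsize == 0 case is treated as -ENOMEM. A simplified sketch of the conversion, with the overflow handling omitted:

static u8 sketch_htable_bits(u32 hashsize)
{
	u8 bits = 0;

	/* smallest bits with (1 << bits) >= hashsize;
	 * jhash_size(bits) later expands back to (1U << bits) */
	while ((1U << bits) < hashsize)
		bits++;
	return bits;
}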
Example #2
enum MqErrorE
SysSocketPair (
  struct MqS * const context,	    ///< [in] handle error
  int socks[]				    ///< [out] the result from socketpair
) {
#if defined(HAVE_SOCKETPAIR)
  int oldflags;
  sSysSetErrorNum(0);
  if (unlikely (socketpair (AF_UNIX, SOCK_STREAM, 0, socks) == -1)) {
    return sSysMqErrorMsg (context, __func__, "socketpair");
  }

  if (unlikely ((oldflags = fcntl (socks[0], F_GETFD, 0)) == -1)) {
    return sSysMqErrorMsg (context, __func__, "fcntl F_GETFD");
  }
  oldflags |= FD_CLOEXEC;
  if (unlikely (fcntl (socks[0], F_SETFD, oldflags) == -1)) {
    return sSysMqErrorMsg (context, __func__, "fcntl F_SETFD");
  }

  return MQ_OK;
#else
/* socketpair.c
 * Copyright 2007 by Nathan C. Myers <*****@*****.**>; all rights reserved.
 * This code is Free Software.  It may be copied freely, in original or 
 * modified form, subject only to the restrictions that (1) the author is
 * relieved from all responsibilities for any use for any purpose, and (2)
 * this copyright notice must be retained, unchanged, in its entirety.  If
 * for any reason the author might be held responsible for any consequences
 * of copying or use, license is withheld.  
 */

/* dumb_socketpair:
 *   If make_overlapped is nonzero, both sockets created will be usable for
 *   "overlapped" operations via WSASend etc.  If make_overlapped is zero,
 *   socks[0] (only) will be usable with regular ReadFile etc., and thus 
 *   suitable for use as stdin or stdout of a child process.  Note that the
 *   sockets must be closed with closesocket() regardless.
 */
    struct sockaddr_in addr;
    MQ_SOCK listener;
    socklen_t addrlen = sizeof(addr);

    socks[0] = socks[1] = INVALID_SOCKET;
    MqErrorCheck (SysSocket(context,AF_INET,SOCK_STREAM,0,&listener));

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(0x7f000001);
    addr.sin_port = 0;

    MqErrorCheck (SysBind(context,listener, (const struct sockaddr*) &addr, sizeof(addr)));
    MqErrorCheck (SysGetSockName(context, listener, (struct sockaddr*) &addr, &addrlen));

    do {
	MqErrorCheck (SysListen(context,listener,1));
	MqErrorCheck (SysSocket(context,AF_INET,SOCK_STREAM,0,&socks[0]));
	MqErrorCheck (SysConnect(context,socks[0],(struct sockaddr*const) &addr, sizeof(addr),1));
	MqErrorCheck (SysAccept(context,listener,NULL,NULL,&socks[1]));
	MqErrorCheck (SysCloseSocket(context,__func__,MQ_NO,&listener));
        return MQ_OK;
    } while (0);
    return sSysMqErrorMsg (context, __func__, "socketpair");

error:
  MqErrorCheck (SysCloseSocket(MQ_ERROR_IGNORE,__func__,MQ_NO,&listener));
  return MqErrorStack (context);
#endif
}
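For reference, the HAVE_SOCKETPAIR branch above is the standard POSIX idiom of socketpair() plus marking a descriptor close-on-exec; stripped of the MqS error-reporting layer, it reduces to this sketch:

#include <sys/socket.h>
#include <fcntl.h>
#include <unistd.h>

/* Create a connected AF_UNIX pair and set FD_CLOEXEC on socks[0],
 * mirroring the checks performed above. */
static int plain_socketpair_cloexec(int socks[2])
{
  int flags;

  if (socketpair(AF_UNIX, SOCK_STREAM, 0, socks) == -1)
    return -1;
  flags = fcntl(socks[0], F_GETFD, 0);
  if (flags == -1 ||
      fcntl(socks[0], F_SETFD, flags | FD_CLOEXEC) == -1) {
    close(socks[0]);
    close(socks[1]);
    return -1;
  }
  return 0;
}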
Example #3
int brubeck_statsd_msg_parse(struct brubeck_statsd_msg *msg, char *buffer, size_t length)
{
	char *end = buffer + length;
	*end = '\0';

	/**
	 * Message key: all the string until the first ':'
	 *
	 *      gaugor:333|g
	 *      ^^^^^^
	 */
	{
		msg->key = buffer;
		msg->key_len = 0;
		while (*buffer != ':' && *buffer != '\0') {
			/* Invalid metric, can't have a space */
			if (*buffer == ' ')
				return -1;
			++buffer;
		}
		if (*buffer == '\0')
			return -1;

		msg->key_len = buffer - msg->key;
		*buffer++ = '\0';

		/* Corrupted metric: empty key, or a trailing '.'
		 * that Graphite won't swallow */
		if (msg->key_len == 0 || msg->key[msg->key_len - 1] == '.')
			return -1;
	}

	/**
	 * Message value: the numeric value between ':' and '|'.
	 * This is already converted to an integer.
	 *
	 *      gaugor:333|g
	 *             ^^^
	 */
	{
		int negative = 0;
		char *start = buffer;

		msg->value = 0.0;

		if (*buffer == '-') {
			++buffer;
			negative = 1;
		}

		while (*buffer >= '0' && *buffer <= '9') {
			msg->value = (msg->value * 10.0) + (*buffer - '0');
			++buffer;
		}

		if (*buffer == '.') {
			double f = 0.0, n = 0.0;
			++buffer;

			while (*buffer >= '0' && *buffer <= '9') {
				f = (f * 10.0) + (*buffer - '0');
				++buffer;
				n += 1.0;
			}

			msg->value += f / pow(10.0, n);
		}

		if (negative)
			msg->value = -msg->value;

		if (unlikely(*buffer == 'e')) {
			msg->value = strtod(start, &buffer);
		}

		if (*buffer != '|')
			return -1;

		buffer++;
	}

	/**
	 * Message type: one or two char identifier with the
	 * message type. Valid values: g, c, C, h, ms
	 *
	 *      gaugor:333|g
	 *                 ^
	 */
	{
		switch (*buffer) {
			case 'g': msg->type = BRUBECK_MT_GAUGE; break;
			case 'c': msg->type = BRUBECK_MT_METER; break;
			case 'C': msg->type = BRUBECK_MT_COUNTER; break;
			case 'h': msg->type = BRUBECK_MT_HISTO; break;
			case 'm':
				++buffer;
				if (*buffer == 's') {
					msg->type = BRUBECK_MT_TIMER;
					break;
				}
				/* fall through: a bare 'm' is not a valid type */
			default:
				return -1;
		}
	}

	/**
	 * Trailing bytes: data appended at the end of the message.
	 * This is stored verbatim and will be parsed when processing
	 * the specific message type. This is optional.
	 *
	 *      gorets:1|c|@0.1
	 *                 ^^^^----
	 */
	{
		buffer++;

		if (buffer[0] == '\0' || (buffer[0] == '\n' && buffer[1] == '\0')) {
			msg->trail = NULL;
			return 0;
		}
			
		if (*buffer == '@' || *buffer == '|') {
			msg->trail = buffer;
			return 0;
		}

		return -1;
	}
}
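A minimal usage sketch for the parser above (the struct field names are the ones the function itself references; the buffer must be writable with one spare byte past length, since the parser NUL-terminates in place):

#include <stdio.h>
#include <string.h>

static void parse_example(void)
{
	char packet[] = "gaugor:333|g";	/* writable; the NUL is the spare byte */
	struct brubeck_statsd_msg msg;

	if (brubeck_statsd_msg_parse(&msg, packet, strlen(packet)) == 0)
		printf("key=%.*s value=%g type=%d\n",
		       (int)msg.key_len, msg.key, msg.value, (int)msg.type);
	else
		fprintf(stderr, "invalid statsd packet\n");
}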
Example #4
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}
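The preload dance above is the standard pattern for inserting into a radix tree under a spinlock; in miniature (a generic sketch, not XFS code):

static int sketch_radix_insert(struct radix_tree_root *root, spinlock_t *lock,
			       unsigned long index, void *item)
{
	int error = radix_tree_preload(GFP_KERNEL);	/* may sleep here */

	if (error)
		return error;
	spin_lock(lock);
	error = radix_tree_insert(root, index, item);	/* uses preloaded nodes */
	spin_unlock(lock);
	radix_tree_preload_end();			/* re-enables preemption */
	return error;
}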
Example #5
static void sas_scsi_task_done(struct sas_task *task)
{
	struct task_status_struct *ts = &task->task_status;
	struct scsi_cmnd *sc = task->uldd_task;
	int hs = 0, stat = 0;

	if (unlikely(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		/* Aborted tasks will be completed by the error handler */
		SAS_DPRINTK("task done but aborted\n");
		return;
	}

	if (unlikely(!sc)) {
		SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
		list_del_init(&task->list);
		sas_free_task(task);
		return;
	}

	if (ts->resp == SAS_TASK_UNDELIVERED) {
		/* transport error */
		hs = DID_NO_CONNECT;
	} else { /* ts->resp == SAS_TASK_COMPLETE */
		/* task delivered, what happened afterwards? */
		switch (ts->stat) {
		case SAS_DEV_NO_RESPONSE:
		case SAS_INTERRUPTED:
		case SAS_PHY_DOWN:
		case SAS_NAK_R_ERR:
		case SAS_OPEN_TO:
			hs = DID_NO_CONNECT;
			break;
		case SAS_DATA_UNDERRUN:
			scsi_set_resid(sc, ts->residual);
			if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
				hs = DID_ERROR;
			break;
		case SAS_DATA_OVERRUN:
			hs = DID_ERROR;
			break;
		case SAS_QUEUE_FULL:
			hs = DID_SOFT_ERROR; /* retry */
			break;
		case SAS_DEVICE_UNKNOWN:
			hs = DID_BAD_TARGET;
			break;
		case SAS_SG_ERR:
			hs = DID_PARITY;
			break;
		case SAS_OPEN_REJECT:
			if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
				hs = DID_SOFT_ERROR; /* retry */
			else
				hs = DID_ERROR;
			break;
		case SAS_PROTO_RESPONSE:
			SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP "
				    "task; please report this\n",
				    task->dev->port->ha->sas_ha_name);
			break;
		case SAS_ABORTED_TASK:
			hs = DID_ABORT;
			break;
		case SAM_STAT_CHECK_CONDITION:
			memcpy(sc->sense_buffer, ts->buf,
			       min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
			stat = SAM_STAT_CHECK_CONDITION;
			break;
		default:
			stat = ts->stat;
			break;
		}
	}
	ASSIGN_SAS_TASK(sc, NULL);
	sc->result = (hs << 16) | stat;
	list_del_init(&task->list);
	sas_free_task(task);
	sc->scsi_done(sc);
}
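The sc->result assignment above uses the standard SCSI result-word layout: the host byte (DID_*) occupies bits 16-23 and the SAM status the low byte. As a reference sketch:

/* equivalent of the (hs << 16) | stat packing above */
static inline unsigned int sketch_scsi_result(int host_byte, int sam_status)
{
	return ((unsigned int)host_byte << 16) | (unsigned int)sam_status;
}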
Example #6
static int __devinit tsu8111_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
{
	//struct regulator *regulator;
	//struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
	struct tsu8111_platform_data *pdata = client->dev.platform_data;
	struct tsu8111_usbsw *usbsw;
	struct fsa880_muic *fsa880_muic;

	unsigned int data;
	int ret = 0;
	u8 devID;

	u8 intr, intr2;
	u8 mansw1;
	unsigned int ctrl = CTRL_MASK;

       printk("[TSU8111] PROBE ......\n");
	   
	isProbe = 1;

	fsa880_muic = kzalloc(sizeof(struct fsa880_muic), GFP_KERNEL);
	if (unlikely(!fsa880_muic))
	{
		pr_err("%s: fsa880_muic memory alloc failed\n", __func__);
		return -ENOMEM;
	}
	fsa880_muic_ptr = fsa880_muic;
	   
       //add for AT Command 
       wake_lock_init(&JIGConnect_idle_wake, WAKE_LOCK_IDLE, "jig_connect_idle_wake");
       wake_lock_init(&JIGConnect_suspend_wake, WAKE_LOCK_SUSPEND, "jig_connect_suspend_wake");
	   
	usbsw = kzalloc(sizeof(struct tsu8111_usbsw), GFP_KERNEL);
	if (!usbsw) {
		dev_err(&client->dev, "failed to allocate driver data\n");
		kfree(fsa880_muic);	/* don't leak the muic state */
		return -ENOMEM;
	}

	usbsw->client = client;
	usbsw->pdata = client->dev.platform_data;

	chip = usbsw;

	i2c_set_clientdata(client, usbsw);

	   
#if defined(CONFIG_SPA)
	spa_external_event = spa_get_external_event_handler();
#endif

	/* clear interrupt */
	tsu8111_read_reg(client, TSU8111_REG_INT1, &intr);

	tsu8111_read_reg(client, TSU8111_REG_DEVID, &devID);
	if (devID == 0x0a)
		muic_type = muicTypeTI8111;
	else if (devID == 0x00)
		muic_type = muicTypeFSA880;
	else
		muic_type = muicTypeFSA;

	if (muic_type == muicTypeFSA880) {
		intr &= 0xffff;
		/* set control register */
		tsu8111_write_reg(client, TSU8111_REG_CTRL, 0x04);
	} else if (muic_type == muicTypeTI8111) {
		tsu8111_read_reg(client, TSU8111_REG_INT2, &intr2);
		intr &= 0xffff;

		/* unmask interrupt (attach/detach only) */
		ret = tsu8111_write_reg(client, TSU8111_REG_INT1_MASK, 0x00);
		if (ret < 0)
			goto tsu8111_probe_fail;

		/* TI USB: don't deliver the Connect interrupt, so we don't
		 * get a double interrupt */
		ret = tsu8111_write_reg(client, TSU8111_REG_INT2_MASK, 0x20);
		if (ret < 0)
			goto tsu8111_probe_fail;

		tsu8111_read_reg(client, TSU8111_REG_MANSW1, &mansw1);
		usbsw->mansw = mansw1;

		ctrl &= ~INT_MASK;		/* unmask interrupt */
		if (usbsw->mansw)
			ctrl &= ~MANUAL_SWITCH;	/* manual switching mode */

		tsu8111_write_reg(client, TSU8111_REG_CTRL, ctrl);
	} else {
		printk("[TSU8111] Error!!!! No Type. Check dev ID(0x01 addr) ......\n");
	}



	ret = tsu8111_irq_init(usbsw);
	if (ret)
		goto tsu8111_probe_fail;

	ret = sysfs_create_group(&client->dev.kobj, &tsu8111_group);
	if (ret) {
		dev_err(&client->dev,
			"[TSU8111] Creating fsa9480 attribute group failed");
		goto tsu8111_probe_fail2;
	}

	/* device detection */
	tsu8111_detect_dev(usbsw, 1);
	isProbe = 0;

	/* reset the MUIC */
	if (muic_type == muicTypeFSA880) {
		tsu8111_write_reg(client, TSU8111_REG_CTRL, 0x04);
	} else {
		tsu8111_write_reg(client, TSU8111_REG_CTRL, 0x1E);
		/* set timing1 to 100ms */
		tsu8111_write_reg(client, TSU8111_REG_TIMING1, 0x1);
	}
       printk("[TSU8111] PROBE Done.\n");
       return 0;

tsu8111_probe_fail2:
       if (client->irq)
               free_irq(client->irq, NULL);
tsu8111_probe_fail:
       i2c_set_clientdata(client, NULL);
       kfree(usbsw);
       return ret;
}
Example #7
static DFBResult
stmfbdevUnlock (CoreSurfacePool       *pool,
                void                  *pool_data,
                void                  *pool_local,
                CoreSurfaceAllocation *allocation,
                void                  *alloc_data,
                CoreSurfaceBufferLock *lock)
{
  const STMfbdevPoolData           * const data = pool_data;
  const STMfbdevPoolLocalData      * const local = pool_local;
  const STMfbdevPoolAllocationData * const alloc = alloc_data;

  D_MAGIC_ASSERT (pool, CoreSurfacePool);
  D_MAGIC_ASSERT (data, STMfbdevPoolData);
  D_MAGIC_ASSERT (local, STMfbdevPoolLocalData);
  D_MAGIC_ASSERT (allocation, CoreSurfaceAllocation);
  D_MAGIC_ASSERT (alloc, STMfbdevPoolAllocationData);
  D_MAGIC_ASSERT (lock, CoreSurfaceBufferLock);

  D_DEBUG_AT (STMfbdev_SurfLock, "%s (%p)\n", __FUNCTION__, lock->buffer);

  (void) data;
  (void) local;
  (void) alloc;

#if D_DEBUG_ENABLED
  {
  /* heavy performance hit */
  char *accessor = _accessor_str (lock->accessor);
  char *access   = _access_str (lock->access);
  D_DEBUG_AT (STMfbdev_SurfLock, "  -> by %s for %s\n", accessor,  access);
  free (access);
  free (accessor);
  }
#endif

#if STGFX_DRIVER == 2
  if (unlikely (lock->buffer->format == DSPF_RGB32
                && lock->accessor != CSAID_GPU
                && lock->access & CSAF_WRITE))
    {
      /* if a non-GPU accessor did a write access to an RGB32 surface, we
         should make sure the alpha is forced to 0xff, as the BDisp doesn't
         support this format natively */
      STGFX2DriverData * const stdrv = dfb_gfxcard_get_driver_data ();
      STGFX2DeviceData * const stdev = dfb_gfxcard_get_device_data ();
      DFBRectangle      rect = { .x = 0, .y = 0,
                                 .w = lock->buffer->surface->config.size.w,
                                 .h = lock->buffer->surface->config.size.h };

      D_DEBUG_AT (STMfbdev_SurfLock, "  -> rgb32 write release!\n");
      dfb_gfxcard_lock (GDLF_WAIT);
      _bdisp_aq_RGB32_fixup (stdrv, stdev,
                             lock->phys, lock->pitch,
                             &rect);
      dfb_gfxcard_unlock ();
    }
#endif

  return DFB_OK;
}
Example #8
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->gtime = cputime_zero;
	p->utimescaled = cputime_zero;
	p->stimescaled = cputime_zero;
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif

	p->bts = NULL;

	p->stack_start = stack_start;

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;

	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_policy;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_io(clone_flags, p)))
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;

		if (clone_flags & CLONE_NEWPID) {
			retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
			if (retval < 0)
				goto bad_fork_free_pid;
		}
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if (current->nsproxy != p->nsproxy) {
		retval = ns_cgroup_clone(p, pid);
		if (retval)
			goto bad_fork_free_pid;
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
 	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		list_add_tail(&p->sibling, &p->real_parent->children);
		tracehook_finish_clone(p, clone_flags, trace);

		if (thread_group_leader(p)) {
			if (clone_flags & CLONE_NEWPID)
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			tty_kref_put(p->signal->tty);
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	perf_event_fork(p);
	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		__cleanup_signal(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
Example #9
/*
 * s_config subdev ops
 * The camera device must be re-initialized every time it is opened,
 * so there is no need to initialize it at probe time, except for
 * version checking.
 * NOTE: version checking is optional
 */
static int s5k5bbgx_s_config(struct v4l2_subdev *sd,
		int irq, void *platform_data)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct s5k5bbgx_state *state = to_state(sd);
	struct s5k5bbgx_platform_data *pdata;
#ifdef CONFIG_LOAD_FILE
	int err = 0;
#endif

	cam_dbg("E\n");

	state->initialized = 0;
	state->req_fps = state->set_fps = 8;
	state->sensor_mode = SENSOR_CAMERA;

	pdata = client->dev.platform_data;

	if (!pdata) {
		cam_err("no platform data\n");
		return -ENODEV;
	}

	/*
	 * Assign default format and resolution
	 * Use configured default information in platform data
	 * or without them, use default information in driver
	 */
	if (!(pdata->default_width && pdata->default_height)) {
		state->preview_frmsizes.width = DEFAULT_PREVIEW_WIDTH;
		state->preview_frmsizes.height = DEFAULT_PREVIEW_HEIGHT;

	} else {
		state->preview_frmsizes.width = pdata->default_width;
		state->preview_frmsizes.height = pdata->default_height;
	}
	state->capture_frmsizes.width = DEFAULT_CAPTURE_WIDTH;
	state->capture_frmsizes.height = DEFAULT_CAPTURE_HEIGHT;

	cam_dbg("preview_width: %d , preview_height: %d, "
		"capture_width: %d, capture_height: %d",
		state->preview_frmsizes.width, state->preview_frmsizes.height,
		state->capture_frmsizes.width, state->capture_frmsizes.height);

	state->req_fmt.width = state->preview_frmsizes.width;
	state->req_fmt.height = state->preview_frmsizes.height;
	if (!pdata->pixelformat)
		state->req_fmt.pixelformat = DEFAULT_FMT;
	else
		state->req_fmt.pixelformat = pdata->pixelformat;

#ifdef CONFIG_LOAD_FILE
	err = loadFile();
	if (unlikely(err < 0)) {
		cam_err("failed to load file ERR=%d\n", err);
		return err;
	}
#endif

	return 0;
}
Example #10
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
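The function interleaves two strategies: a non-sleeping atomic copy after faulting the user pages in, with the sleeping kmap() path as fallback. Reduced to a single chunk, the core looks like this sketch (using only calls already shown above):

static size_t sketch_copy_chunk(struct page *page, size_t offset,
				const char __user *buf, size_t len)
{
	void *kaddr = kmap_atomic(page);
	size_t left = __copy_from_user_inatomic(kaddr + offset, buf, len);
	kunmap_atomic(kaddr);

	if (left) {
		size_t done = len - left;

		/* the atomic copy faulted part-way; finish on the
		 * sleeping kmap() path */
		kaddr = kmap(page);
		left = __copy_from_user(kaddr + offset + done, buf + done, left);
		kunmap(page);
	}
	return len - left;	/* bytes actually copied */
}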
Example #11
/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & CLONE_NEWUSER) {
		if (clone_flags & CLONE_THREAD)
			return -EINVAL;
		/* hopefully this check will go away when userns support is
		 * complete
		 */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
				!capable(CAP_SETGID))
			return -EPERM;
	}

	/*
	 * We hope to recycle these flags after 2.6.26
	 */
	if (unlikely(clone_flags & CLONE_STOPPED)) {
		static int __read_mostly count = 100;

		if (count > 0 && printk_ratelimit()) {
			char comm[TASK_COMM_LEN];

			count--;
			printk(KERN_INFO "fork(): process `%s' used deprecated "
					"clone flags 0x%lx\n",
				get_task_comm(comm, current),
				clone_flags & CLONE_STOPPED);
		}
	}

	/*
	 * When called from kernel_thread, don't do user tracing stuff.
	 */
	if (likely(user_mode(regs)))
		trace = tracehook_prepare_clone(clone_flags);

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		trace_sched_process_fork(current, p);

		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		audit_finish_fork(p);
		tracehook_report_clone(regs, clone_flags, nr, p);

		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't gotten to tracehook_report_clone() yet.  Now we
		 * clear it and set the child going.
		 */
		p->flags &= ~PF_STARTING;

		if (unlikely(clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
			__set_task_state(p, TASK_STOPPED);
		} else {
			wake_up_new_task(p, clone_flags);
		}

		tracehook_report_clone_complete(trace, regs,
						clone_flags, nr, p);

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			tracehook_report_vfork_done(p, nr);
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}
Example #12
static bool
insert (Dwfl *dwfl, size_t i, GElf_Addr start, GElf_Addr end, int segndx)
{
  bool need_start = (i == 0 || dwfl->lookup_addr[i - 1] != start);
  bool need_end = (i >= dwfl->lookup_elts || dwfl->lookup_addr[i + 1] != end);
  size_t need = need_start + need_end;
  if (need == 0)
    return false;

  if (dwfl->lookup_alloc - dwfl->lookup_elts < need)
    {
      size_t n = dwfl->lookup_alloc == 0 ? 16 : dwfl->lookup_alloc * 2;
      GElf_Addr *naddr = realloc (dwfl->lookup_addr, sizeof naddr[0] * n);
      if (unlikely (naddr == NULL))
	return true;
      int *nsegndx = realloc (dwfl->lookup_segndx, sizeof nsegndx[0] * n);
      if (unlikely (nsegndx == NULL))
	{
	  if (naddr != dwfl->lookup_addr)
	    free (naddr);
	  return true;
	}
      dwfl->lookup_alloc = n;
      dwfl->lookup_addr = naddr;
      dwfl->lookup_segndx = nsegndx;

      if (dwfl->lookup_module != NULL)
	{
	  /* Make sure this array is big enough too.  */
	  Dwfl_Module **old = dwfl->lookup_module;
	  dwfl->lookup_module = realloc (dwfl->lookup_module,
					 sizeof dwfl->lookup_module[0] * n);
	  if (unlikely (dwfl->lookup_module == NULL))
	    {
	      free (old);
	      return true;
	    }
	}
    }

  if (unlikely (i < dwfl->lookup_elts))
    {
      const size_t move = dwfl->lookup_elts - i;
      memmove (&dwfl->lookup_addr[i + need], &dwfl->lookup_addr[i],
	       move * sizeof dwfl->lookup_addr[0]);
      memmove (&dwfl->lookup_segndx[i + need], &dwfl->lookup_segndx[i],
	       move * sizeof dwfl->lookup_segndx[0]);
      if (dwfl->lookup_module != NULL)
	memmove (&dwfl->lookup_module[i + need], &dwfl->lookup_module[i],
		 move * sizeof dwfl->lookup_module[0]);
    }

  if (need_start)
    {
      dwfl->lookup_addr[i] = start;
      dwfl->lookup_segndx[i] = segndx;
      if (dwfl->lookup_module != NULL)
	dwfl->lookup_module[i] = NULL;
      ++i;
    }
  else
    dwfl->lookup_segndx[i - 1] = segndx;

  if (need_end)
    {
      dwfl->lookup_addr[i] = end;
      dwfl->lookup_segndx[i] = -1;
      if (dwfl->lookup_module != NULL)
	dwfl->lookup_module[i] = NULL;
    }

  dwfl->lookup_elts += need;

  return false;
}
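The allocation logic above grows parallel arrays in lock-step and must leave them consistent if a later realloc fails. The core pattern as a standalone sketch (not elfutils API):

#include <stdlib.h>

/* Grow two parallel arrays to n elements together; on failure the
   caller's pointers still reference valid (possibly grown) storage.  */
static int grow_parallel(void **a, size_t esz_a,
			 void **b, size_t esz_b, size_t n)
{
  void *na = realloc (*a, esz_a * n);
  if (na == NULL)
    return -1;
  *a = na;			/* first array grown successfully */
  void *nb = realloc (*b, esz_b * n);
  if (nb == NULL)
    return -1;			/* *a stays valid, just oversized */
  *b = nb;
  return 0;
}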
Example #13
static bool
reify_segments (Dwfl *dwfl)
{
  int hint = -1;
  int highest = -1;
  bool fixup = false;
  for (Dwfl_Module *mod = dwfl->modulelist; mod != NULL; mod = mod->next)
    if (! mod->gc)
      {
	const GElf_Addr start = __libdwfl_segment_start (dwfl, mod->low_addr);
	const GElf_Addr end = __libdwfl_segment_end (dwfl, mod->high_addr);
	bool resized = false;

	int idx = lookup (dwfl, start, hint);
	if (unlikely (idx < 0))
	  {
	    /* Module starts below any segment.  Insert a low one.  */
	    if (unlikely (insert (dwfl, 0, start, end, -1)))
	      return true;
	    idx = 0;
	    resized = true;
	  }
	else if (dwfl->lookup_addr[idx] > start)
	  {
	    /* The module starts in the middle of this segment.  Split it.  */
	    if (unlikely (insert (dwfl, idx + 1, start, end,
				  dwfl->lookup_segndx[idx])))
	      return true;
	    ++idx;
	    resized = true;
	  }
	else if (dwfl->lookup_addr[idx] < start)
	  {
	    /* The module starts past the end of this segment.
	       Add a new one.  */
	    if (unlikely (insert (dwfl, idx + 1, start, end, -1)))
	      return true;
	    ++idx;
	    resized = true;
	  }

	if ((size_t) idx + 1 < dwfl->lookup_elts
	    && end < dwfl->lookup_addr[idx + 1])
	  {
	    /* The module ends in the middle of this segment.  Split it.  */
	    if (unlikely (insert (dwfl, idx + 1,
				  end, dwfl->lookup_addr[idx + 1], -1)))
	      return true;
	    resized = true;
	  }

	if (dwfl->lookup_module == NULL)
	  {
	    dwfl->lookup_module = calloc (dwfl->lookup_alloc,
					  sizeof dwfl->lookup_module[0]);
	    if (unlikely (dwfl->lookup_module == NULL))
	      return true;
	  }

	/* Cache a backpointer in the module.  */
	mod->segment = idx;

	/* Put MOD in the table for each segment that's inside it.  */
	do
	  dwfl->lookup_module[idx++] = mod;
	while ((size_t) idx < dwfl->lookup_elts
	       && dwfl->lookup_addr[idx] < end);
	assert (dwfl->lookup_module[mod->segment] == mod);

	if (resized && idx - 1 >= highest)
	  /* Expanding the lookup tables invalidated backpointers
	     we've already stored.  Reset those ones.  */
	  fixup = true;

	highest = idx - 1;
	hint = (size_t) idx < dwfl->lookup_elts ? idx : -1;
      }

  if (fixup)
    /* Reset backpointer indices invalidated by table insertions.  */
    for (size_t idx = 0; idx < dwfl->lookup_elts; ++idx)
      if (dwfl->lookup_module[idx] != NULL)
	dwfl->lookup_module[idx]->segment = idx;

  return false;
}
Example #14
// returns the number of bytes actually read
yf_s32_t  yf_cb_space_read(yf_circular_buf_t* cb, yf_s32_t bytes
                , char*** rbufs, yf_s32_t* roffset, yf_int_t flags)
{
        char** b = NULL;
        yf_s32_t  rbytes = 0;
        yf_s16_t  rbsize = 0;
        yf_s16_t  rindex = cb->cursor_index, tindex = cb->tail_index;
        yf_s32_t  rest_rsize = yf_circular_buf_rest_rsize(cb);

        CHECK_RV(bytes <= 0, bytes);

        if (*rbufs == NULL)
                *rbufs = _yf_cb_bufs_alloc(cb, bytes);
        b = *rbufs;
        *roffset = cb->cursor_offset;

        _yf_cb_preprocess_cursor(rindex, *roffset, cb);

        if (unlikely(rest_rsize <= bytes))
        {
                rbytes = rest_rsize;
                
                rbsize = yf_cb_diff(tindex, rindex, cb->buf_used, YF_CBR) + 1;

                yf_cb_space_gread(cb, b, rindex, rbsize);
                
                if (flags != YF_READ_PEEK)
                {
                        cb->cursor_index = tindex;
                        cb->cursor_offset = cb->tail_offset;
                }
                return rbytes;
        }

        *b = cb->buf[rindex];
        rbytes = cb->buf_size - *roffset;
        if (likely(rbytes >= bytes))
        {
                if (flags != YF_READ_PEEK)
                {
                        cb->cursor_index = rindex;
                        cb->cursor_offset = *roffset + bytes; // may equal buf_size here
                }
                return bytes;
        }
        ++b;
        assert(tindex != rindex);

        // remaining bytes to read
        rbytes = bytes - rbytes;
        rbsize = yf_circular_buf_bsize(cb, rbytes);

        if (flags != YF_READ_PEEK)
        {
                cb->cursor_index = yf_cb_add(rindex, rbsize, cb->buf_used);
                cb->cursor_offset = yf_cb_rest_mod(rbytes, cb->buf_size);
        }        

        rindex = yf_cb_increase(rindex, cb->buf_used);
        yf_cb_space_gread(cb, b, rindex, rbsize);
        
        return  bytes;
}
Example #15
File: rx.c Project: Abioy/kasan
/*
 * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler
 *
 * Handles the actual data of the Rx packet from the fw
 */
int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
		       struct iwl_device_cmd *cmd)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_rx_status *rx_status;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_phy_info *phy_info;
	struct iwl_rx_mpdu_res_start *rx_res;
	struct ieee80211_sta *sta;
	struct sk_buff *skb;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;
	u32 rx_pkt_status;
	u8 crypt_len = 0;

	phy_info = &mvm->last_phy_info;
	rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
	hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
	len = le16_to_cpu(rx_res->byte_count);
	rx_pkt_status = le32_to_cpup((__le32 *)
		(pkt->data + sizeof(*rx_res) + len));

	/* Don't use dev_alloc_skb(); we'll have enough headroom once
	 * the ieee80211_hdr is pulled.
	 */
	skb = alloc_skb(128, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(mvm, "alloc_skb failed\n");
		return 0;
	}

	rx_status = IEEE80211_SKB_RXCB(skb);

	/*
	 * drop the packet if it has failed being decrypted by HW
	 */
	if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status,
					 &crypt_len)) {
		IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
			       rx_pkt_status);
		kfree_skb(skb);
		return 0;
	}

	if (unlikely(phy_info->cfg_phy_cnt > 20)) {
		IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n",
			       phy_info->cfg_phy_cnt);
		kfree_skb(skb);
		return 0;
	}

	/*
	 * Keep packets with CRC errors (and with overrun) for monitor mode
	 * (otherwise the firmware discards them) but mark them as bad.
	 */
	if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	}

	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_info->rate_n_flags);

	/* rx_status carries information about the packet to mac80211 */
	rx_status->mactime = le64_to_cpu(phy_info->timestamp);
	rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp);
	rx_status->band =
		(phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
	rx_status->freq =
		ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
					       rx_status->band);
	/*
	 * TSF as indicated by the fw is at INA time, but mac80211 expects the
	 * TSF at the beginning of the MPDU.
	 */
	/*rx_status->flag |= RX_FLAG_MACTIME_MPDU;*/

	iwl_mvm_get_signal_strength(mvm, phy_info, rx_status);

	IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status->signal,
			      (unsigned long long)rx_status->mactime);

	rcu_read_lock();
	/*
	 * We have tx blocked stations (with CS bit). If we heard frames from
	 * a blocked station on a new channel we can TX to it again.
	 */
	if (unlikely(mvm->csa_tx_block_bcn_timeout)) {
		sta = ieee80211_find_sta(
			rcu_dereference(mvm->csa_tx_blocked_vif), hdr->addr2);
		if (sta)
			iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false);
	}

	/* This is fine since we don't support multiple AP interfaces */
	sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
	if (sta) {
		struct iwl_mvm_sta *mvmsta;
		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
	}

	rcu_read_unlock();

	/* set the preamble flag if appropriate */
	if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
		rx_status->flag |= RX_FLAG_SHORTPRE;

	if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
		/*
		 * We know which subframes of an A-MPDU belong
		 * together since we get a single PHY response
		 * from the firmware for all of them
		 */
		rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
		rx_status->ampdu_reference = mvm->ampdu_ref;
	}

	/* Set up the HT phy flags */
	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_20:
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		rx_status->flag |= RX_FLAG_40MHZ;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		rx_status->vht_flag |= RX_VHT_FLAG_80MHZ;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		rx_status->vht_flag |= RX_VHT_FLAG_160MHZ;
		break;
	}
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status->flag |= RX_FLAG_SHORT_GI;
	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
		rx_status->flag |= RX_FLAG_HT_GF;
	if (rate_n_flags & RATE_MCS_LDPC_MSK)
		rx_status->flag |= RX_FLAG_LDPC;
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
				RATE_MCS_STBC_POS;
		rx_status->flag |= RX_FLAG_HT;
		rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
		rx_status->flag |= stbc << RX_FLAG_STBC_SHIFT;
	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
Example #16
static int s5k5bbgx_set_brightness(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
	struct s5k5bbgx_state *state = to_state(sd);
	int err = -EINVAL;

	cam_dbg("E\n");

	if (state->check_dataline)
		return 0;

#ifdef CONFIG_LOAD_FILE
	switch (ctrl->value) {
	case EV_MINUS_4:
		err = s5k5bbgx_write_regs_from_sd(sd, "s5k5bbgx_bright_m4");
		break;
	case EV_MINUS_3:
		err = s5k5bbgx_write_regs_from_sd(sd, "s5k5bbgx_bright_m3");
		break;
	case EV_MINUS_2:
		err = s5k5bbgx_write_regs_from_sd(sd, "s5k5bbgx_bright_m2");
		break;
	case EV_MINUS_1:
		err = s5k5bbgx_write_regs_from_sd(sd, "s5k5bbgx_bright_m1");
		break;
	case EV_DEFAULT:
		err = s5k5bbgx_write_regs_from_sd(sd,
			"s5k5bbgx_bright_default");
		break;
	case EV_PLUS_1:
		err = s5k5bbgx_write_regs_from_sd(sd, "s5k5bbgx_bright_p1");
		break;
	case EV_PLUS_2:
		err = s5k5bbgx_write_regs_from_sd(sd, "s5k5bbgx_bright_p2");
		break;
	case EV_PLUS_3:
		err = s5k5bbgx_write_regs_from_sd(sd, "s5k5bbgx_bright_p3");
		break;
	case EV_PLUS_4:
		err = s5k5bbgx_write_regs_from_sd(sd, "s5k5bbgx_bright_p4");
		break;
	default:
		cam_err("ERR: Invalid brightness(%d)\n", ctrl->value);
		return err;
	}
#else
	switch (ctrl->value) {
	case EV_MINUS_4:
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_bright_m4, \
			sizeof(s5k5bbgx_bright_m4) / \
			sizeof(s5k5bbgx_bright_m4[0]));
		break;
	case EV_MINUS_3:
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_bright_m3, \
			sizeof(s5k5bbgx_bright_m3) / \
			sizeof(s5k5bbgx_bright_m3[0]));

		break;
	case EV_MINUS_2:
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_bright_m2, \
			sizeof(s5k5bbgx_bright_m2) / \
			sizeof(s5k5bbgx_bright_m2[0]));
		break;
	case EV_MINUS_1:
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_bright_m1, \
			sizeof(s5k5bbgx_bright_m1) / \
			sizeof(s5k5bbgx_bright_m1[0]));
		break;
	case EV_DEFAULT:
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_bright_default, \
			sizeof(s5k5bbgx_bright_default) / \
			sizeof(s5k5bbgx_bright_default[0]));
		break;
	case EV_PLUS_1:
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_bright_p1, \
			sizeof(s5k5bbgx_bright_p1) / \
			sizeof(s5k5bbgx_bright_p1[0]));
		break;
	case EV_PLUS_2:
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_bright_p2, \
			sizeof(s5k5bbgx_bright_p2) / \
			sizeof(s5k5bbgx_bright_p2[0]));
		break;
	case EV_PLUS_3:
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_bright_p3, \
			sizeof(s5k5bbgx_bright_p3) / \
			sizeof(s5k5bbgx_bright_p3[0]));
		break;
	case EV_PLUS_4:
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_bright_p4, \
			sizeof(s5k5bbgx_bright_p4) / \
			sizeof(s5k5bbgx_bright_p4[0]));
		break;
	default:
		cam_err("ERR: invalid brightness(%d)\n", ctrl->value);
		return err;
	}
#endif

	if (unlikely(err < 0)) {
		cam_err("ERR: i2c_write for set brightness\n");
		return -EIO;
	}

	return 0;
}
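A side note on the repeated sizeof(x) / sizeof(x[0]) expressions above: that is essentially what the kernel's ARRAY_SIZE() macro expands to, so each case could be written more compactly, e.g.:

	err = s5k5bbgx_write_regs(sd, s5k5bbgx_bright_m4,
				  ARRAY_SIZE(s5k5bbgx_bright_m4));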
Example #17
static netdev_tx_t sam4e_netdev_start_xmit(
		struct sk_buff *skb, struct net_device *netdev)
{
	struct sam4e_req *req;
	struct usb_device *udev;
	struct sam4e_usb_handle *sam4e_usb_hnd = netdev_priv(netdev);
	struct sam4e_usb *dev = sam4e_usb_hnd->sam4e_dev;
	struct net_device_stats *stats = &netdev->stats;
	int result;
	struct can_frame *cf = (struct can_frame *)skb->data;
	struct urb *urb;
	size_t size = sizeof(struct sam4e_req) +
			sizeof(struct sam4e_can_full_write);
	struct sam4e_can_full_write *cfw;

	if (can_dropped_invalid_skb(netdev, skb)) {
		pr_err("Dropping invalid can frame");
		return NETDEV_TX_OK;
	}

	udev = dev->udev;
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		pr_err("No memory left for URBs\n");
		goto nomem;
	}

	req = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC,
			&urb->transfer_dma);
	if (!req) {
		pr_err("No memory left for USB buffer\n");
		usb_free_urb(urb);
		goto nomem;
	}

	/* Fill message data */
	cfw = (struct sam4e_can_full_write *)&req->data;
	req->cmd = CMD_CAN_FULL_WRITE;
	req->len = sizeof(struct sam4e_req) +
			sizeof(struct sam4e_can_full_write);
	req->seq = atomic_inc_return(&dev->msg_seq);
	cfw->can = sam4e_usb_hnd->owner_netdev_index;
	cfw->mailbox = 0;
	cfw->prio = 0;
	cfw->mid = cf->can_id;
	cfw->dlc = cf->can_dlc;
	memcpy(cfw->data, cf->data, 8);

	LOGNI(">%x %2d [%d] send frame [%d] %x %d %x %x %x %x %x %x %x %x\n",
			req->cmd, req->len, req->seq,
			atomic_read(&dev->active_tx_urbs), cfw->mid,
			cfw->dlc, cfw->data[0], cfw->data[1], cfw->data[2],
			cfw->data[3], cfw->data[4], cfw->data[5],
			cfw->data[6], cfw->data[7]);

	usb_fill_bulk_urb(urb, dev->udev,
			usb_sndbulkpipe(dev->udev, BULK_OUT_EP), req,
			size, sam4e_usb_write_bulk_callback, netdev);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &dev->tx_submitted);
	atomic_inc(&dev->active_tx_urbs);

	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(result)) {
		usb_unanchor_urb(urb);
		usb_free_coherent(dev->udev, size, req, urb->transfer_dma);

		atomic_dec(&dev->active_tx_urbs);
		if (result == -ENODEV) {
			netif_device_detach(netdev);
		} else {
			pr_err("failed tx_urb %d\n", result);
			stats->tx_dropped++;
		}
	} else {
		/* Put on hold tx path */
		if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS) {
			LOGNI("Too many outstanding requests (%d). Stop queue",
					atomic_read(&dev->active_tx_urbs));
			atomic_inc(&dev->netif_queue_stop);
			if (dev->netdev1)
				netif_stop_queue(dev->netdev1);
			if (dev->netdev2)
				netif_stop_queue(dev->netdev2);
		}
	}
	dev_kfree_skb(skb);
	usb_free_urb(urb);
	return NETDEV_TX_OK;

nomem:
	dev_kfree_skb(skb);
	stats->tx_dropped++;

	return NETDEV_TX_OK;
}
Example #18
static int s5k5bbgx_set_blur(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
	struct s5k5bbgx_state *state = to_state(sd);
	int err = -EINVAL;

	cam_dbg("E\n");

	if (state->check_dataline)
		return 0;

#ifdef CONFIG_LOAD_FILE
	switch (ctrl->value) {
	case BLUR_LEVEL_0:
		err = s5k5bbgx_write_regs_from_sd(sd,
				"s5k5bbgx_vt_pretty_default");
		break;
	case BLUR_LEVEL_1:
		err = s5k5bbgx_write_regs_from_sd(sd, "s5k5bbgx_vt_pretty_1");
		break;
	case BLUR_LEVEL_2:
		err = s5k5bbgx_write_regs_from_sd(sd, "s5k5bbgx_vt_pretty_2");
		break;
	case BLUR_LEVEL_3:
	case BLUR_LEVEL_MAX:
		err = s5k5bbgx_write_regs_from_sd(sd, "s5k5bbgx_vt_pretty_3");
		break;
	default:
		cam_err("ERR: Invalid blur(%d)\n", ctrl->value);
		return err;
	}
#else
	switch (ctrl->value) {
	case BLUR_LEVEL_0:
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_vt_pretty_default, \
			sizeof(s5k5bbgx_vt_pretty_default) / \
			sizeof(s5k5bbgx_vt_pretty_default[0]));
		break;
	case BLUR_LEVEL_1:
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_vt_pretty_1, \
			sizeof(s5k5bbgx_vt_pretty_1) / \
			sizeof(s5k5bbgx_vt_pretty_1[0]));
		break;
	case BLUR_LEVEL_2:
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_vt_pretty_2, \
			sizeof(s5k5bbgx_vt_pretty_2) / \
			sizeof(s5k5bbgx_vt_pretty_2[0]));
		break;
	case BLUR_LEVEL_3:
	case BLUR_LEVEL_MAX:
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_vt_pretty_3, \
			sizeof(s5k5bbgx_vt_pretty_3) / \
			sizeof(s5k5bbgx_vt_pretty_3[0]));
		break;
	default:
		cam_err("ERR: Invalid blur(%d)\n", ctrl->value);
		return err;
	}
#endif

	if (unlikely(err < 0)) {
		cam_err("ERR: i2c_write for set blur\n");
		return -EIO;
	}

	return 0;
}
Example #19
static DFBResult
stmfbdevAllocateBuffer (CoreSurfacePool       *pool,
                        void                  *pool_data,
                        void                  *pool_local,
                        CoreSurfaceBuffer     *buffer,
                        CoreSurfaceAllocation *allocation,
                        void                  *alloc_data)
{
  CoreSurface                *surface;
  STMfbdevPoolData           * const data  = pool_data;
  STMfbdevPoolLocalData      * const local = pool_local;
  STMfbdevPoolAllocationData * const alloc = alloc_data;
  DFBResult                   ret;
  Chunk                      *chunk;

  D_DEBUG_AT (STMfbdev_Surfaces, "%s (%p)\n", __FUNCTION__, buffer);

  D_MAGIC_ASSERT (pool, CoreSurfacePool);
  D_MAGIC_ASSERT (data, STMfbdevPoolData);
  D_MAGIC_ASSERT (local, STMfbdevPoolLocalData);
  D_MAGIC_ASSERT (buffer, CoreSurfaceBuffer);
  D_MAGIC_ASSERT (allocation, CoreSurfaceAllocation);

  surface = buffer->surface;
  D_MAGIC_ASSERT (surface, CoreSurface);

  ret = dfb_surfacemanager_allocate (local->core, data->manager, buffer,
                                     allocation, &chunk);
  if (ret)
    return ret;

  D_MAGIC_ASSERT (chunk, Chunk);

  alloc->chunk = chunk;

  D_DEBUG_AT (STMfbdev_Surfaces,
              "  -> offset 0x%.8x (%u), format: %s, pitch %d, size %d\n",
              chunk->offset, chunk->offset,
              dfb_pixelformat_name (buffer->format), chunk->pitch,
              chunk->length);

  allocation->size   = chunk->length;
  allocation->offset = chunk->offset;

#if STGFX_DRIVER == 2
  if (unlikely (buffer->format == DSPF_RGB32))
    {
      /* for RGB32, we need to set the alpha to 0xff */
      STGFX2DriverData * const stdrv = dfb_gfxcard_get_driver_data ();
      STGFX2DeviceData * const stdev = dfb_gfxcard_get_device_data ();
      DFBRectangle      rect = { .x = 0, .y = 0,
                                 .w = buffer->surface->config.size.w,
                                 .h = buffer->surface->config.size.h };

      D_WARN ("BDisp/Surfaces: RGB32 support is experimental and slow!");
      if (dfb_system_type () != CORE_STMFBDEV)
        D_WARN ("BDisp/Surfaces: RGB32 is only supported in STMfbdev system!");

      D_DEBUG_AT (STMfbdev_Surfaces, "  -> rgb32 allocation!\n");
      dfb_gfxcard_lock (GDLF_WAIT);

      _bdisp_aq_RGB32_init (stdrv, stdev,
                            data->physical + chunk->offset, chunk->pitch,
                            &rect);
      dfb_gfxcard_unlock ();
    }
#endif

  D_MAGIC_SET (alloc, STMfbdevPoolAllocationData);

  return DFB_OK;
}
Example #20
static inline int s5k5bbgx_read(struct i2c_client *client,
	u16 subaddr, u16 *data)
{
	u8 buf[2];
	int err = 0;
	struct i2c_msg msg = {
		.addr = client->addr,
		.flags = 0,
		.len = 2,
		.buf = buf,
	};

	*(u16 *)buf = cpu_to_be16(subaddr);

	/* printk("\n\n\n%X %X\n\n\n", buf[0], buf[1]);*/

	err = i2c_transfer(client->adapter, &msg, 1);
	if (unlikely(err < 0))
		cam_err("ERR: %d register read fail\n", __LINE__);

	msg.flags = I2C_M_RD;

	err = i2c_transfer(client->adapter, &msg, 1);
	if (unlikely(err < 0))
		cam_err("ERR: %d register read fail\n", __LINE__);

	/*printk("\n\n\n%X %X\n\n\n", buf[0], buf[1]);*/
	*data = ((buf[0] << 8) | buf[1]);

	return err;
}

/*
 * s5k6aafx sensor i2c write routine
 * <start>--<Device address><2Byte Subaddr><2Byte Value>--<stop>
 */
#ifdef CONFIG_LOAD_FILE
static int loadFile(void)
{
	struct file *fp = NULL;
	struct test *nextBuf = NULL;

	u8 *nBuf = NULL;
	size_t file_size = 0, max_size = 0, testBuf_size = 0;
	size_t nread = 0;
	s32 check = 0, starCheck = 0;
	s32 tmp_large_file = 0;
	s32 i = 0;
	int ret = 0;
	loff_t pos;

	mm_segment_t fs = get_fs();
	set_fs(get_ds());

	BUG_ON(testBuf);

	fp = filp_open("/mnt/sdcard/external_sd/s5k5bbgx_setfile.h", O_RDONLY, 0);
	if (IS_ERR(fp)) {
		cam_err("file open error\n");
		return PTR_ERR(fp);
	}

	file_size = (size_t) fp->f_path.dentry->d_inode->i_size;
	max_size = file_size;

	cam_dbg("file_size = %d\n", file_size);

	nBuf = kmalloc(file_size, GFP_ATOMIC);
	if (nBuf == NULL) {
		cam_dbg("Fail to 1st get memory\n");
		nBuf = vmalloc(file_size);
		if (nBuf == NULL) {
			cam_err("ERR: nBuf Out of Memory\n");
			ret = -ENOMEM;
			goto error_out;
		}
		tmp_large_file = 1;
	}

	testBuf_size = sizeof(struct test) * file_size;
	if (tmp_large_file) {
		testBuf = (struct test *)vmalloc(testBuf_size);
		large_file = 1;
	} else {
		testBuf = kmalloc(testBuf_size, GFP_ATOMIC);
		if (testBuf == NULL) {
			cam_dbg("Fail to get mem(%d bytes)\n", testBuf_size);
			testBuf = (struct test *)vmalloc(testBuf_size);
			large_file = 1;
		}
	}
	if (testBuf == NULL) {
		cam_err("ERR: Out of Memory\n");
		ret = -ENOMEM;
		goto error_out;
	}

	pos = 0;
	memset(nBuf, 0, file_size);
	memset(testBuf, 0, file_size * sizeof(struct test));

	nread = vfs_read(fp, (char __user *)nBuf, file_size, &pos);
	if (nread != file_size) {
		cam_err("failed to read file, nread = %zu\n", nread);
		ret = -1;
		goto error_out;
	}

	i = max_size;

	printk("i = %d\n", i);

	while (i) {
		/* index into nBuf instead of advancing the pointer, so the
		 * original allocation can still be freed at error_out */
		testBuf[max_size - i].data = nBuf[max_size - i];
		if (i != 1) {
			testBuf[max_size - i].nextBuf = &testBuf[max_size - i + 1];
		} else {
			testBuf[max_size - i].nextBuf = NULL;
			break;
		}
		i--;
	}

	i = max_size;
	nextBuf = &testBuf[0];

#if 1
	while (i - 1) {
		if (!check && !starCheck) {
			if (testBuf[max_size - i].data == '/') {
				if (testBuf[max_size-i].nextBuf != NULL) {
					if (testBuf[max_size-i].nextBuf->data
								== '/') {
						check = 1;/* when find '//' */
						i--;
					} else if (testBuf[max_size-i].nextBuf->data == '*') {
						starCheck = 1;/* when find '/ *' */
						i--;
					}
				} else
					break;
			}
			if (!check && !starCheck) {
				/* ignore '\t' */
				if (testBuf[max_size - i].data != '\t') {
					nextBuf->nextBuf = &testBuf[max_size-i];
					nextBuf = &testBuf[max_size - i];
				}
			}
		} else if (check && !starCheck) {
			if (testBuf[max_size - i].data == '/') {
				if (testBuf[max_size-i].nextBuf != NULL) {
					if (testBuf[max_size-i].nextBuf->data == '*') {
						starCheck = 1; /* when find '/ *' */
						check = 0;
						i--;
					}
				} else
					break;
			}

			 /* when find '\n' */
			if (testBuf[max_size - i].data == '\n' && check) {
				check = 0;
				nextBuf->nextBuf = &testBuf[max_size - i];
				nextBuf = &testBuf[max_size - i];
			}

		} else if (!check && starCheck) {
			if (testBuf[max_size - i].data == '*') {
				if (testBuf[max_size-i].nextBuf != NULL) {
					if (testBuf[max_size-i].nextBuf->data == '/') {
						starCheck = 0; /* when find '* /' */
						i--;
					}
				} else
					break;
			}
		}

		i--;

		if (i < 2) {
			nextBuf = NULL;
			break;
		}

		if (testBuf[max_size - i].nextBuf == NULL) {
			nextBuf = NULL;
			break;
		}
	}
#endif

#if 0 // for print
	printk("i = %d\n", i);
	nextBuf = &testBuf[0];
	while (1) {
		//printk("sdfdsf\n");
		if (nextBuf->nextBuf == NULL)
			break;
		printk("%c", nextBuf->data);
		nextBuf = nextBuf->nextBuf;
	}
#endif

error_out:
	set_fs(fs);	/* restore the old address limit on every exit path */

	if (nBuf)
		tmp_large_file ? vfree(nBuf) : kfree(nBuf);
	if (fp)
		filp_close(fp, current->files);
	return ret;
}
#endif
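loadFile() above and s5k5bbgx_write_regs_from_sd() below walk a singly linked list of parsed characters whose type is defined elsewhere in the driver. A minimal sketch of what the node type and its globals plausibly look like, inferred from the usage above (the exact definition is not part of this excerpt):

/* Assumed shape of the parser node: one character from the setfile plus
 * a link to the next significant character; loadFile() rebuilds the
 * nextBuf chain to skip comments and tabs.
 */
struct test {
	u8 data;
	struct test *nextBuf;
};

static struct test *testBuf;	/* backing array allocated by loadFile() */
static s32 large_file;		/* nonzero when testBuf came from vmalloc() */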

static inline int s5k5bbgx_write(struct i2c_client *client,
		u32 packet)
{
	u8 buf[4];
	int err = 0, retry_count = 5;

	struct i2c_msg msg = {
		.addr	= client->addr,
		.flags	= 0,
		.buf	= buf,
		.len	= 4,
	};

	if (!client->adapter) {
		cam_err("ERR - can't search i2c client adapter\n");
		return -EIO;
	}

	while (retry_count--) {
		*(u32 *)buf = cpu_to_be32(packet);
		err = i2c_transfer(client->adapter, &msg, 1);
		if (likely(err == 1))
			break;
		mdelay(10);
	}

	if (unlikely(err < 0)) {
		cam_err("ERR - 0x%08x write failed err=%d\n",
				(u32)packet, err);
		return err;
	}

	return (err != 1) ? -1 : 0;
}
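s5k5bbgx_write() expects the 2-byte subaddress and 2-byte value already packed into one u32, matching the <2Byte Subaddr><2Byte Value> framing noted earlier; cpu_to_be32() then puts the subaddress bytes on the wire first. A small illustrative helper (the helper name and the register/value pair are made up, not from the driver):

/* Hypothetical packing helper for the u32 "packet" consumed by
 * s5k5bbgx_write(): subaddress in the high 16 bits, value in the low.
 */
static inline u32 s5k5bbgx_pack(u16 subaddr, u16 value)
{
	return ((u32)subaddr << 16) | value;
}

/* e.g.: s5k5bbgx_write(client, s5k5bbgx_pack(0x0F12, 0x0001)); */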

#ifdef CONFIG_LOAD_FILE
static int s5k5bbgx_write_regs_from_sd(struct v4l2_subdev *sd, u8 s_name[])
{

	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct test *tempData = NULL;

	int ret = -EAGAIN;
	u32 temp;
	u32 delay = 0;
	u8 data[12];	/* "0x" + up to 10 chars + NUL terminator */
	s32 searched = 0;
	size_t size = strlen(s_name);
	s32 i;

	cam_dbg("E size = %d, string = %s\n", size, s_name);
	tempData = &testBuf[0];
	while (!searched) {
		searched = 1;
		for (i = 0; i < size; i++) {
			if (tempData->data != s_name[i]) {
				searched = 0;
				break;
			}
			tempData = tempData->nextBuf;
		}
		tempData = tempData->nextBuf;
	}
	/* structure is get..*/

	while (1) {
		if (tempData->data == '{')
			break;
		else
			tempData = tempData->nextBuf;
	}

	while (1) {
		searched = 0;
		while (1) {
			if (tempData->data == 'x') {
				/* collect 10 chars ('x' + 8 hex digits + 1
				 * trailing) and NUL-terminate the buffer so
				 * simple_strtoul() cannot read past it */
				data[0] = '0';
				for (i = 1; i < 11; i++) {
					data[i] = tempData->data;
					tempData = tempData->nextBuf;
				}
				data[11] = '\0';
				/*cam_dbg("%s\n", data);*/
				temp = simple_strtoul(data, NULL, 16);
				break;
			} else if (tempData->data == '}') {
				searched = 1;
				break;
			} else
				tempData = tempData->nextBuf;

			if (tempData->nextBuf == NULL)
				return -1;
		}

		if (searched)
			break;

		if ((temp & S5K5BBGX_DELAY) == S5K5BBGX_DELAY) {
			delay = temp & 0xFFFF;
			cam_info("line(%d):delay(0x%x, %d)\n", __LINE__,
							delay, delay);
			msleep(delay);
			continue;
		}

		ret = s5k5bbgx_write(client, temp);

		/* In error circumstances, give it a second shot */
		if (unlikely(ret)) {
			dev_info(&client->dev,
					"s5k5bbgx i2c retry one more time\n");
			ret = s5k5bbgx_write(client, temp);

			/* Give it one more shot */
			if (unlikely(ret)) {
				dev_info(&client->dev,
						"s5k5bbgx i2c retry twice\n");
				ret = s5k5bbgx_write(client, temp);
			}
		}
	}

	return ret;
}
#endif

/*
 * Read a register.
 */
static int s5k5bbgx_read_reg(struct v4l2_subdev *sd,
		u16 page, u16 addr, u16 *val)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	u32 page_cmd = (0x002C << 16) | page;
	u32 addr_cmd = (0x002E << 16) | addr;
	int err = 0;

	cam_dbg("page_cmd=0x%X, addr_cmd=0x%X\n", page_cmd, addr_cmd);

	err = s5k5bbgx_write(client, page_cmd);
	CHECK_ERR(err);
	err = s5k5bbgx_write(client, addr_cmd);
	CHECK_ERR(err);
	err = s5k5bbgx_read(client, 0x0F12, val);
	CHECK_ERR(err);

	return 0;
}

/* program multiple registers */
static int s5k5bbgx_write_regs(struct v4l2_subdev *sd,
		const u32 *packet, u32 num)
{
	struct s5k5bbgx_state *state = to_state(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int ret = -EAGAIN;
	u32 temp = 0;
	u16 delay = 0;

	while (num--) {
		temp = *packet++;

		if ((temp & S5K5BBGX_DELAY) == S5K5BBGX_DELAY) {
			delay = temp & 0xFFFF;
			cam_dbg("line(%d):delay(0x%x):delay(%d)\n",
						__LINE__, delay, delay);
			msleep(delay);
			continue;
		}

		ret = s5k5bbgx_write(client, temp);

		/* In error circumstances,
		 * give it a second shot.
		 */
		if (unlikely(ret)) {
			cam_warn("i2c retry one more time\n");
			ret = s5k5bbgx_write(client, temp);

			/* Give it one more shot */
			if (unlikely(ret)) {
				cam_warn("i2c retry twice\n");
				ret = s5k5bbgx_write(client, temp);
				break;
			}
		}
#ifdef S5K5BBGX_USLEEP
		if (unlikely(state->vt_mode))
			if (!(num % 200))
				s5k5bbgx_usleep(3);
#endif
	}

	if (unlikely(ret < 0)) {
		cam_err("fail to write registers!!\n");
		return -EIO;
	}

	return 0;
}

static int s5k5bbgx_get_exif(struct v4l2_subdev *sd)
{
	struct s5k5bbgx_state *state = to_state(sd);
	u16 iso_gain_table[] = {10, 18, 23, 28};
	u16 iso_table[] = {0, 50, 100, 200, 400};
	u16 gain = 0, val = 0;
	s32 index = 0;

	state->exif.shutter_speed = 0;
	state->exif.iso = 0;

	/* Get shutter speed */
	s5k5bbgx_read_reg(sd, REG_PAGE_SHUTTER, REG_ADDR_SHUTTER, &val);
	if (val >= 400)	/* avoid a divide-by-zero on tiny exposure values */
		state->exif.shutter_speed = 1000 / (val / 400);
	cam_dbg("val = %d\n", val);

	/* Get ISO */
	val = 0;
	s5k5bbgx_read_reg(sd, REG_PAGE_ISO, REG_ADDR_ISO, &val);
	cam_dbg("val = %d\n", val);
	gain = val * 10 / 256;
	/* map the measured gain (x10) onto an ISO bucket */
	for (index = 0; index < ARRAY_SIZE(iso_gain_table); index++) {
		if (gain < iso_gain_table[index])
			break;
	}
	state->exif.iso = iso_table[index];

	cam_dbg("gain=%d, Shutter speed=%d, ISO=%d\n",
		gain, state->exif.shutter_speed, state->exif.iso);
	return 0;
}

static int s5k5bbgx_check_dataline(struct v4l2_subdev *sd, s32 val)
{
	int err = 0;

	cam_info("DTP %s\n", val ? "ON" : "OFF");

#ifdef CONFIG_LOAD_FILE
	if (val)
		err = s5k5bbgx_write_regs_from_sd(sd, "s5k5bbgx_pattern_on");
	else
		err = s5k5bbgx_write_regs_from_sd(sd, "s5k5bbgx_pattern_off");
#else
	if (val) {
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_pattern_on,
			ARRAY_SIZE(s5k5bbgx_pattern_on));
	} else {
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_pattern_off,
			ARRAY_SIZE(s5k5bbgx_pattern_off));
	}
#endif
	if (unlikely(err)) {
		cam_err("failed to apply DTP setting\n");
		return err;
	}

	return 0;
}
Example #21
extern "C" const unsigned *
brw_compile_tcs(const struct brw_compiler *compiler,
                void *log_data,
                void *mem_ctx,
                const struct brw_tcs_prog_key *key,
                struct brw_tcs_prog_data *prog_data,
                const nir_shader *src_shader,
                int shader_time_index,
                unsigned *final_assembly_size,
                char **error_str)
{
   const struct brw_device_info *devinfo = compiler->devinfo;
   struct brw_vue_prog_data *vue_prog_data = &prog_data->base;
   const bool is_scalar = compiler->scalar_stage[MESA_SHADER_TESS_CTRL];

   nir_shader *nir = nir_shader_clone(mem_ctx, src_shader);
   nir->info.outputs_written = key->outputs_written;
   nir->info.patch_outputs_written = key->patch_outputs_written;

   struct brw_vue_map input_vue_map;
   brw_compute_vue_map(devinfo, &input_vue_map,
                       nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
                       true);

   brw_compute_tess_vue_map(&vue_prog_data->vue_map,
                            nir->info.outputs_written,
                            nir->info.patch_outputs_written);

   nir = brw_nir_apply_sampler_key(nir, devinfo, &key->tex, is_scalar);
   brw_nir_lower_vue_inputs(nir, is_scalar, &input_vue_map);
   brw_nir_lower_tcs_outputs(nir, &vue_prog_data->vue_map);
   nir = brw_postprocess_nir(nir, compiler->devinfo, is_scalar);

   prog_data->instances = DIV_ROUND_UP(nir->info.tcs.vertices_out, 2);

   /* Compute URB entry size.  The maximum allowed URB entry size is 32k.
    * That divides up as follows:
    *
    *     32 bytes for the patch header (tessellation factors)
    *    480 bytes for per-patch varyings (a varying component is 4 bytes and
    *              gl_MaxTessPatchComponents = 120)
    *  16384 bytes for per-vertex varyings (a varying component is 4 bytes,
    *              gl_MaxPatchVertices = 32 and
    *              gl_MaxTessControlOutputComponents = 128)
    *
    *  15872 bytes left for varying packing overhead
    */
   const int num_per_patch_slots = vue_prog_data->vue_map.num_per_patch_slots;
   const int num_per_vertex_slots = vue_prog_data->vue_map.num_per_vertex_slots;
   unsigned output_size_bytes = 0;
   /* Note that the patch header is counted in num_per_patch_slots. */
   output_size_bytes += num_per_patch_slots * 16;
   output_size_bytes += nir->info.tcs.vertices_out * num_per_vertex_slots * 16;

   assert(output_size_bytes >= 1);
   if (output_size_bytes > GEN7_MAX_HS_URB_ENTRY_SIZE_BYTES)
      return NULL;

   /* URB entry sizes are stored as a multiple of 64 bytes. */
   vue_prog_data->urb_entry_size = ALIGN(output_size_bytes, 64) / 64;

   /* HS does not use the usual payload pushing from URB to GRFs,
    * because we don't have enough registers for a full-size payload, and
    * the hardware is broken on Haswell anyway.
    */
   vue_prog_data->urb_read_length = 0;

   if (unlikely(INTEL_DEBUG & DEBUG_TCS)) {
      fprintf(stderr, "TCS Input ");
      brw_print_vue_map(stderr, &input_vue_map);
      fprintf(stderr, "TCS Output ");
      brw_print_vue_map(stderr, &vue_prog_data->vue_map);
   }

   vec4_tcs_visitor v(compiler, log_data, key, prog_data,
                      nir, mem_ctx, shader_time_index, &input_vue_map);
   if (!v.run()) {
      if (error_str)
         *error_str = ralloc_strdup(mem_ctx, v.fail_msg);
      return NULL;
   }

   if (unlikely(INTEL_DEBUG & DEBUG_TCS))
      v.dump_instructions();

   return brw_vec4_generate_assembly(compiler, log_data, mem_ctx, nir,
                                     &prog_data->base, v.cfg,
                                     final_assembly_size);
}
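To make the URB sizing arithmetic above concrete: every VUE slot is 16 bytes (four 4-byte components), so the entry size is per-patch slots plus vertices-out times per-vertex slots, rounded up to a 64-byte unit. A worked example with invented slot counts (not taken from any real shader):

#include <assert.h>

/* Worked example of the sizing above: 3 per-patch slots (including the
 * patch header) and 4 vertices with 6 per-vertex slots each.
 */
static unsigned tcs_urb_entry_size_example(void)
{
   const unsigned num_per_patch_slots  = 3;
   const unsigned num_per_vertex_slots = 6;
   const unsigned vertices_out         = 4;

   unsigned output_size_bytes = num_per_patch_slots * 16
                              + vertices_out * num_per_vertex_slots * 16;

   assert(output_size_bytes == 432);
   /* round up to the next 64-byte multiple: (432 + 63) / 64 == 7 */
   return (output_size_bytes + 63) / 64;
}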
Example #22
static int s5k5bbgx_set_frame_rate(struct v4l2_subdev *sd, u32 fps)
{
	int err = 0;

	cam_info("frame rate %d\n\n", fps);

#ifdef CONFIG_LOAD_FILE
	switch (fps) {
	case 7:
		err = s5k5bbgx_write_regs_from_sd(sd, "s5k5bbgx_vt_7fps");
		break;
	case 10:
		err = s5k5bbgx_write_regs_from_sd(sd, "s5k5bbgx_vt_10fps");
		break;
	case 12:
		err = s5k5bbgx_write_regs_from_sd(sd, "s5k5bbgx_vt_12fps");
		break;
	case 15:
		err = s5k5bbgx_write_regs_from_sd(sd, "s5k5bbgx_vt_15fps");
		break;
	case 30:
		cam_err("frame rate is 30\n");
		break;
	default:
		cam_err("ERR: Invalid framerate\n");
		break;
	}
#else
	switch (fps) {
	case 7:
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_vt_7fps,
				ARRAY_SIZE(s5k5bbgx_vt_7fps));
		break;
	case 10:
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_vt_10fps,
				ARRAY_SIZE(s5k5bbgx_vt_10fps));
		break;
	case 12:
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_vt_12fps,
				ARRAY_SIZE(s5k5bbgx_vt_12fps));
		break;
	case 15:
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_vt_15fps,
				ARRAY_SIZE(s5k5bbgx_vt_15fps));
		break;
	case 30:
		cam_warn("frame rate is 30\n");
		break;
	default:
		cam_err("ERR: Invalid framerate\n");
		break;
	}
#endif

	if (unlikely(err < 0)) {
		cam_err("i2c write failed to set frame rate\n");
		return -EIO;
	}

	return err;
}
Example #23
    QueryResult* processGetMore(const char *ns, int ntoreturn, long long cursorid , CurOp& curop, int pass, bool& exhaust ) {
        exhaust = false;
        ClientCursor::Pointer p(cursorid);
        ClientCursor *cc = p.c();

        int bufSize = 512 + sizeof( QueryResult ) + MaxBytesToReturnToClientAtOnce;

        BufBuilder b( bufSize );
        b.skip(sizeof(QueryResult));
        int resultFlags = ResultFlag_AwaitCapable;
        int start = 0;
        int n = 0;

        if ( unlikely(!cc) ) {
            LOGSOME << "getMore: cursorid not found " << ns << " " << cursorid << endl;
            cursorid = 0;
            resultFlags = ResultFlag_CursorNotFound;
        }
        else {
            // check for spoofing of the ns such that it does not match the one originally there for the cursor
            uassert(14833, "auth error", str::equals(ns, cc->ns().c_str()));

            if ( pass == 0 )
                cc->updateSlaveLocation( curop );

            int queryOptions = cc->queryOptions();
            
            curop.debug().query = cc->query();

            start = cc->pos();
            Cursor *c = cc->c();
            c->recoverFromYield();
            DiskLoc last;

            scoped_ptr<Projection::KeyOnly> keyFieldsOnly;
            if ( cc->modifiedKeys() == false && cc->isMultiKey() == false && cc->fields )
                keyFieldsOnly.reset( cc->fields->checkKey( cc->indexKeyPattern() ) );

            // This manager may be stale, but it's the state of chunking when the cursor was created.
            ShardChunkManagerPtr manager = cc->getChunkManager();

            while ( 1 ) {
                if ( !c->ok() ) {
                    if ( c->tailable() ) {
                        /* when a tailable cursor hits "EOF", ok() goes false, and current() is null.  however
                           advance() can still be retries as a reactivation attempt.  when there is new data, it will
                           return true.  that's what we are doing here.
                           */
                        if ( c->advance() )
                            continue;

                        if( n == 0 && (queryOptions & QueryOption_AwaitData) && pass < 1000 ) {
                            return 0;
                        }

                        break;
                    }
                    p.release();
                    bool ok = ClientCursor::erase(cursorid);
                    assert(ok);
                    cursorid = 0;
                    cc = 0;
                    break;
                }

                // in some cases (clone collection) there won't be a matcher
                if ( c->matcher() && !c->matcher()->matchesCurrent( c ) ) {
                }
                else if ( manager && ! manager->belongsToMe( cc ) ){
                    LOG(2) << "cursor skipping document in un-owned chunk: " << c->current() << endl;
                }
                else {
                    if( c->getsetdup(c->currLoc()) ) {
                        //out() << "  but it's a dup \n";
                    }
                    else {
                        last = c->currLoc();
                        n++;

                        if ( keyFieldsOnly ) {
                            fillQueryResultFromObj(b, 0, keyFieldsOnly->hydrate( c->currKey() ) );
                        }
                        else {
                            BSONObj js = c->current();
                            // show disk loc should be part of the main query, not in an $or clause, so this should be ok
                            fillQueryResultFromObj(b, cc->fields.get(), js, ( cc->pq.get() && cc->pq->showDiskLoc() ? &last : 0));
                        }

                        if ( ( ntoreturn && n >= ntoreturn ) || b.len() > MaxBytesToReturnToClientAtOnce ) {
                            c->advance();
                            cc->incPos( n );
                            break;
                        }
                    }
                }
                c->advance();

                if ( ! cc->yieldSometimes( ClientCursor::MaybeCovered ) ) {
                    ClientCursor::erase(cursorid);
                    cursorid = 0;
                    cc = 0;
                    p.deleted();
                    break;
                }
            }
            
            if ( cc ) {
                if ( c->supportYields() ) {
                    ClientCursor::YieldData data;
                    assert( cc->prepareToYield( data ) );
                }
                else {
                    cc->updateLocation();
                }
                cc->mayUpgradeStorage();
                cc->storeOpForSlave( last );
                exhaust = cc->queryOptions() & QueryOption_Exhaust;
            }
        }

        QueryResult *qr = (QueryResult *) b.buf();
        qr->len = b.len();
        qr->setOperation(opReply);
        qr->_resultFlags() = resultFlags;
        qr->cursorId = cursorid;
        qr->startingFrom = start;
        qr->nReturned = n;
        b.decouple();

        return qr;
    }
Example #24
static int s5k5bbgx_init(struct v4l2_subdev *sd, u32 val)
{
	/* struct i2c_client *client = v4l2_get_subdevdata(sd); */
	struct s5k5bbgx_state *state = to_state(sd);
	int err = -EINVAL;

	cam_dbg("E\n");

#ifdef CONFIG_CPU_FREQ
	if (s5pv310_cpufreq_lock(DVFS_LOCK_ID_CAM, CPU_L0))
		cam_err("failed lock DVFS\n");
#endif
	/* set initial register values */
#ifdef CONFIG_LOAD_FILE
	if (!state->vt_mode) {
		cam_dbg("load camera common setting\n");
		err = s5k5bbgx_write_regs_from_sd(sd,
				"s5k5bbgx_common");
	} else {
		if (state->vt_mode == 1) {
			cam_info("load camera VT call setting\n");
			err = s5k5bbgx_write_regs_from_sd(sd,
					"s5k5bbgx_vt_common");
		} else {
			cam_info("load camera WIFI VT call setting\n");
			err = s5k5bbgx_write_regs_from_sd(sd,
					"s5k5bbgx_vt_wifi_common");
		}
	}
#else
	if (!state->vt_mode) {
		cam_info("load camera common setting\n");
		err = s5k5bbgx_write_regs(sd, s5k5bbgx_common,
			ARRAY_SIZE(s5k5bbgx_common));
	} else {
		if (state->vt_mode == 1) {
			cam_info("load camera VT call setting\n");
			err = s5k5bbgx_write_regs(sd, s5k5bbgx_vt_common,
				ARRAY_SIZE(s5k5bbgx_vt_common));
		} else {
			cam_info("load camera WIFI VT call setting\n");
			err = s5k5bbgx_write_regs(sd, s5k5bbgx_vt_wifi_common,
				ARRAY_SIZE(s5k5bbgx_vt_wifi_common));
		}
	}
#endif
#ifdef CONFIG_CPU_FREQ
	s5pv310_cpufreq_lock_free(DVFS_LOCK_ID_CAM);
#endif
	if (unlikely(err)) {
		cam_err("failed to init\n");
		return err;
	}

	/* We stop stream-output from sensor when starting camera. */
	err = s5k5bbgx_control_stream(sd, STREAM_STOP);
	if (unlikely(err < 0))
		return err;
	msleep(150);

	if (state->vt_mode && (state->req_fps != state->set_fps)) {
		err = s5k5bbgx_set_frame_rate(sd, state->req_fps);
		if (unlikely(err < 0))
			return err;
		else
			state->set_fps = state->req_fps;
	}

	state->initialized = 1;

	return 0;
}
Example #25
enum MqErrorE
SysRecv (
  struct MqS * const context,
  MQ_SOCK const hdl,
  MQ_BIN buf,
  MQ_SIZE numBytes,
  MQ_SIZE * const newSize,
  MQ_TIME_T timeout
)
{
  int const flags = 0;
  register MQ_SIZE ldata = 0;
  *newSize = 0;

  // recv data in buf
  do {
    ldata = recv (hdl, (MQ_buf_T) buf, numBytes, flags);

    // check for errors
    if (unlikely (ldata <= 0)) {
	if (ldata == -1) {
//MqDLogV(context,0,"ERROR sock<%i>, numBytes<%i>, str<%s>\n", hdl, numBytes, strerror(errno));
	  switch (sSysGetErrorNum) {
	    case WIN32_WSA (EWOULDBLOCK): {
	      struct timeval tv = {(long)timeout, 0L};
	      fd_set fds;
	      FD_ZERO(&fds);
	      FD_SET(hdl, &fds);
	      // now wait until the socket become recv-able
	      switch (SysSelect (context, (hdl+1), &fds, NULL, &tv)) {
		case MQ_OK:
		  break;
		case MQ_CONTINUE:
		  return MqErrorDbV (MQ_ERROR_TIMEOUT, timeout);
		case MQ_ERROR:
		  pIoCloseSocket (__func__, context->link.io);
		  return MqErrorStack(context);
	      }
	      ldata = 0;
	      break;
	    }
	    case WIN32_WSA (ECONNRESET):
	    case WIN32_WSA (EBADF): {
	      pIoCloseSocket (__func__, context->link.io);
	      return pErrorSetExitWithCheck (context);
	    }
	    default:
	      pIoCloseSocket (__func__, context->link.io);
	      return sSysMqErrorMsg (context, __func__, "recv");
	  }
	} else if (ldata == 0) {
	  pIoCloseSocket (__func__, context->link.io);
	  return pErrorSetExitWithCheck (context);
	}
    }

    buf += ldata;
    numBytes -= ldata;
    *newSize += ldata;
  }
  while (numBytes > 0);

  return MQ_OK;
}
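Stripped of the MqS context machinery, the loop above implements a common pattern: on a non-blocking socket, EWOULDBLOCK means "wait with select() up to the timeout and try recv() again", while a return of 0 means the peer closed the connection. A condensed POSIX-only sketch of the same pattern (names and error handling simplified for illustration):

#include <errno.h>
#include <stddef.h>
#include <sys/select.h>
#include <sys/socket.h>

/* Minimal sketch: read exactly `len` bytes from a non-blocking socket,
 * waiting at most `timeout_sec` whenever the kernel has no data yet.
 * Returns 0 on success, -1 on error, close, or timeout.
 */
static int recv_all(int fd, char *buf, size_t len, long timeout_sec)
{
    while (len > 0) {
        ssize_t n = recv(fd, buf, len, 0);
        if (n > 0) {
            buf += n;
            len -= (size_t)n;
        } else if (n == 0) {
            return -1;                    /* peer closed the connection */
        } else if (errno == EWOULDBLOCK || errno == EAGAIN) {
            struct timeval tv = { timeout_sec, 0 };
            fd_set fds;
            FD_ZERO(&fds);
            FD_SET(fd, &fds);
            if (select(fd + 1, &fds, NULL, NULL, &tv) <= 0)
                return -1;                /* timeout or select() error */
        } else {
            return -1;                    /* hard recv() error */
        }
    }
    return 0;
}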
Example #26
int do_macos_mach_smi(int update_every, usec_t dt) {
    (void)dt;

    static int do_cpu = -1, do_ram = -1, do_swapio = -1, do_pgfaults = -1;

    if (unlikely(do_cpu == -1)) {
        do_cpu                  = config_get_boolean("plugin:macos:mach_smi", "cpu utilization", 1);
        do_ram                  = config_get_boolean("plugin:macos:mach_smi", "system ram", 1);
        do_swapio               = config_get_boolean("plugin:macos:mach_smi", "swap i/o", 1);
        do_pgfaults             = config_get_boolean("plugin:macos:mach_smi", "memory page faults", 1);
    }

    RRDSET *st;

    kern_return_t kr;
    mach_msg_type_number_t count;
    host_t host;
    vm_size_t system_pagesize;

    // NEEDED BY: do_cpu
    natural_t cp_time[CPU_STATE_MAX];

    // NEEDED BY: do_ram, do_swapio, do_pgfaults
    vm_statistics64_data_t vm_statistics;

    host = mach_host_self();
    kr = host_page_size(host, &system_pagesize);
    if (unlikely(kr != KERN_SUCCESS))
        return -1;

    // --------------------------------------------------------------------

    if (likely(do_cpu)) {
        if (unlikely(HOST_CPU_LOAD_INFO_COUNT != 4)) {
            error("MACOS: There are %d CPU states (4 was expected)", HOST_CPU_LOAD_INFO_COUNT);
            do_cpu = 0;
            error("DISABLED: system.cpu");
        } else {
            count = HOST_CPU_LOAD_INFO_COUNT;
            kr = host_statistics(host, HOST_CPU_LOAD_INFO, (host_info_t)cp_time, &count);
            if (unlikely(kr != KERN_SUCCESS)) {
                error("MACOS: host_statistics() failed: %s", mach_error_string(kr));
                do_cpu = 0;
                error("DISABLED: system.cpu");
            } else {

                st = rrdset_find_bytype("system", "cpu");
                if (unlikely(!st)) {
                    st = rrdset_create("system", "cpu", NULL, "cpu", "system.cpu", "Total CPU utilization", "percentage", 100, update_every, RRDSET_TYPE_STACKED);

                    rrddim_add(st, "user", NULL, 1, 1, RRDDIM_PCENT_OVER_DIFF_TOTAL);
                    rrddim_add(st, "nice", NULL, 1, 1, RRDDIM_PCENT_OVER_DIFF_TOTAL);
                    rrddim_add(st, "system", NULL, 1, 1, RRDDIM_PCENT_OVER_DIFF_TOTAL);
                    rrddim_add(st, "idle", NULL, 1, 1, RRDDIM_PCENT_OVER_DIFF_TOTAL);
                    rrddim_hide(st, "idle");
                }
                else rrdset_next(st);

                rrddim_set(st, "user", cp_time[CPU_STATE_USER]);
                rrddim_set(st, "nice", cp_time[CPU_STATE_NICE]);
                rrddim_set(st, "system", cp_time[CPU_STATE_SYSTEM]);
                rrddim_set(st, "idle", cp_time[CPU_STATE_IDLE]);
                rrdset_done(st);
            }
        }
    }

    // --------------------------------------------------------------------
    
    if (likely(do_ram || do_swapio || do_pgfaults)) {
        count = sizeof(vm_statistics64_data_t);
        kr = host_statistics64(host, HOST_VM_INFO64, (host_info64_t)&vm_statistics, &count);
        if (unlikely(kr != KERN_SUCCESS)) {
            error("MACOS: host_statistics64() failed: %s", mach_error_string(kr));
            do_ram = 0;
            error("DISABLED: system.ram");
            do_swapio = 0;
            error("DISABLED: system.swapio");
            do_pgfaults = 0;
            error("DISABLED: mem.pgfaults");
        } else {
            if (likely(do_ram)) {
                st = rrdset_find("system.ram");
                if (unlikely(!st)) {
                    st = rrdset_create("system", "ram", NULL, "ram", NULL, "System RAM", "MB", 200, update_every, RRDSET_TYPE_STACKED);

                    rrddim_add(st, "active",    NULL, system_pagesize, 1048576, RRDDIM_ABSOLUTE);
                    rrddim_add(st, "wired",     NULL, system_pagesize, 1048576, RRDDIM_ABSOLUTE);
                    rrddim_add(st, "throttled", NULL, system_pagesize, 1048576, RRDDIM_ABSOLUTE);
                    rrddim_add(st, "compressor", NULL, system_pagesize, 1048576, RRDDIM_ABSOLUTE);
                    rrddim_add(st, "inactive",  NULL, system_pagesize, 1048576, RRDDIM_ABSOLUTE);
                    rrddim_add(st, "purgeable", NULL, system_pagesize, 1048576, RRDDIM_ABSOLUTE);
                    rrddim_add(st, "speculative", NULL, system_pagesize, 1048576, RRDDIM_ABSOLUTE);
                    rrddim_add(st, "free",      NULL, system_pagesize, 1048576, RRDDIM_ABSOLUTE);
                }
                else rrdset_next(st);

                rrddim_set(st, "active",    vm_statistics.active_count);
                rrddim_set(st, "wired",     vm_statistics.wire_count);
                rrddim_set(st, "throttled", vm_statistics.throttled_count);
                rrddim_set(st, "compressor", vm_statistics.compressor_page_count);
                rrddim_set(st, "inactive",  vm_statistics.inactive_count);
                rrddim_set(st, "purgeable", vm_statistics.purgeable_count);
                rrddim_set(st, "speculative", vm_statistics.speculative_count);
                rrddim_set(st, "free",      (vm_statistics.free_count - vm_statistics.speculative_count));
                rrdset_done(st);
            }

            // --------------------------------------------------------------------

            if (likely(do_swapio)) {
                st = rrdset_find("system.swapio");
                if (unlikely(!st)) {
                    st = rrdset_create("system", "swapio", NULL, "swap", NULL, "Swap I/O", "kilobytes/s", 250, update_every, RRDSET_TYPE_AREA);

                    rrddim_add(st, "in",  NULL, system_pagesize, 1024, RRDDIM_INCREMENTAL);
                    rrddim_add(st, "out", NULL, -system_pagesize, 1024, RRDDIM_INCREMENTAL);
                }
                else rrdset_next(st);

                rrddim_set(st, "in", vm_statistics.swapins);
                rrddim_set(st, "out", vm_statistics.swapouts);
                rrdset_done(st);
            }

            // --------------------------------------------------------------------

            if (likely(do_pgfaults)) {
                st = rrdset_find("mem.pgfaults");
                if (unlikely(!st)) {
                    st = rrdset_create("mem", "pgfaults", NULL, "system", NULL, "Memory Page Faults", "page faults/s", 500, update_every, RRDSET_TYPE_LINE);
                    st->isdetail = 1;

                    rrddim_add(st, "memory",    NULL, 1, 1, RRDDIM_INCREMENTAL);
                    rrddim_add(st, "cow",       NULL, 1, 1, RRDDIM_INCREMENTAL);
                    rrddim_add(st, "pagein",    NULL, 1, 1, RRDDIM_INCREMENTAL);
                    rrddim_add(st, "pageout",   NULL, 1, 1, RRDDIM_INCREMENTAL);
                    rrddim_add(st, "compress",  NULL, 1, 1, RRDDIM_INCREMENTAL);
                    rrddim_add(st, "decompress", NULL, 1, 1, RRDDIM_INCREMENTAL);
                    rrddim_add(st, "zero_fill", NULL, 1, 1, RRDDIM_INCREMENTAL);
                    rrddim_add(st, "reactivate", NULL, 1, 1, RRDDIM_INCREMENTAL);
                    rrddim_add(st, "purge",     NULL, 1, 1, RRDDIM_INCREMENTAL);
                }
                else rrdset_next(st);

                rrddim_set(st, "memory", vm_statistics.faults);
                rrddim_set(st, "cow", vm_statistics.cow_faults);
                rrddim_set(st, "pagein", vm_statistics.pageins);
                rrddim_set(st, "pageout", vm_statistics.pageouts);
                rrddim_set(st, "compress", vm_statistics.compressions);
                rrddim_set(st, "decompress", vm_statistics.decompressions);
                rrddim_set(st, "zero_fill", vm_statistics.zero_fill_count);
                rrddim_set(st, "reactivate", vm_statistics.reactivations);
                rrddim_set(st, "purge", vm_statistics.purges);
                rrdset_done(st);
            }
        }
    } 
 
    // --------------------------------------------------------------------

    return 0;
}
Example #27
void
unpack0 (gfc_array_char *ret, const gfc_array_char *vector,
	 const gfc_array_l1 *mask, char *field)
{
  gfc_array_char tmp;

  index_type type_size;

  if (unlikely(compile_options.bounds_check))
    unpack_bounds (ret, vector, mask, NULL);

  type_size = GFC_DTYPE_TYPE_SIZE (vector);

  switch (type_size)
    {
    case GFC_DTYPE_LOGICAL_1:
    case GFC_DTYPE_INTEGER_1:
    case GFC_DTYPE_DERIVED_1:
      unpack0_i1 ((gfc_array_i1 *) ret, (gfc_array_i1 *) vector,
		  mask, (GFC_INTEGER_1 *) field);
      return;

    case GFC_DTYPE_LOGICAL_2:
    case GFC_DTYPE_INTEGER_2:
      unpack0_i2 ((gfc_array_i2 *) ret, (gfc_array_i2 *) vector,
		  mask, (GFC_INTEGER_2 *) field);
      return;

    case GFC_DTYPE_LOGICAL_4:
    case GFC_DTYPE_INTEGER_4:
      unpack0_i4 ((gfc_array_i4 *) ret, (gfc_array_i4 *) vector,
		  mask, (GFC_INTEGER_4 *) field);
      return;

    case GFC_DTYPE_LOGICAL_8:
    case GFC_DTYPE_INTEGER_8:
      unpack0_i8 ((gfc_array_i8 *) ret, (gfc_array_i8 *) vector,
		  mask, (GFC_INTEGER_8 *) field);
      return;

#ifdef HAVE_GFC_INTEGER_16
    case GFC_DTYPE_LOGICAL_16:
    case GFC_DTYPE_INTEGER_16:
      unpack0_i16 ((gfc_array_i16 *) ret, (gfc_array_i16 *) vector,
		   mask, (GFC_INTEGER_16 *) field);
      return;
#endif
    case GFC_DTYPE_REAL_4:
      unpack0_r4 ((gfc_array_r4 *) ret, (gfc_array_r4 *) vector,
		  mask, (GFC_REAL_4 *) field);
      return;

    case GFC_DTYPE_REAL_8:
      unpack0_r8 ((gfc_array_r8 *) ret, (gfc_array_r8*) vector,
		  mask, (GFC_REAL_8  *) field);
      return;

#ifdef HAVE_GFC_REAL_10
    case GFC_DTYPE_REAL_10:
      unpack0_r10 ((gfc_array_r10 *) ret, (gfc_array_r10 *) vector,
		   mask, (GFC_REAL_10 *) field);
      return;
#endif

#ifdef HAVE_GFC_REAL_16
    case GFC_DTYPE_REAL_16:
      unpack0_r16 ((gfc_array_r16 *) ret, (gfc_array_r16 *) vector,
		   mask, (GFC_REAL_16 *) field);
      return;
#endif

    case GFC_DTYPE_COMPLEX_4:
      unpack0_c4 ((gfc_array_c4 *) ret, (gfc_array_c4 *) vector,
		  mask, (GFC_COMPLEX_4 *) field);
      return;

    case GFC_DTYPE_COMPLEX_8:
      unpack0_c8 ((gfc_array_c8 *) ret, (gfc_array_c8 *) vector,
		  mask, (GFC_COMPLEX_8 *) field);
      return;

#ifdef HAVE_GFC_COMPLEX_10
    case GFC_DTYPE_COMPLEX_10:
      unpack0_c10 ((gfc_array_c10 *) ret, (gfc_array_c10 *) vector,
		   mask, (GFC_COMPLEX_10 *) field);
      return;
#endif

#ifdef HAVE_GFC_COMPLEX_16
    case GFC_DTYPE_COMPLEX_16:
      unpack0_c16 ((gfc_array_c16 *) ret, (gfc_array_c16 *) vector,
		   mask, (GFC_COMPLEX_16 *) field);
      return;
#endif
    case GFC_DTYPE_DERIVED_2:
      if (GFC_UNALIGNED_2(ret->data) || GFC_UNALIGNED_2(vector->data)
	  || GFC_UNALIGNED_2(field))
	break;
      else
	{
	  unpack0_i2 ((gfc_array_i2 *) ret, (gfc_array_i2 *) vector,
		      mask, (GFC_INTEGER_2 *) field);
	  return;
	}

    case GFC_DTYPE_DERIVED_4:
      if (GFC_UNALIGNED_4(ret->data) || GFC_UNALIGNED_4(vector->data)
	  || GFC_UNALIGNED_4(field))
	break;
      else
	{
	  unpack0_i4 ((gfc_array_i4 *) ret, (gfc_array_i4 *) vector,
		      mask, (GFC_INTEGER_4 *) field);
	  return;
	}

    case GFC_DTYPE_DERIVED_8:
      if (GFC_UNALIGNED_8(ret->data) || GFC_UNALIGNED_8(vector->data)
	  || GFC_UNALIGNED_8(field))
	break;
      else
	{
	  unpack0_i8 ((gfc_array_i8 *) ret, (gfc_array_i8 *) vector,
		      mask, (GFC_INTEGER_8 *) field);
	  return;
	}
#ifdef HAVE_GFC_INTEGER_16
    case GFC_DTYPE_DERIVED_16:
      if (GFC_UNALIGNED_16(ret->data) || GFC_UNALIGNED_16(vector->data)
	  || GFC_UNALIGNED_16(field))
	break;
      else
	{
	  unpack0_i16 ((gfc_array_i16 *) ret, (gfc_array_i16 *) vector,
		       mask, (GFC_INTEGER_16 *) field);
	  return;
	}
#endif
    }

  memset (&tmp, 0, sizeof (tmp));
  tmp.dtype = 0;
  tmp.data = field;
  unpack_internal (ret, vector, mask, &tmp, GFC_DESCRIPTOR_SIZE (vector));
}
Example #28
static SkShader*
source_to_sk_shader (cairo_skia_context_t *cr,
		     const cairo_pattern_t *pattern)
{
    SkShader *shader = NULL;

    if (pattern->type == CAIRO_PATTERN_TYPE_SOLID) {
	cairo_solid_pattern_t *solid = (cairo_solid_pattern_t *) pattern;
	return new SkColorShader (color_to_sk (solid->color));
    } else if (pattern->type == CAIRO_PATTERN_TYPE_SURFACE) {
	cairo_surface_t *surface = surface_from_pattern (pattern);

	cr->source = cairo_surface_reference (surface);

	if (surface->type == CAIRO_SURFACE_TYPE_SKIA) {
	    cairo_skia_surface_t *esurf = (cairo_skia_surface_t *) surface;

	    if (! _cairo_matrix_is_identity (&pattern->matrix)) {
		SkMatrix localMatrix = matrix_inverse_to_sk (pattern->matrix);
		shader = SkShader::CreateBitmapShader (*esurf->bitmap,
						       extend_to_sk (pattern->extend),
						       extend_to_sk (pattern->extend),
						       &localMatrix);
	    } else {
		shader = SkShader::CreateBitmapShader (*esurf->bitmap,
						       extend_to_sk (pattern->extend),
						       extend_to_sk (pattern->extend));
	    }
	} else {
	    SkBitmap bitmap;

	    if (! _cairo_surface_is_image (surface)) {
		cairo_status_t status;

		status = _cairo_surface_acquire_source_image (surface,
							      &cr->source_image,
							      &cr->source_extra);
		if (status)
		    return NULL;

		surface = &cr->source_image->base;
	    }

	    if (unlikely (! surface_to_sk_bitmap (surface, bitmap)))
		return NULL;

	    if (! _cairo_matrix_is_identity (&pattern->matrix)) {
		SkMatrix localMatrix = matrix_inverse_to_sk (pattern->matrix);
		shader = SkShader::CreateBitmapShader (bitmap,
						       extend_to_sk (pattern->extend),
						       extend_to_sk (pattern->extend),
						       &localMatrix);
	    } else {
		shader = SkShader::CreateBitmapShader (bitmap,
						       extend_to_sk (pattern->extend),
						       extend_to_sk (pattern->extend));
	    }
	}
    } else if (pattern->type == CAIRO_PATTERN_TYPE_LINEAR
	       /* || pattern->type == CAIRO_PATTERN_TYPE_RADIAL */)
    {
	cairo_gradient_pattern_t *gradient = (cairo_gradient_pattern_t *) pattern;
	SkColor colors_stack[10];
	SkScalar pos_stack[10];
	SkColor *colors = colors_stack;
	SkScalar *pos = pos_stack;

	if (gradient->n_stops > 10) {
	    colors = new SkColor[gradient->n_stops];
	    pos = new SkScalar[gradient->n_stops];
	}

	for (unsigned int i = 0; i < gradient->n_stops; i++) {
	    pos[i] = CAIRO_FIXED_TO_SK_SCALAR (gradient->stops[i].offset);
	    colors[i] = color_stop_to_sk (gradient->stops[i].color);
	}

	if (pattern->type == CAIRO_PATTERN_TYPE_LINEAR) {
	    cairo_linear_pattern_t *linear = (cairo_linear_pattern_t *) gradient;
	    SkPoint points[2];

	    points[0].set (SkFloatToScalar (linear->pd1.x),
			   SkFloatToScalar (linear->pd1.y));
	    points[1].set (SkFloatToScalar (linear->pd2.x),
			   SkFloatToScalar (linear->pd2.y));

	    if (! _cairo_matrix_is_identity (&pattern->matrix)) {
		SkMatrix localMatrix = matrix_inverse_to_sk (pattern->matrix);
		shader = SkGradientShader::CreateLinear (points, colors, pos,
							 gradient->n_stops,
							 extend_to_sk (pattern->extend),
							 0, &localMatrix);
	    } else {
		shader = SkGradientShader::CreateLinear (points, colors, pos,
							 gradient->n_stops,
							 extend_to_sk (pattern->extend));
	    }
	} else {
	    // XXX todo -- implement real radial shaders in Skia
	}

	if (gradient->n_stops > 10) {
	    delete [] colors;
	    delete [] pos;
	}
    }

    return shader;
}
Example #29
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
	struct rb_node **p, *parent;
	struct sock *sk = skb->sk;
	struct rb_root *root;
	struct fq_flow *f;

	/* warning: no starvation prevention... */
	if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
		return &q->internal;

	/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
	 * or a listener (SYNCOOKIE mode)
	 * 1) request sockets are not full blown,
	 *    they do not contain sk_pacing_rate
	 * 2) They are not part of a 'flow' yet
	 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
	 *    especially if the listener set SO_MAX_PACING_RATE
	 * 4) We pretend they are orphaned
	 */
	if (!sk || sk_listener(sk)) {
		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

		/* By forcing low order bit to 1, we make sure to not
		 * collide with a local flow (socket pointers are word aligned)
		 */
		sk = (struct sock *)((hash << 1) | 1UL);
		skb_orphan(skb);
	}

	root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

	if (q->flows >= (2U << q->fq_trees_log) &&
	    q->inactive_flows > q->flows/2)
		fq_gc(q, root, sk);

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk) {
			/* socket might have been reallocated, so check
			 * if its sk_hash is the same.
			 * If not, we need to refill credit with
			 * initial quantum
			 */
			if (unlikely(skb->sk &&
				     f->socket_hash != sk->sk_hash)) {
				f->credit = q->initial_quantum;
				f->socket_hash = sk->sk_hash;
				if (fq_flow_is_throttled(f))
					fq_flow_unset_throttled(q, f);
				f->time_next_packet = 0ULL;
			}
			return f;
		}
		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!f)) {
		q->stat_allocation_errors++;
		return &q->internal;
	}
	fq_flow_set_detached(f);
	f->sk = sk;
	if (skb->sk)
		f->socket_hash = sk->sk_hash;
	f->credit = q->initial_quantum;

	rb_link_node(&f->fq_node, parent, p);
	rb_insert_color(&f->fq_node, root);

	q->flows++;
	q->inactive_flows++;
	return f;
}
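The "(hash << 1) | 1" trick above relies on struct sock pointers being at least word aligned, so a genuine socket pointer always has a zero low bit; tagged orphan keys can therefore never collide with real sockets in the rb-tree. A standalone sketch of the idea (plain C, names invented):

#include <stdbool.h>
#include <stdint.h>

/* Turn a masked flow hash into a fake "socket pointer" key whose low bit
 * is 1; real pointers are word aligned (low bit 0), so the two key
 * spaces stay disjoint in one comparison-based tree.
 */
static inline uintptr_t orphan_flow_key(uint32_t hash, uint32_t orphan_mask)
{
    return ((uintptr_t)(hash & orphan_mask) << 1) | 1u;
}

static inline bool key_is_orphan(uintptr_t key)
{
    return (key & 1u) != 0;   /* never true for an aligned pointer */
}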
Example #30
static int
hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
		    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	const struct ip_set_hash *h = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct hash_ipportip6_elem data = { };
	u32 port, port_to;
	u32 timeout = h->timeout;
	bool with_ports = false;
	int ret;

	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     tb[IPSET_ATTR_IP_TO] ||
		     tb[IPSET_ATTR_CIDR]))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
	if (ret)
		return ret;

	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2);
	if (ret)
		return ret;

	if (tb[IPSET_ATTR_PORT])
		data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
	else
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_PROTO]) {
		data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
		with_ports = ip_set_proto_with_ports(data.proto);

		if (data.proto == 0)
			return -IPSET_ERR_INVALID_PROTO;
	} else
		return -IPSET_ERR_MISSING_PROTO;

	if (!(with_ports || data.proto == IPPROTO_ICMPV6))
		data.port = 0;

	if (tb[IPSET_ATTR_TIMEOUT]) {
		if (!with_timeout(h->timeout))
			return -IPSET_ERR_TIMEOUT;
		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
	}

	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
		ret = adtfn(set, &data, timeout, flags);
		return ip_set_eexist(ret, flags) ? 0 : ret;
	}

	port = ntohs(data.port);
	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
	if (port > port_to)
		swap(port, port_to);

	if (retried)
		port = h->next.port;
	for (; port <= port_to; port++) {
		data.port = htons(port);
		ret = adtfn(set, &data, timeout, flags);

		if (ret && !ip_set_eexist(ret, flags))
			return ret;
		else
			ret = 0;
	}
	return ret;
}
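The tail of hash_ipportip6_uadt() shows the resumable-range idiom: when a resized set forces a retry, h->next.port lets the retried request continue from where the previous pass stopped instead of re-adding the whole port range. A minimal standalone sketch of that pattern (types and names invented for illustration):

/* Sketch of the resumable range-add idiom: `next` survives across
 * retries, so a retried request resumes at the checkpoint instead of
 * starting the range over.
 */
struct range_state { unsigned int next; };

static int add_port_range(struct range_state *st, unsigned int from,
			  unsigned int to, int retried,
			  int (*add_one)(unsigned int))
{
	int ret = 0;
	unsigned int port = retried ? st->next : from;

	for (; port <= to; port++) {
		st->next = port;	/* checkpoint before each attempt */
		ret = add_one(port);
		if (ret)
			return ret;	/* caller may retry from st->next */
	}
	return ret;
}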