Example No. 1
0
void *nxp_v4l2_get_alloc_ctx(void)
{
    if (likely(__me))
        return &__me->v4l2_dev;
    return NULL;
}
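
The likely() hint above tells the compiler which branch to lay out as the fast path. A minimal sketch of how kernel-style code typically defines these hints on top of GCC's __builtin_expect; the cached-pointer accessor is an illustrative stand-in, not the driver's actual code:

#define likely(x)   __builtin_expect(!!(x), 1)  /* branch is usually taken */
#define unlikely(x) __builtin_expect(!!(x), 0)  /* branch is rarely taken */

static void *cached_ctx;  /* illustrative: set once during init */

/* Same shape as the accessor above: the fast path returns the cached
 * pointer, and the compiler moves the NULL case out of line. */
void *get_ctx_or_null(void)
{
    if (likely(cached_ctx))
        return cached_ctx;
    return NULL;
}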
Example No. 2
0
int __ipipe_handle_irq(struct pt_regs regs)
{
	struct ipipe_domain *this_domain, *next_domain;
	unsigned irq = regs.orig_eax;
	struct list_head *head, *pos;
	ipipe_declare_cpuid;
	int m_ack, s_ack;

	ipipe_load_cpuid();

	if (regs.orig_eax < 0) {
		irq &= 0xff;
		m_ack = 0;
	} else
		m_ack = 1;

	this_domain = ipipe_percpu_domain[cpuid];

	if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
		head = &this_domain->p_link;
	else {
		head = __ipipe_pipeline.next;
		next_domain = list_entry(head, struct ipipe_domain, p_link);
		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
				next_domain->irqs[irq].acknowledge(irq);
			if (likely(__ipipe_dispatch_wired(next_domain, irq)))
				goto finalize;
			else
				goto finalize_nosync;
		}
	}

	/* Ack the interrupt. */

	s_ack = m_ack;
	pos = head;

	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);

		/* For each domain handling the incoming IRQ, mark it as
		   pending in its log. */

		if (test_bit
		    (IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
			/* Domains that handle this IRQ are polled for
			   acknowledging it by decreasing priority order. The
			   interrupt must be made pending _first_ in the domain's
			   status flags before the PIC is unlocked. */

			next_domain->cpudata[cpuid].irq_counters[irq].total_hits++;
			next_domain->cpudata[cpuid].irq_counters[irq].pending_hits++;
			__ipipe_set_irq_bit(next_domain, cpuid, irq);

			/* Always get the first master acknowledge available. Once
			   we've got it, allow slave acknowledge handlers to run
			   (until one of them stops us). */

			if (!m_ack)
				m_ack = next_domain->irqs[irq].acknowledge(irq);
			else if (test_bit
				 (IPIPE_SHARED_FLAG,
				  &next_domain->irqs[irq].control) && !s_ack)
				s_ack = next_domain->irqs[irq].acknowledge(irq);
		}

		/* If the domain does not want the IRQ to be passed down the
		   interrupt pipe, exit the loop now. */

		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
			break;

		pos = next_domain->p_link.next;
	}

	if (irq == __ipipe_tick_irq &&
	    __ipipe_pipeline_head_p(ipipe_root_domain) &&
	    ipipe_root_domain->cpudata[cpuid].irq_counters[irq].pending_hits > 1)
		/*
		 * Emulate a loss of clock ticks if Linux is owning
		 * the time source. The drift will be compensated by
		 * the timer support code.
		 */
		ipipe_root_domain->cpudata[cpuid].irq_counters[irq].pending_hits = 1;

finalize:

	if (irq == __ipipe_tick_irq) {
		__ipipe_tick_regs[cpuid].eflags = regs.eflags;
		__ipipe_tick_regs[cpuid].eip = regs.eip;
		__ipipe_tick_regs[cpuid].xcs = regs.xcs;
#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
		/* Linux profiling code needs this. */
		__ipipe_tick_regs[cpuid].ebp = regs.ebp;
#endif	/* CONFIG_SMP && CONFIG_FRAME_POINTER */
	}

	/* Now walk the pipeline, yielding control to the highest
	   priority domain that has pending interrupt(s) or
	   immediately to the current domain if the interrupt has been
	   marked as 'sticky'. This search does not go beyond the
	   current domain in the pipeline. */

	__ipipe_walk_pipeline(head, cpuid);

finalize_nosync:

	ipipe_load_cpuid();

	if (ipipe_percpu_domain[cpuid] != ipipe_root_domain ||
	    test_bit(IPIPE_STALL_FLAG,
		     &ipipe_root_domain->cpudata[cpuid].status))
		return 0;

#ifdef CONFIG_SMP
	/* Prevent a spurious rescheduling from being triggered on
	   preemptible kernels along the way out through
	   ret_from_intr. */
	if (regs.orig_eax < 0)
		__set_bit(IPIPE_STALL_FLAG, &ipipe_root_domain->cpudata[cpuid].status);
#endif	/* CONFIG_SMP */

	return 1;
}
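
list_entry() in the pipeline walk above is the container_of idiom: given a pointer to an embedded list_head, recover the structure that embeds it. A self-contained sketch with simplified definitions, not the kernel's exact headers:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* Recover the struct that embeds `member` from a pointer to that member. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

struct domain {
    int priority;
    struct list_head p_link;   /* linkage into the pipeline list */
};

int main(void)
{
    struct domain d = { .priority = 7 };
    struct list_head *pos = &d.p_link;

    /* Same pattern as the pipeline walk: list node -> enclosing domain. */
    struct domain *dom = list_entry(pos, struct domain, p_link);
    printf("priority = %d\n", dom->priority);
    return 0;
}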
Example No. 3
0
int _aac_rx_init(struct aac_dev *dev)
{
    unsigned long start;
    unsigned long status;
    int restart = 0;
    int instance = dev->id;
    const char * name = dev->name;

    if (aac_adapter_ioremap(dev, dev->base_size)) {
        printk(KERN_WARNING "%s: unable to map adapter.\n", name);
        goto error_iounmap;
    }

    /* Failure to reset here is an option ... */
    dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
    dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
    dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
    if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
            !aac_rx_restart_adapter(dev, 0))
        /* Make sure the Hardware FIFO is empty */
        while ((++restart < 512) &&
                (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
    /*
     *	Check to see if the board panic'd while booting.
     */
    status = rx_readl(dev, MUnit.OMRx[0]);
    if (status & KERNEL_PANIC) {
        if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))
            goto error_iounmap;
        ++restart;
    }
    /*
     *	Check to see if the board failed any self tests.
     */
    status = rx_readl(dev, MUnit.OMRx[0]);
    if (status & SELF_TEST_FAILED) {
        printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
        goto error_iounmap;
    }
    /*
     *	Check to see if the monitor panic'd while booting.
     */
    if (status & MONITOR_PANIC) {
        printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
        goto error_iounmap;
    }
    start = jiffies;
    /*
     *	Wait for the adapter to be up and running. Wait up to 3 minutes
     */
    while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
    {
        if ((restart &&
                (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
                time_after(jiffies, start+HZ*startup_timeout)) {
            printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
                   dev->name, instance, status);
            goto error_iounmap;
        }
        if (!restart &&
                ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
                 time_after(jiffies, start + HZ *
                            ((startup_timeout > 60)
                             ? (startup_timeout - 60)
                             : (startup_timeout / 2))))) {
            if (likely(!aac_rx_restart_adapter(dev, aac_rx_check_health(dev))))
                start = jiffies;
            ++restart;
        }
        msleep(1);
    }
    if (restart && aac_commit)
        aac_commit = 1;
    /*
     *	Fill in the common function dispatch table.
     */
    dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
    dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
    dev->a_ops.adapter_notify = aac_rx_notify_adapter;
    dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
    dev->a_ops.adapter_check_health = aac_rx_check_health;
    dev->a_ops.adapter_restart = aac_rx_restart_adapter;

    /*
     *	First clear out all interrupts.  Then enable the one's that we
     *	can handle.
     */
    aac_adapter_comm(dev, AAC_COMM_PRODUCER);
    aac_adapter_disable_int(dev);
    rx_writel(dev, MUnit.ODR, 0xffffffff);
    aac_adapter_enable_int(dev);

    if (aac_init_adapter(dev) == NULL)
        goto error_iounmap;
    aac_adapter_comm(dev, dev->comm_interface);
    if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr,
                    IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
        printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
               name, instance);
        goto error_iounmap;
    }
    aac_adapter_enable_int(dev);
    /*
     *	Tell the adapter that all is configured, and it can
     * start accepting requests
     */
    aac_rx_start_adapter(dev);

    return 0;

error_iounmap:

    return -1;
}
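
The three-minute startup wait is built on jiffies deadlines with time_after(), which stays correct across counter wraparound because it compares via signed subtraction rather than a plain greater-than. A kernel-style sketch of the idiom; struct my_dev and my_dev_ready() are hypothetical placeholders:

#include <linux/jiffies.h>   /* jiffies, time_after() */
#include <linux/delay.h>     /* msleep() */
#include <linux/errno.h>

struct my_dev;                            /* hypothetical device type */
extern int my_dev_ready(struct my_dev *); /* hypothetical readiness check */

static int wait_for_ready(struct my_dev *dev, unsigned long timeout_jiffies)
{
    unsigned long deadline = jiffies + timeout_jiffies;

    while (!my_dev_ready(dev)) {
        /* Wraparound-safe deadline comparison. */
        if (time_after(jiffies, deadline))
            return -ETIMEDOUT;
        msleep(1);            /* yield between polls */
    }
    return 0;
}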
Example No. 4
0
int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr, char *astr, int *len_p)
{
	const hfsplus_unichr *ip;
	struct nls_table *nls = HFSPLUS_SB(sb).nls;
	u8 *op;
	u16 cc, c0, c1;
	u16 *ce1, *ce2;
	int i, len, ustrlen, res, compose;

	op = astr;
	ip = ustr->unicode;
	ustrlen = be16_to_cpu(ustr->length);
	len = *len_p;
	ce1 = NULL;
	compose = !(HFSPLUS_SB(sb).flags & HFSPLUS_SB_NODECOMPOSE);

	while (ustrlen > 0) {
		c0 = be16_to_cpu(*ip++);
		ustrlen--;
		/* search for single decomposed char */
		if (likely(compose))
			ce1 = hfsplus_compose_lookup(hfsplus_compose_table, c0);
		if (ce1 && (cc = ce1[0])) {
			/* start of a possibly decomposed Hangul char */
			if (cc != 0xffff)
				goto done;
			if (!ustrlen)
				goto same;
			c1 = be16_to_cpu(*ip) - Hangul_VBase;
			if (c1 < Hangul_VCount) {
				/* compose the Hangul char */
				cc = (c0 - Hangul_LBase) * Hangul_VCount;
				cc = (cc + c1) * Hangul_TCount;
				cc += Hangul_SBase;
				ip++;
				ustrlen--;
				if (!ustrlen)
					goto done;
				c1 = be16_to_cpu(*ip) - Hangul_TBase;
				if (c1 > 0 && c1 < Hangul_TCount) {
					cc += c1;
					ip++;
					ustrlen--;
				}
				goto done;
			}
		}
		while (1) {
			/* main loop for common case of not composed chars */
			if (!ustrlen)
				goto same;
			c1 = be16_to_cpu(*ip);
			if (likely(compose))
				ce1 = hfsplus_compose_lookup(hfsplus_compose_table, c1);
			if (ce1)
				break;
			switch (c0) {
			case 0:
				c0 = 0x2400;
				break;
			case '/':
				c0 = ':';
				break;
			}
			res = nls->uni2char(c0, op, len);
			if (res < 0) {
				if (res == -ENAMETOOLONG)
					goto out;
				*op = '?';
				res = 1;
			}
			op += res;
			len -= res;
			c0 = c1;
			ip++;
			ustrlen--;
		}
		ce2 = hfsplus_compose_lookup(ce1, c0);
		if (ce2) {
			i = 1;
			while (i < ustrlen) {
				ce1 = hfsplus_compose_lookup(ce2, be16_to_cpu(ip[i]));
				if (!ce1)
					break;
				i++;
				ce2 = ce1;
			}
			if ((cc = ce2[0])) {
				ip += i;
				ustrlen -= i;
				goto done;
			}
		}
	same:
		switch (c0) {
		case 0:
			cc = 0x2400;
			break;
		case '/':
			cc = ':';
			break;
		default:
			cc = c0;
		}
	done:
		res = nls->uni2char(cc, op, len);
		if (res < 0) {
			if (res == -ENAMETOOLONG)
				goto out;
			*op = '?';
			res = 1;
		}
		op += res;
		len -= res;
	}
	res = 0;
out:
	*len_p = (char *)op - astr;
	return res;
}
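
The Hangul branch applies the standard Unicode composition formula: syllable = SBase + (LIndex * VCount + VIndex) * TCount + TIndex, which is exactly the arithmetic performed on cc above. A standalone illustration with the standard constants:

#include <stdio.h>

/* Standard Unicode Hangul composition constants. */
enum {
    SBase = 0xAC00, LBase = 0x1100, VBase = 0x1161, TBase = 0x11A7,
    VCount = 21, TCount = 28
};

/* Compose a precomposed syllable from leading/vowel/(optional) trailing jamo. */
static unsigned compose_hangul(unsigned l, unsigned v, unsigned t)
{
    return SBase + ((l - LBase) * VCount + (v - VBase)) * TCount
                 + (t ? t - TBase : 0);
}

int main(void)
{
    /* U+1112 (h) + U+1161 (a) + U+11AB (n) -> U+D55C "han" */
    printf("U+%04X\n", compose_hangul(0x1112, 0x1161, 0x11AB));
    return 0;
}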
Example No. 5
0
/**
 * Executes the validation
 */
PHP_METHOD(Phalcon_Validation_Validator_File, validate) {

	zend_bool _2, _3, _4, _6, _7, _16, _17, _18, _20, _21, _23, _30, _31, _62, _69$$21, _77$$26;
	zephir_fcall_cache_entry *_12 = NULL, *_14 = NULL, *_43 = NULL;
	int ZEPHIR_LAST_CALL_STATUS;
	zval *field = NULL;
	zval *validation, *field_param = NULL, *_SERVER, *_POST, *_FILES, *value = NULL, *message = NULL, *label = NULL, *replacePairs = NULL, *types = NULL, *byteUnits = NULL, *unit = NULL, *maxSize = NULL, *matches = NULL, *bytes = NULL, *mime = NULL, *tmp = NULL, *width = NULL, *height = NULL, *minResolution = NULL, *maxResolution = NULL, *minWidth = NULL, *maxWidth = NULL, *minHeight = NULL, *maxHeight = NULL, *_0 = NULL, *_1, *_5, *_8, *_15 = NULL, *_19, *_22, *_24, *_25 = NULL, *_36 = NULL, *_53 = NULL, *_61 = NULL, *_63 = NULL, *_9$$4 = NULL, *_11$$4 = NULL, *_13$$4, *_10$$5, *_26$$7 = NULL, *_28$$7 = NULL, *_29$$7, *_27$$8, *_32$$9 = NULL, *_34$$9 = NULL, *_35$$9, *_33$$10, *_37$$11 = NULL, *_38$$11, *_39$$11, *_40$$11, *_41$$11, *_42$$11 = NULL, *_44$$11, *_45$$11, _46$$11, *_47$$11, *_48$$11 = NULL, *_49$$13 = NULL, *_51$$13 = NULL, *_52$$13, *_50$$14, *_54$$15, _55$$17, *_56$$17, *_57$$19 = NULL, *_59$$19 = NULL, *_60$$19, *_58$$20, *_64$$21, *_65$$21 = NULL, *_66$$21 = NULL, *_74$$21 = NULL, *_67$$22 = NULL, *_68$$22, *_70$$24 = NULL, *_71$$24 = NULL, *_73$$24, *_72$$25, *_75$$26 = NULL, *_76$$26, *_78$$27 = NULL, *_79$$27 = NULL, *_81$$27, *_80$$28;

	ZEPHIR_MM_GROW();
	zephir_get_global(&_FILES, SS("_FILES") TSRMLS_CC);
	zephir_get_global(&_POST, SS("_POST") TSRMLS_CC);
	zephir_get_global(&_SERVER, SS("_SERVER") TSRMLS_CC);
	zephir_fetch_params(1, 2, 0, &validation, &field_param);

	if (unlikely(Z_TYPE_P(field_param) != IS_STRING && Z_TYPE_P(field_param) != IS_NULL)) {
		zephir_throw_exception_string(spl_ce_InvalidArgumentException, SL("Parameter 'field' must be a string") TSRMLS_CC);
		RETURN_MM_NULL();
	}
	if (likely(Z_TYPE_P(field_param) == IS_STRING)) {
		zephir_get_strval(field, field_param);
	} else {
		ZEPHIR_INIT_VAR(field);
		ZVAL_EMPTY_STRING(field);
	}


	ZEPHIR_CALL_METHOD(&value, validation, "getvalue", NULL, 0, field);
	zephir_check_call_status();
	ZEPHIR_INIT_VAR(_0);
	ZVAL_STRING(_0, "label", ZEPHIR_TEMP_PARAM_COPY);
	ZEPHIR_CALL_METHOD(&label, this_ptr, "getoption", NULL, 0, _0);
	zephir_check_temp_parameter(_0);
	zephir_check_call_status();
	if (ZEPHIR_IS_EMPTY(label)) {
		ZEPHIR_CALL_METHOD(&label, validation, "getlabel", NULL, 0, field);
		zephir_check_call_status();
	}
	zephir_array_fetch_string(&_1, _SERVER, SL("REQUEST_METHOD"), PH_NOISY | PH_READONLY, "phalcon/validation/validator/file.zep", 62 TSRMLS_CC);
	_2 = ZEPHIR_IS_STRING(_1, "POST");
	if (_2) {
		_2 = ZEPHIR_IS_EMPTY(_POST);
	}
	_3 = _2;
	if (_3) {
		_3 = ZEPHIR_IS_EMPTY(_FILES);
	}
	_4 = _3;
	if (_4) {
		zephir_array_fetch_string(&_5, _SERVER, SL("CONTENT_LENGTH"), PH_NOISY | PH_READONLY, "phalcon/validation/validator/file.zep", 62 TSRMLS_CC);
		_4 = ZEPHIR_GT_LONG(_5, 0);
	}
	_6 = _4;
	if (!(_6)) {
		_7 = zephir_array_isset_string(value, SS("error"));
		if (_7) {
			zephir_array_fetch_string(&_8, value, SL("error"), PH_NOISY | PH_READONLY, "phalcon/validation/validator/file.zep", 62 TSRMLS_CC);
			_7 = ZEPHIR_IS_LONG_IDENTICAL(_8, 1);
		}
		_6 = _7;
	}
	if (_6) {
		ZEPHIR_INIT_VAR(_9$$4);
		ZVAL_STRING(_9$$4, "messageIniSize", ZEPHIR_TEMP_PARAM_COPY);
		ZEPHIR_CALL_METHOD(&message, this_ptr, "getoption", NULL, 0, _9$$4);
		zephir_check_temp_parameter(_9$$4);
		zephir_check_call_status();
		ZEPHIR_INIT_VAR(replacePairs);
		zephir_create_array(replacePairs, 1, 0 TSRMLS_CC);
		zephir_array_update_string(&replacePairs, SL(":field"), &label, PH_COPY | PH_SEPARATE);
		if (ZEPHIR_IS_EMPTY(message)) {
			ZEPHIR_INIT_VAR(_10$$5);
			ZVAL_STRING(_10$$5, "FileIniSize", ZEPHIR_TEMP_PARAM_COPY);
			ZEPHIR_CALL_METHOD(&message, validation, "getdefaultmessage", NULL, 0, _10$$5);
			zephir_check_temp_parameter(_10$$5);
			zephir_check_call_status();
		}
		ZEPHIR_INIT_NVAR(_9$$4);
		object_init_ex(_9$$4, phalcon_validation_message_ce);
		ZEPHIR_CALL_FUNCTION(&_11$$4, "strtr", &_12, 55, message, replacePairs);
		zephir_check_call_status();
		ZEPHIR_INIT_VAR(_13$$4);
		ZVAL_STRING(_13$$4, "FileIniSize", ZEPHIR_TEMP_PARAM_COPY);
		ZEPHIR_CALL_METHOD(NULL, _9$$4, "__construct", &_14, 434, _11$$4, field, _13$$4);
		zephir_check_temp_parameter(_13$$4);
		zephir_check_call_status();
		ZEPHIR_CALL_METHOD(NULL, validation, "appendmessage", NULL, 0, _9$$4);
		zephir_check_call_status();
		RETURN_MM_BOOL(0);
	}
	ZEPHIR_INIT_NVAR(_0);
	ZVAL_STRING(_0, "allowEmpty", ZEPHIR_TEMP_PARAM_COPY);
	ZEPHIR_CALL_METHOD(&_15, this_ptr, "issetoption", NULL, 0, _0);
	zephir_check_temp_parameter(_0);
	zephir_check_call_status();
	_16 = zephir_is_true(_15);
	if (_16) {
		_17 = ZEPHIR_IS_EMPTY(value);
		if (!(_17)) {
			_18 = zephir_array_isset_string(value, SS("error"));
			if (_18) {
				zephir_array_fetch_string(&_19, value, SL("error"), PH_NOISY | PH_READONLY, "phalcon/validation/validator/file.zep", 75 TSRMLS_CC);
				_18 = ZEPHIR_IS_LONG_IDENTICAL(_19, 4);
			}
			_17 = _18;
		}
		_16 = _17;
	}
	if (_16) {
		RETURN_MM_BOOL(1);
	}
	_20 = !(zephir_array_isset_string(value, SS("error")));
	if (!(_20)) {
		_20 = !(zephir_array_isset_string(value, SS("tmp_name")));
	}
	_21 = _20;
	if (!(_21)) {
		zephir_array_fetch_string(&_22, value, SL("error"), PH_NOISY | PH_READONLY, "phalcon/validation/validator/file.zep", 79 TSRMLS_CC);
		_21 = !ZEPHIR_IS_LONG_IDENTICAL(_22, 0);
	}
	_23 = _21;
	if (!(_23)) {
		zephir_array_fetch_string(&_24, value, SL("tmp_name"), PH_NOISY | PH_READONLY, "phalcon/validation/validator/file.zep", 79 TSRMLS_CC);
		ZEPHIR_CALL_FUNCTION(&_25, "is_uploaded_file", NULL, 229, _24);
		zephir_check_call_status();
		_23 = !zephir_is_true(_25);
	}
	if (_23) {
		ZEPHIR_INIT_VAR(_26$$7);
		ZVAL_STRING(_26$$7, "messageEmpty", ZEPHIR_TEMP_PARAM_COPY);
		ZEPHIR_CALL_METHOD(&message, this_ptr, "getoption", NULL, 0, _26$$7);
		zephir_check_temp_parameter(_26$$7);
		zephir_check_call_status();
		ZEPHIR_INIT_NVAR(replacePairs);
		zephir_create_array(replacePairs, 1, 0 TSRMLS_CC);
		zephir_array_update_string(&replacePairs, SL(":field"), &label, PH_COPY | PH_SEPARATE);
		if (ZEPHIR_IS_EMPTY(message)) {
			ZEPHIR_INIT_VAR(_27$$8);
			ZVAL_STRING(_27$$8, "FileEmpty", ZEPHIR_TEMP_PARAM_COPY);
			ZEPHIR_CALL_METHOD(&message, validation, "getdefaultmessage", NULL, 0, _27$$8);
			zephir_check_temp_parameter(_27$$8);
			zephir_check_call_status();
		}
		ZEPHIR_INIT_NVAR(_26$$7);
		object_init_ex(_26$$7, phalcon_validation_message_ce);
		ZEPHIR_CALL_FUNCTION(&_28$$7, "strtr", &_12, 55, message, replacePairs);
		zephir_check_call_status();
		ZEPHIR_INIT_VAR(_29$$7);
		ZVAL_STRING(_29$$7, "FileEmpty", ZEPHIR_TEMP_PARAM_COPY);
		ZEPHIR_CALL_METHOD(NULL, _26$$7, "__construct", &_14, 434, _28$$7, field, _29$$7);
		zephir_check_temp_parameter(_29$$7);
		zephir_check_call_status();
		ZEPHIR_CALL_METHOD(NULL, validation, "appendmessage", NULL, 0, _26$$7);
		zephir_check_call_status();
		RETURN_MM_BOOL(0);
	}
	_30 = !(zephir_array_isset_string(value, SS("name")));
	if (!(_30)) {
		_30 = !(zephir_array_isset_string(value, SS("type")));
	}
	_31 = _30;
	if (!(_31)) {
		_31 = !(zephir_array_isset_string(value, SS("size")));
	}
	if (_31) {
		ZEPHIR_INIT_VAR(_32$$9);
		ZVAL_STRING(_32$$9, "messageValid", ZEPHIR_TEMP_PARAM_COPY);
		ZEPHIR_CALL_METHOD(&message, this_ptr, "getoption", NULL, 0, _32$$9);
		zephir_check_temp_parameter(_32$$9);
		zephir_check_call_status();
		ZEPHIR_INIT_NVAR(replacePairs);
		zephir_create_array(replacePairs, 1, 0 TSRMLS_CC);
		zephir_array_update_string(&replacePairs, SL(":field"), &label, PH_COPY | PH_SEPARATE);
		if (ZEPHIR_IS_EMPTY(message)) {
			ZEPHIR_INIT_VAR(_33$$10);
			ZVAL_STRING(_33$$10, "FileValid", ZEPHIR_TEMP_PARAM_COPY);
			ZEPHIR_CALL_METHOD(&message, validation, "getdefaultmessage", NULL, 0, _33$$10);
			zephir_check_temp_parameter(_33$$10);
			zephir_check_call_status();
		}
		ZEPHIR_INIT_NVAR(_32$$9);
		object_init_ex(_32$$9, phalcon_validation_message_ce);
		ZEPHIR_CALL_FUNCTION(&_34$$9, "strtr", &_12, 55, message, replacePairs);
		zephir_check_call_status();
		ZEPHIR_INIT_VAR(_35$$9);
		ZVAL_STRING(_35$$9, "FileValid", ZEPHIR_TEMP_PARAM_COPY);
		ZEPHIR_CALL_METHOD(NULL, _32$$9, "__construct", &_14, 434, _34$$9, field, _35$$9);
		zephir_check_temp_parameter(_35$$9);
		zephir_check_call_status();
		ZEPHIR_CALL_METHOD(NULL, validation, "appendmessage", NULL, 0, _32$$9);
		zephir_check_call_status();
		RETURN_MM_BOOL(0);
	}
	ZEPHIR_INIT_NVAR(_0);
	ZVAL_STRING(_0, "maxSize", ZEPHIR_TEMP_PARAM_COPY);
	ZEPHIR_CALL_METHOD(&_36, this_ptr, "issetoption", NULL, 0, _0);
	zephir_check_temp_parameter(_0);
	zephir_check_call_status();
	if (zephir_is_true(_36)) {
		ZEPHIR_INIT_VAR(byteUnits);
		zephir_create_array(byteUnits, 9, 0 TSRMLS_CC);
		add_assoc_long_ex(byteUnits, SS("B"), 0);
		add_assoc_long_ex(byteUnits, SS("K"), 10);
		add_assoc_long_ex(byteUnits, SS("M"), 20);
		add_assoc_long_ex(byteUnits, SS("G"), 30);
		add_assoc_long_ex(byteUnits, SS("T"), 40);
		add_assoc_long_ex(byteUnits, SS("KB"), 10);
		add_assoc_long_ex(byteUnits, SS("MB"), 20);
		add_assoc_long_ex(byteUnits, SS("GB"), 30);
		add_assoc_long_ex(byteUnits, SS("TB"), 40);
		ZEPHIR_INIT_VAR(_37$$11);
		ZVAL_STRING(_37$$11, "maxSize", ZEPHIR_TEMP_PARAM_COPY);
		ZEPHIR_CALL_METHOD(&maxSize, this_ptr, "getoption", NULL, 0, _37$$11);
		zephir_check_temp_parameter(_37$$11);
		zephir_check_call_status();
		ZEPHIR_INIT_VAR(matches);
		ZVAL_NULL(matches);
		ZEPHIR_INIT_VAR(unit);
		ZVAL_STRING(unit, "B", 1);
		ZEPHIR_INIT_NVAR(_37$$11);
		ZEPHIR_INIT_VAR(_38$$11);
		ZEPHIR_INIT_VAR(_39$$11);
		zephir_array_keys(_39$$11, byteUnits TSRMLS_CC);
		zephir_fast_join_str(_38$$11, SL("|"), _39$$11 TSRMLS_CC);
		ZEPHIR_INIT_VAR(_40$$11);
		ZEPHIR_CONCAT_SVS(_40$$11, "/^([0-9]+(?:\\.[0-9]+)?)(", _38$$11, ")?$/Di");
		zephir_preg_match(_37$$11, _40$$11, maxSize, matches, 0, 0 , 0  TSRMLS_CC);
		if (zephir_array_isset_long(matches, 2)) {
			ZEPHIR_OBS_NVAR(unit);
			zephir_array_fetch_long(&unit, matches, 2, PH_NOISY, "phalcon/validation/validator/file.zep", 115 TSRMLS_CC);
		}
		zephir_array_fetch_long(&_41$$11, matches, 1, PH_NOISY | PH_READONLY, "phalcon/validation/validator/file.zep", 118 TSRMLS_CC);
		ZEPHIR_CALL_FUNCTION(&_42$$11, "floatval", &_43, 301, _41$$11);
		zephir_check_call_status();
		ZEPHIR_INIT_VAR(_44$$11);
		zephir_array_fetch(&_45$$11, byteUnits, unit, PH_NOISY | PH_READONLY, "phalcon/validation/validator/file.zep", 118 TSRMLS_CC);
		ZEPHIR_SINIT_VAR(_46$$11);
		ZVAL_LONG(&_46$$11, 2);
		zephir_pow_function(_44$$11, &_46$$11, _45$$11);
		ZEPHIR_INIT_VAR(bytes);
		mul_function(bytes, _42$$11, _44$$11 TSRMLS_CC);
		zephir_array_fetch_string(&_47$$11, value, SL("size"), PH_NOISY | PH_READONLY, "phalcon/validation/validator/file.zep", 120 TSRMLS_CC);
		ZEPHIR_CALL_FUNCTION(&_42$$11, "floatval", &_43, 301, _47$$11);
		zephir_check_call_status();
		ZEPHIR_CALL_FUNCTION(&_48$$11, "floatval", &_43, 301, bytes);
		zephir_check_call_status();
		if (ZEPHIR_GT(_42$$11, _48$$11)) {
			ZEPHIR_INIT_VAR(_49$$13);
			ZVAL_STRING(_49$$13, "messageSize", ZEPHIR_TEMP_PARAM_COPY);
			ZEPHIR_CALL_METHOD(&message, this_ptr, "getoption", NULL, 0, _49$$13);
			zephir_check_temp_parameter(_49$$13);
			zephir_check_call_status();
			ZEPHIR_INIT_NVAR(replacePairs);
			zephir_create_array(replacePairs, 2, 0 TSRMLS_CC);
			zephir_array_update_string(&replacePairs, SL(":field"), &label, PH_COPY | PH_SEPARATE);
			zephir_array_update_string(&replacePairs, SL(":max"), &maxSize, PH_COPY | PH_SEPARATE);
			if (ZEPHIR_IS_EMPTY(message)) {
				ZEPHIR_INIT_VAR(_50$$14);
				ZVAL_STRING(_50$$14, "FileSize", ZEPHIR_TEMP_PARAM_COPY);
				ZEPHIR_CALL_METHOD(&message, validation, "getdefaultmessage", NULL, 0, _50$$14);
				zephir_check_temp_parameter(_50$$14);
				zephir_check_call_status();
			}
			ZEPHIR_INIT_NVAR(_49$$13);
			object_init_ex(_49$$13, phalcon_validation_message_ce);
			ZEPHIR_CALL_FUNCTION(&_51$$13, "strtr", &_12, 55, message, replacePairs);
			zephir_check_call_status();
			ZEPHIR_INIT_VAR(_52$$13);
			ZVAL_STRING(_52$$13, "FileSize", ZEPHIR_TEMP_PARAM_COPY);
			ZEPHIR_CALL_METHOD(NULL, _49$$13, "__construct", &_14, 434, _51$$13, field, _52$$13);
			zephir_check_temp_parameter(_52$$13);
			zephir_check_call_status();
			ZEPHIR_CALL_METHOD(NULL, validation, "appendmessage", NULL, 0, _49$$13);
			zephir_check_call_status();
			RETURN_MM_BOOL(0);
		}
	}
	ZEPHIR_INIT_NVAR(_0);
	ZVAL_STRING(_0, "allowedTypes", ZEPHIR_TEMP_PARAM_COPY);
	ZEPHIR_CALL_METHOD(&_53, this_ptr, "issetoption", NULL, 0, _0);
	zephir_check_temp_parameter(_0);
	zephir_check_call_status();
	if (zephir_is_true(_53)) {
		ZEPHIR_INIT_VAR(_54$$15);
		ZVAL_STRING(_54$$15, "allowedTypes", ZEPHIR_TEMP_PARAM_COPY);
		ZEPHIR_CALL_METHOD(&types, this_ptr, "getoption", NULL, 0, _54$$15);
		zephir_check_temp_parameter(_54$$15);
		zephir_check_call_status();
		if (Z_TYPE_P(types) != IS_ARRAY) {
			ZEPHIR_THROW_EXCEPTION_DEBUG_STR(phalcon_validation_exception_ce, "Option 'allowedTypes' must be an array", "phalcon/validation/validator/file.zep", 138);
			return;
		}
		if ((zephir_function_exists_ex(SS("finfo_open") TSRMLS_CC) == SUCCESS)) {
			ZEPHIR_SINIT_VAR(_55$$17);
			ZVAL_LONG(&_55$$17, 16);
			ZEPHIR_CALL_FUNCTION(&tmp, "finfo_open", NULL, 226, &_55$$17);
			zephir_check_call_status();
			zephir_array_fetch_string(&_56$$17, value, SL("tmp_name"), PH_NOISY | PH_READONLY, "phalcon/validation/validator/file.zep", 143 TSRMLS_CC);
			ZEPHIR_CALL_FUNCTION(&mime, "finfo_file", NULL, 227, tmp, _56$$17);
			zephir_check_call_status();
			ZEPHIR_CALL_FUNCTION(NULL, "finfo_close", NULL, 228, tmp);
			zephir_check_call_status();
		} else {
			ZEPHIR_OBS_NVAR(mime);
			zephir_array_fetch_string(&mime, value, SL("type"), PH_NOISY, "phalcon/validation/validator/file.zep", 147 TSRMLS_CC);
		}
		if (!(zephir_fast_in_array(mime, types TSRMLS_CC))) {
			ZEPHIR_INIT_VAR(_57$$19);
			ZVAL_STRING(_57$$19, "messageType", ZEPHIR_TEMP_PARAM_COPY);
			ZEPHIR_CALL_METHOD(&message, this_ptr, "getoption", NULL, 0, _57$$19);
			zephir_check_temp_parameter(_57$$19);
			zephir_check_call_status();
			ZEPHIR_INIT_NVAR(replacePairs);
			zephir_create_array(replacePairs, 2, 0 TSRMLS_CC);
			zephir_array_update_string(&replacePairs, SL(":field"), &label, PH_COPY | PH_SEPARATE);
			ZEPHIR_INIT_NVAR(_57$$19);
			zephir_fast_join_str(_57$$19, SL(", "), types TSRMLS_CC);
			zephir_array_update_string(&replacePairs, SL(":types"), &_57$$19, PH_COPY | PH_SEPARATE);
			if (ZEPHIR_IS_EMPTY(message)) {
				ZEPHIR_INIT_VAR(_58$$20);
				ZVAL_STRING(_58$$20, "FileType", ZEPHIR_TEMP_PARAM_COPY);
				ZEPHIR_CALL_METHOD(&message, validation, "getdefaultmessage", NULL, 0, _58$$20);
				zephir_check_temp_parameter(_58$$20);
				zephir_check_call_status();
			}
			ZEPHIR_INIT_NVAR(_57$$19);
			object_init_ex(_57$$19, phalcon_validation_message_ce);
			ZEPHIR_CALL_FUNCTION(&_59$$19, "strtr", &_12, 55, message, replacePairs);
			zephir_check_call_status();
			ZEPHIR_INIT_VAR(_60$$19);
			ZVAL_STRING(_60$$19, "FileType", ZEPHIR_TEMP_PARAM_COPY);
			ZEPHIR_CALL_METHOD(NULL, _57$$19, "__construct", &_14, 434, _59$$19, field, _60$$19);
			zephir_check_temp_parameter(_60$$19);
			zephir_check_call_status();
			ZEPHIR_CALL_METHOD(NULL, validation, "appendmessage", NULL, 0, _57$$19);
			zephir_check_call_status();
			RETURN_MM_BOOL(0);
		}
	}
	ZEPHIR_INIT_NVAR(_0);
	ZVAL_STRING(_0, "minResolution", ZEPHIR_TEMP_PARAM_COPY);
	ZEPHIR_CALL_METHOD(&_61, this_ptr, "issetoption", NULL, 0, _0);
	zephir_check_temp_parameter(_0);
	zephir_check_call_status();
	_62 = zephir_is_true(_61);
	if (!(_62)) {
		ZEPHIR_INIT_NVAR(_0);
		ZVAL_STRING(_0, "maxResolution", ZEPHIR_TEMP_PARAM_COPY);
		ZEPHIR_CALL_METHOD(&_63, this_ptr, "issetoption", NULL, 0, _0);
		zephir_check_temp_parameter(_0);
		zephir_check_call_status();
		_62 = zephir_is_true(_63);
	}
	if (_62) {
		zephir_array_fetch_string(&_64$$21, value, SL("tmp_name"), PH_NOISY | PH_READONLY, "phalcon/validation/validator/file.zep", 164 TSRMLS_CC);
		ZEPHIR_CALL_FUNCTION(&tmp, "getimagesize", NULL, 237, _64$$21);
		zephir_check_call_status();
		ZEPHIR_OBS_VAR(width);
		zephir_array_fetch_long(&width, tmp, 0, PH_NOISY, "phalcon/validation/validator/file.zep", 165 TSRMLS_CC);
		ZEPHIR_OBS_VAR(height);
		zephir_array_fetch_long(&height, tmp, 1, PH_NOISY, "phalcon/validation/validator/file.zep", 166 TSRMLS_CC);
		ZEPHIR_INIT_VAR(_66$$21);
		ZVAL_STRING(_66$$21, "minResolution", ZEPHIR_TEMP_PARAM_COPY);
		ZEPHIR_CALL_METHOD(&_65$$21, this_ptr, "issetoption", NULL, 0, _66$$21);
		zephir_check_temp_parameter(_66$$21);
		zephir_check_call_status();
		if (zephir_is_true(_65$$21)) {
			ZEPHIR_INIT_VAR(_68$$22);
			ZVAL_STRING(_68$$22, "minResolution", ZEPHIR_TEMP_PARAM_COPY);
			ZEPHIR_CALL_METHOD(&_67$$22, this_ptr, "getoption", NULL, 0, _68$$22);
			zephir_check_temp_parameter(_68$$22);
			zephir_check_call_status();
			ZEPHIR_INIT_VAR(minResolution);
			zephir_fast_explode_str(minResolution, SL("x"), _67$$22, LONG_MAX TSRMLS_CC);
			ZEPHIR_OBS_VAR(minWidth);
			zephir_array_fetch_long(&minWidth, minResolution, 0, PH_NOISY, "phalcon/validation/validator/file.zep", 170 TSRMLS_CC);
			ZEPHIR_OBS_VAR(minHeight);
			zephir_array_fetch_long(&minHeight, minResolution, 1, PH_NOISY, "phalcon/validation/validator/file.zep", 171 TSRMLS_CC);
		} else {
			ZEPHIR_INIT_NVAR(minWidth);
			ZVAL_LONG(minWidth, 1);
			ZEPHIR_INIT_NVAR(minHeight);
			ZVAL_LONG(minHeight, 1);
		}
		_69$$21 = ZEPHIR_LT(width, minWidth);
		if (!(_69$$21)) {
			_69$$21 = ZEPHIR_LT(height, minHeight);
		}
		if (_69$$21) {
			ZEPHIR_INIT_VAR(_70$$24);
			ZVAL_STRING(_70$$24, "messageMinResolution", ZEPHIR_TEMP_PARAM_COPY);
			ZEPHIR_CALL_METHOD(&message, this_ptr, "getoption", NULL, 0, _70$$24);
			zephir_check_temp_parameter(_70$$24);
			zephir_check_call_status();
			ZEPHIR_INIT_NVAR(replacePairs);
			zephir_create_array(replacePairs, 2, 0 TSRMLS_CC);
			zephir_array_update_string(&replacePairs, SL(":field"), &label, PH_COPY | PH_SEPARATE);
			ZEPHIR_INIT_NVAR(_70$$24);
			ZVAL_STRING(_70$$24, "minResolution", ZEPHIR_TEMP_PARAM_COPY);
			ZEPHIR_CALL_METHOD(&_71$$24, this_ptr, "getoption", NULL, 0, _70$$24);
			zephir_check_temp_parameter(_70$$24);
			zephir_check_call_status();
			zephir_array_update_string(&replacePairs, SL(":min"), &_71$$24, PH_COPY | PH_SEPARATE);
			if (ZEPHIR_IS_EMPTY(message)) {
				ZEPHIR_INIT_VAR(_72$$25);
				ZVAL_STRING(_72$$25, "FileMinResolution", ZEPHIR_TEMP_PARAM_COPY);
				ZEPHIR_CALL_METHOD(&message, validation, "getdefaultmessage", NULL, 0, _72$$25);
				zephir_check_temp_parameter(_72$$25);
				zephir_check_call_status();
			}
			ZEPHIR_INIT_NVAR(_70$$24);
			object_init_ex(_70$$24, phalcon_validation_message_ce);
			ZEPHIR_CALL_FUNCTION(&_71$$24, "strtr", &_12, 55, message, replacePairs);
			zephir_check_call_status();
			ZEPHIR_INIT_VAR(_73$$24);
			ZVAL_STRING(_73$$24, "FileMinResolution", ZEPHIR_TEMP_PARAM_COPY);
			ZEPHIR_CALL_METHOD(NULL, _70$$24, "__construct", &_14, 434, _71$$24, field, _73$$24);
			zephir_check_temp_parameter(_73$$24);
			zephir_check_call_status();
			ZEPHIR_CALL_METHOD(NULL, validation, "appendmessage", NULL, 0, _70$$24);
			zephir_check_call_status();
			RETURN_MM_BOOL(0);
		}
		ZEPHIR_INIT_NVAR(_66$$21);
		ZVAL_STRING(_66$$21, "maxResolution", ZEPHIR_TEMP_PARAM_COPY);
		ZEPHIR_CALL_METHOD(&_74$$21, this_ptr, "issetoption", NULL, 0, _66$$21);
		zephir_check_temp_parameter(_66$$21);
		zephir_check_call_status();
		if (zephir_is_true(_74$$21)) {
			ZEPHIR_INIT_VAR(_76$$26);
			ZVAL_STRING(_76$$26, "maxResolution", ZEPHIR_TEMP_PARAM_COPY);
			ZEPHIR_CALL_METHOD(&_75$$26, this_ptr, "getoption", NULL, 0, _76$$26);
			zephir_check_temp_parameter(_76$$26);
			zephir_check_call_status();
			ZEPHIR_INIT_VAR(maxResolution);
			zephir_fast_explode_str(maxResolution, SL("x"), _75$$26, LONG_MAX TSRMLS_CC);
			ZEPHIR_OBS_VAR(maxWidth);
			zephir_array_fetch_long(&maxWidth, maxResolution, 0, PH_NOISY, "phalcon/validation/validator/file.zep", 192 TSRMLS_CC);
			ZEPHIR_OBS_VAR(maxHeight);
			zephir_array_fetch_long(&maxHeight, maxResolution, 1, PH_NOISY, "phalcon/validation/validator/file.zep", 193 TSRMLS_CC);
			_77$$26 = ZEPHIR_GT(width, maxWidth);
			if (!(_77$$26)) {
				_77$$26 = ZEPHIR_GT(height, maxHeight);
			}
			if (_77$$26) {
				ZEPHIR_INIT_VAR(_78$$27);
				ZVAL_STRING(_78$$27, "messageMaxResolution", ZEPHIR_TEMP_PARAM_COPY);
				ZEPHIR_CALL_METHOD(&message, this_ptr, "getoption", NULL, 0, _78$$27);
				zephir_check_temp_parameter(_78$$27);
				zephir_check_call_status();
				ZEPHIR_INIT_NVAR(replacePairs);
				zephir_create_array(replacePairs, 2, 0 TSRMLS_CC);
				zephir_array_update_string(&replacePairs, SL(":field"), &label, PH_COPY | PH_SEPARATE);
				ZEPHIR_INIT_NVAR(_78$$27);
				ZVAL_STRING(_78$$27, "maxResolution", ZEPHIR_TEMP_PARAM_COPY);
				ZEPHIR_CALL_METHOD(&_79$$27, this_ptr, "getoption", NULL, 0, _78$$27);
				zephir_check_temp_parameter(_78$$27);
				zephir_check_call_status();
				zephir_array_update_string(&replacePairs, SL(":max"), &_79$$27, PH_COPY | PH_SEPARATE);
				if (ZEPHIR_IS_EMPTY(message)) {
					ZEPHIR_INIT_VAR(_80$$28);
					ZVAL_STRING(_80$$28, "FileMaxResolution", ZEPHIR_TEMP_PARAM_COPY);
					ZEPHIR_CALL_METHOD(&message, validation, "getdefaultmessage", NULL, 0, _80$$28);
					zephir_check_temp_parameter(_80$$28);
					zephir_check_call_status();
				}
				ZEPHIR_INIT_NVAR(_78$$27);
				object_init_ex(_78$$27, phalcon_validation_message_ce);
				ZEPHIR_CALL_FUNCTION(&_79$$27, "strtr", &_12, 55, message, replacePairs);
				zephir_check_call_status();
				ZEPHIR_INIT_VAR(_81$$27);
				ZVAL_STRING(_81$$27, "FileMaxResolution", ZEPHIR_TEMP_PARAM_COPY);
				ZEPHIR_CALL_METHOD(NULL, _78$$27, "__construct", &_14, 434, _79$$27, field, _81$$27);
				zephir_check_temp_parameter(_81$$27);
				zephir_check_call_status();
				ZEPHIR_CALL_METHOD(NULL, validation, "appendmessage", NULL, 0, _78$$27);
				zephir_check_call_status();
				RETURN_MM_BOOL(0);
			}
		}
	}
	RETURN_MM_BOOL(1);

}
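
The byteUnits table maps unit suffixes to binary exponents, so a maxSize option such as "10M" becomes 10 * 2^20 bytes (the zephir_pow_function/mul_function pair above). A plain-C sketch of the same conversion, with the original's regex replaced by a simple suffix switch for brevity:

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/* Convert "10M", "2G", "512" etc. to bytes, mirroring the byteUnits
 * table above: B=2^0, K/KB=2^10, M/MB=2^20, G/GB=2^30, T/TB=2^40. */
static double max_size_to_bytes(const char *s)
{
    char *end;
    double value = strtod(s, &end);   /* numeric prefix */
    int shift = 0;                    /* default: plain bytes */

    switch (*end) {
    case 'K': shift = 10; break;
    case 'M': shift = 20; break;
    case 'G': shift = 30; break;
    case 'T': shift = 40; break;
    }
    return value * pow(2.0, shift);
}

int main(void)
{
    printf("%.0f\n", max_size_to_bytes("10M")); /* prints 10485760 */
    return 0;
}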
Example No. 6
0
/**
 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 * Called either from the idle loop or from irq_exit() when an idle period was
 * just interrupted by an interrupt which did not cause a reschedule.
 */
void tick_nohz_stop_sched_tick(int inidle)
{
	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
	struct tick_sched *ts;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);

	/*
	 * Call to tick_nohz_start_idle stops the last_update_time from being
	 * updated. Thus, it must not be called in the event we are called from
	 * irq_exit() with the prior state different than idle.
	 */
	if (!inidle && !ts->inidle)
		goto end;

	/*
	 * Set ts->inidle unconditionally. Even if the system did not
	 * switch to NOHZ mode the cpu frequency governors rely on the
	 * update of the idle time accounting in tick_nohz_start_idle().
	 */
	ts->inidle = 1;

	now = tick_nohz_start_idle(cpu, ts);

	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		goto end;

	if (need_resched())
		goto end;

	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10) {
			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
			       (unsigned int) local_softirq_pending());
			ratelimit++;
		}
		goto end;
	}

	ts->idle_calls++;
	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqbegin(&xtime_lock);
		last_update = last_jiffies_update;
		last_jiffies = jiffies;
		time_delta = timekeeping_max_deferment();
	} while (read_seqretry(&xtime_lock, seq));

	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
	    arch_needs_cpu(cpu)) {
		next_jiffies = last_jiffies + 1;
		delta_jiffies = 1;
	} else {
		/* Get the next timer wheel timer */
		next_jiffies = get_next_timer_interrupt(last_jiffies);
		delta_jiffies = next_jiffies - last_jiffies;
	}
	/*
	 * Do not stop the tick, if we are only one off
	 * or if the cpu is required for rcu
	 */
	if (!ts->tick_stopped && delta_jiffies == 1)
		goto out;

	/* Schedule the tick, if we are at least one jiffie off */
	if ((long)delta_jiffies >= 1) {

		/*
		 * If this cpu is the one which updates jiffies, then
		 * give up the assignment and let it be taken by the
		 * cpu which runs the tick timer next, which might be
		 * this cpu as well. If we don't drop this here the
		 * jiffies might be stale and do_timer() never
		 * invoked. Keep track of the fact that it was the one
		 * which had the do_timer() duty last. If this cpu is
		 * the one which had the do_timer() duty last, we
		 * limit the sleep time to the timekeeping
	 * max_deferment value which we retrieved
		 * above. Otherwise we can sleep as long as we want.
		 */
		if (cpu == tick_do_timer_cpu) {
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			ts->do_timer_last = 1;
		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
			time_delta = KTIME_MAX;
			ts->do_timer_last = 0;
		} else if (!ts->do_timer_last) {
			time_delta = KTIME_MAX;
		}

		/*
		 * calculate the expiry time for the next timer wheel
		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
		 * that there is no timer pending or at least extremely
		 * far into the future (12 days for HZ=1000). In this
		 * case we set the expiry to the end of time.
		 */
		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
			/*
			 * Calculate the time delta for the next timer event.
			 * If the time delta exceeds the maximum time delta
			 * permitted by the current clocksource then adjust
			 * the time delta accordingly to ensure the
			 * clocksource does not wrap.
			 */
			time_delta = min_t(u64, time_delta,
					   tick_period.tv64 * delta_jiffies);
		}

		if (time_delta < KTIME_MAX)
			expires = ktime_add_ns(last_update, time_delta);
		else
			expires.tv64 = KTIME_MAX;

		if (delta_jiffies > 1)
			cpumask_set_cpu(cpu, nohz_cpu_mask);

		/* Skip reprogramming of the event if it's not changed */
		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
			goto out;

		/*
		 * nohz_stop_sched_tick can be called several times before
		 * the nohz_restart_sched_tick is called. This happens when
		 * interrupts arrive which do not cause a reschedule. In the
		 * first call we save the current tick time, so we can restart
		 * the scheduler tick in nohz_restart_sched_tick.
		 */
		if (!ts->tick_stopped) {
			select_nohz_load_balancer(1);

			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
			rcu_enter_nohz();
		}

		ts->idle_sleeps++;

		/* Mark expires */
		ts->idle_expires = expires;

		/*
		 * If the expiration time == KTIME_MAX, then
		 * in this case we simply stop the tick timer.
		 */
		 if (unlikely(expires.tv64 == KTIME_MAX)) {
			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
				hrtimer_cancel(&ts->sched_timer);
			goto out;
		}

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start(&ts->sched_timer, expires,
				      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				goto out;
		} else if (!tick_program_event(expires, 0))
				goto out;
		/*
		 * We are past the event already. So we crossed a
		 * jiffie boundary. Update jiffies and raise the
		 * softirq.
		 */
		tick_do_update_jiffies64(ktime_get());
		cpumask_clear_cpu(cpu, nohz_cpu_mask);
	}
	raise_softirq_irqoff(TIMER_SOFTIRQ);
out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
	ts->sleep_length = ktime_sub(dev->next_event, now);
end:
	local_irq_restore(flags);
}
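
The jiffies snapshot above uses the seqlock reader protocol: run the read section, then retry it if a writer interleaved. Isolated into a sketch, assuming the same-era kernel symbols xtime_lock and jiffies:

#include <linux/seqlock.h>
#include <linux/jiffies.h>

extern seqlock_t xtime_lock;   /* same-era timekeeping seqlock */

/* The read section may execute more than once, so it must be
 * side-effect free: just copy the shared state into locals. */
static unsigned long snapshot_jiffies(void)
{
    unsigned long seq, snap;

    do {
        seq = read_seqbegin(&xtime_lock);      /* begin read section */
        snap = jiffies;                        /* copy shared state */
    } while (read_seqretry(&xtime_lock, seq)); /* writer raced: retry */

    return snap;
}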
Example No. 7
0
/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		struct tipc_msg *msg;

		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg = buf_msg(buf);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		bcl->stats.sent_info++;

		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *b = p;
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */

		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(b, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(b, tbuf, &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}

		/* Swap bearers for next packet */
		if (s) {
			bcbearer->bpairs[bp_index].primary = s;
			bcbearer->bpairs[bp_index].secondary = p;
		}

		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}
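
tipc_nmap_diff() leaves in remains_new the destinations of remains that the bearer's node map does not cover, which boils down to a word-wise A & ~B plus a population count. A simplified sketch of that operation; the field names and map width are illustrative, not TIPC's exact layout:

#include <stdio.h>

#define NMAP_WORDS 4

struct node_map {
    unsigned count;                 /* number of bits set */
    unsigned long bits[NMAP_WORDS];
};

/* diff = a \ b : nodes present in `a` but not covered by `b`. */
static void nmap_diff(const struct node_map *a, const struct node_map *b,
                      struct node_map *diff)
{
    diff->count = 0;
    for (int w = 0; w < NMAP_WORDS; w++) {
        diff->bits[w] = a->bits[w] & ~b->bits[w];
        diff->count += __builtin_popcountl(diff->bits[w]);
    }
}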
Example No. 8
0
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->gtime = cputime_zero;
	p->utimescaled = cputime_zero;
	p->stimescaled = cputime_zero;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;
#endif
#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
 	if (IS_ERR(p->mempolicy)) {
 		retval = PTR_ERR(p->mempolicy);
 		p->mempolicy = NULL;
 		goto bad_fork_cleanup_cgroup;
 	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	p->memcg_batch.do_batch = 0;
	p->memcg_batch.memcg = NULL;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;

	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_policy;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_io(clone_flags, p)))
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;

		if (clone_flags & CLONE_NEWPID) {
			retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
			if (retval < 0)
				goto bad_fork_free_pid;
		}
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if (current->nsproxy != p->nsproxy) {
		retval = ns_cgroup_clone(p, pid);
		if (retval)
			goto bad_fork_free_pid;
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
 	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		current->signal->nr_threads++;
		atomic_inc(&current->signal->live);
		atomic_inc(&current->signal->sigcnt);
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		tracehook_finish_clone(p, clone_flags, trace);

		if (thread_group_leader(p)) {
			if (clone_flags & CLONE_NEWPID)
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	perf_event_fork(p);
	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm) {
		task_lock(p);
		if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
			atomic_dec(&p->mm->oom_disable_count);
		task_unlock(p);
		mmput(p->mm);
	}
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
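
The bad_fork_* tail is the kernel's goto-unwind idiom: each label undoes exactly one successful setup step, and a failure jumps to the label matching how far construction got, so teardown runs in reverse order of setup. The skeleton of the pattern, with hypothetical steps:

struct obj;                       /* hypothetical object under construction */
extern int step_a(struct obj *), step_b(struct obj *), step_c(struct obj *);
extern void teardown_a(struct obj *), teardown_b(struct obj *);

int setup_object(struct obj *o)
{
    int err;

    err = step_a(o);
    if (err)
        goto out;
    err = step_b(o);
    if (err)
        goto undo_a;
    err = step_c(o);
    if (err)
        goto undo_b;
    return 0;                     /* fully constructed */

undo_b:
    teardown_b(o);                /* unwind in reverse order of setup */
undo_a:
    teardown_a(o);
out:
    return err;
}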
Example No. 9
0
int
main(
    unsigned long long spe_id,
    unsigned long long ppu_vector_a,
    unsigned long long ppu_vector_b)
{
    int i, iter, buf_idx, vec_idx;
    unsigned long long ppu_vector_bases[2] _ALIG(128);
    vector float * pchunk_a, * pchunk_b;
    vector float g_vec = {0,0,0,0};

    ppu_vector_bases[0] = ppu_vector_a;
    ppu_vector_bases[1] = ppu_vector_b;

    const unsigned int spu_num = spu_read_in_mbox();
    unsigned long long get_edge_bytes = spu_num * SUBVEC_SZ_BYTES;

    float buffers[NBUFFERS * BUF_SZ_FLOATS] _ALIG(128);
    int buffer_tags[NBUFFERS][2] _ALIG(128);
    //int buffer_tags[NBUFFERS];

    for (iter = 0; iter < NBUFFERS; ++iter) {
        buffer_tags[iter][0] = mfc_tag_reserve();
        buffer_tags[iter][1] = mfc_tag_reserve();
    }

    // first mfc_get for all
    for (buf_idx = 0; buf_idx < NBUFFERS; ++buf_idx) {
        for (vec_idx = 0; vec_idx < 2; ++vec_idx) {
            mfc_get(buf_ptr_float(buffers, buf_idx, vec_idx),
                    ppu_vector_bases[vec_idx] + get_edge_bytes,
                    CHUNK_SZ_BYTES,
                    buffer_tags[buf_idx][vec_idx],
                    0, 0);
        }
        get_edge_bytes += CHUNK_SZ_BYTES; // each buffer takes the next chunk
    }

    //printf("subvec_sz-chunks: %d\n", SUBVEC_SZ_CHUNKS);
    //printf("%d==%d\n", MAXITER*NBUFFERS*CHUNK_SZ_FLOATS, SUBVEC_SZ_FLOATS);
    int chunksleft = SUBVEC_SZ_CHUNKS;
    while (chunksleft != 0) {
        for (buf_idx = 0; chunksleft != 0 && buf_idx < NBUFFERS; ++buf_idx) {
            const int tag_mask = (1 << buffer_tags[buf_idx][0])
                                 | (1 << buffer_tags[buf_idx][1]);

            mfc_write_tag_mask(tag_mask);
            mfc_read_tag_status_all();

            pchunk_a = buf_ptr_vecfloat(buffers, buf_idx, 0);
            pchunk_b = buf_ptr_vecfloat(buffers, buf_idx, 1);

            for (i = 0; i < CHUNK_SZ_FLOATVECS; ++i) {
                g_vec = spu_madd(pchunk_a[i], pchunk_b[i], g_vec);
            }

            // prefetch the next chunk pair only while chunks remain beyond
            // those already resident in the NBUFFERS in-flight buffers
            if (likely(chunksleft > NBUFFERS)) {
                for (vec_idx = 0; vec_idx < 2; ++vec_idx) {
                    mfc_get(buf_ptr_float(buffers, buf_idx, vec_idx),
                            ppu_vector_bases[vec_idx] + get_edge_bytes,
                            CHUNK_SZ_BYTES,
                            buffer_tags[buf_idx][vec_idx],
                            0, 0);
                }
                get_edge_bytes += CHUNK_SZ_BYTES; // advance only when a fetch was issued
            }
            --chunksleft;
        }
    }

    for (iter = 0; iter < NBUFFERS; ++iter) {
        mfc_tag_release(buffer_tags[iter][0]);
        mfc_tag_release(buffer_tags[iter][1]);
    }

    float_uint_t retval;
    retval.f =
        spu_extract(g_vec, 0) +
        spu_extract(g_vec, 1) +
        spu_extract(g_vec, 2) +
        spu_extract(g_vec, 3);

    //printf("retval: %f\n", retval.f);
    spu_write_out_mbox(retval.i);

    return 0;
}
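
The SPU kernel above overlaps computation with DMA by multibuffering: while one buffer's chunk pair is being multiplied and accumulated, mfc_get() streams the next pair into another buffer, and each buffer is fenced on its own pair of DMA tags before reuse. The fence idiom in isolation, a sketch assuming the usual spu_mfcio.h intrinsics already used above:

#include <spu_mfcio.h>   /* MFC DMA intrinsics on the SPE */

/* Block until both DMA transfers belonging to one buffer have completed. */
static inline void wait_for_buffer(unsigned tag_a, unsigned tag_b)
{
    mfc_write_tag_mask((1u << tag_a) | (1u << tag_b)); /* select the tags */
    mfc_read_tag_status_all();  /* stall until all selected tags finish */
}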
Example No. 10
0
int fimc_is_af_i2c_write(struct i2c_client *client, u8 addr, u8 data)
{
        int retries = I2C_RETRY_COUNT;
        int ret = 0, err = 0;
        u8 buf[2] = {0,};
        struct i2c_msg msg = {
                .addr   = client->addr,
                .flags  = 0,
                .len    = 2,
                .buf    = buf,
        };

        buf[0] = addr;
        buf[1] = data;

#if 0
        pr_info("%s : W(0x%02X%02X)\n",__func__, buf[0], buf[1]);
#endif

        do {
                ret = i2c_transfer(client->adapter, &msg, 1);
                if (likely(ret == 1))
                        break;

                usleep_range(10000,11000);
                err = ret;
        } while (--retries > 0);

        /* Retry occurred */
        if (unlikely(retries < I2C_RETRY_COUNT)) {
                err("i2c_write: error %d, write (%02X, %02X), retry %d\n",
                        err, addr, data, I2C_RETRY_COUNT - retries);
        }

        if (unlikely(ret != 1)) {
                err("I2C does not work\n\n");
                return -EIO;
        }

        return 0;
}
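
i2c_transfer() returns the number of messages actually transferred, so ret == 1 means success for this single-message write and anything else is retried up to I2C_RETRY_COUNT times. A hypothetical caller-side sketch built on the helper above; the 0x00/0x01 register layout is illustrative, not the real lens driver's map:

/* Hypothetical caller: program a 16-bit position as two register writes. */
static int af_set_position(struct i2c_client *client, u16 pos)
{
    int ret;

    ret = fimc_is_af_i2c_write(client, 0x00, (pos >> 8) & 0xFF);
    if (ret)
        return ret;               /* stop on the first failed write */
    return fimc_is_af_i2c_write(client, 0x01, pos & 0xFF);
}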

int fimc_is_af_ldo_enable(char *name, bool on)
{
	struct fimc_is_core *core = (struct fimc_is_core *)dev_get_drvdata(fimc_is_dev);
	struct regulator *regulator = NULL;
	struct platform_device *pdev = NULL;
	int ret = 0;

	BUG_ON(!core);
	BUG_ON(!core->pdev);

	pdev = core->pdev;

	regulator = regulator_get(&pdev->dev, name);
	if (IS_ERR_OR_NULL(regulator)) {
		err("%s : regulator_get(%s) fail\n", __func__, name);
		regulator_put(regulator);
		return -EINVAL;
	}

	if (on) {
		if (regulator_is_enabled(regulator)) {
			pr_info("%s: regulator is already enabled\n", name);
			goto exit;
		}

		ret = regulator_enable(regulator);
		if (ret) {
			err("%s : regulator_enable(%s) fail\n", __func__, name);
			goto exit;
		}
	} else {
		if (!regulator_is_enabled(regulator)) {
			pr_info("%s: regulator is already disabled\n", name);
			goto exit;
		}

		ret = regulator_disable(regulator);
		if (ret) {
			err("%s : regulator_disable(%s) fail\n", __func__, name);
			goto exit;
		}
	}
exit:
	regulator_put(regulator);

	return ret;
}

int fimc_is_af_power(struct fimc_is_device_af *af_device, bool onoff)
{
	int ret = 0;

	/*CAM_AF_2.8V_AP*/
	ret = fimc_is_af_ldo_enable("CAM_AF_2.8V_AP", onoff);
	if (ret) {
		err("failed to power control CAM_AF_2.8V_AP, onoff = %d", onoff);
		return -EINVAL;
	}

#ifdef CONFIG_OIS_USE
	/* OIS_VDD_2.8V */
	ret = fimc_is_af_ldo_enable("OIS_VDD_2.8V", onoff);
	if (ret) {
		err("failed to power control OIS_VDD_2.8V, onoff = %d", onoff);
		return -EINVAL;
	}

	/* OIS_VM_2.8V */
	ret = fimc_is_af_ldo_enable("OIS_VM_2.8V", onoff);
	if (ret) {
		err("failed to power control OIS_VM_2.8V, onoff = %d", onoff);
		return -EINVAL;
	}
#endif

	/*CAM_IO_1.8V_AP*/
	ret = fimc_is_af_ldo_enable("CAM_IO_1.8V_AP", onoff);
	if (ret) {
		err("failed to power control CAM_IO_1.8V_AP, onoff = %d", onoff);
		return -EINVAL;
	}

	usleep_range(5000, 5000);
	return ret;
}
Example No. 11
/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & CLONE_NEWUSER) {
		if (clone_flags & CLONE_THREAD)
			return -EINVAL;
		/* hopefully this check will go away when userns support is
		 * complete
		 */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
				!capable(CAP_SETGID))
			return -EPERM;
	}

	/*
	 * We hope to recycle these flags after 2.6.26
	 */
	if (unlikely(clone_flags & CLONE_STOPPED)) {
		static int __read_mostly count = 100;

		if (count > 0 && printk_ratelimit()) {
			char comm[TASK_COMM_LEN];

			count--;
			printk(KERN_INFO "fork(): process `%s' used deprecated "
					"clone flags 0x%lx\n",
				get_task_comm(comm, current),
				clone_flags & CLONE_STOPPED);
		}
	}

	/*
	 * When called from kernel_thread, don't do user tracing stuff.
	 */
	if (likely(user_mode(regs)))
		trace = tracehook_prepare_clone(clone_flags);

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might become invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		trace_sched_process_fork(current, p);

		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		audit_finish_fork(p);
		tracehook_report_clone(regs, clone_flags, nr, p);

		/*
		 * We set PF_STARTING at creation in case tracing wants to
		 * use this to distinguish a fully live task from one that
		 * hasn't gotten to tracehook_report_clone() yet.  Now we
		 * clear it and set the child going.
		 */
		p->flags &= ~PF_STARTING;

		if (unlikely(clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
			__set_task_state(p, TASK_STOPPED);
		} else {
			wake_up_new_task(p, clone_flags);
		}

		tracehook_report_clone_complete(trace, regs,
						clone_flags, nr, p);

		if (clone_flags & CLONE_VFORK) {
			freezer_do_not_count();
			wait_for_completion(&vfork);
			freezer_count();
			tracehook_report_vfork_done(p, nr);
		}
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}
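Architecture code wraps do_fork() in thin syscall stubs; the classic x86 entry points looked roughly like the sketch below (not the exact arch source, and the stack-pointer field name varies by architecture and kernel version):

/* Sketch of the usual arch-level wrappers around do_fork(). */
asmlinkage int sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

asmlinkage int sys_vfork(struct pt_regs *regs)
{
	/* vfork shares the VM and blocks the parent until the child execs */
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp,
		       regs, 0, NULL, NULL);
}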
Example No. 12
static void
interp_mem_stats_opcode_exit (vm_frame_ctx_t *frame_ctx_p,
                              vm_instr_counter_t instr_position,
                              mem_heap_stats_t *heap_stats_before_p,
                              mem_pools_stats_t *pools_stats_before_p)
{
  if (likely (!interp_mem_stats_enabled))
  {
    return;
  }

  interp_mem_stats_print_indentation -= INTERP_MEM_PRINT_INDENTATION_STEP;

  const uint32_t indentation = JERRY_MIN (interp_mem_stats_print_indentation,
                                          INTERP_MEM_PRINT_INDENTATION_MAX);

  char indent_prefix[INTERP_MEM_PRINT_INDENTATION_MAX + 2];
  memset (indent_prefix, ' ', sizeof (indent_prefix));
  indent_prefix[indentation] = '|';
  indent_prefix[indentation + 1] = '\0';

  mem_heap_stats_t heap_stats_after;
  mem_pools_stats_t pools_stats_after;

  interp_mem_get_stats (&heap_stats_after,
                        &pools_stats_after,
                        false, true);

  frame_ctx_p->context_peak_allocated_heap_bytes = JERRY_MAX (frame_ctx_p->context_peak_allocated_heap_bytes,
                                                             heap_stats_after.allocated_bytes);
  frame_ctx_p->context_peak_waste_heap_bytes = JERRY_MAX (frame_ctx_p->context_peak_waste_heap_bytes,
                                                         heap_stats_after.waste_bytes);
  frame_ctx_p->context_peak_pools_count = JERRY_MAX (frame_ctx_p->context_peak_pools_count,
                                                    pools_stats_after.pools_count);
  frame_ctx_p->context_peak_allocated_pool_chunks = JERRY_MAX (frame_ctx_p->context_peak_allocated_pool_chunks,
                                                              pools_stats_after.allocated_chunks);

  vm_instr_t instr = vm_get_instr (frame_ctx_p->bytecode_header_p->instrs_p, instr_position);

  printf ("%s Allocated heap bytes:  %5u -> %5u (%+5d, local %5u, peak %5u)\n",
          indent_prefix,
          (uint32_t) heap_stats_before_p->allocated_bytes,
          (uint32_t) heap_stats_after.allocated_bytes,
          (uint32_t) (heap_stats_after.allocated_bytes - heap_stats_before_p->allocated_bytes),
          (uint32_t) (heap_stats_after.peak_allocated_bytes - JERRY_MAX (heap_stats_before_p->allocated_bytes,
                                                                         heap_stats_after.allocated_bytes)),
          (uint32_t) heap_stats_after.global_peak_allocated_bytes);

  if (heap_stats_before_p->waste_bytes != heap_stats_after.waste_bytes)
  {
    printf ("%s Waste heap bytes:      %5u -> %5u (%+5d, local %5u, peak %5u)\n",
            indent_prefix,
            (uint32_t) heap_stats_before_p->waste_bytes,
            (uint32_t) heap_stats_after.waste_bytes,
            (uint32_t) (heap_stats_after.waste_bytes - heap_stats_before_p->waste_bytes),
            (uint32_t) (heap_stats_after.peak_waste_bytes - JERRY_MAX (heap_stats_before_p->waste_bytes,
                                                                       heap_stats_after.waste_bytes)),
            (uint32_t) heap_stats_after.global_peak_waste_bytes);
  }

  if (pools_stats_before_p->pools_count != pools_stats_after.pools_count)
  {
    printf ("%s Pools:                 %5u -> %5u (%+5d, local %5u, peak %5u)\n",
            indent_prefix,
            (uint32_t) pools_stats_before_p->pools_count,
            (uint32_t) pools_stats_after.pools_count,
            (uint32_t) (pools_stats_after.pools_count - pools_stats_before_p->pools_count),
            (uint32_t) (pools_stats_after.peak_pools_count - JERRY_MAX (pools_stats_before_p->pools_count,
                                                                        pools_stats_after.pools_count)),
            (uint32_t) pools_stats_after.global_peak_pools_count);
  }

  if (pools_stats_before_p->allocated_chunks != pools_stats_after.allocated_chunks)
  {
    printf ("%s Allocated pool chunks: %5u -> %5u (%+5d, local %5u, peak %5u)\n",
            indent_prefix,
            (uint32_t) pools_stats_before_p->allocated_chunks,
            (uint32_t) pools_stats_after.allocated_chunks,
            (uint32_t) (pools_stats_after.allocated_chunks - pools_stats_before_p->allocated_chunks),
            (uint32_t) (pools_stats_after.peak_allocated_chunks - JERRY_MAX (pools_stats_before_p->allocated_chunks,
                                                                             pools_stats_after.allocated_chunks)),
            (uint32_t) pools_stats_after.global_peak_allocated_chunks);
  }

  printf ("%s-- End of execution of opcode %s (position %u) --\n\n",
          indent_prefix, __op_names[instr.op_idx], instr_position);
}
Example No. 13
static void
interp_mem_stats_context_exit (vm_frame_ctx_t *frame_ctx_p,
                               vm_instr_counter_t block_position)
{
  if (likely (!interp_mem_stats_enabled))
  {
    return;
  }

  const uint32_t indentation = JERRY_MIN (interp_mem_stats_print_indentation,
                                          INTERP_MEM_PRINT_INDENTATION_MAX);

  char indent_prefix[INTERP_MEM_PRINT_INDENTATION_MAX + 2];
  memset (indent_prefix, ' ', sizeof (indent_prefix));
  indent_prefix[indentation] = '|';
  indent_prefix[indentation + 1] = '\0';

  mem_heap_stats_t heap_stats_context_exit;
  mem_pools_stats_t pools_stats_context_exit;

  interp_mem_get_stats (&heap_stats_context_exit,
                        &pools_stats_context_exit,
                        false, true);

  frame_ctx_p->context_peak_allocated_heap_bytes -= JERRY_MAX (frame_ctx_p->heap_stats_context_enter.allocated_bytes,
                                                              heap_stats_context_exit.allocated_bytes);
  frame_ctx_p->context_peak_waste_heap_bytes -= JERRY_MAX (frame_ctx_p->heap_stats_context_enter.waste_bytes,
                                                          heap_stats_context_exit.waste_bytes);
  frame_ctx_p->context_peak_pools_count -= JERRY_MAX (frame_ctx_p->pools_stats_context_enter.pools_count,
                                                     pools_stats_context_exit.pools_count);
  frame_ctx_p->context_peak_allocated_pool_chunks -= JERRY_MAX (frame_ctx_p->pools_stats_context_enter.allocated_chunks,
                                                               pools_stats_context_exit.allocated_chunks);

  printf ("%sAllocated heap bytes in the context:  %5u -> %5u (%+5d, local %5u, peak %5u)\n",
          indent_prefix,
          (uint32_t) frame_ctx_p->heap_stats_context_enter.allocated_bytes,
          (uint32_t) heap_stats_context_exit.allocated_bytes,
          (uint32_t) (heap_stats_context_exit.allocated_bytes - frame_ctx_p->heap_stats_context_enter.allocated_bytes),
          (uint32_t) frame_ctx_p->context_peak_allocated_heap_bytes,
          (uint32_t) heap_stats_context_exit.global_peak_allocated_bytes);

  printf ("%sWaste heap bytes in the context:      %5u -> %5u (%+5d, local %5u, peak %5u)\n",
          indent_prefix,
          (uint32_t) frame_ctx_p->heap_stats_context_enter.waste_bytes,
          (uint32_t) heap_stats_context_exit.waste_bytes,
          (uint32_t) (heap_stats_context_exit.waste_bytes - frame_ctx_p->heap_stats_context_enter.waste_bytes),
          (uint32_t) frame_ctx_p->context_peak_waste_heap_bytes,
          (uint32_t) heap_stats_context_exit.global_peak_waste_bytes);

  printf ("%sPools count in the context:           %5u -> %5u (%+5d, local %5u, peak %5u)\n",
          indent_prefix,
          (uint32_t) frame_ctx_p->pools_stats_context_enter.pools_count,
          (uint32_t) pools_stats_context_exit.pools_count,
          (uint32_t) (pools_stats_context_exit.pools_count - frame_ctx_p->pools_stats_context_enter.pools_count),
          (uint32_t) frame_ctx_p->context_peak_pools_count,
          (uint32_t) pools_stats_context_exit.global_peak_pools_count);

  printf ("%sAllocated pool chunks in the context: %5u -> %5u (%+5d, local %5u, peak %5u)\n",
          indent_prefix,
          (uint32_t) frame_ctx_p->pools_stats_context_enter.allocated_chunks,
          (uint32_t) pools_stats_context_exit.allocated_chunks,
          (uint32_t) (pools_stats_context_exit.allocated_chunks -
                      frame_ctx_p->pools_stats_context_enter.allocated_chunks),
          (uint32_t) frame_ctx_p->context_peak_allocated_pool_chunks,
          (uint32_t) pools_stats_context_exit.global_peak_allocated_chunks);

  printf ("\n%s--- End of interpretation of a block at position %u ---\n\n",
          indent_prefix, (uint32_t) block_position);
}
Example No. 14
/*
 * IRQ handler
 */
static void cm109_urb_irq_callback(struct urb *urb)
{
    struct cm109_dev *dev = urb->context;
    const int status = urb->status;
    int error;

    dev_dbg(&urb->dev->dev, "### URB IRQ: [0x%02x 0x%02x 0x%02x 0x%02x] keybit=0x%02x\n",
            dev->irq_data->byte[0],
            dev->irq_data->byte[1],
            dev->irq_data->byte[2],
            dev->irq_data->byte[3],
            dev->keybit);

    if (status) {
        if (status == -ESHUTDOWN)
            return;
        err("%s: urb status %d", __func__, status);
    }

    /* Special keys */
    if (dev->irq_data->byte[HID_IR0] & 0x0f) {
        const int code = (dev->irq_data->byte[HID_IR0] & 0x0f);
        report_key(dev, dev->keymap[0xff + code]);
    }

    /* Scan key column */
    if (dev->keybit == 0xf) {

        /* Any changes? */
        if ((dev->gpi & 0xf0) == (dev->irq_data->byte[HID_IR1] & 0xf0))
            goto out;

        dev->gpi = dev->irq_data->byte[HID_IR1] & 0xf0;
        dev->keybit = 0x1;
    } else {
        report_key(dev, dev->keymap[dev->irq_data->byte[HID_IR1]]);

        dev->keybit <<= 1;
        if (dev->keybit > 0x8)
            dev->keybit = 0xf;
    }

out:

    spin_lock(&dev->ctl_submit_lock);

    dev->irq_urb_pending = 0;

    if (likely(!dev->shutdown)) {

        if (dev->buzzer_state)
            dev->ctl_data->byte[HID_OR0] |= BUZZER_ON;
        else
            dev->ctl_data->byte[HID_OR0] &= ~BUZZER_ON;

        dev->ctl_data->byte[HID_OR1] = dev->keybit;
        dev->ctl_data->byte[HID_OR2] = dev->keybit;

        dev->buzzer_pending = 0;
        dev->ctl_urb_pending = 1;

        error = usb_submit_urb(dev->urb_ctl, GFP_ATOMIC);
        if (error)
            err("%s: usb_submit_urb (urb_ctl) failed %d",
                __func__, error);
    }

    spin_unlock(&dev->ctl_submit_lock);
}
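The handler above leans on a small report_key() helper to release the previously pressed key before reporting the new one; a plausible sketch, assuming hypothetical idev (input device) and key_code (last reported key) fields on struct cm109_dev:

/* Sketch: release the old key, press the new one, sync the event. */
static void report_key(struct cm109_dev *dev, int key)
{
	struct input_dev *idev = dev->idev;

	if (dev->key_code >= 0)
		input_report_key(idev, dev->key_code, 0);	/* old key up */

	dev->key_code = key;
	if (key >= 0)
		input_report_key(idev, key, 1);		/* new key down */

	input_sync(idev);
}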
Example No. 15
static int mx51_ecspi_config(struct spi_device *spi,
			     struct spi_imx_config *config)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
	u32 clk = config->speed_hz, delay, reg;
	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);

	/*
	 * The hardware seems to have a race condition when changing modes. The
	 * current assumption is that the selection of the channel arrives
	 * earlier in the hardware than the mode bits when they are written at
	 * the same time.
	 * So set master mode for all channels as we do not support slave mode.
	 */
	ctrl |= MX51_ECSPI_CTRL_MODE_MASK;

	/* set clock speed */
	ctrl |= mx51_ecspi_clkdiv(spi_imx, config->speed_hz, &clk);
	spi_imx->spi_bus_clk = clk;

	/* set chip select to use */
	ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);

	ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET;

	cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);

	if (spi->mode & SPI_CPHA)
		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);

	if (spi->mode & SPI_CPOL) {
		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
	} else {
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
	}
	if (spi->mode & SPI_CS_HIGH)
		cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);

	if (spi_imx->usedma)
		ctrl |= MX51_ECSPI_CTRL_SMC;

	/* CTRL register always goes first to bring the controller out of reset */
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	reg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		reg |= MX51_ECSPI_TESTREG_LBC;
	else
		reg &= ~MX51_ECSPI_TESTREG_LBC;
	writel(reg, spi_imx->base + MX51_ECSPI_TESTREG);

	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);

	/*
	 * Wait until the changes in the configuration register CONFIGREG
	 * propagate into the hardware. It takes exactly one tick of the
	 * SCLK clock, but we will wait two SCLK cycles just to be sure. The
	 * time the hardware needs to apply the change is noticeable when
	 * the SCLK clock runs very slowly. In such a case, if the polarity
	 * of SCLK should be inverted, the GPIO ChipSelect might be asserted
	 * before the SCLK polarity changes, which would disrupt the SPI
	 * communication as the device on the other end would consider the
	 * change of SCLK polarity as a clock tick already.
	 */
	delay = (2 * 1000000) / clk;
	if (likely(delay < 10))	/* SCLK is faster than 100 kHz */
		udelay(delay);
	else			/* SCLK is _very_ slow */
		usleep_range(delay, delay + 10);

	/*
	 * Configure the DMA register: setup the watermark
	 * and enable DMA request.
	 */

	writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_TX_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
		MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);

	return 0;
}
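To make the delay branch concrete: with clk = 1 MHz the two-cycle wait is delay = (2 * 1000000) / 1000000 = 2 µs, which takes the udelay() path, while clk = 50 kHz gives delay = 40 µs and falls through to usleep_range().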
Example No. 16
static int recv_stream(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeout;
	unsigned int sz;
	int sz_to_copy, target, needed;
	int sz_copied = 0;
	u32 err;
	int res = 0;

	/* Catch invalid receive attempts */

	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely((sock->state == SS_UNCONNECTED) ||
		     (sock->state == SS_CONNECTING))) {
		res = -ENOTCONN;
		goto exit;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:

	/* Look for a message in receive queue; wait if necessary */

	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (sock->state == SS_DISCONNECTING) {
			res = -ENOTCONN;
			goto exit;
		}
		if (timeout <= 0L) {
			res = timeout ? timeout : -EWOULDBLOCK;
			goto exit;
		}
		release_sock(sk);
		timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
							   tipc_rx_ready(sock),
							   timeout);
		lock_sock(sk);
	}

	/* Look at first message in receive queue */

	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */

	if ((!sz) && (!err)) {
		advance_rx_queue(sk);
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */

	if (sz_copied == 0) {
		set_orig_addr(m, msg);
		res = anc_data_recv(m, msg, tport);
		if (res)
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */

	if (!err) {
		u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);

		sz -= offset;
		needed = (buf_len - sz_copied);
		sz_to_copy = (sz <= needed) ? sz : needed;

		res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset,
					      m->msg_iov, sz_to_copy);
		if (res)
			goto exit;

		sz_copied += sz_to_copy;

		if (sz_to_copy < sz) {
			if (!(flags & MSG_PEEK))
				TIPC_SKB_CB(buf)->handle =
				(void *)(unsigned long)(offset + sz_to_copy);
			goto exit;
		}
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */

	if (likely(!(flags & MSG_PEEK))) {
		if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tport->ref, tport->conn_unacked);
		advance_rx_queue(sk);
	}

	/* Loop around if more data is required */

	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
	    (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sz_copied < target)) &&	/* and more is ready or required */
	    (!(flags & MSG_PEEK)) &&	/* and aren't just peeking at data */
	    (!err))			/* and haven't reached a FIN */
		goto restart;

exit:
	release_sock(sk);
	return sz_copied ? sz_copied : res;
}
Example No. 17
/*
 * our custom d_alloc_root work-alike
 *
 * we can't use d_alloc_root if we want to use our own interpose function
 * unchanged, so we simply call our own "fake" d_alloc_root
 */
static struct dentry *unionfs_d_alloc_root(struct super_block *sb)
{
	struct dentry *ret = NULL;

	if (sb) {
		static const struct qstr name = {
			.name = "/",
			.len = 1
		};

		ret = d_alloc(NULL, &name);
		if (likely(ret)) {
			ret->d_op = &unionfs_dops;
			ret->d_sb = sb;
			ret->d_parent = ret;
		}
	}
	return ret;
}

/*
 * There is no need to lock the unionfs_super_info's rwsem as there is no
 * way anyone can have a reference to the superblock at this point in time.
 */
static int unionfs_read_super(struct super_block *sb, void *raw_data,
			      int silent)
{
	int err = 0;
	struct unionfs_dentry_info *lower_root_info = NULL;
	int bindex, bstart, bend;

	if (!raw_data) {
		printk(KERN_ERR
		       "unionfs: read_super: missing data argument\n");
		err = -EINVAL;
		goto out;
	}

	/* Allocate superblock private data */
	sb->s_fs_info = kzalloc(sizeof(struct unionfs_sb_info), GFP_KERNEL);
	if (unlikely(!UNIONFS_SB(sb))) {
		printk(KERN_CRIT "unionfs: read_super: out of memory\n");
		err = -ENOMEM;
		goto out;
	}

	UNIONFS_SB(sb)->bend = -1;
	atomic_set(&UNIONFS_SB(sb)->generation, 1);
	init_rwsem(&UNIONFS_SB(sb)->rwsem);
	UNIONFS_SB(sb)->high_branch_id = -1; /* -1 == invalid branch ID */

	lower_root_info = unionfs_parse_options(sb, raw_data);
	if (IS_ERR(lower_root_info)) {
		printk(KERN_ERR
		       "unionfs: read_super: error while parsing options "
		       "(err = %ld)\n", PTR_ERR(lower_root_info));
		err = PTR_ERR(lower_root_info);
		lower_root_info = NULL;
		goto out_free;
	}
	if (lower_root_info->bstart == -1) {
		err = -ENOENT;
		goto out_free;
	}

	/* set the lower superblock field of upper superblock */
	bstart = lower_root_info->bstart;
	BUG_ON(bstart != 0);
	sbend(sb) = bend = lower_root_info->bend;
	for (bindex = bstart; bindex <= bend; bindex++) {
		struct dentry *d = lower_root_info->lower_paths[bindex].dentry;
		atomic_inc(&d->d_sb->s_active);
		unionfs_set_lower_super_idx(sb, bindex, d->d_sb);
	}

	/* s_maxbytes is the maximum file size, taken from the highest priority branch */
	sb->s_maxbytes = unionfs_lower_super_idx(sb, 0)->s_maxbytes;

	/*
	 * Our c/m/atime granularity is 1 ns because we may stack on file
	 * systems whose granularity is as good.  This is important for our
	 * time-based cache coherency.
	 */
	sb->s_time_gran = 1;

	sb->s_op = &unionfs_sops;

	/* See comment next to the definition of unionfs_d_alloc_root */
	sb->s_root = unionfs_d_alloc_root(sb);
	if (unlikely(!sb->s_root)) {
		err = -ENOMEM;
		goto out_dput;
	}

	/* link the upper and lower dentries */
	sb->s_root->d_fsdata = NULL;
	err = new_dentry_private_data(sb->s_root, UNIONFS_DMUTEX_ROOT);
	if (unlikely(err))
		goto out_freedpd;

	/* Set the lower dentries for s_root */
	for (bindex = bstart; bindex <= bend; bindex++) {
		struct dentry *d;
		struct vfsmount *m;

		d = lower_root_info->lower_paths[bindex].dentry;
		m = lower_root_info->lower_paths[bindex].mnt;

		unionfs_set_lower_dentry_idx(sb->s_root, bindex, d);
		unionfs_set_lower_mnt_idx(sb->s_root, bindex, m);
	}
	dbstart(sb->s_root) = bstart;
	dbend(sb->s_root) = bend;

	/* Set the generation number to one, since this is for the mount. */
	atomic_set(&UNIONFS_D(sb->s_root)->generation, 1);

	/*
	 * Call interpose to create the upper level inode.  Only
	 * INTERPOSE_LOOKUP can return a value other than 0 on err.
	 */
	err = PTR_ERR(unionfs_interpose(sb->s_root, sb, 0));
	unionfs_unlock_dentry(sb->s_root);
	if (!err)
		goto out;
	/* else fall through */

out_freedpd:
	if (UNIONFS_D(sb->s_root)) {
		kfree(UNIONFS_D(sb->s_root)->lower_paths);
		free_dentry_private_data(sb->s_root);
	}
	dput(sb->s_root);

out_dput:
	if (lower_root_info && !IS_ERR(lower_root_info)) {
		for (bindex = lower_root_info->bstart;
		     bindex <= lower_root_info->bend; bindex++) {
			struct dentry *d;
			d = lower_root_info->lower_paths[bindex].dentry;
			/* drop refs we took earlier */
			atomic_dec(&d->d_sb->s_active);
			path_put(&lower_root_info->lower_paths[bindex]);
		}
		kfree(lower_root_info->lower_paths);
		kfree(lower_root_info);
		lower_root_info = NULL;
	}

out_free:
	kfree(UNIONFS_SB(sb)->data);
	kfree(UNIONFS_SB(sb));
	sb->s_fs_info = NULL;

out:
	if (lower_root_info && !IS_ERR(lower_root_info)) {
		kfree(lower_root_info->lower_paths);
		kfree(lower_root_info);
	}
	return err;
}
Example No. 18
static int send_msg(struct kiocb *iocb, struct socket *sock,
		    struct msghdr *m, size_t total_len)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
	int needs_conn;
	long timeout_val;
	int res = -EINVAL;

	if (unlikely(!dest))
		return -EDESTADDRREQ;
	if (unlikely((m->msg_namelen < sizeof(*dest)) ||
		     (dest->family != AF_TIPC)))
		return -EINVAL;
	if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
	    (m->msg_iovlen > (unsigned)INT_MAX))
		return -EMSGSIZE;

	if (iocb)
		lock_sock(sk);

	needs_conn = (sock->state != SS_READY);
	if (unlikely(needs_conn)) {
		if (sock->state == SS_LISTENING) {
			res = -EPIPE;
			goto exit;
		}
		if (sock->state != SS_UNCONNECTED) {
			res = -EISCONN;
			goto exit;
		}
		if ((tport->published) ||
		    ((sock->type == SOCK_STREAM) && (total_len != 0))) {
			res = -EOPNOTSUPP;
			goto exit;
		}
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tport->conn_type = dest->addr.name.name.type;
			tport->conn_instance = dest->addr.name.name.instance;
		}

		/* Abort any pending connection attempts (very unlikely) */

		reject_rx_queue(sk);
	}

	timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);

	do {
		if (dest->addrtype == TIPC_ADDR_NAME) {
			res = dest_name_check(dest, m);
			if (res)
				break;
			res = tipc_send2name(tport->ref,
					     &dest->addr.name.name,
					     dest->addr.name.domain,
					     m->msg_iovlen,
					     m->msg_iov,
					     total_len);
		} else if (dest->addrtype == TIPC_ADDR_ID) {
			res = tipc_send2port(tport->ref,
					     &dest->addr.id,
					     m->msg_iovlen,
					     m->msg_iov,
					     total_len);
		} else if (dest->addrtype == TIPC_ADDR_MCAST) {
			if (needs_conn) {
				res = -EOPNOTSUPP;
				break;
			}
			res = dest_name_check(dest, m);
			if (res)
				break;
			res = tipc_multicast(tport->ref,
					     &dest->addr.nameseq,
					     m->msg_iovlen,
					     m->msg_iov,
					     total_len);
		}
		if (likely(res != -ELINKCONG)) {
			if (needs_conn && (res >= 0))
				sock->state = SS_CONNECTING;
			break;
		}
		if (timeout_val <= 0L) {
			res = timeout_val ? timeout_val : -EWOULDBLOCK;
			break;
		}
		release_sock(sk);
		timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
					       !tport->congested, timeout_val);
		lock_sock(sk);
	} while (1);

exit:
	if (iocb)
		release_sock(sk);
	return res;
}
Example No. 19
/**
 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
 *
 * tipc_net_lock is read_locked, no other locks set
 */
void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred;

	/* Screen out unwanted broadcast messages */

	if (msg_mc_netid(msg) != tipc_net_id)
		goto exit;

	node = tipc_node_find(msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */

	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tipc_own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_node_unlock(node);
			spin_lock_bh(&bc_lock);
			bcl->stats.recv_nacks++;
			bclink->retransmit_to = node;
			bclink_retransmit_pkt(msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			spin_unlock_bh(&bc_lock);
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(msg);
		}
		goto exit;
	}

	/* Handle in-sequence broadcast message */

	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */

		if (likely(msg_isdata(msg))) {
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			if (likely(msg_mcast(msg)))
				tipc_port_recv_mcast(buf, NULL);
			else
				kfree_skb(buf);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			tipc_link_recv_bundle(buf);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			int ret;
			ret = tipc_link_recv_fragment(&node->bclink.reasm_head,
						      &node->bclink.reasm_tail,
						      &buf);
			if (ret == LINK_REASM_ERROR)
				goto unlock;
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_fragments++;
			if (ret == LINK_REASM_COMPLETE) {
				bcl->stats.recv_fragmented++;
				/* Point msg to inner header */
				msg = buf_msg(buf);
				spin_unlock_bh(&bc_lock);
				goto receive;
			}
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == NAME_DISTRIBUTOR) {
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			tipc_named_recv(buf);
		} else {
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */

		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (!node->bclink.deferred_head) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(node->bclink.deferred_head);
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */

		buf = node->bclink.deferred_head;
		node->bclink.deferred_head = buf->next;
		buf->next = NULL;
		node->bclink.deferred_size--;
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */

	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferred_head,
					       &node->bclink.deferred_tail,
					       buf);
		node->bclink.deferred_size += deferred;
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	} else
		deferred = 0;

	spin_lock_bh(&bc_lock);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	spin_unlock_bh(&bc_lock);

unlock:
	tipc_node_unlock(node);
exit:
	kfree_skb(buf);
}
Example No. 20
static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				struct tipc_port *tport)
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */

	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */

	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		has_name = (tport->conn_type != 0);
		anc_data[0] = tport->conn_type;
		anc_data[1] = tport->conn_instance;
		anc_data[2] = tport->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}
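On the userspace side, the 12-byte TIPC_DESTNAME block this function emits is read back with the standard cmsg macros; a minimal sketch:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/tipc.h>

/* Sketch: extract the TIPC_DESTNAME triple from a received msghdr. */
static void print_dest_name(struct msghdr *m)
{
	struct cmsghdr *cm;

	for (cm = CMSG_FIRSTHDR(m); cm; cm = CMSG_NXTHDR(m, cm)) {
		if (cm->cmsg_level == SOL_TIPC &&
		    cm->cmsg_type == TIPC_DESTNAME) {
			__u32 *name = (__u32 *)CMSG_DATA(cm);

			printf("type=%u lower=%u upper=%u\n",
			       name[0], name[1], name[2]);
		}
	}
}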
Example No. 21
int ip6_mc_input(struct sk_buff *skb)
{
	struct ipv6hdr *hdr;
	int deliver;

	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INMCASTPKTS);

	hdr = ipv6_hdr(skb);
	deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);

#ifdef CONFIG_IPV6_MROUTE
	/*
	 *      IPv6 multicast router mode is now supported ;)
	 */
	if (ipv6_devconf.mc_forwarding &&
	    likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
		/*
		 * Okay, we try to forward - split and duplicate
		 * packets.
		 */
		struct sk_buff *skb2;
		struct inet6_skb_parm *opt = IP6CB(skb);

		/* Check for MLD */
		if (unlikely(opt->ra)) {
			/* Check if this is a mld message */
			u8 *ptr = skb_network_header(skb) + opt->ra;
			struct icmp6hdr *icmp6;
			u8 nexthdr = hdr->nexthdr;
			int offset;

			/* Check if the value of Router Alert
			 * is for MLD (0x0000).
			 */
			if ((ptr[2] | ptr[3]) == 0) {
				if (!ipv6_ext_hdr(nexthdr)) {
					/* BUG */
					goto discard;
				}
				offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
							  &nexthdr);
				if (offset < 0)
					goto discard;

				if (nexthdr != IPPROTO_ICMPV6)
					goto discard;

				if (!pskb_may_pull(skb, (skb_network_header(skb) +
						   offset + 1 - skb->data)))
					goto discard;

				icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

				switch (icmp6->icmp6_type) {
				case ICMPV6_MGM_QUERY:
				case ICMPV6_MGM_REPORT:
				case ICMPV6_MGM_REDUCTION:
				case ICMPV6_MLD2_REPORT:
					break;
				default:
					/* Bogus */
					goto discard;
				}
				deliver = 1;
				goto out;
			}
			/* unknown RA - process it normally */
		}

		if (deliver)
			skb2 = skb_clone(skb, GFP_ATOMIC);
		else {
			skb2 = skb;
			skb = NULL;
		}

		if (skb2) {
			skb2->dev = skb2->dst->dev;
			ip6_mr_input(skb2);
		}
	}
out:
#endif
	if (likely(deliver)) {
		ip6_input(skb);
		return 0;
	}
discard:
	/* discard */
	kfree_skb(skb);

	return 0;
}
Example No. 22
static int recv_msg(struct kiocb *iocb, struct socket *sock,
		    struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeout;
	unsigned int sz;
	u32 err;
	int res;

	/* Catch invalid receive requests */

	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sock->state == SS_UNCONNECTED)) {
		res = -ENOTCONN;
		goto exit;
	}

	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:

	/* Look for a message in receive queue; wait if necessary */

	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (sock->state == SS_DISCONNECTING) {
			res = -ENOTCONN;
			goto exit;
		}
		if (timeout <= 0L) {
			res = timeout ? timeout : -EWOULDBLOCK;
			goto exit;
		}
		release_sock(sk);
		timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
							   tipc_rx_ready(sock),
							   timeout);
		lock_sock(sk);
	}

	/* Look at first message in receive queue */

	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Complete connection setup for an implied connect */

	if (unlikely(sock->state == SS_CONNECTING)) {
		res = auto_connect(sock, msg);
		if (res)
			goto exit;
	}

	/* Discard an empty non-errored message & try again */

	if ((!sz) && (!err)) {
		advance_rx_queue(sk);
		goto restart;
	}

	/* Capture sender's address (optional) */

	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */

	res = anc_data_recv(m, msg, tport);
	if (res)
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */

	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
		res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg),
					      m->msg_iov, sz);
		if (res)
			goto exit;
		res = sz;
	} else {
		if ((sock->state == SS_READY) ||
		    ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */

	if (likely(!(flags & MSG_PEEK))) {
		if ((sock->state != SS_READY) &&
		    (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tport->ref, tport->conn_unacked);
		advance_rx_queue(sk);
	}
exit:
	release_sock(sk);
	return res;
}
Example No. 23
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
	struct enic_fdir_node *key;
	struct filter fltr = {0};
	int32_t pos;
	u8 do_free = 0;
	u16 old_fltr_id = 0;
	u32 flowtype_supported;
	u16 flex_bytes;
	u16 queue;

	flowtype_supported = (
		(RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type) ||
		(RTE_ETH_FLOW_NONFRAG_IPV4_UDP == params->input.flow_type));

	flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
		(params->input.flow_ext.flexbytes[0] & 0xFF));

	if (!enic->fdir.hash ||
		(params->input.flow_ext.vlan_tci & 0xFFF) ||
		!flowtype_supported || flex_bytes ||
		params->action.behavior /* drop */) {
		enic->fdir.stats.f_add++;
		return -ENOTSUP;
	}

	queue = params->action.rx_queue;
	/* See if the key is already there in the table */
	pos = rte_hash_del_key(enic->fdir.hash, params);
	switch (pos) {
	case -EINVAL:
		enic->fdir.stats.f_add++;
		return -EINVAL;
	case -ENOENT:
		/* Add a new classifier entry */
		if (!enic->fdir.stats.free) {
			enic->fdir.stats.f_add++;
			return -ENOSPC;
		}
		key = rte_zmalloc("enic_fdir_node",
				  sizeof(struct enic_fdir_node), 0);
		if (!key) {
			enic->fdir.stats.f_add++;
			return -ENOMEM;
		}
		break;
	default:
		/* The entry is already present in the table.
		 * Check if there is a change in queue
		 */
		key = enic->fdir.nodes[pos];
		enic->fdir.nodes[pos] = NULL;
		if (unlikely(key->rq_index == queue)) {
			/* Nothing to be done */
			pos = rte_hash_add_key(enic->fdir.hash, params);
			enic->fdir.nodes[pos] = key;
			enic->fdir.stats.f_add++;
			dev_warning(enic,
				"FDIR rule is already present\n");
			return 0;
		}

		if (likely(enic->fdir.stats.free)) {
			/* Add the filter and then delete the old one.
			 * This is to avoid packets from going into the
			 * default queue during the window between
			 * delete and add
			 */
			do_free = 1;
			old_fltr_id = key->fltr_id;
		} else {
			/* No free slots in the classifier.
			 * Delete the filter and add the modified one later
			 */
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
				&key->fltr_id, NULL);
			enic->fdir.stats.free++;
		}

		break;
	}

	key->filter = *params;
	key->rq_index = queue;

	fltr.type = FILTER_IPV4_5TUPLE;
	fltr.u.ipv4.src_addr = rte_be_to_cpu_32(
		params->input.flow.ip4_flow.src_ip);
	fltr.u.ipv4.dst_addr = rte_be_to_cpu_32(
		params->input.flow.ip4_flow.dst_ip);
	fltr.u.ipv4.src_port = rte_be_to_cpu_16(
		params->input.flow.udp4_flow.src_port);
	fltr.u.ipv4.dst_port = rte_be_to_cpu_16(
		params->input.flow.udp4_flow.dst_port);

	if (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type)
		fltr.u.ipv4.protocol = PROTO_TCP;
	else
		fltr.u.ipv4.protocol = PROTO_UDP;

	fltr.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;

	if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
		key->fltr_id = queue;
	} else {
		dev_err(enic, "Add classifier entry failed\n");
		enic->fdir.stats.f_add++;
		rte_free(key);
		return -1;
	}

	if (do_free)
		vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL);
	else {
		enic->fdir.stats.free--;
		enic->fdir.stats.add++;
	}

	pos = rte_hash_add_key(enic->fdir.hash, params);
	enic->fdir.nodes[pos] = key;
	return 0;
}
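Note the probe-by-delete idiom above: rte_hash_del_key() returns the slot position of the stored key on success (which indexes enic->fdir.nodes) and -ENOENT when the key is absent, so a single call both tests for an existing rule and reclaims its node for modification, avoiding a separate lookup.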
Example No. 24
static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
    struct net_device *netdev = p->netdev;
    union cvmx_mixx_ircnt mix_ircnt;
    union mgmt_port_ring_entry re;
    struct sk_buff *skb;
    struct sk_buff *skb2;
    struct sk_buff *skb_new;
    union mgmt_port_ring_entry re2;
    int rc = 1;


    re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
    if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
        /* A good packet, send it up. */
        skb_put(skb, re.s.len);
good:
        /* Process the RX timestamp if it was recorded */
        if (p->has_rx_tstamp) {
            /* The first 8 bytes are the timestamp */
            u64 ns = *(u64 *)skb->data;
            struct skb_shared_hwtstamps *ts;
            ts = skb_hwtstamps(skb);
            ts->hwtstamp = ns_to_ktime(ns);
            __skb_pull(skb, 8);
        }
        skb->protocol = eth_type_trans(skb, netdev);
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += skb->len;
        netif_receive_skb(skb);
        rc = 0;
    } else if (re.s.code == RING_ENTRY_CODE_MORE) {
        /* Packet split across skbs.  This can happen if we
         * increase the MTU.  Buffers that are already in the
         * rx ring can then end up being too small.  As the rx
         * ring is refilled, buffers sized for the new MTU
         * will be used and we should go back to the normal
         * non-split case.
         */
        skb_put(skb, re.s.len);
        do {
            re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
            if (re2.s.code != RING_ENTRY_CODE_MORE
                    && re2.s.code != RING_ENTRY_CODE_DONE)
                goto split_error;
            skb_put(skb2,  re2.s.len);
            skb_new = skb_copy_expand(skb, 0, skb2->len,
                                      GFP_ATOMIC);
            if (!skb_new)
                goto split_error;
            if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
                              skb2->len))
                goto split_error;
            skb_put(skb_new, skb2->len);
            dev_kfree_skb_any(skb);
            dev_kfree_skb_any(skb2);
            skb = skb_new;
        } while (re2.s.code == RING_ENTRY_CODE_MORE);
        goto good;
    } else {
        /* Some other error, discard it. */
        dev_kfree_skb_any(skb);
        /* Error statistics are accumulated in
         * octeon_mgmt_update_rx_stats.
         */
    }
    goto done;
split_error:
    /* Discard the whole mess. */
    dev_kfree_skb_any(skb);
    dev_kfree_skb_any(skb2);
    while (re2.s.code == RING_ENTRY_CODE_MORE) {
        re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
        dev_kfree_skb_any(skb2);
    }
    netdev->stats.rx_errors++;

done:
    /* Tell the hardware we processed a packet.  */
    mix_ircnt.u64 = 0;
    mix_ircnt.s.ircnt = 1;
    cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
    return rc;
}
Example No. 25
static int __Pyx_PyGen_FetchStopIterationValue(PyObject **pvalue) {
    PyObject *et, *ev, *tb;
    PyObject *value = NULL;

    __Pyx_ErrFetch(&et, &ev, &tb);

    if (!et) {
        Py_XDECREF(tb);
        Py_XDECREF(ev);
        Py_INCREF(Py_None);
        *pvalue = Py_None;
        return 0;
    }

    if (unlikely(et != PyExc_StopIteration) &&
            unlikely(!PyErr_GivenExceptionMatches(et, PyExc_StopIteration))) {
        __Pyx_ErrRestore(et, ev, tb);
        return -1;
    }

    // most common case: plain StopIteration without or with separate argument
    if (likely(et == PyExc_StopIteration)) {
        if (likely(!ev) || !PyObject_IsInstance(ev, PyExc_StopIteration)) {
            // PyErr_SetObject() and friends put the value directly into ev
            if (!ev) {
                Py_INCREF(Py_None);
                ev = Py_None;
            }
            Py_XDECREF(tb);
            Py_DECREF(et);
            *pvalue = ev;
            return 0;
        }
    }
    // otherwise: normalise and check what that gives us
    PyErr_NormalizeException(&et, &ev, &tb);
    if (unlikely(!PyObject_IsInstance(ev, PyExc_StopIteration))) {
        // looks like normalisation failed - raise the new exception
        __Pyx_ErrRestore(et, ev, tb);
        return -1;
    }
    Py_XDECREF(tb);
    Py_DECREF(et);
#if PY_VERSION_HEX >= 0x030300A0
    value = ((PyStopIterationObject *)ev)->value;
    Py_INCREF(value);
    Py_DECREF(ev);
#else
    {
        PyObject* args = PyObject_GetAttr(ev, PYIDENT("args"));
        Py_DECREF(ev);
        if (likely(args)) {
            value = PyObject_GetItem(args, 0);
            Py_DECREF(args);
        }
        if (unlikely(!value)) {
            __Pyx_ErrRestore(NULL, NULL, NULL);
            Py_INCREF(Py_None);
            value = Py_None;
        }
    }
#endif
    *pvalue = value;
    return 0;
}
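A typical call site in the generated generator code invokes this helper when the wrapped iteration returns NULL, to separate a clean StopIteration (whose value becomes the result) from a genuine error; roughly, and with a hypothetical yield_from sub-iterator:

/* Sketch of a typical call site in generated code. */
PyObject *retval = NULL;
PyObject *item = PyIter_Next(yield_from);	/* hypothetical sub-iterator */

if (!item) {
    if (__Pyx_PyGen_FetchStopIterationValue(&retval) < 0)
        return NULL;	/* a real exception is pending */
    /* iteration finished cleanly; retval holds the StopIteration value */
}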
Example No. 26
File: avp.c Project: Ansud/xxl2tpd
/*
 * Perform state sanity checks for message_type_avp
 */
static int mta_sanity_failed(const struct tunnel *t, const struct call *c)
{
	/*
	 * Look at our state for each message and make sure everything
	 * makes sense...
	 */
	if ((c != t->self) && (c->msgtype < Hello))
	{
		l2tp_log(LOG_DEBUG,
			"%s: attempting to negotiate tunnel inside a call!\n",
			__func__);
		return -EINVAL;
	}

	switch (c->msgtype)
	{
	case SCCRQ:
		/*
		 * When we handle tie breaker AVP's, then we'll check
		 * to see if we've both requested tunnels
		 */
		if (likely((t->state == 0) || (t->state == SCCRQ)))
			break;

		l2tp_log(LOG_DEBUG,
			"%s: attempting to negotiate SCCRQ with state != 0\n",
			__func__);
		return -EINVAL;
	case SCCRP:
		if (likely(t->state == SCCRQ))
			break;

		l2tp_log(LOG_DEBUG,
			"%s: attempting to negotiate SCCRP with state != SCCRQ!\n",
			__func__);
		return -EINVAL;
	case SCCCN:
		if (likely(t->state == SCCRP))
			break;

		l2tp_log(LOG_DEBUG,
			"%s: attempting to negotiate SCCCN with state != SCCRP!\n",
			__func__);
		return -EINVAL;
	case ICRQ:
		if (likely((t->state == SCCCN) && (c == t->self)))
			break;

		/* May waste a bit of cycles at error */
		l2tp_log(LOG_DEBUG, "%s: attempting to negotiate ICRQ %s\n",
			__func__,
			(t->state != SCCCN)?
				"when state != SCCCN":
				"on a call!");

		return -EINVAL;
	case ICRP:
		if (likely((t->state == SCCCN) && (c->state == ICRQ)))
			break;

		l2tp_log(LOG_DEBUG, "%s: attempting to negotiate ICRP %s\n",
			__func__,
			(t->state != SCCCN)?
				"on tunnel!=SCCCN":
				"when state != ICRQ");
		return -EINVAL;
	case ICCN:
		if (likely(c->state == ICRP))
			break;

		l2tp_log(LOG_DEBUG,
			"%s: attempting to negotiate ICCN when state != ICRP\n",
			__func__);
		return -EINVAL;
	case SLI:
		if (likely(c->state == ICCN))
			break;

		l2tp_log(LOG_DEBUG,
			"%s: attempting to negotiate SLI when state != ICCN\n",
			__func__);
		return -EINVAL;
	case OCRP:			 /* jz: case for ORCP */
		if (likely((t->state == SCCCN) && (c->state == OCRQ)))
			break;

		l2tp_log(LOG_DEBUG,
			"%s: attempting to negotiate OCRP %s\n",
			__func__,
			(t->state != SCCCN)?
				"on tunnel!=SCCCN":
				"when state != OCRQ");
		return -EINVAL;
	case OCCN:			 /* jz: case for OCCN */
		if (likely(c->state == OCRQ))
			break;

		l2tp_log(LOG_DEBUG,
			"%s: attempting to negotiate OCCN when state != OCRQ\n",
			__func__);
		return -EINVAL;
	case StopCCN:
	case CDN:
	case Hello:
		break;
	/* Help compiler to build jump table, and also validate incoming data */
	case CMRES5:
	case CMRES13:
		l2tp_log(LOG_WARNING, "%s: i don't know how to handle reserved message %u\n",
			__func__, c->msgtype);
		return -EINVAL;
	default:
		l2tp_log(LOG_WARNING, "%s: i don't know how to handle %s messages\n",
			 __func__, msgtypes[c->msgtype]);
		return -EINVAL;
	}

	return 0;
}
Example No. 27
/*
 * function:   	zfcp_qdio_response_handler
 *
 * purpose:	is called by QDIO layer for completed SBALs in response queue
 *
 * returns:	(void)
 */
static void
zfcp_qdio_response_handler(struct ccw_device *ccw_device,
			   unsigned int status,
			   unsigned int qdio_error,
			   unsigned int siga_error,
			   unsigned int queue_number,
			   int first_element,
			   int elements_processed,
			   unsigned long int_parm)
{
	struct zfcp_adapter *adapter;
	struct zfcp_qdio_queue *queue;
	int buffer_index;
	int i;
	struct qdio_buffer *buffer;
	int retval = 0;
	u8 count;
	u8 start;
	volatile struct qdio_buffer_element *buffere = NULL;
	int buffere_index;

	adapter = (struct zfcp_adapter *) int_parm;
	queue = &adapter->response_queue;

	if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
						   siga_error, first_element,
						   elements_processed)))
		goto out;

	/*
	 * we stored address of struct zfcp_adapter  data structure
	 * associated with irq in int_parm
	 */

	buffere = &(queue->buffer[first_element]->element[0]);
	ZFCP_LOG_DEBUG("first BUFFERE flags=0x%x\n", buffere->flags);
	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */

	for (i = 0; i < elements_processed; i++) {

		buffer_index = first_element + i;
		buffer_index %= QDIO_MAX_BUFFERS_PER_Q;
		buffer = queue->buffer[buffer_index];

		/* go through all SBALEs of SBAL */
		for (buffere_index = 0;
		     buffere_index < QDIO_MAX_ELEMENTS_PER_BUFFER;
		     buffere_index++) {

			/* look for QDIO request identifiers in SB */
			buffere = &buffer->element[buffere_index];
			retval = zfcp_qdio_reqid_check(adapter,
					(unsigned long) buffere->addr);

			if (retval) {
				ZFCP_LOG_NORMAL("bug: unexpected inbound "
						"packet on adapter %s "
						"(reqid=0x%lx, "
						"first_element=%d, "
						"elements_processed=%d)\n",
						zfcp_get_busid_by_adapter(adapter),
						(unsigned long) buffere->addr,
						first_element,
						elements_processed);
				ZFCP_LOG_NORMAL("hex dump of inbound buffer "
						"at address %p "
						"(buffer_index=%d, "
						"buffere_index=%d)\n", buffer,
						buffer_index, buffere_index);
				ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
					      (char *) buffer, SBAL_SIZE);
			}
			/*
			 * A single used SBALE per inbound SBALE has been
			 * implemented by QDIO so far. Hope they will
			 * do some optimisation. Will need to change to
			 * unlikely() then.
			 */
			if (likely(buffere->flags & SBAL_FLAGS_LAST_ENTRY))
				break;
		}

		if (unlikely(!(buffere->flags & SBAL_FLAGS_LAST_ENTRY))) {
			ZFCP_LOG_NORMAL("bug: End of inbound data "
					"not marked!\n");
		}
	}

	/*
	 * put range of SBALs back to response queue
	 * (including SBALs which have already been free before)
	 */
	count = atomic_read(&queue->free_count) + elements_processed;
	start = queue->free_index;

	ZFCP_LOG_TRACE("calling do_QDIO on adapter %s (flags=0x%x, "
		       "queue_no=%i, index_in_queue=%i, count=%i, "
		       "buffers=0x%lx\n",
		       zfcp_get_busid_by_adapter(adapter),
		       QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
		       0, start, count, (unsigned long) &queue->buffer[start]);

	retval = do_QDIO(ccw_device,
			 QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
			 0, start, count, NULL);

	if (unlikely(retval)) {
		atomic_set(&queue->free_count, count);
		ZFCP_LOG_DEBUG("clearing of inbound data regions failed, "
			       "queues may be down "
			       "(count=%d, start=%d, retval=%d)\n",
			       count, start, retval);
	} else {
		queue->free_index += count;
		queue->free_index %= QDIO_MAX_BUFFERS_PER_Q;
		atomic_set(&queue->free_count, 0);
		ZFCP_LOG_TRACE("%i buffers enqueued to response "
			       "queue at position %i\n", count, start);
	}
 out:
	return;
}
Example No. 28
File: avp.c Project: Ansud/xxl2tpd
/*
 * Parse a Result Code AVP from a StopCCN or CDN message and record the
 * peer's result/error codes and message text on the call.
 */
int result_code_avp(struct tunnel *t, struct call *c, const struct avp *data,
		    int datalen)
{
	int error;
	int result;

	struct avp_result {
		unsigned short result;
		unsigned short error;
		char msg[0];
	} * avpres = (struct avp_result *)&data->data.uc;

	if (sanity_enabled(t)) {
		int err = gen_sanity_check(c, datalen, 10, CDN, StopCCN,
					   "Result Code");

		if (err != SANITY_OK)
			return err;
	}

	result = ntohs(avpres->result);
	error = ntohs(avpres->error);

	/*
	 * from prepare_StopCCN and prepare_CDN, note missing htons() call
	 * http://www.opensource.apple.com/source/ppp/ppp-412.3/Drivers/L2TP/L2TP-plugin/l2tp.c
	 */
	result = fix_buggy_apple(result, "result");
	error = fix_buggy_apple(error, "error");

	if (((c->msgtype == StopCCN) && ((result > 7) || (result < 1))) ||
	    ((c->msgtype == CDN) && ((result > 11) || (result < 1))))
	{
		l2tp_log(LOG_DEBUG,
			"%s: result code out of range (%d %d %d).  Ignoring.\n",
			__func__, result, error, datalen);
		return 0;
	}

	c->error = error;
	c->result = result;
	safe_copy(c->errormsg, avpres->msg, datalen - 10);

	if (likely(!gconfig.debug_avp))
		return 0;

	if (c->msgtype == StopCCN)
	{
		l2tp_log(LOG_DEBUG,
			"%s: peer closing for reason %d (%s), error = %d (%s)\n",
			__func__, result, stopccn_result_codes[result], error,
			c->errormsg);
		return 0;
	}

	l2tp_log(LOG_DEBUG,
		"%s: peer closing for reason %d (%s), error = %d (%s)\n",
		__func__, result, cdn_result_codes[result], error,
		c->errormsg);

	return 0;
}
Example No. 29
static int
mlx4_en_alloc_buf(struct mlx4_en_rx_ring *ring,
     __be64 *pdma, struct mlx4_en_rx_mbuf *mb_list)
{
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	struct mbuf *mb;
	int nsegs;
	int err;

	/* try to allocate a new spare mbuf */
	if (unlikely(ring->spare.mbuf == NULL)) {
		mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
		if (unlikely(mb == NULL))
			return (-ENOMEM);
		/* setup correct length */
		mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;

		/* make sure IP header gets aligned */
		m_adj(mb, MLX4_NET_IP_ALIGN);

		/* load spare mbuf into BUSDMA */
		err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, ring->spare.dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
		if (unlikely(err != 0)) {
			m_freem(mb);
			return (err);
		}

		/* store spare info */
		ring->spare.mbuf = mb;
		ring->spare.paddr_be = cpu_to_be64(segs[0].ds_addr);

		bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
		    BUS_DMASYNC_PREREAD);
	}

	/* synchronize and unload the current mbuf, if any */
	if (likely(mb_list->mbuf != NULL)) {
		bus_dmamap_sync(ring->dma_tag, mb_list->dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->dma_tag, mb_list->dma_map);
	}

	mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
	if (unlikely(mb == NULL))
		goto use_spare;

	/* setup correct length */
	mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;

	/* make sure IP header gets aligned */
	m_adj(mb, MLX4_NET_IP_ALIGN);

	err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, mb_list->dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (unlikely(err != 0)) {
		m_freem(mb);
		goto use_spare;
	}

	*pdma = cpu_to_be64(segs[0].ds_addr);
	mb_list->mbuf = mb;

	bus_dmamap_sync(ring->dma_tag, mb_list->dma_map, BUS_DMASYNC_PREREAD);
	return (0);

use_spare:
	/* swap DMA maps */
	map = mb_list->dma_map;
	mb_list->dma_map = ring->spare.dma_map;
	ring->spare.dma_map = map;

	/* swap MBUFs */
	mb_list->mbuf = ring->spare.mbuf;
	ring->spare.mbuf = NULL;

	/* store physical address */
	*pdma = ring->spare.paddr_be;
	return (0);
}
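The spare mbuf is this allocator's forward-progress guarantee: it is allocated and DMA-loaded ahead of time, so when a fresh m_getjcl() or bus_dmamap_load_mbuf_sg() fails under memory pressure the ring slot is still refilled by swapping in the spare, and only the spare has to be replenished on a later call.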
Example No. 30
struct v4l2_device *nxp_v4l2_get_v4l2_device(void)
{
    if (likely(__me))
        return &__me->v4l2_dev;
    return NULL;
}