Example 1
/**
 * x509_request_asymmetric_key - Request a key by X.509 certificate params.
 * @keyring: The keys to search.
 * @subject: The name of the subject to whom the key belongs.
 * @key_id: The subject key ID as a hex string.
 *
 * Find a key in the given keyring by subject name and key ID.  These might,
 * for instance, be the issuer name and the authority key ID of an X.509
 * certificate that needs to be verified.
 */
struct key *x509_request_asymmetric_key(struct key *keyring,
					const char *subject,
					const char *key_id)
{
	key_ref_t key;
	size_t subject_len = strlen(subject), key_id_len = strlen(key_id);
	char *id;

	/* Construct an identifier "<subjname>: <keyid>". */
	id = kmalloc(subject_len + 2 + key_id_len + 1, GFP_KERNEL);
	if (!id)
		return ERR_PTR(-ENOMEM);

	memcpy(id, subject, subject_len);
	id[subject_len + 0] = ':';
	id[subject_len + 1] = ' ';
	memcpy(id + subject_len + 2, key_id, key_id_len);
	id[subject_len + 2 + key_id_len] = 0;

	pr_debug("Look up: \"%s\"\n", id);

	key = keyring_search(make_key_ref(keyring, 1),
			     &key_type_asymmetric, id);
	if (IS_ERR(key))
		pr_debug("Request for key '%s' err %ld\n", id, PTR_ERR(key));
	kfree(id);

	if (IS_ERR(key)) {
		switch (PTR_ERR(key)) {
			/* Hide some search errors */
		case -EACCES:
		case -ENOTDIR:
		case -EAGAIN:
			return ERR_PTR(-ENOKEY);
		default:
			return ERR_CAST(key);
		}
	}

	pr_devel("<==%s() = 0 [%x]\n", __func__,
		 key_serial(key_ref_to_ptr(key)));
	return key_ref_to_ptr(key);
}
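For context, a minimal caller sketch; verify_cert_issuer() is a hypothetical name used only to show the calling pattern, the real consumer being the X.509/PKCS#7 verification path:

/*
 * Hypothetical caller sketch: look up the signer's key by issuer name
 * and authority key ID, failing with -ENOKEY if it is not on the
 * trusted keyring. verify_cert_issuer() is not in the source above.
 */
static int verify_cert_issuer(struct key *trusted_keyring,
			      const char *issuer, const char *auth_key_id)
{
	struct key *key;

	key = x509_request_asymmetric_key(trusted_keyring, issuer,
					  auth_key_id);
	if (IS_ERR(key))
		return PTR_ERR(key);	/* search failures become -ENOKEY */

	/* ... check the certificate signature with this key ... */
	key_put(key);
	return 0;
}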
Example 2
static int hvsi_check_packet(struct hvsi_priv *pv)
{
	u8 len, type;

	/* Check header validity. If it's invalid, we ditch
	 * the whole buffer and hope we eventually resync
	 */
	if (pv->inbuf[0] < 0xfc) {
		pv->inbuf_len = pv->inbuf_pktlen = 0;
		return 0;
	}
	type = pv->inbuf[0];
	len = pv->inbuf[1];

	/* Packet incomplete ? */
	if (pv->inbuf_len < len)
		return 0;

	pr_devel("HVSI@%x: Got packet type %x len %d bytes:\n",
		 pv->termno, type, len);

	/* We have a packet, yay ! Handle it */
	switch (type) {
	case VS_DATA_PACKET_HEADER:
		pv->inbuf_pktlen = len - 4;
		pv->inbuf_cur = 4;
		return 1;
	case VS_CONTROL_PACKET_HEADER:
		hvsi_got_control(pv);
		break;
	case VS_QUERY_PACKET_HEADER:
		hvsi_got_query(pv);
		break;
	case VS_QUERY_RESPONSE_PACKET_HEADER:
		hvsi_got_response(pv);
		break;
	}

	/* Swallow packet and retry */
	pv->inbuf_len -= len;
	memmove(pv->inbuf, &pv->inbuf[len], pv->inbuf_len);
	return 1;
}
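The parser above implies a simple wire framing. A sketch of the header it expects, with assumed field names (the real definition lives in the driver's own header):

/*
 * Framing sketch implied by hvsi_check_packet() (assumed layout, not
 * taken from the driver headers): byte 0 is the packet type, all
 * defined types being >= 0xfc, and byte 1 is the total packet length
 * including the 4-byte header, so a data packet carries len - 4
 * payload bytes starting at offset 4.
 */
struct hvsi_header_sketch {
	u8	type;		/* VS_*_PACKET_HEADER, >= 0xfc */
	u8	len;		/* total length, header included */
	__be16	seqno;		/* assumed: remaining two header bytes */
} __packed;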
Example 3
static int write_node(u64 n1, u64 n2, u64 n3, u64 n4, u64 v1, u64 v2)
{
	int result;

	result = create_node(n1, n2, n3, n4, v1, v2);

	if (!result)
		return 0;

	result = lv1_write_repository_node(n1, n2, n3, n4, v1, v2);

	if (result) {
		pr_devel("%s:%d: lv1_write_repository_node failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		return -ENOENT;
	}

	return 0;
}
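A hedged usage sketch of this create-or-overwrite helper; update_bus_type() is hypothetical, and the field-encoding helpers are assumed from the surrounding PS3 repository code:

/*
 * Usage sketch: try to create the node; if it already exists,
 * write_node() falls back to overwriting it in place. The helpers
 * make_first_field()/make_field() are assumed from the surrounding
 * PS3 repository code and are not shown above.
 */
static int update_bus_type(unsigned int bus_index, u64 bus_type)
{
	return write_node(make_first_field("bus", bus_index),
			  make_field("type", 0), 0, 0, bus_type, 0);
}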
Example 4
static int ics_opal_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask,
				 bool force)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int16_t server;
	int8_t priority;
	int64_t rc;
	int wanted_server;

	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
		return -1;

	rc = opal_get_xive(hw_irq, &server, &priority);
	if (rc != OPAL_SUCCESS) {
		pr_err("%s: opal_set_xive(irq=%d [hw 0x%x] server=%x)"
		       " error %lld\n",
		       __func__, d->irq, hw_irq, server, rc);
		return -1;
	}

	wanted_server = xics_get_irq_server(d->irq, cpumask, 1);
	if (wanted_server < 0) {
		char cpulist[128];
		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
		pr_warning("%s: No online cpus in the mask %s for irq %d\n",
			   __func__, cpulist, d->irq);
		return -1;
	}
	server = ics_opal_mangle_server(wanted_server);

	pr_devel("ics-hal: set-affinity irq %d [hw 0x%x] server: 0x%x/0x%x\n",
		 d->irq, hw_irq, wanted_server, server);

	rc = opal_set_xive(hw_irq, server, priority);
	if (rc != OPAL_SUCCESS) {
		pr_err("%s: opal_set_xive(irq=%d [hw 0x%x] server=%x)"
		       " error %lld\n",
		       __func__, d->irq, hw_irq, server, rc);
		return -1;
	}
	return 0;
}
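Example 5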
/*
 * Note a signature information block
 */
int pkcs7_note_signed_info(void *context, size_t hdrlen,
			   unsigned char tag,
			   const void *value, size_t vlen)
{
	struct pkcs7_parse_context *ctx = context;
	struct pkcs7_signed_info *sinfo = ctx->sinfo;
	struct asymmetric_key_id *kid;

	if (ctx->msg->data_type == OID_msIndirectData && !sinfo->authattrs) {
		pr_warn("Authenticode requires AuthAttrs\n");
		return -EBADMSG;
	}

	/* Generate cert issuer + serial number key ID */
	if (!ctx->expect_skid) {
		kid = asymmetric_key_generate_id(ctx->raw_serial,
						 ctx->raw_serial_size,
						 ctx->raw_issuer,
						 ctx->raw_issuer_size);
	} else {
		kid = asymmetric_key_generate_id(ctx->raw_skid,
						 ctx->raw_skid_size,
						 "", 0);
	}
	if (IS_ERR(kid))
		return PTR_ERR(kid);

	pr_devel("SINFO KID: %u [%*phN]\n", kid->len, kid->len, kid->data);

	sinfo->sig->auth_ids[0] = kid;
	sinfo->index = ++ctx->sinfo_index;
	*ctx->ppsinfo = sinfo;
	ctx->ppsinfo = &sinfo->next;
	ctx->sinfo = kzalloc(sizeof(struct pkcs7_signed_info), GFP_KERNEL);
	if (!ctx->sinfo)
		return -ENOMEM;
	ctx->sinfo->sig = kzalloc(sizeof(struct public_key_signature),
				  GFP_KERNEL);
	if (!ctx->sinfo->sig)
		return -ENOMEM;
	return 0;
}
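The function threads each completed sinfo onto a singly linked list through ctx->ppsinfo. A hedged sketch of how such a chain might be walked (the walker itself is hypothetical):

/* Hypothetical walker: iterate the chain built via ctx->ppsinfo above. */
static void walk_signed_infos(const struct pkcs7_message *msg)
{
	const struct pkcs7_signed_info *sinfo;

	for (sinfo = msg->signed_infos; sinfo; sinfo = sinfo->next)
		pr_devel("sinfo %u: auth_id [%*phN]\n", sinfo->index,
			 sinfo->sig->auth_ids[0]->len,
			 sinfo->sig->auth_ids[0]->data);
}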
Example 6
static ssize_t backlightdimmer_status_write(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t size)
{
	unsigned int data;

	if (sscanf(buf, "%u\n", &data) == 1) {
		pr_devel("%s: %u\n", __func__, data);

		if (data == 1) {
			pr_info("%s: BLD function enabled\n", __func__);

			bld_enabled = true;

			touchkey_pressed();
		} else if (data == 0) {
			pr_info("%s: BLD function disabled\n", __func__);

			bld_enabled = false;

			cancel_delayed_work(&dimmer_work);
			flush_scheduled_work();

			if (backlight_dimmed)
				bld_enable_backlights();
		} else {
			pr_info("%s: invalid input range %u\n", __func__, data);
		}
	} else {
		pr_info("%s: invalid input\n", __func__);
	}

	return size;
}
Example 7
static void hvsi_cd_change(struct hvsi_priv *pv, int cd)
{
	if (cd)
		pv->mctrl |= TIOCM_CD;
	else {
		pv->mctrl &= ~TIOCM_CD;

		/* We copy the existing hvsi driver semantics
		 * here which are to trigger a hangup when
		 * we get a carrier loss.
		 * Closing our connection to the server will
		 * do just that.
		 */
		if (!pv->is_console && pv->opened) {
			pr_devel("HVSI@%x Carrier lost, hanging up !\n",
				 pv->termno);
			hvsi_send_close(pv);
		}
	}
}
Example 8
static ssize_t ignore_mar_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	unsigned int data;

	if (sscanf(buf, "%u\n", &data) == 1) {
		pr_devel("%s: %u\n", __func__, data);

		if (data == 1) {
			ignore_margin = true;
		} else if (data == 0) {
			ignore_margin = false;
		} else {
			pr_info("%s: invalid input range %u\n", __func__, data);
		}
	} else {
		pr_info("%s: invalid input\n", __func__);
	}

	return size;
}
Example 9
static void rmnet_check_fifo(struct net_device *dev)
{
#if fcENABLE_FLOW_CTRL
	if (bRmnetFifoFull)
	{
		struct rmnet_private *p = netdev_priv(dev);
		int iAvail = smd_write_avail(p->ch);

		if (iAvail > (smd_total_fifo_size(p->ch) / 2))
		{
			pr_devel(LOG_TAG1 "%s@%d: tx resumed\n", __func__, __LINE__);
			if (netif_carrier_ok(dev))
				netif_wake_queue(dev);
			else
				pr_err(LOG_TAG1 "%s@%d: no netif_carrier_ok\n", __func__, __LINE__);
			bRmnetFifoFull = 0;
		}
	}
#endif
}
Example 10
int
pfq_computation_init(struct pfq_computation_tree *comp)
{
	size_t n;
	for (n = 0; n < comp->size; n++)
	{
		if (comp->node[n].init) {

			pr_devel("[PFQ] %zu: initializing computation %pF...\n", n, comp->node[n].init);

			if (comp->node[n].init( &comp->node[n].fun ) < 0) {
				printk(KERN_INFO "[PFQ] computation_init: error in function (%zu)!\n", n);
				return -EPERM;
			}

			comp->node[n].initialized = true;
		}
	}
	return 0;
}
Example 11
static ssize_t notification_led_status_write(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t size)
{
	unsigned int data;

	if (sscanf(buf, "%u\n", &data) == 1) {
		if (data == 0 || data == 1) {
			pr_devel("%s: %u\n", __func__, data);
			if (data == 1)
				enable_led_notification();

			if (data == 0)
				disable_led_notification();
		} else
			pr_info("%s: wrong input %u\n", __func__, data);
	} else
		pr_info("%s: input error\n", __func__);

	return size;
}
Example 12
File: bpf.c Project: pfq/PFQ
void
pfq_free_sk_filter(struct sk_filter *filter)
{
	struct sock sk;
	int rv;

	sock_init_data(NULL, &sk);
	sk.sk_filter = NULL;
	atomic_set(&sk.sk_omem_alloc, 0);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
	sock_reset_flag(&sk, SOCK_FILTER_LOCKED);
#endif
	sk.sk_filter = filter;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,8) && LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0))
	if ((rv = __sk_detach_filter(&sk, sock_owned_by_user(&sk))))
#else
	if ((rv = sk_detach_filter(&sk)))
#endif
		pr_devel("[PFQ] BPF: sk_detach_filter error: (%d)!\n", rv);
}
Example 13
static void ics_opal_unmask_irq(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int64_t rc;
	int server;

	pr_devel("ics-hal: unmask virq %d [hw 0x%x]\n", d->irq, hw_irq);

	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
		return;

	server = xics_get_irq_server(d->irq, d->affinity, 0);
	server = ics_opal_mangle_server(server);

	rc = opal_set_xive(hw_irq, server, DEFAULT_PRIORITY);
	if (rc != OPAL_SUCCESS)
		pr_err("%s: opal_set_xive(irq=%d [hw 0x%x] server=%x)"
		       " error %lld\n",
		       __func__, d->irq, hw_irq, server, rc);
}
Example 14
static int ncp6335b_parse_dt(struct device *dev,
		struct ncp6335b_platform_data *pdata)
{
	struct device_node *np = dev->of_node;
	char *str = NULL;
	int ret;

	ret = of_property_read_string(np, "ncp6335b,dev_name", (const char **)&str);
	if (ret) {
		pr_err("ncp6335b: fail to read, ncp6335b_parse_dt\n");
		return -ENODEV;
	}

	if (str)
		pr_devel("ncp6335b: DT dev name = %s\n", str);

	dev->platform_data = pdata;

	return 0;
}
Example 15
void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	int i;

	if (!kvmppc_xive_enabled(vcpu))
		return;

	if (!xc)
		return;

	pr_devel("native_cleanup_vcpu(cpu=%d)\n", xc->server_num);

	/* Ensure no interrupt is still routed to that VP */
	xc->valid = false;
	kvmppc_xive_disable_vcpu_interrupts(vcpu);

	/* Disable the VP */
	xive_native_disable_vp(xc->vp_id);

	/* Free the queues & associated interrupts */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		/* Free the escalation irq */
		if (xc->esc_virq[i]) {
			free_irq(xc->esc_virq[i], vcpu);
			irq_dispose_mapping(xc->esc_virq[i]);
			kfree(xc->esc_virq_names[i]);
			xc->esc_virq[i] = 0;
		}

		/* Free the queue */
		kvmppc_xive_native_cleanup_queue(vcpu, i);
	}

	/* Free the VP */
	kfree(xc);

	/* Cleanup the vcpu */
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
	vcpu->arch.xive_vcpu = NULL;
}
Example 16
int __devinit pnv_smp_kick_cpu(int nr)
{
	unsigned int pcpu = get_hard_smp_processor_id(nr);
	unsigned long start_here = __pa(*((unsigned long *)
					  generic_secondary_smp_init));
	long rc;

	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/* On OPAL v2 the CPUs are still spinning inside OPAL itself,
	 * get them back now
	 */
	if (firmware_has_feature(FW_FEATURE_OPALv2)) {
		pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
		rc = opal_start_cpu(pcpu, start_here);
		if (rc != OPAL_SUCCESS)
			pr_warn("OPAL Error %ld starting CPU %d\n",
				rc, nr);
	}
	return smp_generic_kick_cpu(nr);
}
Example 17
static void sbecom_proc_get_brdinfo(ci_t *ci, struct sbe_brd_info *bip)
{
	hdw_info_t *hi = &hdw_info[ci->brdno];
	u_int8_t *bsn = 0;

	switch (hi->promfmt)
	{
	case PROM_FORMAT_TYPE1:
		bsn = (u_int8_t *) hi->mfg_info.pft1.Serial;
		break;
	case PROM_FORMAT_TYPE2:
		bsn = (u_int8_t *) hi->mfg_info.pft2.Serial;
		break;
	}

	sbecom_get_brdinfo (ci, bip, bsn);

	pr_devel(">> sbecom_get_brdinfo: returned, first_if %p <%s> last_if %p <%s>\n",
		 bip->first_iname, bip->first_iname,
		 bip->last_iname, bip->last_iname);
}
Example 18
int ps3_repository_find_device(struct ps3_repository_device *repo)
{
	int result;
	struct ps3_repository_device tmp = *repo;
	unsigned int num_dev;

	BUG_ON(repo->bus_index > 10);
	BUG_ON(repo->dev_index > 10);

	result = ps3_repository_read_bus_num_dev(tmp.bus_index, &num_dev);

	if (result) {
		pr_devel("%s:%d read_bus_num_dev failed\n", __func__, __LINE__);
		return result;
	}

	pr_devel("%s:%d: bus_type %u, bus_index %u, bus_id %llu, num_dev %u\n",
		__func__, __LINE__, tmp.bus_type, tmp.bus_index, tmp.bus_id,
		num_dev);

	if (tmp.dev_index >= num_dev) {
		pr_devel("%s:%d: no device found\n", __func__, __LINE__);
		return -ENODEV;
	}

	result = ps3_repository_read_dev_type(tmp.bus_index, tmp.dev_index,
		&tmp.dev_type);

	if (result) {
		pr_devel("%s:%d read_dev_type failed\n", __func__, __LINE__);
		return result;
	}

	result = ps3_repository_read_dev_id(tmp.bus_index, tmp.dev_index,
		&tmp.dev_id);

	if (result) {
		pr_devel("%s:%d ps3_repository_read_dev_id failed\n", __func__,
		__LINE__);
		return result;
	}

	pr_devel("%s:%d: found: dev_type %u, dev_index %u, dev_id %llu\n",
		__func__, __LINE__, tmp.dev_type, tmp.dev_index, tmp.dev_id);

	*repo = tmp;
	return 0;
}
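Example 19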
/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_devel("[%d] steal context %d from mm @%p\n", cpu, id, mm);

	/* Flush the TLB for that context */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	__clear_bit(id, stale_map[cpu]);

	return id;
}
Example 20
const char *
pfq_signature_by_user_symbol(const char __user *symb)
{
	struct symtable_entry *entry;
        const char *symbol;

        symbol = strdup_user(symb);
        if (symbol == NULL) {
                pr_devel("[PFQ] pfq_signature_by_user_symbol: strdup!\n");
                return NULL;
        }

        entry = pfq_symtable_search(&pfq_lang_functions, symbol);
        if (entry == NULL) {
                kfree(symbol);
                return NULL;
        }

        kfree(symbol);
        return entry->signature;
}
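Example 21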
static ssize_t soundcontrol_highperf_write(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t size)
{
	unsigned int data;

	if (sscanf(buf, "%u\n", &data) == 1) {
		pr_devel("%s: %u\n", __func__, data);

		if (data == 1) {
			if (!high_perf_mode) {
				pr_info("%s: SOUNDCONTROL high performance audio enabled\n",
					__func__);

				high_perf_mode = true;

				soundcontrol_updateperf(high_perf_mode);
			}
		} else if (data == 0) {
			if (high_perf_mode) {
				pr_info("%s: SOUNDCONTROL high performance audio disabled\n",
					__func__);

				high_perf_mode = false;

				soundcontrol_updateperf(high_perf_mode);
			}
		} else {
			pr_info("%s: invalid input range %u\n", __func__, data);
		}
	} else {
		pr_info("%s: invalid input\n", __func__);
	}

	return size;
}
Example 22
static int dump_stor_dev_info(struct ps3_repository_device *repo)
{
	int result = 0;
	unsigned int num_regions, region_index;
	u64 port, blk_size, num_blocks;

	pr_devel(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
		repo->bus_index, repo->dev_index);

	result = ps3_repository_read_stor_dev_info(repo->bus_index,
		repo->dev_index, &port, &blk_size, &num_blocks, &num_regions);
	if (result) {
		pr_devel("%s:%d ps3_repository_read_stor_dev_info"
			" (%u:%u) failed\n", __func__, __LINE__,
			repo->bus_index, repo->dev_index);
		goto out;
	}

	pr_devel("%s:%d  (%u:%u): port %llu, blk_size %llu, num_blocks "
		 "%llu, num_regions %u\n",
		 __func__, __LINE__, repo->bus_index, repo->dev_index,
		port, blk_size, num_blocks, num_regions);

	for (region_index = 0; region_index < num_regions; region_index++) {
		unsigned int region_id;
		u64 region_start, region_size;

		result = ps3_repository_read_stor_dev_region(repo->bus_index,
			repo->dev_index, region_index, &region_id,
			&region_start, &region_size);
		if (result) {
			 pr_devel("%s:%d ps3_repository_read_stor_dev_region"
				  " (%u:%u) failed\n", __func__, __LINE__,
				  repo->bus_index, repo->dev_index);
			break;
		}

		pr_devel("%s:%d (%u:%u) region_id %u, start %lxh, size %lxh\n",
			__func__, __LINE__, repo->bus_index, repo->dev_index,
			region_id, (unsigned long)region_start,
			(unsigned long)region_size);
	}

out:
	pr_devel(" <- %s:%d\n", __func__, __LINE__);
	return result;
}
Example 23
static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	unsigned int i;

	pr_devel("%s\n", __func__);

	mutex_lock(&kvm->lock);
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (sb) {
			arch_spin_lock(&sb->lock);
			kvmppc_xive_native_sync_sources(sb);
			arch_spin_unlock(&sb->lock);
		}
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvmppc_xive_native_vcpu_eq_sync(vcpu);
	}

	mutex_unlock(&kvm->lock);

	return 0;
}
Example 24
static int __devinit smp_bgq_kick_cpu(int nr)
{
	struct device_node *np;
	int tid;
	const char *enable_method;

	if (nr < 0 || nr >= num_possible_cpus())
		return -ENOENT;

	np = of_get_cpu_node(nr, &tid);
	if (!np)
		return -ENODEV;

	enable_method = of_get_property(np, "enable-method", NULL);
	if (!enable_method) {
		pr_err("CPU%d has no enable-method\n", nr);
		return -ENOENT;
	}
	pr_devel("CPU%d has enable-method: \"%s\"\n", nr, enable_method);

	if (strcmp(enable_method, "kexec") != 0) {
		pr_err("CPU%d: This kernel does not support the \"%s\"\n",
		       nr, enable_method);
		return -EINVAL;
	}

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.	After we set
	 * cpu_start, the processor will continue on to
	 * secondary_start
	 */
	paca[nr].cpu_start = 1;

	/* barrier so other CPU can see it */
	smp_mb();

	return 0;
}
Example 25
static int
__pfq_join_group(int gid, int id, unsigned long class_mask, int policy)
{
        unsigned long tmp = 0;
        unsigned long bit;

        if (!pfq_groups[gid].pid) {
                __pfq_group_ctor(gid);
        }

        if (!__pfq_group_access(gid, id, policy, true)) {
                pr_devel("[PFQ] gid:%d is not joinable with policy %d\n", gid, policy);
                return -1;
        }

        pfq_bitwise_foreach(class_mask, bit)
        {
                int class = pfq_ctz(bit);
                tmp = atomic_long_read(&pfq_groups[gid].sock_mask[class]);
                tmp |= 1L << id;
                atomic_long_set(&pfq_groups[gid].sock_mask[class], tmp);
        }

        return 0;
}
Example 26
long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct cxl_context *ctx = file->private_data;

	if (ctx->status == CLOSED)
		return -EIO;

	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		return -EIO;

	pr_devel("afu_ioctl\n");
	switch (cmd) {
	case CXL_IOCTL_START_WORK:
		return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg);
	case CXL_IOCTL_GET_PROCESS_ELEMENT:
		return afu_ioctl_process_element(ctx, (__u32 __user *)arg);
	case CXL_IOCTL_GET_AFU_ID:
		return afu_ioctl_get_afu_id(ctx, (struct cxl_afu_id __user *)
					    arg);
	}
	return -EINVAL;
}
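From userspace, the dispatch above is reached through the AFU device node. A minimal sketch, assuming a dedicated-mode AFU at /dev/cxl/afu0.0d (the path is an assumption); the ioctl number comes from the uapi header <misc/cxl.h>:

/*
 * Userspace sketch: fetch the process element handle of an open AFU
 * context. Error handling is trimmed; the device path is assumed.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <misc/cxl.h>

static int get_process_element(void)
{
	__u32 pe = 0;
	int fd = open("/dev/cxl/afu0.0d", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, CXL_IOCTL_GET_PROCESS_ELEMENT, &pe) < 0)
		pe = (__u32)-1;
	close(fd);
	return (int)pe;
}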
Example 27
static irqreturn_t cxl_irq_afu(int irq, void *data)
{
	struct cxl_context *ctx = data;
	irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
	int irq_off, afu_irq = 1;
	__u16 range;
	int r;

	for (r = 1; r < CXL_IRQ_RANGES; r++) {
		irq_off = hwirq - ctx->irqs.offset[r];
		range = ctx->irqs.range[r];
		if (irq_off >= 0 && irq_off < range) {
			afu_irq += irq_off;
			break;
		}
		afu_irq += range;
	}
	if (unlikely(r >= CXL_IRQ_RANGES)) {
		WARN(1, "Recieved AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
		     ctx->pe, irq, hwirq);
		return IRQ_HANDLED;
	}

	pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
	       afu_irq, ctx->pe, irq, hwirq);

	if (unlikely(!ctx->irq_bitmap)) {
		WARN(1, "Recieved AFU IRQ for context with no IRQ bitmap\n");
		return IRQ_HANDLED;
	}
	spin_lock(&ctx->lock);
	set_bit(afu_irq - 1, ctx->irq_bitmap);
	ctx->pending_irq = true;
	spin_unlock(&ctx->lock);

	wake_up_all(&ctx->wq);

	return IRQ_HANDLED;
}
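To make the range walk concrete, a worked example with hypothetical numbers:

/*
 * Worked example (hypothetical numbers): offset[1] = 0x100,
 * range[1] = 4, offset[2] = 0x200, range[2] = 8. For hwirq 0x202,
 * range 1 misses (afu_irq becomes 1 + 4 = 5), range 2 hits at
 * irq_off = 2, so afu_irq = 5 + 2 = 7 and bit 6 is set in
 * ctx->irq_bitmap.
 */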
Example 28
static void
__pfq_group_dtor(int gid)
{
        struct pfq_group * that = &pfq_groups[gid];
        void *context[Q_FUN_MAX];

        struct sk_filter *filter;
        int i;

        /* remove this gid from demux matrix */

        pfq_devmap_update(map_reset, Q_ANY_DEVICE, Q_ANY_QUEUE, gid);

        that->pid = 0;
        that->policy = Q_GROUP_UNDEFINED;

        for(i = 0; i < Q_FUN_MAX; i++)
        {
		atomic_long_set(&pfq_groups[gid].fun_ctx[i].function, 0L);

		context[i] = (void *)atomic_long_xchg(&pfq_groups[gid].fun_ctx[i].context, 0L);
        }

        filter = (struct sk_filter *)atomic_long_xchg(&pfq_groups[gid].filter, 0L);

        msleep(Q_GRACE_PERIOD);   /* sleeping is possible here: user-context */

        for(i = 0; i < Q_FUN_MAX; i++)
        {
                kfree(context[i]);
        }

        pfq_free_sk_filter(filter);

        that->vlan_filt = false;

        pr_devel("[PFQ] group id:%d destroyed.\n", gid);
}
Example 29
static unsigned long single_gpci_request(u32 req, u32 starting_index,
		u16 secondary_index, u8 version_in, u32 offset, u8 length,
		u64 *value)
{
	unsigned long ret;
	size_t i;
	u64 count;

	struct {
		struct hv_get_perf_counter_info_params params;
		uint8_t bytes[GPCI_MAX_DATA_BYTES];
	} __packed __aligned(sizeof(uint64_t)) arg = {
		.params = {
			.counter_request = cpu_to_be32(req),
			.starting_index = cpu_to_be32(starting_index),
			.secondary_index = cpu_to_be16(secondary_index),
			.counter_info_version_in = version_in,
		}
	};

	ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
			virt_to_phys(&arg), sizeof(arg));
	if (ret) {
		pr_devel("hcall failed: 0x%lx\n", ret);
		return ret;
	}

	/*
	 * we verify offset and length are within the zeroed buffer at event
	 * init.
	 */
	count = 0;
	/* Assemble the counter value most-significant byte first. */
	for (i = offset; i < offset + length; i++)
		count |= (u64)arg.bytes[i] << ((length - 1 - (i - offset)) * 8);

	*value = count;
	return ret;
}
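A short worked check of the byte assembly, with hypothetical values:

/*
 * Worked example (hypothetical values): offset = 0, length = 2 and
 * arg.bytes = { 0x12, 0x34 } assemble most-significant byte first:
 * (0x12 << 8) | (0x34 << 0) == 0x1234.
 */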
Example 30
/*
 * Verify the signature on a module.
 */
int mod_verify_sig(const void *mod, unsigned long *_modlen)
{
	struct module_signature ms;
	size_t modlen = *_modlen, sig_len;

	pr_devel("==>%s(,%zu)\n", __func__, modlen);

	if (modlen <= sizeof(ms))
		return -EBADMSG;

	memcpy(&ms, mod + (modlen - sizeof(ms)), sizeof(ms));
	modlen -= sizeof(ms);

	sig_len = be32_to_cpu(ms.sig_len);
	if (sig_len >= modlen)
		return -EBADMSG;
	modlen -= sig_len;
	*_modlen = modlen;

	if (ms.id_type != PKEY_ID_PKCS7) {
		pr_err("Module is not signed with expected PKCS#7 message\n");
		return -ENOPKG;
	}

	if (ms.algo != 0 ||
	    ms.hash != 0 ||
	    ms.signer_len != 0 ||
	    ms.key_id_len != 0 ||
	    ms.__pad[0] != 0 ||
	    ms.__pad[1] != 0 ||
	    ms.__pad[2] != 0) {
		pr_err("PKCS#7 signature info has unexpected non-zero params\n");
		return -EBADMSG;
	}

	return system_verify_data(mod, modlen, mod + modlen, sig_len,
				  VERIFYING_MODULE_SIGNATURE);
}
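The trailing layout this function assumes can be sketched as follows; the magic marker string is stripped by the caller before mod_verify_sig() runs:

/*
 * Layout sketch of a signed module as seen by mod_verify_sig(); the
 * "~Module signature appended~\n" marker has already been removed by
 * the caller:
 *
 *   [ module data: modlen ][ PKCS#7 blob: sig_len ][ struct module_signature ]
 */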