示例#1
0
/*
 * sysfs store handler: enable this fdx client by creating its server
 * object and scheduling the server-creation work item.
 *
 * Returns @size on success, -EINVAL if already enabled, -ENOMEM on
 * allocation failure, or the error from l4x_fdx_srv_create_name().
 */
static ssize_t
client_enable_store(struct device *dev, struct device_attribute *attr,
                    const char *buf, size_t size)
{
    struct l4fdx_client *client = to_client(dev);
    void *objmem;

    /* A client can only be enabled once. */
    if (client->enabled)
        return -EINVAL;

    objmem = kmalloc(l4x_fdx_srv_objsize(), GFP_KERNEL);
    if (!objmem)
        return -ENOMEM;

    client->enabled = 1;

    INIT_WORK(&client->create_work, create_server);

    client->srv_obj = L4XV_FN(l4fdx_srv_obj,
                              l4x_fdx_srv_create_name(l4x_cpu_thread_get_cap(0),
                                      client->capname,
                                      &b_ops, client,
                                      objmem));

    if (IS_ERR(client->srv_obj)) {
        /*
         * Roll back the enabled flag: without this, a failed creation
         * left the client permanently stuck (every further write hit
         * the -EINVAL check above and the client could never be
         * enabled again).
         */
        client->enabled = 0;
        kfree(objmem);
        return PTR_ERR(client->srv_obj);
    }

    queue_work(khelper_wq, &client->create_work);

    return size;
}
示例#2
0
/*
 * Flush (unmap) a page mapping from L4 task(s).
 *
 * @mm:           address space the page belongs to, may be NULL
 * @address:      'physical' address as known in the Linux server
 * @vaddr:        virtual address in the child address space
 * @size:         flush size (log2, passed through to l4_fpage)
 * @flush_rights: rights bits to revoke
 * @caller:       caller identification, forwarded to unmap_log_add()
 *
 * For mm != NULL the flush is only recorded in the per-CPU unmap log
 * (batched, later flushed by l4x_unmap_log_flush()); only the mm == NULL
 * case performs an immediate unmap here.
 */
static void l4x_flush_page(struct mm_struct *mm,
                           unsigned long address,
                           unsigned long vaddr,
                           int size,
                           unsigned long flush_rights, unsigned long caller)
{
	l4_msgtag_t tag;

	/* On ARM this whole function is compiled out to a no-op. */
	if (IS_ENABLED(CONFIG_ARM))
		return;

	/* Address space explicitly opted out of unmapping. */
	if (mm && mm->context.l4x_unmap_mode == L4X_UNMAP_MODE_SKIP)
		return;

	/* Page zero is remapped to a dedicated substitute address. */
	if ((address & PAGE_MASK) == 0)
		address = PAGE0_PAGE_ADDRESS;

	/* With a known mm, just record the flush for batched processing. */
	if (likely(mm)) {
		unmap_log_add(mm, vaddr, size, flush_rights, caller);
		return;
	}

	/* do the real flush */
	/*
	 * NOTE(review): due to the early return above, mm is always NULL
	 * from here on, so this branch appears unreachable; only the else
	 * branch (flush by 'physical' address in all children) can run.
	 * Kept as-is — confirm before removing.
	 */
	if (mm && !l4_is_invalid_cap(mm->context.task)) {
		/* Direct flush in the child, use virtual address in the
		 * child address space */
		tag = L4XV_FN(l4_msgtag_t,
		              l4_task_unmap(mm->context.task,
		                           l4_fpage(vaddr & PAGE_MASK, size,
		                                    flush_rights),
		                           L4_FP_ALL_SPACES));
	} else {
		/* Flush all pages in all childs using the 'physical'
		 * address known in the Linux server */
		tag = L4XV_FN(l4_msgtag_t,
		              l4_task_unmap(L4RE_THIS_TASK_CAP,
			                    l4_fpage(address & PAGE_MASK, size,
		                                     flush_rights),
			                    L4_FP_OTHER_SPACES));
	}

	if (l4_error(tag))
		l4x_printf("l4_task_unmap error %ld\n", l4_error(tag));
}
示例#3
0
/*
 * Revoke our own mapping of the page containing @addr from all address
 * spaces (including our own), e.g. before re-establishing it with
 * different attributes. Errors are only reported via printk.
 */
static inline void
l4x_unmap_self(unsigned long addr)
{
	unsigned long page = addr & PAGE_MASK;
	l4_msgtag_t tag;

	if (0)
		printk("dma-self-unmap: %08lx\n", addr);

	tag = L4XV_FN(l4_msgtag_t,
	              l4_task_unmap(L4_BASE_TASK_CAP,
	                            l4_fpage(page, PAGE_SHIFT, L4_FPAGE_RWX),
	                            L4_FP_ALL_SPACES));

	if (l4_error(tag))
		printk("dma-remap: internal unmapping of %08lx failed\n", page);
}
示例#4
0
/*
 * Map the page at @src to @dst within our own task, applying the extra
 * @mapflags (e.g. caching attributes) to the map item. Errors are only
 * reported via printk.
 */
static inline void
l4x_map_self(unsigned long src, unsigned long dst, unsigned mapflags)
{
	unsigned long src_page, dst_page;
	l4_msgtag_t tag;

	if (0)
		printk("dma-self-map: %08lx -> %08lx [%x]\n",
		       src, dst, mapflags);

	src_page = src & PAGE_MASK;
	dst_page = dst & PAGE_MASK;

	tag = L4XV_FN(l4_msgtag_t,
	              l4_task_map(L4_BASE_TASK_CAP, L4_BASE_TASK_CAP,
	                          l4_fpage(src_page, PAGE_SHIFT, L4_FPAGE_RWX),
	                          dst_page | L4_MAP_ITEM_MAP | mapflags));

	if (l4_error(tag))
		printk("dma-remap: internal mapping failed: %08lx -> %08lx\n",
		       src_page, dst_page);
}
示例#5
0
/*
 * Flush the per-CPU unmap log: perform the actual l4_task_unmap() for
 * every entry recorded by unmap_log_add() on this CPU, then reset the log.
 *
 * Runs with local interrupts disabled so the per-CPU log cannot be
 * modified concurrently while it is being drained.
 */
void l4x_unmap_log_flush(void)
{
	unsigned i;
	struct unmap_log_t *log;
	unsigned long flags;

	local_irq_save(flags);

	log = this_cpu_ptr(&unmap_log);

	for (i = 0; i < log->cnt; ++i) {
		l4_msgtag_t tag;
		struct mm_struct *mm = log->log[i].mm;

		/* Task already gone (invalid capability): nothing to unmap. */
		if (unlikely(l4_is_invalid_cap(mm->context.task)))
			continue;

		tag = L4XV_FN(l4_msgtag_t,
		              l4_task_unmap(mm->context.task,
		                            l4_fpage(log->log[i].addr,
		                                     log->log[i].size,
		                                     log->log[i].rights),
		                            L4_FP_ALL_SPACES));
		if (unlikely(l4_error(tag))) {
			l4x_printf("l4_task_unmap error %ld: t=%lx\n",
			           l4_error(tag), mm->context.task);
			WARN_ON(1);
		} else if (0)
			/* Disabled debug trace of each flushed entry. */
			l4x_printf("flushing(%d) %lx:%08lx[%d,%x]\n",
			           i, mm->context.task,
			           log->log[i].addr, log->log[i].size,
			           log->log[i].rights);
	}

	/* All entries processed: empty the log. */
	log->cnt = 0;
	local_irq_restore(flags);
}
示例#6
0
/*
 * Factory call: create a new fdx client from externally supplied @data.
 *
 * Allocates and initializes the client structure (uid/gid/open-flags
 * mask with defaults, optional base/filter paths and capability name),
 * creates the server object and queues the create/add-device work items.
 *
 * Runs with interrupts disabled (UTCB data still hot), hence GFP_ATOMIC.
 * On success *client_cap holds the new client capability and 0 is
 * returned; on failure all partial allocations are released and a
 * negative errno is returned.
 */
L4_CV static
int l4xfdx_factory_create(struct l4x_fdx_srv_factory_create_data *data,
                          l4_cap_idx_t *client_cap)
{
    struct l4fdx_client *client;
    void *objmem;
    int ret;

    BUG_ON(!irqs_disabled()); /* utcb-data still hot */

    client = kzalloc(sizeof(*client), GFP_ATOMIC);
    if (!client)
        return -ENOMEM;

    ret = -ENOMEM;
    objmem = kmalloc(l4x_fdx_srv_objsize(), GFP_ATOMIC);
    if (!objmem)
        goto out_free_client;

    client->enabled = 1;
    if (data->opt_flags & L4X_FDX_SRV_FACTORY_HAS_UID)
        client->uid = data->uid;
    else
        client->uid = DEFAULT_UID;

    if (data->opt_flags & L4X_FDX_SRV_FACTORY_HAS_GID)
        client->gid = data->gid;
    else
        client->gid = DEFAULT_GID;

    if (data->opt_flags & L4X_FDX_SRV_FACTORY_HAS_OPENFLAGS_MASK)
        client->openflags_mask = data->openflags_mask;
    else
        client->openflags_mask = DEFAULT_OPEN_FLAGS_MASK;

    if (data->opt_flags & L4X_FDX_SRV_FACTORY_HAS_BASEPATH) {
        client->basepath_len = data->basepath_len;
        /*
         * Drop a trailing NUL from the reported length; guard against a
         * zero length, which would read basepath[-1] (untrusted input).
         */
        if (client->basepath_len
            && !data->basepath[client->basepath_len - 1])
            --client->basepath_len;
        client->basepath = kstrndup(data->basepath,
                                    client->basepath_len, GFP_ATOMIC);
        if (!client->basepath)
            goto out_free_objmem_and_client_strings;
    }

    if (data->opt_flags & L4X_FDX_SRV_FACTORY_HAS_FILTERPATH) {
        client->filterpath_len = data->filterpath_len;
        /* Same trailing-NUL / zero-length handling as for basepath. */
        if (client->filterpath_len
            && !data->filterpath[client->filterpath_len - 1])
            --client->filterpath_len;
        client->filterpath = kstrndup(data->filterpath,
                                      client->filterpath_len, GFP_ATOMIC);
        if (!client->filterpath)
            goto out_free_objmem_and_client_strings;
    }

    if (data->opt_flags & L4X_FDX_SRV_FACTORY_HAS_CLIENTNAME) {
        client->capname = kstrndup(data->clientname,
                                   data->clientname_len + 1, GFP_ATOMIC);
        if (!client->capname)
            goto out_free_objmem_and_client_strings;

        /* Force termination at the reported name length. */
        client->capname[data->clientname_len] = 0;
    }

    if (data->opt_flags & L4X_FDX_SRV_FACTORY_HAS_FLAG_NOGROW)
        client->flag_nogrow = 1;

    spin_lock_init(&client->req_list_lock);
    INIT_LIST_HEAD(&client->req_list);
    init_waitqueue_head(&client->event);

    INIT_WORK(&client->create_work, create_server);

    client->srv_obj = L4XV_FN(l4fdx_srv_obj,
                              l4x_fdx_srv_create(l4x_cpu_thread_get_cap(0),
                                      &b_ops, client, objmem,
                                      client_cap));

    /*
     * Check the creation result before queueing any work, mirroring
     * client_enable_store(); previously a failed creation was queued
     * anyway and the error silently ignored.
     */
    if (IS_ERR(client->srv_obj)) {
        ret = PTR_ERR(client->srv_obj);
        goto out_free_objmem_and_client_strings;
    }

    queue_work(khelper_wq, &client->create_work);

    client->cap = *client_cap;
    INIT_WORK(&client->add_device_work, add_device);
    queue_work(khelper_wq, &client->add_device_work);

    return 0;

out_free_objmem_and_client_strings:
    /* kfree(NULL) is a no-op; unset string members are still NULL. */
    kfree(objmem);
    kfree(client->basepath);
    kfree(client->filterpath);
    kfree(client->capname);

out_free_client:
    kfree(client);

    return ret;
}