Example #1
0
void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
{
	connected_init();

	if (mutex_lock_interruptible(&g_connected_mutex) != 0)
		return;

	if (g_connected) {
		/* We're already connected. Call the callback immediately. */
		callback();
	} else {
		if (g_num_deferred_callbacks >= MAX_CALLBACKS)
			vchiq_log_error(vchiq_core_log_level,
				"There are already %d callbacks registered - "
				"please increase MAX_CALLBACKS",
				g_num_deferred_callbacks);
		else {
			g_deferred_callback[g_num_deferred_callbacks] =
				callback;
			g_num_deferred_callbacks++;
		}
	}
	mutex_unlock(&g_connected_mutex);
}
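For reference, a minimal sketch of the flush side that the platform init functions below invoke as vchiq_call_connected_callbacks(). It is reconstructed from the globals visible in Example #1 (g_connected, g_deferred_callback, g_num_deferred_callbacks) and is an assumption, not the driver's verbatim source:

/* Sketch only: drain the deferred list once the connection is up. */
void vchiq_call_connected_callbacks(void)
{
	int i;

	connected_init();

	if (mutex_lock_interruptible(&g_connected_mutex) != 0)
		return;

	for (i = 0; i < g_num_deferred_callbacks; i++)
		g_deferred_callback[i]();	/* run each deferred callback */

	g_num_deferred_callbacks = 0;
	g_connected = 1;	/* later registrations fire immediately */
	mutex_unlock(&g_connected_mutex);
}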
Example #2
0
int __init
vchiq_platform_init(VCHIQ_STATE_T *state)
{
	VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
	int frag_mem_size;
	int err;
	int i;

	/* Allocate space for the channels in coherent memory */
	g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);

	err = bus_dma_tag_create(
	    NULL,
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    g_slot_mem_size + frag_mem_size, 1,	/* maxsize, nsegments */
	    g_slot_mem_size + frag_mem_size, 0,	/* maxsegsize, flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &dma_tag);
	if (err) {
		vchiq_log_error(vchiq_core_log_level,
		    "Unable to create DMA tag");
		return -ENOMEM;
	}

	err = bus_dmamem_alloc(dma_tag, (void **)&g_slot_mem,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK, &dma_map);
	if (err) {
		vchiq_log_error(vchiq_core_log_level, "Unable to allocate channel memory");
		err = -ENOMEM;
		goto failed_alloc;
	}

	err = bus_dmamap_load(dma_tag, dma_map, g_slot_mem,
	    g_slot_mem_size + frag_mem_size, vchiq_dmamap_cb,
	    &g_slot_phys, 0);

	if (err) {
		vchiq_log_error(vchiq_core_log_level, "cannot load DMA map");
		err = -ENOMEM;
		goto failed_load;
	}

	WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
	if (!vchiq_slot_zero) {
		err = -EINVAL;
		goto failed_init_slots;
	}

	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)g_slot_phys + g_slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (FRAGMENTS_T *)(g_slot_mem + g_slot_mem_size);
	g_slot_mem_size += frag_mem_size;

	/* Thread the fragments into a NULL-terminated free list, storing
	   each "next" pointer inside the free fragment itself. */
	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(FRAGMENTS_T **)&g_fragments_base[i] =
			&g_fragments_base[i + 1];
	}
	*(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
	_sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
		VCHIQ_SUCCESS) {
		err = -EINVAL;
		goto failed_vchiq_init;
	}

	bcm_mbox_write(BCM2835_MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);

	vchiq_log_info(vchiq_arm_log_level,
		"vchiq_init - done (slots %x, phys %x)",
		(unsigned int)vchiq_slot_zero, (unsigned int)g_slot_phys);

	vchiq_call_connected_callbacks();

	return 0;

failed_vchiq_init:
failed_init_slots:
	bus_dmamap_unload(dma_tag, dma_map);
failed_load:
	bus_dmamem_free(dma_tag, g_slot_mem, dma_map);
failed_alloc:
	bus_dma_tag_destroy(dma_tag);

	return err;
}
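Both FreeBSD examples pass vchiq_dmamap_cb to bus_dmamap_load() without showing it. A plausible minimal implementation (an assumption, not the driver's actual source) just records the single segment's bus address into the caller-supplied pointer, since the tags above request nsegments == 1:

/* Sketch only: bus_dmamap_load() callback recording one segment. */
static void
vchiq_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	bus_addr_t *addr;

	if (err)
		return;
	addr = (bus_addr_t *)arg;
	*addr = segs[0].ds_addr;	/* single segment by construction */
}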
Example #3
0
int __init
vchiq_platform_init(VCHIQ_STATE_T *state)
{
	VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
	int frag_mem_size;
	int err;
	int i;

	/* Allocate space for the channels in coherent memory */
	g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);

	g_slot_mem = dma_alloc_coherent(NULL, g_slot_mem_size + frag_mem_size,
		&g_slot_phys, GFP_ATOMIC);

	if (!g_slot_mem) {
		vchiq_log_error(vchiq_arm_log_level,
			"Unable to allocate channel memory");
		err = -ENOMEM;
		goto failed_alloc;
	}

	WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
	if (!vchiq_slot_zero) {
		err = -EINVAL;
		goto failed_init_slots;
	}

	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)g_slot_phys + g_slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (FRAGMENTS_T *)(g_slot_mem + g_slot_mem_size);
	g_slot_mem_size += frag_mem_size;

	/* Thread the fragments into a NULL-terminated free list, storing
	   each "next" pointer inside the free fragment itself. */
	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(FRAGMENTS_T **)&g_fragments_base[i] =
			&g_fragments_base[i + 1];
	}
	*(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
		VCHIQ_SUCCESS) {
		err = -EINVAL;
		goto failed_vchiq_init;
	}

	err = request_irq(VCHIQ_DOORBELL_IRQ, vchiq_doorbell_irq,
		IRQF_IRQPOLL, "VCHIQ doorbell",
		state);
	if (err < 0) {
		vchiq_log_error(vchiq_arm_log_level, "%s: failed to register "
			"irq=%d err=%d", __func__,
			VCHIQ_DOORBELL_IRQ, err);
		goto failed_request_irq;
	}

	/* Send the base address of the slots to VideoCore */

	dsb(); /* Ensure all writes have completed */

	bcm_mailbox_write(MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);

	vchiq_log_info(vchiq_arm_log_level,
		"vchiq_init - done (slots %x, phys %x)",
		(unsigned int)vchiq_slot_zero, (unsigned int)g_slot_phys);

	vchiq_call_connected_callbacks();

	return 0;

failed_request_irq:
failed_vchiq_init:
failed_init_slots:
	dma_free_coherent(NULL, g_slot_mem_size, g_slot_mem, g_slot_phys);

failed_alloc:
	return err;
}
Example #4
0
static int
create_pagelist(char __user *buf, size_t count, unsigned short type,
                struct proc *p, BULKINFO_T *bi)
{
    PAGELIST_T *pagelist;
    vm_page_t *pages;
    unsigned long *addrs;
    unsigned int num_pages, i;
    vm_offset_t offset;
    int pagelist_size;
    char *addr, *base_addr, *next_addr;
    int run, addridx, actual_pages;
    int err;
    vm_paddr_t pagelist_phys;

    offset = (vm_offset_t)buf & (PAGE_SIZE - 1);
    num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;

    bi->pagelist = NULL;
    bi->buf = buf;
    bi->size = count;

    /* Allocate enough storage to hold the page pointers and the page
    ** list
    */
    pagelist_size = sizeof(PAGELIST_T) +
                    (num_pages * sizeof(unsigned long)) +
                    (num_pages * sizeof(pages[0]));

    err = bus_dma_tag_create(
              NULL,
              PAGE_SIZE, 0,             /* alignment, boundary */
              BUS_SPACE_MAXADDR_32BIT,  /* lowaddr */
              BUS_SPACE_MAXADDR,        /* highaddr */
              NULL, NULL,               /* filter, filterarg */
              pagelist_size, 1,         /* maxsize, nsegments */
              pagelist_size, 0,         /* maxsegsize, flags */
              NULL, NULL,               /* lockfunc, lockarg */
              &bi->pagelist_dma_tag);
    if (err) {
        vchiq_log_error(vchiq_core_log_level, "Unable to create DMA tag for pagelist");
        return -ENOMEM;
    }

    err = bus_dmamem_alloc(bi->pagelist_dma_tag, (void **)&pagelist,
                           BUS_DMA_COHERENT | BUS_DMA_WAITOK, &bi->pagelist_dma_map);
    if (err) {
        vchiq_log_error(vchiq_core_log_level, "Unable to allocate pagelist memory");
        err = -ENOMEM;
        goto failed_alloc;
    }

    err = bus_dmamap_load(bi->pagelist_dma_tag, bi->pagelist_dma_map, pagelist,
                          pagelist_size, vchiq_dmamap_cb,
                          &pagelist_phys, 0);

    if (err) {
        vchiq_log_error(vchiq_core_log_level, "cannot load DMA map for pagelist memory");
        err = -ENOMEM;
        goto failed_load;
    }

    vchiq_log_trace(vchiq_arm_log_level,
                    "create_pagelist - %x", (unsigned int)pagelist);
    /* bus_dmamem_alloc() succeeded above, so pagelist is non-NULL here. */

    addrs = pagelist->addrs;
    pages = (vm_page_t *)(addrs + num_pages);

    actual_pages = vm_fault_quick_hold_pages(&p->p_vmspace->vm_map,
                   (vm_offset_t)buf, count,
                   (type == PAGELIST_READ ? VM_PROT_WRITE : 0 ) | VM_PROT_READ, pages, num_pages);

    if (actual_pages != num_pages) {
        /* Could not hold the whole range; release and unwind. */
        vm_page_unhold_pages(pages, actual_pages);
        err = -ENOMEM;
        goto failed_hold;
    }

    pagelist->length = count;
    pagelist->type = type;
    pagelist->offset = offset;

    /* Group the pages into runs of contiguous pages.  Each addrs[] entry
    ** packs a page-aligned VC bus address with the run length, minus one,
    ** in its low-order bits.
    */

    base_addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[0]));
    next_addr = base_addr + PAGE_SIZE;
    addridx = 0;
    run = 0;

    for (i = 1; i < num_pages; i++) {
        addr = (void *)PHYS_TO_VCBUS(VM_PAGE_TO_PHYS(pages[i]));
        if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
            next_addr += PAGE_SIZE;
            run++;
        } else {
            addrs[addridx] = (unsigned long)base_addr + run;
            addridx++;
            base_addr = addr;
            next_addr = addr + PAGE_SIZE;
            run = 0;
        }
    }

    addrs[addridx] = (unsigned long)base_addr + run;
    addridx++;

    /* Partial cache lines (fragments) require special measures */
    if ((type == PAGELIST_READ) &&
            ((pagelist->offset & (CACHE_LINE_SIZE - 1)) ||
             ((pagelist->offset + pagelist->length) &
              (CACHE_LINE_SIZE - 1)))) {
        FRAGMENTS_T *fragments;

        if (down_interruptible(&g_free_fragments_sema) != 0) {
            vm_page_unhold_pages(pages, num_pages);
            err = -EINTR;
            goto failed_hold;
        }

        WARN_ON(g_free_fragments == NULL);

        down(&g_free_fragments_mutex);
        fragments = (FRAGMENTS_T *) g_free_fragments;
        WARN_ON(fragments == NULL);
        g_free_fragments = *(FRAGMENTS_T **) g_free_fragments;
        up(&g_free_fragments_mutex);
        pagelist->type =
            PAGELIST_READ_WITH_FRAGMENTS + (fragments -
                                            g_fragments_base);
    }

    /* XXX: optimize? INV operation for read WBINV for write? */
    cpu_dcache_wbinv_range((vm_offset_t)buf, count);

    bi->pagelist = pagelist;

    return 0;

failed_hold:
    bus_dmamap_unload(bi->pagelist_dma_tag, bi->pagelist_dma_map);
failed_load:
    bus_dmamem_free(bi->pagelist_dma_tag, pagelist, bi->pagelist_dma_map);
failed_alloc:
    bus_dma_tag_destroy(bi->pagelist_dma_tag);

    return err;
}
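To make the packed addrs[] format concrete, a hypothetical helper (not part of the driver) that inverts the encoding produced by the run-grouping loop above:

/* Hypothetical: decode one packed addrs[] entry from create_pagelist. */
static inline void
decode_pagelist_entry(unsigned long entry, unsigned long *busaddr,
                      unsigned int *npages)
{
    *busaddr = entry & ~(unsigned long)(PAGE_SIZE - 1);      /* aligned base */
    *npages = (unsigned int)(entry & (PAGE_SIZE - 1)) + 1;   /* run field is count - 1 */
}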