int __init
vchiq_platform_init(VCHIQ_STATE_T *state)
{
	VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
	int frag_mem_size;
	int err;
	int i;

	/* Allocate space for the channels in coherent memory */
	g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);

	g_slot_mem = dma_alloc_coherent(NULL, g_slot_mem_size + frag_mem_size,
		&g_slot_phys, GFP_ATOMIC);

	if (!g_slot_mem) {
		vchiq_log_error(vchiq_arm_log_level,
			"Unable to allocate channel memory");
		err = -ENOMEM;
		goto failed_alloc;
	}

	WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
	if (!vchiq_slot_zero) {
		err = -EINVAL;
		goto failed_init_slots;
	}

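	/* Publish the fragment pool address and size to VideoCore */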
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)g_slot_phys + g_slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (FRAGMENTS_T *)(g_slot_mem + g_slot_mem_size);
	g_slot_mem_size += frag_mem_size;

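	/* Chain the fragments into a singly linked free list */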
	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(FRAGMENTS_T **)&g_fragments_base[i] =
			&g_fragments_base[i + 1];
	}
	*(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
		VCHIQ_SUCCESS) {
		err = -EINVAL;
		goto failed_vchiq_init;
	}

	err = request_irq(VCHIQ_DOORBELL_IRQ, vchiq_doorbell_irq,
		IRQF_IRQPOLL, "VCHIQ doorbell",
		state);
	if (err < 0) {
		vchiq_log_error(vchiq_arm_log_level, "%s: failed to register "
			"irq=%d err=%d", __func__,
			VCHIQ_DOORBELL_IRQ, err);
		goto failed_request_irq;
	}

	/* Send the base address of the slots to VideoCore */

	dsb(); /* Ensure all writes have completed */

	bcm_mailbox_write(MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);

	vchiq_log_info(vchiq_arm_log_level,
		"vchiq_init - done (slots %x, phys %x)",
		(unsigned int)vchiq_slot_zero, g_slot_phys);

	vchiq_call_connected_callbacks();

	return 0;

failed_request_irq:
failed_vchiq_init:
failed_init_slots:
	dma_free_coherent(NULL, g_slot_mem_size, g_slot_mem, g_slot_phys);

failed_alloc:
	return err;
}
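
/*
 * FreeBSD variant of the same initialisation: the coherent slot memory is
 * allocated and mapped through the busdma framework (bus_dma_tag_create,
 * bus_dmamem_alloc, bus_dmamap_load) instead of dma_alloc_coherent, and the
 * physical base address is passed to VideoCore with bcm_mbox_write().
 */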
int __init
vchiq_platform_init(VCHIQ_STATE_T *state)
{
	VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
	int frag_mem_size;
	int err;
	int i;

	/* Allocate space for the channels in coherent memory */
	g_slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);

	err = bus_dma_tag_create(
	    NULL,				/* parent */
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    g_slot_mem_size + frag_mem_size, 1,	/* maxsize, nsegments */
	    g_slot_mem_size + frag_mem_size, 0,	/* maxsegsize, flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &dma_tag);
	if (err) {
		vchiq_log_error(vchiq_core_log_level,
			"Unable to create DMA tag");
		return -ENOMEM;
	}

	err = bus_dmamem_alloc(dma_tag, (void **)&g_slot_mem,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK, &dma_map);
	if (err) {
		vchiq_log_error(vchiq_core_log_level,
			"Unable to allocate channel memory");
		err = -ENOMEM;
		goto failed_alloc;
	}

	err = bus_dmamap_load(dma_tag, dma_map, g_slot_mem,
	    g_slot_mem_size + frag_mem_size, vchiq_dmamap_cb,
	    &g_slot_phys, 0);

	if (err) {
		vchiq_log_error(vchiq_core_log_level, "cannot load DMA map");
		err = -ENOMEM;
		goto failed_load;
	}

	WARN_ON(((int)g_slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(g_slot_mem, g_slot_mem_size);
	if (!vchiq_slot_zero) {
		err = -EINVAL;
		goto failed_init_slots;
	}

	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)g_slot_phys + g_slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (FRAGMENTS_T *)(g_slot_mem + g_slot_mem_size);
	g_slot_mem_size += frag_mem_size;

	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(FRAGMENTS_T **)&g_fragments_base[i] =
			&g_fragments_base[i + 1];
	}
	*(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
	_sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	if (vchiq_init_state(state, vchiq_slot_zero, 0/*slave*/) !=
		VCHIQ_SUCCESS) {
		err = -EINVAL;
		goto failed_vchiq_init;
	}

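	/* Send the base address of the slots to VideoCore */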
	bcm_mbox_write(BCM2835_MBOX_CHAN_VCHIQ, (unsigned int)g_slot_phys);

	vchiq_log_info(vchiq_arm_log_level,
		"vchiq_init - done (slots %x, phys %x)",
		(unsigned int)vchiq_slot_zero, g_slot_phys);

	vchiq_call_connected_callbacks();

	return 0;

failed_vchiq_init:
failed_init_slots:
	bus_dmamap_unload(dma_tag, dma_map);
failed_load:
	/* Undo in reverse order: free the memory and its map, then the tag */
	bus_dmamem_free(dma_tag, g_slot_mem, dma_map);
failed_alloc:
	bus_dma_tag_destroy(dma_tag);

	return err;
}
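
/*
 * Device-tree based Linux variant: the slot memory, register block and
 * doorbell interrupt come from the platform device and use managed
 * (dmam_/devm_) allocations, so the failure paths need no explicit cleanup.
 */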
int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
{
	struct device *dev = &pdev->dev;
	VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
	struct resource *res;
	void *slot_mem;
	dma_addr_t slot_phys;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

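	/* Offset from kernel virtual to VideoCore bus addresses */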
	g_virt_to_bus_offset = virt_to_dma(dev, (void *)0);

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((int)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -EINVAL;

	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (FRAGMENTS_T *)(slot_mem + slot_mem_size);
	slot_mem_size += frag_mem_size;

	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(FRAGMENTS_T **)&g_fragments_base[i] =
			&g_fragments_base[i + 1];
	}
	*(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	if (vchiq_init_state(state, vchiq_slot_zero, 0) != VCHIQ_SUCCESS)
		return -EINVAL;

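	/* Map the doorbell register block and request the doorbell IRQ */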
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	g_regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(g_regs))
		return PTR_ERR(g_regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "failed to get IRQ\n");
		return irq;
	}

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */

	dsb(); /* Ensure all writes have completed */

	err = bcm_mailbox_write(MBOX_CHAN_VCHIQ, (unsigned int)slot_phys);
	if (err) {
		dev_err(dev, "mailbox write failed\n");
		return err;
	}

	vchiq_log_info(vchiq_arm_log_level,
		"vchiq_init - done (slots %x, phys %pad)",
		(unsigned int)vchiq_slot_zero, &slot_phys);

	vchiq_call_connected_callbacks();

	return 0;
}