void asihpi_adapter_remove(struct pci_dev *pci_dev)
{
	int idx;
	struct hpi_message hm;
	struct hpi_response hr;
	struct hpi_adapter *pa;
	struct hpi_pci pci;

	pa = pci_get_drvdata(pci_dev);
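	/* Keep a local copy of the PCI mapping info; it is still needed by
	 * the iounmap loop after the adapter is deleted below.
	 */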
	pci = pa->adapter->pci;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_DELETE);
	hm.adapter_index = pa->adapter->index;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);

	/* unmap PCI memory space, mapped during device init. */
	for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
		if (pci.ap_mem_base[idx])
			iounmap(pci.ap_mem_base[idx]);
	}

	vfree(pa->p_buffer);	/* vfree(NULL) is a no-op */

	pci_set_drvdata(pci_dev, NULL);
	dev_info(&pci_dev->dev,
		 "remove %04x:%04x,%04x:%04x,%04x, HPI index %d\n",
		 pci_dev->vendor, pci_dev->device,
		 pci_dev->subsystem_vendor, pci_dev->subsystem_device,
		 pci_dev->devfn, pa->adapter->index);

	memset(pa, 0, sizeof(*pa));
}
Example #2
void asihpi_exit(void)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_DRIVER_UNLOAD);
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
}
/* Wrapper around the HPI message call that allows the message and
   response types to be dumped.
*/
static void hpi_send_recv_f(struct hpi_message *phm, struct hpi_response *phr,
	struct file *file)
{
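	/* Per-adapter messages must carry an in-range adapter index; only
	 * subsystem messages are exempt from this check.
	 */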
	if ((phm->adapter_index >= HPI_MAX_ADAPTERS)
		&& (phm->object != HPI_OBJ_SUBSYSTEM))
		phr->error = HPI_ERROR_INVALID_OBJ_INDEX;
	else
		hpi_send_recv_ex(phm, phr, file);
}
Example #4
void asihpi_adapter_remove(struct pci_dev *pci_dev)
{
    int idx;
    struct hpi_message hm;
    struct hpi_response hr;
    struct hpi_adapter *pa;
    struct hpi_pci pci;

    pa = pci_get_drvdata(pci_dev);
    pci = pa->adapter->pci;

    /* Disable IRQ generation on DSP side */
    hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
                              HPI_ADAPTER_SET_PROPERTY);
    hm.adapter_index = pa->adapter->index;
    hm.u.ax.property_set.property = HPI_ADAPTER_PROPERTY_IRQ_RATE;
    hm.u.ax.property_set.parameter1 = 0;
    hm.u.ax.property_set.parameter2 = 0;
    hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);

    hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
                              HPI_ADAPTER_DELETE);
    hm.adapter_index = pa->adapter->index;
    hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);

    /* unmap PCI memory space, mapped during device init. */
    for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; ++idx)
        iounmap(pci.ap_mem_base[idx]);

    if (pa->irq)
        free_irq(pa->irq, pa);

    vfree(pa->p_buffer);

    dev_info(&pci_dev->dev,
             "remove %04x:%04x,%04x:%04x,%04x, HPI index %d\n",
             pci_dev->vendor, pci_dev->device,
             pci_dev->subsystem_vendor, pci_dev->subsystem_device,
             pci_dev->devfn, pa->adapter->index);

    memset(pa, 0, sizeof(*pa));
}
Example #5
int asihpi_hpi_release(struct file *file)
{
	struct hpi_message hm;
	struct hpi_response hr;

/* HPI_DEBUG_LOG(INFO,"hpi_release file %p, pid %d\n", file, current->pid); */
	/* close the subsystem just in case the application forgot to. */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_CLOSE);
	hpi_send_recv_ex(&hm, &hr, file);
	return 0;
}
void __init asihpi_init(void)
{
	struct hpi_message hm;
	struct hpi_response hr;

	memset(adapters, 0, sizeof(adapters));

	printk(KERN_INFO "ASIHPI driver " HPI_VER_STRING "\n");

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_DRIVER_LOAD);
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
}
Example #7
void __init asihpi_init(void)
{
	struct hpi_message hm;
	struct hpi_response hr;

	memset(adapters, 0, sizeof(adapters));

	printk(KERN_INFO "ASIHPI driver %d.%02d.%02d\n",
		HPI_VER_MAJOR(HPI_VER), HPI_VER_MINOR(HPI_VER),
		HPI_VER_RELEASE(HPI_VER));

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_DRIVER_LOAD);
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
}
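/* The hooks shown in these examples (asihpi_init/asihpi_exit and the adapter
 * probe/remove callbacks) are normally tied together by a struct pci_driver
 * and the module entry points.  The following is a minimal sketch only: the
 * PCI ID table name asihpi_pci_tbl is assumed, the example_* wrapper names
 * are invented here for illustration, and this is not the driver's actual
 * registration code.
 */
#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver example_asihpi_driver = {
	.name = "asihpi",
	.id_table = asihpi_pci_tbl,	/* assumed PCI device ID table */
	.probe = asihpi_adapter_probe,
	.remove = asihpi_adapter_remove,
};

static int __init example_asihpi_module_init(void)
{
	int err;

	asihpi_init();			/* sends HPI_SUBSYS_DRIVER_LOAD */
	err = pci_register_driver(&example_asihpi_driver);
	if (err < 0)
		asihpi_exit();		/* undo the subsystem load on failure */
	return err;
}

static void __exit example_asihpi_module_exit(void)
{
	pci_unregister_driver(&example_asihpi_driver);
	asihpi_exit();			/* sends HPI_SUBSYS_DRIVER_UNLOAD */
}

module_init(example_asihpi_module_init);
module_exit(example_asihpi_module_exit);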
Example #8
void __devexit asihpi_adapter_remove(struct pci_dev *pci_dev)
{
	int idx;
	struct hpi_message hm;
	struct hpi_response hr;
	struct hpi_adapter *pa;
	pa = (struct hpi_adapter *)pci_get_drvdata(pci_dev);

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_DELETE_ADAPTER);
	hm.adapter_index = pa->index;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);

	/* unmap PCI memory space, mapped during device init. */
	for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
		if (pa->ap_remapped_mem_base[idx]) {
			iounmap(pa->ap_remapped_mem_base[idx]);
			pa->ap_remapped_mem_base[idx] = NULL;
		}
	}

	if (pa->p_buffer) {
		pa->buffer_size = 0;
		vfree(pa->p_buffer);
	}

	pci_set_drvdata(pci_dev, NULL);
	/*
	   printk(KERN_INFO "PCI device (%04x:%04x,%04x:%04x,%04x),"
	   " HPI index # %d, removed.\n",
	   pci_dev->vendor, pci_dev->device,
	   pci_dev->subsystem_vendor,
	   pci_dev->subsystem_device, pci_dev->devfn,
	   pa->index);
	 */
}
Example #9
int __devinit asihpi_adapter_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *pci_id)
{
	int err, idx, nm;
	unsigned int memlen;
	struct hpi_message hm;
	struct hpi_response hr;
	struct hpi_adapter adapter;
	struct hpi_pci pci;

	memset(&adapter, 0, sizeof(adapter));

	printk(KERN_DEBUG "probe PCI device (%04x:%04x,%04x:%04x,%04x)\n",
		pci_dev->vendor, pci_dev->device, pci_dev->subsystem_vendor,
		pci_dev->subsystem_device, pci_dev->devfn);

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_CREATE_ADAPTER);
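	/* Pre-set a default error in the response so that, if nothing fills
	 * it in, hr.error is non-zero and the probe bails out below.
	 */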
	hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CREATE_ADAPTER,
		HPI_ERROR_PROCESSING_MESSAGE);

	hm.adapter_index = -1;	/* an invalid index */

	/* fill in HPI_PCI information from kernel provided information */
	adapter.pci = pci_dev;

	nm = HPI_MAX_ADAPTER_MEM_SPACES;

	for (idx = 0; idx < nm; idx++) {
		HPI_DEBUG_LOG(INFO, "resource %d %s %08llx-%08llx %04llx\n",
			idx, pci_dev->resource[idx].name,
			(unsigned long long)pci_resource_start(pci_dev, idx),
			(unsigned long long)pci_resource_end(pci_dev, idx),
			(unsigned long long)pci_resource_flags(pci_dev, idx));

		if (pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM) {
			memlen = pci_resource_len(pci_dev, idx);
			adapter.ap_remapped_mem_base[idx] =
				ioremap(pci_resource_start(pci_dev, idx),
				memlen);
			if (!adapter.ap_remapped_mem_base[idx]) {
				HPI_DEBUG_LOG(ERROR,
					"ioremap failed, aborting\n");
				/* unmap previously mapped pci mem space */
				goto err;
			}
		}

		pci.ap_mem_base[idx] = adapter.ap_remapped_mem_base[idx];
	}

	/* The Pci struct could be replaced with a direct pci_dev pointer on
	   Linux, wrapping accessor functions for the IDs etc., but it is
	   unclear whether that would work for the Windows build.
	 */
	pci.bus_number = pci_dev->bus->number;
	pci.vendor_id = (u16)pci_dev->vendor;
	pci.device_id = (u16)pci_dev->device;
	pci.subsys_vendor_id = (u16)(pci_dev->subsystem_vendor & 0xffff);
	pci.subsys_device_id = (u16)(pci_dev->subsystem_device & 0xffff);
	pci.device_number = pci_dev->devfn;
	pci.interrupt = pci_dev->irq;
	pci.p_os_data = pci_dev;

	hm.u.s.resource.bus_type = HPI_BUS_PCI;
	hm.u.s.resource.r.pci = &pci;

	/* call CreateAdapterObject on the relevant hpi module */
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
	if (hr.error)
		goto err;

	if (prealloc_stream_buf) {
		adapter.p_buffer = vmalloc(prealloc_stream_buf);
		if (!adapter.p_buffer) {
			HPI_DEBUG_LOG(ERROR,
				"HPI could not allocate "
				"kernel buffer size %d\n",
				prealloc_stream_buf);
			goto err;
		}
	}

	adapter.index = hr.u.s.adapter_index;
	adapter.type = hr.u.s.aw_adapter_list[adapter.index];
	hm.adapter_index = adapter.index;

	err = hpi_adapter_open(NULL, adapter.index);
	if (err)
		goto err;

	adapter.snd_card_asihpi = NULL;
	/* Note: the mutex cannot be initialized in the local 'adapter' and
	 * then copied into adapters[]; it must be initialized in place.
	 */
	adapters[hr.u.s.adapter_index] = adapter;
	mutex_init(&adapters[adapter.index].mutex);
	pci_set_drvdata(pci_dev, &adapters[adapter.index]);

	printk(KERN_INFO "probe found adapter ASI%04X HPI index #%d.\n",
		adapter.type, adapter.index);

	return 0;

err:
	for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
		if (adapter.ap_remapped_mem_base[idx]) {
			iounmap(adapter.ap_remapped_mem_base[idx]);
			adapter.ap_remapped_mem_base[idx] = NULL;
		}
	}

	if (adapter.p_buffer) {
		adapter.buffer_size = 0;
		vfree(adapter.p_buffer);
	}

	HPI_DEBUG_LOG(ERROR, "adapter_probe failed\n");
	return -ENODEV;
}
int asihpi_adapter_probe(struct pci_dev *pci_dev,
			 const struct pci_device_id *pci_id)
{
	int idx, nm;
	int adapter_index;
	unsigned int memlen;
	struct hpi_message hm;
	struct hpi_response hr;
	struct hpi_adapter adapter;
	struct hpi_pci pci;

	memset(&adapter, 0, sizeof(adapter));
	memset(&pci, 0, sizeof(pci));	/* err path relies on unset ap_mem_base[] being NULL */

	dev_printk(KERN_DEBUG, &pci_dev->dev,
		"probe %04x:%04x,%04x:%04x,%04x\n", pci_dev->vendor,
		pci_dev->device, pci_dev->subsystem_vendor,
		pci_dev->subsystem_device, pci_dev->devfn);

	if (pci_enable_device(pci_dev) < 0) {
		dev_err(&pci_dev->dev,
			"pci_enable_device failed, disabling device\n");
		return -EIO;
	}

	pci_set_master(pci_dev);	/* also sets latency timer if < 16 */

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_CREATE_ADAPTER);
	hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CREATE_ADAPTER,
		HPI_ERROR_PROCESSING_MESSAGE);

	hm.adapter_index = HPI_ADAPTER_INDEX_INVALID;

	nm = HPI_MAX_ADAPTER_MEM_SPACES;

	for (idx = 0; idx < nm; idx++) {
		HPI_DEBUG_LOG(INFO, "resource %d %pR\n", idx,
			&pci_dev->resource[idx]);

		if (pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM) {
			memlen = pci_resource_len(pci_dev, idx);
			pci.ap_mem_base[idx] =
				ioremap(pci_resource_start(pci_dev, idx),
				memlen);
			if (!pci.ap_mem_base[idx]) {
				HPI_DEBUG_LOG(ERROR,
					"ioremap failed, aborting\n");
				/* unmap previously mapped pci mem space */
				goto err;
			}
		}
	}

	pci.pci_dev = pci_dev;
	hm.u.s.resource.bus_type = HPI_BUS_PCI;
	hm.u.s.resource.r.pci = &pci;

	/* call CreateAdapterObject on the relevant hpi module */
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
	if (hr.error)
		goto err;

	adapter_index = hr.u.s.adapter_index;
	adapter.adapter = hpi_find_adapter(adapter_index);

	if (prealloc_stream_buf) {
		adapter.p_buffer = vmalloc(prealloc_stream_buf);
		if (!adapter.p_buffer) {
			HPI_DEBUG_LOG(ERROR,
				"HPI could not allocate "
				"kernel buffer size %d\n",
				prealloc_stream_buf);
			goto err;
		}
	}

	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter.adapter->index;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);

	if (hr.error)
		goto err;

	/* Note: the mutex cannot be initialized in the local 'adapter' and
	 * then copied into adapters[]; it must be initialized in place.
	 */
	adapters[adapter_index] = adapter;
	mutex_init(&adapters[adapter_index].mutex);
	pci_set_drvdata(pci_dev, &adapters[adapter_index]);

	dev_info(&pci_dev->dev, "probe succeeded for ASI%04X HPI index %d\n",
		 adapter.adapter->type, adapter_index);

	return 0;

err:
	for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
		if (pci.ap_mem_base[idx]) {
			iounmap(pci.ap_mem_base[idx]);
			pci.ap_mem_base[idx] = NULL;
		}
	}

	if (adapter.p_buffer) {
		adapter.buffer_size = 0;
		vfree(adapter.p_buffer);
	}

	HPI_DEBUG_LOG(ERROR, "adapter_probe failed\n");
	return -ENODEV;
}
Example #11
int asihpi_adapter_probe(struct pci_dev *pci_dev,
                         const struct pci_device_id *pci_id)
{
    int idx, nm, low_latency_mode = 0, irq_supported = 0;
    int adapter_index;
    unsigned int memlen;
    struct hpi_message hm;
    struct hpi_response hr;
    struct hpi_adapter adapter;
    struct hpi_pci pci;

    memset(&adapter, 0, sizeof(adapter));
    memset(&pci, 0, sizeof(pci));	/* err path relies on unset ap_mem_base[] being NULL */

    dev_printk(KERN_DEBUG, &pci_dev->dev,
               "probe %04x:%04x,%04x:%04x,%04x\n", pci_dev->vendor,
               pci_dev->device, pci_dev->subsystem_vendor,
               pci_dev->subsystem_device, pci_dev->devfn);

    if (pci_enable_device(pci_dev) < 0) {
        dev_err(&pci_dev->dev,
                "pci_enable_device failed, disabling device\n");
        return -EIO;
    }

    pci_set_master(pci_dev);	/* also sets latency timer if < 16 */

    hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
                              HPI_SUBSYS_CREATE_ADAPTER);
    hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CREATE_ADAPTER,
                      HPI_ERROR_PROCESSING_MESSAGE);

    hm.adapter_index = HPI_ADAPTER_INDEX_INVALID;

    nm = HPI_MAX_ADAPTER_MEM_SPACES;

    for (idx = 0; idx < nm; idx++) {
        HPI_DEBUG_LOG(INFO, "resource %d %pR\n", idx,
                      &pci_dev->resource[idx]);

        if (pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM) {
            memlen = pci_resource_len(pci_dev, idx);
            pci.ap_mem_base[idx] =
                ioremap(pci_resource_start(pci_dev, idx),
                        memlen);
            if (!pci.ap_mem_base[idx]) {
                HPI_DEBUG_LOG(ERROR,
                              "ioremap failed, aborting\n");
                /* unmap previously mapped pci mem space */
                goto err;
            }
        }
    }

    pci.pci_dev = pci_dev;
    hm.u.s.resource.bus_type = HPI_BUS_PCI;
    hm.u.s.resource.r.pci = &pci;

    /* call CreateAdapterObject on the relevant hpi module */
    hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
    if (hr.error)
        goto err;

    adapter_index = hr.u.s.adapter_index;
    adapter.adapter = hpi_find_adapter(adapter_index);

    if (prealloc_stream_buf) {
        adapter.p_buffer = vmalloc(prealloc_stream_buf);
        if (!adapter.p_buffer) {
            HPI_DEBUG_LOG(ERROR,
                          "HPI could not allocate "
                          "kernel buffer size %d\n",
                          prealloc_stream_buf);
            goto err;
        }
    }

    hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
                              HPI_ADAPTER_OPEN);
    hm.adapter_index = adapter.adapter->index;
    hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);

    if (hr.error) {
        HPI_DEBUG_LOG(ERROR, "HPI_ADAPTER_OPEN failed, aborting\n");
        goto err;
    }

    /* Check if current mode == Low Latency mode */
    hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
                              HPI_ADAPTER_GET_MODE);
    hm.adapter_index = adapter.adapter->index;
    hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);

    if (!hr.error
            && hr.u.ax.mode.adapter_mode == HPI_ADAPTER_MODE_LOW_LATENCY)
        low_latency_mode = 1;
    else
        dev_info(&pci_dev->dev,
                 "Adapter at index %d is not in low latency mode\n",
                 adapter.adapter->index);

    /* Check if IRQs are supported */
    hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
                              HPI_ADAPTER_GET_PROPERTY);
    hm.adapter_index = adapter.adapter->index;
    hm.u.ax.property_set.property = HPI_ADAPTER_PROPERTY_SUPPORTS_IRQ;
    hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
    if (hr.error || !hr.u.ax.property_get.parameter1) {
        dev_info(&pci_dev->dev,
                 "IRQs not supported by adapter at index %d\n",
                 adapter.adapter->index);
    } else {
        irq_supported = 1;
    }

    /* Note: the mutex cannot be initialized in the local 'adapter' and
     * then copied into adapters[]; it must be initialized in place.
     */
    adapters[adapter_index] = adapter;
    mutex_init(&adapters[adapter_index].mutex);
    pci_set_drvdata(pci_dev, &adapters[adapter_index]);

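    /* Interrupt-driven operation needs both low-latency mode and DSP IRQ
     * support; otherwise the driver stays in polled mode (else branch).
     */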
    if (low_latency_mode && irq_supported) {
        if (!adapter.adapter->irq_query_and_clear) {
            dev_err(&pci_dev->dev,
                    "no IRQ handler for adapter %d, aborting\n",
                    adapter.adapter->index);
            goto err;
        }

        /* Disable IRQ generation on DSP side by setting the rate to 0 */
        hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
                                  HPI_ADAPTER_SET_PROPERTY);
        hm.adapter_index = adapter.adapter->index;
        hm.u.ax.property_set.property = HPI_ADAPTER_PROPERTY_IRQ_RATE;
        hm.u.ax.property_set.parameter1 = 0;
        hm.u.ax.property_set.parameter2 = 0;
        hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
        if (hr.error) {
            HPI_DEBUG_LOG(ERROR,
                          "HPI_ADAPTER_SET_PROPERTY failed, aborting\n");
            goto err;
        }

        /* Note: asihpi_isr may be invoked as soon as request_irq installs the handler */
        if (request_irq(pci_dev->irq, asihpi_isr, IRQF_SHARED,
                        "asihpi", &adapters[adapter_index])) {
            dev_err(&pci_dev->dev, "request_irq(%d) failed\n",
                    pci_dev->irq);
            goto err;
        }

        adapters[adapter_index].interrupt_mode = 1;

        dev_info(&pci_dev->dev, "using irq %d\n", pci_dev->irq);
        adapters[adapter_index].irq = pci_dev->irq;
    } else {
        dev_info(&pci_dev->dev, "using polled mode\n");
    }

    dev_info(&pci_dev->dev, "probe succeeded for ASI%04X HPI index %d\n",
             adapter.adapter->type, adapter_index);

    return 0;

err:
    for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
        if (pci.ap_mem_base[idx]) {
            iounmap(pci.ap_mem_base[idx]);
            pci.ap_mem_base[idx] = NULL;
        }
    }

    if (adapter.p_buffer) {
        adapter.buffer_size = 0;
        vfree(adapter.p_buffer);
    }

    HPI_DEBUG_LOG(ERROR, "adapter_probe failed\n");
    return -ENODEV;
}