static void subsys_message(struct hpi_message *phm, struct hpi_response *phr)
{
	hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function, 0);

	switch (phm->function) {
	case HPI_SUBSYS_OPEN:
	case HPI_SUBSYS_CLOSE:
	case HPI_SUBSYS_DRIVER_UNLOAD:
		break;
	case HPI_SUBSYS_DRIVER_LOAD:
		wipe_adapter_list();
		hpios_alistlock_init(&adapters);
		break;
	case HPI_SUBSYS_GET_ADAPTER:
		subsys_get_adapter(phm, phr);
		break;
	case HPI_SUBSYS_GET_NUM_ADAPTERS:
		phr->u.s.num_adapters = adapters.gw_num_adapters;
		break;
	case HPI_SUBSYS_CREATE_ADAPTER:
		break;
	default:
		phr->error = HPI_ERROR_INVALID_FUNC;
		break;
	}
}
void hpi_init_message_response(struct hpi_message *phm,
	struct hpi_response *phr, u16 object, u16 function)
{
	hpi_init_message(phm, object, function);
	/* default error return if the response is not filled in by the callee */
	hpi_init_response(phr, object, function,
		HPI_ERROR_PROCESSING_MESSAGE);
}
static inline void hw_entry_point(struct hpi_message *phm,
	struct hpi_response *phr)
{
	if ((phm->adapter_index < HPI_MAX_ADAPTERS)
		&& hpi_entry_points[phm->adapter_index])
		hpi_entry_points[phm->adapter_index] (phm, phr);
	else
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_PROCESSING_MESSAGE);
}
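/*
 * Illustrative sketch only (not part of the driver): one way a
 * family-specific handler such as HPI_6000() could be placed in the
 * hpi_entry_points[] table that hw_entry_point() above indexes by
 * adapter_index. The table and the bounds test come from the code shown;
 * the helper name, the "hpi_handler_func" typedef name, and the point at
 * which it would be called are assumptions.
 */
static int hpi_register_entry_point(u16 adapter_index, hpi_handler_func *func)
{
	if (adapter_index >= HPI_MAX_ADAPTERS || !func)
		return -EINVAL;	/* index out of range or no handler supplied */

	hpi_entry_points[adapter_index] = func;
	return 0;
}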
long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hpi_ioctl_linux __user *phpi_ioctl_data;
	void __user *puhm;
	void __user *puhr;
	union hpi_message_buffer_v1 *hm;
	union hpi_response_buffer_v1 *hr;
	u16 res_max_size;
	u32 uncopied_bytes;
	struct hpi_adapter *pa = NULL;
	int err = 0;

	if (cmd != HPI_IOCTL_LINUX)
		return -EINVAL;

	hm = kmalloc(sizeof(*hm), GFP_KERNEL);
	hr = kmalloc(sizeof(*hr), GFP_KERNEL);
	if (!hm || !hr) {
		err = -ENOMEM;
		goto out;
	}

	phpi_ioctl_data = (struct hpi_ioctl_linux __user *)arg;

	/* Read the message and response pointers from user space. */
	if (get_user(puhm, &phpi_ioctl_data->phm)
		|| get_user(puhr, &phpi_ioctl_data->phr)) {
		err = -EFAULT;
		goto out;
	}

	/* Now read the message size and data from user space. */
	if (get_user(hm->h.size, (u16 __user *)puhm)) {
		err = -EFAULT;
		goto out;
	}
	if (hm->h.size > sizeof(*hm))
		hm->h.size = sizeof(*hm);

	/*printk(KERN_INFO "message size %d\n", hm->h.wSize); */

	uncopied_bytes = copy_from_user(hm, puhm, hm->h.size);
	if (uncopied_bytes) {
		HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes);
		err = -EFAULT;
		goto out;
	}

	if (get_user(res_max_size, (u16 __user *)puhr)) {
		err = -EFAULT;
		goto out;
	}
	/* printk(KERN_INFO "user response size %d\n", res_max_size); */
	if (res_max_size < sizeof(struct hpi_response_header)) {
		HPI_DEBUG_LOG(WARNING, "small res size %d\n", res_max_size);
		err = -EFAULT;
		goto out;
	}

	hr->h.size = 0;
	if (hm->h.object == HPI_OBJ_SUBSYSTEM) {
		switch (hm->h.function) {
		case HPI_SUBSYS_CREATE_ADAPTER:
		case HPI_SUBSYS_DELETE_ADAPTER:
			/* Application must not use these functions! */
			hr->h.size = sizeof(hr->h);
			hr->h.error = HPI_ERROR_INVALID_OPERATION;
			hr->h.function = hm->h.function;
			uncopied_bytes = copy_to_user(puhr, hr, hr->h.size);
			if (uncopied_bytes)
				err = -EFAULT;
			else
				err = 0;
			goto out;
		default:
			hpi_send_recv_f(&hm->m0, &hr->r0, file);
		}
	} else {
		u16 __user *ptr = NULL;
		u32 size = 0;
		/* -1=no data 0=read from user mem, 1=write to user mem */
		int wrflag = -1;
		u32 adapter = hm->h.adapter_index;

		/* Validate the index before touching adapters[]. */
		if ((adapter >= HPI_MAX_ADAPTERS)
			|| (!adapters[adapter].type)) {
			hpi_init_response(&hr->r0, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_OPEN,
				HPI_ERROR_BAD_ADAPTER_NUMBER);

			uncopied_bytes =
				copy_to_user(puhr, hr, sizeof(hr->h));
			if (uncopied_bytes)
				err = -EFAULT;
			else
				err = 0;
			goto out;
		}
		pa = &adapters[adapter];

		if (mutex_lock_interruptible(&adapters[adapter].mutex)) {
			err = -EINTR;
			goto out;
		}

		/* Dig out any pointers embedded in the message. */
		switch (hm->h.function) {
		case HPI_OSTREAM_WRITE:
		case HPI_ISTREAM_READ:{
				/* Yes, sparse, this is correct. */
				ptr = (u16 __user *)hm->m0.u.d.u.data.pb_data;
				size = hm->m0.u.d.u.data.data_size;

				/* Allocate buffer according to application
				   request. ?Is it better to alloc/free for
				   the duration of the transaction?
				 */
				if (pa->buffer_size < size) {
					HPI_DEBUG_LOG(DEBUG,
						"realloc adapter %d stream "
						"buffer from %zd to %d\n",
						hm->h.adapter_index,
						pa->buffer_size, size);
					if (pa->p_buffer) {
						pa->buffer_size = 0;
						vfree(pa->p_buffer);
					}
					pa->p_buffer = vmalloc(size);
					if (pa->p_buffer)
						pa->buffer_size = size;
					else {
						HPI_DEBUG_LOG(ERROR,
							"HPI could not allocate "
							"stream buffer size %d\n",
							size);

						mutex_unlock(&adapters
							[adapter].mutex);
						err = -EINVAL;
						goto out;
					}
				}

				hm->m0.u.d.u.data.pb_data = pa->p_buffer;
				if (hm->h.function == HPI_ISTREAM_READ)
					/* from card, WRITE to user mem */
					wrflag = 1;
				else
					wrflag = 0;
				break;
			}

		default:
			size = 0;
			break;
		}

		if (size && (wrflag == 0)) {
			uncopied_bytes =
				copy_from_user(pa->p_buffer, ptr, size);
			if (uncopied_bytes)
				HPI_DEBUG_LOG(WARNING,
					"missed %d of %d "
					"bytes from user\n", uncopied_bytes,
					size);
		}

		hpi_send_recv_f(&hm->m0, &hr->r0, file);

		if (size && (wrflag == 1)) {
			uncopied_bytes =
				copy_to_user(ptr, pa->p_buffer, size);
			if (uncopied_bytes)
				HPI_DEBUG_LOG(WARNING,
					"missed %d of %d "
					"bytes to user\n", uncopied_bytes,
					size);
		}

		mutex_unlock(&adapters[adapter].mutex);
	}

	/* on return response size must be set */
	/*printk(KERN_INFO "response size %d\n", hr->h.wSize); */
	if (!hr->h.size) {
		HPI_DEBUG_LOG(ERROR, "response zero size\n");
		err = -EFAULT;
		goto out;
	}

	if (hr->h.size > res_max_size) {
		HPI_DEBUG_LOG(ERROR, "response too big %d %d\n", hr->h.size,
			res_max_size);
		/*HPI_DEBUG_MESSAGE(ERROR, hm); */
		err = -EFAULT;
		goto out;
	}

	uncopied_bytes = copy_to_user(puhr, hr, hr->h.size);
	if (uncopied_bytes) {
		HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes);
		err = -EFAULT;
		goto out;
	}

out:
	kfree(hm);
	kfree(hr);
	return err;
}
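/*
 * For context, a minimal user-space sketch of how asihpi_hpi_ioctl() above
 * expects to be driven, inferred from its get_user()/copy_*_user() handling:
 * the ioctl argument is a struct hpi_ioctl_linux holding pointers to a
 * message buffer and a response buffer, and the driver reads the leading u16
 * of each buffer as a size field (message size and maximum response size).
 * The struct is redeclared here only for illustration; the caller is assumed
 * to pass the real HPI_IOCTL_LINUX value from the driver's header, and the
 * function name and buffer sizes are assumptions.
 */
#ifndef __KERNEL__
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

struct hpi_ioctl_linux_example {	/* mirrors struct hpi_ioctl_linux */
	void *phm;	/* message buffer */
	void *phr;	/* response buffer */
};

static int hpi_ioctl_round_trip(int fd, unsigned long hpi_ioctl_cmd,
	void *msg, uint16_t msg_size, void *resp, uint16_t resp_max)
{
	struct hpi_ioctl_linux_example io;

	/* First u16 of each buffer is the size field the driver reads. */
	memcpy(msg, &msg_size, sizeof(msg_size));
	memcpy(resp, &resp_max, sizeof(resp_max));

	io.phm = msg;
	io.phr = resp;
	return ioctl(fd, hpi_ioctl_cmd, &io);
}
#endif /* !__KERNEL__ */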
int __devinit asihpi_adapter_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *pci_id)
{
	int err, idx, nm;
	unsigned int memlen;
	struct hpi_message hm;
	struct hpi_response hr;
	struct hpi_adapter adapter;
	struct hpi_pci pci;

	memset(&adapter, 0, sizeof(adapter));

	printk(KERN_DEBUG "probe PCI device (%04x:%04x,%04x:%04x,%04x)\n",
		pci_dev->vendor, pci_dev->device, pci_dev->subsystem_vendor,
		pci_dev->subsystem_device, pci_dev->devfn);

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_CREATE_ADAPTER);
	hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CREATE_ADAPTER,
		HPI_ERROR_PROCESSING_MESSAGE);

	hm.adapter_index = -1;	/* an invalid index */

	/* fill in HPI_PCI information from kernel provided information */
	adapter.pci = pci_dev;

	nm = HPI_MAX_ADAPTER_MEM_SPACES;

	for (idx = 0; idx < nm; idx++) {
		HPI_DEBUG_LOG(INFO, "resource %d %s %08llx-%08llx %04llx\n",
			idx, pci_dev->resource[idx].name,
			(unsigned long long)pci_resource_start(pci_dev, idx),
			(unsigned long long)pci_resource_end(pci_dev, idx),
			(unsigned long long)pci_resource_flags(pci_dev, idx));

		if (pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM) {
			memlen = pci_resource_len(pci_dev, idx);
			adapter.ap_remapped_mem_base[idx] =
				ioremap(pci_resource_start(pci_dev, idx),
				memlen);
			if (!adapter.ap_remapped_mem_base[idx]) {
				HPI_DEBUG_LOG(ERROR,
					"ioremap failed, aborting\n");
				/* unmap previously mapped pci mem space */
				goto err;
			}
		}

		pci.ap_mem_base[idx] = adapter.ap_remapped_mem_base[idx];
	}

	/* could replace Pci with direct pointer to pci_dev for linux
	   Instead wrap accessor functions for IDs etc.
	   Would it work for windows?
	 */
	pci.bus_number = pci_dev->bus->number;
	pci.vendor_id = (u16)pci_dev->vendor;
	pci.device_id = (u16)pci_dev->device;
	pci.subsys_vendor_id = (u16)(pci_dev->subsystem_vendor & 0xffff);
	pci.subsys_device_id = (u16)(pci_dev->subsystem_device & 0xffff);
	pci.device_number = pci_dev->devfn;
	pci.interrupt = pci_dev->irq;
	pci.p_os_data = pci_dev;

	hm.u.s.resource.bus_type = HPI_BUS_PCI;
	hm.u.s.resource.r.pci = &pci;

	/* call CreateAdapterObject on the relevant hpi module */
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
	if (hr.error)
		goto err;

	if (prealloc_stream_buf) {
		adapter.p_buffer = vmalloc(prealloc_stream_buf);
		if (!adapter.p_buffer) {
			HPI_DEBUG_LOG(ERROR,
				"HPI could not allocate "
				"kernel buffer size %d\n",
				prealloc_stream_buf);
			goto err;
		}
	}

	adapter.index = hr.u.s.adapter_index;
	adapter.type = hr.u.s.aw_adapter_list[adapter.index];
	hm.adapter_index = adapter.index;

	err = hpi_adapter_open(NULL, adapter.index);
	if (err)
		goto err;

	adapter.snd_card_asihpi = NULL;
	/* WARNING can't init mutex in 'adapter'
	 * and then copy it to adapters[] ?!?!
	 */
	adapters[hr.u.s.adapter_index] = adapter;
	mutex_init(&adapters[adapter.index].mutex);
	pci_set_drvdata(pci_dev, &adapters[adapter.index]);

	printk(KERN_INFO "probe found adapter ASI%04X HPI index #%d.\n",
		adapter.type, adapter.index);

	return 0;

err:
	for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
		if (adapter.ap_remapped_mem_base[idx]) {
			iounmap(adapter.ap_remapped_mem_base[idx]);
			adapter.ap_remapped_mem_base[idx] = NULL;
		}
	}

	if (adapter.p_buffer) {
		adapter.buffer_size = 0;
		vfree(adapter.p_buffer);
	}

	HPI_DEBUG_LOG(ERROR, "adapter_probe failed\n");
	return -ENODEV;
}
int asihpi_adapter_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *pci_id)
{
	int idx, nm;
	int adapter_index;
	unsigned int memlen;
	struct hpi_message hm;
	struct hpi_response hr;
	struct hpi_adapter adapter;
	struct hpi_pci pci;

	memset(&adapter, 0, sizeof(adapter));
	memset(&pci, 0, sizeof(pci));	/* so the error path only unmaps what was mapped */

	dev_printk(KERN_DEBUG, &pci_dev->dev,
		"probe %04x:%04x,%04x:%04x,%04x\n", pci_dev->vendor,
		pci_dev->device, pci_dev->subsystem_vendor,
		pci_dev->subsystem_device, pci_dev->devfn);

	if (pci_enable_device(pci_dev) < 0) {
		dev_err(&pci_dev->dev,
			"pci_enable_device failed, disabling device\n");
		return -EIO;
	}
	pci_set_master(pci_dev);	/* also sets latency timer if < 16 */

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_CREATE_ADAPTER);
	hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CREATE_ADAPTER,
		HPI_ERROR_PROCESSING_MESSAGE);

	hm.adapter_index = HPI_ADAPTER_INDEX_INVALID;

	nm = HPI_MAX_ADAPTER_MEM_SPACES;

	for (idx = 0; idx < nm; idx++) {
		HPI_DEBUG_LOG(INFO, "resource %d %pR\n", idx,
			&pci_dev->resource[idx]);

		if (pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM) {
			memlen = pci_resource_len(pci_dev, idx);
			pci.ap_mem_base[idx] =
				ioremap(pci_resource_start(pci_dev, idx),
				memlen);
			if (!pci.ap_mem_base[idx]) {
				HPI_DEBUG_LOG(ERROR,
					"ioremap failed, aborting\n");
				/* unmap previously mapped pci mem space */
				goto err;
			}
		}
	}

	pci.pci_dev = pci_dev;

	hm.u.s.resource.bus_type = HPI_BUS_PCI;
	hm.u.s.resource.r.pci = &pci;

	/* call CreateAdapterObject on the relevant hpi module */
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
	if (hr.error)
		goto err;

	adapter_index = hr.u.s.adapter_index;
	adapter.adapter = hpi_find_adapter(adapter_index);

	if (prealloc_stream_buf) {
		adapter.p_buffer = vmalloc(prealloc_stream_buf);
		if (!adapter.p_buffer) {
			HPI_DEBUG_LOG(ERROR,
				"HPI could not allocate "
				"kernel buffer size %d\n",
				prealloc_stream_buf);
			goto err;
		}
	}

	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter.adapter->index;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);

	if (hr.error)
		goto err;

	/* WARNING can't init mutex in 'adapter'
	 * and then copy it to adapters[] ?!?!
	 */
	adapters[adapter_index] = adapter;
	mutex_init(&adapters[adapter_index].mutex);
	pci_set_drvdata(pci_dev, &adapters[adapter_index]);

	dev_info(&pci_dev->dev, "probe succeeded for ASI%04X HPI index %d\n",
		adapter.adapter->type, adapter_index);

	return 0;

err:
	for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
		if (pci.ap_mem_base[idx]) {
			iounmap(pci.ap_mem_base[idx]);
			pci.ap_mem_base[idx] = NULL;
		}
	}

	if (adapter.p_buffer) {
		adapter.buffer_size = 0;
		vfree(adapter.p_buffer);
	}

	HPI_DEBUG_LOG(ERROR, "adapter_probe failed\n");
	return -ENODEV;
}
/** HPI_6000()
 * Entry point from HPIMAN
 * All calls to the HPI start here
 */
void HPI_6000(struct hpi_message *phm, struct hpi_response *phr)
{
	struct hpi_adapter_obj *pao = NULL;

	if (phm->object != HPI_OBJ_SUBSYSTEM) {
		pao = hpi_find_adapter(phm->adapter_index);
		if (!pao) {
			hpi_init_response(phr, phm->object, phm->function,
				HPI_ERROR_BAD_ADAPTER_NUMBER);
			HPI_DEBUG_LOG(DEBUG, "invalid adapter index: %d \n",
				phm->adapter_index);
			return;
		}

		/* Don't even try to communicate with crashed DSP */
		if (pao->dsp_crashed >= 10) {
			hpi_init_response(phr, phm->object, phm->function,
				HPI_ERROR_DSP_HARDWARE);
			HPI_DEBUG_LOG(DEBUG, "adapter %d dsp crashed\n",
				phm->adapter_index);
			return;
		}
	}

	/* Init default response including the size field */
	if (phm->function != HPI_SUBSYS_CREATE_ADAPTER)
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_PROCESSING_MESSAGE);

	switch (phm->type) {
	case HPI_TYPE_REQUEST:
		switch (phm->object) {
		case HPI_OBJ_SUBSYSTEM:
			subsys_message(phm, phr);
			break;

		case HPI_OBJ_ADAPTER:
			phr->size = sizeof(struct hpi_response_header) +
				sizeof(struct hpi_adapter_res);
			adapter_message(pao, phm, phr);
			break;

		case HPI_OBJ_CONTROL:
			control_message(pao, phm, phr);
			break;

		case HPI_OBJ_OSTREAM:
			outstream_message(pao, phm, phr);
			break;

		case HPI_OBJ_ISTREAM:
			instream_message(pao, phm, phr);
			break;

		default:
			hw_message(pao, phm, phr);
			break;
		}
		break;

	default:
		phr->error = HPI_ERROR_INVALID_TYPE;
		break;
	}
}
/** HPI_6000()
 * Entry point from HPIMAN
 * All calls to the HPI start here
 */
void HPI_6000(struct hpi_message *phm, struct hpi_response *phr)
{
	struct hpi_adapter_obj *pao = NULL;

	/* Subsystem messages get executed by every HPI.
	 * All other messages are ignored unless the adapter index matches
	 * an adapter in the HPI.
	 */
	/*HPI_DEBUG_LOG(DEBUG, "O %d,F %x\n", phm->wObject, phm->wFunction); */

	/* if Dsp has crashed then do not communicate with it any more */
	if (phm->object != HPI_OBJ_SUBSYSTEM) {
		pao = hpi_find_adapter(phm->adapter_index);
		if (!pao) {
			HPI_DEBUG_LOG(DEBUG,
				" %d,%d refused, for another HPI?\n",
				phm->object, phm->function);
			return;
		}

		if (pao->dsp_crashed >= 10) {
			hpi_init_response(phr, phm->object, phm->function,
				HPI_ERROR_DSP_HARDWARE);
			HPI_DEBUG_LOG(DEBUG, " %d,%d dsp crashed.\n",
				phm->object, phm->function);
			return;
		}
	}

	/* Init default response including the size field */
	if (phm->function != HPI_SUBSYS_CREATE_ADAPTER)
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_PROCESSING_MESSAGE);

	switch (phm->type) {
	case HPI_TYPE_MESSAGE:
		switch (phm->object) {
		case HPI_OBJ_SUBSYSTEM:
			subsys_message(phm, phr);
			break;

		case HPI_OBJ_ADAPTER:
			phr->size = sizeof(struct hpi_response_header) +
				sizeof(struct hpi_adapter_res);
			adapter_message(pao, phm, phr);
			break;

		case HPI_OBJ_CONTROL:
			control_message(pao, phm, phr);
			break;

		case HPI_OBJ_OSTREAM:
			outstream_message(pao, phm, phr);
			break;

		case HPI_OBJ_ISTREAM:
			instream_message(pao, phm, phr);
			break;

		default:
			hw_message(pao, phm, phr);
			break;
		}
		break;

	default:
		phr->error = HPI_ERROR_INVALID_TYPE;
		break;
	}
}
int asihpi_adapter_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *pci_id)
{
	int idx, nm, low_latency_mode = 0, irq_supported = 0;
	int adapter_index;
	unsigned int memlen;
	struct hpi_message hm;
	struct hpi_response hr;
	struct hpi_adapter adapter;
	struct hpi_pci pci;

	memset(&adapter, 0, sizeof(adapter));
	memset(&pci, 0, sizeof(pci));	/* so the error path only unmaps what was mapped */

	dev_printk(KERN_DEBUG, &pci_dev->dev,
		"probe %04x:%04x,%04x:%04x,%04x\n", pci_dev->vendor,
		pci_dev->device, pci_dev->subsystem_vendor,
		pci_dev->subsystem_device, pci_dev->devfn);

	if (pci_enable_device(pci_dev) < 0) {
		dev_err(&pci_dev->dev,
			"pci_enable_device failed, disabling device\n");
		return -EIO;
	}
	pci_set_master(pci_dev);	/* also sets latency timer if < 16 */

	hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
		HPI_SUBSYS_CREATE_ADAPTER);
	hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CREATE_ADAPTER,
		HPI_ERROR_PROCESSING_MESSAGE);

	hm.adapter_index = HPI_ADAPTER_INDEX_INVALID;

	nm = HPI_MAX_ADAPTER_MEM_SPACES;

	for (idx = 0; idx < nm; idx++) {
		HPI_DEBUG_LOG(INFO, "resource %d %pR\n", idx,
			&pci_dev->resource[idx]);

		if (pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM) {
			memlen = pci_resource_len(pci_dev, idx);
			pci.ap_mem_base[idx] =
				ioremap(pci_resource_start(pci_dev, idx),
				memlen);
			if (!pci.ap_mem_base[idx]) {
				HPI_DEBUG_LOG(ERROR,
					"ioremap failed, aborting\n");
				/* unmap previously mapped pci mem space */
				goto err;
			}
		}
	}

	pci.pci_dev = pci_dev;

	hm.u.s.resource.bus_type = HPI_BUS_PCI;
	hm.u.s.resource.r.pci = &pci;

	/* call CreateAdapterObject on the relevant hpi module */
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
	if (hr.error)
		goto err;

	adapter_index = hr.u.s.adapter_index;
	adapter.adapter = hpi_find_adapter(adapter_index);

	if (prealloc_stream_buf) {
		adapter.p_buffer = vmalloc(prealloc_stream_buf);
		if (!adapter.p_buffer) {
			HPI_DEBUG_LOG(ERROR,
				"HPI could not allocate "
				"kernel buffer size %d\n",
				prealloc_stream_buf);
			goto err;
		}
	}

	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter.adapter->index;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);

	if (hr.error) {
		HPI_DEBUG_LOG(ERROR, "HPI_ADAPTER_OPEN failed, aborting\n");
		goto err;
	}

	/* Check if current mode == Low Latency mode */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_GET_MODE);
	hm.adapter_index = adapter.adapter->index;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);

	if (!hr.error
		&& hr.u.ax.mode.adapter_mode == HPI_ADAPTER_MODE_LOW_LATENCY)
		low_latency_mode = 1;
	else
		dev_info(&pci_dev->dev,
			"Adapter at index %d is not in low latency mode\n",
			adapter.adapter->index);

	/* Check if IRQs are supported */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_GET_PROPERTY);
	hm.adapter_index = adapter.adapter->index;
	hm.u.ax.property_set.property = HPI_ADAPTER_PROPERTY_SUPPORTS_IRQ;
	hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
	if (hr.error || !hr.u.ax.property_get.parameter1) {
		dev_info(&pci_dev->dev,
			"IRQs not supported by adapter at index %d\n",
			adapter.adapter->index);
	} else {
		irq_supported = 1;
	}

	/* WARNING can't init mutex in 'adapter'
	 * and then copy it to adapters[] ?!?!
	 */
	adapters[adapter_index] = adapter;
	mutex_init(&adapters[adapter_index].mutex);
	pci_set_drvdata(pci_dev, &adapters[adapter_index]);

	if (low_latency_mode && irq_supported) {
		if (!adapter.adapter->irq_query_and_clear) {
			dev_err(&pci_dev->dev,
				"no IRQ handler for adapter %d, aborting\n",
				adapter.adapter->index);
			goto err;
		}

		/* Disable IRQ generation on DSP side by setting the rate to 0 */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
			HPI_ADAPTER_SET_PROPERTY);
		hm.adapter_index = adapter.adapter->index;
		hm.u.ax.property_set.property = HPI_ADAPTER_PROPERTY_IRQ_RATE;
		hm.u.ax.property_set.parameter1 = 0;
		hm.u.ax.property_set.parameter2 = 0;
		hpi_send_recv_ex(&hm, &hr, HOWNER_KERNEL);
		if (hr.error) {
			HPI_DEBUG_LOG(ERROR,
				"HPI_ADAPTER_SET_PROPERTY failed, aborting\n");
			goto err;
		}

		/* Note: request_irq calls asihpi_isr here */
		if (request_irq(pci_dev->irq, asihpi_isr, IRQF_SHARED,
				"asihpi", &adapters[adapter_index])) {
			dev_err(&pci_dev->dev, "request_irq(%d) failed\n",
				pci_dev->irq);
			goto err;
		}
		adapters[adapter_index].interrupt_mode = 1;

		dev_info(&pci_dev->dev, "using irq %d\n", pci_dev->irq);
		adapters[adapter_index].irq = pci_dev->irq;
	} else {
		dev_info(&pci_dev->dev, "using polled mode\n");
	}

	dev_info(&pci_dev->dev, "probe succeeded for ASI%04X HPI index %d\n",
		adapter.adapter->type, adapter_index);

	return 0;

err:
	for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
		if (pci.ap_mem_base[idx]) {
			iounmap(pci.ap_mem_base[idx]);
			pci.ap_mem_base[idx] = NULL;
		}
	}

	if (adapter.p_buffer) {
		adapter.buffer_size = 0;
		vfree(adapter.p_buffer);
	}

	HPI_DEBUG_LOG(ERROR, "adapter_probe failed\n");
	return -ENODEV;
}