static irqreturn_t aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct aac_dev *dev = dev_id;

	dprintk((KERN_DEBUG "aac_rx_intr(%d,%p,%p)\n", irq, dev_id, regs));
	if (dev->new_comm_interface) {
		u32 Index = rx_readl(dev, MUnit.OutboundQueue);
		if (Index == 0xFFFFFFFFL)
			Index = rx_readl(dev, MUnit.OutboundQueue);
		if (Index != 0xFFFFFFFFL) {
			do {
				if (aac_intr_normal(dev, Index)) {
					rx_writel(dev, MUnit.OutboundQueue, Index);
					rx_writel(dev, MUnit.ODR,
						DoorBellAdapterNormRespReady);
				}
				Index = rx_readl(dev, MUnit.OutboundQueue);
			} while (Index != 0xFFFFFFFFL);
			return IRQ_HANDLED;
		}
	} else {
		unsigned long bellbits;
		u8 intstat;
		intstat = rx_readb(dev, MUnit.OISR);
		/*
		 * Read mask and invert because drawbridge is reversed.
		 * This allows us to only service interrupts that have
		 * been enabled.
		 * Check to see if this is our interrupt.  If it isn't just return
		 */
		if (intstat & ~(dev->OIMR)) {
			bellbits = rx_readl(dev, OutboundDoorbellReg);
			if (bellbits & DoorBellPrintfReady) {
				aac_printf(dev, rx_readl(dev, IndexRegs.Mailbox[5]));
				rx_writel(dev, MUnit.ODR, DoorBellPrintfReady);
				rx_writel(dev, InboundDoorbellReg, DoorBellPrintfDone);
			}
			else if (bellbits & DoorBellAdapterNormCmdReady) {
				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
				aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
			}
			else if (bellbits & DoorBellAdapterNormRespReady) {
				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
				aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
			}
			else if (bellbits & DoorBellAdapterNormCmdNotFull) {
				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
			}
			else if (bellbits & DoorBellAdapterNormRespNotFull) {
				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
				rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
			}
			return IRQ_HANDLED;
		}
	}
	return IRQ_NONE;
}
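
/**
 * aac_rx_restart_adapter
 * @dev: adapter to restart
 * @bled: adapter kernel panic status, or a negative error code
 *
 * Attempt to reset the adapter: issue IOP_RESET_ALWAYS and, if needed,
 * IOP_RESET as synchronous commands; if those fail or the firmware answers
 * 0x3803000F (USE_OTHER_METHOD), write 3 to MUnit.reserved2 and wait five
 * seconds.  On success the minimum startup_timeout is raised to 300 seconds
 * and 0 is returned.
 */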
static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
{
	u32 var;

	if (!(dev->supplement_adapter_info.SupportedOptions2 &
	      AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
		if (bled)
			printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
				dev->name, dev->id, bled);
		else {
			bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
				0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
			if (!bled && (var != 0x00000001) && (var != 0x3803000F))
				bled = -EINVAL;
		}
		if (bled && (bled != -ETIMEDOUT))
			bled = aac_adapter_sync_cmd(dev, IOP_RESET,
				0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
		if (bled && (bled != -ETIMEDOUT))
			return -EINVAL;
	}
	if (bled || (var == 0x3803000F)) { /* USE_OTHER_METHOD */
		rx_writel(dev, MUnit.reserved2, 3);
		msleep(5000); /* Delay 5 seconds */
		var = 0x00000001;
	}
	if (var != 0x00000001)
		return -EINVAL;
	if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
		return -ENODEV;
	if (startup_timeout < 300)
		startup_timeout = 300;
	return 0;
}

/**
 * aac_rx_deliver_message
 * @fib: fib to issue
 *
 * Will send a fib, returning 0 if successful.
 */
static int aac_rx_deliver_message(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 Index;
	u64 addr;
	volatile void __iomem *device;
	unsigned long count = 10000000L; /* 50 seconds */

	atomic_inc(&q->numpending);
	for (;;) {
		Index = rx_readl(dev, MUnit.InboundQueue);
		if (unlikely(Index == 0xFFFFFFFFL))
			Index = rx_readl(dev, MUnit.InboundQueue);
		if (likely(Index != 0xFFFFFFFFL))
			break;
		if (--count == 0) {
			atomic_dec(&q->numpending);
			return -ETIMEDOUT;
		}
		udelay(5);
	}
	device = dev->base + Index;
	addr = fib->hw_fib_pa;
	writel((u32)(addr & 0xffffffff), device);
	device += sizeof(u32);
	writel((u32)(addr >> 32), device);
	device += sizeof(u32);
	writel(le16_to_cpu(fib->hw_fib_va->header.Size), device);
	rx_writel(dev, MUnit.InboundQueue, Index);
	return 0;
}

/**
 * aac_rx_check_health
 * @dev: device to check if healthy
 *
 * Will attempt to determine if the specified adapter is alive and
 * capable of handling requests, returning 0 if alive.
 */
static int aac_rx_check_health(struct aac_dev *dev)
{
	u32 status = rx_readl(dev, MUnit.OMRx[0]);

	/*
	 * Check to see if the board failed any self tests.
	 */
	if (unlikely(status & SELF_TEST_FAILED))
		return -1;
	/*
	 * Check to see if the board panic'd.
	 */
	if (unlikely(status & KERNEL_PANIC)) {
		char *buffer;
		struct POSTSTATUS {
			__le32 Post_Command;
			__le32 Post_Address;
		} *post;
		dma_addr_t paddr, baddr;
		int ret;

		if (likely((status & 0xFF000000L) == 0xBC000000L))
			return (status >> 16) & 0xFF;
		buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
		ret = -2;
		if (unlikely(buffer == NULL))
			return ret;
		post = pci_alloc_consistent(dev->pdev,
			sizeof(struct POSTSTATUS), &paddr);
		if (unlikely(post == NULL)) {
			pci_free_consistent(dev->pdev, 512, buffer, baddr);
			return ret;
		}
		memset(buffer, 0, 512);
		post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
		post->Post_Address = cpu_to_le32(baddr);
		rx_writel(dev, MUnit.IMRx[0], paddr);
		rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0,
			NULL, NULL, NULL, NULL, NULL);
		pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
			post, paddr);
		if (likely((buffer[0] == '0') &&
		    ((buffer[1] == 'x') || (buffer[1] == 'X')))) {
			ret = (buffer[2] <= '9')
				? (buffer[2] - '0')
				: (buffer[2] - 'A' + 10);
			ret <<= 4;
			ret += (buffer[3] <= '9')
				? (buffer[3] - '0')
				: (buffer[3] - 'A' + 10);
		}
		pci_free_consistent(dev->pdev, 512, buffer, baddr);
		return ret;
	}
	/*
	 * Wait for the adapter to be up and running.
	 */
	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
		return -3;
	/*
	 * Everything is OK
	 */
	return 0;
}
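
/**
 * aac_rx_intr_message
 * @irq: interrupt number
 * @dev_id: the aac_dev this interrupt belongs to
 *
 * Interrupt handler for the message (new comm) interface.  Drain the
 * adapter's OutboundQueue; a value of 0xFFFFFFFF means the queue is empty.
 * Entries for which aac_intr_normal() returns nonzero are written back to
 * the queue and the adapter response doorbell is rung.
 */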
static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
{
	struct aac_dev *dev = dev_id;
	u32 Index = rx_readl(dev, MUnit.OutboundQueue);

	if (unlikely(Index == 0xFFFFFFFFL))
		Index = rx_readl(dev, MUnit.OutboundQueue);
	if (likely(Index != 0xFFFFFFFFL)) {
		do {
			if (unlikely(aac_intr_normal(dev, Index))) {
				rx_writel(dev, MUnit.OutboundQueue, Index);
				rx_writel(dev, MUnit.ODR,
					DoorBellAdapterNormRespReady);
			}
			Index = rx_readl(dev, MUnit.OutboundQueue);
		} while (Index != 0xFFFFFFFFL);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
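
/**
 * aac_rx_notify_adapter
 * @dev: adapter to notify
 * @event: queue event to signal
 *
 * Notify the adapter of a host queue event by ringing the matching inbound
 * doorbell (INBOUNDDOORBELL_1 through INBOUNDDOORBELL_6).  HostShutdown is
 * a no-op; any other event is a BUG().
 */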
static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
{
	switch (event) {
	case AdapNormCmdQue:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_1);
		break;
	case HostNormRespNotFull:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_4);
		break;
	case AdapNormRespQue:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_2);
		break;
	case HostNormCmdNotFull:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_3);
		break;
	case HostShutdown:
		break;
	case FastIo:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_6);
		break;
	case AdapPrintfDone:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_5);
		break;
	default:
		BUG();
		break;
	}
}
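
/*
 * Alternate aac_rx_intr_message for adapters that encode flags in the low
 * bits of each outbound queue entry: bit 1 marks an AIF (with 0xFFFFFFFE as
 * a special entry that is simply dropped); otherwise bit 0 marks a fast
 * response and the index itself is the value shifted right by two.
 */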
static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
{
	int isAif, isFastResponse, isSpecial;
	struct aac_dev *dev = dev_id;
	u32 Index = rx_readl(dev, MUnit.OutboundQueue);

	if (unlikely(Index == 0xFFFFFFFFL))
		Index = rx_readl(dev, MUnit.OutboundQueue);
	if (likely(Index != 0xFFFFFFFFL)) {
		do {
			isAif = isFastResponse = isSpecial = 0;
			if (Index & 0x00000002L) {
				isAif = 1;
				if (Index == 0xFFFFFFFEL)
					isSpecial = 1;
				Index &= ~0x00000002L;
			} else {
				if (Index & 0x00000001L)
					isFastResponse = 1;
				Index >>= 2;
			}
			if (!isSpecial) {
				if (unlikely(aac_intr_normal(dev, Index, isAif,
						isFastResponse, NULL))) {
					rx_writel(dev, MUnit.OutboundQueue,
						Index);
					rx_writel(dev, MUnit.ODR,
						DoorBellAdapterNormRespReady);
				}
			}
			Index = rx_readl(dev, MUnit.OutboundQueue);
		} while (Index != 0xFFFFFFFFL);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
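
/**
 * aac_rx_intr_producer
 * @irq: interrupt number
 * @dev_id: the aac_dev this interrupt belongs to
 *
 * Interrupt handler for the producer/consumer interface.  Check OISR
 * against the saved interrupt mask, read the outbound doorbell and
 * dispatch printf, command and response notifications, acknowledging
 * each doorbell bit in MUnit.ODR.
 */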
static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
{
	struct aac_dev *dev = dev_id;
	unsigned long bellbits;
	u8 intstat = rx_readb(dev, MUnit.OISR);

	/*
	 * Read mask and invert because drawbridge is reversed.
	 * This allows us to only service interrupts that have
	 * been enabled.
	 * Check to see if this is our interrupt.  If it isn't just return
	 */
	if (likely(intstat & ~(dev->OIMR))) {
		bellbits = rx_readl(dev, OutboundDoorbellReg);
		if (unlikely(bellbits & DoorBellPrintfReady)) {
			aac_printf(dev, readl(&dev->IndexRegs->Mailbox[5]));
			rx_writel(dev, MUnit.ODR, DoorBellPrintfReady);
			rx_writel(dev, InboundDoorbellReg, DoorBellPrintfDone);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormCmdReady)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
		}
		else if (likely(bellbits & DoorBellAdapterNormRespReady)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormCmdNotFull)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormRespNotFull)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
		}
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
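
/**
 * _aac_rx_init
 * @dev: adapter to initialize
 *
 * Map the adapter registers, reset the card if required, wait up to
 * startup_timeout seconds for the firmware to report
 * KERNEL_UP_AND_RUNNING, fill in the adapter function dispatch table,
 * register the interrupt handler and start the adapter.  Returns 0 on
 * success and -1 on failure.
 */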
int _aac_rx_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
	dev->OIMR = status = rx_readb(dev, MUnit.OIMR);
	if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
	    !aac_rx_restart_adapter(dev, 0))
		/* Make sure the Hardware FIFO is empty */
		while ((++restart < 512) &&
		       (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
	/*
	 * Check to see if the board panic'd while booting.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & KERNEL_PANIC) {
		if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))
			goto error_iounmap;
		++restart;
	}
	/*
	 * Check to see if the board failed any self tests.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	/*
	 * Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 * Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = rx_readl(dev, MUnit.OMRx[0])) &
		 KERNEL_UP_AND_RUNNING)) {
		if ((restart &&
		     (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		    time_after(jiffies, start + HZ * startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
				dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		    ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		     time_after(jiffies, start + HZ *
			((startup_timeout > 60)
			  ? (startup_timeout - 60)
			  : (startup_timeout / 2))))) {
			if (likely(!aac_rx_restart_adapter(dev,
					aac_rx_check_health(dev))))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 * Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
	dev->a_ops.adapter_notify = aac_rx_notify_adapter;
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_check_health = aac_rx_check_health;
	dev->a_ops.adapter_restart = aac_rx_restart_adapter;

	/*
	 * First clear out all interrupts.  Then enable the one's that we
	 * can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_PRODUCER);
	aac_adapter_disable_int(dev);
	rx_writel(dev, MUnit.ODR, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	aac_adapter_comm(dev, dev->comm_interface);
	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
		if (dev->msi)
			pci_disable_msi(dev->pdev);
		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	aac_adapter_enable_int(dev);
	/*
	 * Tell the adapter that all is configured, and it can
	 * start accepting requests
	 */
	aac_rx_start_adapter(dev);

	return 0;

error_iounmap:

	return -1;
}
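
/**
 * rx_sync_cmd
 * @dev: adapter
 * @command: synchronous command to issue
 * @p1 ... @p6: command parameters (only p1-p4 are written to the mailboxes)
 * @status, @r1 ... @r4: optional locations for the mailbox values read back
 *
 * Issue a synchronous command through the mailbox registers with doorbell
 * interrupts masked, then poll the outbound doorbell for up to 30 seconds.
 * Returns 0 on success or -ETIMEDOUT if the adapter never signals completion.
 */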
static int rx_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
{
	unsigned long start;
	int ok;

	/*
	 * Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 * Write the parameters into Mailboxes 1 - 6
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);
	/*
	 * Clear the synch command doorbell to start on a clean slate.
	 */
	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 * Disable doorbell interrupts
	 */
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
	/*
	 * Force the completion of the mask register write before issuing
	 * the interrupt.
	 */
	rx_readb(dev, MUnit.OIMR);
	/*
	 * Signal that there is a new synch command
	 */
	rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);

	ok = 0;
	start = jiffies;

	/*
	 * Wait up to 30 seconds
	 */
	while (time_before(jiffies, start + 30 * HZ)) {
		udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
		/*
		 * Mon960 will set doorbell0 bit when it has completed the command.
		 */
		if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
			/*
			 * Clear the doorbell.
			 */
			rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
			ok = 1;
			break;
		}
		/*
		 * Yield the processor in case we are slow
		 */
		msleep(1);
	}
	if (unlikely(ok != 1)) {
		/*
		 * Restore interrupt mask even though we timed out
		 */
		aac_adapter_enable_int(dev);
		return -ETIMEDOUT;
	}
	/*
	 * Pull the synch status from Mailbox 0.
	 */
	if (status)
		*status = readl(&dev->IndexRegs->Mailbox[0]);
	if (r1)
		*r1 = readl(&dev->IndexRegs->Mailbox[1]);
	if (r2)
		*r2 = readl(&dev->IndexRegs->Mailbox[2]);
	if (r3)
		*r3 = readl(&dev->IndexRegs->Mailbox[3]);
	if (r4)
		*r4 = readl(&dev->IndexRegs->Mailbox[4]);
	/*
	 * Clear the synch command doorbell.
	 */
	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 * Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;
}