/*
 * allocate DBDMA command arrays
 */
static int snd_pmac_dbdma_alloc(struct snd_pmac *chip, struct pmac_dbdma *rec, int size)
{
	unsigned int rsize = sizeof(struct dbdma_cmd) * (size + 1);

	rec->space = dma_alloc_coherent(&chip->pdev->dev, rsize,
					&rec->dma_base, GFP_KERNEL);
	if (rec->space == NULL)
		return -ENOMEM;
	rec->size = size;
	memset(rec->space, 0, rsize);
	rec->cmds = (void __iomem *)DBDMA_ALIGN(rec->space);
	rec->addr = rec->dma_base + (unsigned long)((char *)rec->cmds - (char *)rec->space);

	return 0;
}
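/*
 * Hedged sketch of the matching teardown path, assuming the struct pmac_dbdma
 * layout used above; the function name is illustrative.  The whole coherent
 * buffer (rec->space), not the aligned rec->cmds pointer, must be handed back
 * to dma_free_coherent() together with the original size and bus address.
 */
static void snd_pmac_dbdma_free_sketch(struct snd_pmac *chip, struct pmac_dbdma *rec)
{
	if (rec->space) {
		unsigned int rsize = sizeof(struct dbdma_cmd) * (rec->size + 1);

		dma_free_coherent(&chip->pdev->dev, rsize, rec->space, rec->dma_base);
		rec->space = NULL;
	}
}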
static int swim3_add_device(struct macio_dev *mdev, int index)
{
	struct device_node *swim = mdev->ofdev.dev.of_node;
	struct floppy_state *fs = &floppy_states[index];
	int rc = -EBUSY;

	/* Check & Request resources */
	if (macio_resource_count(mdev) < 2) {
		printk(KERN_WARNING "fd%d: no address for %s\n",
		       index, swim->full_name);
		return -ENXIO;
	}
	if (macio_irq_count(mdev) < 2) {
		printk(KERN_WARNING "fd%d: no intrs for device %s\n",
		       index, swim->full_name);
	}
	if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
		printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
		       index, swim->full_name);
		return -EBUSY;
	}
	if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
		printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
		       index, swim->full_name);
		macio_release_resource(mdev, 0);
		return -EBUSY;
	}
	dev_set_drvdata(&mdev->ofdev.dev, fs);

	if (mdev->media_bay == NULL)
		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);

	memset(fs, 0, sizeof(*fs));
	spin_lock_init(&fs->lock);
	fs->state = idle;
	fs->swim3 = (struct swim3 __iomem *)
		ioremap(macio_resource_start(mdev, 0), 0x200);
	if (fs->swim3 == NULL) {
		printk(KERN_ERR "fd%d: couldn't map registers for %s\n",
		       index, swim->full_name);
		rc = -ENOMEM;
		goto out_release;
	}
	fs->dma = (struct dbdma_regs __iomem *)
		ioremap(macio_resource_start(mdev, 1), 0x200);
	if (fs->dma == NULL) {
		printk(KERN_ERR "fd%d: couldn't map DMA for %s\n",
		       index, swim->full_name);
		iounmap(fs->swim3);
		rc = -ENOMEM;
		goto out_release;
	}
	fs->swim3_intr = macio_irq(mdev, 0);
	fs->dma_intr = macio_irq(mdev, 1);
	fs->cur_cyl = -1;
	fs->cur_sector = -1;
	fs->secpercyl = 36;
	fs->secpertrack = 18;
	fs->total_secs = 2880;
	fs->mdev = mdev;
	init_waitqueue_head(&fs->wait);

	fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
	memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
	st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);

	if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
		printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
		       index, fs->swim3_intr, swim->full_name);
		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
		goto out_unmap;
	}
	/*
	if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
		printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
		       fs->dma_intr);
		return -EBUSY;
	}
	*/

	init_timer(&fs->timeout);

	printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
	       mdev->media_bay ? "in media bay" : "");

	return 0;

 out_unmap:
	iounmap(fs->dma);
	iounmap(fs->swim3);

 out_release:
	macio_release_resource(mdev, 0);
	macio_release_resource(mdev, 1);

	return rc;
}
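/*
 * Hedged sketch (illustrative, not taken from swim3.c): how the two-entry
 * command list initialised above is typically filled before a transfer is
 * started.  The helper name and the buf/count/write parameters are
 * assumptions; only fs->dma_cmd[0] needs to be written, since fs->dma_cmd[1]
 * already holds the DBDMA_STOP terminator.
 */
static void swim3_fill_dma_cmd_sketch(struct floppy_state *fs,
				      void *buf, int count, int write)
{
	struct dbdma_cmd *cp = fs->dma_cmd;

	st_le16(&cp->command, write ? OUTPUT_LAST : INPUT_LAST);
	st_le16(&cp->req_count, count);
	st_le32(&cp->phy_addr, virt_to_bus(buf));
	cp->xfer_status = 0;
}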
/*
 * pmac_ide_build_dmatable builds the DBDMA command list
 * for a transfer and sets the DBDMA channel to point to it.
 */
static int pmac_ide_build_dmatable(ide_drive_t *drive, int ix, int wr)
{
	struct dbdma_cmd *table, *tstart;
	int count = 0;
	struct request *rq = HWGROUP(drive)->rq;
	struct buffer_head *bh = rq->bh;
	unsigned int size, addr;
	volatile struct dbdma_regs *dma = pmac_ide[ix].dma_regs;

	table = tstart = (struct dbdma_cmd *) DBDMA_ALIGN(pmac_ide[ix].dma_table);

	/* stop the channel before rewriting its command list */
	out_le32(&dma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16);
	while (in_le32(&dma->status) & RUN)
		udelay(1);

	do {
		/*
		 * Determine addr and size of next buffer area.  We assume that
		 * individual virtual buffers are always composed linearly in
		 * physical memory.  For example, we assume that any 8kB buffer
		 * is always composed of two adjacent physical 4kB pages rather
		 * than two possibly non-adjacent physical 4kB pages.
		 */
		if (bh == NULL) {	/* paging requests have (rq->bh == NULL) */
			addr = virt_to_bus(rq->buffer);
			size = rq->nr_sectors << 9;
		} else {
			/* group sequential buffers into one large buffer */
			addr = virt_to_bus(bh->b_data);
			size = bh->b_size;
			while ((bh = bh->b_reqnext) != NULL) {
				if ((addr + size) != virt_to_bus(bh->b_data))
					break;
				size += bh->b_size;
			}
		}

		/*
		 * Fill in the next DBDMA command block.
		 * Note that one DBDMA command can transfer at most
		 * 65535 bytes; we cap each command at 0xfe00 bytes,
		 * which stays below that limit and keeps the count a
		 * multiple of the 512-byte sector size.
		 */
		while (size) {
			unsigned int tc = (size < 0xfe00) ? size : 0xfe00;

			if (++count >= MAX_DCMDS) {
				printk(KERN_WARNING "%s: DMA table too small\n",
				       drive->name);
				return 0;	/* revert to PIO for this request */
			}
			st_le16(&table->command, wr ? OUTPUT_MORE : INPUT_MORE);
			st_le16(&table->req_count, tc);
			st_le32(&table->phy_addr, addr);
			table->cmd_dep = 0;
			table->xfer_status = 0;
			table->res_count = 0;
			addr += tc;
			size -= tc;
			++table;
		}
	} while (bh != NULL);

	/* convert the last command to an input/output last command */
	if (count)
		st_le16(&table[-1].command, wr ? OUTPUT_LAST : INPUT_LAST);
	else
		printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name);

	/* add the stop command to the end of the list */
	memset(table, 0, sizeof(struct dbdma_cmd));
	out_le16(&table->command, DBDMA_STOP);

	out_le32(&dma->cmdptr, virt_to_bus(tstart));
	return 1;
}
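/*
 * Hedged sketch (the helper names are illustrative): once
 * pmac_ide_build_dmatable() has returned 1, the caller typically starts the
 * channel by setting RUN, and later stops it by clearing the control bits
 * and polling the status register until the channel goes quiet, just as
 * build_dmatable itself does before rewriting the list.
 */
static void pmac_ide_dma_start_sketch(volatile struct dbdma_regs *dma)
{
	out_le32(&dma->control, (RUN << 16) | RUN);	/* set RUN */
	in_le32(&dma->status);				/* flush the posted write */
}

static void pmac_ide_dma_stop_sketch(volatile struct dbdma_regs *dma)
{
	/* clear RUN (and any pending PAUSE/FLUSH/WAKE/DEAD) and wait */
	out_le32(&dma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16);
	while (in_le32(&dma->status) & RUN)
		udelay(1);
}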
int mac53c94_detect(Scsi_Host_Template *tp)
{
	struct device_node *node;
	int nfscs;
	struct fsc_state *state, **prev_statep;
	struct Scsi_Host *host;
	void *dma_cmd_space;
	unsigned char *clkprop;
	int proplen;
	struct pci_dev *pdev;
	u8 pbus, devfn;

	nfscs = 0;
	prev_statep = &all_53c94s;
	for (node = find_devices("53c94"); node != NULL; node = node->next) {
		if (node->n_addrs != 2 || node->n_intrs != 2) {
			printk(KERN_ERR "mac53c94: expected 2 addrs and intrs"
			       " (got %d/%d) for node %s\n",
			       node->n_addrs, node->n_intrs, node->full_name);
			continue;
		}
		pdev = NULL;
		if (node->parent != NULL
		    && !pci_device_from_OF_node(node->parent, &pbus, &devfn))
			pdev = pci_find_slot(pbus, devfn);
		if (pdev == NULL) {
			printk(KERN_ERR "mac53c94: can't find PCI device "
			       "for %s\n", node->full_name);
			continue;
		}

		host = scsi_register(tp, sizeof(struct fsc_state));
		if (host == NULL)
			break;
		host->unique_id = nfscs;

		state = (struct fsc_state *) host->hostdata;
		if (state == NULL) {
			/* "can't happen" */
			printk(KERN_ERR "mac53c94: no state for %s?!\n",
			       node->full_name);
			scsi_unregister(host);
			break;
		}
		state->host = host;
		state->pdev = pdev;

		state->regs = (volatile struct mac53c94_regs *)
			ioremap(node->addrs[0].address, 0x1000);
		state->intr = node->intrs[0].line;
		state->dma = (volatile struct dbdma_regs *)
			ioremap(node->addrs[1].address, 0x1000);
		state->dmaintr = node->intrs[1].line;
		if (state->regs == NULL || state->dma == NULL) {
			printk(KERN_ERR "mac53c94: ioremap failed for %s\n",
			       node->full_name);
			if (state->dma != NULL)
				iounmap(state->dma);
			if (state->regs != NULL)
				iounmap(state->regs);
			scsi_unregister(host);
			break;
		}

		clkprop = get_property(node, "clock-frequency", &proplen);
		if (clkprop == NULL || proplen != sizeof(int)) {
			printk(KERN_ERR "%s: can't get clock frequency, "
			       "assuming 25MHz\n", node->full_name);
			state->clk_freq = 25000000;
		} else
			state->clk_freq = *(int *)clkprop;

		/* Space for dma command list: +1 for stop command,
		   +1 to allow for aligning. */
		dma_cmd_space = kmalloc((host->sg_tablesize + 2) *
					sizeof(struct dbdma_cmd), GFP_KERNEL);
		if (dma_cmd_space == NULL) {
			printk(KERN_ERR "mac53c94: couldn't allocate dma "
			       "command space for %s\n", node->full_name);
			goto err_cleanup;
		}
		state->dma_cmds = (struct dbdma_cmd *) DBDMA_ALIGN(dma_cmd_space);
		memset(state->dma_cmds, 0, (host->sg_tablesize + 1)
		       * sizeof(struct dbdma_cmd));
		state->dma_cmd_space = dma_cmd_space;

		if (request_irq(state->intr, do_mac53c94_interrupt, 0,
				"53C94", state)) {
			printk(KERN_ERR "mac53c94: can't get irq %d for %s\n",
			       state->intr, node->full_name);
		err_cleanup:
			iounmap(state->dma);
			iounmap(state->regs);
			scsi_unregister(host);
			break;
		}

		/* only link the state into the global list once setup
		   has fully succeeded */
		*prev_statep = state;
		prev_statep = &state->next;

		mac53c94_init(state);

		++nfscs;
	}
	return nfscs;
}
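/*
 * Hedged sketch of the DBDMA_ALIGN() helper used throughout these drivers
 * (the exact definition lives in the driver sources/headers): DBDMA command
 * blocks must start on a 16-byte boundary, so a kmalloc'ed pointer is rounded
 * up to the size of one struct dbdma_cmd.  That is why the allocation above
 * reserves one extra descriptor "to allow for aligning".
 */
#define DBDMA_ALIGN_SKETCH(x)						\
	(((unsigned long)(x) + sizeof(struct dbdma_cmd) - 1)		\
	 & ~(sizeof(struct dbdma_cmd) - 1))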
static int swim3_add_device(struct macio_dev *mdev, int index)
{
	struct device_node *swim = mdev->ofdev.dev.of_node;
	struct floppy_state *fs = &floppy_states[index];
	int rc = -EBUSY;

	/* Do this first for message macros */
	memset(fs, 0, sizeof(*fs));
	fs->mdev = mdev;
	fs->index = index;

	/* Check & Request resources */
	if (macio_resource_count(mdev) < 2) {
		swim3_err("%s", "No address in device-tree\n");
		return -ENXIO;
	}
	if (macio_irq_count(mdev) < 1) {
		swim3_err("%s", "No interrupt in device-tree\n");
		return -ENXIO;
	}
	if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
		swim3_err("%s", "Can't request mmio resource\n");
		return -EBUSY;
	}
	if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
		swim3_err("%s", "Can't request dma resource\n");
		macio_release_resource(mdev, 0);
		return -EBUSY;
	}
	dev_set_drvdata(&mdev->ofdev.dev, fs);

	if (mdev->media_bay == NULL)
		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);

	fs->state = idle;
	fs->swim3 = (struct swim3 __iomem *)
		ioremap(macio_resource_start(mdev, 0), 0x200);
	if (fs->swim3 == NULL) {
		swim3_err("%s", "Couldn't map mmio registers\n");
		rc = -ENOMEM;
		goto out_release;
	}
	fs->dma = (struct dbdma_regs __iomem *)
		ioremap(macio_resource_start(mdev, 1), 0x200);
	if (fs->dma == NULL) {
		swim3_err("%s", "Couldn't map dma registers\n");
		iounmap(fs->swim3);
		rc = -ENOMEM;
		goto out_release;
	}
	fs->swim3_intr = macio_irq(mdev, 0);
	fs->dma_intr = macio_irq(mdev, 1);
	fs->cur_cyl = -1;
	fs->cur_sector = -1;
	fs->secpercyl = 36;
	fs->secpertrack = 18;
	fs->total_secs = 2880;
	init_waitqueue_head(&fs->wait);

	fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
	memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
	fs->dma_cmd[1].command = cpu_to_le16(DBDMA_STOP);

	if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
		swim3_mb_event(mdev, MB_FD);

	if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
		swim3_err("%s", "Couldn't request interrupt\n");
		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
		goto out_unmap;
	}

	timer_setup(&fs->timeout, NULL, 0);

	swim3_info("SWIM3 floppy controller %s\n",
		   mdev->media_bay ? "in media bay" : "");

	return 0;

 out_unmap:
	iounmap(fs->dma);
	iounmap(fs->swim3);

 out_release:
	macio_release_resource(mdev, 0);
	macio_release_resource(mdev, 1);

	return rc;
}
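/*
 * Hedged sketch (illustrative, not from swim3.c): in the newer code the same
 * two-entry command list is written with plain cpu_to_le16()/cpu_to_le32()
 * stores instead of the old st_le16()/st_le32() helpers; the descriptor
 * layout is unchanged, and fs->dma_cmd[1] keeps the DBDMA_STOP terminator set
 * up in swim3_add_device().  The helper name and parameters are assumptions.
 */
static void swim3_fill_dma_cmd_modern_sketch(struct floppy_state *fs,
					     dma_addr_t buf, int count, int write)
{
	struct dbdma_cmd *cp = fs->dma_cmd;

	cp->command = cpu_to_le16(write ? OUTPUT_LAST : INPUT_LAST);
	cp->req_count = cpu_to_le16(count);
	cp->phy_addr = cpu_to_le32(buf);
	cp->xfer_status = 0;
}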