static int vnic_add_bufs(netfront_accel_vnic *vnic, struct net_accel_msg *msg)
{
	int rc, offset;
	struct netfront_accel_bufinfo *bufinfo;

	BUG_ON(msg->u.mapbufs.pages > NET_ACCEL_MSG_MAX_PAGE_REQ);

	offset = msg->u.mapbufs.reqid;

	if (offset < vnic->bufpages.max_pages -
	    (vnic->bufpages.max_pages / sfc_netfront_buffer_split)) {
		bufinfo = vnic->rx_bufs;
	} else
		bufinfo = vnic->tx_bufs;

	/* Queue up some Rx buffers to start things off. */
	if ((rc = netfront_accel_add_bufs(&vnic->bufpages, bufinfo, msg)) == 0) {
		netfront_accel_vi_add_bufs(vnic, bufinfo == vnic->rx_bufs);

		if (offset + msg->u.mapbufs.pages == vnic->bufpages.max_pages) {
			VPRINTK("%s: got all buffers back\n", __FUNCTION__);
			vnic->frontend_ready = 1;
			if (vnic->backend_netdev_up)
				vnic_start_fastpath(vnic);
		} else {
			VPRINTK("%s: got buffers back %d %d\n", __FUNCTION__,
				offset, msg->u.mapbufs.pages);
		}
	}

	return rc;
}
/**
 * Performs LRW-AES decryption.
 * @param in Source of data
 * @param out Location to place decrypted data
 * @param nents Number of entries in scatter list, in and out must have the
 *        same number of entries
 * @param iv I-Value
 * @param cipher_key 16 byte array that is the cipher key
 * @param tweak_key 16 byte array that is the I-Value tweak key
 * @return error code or 0 for success
 */
int ox800_aeslrw_decrypt(
	struct scatterlist* in,
	struct scatterlist* out,
	unsigned int nents,
	u8* iv,
	u8* cipher_key,
	u8* tweak_key)
{
	int localresult;

	VPRINTK(KERN_INFO"in %p, out %p, nents %d, iv %08x%08x, ckey %p, tkey %p\n",
		in, out, nents, *((u32* )(&iv[4])), *((u32* )(&iv[0])),
		cipher_key, tweak_key);

	/* get cipher core */
	while (down_interruptible(&ox800_aeslrw_driver.core))
		;
	VPRINTK(KERN_INFO"got core\n");

	ox800_aeslrw_setkeys(cipher_key, tweak_key);

	localresult = ox800_aeslrw_gencrypt(0, in, out, nents, iv);

	up(&ox800_aeslrw_driver.core);
	VPRINTK(KERN_INFO"released core\n");

	return localresult;
}
static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
				     u32 *ttl, dma_addr_t cmd_desc_paddr)
{
	struct scatterlist *sg;
	unsigned int num_prde = 0;
	u32 ttl_dwords = 0;

	/*
	 * NOTE : direct & indirect prdt's are contiguously allocated
	 */
	struct prde *prd = (struct prde *)&((struct command_desc *)
					    cmd_desc)->prdt;
	struct prde *prd_ptr_to_indirect_ext = NULL;
	unsigned indirect_ext_segment_sz = 0;
	dma_addr_t indirect_ext_segment_paddr;

	VPRINTK("SATA FSL : cd = 0x%p, prd = 0x%p\n", cmd_desc, prd);

	indirect_ext_segment_paddr = cmd_desc_paddr +
	    SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16;

	ata_for_each_sg(sg, qc) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		VPRINTK("SATA FSL : fill_sg, sg_addr = 0x%x, sg_len = %d\n",
			sg_addr, sg_len);

		/* warn if each s/g element is not dword aligned */
		if (sg_addr & 0x03)
			ata_port_printk(qc->ap, KERN_ERR,
					"s/g addr unaligned : 0x%x\n", sg_addr);
		if (sg_len & 0x03)
			ata_port_printk(qc->ap, KERN_ERR,
					"s/g len unaligned : 0x%x\n", sg_len);

		if (num_prde == (SATA_FSL_MAX_PRD_DIRECT - 1) &&
		    !ata_sg_is_last(sg, qc)) {
			VPRINTK("setting indirect prde\n");
			prd_ptr_to_indirect_ext = prd;
			prd->dba = cpu_to_le32(indirect_ext_segment_paddr);
			indirect_ext_segment_sz = 0;
			++prd;
			++num_prde;
		}

		ttl_dwords += sg_len;
		prd->dba = cpu_to_le32(sg_addr);
		prd->ddc_and_ext = cpu_to_le32(DATA_SNOOP_ENABLE |
					       (sg_len & ~0x03));

		VPRINTK("sg_fill, ttl=%d, dba=0x%x, ddc=0x%x\n",
			ttl_dwords, prd->dba, prd->ddc_and_ext);

		++num_prde;
		++prd;
		if (prd_ptr_to_indirect_ext)
			indirect_ext_segment_sz += sg_len;
	}
/* accelstate on the frontend's xenbus node has changed */
static void bend_domu_accel_change(struct xenbus_watch *watch,
				   const char **vec, unsigned int len)
{
	int state;
	struct netback_accel *bend;

	bend = container_of(watch, struct netback_accel, domu_accel_watch);
	if (bend->domu_accel_watch.node != NULL) {
		struct xenbus_device *dev =
			(struct xenbus_device *)bend->hdev_data;
		VPRINTK("Watch matched, got dev %p otherend %p\n",
			dev, dev->otherend);
		/*
		 * dev->otherend != NULL check to protect against
		 * watch firing when domain goes away and we haven't
		 * yet cleaned up
		 */
		if (!dev->otherend ||
		    !xenbus_exists(XBT_NIL, watch->node, "") ||
		    strncmp(dev->otherend, vec[XS_WATCH_PATH],
			    strlen(dev->otherend))) {
			DPRINTK("Ignoring watch as otherend seems invalid\n");
			return;
		}

		mutex_lock(&bend->bend_mutex);

		xenbus_scanf(XBT_NIL, dev->otherend, "accelstate", "%d",
			     &state);
		netback_accel_frontend_changed(dev, state);

		mutex_unlock(&bend->bend_mutex);
	}
}
static void ahci_pci_init_controller(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *port_mmio;
	u32 tmp;
	int mv;

	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
		if (pdev->device == 0x6121)
			mv = 2;
		else
			mv = 4;
		port_mmio = __ahci_port_base(host, mv);

		writel(0, port_mmio + PORT_IRQ_MASK);

		/* clear port IRQ */
		tmp = readl(port_mmio + PORT_IRQ_STAT);
		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
		if (tmp)
			writel(tmp, port_mmio + PORT_IRQ_STAT);
	}

	ahci_init_controller(host);
}
/*
 * Demultiplex an IRQ from the frontend driver.  This is never used
 * functionally, but we need it to pass to the bind function, and may
 * get called spuriously
 */
static irqreturn_t netirq_from_frontend(int irq, void *context,
					struct pt_regs *unused)
{
	VPRINTK("netirq %d from device %s\n", irq,
		((struct xenbus_device *)context)->nodename);

	return IRQ_HANDLED;
}
static void scc_tf_load (struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		out_be32(ioaddr->ctl_addr, tf->ctl);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		out_be32(ioaddr->feature_addr, tf->hob_feature);
		out_be32(ioaddr->nsect_addr, tf->hob_nsect);
		out_be32(ioaddr->lbal_addr, tf->hob_lbal);
		out_be32(ioaddr->lbam_addr, tf->hob_lbam);
		out_be32(ioaddr->lbah_addr, tf->hob_lbah);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		out_be32(ioaddr->feature_addr, tf->feature);
		out_be32(ioaddr->nsect_addr, tf->nsect);
		out_be32(ioaddr->lbal_addr, tf->lbal);
		out_be32(ioaddr->lbam_addr, tf->lbam);
		out_be32(ioaddr->lbah_addr, tf->lbah);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		out_be32(ioaddr->device_addr, tf->device);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}
/* Demultiplex a message IRQ from the frontend driver. */
static irqreturn_t msgirq_from_frontend(int irq, void *context,
					struct pt_regs *unused)
{
	struct xenbus_device *dev = context;
	struct netback_accel *bend = NETBACK_ACCEL_FROM_XENBUS_DEVICE(dev);
	VPRINTK("irq %d from device %s\n", irq, dev->nodename);
	schedule_work(&bend->handle_msg);
	return IRQ_HANDLED;
}
/*
 * Notify dom0 that the queue we want to use is full, it should
 * respond by setting MSG_AFLAGS_QUEUEUNOTFULL in due course
 */
inline void vnic_set_queue_full(netfront_accel_vnic *vnic)
{
	if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUFULL_B,
			      (unsigned long *)&vnic->shared_page->aflags))
		notify_remote_via_irq(vnic->msg_channel_irq);
	else
		VPRINTK("queue full bit already set, not signalling\n");
}
irqreturn_t netfront_accel_msg_channel_irq_from_bend(int irq, void *context,
						     struct pt_regs *unused)
{
	netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
	VPRINTK("irq %d from device %s\n", irq, vnic->dev->nodename);

	queue_work(netfront_accel_workqueue, &vnic->msg_from_bend);

	return IRQ_HANDLED;
}
static inline void pkt_set_state(struct packet_data *pkt,
				 enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id,
		(unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}
static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
			       struct scsi_cmnd *cmd,
			       void (*done)(struct scsi_cmnd *),
			       ata_xlat_func_t xlat_func)
{
	struct ata_queued_cmd *qc;
	u8 *scsicmd = cmd->cmnd;

	VPRINTK("ENTER\n");

	qc = ata_scsi_qc_new(ap, dev, cmd, done);
	if (!qc)
		return;

	if (cmd->sc_data_direction == SCSI_DATA_READ ||
	    cmd->sc_data_direction == SCSI_DATA_WRITE) {
		if (unlikely(cmd->request_bufflen < 1)) {
			printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n",
			       ap->id, dev->devno);
			goto err_out;
		}

		qc->flags |= ATA_QCFLAG_SG; /* data is present; dma-map it */
	}

	if (xlat_func(qc, scsicmd))
		goto err_out;

	/* select device, send command to hardware */
	if (ata_qc_issue(qc))
		goto err_out;

	VPRINTK("EXIT\n");
	return;

err_out:
	ata_bad_cdb(cmd, done);
	DPRINTK("EXIT - badcmd\n");
}
static int vnic_process_rx_msg(netfront_accel_vnic *vnic,
			       struct net_accel_msg *msg)
{
	int err;

	switch (msg->id) {
	case NET_ACCEL_MSG_HELLO:
		/* Hello, reply with Reply */
		DPRINTK("got Hello, with version %.8x\n",
			msg->u.hello.version);
		BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_NONE);
		err = vnic_process_hello_msg(vnic, msg);
		if (err == 0)
			vnic->msg_state = NETFRONT_ACCEL_MSG_HELLO;
		break;
	case NET_ACCEL_MSG_SETHW:
		/* Hardware info message */
		DPRINTK("got H/W info\n");
		BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HELLO);
		err = netfront_accel_vi_init(vnic, &msg->u.hw);
		if (err == 0)
			vnic->msg_state = NETFRONT_ACCEL_MSG_HW;
		break;
	case NET_ACCEL_MSG_MAPBUF | NET_ACCEL_MSG_REPLY:
		VPRINTK("Got mapped buffers back\n");
		BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HW);
		err = vnic_add_bufs(vnic, msg);
		break;
	case NET_ACCEL_MSG_MAPBUF | NET_ACCEL_MSG_REPLY | NET_ACCEL_MSG_ERROR:
		/* No buffers.  Can't use the fast path. */
		EPRINTK("Got mapped buffers error.  Cannot accelerate.\n");
		BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HW);
		err = -EIO;
		break;
	case NET_ACCEL_MSG_LOCALMAC:
		/* Should be add, remove not currently used */
		EPRINTK_ON(!(msg->u.localmac.flags & NET_ACCEL_MSG_ADD));
		BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HW);
		err = vnic_process_localmac_msg(vnic, msg);
		break;
	default:
		EPRINTK("Huh? Message code is 0x%x\n", msg->id);
		err = -EPROTO;
		break;
	}

	return err;
}
/**
 * module initialisation
 * @return success is 0
 */
static int __init ox800_aeslrw_init(void)
{
	VPRINTK(KERN_INFO"\n");

	/* Enable the clock to the DPE block */
	writel(1UL << SYS_CTRL_CKEN_DPE_BIT, SYS_CTRL_CKEN_SET_CTRL);

	/* Bring out of reset */
	writel(1UL << SYS_CTRL_RSTEN_DPE_BIT, SYS_CTRL_RSTEN_CLR_CTRL);

	/* initialise in unlocked state */
	init_MUTEX(&ox800_aeslrw_driver.core);

	return 0;
}
unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
				unsigned int buflen)
{
	struct ata_device *dev = args->dev;

	u8 hdr[] = {
		TYPE_DISK,
		0,
		0x5,	/* claim SPC-3 version compatibility */
		2,
		96 - 4
	};

	/* set scsi removable (RMB) bit per ata bit */
	if (ata_id_removeable(dev))
		hdr[1] |= (1 << 7);

	VPRINTK("ENTER\n");

	memcpy(rbuf, hdr, sizeof(hdr));

	if (buflen > 36) {
		memcpy(&rbuf[8], "ATA     ", 8);
		ata_dev_id_string(dev, &rbuf[16], ATA_ID_PROD_OFS, 16);
		ata_dev_id_string(dev, &rbuf[32], ATA_ID_FW_REV_OFS, 4);
		if (rbuf[32] == 0 || rbuf[32] == ' ')
			memcpy(&rbuf[32], "n/a ", 4);
	}

	if (buflen > 63) {
		const u8 versions[] = {
			0x60,	/* SAM-3 (no version claimed) */

			0x03,
			0x20,	/* SBC-2 (no version claimed) */

			0x02,
			0x60	/* SPC-3 (no version claimed) */
		};

		memcpy(rbuf + 59, versions, sizeof(versions));
	}

	return 0;
}
/*
 * Setup watch on "limits" in the backend vif info to know when
 * configuration has been set
 */
static int setup_config_accel_watch(struct xenbus_device *dev,
				    struct netback_accel *bend)
{
	int err;

	VPRINTK("Setting watch on %s/%s\n", dev->nodename, "limits");

	err = xenbus_watch_path2(dev, dev->nodename, "limits",
				 &bend->config_accel_watch,
				 bend_config_accel_change);
	if (err) {
		EPRINTK("%s: Failed to register xenbus watch: %d\n",
			__FUNCTION__, err);
		bend->config_accel_watch.node = NULL;
		return err;
	}
	return 0;
}
static int vnic_send_buffer_requests(netfront_accel_vnic *vnic,
				     struct netfront_accel_bufpages *bufpages)
{
	int pages, offset, rc = 0, sent = 0;
	struct net_accel_msg msg;

	while (bufpages->page_reqs < bufpages->max_pages) {
		offset = bufpages->page_reqs;

		pages = pow2(log2_le(bufpages->max_pages -
				     bufpages->page_reqs));
		pages = pages < NET_ACCEL_MSG_MAX_PAGE_REQ ?
			pages : NET_ACCEL_MSG_MAX_PAGE_REQ;

		BUG_ON(offset < 0);
		BUG_ON(pages <= 0);

		rc = netfront_accel_buf_map_request(vnic->dev, bufpages,
						    &msg, pages, offset);
		if (rc == 0) {
			rc = net_accel_msg_send(vnic->shared_page,
						&vnic->to_dom0, &msg);
			if (rc < 0) {
				VPRINTK("%s: queue full, stopping for now\n",
					__FUNCTION__);
				break;
			}
			sent++;
		} else {
			EPRINTK("%s: problem with grant, stopping for now\n",
				__FUNCTION__);
			break;
		}

		bufpages->page_reqs += pages;
	}

	if (sent)
		net_accel_msg_notify(vnic->msg_channel_irq);

	return rc;
}
/* Setup watch on frontend's accelstate */
static int setup_domu_accel_watch(struct xenbus_device *dev,
				  struct netback_accel *bend)
{
	int err;

	VPRINTK("Setting watch on %s/%s\n", dev->otherend, "accelstate");

	err = xenbus_watch_path2(dev, dev->otherend, "accelstate",
				 &bend->domu_accel_watch,
				 bend_domu_accel_change);
	if (err) {
		EPRINTK("%s: Failed to register xenbus watch: %d\n",
			__FUNCTION__, err);
		goto fail;
	}
	return 0;
 fail:
	bend->domu_accel_watch.node = NULL;
	return err;
}
/**
 * Sets the keys only if they have changed.
 * @param cipher_key 16 byte array that is the cipher key
 * @param tweak_key 16 byte array that is the I-Value tweak key
 */
static void ox800_aeslrw_setkeys(u8* cipher_key, u8* tweak_key)
{
	VPRINTK(KERN_INFO"\n");

	/*
	 * changing the keys can take a long time as the core will
	 * compute internal values based on the keys
	 */
	if (memcmp(&(ox800_aeslrw_driver.cipher_key[0]), cipher_key,
		   OX800DPE_KEYSIZE) ||
	    memcmp(&(ox800_aeslrw_driver.tweak_key[0]), tweak_key,
		   OX800DPE_KEYSIZE)) {
		u32* key;
		unsigned int i;

		DPRINTK(KERN_INFO"cipher key =");
		for (i = 0; i < OX800DPE_KEYSIZE; ++i)
			DPRINTK("%02x", cipher_key[i]);
		DPRINTK("\n");

		DPRINTK(KERN_INFO"tweak key =");
		for (i = 0; i < OX800DPE_KEYSIZE; ++i)
			DPRINTK("%02x", tweak_key[i]);
		DPRINTK("\n");

		/* update stored values */
		memcpy(&(ox800_aeslrw_driver.cipher_key[0]), cipher_key,
		       OX800DPE_KEYSIZE);
		memcpy(&(ox800_aeslrw_driver.tweak_key[0]), tweak_key,
		       OX800DPE_KEYSIZE);

		/* update hardware values */
		key = (u32* )cipher_key;
		writel(key[0], OX800DPE_KEY00);
		writel(key[1], OX800DPE_KEY01);
		writel(key[2], OX800DPE_KEY02);
		writel(key[3], OX800DPE_KEY03);

		key = (u32* )tweak_key;
		writel(key[0], OX800DPE_KEY10);
		writel(key[1], OX800DPE_KEY11);
		writel(key[2], OX800DPE_KEY12);
		writel(key[3], OX800DPE_KEY13);
	}
}
/* Process an interrupt received from the NIC via backend */
irqreturn_t netfront_accel_net_channel_irq_from_bend(int irq, void *context,
						     struct pt_regs *unused)
{
	netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
	struct net_device *net_dev = vnic->net_dev;
	unsigned long flags;

	VPRINTK("net irq %d from device %s\n", irq, vnic->dev->nodename);

	NETFRONT_ACCEL_STATS_OP(vnic->stats.irq_count++);

	BUG_ON(net_dev == NULL);

	spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
	if (vnic->irq_enabled) {
		netfront_accel_disable_net_interrupts(vnic);
		vnic->irq_enabled = 0;
		spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);

#if NETFRONT_ACCEL_STATS
		vnic->stats.poll_schedule_count++;
		if (vnic->stats.event_count_since_irq >
		    vnic->stats.events_per_irq_max)
			vnic->stats.events_per_irq_max =
				vnic->stats.event_count_since_irq;
		vnic->stats.event_count_since_irq = 0;
#endif
		netif_rx_schedule(net_dev);
	} else {
		spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
		NETFRONT_ACCEL_STATS_OP(vnic->stats.useless_irq_count++);
		DPRINTK("%s: irq when disabled\n", __FUNCTION__);
	}

	return IRQ_HANDLED;
}
void net_accel_update_state(struct xenbus_device *dev, int state)
{
	struct xenbus_transaction tr;
	int err;

	DPRINTK("%s: setting accelstate to %s\n", __FUNCTION__,
		xenbus_strstate(state));

	if (xenbus_exists(XBT_NIL, dev->nodename, "")) {
		VPRINTK("%s: nodename %s\n", __FUNCTION__, dev->nodename);
	again:
		err = xenbus_transaction_start(&tr);
		if (err == 0)
			err = xenbus_printf(tr, dev->nodename, "accelstate",
					    "%d", state);
		if (err != 0) {
			xenbus_transaction_end(tr, 1);
		} else {
			err = xenbus_transaction_end(tr, 0);
			if (err == -EAGAIN)
				goto again;
		}
	}
}
unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
				 unsigned int buflen)
{
	u64 n_sectors = args->dev->n_sectors;
	u32 tmp;

	VPRINTK("ENTER\n");

	n_sectors--;		/* ATA TotalUserSectors - 1 */

	tmp = n_sectors;	/* note: truncates, if lba48 */
	if (args->cmd->cmnd[0] == READ_CAPACITY) {
		/* sector count, 32-bit */
		rbuf[0] = tmp >> (8 * 3);
		rbuf[1] = tmp >> (8 * 2);
		rbuf[2] = tmp >> (8 * 1);
		rbuf[3] = tmp;

		/* sector size */
		tmp = ATA_SECT_SIZE;
		rbuf[6] = tmp >> 8;
		rbuf[7] = tmp;
	} else {
static void sata_fsl_setup_cmd_hdr_entry(struct sata_fsl_port_priv *pp,
					 unsigned int tag, u32 desc_info,
					 u32 data_xfer_len, u8 num_prde,
					 u8 fis_len)
{
	dma_addr_t cmd_descriptor_address;

	cmd_descriptor_address = pp->cmdentry_paddr +
	    tag * SATA_FSL_CMD_DESC_SIZE;

	/* NOTE: both data_xfer_len & fis_len are Dword counts */
	pp->cmdslot[tag].cda = cpu_to_le32(cmd_descriptor_address);
	pp->cmdslot[tag].prde_fis_len =
	    cpu_to_le32((num_prde << 16) | (fis_len << 2));
	pp->cmdslot[tag].ttl = cpu_to_le32(data_xfer_len & ~0x03);
	pp->cmdslot[tag].desc_info = cpu_to_le32(desc_info | (tag & 0x1F));

	VPRINTK("cda=0x%x, prde_fis_len=0x%x, ttl=0x%x, di=0x%x\n",
		pp->cmdslot[tag].cda, pp->cmdslot[tag].prde_fis_len,
		pp->cmdslot[tag].ttl, pp->cmdslot[tag].desc_info);
}
unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
				   unsigned int buflen)
{
	u8 *scsicmd = args->cmd->cmnd, *p, *last;
	struct ata_device *dev = args->dev;
	unsigned int page_control, six_byte, output_len;

	VPRINTK("ENTER\n");

	six_byte = (scsicmd[0] == MODE_SENSE);

	/* we only support saved and current values (which we treat
	 * in the same manner)
	 */
	page_control = scsicmd[2] >> 6;
	if ((page_control != 0) && (page_control != 3))
		return 1;

	if (six_byte)
		output_len = 4;
	else
		output_len = 8;

	p = rbuf + output_len;
	last = rbuf + buflen - 1;

	switch (scsicmd[2] & 0x3f) {
	case 0x01:		/* r/w error recovery */
		output_len += ata_msense_rw_recovery(&p, last);
		break;

	case 0x08:		/* caching */
		output_len += ata_msense_caching(dev, &p, last);
		break;

	case 0x0a: {		/* control mode */
		output_len += ata_msense_ctl_mode(&p, last);
		break;
		}

	case 0x3f:		/* all pages */
		output_len += ata_msense_rw_recovery(&p, last);
		output_len += ata_msense_caching(dev, &p, last);
		output_len += ata_msense_ctl_mode(&p, last);
		break;

	default:		/* invalid page code */
		return 1;
	}

	if (six_byte) {
		output_len--;
		rbuf[0] = output_len;
	} else {
		output_len -= 2;
		rbuf[0] = output_len >> 8;
		rbuf[1] = output_len;
	}

	return 0;
}
/**
 * Generic LRW-AES en/decryption
 * @param encrypt non-zero to encrypt, zero to decrypt
 * @param in Source of data
 * @param out Location to place en/decrypted data
 * @param nents Number of entries in scatter list, in and out must have the
 *        same number of entries
 * @param iv 8 byte array containing the I-Value
 * @return error code or 0 for success
 */
static int ox800_aeslrw_gencrypt(
	u8 encrypt,
	struct scatterlist* in,
	struct scatterlist* out,
	unsigned int nents,
	u8 iv[])
{
	oxnas_dma_channel_t* dma_in;
	oxnas_dma_channel_t* dma_out;
	struct scatterlist* out_;
	char same_buffer;
	int status = 0;

	/* get dma resources (non blocking) */
	dma_in = oxnas_dma_request(0);
	dma_out = oxnas_dma_request(0);

	if ((dma_in) && (dma_out)) {
		u32 reg;

		/* only dereference the channels once we know both were granted */
		VPRINTK("dma in %d out %d\n",
			dma_in->channel_number_, dma_out->channel_number_);

		/* shouldn't be busy or full */
		reg = readl(OX800DPE_STATUS);
		if (!(reg & OX800DPE_STAT_IDLE))
			printk("not idle after abort toggle");
		if (reg & OX800DPE_STAT_TX_NOTEMPTY)
			printk("tx fifo not empty after abort toggle");
		if (!(reg & OX800DPE_STAT_RX_SPACE))
			printk("rx not empty after abort toggle");

		/* check to see if the destination buffer is the same as the source */
		same_buffer = (sg_phys(in) == sg_phys(out));

		/* map transfers */
		if (same_buffer) {
			dma_map_sg(NULL, in, nents, DMA_BIDIRECTIONAL);
			out_ = in;
		} else {
			dma_map_sg(NULL, in, nents, DMA_TO_DEVICE);
			dma_map_sg(NULL, out, nents, DMA_FROM_DEVICE);
			out_ = out;
		}

#ifdef CIPHER_USE_SG_DMA
		/* setup DMA transfers */
		oxnas_dma_device_set_sg(dma_in, OXNAS_DMA_TO_DEVICE, in, nents,
					&oxnas_dpe_rx_dma_settings,
					OXNAS_DMA_MODE_INC);
		oxnas_dma_device_set_sg(dma_out, OXNAS_DMA_FROM_DEVICE, out_,
					nents, &oxnas_dpe_tx_dma_settings,
					OXNAS_DMA_MODE_INC);
#else
		oxnas_dma_device_set(dma_in, OXNAS_DMA_TO_DEVICE,
				     (unsigned char* )sg_dma_address(in),
				     sg_dma_len(in),
				     &oxnas_dpe_rx_dma_settings,
				     OXNAS_DMA_MODE_INC, 1 /* paused */);
		oxnas_dma_device_set(dma_out, OXNAS_DMA_FROM_DEVICE,
				     (unsigned char* )sg_dma_address(out_),
				     sg_dma_len(out_),
				     &oxnas_dpe_tx_dma_settings,
				     OXNAS_DMA_MODE_INC, 1 /* paused */);
#endif

		/* set dma callbacks */
		oxnas_dma_set_callback(dma_in, OXNAS_DMA_CALLBACK_ARG_NUL,
				       OXNAS_DMA_CALLBACK_ARG_NUL);
		oxnas_dma_set_callback(dma_out, OXNAS_DMA_CALLBACK_ARG_NUL,
				       OXNAS_DMA_CALLBACK_ARG_NUL);

		/* set for AES LRW encryption or decryption */
		writel((encrypt ? OX800DPE_CTL_DIRECTION_ENC : 0) |
		       OX800DPE_CTL_MODE_LRW_AES, OX800DPE_CONTROL);
		wmb();

		/* write in I-value */
		writel(*((u32* )&(iv[0])), OX800DPE_DATA_LRW0);
		writel(*((u32* )&(iv[4])), OX800DPE_DATA_LRW1);
		wmb();

		/* wait until done */
		while (!(OX800DPE_STAT_IDLE & readl(OX800DPE_STATUS)))
			;

		/* start dma */
		oxnas_dma_start(dma_out);
		oxnas_dma_start(dma_in);

		/* wait (once for each channel) */
		while (oxnas_dma_is_active(dma_out) ||
		       oxnas_dma_is_active(dma_in)) {
			schedule();
		}

		/* free any allocated dma channels */
		oxnas_dma_free(dma_in);
		oxnas_dma_free(dma_out);

		/* unmap transfers */
		if (same_buffer) {
			dma_unmap_sg(NULL, in, nents, DMA_BIDIRECTIONAL);
		} else {
			dma_unmap_sg(NULL, in, nents, DMA_TO_DEVICE);
			dma_unmap_sg(NULL, out, nents, DMA_FROM_DEVICE);
		}

		status = ox800_aeslrw_driver.result;
	} else {
		/* free any allocated dma channels */
		if (dma_in)
			oxnas_dma_free(dma_in);
		if (dma_out)
			oxnas_dma_free(dma_out);
		status = -EBUSY;
	}

	/* return an indication of success */
	return status;
}
static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
{
	struct ata_taskfile *tf = &qc->tf;
	unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48;

	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->hob_nsect = 0;
	tf->hob_lbal = 0;
	tf->hob_lbam = 0;
	tf->hob_lbah = 0;
	tf->protocol = qc->dev->xfer_protocol;
	tf->device |= ATA_LBA;

	if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 ||
	    scsicmd[0] == READ_16) {
		tf->command = qc->dev->read_cmd;
	} else {
		tf->command = qc->dev->write_cmd;
		tf->flags |= ATA_TFLAG_WRITE;
	}

	if (scsicmd[0] == READ_10 || scsicmd[0] == WRITE_10) {
		if (lba48) {
			tf->hob_nsect = scsicmd[7];
			tf->hob_lbal = scsicmd[2];

			qc->nsect = ((unsigned int)scsicmd[7] << 8) |
					scsicmd[8];
		} else {
			/* if we don't support LBA48 addressing, the request
			 * -may- be too large. */
			if ((scsicmd[2] & 0xf0) || scsicmd[7])
				return 1;

			/* stores LBA27:24 in lower 4 bits of device reg */
			tf->device |= scsicmd[2];

			qc->nsect = scsicmd[8];
		}

		tf->nsect = scsicmd[8];
		tf->lbal = scsicmd[5];
		tf->lbam = scsicmd[4];
		tf->lbah = scsicmd[3];

		VPRINTK("ten-byte command\n");
		return 0;
	}

	if (scsicmd[0] == READ_6 || scsicmd[0] == WRITE_6) {
		qc->nsect = tf->nsect = scsicmd[4];
		tf->lbal = scsicmd[3];
		tf->lbam = scsicmd[2];
		tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */

		VPRINTK("six-byte command\n");
		return 0;
	}

	if (scsicmd[0] == READ_16 || scsicmd[0] == WRITE_16) {
		/* rule out impossible LBAs and sector counts */
		if (scsicmd[2] || scsicmd[3] || scsicmd[10] || scsicmd[11])
			return 1;

		if (lba48) {
			tf->hob_nsect = scsicmd[12];
			tf->hob_lbal = scsicmd[6];
			tf->hob_lbam = scsicmd[5];
			tf->hob_lbah = scsicmd[4];

			qc->nsect = ((unsigned int)scsicmd[12] << 8) |
					scsicmd[13];
		} else {
			/* once again, filter out impossible non-zero values */
			if (scsicmd[4] || scsicmd[5] || scsicmd[12] ||
			    (scsicmd[6] & 0xf0))
				return 1;

			/* stores LBA27:24 in lower 4 bits of device reg
			 * (byte 6 holds LBA31:24 in a 16-byte CDB) */
			tf->device |= scsicmd[6];

			qc->nsect = scsicmd[13];
		}

		tf->nsect = scsicmd[13];
		tf->lbal = scsicmd[9];
		tf->lbam = scsicmd[8];
		tf->lbah = scsicmd[7];

		VPRINTK("sixteen-byte command\n");
		return 0;
	}

	DPRINTK("no-byte command\n");
	return 1;
}
unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
			     unsigned int buflen)
{
	VPRINTK("ENTER\n");
	return 0;
}