static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
			    u8 phy_op, enum sas_linkrate min,
			    enum sas_linkrate max, u8 *resp_data)
{
	struct sas_internal *i =
		to_sas_internal(sas_ha->core.shost->transportt);
	struct sas_phy_linkrates rates;

	if (phy_id >= sas_ha->num_phys) {
		resp_data[2] = SMP_RESP_NO_PHY;
		return;
	}

	switch (phy_op) {
	case PHY_FUNC_NOP:
	case PHY_FUNC_LINK_RESET:
	case PHY_FUNC_HARD_RESET:
	case PHY_FUNC_DISABLE:
	case PHY_FUNC_CLEAR_ERROR_LOG:
	case PHY_FUNC_CLEAR_AFFIL:
	case PHY_FUNC_TX_SATA_PS_SIGNAL:
		break;
	default:
		resp_data[2] = SMP_RESP_PHY_UNK_OP;
		return;
	}

	rates.minimum_linkrate = min;
	rates.maximum_linkrate = max;

	if (i->dft->lldd_control_phy(sas_ha->sas_phy[phy_id], phy_op, &rates))
		resp_data[2] = SMP_RESP_FUNC_FAILED;
	else
		resp_data[2] = SMP_RESP_FUNC_ACC;
}
static void sas_phye_oob_error(struct work_struct *work)
{
	struct asd_sas_event *ev = to_asd_sas_event(work);
	struct asd_sas_phy *phy = ev->phy;
	struct sas_ha_struct *sas_ha = phy->ha;
	struct asd_sas_port *port = phy->port;
	struct sas_internal *i =
		to_sas_internal(sas_ha->core.shost->transportt);

	clear_bit(PHYE_OOB_ERROR, &phy->phy_events_pending);

	sas_deform_port(phy, 1);

	/* If the phy is no longer part of a port, escalate: hard reset on
	 * the first two OOB errors, then disable the phy and reset the
	 * error counter.
	 */
	if (!port && phy->enabled && i->dft->lldd_control_phy) {
		phy->error++;
		switch (phy->error) {
		case 1:
		case 2:
			i->dft->lldd_control_phy(phy, PHY_FUNC_HARD_RESET,
						 NULL);
			break;
		case 3:
		default:
			phy->error = 0;
			phy->enabled = 0;
			i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
			break;
		}
	}
}
/**
 * sas_form_port -- add this phy to a port
 * @phy: the phy of interest
 *
 * This function adds this phy to an existing port, thus creating a wide
 * port, or it creates a port and adds the phy to the port.
 */
static void sas_form_port(struct asd_sas_phy *phy)
{
	int i;
	struct sas_ha_struct *sas_ha = phy->ha;
	struct asd_sas_port *port = phy->port;
	struct sas_internal *si =
		to_sas_internal(sas_ha->core.shost->transportt);
	unsigned long flags;

	if (port) {
		if (!phy_is_wideport_member(port, phy))
			sas_deform_port(phy, 0);
		else {
			SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
				    __func__, phy->id, phy->port->id,
				    phy->port->num_phys);
			return;
		}
	}

	/* see if the phy should be part of a wide port */
	spin_lock_irqsave(&sas_ha->phy_port_lock, flags);
	for (i = 0; i < sas_ha->num_phys; i++) {
		port = sas_ha->sas_port[i];
		spin_lock(&port->phy_list_lock);
		if (*(u64 *) port->sas_addr &&
		    phy_is_wideport_member(port, phy) && port->num_phys > 0) {
			/* wide port */
			SAS_DPRINTK("phy%d matched wide port%d\n", phy->id,
				    port->id);
			break;
		}
		spin_unlock(&port->phy_list_lock);
	}
	/* The phy does not match any existing port, create a new one */
	if (i == sas_ha->num_phys) {
		for (i = 0; i < sas_ha->num_phys; i++) {
			port = sas_ha->sas_port[i];
			spin_lock(&port->phy_list_lock);
			if (*(u64 *)port->sas_addr == 0
			    && port->num_phys == 0) {
				memcpy(port->sas_addr, phy->sas_addr,
				       SAS_ADDR_SIZE);
				break;
			}
			spin_unlock(&port->phy_list_lock);
		}
	}

	if (i >= sas_ha->num_phys) {
		printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
		       __func__);
		spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
		return;
	}

	/* add the phy to the port */
	list_add_tail(&phy->port_phy_el, &port->phy_list);
void sas_notify_lldd_dev_gone(struct domain_device *dev)
{
	struct sas_ha_struct *sas_ha = dev->port->ha;
	struct Scsi_Host *shost = sas_ha->core.shost;
	struct sas_internal *i = to_sas_internal(shost->transportt);

	if (i->dft->lldd_dev_gone)
		i->dft->lldd_dev_gone(dev);
}
static void sas_phye_spinup_hold(struct work_struct *work)
{
	struct asd_sas_event *ev = to_asd_sas_event(work);
	struct asd_sas_phy *phy = ev->phy;
	struct sas_ha_struct *sas_ha = phy->ha;
	struct sas_internal *i =
		to_sas_internal(sas_ha->core.shost->transportt);

	clear_bit(PHYE_SPINUP_HOLD, &phy->phy_events_pending);

	phy->error = 0;
	i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
}
static void sas_phye_spinup_hold(void *data)
{
	struct asd_sas_phy *phy = data;
	struct sas_ha_struct *sas_ha = phy->ha;
	struct sas_internal *i =
		to_sas_internal(sas_ha->core.shost->transportt);

	sas_begin_event(PHYE_SPINUP_HOLD, &phy->ha->event_lock,
			&phy->phy_events_pending);

	phy->error = 0;
	i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
}
int sas_notify_lldd_dev_found(struct domain_device *dev)
{
	int res = 0;
	struct sas_ha_struct *sas_ha = dev->port->ha;
	struct Scsi_Host *shost = sas_ha->core.shost;
	struct sas_internal *i = to_sas_internal(shost->transportt);

	if (i->dft->lldd_dev_found) {
		res = i->dft->lldd_dev_found(dev);
		if (res) {
			printk("sas: driver on pcidev %s cannot handle "
			       "device %llx, error:%d\n",
			       dev_name(sas_ha->dev),
			       SAS_ADDR(dev->sas_addr), res);
		}
	}
	return res;
}
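/* The i->dft->lldd_* calls in the notify helpers above dispatch into the
 * low-level driver through struct sas_domain_function_template.  The sketch
 * below shows how an LLDD might wire up the two hooks used here; only the
 * template field names come from the call sites above, the driver name and
 * callback bodies are hypothetical.
 */
static int my_lldd_dev_found(struct domain_device *dev)
{
	/* claim hardware context for the new domain device; 0 on success */
	return 0;
}

static void my_lldd_dev_gone(struct domain_device *dev)
{
	/* release whatever my_lldd_dev_found() set up */
}

static struct sas_domain_function_template my_lldd_transport_ops = {
	.lldd_dev_found	= my_lldd_dev_found,
	.lldd_dev_gone	= my_lldd_dev_gone,
	/* .lldd_execute_task, .lldd_control_phy, ... as used elsewhere */
};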
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
{
	int res;
	struct sas_task *task;
	struct domain_device *dev = qc->ap->private_data;
	struct sas_ha_struct *sas_ha = dev->port->ha;
	struct Scsi_Host *host = sas_ha->core.shost;
	struct sas_internal *i = to_sas_internal(host->transportt);
	struct scatterlist *sg;
	unsigned int num = 0;
	unsigned int xfer = 0;

	task = sas_alloc_task(GFP_ATOMIC);
	if (!task)
		return AC_ERR_SYSTEM;
	task->dev = dev;
	task->task_proto = SAS_PROTOCOL_STP;
	task->task_done = sas_ata_task_done;

	if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
	    qc->tf.command == ATA_CMD_FPDMA_READ) {
		/* Need to zero out the tag libata assigned us */
		qc->tf.nsect = 0;
	}

	ata_tf_to_fis(&qc->tf, 1, 0, (u8 *)&task->ata_task.fis);
	task->uldd_task = qc;
	if (ata_is_atapi(qc->tf.protocol)) {
		memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
		task->total_xfer_len = qc->nbytes + qc->pad_len;
		task->num_scatter = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
	} else {
		ata_for_each_sg(sg, qc) {
			num++;
			xfer += sg->length;
		}

		task->total_xfer_len = xfer;
		task->num_scatter = num;
	}
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
{
	int res;
	struct sas_task *task;
	struct domain_device *dev = qc->ap->private_data;
	struct sas_ha_struct *sas_ha = dev->port->ha;
	struct Scsi_Host *host = sas_ha->core.shost;
	struct sas_internal *i = to_sas_internal(host->transportt);
	struct scatterlist *sg;
	unsigned int xfer = 0;
	unsigned int si;

	/* If the device fell off, no sense in issuing commands */
	if (dev->gone)
		return AC_ERR_SYSTEM;

	task = sas_alloc_task(GFP_ATOMIC);
	if (!task)
		return AC_ERR_SYSTEM;
	task->dev = dev;
	task->task_proto = SAS_PROTOCOL_STP;
	task->task_done = sas_ata_task_done;

	if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
	    qc->tf.command == ATA_CMD_FPDMA_READ) {
		/* Need to zero out the tag libata assigned us */
		qc->tf.nsect = 0;
	}

	ata_tf_to_fis(&qc->tf, 1, 0, (u8 *)&task->ata_task.fis);
	task->uldd_task = qc;
	if (ata_is_atapi(qc->tf.protocol)) {
		memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
		task->total_xfer_len = qc->nbytes;
		task->num_scatter = qc->n_elem;
	} else {
		for_each_sg(qc->sg, sg, qc->n_elem, si)
			xfer += sg->length;

		task->total_xfer_len = xfer;
		task->num_scatter = si;
	}

	task->data_dir = qc->dma_dir;
	task->scatter = qc->sg;
	task->ata_task.retry_count = 1;
	task->task_state_flags = SAS_TASK_STATE_PENDING;
	qc->lldd_task = task;

	switch (qc->tf.protocol) {
	case ATA_PROT_NCQ:
		task->ata_task.use_ncq = 1;
		/* fall through */
	case ATAPI_PROT_DMA:
	case ATA_PROT_DMA:
		task->ata_task.dma_xfer = 1;
		break;
	}

	if (qc->scsicmd)
		ASSIGN_SAS_TASK(qc->scsicmd, task);

	if (sas_ha->lldd_max_execute_num < 2)
		res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
	else
		res = sas_queue_up(task);

	/* Examine */
	if (res) {
		SAS_DPRINTK("lldd_execute_task returned: %d\n", res);

		if (qc->scsicmd)
			ASSIGN_SAS_TASK(qc->scsicmd, NULL);
		sas_free_task(task);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct domain_device *dev = qc->ap->private_data;

	memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf));
	return true;
}

static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct domain_device *dev = ap->private_data;
	struct sas_internal *i =
		to_sas_internal(dev->port->ha->core.shost->transportt);
	int res = TMF_RESP_FUNC_FAILED;
	int ret = 0;

	if (i->dft->lldd_I_T_nexus_reset)
		res = i->dft->lldd_I_T_nexus_reset(dev);

	if (res != TMF_RESP_FUNC_COMPLETE) {
		SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __func__);
		ret = -EAGAIN;
	}

	switch (dev->sata_dev.command_set) {
	case ATA_COMMAND_SET:
		SAS_DPRINTK("%s: Found ATA device.\n", __func__);
		*class = ATA_DEV_ATA;
/**
 * sas_form_port -- add this phy to a port
 * @phy: the phy of interest
 *
 * This function adds this phy to an existing port, thus creating a wide
 * port, or it creates a port and adds the phy to the port.
 */
static void sas_form_port(struct asd_sas_phy *phy)
{
	int i;
	struct sas_ha_struct *sas_ha = phy->ha;
	struct asd_sas_port *port = phy->port;
	struct sas_internal *si =
		to_sas_internal(sas_ha->core.shost->transportt);
	unsigned long flags;

	if (port) {
		if (memcmp(port->attached_sas_addr, phy->attached_sas_addr,
			   SAS_ADDR_SIZE) != 0)
			sas_deform_port(phy);
		else {
			SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
				    __func__, phy->id, phy->port->id,
				    phy->port->num_phys);
			return;
		}
	}

	/* find a port */
	spin_lock_irqsave(&sas_ha->phy_port_lock, flags);
	for (i = 0; i < sas_ha->num_phys; i++) {
		port = sas_ha->sas_port[i];
		spin_lock(&port->phy_list_lock);
		if (*(u64 *) port->sas_addr &&
		    memcmp(port->attached_sas_addr,
			   phy->attached_sas_addr, SAS_ADDR_SIZE) == 0 &&
		    port->num_phys > 0) {
			/* wide port */
			SAS_DPRINTK("phy%d matched wide port%d\n", phy->id,
				    port->id);
			break;
		} else if (*(u64 *) port->sas_addr == 0
			   && port->num_phys == 0) {
			memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE);
			break;
		}
		spin_unlock(&port->phy_list_lock);
	}

	if (i >= sas_ha->num_phys) {
		printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
		       __func__);
		spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
		return;
	}

	/* add the phy to the port */
	list_add_tail(&phy->port_phy_el, &port->phy_list);
	phy->port = port;
	port->num_phys++;
	port->phy_mask |= (1U << phy->id);

	if (!port->phy)
		port->phy = phy->phy;

	if (*(u64 *)port->attached_sas_addr == 0) {
		port->class = phy->class;
		memcpy(port->attached_sas_addr, phy->attached_sas_addr,
		       SAS_ADDR_SIZE);
		port->iproto = phy->iproto;
		port->tproto = phy->tproto;
		port->oob_mode = phy->oob_mode;
		port->linkrate = phy->linkrate;
	} else
static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
{
	return to_sas_internal(dev->port->ha->core.shost->transportt);
}
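/* dev_to_sas_internal() just condenses the open-coded transport-template
 * lookup used throughout the functions above.  A minimal usage sketch
 * (example_reset_dev() is a hypothetical caller, not part of this file):
 */
static int example_reset_dev(struct domain_device *dev)
{
	/* before: to_sas_internal(dev->port->ha->core.shost->transportt) */
	struct sas_internal *i = dev_to_sas_internal(dev);
	int res = TMF_RESP_FUNC_FAILED;

	if (i->dft->lldd_I_T_nexus_reset)
		res = i->dft->lldd_I_T_nexus_reset(dev);
	return res;
}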
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
{
	unsigned long flags;
	struct sas_task *task;
	struct scatterlist *sg;
	int ret = AC_ERR_SYSTEM;
	unsigned int si, xfer = 0;
	struct ata_port *ap = qc->ap;
	struct domain_device *dev = ap->private_data;
	struct sas_ha_struct *sas_ha = dev->port->ha;
	struct Scsi_Host *host = sas_ha->core.shost;
	struct sas_internal *i = to_sas_internal(host->transportt);

	/* TODO: audit callers to ensure they are ready for qc_issue to
	 * unconditionally re-enable interrupts
	 */
	local_irq_save(flags);
	spin_unlock(ap->lock);

	/* If the device fell off, no sense in issuing commands */
	if (test_bit(SAS_DEV_GONE, &dev->state))
		goto out;

	task = sas_alloc_task(GFP_ATOMIC);
	if (!task)
		goto out;
	task->dev = dev;
	task->task_proto = SAS_PROTOCOL_STP;
	task->task_done = sas_ata_task_done;

	if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
	    qc->tf.command == ATA_CMD_FPDMA_READ) {
		/* Need to zero out the tag libata assigned us */
		qc->tf.nsect = 0;
	}

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1,
		      (u8 *)&task->ata_task.fis);
	task->uldd_task = qc;
	if (ata_is_atapi(qc->tf.protocol)) {
		memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
		task->total_xfer_len = qc->nbytes;
		task->num_scatter = qc->n_elem;
	} else {
		for_each_sg(qc->sg, sg, qc->n_elem, si)
			xfer += sg->length;

		task->total_xfer_len = xfer;
		task->num_scatter = si;
	}

	task->data_dir = qc->dma_dir;
	task->scatter = qc->sg;
	task->ata_task.retry_count = 1;
	task->task_state_flags = SAS_TASK_STATE_PENDING;
	qc->lldd_task = task;

	switch (qc->tf.protocol) {
	case ATA_PROT_NCQ:
		task->ata_task.use_ncq = 1;
		/* fall through */
	case ATAPI_PROT_DMA:
	case ATA_PROT_DMA:
		task->ata_task.dma_xfer = 1;
		break;
	}

	if (qc->scsicmd)
		ASSIGN_SAS_TASK(qc->scsicmd, task);

	if (sas_ha->lldd_max_execute_num < 2)
		ret = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
	else
		ret = sas_queue_up(task);

	/* Examine */
	if (ret) {
		SAS_DPRINTK("lldd_execute_task returned: %d\n", ret);

		if (qc->scsicmd)
			ASSIGN_SAS_TASK(qc->scsicmd, NULL);
		sas_free_task(task);
		ret = AC_ERR_SYSTEM;
	}

 out:
	spin_lock(ap->lock);
	local_irq_restore(flags);
	return ret;
}
 * @parameters: See SCSI Core documentation
 *
 * Note: XXX: Remove the host unlock/lock pair when SCSI Core can
 * call us without holding an IRQ spinlock...
 */
static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
				void (*scsi_done)(struct scsi_cmnd *))
	__releases(host->host_lock)
	__acquires(dev->sata_dev.ap->lock)
	__releases(dev->sata_dev.ap->lock)
	__acquires(host->host_lock)
{
	int res = 0;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct Scsi_Host *host = cmd->device->host;
	struct sas_internal *i = to_sas_internal(host->transportt);

	spin_unlock_irq(host->host_lock);
	{
		struct sas_ha_struct *sas_ha = dev->port->ha;
		struct sas_task *task;

		if (dev_is_sata(dev)) {
			unsigned long flags;

			spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
			res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
			spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
			goto out;
		}