static int fnic_slave_alloc(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); sdev->tagged_supported = 1; if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH); return 0; }
/*
 * slave_configure hook for tcm_loop: choose the tagging/queue-depth
 * policy based on whether the device reported TCQ support.  Always
 * succeeds (returns 0).
 */
static int tcm_loop_slave_configure(struct scsi_device *sd)
{
	int depth = sd->host->cmd_per_lun;

	if (!sd->tagged_supported) {
		/* Untagged device: no tag messages, host's per-LUN depth. */
		scsi_adjust_queue_depth(sd, 0, depth);
		return 0;
	}

	/* Turn on TCQ, then settle on the host's per-LUN depth with
	 * simple tags. */
	scsi_activate_tcq(sd, sd->queue_depth);
	scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG, depth);
	return 0;
}
/*
 * change_queue_type hook for tcm_loop: switch the device between tagged
 * and untagged queuing.  Returns the tag type actually in effect, which
 * is 0 when the device does not support tagged queuing.
 */
static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
{
	if (!sdev->tagged_supported)
		return 0;

	scsi_set_tag_type(sdev, tag);

	if (tag)
		scsi_activate_tcq(sdev, sdev->queue_depth);
	else
		scsi_deactivate_tcq(sdev, sdev->queue_depth);

	return tag;
}
static int fnic_slave_alloc(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); struct fc_lport *lp = shost_priv(sdev->host); struct fnic *fnic = lport_priv(lp); sdev->tagged_supported = 1; if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; if (sdev->tagged_supported) scsi_activate_tcq(sdev, fnic_max_qdepth); rport->dev_loss_tmo = fnic->config.port_down_timeout / 1000; return 0; }