Example No. 1
0
static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
{
	if (sdp->tagged_supported)
		scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth);
	else
		scsi_adjust_queue_depth(sdp, 0, 1);
	return 0;
}
Example No. 2
0
static int tcm_loop_slave_configure(struct scsi_device *sd)
{
	if (sd->tagged_supported) {
		scsi_activate_tcq(sd, sd->queue_depth);
		scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG,
					sd->host->cmd_per_lun);
	} else {
		scsi_adjust_queue_depth(sd, 0,
					sd->host->cmd_per_lun);
	}

	return 0;
}
Example No. 3
0
static int u14_34f_slave_configure(struct scsi_device *dev) {
    int j, tqd, utqd;
    char *tag_suffix, *link_suffix;
    struct Scsi_Host *host = dev->host;

    j = ((struct hostdata *) host->hostdata)->board_number;

    utqd = MAX_CMD_PER_LUN;
    tqd = max_queue_depth;
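
    /* Tagged-capable devices (and those using linked commands) are given
       max_queue_depth commands per LUN; everything else is limited to
       MAX_CMD_PER_LUN. */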

    if (TLDEV(dev->type) && dev->tagged_supported) {
        if (tag_mode == TAG_SIMPLE) {
            scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, tqd);
            tag_suffix = ", simple tags";
        }
        else if (tag_mode == TAG_ORDERED) {
            scsi_adjust_queue_depth(dev, MSG_ORDERED_TAG, tqd);
            tag_suffix = ", ordered tags";
        }
        else {
            scsi_adjust_queue_depth(dev, 0, tqd);
            tag_suffix = ", no tags";
        }
    }
    else if (TLDEV(dev->type) && linked_comm) {
        scsi_adjust_queue_depth(dev, 0, tqd);
        tag_suffix = ", untagged";
    }
    else {
        scsi_adjust_queue_depth(dev, 0, utqd);
        tag_suffix = "";
    }

    if (TLDEV(dev->type) && linked_comm && dev->queue_depth > 2)
        link_suffix = ", sorted";
    else if (TLDEV(dev->type))
        link_suffix = ", unsorted";
    else
        link_suffix = "";

    sdev_printk(KERN_INFO, dev, "cmds/lun %d%s%s.\n",
                dev->queue_depth, link_suffix, tag_suffix);

    return FALSE;
}
Example No. 4
0
static int idescsi_slave_configure(struct scsi_device * sdp)
{
	/* Configure detected device */
	sdp->use_10_for_rw = 1;
	sdp->use_10_for_ms = 1;
	scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, sdp->host->cmd_per_lun);
	return 0;
}
Example No. 5
0
static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
					int reason)
{
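	/*
	 * The midlayer passes the reason for the change: apply default and
	 * ramp-up requests directly, let it track queue-full events, and
	 * refuse anything else.
	 */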
	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
		break;
	case SCSI_QDEPTH_QFULL:
		scsi_track_queue_full(sdev, depth);
		break;
	case SCSI_QDEPTH_RAMP_UP:
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return sdev->queue_depth;
}
Example No. 6
0
/**
 * megaraid_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 * @reason:	calling context
 *
 * Return value:
 * 	actual depth set
 */
static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth,
				       int reason)
{
	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	if (qdepth > MBOX_MAX_SCSI_CMDS)
		qdepth = MBOX_MAX_SCSI_CMDS;
	scsi_adjust_queue_depth(sdev, 0, qdepth);
	return sdev->queue_depth;
}
Example No. 7
0
static int aac_slave_configure(struct scsi_device *sdev)
{
	struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
	if (aac->jbod && (sdev->type == TYPE_DISK))
		sdev->removable = 1;
	if ((sdev->type == TYPE_DISK) &&
			(sdev_channel(sdev) != CONTAINER_CHANNEL) &&
			(!aac->jbod || sdev->inq_periph_qual) &&
			(!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
		if (expose_physicals == 0)
			return -ENXIO;
		if (expose_physicals < 0)
			sdev->no_uld_attach = 1;
	}
	if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
			(!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) &&
			!sdev->no_uld_attach) {
		struct scsi_device * dev;
		struct Scsi_Host *host = sdev->host;
		unsigned num_lsu = 0;
		unsigned num_one = 0;
		unsigned depth;
		unsigned cid;

		/*
		 * Firmware has an individual device recovery time typically
		 * of 35 seconds, give us a margin.
		 */
		if (sdev->request_queue->rq_timeout < (45 * HZ))
			blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
		for (cid = 0; cid < aac->maximum_num_containers; ++cid)
			if (aac->fsa_dev[cid].valid)
				++num_lsu;
		__shost_for_each_device(dev, host) {
			if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
					(!aac->raid_scsi_mode ||
						(sdev_channel(sdev) != 2)) &&
					!dev->no_uld_attach) {
				if ((sdev_channel(dev) != CONTAINER_CHANNEL)
				 || !aac->fsa_dev[sdev_id(dev)].valid)
					++num_lsu;
			} else
				++num_one;
		}
		if (num_lsu == 0)
			++num_lsu;
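		/*
		 * Split the host's remaining can_queue slots (one is reserved
		 * for each non-array device counted in num_one) evenly across
		 * the array devices, clamped to the range 2..256 below.
		 */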
		depth = (host->can_queue - num_one) / num_lsu;
		if (depth > 256)
			depth = 256;
		else if (depth < 2)
			depth = 2;
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
	} else
		scsi_adjust_queue_depth(sdev, 0, 1);

	return 0;
}
Example No. 8
0
static void
qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
{
	fc_port_t *fcport = data;

	if (fcport->ha->max_q_depth <= sdev->queue_depth)
		return;
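
	/* Bump the queue depth by one, preserving the device's tag type. */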

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
		    sdev->queue_depth + 1);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
		    sdev->queue_depth + 1);

	fcport->last_ramp_up = jiffies;

	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
	    sdev->queue_depth));
}
Example No. 9
0
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
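				/*
				 * Shrink each device's queue depth by the
				 * fraction of recent commands that failed for
				 * lack of resources (num_rsrc_err out of
				 * num_rsrc_err + num_cmd_success), by at
				 * least one.
				 */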
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							new_queue_depth);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
Example No. 10
0
int pm8001_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);
	if (ret)
		return ret;
	if (dev_is_sata(dev)) {
	#ifdef PM8001_DISABLE_NCQ
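		/* With NCQ compiled out, turn it off on the ATA device and
		 * limit the queue to a single outstanding command. */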
		struct ata_port *ap = dev->sata_dev.ap;
		struct ata_device *adev = ap->link.device;
		adev->flags |= ATA_DFLAG_NCQ_OFF;
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
	#endif
	}
	return 0;
}
Example No. 11
0
int pluto_slave_configure(Scsi_Device *device)
{
	int depth_to_use;

	if (device->tagged_supported)
		depth_to_use = /* 254 */ 8;
	else
		depth_to_use = 2;

	scsi_adjust_queue_depth(device,
				(device->tagged_supported ?
				 MSG_SIMPLE_TAG : 0),
				depth_to_use);

	return 0;
}
Example No. 12
0
static int aac_slave_configure(struct scsi_device *sdev)
{
	if (sdev_channel(sdev) == CONTAINER_CHANNEL) {
		sdev->skip_ms_page_8 = 1;
		sdev->skip_ms_page_3f = 1;
	}
	if ((sdev->type == TYPE_DISK) &&
			(sdev_channel(sdev) != CONTAINER_CHANNEL)) {
		if (expose_physicals == 0)
			return -ENXIO;
		if (expose_physicals < 0) {
			struct aac_dev *aac =
				(struct aac_dev *)sdev->host->hostdata;
			if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
				sdev->no_uld_attach = 1;
		}
	}
	if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
			(sdev_channel(sdev) == CONTAINER_CHANNEL)) {
		struct scsi_device * dev;
		struct Scsi_Host *host = sdev->host;
		unsigned num_lsu = 0;
		unsigned num_one = 0;
		unsigned depth;

		__shost_for_each_device(dev, host) {
			if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
				(sdev_channel(dev) == CONTAINER_CHANNEL))
				++num_lsu;
			else
				++num_one;
		}
		if (num_lsu == 0)
			++num_lsu;
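		/*
		 * Divide the host queue slots left over after the
		 * single-command devices (num_one) among the container
		 * disks, keeping each resulting depth between 2 and 256.
		 */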
		depth = (host->can_queue - num_one) / num_lsu;
		if (depth > 256)
			depth = 256;
		else if (depth < 2)
			depth = 2;
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
		if (!(((struct aac_dev *)host->hostdata)->adapter_info.options &
				AAC_OPT_NEW_COMM))
			blk_queue_max_segment_size(sdev->request_queue, 65536);
	} else
		scsi_adjust_queue_depth(sdev, 0, 1);

	return 0;
}