Example #1
static void __init zfcp_init_device_configure(void)
{
	struct zfcp_adapter *adapter;
	struct zfcp_port *port;
	struct zfcp_unit *unit;

	down(&zfcp_data.config_sema);
	read_lock_irq(&zfcp_data.config_lock);
	adapter = zfcp_get_adapter_by_busid(zfcp_data.init_busid);
	if (adapter)
		zfcp_adapter_get(adapter);
	read_unlock_irq(&zfcp_data.config_lock);

	if (!adapter)
		goto out_adapter;
	port = zfcp_port_enqueue(adapter, zfcp_data.init_wwpn, 0, 0);
	if (IS_ERR(port))
		goto out_port;
	unit = zfcp_unit_enqueue(port, zfcp_data.init_fcp_lun);
	if (IS_ERR(unit))
		goto out_unit;
	up(&zfcp_data.config_sema);
	ccw_device_set_online(adapter->ccw_device);
	zfcp_erp_wait(adapter);
	down(&zfcp_data.config_sema);
	zfcp_unit_put(unit);
out_unit:
	zfcp_port_put(port);
out_port:
	zfcp_adapter_put(adapter);
out_adapter:
	up(&zfcp_data.config_sema);
	return;
}
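
A pattern shared by most of the examples in this listing: the lookup runs under read_lock_irq(), a reference is taken while the lock is still held, and any blocking work happens only after the lock is dropped. Below is a minimal, self-contained sketch of that idiom; the demo_* names are hypothetical and not taken from any of the drivers shown here.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/types.h>

struct demo_obj {
	struct list_head list;
	struct kref ref;	/* keeps the object alive after unlock */
	u64 key;
};

static LIST_HEAD(demo_list);
static DEFINE_RWLOCK(demo_lock);

/* Find an object by key; return it with an extra reference, or NULL. */
static struct demo_obj *demo_obj_get(u64 key)
{
	struct demo_obj *obj;

	read_lock_irq(&demo_lock);
	list_for_each_entry(obj, &demo_list, list) {
		if (obj->key == key) {
			kref_get(&obj->ref);	/* taken under the lock */
			read_unlock_irq(&demo_lock);
			return obj;
		}
	}
	read_unlock_irq(&demo_lock);
	return NULL;
}

The caller can then sleep (for example while waiting for error recovery) holding only its reference, exactly as zfcp_init_device_configure() does after zfcp_adapter_get().
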
Example #2
static int ns2_led_get_mode(struct ns2_led_data *led_dat,
			    enum ns2_led_modes *mode)
{
	int i;
	int ret = -EINVAL;
	int cmd_level;
	int slow_level;

	read_lock_irq(&led_dat->rw_lock);

	cmd_level = gpio_get_value(led_dat->cmd);
	slow_level = gpio_get_value(led_dat->slow);

	for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
		if (cmd_level == ns2_led_modval[i].cmd_level &&
		    slow_level == ns2_led_modval[i].slow_level) {
			*mode = ns2_led_modval[i].mode;
			ret = 0;
			break;
		}
	}

	read_unlock_irq(&led_dat->rw_lock);

	return ret;
}
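
The lookup above maps a pair of GPIO levels back to an LED mode under the read lock. The write side of the same lock would be taken when changing the mode; the sketch below is a simplified, hypothetical counterpart of the driver's set-mode path (it reuses the ns2_led_data and ns2_led_modval declarations from the example, but is not the driver's verbatim code).

static void demo_led_set_mode(struct ns2_led_data *led_dat,
			      enum ns2_led_modes mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
		if (mode != ns2_led_modval[i].mode)
			continue;

		/* Drive both GPIOs under the writer lock so readers never
		 * observe a half-updated cmd/slow pair. */
		write_lock_irq(&led_dat->rw_lock);
		gpio_set_value(led_dat->cmd, ns2_led_modval[i].cmd_level);
		gpio_set_value(led_dat->slow, ns2_led_modval[i].slow_level);
		write_unlock_irq(&led_dat->rw_lock);
		break;
	}
}
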
Example #3
/*
 * Our MPS died. Tell our daemon to send NHRP data plane purge to each
 * of the egress shortcuts we have.
 */
static void mps_death( struct k_message * msg, struct mpoa_client * mpc )
{
	eg_cache_entry *entry;

	dprintk("mpoa: (%s) mps_death:\n", mpc->dev->name);

	if(memcmp(msg->MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN)){
		printk("mpoa: (%s) mps_death: wrong MPS\n", mpc->dev->name);
		return;
	}

	/* FIXME: This knows too much of the cache structure */
	read_lock_irq(&mpc->egress_lock);
	entry = mpc->eg_cache;
	while (entry != NULL) {
		purge_egress_shortcut(entry->shortcut, entry);
		entry = entry->next;
	}
	read_unlock_irq(&mpc->egress_lock);

	mpc->in_ops->destroy_cache(mpc);
	mpc->eg_ops->destroy_cache(mpc);

	return;
}
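
One caveat about the plain _irq form used in these examples: read_lock_irq() disables local interrupts and read_unlock_irq() unconditionally re-enables them, so it is only safe where interrupts are known to be enabled on entry (typically process context). Where that is not guaranteed, the _irqsave variant preserves the caller's interrupt state instead. A minimal sketch with a hypothetical helper name:

/* Count list entries under a reader lock, preserving the caller's IRQ state. */
static int demo_count_entries(rwlock_t *lock, struct list_head *head)
{
	struct list_head *pos;
	unsigned long flags;
	int n = 0;

	read_lock_irqsave(lock, flags);
	list_for_each(pos, head)
		n++;
	read_unlock_irqrestore(lock, flags);

	return n;
}
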
Example #4
static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct zfcp_port *port = dev_get_drvdata(dev);
	struct zfcp_unit *unit;
	u64 fcp_lun;
	LIST_HEAD(unit_remove_lh);
	struct scsi_device *sdev;

	mutex_lock(&zfcp_data.config_mutex);
	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
		mutex_unlock(&zfcp_data.config_mutex);
		return -EBUSY;
	}

	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) {
		mutex_unlock(&zfcp_data.config_mutex);
		return -EINVAL;
	}

	read_lock_irq(&zfcp_data.config_lock);
	unit = zfcp_get_unit_by_lun(port, fcp_lun);
	read_unlock_irq(&zfcp_data.config_lock);
	if (!unit) {
		mutex_unlock(&zfcp_data.config_mutex);
		return -ENXIO;
	}
	zfcp_unit_get(unit);
	mutex_unlock(&zfcp_data.config_mutex);

	sdev = scsi_device_lookup(port->adapter->scsi_host, 0,
				  port->starget_id,
				  scsilun_to_int((struct scsi_lun *)&fcp_lun));
	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}

	mutex_lock(&zfcp_data.config_mutex);
	zfcp_unit_put(unit);
	if (atomic_read(&unit->refcount)) {
		mutex_unlock(&zfcp_data.config_mutex);
		return -ENXIO;
	}

	write_lock_irq(&zfcp_data.config_lock);
	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
	list_move(&unit->list, &unit_remove_lh);
	write_unlock_irq(&zfcp_data.config_lock);
	mutex_unlock(&zfcp_data.config_mutex);

	zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
	zfcp_erp_wait(unit->port->adapter);
	zfcp_unit_dequeue(unit);

	return (ssize_t)count;
}
Example #5
static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id, struct mpoa_client *mpc)
{
	eg_cache_entry *entry;

	read_lock_irq(&mpc->egress_lock);
	entry = mpc->eg_cache;
	while (entry != NULL) {
		if (entry->ctrl_info.cache_id == cache_id) {
			atomic_inc(&entry->use);
			read_unlock_irq(&mpc->egress_lock);
			return entry;
		}
		entry = entry->next;
	}
	read_unlock_irq(&mpc->egress_lock);

	return NULL;
}
Example #6
static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr, struct mpoa_client *mpc)
{
	eg_cache_entry *entry;

	read_lock_irq(&mpc->egress_lock);
	entry = mpc->eg_cache;
	while (entry != NULL) {
		if (entry->latest_ip_addr == ipaddr) {
			atomic_inc(&entry->use);
			read_unlock_irq(&mpc->egress_lock);
			return entry;
		}
		entry = entry->next;
	}
	read_unlock_irq(&mpc->egress_lock);

	return NULL;
}
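
Because the use count is bumped before the lock is dropped, the entry returned by these two lookups stays valid after read_unlock_irq(); the caller owns a reference and must release it later. As a hedged sketch of what that release generally looks like (a hypothetical demo_entry_put(), relying on the declarations used by the examples above and assuming the entry has already been unlinked from the cache; the MPOA code has its own release path):

static void demo_entry_put(eg_cache_entry *entry)
{
	/* Free only when the last reference goes away. */
	if (atomic_dec_and_test(&entry->use))
		kfree(entry);
}
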
Example #7
File: btree.c Project: zhangjinde/nkfs
static struct nkfs_btree_node * nkfs_btree_nodes_lookup(struct nkfs_btree *tree,
	u64 block)
{
	struct nkfs_btree_node *node;
	read_lock_irq(&tree->nodes_lock);
	node = __nkfs_btree_nodes_lookup(tree, block);
	if (node)
		NKFS_BTREE_NODE_REF(node);
	read_unlock_irq(&tree->nodes_lock);
	return node;
}
Example #8
/**
 * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
 * @adapter: The zfcp adapter to unregister.
 */
void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct Scsi_Host *shost;
	struct zfcp_port *port;

	shost = adapter->scsi_host;
	if (!shost)
		return;

	read_lock_irq(&adapter->port_list_lock);
	list_for_each_entry(port, &adapter->port_list, list)
		port->rport = NULL;
	read_unlock_irq(&adapter->port_list_lock);

	fc_remove_host(shost);
	scsi_remove_host(shost);
	scsi_host_put(shost);
	adapter->scsi_host = NULL;
}
Example #9
File: mpc.c Project: BWhitten/linux-stable
/*
 * purge egress cache and tell daemon to 'action' (DIE, RELOAD)
 */
static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action)
{
	eg_cache_entry *entry;

	msg->type = SND_EGRESS_PURGE;

	/* FIXME: This knows too much of the cache structure */
	read_lock_irq(&mpc->egress_lock);
	entry = mpc->eg_cache;
	while (entry != NULL) {
		msg->content.eg_info = entry->ctrl_info;
		dprintk("cache_id %u\n", entry->ctrl_info.cache_id);
		msg_to_mpoad(msg, mpc);
		entry = entry->next;
	}
	read_unlock_irq(&mpc->egress_lock);

	msg->type = action;
	msg_to_mpoad(msg, mpc);
}
Example #10
static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
{
	struct zfcp_adapter *adapter;
	struct zfcp_port *port;
	struct zfcp_unit *unit;

	down(&zfcp_data.config_sema);
	read_lock_irq(&zfcp_data.config_lock);
	adapter = zfcp_get_adapter_by_busid(busid);
	if (adapter)
		zfcp_adapter_get(adapter);
	read_unlock_irq(&zfcp_data.config_lock);

	if (!adapter)
		goto out_adapter;
	port = zfcp_port_enqueue(adapter, wwpn, 0, 0);
	if (IS_ERR(port))
		goto out_port;
	unit = zfcp_unit_enqueue(port, lun);
	if (IS_ERR(unit))
		goto out_unit;
	up(&zfcp_data.config_sema);
	ccw_device_set_online(adapter->ccw_device);

	zfcp_erp_wait(adapter);
	wait_event(adapter->erp_done_wqh,
		   !(atomic_read(&unit->status) &
				ZFCP_STATUS_UNIT_SCSI_WORK_PENDING));

	down(&zfcp_data.config_sema);
	zfcp_unit_put(unit);
out_unit:
	zfcp_port_put(port);
out_port:
	zfcp_adapter_put(adapter);
out_adapter:
	up(&zfcp_data.config_sema);
	return;
}
Example #11
static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with task_rq(p)->lock unlocked.
		 * Order the reads so that we do not warn about a task
		 * that was running on this CPU in the past but has just
		 * been woken on another CPU.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}
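
The same tasklist_lock reader pattern works for any whole-system task scan. A hedged sketch with a hypothetical helper, using the same locking and iteration macros as the example above:

/* Count how many threads are currently assigned to the given CPU. */
static int demo_count_tasks_on_cpu(int cpu)
{
	struct task_struct *g, *p;
	int n = 0;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (task_cpu(p) == cpu)
			n++;
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);

	return n;
}
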
Example #12
static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
{
	struct zfcp_adapter *adapter;
	struct zfcp_port *port;
	struct zfcp_unit *unit;

	mutex_lock(&zfcp_data.config_mutex);
	read_lock_irq(&zfcp_data.config_lock);
	adapter = zfcp_get_adapter_by_busid(busid);
	if (adapter)
		zfcp_adapter_get(adapter);
	read_unlock_irq(&zfcp_data.config_lock);

	if (!adapter)
		goto out_adapter;
	port = zfcp_port_enqueue(adapter, wwpn, 0, 0);
	if (IS_ERR(port))
		goto out_port;
	unit = zfcp_unit_enqueue(port, lun);
	if (IS_ERR(unit))
		goto out_unit;
	mutex_unlock(&zfcp_data.config_mutex);
	ccw_device_set_online(adapter->ccw_device);

	zfcp_erp_wait(adapter);
	flush_work(&unit->scsi_work);

	mutex_lock(&zfcp_data.config_mutex);
	zfcp_unit_put(unit);
out_unit:
	zfcp_port_put(port);
out_port:
	zfcp_adapter_put(adapter);
out_adapter:
	mutex_unlock(&zfcp_data.config_mutex);
	return;
}
Example #13
static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
{
	struct zfcp_adapter *adapter = zsdev->port->adapter;
	struct zfcp_scsi_req_filter filter = {
		.tmf_scope = FCP_TMF_TGT_RESET,
		.port_handle = zsdev->port->handle,
	};
	unsigned long flags;

	if (tm_flags == FCP_TMF_LUN_RESET) {
		filter.tmf_scope = FCP_TMF_LUN_RESET;
		filter.lun_handle = zsdev->lun_handle;
	}

	/*
	 * abort_lock guards (struct zfcp_fsf_req *)->data against concurrent
	 * processing by the abort handler and the normal command handler.
	 */
	write_lock_irqsave(&adapter->abort_lock, flags);
	zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
				   &filter);
	write_unlock_irqrestore(&adapter->abort_lock, flags);
}

/**
 * zfcp_scsi_task_mgmt_function() - Send a task management function (sync).
 * @sdev: Pointer to SCSI device to send the task management command to.
 * @tm_flags: Task management flags,
 *	      here we only handle %FCP_TMF_TGT_RESET or %FCP_TMF_LUN_RESET.
 */
static int zfcp_scsi_task_mgmt_function(struct scsi_device *sdev, u8 tm_flags)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct zfcp_fsf_req *fsf_req = NULL;
	int retval = SUCCESS, ret;
	int retry = 3;

	while (retry--) {
		fsf_req = zfcp_fsf_fcp_task_mgmt(sdev, tm_flags);
		if (fsf_req)
			break;

		zfcp_dbf_scsi_devreset("wait", sdev, tm_flags, NULL);
		zfcp_erp_wait(adapter);
		ret = fc_block_rport(rport);
		if (ret) {
			zfcp_dbf_scsi_devreset("fiof", sdev, tm_flags, NULL);
			return ret;
		}

		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_COMMON_RUNNING)) {
			zfcp_dbf_scsi_devreset("nres", sdev, tm_flags, NULL);
			return SUCCESS;
		}
	}
	if (!fsf_req) {
		zfcp_dbf_scsi_devreset("reqf", sdev, tm_flags, NULL);
		return FAILED;
	}

	wait_for_completion(&fsf_req->completion);

	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
		zfcp_dbf_scsi_devreset("fail", sdev, tm_flags, fsf_req);
		retval = FAILED;
	} else {
		zfcp_dbf_scsi_devreset("okay", sdev, tm_flags, fsf_req);
		zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
	}

	zfcp_fsf_req_free(fsf_req);
	return retval;
}

static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
	struct scsi_device *sdev = scpnt->device;

	return zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_LUN_RESET);
}

static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
	struct scsi_target *starget = scsi_target(scpnt->device);
	struct fc_rport *rport = starget_to_rport(starget);
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct scsi_device *sdev = NULL, *tmp_sdev;
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *)shost->hostdata[0];
	int ret;

	shost_for_each_device(tmp_sdev, shost) {
		if (tmp_sdev->id == starget->id) {
			sdev = tmp_sdev;
			break;
		}
	}
	if (!sdev) {
		ret = FAILED;
		zfcp_dbf_scsi_eh("tr_nosd", adapter, starget->id, ret);
		return ret;
	}

	ret = zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_TGT_RESET);

	/* release reference from above shost_for_each_device */
	if (sdev)
		scsi_device_put(tmp_sdev);

	return ret;
}

static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	int ret = SUCCESS, fc_ret;

	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
	zfcp_erp_wait(adapter);
	fc_ret = fc_block_scsi_eh(scpnt);
	if (fc_ret)
		ret = fc_ret;

	zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret);
	return ret;
}

/**
 * zfcp_scsi_sysfs_host_reset() - Support scsi_host sysfs attribute host_reset.
 * @shost: Pointer to Scsi_Host to perform action on.
 * @reset_type: We support %SCSI_ADAPTER_RESET but not %SCSI_FIRMWARE_RESET.
 *
 * Return: 0 on %SCSI_ADAPTER_RESET, -%EOPNOTSUPP otherwise.
 *
 * This is similar to zfcp_sysfs_adapter_failed_store().
 */
static int zfcp_scsi_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *)shost->hostdata[0];
	int ret = 0;

	if (reset_type != SCSI_ADAPTER_RESET) {
		ret = -EOPNOTSUPP;
		zfcp_dbf_scsi_eh("scshr_n", adapter, ~0, ret);
		return ret;
	}

	zfcp_erp_adapter_reset_sync(adapter, "scshr_y");
	return ret;
}

struct scsi_transport_template *zfcp_scsi_transport_template;

static struct scsi_host_template zfcp_scsi_host_template = {
	.module			 = THIS_MODULE,
	.name			 = "zfcp",
	.queuecommand		 = zfcp_scsi_queuecommand,
	.eh_timed_out		 = fc_eh_timed_out,
	.eh_abort_handler	 = zfcp_scsi_eh_abort_handler,
	.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
	.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
	.eh_host_reset_handler	 = zfcp_scsi_eh_host_reset_handler,
	.slave_alloc		 = zfcp_scsi_slave_alloc,
	.slave_configure	 = zfcp_scsi_slave_configure,
	.slave_destroy		 = zfcp_scsi_slave_destroy,
	.change_queue_depth	 = scsi_change_queue_depth,
	.host_reset		 = zfcp_scsi_sysfs_host_reset,
	.proc_name		 = "zfcp",
	.can_queue		 = 4096,
	.this_id		 = -1,
	.sg_tablesize		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
				   /* GCD, adjusted later */
	.max_sectors		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
				   /* GCD, adjusted later */
	/* report size limit per scatter-gather segment */
	.max_segment_size	 = ZFCP_QDIO_SBALE_LEN,
	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
	.shost_attrs		 = zfcp_sysfs_shost_attrs,
	.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
	.track_queue_depth	 = 1,
	.supported_mode		 = MODE_INITIATOR,
};

/**
 * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
 * @adapter: The zfcp adapter to register with the SCSI midlayer
 */
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
	struct ccw_dev_id dev_id;

	if (adapter->scsi_host)
		return 0;

	ccw_device_get_id(adapter->ccw_device, &dev_id);
	/* register adapter as SCSI host with mid layer of SCSI stack */
	adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
					     sizeof (struct zfcp_adapter *));
	if (!adapter->scsi_host) {
		dev_err(&adapter->ccw_device->dev,
			"Registering the FCP device with the "
			"SCSI stack failed\n");
		return -EIO;
	}

	/* tell the SCSI stack some characteristics of this adapter */
	adapter->scsi_host->max_id = 511;
	adapter->scsi_host->max_lun = 0xFFFFFFFF;
	adapter->scsi_host->max_channel = 0;
	adapter->scsi_host->unique_id = dev_id.devno;
	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
	adapter->scsi_host->transportt = zfcp_scsi_transport_template;

	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;

	if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
		scsi_host_put(adapter->scsi_host);
		return -EIO;
	}

	return 0;
}

/**
 * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
 * @adapter: The zfcp adapter to unregister.
 */
void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct Scsi_Host *shost;
	struct zfcp_port *port;

	shost = adapter->scsi_host;
	if (!shost)
		return;

	read_lock_irq(&adapter->port_list_lock);
	list_for_each_entry(port, &adapter->port_list, list)
		port->rport = NULL;
	read_unlock_irq(&adapter->port_list_lock);

	fc_remove_host(shost);
	scsi_remove_host(shost);
	scsi_host_put(shost);
	adapter->scsi_host = NULL;
}

static struct fc_host_statistics*
zfcp_scsi_init_fc_host_stats(struct zfcp_adapter *adapter)
{
	struct fc_host_statistics *fc_stats;

	if (!adapter->fc_stats) {
		fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
		if (!fc_stats)
			return NULL;
		adapter->fc_stats = fc_stats; /* freed in adapter_release */
	}
	memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
	return adapter->fc_stats;
}

static void zfcp_scsi_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
					   struct fsf_qtcb_bottom_port *data,
					   struct fsf_qtcb_bottom_port *old)
{
	fc_stats->seconds_since_last_reset =
		data->seconds_since_last_reset - old->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames - old->tx_frames;
	fc_stats->tx_words = data->tx_words - old->tx_words;
	fc_stats->rx_frames = data->rx_frames - old->rx_frames;
	fc_stats->rx_words = data->rx_words - old->rx_words;
	fc_stats->lip_count = data->lip - old->lip;
	fc_stats->nos_count = data->nos - old->nos;
	fc_stats->error_frames = data->error_frames - old->error_frames;
	fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
	fc_stats->link_failure_count = data->link_failure - old->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
	fc_stats->loss_of_signal_count =
		data->loss_of_signal - old->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count =
		data->psp_error_counts - old->psp_error_counts;
	fc_stats->invalid_tx_word_count =
		data->invalid_tx_words - old->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
	fc_stats->fcp_input_requests =
		data->input_requests - old->input_requests;
	fc_stats->fcp_output_requests =
		data->output_requests - old->output_requests;
	fc_stats->fcp_control_requests =
		data->control_requests - old->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
}

static void zfcp_scsi_set_fc_host_stats(struct fc_host_statistics *fc_stats,
					struct fsf_qtcb_bottom_port *data)
{
	fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames;
	fc_stats->tx_words = data->tx_words;
	fc_stats->rx_frames = data->rx_frames;
	fc_stats->rx_words = data->rx_words;
	fc_stats->lip_count = data->lip;
	fc_stats->nos_count = data->nos;
	fc_stats->error_frames = data->error_frames;
	fc_stats->dumped_frames = data->dumped_frames;
	fc_stats->link_failure_count = data->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync;
	fc_stats->loss_of_signal_count = data->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count = data->psp_error_counts;
	fc_stats->invalid_tx_word_count = data->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs;
	fc_stats->fcp_input_requests = data->input_requests;
	fc_stats->fcp_output_requests = data->output_requests;
	fc_stats->fcp_control_requests = data->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb;
}
Example #14
File: hp_sdc.c Project: AshishNamdev/linux
unsigned long hp_sdc_put(void)
{
	hp_sdc_transaction *curr;
	uint8_t act;
	int idx, curridx;

	int limit = 0;

	write_lock(&hp_sdc.lock);

	/* If i8042 buffers are full, we cannot do anything that
	   requires output, so we skip to the administrativa. */
	if (hp_sdc.ibf) {
		hp_sdc_status_in8();
		if (hp_sdc.ibf)
			goto finish;
	}

 anew:
	/* See if we are in the middle of a sequence. */
	if (hp_sdc.wcurr < 0)
		hp_sdc.wcurr = 0;
	read_lock_irq(&hp_sdc.rtq_lock);
	if (hp_sdc.rcurr == hp_sdc.wcurr)
		hp_sdc.wcurr++;
	read_unlock_irq(&hp_sdc.rtq_lock);
	if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN)
		hp_sdc.wcurr = 0;
	curridx = hp_sdc.wcurr;

	if (hp_sdc.tq[curridx] != NULL)
		goto start;

	while (++curridx != hp_sdc.wcurr) {
		if (curridx >= HP_SDC_QUEUE_LEN) {
			curridx = -1; /* Wrap to top */
			continue;
		}
		read_lock_irq(&hp_sdc.rtq_lock);
		if (hp_sdc.rcurr == curridx) {
			read_unlock_irq(&hp_sdc.rtq_lock);
			continue;
		}
		read_unlock_irq(&hp_sdc.rtq_lock);
		if (hp_sdc.tq[curridx] != NULL)
			break; /* Found one. */
	}
	if (curridx == hp_sdc.wcurr) { /* There's nothing queued to do. */
		curridx = -1;
	}
	hp_sdc.wcurr = curridx;

 start:

	/* Check to see if the interrupt mask needs to be set. */
	if (hp_sdc.set_im) {
		hp_sdc_status_out8(hp_sdc.im | HP_SDC_CMD_SET_IM);
		hp_sdc.set_im = 0;
		goto finish;
	}

	if (hp_sdc.wcurr == -1)
		goto done;

	curr = hp_sdc.tq[curridx];
	idx = curr->actidx;

	if (curr->actidx >= curr->endidx) {
		hp_sdc.tq[curridx] = NULL;
		/* Interleave outbound data between the transactions. */
		hp_sdc.wcurr++;
		if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN)
			hp_sdc.wcurr = 0;
		goto finish;
	}

	act = curr->seq[idx];
	idx++;

	if (curr->idx >= curr->endidx) {
		if (act & HP_SDC_ACT_DEALLOC)
			kfree(curr);
		hp_sdc.tq[curridx] = NULL;
		/* Interleave outbound data between the transactions. */
		hp_sdc.wcurr++;
		if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN)
			hp_sdc.wcurr = 0;
		goto finish;
	}

	while (act & HP_SDC_ACT_PRECMD) {
		if (curr->idx != idx) {
			idx++;
			act &= ~HP_SDC_ACT_PRECMD;
			break;
		}
		hp_sdc_status_out8(curr->seq[idx]);
		curr->idx++;
		/* act finished? */
		if ((act & HP_SDC_ACT_DURING) == HP_SDC_ACT_PRECMD)
			goto actdone;
		/* skip quantity field if data-out sequence follows. */
		if (act & HP_SDC_ACT_DATAOUT)
			curr->idx++;
		goto finish;
	}
	if (act & HP_SDC_ACT_DATAOUT) {
		int qty;

		qty = curr->seq[idx];
		idx++;
		if (curr->idx - idx < qty) {
			hp_sdc_data_out8(curr->seq[curr->idx]);
			curr->idx++;
			/* act finished? */
			if (curr->idx - idx >= qty &&
			    (act & HP_SDC_ACT_DURING) == HP_SDC_ACT_DATAOUT)
				goto actdone;
			goto finish;
		}
		idx += qty;
		act &= ~HP_SDC_ACT_DATAOUT;
	} else
	    while (act & HP_SDC_ACT_DATAREG) {
		int mask;
		uint8_t w7[4];

		mask = curr->seq[idx];
		if (idx != curr->idx) {
			idx++;
			idx += !!(mask & 1);
			idx += !!(mask & 2);
			idx += !!(mask & 4);
			idx += !!(mask & 8);
			act &= ~HP_SDC_ACT_DATAREG;
			break;
		}

		w7[0] = (mask & 1) ? curr->seq[++idx] : hp_sdc.r7[0];
		w7[1] = (mask & 2) ? curr->seq[++idx] : hp_sdc.r7[1];
		w7[2] = (mask & 4) ? curr->seq[++idx] : hp_sdc.r7[2];
		w7[3] = (mask & 8) ? curr->seq[++idx] : hp_sdc.r7[3];

		if (hp_sdc.wi > 0x73 || hp_sdc.wi < 0x70 ||
		    w7[hp_sdc.wi - 0x70] == hp_sdc.r7[hp_sdc.wi - 0x70]) {
			int i = 0;

			/* Need to point the write index register */
			while (i < 4 && w7[i] == hp_sdc.r7[i])
				i++;

			if (i < 4) {
				hp_sdc_status_out8(HP_SDC_CMD_SET_D0 + i);
				hp_sdc.wi = 0x70 + i;
				goto finish;
			}

			idx++;
			if ((act & HP_SDC_ACT_DURING) == HP_SDC_ACT_DATAREG)
				goto actdone;

			curr->idx = idx;
			act &= ~HP_SDC_ACT_DATAREG;
			break;
		}

		hp_sdc_data_out8(w7[hp_sdc.wi - 0x70]);
		hp_sdc.r7[hp_sdc.wi - 0x70] = w7[hp_sdc.wi - 0x70];
		hp_sdc.wi++; /* write index register autoincrements */
		{
			int i = 0;

			while ((i < 4) && w7[i] == hp_sdc.r7[i])
				i++;
			if (i >= 4) {
				curr->idx = idx + 1;
				if ((act & HP_SDC_ACT_DURING) ==
				    HP_SDC_ACT_DATAREG)
					goto actdone;
			}
		}
		goto finish;
	}
	/* We don't go any further in the command if there is a pending read,
	   because we don't want interleaved results. */
	read_lock_irq(&hp_sdc.rtq_lock);
	if (hp_sdc.rcurr >= 0) {
		read_unlock_irq(&hp_sdc.rtq_lock);
		goto finish;
	}
	read_unlock_irq(&hp_sdc.rtq_lock);


	if (act & HP_SDC_ACT_POSTCMD) {
		uint8_t postcmd;

		/* curr->idx should == idx at this point. */
		postcmd = curr->seq[idx];
		curr->idx++;
		if (act & HP_SDC_ACT_DATAIN) {

			/* Start a new read */
			hp_sdc.rqty = curr->seq[curr->idx];
			do_gettimeofday(&hp_sdc.rtv);
			curr->idx++;
			/* Still need to lock here in case of spurious irq. */
			write_lock_irq(&hp_sdc.rtq_lock);
			hp_sdc.rcurr = curridx;
			write_unlock_irq(&hp_sdc.rtq_lock);
			hp_sdc_status_out8(postcmd);
			goto finish;
		}
		hp_sdc_status_out8(postcmd);
		goto actdone;
	}

 actdone:
	if (act & HP_SDC_ACT_SEMAPHORE)
		up(curr->act.semaphore);
	else if (act & HP_SDC_ACT_CALLBACK)
		curr->act.irqhook(0,NULL,0,0);

	if (curr->idx >= curr->endidx) { /* This transaction is over. */
		if (act & HP_SDC_ACT_DEALLOC)
			kfree(curr);
		hp_sdc.tq[curridx] = NULL;
	} else {
		curr->actidx = idx + 1;
		curr->idx = idx + 2;
	}
	/* Interleave outbound data between the transactions. */
	hp_sdc.wcurr++;
	if (hp_sdc.wcurr >= HP_SDC_QUEUE_LEN)
		hp_sdc.wcurr = 0;

 finish:
	/* If by some quirk IBF has cleared and our ISR has run to
	   see that that has happened, do it all again. */
	if (!hp_sdc.ibf && limit++ < 20)
		goto anew;

 done:
	if (hp_sdc.wcurr >= 0)
		tasklet_schedule(&hp_sdc.task);
	write_unlock(&hp_sdc.lock);

	return 0;
}
Example #15
/**
 * zfcp_port_enqueue - enqueue port to port list of adapter
 * @adapter: adapter where remote port is added
 * @wwpn: WWPN of the remote port to be enqueued
 * @status: initial status for the port
 * @d_id: destination id of the remote port to be enqueued
 * Returns: pointer to enqueued port on success, ERR_PTR on error
 * Locks: config_sema must be held to serialize changes to the port list
 *
 * All port internal structures are set up and the sysfs entry is generated.
 * d_id is used to enqueue ports with a well known address like the Directory
 * Service for nameserver lookup.
 */
struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
				     u32 status, u32 d_id)
{
	struct zfcp_port *port;
	int retval;

	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	init_waitqueue_head(&port->remove_wq);
	INIT_LIST_HEAD(&port->unit_list_head);
	INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup);

	port->adapter = adapter;
	port->d_id = d_id;
	port->wwpn = wwpn;

	/* mark port unusable as long as sysfs registration is not complete */
	atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
	atomic_set(&port->refcount, 0);

	dev_set_name(&port->sysfs_device, "0x%016llx",
		     (unsigned long long)wwpn);
	port->sysfs_device.parent = &adapter->ccw_device->dev;

	port->sysfs_device.release = zfcp_sysfs_port_release;
	dev_set_drvdata(&port->sysfs_device, port);

	read_lock_irq(&zfcp_data.config_lock);
	if (!(status & ZFCP_STATUS_PORT_NO_WWPN))
		if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
			read_unlock_irq(&zfcp_data.config_lock);
			goto err_out_free;
		}
	read_unlock_irq(&zfcp_data.config_lock);

	if (device_register(&port->sysfs_device))
		goto err_out_free;

	retval = sysfs_create_group(&port->sysfs_device.kobj,
				    &zfcp_sysfs_port_attrs);

	if (retval) {
		device_unregister(&port->sysfs_device);
		goto err_out;
	}

	zfcp_port_get(port);

	write_lock_irq(&zfcp_data.config_lock);
	list_add_tail(&port->list, &adapter->port_list_head);
	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
	atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);

	write_unlock_irq(&zfcp_data.config_lock);

	zfcp_adapter_get(adapter);
	return port;

err_out_free:
	kfree(port);
err_out:
	return ERR_PTR(-EINVAL);
}
Example #16
/**
 * zfcp_unit_enqueue - enqueue unit to unit list of a port.
 * @port: pointer to port where unit is added
 * @fcp_lun: FCP LUN of unit to be enqueued
 * Returns: pointer to enqueued unit on success, ERR_PTR on error
 * Locks: config_sema must be held to serialize changes to the unit list
 *
 * Sets up some unit internal structures and creates sysfs entry.
 */
struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
{
	struct zfcp_unit *unit;

	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
	if (!unit)
		return ERR_PTR(-ENOMEM);

	atomic_set(&unit->refcount, 0);
	init_waitqueue_head(&unit->remove_wq);

	unit->port = port;
	unit->fcp_lun = fcp_lun;

	dev_set_name(&unit->sysfs_device, "0x%016llx",
		     (unsigned long long) fcp_lun);
	unit->sysfs_device.parent = &port->sysfs_device;
	unit->sysfs_device.release = zfcp_sysfs_unit_release;
	dev_set_drvdata(&unit->sysfs_device, unit);

	/* mark unit unusable as long as sysfs registration is not complete */
	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);

	spin_lock_init(&unit->latencies.lock);
	unit->latencies.write.channel.min = 0xFFFFFFFF;
	unit->latencies.write.fabric.min = 0xFFFFFFFF;
	unit->latencies.read.channel.min = 0xFFFFFFFF;
	unit->latencies.read.fabric.min = 0xFFFFFFFF;
	unit->latencies.cmd.channel.min = 0xFFFFFFFF;
	unit->latencies.cmd.fabric.min = 0xFFFFFFFF;

	read_lock_irq(&zfcp_data.config_lock);
	if (zfcp_get_unit_by_lun(port, fcp_lun)) {
		read_unlock_irq(&zfcp_data.config_lock);
		goto err_out_free;
	}
	read_unlock_irq(&zfcp_data.config_lock);

	if (device_register(&unit->sysfs_device))
		goto err_out_free;

	if (sysfs_create_group(&unit->sysfs_device.kobj,
			       &zfcp_sysfs_unit_attrs)) {
		device_unregister(&unit->sysfs_device);
		return ERR_PTR(-EIO);
	}

	zfcp_unit_get(unit);

	write_lock_irq(&zfcp_data.config_lock);
	list_add_tail(&unit->list, &port->unit_list_head);
	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
	atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);

	write_unlock_irq(&zfcp_data.config_lock);

	zfcp_port_get(port);

	return unit;

err_out_free:
	kfree(unit);
	return ERR_PTR(-EINVAL);
}
Example #17
static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
{
	struct zfcp_adapter *adapter = zsdev->port->adapter;
	struct zfcp_scsi_req_filter filter = {
		.tmf_scope = FCP_TMF_TGT_RESET,
		.port_handle = zsdev->port->handle,
	};
	unsigned long flags;

	if (tm_flags == FCP_TMF_LUN_RESET) {
		filter.tmf_scope = FCP_TMF_LUN_RESET;
		filter.lun_handle = zsdev->lun_handle;
	}

	/*
	 * abort_lock guards (struct zfcp_fsf_req *)->data against concurrent
	 * processing by the abort handler and the normal command handler.
	 */
	write_lock_irqsave(&adapter->abort_lock, flags);
	zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
				   &filter);
	write_unlock_irqrestore(&adapter->abort_lock, flags);
}

static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	struct zfcp_fsf_req *fsf_req = NULL;
	int retval = SUCCESS, ret;
	int retry = 3;

	while (retry--) {
		fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
		if (fsf_req)
			break;

		zfcp_erp_wait(adapter);
		ret = fc_block_scsi_eh(scpnt);
		if (ret)
			return ret;

		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_COMMON_RUNNING)) {
			zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
			return SUCCESS;
		}
	}
	if (!fsf_req)
		return FAILED;

	wait_for_completion(&fsf_req->completion);

	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
		zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
		retval = FAILED;
	} else {
		zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
		zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
	}

	zfcp_fsf_req_free(fsf_req);
	return retval;
}

static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
	return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
}

static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
	return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
}

static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	int ret;

	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
	zfcp_erp_wait(adapter);
	ret = fc_block_scsi_eh(scpnt);
	if (ret)
		return ret;

	return SUCCESS;
}

struct scsi_transport_template *zfcp_scsi_transport_template;

static struct scsi_host_template zfcp_scsi_host_template = {
	.module			 = THIS_MODULE,
	.name			 = "zfcp",
	.queuecommand		 = zfcp_scsi_queuecommand,
	.eh_abort_handler	 = zfcp_scsi_eh_abort_handler,
	.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
	.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
	.eh_host_reset_handler	 = zfcp_scsi_eh_host_reset_handler,
	.slave_alloc		 = zfcp_scsi_slave_alloc,
	.slave_configure	 = zfcp_scsi_slave_configure,
	.slave_destroy		 = zfcp_scsi_slave_destroy,
	.change_queue_depth	 = scsi_change_queue_depth,
	.proc_name		 = "zfcp",
	.can_queue		 = 4096,
	.this_id		 = -1,
	.sg_tablesize		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
				   /* GCD, adjusted later */
	.max_sectors		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
				   /* GCD, adjusted later */
	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
	.use_clustering		 = 1,
	.shost_attrs		 = zfcp_sysfs_shost_attrs,
	.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
	.track_queue_depth	 = 1,
};

/**
 * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
 * @adapter: The zfcp adapter to register with the SCSI midlayer
 */
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
	struct ccw_dev_id dev_id;

	if (adapter->scsi_host)
		return 0;

	ccw_device_get_id(adapter->ccw_device, &dev_id);
	/* register adapter as SCSI host with mid layer of SCSI stack */
	adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
					     sizeof (struct zfcp_adapter *));
	if (!adapter->scsi_host) {
		dev_err(&adapter->ccw_device->dev,
			"Registering the FCP device with the "
			"SCSI stack failed\n");
		return -EIO;
	}

	/* tell the SCSI stack some characteristics of this adapter */
	adapter->scsi_host->max_id = 511;
	adapter->scsi_host->max_lun = 0xFFFFFFFF;
	adapter->scsi_host->max_channel = 0;
	adapter->scsi_host->unique_id = dev_id.devno;
	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
	adapter->scsi_host->transportt = zfcp_scsi_transport_template;

	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;

	if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
		scsi_host_put(adapter->scsi_host);
		return -EIO;
	}

	return 0;
}

/**
 * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
 * @adapter: The zfcp adapter to unregister.
 */
void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct Scsi_Host *shost;
	struct zfcp_port *port;

	shost = adapter->scsi_host;
	if (!shost)
		return;

	read_lock_irq(&adapter->port_list_lock);
	list_for_each_entry(port, &adapter->port_list, list)
		port->rport = NULL;
	read_unlock_irq(&adapter->port_list_lock);

	fc_remove_host(shost);
	scsi_remove_host(shost);
	scsi_host_put(shost);
	adapter->scsi_host = NULL;
}

static struct fc_host_statistics*
zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
{
	struct fc_host_statistics *fc_stats;

	if (!adapter->fc_stats) {
		fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
		if (!fc_stats)
			return NULL;
		adapter->fc_stats = fc_stats; /* freed in adapter_release */
	}
	memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
	return adapter->fc_stats;
}

static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
				      struct fsf_qtcb_bottom_port *data,
				      struct fsf_qtcb_bottom_port *old)
{
	fc_stats->seconds_since_last_reset =
		data->seconds_since_last_reset - old->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames - old->tx_frames;
	fc_stats->tx_words = data->tx_words - old->tx_words;
	fc_stats->rx_frames = data->rx_frames - old->rx_frames;
	fc_stats->rx_words = data->rx_words - old->rx_words;
	fc_stats->lip_count = data->lip - old->lip;
	fc_stats->nos_count = data->nos - old->nos;
	fc_stats->error_frames = data->error_frames - old->error_frames;
	fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
	fc_stats->link_failure_count = data->link_failure - old->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
	fc_stats->loss_of_signal_count =
		data->loss_of_signal - old->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count =
		data->psp_error_counts - old->psp_error_counts;
	fc_stats->invalid_tx_word_count =
		data->invalid_tx_words - old->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
	fc_stats->fcp_input_requests =
		data->input_requests - old->input_requests;
	fc_stats->fcp_output_requests =
		data->output_requests - old->output_requests;
	fc_stats->fcp_control_requests =
		data->control_requests - old->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
}

static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
				   struct fsf_qtcb_bottom_port *data)
{
	fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames;
	fc_stats->tx_words = data->tx_words;
	fc_stats->rx_frames = data->rx_frames;
	fc_stats->rx_words = data->rx_words;
	fc_stats->lip_count = data->lip;
	fc_stats->nos_count = data->nos;
	fc_stats->error_frames = data->error_frames;
	fc_stats->dumped_frames = data->dumped_frames;
	fc_stats->link_failure_count = data->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync;
	fc_stats->loss_of_signal_count = data->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count = data->psp_error_counts;
	fc_stats->invalid_tx_word_count = data->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs;
	fc_stats->fcp_input_requests = data->input_requests;
	fc_stats->fcp_output_requests = data->output_requests;
	fc_stats->fcp_control_requests = data->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb;
}
Example #18
/**
 * zfcp_port_enqueue - enqueue port to port list of adapter
 * @adapter: adapter where remote port is added
 * @wwpn: WWPN of the remote port to be enqueued
 * @status: initial status for the port
 * @d_id: destination id of the remote port to be enqueued
 * Returns: pointer to enqueued port on success, ERR_PTR on error
 * Locks: config_sema must be held to serialize changes to the port list
 *
 * All port internal structures are set up and the sysfs entry is generated.
 * d_id is used to enqueue ports with a well known address like the Directory
 * Service for nameserver lookup.
 */
struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
				     u32 status, u32 d_id)
{
	struct zfcp_port *port;
	int retval;
	char *bus_id;

	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	init_waitqueue_head(&port->remove_wq);

	INIT_LIST_HEAD(&port->unit_list_head);
	INIT_LIST_HEAD(&port->unit_remove_lh);

	port->adapter = adapter;
	port->d_id = d_id;
	port->wwpn = wwpn;

	/* mark port unusable as long as sysfs registration is not complete */
	atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
	atomic_set(&port->refcount, 0);

	if (status & ZFCP_STATUS_PORT_WKA) {
		switch (d_id) {
		case ZFCP_DID_DIRECTORY_SERVICE:
			bus_id = "directory";
			break;
		case ZFCP_DID_MANAGEMENT_SERVICE:
			bus_id = "management";
			break;
		case ZFCP_DID_KEY_DISTRIBUTION_SERVICE:
			bus_id = "key_distribution";
			break;
		case ZFCP_DID_ALIAS_SERVICE:
			bus_id = "alias";
			break;
		case ZFCP_DID_TIME_SERVICE:
			bus_id = "time";
			break;
		default:
			kfree(port);
			return ERR_PTR(-EINVAL);
		}
		snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, "%s", bus_id);
		port->sysfs_device.parent = &adapter->generic_services;
	} else {
		snprintf(port->sysfs_device.bus_id,
			 BUS_ID_SIZE, "0x%016llx", wwpn);
		port->sysfs_device.parent = &adapter->ccw_device->dev;
	}

	port->sysfs_device.release = zfcp_sysfs_port_release;
	dev_set_drvdata(&port->sysfs_device, port);

	read_lock_irq(&zfcp_data.config_lock);
	if (!(status & ZFCP_STATUS_PORT_NO_WWPN))
		if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
			read_unlock_irq(&zfcp_data.config_lock);
			goto err_out_free;
		}
	read_unlock_irq(&zfcp_data.config_lock);

	if (device_register(&port->sysfs_device))
		goto err_out_free;

	if (status & ZFCP_STATUS_PORT_WKA)
		retval = sysfs_create_group(&port->sysfs_device.kobj,
					    &zfcp_sysfs_ns_port_attrs);
	else
		retval = sysfs_create_group(&port->sysfs_device.kobj,
					    &zfcp_sysfs_port_attrs);

	if (retval) {
		device_unregister(&port->sysfs_device);
		goto err_out;
	}

	zfcp_port_get(port);

	write_lock_irq(&zfcp_data.config_lock);
	list_add_tail(&port->list, &adapter->port_list_head);
	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
	atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
	if (d_id == ZFCP_DID_DIRECTORY_SERVICE)
		if (!adapter->nameserver_port)
			adapter->nameserver_port = port;
	adapter->ports++;

	write_unlock_irq(&zfcp_data.config_lock);

	zfcp_adapter_get(adapter);
	return port;

err_out_free:
	kfree(port);
err_out:
	return ERR_PTR(-EINVAL);
}