Example 1
/*
 * Make get_sync_clock return -EAGAIN.
 */
static void disable_sync_clock(void *dummy)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
	/*
	 * Clear the in-sync bit 2^31. All get_sync_clock calls will
	 * fail until the sync bit is turned back on. In addition,
	 * increase the "sequence" counter so that get_sync_clock
	 * cannot race against an ETR event followed by a complete
	 * recovery.
	 */
	atomic_clear_mask(0x80000000, sw_ptr);
	atomic_inc(sw_ptr);
}
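The consumer this disables is get_sync_clock: it samples the word before and after reading the clock, and only trusts the result if the in-sync bit is set and the sequence counter has not moved. A minimal sketch of that check, assuming a per-CPU clock_sync_word and an s390-style get_clock() TOD accessor (not the verbatim kernel implementation; preemption handling omitted):

/*
 * Sketch of the matching reader paired with disable_sync_clock().
 */
int get_sync_clock(unsigned long long *clock)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
	unsigned int sw0, sw1;

	sw0 = atomic_read(sw_ptr);	/* word before the sample */
	*clock = get_clock();		/* read the TOD clock */
	sw1 = atomic_read(sw_ptr);	/* word after the sample */

	if (sw0 == sw1 && (sw0 & 0x80000000U))
		return 0;		/* stable and in sync */
	return -EAGAIN;			/* disable_sync_clock() intervened */
}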
Example 2
static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *act,
					     int close)
{
	int retval = ZFCP_ERP_SUCCEEDED;
	struct zfcp_adapter *adapter = act->adapter;

	if (close)
		goto close_only;

	retval = zfcp_erp_adapter_strategy_open_qdio(act);
	if (retval != ZFCP_ERP_SUCCEEDED)
		goto failed_qdio;

	retval = zfcp_erp_adapter_strategy_open_fsf(act);
	if (retval != ZFCP_ERP_SUCCEEDED)
		goto failed_openfcp;

	atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &act->adapter->status);

	return ZFCP_ERP_SUCCEEDED;

 close_only:
	atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
			  &act->adapter->status);

 failed_openfcp:
	/* close queues to ensure that buffers are not accessed by adapter */
	zfcp_qdio_close(adapter);
	zfcp_fsf_req_dismiss_all(adapter);
	adapter->fsf_req_seq_no = 0;
	/* all ports and units are closed */
	zfcp_erp_modify_adapter_status(adapter, 24, NULL,
				       ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
 failed_qdio:
	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
			  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
			  &act->adapter->status);
	return retval;
}
Example 3
static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
{
    unsigned long flags;

    read_lock_irqsave(&adapter->erp_lock, flags);
    if (list_empty(&adapter->erp_ready_head) &&
            list_empty(&adapter->erp_running_head)) {
        atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
                          &adapter->status);
        wake_up(&adapter->erp_done_wqh);
    }
    read_unlock_irqrestore(&adapter->erp_lock, flags);
}
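The wakeup only fires once both ERP queues are empty; the matching waiter simply sleeps on erp_done_wqh until the pending bit drops. A minimal sketch of that counterpart, under the same adapter->status convention as above:

/* Sketch of the waiter paired with zfcp_erp_wakeup(). */
static void zfcp_erp_wait_sketch(struct zfcp_adapter *adapter)
{
    wait_event(adapter->erp_done_wqh,
               !(atomic_read(&adapter->status) &
                 ZFCP_STATUS_ADAPTER_ERP_PENDING));
}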
Example 4
static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
{
    struct zfcp_adapter *adapter = act->adapter;

    zfcp_qdio_close(adapter->qdio);
    zfcp_fsf_req_dismiss_all(adapter);
    adapter->fsf_req_seq_no = 0;
    zfcp_fc_wka_ports_force_offline(adapter->gs);

    zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);

    atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
                      ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
}
Example 5
static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
{
	struct zfcp_adapter *adapter = act->adapter;

	/* close queues to ensure that buffers are not accessed by adapter */
	zfcp_qdio_close(adapter->qdio);
	zfcp_fsf_req_dismiss_all(adapter);
	adapter->fsf_req_seq_no = 0;
	zfcp_fc_wka_ports_force_offline(adapter->gs);
	/* all ports and LUNs are closed */
	zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);

	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
			  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
}
Example 6
/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data;
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	int cc;

	if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
			  &qdio->adapter->status);

	zfcp_qdio_setup_init_data(&init_data, qdio);

	if (qdio_establish(&init_data))
		goto failed_establish;

	if (qdio_activate(cdev))
		goto failed_qdio;

	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->resp_q.sbal[cc]->element[0]);
		sbale->length = 0;
		sbale->flags = SBAL_FLAGS_LAST_ENTRY;
		sbale->addr = NULL;
	}

	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0,
		     QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBALS / number of available SBALS */
	qdio->req_q.first = 0;
	atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q);

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}
Example 7
static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
{
	struct zfcp_adapter *adapter = act->adapter;

	if (zfcp_erp_adapter_strategy_open_qdio(act)) {
		atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
				  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
				  &adapter->status);
		return ZFCP_ERP_FAILED;
	}

	if (zfcp_erp_adapter_strategy_open_fsf(act)) {
		zfcp_erp_adapter_strategy_close(act);
		return ZFCP_ERP_FAILED;
	}

	atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &adapter->status);

	return ZFCP_ERP_SUCCEEDED;
}
Example 8
static int handle_stop(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.exit_stop_request++;
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
		vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
		rc = __kvm_s390_vcpu_store_status(vcpu,
						  KVM_S390_STORE_STATUS_NOADDR);
		if (rc >= 0)
			rc = -ENOTSUPP;
	}

	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
		vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
		VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
		rc = -ENOTSUPP;
	} else
		rc = 0;
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	return rc;
}
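handle_stop() consumes action_bits that the SIGP path sets under the same local-interrupt lock. A simplified sketch of that producer side; the field names are taken from the example above, while the CPUSTAT_STOP_INT kick is an assumption about the surrounding KVM code:

/*
 * Simplified sketch of the producer: mark the requested action and
 * raise a stop interrupt so the vcpu exits and runs handle_stop().
 */
static void sigp_stop_sketch(struct kvm_vcpu *vcpu, int action)
{
	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.action_bits |= action;	/* e.g. ACTION_STOP_ON_STOP */
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
}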
Example 9
static ssize_t hone_read(struct file *file, char __user *buffer,
		size_t length, loff_t *offset)
{
	struct hone_reader *reader = file->private_data;
	size_t n, copied = 0;

	if (!length)
		return 0;

	do {
		while (!reader->offset && reader_will_block(reader)) {
			if (file->f_flags & O_NONBLOCK)
				return -EAGAIN;
			if (wait_event_interruptible(reader->event_wait_queue,
						!reader_will_block(reader)))
				return -EINTR;
		}

		if (file->f_flags & O_NONBLOCK) {
			if (down_trylock(&reader->sem))
				return -EAGAIN;
		} else if (down_interruptible(&reader->sem)) {
			return -EINTR;
		}

		while (copied < length) {
			if (!reader->offset) {
				int flags;
				struct hone_event *event;
				void (*free_event)(struct hone_event *);

				flags = atomic_read(&reader->flags);
				if (flags & READER_TAIL) {
					atomic_clear_mask(READER_TAIL, &reader->flags);
					event = &tail_event;
					free_event = NULL;
				} else if (flags & READER_FINISH) {
					if (!copied)
						atomic_clear_mask(READER_FINISH, &reader->flags);
					up(&reader->sem);
					return copied;
				} else if (flags & READER_HEAD) {
					atomic_clear_mask(READER_HEAD, &reader->flags);
					event = &head_event;
					free_event = NULL;
				} else if (flags & READER_INIT) {
					atomic_clear_mask(READER_INIT, &reader->flags);
					add_initial_events(reader);
					continue;
				} else if (reader->event) {
					if ((event = reader->event))
						reader->event = event->next;
					free_event = free_hone_event;
				} else {
					event = ring_pop(&reader->ringbuf);
					free_event = put_hone_event;
				}

				if (!event)
					break;
				reader->length = reader->format(&devinfo, &reader->info,
						event, reader->buf, READ_BUFFER_SIZE);
				inc_stats_counter(&reader->info.delivered, event->type);
				if (free_event)
					free_event(event);
			}
			n = min(reader->length - reader->offset, length - copied);
			if (copy_to_user(buffer + copied, reader->buf + reader->offset, n)) {
				up(&reader->sem);
				return -EFAULT;
			}
			copied += n;
			reader->offset += n;
			if (reader->offset >= reader->length)
				reader->offset = 0;
		}
		up(&reader->sem);
	} while (!copied);
	return copied;
}
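Seen from userspace, the contract above is: read() fails with EAGAIN when the device was opened O_NONBLOCK and nothing is ready, fails with EINTR when a signal interrupts the wait, and returns 0 only once a READER_FINISH marker has been drained. A hypothetical blocking consumer (the device node name is an assumption):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/hone", O_RDONLY);	/* assumed node name */

	if (fd < 0)
		return 1;
	/* read() == 0 only after the reader saw READER_FINISH */
	while ((n = read(fd, buf, sizeof(buf))) != 0) {
		if (n < 0) {
			if (errno == EINTR)
				continue;	/* signal: retry the read */
			break;			/* real error */
		}
		fwrite(buf, 1, n, stdout);	/* consume the event stream */
	}
	close(fd);
	return 0;
}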
Example 10
static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
{
	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, &port->status);
}
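All of these call sites rely on atomic_clear_mask() being a single atomic read-modify-write of the status word. Expressed portably, the semantics amount to a compare-and-swap loop; a generic sketch (architecture-specific implementations may use a dedicated interlocked instruction instead):

/* Generic sketch of atomic_clear_mask() semantics via atomic_cmpxchg(). */
static inline void atomic_clear_mask_sketch(unsigned int mask, atomic_t *v)
{
	int old, new;

	do {
		old = atomic_read(v);	/* snapshot current flags */
		new = old & ~mask;	/* drop the requested bits */
	} while (atomic_cmpxchg(v, old, new) != old);	/* retry on race */
}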
Example 11
/**
 * zfcp_adapter_enqueue - enqueue a new adapter to the list
 * @ccw_device: pointer to the struct ccw_device
 *
 * Returns:	0             if a new adapter was successfully enqueued
 *		-ENOMEM       if alloc failed
 * Enqueues an adapter at the end of the adapter list in the driver data.
 * All adapter internal structures are set up.
 * Proc-fs entries are also created.
 * Locks: config_sema must be held to serialize changes to the adapter list
 */
int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
{
	struct zfcp_adapter *adapter;

	/*
	 * Note: It is safe to release the list_lock, as any list changes
	 * are protected by the config_sema, which must be held to get here
	 */

	adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	ccw_device->handler = NULL;
	adapter->ccw_device = ccw_device;
	atomic_set(&adapter->refcount, 0);

	if (zfcp_qdio_allocate(adapter))
		goto qdio_allocate_failed;

	if (zfcp_allocate_low_mem_buffers(adapter))
		goto failed_low_mem_buffers;

	if (zfcp_reqlist_alloc(adapter))
		goto failed_low_mem_buffers;

	if (zfcp_adapter_debug_register(adapter))
		goto debug_register_failed;

	init_waitqueue_head(&adapter->remove_wq);
	init_waitqueue_head(&adapter->erp_thread_wqh);
	init_waitqueue_head(&adapter->erp_done_wqh);

	INIT_LIST_HEAD(&adapter->port_list_head);
	INIT_LIST_HEAD(&adapter->erp_ready_head);
	INIT_LIST_HEAD(&adapter->erp_running_head);

	spin_lock_init(&adapter->req_list_lock);

	spin_lock_init(&adapter->hba_dbf_lock);
	spin_lock_init(&adapter->san_dbf_lock);
	spin_lock_init(&adapter->scsi_dbf_lock);
	spin_lock_init(&adapter->rec_dbf_lock);
	spin_lock_init(&adapter->req_q_lock);

	rwlock_init(&adapter->erp_lock);
	rwlock_init(&adapter->abort_lock);

	sema_init(&adapter->erp_ready_sem, 0);

	INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
	INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later);

	adapter->service_level.seq_print = zfcp_print_sl;

	/* mark adapter unusable as long as sysfs registration is not complete */
	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);

	dev_set_drvdata(&ccw_device->dev, adapter);

	if (sysfs_create_group(&ccw_device->dev.kobj,
			       &zfcp_sysfs_adapter_attrs))
		goto sysfs_failed;

	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
	zfcp_fc_nameserver_init(adapter);

	if (!zfcp_adapter_scsi_register(adapter))
		return 0;

sysfs_failed:
	zfcp_adapter_debug_unregister(adapter);
debug_register_failed:
	dev_set_drvdata(&ccw_device->dev, NULL);
	kfree(adapter->req_list);
failed_low_mem_buffers:
	zfcp_free_low_mem_buffers(adapter);
qdio_allocate_failed:
	zfcp_qdio_free(adapter);
	kfree(adapter);
	return -ENOMEM;
}
Example 12
/**
 * zfcp_port_enqueue - enqueue port to port list of adapter
 * @adapter: adapter where remote port is added
 * @wwpn: WWPN of the remote port to be enqueued
 * @status: initial status for the port
 * @d_id: destination id of the remote port to be enqueued
 * Returns: pointer to enqueued port on success, ERR_PTR on error
 * Locks: config_sema must be held to serialize changes to the port list
 *
 * All port internal structures are set up and the sysfs entry is generated.
 * d_id is used to enqueue ports with a well known address like the Directory
 * Service for nameserver lookup.
 */
struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
				     u32 status, u32 d_id)
{
	struct zfcp_port *port;
	int retval;

	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	init_waitqueue_head(&port->remove_wq);
	INIT_LIST_HEAD(&port->unit_list_head);
	INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup);

	port->adapter = adapter;
	port->d_id = d_id;
	port->wwpn = wwpn;

	/* mark port unusable as long as sysfs registration is not complete */
	atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
	atomic_set(&port->refcount, 0);

	dev_set_name(&port->sysfs_device, "0x%016llx",
		     (unsigned long long)wwpn);
	port->sysfs_device.parent = &adapter->ccw_device->dev;

	port->sysfs_device.release = zfcp_sysfs_port_release;
	dev_set_drvdata(&port->sysfs_device, port);

	read_lock_irq(&zfcp_data.config_lock);
	if (!(status & ZFCP_STATUS_PORT_NO_WWPN))
		if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
			read_unlock_irq(&zfcp_data.config_lock);
			goto err_out_free;
		}
	read_unlock_irq(&zfcp_data.config_lock);

	if (device_register(&port->sysfs_device))
		goto err_out_free;

	retval = sysfs_create_group(&port->sysfs_device.kobj,
				    &zfcp_sysfs_port_attrs);

	if (retval) {
		device_unregister(&port->sysfs_device);
		goto err_out;
	}

	zfcp_port_get(port);

	write_lock_irq(&zfcp_data.config_lock);
	list_add_tail(&port->list, &adapter->port_list_head);
	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
	atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);

	write_unlock_irq(&zfcp_data.config_lock);

	zfcp_adapter_get(adapter);
	return port;

err_out_free:
	kfree(port);
err_out:
	return ERR_PTR(-EINVAL);
}
Example 13
static void imxmci_tasklet_fnc(unsigned long data)
{
	struct imxmci_host *host = (struct imxmci_host *)data;
	u32 stat;
	unsigned int data_dir_mask = 0;	/* STATUS_WR_CRC_ERROR_CODE_MASK */
	int timeout = 0;

	if(atomic_read(&host->stuck_timeout) > 4) {
		char *what;
		timeout = 1;
		stat = MMC_STATUS;
		host->status_reg = stat;
		if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
				what = "RESP+DMA";
			else
				what = "RESP";
		else
			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
				if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events))
					what = "DATA";
				else
					what = "DMA";
			else
				what = "???";

		dev_err(mmc_dev(host->mmc), "%s TIMEOUT, hardware stuck, STATUS = 0x%04x IMASK = 0x%04x\n",
		       what, stat, MMC_INT_MASK);
		dev_err(mmc_dev(host->mmc), "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
		       MMC_CMD_DAT_CONT, MMC_BLK_LEN, MMC_NOB, CCR(host->dma));
		dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
		       host->cmd?host->cmd->opcode:0, host->prev_cmd_code, 1<<host->actual_bus_width, host->dma_size);
	}

	if(!host->present || timeout)
		host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ |
				    STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR;

	if(test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) {
		clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);

		stat = MMC_STATUS;
		/*
		 * In theory this is not required, but there is a chance of
		 * missing a flag that clears automatically on a mask write.
		 * The original Freescale code keeps stat from IRQ time, so
		 * we do the same.
		 */
		stat |= host->status_reg;

		if(test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
			imxmci_busy_wait_for_status(host, &stat,
					STATUS_END_CMD_RESP | STATUS_ERR_MASK,
					20, "imxmci_tasklet_fnc resp (ERRATUM #4)");
		}

		if(stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) {
			if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
				imxmci_cmd_done(host, stat);
			if(host->data && (stat & STATUS_ERR_MASK))
				imxmci_data_done(host, stat);
		}

		if(test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) {
			stat |= MMC_STATUS;
			if(imxmci_cpu_driven_data(host, &stat)){
				if(test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
					imxmci_cmd_done(host, stat);
				atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m,
							&host->pending_events);
				imxmci_data_done(host, stat);
			}
		}
	}

	if(test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) &&
	   !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {

		stat = MMC_STATUS;
		/* Same as above */
		stat |= host->status_reg;

		if(host->dma_dir == DMA_TO_DEVICE) {
			data_dir_mask = STATUS_WRITE_OP_DONE;
		} else {
			data_dir_mask = STATUS_DATA_TRANS_DONE;
		}

		if(stat & data_dir_mask) {
			clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
			imxmci_data_done(host, stat);
		}
	}

	if(test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) {

		if(host->cmd)
			imxmci_cmd_done(host, STATUS_TIME_OUT_RESP);

		if(host->data)
			imxmci_data_done(host, STATUS_TIME_OUT_READ |
					 STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR);

		if(host->req)
			imxmci_finish_request(host, host->req);

		mmc_detect_change(host->mmc, msecs_to_jiffies(100));

	}
}
Example 14
/**
 * zfcp_unit_enqueue - enqueue unit to unit list of a port.
 * @port: pointer to port where unit is added
 * @fcp_lun: FCP LUN of unit to be enqueued
 * Returns: pointer to enqueued unit on success, ERR_PTR on error
 * Locks: config_sema must be held to serialize changes to the unit list
 *
 * Sets up some unit internal structures and creates sysfs entry.
 */
struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
{
	struct zfcp_unit *unit;

	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
	if (!unit)
		return ERR_PTR(-ENOMEM);

	atomic_set(&unit->refcount, 0);
	init_waitqueue_head(&unit->remove_wq);

	unit->port = port;
	unit->fcp_lun = fcp_lun;

	dev_set_name(&unit->sysfs_device, "0x%016llx",
		     (unsigned long long) fcp_lun);
	unit->sysfs_device.parent = &port->sysfs_device;
	unit->sysfs_device.release = zfcp_sysfs_unit_release;
	dev_set_drvdata(&unit->sysfs_device, unit);

	/* mark unit unusable as long as sysfs registration is not complete */
	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);

	spin_lock_init(&unit->latencies.lock);
	unit->latencies.write.channel.min = 0xFFFFFFFF;
	unit->latencies.write.fabric.min = 0xFFFFFFFF;
	unit->latencies.read.channel.min = 0xFFFFFFFF;
	unit->latencies.read.fabric.min = 0xFFFFFFFF;
	unit->latencies.cmd.channel.min = 0xFFFFFFFF;
	unit->latencies.cmd.fabric.min = 0xFFFFFFFF;

	read_lock_irq(&zfcp_data.config_lock);
	if (zfcp_get_unit_by_lun(port, fcp_lun)) {
		read_unlock_irq(&zfcp_data.config_lock);
		goto err_out_free;
	}
	read_unlock_irq(&zfcp_data.config_lock);

	if (device_register(&unit->sysfs_device))
		goto err_out_free;

	if (sysfs_create_group(&unit->sysfs_device.kobj,
			       &zfcp_sysfs_unit_attrs)) {
		device_unregister(&unit->sysfs_device);
		return ERR_PTR(-EIO);
	}

	zfcp_unit_get(unit);

	write_lock_irq(&zfcp_data.config_lock);
	list_add_tail(&unit->list, &port->unit_list_head);
	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
	atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);

	write_unlock_irq(&zfcp_data.config_lock);

	zfcp_port_get(port);

	return unit;

err_out_free:
	kfree(unit);
	return ERR_PTR(-EINVAL);
}
Example 15
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
    BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
    atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
    clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
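__unset_cpu_idle() is the teardown half of a symmetric pair: the setter flips the same two pieces of state on. A sketch of that counterpart, mirroring the fields used above:

/* Sketch of the symmetric setter paired with __unset_cpu_idle(). */
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
    BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
    atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
    set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}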
Example 16
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_interrupt_info *inti)
{
    const unsigned short table[] = { 2, 4, 4, 6 };
    int rc, exception = 0;

    switch (inti->type) {
    case KVM_S390_INT_EMERGENCY:
        VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
        vcpu->stat.deliver_emergency_signal++;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code);
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;
        break;

    case KVM_S390_INT_EXTERNAL_CALL:
        VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
        vcpu->stat.deliver_external_call++;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1202);
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code);
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;
        break;

    case KVM_S390_INT_SERVICE:
        VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
                   inti->ext.ext_params);
        vcpu->stat.deliver_service_signal++;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
        if (rc == -EFAULT)
            exception = 1;
        break;

    case KVM_S390_INT_VIRTIO:
        VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
                   inti->ext.ext_params, inti->ext.ext_params2);
        vcpu->stat.deliver_virtio_interrupt++;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00);
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
                           inti->ext.ext_params2);
        if (rc == -EFAULT)
            exception = 1;
        break;

    case KVM_S390_SIGP_STOP:
        VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
        vcpu->stat.deliver_stop_signal++;
        __set_intercept_indicator(vcpu, inti);
        break;

    case KVM_S390_SIGP_SET_PREFIX:
        VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
                   inti->prefix.address);
        vcpu->stat.deliver_prefix_signal++;
        kvm_s390_set_prefix(vcpu, inti->prefix.address);
        break;

    case KVM_S390_RESTART:
        VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
        vcpu->stat.deliver_restart_signal++;
        rc = copy_to_guest(vcpu, offsetof(struct _lowcore, restart_old_psw),
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;
        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
        break;

    case KVM_S390_PROGRAM_INT:
        VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
                   inti->pgm.code,
                   table[vcpu->arch.sie_block->ipa >> 14]);
        vcpu->stat.deliver_program_int++;
        rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
        if (rc == -EFAULT)
            exception = 1;

        rc = put_guest_u16(vcpu, __LC_PGM_ILC,
                           table[vcpu->arch.sie_block->ipa >> 14]);
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
                           &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;

        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                             __LC_PGM_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
            exception = 1;
        break;

    default:
        BUG();
    }
    if (exception) {
        printk("kvm: The guest lowcore is not mapped during interrupt "
               "delivery, killing userspace\n");
        do_exit(SIGKILL);
    }
}
Example 17
/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data;
	struct zfcp_adapter *adapter = qdio->adapter;
	struct ccw_device *cdev = adapter->ccw_device;
	struct qdio_ssqd_desc ssqd;
	int cc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
			  &qdio->adapter->status);

	zfcp_qdio_setup_init_data(&init_data, qdio);

	if (qdio_establish(&init_data))
		goto failed_establish;

	if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
		goto failed_qdio;

	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
		atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
				&qdio->adapter->status);

	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
		atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
	} else {
		atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
	}

	qdio->max_sbale_per_req =
		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
		- 2;
	if (qdio_activate(cdev))
		goto failed_qdio;

	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->res_q[cc]->element[0]);
		sbale->length = 0;
		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
		sbale->sflags = 0;
		sbale->addr = NULL;
	}

	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBALS / number of available SBALS */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

	if (adapter->scsi_host) {
		adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
		adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
	}

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}
Example 18
/* Note: ppos is ignored */
ssize_t ambsync_proc_read(struct file *file, char __user *buf,
	size_t size, loff_t *ppos)
{
	int				retval = 0;
	struct ambsync_proc_pinfo	*pinfo = file->private_data;
	struct proc_dir_entry		*dp;
	struct ambsync_proc_hinfo	*hinfo;
	struct inode			*inode = file->f_path.dentry->d_inode;
	char				*start;
	int				len;
	size_t				count;

	dp = PDE(inode);
	hinfo = (struct ambsync_proc_hinfo *)dp->data;
	if (!hinfo) {
		retval = -EPERM;
		goto ambsync_proc_read_exit;
	}
	if (!hinfo->sync_read_proc) {
		retval = -EPERM;
		goto ambsync_proc_read_exit;
	}

	if (!pinfo) {
		retval = -ENOMEM;
		goto ambsync_proc_read_exit;
	}

	count = min_t(size_t, AMBA_SYNC_PROC_PAGE_SIZE, size);
	start = pinfo->page;
	len = 0;
	while (1) {
		wait_event_interruptible(hinfo->sync_proc_head,
			(atomic_read(&hinfo->sync_proc_flag) & pinfo->mask));
		atomic_clear_mask(pinfo->mask,
			(unsigned long *)&hinfo->sync_proc_flag);

		len = hinfo->sync_read_proc(start, hinfo->sync_read_data);
		if (len < count) {
			start += len;
			count -= len;
		} else if (len == count) {
			start += len;
			count -= len;
			break;
		} else {
			break;
		}
	}
	len = start - pinfo->page;
	if (len == 0) {
		retval = -EFAULT;
	} else {
		if (copy_to_user(buf, pinfo->page, len)) {
			retval = -EFAULT;
		} else {
			retval = len;
		}
	}

ambsync_proc_read_exit:
	return retval;
}
Example 19
/**
 * zfcp_port_enqueue - enqueue port to port list of adapter
 * @adapter: adapter where remote port is added
 * @wwpn: WWPN of the remote port to be enqueued
 * @status: initial status for the port
 * @d_id: destination id of the remote port to be enqueued
 * Returns: pointer to enqueued port on success, ERR_PTR on error
 * Locks: config_sema must be held to serialize changes to the port list
 *
 * All port internal structures are set up and the sysfs entry is generated.
 * d_id is used to enqueue ports with a well known address like the Directory
 * Service for nameserver lookup.
 */
struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
				     u32 status, u32 d_id)
{
	struct zfcp_port *port;
	int retval;
	char *bus_id;

	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	init_waitqueue_head(&port->remove_wq);

	INIT_LIST_HEAD(&port->unit_list_head);
	INIT_LIST_HEAD(&port->unit_remove_lh);

	port->adapter = adapter;
	port->d_id = d_id;
	port->wwpn = wwpn;

	/* mark port unusable as long as sysfs registration is not complete */
	atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
	atomic_set(&port->refcount, 0);

	if (status & ZFCP_STATUS_PORT_WKA) {
		switch (d_id) {
		case ZFCP_DID_DIRECTORY_SERVICE:
			bus_id = "directory";
			break;
		case ZFCP_DID_MANAGEMENT_SERVICE:
			bus_id = "management";
			break;
		case ZFCP_DID_KEY_DISTRIBUTION_SERVICE:
			bus_id = "key_distribution";
			break;
		case ZFCP_DID_ALIAS_SERVICE:
			bus_id = "alias";
			break;
		case ZFCP_DID_TIME_SERVICE:
			bus_id = "time";
			break;
		default:
			kfree(port);
			return ERR_PTR(-EINVAL);
		}
		snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, "%s", bus_id);
		port->sysfs_device.parent = &adapter->generic_services;
	} else {
		snprintf(port->sysfs_device.bus_id,
			 BUS_ID_SIZE, "0x%016llx", wwpn);
		port->sysfs_device.parent = &adapter->ccw_device->dev;
	}

	port->sysfs_device.release = zfcp_sysfs_port_release;
	dev_set_drvdata(&port->sysfs_device, port);

	read_lock_irq(&zfcp_data.config_lock);
	if (!(status & ZFCP_STATUS_PORT_NO_WWPN))
		if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
			read_unlock_irq(&zfcp_data.config_lock);
			goto err_out_free;
		}
	read_unlock_irq(&zfcp_data.config_lock);

	if (device_register(&port->sysfs_device))
		goto err_out_free;

	if (status & ZFCP_STATUS_PORT_WKA)
		retval = sysfs_create_group(&port->sysfs_device.kobj,
					    &zfcp_sysfs_ns_port_attrs);
	else
		retval = sysfs_create_group(&port->sysfs_device.kobj,
					    &zfcp_sysfs_port_attrs);

	if (retval) {
		device_unregister(&port->sysfs_device);
		goto err_out;
	}

	zfcp_port_get(port);

	write_lock_irq(&zfcp_data.config_lock);
	list_add_tail(&port->list, &adapter->port_list_head);
	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
	atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
	if (d_id == ZFCP_DID_DIRECTORY_SERVICE)
		if (!adapter->nameserver_port)
			adapter->nameserver_port = port;
	adapter->ports++;

	write_unlock_irq(&zfcp_data.config_lock);

	zfcp_adapter_get(adapter);
	return port;

err_out_free:
	kfree(port);
err_out:
	return ERR_PTR(-EINVAL);
}