Example #1
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
						unsigned long va)
{
	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));
	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpus_and(cpumask, cpumask, cpu_online_map);
	if (cpus_empty(cpumask))
		return;

	/*
	 * I'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);
	
	flush_mm = mm;
	flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
	atomic_set_mask(cpumask, &flush_cpumask);
#else
	{
		int k;
		unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
		unsigned long *cpu_mask = (unsigned long *)&cpumask;
		for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
			atomic_set_mask(cpu_mask[k], &flush_mask[k]);
	}
#endif

	/*
	 * Make the above memory operations globally visible before
	 * sending the IPI.
	 */
	smp_mb();
	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(flush_cpumask, INVALIDATE_TLB_VECTOR);

	while (!cpus_empty(flush_cpumask))
		/* nothing. lockup detection does not belong here */
		mb();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
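
All of the examples in this list revolve around atomic_set_mask(), which atomically ORs a bit mask into a status word. As a reference point, here is a minimal sketch of its semantics built on a compare-and-swap loop; this is illustrative only (real implementations use a single atomic instruction, e.g. a locked OR on x86, and the argument type varies by architecture between a plain word and an atomic_t):

/* Sketch: atomically OR `mask` into `*addr`. Illustrative only. */
static inline void atomic_set_mask_sketch(unsigned long mask,
					  volatile unsigned long *addr)
{
	unsigned long old;

	do {
		old = *addr;
	} while (cmpxchg(addr, old, old | mask) != old);
}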
Example #2
static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
        struct zfcp_adapter *adapter,
        struct zfcp_port *port,
        struct scsi_device *sdev)
{
    struct zfcp_erp_action *erp_action;
    struct zfcp_scsi_dev *zfcp_sdev;

    switch (need) {
    case ZFCP_ERP_ACTION_REOPEN_LUN:
        zfcp_sdev = sdev_to_zfcp(sdev);
        if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
            if (scsi_device_get(sdev))
                return NULL;
        atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
                        &zfcp_sdev->status);
        erp_action = &zfcp_sdev->erp_action;
        memset(erp_action, 0, sizeof(struct zfcp_erp_action));
        erp_action->port = port;
        erp_action->sdev = sdev;
        if (!(atomic_read(&zfcp_sdev->status) &
                ZFCP_STATUS_COMMON_RUNNING))
            act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
        break;

    case ZFCP_ERP_ACTION_REOPEN_PORT:
    case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
        if (!get_device(&port->dev))
            return NULL;
        zfcp_erp_action_dismiss_port(port);
        atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
        erp_action = &port->erp_action;
        memset(erp_action, 0, sizeof(struct zfcp_erp_action));
        erp_action->port = port;
        if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
            act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
        break;

    case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
        kref_get(&adapter->ref);
        zfcp_erp_action_dismiss_adapter(adapter);
        atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
        erp_action = &adapter->erp_action;
        memset(erp_action, 0, sizeof(struct zfcp_erp_action));
        if (!(atomic_read(&adapter->status) &
                ZFCP_STATUS_COMMON_RUNNING))
            act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
        break;

    default:
        return NULL;
    }

    erp_action->adapter = adapter;
    erp_action->action = need;
    erp_action->status = act_status;

    return erp_action;
}
Example #3
/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data;
	struct zfcp_adapter *adapter = qdio->adapter;
	struct ccw_device *cdev = adapter->ccw_device;
	struct qdio_ssqd_desc ssqd;
	int cc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
			  &qdio->adapter->status);

	zfcp_qdio_setup_init_data(&init_data, qdio);

	if (qdio_establish(&init_data))
		goto failed_establish;

	if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
		goto failed_qdio;

	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
		atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
				&qdio->adapter->status);

	if (qdio_activate(cdev))
		goto failed_qdio;

	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->res_q[cc]->element[0]);
		sbale->length = 0;
		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
		sbale->sflags = 0;
		sbale->addr = NULL;
	}

	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBALS / number of available SBALS */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}
Example #4
static int hone_ioctl(struct inode *inode, struct file *file,
		unsigned int num, unsigned long param)
{
	struct hone_reader *reader = file->private_data;
	int err;

	if (_IOC_TYPE(num) != 0xE0)
		return -EINVAL;

	switch (num) {
	case HEIO_RESTART:
		atomic_set_mask(READER_RESTART, &reader->flags);
		wake_up_interruptible_all(&reader->event_wait_queue);
		return 0;
	case HEIO_GET_AT_HEAD:
		return atomic_read(&reader->flags) & READER_HEAD ? 1 : 0;
	case HEIO_GET_SNAPLEN:
		return put_user(reader->info.snaplen, (unsigned int __user *) param);
	case HEIO_SET_SNAPLEN:
		reader->info.snaplen = (unsigned int) param;
		atomic_set_mask(READER_HEAD, &reader->flags);
		return 0;
	case HEIO_SET_FILTER_SOCK:
	{
		int fd = (int) param;
		struct sock *sk;
		if (fd != -1) {
			struct socket *sock;
			if (!(sock = sockfd_lookup(fd, &err)))
				return err;
			sk = sock->sk;
			sock_hold(sk);
			fput(sock->file);
		} else {
			sk = NULL;
		}
		for (;;) {
			struct sock *old_sk = reader->filter_sk;
			if (cmpxchg(&reader->filter_sk, old_sk, sk) == old_sk) {
				if (old_sk)
					sock_put(old_sk);
				break;
			}
		}
		return 0;
	}
	}

	return -EINVAL;
}
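
A side note on the HEIO_SET_FILTER_SOCK case above: the cmpxchg() loop swaps the filter socket pointer atomically, so even if several ioctl() callers race on the same reader, each old socket loses its reference exactly once.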
Example #5
static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
						  struct zfcp_adapter *adapter,
						  struct zfcp_port *port,
						  struct zfcp_unit *unit)
{
	struct zfcp_erp_action *erp_action;
	u32 status = 0;

	switch (need) {
	case ZFCP_ERP_ACTION_REOPEN_UNIT:
		zfcp_unit_get(unit);
		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
		erp_action = &unit->erp_action;
		if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING))
			status = ZFCP_STATUS_ERP_CLOSE_ONLY;
		break;

	case ZFCP_ERP_ACTION_REOPEN_PORT:
	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
		zfcp_port_get(port);
		zfcp_erp_action_dismiss_port(port);
		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
		erp_action = &port->erp_action;
		if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
			status = ZFCP_STATUS_ERP_CLOSE_ONLY;
		break;

	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		zfcp_adapter_get(adapter);
		zfcp_erp_action_dismiss_adapter(adapter);
		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
		erp_action = &adapter->erp_action;
		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_COMMON_RUNNING))
			status = ZFCP_STATUS_ERP_CLOSE_ONLY;
		break;

	default:
		return NULL;
	}

	memset(erp_action, 0, sizeof(struct zfcp_erp_action));
	erp_action->adapter = adapter;
	erp_action->port = port;
	erp_action->unit = unit;
	erp_action->action = need;
	erp_action->status = status;

	return erp_action;
}
Example #6
static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
{
	unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
	unsigned long subcode = vcpu->arch.guest_gprs[reg] & 0xffff;

	VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
	switch (subcode) {
	case 3:
		vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
		break;
	case 4:
		vcpu->run->s390_reset_flags = 0;
		break;
	default:
		return -EOPNOTSUPP;
	}

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
	vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
	VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
	  vcpu->run->s390_reset_flags);
	return -EREMOTE;
}
Example #7
static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
                                   struct zfcp_port *port,
                                   struct scsi_device *sdev,
                                   char *id, u32 act_status)
{
    int retval = 1, need;
    struct zfcp_erp_action *act;

    if (!adapter->erp_thread)
        return -EIO;

    need = zfcp_erp_required_act(want, adapter, port, sdev);
    if (!need)
        goto out;

    act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
    if (!act)
        goto out;
    atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
    ++adapter->erp_total_count;
    list_add_tail(&act->list, &adapter->erp_ready_head);
    wake_up(&adapter->erp_ready_wq);
    retval = 0;
out:
    zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need);
    return retval;
}
Example #8
static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
				   struct zfcp_port *port,
				   struct zfcp_unit *unit, char *id, void *ref)
{
	int retval = 1, need;
	struct zfcp_erp_action *act = NULL;

	if (!adapter->erp_thread)
		return -EIO;

	need = zfcp_erp_required_act(want, adapter, port, unit);
	if (!need)
		goto out;

	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
	act = zfcp_erp_setup_act(need, adapter, port, unit);
	if (!act)
		goto out;
	++adapter->erp_total_count;
	list_add_tail(&act->list, &adapter->erp_ready_head);
	wake_up(&adapter->erp_ready_wq);
	zfcp_dbf_rec_thread("eracte1", adapter->dbf);
	retval = 0;
 out:
	zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, unit);
	return retval;
}
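
Examples #7 and #8 are two generations of the same function: Example #8 is the older zfcp_unit-based version, Example #7 the later scsi_device-based one. Note the ordering difference: the newer code sets ZFCP_STATUS_ADAPTER_ERP_PENDING only after zfcp_erp_setup_act() succeeds, while the older version sets the flag first and leaves it set even when setup fails.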
Example #9
static int zfcp_erp_thread(void *data)
{
	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
	struct list_head *next;
	struct zfcp_erp_action *act;
	unsigned long flags;
	int ignore;

	daemonize("zfcperp%s", dev_name(&adapter->ccw_device->dev));
	/* Block all signals */
	siginitsetinv(&current->blocked, 0);
	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
	wake_up(&adapter->erp_thread_wqh);

	while (!(atomic_read(&adapter->status) &
		 ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL)) {

		zfcp_rec_dbf_event_thread_lock("erthrd1", adapter);
		ignore = down_interruptible(&adapter->erp_ready_sem);
		zfcp_rec_dbf_event_thread_lock("erthrd2", adapter);

		write_lock_irqsave(&adapter->erp_lock, flags);
		next = adapter->erp_ready_head.next;
		write_unlock_irqrestore(&adapter->erp_lock, flags);

		if (next != &adapter->erp_ready_head) {
			act = list_entry(next, struct zfcp_erp_action, list);

			/* there is more to come after dismission, no notify */
			if (zfcp_erp_strategy(act) != ZFCP_ERP_DISMISSED)
				zfcp_erp_wakeup(adapter);
		}
	}

	/* mirror the startup handshake on the way out */
	atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
	wake_up(&adapter->erp_thread_wqh);

	return 0;
}
Example #10
static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
{
	if (zfcp_qdio_open(act->adapter))
		return ZFCP_ERP_FAILED;
	init_waitqueue_head(&act->adapter->request_wq);
	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status);
	return ZFCP_ERP_SUCCEEDED;
}
Example #11
static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
{
    struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

    if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
        zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
    atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
}
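
status_change_set() reports whether the upcoming atomic_set_mask() will actually change anything. In the zfcp driver it is a one-line helper along these lines (a sketch, not necessarily the exact source):

/* Sketch: nonzero iff at least one bit of `mask` is still clear in
 * `*status`, i.e. setting the mask would change the word. */
static int status_change_set(unsigned long mask, atomic_t *status)
{
	return (atomic_read(status) ^ mask) & mask;
}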
Example #12
/*==========================================================================*
 * Name:         flush_tlb_others
 *
 * Description:  This routine asks the other CPUs to flush their TLBs.
 *               1. Set up the parameters.
 *               2. Send 'INVALIDATE_TLB_IPI' to the other CPUs,
 *                  requesting them to execute 'smp_invalidate_interrupt()'.
 *               3. Wait until the other CPUs have finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpumask - bitmap of target CPUs
 *               *mm - a pointer to the mm struct to flush the TLB for
 *               *vma - a pointer to the vma struct that contains va
 *               va - virtual address for flush TLB
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
	struct vm_area_struct *vma, unsigned long va)
{
	unsigned long *mask;
#ifdef DEBUG_SMP
	unsigned long flags;
	__save_flags(flags);
	if (!(flags & 0x0040))	/* Interrupt Disable NONONO */
		BUG();
#endif /* DEBUG_SMP */

	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpumask_empty(&cpumask));

	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpumask_and(&cpumask, &cpumask, cpu_online_mask);
	if (cpumask_empty(&cpumask))
		return;

	/*
	 * I'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_vma = vma;
	flush_va = va;
	mask = cpumask_bits(&cpumask);
	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);

	while (!cpumask_empty((cpumask_t*)&flush_cpumask)) {
		/* nothing. lockup detection does not belong here */
		mb();
	}

	flush_mm = NULL;
	flush_vma = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
Example #13
static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct zfcp_port *port = dev_get_drvdata(dev);
	struct zfcp_unit *unit;
	u64 fcp_lun;
	LIST_HEAD(unit_remove_lh);
	struct scsi_device *sdev;

	mutex_lock(&zfcp_data.config_mutex);
	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
		mutex_unlock(&zfcp_data.config_mutex);
		return -EBUSY;
	}

	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) {
		mutex_unlock(&zfcp_data.config_mutex);
		return -EINVAL;
	}

	read_lock_irq(&zfcp_data.config_lock);
	unit = zfcp_get_unit_by_lun(port, fcp_lun);
	read_unlock_irq(&zfcp_data.config_lock);
	if (!unit) {
		mutex_unlock(&zfcp_data.config_mutex);
		return -ENXIO;
	}
	zfcp_unit_get(unit);
	mutex_unlock(&zfcp_data.config_mutex);

	sdev = scsi_device_lookup(port->adapter->scsi_host, 0,
				  port->starget_id,
				  scsilun_to_int((struct scsi_lun *)&fcp_lun));
	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}

	mutex_lock(&zfcp_data.config_mutex);
	zfcp_unit_put(unit);
	if (atomic_read(&unit->refcount)) {
		mutex_unlock(&zfcp_data.config_mutex);
		return -ENXIO;
	}

	write_lock_irq(&zfcp_data.config_lock);
	atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
	list_move(&unit->list, &unit_remove_lh);
	write_unlock_irq(&zfcp_data.config_lock);
	mutex_unlock(&zfcp_data.config_mutex);

	zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
	zfcp_erp_wait(unit->port->adapter);
	zfcp_unit_dequeue(unit);

	return (ssize_t)count;
}
Example #14
static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct zfcp_port *port = dev_get_drvdata(dev);
	struct zfcp_unit *unit;
	u64 fcp_lun;
	int retval = 0;
	LIST_HEAD(unit_remove_lh);

	mutex_lock(&zfcp_data.config_mutex);
	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
		retval = -EBUSY;
		goto out;
	}

	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) {
		retval = -EINVAL;
		goto out;
	}

	write_lock_irq(&zfcp_data.config_lock);
	unit = zfcp_get_unit_by_lun(port, fcp_lun);
	if (unit) {
		write_unlock_irq(&zfcp_data.config_lock);
		
		flush_work(&unit->scsi_work);
		write_lock_irq(&zfcp_data.config_lock);

		if (atomic_read(&unit->refcount) == 0) {
			zfcp_unit_get(unit);
			atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
					&unit->status);
			list_move(&unit->list, &unit_remove_lh);
		} else {
			unit = NULL;
		}
	}

	write_unlock_irq(&zfcp_data.config_lock);

	if (!unit) {
		retval = -ENXIO;
		goto out;
	}

	zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
	zfcp_erp_wait(unit->port->adapter);
	zfcp_unit_put(unit);
	zfcp_unit_dequeue(unit);
out:
	mutex_unlock(&zfcp_data.config_mutex);
	return retval ? retval : (ssize_t) count;
}
Example #15
/**
 * zfcp_qdio_siosl - Trigger logging in FCP channel
 * @adapter: The zfcp_adapter where to trigger logging
 *
 * Call the cio siosl function to trigger hardware logging.  This
 * wrapper function sets a flag to ensure hardware logging is only
 * triggered once before going through qdio shutdown.
 *
 * The triggers are always run from qdio tasklet context, so no
 * additional synchronization is necessary.
 */
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
	int rc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
		return;

	rc = ccw_device_siosl(adapter->ccw_device);
	if (!rc)
		atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
				&adapter->status);
}
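
The ZFCP_STATUS_ADAPTER_SIOSL_ISSUED flag set here is the same one cleared with atomic_clear_mask() at the start of zfcp_qdio_open() in Example #3, which re-arms this one-shot logging trigger for the next QDIO bring-up.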
Example #16
void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
{
	unsigned int cpu;
	struct ipi_data *bfin_ipi_data;
	unsigned long flags;

	local_irq_save(flags);
	for_each_cpu(cpu, cpumask) {
		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
		atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
		atomic_inc(&bfin_ipi_data->count);
	}
	local_irq_restore(flags);

	/* (the snippet is cut off here in the original; the full function
	 * goes on to raise the actual IPI for each CPU in the mask) */
}
Example #17
static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
{
	struct zfcp_adapter *adapter = act->adapter;
	struct zfcp_port *port = act->port;

	if (port->wwpn != adapter->peer_wwpn) {
		zfcp_erp_port_failed(port, 25, NULL);
		return ZFCP_ERP_FAILED;
	}
	port->d_id = adapter->peer_d_id;
	atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
	return zfcp_erp_port_strategy_open_port(act);
}
Example #18
/**
 * zfcp_sysfs_unit_remove_store - remove a unit from sysfs tree
 * @dev: pointer to belonging device
 * @buf: pointer to input buffer
 * @count: number of bytes in buffer
 */
static ssize_t
zfcp_sysfs_unit_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct zfcp_port *port;
	struct zfcp_unit *unit;
	fcp_lun_t fcp_lun;
	char *endp;
	int retval = 0;

	down(&zfcp_data.config_sema);

	port = dev_get_drvdata(dev);
	if (atomic_test_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status)) {
		retval = -EBUSY;
		goto out;
	}

	fcp_lun = simple_strtoull(buf, &endp, 0);
	if ((endp + 1) < (buf + count)) {
		retval = -EINVAL;
		goto out;
	}

	write_lock_irq(&zfcp_data.config_lock);
	unit = zfcp_get_unit_by_lun(port, fcp_lun);
	if (unit && (atomic_read(&unit->refcount) == 0)) {
		zfcp_unit_get(unit);
		atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
		list_move(&unit->list, &port->unit_remove_lh);
	} else {
		unit = NULL;
	}
	write_unlock_irq(&zfcp_data.config_lock);

	if (!unit) {
		retval = -ENXIO;
		goto out;
	}

	zfcp_erp_unit_shutdown(unit, 0);
	zfcp_erp_wait(unit->port->adapter);
	zfcp_unit_put(unit);
	zfcp_unit_dequeue(unit);
 out:
	up(&zfcp_data.config_sema);
	return retval ? retval : (ssize_t) count;
}
Example #19
/*==========================================================================*
 * Name:         smp_flush_cache_all
 *
 * Description:  This routine sends an 'INVALIDATE_CACHE_IPI' to all other
 *               CPUs in the system.
 *
 * Born on Date: 2003-05-28
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_cache_all(void)
{
	cpumask_t cpumask;
	unsigned long *mask;

	preempt_disable();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);
	spin_lock(&flushcache_lock);
	mask = cpumask_bits(&cpumask);
	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
	_flush_cache_copyback_all();
	while (flushcache_cpumask)
		mb();
	spin_unlock(&flushcache_lock);
	preempt_enable();
}
Example #20
static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct zfcp_adapter *adapter = dev_get_drvdata(dev);
	struct zfcp_port *port;
	u64 wwpn;
	int retval = 0;
	LIST_HEAD(port_remove_lh);

	mutex_lock(&zfcp_data.config_mutex);
	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) {
		retval = -EBUSY;
		goto out;
	}

	if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn)) {
		retval = -EINVAL;
		goto out;
	}

	write_lock_irq(&zfcp_data.config_lock);
	port = zfcp_get_port_by_wwpn(adapter, wwpn);
	if (port && (atomic_read(&port->refcount) == 0)) {
		zfcp_port_get(port);
		atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
		list_move(&port->list, &port_remove_lh);
	} else
		port = NULL;
	write_unlock_irq(&zfcp_data.config_lock);

	if (!port) {
		retval = -ENXIO;
		goto out;
	}

	zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
	zfcp_erp_wait(adapter);
	zfcp_port_put(port);
	zfcp_port_dequeue(port);
 out:
	mutex_unlock(&zfcp_data.config_mutex);
	return retval ? retval : (ssize_t) count;
}
Example #21
/**
 * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
 * @cpumask: The list of CPUs to target.
 * @mm: The VM context to flush from (if va!=FLUSH_ALL).
 * @va: Virtual address to flush or FLUSH_ALL to flush everything.
 */
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	cpumask_t tmp;

	/* A couple of sanity checks (to be removed):
	 * - mask must not be empty
	 * - current CPU must not be in mask
	 * - we do not send IPIs to as-yet unbooted CPUs.
	 */
	BUG_ON(!mm);
	BUG_ON(cpumask_empty(&cpumask));
	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));

	cpumask_and(&tmp, &cpumask, cpu_online_mask);
	BUG_ON(!cpumask_equal(&cpumask, &tmp));

	/* I'm not happy about this global shared spinlock in the MM hot path,
	 * but we'll see how contended it is.
	 *
	 * Temporarily this turns IRQs off, so that lockups are detected by the
	 * NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
#else
#error Not supported.
#endif

	/* FIXME: if NR_CPUS>=3, change send_IPI_mask */
	smp_call_function(smp_flush_tlb, NULL, 1);

	while (!cpumask_empty(&flush_cpumask))
		/* Lockup detection does not belong here */
		smp_mb();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
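
Note the #error branch for NR_CPUS > BITS_PER_LONG: the multi-word case this port refuses to support is exactly what Example #1 spells out with its per-long atomic_set_mask() loop over BITS_TO_LONGS(NR_CPUS) words.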
Example #22
static void flush_tlb_others(unsigned long cpumask, struct mm_struct *mm,
						unsigned long va)
{
	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	if (!cpumask)
		BUG();
	if ((cpumask & cpu_online_map) != cpumask)
		BUG();
	if (cpumask & (1 << smp_processor_id()))
		BUG();
	if (!mm)
		BUG();

	/*
	 * I'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);
	
	flush_mm = mm;
	flush_va = va;
	atomic_set_mask(cpumask, &flush_cpumask);
	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

	while (flush_cpumask)
		/* nothing. lockup detection does not belong here */;

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
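
For the spin on flush_cpumask in these TLB examples to terminate, each targeted CPU must clear its own bit once it has flushed. Below is a sketch of the receiving side, loosely modelled on the historical i386 handler; the names (FLUSH_ALL from Example #21's kerneldoc, local_flush_tlb(), __flush_tlb_one()) are illustrative and vary by architecture:

/* Sketch of the IPI handler on a target CPU: flush the local TLB for
 * the requested mm/va, then clear this CPU's bit so the sender's wait
 * loop can make progress. */
asmlinkage void smp_invalidate_interrupt(void)
{
	unsigned long cpu = smp_processor_id();

	if (!test_bit(cpu, &flush_cpumask))
		return;

	if (flush_mm == current->active_mm) {
		if (flush_va == FLUSH_ALL)
			local_flush_tlb();
		else
			__flush_tlb_one(flush_va);
	}
	clear_bit(cpu, &flush_cpumask);	/* releases the waiting sender */
}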
Example #23
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	li->action_bits |= action;
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);

	return 0; /* order accepted */
}
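
The sigp examples (#23, #25, #27, #28) all follow the same pattern: allocate and queue a kvm_s390_interrupt_info on the vcpu's local-interrupt list under li->lock, set the matching CPUSTAT_* bit via atomic_set_mask() on li->cpuflags (which points into the vcpu's SIE control block, so the pending interrupt becomes visible to the hardware), and finally wake the vcpu if it is sleeping on its wait queue.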
Example #24
static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
{
	struct zfcp_adapter *adapter = act->adapter;

	if (zfcp_erp_adapter_strategy_open_qdio(act)) {
		atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
				  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
				  &adapter->status);
		return ZFCP_ERP_FAILED;
	}

	if (zfcp_erp_adapter_strategy_open_fsf(act)) {
		zfcp_erp_adapter_strategy_close(act);
		return ZFCP_ERP_FAILED;
	}

	atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &adapter->status);

	return ZFCP_ERP_SUCCEEDED;
}
Example #25
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return 3; /* not operational */

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = 3; /* not operational */
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	if (store)
		li->action_bits |= ACTION_STORE_ON_STOP;
	li->action_bits |= ACTION_STOP_ON_STOP;
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	rc = 0; /* order accepted */
unlock:
	spin_unlock_bh(&fi->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
	return rc;
}
Example #26
static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *act,
					     int close)
{
	int retval = ZFCP_ERP_SUCCEEDED;
	struct zfcp_adapter *adapter = act->adapter;

	if (close)
		goto close_only;

	retval = zfcp_erp_adapter_strategy_open_qdio(act);
	if (retval != ZFCP_ERP_SUCCEEDED)
		goto failed_qdio;

	retval = zfcp_erp_adapter_strategy_open_fsf(act);
	if (retval != ZFCP_ERP_SUCCEEDED)
		goto failed_openfcp;

	atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &act->adapter->status);

	return ZFCP_ERP_SUCCEEDED;

 close_only:
	atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
			  &act->adapter->status);

 failed_openfcp:
	/* close queues to ensure that buffers are not accessed by adapter */
	zfcp_qdio_close(adapter);
	zfcp_fsf_req_dismiss_all(adapter);
	adapter->fsf_req_seq_no = 0;
	/* all ports and units are closed */
	zfcp_erp_modify_adapter_status(adapter, 24, NULL,
				       ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
 failed_qdio:
	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
			  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
			  &act->adapter->status);
	return retval;
}
Example #27
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		kfree(inti);	/* vcpu already stopped: drop the unqueued item */
		goto out;
	}
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	li->action_bits |= action;
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
out:
	spin_unlock_bh(&li->lock);

	return SIGP_CC_ORDER_CODE_ACCEPTED;
}
Example #28
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EXTERNAL_CALL;
	inti->extcall.code = vcpu->vcpu_id;

	spin_lock(&fi->lock);
	li = fi->local_int[cpu_addr];
	if (li == NULL) {
		rc = SIGP_CC_NOT_OPERATIONAL;
		kfree(inti);
		goto unlock;
	}
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
unlock:
	spin_unlock(&fi->lock);
	return rc;
}
Пример #29
0
static void zfcp_erp_unit_unblock(struct zfcp_unit *unit)
{
	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status))
		zfcp_dbf_rec_unit("eruubl1", NULL, unit);
	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status);
}
Example #30
static void zfcp_erp_port_unblock(struct zfcp_port *port)
{
	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
		zfcp_dbf_rec_port("erpubl1", NULL, port);
	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
}