Example #1
File: locks.c Project: 274914765/C
void __spin_yield(raw_spinlock_t *lock)
{
    unsigned int lock_value, holder_cpu, yield_count;

    lock_value = lock->slock;
    if (lock_value == 0)
        return;
    holder_cpu = lock_value & 0xffff;
    BUG_ON(holder_cpu >= NR_CPUS);
    yield_count = lppaca[holder_cpu].yield_count;
    if ((yield_count & 1) == 0)
        return;        /* virtual cpu is currently running */
    rmb();
    if (lock->slock != lock_value)
        return;        /* something has changed */
    if (firmware_has_feature(FW_FEATURE_ISERIES))
        HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
            ((u64)holder_cpu << 32) | yield_count);
#ifdef CONFIG_PPC_SPLPAR
    else
        plpar_hcall_norets(H_CONFER,
            get_hard_smp_processor_id(holder_cpu), yield_count);
#endif
}
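For context, __spin_yield() is called from the powerpc spinlock slow path when spinning on a shared-processor LPAR. A minimal sketch of such a caller, modeled on the arch/powerpc spinlock code (SHARED_PROCESSOR, HMT_low()/HMT_medium() and __spin_trylock() are assumed helpers from asm/spinlock.h and asm/processor.h):

static inline void __spin_lock(raw_spinlock_t *lock)
{
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();		/* drop SMT thread priority while spinning */
			if (SHARED_PROCESSOR)
				__spin_yield(lock);	/* donate our timeslice to the holder */
		} while (unlikely(lock->slock != 0));
		HMT_medium();		/* restore priority before retrying */
	}
}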
Example #2
static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
{
	u16 slb_size = mmu_slb_size;
	int rc = H_MULTI_THREADS_ACTIVE;
	int cpu;

	slb_set_size(SLB_MIN_SIZE);
	printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());

	while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
	       !atomic_read(&data->error))
		rc = rtas_call(data->token, 0, 1, NULL);

	if (rc || atomic_read(&data->error)) {
		printk(KERN_DEBUG "ibm,suspend-me returned %d\n", rc);
		slb_set_size(slb_size);
	}

	if (atomic_read(&data->error))
		rc = atomic_read(&data->error);

	atomic_set(&data->error, rc);
	pSeries_coalesce_init();

	if (wake_when_done) {
		atomic_set(&data->done, 1);

		for_each_online_cpu(cpu)
			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
	}

	if (atomic_dec_return(&data->working) == 0)
		complete(data->complete);

	return rc;
}
Example #3
static int pseries_set_dabr(unsigned long dabr)
{
	return plpar_hcall_norets(H_SET_DABR, dabr);
}
Example #4
/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 * @vio_dev:	vio device struct
 * @id:		vio device id struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
				   const struct vio_device_id *id)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct device *dev = &vio_dev->dev;
	struct ibmvtpm_crq_queue *crq_q;
	struct tpm_chip *chip;
	int rc = -ENOMEM, rc1;

	chip = tpm_register_hardware(dev, &tpm_ibmvtpm);
	if (!chip) {
		dev_err(dev, "tpm_register_hardware failed\n");
		return -ENODEV;
	}

	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
	if (!ibmvtpm) {
		dev_err(dev, "kzalloc for ibmvtpm failed\n");
		goto cleanup;
	}

	crq_q = &ibmvtpm->crq_queue;
	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
	if (!crq_q->crq_addr) {
		dev_err(dev, "Unable to allocate memory for crq_addr\n");
		goto cleanup;
	}

	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
						 CRQ_RES_BUF_SIZE,
						 DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
		dev_err(dev, "dma mapping failed\n");
		goto cleanup;
	}

	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
	if (rc == H_RESOURCE)
		rc = ibmvtpm_reset_crq(ibmvtpm);

	if (rc) {
		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
		goto reg_crq_cleanup;
	}

	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
			 tpm_ibmvtpm_driver_name, ibmvtpm);
	if (rc) {
		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
		goto init_irq_cleanup;
	}

	rc = vio_enable_interrupts(vio_dev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto init_irq_cleanup;
	}

	init_waitqueue_head(&ibmvtpm->wq);

	crq_q->index = 0;

	ibmvtpm->dev = dev;
	ibmvtpm->vdev = vio_dev;
	chip->vendor.data = (void *)ibmvtpm;

	spin_lock_init(&ibmvtpm->rtce_lock);

	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_version(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	return rc;
init_irq_cleanup:
	do {
		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
	dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
cleanup:
	if (ibmvtpm) {
		if (crq_q->crq_addr)
			free_page((unsigned long)crq_q->crq_addr);
		kfree(ibmvtpm);
	}

	tpm_remove_hardware(dev);

	return rc;
}
Example #5
/**
 * ibmvtpm_send_crq - Send a CRQ request
 * @vdev:	vio device struct
 * @w1:		first word
 * @w2:		second word
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
{
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
}
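For illustration, a hedged sketch of a caller: the driver's CRQ-initialization message is a single 64-bit command word passed as w1 with w2 unused. INIT_CRQ_CMD follows the driver's convention of encoding the message type in the top bytes; treat the exact value as an assumption:

/* Assumed command encoding: valid bits 0xC0 plus message type 0x01. */
#define INIT_CRQ_CMD 0xC001000000000000ULL

static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
{
	int rc;

	rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0);
	if (rc != 0)
		dev_err(ibmvtpm->dev,
			"ibmvtpm_crq_send_init failed rc=%d\n", rc);

	return rc;
}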
Example #6
/**
 * ibmvscsi_init_crq_queue: - Initializes and registers CRQ with hypervisor
 * @queue:	crq_queue to initialize and register
 * @hostdata:	ibmvscsi_host_data of host
 * @max_requests:	maximum number of outstanding requests
 *
 * Allocates a page for messages, maps it for dma, and registers
 * the crq with the hypervisor.
 * Returns zero on success.
 */
int ibmvscsi_init_crq_queue(struct crq_queue *queue,
			    struct ibmvscsi_host_data *hostdata,
			    int max_requests)
{
	int rc;
	int retrc;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);

	if (!queue->msgs)
		goto malloc_failed;
	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(queue->msg_token))
		goto map_failed;

	gather_partition_info();
	set_adapter_info(hostdata);

	retrc = rc = plpar_hcall_norets(H_REG_CRQ,
				vdev->unit_address,
				queue->msg_token, PAGE_SIZE);
	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvscsi_reset_crq_queue(queue,
					      hostdata);

	if (rc == 2) {
		/* Adapter is good, but other end is not ready */
		printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n");
		retrc = 0;
	} else if (rc != 0) {
		printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	if (request_irq(vdev->irq,
			ibmvscsi_handle_event,
			0, "ibmvscsi", (void *)hostdata) != 0) {
		printk(KERN_ERR "ibmvscsi: couldn't register irq 0x%x\n",
		       vdev->irq);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc != 0) {
		printk(KERN_ERR "ibmvscsi:  Error %d enabling interrupts!!!\n",
		       rc);
		goto req_irq_failed;
	}

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
		     (unsigned long)hostdata);

	return retrc;

      req_irq_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
      reg_crq_failed:
	dma_unmap_single(hostdata->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
      map_failed:
	free_page((unsigned long)queue->msgs);
      malloc_failed:
	return -1;
}
Example #7
/**
 * ibmvscsi_send_crq: - Send a CRQ
 * @hostdata:	the adapter
 * @word1:	the first 64 bits of the data
 * @word2:	the second 64 bits of the data
 */
int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata, u64 word1, u64 word2)
{
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}
Example #8
long plpar_tce_put(unsigned long liobn,
		   unsigned long ioba,
		   unsigned long tceval)
{
	return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
}
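plpar_tce_put() programs a single I/O translation entry (one 4 KB page) into the window identified by liobn. A sketch of mapping a physically contiguous buffer page by page (TCE_PCI_READ/TCE_PCI_WRITE are the standard permission bits from asm/tce.h; the helper name is hypothetical):

/* Map npages 4 KB pages starting at physical address `phys` into the
 * I/O window at offset `ioba`, readable and writable by the device. */
static long tce_map_range(unsigned long liobn, unsigned long ioba,
			  unsigned long phys, unsigned long npages)
{
	long rc = 0;

	for (; npages && rc == 0; npages--, ioba += 4096, phys += 4096)
		rc = plpar_tce_put(liobn, ioba,
				   phys | TCE_PCI_READ | TCE_PCI_WRITE);

	return rc;
}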
Example #9
static inline void icp_hv_set_cppr(u8 value)
{
	long rc = plpar_hcall_norets(H_CPPR, value);
	if (rc != H_SUCCESS)
		panic("bad return code cppr - rc = %lx\n", rc);
}
Example #10
int HYPERVISOR_grant_table_op(unsigned int cmd, void *op, unsigned int count)
{
    void *desc;
    void *frame_list = NULL;
    int argsize;
    int ret = -ENOMEM;

    switch (cmd) {
    case GNTTABOP_map_grant_ref:
        argsize = sizeof(struct gnttab_map_grant_ref);
        break;
    case GNTTABOP_unmap_grant_ref:
        gnttab_pre_unmap_grant_ref(op, count);
        argsize = sizeof(struct gnttab_unmap_grant_ref);
        break;
    case GNTTABOP_setup_table: {
        struct gnttab_setup_table setup;

        memcpy(&setup, op, sizeof(setup));
        argsize = sizeof(setup);

        frame_list = xencomm_map(
                         xen_guest_handle(setup.frame_list),
                         (sizeof(*xen_guest_handle(setup.frame_list))
                          * setup.nr_frames));

        if (frame_list == NULL)
            return -ENOMEM;

        set_xen_guest_handle(setup.frame_list, frame_list);
        memcpy(op, &setup, sizeof(setup));
    }
    break;
    case GNTTABOP_dump_table:
        argsize = sizeof(struct gnttab_dump_table);
        break;
    case GNTTABOP_transfer:
        BUG();
        argsize = sizeof(struct gnttab_transfer);
        break;
    case GNTTABOP_copy:
        argsize = sizeof(struct gnttab_copy);
        break;
    case GNTTABOP_query_size:
        argsize = sizeof(struct gnttab_query_size);
        break;
    default:
        printk(KERN_EMERG "%s: unknown grant table op %d\n",
               __func__, cmd);
        return -ENOSYS;
    }

    desc = xencomm_map_no_alloc(op, argsize);
    if (desc) {
        ret = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_grant_table_op),
                                 cmd, desc, count);
        if (!ret && cmd == GNTTABOP_map_grant_ref)
            gnttab_post_map_grant_ref(op, count);
        xencomm_free(desc);
    }
    xencomm_free(frame_list);

    return ret;
}
Example #11
static int rpavscsi_init_crq_queue(struct crq_queue *queue,
				   struct ibmvscsi_host_data *hostdata,
				   int max_requests)
{
	int rc;
	int retrc;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);

	if (!queue->msgs)
		goto malloc_failed;
	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(hostdata->dev, queue->msg_token))
		goto map_failed;

	gather_partition_info();
	set_adapter_info(hostdata);

	retrc = rc = plpar_hcall_norets(H_REG_CRQ,
				vdev->unit_address,
				queue->msg_token, PAGE_SIZE);
	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = rpavscsi_reset_crq_queue(queue,
					      hostdata);

	if (rc == 2) {
		/* Adapter is good, but other end is not ready */
		dev_warn(hostdata->dev, "Partner adapter not ready\n");
		retrc = 0;
	} else if (rc != 0) {
		dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task,
		     (unsigned long)hostdata);

	if (request_irq(vdev->irq,
			rpavscsi_handle_event,
			0, "ibmvscsi", (void *)hostdata) != 0) {
		dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
			vdev->irq);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc != 0) {
		dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
		goto req_irq_failed;
	}

	return retrc;

      req_irq_failed:
	tasklet_kill(&hostdata->srp_task);
	rc = 0;
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
      reg_crq_failed:
	dma_unmap_single(hostdata->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
      map_failed:
	free_page((unsigned long)queue->msgs);
      malloc_failed:
	return -1;
}
Example #12
long plpar_ipi(unsigned long servernum,
	       unsigned long mfrr)
{
	return plpar_hcall_norets(H_IPI, servernum, mfrr);
}
Example #13
long plpar_cppr(unsigned long cppr)
{
	return plpar_hcall_norets(H_CPPR, cppr);
}
Example #14
long plpar_eoi(unsigned long xirr)
{
	return plpar_hcall_norets(H_EOI, xirr);
}
Example #15
static int pseries_set_xdabr(unsigned long dabr)
{
	/* We want to catch accesses from kernel and userspace */
	return plpar_hcall_norets(H_SET_XDABR, dabr,
			H_DABRX_KERNEL | H_DABRX_USER);
}
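A sketch of how such a DABR value might be composed before calling pseries_set_xdabr(): the watched address is 8-byte aligned and the low bits select the match conditions (DABR_TRANSLATION, DABR_DATA_READ and DABR_DATA_WRITE are the usual powerpc bit names; the wrapper itself is hypothetical):

/* Hypothetical helper: arm a read+write data breakpoint on `addr`. */
static int watch_address(unsigned long addr)
{
	unsigned long dabr = (addr & ~7UL) | DABR_TRANSLATION |
			     DABR_DATA_READ | DABR_DATA_WRITE;

	return pseries_set_xdabr(dabr);
}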
Example #16
static inline void icp_hv_set_xirr(unsigned int value)
{
	long rc = plpar_hcall_norets(H_EOI, value);
	if (rc != H_SUCCESS)
		panic("bad return code EOI - rc = %ld, value=%x\n", rc, value);
}
Example #17
unsigned long hv_perf_caps_get(struct hv_perf_caps *caps)
{
	unsigned long r;
	struct p {
		struct hv_get_perf_counter_info_params params;
		struct hv_gpci_system_performance_capabilities caps;
	} __packed __aligned(sizeof(uint64_t));

	struct p arg = {
		.params = {
			.counter_request = cpu_to_be32(
				HV_GPCI_system_performance_capabilities),
			.starting_index = cpu_to_be32(-1),
			.counter_info_version_in = 0,
		}
	};

	r = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
			       virt_to_phys(&arg), sizeof(arg));

	if (r)
		return r;

	pr_devel("capability_mask: 0x%x\n", arg.caps.capability_mask);

	caps->version = arg.params.counter_info_version_out;
	caps->collect_privileged = !!arg.caps.perf_collect_privileged;
	caps->ga = !!(arg.caps.capability_mask & HV_GPCI_CM_GA);
	caps->expanded = !!(arg.caps.capability_mask & HV_GPCI_CM_EXPANDED);
	caps->lab = !!(arg.caps.capability_mask & HV_GPCI_CM_LAB);

	return r;
}
Example #18
/**
 * hvcs_free_connection - free the connection between a vty-server and vty
 * @unit_address: The unit address of the vty-server that is to have its
 *	connection severed.
 *
 * This function is used to free the partner connection between a vty-server
 * adapter and a vty adapter.
 *
 * If -EBUSY is returned continue to call this function until 0 is returned.
 */
int hvcs_free_connection(uint32_t unit_address)
{
	long retval;
	retval = plpar_hcall_norets(H_FREE_VTERM, unit_address);
	return hvcs_convert(retval);
}
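Per the comment above, callers are expected to poll until the hypervisor releases the vterm. A minimal sketch of that retry loop (the helper name and the 10 ms backoff are assumptions):

/* Hypothetical caller: retry while the connection is still busy. */
static void hvcs_drop_connection(uint32_t unit_address)
{
	int rc;

	do {
		rc = hvcs_free_connection(unit_address);
		if (rc == -EBUSY)
			msleep(10);	/* arbitrary backoff before retrying */
	} while (rc == -EBUSY);
}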
Example #19
File: hv-24x7.c Project: Benguang/linux
static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
					 u16 lpar, u64 *res,
					 bool success_expected)
{
	unsigned long ret;

	/* Neither buffer may cross a 4k boundary; aligning to 4k ensures that. */
	struct reqb {
		struct hv_24x7_request_buffer buf;
		struct hv_24x7_request req;
	} __packed __aligned(4096) request_buffer = {
		.buf = {
			.interface_version = HV_24X7_IF_VERSION_CURRENT,
			.num_requests = 1,
		},
		.req = {
			.performance_domain = domain,
			.data_size = cpu_to_be16(8),
			.data_offset = cpu_to_be32(offset),
			.starting_lpar_ix = cpu_to_be16(lpar),
			.max_num_lpars = cpu_to_be16(1),
			.starting_ix = cpu_to_be16(ix),
			.max_ix = cpu_to_be16(1),
		}
	};

	struct resb {
		struct hv_24x7_data_result_buffer buf;
		struct hv_24x7_result res;
		struct hv_24x7_result_element elem;
		__be64 result;
	} __packed __aligned(4096) result_buffer = {};

	ret = plpar_hcall_norets(H_GET_24X7_DATA,
			virt_to_phys(&request_buffer), sizeof(request_buffer),
			virt_to_phys(&result_buffer),  sizeof(result_buffer));

	if (ret) {
		if (success_expected)
			pr_err_ratelimited("hcall failed: %d %#x %#x %d => 0x%lx (%ld) detail=0x%x failing ix=%x\n",
					domain, offset, ix, lpar,
					ret, ret,
					result_buffer.buf.detailed_rc,
					result_buffer.buf.failing_request_ix);
		return ret;
	}

	*res = be64_to_cpu(result_buffer.result);
	return ret;
}