Example #1
File: iommu.c  Project: ANFS/ANFS-kernel
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				     long npages, unsigned long uaddr,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	u64 rc = 0;
	u64 proto_tce;
	u64 *tcep;
	u64 rpn;
	long l, limit;
	long tcenum_start = tcenum, npages_start = npages;
	int ret = 0;

	if (npages == 1) {
		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
		                           direction, attrs);
	}

	tcep = __get_cpu_var(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (u64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep) {
			return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
					    direction, attrs);
		}
		__get_cpu_var(tce_page) = tcep;
	}

	rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)virt_to_abs(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		ret = (int)rc;
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
		                        (npages_start - (npages + limit)));
		return ret;
	}

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
		show_stack(current, (unsigned long *)__get_SP());
	}
	return ret;
}
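
The do/while above submits at most one 4 KB scratch page of TCE entries per plpar_tce_put_indirect() call, advancing the table index until every page is mapped or the hypervisor reports an error. Below is a minimal userspace sketch of the same chunking arithmetic; the constants and names are illustrative stand-ins, not the kernel's.

#include <stdio.h>

#define SCRATCH_PAGE_SIZE 4096	/* one page of TCE entries per call */
#define TCE_ENTRY_SIZE    8

static long min_long(long a, long b) { return a < b ? a : b; }

int main(void)
{
	long npages = 10000;	/* total entries to map */
	long tcenum = 0;	/* current table index */

	while (npages > 0) {
		/* At most one scratch page of entries per "hypervisor call". */
		long limit = min_long(npages, SCRATCH_PAGE_SIZE / TCE_ENTRY_SIZE);

		printf("submit %ld entries at table index %ld\n", limit, tcenum);

		npages -= limit;
		tcenum += limit;
	}
	return 0;
}
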
Example #2
static void rtas_flash_firmware(int reboot_type)
{
	unsigned long image_size;
	struct flash_block_list *f, *next, *flist;
	unsigned long rtas_block_list;
	int i, status, update_token;

	if (rtas_firmware_flash_list == NULL)
		return;		/* nothing to do */

	if (reboot_type != SYS_RESTART) {
		printk(KERN_ALERT "FLASH: firmware flash requires a reboot\n");
		printk(KERN_ALERT "FLASH: the firmware image will NOT be flashed\n");
		return;
	}

	update_token = rtas_token("ibm,update-flash-64-and-reboot");
	if (update_token == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_ALERT "FLASH: ibm,update-flash-64-and-reboot "
		       "is not available -- not a service partition?\n");
		printk(KERN_ALERT "FLASH: firmware will not be flashed\n");
		return;
	}

	/*
	 * Just before starting the firmware flash, cancel the event scan work
	 * to avoid any soft lockup issues.
	 */
	rtas_cancel_event_scan();

	/*
	 * NOTE: the "first" block must be under 4GB, so we create
	 * an entry with no data blocks in the reserved buffer in
	 * the kernel data segment.
	 */
	spin_lock(&rtas_data_buf_lock);
	flist = (struct flash_block_list *)&rtas_data_buf[0];
	flist->num_blocks = 0;
	flist->next = rtas_firmware_flash_list;
	rtas_block_list = virt_to_abs(flist);
	if (rtas_block_list >= 4UL*1024*1024*1024) {
		printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
		spin_unlock(&rtas_data_buf_lock);
		return;
	}

	printk(KERN_ALERT "FLASH: preparing saved firmware image for flash\n");
	/* Update the block_list in place. */
	rtas_firmware_flash_list = NULL; /* too hard to backout on error */
	image_size = 0;
	for (f = flist; f; f = next) {
		/* Translate data addrs to absolute */
		for (i = 0; i < f->num_blocks; i++) {
			f->blocks[i].data = (char *)virt_to_abs(f->blocks[i].data);
			image_size += f->blocks[i].length;
		}
		next = f->next;
		/* Don't translate NULL pointer for last entry */
		if (f->next)
			f->next = (struct flash_block_list *)virt_to_abs(f->next);
		else
			f->next = NULL;
		/* make num_blocks into the version/length field */
		f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
	}

	printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
	printk(KERN_ALERT "FLASH: performing flash and reboot\n");
	rtas_progress("Flashing        \n", 0x0);
	rtas_progress("Please Wait...  ", 0x0);
	printk(KERN_ALERT "FLASH: this will take several minutes.  Do not power off!\n");
	status = rtas_call(update_token, 1, 1, NULL, rtas_block_list);
	switch (status) {	/* should only get "bad" status */
	    case 0:
		printk(KERN_ALERT "FLASH: success\n");
		break;
	    case -1:
		printk(KERN_ALERT "FLASH: hardware error.  Firmware may not be not flashed\n");
		break;
	    case -3:
		printk(KERN_ALERT "FLASH: image is corrupt or not correct for this platform.  Firmware not flashed\n");
		break;
	    case -4:
		printk(KERN_ALERT "FLASH: flash failed when partially complete.  System may not reboot\n");
		break;
	    default:
		printk(KERN_ALERT "FLASH: unknown flash return code %d\n", status);
		break;
	}
	spin_unlock(&rtas_data_buf_lock);
}
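
The num_blocks rewrite at the end of the translation loop packs two values into one 64-bit field: the list-format version in the top byte, and the entry's byte length below it ((num_blocks + 1) entries of 16 bytes each, counting the header pair itself). A small standalone sketch of that encoding, assuming FLASH_BLOCK_LIST_VERSION is 1:

#include <stdio.h>
#include <stdint.h>

/* Assumed value for illustration; the kernel defines its own. */
#define FLASH_BLOCK_LIST_VERSION 1ULL

int main(void)
{
	uint64_t num_blocks = 5;

	/* Top byte: list-format version.  Low bits: length in bytes,
	 * i.e. (num_blocks + 1) entries of 16 bytes each (the header
	 * pair plus one address/length pair per data block). */
	uint64_t field = (FLASH_BLOCK_LIST_VERSION << 56) | ((num_blocks + 1) * 16);

	printf("version = %llu\n", (unsigned long long)(field >> 56));
	printf("length  = %llu bytes\n",
	       (unsigned long long)(field & ((1ULL << 56) - 1)));
	return 0;
}
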
Example #3
/*
 * init_qp_queues initializes/constructs r/squeue and registers queue pages.
 */
static inline int init_qp_queues(struct ehca_shca *shca,
                                 struct ehca_qp *my_qp,
                                 int nr_sq_pages,
                                 int nr_rq_pages,
                                 int swqe_size,
                                 int rwqe_size,
                                 int nr_send_sges, int nr_receive_sges)
{
    int ret, cnt, ipz_rc;
    void *vpage;
    u64 rpage, h_ret;
    struct ib_device *ib_dev = &shca->ib_device;
    struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;

    ipz_rc = ipz_queue_ctor(&my_qp->ipz_squeue,
                            nr_sq_pages,
                            EHCA_PAGESIZE, swqe_size, nr_send_sges);
    if (!ipz_rc) {
        ehca_err(ib_dev,"Cannot allocate page for squeue. ipz_rc=%x",
                 ipz_rc);
        return -EBUSY;
    }

    ipz_rc = ipz_queue_ctor(&my_qp->ipz_rqueue,
                            nr_rq_pages,
                            EHCA_PAGESIZE, rwqe_size, nr_receive_sges);
    if (!ipz_rc) {
        ehca_err(ib_dev, "Cannot allocate page for rqueue. ipz_rc=%x",
                 ipz_rc);
        ret = -EBUSY;
        goto init_qp_queues0;
    }
    /* register SQ pages */
    for (cnt = 0; cnt < nr_sq_pages; cnt++) {
        vpage = ipz_qpageit_get_inc(&my_qp->ipz_squeue);
        if (!vpage) {
            ehca_err(ib_dev, "SQ ipz_qpageit_get_inc() "
                     "failed p_vpage= %p", vpage);
            ret = -EINVAL;
            goto init_qp_queues1;
        }
        rpage = virt_to_abs(vpage);

        h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
                                         my_qp->ipz_qp_handle,
                                         &my_qp->pf, 0, 0,
                                         rpage, 1,
                                         my_qp->galpas.kernel);
        if (h_ret < H_SUCCESS) {
            ehca_err(ib_dev, "SQ hipz_qp_register_rpage()"
                     " failed rc=%lx", h_ret);
            ret = ehca2ib_return_code(h_ret);
            goto init_qp_queues1;
        }
    }

    ipz_qeit_reset(&my_qp->ipz_squeue);

    /* register RQ pages */
    for (cnt = 0; cnt < nr_rq_pages; cnt++) {
        vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
        if (!vpage) {
            ehca_err(ib_dev, "RQ ipz_qpageit_get_inc() "
                     "failed p_vpage = %p", vpage);
            ret = -EINVAL;
            goto init_qp_queues1;
        }

        rpage = virt_to_abs(vpage);

        h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
                                         my_qp->ipz_qp_handle,
                                         &my_qp->pf, 0, 1,
                                         rpage, 1, my_qp->galpas.kernel);
        if (h_ret < H_SUCCESS) {
            ehca_err(ib_dev, "RQ hipz_qp_register_rpage() failed "
                     "rc=%lx", h_ret);
            ret = ehca2ib_return_code(h_ret);
            goto init_qp_queues1;
        }
        if (cnt == (nr_rq_pages - 1)) {	/* last page! */
            if (h_ret != H_SUCCESS) {
                ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
                         "h_ret= %lx ", h_ret);
                ret = ehca2ib_return_code(h_ret);
                goto init_qp_queues1;
            }
            vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
            if (vpage) {
                ehca_err(ib_dev, "ipz_qpageit_get_inc() "
                         "should not succeed vpage=%p", vpage);
                ret = -EINVAL;
                goto init_qp_queues1;
            }
        } else {
            if (h_ret != H_PAGE_REGISTERED) {
                ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
                         "h_ret= %lx ", h_ret);
                ret = ehca2ib_return_code(h_ret);
                goto init_qp_queues1;
            }
        }
    }

    ipz_qeit_reset(&my_qp->ipz_rqueue);

    return 0;

init_qp_queues1:
    ipz_queue_dtor(&my_qp->ipz_rqueue);
init_qp_queues0:
    ipz_queue_dtor(&my_qp->ipz_squeue);
    return ret;
}
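
Both registration loops encode the same firmware contract: every page except the last must come back as H_PAGE_REGISTERED, only the final page answers H_SUCCESS, and on the last page the queue iterator must be exhausted. A compact userspace model of that contract; the return-code values are assumed for illustration only.

#include <stdio.h>

enum { H_OK = 0, H_PAGE_REG = 15 };	/* stand-ins for H_SUCCESS/H_PAGE_REGISTERED */

/* Model of the firmware contract: every page but the last answers
 * "page registered"; the final page answers "success". */
static long register_page(int page, int nr_pages)
{
	return (page == nr_pages - 1) ? H_OK : H_PAGE_REG;
}

int main(void)
{
	int nr_pages = 4;
	int i;

	for (i = 0; i < nr_pages; i++) {
		long rc = register_page(i, nr_pages);
		long expected = (i == nr_pages - 1) ? H_OK : H_PAGE_REG;

		if (rc != expected) {
			fprintf(stderr, "page %d: unexpected rc %ld\n", i, rc);
			return 1;
		}
	}
	printf("all %d pages registered\n", nr_pages);
	return 0;
}
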
Example #4
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       const enum ehea_eq_type type,
			       const u32 max_nr_of_eqes, const u8 eqe_gen)
{
	int ret, i;
	u64 hret, rpage;
	void *vpage;
	struct ehea_eq *eq;

	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
	if (!eq) {
		ehea_error("no mem for eq");
		return NULL;
	}

	eq->adapter = adapter;
	eq->attr.type = type;
	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
	eq->attr.eqe_gen = eqe_gen;
	spin_lock_init(&eq->spinlock);

	hret = ehea_h_alloc_resource_eq(adapter->handle,
					&eq->attr, &eq->fw_handle);
	if (hret != H_SUCCESS) {
		ehea_error("alloc_resource_eq failed");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
	if (ret) {
		ehea_error("can't allocate eq pages");
		goto out_freeres;
	}

	for (i = 0; i < eq->attr.nr_pages; i++) {
		vpage = hw_qpageit_get_inc(&eq->hw_queue);
		if (!vpage) {
			ehea_error("hw_qpageit_get_inc failed");
			hret = H_RESOURCE;
			goto out_kill_hwq;
		}

		rpage = virt_to_abs(vpage);

		hret = ehea_h_register_rpage(adapter->handle, 0,
					     EHEA_EQ_REGISTER_ORIG,
					     eq->fw_handle, rpage, 1);

		if (i == (eq->attr.nr_pages - 1)) {
			/* last page */
			vpage = hw_qpageit_get_inc(&eq->hw_queue);
			if ((hret != H_SUCCESS) || (vpage)) {
				goto out_kill_hwq;
			}
		} else {
			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
				goto out_kill_hwq;
			}
		}
	}

	hw_qeit_reset(&eq->hw_queue);
	return eq;

out_kill_hwq:
	hw_queue_dtor(&eq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(eq);
	return NULL;
}
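
The exit labels here form the classic kernel unwind ladder: each failure jumps to the label that releases everything acquired so far, in reverse order of acquisition, while success skips the ladder entirely. A self-contained sketch of the shape; the resource names are invented.

#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what) { printf("acquire %s\n", what); return malloc(1); }
static void release(const char *what, void *p) { printf("release %s\n", what); free(p); }

static int create_object(int fail_step)
{
	void *mem, *fwres = NULL, *queue = NULL;

	mem = acquire("memory");
	if (!mem)
		return -1;

	if (fail_step == 1 || !(fwres = acquire("fw resource")))
		goto out_freemem;

	if (fail_step == 2 || !(queue = acquire("hw queue")))
		goto out_freeres;

	printf("object fully constructed\n");
	release("hw queue", queue);	/* for the sketch, just clean up */
	release("fw resource", fwres);
	release("memory", mem);
	return 0;

	/* Unwind in reverse order of acquisition, exactly like the
	 * out_kill_hwq/out_freeres/out_freemem ladder above. */
out_freeres:
	release("fw resource", fwres);
out_freemem:
	release("memory", mem);
	return -1;
}

int main(void)
{
	create_object(2);	/* fail while the firmware resource is held */
	return 0;
}
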
Example #5
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
	int ret;
	u64 *pt;
	void *pg;
	u64 hret, pt_abs, i, j, m, mr_len;
	u32 acc_ctrl = EHEA_MR_ACC_CTRL;

	mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;

	pt = kzalloc(EHEA_MAX_RPAGE * sizeof(u64), GFP_KERNEL);
	if (!pt) {
		ehea_error("no mem");
		ret = -ENOMEM;
		goto out;
	}
	pt_abs = virt_to_abs(pt);

	hret = ehea_h_alloc_resource_mr(adapter->handle,
					EHEA_BUSMAP_START, mr_len,
					acc_ctrl, adapter->pd,
					&mr->handle, &mr->lkey);
	if (hret != H_SUCCESS) {
		ehea_error("alloc_resource_mr failed");
		ret = -EIO;
		goto out;
	}

	for (i = 0; i < ehea_bmap.entries; i++)
		if (ehea_bmap.vaddr[i]) {
			void *sectbase = __va(i << SECTION_SIZE_BITS);
			unsigned long k = 0;

			for (j = 0; j < (PAGES_PER_SECTION / EHEA_MAX_RPAGE);
			      j++) {

				for (m = 0; m < EHEA_MAX_RPAGE; m++) {
					pg = sectbase + ((k++) * EHEA_PAGESIZE);
					pt[m] = virt_to_abs(pg);
				}

				hret = ehea_h_register_rpage_mr(adapter->handle,
								mr->handle,
								0, 0, pt_abs,
								EHEA_MAX_RPAGE);
				if ((hret != H_SUCCESS)
				    && (hret != H_PAGE_REGISTERED)) {
					ehea_h_free_resource(adapter->handle,
							     mr->handle,
							     FORCE_FREE);
					ehea_error("register_rpage_mr failed");
					ret = -EIO;
					goto out;
				}
			}
		}

	if (hret != H_SUCCESS) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		ehea_error("registering mr failed");
		ret = -EIO;
		goto out;
	}

	mr->vaddr = EHEA_BUSMAP_START;
	mr->adapter = adapter;
	ret = 0;
out:
	kfree(pt);
	return ret;
}
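
The double loop registers one memory section per valid bitmap entry: pt[] is filled with EHEA_MAX_RPAGE page addresses at a time, and PAGES_PER_SECTION / EHEA_MAX_RPAGE such batches cover a section, each batch costing one hypervisor call. The arithmetic in isolation; both constants below are illustrative, the real values depend on the kernel configuration.

#include <stdio.h>

#define PAGES_PER_SECTION 4096	/* assumed for illustration */
#define EHEA_MAX_RPAGE    512	/* assumed for illustration */

int main(void)
{
	unsigned long pages_done = 0;
	int j, m;

	/* One section is registered in PAGES_PER_SECTION / EHEA_MAX_RPAGE
	 * batches of EHEA_MAX_RPAGE page addresses each. */
	for (j = 0; j < PAGES_PER_SECTION / EHEA_MAX_RPAGE; j++) {
		for (m = 0; m < EHEA_MAX_RPAGE; m++)
			pages_done++;	/* pt[m] = address of the next page */
		printf("batch %d: one call registers %d pages\n", j, EHEA_MAX_RPAGE);
	}
	printf("total: %lu pages per section\n", pages_done);
	return 0;
}
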
Example #6
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
	struct ehea_cq *cq;
	struct h_epa epa;
	u64 *cq_handle_ref, hret, rpage;
	u32 act_nr_of_entries, act_pages, counter;
	int ret;
	void *vpage;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ehea_error("no mem for cq");
		goto out_nomem;
	}

	cq->attr.max_nr_of_cqes = nr_of_cqe;
	cq->attr.cq_token = cq_token;
	cq->attr.eq_handle = eq_handle;

	cq->adapter = adapter;

	cq_handle_ref = &cq->fw_handle;
	act_nr_of_entries = 0;
	act_pages = 0;

	hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
					&cq->fw_handle, &cq->epas);
	if (hret != H_SUCCESS) {
		ehea_error("alloc_resource_cq failed");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_cqe));
	if (ret)
		goto out_freeres;

	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
		vpage = hw_qpageit_get_inc(&cq->hw_queue);
		if (!vpage) {
			ehea_error("hw_qpageit_get_inc failed");
			goto out_kill_hwq;
		}

		rpage = virt_to_abs(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, EHEA_CQ_REGISTER_ORIG,
					     cq->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			ehea_error("register_rpage_cq failed ehea_cq=%p "
				   "hret=%lx counter=%i act_pages=%i",
				   cq, hret, counter, cq->attr.nr_pages);
			goto out_kill_hwq;
		}

		if (counter == (cq->attr.nr_pages - 1)) {
			vpage = hw_qpageit_get_inc(&cq->hw_queue);

			if ((hret != H_SUCCESS) || (vpage)) {
				ehea_error("registration of pages not "
					   "complete hret=%lx\n", hret);
				goto out_kill_hwq;
			}
		} else {
			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
				ehea_error("CQ: registration of page failed "
					   "hret=%lx\n", hret);
				goto out_kill_hwq;
			}
		}
	}

	hw_qeit_reset(&cq->hw_queue);
	epa = cq->epas.kernel;
	ehea_reset_cq_ep(cq);
	ehea_reset_cq_n1(cq);

	return cq;

out_kill_hwq:
	hw_queue_dtor(&cq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(cq);

out_nomem:
	return NULL;
}
Example #7
int ehca_create_eq(struct ehca_shca *shca,
		   struct ehca_eq *eq,
		   const enum ehca_eq_type type, const u32 length)
{
	int ret;
	u64 h_ret;
	u32 nr_pages;
	u32 i;
	void *vpage;
	struct ib_device *ib_dev = &shca->ib_device;

	spin_lock_init(&eq->spinlock);
	spin_lock_init(&eq->irq_spinlock);
	eq->is_initialized = 0;

	if (type != EHCA_EQ && type != EHCA_NEQ) {
		ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
		return -EINVAL;
	}
	if (!length) {
		ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
		return -EINVAL;
	}

	h_ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
					 &eq->pf,
					 type,
					 length,
					 &eq->ipz_eq_handle,
					 &eq->length,
					 &nr_pages, &eq->ist);

	if (h_ret != H_SUCCESS) {
		ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
		return -EINVAL;
	}

	ret = ipz_queue_ctor(NULL, &eq->ipz_queue, nr_pages,
			     EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0, 0);
	if (!ret) {
		ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
		goto create_eq_exit1;
	}

	for (i = 0; i < nr_pages; i++) {
		u64 rpage;

		vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
		if (!vpage)
			goto create_eq_exit2;

		rpage = virt_to_abs(vpage);
		h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
						 eq->ipz_eq_handle,
						 &eq->pf,
						 0, 0, rpage, 1);

		if (i == (nr_pages - 1)) {
			/* last page */
			vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
			if (h_ret != H_SUCCESS || vpage)
				goto create_eq_exit2;
		} else {
			if (h_ret != H_PAGE_REGISTERED)
				goto create_eq_exit2;
		}
	}

	ipz_qeit_reset(&eq->ipz_queue);

	/* register interrupt handlers and initialize work queues */
	if (type == EHCA_EQ) {
		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
					  IRQF_DISABLED, "ehca_eq",
					  (void *)shca);
		if (ret < 0)
			ehca_err(ib_dev, "Can't map interrupt handler.");

		tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
	} else if (type == EHCA_NEQ) {
		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
					  IRQF_DISABLED, "ehca_neq",
					  (void *)shca);
		if (ret < 0)
			ehca_err(ib_dev, "Can't map interrupt handler.");

		tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
	}

	eq->is_initialized = 1;

	return 0;

create_eq_exit2:
	ipz_queue_dtor(NULL, &eq->ipz_queue);

create_eq_exit1:
	hipz_h_destroy_eq(shca->ipz_hca_handle, eq);

	return -EINVAL;
}
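
After the queue pages are registered, the function wires up a different interrupt handler and tasklet depending on whether this is the event queue (EHCA_EQ) or the notification queue (EHCA_NEQ). A toy model of that dispatch-by-type choice; every name below is invented for illustration.

#include <stdio.h>

enum queue_kind { EVENT_QUEUE, NOTIFICATION_QUEUE };

static void handle_eq(void *ctx)  { (void)ctx; printf("completion-event interrupt\n"); }
static void handle_neq(void *ctx) { (void)ctx; printf("notification-event interrupt\n"); }

int main(void)
{
	enum queue_kind kind = NOTIFICATION_QUEUE;

	/* Pick the handler by queue type, as the EHCA_EQ/EHCA_NEQ
	 * branch above does before registering it with the IRQ layer. */
	void (*handler)(void *) = (kind == EVENT_QUEUE) ? handle_eq : handle_neq;

	handler(NULL);
	return 0;
}
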
Example #9
static void
rtas_flash_firmware(void)
{
	unsigned long image_size;
	struct flash_block_list *f, *next, *flist;
	unsigned long rtas_block_list;
	int i, status, update_token;

	update_token = rtas_token("ibm,update-flash-64-and-reboot");
	if (update_token == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_ALERT "FLASH: ibm,update-flash-64-and-reboot is not available -- not a service partition?\n");
		printk(KERN_ALERT "FLASH: firmware will not be flashed\n");
		return;
	}

	/* NOTE: the "first" block list is a global var with no data
	 * blocks in the kernel data segment.  We do this because
	 * we want to ensure this block_list addr is under 4GB.
	 */
	rtas_firmware_flash_list.num_blocks = 0;
	flist = (struct flash_block_list *)&rtas_firmware_flash_list;
	rtas_block_list = virt_to_abs(flist);
	if (rtas_block_list >= 4UL*1024*1024*1024) {
		printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
		return;
	}

	printk(KERN_ALERT "FLASH: preparing saved firmware image for flash\n");
	/* Update the block_list in place. */
	image_size = 0;
	for (f = flist; f; f = next) {
		/* Translate data addrs to absolute */
		for (i = 0; i < f->num_blocks; i++) {
			f->blocks[i].data = (char *)virt_to_abs(f->blocks[i].data);
			image_size += f->blocks[i].length;
		}
		next = f->next;
		/* Don't translate NULL pointer for last entry */
		if (f->next)
			f->next = (struct flash_block_list *)virt_to_abs(f->next);
		else
			f->next = NULL;
		/* make num_blocks into the version/length field */
		f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
	}

	printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
	printk(KERN_ALERT "FLASH: performing flash and reboot\n");
	ppc_md.progress("Flashing        \n", 0x0);
	ppc_md.progress("Please Wait...  ", 0x0);
	printk(KERN_ALERT "FLASH: this will take several minutes.  Do not power off!\n");
	status = rtas_call(update_token, 1, 1, NULL, rtas_block_list);
	switch (status) {	/* should only get "bad" status */
	    case 0:
		printk(KERN_ALERT "FLASH: success\n");
		break;
	    case -1:
		printk(KERN_ALERT "FLASH: hardware error.  Firmware may not be not flashed\n");
		break;
	    case -3:
		printk(KERN_ALERT "FLASH: image is corrupt or not correct for this platform.  Firmware not flashed\n");
		break;
	    case -4:
		printk(KERN_ALERT "FLASH: flash failed when partially complete.  System may not reboot\n");
		break;
	    default:
		printk(KERN_ALERT "FLASH: unknown flash return code %d\n", status);
		break;
	}
}
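
The explicit >= 4GB check enforces the NOTE above it: the block-list header is handed to RTAS, which presumably can only address the low 4 GB (RTAS is typically instantiated 32-bit). The test in isolation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical physical address of the block-list header. */
	uint64_t addr = 0xfffff000ULL;

	/* Same test as the kernel's: reject anything at or above 4 GB. */
	if (addr >= 4ULL * 1024 * 1024 * 1024)
		printf("header above 4GB: unreachable for the firmware\n");
	else
		printf("header at 0x%llx is reachable\n",
		       (unsigned long long)addr);
	return 0;
}
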
Example #10
File: iommu.c  Project: 1x23/unifi-gpl
static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				     long npages, unsigned long uaddr,
				     enum dma_data_direction direction)
{
	u64 rc;
	union tce_entry tce, *tcep;
	long l, limit;

	if (TCE_PAGE_FACTOR == 0 && npages == 1)
		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
					   direction);

	tcep = __get_cpu_var(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (void *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep)
			return tce_build_pSeriesLP(tbl, tcenum, npages,
						   uaddr, direction);
		__get_cpu_var(tce_page) = tcep;
	}

	tcenum <<= TCE_PAGE_FACTOR;
	npages <<= TCE_PAGE_FACTOR;

	tce.te_word = 0;
	tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
	tce.te_rdwr = 1;
	if (direction != DMA_TO_DEVICE)
		tce.te_pciwr = 1;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/sizeof(union tce_entry));

		for (l = 0; l < limit; l++) {
			tcep[l] = tce;
			tce.te_rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)virt_to_abs(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
		printk("\tindex   = 0x%lx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%lx\n", (u64)npages);
		printk("\ttce[0] val = 0x%lx\n", tcep[0].te_word);
		show_stack(current, (unsigned long *)__get_SP());
	}
}
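
This older variant still converts kernel pages to 4 KB TCE pages via TCE_PAGE_FACTOR, shifting both the starting index and the count by the same amount. A tiny sketch of the conversion, assuming 64 KB kernel pages (so a factor of log2(64K / 4K) = 4; the real value comes from the kernel configuration):

#include <stdio.h>

#define TCE_PAGE_FACTOR 4	/* assumed: 64 KB kernel pages over 4 KB TCEs */

int main(void)
{
	long tcenum = 3, npages = 2;	/* in kernel-page units */

	/* One kernel page covers 2^TCE_PAGE_FACTOR TCE entries, so both
	 * the starting index and the count scale by the same shift. */
	tcenum <<= TCE_PAGE_FACTOR;
	npages <<= TCE_PAGE_FACTOR;

	printf("map %ld TCEs starting at index %ld\n", npages, tcenum);
	return 0;
}
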