Esempio n. 1
0
/*
 * Perform a write on the ECCB-attached OPB/LPC bus: stage the data,
 * issue the command, then poll the status register until the engine
 * reports completion, an error, or we run out of attempts.
 *
 * Returns OPAL_SUCCESS, OPAL_PARAMETER for a bad size, OPAL_HARDWARE
 * on an ECCB error/timeout, or the underlying XSCOM error code.
 */
static int64_t opb_write(struct proc_chip *chip, uint32_t addr, uint32_t data,
			 uint32_t sz)
{
	uint64_t eccb_ctl = ECCB_CTL_MAGIC;
	uint64_t eccb_stat;
	uint64_t data_reg;
	int64_t rc;
	int64_t attempt;

	/* The data register takes the value left-justified per access size */
	if (sz == 1)
		data_reg = ((uint64_t)data) << 56;
	else if (sz == 2)
		data_reg = ((uint64_t)data) << 48;
	else if (sz == 4)
		data_reg = ((uint64_t)data) << 32;
	else {
		prerror("LPC: Invalid data size %d\n", sz);
		return OPAL_PARAMETER;
	}

	/* Stage the write data first ... */
	rc = xscom_write(chip->id, chip->lpc_xbase + ECCB_DATA, data_reg);
	if (rc) {
		log_simple_error(&e_info(OPAL_RC_LPC_WRITE),
			"LPC: XSCOM write to ECCB DATA error %lld\n", rc);
		return rc;
	}

	/* ... then kick off the command with size/address encoded in CTL */
	eccb_ctl = SETFIELD(ECCB_CTL_DATASZ, eccb_ctl, sz);
	eccb_ctl = SETFIELD(ECCB_CTL_ADDRLEN, eccb_ctl, ECCB_ADDRLEN_4B);
	eccb_ctl = SETFIELD(ECCB_CTL_ADDR, eccb_ctl, addr);
	rc = xscom_write(chip->id, chip->lpc_xbase + ECCB_CTL, eccb_ctl);
	if (rc) {
		log_simple_error(&e_info(OPAL_RC_LPC_WRITE),
			"LPC: XSCOM write to ECCB CTL error %lld\n", rc);
		return rc;
	}

	/* Poll for completion, waiting 100 time units between attempts */
	attempt = 0;
	while (attempt++ < ECCB_TIMEOUT) {
		rc = xscom_read(chip->id, chip->lpc_xbase + ECCB_STAT,
				&eccb_stat);
		if (rc) {
			log_simple_error(&e_info(OPAL_RC_LPC_WRITE),
				"LPC: XSCOM read from ECCB STAT err %lld\n",
									rc);
			return rc;
		}
		if (!(eccb_stat & ECCB_STAT_OP_DONE)) {
			time_wait(100);
			continue;
		}
		if (eccb_stat & ECCB_STAT_ERR_MASK) {
			log_simple_error(&e_info(OPAL_RC_LPC_WRITE),
				"LPC: Error status: 0x%llx\n", eccb_stat);
			return OPAL_HARDWARE;
		}
		return OPAL_SUCCESS;
	}
	log_simple_error(&e_info(OPAL_RC_LPC_WRITE), "LPC: Write timeout !\n");
	return OPAL_HARDWARE;
}
Esempio n. 2
0
/*
 * Program the NPU indirect SCOM address register.  The indirect method
 * is used because it shares addresses with the MMIO offsets (NPU RING).
 */
static void npu2_scom_set_addr(uint64_t gcid, uint64_t scom_base,
			       uint64_t addr, uint64_t size)
{
	uint64_t ind_addr;

	/* Encode target address and access length into one register value */
	ind_addr = SETFIELD(NPU2_MISC_DA_ADDR, 0ull, addr);
	ind_addr = SETFIELD(NPU2_MISC_DA_LEN, ind_addr, size);
	xscom_write(gcid, scom_base + NPU2_MISC_SCOM_IND_SCOM_ADDR, ind_addr);
}
/*
 * Program the 2D engine horizontal and vertical stretch factors in a
 * single two-register load.
 */
void gcSetStretchFactor(UINT32 HorFactor, UINT32 VerFactor)
{
	UINT32 lowFactor;
	UINT32 highFactor;

	/* AQDEStretchFactorLow: horizontal factor. */
	lowFactor = SETFIELD(0, AQDE_STRETCH_FACTOR_LOW, X, HorFactor);

	/* AQDEStretchFactorHigh: vertical factor. */
	highFactor = SETFIELD(0, AQDE_STRETCH_FACTOR_HIGH, Y, VerFactor);

	gcLoadState(AQDEStretchFactorLowRegAddrs, 2, lowFactor, highFactor);
}
Esempio n. 4
0
/*
 * Retrieve one column of the current row of the index-info virtual table.
 * pvData/cbData describe the caller's output buffer; *pcbActual (when
 * non-NULL) receives the untruncated size of the column value (clamped
 * to cbData when reported).  Returns JET_errBadColumnId for an unknown
 * column id, otherwise whatever AssignResult() returns.
 *
 * NOTE(review): SETFIELD() here appears to point pRet/cbRet at the given
 * struct member (it is not the register bitfield SETFIELD used elsewhere
 * in this file) -- confirm against the macro definition; pRet/cbRet would
 * otherwise be used uninitialized in those cases.
 */
static JET_ERR VTRetrieveIndexinfoColumn(ISAM_VTDef *pVTDef, JET_COLUMNID columnid, 
									   PVOID pvData, ULONG cbData, ULONG *pcbActual)
{
	ISAM_IDXListData *pData = (ISAM_IDXListData*)pVTDef->data.pCurData;
	PVOID pRet;
	ULONG cbRet;

	/* Either report the actual size, or pre-clear the output buffer. */
	if (pcbActual)  *pcbActual = 0;
	else ZeroMemory(pvData, cbData);
	switch (columnid)
	{
	case 0:
		/* Index name: wide string including the NUL terminator. */
		pRet = pData->szIdxName;
		cbRet = wcslen(pData->szIdxName) * sizeof(WCHAR) + sizeof(WCHAR);
		break;
	case 1:
		SETFIELD(pData->grbit);
		break;
	case 2:
		SETFIELD(pData->cKey);
		break;
	case 3:
		SETFIELD(pData->cEntry);
		break;
	case 4:
		SETFIELD(pData->cPage);
		break;
	case 5:
		SETFIELD(pData->cColumn);
		break;
	case 6:
		SETFIELD(pData->iColumn);
		break;
	case 7:
		SETFIELD(pData->columnid);
		break;
	case 8:
		SETFIELD(pData->coltyp);
		break;
	case 9:
		SETFIELD(pData->Langid);
		break;
	case 10:
		SETFIELD(pData->grbitCol);
		break;
	case 11:
		/* Column name: wide string including the NUL terminator. */
		pRet = pData->szColName;
		cbRet = wcslen(pData->szColName) * sizeof(WCHAR) + sizeof(WCHAR);
		break;
	default:
		return JET_errBadColumnId;
	}
	/* Report the size actually copied (clamped to the buffer size). */
	if (pcbActual)
		*pcbActual = cbRet>=cbData?cbData:cbRet;
	return AssignResult(pRet, cbRet, pvData, cbData);
}
Esempio n. 5
0
/*
 * Perform a read on the ECCB-attached OPB/LPC bus (or via MMIO when the
 * LPC master is memory-mapped): issue the read command, poll the status
 * register for completion, then extract the right-shifted data for the
 * requested access size.
 *
 * Returns 0 on success, OPAL_PARAMETER for a bad size, OPAL_HARDWARE on
 * an ECCB error/timeout, or the underlying XSCOM error code.
 *
 * Fix: the function was truncated -- it fell off the end of the polling
 * loop with no timeout handling, no return value, and no closing brace.
 * Restore the timeout error path, mirroring opb_write().
 */
static int64_t opb_read(struct lpcm *lpc, uint32_t addr, uint32_t *data,
		        uint32_t sz)
{
	uint64_t ctl = ECCB_CTL_MAGIC | ECCB_CTL_READ, stat;
	int64_t rc, tout;

	/* Fast path: direct MMIO access when the LPC master is mapped */
	if (lpc->mbase)
		return opb_mmio_read(lpc, addr, data, sz);

	if (sz != 1 && sz != 2 && sz != 4) {
		prerror("Invalid data size %d\n", sz);
		return OPAL_PARAMETER;
	}

	/* Encode size and address into the control word and kick it off */
	ctl = SETFIELD(ECCB_CTL_DATASZ, ctl, sz);
	ctl = SETFIELD(ECCB_CTL_ADDRLEN, ctl, ECCB_ADDRLEN_4B);
	ctl = SETFIELD(ECCB_CTL_ADDR, ctl, addr);
	rc = xscom_write(lpc->chip_id, lpc->xbase + ECCB_CTL, ctl);
	if (rc) {
		log_simple_error(&e_info(OPAL_RC_LPC_READ),
			"LPC: XSCOM write to ECCB CTL error %lld\n", rc);
		return rc;
	}

	/* Poll for completion, waiting 100 time units between attempts */
	for (tout = 0; tout < ECCB_TIMEOUT; tout++) {
		rc = xscom_read(lpc->chip_id, lpc->xbase + ECCB_STAT,
				&stat);
		if (rc) {
			log_simple_error(&e_info(OPAL_RC_LPC_READ),
				"LPC: XSCOM read from ECCB STAT err %lld\n",
									rc);
			return rc;
		}
		if (stat & ECCB_STAT_OP_DONE) {
			uint32_t rdata = GETFIELD(ECCB_STAT_RD_DATA, stat);
			if (stat & ECCB_STAT_ERR_MASK) {
				log_simple_error(&e_info(OPAL_RC_LPC_READ),
					"LPC: Error status: 0x%llx\n", stat);
				return OPAL_HARDWARE;
			}
			/* Data comes back left-justified in the 32-bit field */
			switch(sz) {
			case 1:
				*data = rdata >> 24;
				break;
			case 2:
				*data = rdata >> 16;
				break;
			default:
				*data = rdata;
				break;
			}
			return 0;
		}
		time_wait_nopoll(100);
	}
	log_simple_error(&e_info(OPAL_RC_LPC_READ), "LPC: Read timeout !\n");
	return OPAL_HARDWARE;
}
/*
 * Program the 2D engine video source: surface address/stride, pixel
 * format, U/V plane addresses/strides, and the source rectangle/origin
 * used by the video rasterizer.
 */
void gcSetVideoSource(gcSURFACEINFO* Surface, gcRECT* SrcRect, gcPOINT* Origin)
{
	UINT32 srcImageLow;
	UINT32 srcImageHigh;
	UINT32 srcOriginLow;
	UINT32 srcOriginHigh;

	/* AQDESrcAddress / AQDESrcStride. */
	gcLoadState(AQDESrcAddressRegAddrs, 2,
		Surface->address,
		Surface->stride);

	/* AQDESrcConfig: pixel format. */
	gcLoadState(AQDESrcConfigRegAddrs, 1,
		SETFIELD(0, AQDE_SRC_CONFIG, FORMAT, Surface->format));

	/* U/V plane addresses and strides. */
	gcLoadState(UPlaneAddressRegAddrs, 4,
		Surface->addressU,
		Surface->strideU,
		Surface->addressV,
		Surface->strideV);

	/* Precompute the source rectangle and origin register values. */
	srcImageLow
		= SETFIELD(0, AQVR_SOURCE_IMAGE_LOW, LEFT, SrcRect->left)
		| SETFIELD(0, AQVR_SOURCE_IMAGE_LOW, TOP, SrcRect->top);
	srcImageHigh
		= SETFIELD(0, AQVR_SOURCE_IMAGE_HIGH, RIGHT, SrcRect->right)
		| SETFIELD(0, AQVR_SOURCE_IMAGE_HIGH, BOTTOM, SrcRect->bottom);
	srcOriginLow  = SETFIELD(0, AQVR_SOURCE_ORIGIN_LOW, X, Origin->x);
	srcOriginHigh = SETFIELD(0, AQVR_SOURCE_ORIGIN_HIGH, Y, Origin->y);

	/* AQVRSourceImageLow/High, AQVRSourceOriginLow/High. */
	gcLoadState(AQVRSourceImageLowRegAddrs, 4,
		srcImageLow,
		srcImageHigh,
		srcOriginLow,
		srcOriginHigh);
}
/*
 * Enable or disable destination fetch in the pixel engine.  Only the
 * DESTINATION_FETCH field is updated (its mask bit is set).
 */
void gcSetTargetFetch(UINT32 Default)
{
	UINT32 fetchMode;

	/* Select the fetch mode: hardware default, or disabled. */
	if (Default)
		fetchMode = AQPE_CONFIG_DESTINATION_FETCH_DEFAULT;
	else
		fetchMode = AQPE_CONFIG_DESTINATION_FETCH_DISABLE;

	gcLoadState(AQPEConfigRegAddrs, 1,
		SETFIELDVALUE(~0, AQPE_CONFIG, MASK_DESTINATION_FETCH, ENABLED) &
		SETFIELD(~0, AQPE_CONFIG, DESTINATION_FETCH, fetchMode));
}
/*
 * Kick off a video rasterizer blit, either horizontal or vertical.
 * Only the START field is updated (its mask bit is set).
 */
void gcStartVR(UINT32 Horizontal)
{
	UINT32 startMode;

	/* Select the blit direction. */
	if (Horizontal)
		startMode = AQVR_CONFIG_START_HORIZONTAL_BLIT;
	else
		startMode = AQVR_CONFIG_START_VERTICAL_BLIT;

	gcLoadState(AQVRConfigRegAddrs, 1,
		SETFIELDVALUE(~0, AQVR_CONFIG, MASK_START, ENABLED) &
		SETFIELD(~0, AQVR_CONFIG, START, startMode));
}
/*
 * Program the 2D engine video target: destination surface address and
 * stride, pixel format, and the target window rectangle.
 */
void gcSetVideoTarget(gcSURFACEINFO* Surface, gcRECT* TrgRect)
{
	UINT32 windowLow;
	UINT32 windowHigh;

	/* AQDEDestAddress / AQDEDestStride. */
	gcLoadState(AQDEDestAddressRegAddrs, 2,
		Surface->address,
		Surface->stride);

	/* AQDEDestConfig: pixel format. */
	gcLoadState(AQDEDestConfigRegAddrs, 1,
		SETFIELD(0, AQDE_DEST_CONFIG, FORMAT, Surface->format));

	/* Precompute the target window register values. */
	windowLow
		= SETFIELD(0, AQVR_TARGET_WINDOW_LOW, LEFT, TrgRect->left)
		| SETFIELD(0, AQVR_TARGET_WINDOW_LOW, TOP, TrgRect->top);
	windowHigh
		= SETFIELD(0, AQVR_TARGET_WINDOW_HIGH, RIGHT, TrgRect->right)
		| SETFIELD(0, AQVR_TARGET_WINDOW_HIGH, BOTTOM, TrgRect->bottom);

	/* AQVRTargetWindowLow/High. */
	gcLoadState(AQVRTargetWindowLowRegAddrs, 2,
		windowLow,
		windowHigh);
}
Esempio n. 10
0
/*
 * Retrieve one column of the current row of the object-info (table list)
 * virtual table.  pvData/cbData describe the caller's output buffer;
 * *pcbActual (when non-NULL) receives the untruncated size of the column
 * value (clamped to cbData when reported).  Returns JET_errBadColumnId
 * for an unknown column id, otherwise whatever AssignResult() returns.
 *
 * NOTE(review): SETFIELD() here appears to point pRet/cbRet at the given
 * struct member (not the register bitfield SETFIELD used elsewhere in
 * this file) -- confirm against the macro definition.
 */
static JET_ERR VTRetrieveObjinfoColumn(ISAM_VTDef *pVTDef, JET_COLUMNID columnid, 
									   PVOID pvData, ULONG cbData, ULONG *pcbActual)
{
	ISAM_OBJListData *pData = (ISAM_OBJListData *)pVTDef->data.pCurData;
	PVOID pRet;
	ULONG cbRet;

	/* Either report the actual size, or pre-clear the output buffer. */
	if (pcbActual)  *pcbActual = 0;
	else ZeroMemory(pvData, cbData);
	switch (columnid)
	{
	case 0:
		/* Container name: fixed literal; size EXCLUDES the NUL
		 * terminator, unlike case 1 below -- presumably intentional,
		 * TODO confirm against the JET container-column contract. */
		pRet = L"Tables";
		cbRet = sizeof(L"Tables")-sizeof(WCHAR);
		break;
	case 1:
		/* Object name: wide string including the NUL terminator. */
		pRet = pData->szObjectName;
		cbRet = wcslen(pData->szObjectName) * sizeof(WCHAR) + sizeof(WCHAR);
		break;
	case 2:
		SETFIELD(pData->objInfo.objtyp);
		break;
	case 3:
		SETFIELD(pData->objInfo.dtCreate);
		break;
	case 4:
		SETFIELD(pData->objInfo.dtUpdate);
		break;
	case 5:
		SETFIELD(pData->objInfo.cRecord);
		break;
	case 6:
		SETFIELD(pData->objInfo.cPage);
		break;
	case 7:
		SETFIELD(pData->objInfo.grbit);
		break;
	case 8:
		SETFIELD(pData->objInfo.flags);
		break;
	default:
		return JET_errBadColumnId;
	}
	/* Report the size actually copied (clamped to the buffer size). */
	if (pcbActual)
		*pcbActual = cbRet>=cbData?cbData:cbRet;
	return AssignResult(pRet, cbRet, pvData, cbData);
}
Esempio n. 11
0
/*
 * Set the CRB enable bit in the NX UMAC Status Control register at
 * SCOM address 'xcfg' on chip 'gcid' (read-modify-write).
 *
 * Returns 0 on success or the xscom error code.
 */
static int nx_cfg_umac_status_ctrl(u32 gcid, u64 xcfg)
{
#define CRB_ENABLE	1
	u64 status_ctrl;
	int rc;

	rc = xscom_read(gcid, xcfg, &status_ctrl);
	if (rc)
		return rc;

	status_ctrl = SETFIELD(NX_P9_UMAC_STATUS_CTRL_CRB_ENABLE,
			       status_ctrl, CRB_ENABLE);

	rc = xscom_write(gcid, xcfg, status_ctrl);
	if (rc) {
		prerror("NX%d: ERROR: Setting UMAC Status Control failure %d\n",
			gcid, rc);
		return rc;
	}

	prlog(PR_DEBUG, "NX%d: Setting UMAC Status Control 0x%016lx\n",
		gcid, (unsigned long)status_ctrl);
	return rc;
}
Esempio n. 12
0
/*
 * Retrieve one column of the current row of the column-info virtual
 * table.  pvData/cbData describe the caller's output buffer; *pcbActual
 * (when non-NULL) receives the untruncated size of the column value
 * (clamped to cbData when reported).  Returns JET_errBadColumnId for an
 * unknown column id, otherwise whatever AssignResult() returns.
 *
 * NOTE(review): SETFIELD() here appears to point pRet/cbRet at the given
 * variable (not the register bitfield SETFIELD used elsewhere in this
 * file) -- confirm against the macro definition.
 */
static JET_ERR VTRetrieveColinfoColumn(ISAM_VTDef *pVTDef, JET_COLUMNID columnid, 
									   PVOID pvData, ULONG cbData, ULONG *pcbActual)
{
	ISAM_COLListData *pData = (ISAM_COLListData*)pVTDef->data.pCurData;
	PVOID pRet;
	ULONG cbRet;
	unsigned short cp;
	
	/* Either report the actual size, or pre-clear the output buffer. */
	if (pcbActual)  *pcbActual = 0;
	else ZeroMemory(pvData, cbData);
	switch (columnid)
	{
	case 0:
	case 2:
		/* Two column ids alias the same value. */
		SETFIELD(pData->colDef.columnid);
		break;
	case 1:
	case 9:
		/* Column name (two aliased ids): wide string incl. NUL. */
		pRet = pData->colDef.szBaseColumnName;
		cbRet = wcslen(pData->colDef.szBaseColumnName) * sizeof(WCHAR) + sizeof(WCHAR);
		break;
	case 3:
		SETFIELD(pData->colDef.coltyp);
		break;
	case 4:
		SETFIELD(pData->colDef.langid);
		break;
	case 5:
		SETFIELD(pData->colDef.cbMax);
		break;
	case 6:
		SETFIELD(pData->colDef.grbit);
		break;
	case 7:
		/* Code page constant (1004 -- presumably ANSI; TODO confirm).
		 * pRet ends up pointing at the local 'cp', which is still in
		 * scope when AssignResult() copies from it below. */
		cp = 1004;
		SETFIELD(cp);
		break;
	case 8:
		/* Base table name: wide string including the NUL terminator. */
		pRet = pData->colDef.szBaseTableName;
		cbRet = wcslen(pData->colDef.szBaseTableName) * sizeof(WCHAR) + sizeof(WCHAR);
		break;
	default:
		return JET_errBadColumnId;
	}
	/* Report the size actually copied (clamped to the buffer size). */
	if (pcbActual)
		*pcbActual = cbRet>=cbData?cbData:cbRet;
	return AssignResult(pRet, cbRet, pvData, cbData);
}
Esempio n. 13
0
/*
 * Allocate a receive FIFO for one NX engine, program the UMAC receive
 * FIFO BAR and notify-match registers, and export the configuration to
 * the device tree node for the OS driver.
 *
 * Returns 0 on success or the xscom error code.
 *
 * NOTE(review): the 'fifo' buffer obtained from local_alloc() is not
 * released on the error returns below -- confirm whether local_alloc
 * has a matching free; if so this leaks on xscom failure.
 */
int nx_cfg_rx_fifo(struct dt_node *node, const char *compat,
			const char *priority, u32 gcid, u32 pid, u32 tid,
			u64 umac_bar, u64 umac_notify)
{
	u64 cfg;
	int rc, size;
	uint64_t fifo;
	u32 lpid = 0xfff; /* All 1's for 12 bits in UMAC notify match reg */
#define MATCH_ENABLE    1

	fifo = (uint64_t) local_alloc(gcid, RX_FIFO_SIZE, RX_FIFO_SIZE);
	assert(fifo);

	/*
	 * When configuring the address of the Rx FIFO into the Receive FIFO
	 * BAR, we should _NOT_ shift the address into bits 8:53. Instead we
	 * should copy the address as is and VAS/NX will extract relevant bits.
	 */
	/*
	 * Section 5.21 of P9 NX Workbook Version 2.42 shows Receive FIFO BAR
	 * 54:56 represents FIFO size
	 * 000 = 1KB, 8 CRBs
	 * 001 = 2KB, 16 CRBs
	 * 010 = 4KB, 32 CRBs
	 * 011 = 8KB, 64 CRBs
	 * 100 = 16KB, 128 CRBs
	 * 101 = 32KB, 256 CRBs
	 * 110 = 111 reserved
	 */
	size = RX_FIFO_SIZE / 1024;
	cfg = SETFIELD(NX_P9_RX_FIFO_BAR_SIZE, fifo, ilog2(size));

	rc = xscom_write(gcid, umac_bar, cfg);
	if (rc) {
		prerror("NX%d: ERROR: Setting UMAC FIFO bar failure %d\n",
			gcid, rc);
		return rc;
	} else
		prlog(PR_DEBUG, "NX%d: Setting UMAC FIFO bar 0x%016lx\n",
			gcid, (unsigned long)cfg);

	/* Read-modify-write the notify match register. */
	rc = xscom_read(gcid, umac_notify, &cfg);
	if (rc)
		return rc;

	/*
	 * VAS issues asb_notify with the unique ID to identify the target
	 * co-processor/engine. Logical partition ID (lpid), process ID (pid),
	 * and thread ID (tid) combination is used to define the unique ID
	 * in the system. Export these values in device-tree such that the
	 * driver configure RxFIFO with VAS. Set these values in RxFIFO notify
	 * match register for each engine which compares the ID with each
	 * request.
	 * To define unique identification, 0xfff (1's for 12 bits),
	 * co-processor type, and counter within coprocessor type are used
	 * for lpid, pid, and tid respectively.
	 */
	cfg = SETFIELD(NX_P9_RX_FIFO_NOTIFY_MATCH_LPID, cfg, lpid);
	cfg = SETFIELD(NX_P9_RX_FIFO_NOTIFY_MATCH_PID, cfg, pid);
	cfg = SETFIELD(NX_P9_RX_FIFO_NOTIFY_MATCH_TID, cfg, tid);
	cfg = SETFIELD(NX_P9_RX_FIFO_NOTIFY_MATCH_MATCH_ENABLE, cfg,
			MATCH_ENABLE);

	rc = xscom_write(gcid, umac_notify, cfg);
	if (rc) {
		prerror("NX%d: ERROR: Setting UMAC notify match failure %d\n",
			gcid, rc);
		return rc;
	} else
		prlog(PR_DEBUG, "NX%d: Setting UMAC notify match 0x%016lx\n",
				gcid, (unsigned long)cfg);

	/* Publish the FIFO configuration for the OS driver. */
	dt_add_property_string(node, "compatible", compat);
	dt_add_property_string(node, "priority", priority);
	dt_add_property_u64(node, "rx-fifo-address", fifo);
	dt_add_property_cells(node, "rx-fifo-size", RX_FIFO_SIZE);
	dt_add_property_cells(node, "lpid", lpid);
	dt_add_property_cells(node, "pid", pid);
	dt_add_property_cells(node, "tid", tid);

	return 0;
}
Esempio n. 14
0
/*
 * Flush the accumulated GPU command buffer: append an EVENT + END
 * sequence, start the front-end DMA on the buffer, wait for completion
 * (interrupt or polling, depending on ENABLE_POLLING), then reset the
 * buffer bookkeeping for reuse.
 *
 * Returns 0 on success or the cmdbuf_alloc() error code.
 */
int cmdbuf_flush(void)
{
	int ret;
	u32 *buffer;
	u32 base, physical;
	u32 count;

#if ENABLE_POLLING
	u32 retry;
#endif

	/* Reserve room at the end of the command buffer for the tail
	 * commands.  NOTE(review): 4 words are reserved but only 3 are
	 * written -- presumably the 4th pads to a 64-bit boundary; confirm
	 * against the FE alignment requirements. */
	ret = cmdbuf_alloc(4 * sizeof(u32), &buffer, &physical);
	if (ret != 0)
		goto fail;

	/* Append EVENT(Event, destination). */
	buffer[0]
		= SETFIELDVAL(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE,
								 LOAD_STATE)
		| SETFIELD(0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS,
								AQEventRegAddrs)
		| SETFIELD(0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, 1);

	buffer[1]
		= SETFIELDVAL(0, AQ_EVENT, PE_SRC, ENABLE)
		| SETFIELD(0, AQ_EVENT, EVENT_ID, 16);

	/* Stop FE. */
	buffer[2]
		= SETFIELDVAL(0, AQ_COMMAND_END_COMMAND, OPCODE, END);

#if ENABLE_CMD_DEBUG
	/* Dump command buffer. */
	cmdbuf_dump();
#endif

	/* Determine the command buffer base address. */
	base = cmdbuf.mapped ? cmdbuf.mapped_physical : cmdbuf.page.physical;

	/* Compute the data count: FE prefetch is in 64-bit units, so round
	 * the byte size up to whole 8-byte words. */
	count = (cmdbuf.data_size + 7) >> 3;

#if ENABLE_POLLING
	int_data = 0;
#endif

	CMDBUFPRINT("starting DMA at 0x%08X with count of %d\n", base, count);

#if ENABLE_CMD_DEBUG || ENABLE_GPU_COUNTERS
	/* Reset hardware counters. */
	hw_write_reg(GC_RESET_MEM_COUNTERS_Address, 1);
#endif

	/* Enable all events. */
	hw_write_reg(AQ_INTR_ENBL_Address, ~0U);

	/* Write address register. */
	hw_write_reg(AQ_CMD_BUFFER_ADDR_Address, base);

	/* Write control register: enable the FE and set the prefetch count;
	 * this starts the DMA. */
	hw_write_reg(AQ_CMD_BUFFER_CTRL_Address,
		SETFIELDVAL(0, AQ_CMD_BUFFER_CTRL, ENABLE, ENABLE) |
		SETFIELD(0, AQ_CMD_BUFFER_CTRL, PREFETCH, count)
		);

	/* Wait for the interrupt. */
#if ENABLE_POLLING
	/* Poll int_data (set by the ISR), dumping GPU status every ~2.5s. */
	retry = 0;
	while (1) {
		if (int_data != 0)
			break;

		msleep(500);
		retry += 1;

		if ((retry % 5) == 0)
			gpu_status((char *) __func__, __LINE__, 0);
	}
#else
	wait_event_interruptible(gc_event, done == true);
#endif

#if ENABLE_CMD_DEBUG
	gpu_status((char *) __func__, __LINE__, 0);
#endif

	/* Reset the buffer. */
	cmdbuf.logical  = cmdbuf.page.logical;
	cmdbuf.physical = base;

	cmdbuf.available = cmdbuf.page.size;
	cmdbuf.data_size = 0;

fail:
	return ret;
}
Esempio n. 15
0
/*
 * Initialize a 2D MMU context: allocate and invalidate the master TLB,
 * allocate the slave-descriptor pointer array, precompute the MMU
 * configuration register value, set up the initial (fully vacant) arena,
 * lazily allocate the shared safe zone, and take a reference on the MMU.
 *
 * Returns GCERR_NONE on success or a GCERR_* code; on failure any
 * partially allocated MTLB/slave storage is released.
 */
enum gcerror mmu2d_create_context(struct mmu2dcontext *ctxt)
{
	enum gcerror gcerror;

#if MMU_ENABLE
	int i;
#endif

	struct mmu2dprivate *mmu = get_mmu();

	if (ctxt == NULL)
		return GCERR_MMU_CTXT_BAD;

	memset(ctxt, 0, sizeof(struct mmu2dcontext));

#if MMU_ENABLE
	/* Allocate MTLB table. */
	gcerror = gc_alloc_pages(&ctxt->master, MMU_MTLB_SIZE);
	if (gcerror != GCERR_NONE) {
		gcerror = GCERR_SETGRP(gcerror, GCERR_MMU_MTLB_ALLOC);
		goto fail;
	}

	/* Allocate an array of pointers to slave descriptors.
	 * NOTE(review): the byte size MMU_MTLB_SIZE is reused for a pointer
	 * array -- confirm it is >= MMU_MTLB_ENTRY_NUM * sizeof(pointer). */
	ctxt->slave = kmalloc(MMU_MTLB_SIZE, GFP_KERNEL);
	if (ctxt->slave == NULL) {
		gcerror = GCERR_SETGRP(GCERR_OODM, GCERR_MMU_STLBIDX_ALLOC);
		goto fail;
	}
	memset(ctxt->slave, 0, MMU_MTLB_SIZE);

	/* Invalidate all entries. */
	for (i = 0; i < MMU_MTLB_ENTRY_NUM; i += 1)
		ctxt->master.logical[i] = MMU_MTLB_ENTRY_VACANT;

	/* Configure the physical address: build the MMU configuration
	 * register value by ANDing in the ADDRESS and MODE fields with
	 * their mask-enable bits. */
	ctxt->physical
	= SETFIELD(~0U, GCREG_MMU_CONFIGURATION, ADDRESS,
	  (ctxt->master.physical >> GCREG_MMU_CONFIGURATION_ADDRESS_Start))
	& SETFIELDVAL(~0U, GCREG_MMU_CONFIGURATION, MASK_ADDRESS, ENABLED)
	& SETFIELD(~0U, GCREG_MMU_CONFIGURATION, MODE, MMU_MTLB_MODE)
	& SETFIELDVAL(~0U, GCREG_MMU_CONFIGURATION, MASK_MODE, ENABLED);
#endif

	/* Allocate the first vacant arena. */
	gcerror = mmu2d_get_arena(mmu, &ctxt->vacant);
	if (gcerror != GCERR_NONE)
		goto fail;

	/* Everything is vacant. */
	ctxt->vacant->mtlb  = 0;
	ctxt->vacant->stlb  = 0;
	ctxt->vacant->count = MMU_MTLB_ENTRY_NUM * MMU_STLB_ENTRY_NUM;
	ctxt->vacant->next  = NULL;

	/* Nothing is allocated. */
	ctxt->allocated = NULL;

#if MMU_ENABLE
	/* Allocate the safe zone (shared across contexts; only on first
	 * use). */
	if (mmu->safezone.size == 0) {
		gcerror = gc_alloc_pages(&mmu->safezone,
						MMU_SAFE_ZONE_SIZE);
		if (gcerror != GCERR_NONE) {
			gcerror = GCERR_SETGRP(gcerror,
						GCERR_MMU_SAFE_ALLOC);
			goto fail;
		}

		/* Initialize safe zone to a recognizable poison value. */
		for (i = 0; i < MMU_SAFE_ZONE_SIZE / sizeof(u32); i += 1)
			mmu->safezone.logical[i] = 0xDEADC0DE;
	}
#endif

	/* Reference MMU. */
	mmu->refcount += 1;
	ctxt->mmu = mmu;

	return GCERR_NONE;

fail:
#if MMU_ENABLE
	/* NOTE(review): gc_free_pages() is called even when the master
	 * allocation itself failed -- presumably safe on a zeroed struct;
	 * confirm.  The NULL guard on kfree is redundant (kfree(NULL) is a
	 * no-op) but harmless. */
	gc_free_pages(&ctxt->master);
	if (ctxt->slave != NULL)
		kfree(ctxt->slave);
#endif

	return gcerror;
}
Esempio n. 16
0
/* Return size bytes of memory in *output. *output must point to an
 * array large enough to hold size bytes.
 *
 * Reads are issued through the ADU in 8-byte aligned chunks; the head
 * and tail chunks are trimmed so only the requested bytes are copied.
 *
 * Fix: when the requested range was unaligned AND fully contained in a
 * single 8-byte word (e.g. start_addr=4, size=2), the head-chunk branch
 * copied 8 - (start_addr - addr) bytes, overrunning *output.  The copy
 * length is now clamped to 'size'.
 *
 * NOTE(review): CHECK_ERR() appears to return early on error, which
 * would skip adu_unlock() -- confirm the macro's semantics.
 *
 * Returns 0 on success, -1 on an ADU status error. */
int adu_getmem(struct target *target, uint64_t start_addr, uint8_t *output, uint64_t size)
{
	int rc = 0;
	uint64_t addr, cmd_reg, ctrl_reg, val;

	CHECK_ERR(adu_lock(target));

	/* Build the control word: 8-byte partial DMA read */
	ctrl_reg = TTYPE_TREAD;
	ctrl_reg = SETFIELD(FBC_ALTD_TTYPE, ctrl_reg, TTYPE_DMA_PARTIAL_READ);
	ctrl_reg = SETFIELD(FBC_ALTD_TSIZE, ctrl_reg, 8);

	CHECK_ERR(read_target(target, ALTD_CMD_REG, &cmd_reg));
	cmd_reg |= FBC_ALTD_START_OP;
	cmd_reg = SETFIELD(FBC_ALTD_SCOPE, cmd_reg, SCOPE_SYSTEM);
	cmd_reg = SETFIELD(FBC_ALTD_DROP_PRIORITY, cmd_reg, DROP_PRIORITY_MEDIUM);

	/* We read data in 8-byte aligned chunks */
	for (addr = 8*(start_addr / 8); addr < start_addr + size; addr += 8) {
		uint64_t data;

	retry:
		/* Clear status bits */
		CHECK_ERR(adu_reset(target));

		/* Set the address */
		ctrl_reg = SETFIELD(FBC_ALTD_ADDRESS, ctrl_reg, addr);
		CHECK_ERR(write_target(target, ALTD_CONTROL_REG, ctrl_reg));

		/* Start the command */
		CHECK_ERR(write_target(target, ALTD_CMD_REG, cmd_reg));

		/* Wait for completion */
		do {
			CHECK_ERR(read_target(target, ALTD_STATUS_REG, &val));
		} while (!val);

		if( !(val & FBC_ALTD_ADDR_DONE) ||
		    !(val & FBC_ALTD_DATA_DONE)) {
			/* PBINIT_MISSING is expected occasionally so just retry */
			if (val & FBC_ALTD_PBINIT_MISSING)
				goto retry;
			else {
				PR_ERROR("Unable to read memory. "	\
					 "ALTD_STATUS_REG = 0x%016llx\n", val);
				rc = -1;
				break;
			}
		}

		/* Read data */
		CHECK_ERR(read_target(target, ALTD_DATA_REG, &data));

		/* ADU returns data in big-endian form in the register */
		data = __builtin_bswap64(data);

		if (addr < start_addr) {
			/* Head chunk: skip the bytes before start_addr and
			 * clamp to 'size' so a short request contained in
			 * this single word cannot overrun *output. */
			uint64_t skip = start_addr - addr;
			uint64_t len = 8 - skip;

			if (len > size)
				len = size;
			memcpy(output, ((uint8_t *) &data) + skip, len);
			output += len;
		} else if (addr + 8 > start_addr + size) {
			/* Tail chunk: copy only the remaining bytes */
			memcpy(output, &data, start_addr + size - addr);
		} else {
			/* Full interior chunk */
			memcpy(output, &data, 8);
			output += 8;
		}

	}

	adu_unlock(target);

	return rc;
}