/*
 * main --
 *
 *    Screen Object / GMR example setup: bring up the SVGA device,
 *    define a 1600x1200 primary screen with a non-zero (and negative)
 *    root origin, then allocate a guest memory region (GMR) sized to
 *    back it at 32 bits per pixel.
 *
 *    NOTE(review): this block appears truncated in the current view —
 *    the remainder of main() (including its closing brace) is not
 *    visible here.
 */
int main(void) {
   /* Device bring-up: interrupts, fault handler, SVGA registers, GMRs, heap. */
   Intr_Init();
   Intr_SetFaultHandlers(SVGA_DefaultFaultHandler);
   SVGA_Init();
   GMR_Init();
   Heap_Reset();

   /* NOTE(review): SetMode(0, 0, 32) — presumably output is driven via
    * Screen Objects rather than the legacy mode; confirm against the
    * refdriver's SVGA_SetMode semantics. */
   SVGA_SetMode(0, 0, 32);
   Screen_Init();

   SVGAScreenObject myScreen = {
      .structSize = sizeof(SVGAScreenObject),
      .id = 0,
      .flags = SVGA_SCREEN_HAS_ROOT | SVGA_SCREEN_IS_PRIMARY,
      .size = { 1600, 1200 },
      .root = { -1234, 5678 },   /* note the negative root x coordinate */
   };
   Screen_Define(&myScreen);

   /* Size the GMR to hold one 32bpp framebuffer for the screen,
    * rounding up by adding one extra page. */
   const uint32 gmrId = 0;
   uint32 numPages = 1 + (myScreen.size.width * myScreen.size.height * sizeof(uint32)) / PAGE_SIZE;
   PPN pages = GMR_DefineEvenPages(gmrId, numPages);

   /* Image format: 32 bits per pixel, 24-bit color depth (x8r8g8b8-style). */
   const uint32 bitsPerPixel = 32;
   const uint32 colorDepth = 24;
   const uint32 bytesPerLine = myScreen.size.width * sizeof(uint32);
   const SVGAGMRImageFormat format = {{{
      .bitsPerPixel = bitsPerPixel,
      .colorDepth = colorDepth,
   }}};
/*
 * SVGA3DUtil_InitFullscreen --
 *
 *    Bring up the SVGA device for fullscreen 3D rendering: install the
 *    default fault handlers, set the video mode, create color and depth
 *    render-target surfaces, and bind them to a freshly defined 3D
 *    context with a fullscreen viewport.
 *
 *    cid    - ID of the 3D context to define and configure.
 *    width  - framebuffer width in pixels.
 *    height - framebuffer height in pixels.
 */
void
SVGA3DUtil_InitFullscreen(uint32 cid,     // IN
                          uint32 width,   // IN
                          uint32 height)  // IN
{
   SVGA3dRenderState *renderState;

   /* Record the fullscreen viewport rectangle. */
   gFullscreen.screen.x = 0;
   gFullscreen.screen.y = 0;
   gFullscreen.screen.w = width;
   gFullscreen.screen.h = height;

   /* Basic device bring-up: interrupts, SVGA registers, video mode, mouse. */
   Intr_Init();
   Intr_SetFaultHandlers(SVGA_DefaultFaultHandler);
   SVGA_Init();
   SVGA_SetMode(width, height, 32);
   VMBackdoor_MouseInit(TRUE);
   SVGA3D_Init();

   /* Surfaces backing the color and depth render targets. */
   gFullscreen.colorImage.sid = SVGA3DUtil_DefineSurface2D(width, height, SVGA3D_X8R8G8B8);
   gFullscreen.depthImage.sid = SVGA3DUtil_DefineSurface2D(width, height, SVGA3D_Z_D16);

   /* Create the context and attach both render targets to it. */
   SVGA3D_DefineContext(cid);
   SVGA3D_SetRenderTarget(cid, SVGA3D_RT_COLOR0, &gFullscreen.colorImage);
   SVGA3D_SetRenderTarget(cid, SVGA3D_RT_DEPTH, &gFullscreen.depthImage);
   SVGA3D_SetViewport(cid, &gFullscreen.screen);
   SVGA3D_SetZRange(cid, 0.0f, 1.0f);

   /*
    * Switch this context from the device's default flat shading to
    * smooth shading. Flat shading can be much slower, since the device
    * retains compatibility across OpenGL and Direct3D hosts, and it is
    * rarely what we want anyway.
    *
    * This is a per-context render state.
    *
    * XXX: VMware Workstation 6.5.2 also has a bug that shows up in
    * flat-shading mode when a drawing command omits the
    * SVGA3dVertexDivisor array. Avoiding flat shading is one
    * workaround; including a dummy SVGA3dVertexDivisor array on every
    * draw is another.
    */
   SVGA3D_BeginSetRenderState(cid, &renderState, 1);
   renderState[0].state = SVGA3D_RS_SHADEMODE;
   renderState[0].uintValue = SVGA3D_SHADEMODE_SMOOTH;
   SVGA_FIFOCommitAll();
}
int main(void) { static VMTCLOState tclo; Bool resendCapabilities = FALSE; Intr_Init(); Intr_SetFaultHandlers(SVGA_DefaultFaultHandler); SVGA_Init(); SVGA_SetMode(640, 480, 32); /* Use the PIT to set TCLO polling rate. */ Timer_InitPIT(PIT_HZ / 30); Intr_SetMask(0, TRUE); sendCapabilities(); while (1) { Intr_Halt(); if (!VMBackdoor_PollTCLO(&tclo, FALSE)) { if (resendCapabilities) { resendCapabilities = FALSE; sendCapabilities(); } continue; } if (VMBackdoor_CheckPrefixTCLO(&tclo, "Capabilities_Register")) { /* Send the capabilities after we get a chance to send the reply. */ resendCapabilities = TRUE; VMBackdoor_ReplyTCLO(&tclo, TCLO_SUCCESS); } else if (VMBackdoor_CheckPrefixTCLO(&tclo, "Resolution_Set")) { int width = VMBackdoor_IntParamTCLO(&tclo, 1); int height = VMBackdoor_IntParamTCLO(&tclo, 2); resize(width, height); VMBackdoor_ReplyTCLO(&tclo, TCLO_SUCCESS); } else { /* Unknown command */ VMBackdoor_ReplyTCLO(&tclo, TCLO_UNKNOWN_CMD); } } return 0; }
int main(void) { static FPSCounterState fps; uint32 frameFence = 0; uint32 nextFence; Intr_Init(); Intr_SetFaultHandlers(SVGA_DefaultFaultHandler); SVGA_Init(); GMR_Init(); Heap_Reset(); SVGA_SetMode(0, 0, 32); SVGA3D_Init(); Screen_Init(); ScreenDraw_Init(0); initScreens(); setup3D(); /* * One big circle, and a smaller one that overlaps the top-right * corner. (This tests positive and negative clipping extremes.) */ prepareCircle(&circles[0], 650, 400, 300); prepareCircle(&circles[1], 1000, 50, 250); while (1) { if (SVGA3DUtil_UpdateFPSCounter(&fps)) { Console_MoveTo(900, 730); Console_Format("%s ", fps.text); } drawCube(); /* * Flow control- one frame in the FIFO at a time. */ nextFence = SVGA_InsertFence(); SVGA_SyncToFence(frameFence); frameFence = nextFence; present(); } return 0; }
/*
 * main --
 *
 *    Scheduler bootstrap: set up the VGA console and interrupt/fault
 *    handling, create the first task, and transfer control to it by
 *    restoring its saved context. Control never returns to main().
 */
int main(void) {
   ConsoleVGA_Init();
   Intr_Init();
   Intr_SetFaultHandlers(Console_UnhandledFault);

   /*
    * Create task 1, and switch to it. We never come
    * back to main() after this.
    */
   task_init(&task1, task1_main);
   runQueue.current = runQueue_pop();
   /* Restoring the context jumps into the task; the return below is
    * unreachable in practice. */
   Intr_RestoreContext(&runQueue.current->context);
   return 0;
}
/*
 * main --
 *
 *    Annotated Blit sample: defines an 800x600 primary screen, draws
 *    explanatory text, then animates two rectangles forever — one via
 *    a fill-annotated blit, one via a copy-annotated blit — paced by
 *    PIT timer ticks.  Never returns.
 */
int main(void) {
   uint32 frame = 0;      /* frame counter; frame 0 has no previous rects */
   uint32 lastTick = 0;   /* last timerTick value we consumed */

   Intr_Init();
   Intr_SetFaultHandlers(SVGA_DefaultFaultHandler);

   /* PIT paces the animation at FRAME_RATE; timerISR bumps timerTick. */
   Timer_InitPIT(PIT_HZ / FRAME_RATE);
   Intr_SetMask(PIT_IRQ, TRUE);
   Intr_SetHandler(IRQ_VECTOR(PIT_IRQ), timerISR);

   /* SVGA device bring-up. */
   SVGA_Init();
   GMR_Init();
   Heap_Reset();
   SVGA_SetMode(0, 0, 32);
   Screen_Init();
   ScreenDraw_Init(GMRID_SCREEN_DRAW);
   allocNoise();

   /*
    * Define a screen.
    */
   SVGAScreenObject myScreen = {
      .structSize = sizeof(SVGAScreenObject),
      .id = SCREEN_ID,
      .flags = SVGA_SCREEN_HAS_ROOT | SVGA_SCREEN_IS_PRIMARY,
      .size = { 800, 600 },
      .root = { 0, 0 },
   };
   Screen_Define(&myScreen);

   /*
    * Draw some explanatory text.
    */
   char docString[] =
      "Annotated Blit Sample:"
      "\n\n"
      "You should see two moving rectangles. The left one is animated "
      "using a fill-annotated blit. The blit itself contains random "
      "noise, but the annotation is a blue fill. If your host is "
      "using the annotation, you will see the blue. If not, you'll "
      "see noise. Either one is correct, but it is often more efficient "
      "to use the fill."
      "\n\n"
      "The right one is a copy-annotated blit. The blit data is again "
      "random noise, and the copy is a screen-to-screen copy which "
      "moves the rectangle from its old position to the new position. "
      "We drew a checkerboard pattern to the screen once, and that "
      "pattern should be preserved indefinitely if the annotation is "
      "being executed correctly."
      "\n\n"
      "Both rectangles should have a 1-pixel solid white border, and "
      "in both cases we use a fill-annotated blit to clear the screen "
      "behind each rectangle. This annotation doesn't lie, its blit data "
      "matches the advertised fill color.";

   ScreenDraw_SetScreen(myScreen.id, myScreen.size.width, myScreen.size.height);
   Console_Clear();
   ScreenDraw_WrapText(docString, 770);
   Console_WriteString(docString);

   /*
    * Animate the two rectangles indefinitely, sleeping between frames.
    */
   while (1) {
      SVGASignedRect oldRect1, oldRect2;
      SVGASignedRect newRect1, newRect2;

      /*
       * Move them around in a circle.
       */
      float theta = frame * 0.01;
      newRect1.left = 190 + cosf(theta) * 60;
      newRect1.top = 350 + sinf(theta) * 60;
      newRect1.right = newRect1.left + 80;
      newRect1.bottom = newRect1.top + 120;
      newRect2.left = 530 + sinf(theta) * 60;
      newRect2.top = 350 + cosf(theta) * 60;
      newRect2.right = newRect2.left + 80;
      newRect2.bottom = newRect2.top + 120;

      /*
       * Update the position of each. On the very first frame there is
       * no previous rectangle, so NULL is passed for the old position.
       * (oldRect1/oldRect2 are uninitialized then, but only their
       * addresses are taken — the NULL branch means they are not read.)
       */
      updateFillRect(frame ? &oldRect1 : NULL, &newRect1);
      updateCopyRect(frame ? &oldRect2 : NULL, &newRect2);
      oldRect1 = newRect1;
      oldRect2 = newRect2;

      /*
       * Wait for the next timer tick.
       */
      while (timerTick == lastTick) {
         Intr_Halt();
      }
      lastTick = timerTick;
      frame++;
   }

   return 0;
}
/*
 * Ipc_IntrInit --
 *
 *    Register an interrupt handler for this IPC instance by forwarding
 *    to Intr_Init() with the instance's embedded interrupt object.
 *
 *    pThis         - IPC instance whose IpcIntr object is initialized.
 *    pIntrHandler  - callback invoked when the interrupt fires.
 *    Arg           - opaque argument passed through to the handler.
 *    eIntrItemName - identifies which interrupt source to hook.
 *
 *    NOTE(review): this Intr_Init() takes four arguments and is a
 *    different API from the zero-argument Intr_Init() used elsewhere
 *    in this file — these blocks appear to come from separate
 *    codebases; confirm the intended prototype.
 */
VOID Ipc_IntrInit(Ipc *pThis, VOID (*pIntrHandler)(VOID *), VOID *Arg, IntrItem eIntrItemName)
{
   Intr_Init(&pThis->IpcIntr, eIntrItemName, pIntrHandler, Arg);
}
/** ============================================================================
 * @n@b Setup_Rx
 *
 * @b Description
 * @n This API sets up all relevant data structures and configuration required
 * for receiving data from PASS/Ethernet. It sets up a Rx free descriptor queue
 * with some empty pre-allocated buffers to receive data, and an Rx queue
 * to which the Rxed data is streamed for the example application. This API
 * also sets up the QM high priority accumulation interrupts required to
 * receive data from the Rx queue.
 *
 * @param[in]
 * @n pThis - Ethernet instance whose Rx event, data pointers, and
 *            interrupt object are initialized.
 *
 * @return Int32
 * -1 - Error
 * 0 - Success
 * =============================================================================
 */
Int32 Setup_Rx (Ethernet *pThis)
{
    Int32 result;
    UInt8 isAllocated, accChannelNum;
    UInt16 numAccEntries, intThreshold, i;
    Qmss_Queue rxFreeQInfo, rxQInfo;
    Ptr pCppiDesc;
    Qmss_AccCmdCfg accCfg;
    Cppi_RxFlowCfg rxFlowCfg;
    Ptr pDataBuffer;
    Error_Block eb;
    /* Software info words stamped into every Rx descriptor.
     * NOTE(review): mySWInfo is a stack local; this is safe only if
     * Cppi_setSoftwareInfo copies the words into the descriptor's EPIB
     * rather than storing the pointer — verify against the CPPI LLD. */
    Uint32 mySWInfo[] = {0x11112222, 0x33334444};

    /* Open a Receive (Rx) queue.
     *
     * This queue will be used to hold all the packets received by PASS/CPSW
     *
     * Open the next available High Priority Accumulation queue for Rx.
     */
    if ((gRxQHnd = Qmss_queueOpen (Qmss_QueueType_HIGH_PRIORITY_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated)) < 0)
    {
        uart_write ("Error opening a High Priority Accumulation Rx queue \n");
        return -1;
    }
    rxQInfo = Qmss_getQueueNumber (gRxQHnd);
    /* NOTE(review): uart_write is being used printf-style with format
     * arguments here and below — confirm it actually accepts varargs. */
    uart_write ("Opened RX queue Number %d \n",rxQInfo.qNum);

    /* Setup high priority accumulation interrupts on the Rx queue.
     *
     * Let's configure the accumulator with the following settings:
     * (1) Interrupt pacing disabled.
     * (2) Interrupt on every received packet
     */
    intThreshold = RX_INT_THRESHOLD;
    /* Two ping-pong pages, each (threshold + 1) entries. */
    numAccEntries = (intThreshold + 1) * 2;
    accChannelNum = PA_ACC_CHANNEL_NUM;

    /* Initialize the accumulator list memory.
     * NOTE(review): this assumes 4 bytes per list entry; verify the size
     * matches the configured listEntrySize (Qmss_AccEntrySize_REG_D)
     * below, otherwise part of the list is left uninitialized. */
    memset ((Void *) gHiPriAccumList, 0, numAccEntries * 4);

    /* Ensure that the accumulator channel we are programming is not
     * in use currently.
     */
    result = Qmss_disableAccumulator (Qmss_PdspId_PDSP1, accChannelNum);
    if (result != QMSS_ACC_SOK && result != QMSS_ACC_CHANNEL_NOT_ACTIVE)
    {
        uart_write ("Error Disabling high priority accumulator for channel : %d error code: %d\n", accChannelNum, result);
        return -1;
    }

    /* Setup the accumulator settings */
    accCfg.channel = accChannelNum;
    accCfg.command = Qmss_AccCmd_ENABLE_CHANNEL;
    accCfg.queueEnMask = 0;
    /* The accumulator needs the global (not core-local) list address. */
    accCfg.listAddress = Convert_CoreLocal2GlobalAddr((Uint32) gHiPriAccumList);
    // accCfg.listAddress = gHiPriAccumList;
    accCfg.queMgrIndex = gRxQHnd;
    accCfg.maxPageEntries = (intThreshold + 1); /* Add an extra entry for holding the entry count */
    accCfg.timerLoadCount = 0;
    accCfg.interruptPacingMode = Qmss_AccPacingMode_LAST_INTERRUPT;
    accCfg.listEntrySize = Qmss_AccEntrySize_REG_D;
    accCfg.listCountMode = Qmss_AccCountMode_ENTRY_COUNT;
    accCfg.multiQueueMode = Qmss_AccQueueMode_SINGLE_QUEUE;

    /* Program the accumulator */
    if ((result = Qmss_programAccumulator (Qmss_PdspId_PDSP1, &accCfg)) != QMSS_ACC_SOK)
    {
        uart_write ("Error Programming high priority accumulator for channel : %d queue : %d error code : %d\n", accCfg.channel, accCfg.queMgrIndex, result);
        return -1;
    }

    /* Event object the Rx ISR will post to wake the application. */
    Error_init(&eb);
    pThis->RxEventHandle = Event_create(NULL,&eb);
    if (pThis->RxEventHandle == NULL)
    {
        uart_write("Event create failed");
        return -1;
    }

    /* Reset the Rx data ring state before enabling the interrupt. */
    memset(pThis->pRxDataPtr,0,sizeof(pThis->pRxDataPtr));
    pThis->RxDataHead = 0;
    pThis->RxDataTail = 0;

    /* Hook the Ethernet Rx interrupt to the Cpsw_RxISR handler. */
    Intr_Init(&pThis->oEthIntr, INTR_ITEM_ETH_RX,(Intr_Handler)Cpsw_RxISR, (void*)pThis);

    /* Open a Rx Free Descriptor Queue (Rx FDQ).
     *
     * This queue will hold all the Rx free decriptors. These descriptors will be
     * used by the PASS CPDMA to hold data received via CPSW.
     */
    if ((gRxFreeQHnd = Qmss_queueOpen (Qmss_QueueType_STARVATION_COUNTER_QUEUE, QMSS_PARAM_NOT_SPECIFIED, &isAllocated)) < 0)
    {
        uart_write ("Error opening Rx Free descriptor queue \n");
        return -1;
    }
    rxFreeQInfo = Qmss_getQueueNumber (gRxFreeQHnd);
    uart_write("Opened RX Free queue Number %d\n",gRxFreeQHnd);

    /* Attach some free descriptors to the Rx free queue we just opened. */
    for (i = 0; i < NUM_RX_DESC; i++)
    {
        /* Get a free descriptor from the global free queue we setup
         * during initialization.
         */
        if ((pCppiDesc = Qmss_queuePop (gGlobalFreeQHnd)) == NULL)
        {
            break;
        }

        /* The descriptor address returned from the hardware has the
         * descriptor size appended to the address in the last 4 bits.
         *
         * To get the true descriptor size, always mask off the last
         * 4 bits of the address.
         */
        pCppiDesc = (Ptr) ((UInt32) pCppiDesc & 0xFFFFFFF0);

        if ((pDataBuffer = (Ptr) Memory_alloc((IHeap_Handle)heap2, ETHER_MAX_SIZE, 0, NULL)) == NULL)
        {
            uart_write ("Error allocating memory for Rx data buffer \n");
            break;
        }

        /* Populate the Rx free descriptor with the buffer we just allocated. */
        Cppi_setData (Cppi_DescType_HOST, pCppiDesc, (UInt8 *)Convert_CoreLocal2GlobalAddr((UInt32)pDataBuffer), ETHER_MAX_SIZE);

        /* Save original buffer information */
        Cppi_setOriginalBufInfo (Cppi_DescType_HOST, pCppiDesc, (UInt8 *)Convert_CoreLocal2GlobalAddr((UInt32)pDataBuffer), ETHER_MAX_SIZE);

        /* Setup the Completion queue:
         *
         * Setup the return policy for this desc to return to the free q we just
         * setup instead of the global free queue.
         */
        Cppi_setReturnQueue (Cppi_DescType_HOST, pCppiDesc, rxFreeQInfo);

        Cppi_setSoftwareInfo (Cppi_DescType_HOST, pCppiDesc, (UInt8 *) mySWInfo);

        Cppi_setPacketLen (Cppi_DescType_HOST, pCppiDesc, ETHER_MAX_SIZE);

        /* Push descriptor to Tx free queue */
        Qmss_queuePushDescSize (gRxFreeQHnd, pCppiDesc, SIZE_CPSW_HOST_DESC);
    }
    /* If the loop broke early, not all descriptors were set up. */
    if (i != NUM_RX_DESC)
    {
        uart_write ("Error allocating Rx free descriptors \n");
        return -1;
    }
    //count=Qmss_getQueueEntryCount(gRxFreeQHnd);
    //platform_write("Total %d entries in queue %d\n",count,gRxFreeQHnd);

    /* Setup a Rx Flow.
     *
     * A Rx flow encapsulates all relevant data properties that CPDMA would
     * have to know in order to succefully receive data.
     */
    /* Initialize the flow configuration */
    memset (&rxFlowCfg, 0, sizeof(Cppi_RxFlowCfg));

    /* Let CPPI pick the next available flow */
    rxFlowCfg.flowIdNum = CPPI_PARAM_NOT_SPECIFIED;
    rxFlowCfg.rx_dest_qmgr = rxQInfo.qMgr;
    rxFlowCfg.rx_dest_qnum = rxQInfo.qNum;
    rxFlowCfg.rx_desc_type = Cppi_DescType_HOST;
    rxFlowCfg.rx_ps_location = Cppi_PSLoc_PS_IN_DESC;
    rxFlowCfg.rx_psinfo_present = 1; /* Enable PS info */
    rxFlowCfg.rx_error_handling = 0; /* Drop the packet, do not retry on starvation by default */
    rxFlowCfg.rx_einfo_present = 1; /* EPIB info present */
    rxFlowCfg.rx_dest_tag_lo_sel = 0; /* Disable tagging */
    rxFlowCfg.rx_dest_tag_hi_sel = 0;
    rxFlowCfg.rx_src_tag_lo_sel = 0;
    rxFlowCfg.rx_src_tag_hi_sel = 0;
    rxFlowCfg.rx_size_thresh0_en = 0; /* By default, we disable Rx Thresholds */
    rxFlowCfg.rx_size_thresh1_en = 0; /* By default, we disable Rx Thresholds */
    rxFlowCfg.rx_size_thresh2_en = 0; /* By default, we disable Rx Thresholds */
    rxFlowCfg.rx_size_thresh0 = 0x0;
    rxFlowCfg.rx_size_thresh1 = 0x0;
    rxFlowCfg.rx_size_thresh2 = 0x0;
    rxFlowCfg.rx_fdq0_sz0_qmgr = rxFreeQInfo.qMgr; /* Setup the Receive free queue for the flow */
    rxFlowCfg.rx_fdq0_sz0_qnum = rxFreeQInfo.qNum;
    rxFlowCfg.rx_fdq0_sz1_qnum = 0x0;
    rxFlowCfg.rx_fdq0_sz1_qmgr = 0x0;
    rxFlowCfg.rx_fdq0_sz2_qnum = 0x0;
    rxFlowCfg.rx_fdq0_sz2_qmgr = 0x0;
    rxFlowCfg.rx_fdq0_sz3_qnum = 0x0;
    rxFlowCfg.rx_fdq0_sz3_qmgr = 0x0;
    rxFlowCfg.rx_fdq1_qnum = rxFreeQInfo.qNum; /* Use the Rx Queue to pick descriptors */
    rxFlowCfg.rx_fdq1_qmgr = rxFreeQInfo.qMgr;
    rxFlowCfg.rx_fdq2_qnum = rxFreeQInfo.qNum; /* Use the Rx Queue to pick descriptors */
    rxFlowCfg.rx_fdq2_qmgr = rxFreeQInfo.qMgr;
    rxFlowCfg.rx_fdq3_qnum = rxFreeQInfo.qNum; /* Use the Rx Queue to pick descriptors */
    rxFlowCfg.rx_fdq3_qmgr = rxFreeQInfo.qMgr;

    /* Configure the Rx flow */
    if ((gRxFlowHnd = Cppi_configureRxFlow (gCpdmaHnd, &rxFlowCfg, &isAllocated)) == NULL)
    {
        uart_write ("Error configuring Rx flow \n");
        return -1;
    }
    else
    {
        //platform_write("Rx flow configured. handle %p Id %d \n",gRxFlowHnd,Cppi_getFlowId (gRxFlowHnd));
    }

    /* All done with Rx configuration. Return success. */
    return 0;
}