/* * ======== RMM_create ======== */ DSP_STATUS RMM_create(struct RMM_TargetObj **pTarget, struct RMM_Segment segTab[], u32 numSegs) { struct RMM_Header *hptr; struct RMM_Segment *sptr, *tmp; struct RMM_TargetObj *target; s32 i; DSP_STATUS status = DSP_SOK; DBC_Require(pTarget != NULL); DBC_Require(numSegs == 0 || segTab != NULL); GT_3trace(RMM_debugMask, GT_ENTER, "RMM_create(0x%lx, 0x%lx, 0x%lx)\n", pTarget, segTab, numSegs); /* Allocate DBL target object */ MEM_AllocObject(target, struct RMM_TargetObj, RMM_TARGSIGNATURE); if (target == NULL) { GT_0trace(RMM_debugMask, GT_6CLASS, "RMM_create: Memory allocation failed\n"); status = DSP_EMEMORY; } if (DSP_FAILED(status)) goto func_cont; target->numSegs = numSegs; if (!(numSegs > 0)) goto func_cont; /* Allocate the memory for freelist from host's memory */ target->freeList = MEM_Calloc(numSegs * sizeof(struct RMM_Header *), MEM_PAGED); if (target->freeList == NULL) { GT_0trace(RMM_debugMask, GT_6CLASS, "RMM_create: Memory allocation failed\n"); status = DSP_EMEMORY; } else { /* Allocate headers for each element on the free list */ for (i = 0; i < (s32) numSegs; i++) { target->freeList[i] = MEM_Calloc(sizeof(struct RMM_Header), MEM_PAGED); if (target->freeList[i] == NULL) { GT_0trace(RMM_debugMask, GT_6CLASS, "RMM_create: Memory " "allocation failed\n"); status = DSP_EMEMORY; break; } } /* Allocate memory for initial segment table */ target->segTab = MEM_Calloc(numSegs * sizeof(struct RMM_Segment), MEM_PAGED); if (target->segTab == NULL) { GT_0trace(RMM_debugMask, GT_6CLASS, "RMM_create: Memory allocation failed\n"); status = DSP_EMEMORY; } else { /* Initialize segment table and free list */ sptr = target->segTab; for (i = 0, tmp = segTab; numSegs > 0; numSegs--, i++) { *sptr = *tmp; hptr = target->freeList[i]; hptr->addr = tmp->base; hptr->size = tmp->length; hptr->next = NULL; tmp++; sptr++; } } } func_cont: /* Initialize overlay memory list */ if (DSP_SUCCEEDED(status)) { target->ovlyList = LST_Create(); if (target->ovlyList == NULL) { GT_0trace(RMM_debugMask, GT_6CLASS, "RMM_create: Memory allocation failed\n"); status = DSP_EMEMORY; } } if (DSP_SUCCEEDED(status)) { *pTarget = target; } else { *pTarget = NULL; if (target) RMM_delete(target); } DBC_Ensure((DSP_SUCCEEDED(status) && MEM_IsValidHandle((*pTarget), RMM_TARGSIGNATURE)) || (DSP_FAILED(status) && *pTarget == NULL)); return status; }
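/*
 * Illustrative sketch (not part of the driver): building a one-segment
 * RMM target with RMM_create() as defined above.  The segment base and
 * length values are made-up placeholders; only the 'base' and 'length'
 * fields consumed by RMM_create() are filled in, and the usual bridge
 * headers are assumed to be included.
 */
static DSP_STATUS example_rmm_setup(struct RMM_TargetObj **pTarget)
{
	struct RMM_Segment seg;

	memset(&seg, 0, sizeof(seg));
	seg.base = 0x20000000;	/* hypothetical DSP base address */
	seg.length = 0x8000;	/* hypothetical segment size */

	/* One segment: one free-list header is allocated internally */
	return RMM_create(pTarget, &seg, 1);
}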
/*
 *  ======== MGR_EnumNodeInfo ========
 *      Enumerate and get configuration information about nodes configured
 *      in the node database.
 */
DSP_STATUS MGR_EnumNodeInfo(u32 uNode, OUT struct DSP_NDBPROPS *pNDBProps,
			    u32 uNDBPropsSize, OUT u32 *puNumNodes)
{
	DSP_STATUS status = DSP_SOK;
	DSP_STATUS status1 = DSP_SOK;
	struct DSP_UUID Uuid, uTempUuid;
	u32 uTempIndex = 0;
	u32 uNodeIndex = 0;
	struct DCD_GENERICOBJ GenObj;
	struct MGR_OBJECT *pMgrObject = NULL;

	DBC_Require(pNDBProps != NULL);
	DBC_Require(puNumNodes != NULL);
	DBC_Require(uNDBPropsSize >= sizeof(struct DSP_NDBPROPS));
	DBC_Require(cRefs > 0);

	GT_4trace(MGR_DebugMask, GT_ENTER, "Entered Manager_EnumNodeInfo, "
		 "args:\n\t uNode: 0x%x\n\tpNDBProps: 0x%x\n\tuNDBPropsSize:"
		 "0x%x\tpuNumNodes: 0x%x\n", uNode, pNDBProps, uNDBPropsSize,
		 puNumNodes);
	*puNumNodes = 0;
	/* Get the Manager Object from the Registry */
	if (DSP_FAILED(CFG_GetObject((u32 *)&pMgrObject, REG_MGR_OBJECT))) {
		GT_0trace(MGR_DebugMask, GT_7CLASS,
			 "Manager_EnumNodeInfo: Failed to get"
			 " MGR Object from Registry\r\n");
		goto func_cont;
	}
	DBC_Assert(MEM_IsValidHandle(pMgrObject, SIGNATURE));
	/* Enumerate until there are no more items; any status other than
	 * DSP_SOK exits the loop. */
	while (status == DSP_SOK) {
		status = DCD_EnumerateObject(uTempIndex++, DSP_DCDNODETYPE,
					    &uTempUuid);
		if (status == DSP_SOK) {
			uNodeIndex++;
			if (uNode == (uNodeIndex - 1))
				Uuid = uTempUuid;
		}
	}
	if (DSP_SUCCEEDED(status)) {
		if (uNode > (uNodeIndex - 1)) {
			status = DSP_EINVALIDARG;
			GT_0trace(MGR_DebugMask, GT_7CLASS,
				 "Manager_EnumNodeInfo: uNode is invalid\r\n");
		} else {
			status1 = DCD_GetObjectDef(pMgrObject->hDcdMgr,
						  (struct DSP_UUID *)&Uuid,
						  DSP_DCDNODETYPE, &GenObj);
			if (DSP_SUCCEEDED(status1)) {
				/* Get the Obj def */
				*pNDBProps = GenObj.objData.nodeObj.ndbProps;
				*puNumNodes = uNodeIndex;
				status = DSP_SOK;
			} else {
				GT_0trace(MGR_DebugMask, GT_7CLASS,
					 "Manager_EnumNodeInfo: "
					 "Failed to get Node Info\r\n");
				status = DSP_EFAIL;
			}
		}
	} else {
		/* Status changed during enumeration; report EFAIL */
		GT_0trace(MGR_DebugMask, GT_7CLASS, "Manager_EnumNodeInfo: "
			 "Enumeration failure\r\n");
		status = DSP_EFAIL;
	}
func_cont:
	GT_4trace(MGR_DebugMask, GT_ENTER,
		 "Exiting Manager_EnumNodeInfo, args:\n\t"
		 "uNode: 0x%x\n\tpNDBProps: 0x%x\n\tuNDBPropsSize:"
		 " 0x%x\tuNumNodes: 0x%x\n", uNode, pNDBProps,
		 uNDBPropsSize, *puNumNodes);
	DBC_Ensure((DSP_SUCCEEDED(status) && *puNumNodes > 0) ||
		  (DSP_FAILED(status) && *puNumNodes == 0));

	return status;
}
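/*
 * Illustrative sketch (not part of the driver): walking the node database
 * with MGR_EnumNodeInfo().  puNumNodes reports the total number of nodes,
 * so the loop bound is refreshed on each call; 'use_node_props' is a
 * hypothetical consumer of each DSP_NDBPROPS record.
 */
static void example_enum_nodes(void (*use_node_props)
			       (const struct DSP_NDBPROPS *props))
{
	struct DSP_NDBPROPS props;
	u32 numNodes = 0;
	u32 i = 0;

	do {
		if (DSP_FAILED(MGR_EnumNodeInfo(i, &props, sizeof(props),
						&numNodes)))
			break;
		use_node_props(&props);
		i++;
	} while (i < numNodes);
}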
/* * ======== STRM_Select ======== * Purpose: * Selects a ready stream. */ DSP_STATUS STRM_Select(IN struct STRM_OBJECT **aStrmTab, u32 nStrms, OUT u32 *pMask, u32 uTimeout) { u32 uIndex; struct CHNL_INFO chnlInfo; struct WMD_DRV_INTERFACE *pIntfFxns; struct SYNC_OBJECT **hSyncEvents = NULL; u32 i; DSP_STATUS status = DSP_SOK; DBC_Require(cRefs > 0); DBC_Require(aStrmTab != NULL); DBC_Require(pMask != NULL); DBC_Require(nStrms > 0); *pMask = 0; for (i = 0; i < nStrms; i++) { if (!MEM_IsValidHandle(aStrmTab[i], STRM_SIGNATURE)) { status = DSP_EHANDLE; break; } } if (DSP_FAILED(status)) goto func_end; /* Determine which channels have IO ready */ for (i = 0; i < nStrms; i++) { pIntfFxns = aStrmTab[i]->hStrmMgr->pIntfFxns; status = (*pIntfFxns->pfnChnlGetInfo)(aStrmTab[i]->hChnl, &chnlInfo); if (DSP_FAILED(status)) { break; } else { if (chnlInfo.cIOCs > 0) *pMask |= (1 << i); } } if (DSP_SUCCEEDED(status) && uTimeout > 0 && *pMask == 0) { /* Non-zero timeout */ hSyncEvents = (struct SYNC_OBJECT **)MEM_Alloc(nStrms * sizeof(struct SYNC_OBJECT *), MEM_PAGED); if (hSyncEvents == NULL) { status = DSP_EMEMORY; } else { for (i = 0; i < nStrms; i++) { pIntfFxns = aStrmTab[i]->hStrmMgr->pIntfFxns; status = (*pIntfFxns->pfnChnlGetInfo) (aStrmTab[i]->hChnl, &chnlInfo); if (DSP_FAILED(status)) break; else hSyncEvents[i] = chnlInfo.hSyncEvent; } } if (DSP_SUCCEEDED(status)) { status = SYNC_WaitOnMultipleEvents(hSyncEvents, nStrms, uTimeout, &uIndex); if (DSP_SUCCEEDED(status)) { /* Since we waited on the event, we have to * reset it */ SYNC_SetEvent(hSyncEvents[uIndex]); *pMask = 1 << uIndex; } } } func_end: kfree(hSyncEvents); DBC_Ensure((DSP_SUCCEEDED(status) && (*pMask != 0 || uTimeout == 0)) || (DSP_FAILED(status) && *pMask == 0)); return status; }
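/*
 * Illustrative sketch (not part of the driver): waiting on two already-open
 * streams with STRM_Select() and decoding the ready mask.  'strmA'/'strmB'
 * and the 5-second timeout are hypothetical; bit i of the mask corresponds
 * to aStrmTab[i], exactly as set by the function above.
 */
static DSP_STATUS example_wait_two_streams(struct STRM_OBJECT *strmA,
					   struct STRM_OBJECT *strmB,
					   bool *aReady, bool *bReady)
{
	struct STRM_OBJECT *strmTab[2];
	u32 mask = 0;
	DSP_STATUS status;

	strmTab[0] = strmA;
	strmTab[1] = strmB;
	status = STRM_Select(strmTab, 2, &mask, 5000 /* ms, hypothetical */);
	*aReady = DSP_SUCCEEDED(status) && (mask & (1 << 0));
	*bReady = DSP_SUCCEEDED(status) && (mask & (1 << 1));

	return status;
}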
/* * ======== RMM_alloc ======== */ DSP_STATUS RMM_alloc(struct RMM_TargetObj *target, u32 segid, u32 size, u32 align, u32 *dspAddr, bool reserve) { struct RMM_OvlySect *sect; struct RMM_OvlySect *prevSect = NULL; struct RMM_OvlySect *newSect; u32 addr; DSP_STATUS status = DSP_SOK; DBC_Require(MEM_IsValidHandle(target, RMM_TARGSIGNATURE)); DBC_Require(dspAddr != NULL); DBC_Require(size > 0); DBC_Require(reserve || (target->numSegs > 0)); DBC_Require(cRefs > 0); GT_6trace(RMM_debugMask, GT_ENTER, "RMM_alloc(0x%lx, 0x%lx, 0x%lx, 0x%lx, " "0x%lx, 0x%lx)\n", target, segid, size, align, dspAddr, reserve); if (!reserve) { if (!allocBlock(target, segid, size, align, dspAddr)) { status = DSP_EMEMORY; } else { /* Increment the number of allocated blocks in this * segment */ target->segTab[segid].number++; } goto func_end; } /* An overlay section - See if block is already in use. If not, * insert into the list in ascending address size. */ addr = *dspAddr; sect = (struct RMM_OvlySect *)LST_First(target->ovlyList); /* Find place to insert new list element. List is sorted from * smallest to largest address. */ while (sect != NULL) { if (addr <= sect->addr) { /* Check for overlap with sect */ if ((addr + size > sect->addr) || (prevSect && (prevSect->addr + prevSect->size > addr))) { status = DSP_EOVERLAYMEMORY; } break; } prevSect = sect; sect = (struct RMM_OvlySect *)LST_Next(target->ovlyList, (struct LST_ELEM *)sect); } if (DSP_SUCCEEDED(status)) { /* No overlap - allocate list element for new section. */ newSect = MEM_Calloc(sizeof(struct RMM_OvlySect), MEM_PAGED); if (newSect == NULL) { status = DSP_EMEMORY; } else { LST_InitElem((struct LST_ELEM *)newSect); newSect->addr = addr; newSect->size = size; newSect->page = segid; if (sect == NULL) { /* Put new section at the end of the list */ LST_PutTail(target->ovlyList, (struct LST_ELEM *)newSect); } else { /* Put new section just before sect */ LST_InsertBefore(target->ovlyList, (struct LST_ELEM *)newSect, (struct LST_ELEM *)sect); } } } func_end: return status; }
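/*
 * Illustrative sketch (not part of the driver): the two ways RMM_alloc()
 * is used above.  With reserve == false a block is carved out of segment
 * 'segid' and the allocated DSP address comes back in *dspAddr; with
 * reserve == true the caller supplies the address of an overlay section
 * in *dspAddr and the call only records the reservation after checking for
 * overlap.  All addresses, sizes and the alignment value are hypothetical.
 */
static DSP_STATUS example_rmm_usage(struct RMM_TargetObj *target)
{
	u32 codeAddr = 0;
	u32 ovlyAddr = 0x20004000;	/* overlay address from a COFF section */
	DSP_STATUS status;

	/* Normal allocation: RMM picks the address from segment 0. */
	status = RMM_alloc(target, 0, 0x400, 1, &codeAddr, false);
	if (DSP_FAILED(status))
		return status;

	/* Overlay reservation: address is an input, overlap is checked. */
	return RMM_alloc(target, 0, 0x200, 1, &ovlyAddr, true);
}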
/* * ======== WMD_DEH_Notify ======== * DEH error notification function. Informs user about the error. */ void WMD_DEH_Notify(struct DEH_MGR *hDehMgr, u32 ulEventMask, u32 dwErrInfo) { struct DEH_MGR *pDehMgr = (struct DEH_MGR *)hDehMgr; struct WMD_DEV_CONTEXT *pDevContext; u32 memPhysical = 0; u32 HW_MMU_MAX_TLB_COUNT = 31; extern u32 faultAddr; u32 cnt = 0; if (MEM_IsValidHandle(pDehMgr, SIGNATURE)) { printk(KERN_INFO "WMD_DEH_Notify: ********** DEVICE EXCEPTION " "**********\n"); pDevContext = (struct WMD_DEV_CONTEXT *)pDehMgr->hWmdContext; switch (ulEventMask) { case DSP_SYSERROR: /* reset errInfo structure before use */ pDehMgr->errInfo.dwErrMask = DSP_SYSERROR; pDehMgr->errInfo.dwVal1 = 0L; pDehMgr->errInfo.dwVal2 = 0L; pDehMgr->errInfo.dwVal3 = 0L; pDehMgr->errInfo.dwVal1 = dwErrInfo; printk(KERN_ERR "WMD_DEH_Notify: DSP_SYSERROR, errInfo " "= 0x%x\n", dwErrInfo); dump_dl_modules(pDevContext); dump_dsp_stack(pDevContext); break; case DSP_MMUFAULT: /* MMU fault routine should have set err info * structure */ pDehMgr->errInfo.dwErrMask = DSP_MMUFAULT; printk(KERN_INFO "WMD_DEH_Notify: DSP_MMUFAULT," "errInfo = 0x%x\n", dwErrInfo); printk(KERN_INFO "WMD_DEH_Notify: DSP_MMUFAULT, High " "Address = 0x%x\n", (unsigned int)pDehMgr->errInfo.dwVal1); printk(KERN_INFO "WMD_DEH_Notify: DSP_MMUFAULT, Low " "Address = 0x%x\n", (unsigned int)pDehMgr->errInfo.dwVal2); printk(KERN_INFO "WMD_DEH_Notify: DSP_MMUFAULT, fault " "address = 0x%x\n", (unsigned int)faultAddr); PrintDspTraceBuffer(pDevContext); dump_dl_modules(pDevContext); dummyVaAddr = (u32)MEM_Calloc(sizeof(char) * 0x1000, MEM_PAGED); memPhysical = VirtToPhys(PG_ALIGN_LOW((u32)dummyVaAddr, PG_SIZE_4K)); pDevContext = (struct WMD_DEV_CONTEXT *) pDehMgr->hWmdContext; /* Reset the dynamic mmu index to fixed count if it * exceeds 31. So that the dynmmuindex is always * between the range of standard/fixed entries * and 31. 
*/ if (pDevContext->numTLBEntries > HW_MMU_MAX_TLB_COUNT) { pDevContext->numTLBEntries = pDevContext-> fixedTLBEntries; } HW_MMU_TLBAdd(pDevContext->dwDSPMmuBase, memPhysical, faultAddr, HW_PAGE_SIZE_4KB, 1, &mapAttrs, HW_SET, HW_SET); /* * Send a GP Timer interrupt to DSP * The DSP expects a GP timer interrupt after an * MMU-Fault Request GPTimer */ if (timer) { omap_dm_timer_enable(timer); /* Enable overflow interrupt */ omap_dm_timer_set_int_enable(timer, GPTIMER_IRQ_OVERFLOW); /* * Set counter value to overflow counter after * one tick and start timer */ omap_dm_timer_set_load_start(timer, 0, 0xfffffffe); /* Wait 80us for timer to overflow */ udelay(80); /* Check interrupt status and */ /* wait for interrupt */ cnt = 0; while (!(omap_dm_timer_read_status(timer) & GPTIMER_IRQ_OVERFLOW)) { if (cnt++ >= GPTIMER_IRQ_WAIT_MAX_CNT) { pr_err("%s: GPTimer interrupt" " failed\n", __func__); break; } } } /* Clear MMU interrupt */ HW_MMU_EventAck(pDevContext->dwDSPMmuBase, HW_MMU_TRANSLATION_FAULT); dump_dsp_stack(hDehMgr->hWmdContext); if (timer) omap_dm_timer_disable(timer); break; #ifdef CONFIG_BRIDGE_NTFY_PWRERR case DSP_PWRERROR: /* reset errInfo structure before use */ pDehMgr->errInfo.dwErrMask = DSP_PWRERROR; pDehMgr->errInfo.dwVal1 = 0L; pDehMgr->errInfo.dwVal2 = 0L; pDehMgr->errInfo.dwVal3 = 0L; pDehMgr->errInfo.dwVal1 = dwErrInfo; printk(KERN_ERR "WMD_DEH_Notify: DSP_PWRERROR, errInfo " "= 0x%x\n", dwErrInfo); break; #endif /* CONFIG_BRIDGE_NTFY_PWRERR */ #ifdef CONFIG_BRIDGE_WDT3 case DSP_WDTOVERFLOW: pDehMgr->errInfo.dwErrMask = DSP_WDTOVERFLOW; pDehMgr->errInfo.dwVal1 = 0L; pDehMgr->errInfo.dwVal2 = 0L; pDehMgr->errInfo.dwVal3 = 0L; pr_err("WMD_DEH_Notify: DSP_WDTOVERFLOW \n "); break; #endif default: DBG_Trace(DBG_LEVEL6, "WMD_DEH_Notify: Unknown Error, errInfo = " "0x%x\n", dwErrInfo); break; } /* Filter subsequent notifications when an error occurs */ if (pDevContext->dwBrdState != BRD_ERROR) { NTFY_Notify(pDehMgr->hNtfy, ulEventMask); #ifdef CONFIG_BRIDGE_RECOVERY bridge_recover_schedule(); #endif } /* Set the Board state as ERROR */ pDevContext->dwBrdState = BRD_ERROR; /* Disable all the clocks that were enabled by DSP */ (void)DSP_PeripheralClocks_Disable(pDevContext, NULL); #ifdef CONFIG_BRIDGE_WDT3 /* * Avoid the subsequent WDT if it happens once, * also If MMU fault occurs */ dsp_wdt_enable(false); #endif } }
/*
 *  ======== WMD_MSG_Delete ========
 *      Delete a MSG manager allocated in WMD_MSG_Create().
 */
void WMD_MSG_Delete(struct MSG_MGR *hMsgMgr)
{
	if (MEM_IsValidHandle(hMsgMgr, MSGMGR_SIGNATURE))
		DeleteMsgMgr(hMsgMgr);
}
/* * ======== DBLL_load ======== */ DSP_STATUS DBLL_load(struct DBLL_LibraryObj *lib, DBLL_Flags flags, struct DBLL_Attrs *attrs, u32 *pEntry) { struct DBLL_LibraryObj *zlLib = (struct DBLL_LibraryObj *)lib; struct DBLL_TarObj *dbzl; bool gotSymbols = true; s32 err; DSP_STATUS status = DSP_SOK; bool fOpenedDoff = false; DBC_Require(cRefs > 0); DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE)); DBC_Require(pEntry != NULL); DBC_Require(attrs != NULL); GT_4trace(DBLL_debugMask, GT_ENTER, "DBLL_load: lib: 0x%x flags: 0x%x pEntry:" " 0x%x\n", lib, flags, attrs, pEntry); /* * Load if not already loaded. */ if (zlLib->loadRef == 0 || !(flags & DBLL_DYNAMIC)) { dbzl = zlLib->pTarget; dbzl->attrs = *attrs; /* Create a hash table for symbols if not already created */ if (zlLib->symTab == NULL) { gotSymbols = false; zlLib->symTab = GH_create(MAXBUCKETS, sizeof(struct Symbol), nameHash, nameMatch, symDelete); if (zlLib->symTab == NULL) status = DSP_EMEMORY; } /* * Set up objects needed by the dynamic loader */ /* Stream */ zlLib->stream.dlStream.read_buffer = readBuffer; zlLib->stream.dlStream.set_file_posn = setFilePosn; zlLib->stream.lib = zlLib; /* Symbol */ zlLib->symbol.dlSymbol.Find_Matching_Symbol = findSymbol; if (gotSymbols) { zlLib->symbol.dlSymbol.Add_To_Symbol_Table = findInSymbolTable; } else { zlLib->symbol.dlSymbol.Add_To_Symbol_Table = addToSymbolTable; } zlLib->symbol.dlSymbol.Purge_Symbol_Table = purgeSymbolTable; zlLib->symbol.dlSymbol.Allocate = allocate; zlLib->symbol.dlSymbol.Deallocate = deallocate; zlLib->symbol.dlSymbol.Error_Report = errorReport; zlLib->symbol.lib = zlLib; /* Allocate */ zlLib->allocate.dlAlloc.Allocate = rmmAlloc; zlLib->allocate.dlAlloc.Deallocate = rmmDealloc; zlLib->allocate.lib = zlLib; /* Init */ zlLib->init.dlInit.connect = connect; zlLib->init.dlInit.readmem = readMem; zlLib->init.dlInit.writemem = writeMem; zlLib->init.dlInit.fillmem = fillMem; zlLib->init.dlInit.execute = execute; zlLib->init.dlInit.release = release; zlLib->init.lib = zlLib; /* If COFF file is not open, we open it. */ if (zlLib->fp == NULL) { status = dofOpen(zlLib); if (DSP_SUCCEEDED(status)) fOpenedDoff = true; } if (DSP_SUCCEEDED(status)) { zlLib->ulPos = (*(zlLib->pTarget->attrs.ftell)) (zlLib->fp); /* Reset file cursor */ (*(zlLib->pTarget->attrs.fseek))(zlLib->fp, (long)0, SEEK_SET); bSymbolsReloaded = true; /* The 5th argument, DLOAD_INITBSS, tells the DLL * module to zero-init all BSS sections. In general, * this is not necessary and also increases load time. * We may want to make this configurable by the user */ err = Dynamic_Load_Module(&zlLib->stream.dlStream, &zlLib->symbol.dlSymbol, &zlLib->allocate.dlAlloc, &zlLib->init.dlInit, DLOAD_INITBSS, &zlLib->mHandle); if (err != 0) { GT_1trace(DBLL_debugMask, GT_6CLASS, "DBLL_load: " "Dynamic_Load_Module failed: 0x%lx\n", err); status = DSP_EDYNLOAD; } else if (bRedefinedSymbol) { zlLib->loadRef++; DBLL_unload(zlLib, (struct DBLL_Attrs *) attrs); bRedefinedSymbol = false; status = DSP_EDYNLOAD; } else { *pEntry = zlLib->entry; } } } if (DSP_SUCCEEDED(status)) zlLib->loadRef++; /* Clean up DOFF resources */ if (fOpenedDoff) dofClose(zlLib); DBC_Ensure(DSP_FAILED(status) || zlLib->loadRef > 0); return status; }
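/*
 * Illustrative sketch (not part of the driver): the open/load/unload/close
 * sequence around DBLL_load() above.  'target' and 'attrs' are assumed to
 * have been set up elsewhere (target creation is not shown in this excerpt),
 * and "/lib/dsp/node.dof64P" is a made-up file name.
 */
static DSP_STATUS example_dbll_run(struct DBLL_TarObj *target,
				   struct DBLL_Attrs *attrs, u32 *pEntry)
{
	struct DBLL_LibraryObj *lib = NULL;
	DSP_STATUS status;

	status = DBLL_open(target, "/lib/dsp/node.dof64P",
			   DBLL_SYMB | DBLL_DYNAMIC, &lib);
	if (DSP_FAILED(status))
		return status;

	/* Load code/data sections and get the entry point. */
	status = DBLL_load(lib, DBLL_DYNAMIC, attrs, pEntry);
	if (DSP_SUCCEEDED(status)) {
		/* ... run the node via its entry point ... */
		DBLL_unload(lib, attrs);
	}
	DBLL_close(lib);

	return status;
}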
/* * ======== WMD_CHNL_Open ======== * Open a new half-duplex channel to the DSP board. */ DSP_STATUS WMD_CHNL_Open(OUT struct CHNL_OBJECT **phChnl, struct CHNL_MGR *hChnlMgr, short int uMode, u32 uChnlId, CONST IN struct CHNL_ATTRS *pAttrs) { DSP_STATUS status = DSP_SOK; struct CHNL_MGR *pChnlMgr = hChnlMgr; struct CHNL_OBJECT *pChnl = NULL; struct SYNC_ATTRS *pSyncAttrs = NULL; struct SYNC_OBJECT *hSyncEvent = NULL; /* Ensure DBC requirements: */ DBC_Require(phChnl != NULL); DBC_Require(pAttrs != NULL); DBC_Require(hChnlMgr != NULL); *phChnl = NULL; /* Validate Args: */ if (pAttrs->uIOReqs == 0) { status = DSP_EINVALIDARG; } else { if (!MEM_IsValidHandle(hChnlMgr, CHNL_MGRSIGNATURE)) { status = DSP_EHANDLE; } else { if (uChnlId != CHNL_PICKFREE) { if (uChnlId >= pChnlMgr->cChannels) status = CHNL_E_BADCHANID; else if (pChnlMgr->apChannel[uChnlId] != NULL) status = CHNL_E_CHANBUSY; } else { /* Check for free channel */ status = SearchFreeChannel(pChnlMgr, &uChnlId); } } } if (DSP_FAILED(status)) goto func_end; DBC_Assert(uChnlId < pChnlMgr->cChannels); /* Create channel object: */ MEM_AllocObject(pChnl, struct CHNL_OBJECT, 0x0000); if (!pChnl) { status = DSP_EMEMORY; goto func_end; } /* Protect queues from IO_DPC: */ pChnl->dwState = CHNL_STATECANCEL; /* Allocate initial IOR and IOC queues: */ pChnl->pFreeList = CreateChirpList(pAttrs->uIOReqs); pChnl->pIORequests = CreateChirpList(0); pChnl->pIOCompletions = CreateChirpList(0); pChnl->cChirps = pAttrs->uIOReqs; pChnl->cIOCs = 0; pChnl->cIOReqs = 0; status = SYNC_OpenEvent(&hSyncEvent, pSyncAttrs); if (DSP_SUCCEEDED(status)) status = NTFY_Create(&pChnl->hNtfy); if (DSP_SUCCEEDED(status)) { if (pChnl->pIOCompletions && pChnl->pIORequests && pChnl->pFreeList) { /* Initialize CHNL object fields: */ pChnl->pChnlMgr = pChnlMgr; pChnl->uId = uChnlId; pChnl->uMode = uMode; pChnl->hUserEvent = hSyncEvent; /* for Linux */ pChnl->hSyncEvent = hSyncEvent; /* Get the process handle */ pChnl->hProcess = current->tgid; pChnl->pCBArg = 0; pChnl->cBytesMoved = 0; /* Default to proc-copy */ pChnl->uChnlType = CHNL_PCPY; } else { status = DSP_EMEMORY; } } if (DSP_FAILED(status)) { /* Free memory */ if (pChnl->pIOCompletions) { FreeChirpList(pChnl->pIOCompletions); pChnl->pIOCompletions = NULL; pChnl->cIOCs = 0; } if (pChnl->pIORequests) { FreeChirpList(pChnl->pIORequests); pChnl->pIORequests = NULL; } if (pChnl->pFreeList) { FreeChirpList(pChnl->pFreeList); pChnl->pFreeList = NULL; } if (hSyncEvent) { SYNC_CloseEvent(hSyncEvent); hSyncEvent = NULL; } if (pChnl->hNtfy) { NTFY_Delete(pChnl->hNtfy); pChnl->hNtfy = NULL; } MEM_FreeObject(pChnl); } else { /* Insert channel object in channel manager: */ pChnlMgr->apChannel[pChnl->uId] = pChnl; SYNC_EnterCS(pChnlMgr->hCSObj); pChnlMgr->cOpenChannels++; SYNC_LeaveCS(pChnlMgr->hCSObj); /* Return result... */ pChnl->dwSignature = CHNL_SIGNATURE; pChnl->dwState = CHNL_STATEREADY; *phChnl = pChnl; } func_end: DBC_Ensure((DSP_SUCCEEDED(status) && MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) || (*phChnl == NULL)); return status; }
/* * ======== WMD_MSG_Put ======== * Put a message onto a MSG queue. */ DSP_STATUS WMD_MSG_Put(struct MSG_QUEUE *hMsgQueue, IN CONST struct DSP_MSG *pMsg, u32 uTimeout) { struct MSG_FRAME *pMsgFrame; struct MSG_MGR *hMsgMgr; bool fPutMsg = false; struct SYNC_OBJECT *hSyncs[2]; u32 uIndex; DSP_STATUS status = DSP_SOK; DBC_Require(MEM_IsValidHandle(hMsgQueue, MSGQ_SIGNATURE)); DBC_Require(pMsg != NULL); hMsgMgr = hMsgQueue->hMsgMgr; if (!hMsgMgr->msgFreeList) { status = DSP_EHANDLE; goto func_end; } (void) SYNC_EnterCS(hMsgMgr->hSyncCS); /* If a message frame is available, use it */ if (!LST_IsEmpty(hMsgMgr->msgFreeList)) { pMsgFrame = (struct MSG_FRAME *)LST_GetHead(hMsgMgr-> msgFreeList); if (pMsgFrame != NULL) { pMsgFrame->msgData.msg = *pMsg; pMsgFrame->msgData.dwId = hMsgQueue->dwId; LST_PutTail(hMsgMgr->msgUsedList, (struct LST_ELEM *) pMsgFrame); hMsgMgr->uMsgsPending++; fPutMsg = true; } if (LST_IsEmpty(hMsgMgr->msgFreeList)) SYNC_ResetEvent(hMsgMgr->hSyncEvent); /* Release critical section before scheduling DPC */ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); /* Schedule a DPC, to do the actual data transfer: */ IO_Schedule(hMsgMgr->hIOMgr); } else { if (hMsgQueue->fDone) status = DSP_EFAIL; else hMsgQueue->refCount++; (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); } if (DSP_SUCCEEDED(status) && !fPutMsg) { /* Wait til a free message frame is available, timeout, * or done */ hSyncs[0] = hMsgMgr->hSyncEvent; hSyncs[1] = hMsgQueue->hSyncDone; status = SYNC_WaitOnMultipleEvents(hSyncs, 2, uTimeout, &uIndex); /* Enter critical section */ (void)SYNC_EnterCS(hMsgMgr->hSyncCS); if (hMsgQueue->fDone) { hMsgQueue->refCount--; /* Exit critical section */ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); /* Signal that we're not going to access hMsgQueue * anymore, so it can be deleted. */ (void)SYNC_SetEvent(hMsgQueue->hSyncDoneAck); status = DSP_EFAIL; } else { if (DSP_SUCCEEDED(status)) { if (LST_IsEmpty(hMsgMgr->msgFreeList)) { status = DSP_EPOINTER; goto func_cont; } /* Get msg from free list */ pMsgFrame = (struct MSG_FRAME *) LST_GetHead(hMsgMgr->msgFreeList); /* Copy message into pMsg and put frame on the * used list */ if (pMsgFrame != NULL) { pMsgFrame->msgData.msg = *pMsg; pMsgFrame->msgData.dwId = hMsgQueue->dwId; LST_PutTail(hMsgMgr->msgUsedList, (struct LST_ELEM *) pMsgFrame); hMsgMgr->uMsgsPending++; /* Schedule a DPC, to do the actual * data transfer: */ IO_Schedule(hMsgMgr->hIOMgr); } } hMsgQueue->refCount--; /* Reset event if there are still frames available */ if (!LST_IsEmpty(hMsgMgr->msgFreeList)) SYNC_SetEvent(hMsgMgr->hSyncEvent); func_cont: /* Exit critical section */ (void) SYNC_LeaveCS(hMsgMgr->hSyncCS); } } func_end: return status; }
/*
 *  ======== WMD_CHNL_GetIOC ========
 *      Optionally wait for I/O completion on a channel. Dequeue an I/O
 *      completion record, which contains information about the completed
 *      I/O request.
 *      Note: Ensures Channel Invariant (see notes above).
 */
DSP_STATUS WMD_CHNL_GetIOC(struct CHNL_OBJECT *hChnl, u32 dwTimeOut,
			  OUT struct CHNL_IOC *pIOC)
{
	DSP_STATUS status = DSP_SOK;
	struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl;
	struct CHNL_IRP *pChirp;
	DSP_STATUS statSync;
	bool fDequeueIOC = true;
	struct CHNL_IOC ioc = { NULL, 0, 0, 0, 0 };
	u8 *pHostSysBuf = NULL;

	DBG_Trace(DBG_ENTER, "> WMD_CHNL_GetIOC pChnl %p CHNL_IsOutput %x "
		 "uChnlType %x\n", pChnl, CHNL_IsOutput(pChnl->uMode),
		 pChnl->uChnlType);
	/* Check args: */
	if (pIOC == NULL) {
		status = DSP_EPOINTER;
	} else if (!MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) {
		status = DSP_EHANDLE;
	} else if (dwTimeOut == CHNL_IOCNOWAIT) {
		if (LST_IsEmpty(pChnl->pIOCompletions))
			status = CHNL_E_NOIOC;
	}
	if (DSP_FAILED(status))
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	if (dwTimeOut != CHNL_IOCNOWAIT &&
	    LST_IsEmpty(pChnl->pIOCompletions)) {
		if (dwTimeOut == CHNL_IOCINFINITE)
			dwTimeOut = SYNC_INFINITE;

		statSync = SYNC_WaitOnEvent(pChnl->hSyncEvent, dwTimeOut);
		if (statSync == DSP_ETIMEOUT) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			fDequeueIOC = false;
		} else if (statSync == DSP_EFAIL) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes. */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (LST_IsEmpty(pChnl->pIOCompletions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				fDequeueIOC = false;
			}
		}
	}
	/* See comment in AddIOReq */
	SYNC_EnterCS(pChnl->pChnlMgr->hCSObj);
	disable_irq(MAILBOX_IRQ);
	if (fDequeueIOC) {
		/* Dequeue IOC and set pIOC; */
		DBC_Assert(!LST_IsEmpty(pChnl->pIOCompletions));
		pChirp = (struct CHNL_IRP *)
			LST_GetHead(pChnl->pIOCompletions);
		/* Update pIOC from channel state and chirp: */
		if (pChirp) {
			pChnl->cIOCs--;
			/* If this is a zero-copy channel, then set IOC's pBuf
			 * to the DSP's address. This DSP address will get
			 * translated to user's virtual addr later. */
			{
				pHostSysBuf = pChirp->pHostSysBuf;
				ioc.pBuf = pChirp->pHostUserBuf;
			}
			ioc.cBytes = pChirp->cBytes;
			ioc.cBufSize = pChirp->cBufSize;
			ioc.dwArg = pChirp->dwArg;
			ioc.status |= pChirp->status;
			/* Place the used chirp on the free list: */
			LST_PutTail(pChnl->pFreeList,
				   (struct LST_ELEM *)pChirp);
		} else {
			ioc.pBuf = NULL;
			ioc.cBytes = 0;
		}
	} else {
		ioc.pBuf = NULL;
		ioc.cBytes = 0;
		ioc.dwArg = 0;
		ioc.cBufSize = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!LST_IsEmpty(pChnl->pIOCompletions)) {
		/* Since DSPStream_Reclaim() does not take a timeout
		 * parameter, we pass the stream's timeout value to
		 * WMD_CHNL_GetIOC. We cannot determine whether or not
		 * we have waited in User mode. Since the stream's timeout
		 * value may be non-zero, we still have to set the event.
		 * Therefore, this optimization is taken out.
		 *
		 * if (dwTimeOut == CHNL_IOCNOWAIT) {
		 *	... ensure event is set..
		 *	SYNC_SetEvent(pChnl->hSyncEvent);
		 * }
		 */
		SYNC_SetEvent(pChnl->hSyncEvent);
	} else {
		/* else, if list is empty, ensure event is reset. */
		SYNC_ResetEvent(pChnl->hSyncEvent);
	}
	enable_irq(MAILBOX_IRQ);
	SYNC_LeaveCS(pChnl->pChnlMgr->hCSObj);
	if (fDequeueIOC && (pChnl->uChnlType == CHNL_PCPY && pChnl->uId > 1)) {
		if (!(ioc.pBuf < (void *)USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!pHostSysBuf || !ioc.pBuf) {
			status = DSP_EPOINTER;
			DBG_Trace(DBG_LEVEL7,
				 "System buffer NULL in IO completion.\n");
			goto func_cont;
		}
		if (!CHNL_IsInput(pChnl->uMode))
			goto func_cont1;

		/* pHostUserBuf */
		status = copy_to_user(ioc.pBuf, pHostSysBuf, ioc.cBytes);
#ifndef RES_CLEANUP_DISABLE
		if (status) {
			if (current->flags & PF_EXITING) {
				DBG_Trace(DBG_LEVEL7,
					 "current->flags == PF_EXITING, "
					 "current->flags: 0x%x\n",
					 current->flags);
				status = 0;
			} else {
				DBG_Trace(DBG_LEVEL7,
					 "current->flags != PF_EXITING, "
					 "current->flags: 0x%x\n",
					 current->flags);
			}
		}
#endif
		if (status) {
			DBG_Trace(DBG_LEVEL7,
				 "Error copying kernel buffer to user, %d"
				 " bytes remaining, in_interrupt %d\n",
				 status, in_interrupt());
			status = DSP_EPOINTER;
		}
func_cont1:
		MEM_Free(pHostSysBuf);
	}
func_cont:
	/* Update User's IOC block: */
	*pIOC = ioc;
func_end:
	DBG_Trace(DBG_ENTER, "< WMD_CHNL_GetIOC pChnl %p\n", pChnl);
	return status;
}
/* * ======== WMD_MSG_Get ======== * Get a message from a MSG queue. */ DSP_STATUS WMD_MSG_Get(struct MSG_QUEUE *hMsgQueue, struct DSP_MSG *pMsg, u32 uTimeout) { struct MSG_FRAME *pMsgFrame; struct MSG_MGR *hMsgMgr; bool fGotMsg = false; struct SYNC_OBJECT *hSyncs[2]; u32 uIndex; DSP_STATUS status = DSP_SOK; DBC_Require(MEM_IsValidHandle(hMsgQueue, MSGQ_SIGNATURE)); DBC_Require(pMsg != NULL); hMsgMgr = hMsgQueue->hMsgMgr; if (!hMsgQueue->msgUsedList) { status = DSP_EHANDLE; goto func_end; } /* Enter critical section */ (void)SYNC_EnterCS(hMsgMgr->hSyncCS); /* If a message is already there, get it */ if (!LST_IsEmpty(hMsgQueue->msgUsedList)) { pMsgFrame = (struct MSG_FRAME *)LST_GetHead(hMsgQueue-> msgUsedList); if (pMsgFrame != NULL) { *pMsg = pMsgFrame->msgData.msg; LST_PutTail(hMsgQueue->msgFreeList, (struct LST_ELEM *)pMsgFrame); if (LST_IsEmpty(hMsgQueue->msgUsedList)) SYNC_ResetEvent(hMsgQueue->hSyncEvent); else { NTFY_Notify(hMsgQueue->hNtfy, DSP_NODEMESSAGEREADY); SYNC_SetEvent(hMsgQueue->hSyncEvent); } fGotMsg = true; } } else { if (hMsgQueue->fDone) status = DSP_EFAIL; else hMsgQueue->refCount++; } /* Exit critical section */ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); if (DSP_SUCCEEDED(status) && !fGotMsg) { /* Wait til message is available, timeout, or done. We don't * have to schedule the DPC, since the DSP will send messages * when they are available. */ hSyncs[0] = hMsgQueue->hSyncEvent; hSyncs[1] = hMsgQueue->hSyncDone; status = SYNC_WaitOnMultipleEvents(hSyncs, 2, uTimeout, &uIndex); /* Enter critical section */ (void)SYNC_EnterCS(hMsgMgr->hSyncCS); if (hMsgQueue->fDone) { hMsgQueue->refCount--; /* Exit critical section */ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); /* Signal that we're not going to access hMsgQueue * anymore, so it can be deleted. */ (void)SYNC_SetEvent(hMsgQueue->hSyncDoneAck); status = DSP_EFAIL; } else { if (DSP_SUCCEEDED(status)) { DBC_Assert(!LST_IsEmpty(hMsgQueue-> msgUsedList)); /* Get msg from used list */ pMsgFrame = (struct MSG_FRAME *) LST_GetHead(hMsgQueue->msgUsedList); /* Copy message into pMsg and put frame on the * free list */ if (pMsgFrame != NULL) { *pMsg = pMsgFrame->msgData.msg; LST_PutTail(hMsgQueue->msgFreeList, (struct LST_ELEM *)pMsgFrame); } } hMsgQueue->refCount--; /* Reset the event if there are still queued messages */ if (!LST_IsEmpty(hMsgQueue->msgUsedList)) { NTFY_Notify(hMsgQueue->hNtfy, DSP_NODEMESSAGEREADY); SYNC_SetEvent(hMsgQueue->hSyncEvent); } /* Exit critical section */ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); } } func_end: return status; }
/*
 *  ======== WMD_MSG_Delete ========
 *      Delete a MSG manager allocated in WMD_MSG_Create().
 */
void WMD_MSG_Delete(struct MSG_MGR *hMsgMgr)
{
	DBC_Require(MEM_IsValidHandle(hMsgMgr, MSGMGR_SIGNATURE));

	DeleteMsgMgr(hMsgMgr);
}
/* * ======== WMD_MSG_CreateQueue ======== * Create a MSG_QUEUE for sending/receiving messages to/from a node * on the DSP. */ DSP_STATUS WMD_MSG_CreateQueue(struct MSG_MGR *hMsgMgr, OUT struct MSG_QUEUE **phMsgQueue, u32 dwId, u32 uMaxMsgs, HANDLE hArg) { u32 i; u32 uNumAllocated = 0; struct MSG_QUEUE *pMsgQ; DSP_STATUS status = DSP_SOK; DBC_Require(MEM_IsValidHandle(hMsgMgr, MSGMGR_SIGNATURE)); DBC_Require(phMsgQueue != NULL); *phMsgQueue = NULL; /* Allocate MSG_QUEUE object */ MEM_AllocObject(pMsgQ, struct MSG_QUEUE, MSGQ_SIGNATURE); if (!pMsgQ) { status = DSP_EMEMORY; goto func_end; } LST_InitElem((struct LST_ELEM *) pMsgQ); pMsgQ->uMaxMsgs = uMaxMsgs; pMsgQ->hMsgMgr = hMsgMgr; pMsgQ->hArg = hArg; /* Node handle */ pMsgQ->dwId = dwId; /* Node env (not valid yet) */ /* Queues of Message frames for messages from the DSP */ pMsgQ->msgFreeList = LST_Create(); pMsgQ->msgUsedList = LST_Create(); if (pMsgQ->msgFreeList == NULL || pMsgQ->msgUsedList == NULL) status = DSP_EMEMORY; /* Create event that will be signalled when a message from * the DSP is available. */ if (DSP_SUCCEEDED(status)) status = SYNC_OpenEvent(&pMsgQ->hSyncEvent, NULL); /* Create a notification list for message ready notification. */ if (DSP_SUCCEEDED(status)) status = NTFY_Create(&pMsgQ->hNtfy); /* Create events that will be used to synchronize cleanup * when the object is deleted. hSyncDone will be set to * unblock threads in MSG_Put() or MSG_Get(). hSyncDoneAck * will be set by the unblocked thread to signal that it * is unblocked and will no longer reference the object. */ if (DSP_SUCCEEDED(status)) status = SYNC_OpenEvent(&pMsgQ->hSyncDone, NULL); if (DSP_SUCCEEDED(status)) status = SYNC_OpenEvent(&pMsgQ->hSyncDoneAck, NULL); if (DSP_SUCCEEDED(status)) { if (!hMsgMgr->msgFreeList) { status = DSP_EHANDLE; goto func_end; } /* Enter critical section */ (void)SYNC_EnterCS(hMsgMgr->hSyncCS); /* Initialize message frames and put in appropriate queues */ for (i = 0; i < uMaxMsgs && DSP_SUCCEEDED(status); i++) { status = AddNewMsg(hMsgMgr->msgFreeList); if (DSP_SUCCEEDED(status)) { uNumAllocated++; status = AddNewMsg(pMsgQ->msgFreeList); } } if (DSP_FAILED(status)) { /* Stay inside CS to prevent others from taking any * of the newly allocated message frames. */ DeleteMsgQueue(pMsgQ, uNumAllocated); } else { LST_PutTail(hMsgMgr->queueList, (struct LST_ELEM *)pMsgQ); *phMsgQueue = pMsgQ; /* Signal that free frames are now available */ if (!LST_IsEmpty(hMsgMgr->msgFreeList)) SYNC_SetEvent(hMsgMgr->hSyncEvent); } /* Exit critical section */ (void)SYNC_LeaveCS(hMsgMgr->hSyncCS); } else { DeleteMsgQueue(pMsgQ, 0); } func_end: return status; }
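/*
 * Illustrative sketch (not part of the driver): creating a message queue
 * with WMD_MSG_CreateQueue() and exchanging one message with the Put/Get
 * functions above.  The node environment id (dwId), the queue depth and
 * the 1000 ms timeouts are hypothetical, and the message payload is left
 * zeroed so no assumptions are made about the DSP_MSG field layout.
 */
static DSP_STATUS example_msg_roundtrip(struct MSG_MGR *hMsgMgr, u32 dwId,
					HANDLE hNode)
{
	struct MSG_QUEUE *hQueue = NULL;
	struct DSP_MSG msg;
	DSP_STATUS status;

	status = WMD_MSG_CreateQueue(hMsgMgr, &hQueue, dwId,
				     4 /* uMaxMsgs, hypothetical */, hNode);
	if (DSP_FAILED(status))
		return status;

	memset(&msg, 0, sizeof(msg));
	status = WMD_MSG_Put(hQueue, &msg, 1000);
	if (DSP_SUCCEEDED(status))
		status = WMD_MSG_Get(hQueue, &msg, 1000);

	/* Queue deletion is handled elsewhere and is not part of this excerpt */
	return status;
}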
/*
 *  ======== MGR_EnumProcessorInfo ========
 *      Enumerate and get configuration information about available
 *      DSP processors.
 */
DSP_STATUS MGR_EnumProcessorInfo(u32 uProcessor,
				OUT struct DSP_PROCESSORINFO *pProcessorInfo,
				u32 uProcessorInfoSize, OUT u32 *puNumProcs)
{
	DSP_STATUS status = DSP_SOK;
	DSP_STATUS status1 = DSP_SOK;
	DSP_STATUS status2 = DSP_SOK;
	struct DSP_UUID uTempUuid;
	u32 uTempIndex = 0;
	u32 uProcIndex = 0;
	struct DCD_GENERICOBJ GenObj;
	struct MGR_OBJECT *pMgrObject = NULL;
	struct MGR_PROCESSOREXTINFO *pExtInfo;
	struct DEV_OBJECT *hDevObject;
	struct DRV_OBJECT *hDrvObject;
	s32 devType;
	struct CFG_DEVNODE *devNode;
	struct CFG_DSPRES chipResources;
	bool procDetect = false;

	DBC_Require(pProcessorInfo != NULL);
	DBC_Require(puNumProcs != NULL);
	DBC_Require(uProcessorInfoSize >= sizeof(struct DSP_PROCESSORINFO));
	DBC_Require(cRefs > 0);

	GT_4trace(MGR_DebugMask, GT_ENTER,
		 "Entered Manager_EnumProcessorInfo, "
		 "args:\n\tuProcessor: 0x%x\n\tpProcessorInfo: 0x%x\n\t"
		 "uProcessorInfoSize: 0x%x\tpuNumProcs: 0x%x\n", uProcessor,
		 pProcessorInfo, uProcessorInfoSize, puNumProcs);
	*puNumProcs = 0;
	status = CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT);
	if (DSP_SUCCEEDED(status)) {
		status = DRV_GetDevObject(uProcessor, hDrvObject, &hDevObject);
		if (DSP_SUCCEEDED(status)) {
			status = DEV_GetDevType(hDevObject, (u32 *)&devType);
			status = DEV_GetDevNode(hDevObject, &devNode);
			if (devType == DSP_UNIT) {
				status = CFG_GetDSPResources(devNode,
							    &chipResources);
			} else {
				status = DSP_EFAIL;
				GT_1trace(MGR_DebugMask, GT_7CLASS,
					 "Unsupported dev type %d read "
					 "from device object\n", devType);
			}
			if (DSP_SUCCEEDED(status)) {
				pProcessorInfo->uProcessorType =
					chipResources.uChipType;
			}
		}
	}
	if (DSP_FAILED(status))
		goto func_end;

	/* Get the Manager Object from the Registry */
	if (DSP_FAILED(CFG_GetObject((u32 *)&pMgrObject, REG_MGR_OBJECT))) {
		GT_0trace(MGR_DebugMask, GT_7CLASS,
			 "Manager_EnumProcessorInfo: "
			 "Failed To Get MGR Object from Registry\r\n");
		goto func_end;
	}
	DBC_Assert(MEM_IsValidHandle(pMgrObject, SIGNATURE));
	/* Enumerate until there are no more items; any status other than
	 * DSP_SOK exits the loop. */
	while (status1 == DSP_SOK) {
		status1 = DCD_EnumerateObject(uTempIndex++,
					     DSP_DCDPROCESSORTYPE,
					     &uTempUuid);
		if (status1 != DSP_SOK)
			break;

		uProcIndex++;
		/* Get the Object properties to find the Device/Processor
		 * Type */
		if (procDetect != false)
			continue;

		status2 = DCD_GetObjectDef(pMgrObject->hDcdMgr,
					  (struct DSP_UUID *)&uTempUuid,
					  DSP_DCDPROCESSORTYPE, &GenObj);
		if (DSP_SUCCEEDED(status2)) {
			/* Get the Obj def */
			if (uProcessorInfoSize <
			    sizeof(struct MGR_PROCESSOREXTINFO)) {
				*pProcessorInfo = GenObj.objData.procObj;
			} else {
				/* extended info */
				pExtInfo = (struct MGR_PROCESSOREXTINFO *)
					  pProcessorInfo;
				*pExtInfo = GenObj.objData.extProcObj;
			}
			GT_1trace(MGR_DebugMask, GT_7CLASS,
				 "Manager_EnumProcessorInfo: Got"
				 " Proctype from DCD %x \r\n",
				 pProcessorInfo->uProcessorType);
			/* See if we got the needed processor */
			if (devType == DSP_UNIT) {
				if (pProcessorInfo->uProcessorType ==
				    DSPPROCTYPE_C64)
					procDetect = true;
			} else if (devType == IVA_UNIT) {
				if (pProcessorInfo->uProcessorType ==
				    IVAPROCTYPE_ARM7)
					procDetect = true;
			}
			/* User applications only check the chip type, so
			 * overwrite the processor type here */
			pProcessorInfo->uProcessorType =
				chipResources.uChipType;
		} else {
			GT_1trace(MGR_DebugMask, GT_7CLASS,
				 "Manager_EnumProcessorInfo: "
				 "Failed to Get DCD Processor Info %x \r\n",
				 status2);
			status = DSP_EFAIL;
		}
	}
	*puNumProcs = uProcIndex;
	if (procDetect == false) {
		GT_0trace(MGR_DebugMask, GT_7CLASS,
			 "Manager_EnumProcessorInfo: Failed"
			 " to get Proc info from DCD, so use CFG registry\n");
		pProcessorInfo->uProcessorType = chipResources.uChipType;
	}
func_end:
	return status;
}
/* * ======== DBLL_open ======== */ DSP_STATUS DBLL_open(struct DBLL_TarObj *target, char *file, DBLL_Flags flags, struct DBLL_LibraryObj **pLib) { struct DBLL_TarObj *zlTarget = (struct DBLL_TarObj *)target; struct DBLL_LibraryObj *zlLib = NULL; s32 err; DSP_STATUS status = DSP_SOK; DBC_Require(cRefs > 0); DBC_Require(MEM_IsValidHandle(zlTarget, DBLL_TARGSIGNATURE)); DBC_Require(zlTarget->attrs.fopen != NULL); DBC_Require(file != NULL); DBC_Require(pLib != NULL); GT_3trace(DBLL_debugMask, GT_ENTER, "DBLL_open: target: 0x%x file: %s pLib:" " 0x%x\n", target, file, pLib); zlLib = zlTarget->head; while (zlLib != NULL) { if (strcmp(zlLib->fileName, file) == 0) { /* Library is already opened */ zlLib->openRef++; break; } zlLib = zlLib->next; } if (zlLib == NULL) { /* Allocate DBL library object */ MEM_AllocObject(zlLib, struct DBLL_LibraryObj, DBLL_LIBSIGNATURE); if (zlLib == NULL) { GT_0trace(DBLL_debugMask, GT_6CLASS, "DBLL_open: Memory allocation failed\n"); status = DSP_EMEMORY; } else { zlLib->ulPos = 0; /* Increment ref count to allow close on failure * later on */ zlLib->openRef++; zlLib->pTarget = zlTarget; /* Keep a copy of the file name */ zlLib->fileName = MEM_Calloc(strlen(file) + 1, MEM_PAGED); if (zlLib->fileName == NULL) { GT_0trace(DBLL_debugMask, GT_6CLASS, "DBLL_open: Memory " "allocation failed\n"); status = DSP_EMEMORY; } else { strncpy(zlLib->fileName, file, strlen(file) + 1); } zlLib->symTab = NULL; } } /* * Set up objects needed by the dynamic loader */ if (DSP_FAILED(status)) goto func_cont; /* Stream */ zlLib->stream.dlStream.read_buffer = readBuffer; zlLib->stream.dlStream.set_file_posn = setFilePosn; zlLib->stream.lib = zlLib; /* Symbol */ zlLib->symbol.dlSymbol.Add_To_Symbol_Table = addToSymbolTable; zlLib->symbol.dlSymbol.Find_Matching_Symbol = findSymbol; zlLib->symbol.dlSymbol.Purge_Symbol_Table = purgeSymbolTable; zlLib->symbol.dlSymbol.Allocate = allocate; zlLib->symbol.dlSymbol.Deallocate = deallocate; zlLib->symbol.dlSymbol.Error_Report = errorReport; zlLib->symbol.lib = zlLib; /* Allocate */ zlLib->allocate.dlAlloc.Allocate = rmmAlloc; zlLib->allocate.dlAlloc.Deallocate = rmmDealloc; zlLib->allocate.lib = zlLib; /* Init */ zlLib->init.dlInit.connect = connect; zlLib->init.dlInit.readmem = readMem; zlLib->init.dlInit.writemem = writeMem; zlLib->init.dlInit.fillmem = fillMem; zlLib->init.dlInit.execute = execute; zlLib->init.dlInit.release = release; zlLib->init.lib = zlLib; if (DSP_SUCCEEDED(status) && zlLib->fp == NULL) status = dofOpen(zlLib); zlLib->ulPos = (*(zlLib->pTarget->attrs.ftell)) (zlLib->fp); (*(zlLib->pTarget->attrs.fseek))(zlLib->fp, (long) 0, SEEK_SET); /* Create a hash table for symbols if flag is set */ if (zlLib->symTab != NULL || !(flags & DBLL_SYMB)) goto func_cont; zlLib->symTab = GH_create(MAXBUCKETS, sizeof(struct Symbol), nameHash, nameMatch, symDelete); if (zlLib->symTab == NULL) { status = DSP_EMEMORY; } else { /* Do a fake load to get symbols - set write function to NoOp */ zlLib->init.dlInit.writemem = NoOp; err = Dynamic_Open_Module(&zlLib->stream.dlStream, &zlLib->symbol.dlSymbol, &zlLib->allocate.dlAlloc, &zlLib->init.dlInit, 0, &zlLib->mHandle); if (err != 0) { GT_1trace(DBLL_debugMask, GT_6CLASS, "DBLL_open: " "Dynamic_Load_Module failed: 0x%lx\n", err); status = DSP_EDYNLOAD; } else { /* Now that we have the symbol table, we can unload */ err = Dynamic_Unload_Module(zlLib->mHandle, &zlLib->symbol.dlSymbol, &zlLib->allocate.dlAlloc, &zlLib->init.dlInit); if (err != 0) { GT_1trace(DBLL_debugMask, GT_6CLASS, "DBLL_open: " 
"Dynamic_Unload_Module failed: 0x%lx\n", err); status = DSP_EDYNLOAD; } zlLib->mHandle = NULL; } } func_cont: if (DSP_SUCCEEDED(status)) { if (zlLib->openRef == 1) { /* First time opened - insert in list */ if (zlTarget->head) (zlTarget->head)->prev = zlLib; zlLib->prev = NULL; zlLib->next = zlTarget->head; zlTarget->head = zlLib; } *pLib = (struct DBLL_LibraryObj *)zlLib; } else { *pLib = NULL; if (zlLib != NULL) DBLL_close((struct DBLL_LibraryObj *)zlLib); } DBC_Ensure((DSP_SUCCEEDED(status) && (zlLib->openRef > 0) && MEM_IsValidHandle(((struct DBLL_LibraryObj *)(*pLib)), DBLL_LIBSIGNATURE)) || (DSP_FAILED(status) && *pLib == NULL)); return status; }
/* * ======== WMD_CHNL_GetIOC ======== * Optionally wait for I/O completion on a channel. Dequeue an I/O * completion record, which contains information about the completed * I/O request. * Note: Ensures Channel Invariant (see notes above). */ DSP_STATUS WMD_CHNL_GetIOC(struct CHNL_OBJECT *hChnl, u32 dwTimeOut, OUT struct CHNL_IOC *pIOC) { DSP_STATUS status = DSP_SOK; struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl; struct CHNL_IRP *pChirp; DSP_STATUS statSync; bool fDequeueIOC = true; struct CHNL_IOC ioc = { NULL, 0, 0, 0, 0 }; u8 *pHostSysBuf = NULL; struct WMD_DEV_CONTEXT *dev_ctxt; struct DEV_OBJECT *dev_obj; /* Check args: */ if (pIOC == NULL) { status = DSP_EPOINTER; } else if (!MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) { status = DSP_EHANDLE; } else if (dwTimeOut == CHNL_IOCNOWAIT) { if (LST_IsEmpty(pChnl->pIOCompletions)) status = CHNL_E_NOIOC; } dev_obj = DEV_GetFirst(); DEV_GetWMDContext(dev_obj, &dev_ctxt); if (!dev_ctxt) status = DSP_EHANDLE; if (DSP_FAILED(status)) goto func_end; ioc.status = CHNL_IOCSTATCOMPLETE; if (dwTimeOut != CHNL_IOCNOWAIT && LST_IsEmpty(pChnl->pIOCompletions)) { if (dwTimeOut == CHNL_IOCINFINITE) dwTimeOut = SYNC_INFINITE; statSync = SYNC_WaitOnEvent(pChnl->hSyncEvent, dwTimeOut); if (statSync == DSP_ETIMEOUT) { /* No response from DSP */ ioc.status |= CHNL_IOCSTATTIMEOUT; fDequeueIOC = false; } else if (statSync == DSP_EFAIL) { /* This can occur when the user mode thread is * aborted (^C), or when _VWIN32_WaitSingleObject() * fails due to unkown causes. */ /* Even though Wait failed, there may be something in * the Q: */ if (LST_IsEmpty(pChnl->pIOCompletions)) { ioc.status |= CHNL_IOCSTATCANCEL; fDequeueIOC = false; } } } /* See comment in AddIOReq */ SYNC_EnterCS(pChnl->pChnlMgr->hCSObj); omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX); if (fDequeueIOC) { /* Dequeue IOC and set pIOC; */ DBC_Assert(!LST_IsEmpty(pChnl->pIOCompletions)); pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl->pIOCompletions); /* Update pIOC from channel state and chirp: */ if (pChirp) { pChnl->cIOCs--; /* If this is a zero-copy channel, then set IOC's pBuf * to the DSP's address. This DSP address will get * translated to user's virtual addr later. */ { pHostSysBuf = pChirp->pHostSysBuf; ioc.pBuf = pChirp->pHostUserBuf; } ioc.cBytes = pChirp->cBytes; ioc.cBufSize = pChirp->cBufSize; ioc.dwArg = pChirp->dwArg; ioc.status |= pChirp->status; /* Place the used chirp on the free list: */ LST_PutTail(pChnl->pFreeList, (struct list_head *)pChirp); } else { ioc.pBuf = NULL; ioc.cBytes = 0; } } else { ioc.pBuf = NULL; ioc.cBytes = 0; ioc.dwArg = 0; ioc.cBufSize = 0; } /* Ensure invariant: If any IOC's are queued for this channel... */ if (!LST_IsEmpty(pChnl->pIOCompletions)) { /* Since DSPStream_Reclaim() does not take a timeout * parameter, we pass the stream's timeout value to * WMD_CHNL_GetIOC. We cannot determine whether or not * we have waited in User mode. Since the stream's timeout * value may be non-zero, we still have to set the event. * Therefore, this optimization is taken out. * * if (dwTimeOut == CHNL_IOCNOWAIT) { * ... ensure event is set.. * SYNC_SetEvent(pChnl->hSyncEvent); * } */ SYNC_SetEvent(pChnl->hSyncEvent); } else { /* else, if list is empty, ensure event is reset. 
*/ SYNC_ResetEvent(pChnl->hSyncEvent); } omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX); SYNC_LeaveCS(pChnl->pChnlMgr->hCSObj); if (fDequeueIOC && (pChnl->uChnlType == CHNL_PCPY && pChnl->uId > 1)) { if (!(ioc.pBuf < (void *) USERMODE_ADDR)) goto func_cont; /* If the addr is in user mode, then copy it */ if (!pHostSysBuf || !ioc.pBuf) { status = DSP_EPOINTER; goto func_cont; } if (!CHNL_IsInput(pChnl->uMode)) goto func_cont1; /*pHostUserBuf */ status = copy_to_user(ioc.pBuf, pHostSysBuf, ioc.cBytes); if (status) { if (current->flags & PF_EXITING) status = 0; } if (status) status = DSP_EPOINTER; func_cont1: kfree(pHostSysBuf); } func_cont: /* Update User's IOC block: */ *pIOC = ioc; func_end: return status; }
/*
 *  ======== DBLL_readSect ========
 *      Get the content of a COFF section.
 */
DSP_STATUS DBLL_readSect(struct DBLL_LibraryObj *lib, char *name,
			char *pContent, u32 size)
{
	struct DBLL_LibraryObj *zlLib = (struct DBLL_LibraryObj *)lib;
	bool fOpenedDoff = false;
	u32 uByteSize;		/* size in bytes */
	u32 ulSectSize;		/* size of section */
	const struct LDR_SECTION_INFO *sect = NULL;
	DSP_STATUS status = DSP_SOK;

	DBC_Require(cRefs > 0);
	DBC_Require(MEM_IsValidHandle(zlLib, DBLL_LIBSIGNATURE));
	DBC_Require(name != NULL);
	DBC_Require(pContent != NULL);
	DBC_Require(size != 0);

	GT_4trace(DBLL_debugMask, GT_ENTER, "DBLL_readSect: lib: 0x%x name: %s "
		 "pContent: 0x%x size: 0x%x\n", lib, name, pContent, size);
	/* If DOFF file is not open, we open it. */
	if (zlLib != NULL) {
		if (zlLib->fp == NULL) {
			status = dofOpen(zlLib);
			if (DSP_SUCCEEDED(status))
				fOpenedDoff = true;
		} else {
			(*(zlLib->pTarget->attrs.fseek))(zlLib->fp,
							zlLib->ulPos,
							SEEK_SET);
		}
	} else {
		status = DSP_EHANDLE;
	}
	if (DSP_FAILED(status))
		goto func_cont;

	uByteSize = 1;
	if (!DLOAD_GetSectionInfo(zlLib->desc, name, &sect)) {
		status = DSP_ENOSECT;
		goto func_cont;
	}
	/*
	 * Ensure the supplied buffer size is sufficient to store
	 * the section content to be read.
	 */
	ulSectSize = sect->size * uByteSize;
	/* Make sure size is even for good swap */
	if (ulSectSize % 2)
		ulSectSize++;

	/* Align size */
	ulSectSize = DOFF_ALIGN(ulSectSize);
	if (ulSectSize > size) {
		status = DSP_EFAIL;
	} else {
		if (!DLOAD_GetSection(zlLib->desc, sect, pContent))
			status = DSP_EFREAD;
	}
func_cont:
	if (fOpenedDoff) {
		dofClose(zlLib);
		fOpenedDoff = false;
	}

	return status;
}
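/*
 * Illustrative sketch (not part of the driver): reading a section with
 * DBLL_readSect() above.  Because the function rounds the section size up
 * (to an even byte count, then to DOFF_ALIGN), the caller's buffer is padded
 * here instead of being sized to the exact section length.  The 32-byte
 * cushion is an arbitrary choice, not a value from the loader, and MEM_Calloc/
 * MEM_Free are the bridge allocation helpers already used in this excerpt.
 */
static DSP_STATUS example_read_sect(struct DBLL_LibraryObj *lib,
				    const char *name, u32 sectBytes)
{
	DSP_STATUS status;
	u32 bufSize = sectBytes + 32;	/* room for the round-up above */
	char *buf = MEM_Calloc(bufSize, MEM_PAGED);

	if (buf == NULL)
		return DSP_EMEMORY;

	status = DBLL_readSect(lib, (char *)name, buf, bufSize);
	/* ... use buf ... */
	MEM_Free(buf);

	return status;
}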
/* * ======== WMD_CHNL_AddIOReq ======== * Enqueue an I/O request for data transfer on a channel to the DSP. * The direction (mode) is specified in the channel object. Note the DSP * address is specified for channels opened in direct I/O mode. */ DSP_STATUS WMD_CHNL_AddIOReq(struct CHNL_OBJECT *hChnl, void *pHostBuf, u32 cBytes, u32 cBufSize, OPTIONAL u32 dwDspAddr, u32 dwArg) { DSP_STATUS status = DSP_SOK; struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl; struct CHNL_IRP *pChirp = NULL; struct WMD_DEV_CONTEXT *dev_ctxt; struct DEV_OBJECT *dev_obj; u32 dwState; bool fIsEOS; struct CHNL_MGR *pChnlMgr = pChnl->pChnlMgr; u8 *pHostSysBuf = NULL; bool fSchedDPC = false; u16 wMbVal = 0; fIsEOS = (cBytes == 0); /* Validate args: */ if (pHostBuf == NULL) { status = DSP_EPOINTER; } else if (!MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) { status = DSP_EHANDLE; } else if (fIsEOS && CHNL_IsInput(pChnl->uMode)) { status = CHNL_E_NOEOS; } else { /* Check the channel state: only queue chirp if channel state * allows */ dwState = pChnl->dwState; if (dwState != CHNL_STATEREADY) { if (dwState & CHNL_STATECANCEL) status = CHNL_E_CANCELLED; else if ((dwState & CHNL_STATEEOS) && CHNL_IsOutput(pChnl->uMode)) status = CHNL_E_EOS; else /* No other possible states left: */ DBC_Assert(0); } } dev_obj = DEV_GetFirst(); DEV_GetWMDContext(dev_obj, &dev_ctxt); if (!dev_ctxt) status = DSP_EHANDLE; if (DSP_FAILED(status)) goto func_end; if (pChnl->uChnlType == CHNL_PCPY && pChnl->uId > 1 && pHostBuf) { if (!(pHostBuf < (void *)USERMODE_ADDR)) { pHostSysBuf = pHostBuf; goto func_cont; } /* if addr in user mode, then copy to kernel space */ pHostSysBuf = MEM_Alloc(cBufSize, MEM_NONPAGED); if (pHostSysBuf == NULL) { status = DSP_EMEMORY; goto func_end; } if (CHNL_IsOutput(pChnl->uMode)) { status = copy_from_user(pHostSysBuf, pHostBuf, cBufSize); if (status) { kfree(pHostSysBuf); pHostSysBuf = NULL; status = DSP_EPOINTER; goto func_end; } } } func_cont: /* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY * channels. DPCCS is held to avoid race conditions with PCPY channels. * If DPC is scheduled in process context (IO_Schedule) and any * non-mailbox interrupt occurs, that DPC will run and break CS. Hence * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */ SYNC_EnterCS(pChnlMgr->hCSObj); omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX); if (pChnl->uChnlType == CHNL_PCPY) { /* This is a processor-copy channel. */ if (DSP_SUCCEEDED(status) && CHNL_IsOutput(pChnl->uMode)) { /* Check buffer size on output channels for fit. */ if (cBytes > IO_BufSize(pChnl->pChnlMgr->hIOMgr)) status = CHNL_E_BUFSIZE; } } if (DSP_SUCCEEDED(status)) { /* Get a free chirp: */ pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl->pFreeList); if (pChirp == NULL) status = CHNL_E_NOIORPS; } if (DSP_SUCCEEDED(status)) { /* Enqueue the chirp on the chnl's IORequest queue: */ pChirp->pHostUserBuf = pChirp->pHostSysBuf = pHostBuf; if (pChnl->uChnlType == CHNL_PCPY && pChnl->uId > 1) pChirp->pHostSysBuf = pHostSysBuf; /* * Note: for dma chans dwDspAddr contains dsp address * of SM buffer. */ DBC_Assert(pChnlMgr->uWordSize != 0); /* DSP address */ pChirp->uDspAddr = dwDspAddr / pChnlMgr->uWordSize; pChirp->cBytes = cBytes; pChirp->cBufSize = cBufSize; /* Only valid for output channel */ pChirp->dwArg = dwArg; pChirp->status = (fIsEOS ? 
CHNL_IOCSTATEOS : CHNL_IOCSTATCOMPLETE); LST_PutTail(pChnl->pIORequests, (struct list_head *)pChirp); pChnl->cIOReqs++; DBC_Assert(pChnl->cIOReqs <= pChnl->cChirps); /* If end of stream, update the channel state to prevent * more IOR's: */ if (fIsEOS) pChnl->dwState |= CHNL_STATEEOS; /* Legacy DSM Processor-Copy */ DBC_Assert(pChnl->uChnlType == CHNL_PCPY); /* Request IO from the DSP */ IO_RequestChnl(pChnlMgr->hIOMgr, pChnl, (CHNL_IsInput(pChnl->uMode) ? IO_INPUT : IO_OUTPUT), &wMbVal); fSchedDPC = true; } omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX); SYNC_LeaveCS(pChnlMgr->hCSObj); if (wMbVal != 0) IO_IntrDSP2(pChnlMgr->hIOMgr, wMbVal); /* Schedule a DPC, to do the actual data transfer: */ if (fSchedDPC) IO_Schedule(pChnlMgr->hIOMgr); func_end: return status; }
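/*
 * Illustrative sketch (not part of the driver): one synchronous transfer
 * using the channel functions above - open a to-DSP channel, queue a single
 * buffer with WMD_CHNL_AddIOReq(), then block in WMD_CHNL_GetIOC() until the
 * completion arrives.  The attribute values, buffer size and timeout are
 * hypothetical, and channel close is omitted because it is not part of this
 * excerpt.
 */
static DSP_STATUS example_chnl_write(struct CHNL_MGR *hChnlMgr, u32 uChnlId,
				     void *buf, u32 cBytes)
{
	struct CHNL_OBJECT *hChnl = NULL;
	struct CHNL_ATTRS attrs;
	struct CHNL_IOC ioc;
	DSP_STATUS status;

	memset(&attrs, 0, sizeof(attrs));
	attrs.uIOReqs = 4;	/* max queued I/O requests (hypothetical) */

	status = WMD_CHNL_Open(&hChnl, hChnlMgr, CHNL_MODETODSP, uChnlId,
			       &attrs);
	if (DSP_FAILED(status))
		return status;

	status = WMD_CHNL_AddIOReq(hChnl, buf, cBytes, cBytes,
				   0 /* dwDspAddr, unused for PCPY */, 0);
	if (DSP_SUCCEEDED(status))
		status = WMD_CHNL_GetIOC(hChnl, 1000 /* ms */, &ioc);
	if (DSP_SUCCEEDED(status) && (ioc.status & CHNL_IOCSTATTIMEOUT))
		status = DSP_EFAIL;

	return status;
}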
/* * ======== STRM_Open ======== * Purpose: * Open a stream for sending/receiving data buffers to/from a task or * XDAIS socket node on the DSP. */ DSP_STATUS STRM_Open(struct NODE_OBJECT *hNode, u32 uDir, u32 uIndex, IN struct STRM_ATTR *pAttr, OUT struct STRM_OBJECT **phStrm, struct PROCESS_CONTEXT *pr_ctxt) { struct STRM_MGR *hStrmMgr; struct WMD_DRV_INTERFACE *pIntfFxns; u32 ulChnlId; struct STRM_OBJECT *pStrm = NULL; short int uMode; struct CHNL_ATTRS chnlAttrs; DSP_STATUS status = DSP_SOK; struct CMM_OBJECT *hCmmMgr = NULL; /* Shared memory manager hndl */ HANDLE hSTRMRes; DBC_Require(cRefs > 0); DBC_Require(phStrm != NULL); DBC_Require(pAttr != NULL); GT_5trace(STRM_debugMask, GT_ENTER, "STRM_Open: hNode: 0x%x\tuDir: 0x%x\t" "uIndex: 0x%x\tpAttr: 0x%x\tphStrm: 0x%x\n", hNode, uDir, uIndex, pAttr, phStrm); *phStrm = NULL; if (uDir != DSP_TONODE && uDir != DSP_FROMNODE) { status = DSP_EDIRECTION; } else { /* Get the channel id from the node (set in NODE_Connect()) */ status = NODE_GetChannelId(hNode, uDir, uIndex, &ulChnlId); } if (DSP_SUCCEEDED(status)) status = NODE_GetStrmMgr(hNode, &hStrmMgr); if (DSP_SUCCEEDED(status)) { MEM_AllocObject(pStrm, struct STRM_OBJECT, STRM_SIGNATURE); if (pStrm == NULL) { status = DSP_EMEMORY; } else { pStrm->hStrmMgr = hStrmMgr; pStrm->uDir = uDir; pStrm->strmState = STREAM_IDLE; pStrm->hUserEvent = pAttr->hUserEvent; if (pAttr->pStreamAttrIn != NULL) { pStrm->uTimeout = pAttr->pStreamAttrIn-> uTimeout; pStrm->uNumBufs = pAttr->pStreamAttrIn-> uNumBufs; pStrm->lMode = pAttr->pStreamAttrIn->lMode; pStrm->uSegment = pAttr->pStreamAttrIn-> uSegment; pStrm->uAlignment = pAttr->pStreamAttrIn-> uAlignment; pStrm->uDMAChnlId = pAttr->pStreamAttrIn-> uDMAChnlId; pStrm->uDMAPriority = pAttr->pStreamAttrIn-> uDMAPriority; chnlAttrs.uIOReqs = pAttr->pStreamAttrIn-> uNumBufs; } else { pStrm->uTimeout = DEFAULTTIMEOUT; pStrm->uNumBufs = DEFAULTNUMBUFS; pStrm->lMode = STRMMODE_PROCCOPY; pStrm->uSegment = 0; /* local memory */ pStrm->uAlignment = 0; pStrm->uDMAChnlId = 0; pStrm->uDMAPriority = 0; chnlAttrs.uIOReqs = DEFAULTNUMBUFS; } chnlAttrs.hReserved1 = NULL; /* DMA chnl flush timeout */ chnlAttrs.hReserved2 = pStrm->uTimeout; chnlAttrs.hEvent = NULL; if (pAttr->hUserEvent != NULL) chnlAttrs.hEvent = pAttr->hUserEvent; } } if (DSP_FAILED(status)) goto func_cont; if ((pAttr->pVirtBase == NULL) || !(pAttr->ulVirtSize > 0)) goto func_cont; DBC_Assert(pStrm->lMode != STRMMODE_LDMA); /* no System DMA */ /* Get the shared mem mgr for this streams dev object */ status = DEV_GetCmmMgr(hStrmMgr->hDev, &hCmmMgr); if (DSP_SUCCEEDED(status)) { /*Allocate a SM addr translator for this strm.*/ status = CMM_XlatorCreate(&pStrm->hXlator, hCmmMgr, NULL); if (DSP_SUCCEEDED(status)) { DBC_Assert(pStrm->uSegment > 0); /* Set translators Virt Addr attributes */ status = CMM_XlatorInfo(pStrm->hXlator, (u8 **)&pAttr->pVirtBase, pAttr->ulVirtSize, pStrm->uSegment, true); } } func_cont: if (DSP_SUCCEEDED(status)) { /* Open channel */ uMode = (uDir == DSP_TONODE) ? CHNL_MODETODSP : CHNL_MODEFROMDSP; pIntfFxns = hStrmMgr->pIntfFxns; status = (*pIntfFxns->pfnChnlOpen) (&(pStrm->hChnl), hStrmMgr->hChnlMgr, uMode, ulChnlId, &chnlAttrs); if (DSP_FAILED(status)) { /* * over-ride non-returnable status codes so we return * something documented */ if (status != DSP_EMEMORY && status != DSP_EINVALIDARG && status != DSP_EFAIL) { /* * We got a status that's not return-able. 
* Assert that we got something we were * expecting (DSP_EHANDLE isn't acceptable, * hStrmMgr->hChnlMgr better be valid or we * assert here), and then return DSP_EFAIL. */ DBC_Assert(status == CHNL_E_OUTOFSTREAMS || status == CHNL_E_BADCHANID || status == CHNL_E_CHANBUSY || status == CHNL_E_NOIORPS); status = DSP_EFAIL; } } } if (DSP_SUCCEEDED(status)) { *phStrm = pStrm; DRV_ProcInsertSTRMResElement(*phStrm, &hSTRMRes, pr_ctxt); } else { (void)DeleteStrm(pStrm); } /* ensure we return a documented error code */ DBC_Ensure((DSP_SUCCEEDED(status) && MEM_IsValidHandle((*phStrm), STRM_SIGNATURE)) || (*phStrm == NULL && (status == DSP_EHANDLE || status == DSP_EDIRECTION || status == DSP_EVALUE || status == DSP_EFAIL))); return status; }
/* * ======== STRM_AllocateBuffer ======== * Purpose: * Allocates buffers for a stream. */ DSP_STATUS STRM_AllocateBuffer(struct STRM_OBJECT *hStrm, u32 uSize, OUT u8 **apBuffer, u32 uNumBufs) { DSP_STATUS status = DSP_SOK; u32 uAllocated = 0; u32 i; #ifndef RES_CLEANUP_DISABLE DSP_STATUS res_status = DSP_SOK; u32 hProcess; HANDLE pCtxt = NULL; HANDLE hDrvObject; HANDLE hSTRMRes; #endif DBC_Require(cRefs > 0); DBC_Require(apBuffer != NULL); GT_4trace(STRM_debugMask, GT_ENTER, "STRM_AllocateBuffer: hStrm: 0x%x\t" "uSize: 0x%x\tapBuffer: 0x%x\tuNumBufs: 0x%x\n", hStrm, uSize, apBuffer, uNumBufs); if (MEM_IsValidHandle(hStrm, STRM_SIGNATURE)) { /* * Allocate from segment specified at time of stream open. */ if (uSize == 0) status = DSP_ESIZE; } if (DSP_FAILED(status)) { status = DSP_EHANDLE; goto func_end; } for (i = 0; i < uNumBufs; i++) { DBC_Assert(hStrm->hXlator != NULL); (void)CMM_XlatorAllocBuf(hStrm->hXlator, &apBuffer[i], uSize); if (apBuffer[i] == NULL) { GT_0trace(STRM_debugMask, GT_7CLASS, "STRM_AllocateBuffer: " "DSP_FAILED to alloc shared memory.\n"); status = DSP_EMEMORY; uAllocated = i; break; } } if (DSP_FAILED(status)) STRM_FreeBuffer(hStrm, apBuffer, uAllocated); #ifndef RES_CLEANUP_DISABLE if (DSP_FAILED(status)) goto func_end; /* Return PID instead of process handle */ hProcess = current->pid; res_status = CFG_GetObject((u32 *)&hDrvObject, REG_DRV_OBJECT); if (DSP_FAILED(res_status)) goto func_end; DRV_GetProcContext(hProcess, (struct DRV_OBJECT *)hDrvObject, &pCtxt, NULL, 0); if (pCtxt != NULL) { if (DRV_GetSTRMResElement(hStrm, &hSTRMRes, pCtxt) != DSP_ENOTFOUND) { DRV_ProcUpdateSTRMRes(uNumBufs, hSTRMRes, pCtxt); } } #endif func_end: return status; }
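/*
 * Illustrative sketch (not part of the driver): allocating and releasing a
 * set of stream buffers with STRM_AllocateBuffer()/STRM_FreeBuffer() above.
 * The buffer count and size are hypothetical; on failure STRM_AllocateBuffer()
 * already frees whatever it managed to allocate.
 */
#define EXAMPLE_NUM_BUFS 4	/* hypothetical */

static DSP_STATUS example_strm_buffers(struct STRM_OBJECT *hStrm, u32 uSize)
{
	u8 *bufs[EXAMPLE_NUM_BUFS];
	DSP_STATUS status;

	status = STRM_AllocateBuffer(hStrm, uSize, bufs, EXAMPLE_NUM_BUFS);
	if (DSP_FAILED(status))
		return status;

	/* ... issue/reclaim the buffers on the stream ... */

	(void)STRM_FreeBuffer(hStrm, bufs, EXAMPLE_NUM_BUFS);

	return DSP_SOK;
}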
/* * ======== NTFY_Register ======== * Purpose: * Add a notification element to the list. If the notification is already * registered, and uEventMask != 0, the notification will get posted for * events specified in the new event mask. If the notification is already * registered and uEventMask == 0, the notification will be unregistered. */ DSP_STATUS NTFY_Register(struct NTFY_OBJECT *hNtfy, struct DSP_NOTIFICATION *hNotification, u32 uEventMask, u32 uNotifyType) { struct NOTIFICATION *pNotify; struct SYNC_ATTRS syncAttrs; DSP_STATUS status = DSP_SOK; DBC_Require(MEM_IsValidHandle(hNtfy, NTFY_SIGNATURE)); if (hNotification == NULL) status = DSP_EHANDLE; /* Return DSP_ENOTIMPL if uNotifyType is not supported */ if (DSP_SUCCEEDED(status)) { if (!IsValidNotifyMask(uNotifyType)) status = DSP_ENOTIMPL; } if (DSP_FAILED(status)) return status; (void)SYNC_EnterCS(hNtfy->hSync); pNotify = (struct NOTIFICATION *)LST_First(hNtfy->notifyList); while (pNotify != NULL) { /* If there is more than one notification type, each * type may require its own handler code. */ if (hNotification->handle == pNotify->hSync) { /* found */ break; } pNotify = (struct NOTIFICATION *)LST_Next(hNtfy->notifyList, (struct list_head *)pNotify); } if (pNotify == NULL) { /* Not registered */ if (uEventMask == 0) { status = DSP_EVALUE; } else { /* Allocate NOTIFICATION object, add to list */ pNotify = MEM_Calloc(sizeof(struct NOTIFICATION), MEM_PAGED); if (pNotify == NULL) status = DSP_EMEMORY; } if (DSP_SUCCEEDED(status)) { LST_InitElem((struct list_head *)pNotify); /* If there is more than one notification type, each * type may require its own handler code. */ status = SYNC_OpenEvent(&pNotify->hSync, &syncAttrs); hNotification->handle = pNotify->hSync; if (DSP_SUCCEEDED(status)) { pNotify->uEventMask = uEventMask; pNotify->uNotifyType = uNotifyType; LST_PutTail(hNtfy->notifyList, (struct list_head *)pNotify); } else { DeleteNotify(pNotify); } } } else { /* Found in list */ if (uEventMask == 0) { /* Remove from list and free */ LST_RemoveElem(hNtfy->notifyList, (struct list_head *)pNotify); DeleteNotify(pNotify); } else { /* Update notification mask (type shouldn't change) */ pNotify->uEventMask = uEventMask; } } (void)SYNC_LeaveCS(hNtfy->hSync); return status; }
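/*
 * Illustrative sketch (not part of the driver): registering and later
 * unregistering a notification with NTFY_Register() above.  The event mask
 * (DSP_NODEMESSAGEREADY) is just one example value; passing a mask of 0 for
 * an already-registered notification removes it, exactly as described in the
 * header comment.
 */
static DSP_STATUS example_ntfy_usage(struct NTFY_OBJECT *hNtfy,
				     struct DSP_NOTIFICATION *hNotification,
				     u32 uNotifyType)
{
	DSP_STATUS status;

	/* Register for message-ready events */
	status = NTFY_Register(hNtfy, hNotification, DSP_NODEMESSAGEREADY,
			       uNotifyType);
	if (DSP_FAILED(status))
		return status;

	/* ... wait on hNotification->handle elsewhere ... */

	/* Unregister: an event mask of 0 removes the entry from the list */
	return NTFY_Register(hNtfy, hNotification, 0, uNotifyType);
}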