/*
 *  ======== dmm_reserve_memory ========
 *  Purpose:
 *      Reserve a chunk of virtually contiguous DSP/IVA address space.
 *
 *  Returns 0 on success (with *prsv_addr set to the chunk's DSP virtual
 *  address) or -ENOMEM when no free region of the requested size exists.
 */
int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size, u32 *prsv_addr)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	u32 rsv_addr = 0;
	u32 rsv_size = 0;
	int status = 0;

	spin_lock(&dmm_obj->dmm_lock);

	/* Look for a first-fit chunk on the free list */
	chunk = get_free_region(size);
	if (!chunk) {
		/* No DSP chunk of the requested size is available */
		status = -ENOMEM;
	} else {
		rsv_addr = DMM_ADDR_VIRTUAL(chunk);
		/* Number of 4K page entries actually needed */
		rsv_size = size / PG_SIZE4K;

		/*
		 * First fit may hand back a larger region than requested;
		 * split off the unused tail as a new free region.
		 */
		if (rsv_size < chunk->region_size) {
			chunk[rsv_size].mapped = false;
			chunk[rsv_size].reserved = false;
			chunk[rsv_size].region_size =
			    chunk->region_size - rsv_size;
			chunk[rsv_size].mapped_size = 0;
		}

		/* Mark exactly the requested entries as reserved */
		chunk->mapped = false;
		chunk->reserved = true;
		chunk->region_size = rsv_size;
		chunk->mapped_size = 0;

		/* Hand back the chunk's starting DSP virtual address */
		*prsv_addr = rsv_addr;
	}

	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
		"rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
		prsv_addr, status, rsv_addr, rsv_size);

	return status;
}
/*
 *  ======== DMM_ReserveMemory ========
 *  Purpose:
 *      Reserve a chunk of virtually contiguous DSP/IVA address space.
 *
 *  Returns DSP_SOK on success (with *pRsvAddr set to the chunk's DSP
 *  virtual address) or DSP_EMEMORY when no free region of the requested
 *  size exists.
 */
DSP_STATUS DMM_ReserveMemory(struct DMM_OBJECT *hDmmMgr, u32 size,
			     u32 *pRsvAddr)
{
	struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr;
	struct MapPage *node;
	DSP_STATUS status = DSP_SOK;
	u32 rsvAddr = 0;
	u32 rsvSize = 0;

	GT_3trace(DMM_debugMask, GT_ENTER,
		  "Entered DMM_ReserveMemory () hDmmMgr %x, "
		  "size %x, pRsvAddr %x\n", hDmmMgr, size, pRsvAddr);

	SYNC_EnterCS(pDmmObj->hDmmLock);

	/* Look for a first-fit DSP chunk on the free list */
	node = GetFreeRegion(size);
	if (node == NULL) {
		/* No DSP chunk of the requested size is available */
		status = DSP_EMEMORY;
	} else {
		rsvAddr = DMM_ADDR_VIRTUAL(node);
		/* Number of 4K page entries actually needed */
		rsvSize = size / PG_SIZE_4K;

		/*
		 * First fit may hand back a larger region than requested;
		 * split off the unused tail as a new free region.
		 */
		if (rsvSize < node->RegionSize) {
			node[rsvSize].bMapped = false;
			node[rsvSize].bReserved = false;
			node[rsvSize].RegionSize =
			    node->RegionSize - rsvSize;
			node[rsvSize].MappedSize = 0;
		}

		/* Mark exactly the requested entries as reserved */
		node->bMapped = false;
		node->bReserved = true;
		node->RegionSize = rsvSize;
		node->MappedSize = 0;

		/* Hand back the chunk's starting DSP virtual address */
		*pRsvAddr = rsvAddr;
	}

	SYNC_LeaveCS(pDmmObj->hDmmLock);

	GT_3trace(DMM_debugMask, GT_4CLASS,
		  "Leaving ReserveMemory status %x, rsvAddr"
		  " %x, rsvSize %x\n", status, rsvAddr, rsvSize);

	return status;
}
/*
 *  ======== dmm_reserve_memory ========
 *  Purpose:
 *      Reserve a chunk of virtually contiguous DSP/IVA address space.
 *
 *  Returns 0 on success (with *prsv_addr set to the chunk's DSP virtual
 *  address) or -ENOMEM when no free region of the requested size exists.
 */
int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size, u32 *prsv_addr)
{
	int status = 0;
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *node;
	u32 rsv_addr = 0;
	u32 rsv_size = 0;

	spin_lock(&dmm_obj->dmm_lock);

	/* Try to get a DSP chunk from the free list */
	node = get_free_region(size);
	if (node != NULL) {
		/* A DSP chunk of the given size is available */
		rsv_addr = DMM_ADDR_VIRTUAL(node);
		/* Number of 4K page entries actually needed */
		rsv_size = size / PG_SIZE4K;
		if (rsv_size < node->region_size) {
			/*
			 * First fit may hand back a larger region than
			 * requested; mark the unused tail as a new free
			 * region.
			 */
			node[rsv_size].mapped = false;
			node[rsv_size].reserved = false;
			node[rsv_size].region_size =
			    node->region_size - rsv_size;
			node[rsv_size].mapped_size = 0;
		}
		/* Reserve exactly the requested number of entries */
		node->mapped = false;
		node->reserved = true;
		node->region_size = rsv_size;
		node->mapped_size = 0;
		/* Return the chunk's starting DSP virtual address */
		*prsv_addr = rsv_addr;
	} else
		/* No DSP chunk of the given size is available */
		status = -ENOMEM;

	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
		"rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
		prsv_addr, status, rsv_addr, rsv_size);

	return status;
}