/*
 * Mark a single physical page in the allocation bitmap.
 * MB_PAGE_OCCUPIED clears the page's bit; MB_PAGE_FREE sets it and
 * pulls m_LastIndex back so the allocator rescans from this word.
 * Any other flag value is ignored.
 */
void MB_MarkPage(MemoryBitmap *Bitmap, physical Page, qword Flag)
{
    /* Convert the physical address to a page index, then split it into
       a 32-bit word index and a bit position within that word. */
    address l_PageIdx = PAGE_SHIFT(PAGE_ROUND_DOWN((address) Page));
    qword t_Word = l_PageIdx / 32;
    qword t_Bit  = l_PageIdx % 32;

    if (Flag == MB_PAGE_OCCUPIED) {
        if (t_Word < Bitmap->m_Length)
            Bitmap->m_Bitmap[t_Word] &= ~(1 << t_Bit);
    } else if (Flag == MB_PAGE_FREE) {
        if (t_Word < Bitmap->m_Length)
            Bitmap->m_Bitmap[t_Word] |= 1 << t_Bit;
        /* Remember the lowest word containing a free page. */
        if (t_Word < Bitmap->m_LastIndex)
            Bitmap->m_LastIndex = t_Word;
    }
}
/*
 * Map PageCount physical pages starting at the page-aligned virtual
 * Address, building PTEs directly ("unsafe": no locking or quota
 * charging is visible here — caller is responsible for serialization).
 *
 * Process      - target process, or NULL for a kernel-space mapping
 * Address      - page-aligned virtual address of the first page
 * PageProtection - protection translated into the PTE via MiSetPteProtection
 * Pages        - array of PageCount page frame numbers to map
 *
 * Returns STATUS_SUCCESS (the range check only asserts, it does not fail).
 */
NTSTATUS
NTAPI
MmCreateVirtualMappingUnsafe(
    PEPROCESS Process,
    PVOID Address,
    ULONG PageProtection,
    PPFN_NUMBER Pages,
    ULONG PageCount)
{
    ULONG i;
    MMPTE TmplPte, *Pte;

    ASSERT((ULONG_PTR)Address % PAGE_SIZE == 0);

    /* Check if the range is valid: kernel mappings need NULL Process and a
       system-space address; user mappings need a user-space address.
       NOTE(review): on violation this only DPRINTs and ASSERTs — it does
       not return an error. */
    if ((Process == NULL && Address < MmSystemRangeStart) ||
        (Process != NULL && Address > MmHighestUserAddress))
    {
        DPRINT1("Address 0x%p is invalid for process %p\n", Address, Process);
        ASSERT(FALSE);
    }

    /* Build a template PTE once; only the PFN changes per page. */
    TmplPte.u.Long = 0;
    TmplPte.u.Hard.Valid = 1;
    MiSetPteProtection(&TmplPte, PageProtection);

    /* Owner bit marks user-accessible pages. */
    TmplPte.u.Flush.Owner = (Address < MmHighestUserAddress) ? 1 : 0;

    //__debugbreak();

    for (i = 0; i < PageCount; i++)
    {
        TmplPte.u.Hard.PageFrameNumber = Pages[i];

        /* Get (and create, if needed) the PTE for this address; for a
           foreign process this may come through a hyperspace mapping. */
        Pte = MiGetPteForProcess(Process, Address, TRUE);

        DPRINT("MmCreateVirtualMappingUnsafe, Address=%p, TmplPte=%p, Pte=%p\n",
               Address, TmplPte.u.Long, Pte);

        /* If the old PTE was non-zero, the TLB may hold a stale entry. */
        if (InterlockedExchangePte(Pte, TmplPte))
        {
            KeInvalidateTlbEntry(Address);
        }

        /* Tear down the temporary hyperspace mapping of the page table. */
        if (MiIsHyperspaceAddress(Pte))
            MmDeleteHyperspaceMapping((PVOID)PAGE_ROUND_DOWN(Pte));

        Address = (PVOID)((ULONG64)Address + PAGE_SIZE);
    }

    return STATUS_SUCCESS;
}
/*
 * Flush the translation for Address after Pte has been modified.
 * A PTE reached through hyperspace belongs to another process, so the
 * hyperspace mapping is torn down instead of invalidating our own TLB.
 */
VOID
MiFlushTlb(PMMPTE Pte, PVOID Address)
{
    if (!MiIsHyperspaceAddress(Pte))
    {
        /* Local mapping: invalidate the single TLB entry. */
        __invlpg(Address);
        return;
    }

    MmDeleteHyperspaceMapping((PVOID)PAGE_ROUND_DOWN(Pte));
}
/*
 * Release the virtual memory region containing Pointer.
 * Rounding down to the page base and passing RegionSize = 0 with
 * MEM_RELEASE frees the entire original reservation.
 */
static
VOID
FreeGuarded(
    _In_ PVOID Pointer)
{
    PVOID BaseAddress = (PVOID)PAGE_ROUND_DOWN((SIZE_T)Pointer);
    SIZE_T RegionSize = 0;
    NTSTATUS Status;

    Status = NtFreeVirtualMemory(NtCurrentProcess(),
                                 &BaseAddress,
                                 &RegionSize,
                                 MEM_RELEASE);
    ok(Status == STATUS_SUCCESS, "Status = %lx\n", Status);
}
void MPMsgMgr::init(DispatcherID dspid, MemoryMgrPrimitive *pa, MPMsgMgrRegistryRef ®istry) { SysStatus rc; sendQueue.init(); replyQueue.init(); allocMsgLock.init(); thisDspID = dspid; // allocate array of buffers tassert((sizeof(MsgHolder) == MSG_HOLDER_SIZE), err_printf("oops\n")); const uval amt = NUM_MSGS * sizeof(MsgHolder); uval space; if (pa != NULL) { pa->alloc(space, amt, MSG_CHUNK_SIZE); } else { space = uval(allocGlobalPadded(amt)); } tassert(space != 0, err_printf("couldn't allocate msg buffers\n")); msgHolder = (MsgHolder *) space; uval i; for (i = 0; i < NUM_MSGS; i++) { msgHolder[i].manager = this; msgHolder[i].busy = 0; } nextMsgIdx = 0; // Create the registry, but don't register ourselves yet because our // interrupt handlers haven't been installed. if (dspid == SysTypes::DSPID(0,0)) { if (registry!=NULL) { uval* y = (uval*)PAGE_ROUND_UP((uval)®istry); uval* x = (uval*)PAGE_ROUND_DOWN((uval)®istry); while (x < y) { if (*x) { err_printf("%p: %lx\n",x,*x); } ++x; } } passertMsg(registry == NULL,"MPMsgMgr already initialized %p\n", registry); rc = MPMsgMgrRegistry::Create(registry, pa); tassert(_SUCCESS(rc), err_printf("MPMsgMgrRegistry::Create failed\n")); } registryRef = registry; }
/*
 * @implemented
 *
 * Build TargetMdl as a partial MDL describing a sub-range of SourceMdl,
 * starting at VirtualAddress for Length bytes (Length == 0 means "to the
 * end of the source range"). The page frame numbers are copied from the
 * source MDL's PFN array; no pages are locked or mapped by this routine.
 */
VOID
NTAPI
IoBuildPartialMdl(IN PMDL SourceMdl,
                  IN PMDL TargetMdl,
                  IN PVOID VirtualAddress,
                  IN ULONG Length)
{
    /* PFN arrays live immediately after the MDL headers. */
    PPFN_NUMBER TargetPages = (PPFN_NUMBER)(TargetMdl + 1);
    PPFN_NUMBER SourcePages = (PPFN_NUMBER)(SourceMdl + 1);
    ULONG Offset;
    /* Flags that are inherited from the source MDL. */
    ULONG FlagsMask = (MDL_IO_PAGE_READ |
                       MDL_SOURCE_IS_NONPAGED_POOL |
                       MDL_MAPPED_TO_SYSTEM_VA |
                       MDL_IO_SPACE);

    /* Calculate the offset of VirtualAddress into the source buffer
       (StartVa is page-aligned, so the byte offset is subtracted back out). */
    Offset = (ULONG)((ULONG_PTR)VirtualAddress -
                     (ULONG_PTR)SourceMdl->StartVa) -
                     SourceMdl->ByteOffset;

    /* Check if we don't have a length and calculate it */
    if (!Length) Length = SourceMdl->ByteCount - Offset;

    /* Write the process, start VA and byte data */
    TargetMdl->StartVa = (PVOID)PAGE_ROUND_DOWN(VirtualAddress);
    TargetMdl->Process = SourceMdl->Process;
    TargetMdl->ByteCount = Length;
    TargetMdl->ByteOffset = BYTE_OFFSET(VirtualAddress);

    /* Recalculate the length in pages (number of PFN entries to copy) */
    Length = ADDRESS_AND_SIZE_TO_SPAN_PAGES(VirtualAddress, Length);

    /* Set the MDL Flags: keep only the allocation-size flags of the target,
       inherit the masked source flags, and mark the MDL as partial. */
    TargetMdl->MdlFlags &= (MDL_ALLOCATED_FIXED_SIZE | MDL_ALLOCATED_MUST_SUCCEED);
    TargetMdl->MdlFlags |= SourceMdl->MdlFlags & FlagsMask;
    TargetMdl->MdlFlags |= MDL_PARTIAL;

    /* Set the mapped VA (valid only if the source was mapped to system VA) */
    TargetMdl->MappedSystemVa = (PCHAR)SourceMdl->MappedSystemVa + Offset;

    /* Now do the copy of the PFN entries, skipping the leading whole pages
       between the two start addresses */
    Offset = (ULONG)(((ULONG_PTR)TargetMdl->StartVa -
                      (ULONG_PTR)SourceMdl->StartVa) >> PAGE_SHIFT);
    SourcePages += Offset;
    RtlCopyMemory(TargetPages, SourcePages, Length * sizeof(PFN_NUMBER));
}
/*
 * Return the raw PTE value for Address in the given process, or 0 if no
 * PTE exists. The value is read before any hyperspace mapping used to
 * reach a foreign page table is torn down.
 */
static
ULONG64
MiGetPteValueForProcess(
    PEPROCESS Process,
    PVOID Address)
{
    ULONG64 Result = 0;
    PMMPTE Pte = MiGetPteForProcess(Process, Address, FALSE);

    if (Pte != NULL)
        Result = Pte->u.Long;

    /* Drop the temporary hyperspace mapping, if one was used. */
    if (MiIsHyperspaceAddress(Pte))
        MmDeleteHyperspaceMapping((PVOID)PAGE_ROUND_DOWN(Pte));

    return Result;
}
/*
 * Work-queue routine performing read-ahead for a file: walk the BCBs
 * attached to the file's shared cache map and touch every page that
 * overlaps the requested range so it gets faulted in.
 *
 * Context is a PWORK_QUEUE_WITH_READ_AHEAD owning a reference to the
 * file object; both are released before returning.
 */
VOID
NTAPI
CcpReadAhead(PVOID Context)
{
    LARGE_INTEGER Offset;
    PWORK_QUEUE_WITH_READ_AHEAD WorkItem = (PWORK_QUEUE_WITH_READ_AHEAD)Context;
    PNOCC_CACHE_MAP Map = (PNOCC_CACHE_MAP)WorkItem->FileObject->
                          SectionObjectPointer->SharedCacheMap;

    DPRINT1("Reading ahead %08x%08x:%x %wZ\n",
            WorkItem->FileOffset.HighPart,
            WorkItem->FileOffset.LowPart,
            WorkItem->Length,
            &WorkItem->FileObject->FileName);

    /* Page-align the start of the requested range. */
    Offset.HighPart = WorkItem->FileOffset.HighPart;
    Offset.LowPart = PAGE_ROUND_DOWN(WorkItem->FileOffset.LowPart);

    if (Map)
    {
        PLIST_ENTRY ListEntry;
        volatile char *chptr;
        PNOCC_BCB Bcb;

        for (ListEntry = Map->AssociatedBcb.Flink;
             ListEntry != &Map->AssociatedBcb;
             ListEntry = ListEntry->Flink)
        {
            Bcb = CONTAINING_RECORD(ListEntry, NOCC_BCB, ThisFileList);

            /* Skip BCBs that do not overlap the read-ahead range. */
            if ((Offset.QuadPart + WorkItem->Length < Bcb->FileOffset.QuadPart) ||
                (Bcb->FileOffset.QuadPart + Bcb->Length < Offset.QuadPart))
                continue;

            /* Touch one byte per page; the XOR with 0 is a no-op write
               that forces the page to be faulted in. */
            for (chptr = Bcb->BaseAddress, Offset = Bcb->FileOffset;
                 chptr < ((PCHAR)Bcb->BaseAddress) + Bcb->Length &&
                 Offset.QuadPart <
                     WorkItem->FileOffset.QuadPart + WorkItem->Length;
                 chptr += PAGE_SIZE, Offset.QuadPart += PAGE_SIZE)
            {
                *chptr ^= 0;
            }
        }
    }

    /* Release the reference taken when the work item was queued. */
    ObDereferenceObject(WorkItem->FileObject);
    ExFreePool(WorkItem);
    DPRINT("Done\n");
}
/*
 * Commit the page-aligned region around Pointer.
 *
 * NOTE(review): despite the name, this commits with PAGE_READWRITE and
 * never calls NtProtectVirtualMemory with PAGE_READONLY — confirm against
 * the test's intent. Also note the failure path frees the region when the
 * *allocation* fails, which looks inverted; verify upstream.
 */
static
VOID
MakeReadOnly(
    PVOID Pointer,
    SIZE_T SizeRequested)
{
    NTSTATUS Status;
    /* Round the request up to whole pages and down to the page base. */
    SIZE_T Size = PAGE_ROUND_UP(SizeRequested);
    PVOID VirtualMemory = (PVOID)PAGE_ROUND_DOWN((SIZE_T)Pointer);

    if (Size)
    {
        Status = NtAllocateVirtualMemory(NtCurrentProcess(),
                                         &VirtualMemory,
                                         0,
                                         &Size,
                                         MEM_COMMIT,
                                         PAGE_READWRITE);
        if (!NT_SUCCESS(Status))
        {
            /* Allocation failed: release whatever is at the address. */
            Size = 0;
            Status = NtFreeVirtualMemory(NtCurrentProcess(),
                                         &VirtualMemory,
                                         &Size,
                                         MEM_RELEASE);
            ok(Status == STATUS_SUCCESS, "Status = %lx\n", Status);
        }
    }
}
/*
 * Release a run of Pages physical pages starting at linear BlockAddr,
 * marking each as free in the bitmap. A single page is delegated to
 * MB_MarkPage; longer runs set consecutive bits directly and pull
 * m_LastIndex back to the first affected word.
 */
void MB_ReleasePages(MemoryBitmap *Bmp, linear BlockAddr, qword Pages)
{
    switch (Pages) {
    case 0:
        return;
    case 1:
        MB_MarkPage(Bmp, LINEAR_TO_PHYSICAL(BlockAddr), MB_PAGE_FREE);
        return;
    default:
        break;
    }

    /* Translate to a page index and split into word/bit coordinates. */
    address l_PageIdx =
        PAGE_SHIFT(PAGE_ROUND_DOWN((address) LINEAR_TO_PHYSICAL(BlockAddr)));
    qword t_Word = l_PageIdx / 32;
    qword t_Bit  = l_PageIdx % 32;

    /* Allocation scans resume no later than the first freed word. */
    if (t_Word < Bmp->m_LastIndex)
        Bmp->m_LastIndex = t_Word;

    /* Set one bit per page, advancing across word boundaries; stop at
       the end of the bitmap even if Pages remain. */
    for (; Pages != 0 && t_Word < Bmp->m_Length; --Pages) {
        Bmp->m_Bitmap[t_Word] |= (1 << t_Bit);
        if (++t_Bit == 32) {
            t_Bit = 0;
            ++t_Word;
        }
    }
}
/*
 * Validate a firmware file before flashing.
 *
 * The file format is line-based: '#' lines are comments, a "C <name>"
 * line selects the controller type, and ':' lines are Intel-HEX records.
 * Checks performed: the controller type is known and declared before any
 * IHEX data, records are monotonically increasing in address, and no
 * data reaches into the bootloader area.
 *
 * On success returns 1 and stores the matched controller in *params;
 * returns 0 on any error (with *params possibly left NULL).
 */
static int firmware_validate(const char *firmware_path,
                             struct controller_param **params)
{
    FILE *f = fopen(firmware_path, "r");
    if(!f) {
        perror("Opening firmware file failed");
        return 0;
    }

    int ret = 0;
    char buf[256];
    uint32_t offset = 0;       /* highest data offset seen so far */
    bool got_type = false;     /* has a "C" type line been seen yet */

    while(fgets(buf, 256, f)) {
        chomp(buf);
        if(buf[0] == '#')
            continue;          /* comment line */
        else if(buf[0] == 'C') {
            /* Controller type line: "C <name>" */
            const char *name = &buf[2];
            size_t name_len = strlen(name);
            got_type = true;
            struct controller_param *p;
            *params = NULL;
            /* NOTE(review): strncasecmp with name_len is a prefix match —
               a name that is a prefix of a table entry matches it; confirm
               this abbreviation behavior is intended. */
            for(p = controller_params; p->name; p++)
                if(!strncasecmp(p->name, name, name_len)) {
                    *params = p;
                    break;
                }
            if(!*params) {
                log("Unknown controller type \"%s\"", name);
                ret = 0;
                goto out;
            }
        } else if(buf[0] == ':') {
            /* IHEX record: type line must have come first so page size
               and bootloader offset are known. */
            if(!got_type) {
                log("Got IHEX before type statement");
                ret = 0;
                goto out;
            }
            uint32_t new_offset, length;
            enum ihex_record rtype = parse_ihex(buf, &new_offset, &length, NULL);
            switch(rtype) {
            case IHEX_INVALID:
                ret = 0;
                goto out;
                break;
            case IHEX_DATA:
                /* Records must not move backwards. */
                if(new_offset < offset) {
                    log("IHEX is not monotonic");
                    ret = 0;
                    goto out;
                }
                offset = new_offset;
                /* Data must stay below the bootloader area. */
                if(offset + length > PAGE_ROUND_DOWN((*params)->pagesize,
                                                     (*params)->bootloader_offset)) {
                    log("Data reaches into bootloader area");
                    ret = 0;
                    goto out;
                }
                break;
            case IHEX_EOF:
                break;
            default:
                log("Unsupported IHEX record %02X", rtype);
                ret = 0;
                goto out;
                break;
            }
        } else {
            log("Got invalid line: %s", buf);
            ret = 0;
            goto out;
        }
    }

    if(!got_type) {
        log("Controller type needs to be specified");
        ret = 0;
        goto out;
    }

    ret = 1;
out:
    fclose(f);
    return ret;
}
/*
 * Flash the firmware file belonging to controller `addr` over the CAN
 * socket. The file is validated first; IHEX data records are gathered
 * into page-sized chunks (zero-padding gaps at the start of a page) and
 * flashed page by page, followed by the final partial page and a device
 * reset. Returns 0 on success, -1 on failure.
 *
 * BUG FIXES:
 *  - `&params` had been mojibake'd into the pilcrow entity; restored.
 *  - the firmware FILE* was never fclose()d on any path (leak).
 *  - fopen() and malloc() results were used unchecked.
 */
int firmware_flash(int cansock, const char *firmware_dir, canid_t addr)
{
    const char *file = firmware_make_path(firmware_dir, addr);
    log("Using firmware file %s", file);

    struct controller_param *params;
    if(!firmware_validate(file, &params)) {
        log("Firmware file validation failed, sending reset");
        can_send_reset(cansock, addr);
        return -1;
    }

    FILE *f = fopen(file, "r");
    if(!f) {
        /* validation just opened it, but it may have vanished since */
        perror("Opening firmware file failed");
        return -1;
    }

    uint8_t *pagebuf = malloc(params->pagesize);
    if(!pagebuf) {
        log("Allocating page buffer failed");
        fclose(f);
        return -1;
    }

    char buf[256];
    uint32_t fillcnt = 0, base_addr = 0;

    while(fgets(buf, 256, f)) {
        chomp(buf);
        if(buf[0] != ':')
            continue;

        uint32_t offset, length;
        uint8_t *data;
        if(parse_ihex(buf, &offset, &length, &data) == IHEX_EOF)
            break;

        while(length) {
            if(fillcnt == 0) {
                /* We're starting a new page: zero-fill the gap between
                   the page base and the record's offset. */
                base_addr = PAGE_ROUND_DOWN(params->pagesize, offset);
                memset(pagebuf, 0, offset - base_addr);
                fillcnt = offset - base_addr;
                can_send_set_zpointer(cansock, addr, base_addr);
            }

            uint32_t to_write = MIN(length, params->pagesize - fillcnt);
            memcpy(pagebuf + fillcnt, data, to_write);
            fillcnt += to_write;
            offset += to_write;
            length -= to_write;

            if(fillcnt == params->pagesize) {
                /* page complete, flash it */
                if(do_flash(cansock, addr, fillcnt, pagebuf) < 0) {
                    log("Flashing page 0x%X failed", base_addr / 2);
                    free(data);
                    free(pagebuf);
                    fclose(f);
                    return -1;
                }
                fillcnt = 0;
            }

            /* Shift the unconsumed remainder to the front of the record. */
            memmove(data, data + to_write, length);
        }
        free(data);
    }

    fclose(f);

    if(fillcnt) {
        /* there's some data left: flash the final partial page */
        if(do_flash(cansock, addr, fillcnt, pagebuf) < 0) {
            log("Flashing page 0x%X failed", base_addr / 2);
            free(pagebuf);
            return -1;
        }
    }

    free(pagebuf);
    log("Resetting device");
    can_send_reset(cansock, addr);
    return 0;
}
/* This could be implemented much more intelligently by mapping instances
   of a CoW zero page into the affected regions. We just RtlZeroMemory
   for now. */
/*
 * Zero the byte range [*StartOffset, *EndOffset) of a file. When the file
 * has no shared cache map the range is zeroed on disk directly through a
 * temporary page buffer (read-modify-write for the partial edge pages);
 * otherwise each cached BCB overlapping the range is pinned for write so
 * the cache performs the zeroing.
 */
BOOLEAN
NTAPI
CcZeroData(IN PFILE_OBJECT FileObject,
           IN PLARGE_INTEGER StartOffset,
           IN PLARGE_INTEGER EndOffset,
           IN BOOLEAN Wait)
{
    PNOCC_BCB Bcb = NULL;
    PLIST_ENTRY ListEntry = NULL;
    LARGE_INTEGER LowerBound = *StartOffset;
    LARGE_INTEGER UpperBound = *EndOffset;
    LARGE_INTEGER Target, End;
    PVOID PinnedBcb, PinnedBuffer;
    PNOCC_CACHE_MAP Map = FileObject->SectionObjectPointer->SharedCacheMap;

    DPRINT("S %08x%08x E %08x%08x\n",
           StartOffset->u.HighPart, StartOffset->u.LowPart,
           EndOffset->u.HighPart, EndOffset->u.LowPart);

    /* Uncached file: zero directly on disk via a scratch page. */
    if (!Map)
    {
        NTSTATUS Status;
        IO_STATUS_BLOCK IOSB;
        PCHAR ZeroBuf = ExAllocatePool(PagedPool, PAGE_SIZE);
        ULONG ToWrite;

        if (!ZeroBuf) RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
        DPRINT1("RtlZeroMemory(%x,%x)\n", ZeroBuf, PAGE_SIZE);
        RtlZeroMemory(ZeroBuf, PAGE_SIZE);
        Target.QuadPart = PAGE_ROUND_DOWN(LowerBound.QuadPart);
        End.QuadPart = PAGE_ROUND_UP(UpperBound.QuadPart);

        // Handle leading page: read it, zero the tail portion, write back.
        if (LowerBound.QuadPart != Target.QuadPart)
        {
            /* NOTE(review): the second MIN operand is the distance from the
               start offset to the next page boundary — confirm intent of
               mixing PAGE_SIZE with the full 64-bit offset here. */
            ToWrite = MIN(UpperBound.QuadPart - LowerBound.QuadPart,
                          (PAGE_SIZE - LowerBound.QuadPart) & (PAGE_SIZE - 1));
            DPRINT("Zero last half %08x%08x %x\n",
                   Target.u.HighPart, Target.u.LowPart, ToWrite);
            Status = MiSimpleRead(FileObject, &Target, ZeroBuf, PAGE_SIZE, TRUE, &IOSB);
            if (!NT_SUCCESS(Status))
            {
                ExFreePool(ZeroBuf);
                RtlRaiseStatus(Status);
            }
            DPRINT1("RtlZeroMemory(%x,%x)\n",
                    ZeroBuf + LowerBound.QuadPart - Target.QuadPart, ToWrite);
            RtlZeroMemory(ZeroBuf + LowerBound.QuadPart - Target.QuadPart, ToWrite);
            Status = MiSimpleWrite(FileObject,
                                   &Target,
                                   ZeroBuf,
                                   MIN(PAGE_SIZE, UpperBound.QuadPart - Target.QuadPart),
                                   &IOSB);
            if (!NT_SUCCESS(Status))
            {
                ExFreePool(ZeroBuf);
                RtlRaiseStatus(Status);
            }
            Target.QuadPart += PAGE_SIZE;
        }

        /* Middle: write whole zero pages while more than one page remains. */
        DPRINT1("RtlZeroMemory(%x,%x)\n", ZeroBuf, PAGE_SIZE);
        RtlZeroMemory(ZeroBuf, PAGE_SIZE);
        while (UpperBound.QuadPart - Target.QuadPart > PAGE_SIZE)
        {
            DPRINT("Zero full page %08x%08x\n",
                   Target.u.HighPart, Target.u.LowPart);
            Status = MiSimpleWrite(FileObject, &Target, ZeroBuf, PAGE_SIZE, &IOSB);
            if (!NT_SUCCESS(Status))
            {
                ExFreePool(ZeroBuf);
                RtlRaiseStatus(Status);
            }
            Target.QuadPart += PAGE_SIZE;
        }

        /* Trailing page: read, zero the head portion, write back. */
        if (UpperBound.QuadPart > Target.QuadPart)
        {
            ToWrite = UpperBound.QuadPart - Target.QuadPart;
            DPRINT("Zero first half %08x%08x %x\n",
                   Target.u.HighPart, Target.u.LowPart, ToWrite);
            Status = MiSimpleRead(FileObject, &Target, ZeroBuf, PAGE_SIZE, TRUE, &IOSB);
            if (!NT_SUCCESS(Status))
            {
                ExFreePool(ZeroBuf);
                RtlRaiseStatus(Status);
            }
            DPRINT1("RtlZeroMemory(%x,%x)\n", ZeroBuf, ToWrite);
            RtlZeroMemory(ZeroBuf, ToWrite);
            Status = MiSimpleWrite(FileObject,
                                   &Target,
                                   ZeroBuf,
                                   MIN(PAGE_SIZE, UpperBound.QuadPart - Target.QuadPart),
                                   &IOSB);
            if (!NT_SUCCESS(Status))
            {
                ExFreePool(ZeroBuf);
                RtlRaiseStatus(Status);
            }
            Target.QuadPart += PAGE_SIZE;
        }
        ExFreePool(ZeroBuf);
        return TRUE;
    }

    /* Cached file: pin each overlapping BCB for write so the cache
       manager zeroes it. */
    CcpLock();
    ListEntry = Map->AssociatedBcb.Flink;
    while (ListEntry != &Map->AssociatedBcb)
    {
        Bcb = CONTAINING_RECORD(ListEntry, NOCC_BCB, ThisFileList);
        CcpReferenceCache(Bcb - CcCacheSections);
        if (Bcb->FileOffset.QuadPart + Bcb->Length >= LowerBound.QuadPart &&
            Bcb->FileOffset.QuadPart < UpperBound.QuadPart)
        {
            DPRINT("Bcb #%x (@%08x%08x)\n",
                   Bcb - CcCacheSections,
                   Bcb->FileOffset.u.HighPart, Bcb->FileOffset.u.LowPart);
            /* Clip the pin range to the BCB, the request, and the file's
               valid data length. */
            Target.QuadPart = MAX(Bcb->FileOffset.QuadPart, LowerBound.QuadPart);
            End.QuadPart = MIN(Map->FileSizes.ValidDataLength.QuadPart,
                               UpperBound.QuadPart);
            End.QuadPart = MIN(End.QuadPart,
                               Bcb->FileOffset.QuadPart + Bcb->Length);
            CcpUnlock();
            if (!CcPreparePinWrite(FileObject,
                                   &Target,
                                   End.QuadPart - Target.QuadPart,
                                   TRUE,
                                   Wait,
                                   &PinnedBcb,
                                   &PinnedBuffer))
            {
                return FALSE;
            }
            ASSERT(PinnedBcb == Bcb);
            CcpLock();
            ListEntry = ListEntry->Flink;
            /* Return from pin state */
            CcpUnpinData(PinnedBcb, TRUE);
        }
        /* NOTE(review): when the overlap test fails, ListEntry is not
           advanced before the next iteration — looks like a potential
           infinite loop; verify against upstream. */
        CcpUnpinData(Bcb, TRUE);
    }
    CcpUnlock();
    return TRUE;
}
/*
 * Initialize a replicated region and attach it to process pRef.
 *
 * vaddr/vaddr2 - requested address (in/out) and optional upper bound for
 *                range-constrained attachment; useVaddr forces vaddr as-is
 * size         - region size, rounded up to whole pages
 * alignreq     - required alignment (0 = default; may be upgraded to
 *                SEGMENT_SIZE for shared-segment FCMs)
 * frRef        - file representative backing the region, or NULL (e.g.
 *                redzone regions have no FCM)
 * writable     - actual FR permission, recorded for later access checks
 * fOff         - file offset of the region start
 * accessreq    - requested access mode
 * regionType   - type tag passed through to the process attach calls
 *
 * On failure the partially constructed region destroys itself; callers
 * get the error code back.
 */
SysStatus
RegionReplicated::RegionReplicatedRoot::initRegion(
    ProcessRef pRef, uval &vaddr, uval vaddr2, uval size, uval alignreq,
    FRRef frRef, uval writable, uval fOff, AccessMode::mode accessreq,
    uval useVaddr, RegionType::Type regionType)
{
    SysStatus rc = 0;
    PMRef pmRef;

    size = PAGE_ROUND_UP(size);
    tassertWrn(!useVaddr || (PAGE_ROUND_DOWN(vaddr) == vaddr),
               "creating an unaligned region, vaddr=%lx\n", vaddr);

    rc = DREF(pRef)->getPM(pmRef);
    if (!_SUCCESS(rc)) return rc;

    regionState = CREATING;

    /*
     * we record the actual permission of the FR. This is so that
     * if a debugger asks to write a readonly mapping, we can decide
     * if its legal.
     * Of course, we also use this to prevent an initial writable
     * mapping of a read only FR.
     */
    writeAllowed = writable;

    rc = DREF(pRef)->getHATProcess(hat);
    tassert(_SUCCESS(rc), err_printf("process destroyed\n"));

    regionVaddr = vaddr;
    regionSize = size;
    proc = pRef;
    access = accessreq;
    alignment = alignreq;
    fileOffset = fOff;

    /* can make regions without an fcm - see redzone for example
     * we attach first so we can ask fcm if it uses shared segments */
    if (frRef) {
        rc = DREF(frRef)->attachRegion(fcm, (RegionRef)getRef(), pmRef,
                                       accessreq);
        tassertWrn(_SUCCESS(rc), "attach failed\n");
        if (_FAILURE(rc)) {
            fcm = 0;                    // clear our fcm field
            tassert(0, err_printf("attach failed\n"));
            // FIXME we going to have a problem if we just call destroy
            // because that will attempt to create a
            // rep to call the method on, but we are currently holding
            // a lock preventing reps from being created
            (*((RegionRef)getRef()))->destroy(); // destroy ourselves
            regionState = DESTROYING;
            return rc;
        }
    } else {
        fcm = NULL;
    }

    // If ok, attach the region to the process
    // attach newly contructed region to process
    if (useVaddr) {
        /* caller fixed the address */
        rc = DREF(pRef)->attachFixedRegion(
            regionVaddr, regionSize, (RegionRef)getRef(), regionType);
    } else if (vaddr2 == 0) {
        // alignment fix up for shared segments: segment-sized regions on
        // shared-segment FCMs get segment alignment so mappings can be shared
        if (size >= SEGMENT_SIZE && alignment == 0 && !useVaddr && fcm &&
            DREF(fcm)->sharedSegments()) {
            alignment = SEGMENT_SIZE;
        }
        rc = DREF(pRef)->attachDynamicRegion(
            regionVaddr, regionSize, (RegionRef)getRef(), regionType,
            alignment);
        // return address allocated by process
        vaddr = regionVaddr;
    } else {
        /* allocate somewhere within [vaddr, vaddr2] */
        rc = DREF(pRef)->attachWithinRangeRegion(
            vaddr, vaddr2, regionSize, (RegionRef)getRef(), regionType,
            alignment);
        regionVaddr = vaddr;
    }

    if (!_SUCCESS(rc)) {
        // failed - delete it
        tassert(0, err_printf("Region constructor failed\n"));
        if (fcm != NULL) {
            DREF(fcm)->detachRegion((RegionRef)getRef());
            fcm = NULL;
        }
        // FIXME we are going to have a problem if we just call destroy
        // unchecked because that will attempt to create a
        // rep to call the method on, but we are currently holding
        // a lock preventing reps from being created
        (*((RegionRef)getRef()))->destroyUnchecked(); // free ref
        regionState = DESTROYING;
        return rc;
    }

    // unmap any full segments so shared mappings can be used
    if ((SEGMENT_ROUND_DOWN(vaddr + size) > SEGMENT_ROUND_UP(vaddr)) && fcm &&
        DREF(fcm)->sharedSegments()) {
        rc = DREF(hat)->unmapRange(
            SEGMENT_ROUND_UP(vaddr),
            SEGMENT_ROUND_DOWN(vaddr + size) - SEGMENT_ROUND_UP(vaddr),
            ppset);
        tassert(_SUCCESS(rc), err_printf("oops\n"));
    }

    regionState = NORMAL;
    return rc;
}
/*
 * Create a dispatcher (rd) on virtual processor vp (both packed in dspid)
 * for this process, bound to the CPU domain annex cda.
 *
 * Validates the ids and the page alignment of dispatcherAddr, grows the
 * per-vp table if needed (under requests.stop()), creates the VPInfo for
 * a new vp, maps/initializes the dispatcher page (user processes only),
 * then builds and attaches a ProcessAnnex with hardware interrupts
 * disabled. All per-vp mutation happens under vpInfo->lock.
 */
SysStatus
ProcessVPList::createDispatcher(CPUDomainAnnex *cda, DispatcherID dspid,
                                EntryPointDesc entry, uval dispatcherAddr,
                                uval initMsgLength, char *initMsg,
                                ProcessRef procRef, HATRef hatRef)
{
    SysStatus rc;
    VPInfo *vpInfo;
    uval newLimit, size;
    DspTable *newTable;
    ProcessAnnex *pa;
    SegmentTable *segTable;
    Dispatcher *dsp, *dspUser;
    RegionRef dspRegRef;
    FCMRef dspFCMRef;
    uval dspOffset, dspAddrKern;

    tassertMsg(cda->getPP() == Scheduler::GetVP(), "CDA not on this pp.\n");

    /* Unpack and validate the dispatcher id. */
    RDNum rd;
    VPNum vp;
    SysTypes::UNPACK_DSPID(dspid, rd, vp);

    if (vp >= Scheduler::VPLimit) {
        return _SERROR(1752, 0, EINVAL);
    }

    if (rd >= Scheduler::RDLimit) {
        return _SERROR(1751, 0, EINVAL);
    }

    /* Dispatcher structure must be page-aligned. */
    if (PAGE_ROUND_DOWN(dispatcherAddr) != dispatcherAddr) {
        return _SERROR(1327, 0, EINVAL);
    }

    if (requests.enter() < 0) {
        return _SERROR(1328, 0, ESRCH);     // process being destroyed
    }

    if ((vp < vpLimit) && (dspTable->vpInfo[vp] != NULL)) {
        vpInfo = dspTable->vpInfo[vp];
    } else {
        // We don't have a VPInfo structure for this vp. Create one, guarded
        // by stop()'ing requests. RequestCountWithStop doesn't support an
        // upgrade operation, so we have to "leave" before we can "stop".
        requests.leave();
        if (requests.stop() < 0) {
            return _SERROR(2640, 0, ESRCH); // process being destroyed
        }

        if (vp >= vpLimit) {
            // We have to increase the size of the table. We make the first
            // increment larger than subsequent ones to lessen ramp-up costs.
            newLimit = (vpLimit == 1) ? 16 : (vpLimit * 2);
            // Make sure the newLimit is large enough to include vp. We won't
            // blow up because we know that vp < Scheduler::VPLimit.
            while (vp >= newLimit) {
                newLimit *= 2;
            }

            // Allocate a new table. DspTable includes space for one VPInfo
            // pointer, hence the "newLimit - 1" in the following calculation.
            size = sizeof(DspTable) + ((newLimit - 1) * sizeof(VPInfo *));
            newTable = (DspTable *) AllocGlobalPadded::alloc(size);
            tassertMsg(newTable != NULL, "DspTable allocation failed.\n");

            // Copy content of the old table to the new, and initialize the
            // rest of the new table.
            for (uval i = 0; i < vpLimit; i++) {
                newTable->vpInfo[i] = dspTable->vpInfo[i];
            }
            for (uval i = vpLimit; i < newLimit; i++) {
                newTable->vpInfo[i] = NULL;
            }

            // Free the old table, unless it is the initial (pre-allocated)
            // table.
            if (vpLimit > 1) {
                size = sizeof(DspTable) + ((vpLimit - 1) * sizeof(VPInfo *));
                AllocGlobalPadded::free(dspTable, size);
            }

            // Install the new table.
            dspTable = newTable;
            vpLimit = newLimit;
        }

        // We have to check vpInfo[vp] again now that requests are stop'd.
        vpInfo = dspTable->vpInfo[vp];
        if (vpInfo == NULL) {
            if (vp == 0) {
                // Space for the first VPInfo structure is pre-allocated.
                vpInfo = &vpInfo0;
            } else {
                vpInfo = new VPInfo;
                tassertMsg(vpInfo != NULL, "VPInfo allocation failed.\n");
            }
            vpInfo->init(cda->getPP());
            dspTable->vpInfo[vp] = vpInfo;
            vpCounter++;
            if (!KernelInfo::ControlFlagIsSet(KernelInfo::RUN_SILENT)) {
                err_printf("Mapping program %s, pid 0x%lx, vp %ld to pp %ld.\n",
                           name, processID, vp, vpInfo->pp);
            }
        }

        // Restart and then re-enter the request counter.
        requests.restart();
        if (requests.enter() < 0) {
            return _SERROR(2641, 0, ESRCH); // process being destroyed
        }
    }

    /*
     * At this point the requests counter has been enter'd and vpInfo points
     * to a valid VPInfo structure for this vp. All further processing is
     * done under the vp lock.
     */
    vpInfo->lock.acquire();

    if (vpInfo->pp != cda->getPP()) {
        // VP is not on this physical processor.
        rc = _SERROR(1750, 0, EINVAL);
        goto CleanupAndReturn;
    }

    if (vpInfo->dspInfo[rd].pa != NULL) {
        // Dispatcher already exists.
        rc = _SERROR(1329, 0, EEXIST);
        goto CleanupAndReturn;
    }

    dspUser = (Dispatcher *) dispatcherAddr;

    if (isKern) {
        /* Kernel process: the dispatcher is directly addressable. */
        dspFCMRef = NULL;
        dspOffset = 0;
        dsp = dspUser;
        // Set a bogus interrupt bit to make the dispatcher runnable.
        (void) dsp->interrupts.fetchAndSet(SoftIntr::PREEMPT);
    } else {
        /* User process: pin and map the dispatcher page into the kernel,
           then initialize it and queue the init message. */
        rc = DREF(procRef)->vaddrToRegion(dispatcherAddr, dspRegRef);
        if (_FAILURE(rc)) goto CleanupAndReturn;

        rc = DREF(dspRegRef)->vaddrToFCM(vp, dispatcherAddr, 0,
                                         dspFCMRef, dspOffset);
        if (_FAILURE(rc)) goto CleanupAndReturn;

        rc = DREF(dspFCMRef)->addReference();
        if (_FAILURE(rc)) goto CleanupAndReturn;

        rc = archAllocDispatcherPage(dispatcherAddr, dspAddrKern);
        tassertMsg(_SUCCESS(rc), "archAllocDispatcherPage failed.\n");

        rc = DREF(dspFCMRef)->establishPage(dspOffset, dspAddrKern,
                                            PAGE_SIZE);
        tassertMsg(_SUCCESS(rc), "establishPage failed.\n");

        dsp = (Dispatcher *) dspAddrKern;
        dsp->init(dspid);

        rc = dsp->asyncBufferLocal.storeMsg(_KERNEL_PID, 0, 0,
                                            initMsgLength, initMsg);
        if (_FAILURE(rc)) {
            (void) DREF(dspFCMRef)->disEstablishPage(dspOffset, PAGE_SIZE);
            (void) DREF(dspFCMRef)->removeReference();
            goto CleanupAndReturn;
        }
        (void) dsp->interrupts.fetchAndSet(SoftIntr::ASYNC_MSG);
    }

    rc = DREF(hatRef)->getSegmentTable(vp, segTable);
    tassertMsg(_SUCCESS(rc), "getSegmentTable failed.\n");

    pa = new ProcessAnnex();
    tassertMsg(pa != NULL, "ProcessAnnex allocation failed.\n");

    pa->init(procRef, processID, userMode, isKern,
             dspUser, dsp, dspFCMRef, dspOffset, segTable, dspid);
    pa->setEntryPoint(RUN_ENTRY, entry);

    vpInfo->dspInfo[rd].pa = pa;
    vpInfo->dspCounter++;

    /* Publish the annex to the IPC path atomically w.r.t. interrupts. */
    InterruptState is;
    disableHardwareInterrupts(is);

    exceptionLocal.ipcTargetTable.enter(pa);
    pa->attach(cda);

    enableHardwareInterrupts(is);

    rc = 0;

CleanupAndReturn:
    vpInfo->lock.release();
    requests.leave();
    return rc;
}