void devTree::initialize(uint64_t i_addr, size_t i_maxSize, bool i_virtual) { /* Initialize the device tree header. */ mMaxSize = i_maxSize; if (i_virtual) { mPhysAddr = 0; mSpace = reinterpret_cast<char*>(i_addr); } else { mPhysAddr = i_addr; mSpace= static_cast<char*> (mm_block_map(reinterpret_cast<void*>(mPhysAddr), mMaxSize)); } memset(mSpace, 0, mMaxSize); mNextPhandle = 0x10000000; TRACFCOMP( g_trac_devtree, "FDT located @ v:%p p:0x%x", mSpace, mPhysAddr); mHeader->magicNumber = DT_MAGIC; mHeader->totalSize = (sizeof(*mHeader) + (sizeof(dtReserveEntry_t) * DT_MAX_MEM_RESERVE)); mHeader->offsetStruct = mHeader->totalSize; mHeader->offsetStrings = mHeader->totalSize; mHeader->offsetReservedMemMap = sizeof(*mHeader); mHeader->version = DT_CUR_VERSION; mHeader->lastCompatVersion = DT_COMPAT_VERSION; mHeader->bootCpuId = 0; mHeader->sizeStrings = 0; mHeader->sizeStruct = 0; /* Create the initial root node. */ uint32_t* curWord = getStructSectionAtOffset(0); *curWord++ = DT_BEGIN_NODE; *curWord++ = 0; *curWord++ = DT_END_NODE; *curWord = DT_END; /* Adjust offsets and sizes to account for the root node we just added*/ uint32_t structSizeAdded = sizeof(uint32_t) * 4; mHeader->offsetStrings += structSizeAdded; mHeader->sizeStruct += structSizeAdded; mHeader->totalSize += structSizeAdded; /* Add the standard root node properties. */ dtOffset_t rootNode = findNode("/"); addPropertyCell32(rootNode, "#address-cells", 2); addPropertyCell32(rootNode, "#size-cells", 2); //"Get" the phandle -- this will add one to root node as //it doesn't already have one getPhandle(rootNode); }
static errlHndl_t load_pnor_section(PNOR::SectionId i_section, uint64_t i_physAddr) { TRACFCOMP(ISTEPS_TRACE::g_trac_isteps_trace,ENTER_MRK"load_pnor_section()"); errlHndl_t err = nullptr; #ifdef CONFIG_SECUREBOOT // Securely load section TRACFCOMP(ISTEPS_TRACE::g_trac_isteps_trace,"load_pnor_section: secure section load of secId=0x%X (%s)", i_section, PNOR::SectionIdToString(i_section)); err = PNOR::loadSecureSection(i_section); if (err) { return err; } // Do not need to unload since we have plenty of memory at this point. #endif // Get the section info from PNOR. PNOR::SectionInfo_t pnorSectionInfo; err = PNOR::getSectionInfo( i_section, pnorSectionInfo ); if( err != nullptr ) { TRACFCOMP(ISTEPS_TRACE::g_trac_isteps_trace, "load_pnor_section: Could not get section info from %x", i_section); return err; } // XZ repository: http:git.tukaani.org/xz.git // Header specifics can be found in xz/doc/xz-file-format.txt const uint8_t HEADER_MAGIC[]= { 0xFD, '7', 'z', 'X', 'Z', 0x00 }; uint8_t* l_pnor_header = reinterpret_cast<uint8_t *>(pnorSectionInfo.vaddr); bool l_pnor_is_XZ_compressed = (0 == memcmp(l_pnor_header, HEADER_MAGIC, sizeof(HEADER_MAGIC))); TRACFCOMP(ISTEPS_TRACE::g_trac_isteps_trace, "load_pnor_section: is XZ_compressed: %d", l_pnor_is_XZ_compressed); // This assumes that the maximum decompression ratio will always be less // than 1:16 (compressed:uncompressed). This works because XZ compression // is usually 14.1%, the decompressor does not need the exact size, and // we have all of mainstore memory at this point. uint32_t uncompressedPayloadSize = l_pnor_is_XZ_compressed ? (pnorSectionInfo.size * 16) : pnorSectionInfo.size; const uint32_t originalPayloadSize = pnorSectionInfo.size; printk( "Loading PNOR section %d (%s) %d bytes @0x%lx\n", i_section, pnorSectionInfo.name, originalPayloadSize, i_physAddr ); void * loadAddr = NULL; // Map in the physical memory we are loading into. 
// If we are not xz compressed, the uncompressedSize // is equal to the original size. loadAddr = mm_block_map( reinterpret_cast<void*>( i_physAddr ), uncompressedPayloadSize ); // Print out inital progress bar. #ifdef CONFIG_CONSOLE const int progressSteps = 80; int progress = 0; for ( int i = 0; i < progressSteps; ++i ) { printk( "." ); } printk( "\r" ); #endif if(!l_pnor_is_XZ_compressed) { // Load the data block by block and update the progress bar. const uint32_t BLOCK_SIZE = 4096; for ( uint32_t i = 0; i < originalPayloadSize; i += BLOCK_SIZE ) { memcpy( reinterpret_cast<void*>( reinterpret_cast<uint64_t>(loadAddr) + i ), reinterpret_cast<void*>( pnorSectionInfo.vaddr + i ), std::min( originalPayloadSize - i, BLOCK_SIZE ) ); #ifdef CONFIG_CONSOLE for ( int new_progress = (i * progressSteps) / originalPayloadSize; progress <= new_progress; progress++ ) { printk( "=" ); } #endif } #ifdef CONFIG_CONSOLE printk( "\n" ); #endif } if(l_pnor_is_XZ_compressed) { struct xz_buf b; struct xz_dec *s; enum xz_ret ret; xz_crc32_init(); s = xz_dec_init(XZ_SINGLE, 0); if(s == NULL) { TRACFCOMP(ISTEPS_TRACE::g_trac_isteps_trace,ERR_MRK "load_pnor_section: XZ Embedded Initialization failed"); return err; } static const uint64_t compressed_SIZE = originalPayloadSize; static const uint64_t decompressed_SIZE = uncompressedPayloadSize; b.in = reinterpret_cast<uint8_t *>( pnorSectionInfo.vaddr); b.in_pos = 0; b.in_size = compressed_SIZE; b.out = reinterpret_cast<uint8_t *>(loadAddr); b.out_pos = 0; b.out_size = decompressed_SIZE; ret = xz_dec_run(s, &b); if(ret == XZ_STREAM_END) { TRACFCOMP(ISTEPS_TRACE::g_trac_isteps_trace, "load_pnor_section: The %s section was decompressed.", pnorSectionInfo.name); }else { TRACFCOMP(ISTEPS_TRACE::g_trac_isteps_trace,ERR_MRK "load_pnor_section: xz-embedded returned an error, ", "the ret is %d",ret); /*@ * @errortype * @reasoncode fapi::RC_INVALID_RETURN_XZ_CODE * @severity ERRORLOG::ERRL_SEV_UNRECOVERABLE * @moduleid fapi::MOD_START_XZ_PAYLOAD * 
@devdesc xz-embedded has returned an error. * the return code can be found in xz.h * @custdesc Error uncompressing payload image from * boot flash * @userdata1 Return code from xz-embedded * @userdata2[0:31] Original Payload Size * @userdata2[32:63] Uncompressed Payload Size */ err = new ERRORLOG::ErrlEntry(ERRORLOG::ERRL_SEV_UNRECOVERABLE, fapi::MOD_START_XZ_PAYLOAD, fapi::RC_INVALID_RETURN_XZ_CODE, ret,TWO_UINT32_TO_UINT64( originalPayloadSize, uncompressedPayloadSize)); err->addProcedureCallout(HWAS::EPUB_PRC_PHYP_CODE, HWAS::SRCI_PRIORITY_HIGH); } //Clean up memory xz_dec_end(s); } int rc = 0; rc = mm_block_unmap(reinterpret_cast<void *>(loadAddr)); if(rc) { TRACFCOMP(ISTEPS_TRACE::g_trac_isteps_trace,ERR_MRK "load_pnor_section: mm_block_unmap returned 1"); /*@ * @errortype * @reasoncode fapi::RC_MM_UNMAP_ERR * @severity ERRORLOG::ERRL_SEV_UNRECOVERABLE * @moduleid fapi::MOD_START_XZ_PAYLOAD * @devdesc mm_block_unmap returned incorrectly with 0 * @custdesc Error unmapping memory section */ err = new ERRORLOG::ErrlEntry(ERRORLOG::ERRL_SEV_UNRECOVERABLE, fapi::MOD_START_XZ_PAYLOAD, fapi::RC_MM_UNMAP_ERR, 0,0,0); } return err; }
// Initiates a Dynamic Root of Trust Measurement (DRTM) late launch across
// all functional processor chips.  On the success path this function never
// returns (the task yields forever until SBE quiesces the cores); it only
// returns on an error path.
errlHndl_t initiateDrtm()
{
    SB_ENTER("initiateDrtm");

    errlHndl_t pError = nullptr;

    // For DRTM, the thread has to be pinned to a core (and therefore pinned to
    // a chip)
    task_affinity_pin();

    void* drtmPayloadVirtAddr = nullptr;

    do {

    // Security-switch bit lists: LLP for the acting master, LLS for all
    // other processors.
    const std::vector<SECUREBOOT::ProcSecurity> LLP {
        SECUREBOOT::ProcSecurity::LLPBit,
    };
    const std::vector<SECUREBOOT::ProcSecurity> LLS {
        SECUREBOOT::ProcSecurity::LLSBit,
    };

    // Determine which fabric group and chip this task is executing on and
    // create a filter to find the matching chip target
    auto cpuId = task_getcpuid();
    auto groupId = PIR_t::groupFromPir(cpuId);
    auto chipId = PIR_t::chipFromPir(cpuId);
    TARGETING::PredicateAttrVal<TARGETING::ATTR_FABRIC_GROUP_ID>
        matchesGroup(groupId);
    TARGETING::PredicateAttrVal<TARGETING::ATTR_FABRIC_CHIP_ID>
        matchesChip(chipId);
    TARGETING::PredicatePostfixExpr matchesGroupAndChip;
    matchesGroupAndChip.push(&matchesGroup).push(&matchesChip).And();

    // Get all the functional proc chips and find the chip we're running on
    TARGETING::TargetHandleList funcProcChips;
    TARGETING::getAllChips(funcProcChips,
                           TARGETING::TYPE_PROC);
    if(funcProcChips.empty())
    {
        // TODO: RTC 167205: GA error handling
        assert(false,"initiateDrtm: BUG! Functional proc chips is empty, "
               "yet this code is running on a functional chip!");
        break;
    }

    // NOTE: std::find_if requires predicates to be copy constructable, but
    // predicates are not; hence use a wrapper lambda function to bypass
    // that limitation
    auto pMatch = std::find_if(funcProcChips.begin(),funcProcChips.end(),
        [&matchesGroupAndChip]( TARGETING::Target* pTarget )
        {
            return matchesGroupAndChip(pTarget);
        } );
    if(pMatch == funcProcChips.end())
    {
        // TODO: RTC 167205: GA error handling
        assert(false, "initiateDrtm: BUG! No functional chip found "
               "to be running this code");
        break;
    }

    // Move the matching target to the end of the list, so that this chip's
    // late-launch bit is set LAST (see sequencing notes below).
    // NOTE: If reverse iterators were supported, we could have verified the
    // last element of the container is not the match, and done a
    // std::iter_swap of the match and the last element
    TARGETING::Target* const pMatchTarget = *pMatch;
    funcProcChips.erase(pMatch);
    funcProcChips.push_back(pMatchTarget);

    // Map to the DRTM payload area in mainstore
    const uint32_t drtmPayloadPhysAddrMb = DRTM_RIT_PAYLOAD_PHYS_ADDR_MB;
    drtmPayloadVirtAddr = mm_block_map(
        reinterpret_cast<void*>(drtmPayloadPhysAddrMb*BYTES_PER_MEGABYTE),
        PAGESIZE);
    if(drtmPayloadVirtAddr == nullptr)
    {
        // TODO: RTC 167205: GA error handling
        assert(false, "initiateDrtm: BUG! Failed in call to mm_block_map "
               "to map the DRTM payload.");
        break;
    }

    // Copy the DRTM payload to the DRTM payload area
    memcpy(reinterpret_cast<uint32_t*>(drtmPayloadVirtAddr),
           DRTM_RIT_PAYLOAD,
           sizeof(DRTM_RIT_PAYLOAD));

    // The required generic sequencing to initiate DRTM is as follows:
    // 1) Initiating task must pin itself to a core (to ensure it
    //    will not be accidentally queisced by SBE)
    // 2) It must set the DRTM payload information in the master processor
    //    mailbox scratch registers (registers 7 and 8) before it goes
    //    offline
    // 3) It must determine the processor it's currently running on
    // 4) It must set the late launch bit (LL) on all other processors
    //    4a) If the given processor is an active master, it must set
    //        late launch primary (LLP) bit
    //    4b) Otherwise it must set late launch secondary (LLS) bit
    // 5) Finally, it must set its own processor's LL bit last, according to
    //    the rules of step 4.
    for(auto &pFuncProc :funcProcChips)
    {
        const auto procMasterType = pFuncProc->getAttr<
            TARGETING::ATTR_PROC_MASTER_TYPE>();

        // If master chip, set the DRTM payload address and validity
        if(procMasterType == TARGETING::PROC_MASTER_TYPE_ACTING_MASTER)
        {
            (void)setDrtmPayloadPhysAddrMb(drtmPayloadPhysAddrMb);
        }

        // Acting master gets LLP; everyone else gets LLS.
        pError = SECUREBOOT::setSecuritySwitchBits(
            procMasterType == TARGETING::PROC_MASTER_TYPE_ACTING_MASTER ?
                LLP : LLS,
            pFuncProc);
        if(pError)
        {
            SB_ERR("initiateDrtm: setSecuritySwitchBits() failed for proc "
                   "= 0x%08X. Tried to set LLP or LLS.",
                   get_huid(pFuncProc));
            break;
        }
    }

    if(pError)
    {
        break;
    }

    // Success path: spin yielding until SBE quiesces this core; we do not
    // expect to return from here.
    SB_INF("initiateDrtm: SBE should eventually quiesce all cores; until "
           "then, endlessly yield the task");
    while(1)
    {
        task_yield();
    }

    } while(0);

    // If we -do- come back from this function (on error path only), then we
    // should unpin
    task_affinity_unpin();

    if(drtmPayloadVirtAddr)
    {
        auto rc = mm_block_unmap(const_cast<void*>(drtmPayloadVirtAddr));
        if(rc != 0)
        {
            // TODO: RTC 167205: GA error handling
            assert(false,"initiateDrtm: BUG! mm_block_unmap failed for virtual "
                   "address 0x%16llX.",
                   drtmPayloadVirtAddr);
        }
    }

    if(pError)
    {
        SB_ERR("initiateDrtm: plid=0x%08X, eid=0x%08X, reason=0x%04X",
               ERRL_GETPLID_SAFE(pError),
               ERRL_GETEID_SAFE(pError),
               ERRL_GETRC_SAFE(pError));
    }

    SB_EXIT("initiateDrtm");

    return pError;
}
errlHndl_t validateDrtmPayload() { SB_ENTER("validateDrtmPayload"); errlHndl_t pError = nullptr; const void* drtmPayloadVirtAddr = nullptr; do { bool drtmMpIpl = false; isDrtmMpipl(drtmMpIpl); if(drtmMpIpl) { SB_DBG("validateDrtmPayload: DRTM active, validating DRTM payload." "proc chips."); TARGETING::Target* pSysTarget = nullptr; TARGETING::targetService().getTopLevelTarget(pSysTarget); if(pSysTarget == nullptr) { // TODO: RTC 167205: GA error handling assert(false,"validateDrtmPayload: BUG! nullptr system target " "detected"); break; } const auto drtmPayloadPhysAddrMb = pSysTarget->getAttr< TARGETING::ATTR_DRTM_PAYLOAD_ADDR_MB_HB>(); if(drtmPayloadPhysAddrMb == 0) { assert(false,"validateDrtmPayload: BUG! DRTM payload physical " "address should not be 0."); break; } const uint64_t drtmPayloadPhysAddr = drtmPayloadPhysAddrMb*BYTES_PER_MEGABYTE; SB_INF("validateDrtmPayload: DRTM payload available at physical " "address of %d MB (0x%16llX).", drtmPayloadPhysAddrMb,drtmPayloadPhysAddr); // Compute DRTM payload size // TODO: RTC 167205: Once the DRTM save area is known/defined, // need to figure out a better initial size to map. For example, // perhaps we map just one page to begin with, in order to read out // the actual total size. Also, a size is available, assert if the // size is 0. uint64_t drtmPayloadSize = 0; #ifdef CONFIG_DRTM_TRIGGERING drtmPayloadSize = ALIGN_PAGE(sizeof(DRTM_RIT_PAYLOAD)); #endif // Map in the physical memory to virtual memory drtmPayloadVirtAddr = mm_block_map ( reinterpret_cast<void*>(drtmPayloadPhysAddr),drtmPayloadSize); if(drtmPayloadVirtAddr == nullptr) { // TODO: RTC 167205: GA error handling assert(false,"validateDrtmPayload: BUG! 
mm_block_map returned " "nullptr for physical address 0x%016llX and size " "0x%016llX.", drtmPayloadPhysAddr,drtmPayloadSize); break; } #ifdef CONFIG_DRTM_TRIGGERING // Verify the payload matches expected result if(memcmp(drtmPayloadVirtAddr,DRTM_RIT_PAYLOAD, sizeof(DRTM_RIT_PAYLOAD) != 0)) { const uint32_t* pAddrAct = reinterpret_cast<const uint32_t*>( drtmPayloadVirtAddr); const uint32_t* pAddrExp = reinterpret_cast<const uint32_t*>( &DRTM_RIT_PAYLOAD); SB_ERR("validateDrtmPayload: DRTM RIT: payload content at " "0x%16llX was not as expected. Expected value = 0x%08X, " "actual = 0x%08X", drtmPayloadVirtAddr, *pAddrAct, *pAddrExp); // TODO: RTC 167205: GA error handling assert(false,"validateDrtmPayload: BUG: DRTM payload content " "at 0x%16llX was not as expected. Expected value = " "0x%08X, actual = 0x%08X", drtmPayloadVirtAddr, *pAddrAct, *pAddrExp); break; } // Extend (arbitrary) measurement to PCR17 SHA512_t hash = {0}; memcpy(hash,DRTM_RIT_PAYLOAD,sizeof(DRTM_RIT_PAYLOAD)); pError = TRUSTEDBOOT::pcrExtend(TRUSTEDBOOT::PCR_DRTM_17, TRUSTEDBOOT::EV_COMPACT_HASH, hash, sizeof(SHA512_t),DRTM_RIT_LOG_TEXT); if(pError) { SB_ERR("validateDrtmPayload: Failed in pcrExtend() for PCR 17"); break; } #else // TODO: RTC 167205: Securely verify the measured launch environment // TODO: RTC 167205: Really measure+extend the payload #endif } else { SB_INF("validateDrtmPayload: DRTM not active, skipping DRTM " "payload verification "); } } while(0); if(drtmPayloadVirtAddr) { auto rc = mm_block_unmap(const_cast<void*>(drtmPayloadVirtAddr)); if(rc != 0) { // TODO: RTC 167205: GA error handling assert(false,"validateDrtmPayload: BUG! mm_block_unmap failed for " "virtual address 0x%16llX.", drtmPayloadVirtAddr); } } if(pError) { SB_ERR("validateDrtmPayload: plid=0x%08X, eid=0x%08X, reason=0x%04X", ERRL_GETPLID_SAFE(pError), ERRL_GETEID_SAFE(pError), ERRL_GETRC_SAFE(pError)); } SB_EXIT("validateDrtmPayload"); return pError; }
/**
 * @brief Starts OCCs on all Processors in the node
 *        This is intended to be used for AVP testing.
 *
 * @param[out] o_failedOccTarget: Pointer to the target failing
 *             loadnStartAllOccs
 * @param[in]  i_useSRAM: bool - use SRAM for OCC image, ie during IPL
 *             true if during IPL, false if at end of IPL (default)
 * @return errlHndl_t Error log if OCC load failed
 */
errlHndl_t loadnStartAllOccs(TARGETING::Target *& o_failedOccTarget,
                             bool i_useSRAM)
{
    errlHndl_t l_errl = NULL;
    void* homerVirtAddrBase = NULL;
    uint64_t homerPhysAddrBase = VMM_HOMER_REGION_START_ADDR;
    bool winkle_loaded = false;

    TRACUCOMP( g_fapiTd, ENTER_MRK"loadnStartAllOccs(%d)", i_useSRAM);

    do {

#ifndef __HOSTBOOT_RUNTIME
        //OCC requires the build_winkle_images library
        if ( !VFS::module_is_loaded( "libbuild_winkle_images.so" ) )
        {
            l_errl = VFS::module_load( "libbuild_winkle_images.so" );
            if ( l_errl )
            {
                // load module returned with errl set
                TRACFCOMP( g_fapiTd,ERR_MRK"loadnStartAllOccs: Could not load build_winkle module" );
                // break from do loop if error occured
                break;
            }
            winkle_loaded = true;
        }

        assert(VMM_HOMER_REGION_SIZE <= THIRTYTWO_GB,
               "loadnStartAllOccs: Unsupported HOMER Region size");

        if (!i_useSRAM)
        {
            //If running Sapphire need to place this at the top of memory
            if(TARGETING::is_sapphire_load())
            {
                homerPhysAddrBase = TARGETING::get_top_mem_addr();
                assert (homerPhysAddrBase != 0,
                        "loadnStartAllOccs: Top of memory was 0!");
                homerPhysAddrBase -= VMM_ALL_HOMER_OCC_MEMORY_SIZE;
            }
            TRACFCOMP( g_fapiTd, "HOMER is at %.16X", homerPhysAddrBase );

            //Map entire homer region into virtual memory
            homerVirtAddrBase =
                mm_block_map(reinterpret_cast<void*>(homerPhysAddrBase),
                             VMM_HOMER_REGION_SIZE);
            TRACFCOMP( g_fapiTd, "HOMER virtaddrbase %.16X",
                       homerVirtAddrBase );
        }
        else
        {
            // malloc space big enough for all of OCC
            homerVirtAddrBase = (void *)malloc(1 * MEGABYTE);
            homerPhysAddrBase = mm_virt_to_phys(homerVirtAddrBase);
        }
#endif

        if (i_useSRAM)
        {
            // OCC is going into L3 and SRAM so only need 1 prime and load
            // into the Master Proc
            TargetService & tS = targetService();
            Target * sysTarget = NULL;
            tS.getTopLevelTarget( sysTarget );
            assert( sysTarget != NULL );
            Target* masterproc = NULL;
            tS.masterProcChipTargetHandle( masterproc );

            /******* SETUP AND LOAD **************/
            l_errl = primeAndLoadOcc(masterproc,
                                     homerVirtAddrBase,
                                     homerPhysAddrBase,
                                     i_useSRAM);
            if(l_errl)
            {
                o_failedOccTarget = masterproc;
                TRACFCOMP( g_fapiImpTd, ERR_MRK
                           "loadnStartAllOccs:primeAndLoadOcc failed");
                free(homerVirtAddrBase);
                // BUG FIX: null the pointer after free() -- the cleanup code
                // after the do-while would otherwise call mm_block_unmap()
                // on a freed heap pointer.
                homerVirtAddrBase = NULL;
                break;
            }

            /********* START OCC *************/
            l_errl = HBOCC::startOCC(masterproc, NULL, o_failedOccTarget);

            // it either started or errored; either way, free the memory
            free(homerVirtAddrBase);
            // BUG FIX: see above -- prevent mm_block_unmap on freed memory.
            homerVirtAddrBase = NULL;

            if (l_errl)
            {
                TRACFCOMP( g_fapiImpTd,
                           ERR_MRK"loadnStartAllOccs: startOCC failed");
                break;
            }
        }
        else
        {
            TargetHandleList procChips;
            getAllChips(procChips, TYPE_PROC, true);
            if(procChips.size() == 0)
            {
                TRACFCOMP( g_fapiTd,INFO_MRK"loadnStartAllOccs: No processors found" );
                //We'll never get this far in the IPL without any processors,
                // so just exit.
                break;
            }

            TRACUCOMP( g_fapiTd,
                       INFO_MRK"loadnStartAllOccs: %d procs found",
                       procChips.size());

            TargetHandleList::iterator itr1 = procChips.begin();

            //The OCC Procedures require processors within a DCM be
            //setup together.  So, first checking if any proc has
            //DCM installed attribute set.  If not, we can iterate
            //over the list in any order.
            //If DCM installed is set, we work under the assumption
            //that each nodeID is a DCM.  So sort the list by NodeID
            //then call OCC Procedures on NodeID pairs.
            if(0 == (*itr1)->getAttr<ATTR_PROC_DCM_INSTALLED>())
            {
                TRACUCOMP( g_fapiTd,
                           INFO_MRK"loadnStartAllOccs: non-dcm path entered");
                for (TargetHandleList::iterator itr = procChips.begin();
                     itr != procChips.end();
                     ++itr)
                {
                    /******* SETUP AND LOAD **************/
                    l_errl = primeAndLoadOcc(*itr,
                                             homerVirtAddrBase,
                                             homerPhysAddrBase,
                                             i_useSRAM);
                    if(l_errl)
                    {
                        o_failedOccTarget = *itr;
                        TRACFCOMP( g_fapiImpTd, ERR_MRK
                                   "loadnStartAllOccs:primeAndLoadOcc failed");
                        break;
                    }

                    /********* START OCC *************/
                    l_errl = HBOCC::startOCC(*itr, NULL, o_failedOccTarget);
                    if (l_errl)
                    {
                        TRACFCOMP( g_fapiImpTd,
                                   ERR_MRK"loadnStartAllOccs: startOCC failed");
                        break;
                    }
                }
                if (l_errl)
                {
                    break;
                }
            }
            else
            {
                TRACUCOMP( g_fapiTd,
                           INFO_MRK"loadnStartAllOccs: Following DCM Path");
                std::sort(procChips.begin(), procChips.end(),
                          orderByNodeAndPosition);
                TRACUCOMP( g_fapiTd,
                           INFO_MRK"loadnStartAllOccs: procChips list sorted");

                for (TargetHandleList::iterator itr = procChips.begin();
                     itr != procChips.end();
                     ++itr)
                {
                    TRACUCOMP( g_fapiImpTd,
                               INFO_MRK"loadnStartAllOccs: Inspecting first target" );
                    Target* targ0 = *itr;
                    Target* targ1 = NULL;
                    TRACUCOMP( g_fapiImpTd, INFO_MRK
                               "loadnStartAllOccs: Cur target nodeID=%d",
                               targ0->getAttr<ATTR_FABRIC_NODE_ID>());

                    //if the next target in the list is in the same node
                    // they are on the same DCM, so bump itr forward
                    // and update targ1 pointer
                    if((itr+1) != procChips.end())
                    {
                        TRACUCOMP( g_fapiImpTd, INFO_MRK
                                   "loadnStartAllOccs: n+1 target nodeID=%d",
                                   ((*(itr+1))->getAttr<ATTR_FABRIC_NODE_ID>()) );
                        if((targ0->getAttr<ATTR_FABRIC_NODE_ID>()) ==
                           ((*(itr+1))->getAttr<ATTR_FABRIC_NODE_ID>()))
                        {
                            itr++;
                            targ1 = *itr;
                        }
                    }

                    /********** Setup and load targ0 ***********/
                    l_errl = primeAndLoadOcc(targ0,
                                             homerVirtAddrBase,
                                             homerPhysAddrBase,
                                             i_useSRAM);
                    if(l_errl)
                    {
                        o_failedOccTarget = targ0;
                        TRACFCOMP( g_fapiImpTd, ERR_MRK
                                   "loadnStartAllOccs: "
                                   "primeAndLoadOcc failed on targ0");
                        break;
                    }

                    /*********** Setup and load targ1 **********/
                    l_errl = primeAndLoadOcc(targ1,
                                             homerVirtAddrBase,
                                             homerPhysAddrBase,
                                             i_useSRAM);
                    if(l_errl)
                    {
                        o_failedOccTarget = targ1;
                        TRACFCOMP( g_fapiImpTd, ERR_MRK
                                   "loadnStartAllOccs: "
                                   "primeAndLoadOcc failed on targ1");
                        break;
                    }

                    /*********** Start OCC *******************/
                    l_errl = HBOCC::startOCC(targ0, targ1, o_failedOccTarget);
                    if (l_errl)
                    {
                        TRACFCOMP( g_fapiImpTd, ERR_MRK
                                   "loadnStartAllOccs: start failed");
                        break;
                    }
                }
                if (l_errl)
                {
                    break;
                }
            }
        }
    } while(0);

    errlHndl_t l_tmpErrl = NULL;

    //Unless HTMGT is in use, there are no further accesses to HOMER memory
    //required during the IPL
#ifndef CONFIG_HTMGT
    // Only reached with a non-NULL pointer on the !i_useSRAM path, where
    // homerVirtAddrBase came from mm_block_map (the SRAM path nulls it
    // after free()).
    if(homerVirtAddrBase)
    {
        int rc = 0;
        rc = mm_block_unmap(homerVirtAddrBase);
        if (rc != 0)
        {
            /*@
             * @errortype
             * @moduleid     fapi::MOD_OCC_LOAD_START_ALL_OCCS
             * @reasoncode   fapi::RC_MM_UNMAP_ERR
             * @userdata1    Return Code
             * @userdata2    Unmap address
             * @devdesc      mm_block_unmap() returns error
             * @custdesc     A problem occurred during the IPL
             *               of the system.
             */
            l_tmpErrl = new ERRORLOG::ErrlEntry(
                ERRORLOG::ERRL_SEV_UNRECOVERABLE,
                fapi::MOD_OCC_LOAD_START_ALL_OCCS,
                fapi::RC_MM_UNMAP_ERR,
                rc,
                reinterpret_cast<uint64_t>(homerVirtAddrBase));
            if(l_tmpErrl)
            {
                if(l_errl)
                {
                    errlCommit( l_tmpErrl, HWPF_COMP_ID );
                }
                else
                {
                    l_errl = l_tmpErrl;
                }
            }
        }
    }
#endif

    //make sure we always unload the module
    if (winkle_loaded)
    {
        l_tmpErrl = VFS::module_unload( "libbuild_winkle_images.so" );
        if ( l_tmpErrl )
        {
            TRACFCOMP ( g_fapiTd,ERR_MRK
                        "loadnStartAllOccs: Error unloading build_winkle module" );
            if(l_errl)
            {
                errlCommit( l_tmpErrl, HWPF_COMP_ID );
            }
            else
            {
                l_errl = l_tmpErrl;
            }
        }
    }

    TRACUCOMP( g_fapiTd, EXIT_MRK"loadnStartAllOccs" );
    return l_errl;
}
/**
 * @brief Construct the SPIRA-S object: map the (HRMOR-adjusted) mainstore
 *        address of the SPIRA-S into virtual memory and write its headers.
 *
 * @param[in] i_msAddr  Mainstore address (hi/lo pair) of the SPIRA-S,
 *                      relative to the payload base (HRMOR not yet applied)
 */
HdatSpiraS::HdatSpiraS(const hdatMsAddr_t &i_msAddr)
: iv_spirasSize(0), iv_spiras(NULL)
{
    HDAT_ENTER();

    iv_spirasSize = sizeof(hdatSpiraS_t);

    // Assemble the 64-bit mainstore address from the hi/lo words.
    uint64_t l_base_addr = ((uint64_t) i_msAddr.hi << 32) | i_msAddr.lo;
    HDAT_DBG("l_base_addr at SPIRA-S=0x%016llX",l_base_addr);

    //calculate the hrmor and add to base address
    TARGETING::Target * sys = NULL;
    TARGETING::targetService().getTopLevelTarget( sys );
    assert(sys != NULL);
    uint64_t l_hrmor = sys->getAttr<TARGETING::ATTR_PAYLOAD_BASE>()*MEGABYTE;
    HDAT_DBG("HRMOR=0x%08x",l_hrmor);
    l_base_addr = l_hrmor + l_base_addr;
    HDAT_DBG("base address after adding HRMOR=0x%08x",l_base_addr);

    // mm_block_map requires a page-aligned physical address; map from the
    // containing page boundary and re-apply the offset afterwards.  One
    // extra page covers the case where the offset pushes the structure
    // across the final page boundary.
    uint64_t l_base_addr_down = ALIGN_PAGE_DOWN(l_base_addr);
    HDAT_DBG("l_base_addr_down=0x%016llX",l_base_addr_down);
    HDAT_DBG("reqd space=0x%x, will do a block map of size 0x%x",
             iv_spirasSize, ALIGN_PAGE(iv_spirasSize));
    void *l_virt_addr = mm_block_map(
        reinterpret_cast<void*>(l_base_addr_down),
        ALIGN_PAGE(iv_spirasSize) + PAGESIZE);
    // BUG FIX: the mapping result was dereferenced below without any check;
    // fail fast here instead of crashing on a NULL pointer later.
    assert(l_virt_addr != NULL);
    HDAT_DBG("l_virt_addr=0x%016llX after block map",l_virt_addr);

    // Re-apply the sub-page offset to get the virtual address of the
    // SPIRA-S itself.
    uint64_t l_vaddr = reinterpret_cast<uint64_t>(l_virt_addr);
    HDAT_DBG("will add offset %x to starting virtual address",
             (l_base_addr-l_base_addr_down));
    l_vaddr += l_base_addr-l_base_addr_down;
    HDAT_DBG("l_vaddr after adding=0x%016llX",l_vaddr);
    l_virt_addr = reinterpret_cast<void *>(l_vaddr);
    HDAT_DBG("l_virt_addr=0x%016llX",l_virt_addr);

    iv_spiras = reinterpret_cast<hdatSpiraS_t *>(l_virt_addr);
    HDAT_DBG("constructor iv_spiras addr 0x%016llX virtual addr 0x%016llX,space"
             " allocated=0x%x",(uint64_t) this->iv_spiras,
             (uint64_t)l_virt_addr,iv_spirasSize);

    HDAT_DBG("creating SPIRA-S header");
    setSpiraSHdrs();
    HDAT_DBG("done setting the SPIRA-S header");

    iv_spiras->hdatHDIF.hdatSize = sizeof(hdatSpira_t);

    HDAT_EXIT();
}