// Fetch the (x, y) location of a cluster.
//
// Terminal clusters and primary-pin clusters are fixed: their positions are
// read straight from the design database. Movable clusters live in the
// solver's solution vector `coordinates`, addressed via the cluster's
// LUT index (x at 2*idx, y at 2*idx+1).
void GetClusterCoordinates(HCluster clusterIdx, PetscScalar* coordinates, AppCtx* context, double& x, double& y)
{
    HClusterWrapper wrapper = (*context->hd)[clusterIdx];

    if (wrapper.IsTerminals())
    {
        // Fixed cell: position comes from the cell record itself.
        HCell terminal = context->ci->terminalCells[wrapper.id()];
        x = context->hd->GetDouble<HCell::X>(terminal);
        y = context->hd->GetDouble<HCell::Y>(terminal);
        return;
    }

    if (wrapper.IsPrimary())
    {
        // Primary I/O pin: position comes from the pin record.
        HPin pin = context->ci->primaryPins[wrapper.id()];
        x = context->hd->GetDouble<HPin::X>(pin);
        y = context->hd->GetDouble<HPin::Y>(pin);
        return;
    }

    // Movable cluster: coordinates are interleaved (x, y) pairs in the
    // solution array; -1 means the cluster has no slot there.
    int solutionIdx = wrapper.clusterIdx2solutionIdxLUT();
    CRITICAL_ASSERT(solutionIdx != -1);
    x = coordinates[2 * solutionIdx];
    y = coordinates[2 * solutionIdx + 1];
}
/**
 * @brief Parse the LEF file configured as "benchmark.lef" into the design.
 *
 * Pre-sizes the layer/macro/site builders from configuration hints,
 * registers the Si2 LEF reader callbacks, performs the one-time reader
 * initialization, and runs lefrRead() over the file. Aborts via
 * CRITICAL_ASSERT/ASSERT on open or parse failure.
 *
 * @param design Design to populate; tech info is created if missing.
 */
void ParseLEF(HDesign& design)
{
  ConfigContext ctx(design.cfg.OpenContext("LEFParser"));

  if (!design.HasTechInfo())
    design.SetTechInfo();

  LEFParserData userData(&design);
  // Pre-allocate builder capacity from config hints (defaults are typical sizes).
  userData.rlBuilder.LayersStart(design.cfg.ValueOf(".startLayersCount", 6));
  userData.mtBuilder.MacroTypesStart(design.cfg.ValueOf(".startMacrosCount", 42),
                                     design.cfg.ValueOf(".startPinsCount", 224));
  userData.stBuilder.SitesStart(design.cfg.ValueOf(".startSitesCount", 1));

  // Register parse callbacks and custom allocators with the LEF reader.
  lefrSetMacroBeginCbk(macroBeginCB);
  lefrSetMacroCbk(macroCB);
  lefrSetMacroEndCbk(macroEndCB);
  lefrSetPinCbk(pinCB);
  lefrSetLayerCbk(layerCB);
  lefrSetSiteCbk(siteCB);
  lefrSetMallocFunction(mallocCB);
  lefrSetReallocFunction(reallocCB);
  lefrSetFreeFunction(freeCB);

  lefrSetCaseSensitivity(false);
  lefrSetRelaxMode();
  lefrSetShiftCase(); // will shift name to uppercase if caseinsensitive
                      // is set to off or not set

  // lefrInit() must run exactly once per process; a function-local static
  // guarantees that. Renamed from "__init_reader_code": identifiers
  // containing a double underscore are reserved for the implementation.
  static int s_lefInitStatus = lefrInit();
  (void)s_lefInitStatus;
  lefrReset();

  FILE* lefFile = fopen(design.cfg.ValueOf("benchmark.lef"), "r");
  CRITICAL_ASSERT(lefFile != NULL);

  ALERT("LEF file %s parsing started...", (const char*)design.cfg.ValueOf("benchmark.lef"));
  int lefReaderStatus = lefrRead(lefFile, design.cfg.ValueOf("benchmark.lef"), (void*)(&userData));
  ASSERT(lefReaderStatus == PARSE_OK);
  fclose(lefFile);

  // Release the reader's internal allocations so repeated parses don't leak.
  lefrReleaseNResetMemory();
  ALERT("LEF parsing finished.");
}
/** @brief Branch an inode.

    After branching, one copy of the inode holds the committed state (its
    allocation bit reads as free or almost-free) while the other copy is in
    the new state.  The new-state copy is the writeable one, and its buffer
    is branched and marked dirty.

    @param pInode   Pointer to the cached inode structure which has already
                    been mounted.

    @return A negated ::REDSTATUS code indicating the operation result.

    @retval 0           Operation was successful.
    @retval -RED_EINVAL Invalid parameters.
    @retval -RED_EIO    A disk I/O error occurred.
*/
REDSTATUS RedInodeBranch(
    CINODE *pInode)
{
    REDSTATUS ret;

    if(!CINODE_IS_MOUNTED(pInode))
    {
        REDERROR();
        ret = -RED_EINVAL;
    }
    else if(pInode->fBranched)
    {
        /*  Already branched in this transaction: just re-dirty the buffer.
        */
        RedBufferDirty(pInode->pInodeBuf);
        pInode->fDirty = true;
        ret = 0;
    }
    else
    {
        uint8_t bCopy;

        ret = InodeGetWriteableCopy(pInode->ulInode, &bCopy);

        if(ret == 0)
        {
            RedBufferBranch(pInode->pInodeBuf, InodeBlock(pInode->ulInode, bCopy));
            pInode->fBranched = true;
            pInode->fDirty = true;

            /*  Toggle the inode slots: the old slot block becomes almost
                free (still used by the committed state) and the new slot
                block becomes new.
            */
            ret = InodeBitSet(pInode->ulInode, 1U - bCopy, false);
        }

        if(ret == 0)
        {
            ret = InodeBitSet(pInode->ulInode, bCopy, true);
        }

        CRITICAL_ASSERT(ret == 0);
    }

    return ret;
}
/** @brief Acquire a buffer.

    If the block already has a buffer, that buffer is referenced and
    returned; otherwise the least recently used unreferenced buffer is
    evicted (written out first if dirty) and repurposed for the block.

    @param ulBlock  Block number to acquire.
    @param uFlags   BFLAG_ values for the operation.
    @param ppBuffer On success, populated with the acquired buffer.

    @return A negated ::REDSTATUS code indicating the operation result.

    @retval 0           Operation was successful.
    @retval -RED_EIO    A disk I/O error occurred.
    @retval -RED_EINVAL Invalid parameters.
    @retval -RED_EBUSY  All buffers are referenced.
*/
REDSTATUS RedBufferGet(
    uint32_t    ulBlock,
    uint16_t    uFlags,
    void      **ppBuffer)
{
    REDSTATUS   ret = 0;
    uint8_t     bIdx;

    /*  Validate: block in range, no flags outside the defined mask, and a
        place to store the result.
    */
    if((ulBlock >= gpRedVolume->ulBlockCount) || ((uFlags & BFLAG_MASK) != uFlags) || (ppBuffer == NULL))
    {
        REDERROR();
        ret = -RED_EINVAL;
    }
    else
    {
        if(BufferFind(ulBlock, &bIdx))
        {
            /*  Error if the buffer exists and BFLAG_NEW was specified, since
                the new flag is used when a block is newly allocated/created,
                so the block was previously free and there should never be an
                existing buffer for a free block.

                Error if the buffer exists but does not have the same type as
                was requested.
            */
            if(    ((uFlags & BFLAG_NEW) != 0U)
                || ((uFlags & BFLAG_META_MASK) != (gBufCtx.aHead[bIdx].uFlags & BFLAG_META_MASK)))
            {
                CRITICAL_ERROR();
                ret = -RED_EFUBAR;
            }
        }
        else if(gBufCtx.uNumUsed == REDCONF_BUFFER_COUNT)
        {
            /*  The MINIMUM_BUFFER_COUNT is supposed to ensure that no
                operation ever runs out of buffers, so this should never
                happen.
            */
            CRITICAL_ERROR();
            ret = -RED_EBUSY;
        }
        else
        {
            BUFFERHEAD *pHead;

            /*  Search for the least recently used buffer which is not
                referenced.  abMRU[] is ordered most- to least-recently used,
                so scan from the tail toward the head.
            */
            for(bIdx = (uint8_t)(REDCONF_BUFFER_COUNT - 1U); bIdx > 0U; bIdx--)
            {
                if(gBufCtx.aHead[gBufCtx.abMRU[bIdx]].bRefCount == 0U)
                {
                    break;
                }
            }

            bIdx = gBufCtx.abMRU[bIdx];
            pHead = &gBufCtx.aHead[bIdx];

            if(pHead->bRefCount == 0U)
            {
                /*  If the LRU buffer is valid and dirty, write it out before
                    repurposing it.
                */
                if(((pHead->uFlags & BFLAG_DIRTY) != 0U) && (pHead->ulBlock != BBLK_INVALID))
                {
                  #if REDCONF_READ_ONLY == 1
                    /*  A dirty buffer should be impossible in a read-only
                        configuration.
                    */
                    CRITICAL_ERROR();
                    ret = -RED_EFUBAR;
                  #else
                    ret = BufferWrite(bIdx);
                  #endif
                }
            }
            else
            {
                /*  All the buffers are used, which should have been caught
                    by checking gBufCtx.uNumUsed.
                */
                CRITICAL_ERROR();
                ret = -RED_EBUSY;
            }

            if(ret == 0)
            {
                if((uFlags & BFLAG_NEW) == 0U)
                {
                    /*  Invalidate the LRU buffer.  If the read fails, we do not
                        want the buffer head to continue to refer to the old
                        block number, since the read, even if it fails, may have
                        partially overwritten the buffer data (consider the case
                        where block size exceeds sector size, and some but not
                        all of the sectors are read successfully), and if the
                        buffer were to be used subsequently with its partially
                        erroneous contents, bad things could happen.
                    */
                    pHead->ulBlock = BBLK_INVALID;

                    ret = RedIoRead(gbRedVolNum, ulBlock, 1U, gBufCtx.b.aabBuffer[bIdx]);

                    if((ret == 0) && ((uFlags & BFLAG_META) != 0U))
                    {
                        /*  Metadata nodes carry an on-disk header; validate it
                            before handing the buffer out.
                        */
                        if(!BufferIsValid(gBufCtx.b.aabBuffer[bIdx], uFlags))
                        {
                            /*  A corrupt metadata node is usually a critical
                                error.  The master block is an exception since
                                it might be invalid because the volume is not
                                mounted; that condition is expected and should
                                not result in an assertion.
                            */
                            CRITICAL_ASSERT((uFlags & BFLAG_META_MASTER) == BFLAG_META_MASTER);

                            ret = -RED_EIO;
                        }
                    }

                  #ifdef REDCONF_ENDIAN_SWAP
                    if(ret == 0)
                    {
                        BufferEndianSwap(gBufCtx.b.aabBuffer[bIdx], uFlags);
                    }
                  #endif
                }
                else
                {
                    /*  BFLAG_NEW: the block was just allocated, so its disk
                        contents are meaningless; start from zeroes instead of
                        reading.
                    */
                    RedMemSet(gBufCtx.b.aabBuffer[bIdx], 0U, REDCONF_BLOCK_SIZE);
                }
            }

            if(ret == 0)
            {
                pHead->bVolNum = gbRedVolNum;
                pHead->ulBlock = ulBlock;
                pHead->uFlags = 0U;
            }
        }

        /*  Reference the buffer, update its flags, and promote it to MRU.
            This happens both when BufferFind() found an existing buffer for
            the block and when the LRU buffer was repurposed to create a
            buffer for the block.
        */
        if(ret == 0)
        {
            BUFFERHEAD *pHead = &gBufCtx.aHead[bIdx];

            pHead->bRefCount++;

            if(pHead->bRefCount == 1U)
            {
                gBufCtx.uNumUsed++;
            }

            /*  BFLAG_NEW tells this function to zero the buffer instead of
                reading it from disk; it has no meaning later on, and thus is
                not saved.
            */
            pHead->uFlags |= (uFlags & (~BFLAG_NEW));

            BufferMakeMRU(bIdx);

            *ppBuffer = gBufCtx.b.aabBuffer[bIdx];
        }
    }

    return ret;
}
/** @brief Commit a transaction point.

    Flushes all dirty buffers, stamps and checksums the working metaroot,
    and writes it to disk with flush barriers on either side so the
    metaroot can never reach the media before the metadata it points at.
    Once the write is durable, the working and committed metaroot buffers
    exchange roles.

    @return A negated ::REDSTATUS code indicating the operation result.

    @retval 0        Operation was successful.
    @retval -RED_EIO A disk I/O error occurred.
*/
REDSTATUS RedVolTransact(void)
{
    REDSTATUS ret = 0;

    REDASSERT(!gpRedVolume->fReadOnly); /* Should be checked by caller. */

    /*  Nothing to do unless the working state diverged from the committed
        state since the last transaction point.
    */
    if(gpRedCoreVol->fBranched)
    {
        /*  Almost-free blocks (freed from the committed state) become truly
            free once this transaction point commits.
        */
        gpRedMR->ulFreeBlocks += gpRedCoreVol->ulAlmostFreeBlocks;
        gpRedCoreVol->ulAlmostFreeBlocks = 0U;

        ret = RedBufferFlush(0U, gpRedVolume->ulBlockCount);

        if(ret == 0)
        {
            /*  The sequence number distinguishes the newer of the two
                on-disk metaroots at mount time.
            */
            gpRedMR->hdr.ulSignature = META_SIG_METAROOT;
            gpRedMR->hdr.ullSequence = gpRedVolume->ullSequence;

            ret = RedVolSeqNumIncrement();
        }

        if(ret == 0)
        {
            const uint8_t  *pbMR = CAST_VOID_PTR_TO_CONST_UINT8_PTR(gpRedMR);
            uint32_t        ulSectorCRC;

          #ifdef REDCONF_ENDIAN_SWAP
            /*  CRCs are computed over the on-disk (swapped) byte layout.
            */
            MetaRootEndianSwap(gpRedMR);
          #endif

            /*  Two CRCs: one covering the first sector (so a torn multi-
                sector write is detectable) and one covering the whole block.
                The CRC field itself is zeroed while computing.  The first
                8 bytes are skipped — presumably the signature/CRC header
                fields excluded from the checksum; verify against the
                metaroot layout.
            */
            gpRedMR->ulSectorCRC = 0U;

            ulSectorCRC = RedCrc32Update(0U, &pbMR[8U], gpRedVolConf->ulSectorSize - 8U);

            if(gpRedVolConf->ulSectorSize < REDCONF_BLOCK_SIZE)
            {
                gpRedMR->hdr.ulCRC = RedCrc32Update(ulSectorCRC, &pbMR[gpRedVolConf->ulSectorSize], REDCONF_BLOCK_SIZE - gpRedVolConf->ulSectorSize);
            }
            else
            {
                gpRedMR->hdr.ulCRC = ulSectorCRC;
            }

            gpRedMR->ulSectorCRC = ulSectorCRC;

          #ifdef REDCONF_ENDIAN_SWAP
            /*  The CRC fields were stored in native order above; swap them
                to match the rest of the (already swapped) metaroot.
            */
            gpRedMR->hdr.ulCRC = RedRev32(gpRedMR->hdr.ulCRC);
            gpRedMR->ulSectorCRC = RedRev32(gpRedMR->ulSectorCRC);
          #endif

            /*  Flush the block device before writing the metaroot, so that
                all previously written blocks are guaranteed to be on the
                media before the metaroot is written.  Otherwise, if the
                block device reorders the writes, the metaroot could reach
                the media before metadata it points at, creating a window
                for disk corruption if power is lost.
            */
            ret = RedIoFlush(gbRedVolNum);
        }

        if(ret == 0)
        {
            ret = RedIoWrite(gbRedVolNum, BLOCK_NUM_FIRST_METAROOT + gpRedCoreVol->bCurMR, 1U, gpRedMR);

          #ifdef REDCONF_ENDIAN_SWAP
            /*  Restore the in-memory metaroot to native byte order.
            */
            MetaRootEndianSwap(gpRedMR);
          #endif
        }

        /*  Flush the block device to force the metaroot write to the media.
            This guarantees the transaction point is really complete before
            we return.
        */
        if(ret == 0)
        {
            ret = RedIoFlush(gbRedVolNum);
        }

        /*  Toggle to the other metaroot buffer.  The working state and
            committed state metaroot buffers exchange places.
        */
        if(ret == 0)
        {
            uint8_t bNextMR = 1U - gpRedCoreVol->bCurMR;

            /*  Seed the new working metaroot from the just-committed state.
            */
            gpRedCoreVol->aMR[bNextMR] = *gpRedMR;
            gpRedCoreVol->bCurMR = bNextMR;

            gpRedMR = &gpRedCoreVol->aMR[gpRedCoreVol->bCurMR];

            gpRedCoreVol->fBranched = false;
        }

        CRITICAL_ASSERT(ret == 0);
    }

    return ret;
}