// Derives all launch geometry and scratch-buffer sizes for one radix-sort
// pass from the kernel configuration and the element count.
SortTerms ComputeSortTerms(int numSortThreads, int valuesPerThread, bool useTransList, int numBits, int numElements, int numSMs) {
	SortTerms terms;

	// Keys processed by a single sort block.
	int blockValues = numSortThreads * valuesPerThread;
	terms.numSortBlocks = DivUp(numElements, blockValues);
	terms.numCountBlocks = DivUp(terms.numSortBlocks, NumCountWarps);
	terms.countValuesPerThread = blockValues / WarpSize;

	int bucketCount = 1<< numBits;
	// Per-count-block scratch, in bytes (4 bytes per counter).
	terms.countSize = 4 * std::max(WarpSize, bucketCount * NumCountWarps) * terms.numCountBlocks;

	// Each count warp can fold several sort blocks into one histogram row.
	int channelCount = bucketCount / 2;
	int sortBlocksPerCountWarp = std::min(NumCountWarps, WarpSize / channelCount);
	terms.numHistRows = DivUp(terms.numSortBlocks, sortBlocksPerCountWarp);
	terms.numHistBlocks = std::min(numSMs, DivUp(terms.numHistRows, NumHistWarps));

	// Scatter structure: bucket codes, plus a transaction list when enabled,
	// rounded up to a full warp of slots.
	int codeBlockSize = bucketCount;
	if(useTransList) codeBlockSize += bucketCount + WarpSize;
	terms.scatterStructSize = RoundUp(codeBlockSize, WarpSize);

	// hist3 kernel (for 1 - 5 radix bits) may write two blocks of codes at a
	// time, even if only one block is required. To support this, round the
	// number of sort blocks up to a multiple of 2.
	terms.bucketCodesSize = 4 * RoundUp(terms.numSortBlocks, 2) * terms.scatterStructSize;

	// Padding keys needed to fill out the final (partial) sort block.
	terms.numEndKeys = RoundUp(numElements, blockValues) - numElements;

	return terms;
}
// Sorts data using an explicit kernel configuration (block size, values per
// thread, pass width in bits, transaction-list option). Builds a pass table
// describing how many passes of each bit width to run, then dispatches to
// sortArrayFromList.
sortStatus_t SORTAPI sortArrayEx(sortEngine_t engine, sortData_t data, int numSortThreads, int valuesPerThread, int bitPass, bool useTransList) {
	// Only 128- or 256-thread blocks with 8 values per thread are supported.
	if((128 != numSortThreads && 256 != numSortThreads) || (8 != valuesPerThread))
		return SORT_STATUS_INVALID_VALUE;
	if(data->numElements > data->maxElements) return SORT_STATUS_INVALID_VALUE;

	// Reject a degenerate bit range up front. Previously endBit == firstBit
	// clamped bitPass to 0 and fell into DivUp(0, 0) - a division by zero.
	int numBits = data->endBit - data->firstBit;
	if(numBits <= 0) return SORT_STATUS_INVALID_VALUE;

	// Clamp the requested pass width to the legal 1-6 bit range, and never
	// wider than the total number of bits to sort.
	if(bitPass <= 0 || bitPass > 6) bitPass = 6;
	if(bitPass > numBits) bitPass = numBits;

	int numPasses = DivUp(numBits, bitPass);
	// The first 'split' passes run one bit narrower so widths sum to numBits.
	int split = numPasses * bitPass - numBits;

	// Generate a pass list. table.pass[b - 1] counts the passes of b bits.
	SortTable table = { { 0 } };
	int bit = data->firstBit;
	for(int pass(0); pass < numPasses; ++pass) {
		int passBits = bitPass - (pass < split);
		++table.pass[passBits - 1];
		bit += passBits;
	}
	table.numSortThreads = numSortThreads;
	table.valuesPerThread = valuesPerThread;
	table.useTransList = useTransList;

	return sortArrayFromList(engine, data, table);
}
// Two-pass argmax reduction over 'count' floats in device memory: pass 1
// computes per-block partial maxima, pass 2 reduces them, and the winning
// value/index pair is copied back to the host.
CUresult FindGlobalMax(MaxIndexEngine* engine, CUdeviceptr data, int count, float* maxX, int* maxIndex) {
	// Process 256 values at a time ("bricks").
	int numBricks = DivUp(count, 256);
	int numBlocks = std::min(numBricks, engine->numBlocks);

	// Hand each block a contiguous run of whole bricks; the first
	// (numBricks % numBlocks) blocks carry one extra brick.
	div_t d = div(numBricks, numBlocks);
	std::vector<int2> ranges(numBlocks);
	int begin = 0;
	for(int block(0); block < numBlocks; ++block) {
		int brickCount = d.quot + ((block < d.rem) ? 1 : 0);
		int end = std::min(begin + brickCount * 256, count);
		ranges[block].x = begin;
		ranges[block].y = end;
		begin = end;
	}
	engine->rangeMem->FromHost(ranges);

	// Pass 1: each block scans its range and records a partial max/index.
	CuCallStack callStack;
	callStack.Push(data, engine->maxMem, engine->indexMem, engine->rangeMem);
	CUresult result = engine->pass1->Launch(numBlocks, 1, callStack);
	if(CUDA_SUCCESS != result) return result;

	// Pass 2: a single block reduces the numBlocks partial results.
	callStack.Reset();
	callStack.Push(engine->maxMem, engine->indexMem, numBlocks);
	result = engine->pass2->Launch(1, 1, callStack);
	if(CUDA_SUCCESS != result) return result;

	// Retrieve the max element and its index.
	engine->maxMem->ToHost(maxX, 1);
	engine->indexMem->ToHost(maxIndex, 1);
	return CUDA_SUCCESS;
}
// // Read functions // int ext2::ReadInode(ulong inode, Inode *theInode) { if(inode < 2) { Panic::PrintMessage("Tried to get inode zero\n"); return(-1); } ulong groupNumber = inode / theSuperBlock.inodesPerGroup; // DivUp(inode, theSuperBlock.inodesPerGroup); ulong blocksNeeded = DivUp(theSuperBlock.inodesPerGroup * sizeof(Inode), blockSize); // cal the blocks needed for all inodes uchar *buff = new uchar[blocksNeeded * blockSize]; Inode *tmpInode = reinterpret_cast<Inode*>(buff); /* printf("INODE: %d\n", inode); printf("INODES PER GROUP: %d\n", theSuperBlock.inodesPerGroup); printf("GROUP NUMBER: %d\n", groupNumber); printf("INODE TABLE ADDR: %d\n", theGroupDescriptors[groupNumber].inodeTableAddress); printf("BLOCKS REQUESTED: %d\n", blocksNeeded); printf("INODE SIZE: %d\n", sizeof(Inode)); */ // read the inode table for that group int ret = ReadBlocks(theGroupDescriptors[groupNumber].inodeTableAddress, blocksNeeded, buff); if(ret >= 0) // decrease inode because array index at zero and inodes at one MemCopy(theInode, &tmpInode[(inode-1) % theSuperBlock.inodesPerGroup], sizeof(Inode)); delete [] buff; return(ret); }
// Unfinished block allocator: currently only reads and dumps the group's
// block bitmap, then returns 0. The blockNumber parameter is ignored.
ulong ext2::AllocateNewDataBlock(ulong blockNumber, FileDescriptor *fd)
{
	// FIX ME
	(void)blockNumber;

	// NOTE(review): as in ReadInode, this presumably wants the 0-based
	// index ((fd->inodeNumber - 1) / inodesPerGroup) - confirm before
	// finishing this function.
	int groupNumber = fd->inodeNumber / theSuperBlock.inodesPerGroup;

	if(theGroupDescriptors[groupNumber].freeBlockCount == 0)
		Panic::PrintMessage("FILE SYSTEM FULL\n");

	// read in the block bitmap (one bit per block in the group, rounded up
	// to whole filesystem blocks)
	ulong numBlocksForBitmap = DivUp(DivUp(theSuperBlock.blocksPerGroup, ulong(8)), ulong(blockSize));
	uchar *freeBlockBitmap = new uchar[numBlocksForBitmap * blockSize];

	printf("BLOCKS PER GROUP: %d\n", theSuperBlock.blocksPerGroup);
	printf("NUM BLOCKS: %d\n", numBlocksForBitmap);

	// search through the bitmap for an unused block
	ReadBlocks(theGroupDescriptors[groupNumber].blockBitmapAddress, numBlocksForBitmap, freeBlockBitmap);

	for(uint i=0; i < numBlocksForBitmap * blockSize; ++i)
		printf("%x ", freeBlockBitmap[i]);

	// TODO: mark it as used
	// TODO: update the count in the group descriptor
	// TODO: update the count in the super block
	// TODO: update the count in the inode

	// Free the bitmap buffer - it was previously leaked on every call.
	delete [] freeBlockBitmap;

	return(0);
}
// Prints 'count' integers as HTML, 16 per output row, using 'place' as the
// formatting precision passed through to PrintNumberHTML.
void PrintArray(const int* values, int count, int place) {
	while(count > 0) {
		// A row holds at most 16 values; the last row may be shorter.
		int rowSize = std::min(count, 16);
		for(int i(0); i < rowSize; ++i)
			PrintNumberHTML(values[i], place);
		printf("\n");
		values += rowSize;
		count -= rowSize;
	}
}
// Sparse matrix-vector multiply: launches the multiply kernel over the
// matrix groups (reading xVec through a texture), then a finalize kernel
// that combines the temporary output with yVec using alpha/beta.
sparseStatus_t sparseEngine_d::Multiply(sparseMat_t mat, T alpha, T beta, CUdeviceptr xVec, CUdeviceptr yVec) {
	sparseMatrix* matrix = static_cast<sparseMatrix*>(mat);

	// Select the kernel set for this matrix's precision.
	Kernel* kernel;
	sparseStatus_t status = LoadKernel(matrix->prec, &kernel);
	if(SPARSE_STATUS_SUCCESS != status) return status;

	// Build the argument stack for the multiply kernel.
	CuCallStack stack;
	stack.Push(matrix->outputIndices, matrix->colIndices, matrix->sparseValues, matrix->tempOutput, matrix->numGroups);

	// Bind xVec as the kernel's input texture, sized by this precision's
	// per-element byte width.
	PrecTerm precTerms = PrecTerms[matrix->prec];
	size_t offset;
	CUresult result = cuTexRefSetAddress(&offset, kernel->xVec_texture, xVec, matrix->width * precTerms.vecSize);
	if(CUDA_SUCCESS != result) return SPARSE_STATUS_KERNEL_ERROR;

	// Launch the multiply function for this values-per-thread variant.
	uint numBlocks = DivUp(matrix->numGroups, WarpsPerBlock);
	result = kernel->func[IndexFromVT(matrix->valuesPerThread)]->Launch(numBlocks, 1, stack);
	if(CUDA_SUCCESS != result) return SPARSE_STATUS_LAUNCH_ERROR;

	// Finalize the vector: the kernel receives alpha, beta, and a flag
	// telling it whether beta is non-zero (so it can skip reading yVec).
	int numFinalizeBlocks = DivUp(matrix->numGroups, WarpsPerBlock);
	int useBeta = !IsZero(beta);
	stack.Reset();
	stack.Push(matrix->tempOutput, matrix->rowIndices, matrix->height, yVec, alpha, beta, useBeta);
	result = kernel->finalize->Launch(numFinalizeBlocks, 1, stack);
	if(CUDA_SUCCESS != result) return SPARSE_STATUS_KERNEL_ERROR;

	return SPARSE_STATUS_SUCCESS;
}
// Runs a single radix pass of (endBit - firstBit) bits (1-6) over
// data->keys[0], scattering keys - and values, per valueCode - into the [1]
// buffers, then swaps the ping-pong buffer pairs and flips 'parity'.
// *earlyExitCode reports what early-exit detection found (0-3); codes 2 and 3
// skip the histogram/sort work entirely (buffers are NOT swapped then).
sortStatus_t sortPass(sortEngine_t engine, sortData_t data, int numSortThreads, int valuesPerThread, bool useTransList, int firstBit, int endBit, int endKeyFlags, int valueCode, int* earlyExitCode, int& parity) {
	// Validate the element count and the requested bit range.
	if(data->numElements > data->maxElements) return SORT_STATUS_INVALID_VALUE;
	if((firstBit < 0) || (endBit > 32) || (endBit <= firstBit) || ((endBit - firstBit) > 6))
		return SORT_STATUS_INVALID_VALUE;

	int numBits = endBit - firstBit;
	SortTerms terms = ComputeSortTerms(numSortThreads, valuesPerThread, useTransList, numBits, data->numElements, engine->numSMs);

	// Load (or fetch cached) kernels for this configuration.
	sortEngine_d::HistKernel* hist;
	sortEngine_d::SortKernel* sort;
	CUresult result;
	sortStatus_t status = LoadKernels(engine, numSortThreads, valuesPerThread, useTransList, valueCode, &hist, &sort);
	if(SORT_STATUS_SUCCESS != status) return status;

	status = AllocSortResources(terms, engine);
	if(SORT_STATUS_SUCCESS != status) return status;

	// Set numHistRows into rangePairs if it hasn't already been set to this
	// size.
	if(terms.numHistRows != engine->lastNumHistRowsProcessed) {
		// Divide the histogram rows evenly over all hist warps; the first
		// pairSplit warps take one extra row. Each pair is a [begin, end)
		// row range.
		int2* pairs = &engine->rangePairsHost[0];
		int numPairs = terms.numHistBlocks * NumHistWarps;
		int pairCount = terms.numHistRows / numPairs;
		int pairSplit = terms.numHistRows % numPairs;
		pairs[0].x = 0;
		for(int i = 0; i < numPairs; ++i) {
			if(i) pairs[i].x = pairs[i - 1].y;
			pairs[i].y = pairs[i].x + pairCount + (i < pairSplit);
		}

		// Copy rangePairsHost to device memory.
		CUresult result = engine->rangePairs->FromHost(&engine->rangePairsHost[0], numPairs);
		if(CUDA_SUCCESS != result) return SORT_STATUS_DEVICE_ERROR;

		engine->lastNumHistRowsProcessed = terms.numHistRows;
	}

	// Save the trailing keys (the padding slots past numElements) so a later
	// restore can undo the SET below.
	if((SORT_END_KEY_SAVE & endKeyFlags) && terms.numEndKeys) {
		engine->restoreSourceSize = terms.numEndKeys;
		CUdeviceptr source = AdjustPointer<uint>(data->keys[0], data->numElements);
		CUresult result = cuMemcpy(engine->keyRestoreBuffer->Handle(), source, 4 * engine->restoreSourceSize);
		if(CUDA_SUCCESS != result) return SORT_STATUS_DEVICE_ERROR;
	}

	// Set the trailing keys to all set bits here, so padding sorts to the end.
	if((SORT_END_KEY_SET & endKeyFlags) && terms.numEndKeys) {
		// Back up the overwritten keys in the engine
		CUdeviceptr target = AdjustPointer<uint>(data->keys[0], data->numElements);
		CUresult result = cuMemsetD32(target, 0xffffffff, terms.numEndKeys);
		if(CUDA_SUCCESS != result) return SORT_STATUS_DEVICE_ERROR;
	}

	// Run the count kernel
	if(data->earlyExit) engine->sortDetectCounters->Fill(0);

	CuCallStack callStack;
	callStack.Push(data->keys[0], firstBit, data->numElements, terms.countValuesPerThread, engine->countBuffer);
	// The early-exit variant also updates the detection counters.
	CuFunction* count = data->earlyExit ? engine->count->eeFunctions[numBits - 1].get() : engine->count->functions[numBits - 1].get();
	result = count->Launch(terms.numCountBlocks, 1, callStack);
	if(CUDA_SUCCESS != result) return SORT_STATUS_LAUNCH_ERROR;

	*earlyExitCode = 0;
	if(data->earlyExit) {
		// detect.x/y/z are counters the early-exit count kernel accumulated.
		uint4 detect;
		result = engine->sortDetectCounters->ToHost(&detect, 1);
		if(CUDA_SUCCESS != result) return SORT_STATUS_DEVICE_ERROR;

		uint radixSort = detect.x;
		uint fullCount = detect.y;
		uint radixCount = detect.z;

		// 3: every count block already fully sorted; 2: sorted within radix.
		if(terms.numCountBlocks == (int)fullCount) *earlyExitCode = 3;
		else if(terms.numCountBlocks == (int)radixCount) *earlyExitCode = 2;

		// If 5% of the sort blocks are sorted, use the slightly slower early
		// exit sort kernel.
		else if((double)radixSort / terms.numSortBlocks > 0.05) *earlyExitCode = 1;
		else *earlyExitCode = 0;
	}

	// Codes 0/1 require the full histogram + sort sequence; 2/3 skip it.
	if(*earlyExitCode <= 1) {
		// Run the three histogram kernels
		callStack.Reset();
		callStack.Push(engine->countBuffer, engine->rangePairs, engine->countScan, engine->columnScan);
		result = hist->pass1[numBits - 1]->Launch(terms.numHistBlocks, 1, callStack);
		if(CUDA_SUCCESS != result) return SORT_STATUS_LAUNCH_ERROR;

		callStack.Reset();
		callStack.Push(terms.numHistBlocks, engine->countScan);
		result = hist->pass2[numBits - 1]->Launch(1, 1, callStack);
		if(CUDA_SUCCESS != result) return SORT_STATUS_LAUNCH_ERROR;

		callStack.Reset();
		callStack.Push(engine->countBuffer, engine->rangePairs, engine->countScan, engine->columnScan, engine->bucketCodes, *earlyExitCode);
		result = hist->pass3[numBits - 1]->Launch(terms.numHistBlocks, 1, callStack);
		if(CUDA_SUCCESS != result) return SORT_STATUS_LAUNCH_ERROR;

		// Run the sort kernel
		// Because the max grid size is only 65535 in any dimension, large
		// sorts require multiple kernel launches.
		int MaxGridSize = 65535;
		int numSortLaunches = DivUp(terms.numSortBlocks, MaxGridSize);

		for(int launch(0); launch < numSortLaunches; ++launch) {
			int block = MaxGridSize * launch;
			int numBlocks = std::min(MaxGridSize, terms.numSortBlocks - block);

			callStack.Reset();
			callStack.Push(data->keys[0], block, engine->bucketCodes, firstBit, data->keys[1]);

			// Push the value streams matching the requested value handling.
			switch(valueCode) {
				case 1: // VALUE_TYPE_INDEX
					callStack.Push(data->values1[1]);
					break;
				case 2: // VALUE_TYPE_SINGLE
					callStack.Push(data->values1[0], data->values1[1]);
					break;
				case 3: // VALUE_TYPE_MULTI
					callStack.Push(data->valueCount,
						// Six values_global_in
						data->values1[0], data->values2[0], data->values3[0],
						data->values4[0], data->values5[0], data->values6[0],
						// Six values_global_out
						data->values1[1], data->values2[1], data->values3[1],
						data->values4[1], data->values5[1], data->values6[1]);
					break;
			}

			CuFunction* sortFunc = *earlyExitCode ? sort->eeFunctions[numBits - 1].get() : sort->functions[numBits - 1].get();

			result = sortFunc->Launch(numBlocks, 1, callStack);
			if(CUDA_SUCCESS != result) return SORT_STATUS_LAUNCH_ERROR;
		}

		// Swap the source and target buffers in the data structure.
		std::swap(data->keys[0], data->keys[1]);
		std::swap(data->values1[0], data->values1[1]);
		std::swap(data->values2[0], data->values2[1]);
		std::swap(data->values3[0], data->values3[1]);
		std::swap(data->values4[0], data->values4[1]);
		std::swap(data->values5[0], data->values5[1]);
		std::swap(data->values6[0], data->values6[1]);

		parity ^= 1;
	}
	return SORT_STATUS_SUCCESS;
}
/**
 * \brief Initializes a device to be read by the driver
 * \param Device	String - Device to read from
 * \param Options	NULL Terminated array of option strings (currently unused)
 * \return Root Node, or NULL on failure
 */
tVFS_Node *Ext2_InitDevice(const char *Device, const char **Options)
{
	tExt2_Disk *disk = NULL;
	int fd;
	int groupCount;
	tExt2_SuperBlock sb;

	ENTER("sDevice pOptions", Device, Options);

	// Open Disk
	fd = VFS_Open(Device, VFS_OPENFLAG_READ|VFS_OPENFLAG_WRITE); //Open Device
	if(fd == -1) {
		Log_Warning("EXT2", "Unable to open '%s'", Device);
		LEAVE('n');
		return NULL;
	}

	// Read Superblock at offset 1024
	// (the ext2 superblock always lives 1024 bytes into the volume,
	// regardless of block size)
	VFS_ReadAt(fd, 1024, 1024, &sb); // Read Superblock

	// Sanity Check Magic value
	if(sb.s_magic != 0xEF53) {
		Log_Warning("EXT2", "Volume '%s' is not an EXT2 volume (0x%x != 0xEF53)",
			Device, sb.s_magic);
		goto _error;
	}
	if( sb.s_blocks_per_group < MIN_BLOCKS_PER_GROUP ) {
		Log_Warning("Ext2", "Blocks per group is too small (%i < %i)",
			sb.s_blocks_per_group, MIN_BLOCKS_PER_GROUP);
		goto _error;
	}

	// Get Group count (last group may be partial, hence the round-up)
	groupCount = DivUp(sb.s_blocks_count, sb.s_blocks_per_group);
	LOG("groupCount = %i", groupCount);

	// Allocate Disk Information - the group descriptor array is stored
	// inline after the tExt2_Disk struct (disk->Groups).
	disk = malloc(sizeof(tExt2_Disk) + sizeof(tExt2_Group)*groupCount);
	if(!disk) {
		Log_Warning("EXT2", "Unable to allocate disk structure");
		goto _error;
	}
	disk->FD = fd;
	memcpy(&disk->SuperBlock, &sb, 1024);
	disk->GroupCount = groupCount;

	// Get an inode cache handle
	disk->CacheID = Inode_GetHandle(NULL);

	// Get Block Size
	if( sb.s_log_block_size > MAX_BLOCK_LOG_SIZE ) {
		Log_Warning("Ext2", "Block size (log2) too large (%i > %i)",
			sb.s_log_block_size, MAX_BLOCK_LOG_SIZE);
		goto _error;
	}
	disk->BlockSize = 1024 << sb.s_log_block_size;
	LOG("Disk->BlockSie = 0x%x (1024 << %i)", disk->BlockSize, sb.s_log_block_size);

	// Read Group Information
	// NOTE(review): the descriptor table normally occupies the block after
	// the superblock; this offset assumes it sits 1024 bytes past the first
	// data block - confirm this is correct for 2KiB+ block sizes.
	LOG("sb,s_first_data_block = %x", sb.s_first_data_block);
	VFS_ReadAt(
		disk->FD,
		sb.s_first_data_block * disk->BlockSize + 1024,
		sizeof(tExt2_Group)*groupCount,
		disk->Groups
		);

	LOG("Block Group 0");
	LOG(".bg_block_bitmap = 0x%x", disk->Groups[0].bg_block_bitmap);
	LOG(".bg_inode_bitmap = 0x%x", disk->Groups[0].bg_inode_bitmap);
	LOG(".bg_inode_table = 0x%x", disk->Groups[0].bg_inode_table);
	LOG("Block Group 1");
	LOG(".bg_block_bitmap = 0x%x", disk->Groups[1].bg_block_bitmap);
	LOG(".bg_inode_bitmap = 0x%x", disk->Groups[1].bg_inode_bitmap);
	LOG(".bg_inode_table = 0x%x", disk->Groups[1].bg_inode_table);

	// Get root Inode (inode 2 is always the root directory in ext2)
	Ext2_int_ReadInode(disk, 2, &disk->RootInode);

	// Create Root Node
	memset(&disk->RootNode, 0, sizeof(tVFS_Node));
	disk->RootNode.Inode = 2;	// Root inode ID
	disk->RootNode.ImplPtr = disk;	// Save disk pointer
	disk->RootNode.Size = -1;	// Fill in later (on readdir)
	disk->RootNode.Flags = VFS_FFLAG_DIRECTORY;
	disk->RootNode.Type = &gExt2_DirType;

	// Complete root node
	disk->RootNode.UID = disk->RootInode.i_uid;
	disk->RootNode.GID = disk->RootInode.i_gid;
	disk->RootNode.NumACLs = 1;
	disk->RootNode.ACLs = &gVFS_ACL_EveryoneRW;

	#if DEBUG
	LOG("inode.i_size = 0x%x", disk->RootInode.i_size);
	LOG("inode.i_block[0] = 0x%x", disk->RootInode.i_block[0]);
	#endif

	LEAVE('p', &disk->RootNode);
	return &disk->RootNode;

_error:
	// Common failure path: release the partial disk struct and the handle.
	if( disk )
		free(disk);
	VFS_Close(fd);
	LEAVE('n');
	return NULL;
}
/**
 * \fn vfs_node *Ext2_int_CreateNode(tExt2_Disk *Disk, Uint InodeID)
 * \brief Create a new VFS Node from an on-disk inode, or return the cached
 *        node for InodeID if one already exists.
 */
tVFS_Node *Ext2_int_CreateNode(tExt2_Disk *Disk, Uint InodeID)
{
	// retNode and inode are kept adjacent so Inode_CacheNodeEx can store
	// both (sizeof(data)) as one cache entry at the end.
	struct {
		tVFS_Node retNode;
		tExt2_Inode inode;
	} data;
	tVFS_Node *node = &data.retNode;
	tExt2_Inode *in = &data.inode;

	// Read the inode first; failure means no node at all.
	// NOTE(review): this disk read happens even when the cache hit below
	// makes it unnecessary - possibly intentional to refresh data.inode.
	if( !Ext2_int_ReadInode(Disk, InodeID, &data.inode) )
		return NULL;

	// Return the cached node if this inode was seen before.
	if( (node = Inode_GetCache(Disk->CacheID, InodeID)) )
		return node;
	node = &data.retNode;

	memset(node, 0, sizeof(*node));

	// Set identifiers
	node->Inode = InodeID;
	node->ImplPtr = Disk;
	node->ImplInt = in->i_links_count;	// link count cached for unlink logic
	if( in->i_links_count == 0 ) {
		Log_Notice("Ext2", "Inode %p:%x is not referenced, bug?", Disk, InodeID);
	}

	// Set file length
	node->Size = in->i_size;

	// Set Access Permissions
	node->UID = in->i_uid;
	node->GID = in->i_gid;
	node->NumACLs = 3;
	node->ACLs = VFS_UnixToAcessACL(in->i_mode & 0777, in->i_uid, in->i_gid);

	// Set Function Pointers
	node->Type = &gExt2_FileType;

	switch(in->i_mode & EXT2_S_IFMT)
	{
	// Symbolic Link
	case EXT2_S_IFLNK:
		node->Flags = VFS_FFLAG_SYMLINK;
		break;
	// Regular File
	case EXT2_S_IFREG:
		node->Flags = 0;
		// For regular files, i_dir_acl holds the upper 32 bits of the size.
		node->Size |= (Uint64)in->i_dir_acl << 32;
		break;
	// Directory
	case EXT2_S_IFDIR:
		node->Type = &gExt2_DirType;
		node->Flags = VFS_FFLAG_DIRECTORY;
		// Per-block scratch for directory reads.
		// NOTE(review): element size is Uint16 - confirm the readdir code
		// really stores 16-bit entries here and not pointers.
		node->Data = calloc( sizeof(Uint16), DivUp(node->Size, Disk->BlockSize) );
		break;
	// Unknown, Write protect it to be safe
	default:
		node->Flags = VFS_FFLAG_READONLY;
		break;
	}

	// Set Timestamps (seconds -> milliseconds)
	node->ATime = in->i_atime * 1000;
	node->MTime = in->i_mtime * 1000;
	node->CTime = in->i_ctime * 1000;

	// Save in node cache and return saved node
	return Inode_CacheNodeEx(Disk->CacheID, &data.retNode, sizeof(data));
}
// the fd will be changed to a block device
//
// Constructor: reads and validates the superblock, establishes the
// filesystem block size, then loads every block-group descriptor into
// theGroupDescriptors.
ext2::ext2(BlockDevice *theDrive, ulong baseAddress) : FileSystemBase(theDrive, baseAddress)
{
	uchar *buff = new uchar[1024];	// 2 block buffer, assume 512 byte blocks for ATA driver

	//
	// CLEAN THIS UP & DOUBLE CHECK
	//
	// Superblock lives 1024 bytes in, i.e. device blocks 2-3 for 512-byte
	// ATA sectors.
	ReadBlocks(2, 2, buff);

	/* for(int i=0; i < 100; ++i)
		printf("%x ", buff[i]); */

	MemCopy(&theSuperBlock, buff, sizeof(SuperBlock));	// copy over the super block
	delete [] buff;	// delete this memory

	// sanity checks: ext2 magic, and this driver only supports Linux (OS 0)
	if(theSuperBlock.magicValue != 0xEF53 || theSuperBlock.creatorOS != 0)
	{
		printf("MAGIC VALUE: 0x%x\n", theSuperBlock.magicValue);
		printf("OS TYPE: 0x%x\n", theSuperBlock.creatorOS);
		Panic::PrintMessage("Invalid super block or wrong OS type\n", false);
	}

	// fix up the block size (on-disk field is log2(size) - 10)
	switch(theSuperBlock.blockSize)
	{
	case 0: theSuperBlock.blockSize = blockSize = 1024; break;
	case 1: theSuperBlock.blockSize = blockSize = 2048; break;
	case 2: theSuperBlock.blockSize = blockSize = 4096; break;
	default: Panic::PrintMessage("Unknown blocks size\n");
	}

	// set the block size multiplier
	SetFileSystemBlockSize(blockSize);

	// set the addresses per block (block numbers per indirect block)
	addrPerBlock = blockSize / sizeof(ulong);

	/*
	printf("BLOCK SIZE: %d\n", blockSize);
	printf("BLOCK MUL: %d\n", blockMultiplier);
	printf("*** SUPER BLOCK ***\n");
	printf("INODE COUNT: %ul\n", theSuperBlock.inodeCount);			// Inodes count
	printf("BLOCK COUNT: %ul\n", theSuperBlock.blockCount);			// Blocks count
	printf("RESERVE COUNT: %u\n", theSuperBlock.reservedBlockCount);	// Reserved blocks count
	printf("FREE COUNT: %u\n", theSuperBlock.freeBlockCount);		// Free blocks count
	printf("FREE INODE COUNT: %u\n", theSuperBlock.freeInodeCount);		// Free inodes count
	printf("FIRST DATA BLOCK: %u\n", theSuperBlock.firstDataBlock);		// First Data Block
	printf("BLOCK SIZE: %u\n", theSuperBlock.blockSize);			// Block size
	printf("%u\n", theSuperBlock.fragmentSize);				// Fragment size
	printf("BLOCKS PER GROUP: %u\n", theSuperBlock.blocksPerGroup);		// # Blocks per group
	printf("%u\n", theSuperBlock.fragmentsPerGroup);			// # Fragments per group
	printf("INODES PER GROUP: %u\n", theSuperBlock.inodesPerGroup);		// # Inodes per group
	printf("MOUNT TIME: %u\n", theSuperBlock.mountTime);			// Mount time
	printf("WRITE TIME: %u\n", theSuperBlock.writeTime);			// Write time
	printf("MOUNT COUNT: %u\n", theSuperBlock.mountCount);			// Mount count
	printf("MAX MOUNT COUNT: %u\n", theSuperBlock.maxMountCount);		// Maximal mount count
	printf("MAGIC: 0x%x\n", theSuperBlock.magicValue);			// Magic signature
	printf("STATE: 0x%x\n", theSuperBlock.state);				// File system state
	printf("ERRORS: %u\n", theSuperBlock.errors);				// Behaviour when detecting errors
	printf("MINOR REVISION: %u\n", theSuperBlock.minorRevisionLevel);	// minor revision level
	printf("LAST CHECKED: %u\n", theSuperBlock.timeLastChecked);		// time of last check
	printf("CHECK INTERVAL: %u\n", theSuperBlock.checkInterval);		// max. time between checks
	printf("OS: %u\n", theSuperBlock.creatorOS);				// OS
	printf("REVISION: %u\n", theSuperBlock.revisionLevel);			// Revision level
	printf("UID: %u\n", theSuperBlock.defaultReservedUID);			// Default uid for reserved blocks
	printf("GID: %u\n\n", theSuperBlock.defaultReservedGID);		// Default gid for reserved blocks
	*/

	//
	// Figure out how many blocks there are for all the group descriptors
	//
	ulong numGroups = DivUp(theSuperBlock.blockCount, theSuperBlock.blocksPerGroup);
	ulong blocksForGroupDescriptors = DivUp(numGroups*sizeof(GroupDescriptor), blockSize);
	uchar *groupBlocks = new uchar[blocksForGroupDescriptors * blockSize];
	GroupDescriptor *tmpDescriptor;

	/*
	printf("NUM GROUPS: %d\n", numGroups);
	printf("BLOCKS NEEDED: %d\n", blocksForGroupDescriptors);
	printf("SIZE: %d\n", blocksForGroupDescriptors * blockSize);
	*/

	// First block of the group descriptor table.
	// NOTE(review): the 2048 block size accepted above has no case here and
	// will panic; also these values look device-block-based ("I have no idea
	// how to calculate this value" - original author) - verify both.
	int groupAddr = 0; // I have no idea how to calculate this value
	switch(blockSize)
	{
	case 1024: groupAddr += 2; break;
	case 4096: groupAddr += 4; break;
	default: Panic::PrintMessage("Unknown block size\n", false);
	}

	// read in the group
	ReadBlocks(groupAddr, blocksForGroupDescriptors, groupBlocks);

	// Unpack each descriptor from the raw buffer and keep a copy.
	for(ulong i=0; i < numGroups; ++i)
	{
		tmpDescriptor = reinterpret_cast<GroupDescriptor *>(&groupBlocks[i*sizeof(GroupDescriptor)]);

		/*
		printf("BLOCK BITMAP ADDR: %d\n", tmpDescriptor->blockBitmapAddress);
		printf("INODE BITMAP ADDR: %d\n", tmpDescriptor->inodeBitmapAddress);
		printf("INODE TABLE ADDR: %d\n", tmpDescriptor->inodeTableAddress);
		printf("FREE BLOCK COUNT: %d\n", tmpDescriptor->freeBlockCount);
		printf("FREE INODE COUNT: %d\n", tmpDescriptor->freeInodeCount);
		printf("USED DIR: %d\n\n", tmpDescriptor->usedDirCount);
		*/

		theGroupDescriptors.push_back(*tmpDescriptor);	// add it to our list
	}

	delete [] groupBlocks;	// free up some memory
}