/*
 * Set up the access-trace subsystem: reset the global trace counters,
 * allocate the trace buffer (if tracing is enabled via
 * rf_accessTraceBufSize), initialize the tracing mutex, and register the
 * shutdown hook.  Returns 0 on success, or the rc from the failing step.
 */
int
rf_ConfigureAccessTrace(RF_ShutdownList_t **listp)
{
	int rc;

	numTracesSoFar = accessTraceBufCount = rf_stopCollectingTraces = 0;
	if (rf_accessTraceBufSize) {
		RF_Malloc(access_tracebuf, rf_accessTraceBufSize *
		    sizeof(RF_AccTraceEntry_t), (RF_AccTraceEntry_t *));
		accessTraceBufCount = 0;
	}
	traceCount = 0;
	numTracesSoFar = 0;
	rc = rf_mutex_init(&rf_tracing_mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d.\n",
		    __FILE__, __LINE__, rc);
		/*
		 * Bug fix: the old code only logged this failure and kept
		 * going with an unusable mutex.  Free the trace buffer (if
		 * any) and bail out instead.
		 */
		if (rf_accessTraceBufSize) {
			RF_Free(access_tracebuf, rf_accessTraceBufSize *
			    sizeof(RF_AccTraceEntry_t));
		}
		return (rc);
	}
	rc = rf_ShutdownCreate(listp, rf_ShutdownAccessTrace, NULL);
	if (rc) {
		RF_ERRORMSG3("Unable to add to shutdown list file %s line %d"
		    " rc=%d.\n", __FILE__, __LINE__, rc);
		if (rf_accessTraceBufSize) {
			RF_Free(access_tracebuf, rf_accessTraceBufSize *
			    sizeof(RF_AccTraceEntry_t));
		}
		/*
		 * Bug fix: destroy the mutex unconditionally.  It was
		 * previously destroyed only when rf_accessTraceBufSize was
		 * non-zero, leaking it otherwise.
		 */
		rf_mutex_destroy(&rf_tracing_mutex);
	}
	return (rc);
}
/* called at system boot time */ int rf_BootRaidframe() { int rc; if (raidframe_booted) return (EBUSY); raidframe_booted = 1; #if RF_DEBUG_ATOMIC > 0 rf_atent_init(); #endif /* RF_DEBUG_ATOMIC > 0 */ rf_setup_threadid(); rf_assign_threadid(); rc = rf_mutex_init(&configureMutex); if (rc) { RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__, __LINE__, rc); RF_PANIC(); } configureCount = 0; isconfigged = 0; globalShutdown = NULL; return (0); }
/*
 * Return a struct for holding common parity log information from the
 * free list (rf_parityLogDiskQueue.freeCommonList).  If the free list
 * is empty, call RF_Malloc to create a new structure.  NON-BLOCKING.
 * Returns NULL if a freshly allocated structure's mutex cannot be
 * initialized.
 */
RF_CommonLogData_t *
rf_AllocParityLogCommonData(RF_Raid_t *raidPtr)
{
	RF_CommonLogData_t *common = NULL;
	int rc;

	RF_LOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
	if (raidPtr->parityLogDiskQueue.freeCommonList) {
		/* Pop the head of the free list. */
		common = raidPtr->parityLogDiskQueue.freeCommonList;
		raidPtr->parityLogDiskQueue.freeCommonList =
		    raidPtr->parityLogDiskQueue.freeCommonList->next;
		RF_UNLOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
	} else {
		RF_UNLOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
		RF_Malloc(common, sizeof(RF_CommonLogData_t),
		    (RF_CommonLogData_t *));
		rc = rf_mutex_init(&common->mutex);
		if (rc) {
			RF_ERRORMSG3("Unable to init mutex file %s line %d"
			    " rc=%d\n", __FILE__, __LINE__, rc);
			RF_Free(common, sizeof(RF_CommonLogData_t));
			common = NULL;
		}
	}
	/*
	 * Bug fix: common may be NULL here (mutex init failure above);
	 * the old code dereferenced it unconditionally.
	 */
	if (common != NULL)
		common->next = NULL;
	return (common);
}
/* Configure a single disk queue. */ int rf_ConfigureDiskQueue( RF_Raid_t *raidPtr, RF_DiskQueue_t *diskqueue, /* row & col -- Debug only. BZZT not any more... */ RF_RowCol_t r, RF_RowCol_t c, RF_DiskQueueSW_t *p, RF_SectorCount_t sectPerDisk, dev_t dev, int maxOutstanding, RF_ShutdownList_t **listp, RF_AllocListElem_t *clList ) { int rc; diskqueue->row = r; diskqueue->col = c; diskqueue->qPtr = p; diskqueue->qHdr = (p->Create) (sectPerDisk, clList, listp); diskqueue->dev = dev; diskqueue->numOutstanding = 0; diskqueue->queueLength = 0; diskqueue->maxOutstanding = maxOutstanding; diskqueue->curPriority = RF_IO_NORMAL_PRIORITY; diskqueue->nextLockingOp = NULL; diskqueue->unlockingOp = NULL; diskqueue->numWaiting = 0; diskqueue->flags = 0; diskqueue->raidPtr = raidPtr; diskqueue->rf_cinfo = &raidPtr->raid_cinfo[r][c]; rc = rf_create_managed_mutex(listp, &diskqueue->mutex); if (rc) { RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__, __LINE__, rc); return (rc); } rc = rf_create_managed_cond(listp, &diskqueue->cond); if (rc) { RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n", __FILE__, __LINE__, rc); return (rc); } return (0); }
/*
 * Initialize the mutex/condition pair in an RF_MCPair_t.  On a
 * condition-variable failure the already-created mutex is torn down,
 * so the pair is either fully initialized or fully uninitialized.
 * Returns 0 on success, the failing primitive's rc otherwise.
 */
int
rf_init_mcpair(RF_MCPair_t *t)
{
	int error;

	if ((error = rf_mutex_init(&t->mutex)) != 0) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n",
		    __FILE__, __LINE__, error);
		return (error);
	}
	if ((error = rf_cond_init(&t->cond)) != 0) {
		RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n",
		    __FILE__, __LINE__, error);
		/* Undo the mutex so we don't leave a half-built pair. */
		rf_mutex_destroy(&t->mutex);
		return (error);
	}
	return (0);
}
/*
 * Set up the allocation-list subsystem: create its mutex, reset the
 * free-list state and hit/miss counters, and register the shutdown
 * hook.  On shutdown-registration failure the mutex is destroyed
 * again.  Returns 0 on success, the failing step's rc otherwise.
 */
int
rf_ConfigureAllocList(RF_ShutdownList_t **listp)
{
	int error;

	if ((error = rf_mutex_init(&alist_mutex)) != 0) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d.\n",
		    __FILE__, __LINE__, error);
		return (error);
	}

	/* Fresh, empty free list with zeroed statistics. */
	al_free_list = NULL;
	fl_hit_count = fl_miss_count = al_free_list_count = 0;

	if ((error = rf_ShutdownCreate(listp, rf_ShutdownAllocList,
	    NULL)) != 0) {
		RF_ERRORMSG3("Unable to add to shutdown list file %s line %d"
		    " rc=%d.\n", __FILE__, __LINE__, error);
		rf_mutex_destroy(&alist_mutex);
		return (error);
	}
	return (0);
}
/*
 * Allocate and initialize a reconstruction map describing which
 * reconstruction units of a disk still need work.  The number of RUs
 * is derived from the array layout; the per-RU status list array is
 * zeroed up front.  Returns the new map, or NULL if its mutex cannot
 * be initialized (in which case all allocations are released).
 */
RF_ReconMap_t *
rf_MakeReconMap(
    RF_Raid_t *raidPtr,
    RF_SectorCount_t ru_sectors,	/* Size of reconstruction unit in
					 * sectors. */
    RF_SectorCount_t disk_sectors,	/* Size of disk in sectors. */
    RF_ReconUnitCount_t spareUnitsPerDisk	/* Zero unless distributed
						 * sparing. */
)
{
	RF_RaidLayout_t *layout = &raidPtr->Layout;
	RF_ReconUnitCount_t numRUs =
	    layout->stripeUnitsPerDisk / layout->SUsPerRU;
	RF_ReconMap_t *rm;
	int error;

	RF_Malloc(rm, sizeof(RF_ReconMap_t), (RF_ReconMap_t *));

	rm->sectorsPerReconUnit = ru_sectors;
	rm->sectorsInDisk = disk_sectors;
	rm->totalRUs = numRUs;
	rm->spareRUs = spareUnitsPerDisk;
	rm->unitsLeft = numRUs - spareUnitsPerDisk;

	/* One list head per reconstruction unit, all initially empty. */
	RF_Malloc(rm->status, numRUs * sizeof(RF_ReconMapListElem_t *),
	    (RF_ReconMapListElem_t **));
	RF_ASSERT(rm->status != (RF_ReconMapListElem_t **) NULL);
	(void) bzero((char *) rm->status,
	    numRUs * sizeof(RF_ReconMapListElem_t *));

	rm->size = sizeof(RF_ReconMap_t) +
	    numRUs * sizeof(RF_ReconMapListElem_t *);
	rm->maxSize = rm->size;

	if ((error = rf_mutex_init(&rm->mutex)) != 0) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d.\n",
		    __FILE__, __LINE__, error);
		RF_Free(rm->status,
		    numRUs * sizeof(RF_ReconMapListElem_t *));
		RF_Free(rm, sizeof(RF_ReconMap_t));
		return (NULL);
	}
	return (rm);
}
/*
 * Set up the memory-debugging machinery: create the managed mutex that
 * guards the debug-memory table and, when rf_memDebug is enabled, clear
 * and activate the hash table.  Returns 0 on success.
 */
int
rf_ConfigureDebugMem(RF_ShutdownList_t **listp)
{
	int error, slot;

	error = rf_create_managed_mutex(listp, &rf_debug_mem_mutex);
	if (error != 0) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n",
		    __FILE__, __LINE__, error);
		return (error);
	}
	if (rf_memDebug) {
		/* Empty the tracking table before marking it usable. */
		for (slot = 0; slot < RF_MH_TABLESIZE; slot++)
			mh_table[slot] = NULL;
		mh_table_initialized = 1;
	}
	return (0);
}
/* * This function is really just for debugging user-level stuff: it * frees up all memory, other RAIDframe resources which might otherwise * be kept around. This is used with systems like "sentinel" to detect * memory leaks. */ int rf_UnbootRaidframe() { int rc; RF_LOCK_MUTEX(configureMutex); if (configureCount) { RF_UNLOCK_MUTEX(configureMutex); return (EBUSY); } raidframe_booted = 0; RF_UNLOCK_MUTEX(configureMutex); rc = rf_mutex_destroy(&configureMutex); if (rc) { RF_ERRORMSG3("Unable to destroy mutex file %s line %d rc=%d\n", __FILE__, __LINE__, rc); RF_PANIC(); } #if RF_DEBUG_ATOMIC > 0 rf_atent_shutdown(); #endif /* RF_DEBUG_ATOMIC > 0 */ return (0); }
/*
 * Configure a declustered-parity layout from the block design carried in
 * the layout-specific part of the config.  The layout-specific buffer
 * holds (in order): the sparemap file name, then the block design
 * parameters b, v, k, r, lambda, a no-rotate flag, and finally the b*k
 * layout table bytes.  Computes all derived table/spare-region geometry,
 * resizes the per-disk stripe-unit count, and builds the lookup tables
 * used by the mapping code.  Returns 0 on success, ENOMEM on allocation
 * failure, EINVAL on a config error.
 */
int
rf_ConfigureDeclustered(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
    RF_Config_t *cfgPtr)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	int b, v, k, r, lambda;	/* block design params */
	int i, j;
	RF_RowCol_t *first_avail_slot;
	RF_StripeCount_t complete_FT_count, numCompleteFullTablesPerDisk;
	RF_DeclusteredConfigInfo_t *info;
	RF_StripeCount_t PUsPerDisk, spareRegionDepthInPUs,
	    numCompleteSpareRegionsPerDisk, extraPUsPerDisk;
	RF_StripeCount_t totSparePUsPerDisk;
	RF_SectorNum_t diskOffsetOfLastFullTableInSUs;
	RF_SectorCount_t SpareSpaceInSUs;
	char *cfgBuf = (char *) (cfgPtr->layoutSpecific);
	RF_StripeNum_t l, SUID;

	SUID = l = 0;
	numCompleteSpareRegionsPerDisk = 0;

	/* 1. create layout specific structure */
	RF_MallocAndAdd(info, sizeof(RF_DeclusteredConfigInfo_t),
	    (RF_DeclusteredConfigInfo_t *), raidPtr->cleanupList);
	if (info == NULL)
		return (ENOMEM);
	layoutPtr->layoutSpecificInfo = (void *) info;
	info->SpareTable = NULL;

	/* 2. extract parameters from the config structure */
	if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {
		(void)memcpy(info->sparemap_fname, cfgBuf,
		    RF_SPAREMAP_NAME_LEN);
	}
	cfgBuf += RF_SPAREMAP_NAME_LEN;

	/*
	 * Block design parameters follow the sparemap name as packed ints.
	 * NOTE(review): v is checked against numCol below, so it is the
	 * number of disks; b/k/r/lambda are stored as BlocksPerTable,
	 * groupSize, and Lambda respectively -- see assignments below.
	 */
	b = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	v = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	k = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	r = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	lambda = *((int *) cfgBuf);
	cfgBuf += sizeof(int);
	raidPtr->noRotate = *((int *) cfgBuf);
	cfgBuf += sizeof(int);

	/* the sparemaps are generated assuming that parity is rotated, so we
	 * issue a warning if both distributed sparing and no-rotate are on at
	 * the same time */
	if ((layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) &&
	    raidPtr->noRotate) {
		RF_ERRORMSG("Warning: distributed sparing specified without parity rotation.\n");
	}
	if (raidPtr->numCol != v) {
		RF_ERRORMSG2("RAID: config error: table element count (%d) not equal to no. of cols (%d)\n", v, raidPtr->numCol);
		return (EINVAL);
	}

	/* 3. set up the values used in the mapping code */
	info->BlocksPerTable = b;
	info->Lambda = lambda;
	info->NumParityReps = info->groupSize = k;
	info->SUsPerTable = b * (k - 1) * layoutPtr->SUsPerPU;	/* b blks, k-1 SUs each */
	info->SUsPerFullTable = k * info->SUsPerTable;	/* rot k times */
	info->PUsPerBlock = k - 1;
	info->SUsPerBlock = info->PUsPerBlock * layoutPtr->SUsPerPU;
	info->TableDepthInPUs = (b * k) / v;
	info->FullTableDepthInPUs = info->TableDepthInPUs * k;	/* k repetitions */

	/* used only in distributed sparing case */
	info->FullTablesPerSpareRegion = (v - 1) / rf_gcd(r, v - 1);	/* (v-1)/gcd fulltables */
	info->TablesPerSpareRegion = k * info->FullTablesPerSpareRegion;
	info->SpareSpaceDepthPerRegionInSUs =
	    (r * info->TablesPerSpareRegion / (v - 1)) * layoutPtr->SUsPerPU;

	/* check to make sure the block design is sufficiently small */
	if ((raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
		if (info->FullTableDepthInPUs * layoutPtr->SUsPerPU +
		    info->SpareSpaceDepthPerRegionInSUs >
		    layoutPtr->stripeUnitsPerDisk) {
			RF_ERRORMSG3("RAID: config error: Full Table depth (%d) + Spare Space (%d) larger than disk size (%d) (BD too big)\n",
			    (int) info->FullTableDepthInPUs,
			    (int) info->SpareSpaceDepthPerRegionInSUs,
			    (int) layoutPtr->stripeUnitsPerDisk);
			return (EINVAL);
		}
	} else {
		if (info->TableDepthInPUs * layoutPtr->SUsPerPU >
		    layoutPtr->stripeUnitsPerDisk) {
			RF_ERRORMSG2("RAID: config error: Table depth (%d) larger than disk size (%d) (BD too big)\n",
			    (int) (info->TableDepthInPUs * layoutPtr->SUsPerPU),
			    (int) layoutPtr->stripeUnitsPerDisk);
			return (EINVAL);
		}
	}

	/* compute the size of each disk, and the number of tables in the last
	 * fulltable (which need not be complete) */
	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
		PUsPerDisk = layoutPtr->stripeUnitsPerDisk /
		    layoutPtr->SUsPerPU;
		spareRegionDepthInPUs =
		    (info->TablesPerSpareRegion * info->TableDepthInPUs +
		    (info->TablesPerSpareRegion * info->TableDepthInPUs) /
		    (v - 1));
		info->SpareRegionDepthInSUs =
		    spareRegionDepthInPUs * layoutPtr->SUsPerPU;

		numCompleteSpareRegionsPerDisk =
		    PUsPerDisk / spareRegionDepthInPUs;
		info->NumCompleteSRs = numCompleteSpareRegionsPerDisk;
		extraPUsPerDisk = PUsPerDisk % spareRegionDepthInPUs;

		/* assume conservatively that we need the full amount of spare
		 * space in one region in order to provide spares for the
		 * partial spare region at the end of the array.  We set "i"
		 * to the number of tables in the partial spare region.  This
		 * may actually include some fulltables. */
		extraPUsPerDisk -= (info->SpareSpaceDepthPerRegionInSUs /
		    layoutPtr->SUsPerPU);
		if (extraPUsPerDisk <= 0)
			i = 0;
		else
			i = extraPUsPerDisk / info->TableDepthInPUs;

		complete_FT_count = (numCompleteSpareRegionsPerDisk *
		    (info->TablesPerSpareRegion / k) + i / k);
		info->FullTableLimitSUID =
		    complete_FT_count * info->SUsPerFullTable;
		info->ExtraTablesPerDisk = i % k;

		/* note that in the last spare region, the spare space is
		 * complete even though data/parity space is not */
		totSparePUsPerDisk = (numCompleteSpareRegionsPerDisk + 1) *
		    (info->SpareSpaceDepthPerRegionInSUs /
		    layoutPtr->SUsPerPU);
		info->TotSparePUsPerDisk = totSparePUsPerDisk;

		layoutPtr->stripeUnitsPerDisk =
		    ((complete_FT_count) * info->FullTableDepthInPUs +	/* data & parity space */
		    info->ExtraTablesPerDisk * info->TableDepthInPUs +
		    totSparePUsPerDisk	/* spare space */
		    ) * layoutPtr->SUsPerPU;
		layoutPtr->dataStripeUnitsPerDisk =
		    (complete_FT_count * info->FullTableDepthInPUs +
		    info->ExtraTablesPerDisk * info->TableDepthInPUs) *
		    layoutPtr->SUsPerPU * (k - 1) / k;
	} else {
		/* non-dist spare case: force each disk to contain an
		 * integral number of tables */
		layoutPtr->stripeUnitsPerDisk /=
		    (info->TableDepthInPUs * layoutPtr->SUsPerPU);
		layoutPtr->stripeUnitsPerDisk *=
		    (info->TableDepthInPUs * layoutPtr->SUsPerPU);

		/* compute the number of tables in the last fulltable, which
		 * need not be complete */
		complete_FT_count =
		    ((layoutPtr->stripeUnitsPerDisk / layoutPtr->SUsPerPU) /
		    info->FullTableDepthInPUs);
		info->FullTableLimitSUID =
		    complete_FT_count * info->SUsPerFullTable;
		info->ExtraTablesPerDisk =
		    ((layoutPtr->stripeUnitsPerDisk / layoutPtr->SUsPerPU) /
		    info->TableDepthInPUs) % k;
	}

	raidPtr->sectorsPerDisk =
	    layoutPtr->stripeUnitsPerDisk * layoutPtr->sectorsPerStripeUnit;

	/* find the disk offset of the stripe unit where the last fulltable
	 * starts */
	numCompleteFullTablesPerDisk = complete_FT_count;
	diskOffsetOfLastFullTableInSUs = numCompleteFullTablesPerDisk *
	    info->FullTableDepthInPUs * layoutPtr->SUsPerPU;
	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
		SpareSpaceInSUs = numCompleteSpareRegionsPerDisk *
		    info->SpareSpaceDepthPerRegionInSUs;
		diskOffsetOfLastFullTableInSUs += SpareSpaceInSUs;
		info->DiskOffsetOfLastSpareSpaceChunkInSUs =
		    diskOffsetOfLastFullTableInSUs +
		    info->ExtraTablesPerDisk * info->TableDepthInPUs *
		    layoutPtr->SUsPerPU;
	}
	info->DiskOffsetOfLastFullTableInSUs = diskOffsetOfLastFullTableInSUs;
	info->numCompleteFullTablesPerDisk = numCompleteFullTablesPerDisk;

	/* 4. create and initialize the lookup tables */
	info->LayoutTable = rf_make_2d_array(b, k, raidPtr->cleanupList);
	if (info->LayoutTable == NULL)
		return (ENOMEM);
	info->OffsetTable = rf_make_2d_array(b, k, raidPtr->cleanupList);
	if (info->OffsetTable == NULL)
		return (ENOMEM);
	info->BlockTable = rf_make_2d_array(info->TableDepthInPUs *
	    layoutPtr->SUsPerPU, raidPtr->numCol, raidPtr->cleanupList);
	if (info->BlockTable == NULL)
		return (ENOMEM);

	first_avail_slot = rf_make_1d_array(v, NULL);
	if (first_avail_slot == NULL)
		return (ENOMEM);

	/* The layout table bytes follow the packed ints in the config. */
	for (i = 0; i < b; i++)
		for (j = 0; j < k; j++)
			info->LayoutTable[i][j] = *cfgBuf++;

	/* initialize offset table */
	for (i = 0; i < b; i++)
		for (j = 0; j < k; j++) {
			info->OffsetTable[i][j] =
			    first_avail_slot[info->LayoutTable[i][j]];
			first_avail_slot[info->LayoutTable[i][j]]++;
		}

	/* initialize block table */
	for (SUID = l = 0; l < layoutPtr->SUsPerPU; l++) {
		for (i = 0; i < b; i++) {
			for (j = 0; j < k; j++) {
				info->BlockTable[(info->OffsetTable[i][j] *
				    layoutPtr->SUsPerPU) + l]
				    [info->LayoutTable[i][j]] = SUID;
			}
			SUID++;
		}
	}

	/* Scratch array is no longer needed once the tables are built. */
	rf_free_1d_array(first_avail_slot, v);

	/* 5. set up the remaining redundant-but-useful parameters */
	raidPtr->totalSectors =
	    (k * complete_FT_count + info->ExtraTablesPerDisk) *
	    info->SUsPerTable * layoutPtr->sectorsPerStripeUnit;
	layoutPtr->numStripe = (raidPtr->totalSectors /
	    layoutPtr->sectorsPerStripeUnit) / (k - 1);

	/* strange evaluation order below to try and minimize overflow
	 * problems */
	layoutPtr->dataSectorsPerStripe =
	    (k - 1) * layoutPtr->sectorsPerStripeUnit;
	layoutPtr->numDataCol = k - 1;
	layoutPtr->numParityCol = 1;

	return (0);
}
/*
 * Emit the standard "mutex init failed" diagnostic for the given call
 * site (file/line) and error code.
 */
void
rf_print_unable_to_init_mutex(const char *file, int line, int rc)
{
	RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n",
	    file, line, rc);
}
/****************************************************************************
 * Set up the data structures describing the spare disks in the array.
 * Recall from the above comment that the spare disk descriptors are stored
 * in row zero, which is specially expanded to hold them.
 *
 * Returns 0 on success; on failure returns the error from the failing
 * step after releasing the component vnodes via rf_UnconfigureVnodes().
 ****************************************************************************/
int
rf_ConfigureSpareDisks(RF_ShutdownList_t ** listp, RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	int i, ret;
	unsigned int bs;
	RF_RaidDisk_t *disks;
	int num_spares_done;

	num_spares_done = 0;

	/*
	 * The space for the spares should have already been allocated by
	 * ConfigureDisks().
	 */
	disks = &raidPtr->Disks[0][raidPtr->numCol];

	for (i = 0; i < raidPtr->numSpare; i++) {
		ret = rf_ConfigureDisk(raidPtr, &cfgPtr->spare_names[i][0],
		    &disks[i], 0, raidPtr->numCol + i);
		if (ret)
			goto fail;
		if (disks[i].status != rf_ds_optimal) {
			/* Probe failed -- warn but keep configuring. */
			RF_ERRORMSG1("Warning: spare disk %s failed TUR\n",
			    &cfgPtr->spare_names[i][0]);
		} else {
			/* Change status to spare. */
			disks[i].status = rf_ds_spare;
			DPRINTF6("Spare Disk %d: dev %s numBlocks %ld"
			    " blockSize %d (%ld MB).\n", i,
			    disks[i].devname,
			    (long int) disks[i].numBlocks,
			    disks[i].blockSize,
			    (long int) disks[i].numBlocks *
			    disks[i].blockSize / 1024 / 1024);
		}
		num_spares_done++;
	}

	/* Check sizes and block sizes on spare disks. */
	bs = 1 << raidPtr->logBytesPerSector;
	for (i = 0; i < raidPtr->numSpare; i++) {
		if (disks[i].blockSize != bs) {
			RF_ERRORMSG3("Block size of %d on spare disk %s is"
			    " not the same as on other disks (%d).\n",
			    disks[i].blockSize, disks[i].devname, bs);
			ret = EINVAL;
			goto fail;
		}
		if (disks[i].numBlocks < raidPtr->sectorsPerDisk) {
			/* Too small to ever substitute for a data disk. */
			RF_ERRORMSG3("Spare disk %s (%llu blocks) is too small"
			    " to serve as a spare (need %llu blocks).\n",
			    disks[i].devname, disks[i].numBlocks,
			    raidPtr->sectorsPerDisk);
			ret = EINVAL;
			goto fail;
		} else if (disks[i].numBlocks > raidPtr->sectorsPerDisk) {
			/* Oversized spares are simply clipped to fit. */
			RF_ERRORMSG2("Warning: truncating spare disk"
			    " %s to %llu blocks.\n", disks[i].devname,
			    raidPtr->sectorsPerDisk);
			disks[i].numBlocks = raidPtr->sectorsPerDisk;
		}
	}

	return (0);

fail:
	/*
	 * Release the hold on the main components.  We've failed to allocate
	 * a spare, and since we're failing, we need to free things...
	 *
	 * XXX Failing to allocate a spare is *not* that big of a deal...
	 * We *can* survive without it, if need be, esp. if we get hot
	 * adding working.
	 * If we don't fail out here, then we need a way to remove this spare...
	 * That should be easier to do here than if we are "live"...
	 */
	rf_UnconfigureVnodes(raidPtr);

	return (ret);
}
/****************************************************************************
 *
 * Initialize the disks comprising the array.
 *
 * We want the spare disks to have regular row,col numbers so that we can
 * easily substitue a spare for a failed disk.  But, the driver code assumes
 * throughout that the array contains numRow by numCol _non-spare_ disks, so
 * it's not clear how to fit in the spares.  This is an unfortunate holdover
 * from raidSim.  The quick and dirty fix is to make row zero bigger than the
 * rest, and put all the spares in it.  This probably needs to get changed
 * eventually.
 *
 * Returns 0 on success; on failure, releases the component vnodes and
 * returns the error from the failing step.
 ****************************************************************************/
int
rf_ConfigureDisks(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
    RF_Config_t *cfgPtr)
{
	RF_RaidDisk_t **disks;
	/* Running minimum disk size; starts at a huge sentinel value. */
	RF_SectorCount_t min_numblks = (RF_SectorCount_t) 0x7FFFFFFFFFFFLL;
	RF_RowCol_t r, c;
	int bs, ret;
	unsigned i, count, foundone = 0, numFailuresThisRow;
	int force;

	force = cfgPtr->force;

	ret = rf_AllocDiskStructures(raidPtr, cfgPtr);
	if (ret)
		goto fail;

	disks = raidPtr->Disks;

	for (r = 0; r < raidPtr->numRow; r++) {
		numFailuresThisRow = 0;
		for (c = 0; c < raidPtr->numCol; c++) {
			ret = rf_ConfigureDisk(raidPtr,
			    &cfgPtr->devnames[r][c][0], &disks[r][c], r, c);
			if (ret)
				goto fail;

			if (disks[r][c].status == rf_ds_optimal) {
				/* Pull in the on-disk component label. */
				raidread_component_label(
				    raidPtr->raid_cinfo[r][c].ci_dev,
				    raidPtr->raid_cinfo[r][c].ci_vp,
				    &raidPtr->raid_cinfo[r][c].ci_label);
			}
			if (disks[r][c].status != rf_ds_optimal) {
				numFailuresThisRow++;
			} else {
				if (disks[r][c].numBlocks < min_numblks)
					min_numblks = disks[r][c].numBlocks;
				DPRINTF7("Disk at row %d col %d: dev %s"
				    " numBlocks %ld blockSize %d (%ld MB)\n",
				    r, c, disks[r][c].devname,
				    (long int) disks[r][c].numBlocks,
				    disks[r][c].blockSize,
				    (long int) disks[r][c].numBlocks *
				    disks[r][c].blockSize / 1024 / 1024);
			}
		}
		/* XXX Fix for n-fault tolerant. */
		/*
		 * XXX This should probably check to see how many failures
		 * we can handle for this configuration !
		 */
		if (numFailuresThisRow > 0)
			raidPtr->status[r] = rf_rs_degraded;
	}

	/*
	 * All disks must be the same size & have the same block size, bs must
	 * be a power of 2.
	 */
	bs = 0;
	/* Use the first live disk's block size as the reference. */
	for (foundone = r = 0; !foundone && r < raidPtr->numRow; r++) {
		for (c = 0; !foundone && c < raidPtr->numCol; c++) {
			if (disks[r][c].status == rf_ds_optimal) {
				bs = disks[r][c].blockSize;
				foundone = 1;
			}
		}
	}
	if (!foundone) {
		RF_ERRORMSG("RAIDFRAME: Did not find any live disks in"
		    " the array.\n");
		ret = EINVAL;
		goto fail;
	}

	/* Count set bits in bs; exactly one bit means a power of 2. */
	for (count = 0, i = 1; i; i <<= 1)
		if (bs & i)
			count++;
	if (count != 1) {
		RF_ERRORMSG1("Error: block size on disks (%d) must be a"
		    " power of 2.\n", bs);
		ret = EINVAL;
		goto fail;
	}

	if (rf_CheckLabels(raidPtr, cfgPtr)) {
		printf("raid%d: There were fatal errors\n", raidPtr->raidid);
		if (force != 0) {
			/* User asked to proceed despite label problems. */
			printf("raid%d: Fatal errors being ignored.\n",
			    raidPtr->raidid);
		} else {
			ret = EINVAL;
			goto fail;
		}
	}

	for (r = 0; r < raidPtr->numRow; r++) {
		for (c = 0; c < raidPtr->numCol; c++) {
			if (disks[r][c].status == rf_ds_optimal) {
				if (disks[r][c].blockSize != bs) {
					RF_ERRORMSG2("Error: block size of"
					    " disk at r %d c %d different from"
					    " disk at r 0 c 0.\n", r, c);
					ret = EINVAL;
					goto fail;
				}
				if (disks[r][c].numBlocks != min_numblks) {
					/* Clip larger disks to the smallest. */
					RF_ERRORMSG3("WARNING: truncating disk"
					    " at r %d c %d to %d blocks.\n",
					    r, c, (int) min_numblks);
					disks[r][c].numBlocks = min_numblks;
				}
			}
		}
	}

	raidPtr->sectorsPerDisk = min_numblks;
	raidPtr->logBytesPerSector = ffs(bs) - 1;
	raidPtr->bytesPerSector = bs;
	raidPtr->sectorMask = bs - 1;

	return (0);

fail:
	rf_UnconfigureVnodes(raidPtr);

	return (ret);
}
/*
 * Add a hot spare to a running array: configure the component named in
 * sparePtr as the next spare in row zero, verify its block size and
 * capacity against the array, set up its disk queue, and bump
 * raidPtr->numSpare.  Returns 0 on success; EINVAL (or the failing
 * step's error) otherwise.  raidPtr->mutex is held for the duration.
 */
int
rf_add_hot_spare(RF_Raid_t *raidPtr, RF_SingleComponent_t *sparePtr)
{
	RF_RaidDisk_t *disks;
	RF_DiskQueue_t *spareQueues;
	int ret;
	unsigned int bs;
	int spare_number;

#if 0
	printf("Just in rf_add_hot_spare: %d.\n", raidPtr->numSpare);
	printf("Num col: %d.\n", raidPtr->numCol);
#endif
	if (raidPtr->numSpare >= RF_MAXSPARE) {
		RF_ERRORMSG1("Too many spares: %d.\n", raidPtr->numSpare);
		return(EINVAL);
	}

	RF_LOCK_MUTEX(raidPtr->mutex);

	/* The beginning of the spares... */
	disks = &raidPtr->Disks[0][raidPtr->numCol];

	spare_number = raidPtr->numSpare;

	ret = rf_ConfigureDisk(raidPtr, sparePtr->component_name,
	    &disks[spare_number], 0, raidPtr->numCol + spare_number);
	if (ret)
		goto fail;
	if (disks[spare_number].status != rf_ds_optimal) {
		RF_ERRORMSG1("Warning: spare disk %s failed TUR.\n",
		    sparePtr->component_name);
		ret = EINVAL;
		goto fail;
	} else {
		disks[spare_number].status = rf_ds_spare;
		DPRINTF6("Spare Disk %d: dev %s numBlocks %ld blockSize %d"
		    " (%ld MB).\n",
		    spare_number, disks[spare_number].devname,
		    (long int) disks[spare_number].numBlocks,
		    disks[spare_number].blockSize,
		    (long int) disks[spare_number].numBlocks *
		    disks[spare_number].blockSize / 1024 / 1024);
	}

	/* Check sizes and block sizes on the spare disk. */
	bs = 1 << raidPtr->logBytesPerSector;
	if (disks[spare_number].blockSize != bs) {
		RF_ERRORMSG3("Block size of %d on spare disk %s is not"
		    " the same as on other disks (%d).\n",
		    disks[spare_number].blockSize,
		    disks[spare_number].devname, bs);
		ret = EINVAL;
		goto fail;
	}
	if (disks[spare_number].numBlocks < raidPtr->sectorsPerDisk) {
		RF_ERRORMSG3("Spare disk %s (%llu blocks) is too small to serve"
		    " as a spare (need %llu blocks).\n",
		    disks[spare_number].devname,
		    disks[spare_number].numBlocks,
		    raidPtr->sectorsPerDisk);
		ret = EINVAL;
		goto fail;
	} else {
		if (disks[spare_number].numBlocks > raidPtr->sectorsPerDisk) {
			RF_ERRORMSG2("Warning: truncating spare disk %s to %llu"
			    " blocks.\n", disks[spare_number].devname,
			    raidPtr->sectorsPerDisk);
			disks[spare_number].numBlocks =
			    raidPtr->sectorsPerDisk;
		}
	}

	spareQueues = &raidPtr->Queues[0][raidPtr->numCol];
	ret = rf_ConfigureDiskQueue(raidPtr, &spareQueues[spare_number],
	    0, raidPtr->numCol + spare_number,
	    raidPtr->qType, raidPtr->sectorsPerDisk,
	    raidPtr->Disks[0][raidPtr->numCol + spare_number].dev,
	    raidPtr->maxOutstanding, &raidPtr->shutdownList,
	    raidPtr->cleanupList);
	/*
	 * Bug fix: the return value of rf_ConfigureDiskQueue was previously
	 * ignored -- a spare whose queue failed to configure was still
	 * counted and 0 returned.  Fail out instead.
	 */
	if (ret)
		goto fail;

	raidPtr->numSpare++;

	RF_UNLOCK_MUTEX(raidPtr->mutex);

	return (0);

fail:
	RF_UNLOCK_MUTEX(raidPtr->mutex);
	return(ret);
}
/****************************************************************************************
 * set up the data structures describing the spare disks in the array
 * recall from the above comment that the spare disk descriptors are stored
 * in row zero, which is specially expanded to hold them.
 *
 * Returns 0 on success; on failure, releases held resources (component
 * vnodes in-kernel, SCSI ops otherwise) and returns the failing error.
 ***************************************************************************************/
int
rf_ConfigureSpareDisks(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	int r, c, i, ret;
	RF_DiskOp_t *rdcap_op = NULL, *tur_op = NULL;
	unsigned bs;
	RF_RaidDisk_t *disks;
	int num_spares_done;
	struct proc *proc;

#if !defined(__NetBSD__) && !defined(__OpenBSD__)
	/* Pre-allocate the SCSI read-capacity and TUR ops used for probing. */
	ret = rf_SCSI_AllocReadCapacity(&rdcap_op);
	if (ret)
		goto fail;
	ret = rf_SCSI_AllocTUR(&tur_op);
	if (ret)
		goto fail;
#endif				/* !__NetBSD__ && !__OpenBSD__ */

	num_spares_done = 0;
	proc = raidPtr->proc;

	/* The space for the spares should have already been allocated by
	 * ConfigureDisks() */
	disks = &raidPtr->Disks[0][raidPtr->numCol];
	for (i = 0; i < raidPtr->numSpare; i++) {
		ret = rf_ConfigureDisk(raidPtr, &cfgPtr->spare_names[i][0],
		    &disks[i], rdcap_op, tur_op, cfgPtr->spare_devs[i],
		    0, raidPtr->numCol + i);
		if (ret)
			goto fail;
		if (disks[i].status != rf_ds_optimal) {
			/*
			 * Bug fix: report the name of the disk that failed.
			 * The old code printed an uninitialized local buffer
			 * here.
			 */
			RF_ERRORMSG1("Warning: spare disk %s failed TUR\n",
			    &cfgPtr->spare_names[i][0]);
		} else {
			disks[i].status = rf_ds_spare;	/* change status to
							 * spare */
			DPRINTF6("Spare Disk %d: dev %s numBlocks %ld blockSize %d (%ld MB)\n", i,
			    disks[i].devname,
			    (long int) disks[i].numBlocks, disks[i].blockSize,
			    (long int) disks[i].numBlocks *
			    disks[i].blockSize / 1024 / 1024);
		}
		num_spares_done++;
	}
#if (defined(__NetBSD__) || defined(__OpenBSD__)) && defined(_KERNEL)
	/*
	 * Consistency fix: was "(_KERNEL)"; every other guard in this file
	 * uses defined(_KERNEL), and a bare _KERNEL would misfire if the
	 * macro were defined as 0.
	 */
#else
	rf_SCSI_FreeDiskOp(rdcap_op, 1);
	rdcap_op = NULL;
	rf_SCSI_FreeDiskOp(tur_op, 0);
	tur_op = NULL;
#endif

	/* check sizes and block sizes on spare disks */
	bs = 1 << raidPtr->logBytesPerSector;
	for (i = 0; i < raidPtr->numSpare; i++) {
		if (disks[i].blockSize != bs) {
			RF_ERRORMSG3("Block size of %d on spare disk %s is not the same as on other disks (%d)\n", disks[i].blockSize, disks[i].devname, bs);
			ret = EINVAL;
			goto fail;
		}
		if (disks[i].numBlocks < raidPtr->sectorsPerDisk) {
			/*
			 * Bug fix: print the spare's block count; the old
			 * code printed its block size here.
			 */
			RF_ERRORMSG3("Spare disk %s (%d blocks) is too small to serve as a spare (need %ld blocks)\n",
			    disks[i].devname, (int) disks[i].numBlocks,
			    (long int) raidPtr->sectorsPerDisk);
			ret = EINVAL;
			goto fail;
		} else if (disks[i].numBlocks > raidPtr->sectorsPerDisk) {
			RF_ERRORMSG2("Warning: truncating spare disk %s to %ld blocks\n",
			    disks[i].devname,
			    (long int) raidPtr->sectorsPerDisk);
			disks[i].numBlocks = raidPtr->sectorsPerDisk;
		}
	}

	return (0);

fail:
#if (defined(__NetBSD__) || defined(__OpenBSD__)) && defined(_KERNEL)
	/* Release the hold on the main components.  We've failed to allocate
	 * a spare, and since we're failing, we need to free things.. */
	for (r = 0; r < raidPtr->numRow; r++) {
		for (c = 0; c < raidPtr->numCol; c++) {
			/* Cleanup.. */
#ifdef DEBUG
			printf("Cleaning up row: %d col: %d\n", r, c);
#endif
			if (raidPtr->raid_cinfo[r][c].ci_vp) {
				(void) vn_close(raidPtr->raid_cinfo[r][c].ci_vp,
				    FREAD | FWRITE, proc->p_ucred, proc);
			}
		}
	}
	for (i = 0; i < raidPtr->numSpare; i++) {
		/* Cleanup.. */
#ifdef DEBUG
		printf("Cleaning up spare: %d\n", i);
#endif
		if (raidPtr->raid_cinfo[0][raidPtr->numCol + i].ci_vp) {
			(void) vn_close(
			    raidPtr->raid_cinfo[0][raidPtr->numCol + i].ci_vp,
			    FREAD | FWRITE, proc->p_ucred, proc);
		}
	}
#else
	if (rdcap_op)
		rf_SCSI_FreeDiskOp(rdcap_op, 1);
	if (tur_op)
		rf_SCSI_FreeDiskOp(tur_op, 0);
#endif
	return (ret);
}
/****************************************************************************************
 *
 * initialize the disks comprising the array
 *
 * We want the spare disks to have regular row,col numbers so that we can easily
 * substitue a spare for a failed disk.  But, the driver code assumes throughout
 * that the array contains numRow by numCol _non-spare_ disks, so it's not clear
 * how to fit in the spares.  This is an unfortunate holdover from raidSim.  The
 * quick and dirty fix is to make row zero bigger than the rest, and put all the
 * spares in it.  This probably needs to get changed eventually.
 *
 * Returns 0 on success, ENOMEM/EINVAL or a probe error on failure; the
 * fail path releases component vnodes (in-kernel) or SCSI ops.
 *
 ***************************************************************************************/
int
rf_ConfigureDisks(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	RF_RaidDisk_t **disks;
	/* Running minimum disk size; starts at a huge sentinel value. */
	RF_SectorCount_t min_numblks = (RF_SectorCount_t) 0x7FFFFFFFFFFFLL;
	RF_RowCol_t r, c;
	int bs, ret;
	unsigned i, count, foundone = 0, numFailuresThisRow;
	RF_DiskOp_t *rdcap_op = NULL, *tur_op = NULL;
	int num_rows_done, num_cols_done;
	struct proc *proc = 0;

#if !defined(__NetBSD__) && !defined(__OpenBSD__)
	/* Pre-allocate the SCSI read-capacity and TUR ops used for probing. */
	ret = rf_SCSI_AllocReadCapacity(&rdcap_op);
	if (ret)
		goto fail;
	ret = rf_SCSI_AllocTUR(&tur_op);
	if (ret)
		goto fail;
#endif				/* !__NetBSD__ && !__OpenBSD__ */

	num_rows_done = 0;
	num_cols_done = 0;

	RF_CallocAndAdd(disks, raidPtr->numRow, sizeof(RF_RaidDisk_t *),
	    (RF_RaidDisk_t **), raidPtr->cleanupList);
	if (disks == NULL) {
		ret = ENOMEM;
		goto fail;
	}
	raidPtr->Disks = disks;
	proc = raidPtr->proc;	/* Blah XXX */

	/* get space for the device-specific stuff... */
	RF_CallocAndAdd(raidPtr->raid_cinfo, raidPtr->numRow,
	    sizeof(struct raidcinfo *), (struct raidcinfo **),
	    raidPtr->cleanupList);
	if (raidPtr->raid_cinfo == NULL) {
		ret = ENOMEM;
		goto fail;
	}
	for (r = 0; r < raidPtr->numRow; r++) {
		numFailuresThisRow = 0;
		/* Row 0 is oversized to also hold the spare descriptors. */
		RF_CallocAndAdd(disks[r], raidPtr->numCol +
		    ((r == 0) ? raidPtr->numSpare : 0),
		    sizeof(RF_RaidDisk_t), (RF_RaidDisk_t *),
		    raidPtr->cleanupList);
		if (disks[r] == NULL) {
			ret = ENOMEM;
			goto fail;
		}
		/* get more space for device specific stuff.. */
		RF_CallocAndAdd(raidPtr->raid_cinfo[r], raidPtr->numCol +
		    ((r == 0) ? raidPtr->numSpare : 0),
		    sizeof(struct raidcinfo), (struct raidcinfo *),
		    raidPtr->cleanupList);
		if (raidPtr->raid_cinfo[r] == NULL) {
			ret = ENOMEM;
			goto fail;
		}
		for (c = 0; c < raidPtr->numCol; c++) {
			ret = rf_ConfigureDisk(raidPtr,
			    &cfgPtr->devnames[r][c][0], &disks[r][c],
			    rdcap_op, tur_op, cfgPtr->devs[r][c], r, c);
			if (ret)
				goto fail;
			if (disks[r][c].status != rf_ds_optimal) {
				numFailuresThisRow++;
			} else {
				if (disks[r][c].numBlocks < min_numblks)
					min_numblks = disks[r][c].numBlocks;
				DPRINTF7("Disk at row %d col %d: dev %s numBlocks %ld blockSize %d (%ld MB)\n",
				    r, c, disks[r][c].devname,
				    (long int) disks[r][c].numBlocks,
				    disks[r][c].blockSize,
				    (long int) disks[r][c].numBlocks *
				    disks[r][c].blockSize / 1024 / 1024);
			}
			num_cols_done++;
		}
		/* XXX fix for n-fault tolerant */
		if (numFailuresThisRow > 0)
			raidPtr->status[r] = rf_rs_degraded;
		num_rows_done++;
	}
#if (defined(__NetBSD__) || defined(__OpenBSD__)) && defined(_KERNEL)
	/* we do nothing */
#else
	rf_SCSI_FreeDiskOp(rdcap_op, 1);
	rdcap_op = NULL;
	rf_SCSI_FreeDiskOp(tur_op, 0);
	tur_op = NULL;
#endif
	/* all disks must be the same size & have the same block size, bs must
	 * be a power of 2 */
	bs = 0;
	/* Use the first live disk's block size as the reference. */
	for (foundone = r = 0; !foundone && r < raidPtr->numRow; r++) {
		for (c = 0; !foundone && c < raidPtr->numCol; c++) {
			if (disks[r][c].status == rf_ds_optimal) {
				bs = disks[r][c].blockSize;
				foundone = 1;
			}
		}
	}
	if (!foundone) {
		RF_ERRORMSG("RAIDFRAME: Did not find any live disks in the array.\n");
		ret = EINVAL;
		goto fail;
	}
	/* Count set bits in bs; exactly one bit means a power of 2. */
	for (count = 0, i = 1; i; i <<= 1)
		if (bs & i)
			count++;
	if (count != 1) {
		RF_ERRORMSG1("Error: block size on disks (%d) must be a power of 2\n", bs);
		ret = EINVAL;
		goto fail;
	}
	for (r = 0; r < raidPtr->numRow; r++) {
		for (c = 0; c < raidPtr->numCol; c++) {
			if (disks[r][c].status == rf_ds_optimal) {
				if (disks[r][c].blockSize != bs) {
					RF_ERRORMSG2("Error: block size of disk at r %d c %d different from disk at r 0 c 0\n", r, c);
					ret = EINVAL;
					goto fail;
				}
				if (disks[r][c].numBlocks != min_numblks) {
					/* Clip larger disks to the smallest. */
					RF_ERRORMSG3("WARNING: truncating disk at r %d c %d to %d blocks\n",
					    r, c, (int) min_numblks);
					disks[r][c].numBlocks = min_numblks;
				}
			}
		}
	}
	raidPtr->sectorsPerDisk = min_numblks;
	raidPtr->logBytesPerSector = ffs(bs) - 1;
	raidPtr->bytesPerSector = bs;
	raidPtr->sectorMask = bs - 1;
	return (0);

fail:
#if (defined(__NetBSD__) || defined(__OpenBSD__)) && defined(_KERNEL)
	for (r = 0; r < raidPtr->numRow; r++) {
		for (c = 0; c < raidPtr->numCol; c++) {
			/* Cleanup.. */
#ifdef DEBUG
			printf("Cleaning up row: %d col: %d\n", r, c);
#endif
			if (raidPtr->raid_cinfo[r][c].ci_vp) {
				(void) vn_close(raidPtr->raid_cinfo[r][c].ci_vp,
				    FREAD | FWRITE, proc->p_ucred, proc);
			}
		}
	}
	/* Space allocated for raid_vpp will get cleaned up at some other
	 * point */
	/* XXX Need more #ifdefs in the above... */
#else
	if (rdcap_op)
		rf_SCSI_FreeDiskOp(rdcap_op, 1);
	if (tur_op)
		rf_SCSI_FreeDiskOp(tur_op, 0);
#endif
	return (ret);
}
/*
 * Emit the standard "shutdown list registration failed" diagnostic for
 * the given call site (file/line) and error code.
 */
void
rf_print_unable_to_add_shutdown(const char *file, int line, int rc)
{
	RF_ERRORMSG3("Unable to add to shutdown list file %s line %d rc=%d\n",
	    file, line, rc);
}