Example 1
int
rf_ConfigureAccessTrace(RF_ShutdownList_t **listp)
{
	int rc;

	numTracesSoFar = accessTraceBufCount = rf_stopCollectingTraces = 0;
	if (rf_accessTraceBufSize) {
		RF_Malloc(access_tracebuf, rf_accessTraceBufSize *
		    sizeof(RF_AccTraceEntry_t), (RF_AccTraceEntry_t *));
		accessTraceBufCount = 0;
	}
	traceCount = 0;
	numTracesSoFar = 0;
	rc = rf_mutex_init(&rf_tracing_mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d.\n",
		    __FILE__, __LINE__, rc);
	}
	rc = rf_ShutdownCreate(listp, rf_ShutdownAccessTrace, NULL);
	if (rc) {
		RF_ERRORMSG3("Unable to add to shutdown list file %s line %d"
		    " rc=%d.\n", __FILE__, __LINE__, rc);
		if (rf_accessTraceBufSize) {
			RF_Free(access_tracebuf, rf_accessTraceBufSize *
			    sizeof(RF_AccTraceEntry_t));
			rf_mutex_destroy(&rf_tracing_mutex);
		}
	}
	return (rc);
}
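
The function above follows a recurring configure-time pattern: allocate a buffer, initialize a mutex, register a cleanup hook on the shutdown list, and unwind in reverse order if any step fails. The following is a minimal userspace sketch of that pattern using malloc/pthreads instead of the RF_Malloc and rf_mutex_init wrappers; trace_configure, trace_shutdown, and struct trace_entry are illustrative names rather than RAIDframe identifiers, and atexit() merely stands in for rf_ShutdownCreate.

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct trace_entry { long when; int what; };	/* stand-in for RF_AccTraceEntry_t */

static struct trace_entry *trace_buf;		/* stand-in for access_tracebuf */
static size_t trace_buf_entries;		/* stand-in for rf_accessTraceBufSize */
static pthread_mutex_t trace_mutex;		/* stand-in for rf_tracing_mutex */

static void
trace_shutdown(void)
{
	free(trace_buf);
	trace_buf = NULL;
	pthread_mutex_destroy(&trace_mutex);
}

int
trace_configure(size_t nentries)
{
	int rc;

	trace_buf_entries = nentries;
	if (nentries != 0) {
		trace_buf = calloc(nentries, sizeof(*trace_buf));
		if (trace_buf == NULL)
			return (ENOMEM);
	}
	rc = pthread_mutex_init(&trace_mutex, NULL);
	if (rc != 0) {
		free(trace_buf);
		return (rc);
	}
	/* atexit() plays the role of registering on the shutdown list. */
	if (atexit(trace_shutdown) != 0) {
		/* Unwind in reverse order of initialization. */
		pthread_mutex_destroy(&trace_mutex);
		free(trace_buf);
		return (ENOMEM);
	}
	return (0);
}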
Example 2
RF_CommonLogData_t *
rf_AllocParityLogCommonData(RF_Raid_t *raidPtr)
{
	RF_CommonLogData_t *common = NULL;
	int rc;

	/*
	 * Return a struct for holding common parity log information from the
	 * free list (rf_parityLogDiskQueue.freeCommonList). If the free list
	 * is empty, call RF_Malloc to create a new structure. NON-BLOCKING
	 */

	RF_LOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
	if (raidPtr->parityLogDiskQueue.freeCommonList) {
		common = raidPtr->parityLogDiskQueue.freeCommonList;
		raidPtr->parityLogDiskQueue.freeCommonList =
		    raidPtr->parityLogDiskQueue.freeCommonList->next;
		RF_UNLOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
	} else {
		RF_UNLOCK_MUTEX(raidPtr->parityLogDiskQueue.mutex);
		RF_Malloc(common, sizeof(RF_CommonLogData_t),
		    (RF_CommonLogData_t *));
		rc = rf_mutex_init(&common->mutex);
		if (rc) {
			RF_ERRORMSG3("Unable to init mutex file %s line %d"
			    " rc=%d\n", __FILE__, __LINE__, rc);
			RF_Free(common, sizeof(RF_CommonLogData_t));
			common = NULL;
		}
	}
	if (common)		/* the failure path above leaves common NULL */
		common->next = NULL;
	return (common);
}
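
The allocator above pops a node from a mutex-protected singly linked free list and only falls back to a fresh allocation when the list is empty. A self-contained userspace sketch of the same pattern, with illustrative names (struct log_data, log_data_alloc, log_data_free) rather than RAIDframe's:

#include <pthread.h>
#include <stdlib.h>

struct log_data {			/* stand-in for RF_CommonLogData_t */
	struct log_data *next;
	pthread_mutex_t  mutex;
};

static struct log_data *free_list;	/* stand-in for freeCommonList */
static pthread_mutex_t list_mutex = PTHREAD_MUTEX_INITIALIZER;

struct log_data *
log_data_alloc(void)
{
	struct log_data *d = NULL;

	/* Prefer a recycled node from the free list (non-blocking path). */
	pthread_mutex_lock(&list_mutex);
	if (free_list != NULL) {
		d = free_list;
		free_list = free_list->next;
	}
	pthread_mutex_unlock(&list_mutex);

	/* Otherwise fall back to a fresh allocation. */
	if (d == NULL) {
		d = calloc(1, sizeof(*d));
		if (d == NULL)
			return (NULL);
		if (pthread_mutex_init(&d->mutex, NULL) != 0) {
			free(d);
			return (NULL);
		}
	}
	d->next = NULL;
	return (d);
}

void
log_data_free(struct log_data *d)
{
	/* Push the node back onto the free list for reuse. */
	pthread_mutex_lock(&list_mutex);
	d->next = free_list;
	free_list = d;
	pthread_mutex_unlock(&list_mutex);
}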
Example 3
/* called at system boot time */
int     
rf_BootRaidframe(void)
{
	int     rc;

	if (raidframe_booted)
		return (EBUSY);
	raidframe_booted = 1;

#if RF_DEBUG_ATOMIC > 0
	rf_atent_init();
#endif				/* RF_DEBUG_ATOMIC > 0 */

	rf_setup_threadid();
	rf_assign_threadid();

	rc = rf_mutex_init(&configureMutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		RF_PANIC();
	}
	configureCount = 0;
	isconfigged = 0;
	globalShutdown = NULL;
	return (0);
}
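
The boot routine guards against being run twice with a simple "already booted" flag and then initializes the global configuration mutex. A compact sketch of that one-shot guard, using pthreads and hypothetical names (module_boot, booted, configure_mutex):

#include <errno.h>
#include <pthread.h>

static int booted;				/* stand-in for raidframe_booted */
static pthread_mutex_t configure_mutex;		/* stand-in for configureMutex */

int
module_boot(void)
{
	int rc;

	if (booted)
		return (EBUSY);		/* refuse to boot a second time */
	booted = 1;

	rc = pthread_mutex_init(&configure_mutex, NULL);
	if (rc != 0)
		return (rc);		/* the kernel code panics here instead */
	return (0);
}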
Example 4
/*****************************************************************************************
 * sets up the pss table
 * We pre-allocate a bunch of entries to avoid as much as possible having to
 * malloc up hash chain entries.
 ****************************************************************************************/
RF_PSStatusHeader_t *
rf_MakeParityStripeStatusTable(RF_Raid_t *raidPtr)
{
	RF_PSStatusHeader_t *pssTable;
	int     i;

	RF_Malloc(pssTable,
		  raidPtr->pssTableSize * sizeof(RF_PSStatusHeader_t),
		  (RF_PSStatusHeader_t *));
	for (i = 0; i < raidPtr->pssTableSize; i++) {
		rf_mutex_init(&pssTable[i].mutex);
	}
	return (pssTable);
}
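
The table constructor pre-allocates every hash-chain header, each with its own lock, so that only chain entries ever have to be allocated later. A minimal sketch of the same idea, assuming hypothetical names (struct pss_bucket, pss_table_make) and pthreads in place of rf_mutex_init:

#include <pthread.h>
#include <stdlib.h>

struct pss_bucket {			/* stand-in for RF_PSStatusHeader_t */
	pthread_mutex_t	 mutex;		/* protects this hash chain */
	void		*chain;		/* head of the chain, NULL initially */
};

/*
 * Allocate every bucket head up front so the table itself never needs to be
 * grown or allocated on the I/O path.
 */
struct pss_bucket *
pss_table_make(size_t nbuckets)
{
	struct pss_bucket *table;
	size_t i;

	table = calloc(nbuckets, sizeof(*table));
	if (table == NULL)
		return (NULL);
	for (i = 0; i < nbuckets; i++)
		pthread_mutex_init(&table[i].mutex, NULL);
	return (table);
}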
Example 5
RF_ReconMap_t *
rf_MakeReconMap(
    RF_Raid_t		*raidPtr,
    RF_SectorCount_t	 ru_sectors,		/*
						 * Size of reconstruction unit
						 * in sectors.
						 */
    RF_SectorCount_t	 disk_sectors,		/* Size of disk in sectors. */
    RF_ReconUnitCount_t	 spareUnitsPerDisk	/*
						 * Zero unless distributed
						 * sparing.
						 */
)
{
	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
	RF_ReconUnitCount_t num_rus = layoutPtr->stripeUnitsPerDisk /
	    layoutPtr->SUsPerRU;
	RF_ReconMap_t *p;
	int rc;

	RF_Malloc(p, sizeof(RF_ReconMap_t), (RF_ReconMap_t *));
	p->sectorsPerReconUnit = ru_sectors;
	p->sectorsInDisk = disk_sectors;

	p->totalRUs = num_rus;
	p->spareRUs = spareUnitsPerDisk;
	p->unitsLeft = num_rus - spareUnitsPerDisk;

	RF_Malloc(p->status, num_rus * sizeof(RF_ReconMapListElem_t *),
	    (RF_ReconMapListElem_t **));
	RF_ASSERT(p->status != (RF_ReconMapListElem_t **) NULL);

	(void) bzero((char *) p->status, num_rus *
	    sizeof(RF_ReconMapListElem_t *));

	p->size = sizeof(RF_ReconMap_t) + num_rus *
	    sizeof(RF_ReconMapListElem_t *);
	p->maxSize = p->size;

	rc = rf_mutex_init(&p->mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d.\n",
		    __FILE__, __LINE__, rc);
		RF_Free(p->status, num_rus * sizeof(RF_ReconMapListElem_t *));
		RF_Free(p, sizeof(RF_ReconMap_t));
		return (NULL);
	}
	return (p);
}
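
The map constructor derives the number of reconstruction units from the layout (stripeUnitsPerDisk / SUsPerRU), allocates the map and its per-RU status array, records the allocated size, and frees both allocations if the mutex cannot be initialized. A sketch of the same bookkeeping in plain C, with illustrative names (struct recon_map, recon_map_make):

#include <pthread.h>
#include <stdlib.h>

struct recon_elem;			/* stand-in for RF_ReconMapListElem_t */

struct recon_map {			/* stand-in for RF_ReconMap_t */
	size_t		    total_rus;	/* reconstruction units on the disk */
	size_t		    spare_rus;	/* RUs reserved for distributed sparing */
	size_t		    units_left;	/* RUs still to be reconstructed */
	struct recon_elem **status;	/* one list head per RU, all NULL at first */
	size_t		    size;	/* bookkeeping: bytes currently allocated */
	pthread_mutex_t	    mutex;
};

struct recon_map *
recon_map_make(size_t stripe_units_per_disk, size_t sus_per_ru, size_t spare_rus)
{
	/* Same arithmetic as above: number of reconstruction units per disk. */
	size_t num_rus = stripe_units_per_disk / sus_per_ru;
	struct recon_map *m;

	m = calloc(1, sizeof(*m));
	if (m == NULL)
		return (NULL);
	m->total_rus = num_rus;
	m->spare_rus = spare_rus;
	m->units_left = num_rus - spare_rus;

	m->status = calloc(num_rus, sizeof(*m->status));
	if (m->status == NULL) {
		free(m);
		return (NULL);
	}
	m->size = sizeof(*m) + num_rus * sizeof(*m->status);

	if (pthread_mutex_init(&m->mutex, NULL) != 0) {
		/* Unwind both allocations, as the RAIDframe code does. */
		free(m->status);
		free(m);
		return (NULL);
	}
	return (m);
}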
Example 6
int
rf_ConfigureEngine(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
		   RF_Config_t *cfgPtr)
{

	rf_mutex_init(&raidPtr->node_queue_mutex);
	raidPtr->node_queue = NULL;
	raidPtr->dags_in_flight = 0;

	/* We create the execution thread only once per system boot.  There is
	 * no need to check the return code because the kernel panics if it
	 * cannot create the thread. */
#if RF_DEBUG_ENGINE
	if (rf_engineDebug) {
		printf("raid%d: Creating engine thread\n", raidPtr->raidid);
	}
#endif
	if (RF_CREATE_ENGINE_THREAD(raidPtr->engine_thread,
				    DAGExecutionThread, raidPtr,
				    "raid%d", raidPtr->raidid)) {
		printf("raid%d: Unable to create engine thread\n",
		       raidPtr->raidid);
		return (ENOMEM);
	}
	if (RF_CREATE_ENGINE_THREAD(raidPtr->engine_helper_thread,
				    rf_RaidIOThread, raidPtr,
				    "raidio%d", raidPtr->raidid)) {
		printf("raid%d: Unable to create raidio thread\n",
		       raidPtr->raidid);
		return (ENOMEM);
	}
#if RF_DEBUG_ENGINE
	if (rf_engineDebug) {
		printf("raid%d: Created engine thread\n", raidPtr->raidid);
	}
#endif

	/* engine thread is now running and waiting for work */
#if RF_DEBUG_ENGINE
	if (rf_engineDebug) {
		printf("raid%d: Engine thread running and waiting for events\n", raidPtr->raidid);
	}
#endif
	rf_ShutdownCreate(listp, rf_ShutdownEngine, raidPtr);

	return (0);
}
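
Engine configuration amounts to starting two worker threads, the DAG execution engine and the I/O helper, and registering a shutdown entry. A rough pthread-based sketch of the thread-creation step, with hypothetical thread bodies standing in for DAGExecutionThread and rf_RaidIOThread; like the original, it does not tear down the first thread if creating the second fails:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Hypothetical thread bodies; placeholders for the real DAG and I/O loops. */
static void *engine_thread(void *arg) { (void)arg; return (NULL); }
static void *raidio_thread(void *arg) { (void)arg; return (NULL); }

int
engine_configure(pthread_t *engine, pthread_t *helper, void *raid)
{
	if (pthread_create(engine, NULL, engine_thread, raid) != 0) {
		fprintf(stderr, "unable to create engine thread\n");
		return (ENOMEM);
	}
	if (pthread_create(helper, NULL, raidio_thread, raid) != 0) {
		fprintf(stderr, "unable to create raidio thread\n");
		return (ENOMEM);
	}
	return (0);
}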
Example 7
int
rf_init_mcpair(RF_MCPair_t *t)
{
	int rc;

	rc = rf_mutex_init(&t->mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		return (rc);
	}
	rc = rf_cond_init(&t->cond);
	if (rc) {
		RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		rf_mutex_destroy(&t->mutex);
		return (rc);
	}
	return (0);
}
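
The init routine sets up a mutex/condition-variable pair and rolls back the mutex if the condition variable cannot be created, so the pair is either fully initialized or not at all. The same pattern in portable pthreads, with struct mcpair and mcpair_init as illustrative names:

#include <pthread.h>

struct mcpair {				/* stand-in for RF_MCPair_t */
	pthread_mutex_t	mutex;
	pthread_cond_t	cond;
};

int
mcpair_init(struct mcpair *t)
{
	int rc;

	rc = pthread_mutex_init(&t->mutex, NULL);
	if (rc != 0)
		return (rc);
	rc = pthread_cond_init(&t->cond, NULL);
	if (rc != 0) {
		/* Roll back the mutex so callers never see a half-built pair. */
		pthread_mutex_destroy(&t->mutex);
		return (rc);
	}
	return (0);
}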
Example 8
int
rf_ConfigureDiskQueue(RF_Raid_t *raidPtr, RF_DiskQueue_t *diskqueue,
		      RF_RowCol_t c, const RF_DiskQueueSW_t *p,
		      RF_SectorCount_t sectPerDisk, dev_t dev,
		      int maxOutstanding, RF_ShutdownList_t **listp,
		      RF_AllocListElem_t *clList)
{
	diskqueue->col = c;
	diskqueue->qPtr = p;
	diskqueue->qHdr = (p->Create) (sectPerDisk, clList, listp);
	diskqueue->dev = dev;
	diskqueue->numOutstanding = 0;
	diskqueue->queueLength = 0;
	diskqueue->maxOutstanding = maxOutstanding;
	diskqueue->curPriority = RF_IO_NORMAL_PRIORITY;
	diskqueue->flags = 0;
	diskqueue->raidPtr = raidPtr;
	diskqueue->rf_cinfo = &raidPtr->raid_cinfo[c];
	rf_mutex_init(&diskqueue->mutex);
	diskqueue->cond = 0;
	return (0);
}
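
The queue setup above is plain field-by-field initialization. In C99 the same effect can be had with a designated-initializer compound literal, which zeroes every field that is not named explicitly; struct disk_queue and its members below are illustrative stand-ins for RF_DiskQueue_t, not its real layout:

#include <pthread.h>

struct disk_queue {			/* illustrative stand-in for RF_DiskQueue_t */
	int		 col;
	int		 num_outstanding;
	int		 queue_length;
	int		 max_outstanding;
	int		 cur_priority;
	unsigned	 flags;
	pthread_mutex_t	 mutex;
};

#define IO_NORMAL_PRIORITY 0		/* stand-in for RF_IO_NORMAL_PRIORITY */

int
disk_queue_init(struct disk_queue *q, int col, int max_outstanding)
{
	/* Fields not named here default to zero, like the explicit zeroing above. */
	*q = (struct disk_queue){
		.col = col,
		.max_outstanding = max_outstanding,
		.cur_priority = IO_NORMAL_PRIORITY,
	};
	return (pthread_mutex_init(&q->mutex, NULL));
}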
Example 9
int
rf_ConfigureAllocList(RF_ShutdownList_t **listp)
{
	int rc;

	rc = rf_mutex_init(&alist_mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d.\n",
		    __FILE__, __LINE__, rc);
		return (rc);
	}
	al_free_list = NULL;
	fl_hit_count = fl_miss_count = al_free_list_count = 0;
	rc = rf_ShutdownCreate(listp, rf_ShutdownAllocList, NULL);
	if (rc) {
		RF_ERRORMSG3("Unable to add to shutdown list file %s line %d"
		    " rc=%d.\n", __FILE__, __LINE__, rc);
		rf_mutex_destroy(&alist_mutex);
		return (rc);
	}
	return (0);
}
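
rf_ShutdownCreate appears throughout these examples but its implementation is not shown here. The sketch below illustrates the general idea it serves, prepending a (function, argument) pair to a cleanup list that is later run newest-first; shutdown_create and shutdown_run are hypothetical names, and this is not RAIDframe's actual implementation:

#include <stdlib.h>

struct shutdown_entry {			/* plays the role of RF_ShutdownList_t */
	void			(*fn)(void *);
	void			 *arg;
	struct shutdown_entry	 *next;
};

/* Prepend a cleanup entry; entries registered last will run first. */
int
shutdown_create(struct shutdown_entry **listp, void (*fn)(void *), void *arg)
{
	struct shutdown_entry *e = malloc(sizeof(*e));

	if (e == NULL)
		return (-1);
	e->fn = fn;
	e->arg = arg;
	e->next = *listp;
	*listp = e;
	return (0);
}

/* Run and free every entry, in reverse order of registration. */
void
shutdown_run(struct shutdown_entry **listp)
{
	struct shutdown_entry *e;

	while ((e = *listp) != NULL) {
		*listp = e->next;
		e->fn(e->arg);
		free(e);
	}
}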
Example 10
int
rf_Configure(RF_Raid_t *raidPtr, RF_Config_t *cfgPtr, RF_AutoConfig_t *ac)
{
	RF_RowCol_t col;
	int rc;

	RF_LOCK_LKMGR_MUTEX(configureMutex);
	configureCount++;
	if (isconfigged == 0) {
		rf_mutex_init(&rf_printf_mutex);

		/* initialize globals */

		DO_INIT_CONFIGURE(rf_ConfigureAllocList);

		/*
	         * Yes, this does make debugging general to the whole
	         * system instead of being array specific. Bummer, drag.
		 */
		rf_ConfigureDebug(cfgPtr);
		DO_INIT_CONFIGURE(rf_ConfigureDebugMem);
#if RF_ACC_TRACE > 0
		DO_INIT_CONFIGURE(rf_ConfigureAccessTrace);
#endif
		DO_INIT_CONFIGURE(rf_ConfigureMapModule);
		DO_INIT_CONFIGURE(rf_ConfigureReconEvent);
		DO_INIT_CONFIGURE(rf_ConfigureCallback);
		DO_INIT_CONFIGURE(rf_ConfigureRDFreeList);
		DO_INIT_CONFIGURE(rf_ConfigureNWayXor);
		DO_INIT_CONFIGURE(rf_ConfigureStripeLockFreeList);
		DO_INIT_CONFIGURE(rf_ConfigureMCPair);
		DO_INIT_CONFIGURE(rf_ConfigureDAGs);
		DO_INIT_CONFIGURE(rf_ConfigureDAGFuncs);
		DO_INIT_CONFIGURE(rf_ConfigureReconstruction);
		DO_INIT_CONFIGURE(rf_ConfigureCopyback);
		DO_INIT_CONFIGURE(rf_ConfigureDiskQueueSystem);
		DO_INIT_CONFIGURE(rf_ConfigurePSStatus);
		isconfigged = 1;
	}
	RF_UNLOCK_LKMGR_MUTEX(configureMutex);

	DO_RAID_MUTEX(&raidPtr->mutex);
	/* Set up the cleanup list.  Do this after ConfigureDebug so that
	 * the value of memDebug will be set. */

	rf_MakeAllocList(raidPtr->cleanupList);
	if (raidPtr->cleanupList == NULL) {
		DO_RAID_FAIL();
		return (ENOMEM);
	}
	rf_ShutdownCreate(&raidPtr->shutdownList,
			  (void (*) (void *)) rf_FreeAllocList,
			  raidPtr->cleanupList);

	raidPtr->numCol = cfgPtr->numCol;
	raidPtr->numSpare = cfgPtr->numSpare;

	raidPtr->status = rf_rs_optimal;
	raidPtr->reconControl = NULL;

	TAILQ_INIT(&(raidPtr->iodone));
	simple_lock_init(&(raidPtr->iodone_lock));

	DO_RAID_INIT_CONFIGURE(rf_ConfigureEngine);
	DO_RAID_INIT_CONFIGURE(rf_ConfigureStripeLocks);

	raidPtr->outstandingCond = 0;

	raidPtr->nAccOutstanding = 0;
	raidPtr->waitShutdown = 0;

	DO_RAID_MUTEX(&raidPtr->access_suspend_mutex);

	raidPtr->waitForReconCond = 0;

	if (ac != NULL) {
		/* We have an AutoConfig structure.  Skip the normal
		   disk configuration and call the autoconfig code
		   instead. */
		rf_AutoConfigureDisks(raidPtr, cfgPtr, ac);
	} else {
		DO_RAID_INIT_CONFIGURE(rf_ConfigureDisks);
		DO_RAID_INIT_CONFIGURE(rf_ConfigureSpareDisks);
	}
	/* do this after ConfigureDisks & ConfigureSpareDisks to be sure dev
	 * no. is set */
	DO_RAID_INIT_CONFIGURE(rf_ConfigureDiskQueues);

	DO_RAID_INIT_CONFIGURE(rf_ConfigureLayout);

	/* Initialize per-RAID PSS bits */
	rf_InitPSStatus(raidPtr);

#if RF_INCLUDE_CHAINDECLUSTER > 0
	for (col = 0; col < raidPtr->numCol; col++) {
		/*
		 * XXX better distribution
		 */
		raidPtr->hist_diskreq[col] = 0;
	}
#endif
	raidPtr->numNewFailures = 0;
	raidPtr->copyback_in_progress = 0;
	raidPtr->parity_rewrite_in_progress = 0;
	raidPtr->adding_hot_spare = 0;
	raidPtr->recon_in_progress = 0;
	raidPtr->maxOutstanding = cfgPtr->maxOutstandingDiskReqs;

	/* autoconfigure and root_partition will actually get filled in
	   after the config is done */
	raidPtr->autoconfigure = 0;
	raidPtr->root_partition = 0;
	raidPtr->last_unit = raidPtr->raidid;
	raidPtr->config_order = 0;

	if (rf_keepAccTotals) {
		raidPtr->keep_acc_totals = 1;
	}

	/* Allocate a bunch of buffers to be used in low-memory conditions */
	raidPtr->iobuf = NULL;

	rc = rf_AllocEmergBuffers(raidPtr);
	if (rc) {
		printf("raid%d: Unable to allocate emergency buffers.\n",
		       raidPtr->raidid);
		DO_RAID_FAIL();
		return(rc);
	}

	/* Set up parity map stuff, if applicable. */
#ifndef RF_NO_PARITY_MAP
	rf_paritymap_attach(raidPtr, cfgPtr->force);
#endif

	raidPtr->valid = 1;

	printf("raid%d: %s\n", raidPtr->raidid,
	       raidPtr->Layout.map->configName);
	printf("raid%d: Components:", raidPtr->raidid);

	for (col = 0; col < raidPtr->numCol; col++) {
		printf(" %s", raidPtr->Disks[col].devname);
		if (RF_DEAD_DISK(raidPtr->Disks[col].status)) {
			printf("[**FAILED**]");
		}
	}
	printf("\n");
	printf("raid%d: Total Sectors: %" PRIu64 " (%" PRIu64 " MB)\n",
	       raidPtr->raidid,
	       raidPtr->totalSectors,
	       (raidPtr->totalSectors / 1024 *
				(1 << raidPtr->logBytesPerSector) / 1024));

	return (0);
}
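
rf_Configure drives the per-module setup functions through the DO_INIT_CONFIGURE and DO_RAID_INIT_CONFIGURE macros, whose definitions are not included in these excerpts. Purely as an illustration of the call, report, unwind, and return pattern they embody, a hypothetical macro might look like the following (not RAIDframe's actual definition):

#include <stdio.h>

/*
 * Hypothetical illustration only, not RAIDframe's real macro: call one
 * module's configure function; if it fails, report which one failed, undo
 * whatever has already been configured, and bail out with the error code.
 */
#define DO_INIT_CONFIGURE_SKETCH(configure_fn, listp, undo_fn)		\
	do {								\
		int _rc = (configure_fn)(listp);			\
		if (_rc != 0) {						\
			fprintf(stderr, "%s failed, rc=%d\n",		\
			    #configure_fn, _rc);			\
			(undo_fn)();					\
			return (_rc);					\
		}							\
	} while (0)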