Example #1
void init_block_cache(ext2fs_st *st)
{
    int i;

    /* one buffer descriptor per file-system block that fits in the cache */
    st->cache.blocks = st->cache.size / st->fsc.block_size;
    DBO(printf("ext2fs: block cache contains %d blocks\n",st->cache.blocks));
    st->cache.block_cache = Heap$Malloc(st->heap,
					(sizeof(*st->cache.block_cache) * 
					 st->cache.blocks));
    DBO(printf("ext2fs: block cache descriptors at [%p,%p)\n",
	       st->cache.block_cache, (void *)st->cache.block_cache + 
	       (sizeof(*st->cache.block_cache) * st->cache.blocks) - 1));
    MU_INIT(&st->cache.mu);
    CV_INIT(&st->cache.freebufs);
    LINK_INIT(&st->cache.bufs);

    /* chain every descriptor onto the buffer list and point it at its
     * block-sized slice of the contiguous cache memory */
    for (i = 0; i < st->cache.blocks; i++) {
	struct buffer_head *buf;

	buf = &st->cache.block_cache[i];
	LINK_ADD_TO_TAIL(&st->cache.bufs, buf);
	MU_INIT(&buf->mu);
	CV_INIT(&buf->cv);
	buf->st        = st;
	buf->b_blocknr = 0;
	buf->state     = buf_empty;
	buf->b_count   = 0;
	buf->b_data    = st->cache.buf+(i*st->fsc.block_size);
	buf->b_size    = st->fsc.block_size;
    }
}
Example #2
/**
 * allocate a work node.
 *
 * @param[out] node_out  address in which to store new work node
 *
 * @return operation status
 *    @retval 0 success
 *    @retval ENOMEM         out of memory
 */
int
afs_wq_node_alloc(struct afs_work_queue_node ** node_out)
{
    int ret = 0;
    struct afs_work_queue_node * node;

    *node_out = node = (struct afs_work_queue_node *) malloc(sizeof(*node));
    if (node == NULL) {
	ret = ENOMEM;
	goto error;
    }

    queue_NodeInit(&node->node_list);
    node->qidx = AFS_WQ_NODE_LIST_NONE;
    node->cbf = NULL;
    node->rock = node->queue = NULL;
    node->refcount = 1;
    node->block_count = 0;
    node->error_count = 0;
    MUTEX_INIT(&node->lock, "node", MUTEX_DEFAULT, 0);
    CV_INIT(&node->state_cv, "node state", CV_DEFAULT, 0);
    node->state = AFS_WQ_NODE_STATE_INIT;
    queue_Init(&node->dep_children);

 error:
    return ret;
}
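A minimal caller-side sketch of the contract documented above (error handling only; teardown helpers do not appear in this excerpt):

    struct afs_work_queue_node *node;
    int code;

    code = afs_wq_node_alloc(&node);
    if (code != 0) {
        /* ENOMEM: node was left NULL */
        return code;
    }
    /* node starts with refcount 1, in state AFS_WQ_NODE_STATE_INIT */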
Example #3
/**
 * initialize volume group cache subsystem.
 *
 * @return operation status
 *    @retval 0 success
 *    @retval ENOMEM out of memory
 */
int
VVGCache_PkgInit(void)
{
    int code = 0;
    int i;

    /* allocate hash table */
    VVGCache_hash_table.hash_buckets =
	malloc(VolumeHashTable.Size * sizeof(struct rx_queue));
    if (VVGCache_hash_table.hash_buckets == NULL) {
	code = ENOMEM;
	goto error;
    }

    /* setup hash chain heads */
    for (i = 0; i < VolumeHashTable.Size; i++) {
	queue_Init(&VVGCache_hash_table.hash_buckets[i]);
    }

    /* initialize per-partition VVGC state */
    for (i = 0; i <= VOLMAXPARTS; i++) {
	VVGCache.part[i].state = VVGC_PART_STATE_INVALID;
	VVGCache.part[i].dlist_hash_buckets = NULL;
	CV_INIT(&VVGCache.part[i].cv, "cache part", CV_DEFAULT, 0);
    }

 error:
    return code;
}
Example #4
/**
 * create a thread pool.
 *
 * @param[out]   pool_out  address in which to store pool object pointer.
 * @param[in]    queue     work queue serviced by thread pool
 *
 * @return operation status
 *    @retval 0 success
 *    @retval ENOMEM out of memory
 */
int
afs_tp_create(struct afs_thread_pool ** pool_out,
              struct afs_work_queue * queue)
{
    int ret = 0;
    struct afs_thread_pool * pool;

    ret = _afs_tp_alloc(pool_out);
    if (ret) {
        goto error;
    }
    pool = *pool_out;

    MUTEX_INIT(&pool->lock, "pool", MUTEX_DEFAULT, 0);
    CV_INIT(&pool->shutdown_cv, "pool shutdown", CV_DEFAULT, 0);
    queue_Init(&pool->thread_list);
    pool->work_queue = queue;
    pool->entry = &_afs_tp_worker_default;
    pool->rock = NULL;
    pool->nthreads = 0;
    pool->max_threads = 4;
    pool->state = AFS_TP_STATE_INIT;

error:
    return ret;
}
Example #5
static void
VInitPartition_r(char *path, char *devname, Device dev)
{
    struct DiskPartition64 *dp, *op;

    dp = malloc(sizeof(struct DiskPartition64));
    /* Add it to the end, to preserve order when we print statistics */
    for (op = DiskPartitionList; op; op = op->next) {
	if (!op->next)
	    break;
    }
    if (op)
	op->next = dp;
    else
	DiskPartitionList = dp;
    dp->next = 0;
    dp->name = strdup(path);
    dp->index = volutil_GetPartitionID(path);
#if defined(AFS_NAMEI_ENV) && !defined(AFS_NT40_ENV)
    /* Create a lockfile for the partition, of the form /vicepa/Lock/vicepa */
    dp->devName = malloc(2 * strlen(path) + 6);
    strcpy(dp->devName, path);
    strcat(dp->devName, OS_DIRSEP);
    strcat(dp->devName, "Lock");
    mkdir(dp->devName, 0700);
    strcat(dp->devName, path);
    close(afs_open(dp->devName, O_RDWR | O_CREAT, 0600));
    dp->device = dp->index;
#else
    dp->devName = strdup(devname);
    dp->device = dev;
#endif
    dp->lock_fd = INVALID_FD;
    dp->flags = 0;
    dp->f_files = 1;		/* just a default value */
#if defined(AFS_NAMEI_ENV) && !defined(AFS_NT40_ENV)
    if (programType == fileServer)
	(void)namei_ViceREADME(VPartitionPath(dp));
#endif
    VSetPartitionDiskUsage_r(dp);
#ifdef AFS_DEMAND_ATTACH_FS
    AddPartitionToTable_r(dp);
    queue_Init(&dp->vol_list.head);
    CV_INIT(&dp->vol_list.cv, "vol list", CV_DEFAULT, 0);
    dp->vol_list.len = 0;
    dp->vol_list.busy = 0;
    {
	char lockpath[MAXPATHLEN+1];
	snprintf(lockpath, MAXPATHLEN, "%s/" AFS_PARTLOCK_FILE, dp->name);
	lockpath[MAXPATHLEN] = '\0';
	VLockFileInit(&dp->headerLockFile, lockpath);

	snprintf(lockpath, MAXPATHLEN, "%s/" AFS_VOLUMELOCK_FILE, dp->name);
	lockpath[MAXPATHLEN] = '\0';
	VLockFileInit(&dp->volLockFile, lockpath);
    }
    VDiskLockInit(&dp->headerLock, &dp->headerLockFile, 1);
#endif /* AFS_DEMAND_ATTACH_FS */
}
Example #6
/**
 * initialize a struct VDiskLock.
 *
 * @param[in] dl     struct VDiskLock to initialize
 * @param[in] lf     the struct VLockFile to associate with this disk lock
 * @param[in] offset byte offset within lf at which this lock is taken
 */
void
VDiskLockInit(struct VDiskLock *dl, struct VLockFile *lf, afs_uint32 offset)
{
    osi_Assert(lf);
    memset(dl, 0, sizeof(*dl));
    Lock_Init(&dl->rwlock);
    MUTEX_INIT(&dl->mutex, "disklock", MUTEX_DEFAULT, 0);
    CV_INIT(&dl->cv, "disklock cv", CV_DEFAULT, 0);
    dl->lockfile = lf;
    dl->offset = offset;
}
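For a concrete call site, see Example #5 above, where the partition's header lock is placed at byte offset 1 within its header lock file:

    VDiskLockInit(&dp->headerLock, &dp->headerLockFile, 1);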
Example #7
/**
 * initialize a node list object.
 *
 * @param[in] list list object
 * @param[in] id   list identifier
 *
 * @return operation status
 *    @retval 0 success
 *
 * @internal
 */
static int
_afs_wq_node_list_init(struct afs_work_queue_node_list * list,
		       afs_wq_node_list_id_t id)
{
    queue_Init(&list->list);
    MUTEX_INIT(&list->lock, "list", MUTEX_DEFAULT, 0);
    CV_INIT(&list->cv, "list", CV_DEFAULT, 0);
    list->qidx = id;
    list->shutdown = 0;

    return 0;
}
Example #8
/**
 * allocate and initialize a work queue object.
 *
 * @param[out]   queue_out  address in which to store newly allocated work queue object
 * @param[in]    rock       work queue opaque pointer (passed as first arg to all fired callbacks)
 * @param[in]    opts       options for the newly created queue
 *
 * @return operation status
 *    @retval 0 success
 *    @retval ENOMEM         out of memory
 */
int
afs_wq_create(struct afs_work_queue ** queue_out,
	      void * rock,
              struct afs_work_queue_opts *opts)
{
    int ret = 0;
    struct afs_work_queue * queue;

    ret = _afs_wq_alloc(queue_out);
    if (ret) {
	goto error;
    }
    queue = *queue_out;

    if (opts) {
	memcpy(&queue->opts, opts, sizeof(queue->opts));
    } else {
	afs_wq_opts_init(&queue->opts);
    }

    _afs_wq_node_list_init(&queue->ready_list,
				AFS_WQ_NODE_LIST_READY);
    _afs_wq_node_list_init(&queue->blocked_list,
				AFS_WQ_NODE_LIST_BLOCKED);
    _afs_wq_node_list_init(&queue->done_list,
				AFS_WQ_NODE_LIST_DONE);
    queue->rock = rock;
    queue->drain = 0;
    queue->shutdown = 0;
    queue->pend_count = 0;
    queue->running_count = 0;

    MUTEX_INIT(&queue->lock, "queue", MUTEX_DEFAULT, 0);
    CV_INIT(&queue->pend_cv, "queue pending", CV_DEFAULT, 0);
    CV_INIT(&queue->empty_cv, "queue empty", CV_DEFAULT, 0);
    CV_INIT(&queue->running_cv, "queue running", CV_DEFAULT, 0);

 error:
    return ret;
}
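The constructors in Examples #4 and #8 compose directly. A minimal sketch of the call sequence, assuming default options (opts == NULL) and no queue rock; teardown is elided since it does not appear in these excerpts:

    struct afs_work_queue *queue;
    struct afs_thread_pool *pool;
    int code;

    code = afs_wq_create(&queue, NULL, NULL); /* NULL opts => afs_wq_opts_init defaults */
    if (code == 0) {
        code = afs_tp_create(&pool, queue);   /* pool services the new queue */
    }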
Example #9
Rd_clp CreatePipe(Heap_clp heap, uint32_t bufsize, uint32_t trigger,
		  Wr_clp *wr)
{
    Pipe_st *st;

    st=Heap$Malloc(heap, sizeof(*st));
    if (!st) {
	RAISE_Heap$NoMemory();
    }

    /* sanitise the parameters before sizing the buffer: the ring needs
       at least two slots and the trigger must lie inside it */
    if (bufsize<2) bufsize=2;
    if (trigger>=bufsize) trigger=(bufsize>>1);

    st->buffer=Heap$Malloc(heap, bufsize);
    if (!st->buffer) {
	FREE(st);
	RAISE_Heap$NoMemory();
    }

    CL_INIT(st->rd, &rd_ms, st);
    CL_INIT(st->wr, &wr_ms, st);

    st->bufsize=bufsize;
    st->trigger=trigger;
    st->wrp=0;
    st->rdp=0;

    st->ungetc=False;
    st->lastc=-1; /* no character has been pushed back */
    MU_INIT(&st->mu);
    CV_INIT(&st->cv);
    MU_INIT(&st->wr_mu);
    MU_INIT(&st->rd_mu);

    st->rd_open=True;
    st->wr_open=True;

    *wr=&st->wr;
    return &st->rd;
}
Example #10
struct multi_handle *
multi_Init(struct rx_connection **conns, int nConns)
{
    struct rx_call **calls;
    short *ready;
    struct multi_handle *mh;
    int i;

    /*
     * Note: all structures that are possibly referenced by other
     * processes must be heap-allocated; in some kernels, variables
     * on a process stack are not accessible to other processes.
     */

    calls = (struct rx_call **)osi_Alloc(sizeof(struct rx_call *) * nConns);
    ready = (short *)osi_Alloc(sizeof(short) * nConns);
    mh = (struct multi_handle *)osi_Alloc(sizeof(struct multi_handle));
    if (!calls || !ready || !mh)
	osi_Panic("multi_Rx: no mem\n");
    memset(mh, 0, sizeof(struct multi_handle));
    mh->calls = calls;
    mh->nextReady = mh->firstNotReady = mh->ready = ready;
    mh->nReady = 0;
    mh->nConns = nConns;

#ifdef RX_ENABLE_LOCKS
    MUTEX_INIT(&mh->lock, "rx_multi_lock", MUTEX_DEFAULT, 0);
    CV_INIT(&mh->cv, "rx_multi_cv", CV_DEFAULT, 0);
#endif /* RX_ENABLE_LOCKS */
    for (i = 0; i < nConns; i++) {
	struct rx_call *call;
	call = mh->calls[i] = rx_NewCall(conns[i]);
	rx_SetArrivalProc(call, multi_Ready, (void *) mh, i);
    }
    return mh;
}
Example #11
static void
SalvageServer(int argc, char **argv)
{
    int pid, ret;
    struct SalvageQueueNode * node;
    pthread_t tid;
    pthread_attr_t attrs;
    int slot;
    VolumePackageOptions opts;

    /* All log entries are appended, which is useful when multiple
     * salvagers are writing to the same log.
     */

    CheckLogFile((char *)AFSDIR_SERVER_SALSRVLOG_FILEPATH);
#ifndef AFS_NT40_ENV
#ifdef AFS_LINUX20_ENV
    fcntl(fileno(logFile), F_SETFL, O_APPEND);	/* Isn't this redundant? */
#else
    fcntl(fileno(logFile), F_SETFL, FAPPEND);	/* Isn't this redundant? */
#endif
#endif
    setlinebuf(logFile);

    fprintf(logFile, "%s\n", cml_version_number);
    LogCommandLine(argc, argv, "Online Salvage Server",
		   SalvageVersion, "Starting OpenAFS", Log);
    /* Get and hold a lock for the duration of the salvage to make sure
     * that no other salvage runs at the same time.  The routine
     * VInitVolumePackage2 (called below) makes sure that a file server or
     * other volume utilities don't interfere with the salvage.
     */

    /* even the demand-attach online salvager needs this lock: a
     * stand-alone salvager must not conflict with the salvager daemon */
    ObtainSharedSalvageLock();

    child_slot = (int *) malloc(Parallel * sizeof(int));
    osi_Assert(child_slot != NULL);
    memset(child_slot, 0, Parallel * sizeof(int));

    /* initialize things */
    VOptDefaults(salvageServer, &opts);
    if (VInitVolumePackage2(salvageServer, &opts)) {
	Log("Shutting down: errors encountered initializing volume package\n");
	Exit(1);
    }
    DInit(10);
    queue_Init(&pending_q);
    queue_Init(&log_cleanup_queue);
    MUTEX_INIT(&worker_lock, "worker", MUTEX_DEFAULT, 0);
    CV_INIT(&worker_cv, "worker", CV_DEFAULT, 0);
    CV_INIT(&log_cleanup_queue.queue_change_cv, "queuechange", CV_DEFAULT, 0);
    osi_Assert(pthread_attr_init(&attrs) == 0);

    /* start up the reaper and log cleaner threads */
    osi_Assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);
    osi_Assert(pthread_create(&tid,
			  &attrs,
			  &SalvageChildReaperThread,
			  NULL) == 0);
    osi_Assert(pthread_create(&tid,
			  &attrs,
			  &SalvageLogCleanupThread,
			  NULL) == 0);
    osi_Assert(pthread_create(&tid,
			  &attrs,
			  &SalvageLogScanningThread,
			  NULL) == 0);

    /* loop forever serving requests */
    while (1) {
	node = SALVSYNC_getWork();
	osi_Assert(node != NULL);

	Log("dispatching child to salvage volume %u...\n",
	    node->command.sop.parent);

	VOL_LOCK;
	/* find a slot */
	for (slot = 0; slot < Parallel; slot++) {
	  if (!child_slot[slot])
	    break;
	}
	osi_Assert (slot < Parallel);

    do_fork:
	pid = Fork();
	if (pid == 0) {
	    VOL_UNLOCK;
	    ret = DoSalvageVolume(node, slot);
	    Exit(ret);
	} else if (pid < 0) {
	    Log("failed to fork child worker process\n");
	    sleep(1);
	    goto do_fork;
	} else {
	    child_slot[slot] = pid;
	    node->pid = pid;
	    VOL_UNLOCK;

	    MUTEX_ENTER(&worker_lock);
	    current_workers++;

	    /* let the reaper thread know another worker was spawned */
	    CV_BROADCAST(&worker_cv);

	    /* if we're overquota, wait for the reaper */
	    while (current_workers >= Parallel) {
		CV_WAIT(&worker_cv, &worker_lock);
	    }
	    MUTEX_EXIT(&worker_lock);
	}
    }
}
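The worker-throttling idiom at the end of Example #11 (broadcast when a worker is spawned, wait while over quota) is a general bounded-parallelism pattern. A minimal self-contained POSIX-threads sketch of the same idea, with illustrative names that are not taken from any of the sources above:

#include <pthread.h>

static pthread_mutex_t worker_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  worker_cv   = PTHREAD_COND_INITIALIZER;
static int current_workers;

#define MAX_WORKERS 4

/* called by the dispatcher after spawning a worker */
static void
worker_spawned(void)
{
    pthread_mutex_lock(&worker_lock);
    current_workers++;
    /* let the reaper know another worker exists */
    pthread_cond_broadcast(&worker_cv);
    /* if we are over quota, wait for the reaper to collect one */
    while (current_workers >= MAX_WORKERS)
	pthread_cond_wait(&worker_cv, &worker_lock);
    pthread_mutex_unlock(&worker_lock);
}

/* called by the reaper when a worker exits */
static void
worker_reaped(void)
{
    pthread_mutex_lock(&worker_lock);
    current_workers--;
    pthread_cond_broadcast(&worker_cv);
    pthread_mutex_unlock(&worker_lock);
}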