Example No. 1
static void create_once(void) {
    queue_Init(&active_Q);
    queue_Init(&cache_Q);
    pthread_mutex_init(&active_Q_mutex, NULL);
    pthread_mutex_init(&cache_Q_mutex, NULL);
    pthread_cache_done = 1;
}
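
A one-shot initializer in this shape is normally driven through pthread_once(), which guarantees create_once runs exactly once even when several threads race into the module. A minimal sketch, assuming a file-scope once-control (the names cache_once and cache_get are hypothetical):

#include <pthread.h>

static pthread_once_t cache_once = PTHREAD_ONCE_INIT;

void cache_get(void) {
    /* safe from any thread; create_once has run by the time this returns */
    pthread_once(&cache_once, create_once);
    /* ... use active_Q / cache_Q under their mutexes ... */
}
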
Example No. 2
void
rxevent_Init(int nEvents, void (*scheduler) ())
{
    if (initialized)
	return;
    clock_Init();
    if (nEvents)
	rxevent_allocUnit = nEvents;
    queue_Init(&rxevent_free);
    queue_Init(&rxevent_queue);
    rxevent_nFree = rxevent_nPosted = 0;
    rxevent_ScheduledEarlierEvent = scheduler;
    initialized = 1;
}
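
rxevent_Init is idempotent (the initialized flag), so it can safely be called from several entry points. nEvents tunes how many event records each allocation block holds, and the scheduler hook is invoked when a newly posted event is earlier than all pending ones. A hedged usage sketch (the hook name is hypothetical):

/* hypothetical hook: nudge the event-dispatch thread so it can
 * recompute how long it may sleep */
static void
wakeup_dispatcher(void)
{
    /* e.g. signal a condition variable the dispatcher waits on */
}

/* somewhere in package startup */
rxevent_Init(20, wakeup_dispatcher);	/* 20 event records per allocation unit */
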
Example No. 3
/**
 * allocate a work node.
 *
 * @param[out] node_out  address in which to store new work node
 *
 * @return operation status
 *    @retval 0 success
 *    @retval ENOMEM         out of memory
 */
int
afs_wq_node_alloc(struct afs_work_queue_node ** node_out)
{
    int ret = 0;
    struct afs_work_queue_node * node;

    *node_out = node = (struct afs_work_queue_node *) malloc(sizeof(*node));
    if (node == NULL) {
	ret = ENOMEM;
	goto error;
    }

    queue_NodeInit(&node->node_list);
    node->qidx = AFS_WQ_NODE_LIST_NONE;
    node->cbf = NULL;
    node->rock = node->queue = NULL;
    node->refcount = 1;
    node->block_count = 0;
    node->error_count = 0;
    MUTEX_INIT(&node->lock, "node", MUTEX_DEFAULT, 0);
    CV_INIT(&node->state_cv, "node state", CV_DEFAULT, 0);
    node->state = AFS_WQ_NODE_STATE_INIT;
    queue_Init(&node->dep_children);

 error:
    return ret;
}
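
The out-parameter is assigned even on failure (it is left NULL), and a successful call hands back a node holding one reference in state AFS_WQ_NODE_STATE_INIT. A caller sketch based on the signature above; the matching release call afs_wq_node_put is an assumption about the surrounding API:

struct afs_work_queue_node * node = NULL;
int code;

code = afs_wq_node_alloc(&node);
if (code) {
    /* ENOMEM: node was left NULL */
    return code;
}
/* ... attach a callback and schedule the node ... */
afs_wq_node_put(node);	/* drop the initial reference (assumed API) */
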
Example No. 4
/**
 * initialize volume group cache subsystem.
 *
 * @return operation status
 *    @retval 0 success
 */
int
VVGCache_PkgInit(void)
{
    int code = 0;
    int i;

    /* allocate hash table */
    VVGCache_hash_table.hash_buckets =
	malloc(VolumeHashTable.Size * sizeof(struct rx_queue));
    if (VVGCache_hash_table.hash_buckets == NULL) {
	code = ENOMEM;
	goto error;
    }

    /* setup hash chain heads */
    for (i = 0; i < VolumeHashTable.Size; i++) {
	queue_Init(&VVGCache_hash_table.hash_buckets[i]);
    }

    /* initialize per-partition VVGC state */
    for (i = 0; i <= VOLMAXPARTS; i++) {
	VVGCache.part[i].state = VVGC_PART_STATE_INVALID;
	VVGCache.part[i].dlist_hash_buckets = NULL;
	CV_INIT(&VVGCache.part[i].cv, "cache part", CV_DEFAULT, 0);
	if (code) {
	    /* currently unreachable (code stays 0 in this loop); kept to
	     * guard future per-partition init steps that can fail */
	    goto error;
	}
    }

 error:
    return code;
}
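
The bucket array is the only allocation made here, so the matching teardown only needs to release it. A hedged sketch of that shape (the function name is hypothetical, and a real shutdown would also have to drain any cached entries first):

int
VVGCache_PkgShutdown_sketch(void)
{
    /* release the hash chain heads allocated by VVGCache_PkgInit */
    free(VVGCache_hash_table.hash_buckets);
    VVGCache_hash_table.hash_buckets = NULL;
    return 0;
}
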
Example No. 5
/**
 * create a thread pool.
 *
 * @param[inout] pool_out  address in which to store pool object pointer.
 * @param[in]    queue     work queue serviced by thread pool
 *
 * @return operation status
 *    @retval 0 success
 *    @retval ENOMEM out of memory
 */
int
afs_tp_create(struct afs_thread_pool ** pool_out,
              struct afs_work_queue * queue)
{
    int ret = 0;
    struct afs_thread_pool * pool;

    ret = _afs_tp_alloc(pool_out);
    if (ret) {
        goto error;
    }
    pool = *pool_out;

    MUTEX_INIT(&pool->lock, "pool", MUTEX_DEFAULT, 0);
    CV_INIT(&pool->shutdown_cv, "pool shutdown", CV_DEFAULT, 0);
    queue_Init(&pool->thread_list);
    pool->work_queue = queue;
    pool->entry = &_afs_tp_worker_default;
    pool->rock = NULL;
    pool->nthreads = 0;
    pool->max_threads = 4;
    pool->state = AFS_TP_STATE_INIT;

error:
    return ret;
}
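
afs_tp_create only wires up the object; no worker threads exist yet, and the defaults can still be adjusted while the pool sits in AFS_TP_STATE_INIT. A caller sketch (afs_tp_start is assumed from the wider API, and the field is poked directly here purely for illustration):

struct afs_thread_pool * pool = NULL;
int code;

/* 'queue' is a previously created struct afs_work_queue * */
code = afs_tp_create(&pool, queue);
if (code) {
    return code;	/* ENOMEM from _afs_tp_alloc */
}
pool->max_threads = 8;		/* raise the default ceiling of 4 before starting */
code = afs_tp_start(pool);	/* assumed start entry point */
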
Example No. 6
static void
VInitPartition_r(char *path, char *devname, Device dev)
{
    struct DiskPartition64 *dp, *op;

    dp = malloc(sizeof(struct DiskPartition64));
    assert(dp != NULL);		/* no error return path; fail hard on OOM */
    /* Add it to the end, to preserve order when we print statistics */
    for (op = DiskPartitionList; op; op = op->next) {
	if (!op->next)
	    break;
    }
    if (op)
	op->next = dp;
    else
	DiskPartitionList = dp;
    dp->next = 0;
    dp->name = strdup(path);
    dp->index = volutil_GetPartitionID(path);
#if defined(AFS_NAMEI_ENV) && !defined(AFS_NT40_ENV)
    /* Create a lockfile for the partition, of the form /vicepa/Lock/vicepa */
    dp->devName = malloc(2 * strlen(path) + 6);
    assert(dp->devName != NULL);
    strcpy(dp->devName, path);
    strcat(dp->devName, OS_DIRSEP);
    strcat(dp->devName, "Lock");
    mkdir(dp->devName, 0700);
    strcat(dp->devName, path);
    close(afs_open(dp->devName, O_RDWR | O_CREAT, 0600));
    dp->device = dp->index;
#else
    dp->devName = strdup(devname);
    dp->device = dev;
#endif
    dp->lock_fd = INVALID_FD;
    dp->flags = 0;
    dp->f_files = 1;		/* just a default value */
#if defined(AFS_NAMEI_ENV) && !defined(AFS_NT40_ENV)
    if (programType == fileServer)
	(void)namei_ViceREADME(VPartitionPath(dp));
#endif
    VSetPartitionDiskUsage_r(dp);
#ifdef AFS_DEMAND_ATTACH_FS
    AddPartitionToTable_r(dp);
    queue_Init(&dp->vol_list.head);
    CV_INIT(&dp->vol_list.cv, "vol list", CV_DEFAULT, 0);
    dp->vol_list.len = 0;
    dp->vol_list.busy = 0;
    {
	char lockpath[MAXPATHLEN+1];
	snprintf(lockpath, MAXPATHLEN, "%s/" AFS_PARTLOCK_FILE, dp->name);
	lockpath[MAXPATHLEN] = '\0';
	VLockFileInit(&dp->headerLockFile, lockpath);

	snprintf(lockpath, MAXPATHLEN, "%s/" AFS_VOLUMELOCK_FILE, dp->name);
	lockpath[MAXPATHLEN] = '\0';
	VLockFileInit(&dp->volLockFile, lockpath);
    }
    VDiskLockInit(&dp->headerLock, &dp->headerLockFile, 1);
#endif /* AFS_DEMAND_ATTACH_FS */
}
Example No. 7
/**
 * initialize a node list object.
 *
 * @param[in] list list object
 * @param[in] id   list identifier
 *
 * @return operation status
 *    @retval 0 success
 *
 * @internal
 */
static int
_afs_wq_node_list_init(struct afs_work_queue_node_list * list,
		       afs_wq_node_list_id_t id)
{
    queue_Init(&list->list);
    MUTEX_INIT(&list->lock, "list", MUTEX_DEFAULT, 0);
    CV_INIT(&list->cv, "list", CV_DEFAULT, 0);
    list->qidx = id;
    list->shutdown = 0;

    return 0;
}
Example No. 8
/**
 * Thread to look for SalvageLog.$pid files that are not from our child
 * worker salvagers, and notify SalvageLogCleanupThread to clean them
 * up. This can happen if we restart during salvages, or the
 * salvageserver crashes or something.
 *
 * @param arg  unused
 *
 * @return always NULL
 */
static void *
SalvageLogScanningThread(void * arg)
{
    struct rx_queue log_watch_queue;

    queue_Init(&log_watch_queue);

    {
	DIR *dp;
	struct dirent *dirp;
	char prefix[AFSDIR_PATH_MAX];
	size_t prefix_len;

	afs_snprintf(prefix, sizeof(prefix), "%s.", AFSDIR_SLVGLOG_FILE);
	prefix_len = strlen(prefix);

	dp = opendir(AFSDIR_LOGS_DIR);
	assert(dp);

	while ((dirp = readdir(dp)) != NULL) {
	    pid_t pid;
	    struct log_cleanup_node *cleanup;
	    int i;

	    if (strncmp(dirp->d_name, prefix, prefix_len) != 0) {
		/* not a salvage logfile; skip */
		continue;
	    }

	    {
		char *end;

		errno = 0;
		pid = strtol(dirp->d_name + prefix_len, &end, 10);

		if (errno != 0 || end == dirp->d_name + prefix_len) {
		    /* file is SalvageLog.<something> but <something>
		     * isn't a pid, so skip; checking the end pointer
		     * catches non-numeric suffixes that strtol reports
		     * as 0 without setting errno */
		    continue;
		}
	    }

	    VOL_LOCK;
	    for (i = 0; i < Parallel; ++i) {
		if (pid == child_slot[i]) {
		    break;
		}
	    }
	    VOL_UNLOCK;
	    if (i < Parallel) {
		/* this pid is one of our children, so the reaper thread
		 * will take care of it; skip */
		continue;
	    }

	    cleanup =
		(struct log_cleanup_node *) malloc(sizeof(struct log_cleanup_node));
	    if (cleanup == NULL)
		continue;	/* out of memory; skip this logfile */
	    cleanup->pid = pid;

	    queue_Append(&log_watch_queue, cleanup);
	}

	closedir(dp);
    }

    ScanLogs(&log_watch_queue);

    while (queue_IsNotEmpty(&log_watch_queue)) {
	sleep(SALVAGE_SCAN_POLL_INTERVAL);
	ScanLogs(&log_watch_queue);
    }

    return NULL;
}
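
ScanLogs presumably walks the watch queue looking for logs whose salvager has finished. Iterating a struct rx_queue while deleting uses the queue_Scan form from rx_queue.h, which keeps a lookahead pointer so the current node can be unlinked mid-loop. A hedged sketch (the predicate name is hypothetical):

struct log_cleanup_node *cleanup, *nc;

for (queue_Scan(&log_watch_queue, cleanup, nc, log_cleanup_node)) {
    if (salvage_log_finished(cleanup->pid)) {	/* hypothetical check */
	queue_Remove(cleanup);
	free(cleanup);
    }
}
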
Example No. 9
static void
SalvageServer(void)
{
    int pid, ret;
    struct SalvageQueueNode * node;
    pthread_t tid;
    pthread_attr_t attrs;
    int slot;
    VolumePackageOptions opts;

    /* All entries to the log will be appended.  Useful if there are
     * multiple salvagers appending to the log.
     */

    CheckLogFile((char *)AFSDIR_SERVER_SALSRVLOG_FILEPATH);
#ifndef AFS_NT40_ENV
#ifdef AFS_LINUX20_ENV
    fcntl(fileno(logFile), F_SETFL, O_APPEND);	/* Isn't this redundant? */
#else
    fcntl(fileno(logFile), F_SETFL, FAPPEND);	/* Isn't this redundant? */
#endif
#endif
    setlinebuf(logFile);

    fprintf(logFile, "%s\n", cml_version_number);
    Log("Starting OpenAFS Online Salvage Server %s (%s)\n", SalvageVersion, commandLine);
    
    /* Get and hold a lock for the duration of the salvage to make sure
     * that no other salvage runs at the same time.  The routine
     * VInitVolumePackage2 (called below) makes sure that a file server or
     * other volume utilities don't interfere with the salvage.
     */
    
    /* even demand attach online salvager
     * still needs this because we don't want
     * a stand-alone salvager to conflict with
     * the salvager daemon */
    ObtainSharedSalvageLock();

    child_slot = (int *) malloc(Parallel * sizeof(int));
    assert(child_slot != NULL);
    memset(child_slot, 0, Parallel * sizeof(int));

    /* initialize things */
    VOptDefaults(salvageServer, &opts);
    if (VInitVolumePackage2(salvageServer, &opts)) {
	Log("Shutting down: errors encountered initializing volume package\n");
	Exit(1);
    }
    DInit(10);
    queue_Init(&pending_q);
    queue_Init(&log_cleanup_queue);
    assert(pthread_mutex_init(&worker_lock, NULL) == 0);
    assert(pthread_cond_init(&worker_cv, NULL) == 0);
    assert(pthread_cond_init(&log_cleanup_queue.queue_change_cv, NULL) == 0);
    assert(pthread_attr_init(&attrs) == 0);

    /* start up the reaper and log cleaner threads */
    assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);
    assert(pthread_create(&tid, 
			  &attrs, 
			  &SalvageChildReaperThread,
			  NULL) == 0);
    assert(pthread_create(&tid, 
			  &attrs, 
			  &SalvageLogCleanupThread,
			  NULL) == 0);
    assert(pthread_create(&tid,
			  &attrs,
			  &SalvageLogScanningThread,
			  NULL) == 0);

    /* loop forever serving requests */
    while (1) {
	node = SALVSYNC_getWork();
	assert(node != NULL);

	Log("dispatching child to salvage volume %u...\n",
	    node->command.sop.parent);

	VOL_LOCK;
	/* find a slot */
	for (slot = 0; slot < Parallel; slot++) {
	  if (!child_slot[slot])
	    break;
	}
	assert (slot < Parallel);

    do_fork:
	pid = Fork();
	if (pid == 0) {
	    VOL_UNLOCK;
	    ret = DoSalvageVolume(node, slot);
	    Exit(ret);
	} else if (pid < 0) {
	    Log("failed to fork child worker process\n");
	    sleep(1);
	    goto do_fork;
	} else {
	    child_slot[slot] = pid;
	    node->pid = pid;
	    VOL_UNLOCK;
	    
	    assert(pthread_mutex_lock(&worker_lock) == 0);
	    current_workers++;
	    
	    /* let the reaper thread know another worker was spawned */
	    assert(pthread_cond_broadcast(&worker_cv) == 0);
	    
	    /* if we're overquota, wait for the reaper */
	    while (current_workers >= Parallel) {
		assert(pthread_cond_wait(&worker_cv, &worker_lock) == 0);
	    }
	    assert(pthread_mutex_unlock(&worker_lock) == 0);
	}
    }
}
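
The other side of the worker_lock/worker_cv handshake lives in SalvageChildReaperThread. A hedged sketch of its core; the real thread would also clear the finished pid's child_slot entry and hand the pid to the log-cleanup machinery:

#include <sys/wait.h>

static void *
reaper_sketch(void * arg)
{
    int status;
    pid_t pid;

    while (1) {
	pid = waitpid((pid_t)-1, &status, 0);	/* reap any salvage child */
	if (pid <= 0)
	    continue;

	assert(pthread_mutex_lock(&worker_lock) == 0);
	current_workers--;
	/* wake the dispatcher if it is blocked on the Parallel quota */
	assert(pthread_cond_broadcast(&worker_cv) == 0);
	assert(pthread_mutex_unlock(&worker_lock) == 0);
    }
    return NULL;
}
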
Example No. 10
/**
 * start a background scan.
 *
 * @param[in] dp  disk partition object
 *
 * @return operation status
 *    @retval 0 success
 *    @retval -1 internal error
 *    @retval -3 racing against another thread
 *
 * @internal
 */
int
_VVGC_scan_start(struct DiskPartition64 * dp)
{
    int code = 0;
    pthread_t tid;
    pthread_attr_t attrs;
    int i;

    if (_VVGC_state_change(dp,
			   VVGC_PART_STATE_UPDATING)
	== VVGC_PART_STATE_UPDATING) {
	/* race */
	ViceLog(0, ("VVGC_scan_partition: race detected; aborting scanning partition %s\n",
	            VPartitionPath(dp)));
	code = -3;
	goto error;
    }

    /* initialize partition's to-delete list */
    VVGCache.part[dp->index].dlist_hash_buckets =
	malloc(VolumeHashTable.Size * sizeof(struct rx_queue));
    if (!VVGCache.part[dp->index].dlist_hash_buckets) {
	code = -1;
	goto error;
    }
    for (i = 0; i < VolumeHashTable.Size; i++) {
	queue_Init(&VVGCache.part[dp->index].dlist_hash_buckets[i]);
    }

    code = pthread_attr_init(&attrs);
    if (code) {
	goto error;
    }

    code = pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED);
    if (code) {
	goto error;
    }

    code = pthread_create(&tid, &attrs, &_VVGC_scanner_thread, dp);

    if (code) {
	VVGCache_part_state_t old_state;

	ViceLog(0, ("_VVGC_scan_start: pthread_create failed with %d\n", code));

	old_state = _VVGC_state_change(dp, VVGC_PART_STATE_INVALID);
	osi_Assert(old_state == VVGC_PART_STATE_UPDATING);
    }

 error:
    if (code) {
	ViceLog(0, ("_VVGC_scan_start failed with code %d for partition %s\n",
	        code, VPartitionPath(dp)));
	if (VVGCache.part[dp->index].dlist_hash_buckets) {
	    free(VVGCache.part[dp->index].dlist_hash_buckets);
	    VVGCache.part[dp->index].dlist_hash_buckets = NULL;
	}
    }

    return code;
}
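
For callers, the distinction between the retvals matters: -3 only means another thread won the race to scan the partition, which is normally benign, while -1 is a hard failure. A hedged caller sketch:

int code = _VVGC_scan_start(dp);

if (code == -3) {
    code = 0;		/* another thread is already scanning; nothing to do */
} else if (code == -1) {
    /* allocation or thread-creation failure; propagate the error */
}
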
Example No. 11
/* rxi_WritevProc -- internal version.
 *
 * Send buffers allocated in rxi_WritevAlloc.
 *
 * LOCKS USED -- called at netpri.
 */
int
rxi_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
{
    struct rx_packet *cp = NULL;
#ifdef RX_TRACK_PACKETS
    struct rx_packet *p, *np;
#endif
    int nextio;
    int requestCount;
    struct rx_queue tmpq;
#ifdef RXDEBUG_PACKET
    u_short tmpqc;
#endif

    requestCount = nbytes;
    nextio = 0;

    MUTEX_ENTER(&call->lock);
    if (call->error) {
        call->mode = RX_MODE_ERROR;
    } else if (call->mode != RX_MODE_SENDING) {
	call->error = RX_PROTOCOL_ERROR;
    }
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
    rxi_WaitforTQBusy(call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
    cp = call->currentPacket;

    if (call->error) {
        call->mode = RX_MODE_ERROR;
	MUTEX_EXIT(&call->lock);
	if (cp) {
#ifdef RX_TRACK_PACKETS
            cp->flags &= ~RX_PKTFLAG_CP;
            cp->flags |= RX_PKTFLAG_IOVQ;
#endif
	    queue_Prepend(&call->iovq, cp);
#ifdef RXDEBUG_PACKET
            call->iovqc++;
#endif /* RXDEBUG_PACKET */
	    call->currentPacket = (struct rx_packet *)0;
	}
#ifdef RXDEBUG_PACKET
        call->iovqc -=
#endif /* RXDEBUG_PACKET */
            rxi_FreePackets(0, &call->iovq);
	return 0;
    }

    /* Loop through the I/O vector adjusting packet pointers.
     * Place full packets back onto the iovq once they are ready
     * to send. Set RX_PROTOCOL_ERROR if any problems are found in
     * the iovec. We put the loop condition at the end to ensure that
     * a zero length write will push a short packet. */
    nextio = 0;
    queue_Init(&tmpq);
#ifdef RXDEBUG_PACKET
    tmpqc = 0;
#endif /* RXDEBUG_PACKET */
    do {
	if (call->nFree == 0 && cp) {
	    clock_NewTime();	/* Bogus:  need new time package */
	    /* The 0, below, specifies that it is not the last packet:
	     * there will be others. PrepareSendPacket may
	     * alter the packet length by up to
	     * conn->securityMaxTrailerSize */
	    hadd32(call->bytesSent, cp->length);
	    rxi_PrepareSendPacket(call, cp, 0);
	    queue_Append(&tmpq, cp);
#ifdef RXDEBUG_PACKET
            tmpqc++;
#endif /* RXDEBUG_PACKET */
            cp = call->currentPacket = (struct rx_packet *)0;

	    /* The head of the iovq is now the current packet */
	    if (nbytes) {
		if (queue_IsEmpty(&call->iovq)) {
		    call->error = RX_PROTOCOL_ERROR;	/* set while still holding the call lock */
                    MUTEX_EXIT(&call->lock);
#ifdef RXDEBUG_PACKET
                    tmpqc -=
#endif /* RXDEBUG_PACKET */
                        rxi_FreePackets(0, &tmpq);
		    return 0;
		}
		cp = queue_First(&call->iovq, rx_packet);
		queue_Remove(cp);
#ifdef RX_TRACK_PACKETS
                cp->flags &= ~RX_PKTFLAG_IOVQ;
#endif
#ifdef RXDEBUG_PACKET
                call->iovqc--;
#endif /* RXDEBUG_PACKET */
#ifdef RX_TRACK_PACKETS
                cp->flags |= RX_PKTFLAG_CP;
#endif
		call->currentPacket = cp;
		call->nFree = cp->length;
		call->curvec = 1;
		call->curpos =
		    (char *)cp->wirevec[1].iov_base +
		    call->conn->securityHeaderSize;
		call->curlen =
		    cp->wirevec[1].iov_len - call->conn->securityHeaderSize;
	    }
	}

	if (nbytes) {
	    /* The next iovec should point to the current position */
	    if (iov[nextio].iov_base != call->curpos
		|| iov[nextio].iov_len > (int)call->curlen) {
		call->error = RX_PROTOCOL_ERROR;
                MUTEX_EXIT(&call->lock);
		if (cp) {
#ifdef RX_TRACK_PACKETS
		    cp->flags &= ~RX_PKTFLAG_CP;
#endif
                    queue_Prepend(&tmpq, cp);
#ifdef RXDEBUG_PACKET
                    tmpqc++;
#endif /* RXDEBUG_PACKET */
                    cp = call->currentPacket = (struct rx_packet *)0;
		}
#ifdef RXDEBUG_PACKET
                tmpqc -=
#endif /* RXDEBUG_PACKET */
                    rxi_FreePackets(0, &tmpq);
		return 0;
	    }
	    nbytes -= iov[nextio].iov_len;
	    call->curpos += iov[nextio].iov_len;
	    call->curlen -= iov[nextio].iov_len;
	    call->nFree -= iov[nextio].iov_len;
	    nextio++;
	    if (call->curlen == 0) {
		if (++call->curvec >= cp->niovecs) {	/* wirevec indices run 0..niovecs-1 */
		    call->nFree = 0;
		} else {
		    call->curpos = (char *)cp->wirevec[call->curvec].iov_base;
		    call->curlen = cp->wirevec[call->curvec].iov_len;
		}
	    }
	}
    } while (nbytes && nextio < nio);

    /* Move the packets from the temporary queue onto the transmit queue.
     * We may end up with more than call->twind packets on the queue. */

#ifdef RX_TRACK_PACKETS
    for (queue_Scan(&tmpq, p, np, rx_packet))
    {
        p->flags |= RX_PKTFLAG_TQ;
    }
#endif

    if (call->error)
        call->mode = RX_MODE_ERROR;

    queue_SpliceAppend(&call->tq, &tmpq);

    if (!(call->flags & (RX_CALL_FAST_RECOVER | RX_CALL_FAST_RECOVER_WAIT))) {
	rxi_Start(0, call, 0, 0);
    }

    /* Wait for the length of the transmit queue to fall below call->twind */
    while (!call->error && call->tnext + 1 > call->tfirst + (2 * call->twind)) {
	clock_NewTime();
	call->startWait = clock_Sec();
#ifdef	RX_ENABLE_LOCKS
	CV_WAIT(&call->cv_twind, &call->lock);
#else
	call->flags |= RX_CALL_WAIT_WINDOW_ALLOC;
	osi_rxSleep(&call->twind);
#endif
	call->startWait = 0;
    }

    /* cp is no longer valid since we may have given up the lock */
    cp = call->currentPacket;

    if (call->error) {
        call->mode = RX_MODE_ERROR;
        call->currentPacket = NULL;
        MUTEX_EXIT(&call->lock);
	if (cp) {
#ifdef RX_TRACK_PACKETS
	    cp->flags &= ~RX_PKTFLAG_CP;
#endif
	    rxi_FreePacket(cp);
	}
	return 0;
    }
    MUTEX_EXIT(&call->lock);

    return requestCount - nbytes;
}
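
Per the "LOCKS USED -- called at netpri" note, exported callers are expected to raise the network priority level around this routine. A sketch of that wrapper shape, using the SPLVAR/NETPRI/USERPRI macros from rx's kernel-compatibility layer (the wrapper name is marked as a sketch):

int
rx_WritevProc_sketch(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
{
    int bytes;
    SPLVAR;

    NETPRI;				/* enter network-priority level */
    bytes = rxi_WritevProc(call, iov, nio, nbytes);
    USERPRI;				/* restore the caller's priority */
    return bytes;
}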