Example #1
static void worker_start (void)
{
    worker_t *handler = calloc (1, sizeof(worker_t));

    worker_control_create (&handler->wakeup_fd[0]);

    handler->pending_clients_tail = &handler->pending_clients;
    thread_spin_create (&handler->lock);
    handler->last_p = &handler->clients;

    thread_rwlock_wlock (&workers_lock);
    if (worker_incoming == NULL)
    {
        worker_incoming = handler;
        handler->thread = thread_create ("worker", worker, handler, THREAD_ATTACHED);
        thread_rwlock_unlock (&workers_lock);
        INFO0 ("starting incoming worker thread");
        worker_start();  // single level recursion, just get a special worker thread set up
        return;
    }
    handler->next = workers;
    workers = handler;
    worker_count++;
    worker_least_used = worker_balance_to_check = workers;
    thread_rwlock_unlock (&workers_lock);

    handler->thread = thread_create ("worker", worker, handler, THREAD_ATTACHED);
}
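The function above only makes sense next to the worker structure it fills in. For orientation, here is a minimal sketch of the fields worker_start() touches; the placeholder types (client_t, spin_t, thread_type) and the exact layout are assumptions, not the project's actual definitions.

/* Minimal sketch of the fields worker_start() relies on -- placeholder types,
 * not the project's real definitions. */
typedef struct client client_t;        /* opaque here; the real client type lives elsewhere */
typedef int spin_t;                    /* stand-in for the project's spinlock type */
typedef struct thread_handle thread_type;

typedef struct _worker worker_t;
struct _worker {
    int          wakeup_fd[2];         /* fd pair used to wake the worker from its poll loop */
    client_t    *pending_clients;      /* clients queued for this worker to adopt */
    client_t   **pending_clients_tail; /* tail pointer for O(1) appends to the queue */
    client_t    *clients;              /* clients the worker currently services */
    client_t   **last_p;               /* tail pointer into the clients list */
    spin_t       lock;                 /* guards the pending queue */
    thread_type *thread;               /* handle returned by thread_create() */
    worker_t    *next;                 /* link in the global workers list */
};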
Example #2
/* Verification start */
static void lmn_worker_start(void *arg)
{
  LmnWorkerGroup *wp;
  LmnWorker *w;
  unsigned long id;

  w = (LmnWorker *)arg;
  wp = worker_group(w);
  id = worker_id(w);
  worker_TLS_init(id);

  mc_react_cxt_init(worker_rc(w));

  if (worker_id(w) == LMN_PRIMARY_ID && mc_is_dump(worker_flags(w))) {
    StateSpaceRef ss = worker_states(w);
    dump_state_data(statespace_init_state(ss), (LmnWord)ss->out, (LmnWord)NULL);
  }

  if (lmn_env.profile_level >= 1) profile_start_exec_thread();
  worker_start(w);

  if (!workers_are_exit(wp) && !workers_have_error(wp)) {
    if (worker_use_owcty(w)) {
      owcty_start(w);
    } else if (worker_use_map(w) && !worker_use_weak_map(w)) {
      map_iteration_start(w);
    }
    /* else: ND-mode etc */
  }

  if (lmn_env.profile_level >= 1) profile_finish_exec_thread();
  mc_react_cxt_destroy(worker_rc(w));
  worker_TLS_finalize();
}
Example #3
int main ()
{
    float time = 0;
    int valid = 1;
    FILE *f = fopen ("asynclocklessresultsN", "a");
    int n = 100;
    N = 0;
    while ((N+=1000) < 100000)
    {
        int i = 0;
        time = 0;
        while (i++ < 10){
        clock_t t1, t2;
        Data data;
        SynchronisingThread *mainThread;
        Worker **workers;
        data.value = 0;
        t1 = clock ();  
        mainThread = synchronising_thread_new (n, &data);
        int a = synchronising_thread_start (mainThread);
        if (a)
        {
           printf ("Error creating Main Thread");
        }

        workers = malloc (sizeof (Worker *) * n);
        int j;
        for (j = 0; j < n; j++)
        {
            workers[j] = worker_new (mainThread, j, worker_thread_func);
            int ans = worker_start (workers[j]);

            if (ans)
            {
                printf ("Error creating thread %d\n", j);
            }
        }

        for (j = 0; j < n; j++)
        {
            worker_join (workers[j]);
            free (workers[j]);
        }
        free (workers);
        synchronising_thread_exit (mainThread, 1);
        synchronising_thread_join (mainThread);
        synchronising_thread_free (mainThread);
        t2 = clock ();
        printf ("%d\n", data.value);
        time += ((float)(t2 - t1) / CLOCKS_PER_SEC) * 1000.0f;  /* elapsed ms */
        }
        printf ("Average time: %f ms for N: %d\n", time / 10, N);
        fprintf (f, "Average time: %f ms for N: %d\n", time / 10, N);
        fflush (f);
     }
    fclose (f);
    return 0;
}
Example #4
static void*
worker_thread_start(void* arg)
{
    worker_type* worker = (worker_type*) arg;
    ods_thread_blocksigs();
    worker_start(worker);
    return NULL;
}
Example #5
void armas_sched_start(armas_scheduler_t *S)
{
    int i;
    for (i = 0; i < S->nworker; i++) {
        worker_start(&S->workers[i]);
    }
    S->status = 1;
}
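armas_sched_start() simply launches every worker and marks the scheduler as running. A symmetric shutdown helper is sketched below; worker_stop() and worker_join() are assumed names, mirrored from the start loop, not functions shown in this example.

/* Hedged sketch of a matching shutdown routine; worker_stop()/worker_join()
 * are assumed helpers. */
void armas_sched_stop(armas_scheduler_t *S)
{
    int i;
    for (i = 0; i < S->nworker; i++) {
        worker_stop(&S->workers[i]);    /* ask each worker to wind down */
    }
    for (i = 0; i < S->nworker; i++) {
        worker_join(&S->workers[i]);    /* wait for its thread to exit */
    }
    S->status = 0;                      /* scheduler no longer running */
}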
Example #6
int
server_start(struct server *s) {

	int i, ret;

	/* initialize libevent */
	s->base = event_base_new();

	if(s->cfg->daemonize) {
		server_daemonize(s->cfg->pidfile);

		/* sometimes event mech gets lost on fork */
		if(event_reinit(s->base) != 0) {
			fprintf(stderr, "Error: event_reinit failed after fork");
		}
	}

	/* ignore sigpipe */
#ifdef SIGPIPE
	signal(SIGPIPE, SIG_IGN);
#endif

	slog_init(s);

	/* install signal handlers */
	server_install_signal_handlers(s);

	/* start worker threads */
	for(i = 0; i < s->cfg->http_threads; ++i) {
		worker_start(s->w[i]);
	}

	/* create socket */
	s->fd = socket_setup(s, s->cfg->http_host, s->cfg->http_port);
	if(s->fd < 0) {
		return -1;
	}
	
	/* set SO_KEEPALIVE to detect half-open connections */
	int keep_alive = 1;
	setsockopt(s->fd, SOL_SOCKET, SO_KEEPALIVE, (void*)&keep_alive, sizeof(keep_alive));

	/* start http server */
	event_set(&s->ev, s->fd, EV_READ | EV_PERSIST, server_can_accept, s);
	event_base_set(s->base, &s->ev);
	ret = event_add(&s->ev, NULL);

	if(ret < 0) {
		slog(s, WEBDIS_ERROR, "Error calling event_add on socket", 0);
		return -1;
	}

	slog(s, WEBDIS_INFO, "Webdis " WEBDIS_VERSION " up and running", 0);
	event_base_dispatch(s->base);

	return 0;
}
Example #7
int AXB_REG_FUNC UserDevOpen(AXB_REG(struct IOStdReq *ior,a1),
                             AXB_REG(ULONG unit,d0),
                             AXB_REG(struct DevBase *db,a6))
{
  struct ExBase *eb = (struct ExBase *)db;

  D(("UserDevOpen\n"));
  eb->eb_WorkerPort = worker_start(db);
  return (eb->eb_WorkerPort == NULL);
}
Example #8
void workers_adjust (int new_count)
{
    INFO1 ("requested worker count %d", new_count);
    while (worker_count != new_count)
    {
        if (worker_count < new_count)
            worker_start ();
        else if (worker_count > new_count)
            worker_stop ();
    }
}
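workers_adjust() loops until the pool reaches the requested size, so callers only need to pick a target. A small usage sketch, assuming workers_adjust() is visible to the caller, sizes the pool to the number of online CPUs:

#include <unistd.h>

/* Usage sketch: match the worker pool to the online CPU count, falling back
 * to one worker if the count cannot be determined.
 * (_SC_NPROCESSORS_ONLN is widely supported but not strictly POSIX.) */
static void workers_match_cpus (void)
{
    long ncpu = sysconf (_SC_NPROCESSORS_ONLN);
    if (ncpu < 1)
        ncpu = 1;
    workers_adjust ((int)ncpu);
}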
Example #9
int server_start(struct server *s) {
    int i, ret;


    /* setrlimit */
    server_setrlimit(8192);

    /* initialize libevent before daemonizing so event_reinit() sees a valid base */
    s->base = event_base_new();

    /* daemonize */
    if (s->cfg->daemonize) {
        server_daemonize(s->cfg->pid);

        /* sometimes event mech gets lost on fork */
        if (event_reinit(s->base) != 0) {
            fprintf(stderr, "Error: event_reinit failed after fork");
        }
    }

    /* ignore sigpipe */
#ifdef SIGPIPE
    signal(SIGPIPE, SIG_IGN);
#endif

    /* lose root privileges if we have them */
    /*ret = server_setuid(s->cfg->user);
    if(ret < 0) {
        return -1;
    }*/

    /* slog init */
    slog_init(s);

    /* install signal handlers */
    server_install_signal_handlers(s);

    /* start worker threads */
    for (i = 0; i < s->cfg->worker_processes; ++i) {
        worker_start(s->w[i]);
    }

    /* create socket */
    ret = socket_setup(s, s->cfg->http.servers);
    if (ret < 0) {
        return -1;
    }

    /* dispatch */
    slog(s, LOG_INFO, "pbiws " PBIWS_VERSION " up and running", 0);
    event_base_dispatch(s->base);

    return 0;
}
Example #10
/*===========================================================================*
 *				unblock					     *
 *===========================================================================*/
static int
unblock(struct fproc *rfp)
{
/* Unblock a process that was previously blocked on a pipe or a lock.  This is
 * done by reconstructing the original request and continuing/repeating it.
 * This function returns TRUE when it has restored a request for execution, and
 * FALSE if the caller should continue looking for work to do.
 */
  int blocked_on;

  blocked_on = rfp->fp_blocked_on;

  /* Reconstruct the original request from the saved data. */
  memset(&m_in, 0, sizeof(m_in));
  m_in.m_source = rfp->fp_endpoint;
  switch (blocked_on) {
  case FP_BLOCKED_ON_PIPE:
	assert(rfp->fp_pipe.callnr == VFS_READ ||
	    rfp->fp_pipe.callnr == VFS_WRITE);
	m_in.m_type = rfp->fp_pipe.callnr;
	m_in.m_lc_vfs_readwrite.fd = rfp->fp_pipe.fd;
	m_in.m_lc_vfs_readwrite.buf = rfp->fp_pipe.buf;
	m_in.m_lc_vfs_readwrite.len = rfp->fp_pipe.nbytes;
	m_in.m_lc_vfs_readwrite.cum_io = rfp->fp_pipe.cum_io;
	break;
  case FP_BLOCKED_ON_FLOCK:
	assert(rfp->fp_flock.cmd == F_SETLKW);
	m_in.m_type = VFS_FCNTL;
	m_in.m_lc_vfs_fcntl.fd = rfp->fp_flock.fd;
	m_in.m_lc_vfs_fcntl.cmd = rfp->fp_flock.cmd;
	m_in.m_lc_vfs_fcntl.arg_ptr = rfp->fp_flock.arg;
	break;
  default:
	panic("unblocking call blocked on %d ??", blocked_on);
  }

  rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;	/* no longer blocked */
  rfp->fp_flags &= ~FP_REVIVED;
  reviving--;
  assert(reviving >= 0);

  /* Pending pipe reads/writes cannot be repeated as is, and thus require a
   * special resumption procedure.
   */
  if (blocked_on == FP_BLOCKED_ON_PIPE) {
	worker_start(rfp, do_pending_pipe, &m_in, FALSE /*use_spare*/);
	return(FALSE);	/* Retrieve more work */
  }

  /* A lock request. Repeat the original request as though it just came in. */
  fp = rfp;
  return(TRUE);	/* We've unblocked a process */
}
Example #11
int
server_start(struct server *s) {

	int i, ret;

	/* initialize libevent */
	s->base = event_base_new();

	if(s->cfg->daemonize) {
		server_daemonize();

		/* sometimes event mech gets lost on fork */
		if(event_reinit(s->base) != 0) {
			fprintf(stderr, "Error: event_reinit failed after fork");
		}
	}

	/* ignore sigpipe */
#ifdef SIGPIPE
	signal(SIGPIPE, SIG_IGN);
#endif

	slog_init(s);

	/* start worker threads */
	for(i = 0; i < s->cfg->http_threads; ++i) {
		worker_start(s->w[i]);
	}

	/* create socket */
	s->fd = socket_setup(s->cfg->http_host, s->cfg->http_port);
	if(s->fd < 0) {
		return -1;
	}

	/* start http server */
	event_set(&s->ev, s->fd, EV_READ | EV_PERSIST, server_can_accept, s);
	event_base_set(s->base, &s->ev);
	ret = event_add(&s->ev, NULL);

	if(ret < 0) {
		slog(s, WEBDIS_ERROR, "Error calling event_add on socket", 0);
		return -1;
	}

	event_base_dispatch(s->base);

	return 0;
}
Example #12
void workers_adjust (int new_count)
{
    INFO1 ("requested worker count %d", new_count);
    while (worker_count != new_count)
    {
        if (worker_count < new_count)
            worker_start ();
        else if (worker_count > new_count)
            worker_stop ();
    }
    if (worker_count == 0)
    {
        logger_commits(0);
        sock_close (logger_fd[1]);
        sock_close (logger_fd[0]);
    }
}
Example #13
/*===========================================================================*
 *			       handle_work				     *
 *===========================================================================*/
static void handle_work(void *(*func)(void *arg))
{
/* Handle asynchronous device replies and new system calls. If the originating
 * endpoint is an FS endpoint, take extra care not to get in deadlock. */
  struct vmnt *vmp = NULL;
  endpoint_t proc_e;

  proc_e = m_in.m_source;

  if (fp->fp_flags & FP_SYS_PROC) {
	if (worker_available() == 0) {
		if (!deadlock_resolving) {
			if ((vmp = find_vmnt(proc_e)) != NULL) {
				/* A call back or dev result from an FS
				 * endpoint. Set call back flag. Can do only
				 * one call back at a time.
				 */
				if (vmp->m_flags & VMNT_CALLBACK) {
					reply(proc_e, EAGAIN);
					return;
				}
				vmp->m_flags |= VMNT_CALLBACK;

				/* When an FS endpoint has to make a call back
				 * in order to mount, force its device to a
				 * "none device" so block reads/writes will be
				 * handled by ROOT_FS_E.
				 */
				if (vmp->m_flags & VMNT_MOUNTING)
					vmp->m_flags |= VMNT_FORCEROOTBSF;
			}
			deadlock_resolving = 1;
			dl_worker_start(func);
			return;
		}
		/* Already trying to resolve a deadlock, can't
		 * handle more, sorry */

		reply(proc_e, EAGAIN);
		return;
	}
  }

  worker_start(func);
}
Example #14
int scheduler_restart(struct scheduler *sched) {
    int status;

    // notify everyone that they don't have to keep checking the lock.
    sync_store(&sched->stopped, SCHED_RUN, SYNC_RELEASE);

    // now switch the state of the running condition
    pthread_mutex_lock(&sched->run_state.lock);
    sched->run_state.state = SCHED_RUN;
    pthread_cond_broadcast(&sched->run_state.running);
    pthread_mutex_unlock(&sched->run_state.lock);

    status = worker_start();

    // now switch the state to stopped
    pthread_mutex_lock(&sched->run_state.lock);
    sched->run_state.state = SCHED_STOP;
    pthread_mutex_unlock(&sched->run_state.lock);

    return status;
}
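scheduler_restart() wakes workers by broadcasting run_state.running after setting the state to SCHED_RUN, which implies worker threads park on that condition variable. A hedged sketch of the wait side, assuming the same fields and constants:

/* Hedged sketch of the worker-side wait that pairs with the broadcast above;
 * assumes the same run_state fields and the SCHED_RUN constant. */
static void scheduler_wait_for_run(struct scheduler *sched) {
    pthread_mutex_lock(&sched->run_state.lock);
    while (sched->run_state.state != SCHED_RUN) {
        /* atomically releases the lock while sleeping, re-acquires on wakeup */
        pthread_cond_wait(&sched->run_state.running, &sched->run_state.lock);
    }
    pthread_mutex_unlock(&sched->run_state.lock);
}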
Example #15
void
server_start(server *s) {
	int i;

	s->base = event_base_new();
	s->signal = event_new(s->base, SIGINT, EV_SIGNAL|EV_PERSIST, &server_sig_handler, s);
	event_add(s->signal, NULL);

	s->fd = server_setup_socket(s->cfg->ip, s->cfg->port);
	assert(s->fd != -1);

	/* start workers */
	for(i=0; i<s->cfg->workers; i++) {
		worker_start(s->w[i]);
	}

	event_base_dispatch(s->base);

	server_free(s);
	exit(EXIT_SUCCESS);
}
Example #16
/// The pthread entry function for dedicated worker threads.
///
/// This is used by _create().
static void *_run(void *worker) {
    dbg_assert(here);
    dbg_assert(here->gas);
    dbg_assert(worker);

    _bind_self(worker);

    // Ensure that all of the threads have joined the address spaces.
    as_join(AS_REGISTERED);
    as_join(AS_GLOBAL);
    as_join(AS_CYCLIC);

#ifdef HAVE_APEX
    // let APEX know there is a new thread
    apex_register_thread("HPX WORKER THREAD");
#endif

    // wait for the other threads to join
    system_barrier_wait(&here->sched->barrier);

    if (worker_start()) {
        dbg_error("failed to start processing lightweight threads.\n");
        return NULL;
    }

#ifdef HAVE_APEX
    // let APEX know the thread is exiting
    apex_exit_thread();
#endif

    // leave the global address space
    as_leave();

    // unbind self and return NULL
    return (self = NULL);
}
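_run() is documented as the pthread entry used by _create(). A hedged sketch of what that creation side could look like; the function name, signature, and error reporting are assumptions for illustration, not the project's code.

#include <pthread.h>
#include <stdio.h>

/* Hedged sketch of a _create()-style launcher for the _run() entry above. */
static int _create(void *worker, pthread_t *thread) {
    int e = pthread_create(thread, NULL, _run, worker);
    if (e != 0) {
        fprintf(stderr, "failed to create worker thread: %d\n", e);
        return -1;
    }
    return 0;
}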
Example #17
/*===========================================================================*
 *			       handle_work				     *
 *===========================================================================*/
static void handle_work(void (*func)(void))
{
/* Handle asynchronous device replies and new system calls. If the originating
 * endpoint is an FS endpoint, take extra care not to get in deadlock. */
  struct vmnt *vmp = NULL;
  endpoint_t proc_e;
  int use_spare = FALSE;

  proc_e = m_in.m_source;

  if (fp->fp_flags & FP_SRV_PROC) {
	vmp = find_vmnt(proc_e);
	if (vmp != NULL) {
		/* A callback from an FS endpoint. Can do only one at once. */
		if (vmp->m_flags & VMNT_CALLBACK) {
			replycode(proc_e, EAGAIN);
			return;
		}
		/* Already trying to resolve a deadlock? Can't handle more. */
		if (worker_available() == 0) {
			replycode(proc_e, EAGAIN);
			return;
		}
		/* A thread is available. Set callback flag. */
		vmp->m_flags |= VMNT_CALLBACK;
		if (vmp->m_flags & VMNT_MOUNTING) {
			vmp->m_flags |= VMNT_FORCEROOTBSF;
		}
	}

	/* Use the spare thread to handle this request if needed. */
	use_spare = TRUE;
  }

  worker_start(fp, func, &m_in, use_spare);
}
Example #18
void python_worker_start(){
    worker_start();
}
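python_worker_start() is a thin C wrapper, which suggests the worker is exposed to Python elsewhere in the project. Purely as an illustration (the project's actual binding mechanism is not shown here), a minimal CPython extension around it could look like this:

#include <Python.h>

/* Illustrative CPython binding for python_worker_start(); the real project
 * may bind it differently (SWIG, ctypes, Cython, ...). */
static PyObject *py_worker_start(PyObject *self, PyObject *args)
{
    (void)self; (void)args;
    python_worker_start();
    Py_RETURN_NONE;
}

static PyMethodDef worker_methods[] = {
    {"worker_start", py_worker_start, METH_NOARGS, "Start the worker."},
    {NULL, NULL, 0, NULL}
};

static struct PyModuleDef worker_module = {
    PyModuleDef_HEAD_INIT, "worker", NULL, -1, worker_methods
};

PyMODINIT_FUNC PyInit_worker(void)
{
    return PyModule_Create(&worker_module);
}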
Example #19
/*===========================================================================*
 *				service_pm				     *
 *===========================================================================*/
static void service_pm()
{
  int r, slot;

  switch (job_call_nr) {
    case PM_SETUID:
	{
		endpoint_t proc_e;
		uid_t euid, ruid;

		proc_e = job_m_in.PM_PROC;
		euid = job_m_in.PM_EID;
		ruid = job_m_in.PM_RID;

		pm_setuid(proc_e, euid, ruid);

		m_out.m_type = PM_SETUID_REPLY;
		m_out.PM_PROC = proc_e;
	}
	break;

    case PM_SETGID:
	{
		endpoint_t proc_e;
		gid_t egid, rgid;

		proc_e = job_m_in.PM_PROC;
		egid = job_m_in.PM_EID;
		rgid = job_m_in.PM_RID;

		pm_setgid(proc_e, egid, rgid);

		m_out.m_type = PM_SETGID_REPLY;
		m_out.PM_PROC = proc_e;
	}
	break;

    case PM_SETSID:
	{
		endpoint_t proc_e;

		proc_e = job_m_in.PM_PROC;
		pm_setsid(proc_e);

		m_out.m_type = PM_SETSID_REPLY;
		m_out.PM_PROC = proc_e;
	}
	break;

    case PM_EXEC:
    case PM_EXIT:
    case PM_DUMPCORE:
	{
		endpoint_t proc_e = job_m_in.PM_PROC;
		okendpt(proc_e, &slot);
		fp = &fproc[slot];

		if (fp->fp_flags & FP_PENDING) {
			/* This process has a request pending, but PM wants it
			 * gone. Forget about the pending request and satisfy
			 * PM's request instead. Note that a pending request
			 * AND an EXEC request are mutually exclusive. Also, PM
			 * should send only one request/process at a time.
			 */
			 assert(fp->fp_job.j_m_in.m_source != PM_PROC_NR);
		}

		/* PM requests on behalf of a proc are handled after the
		 * system call that might be in progress for that proc has
		 * finished. If the proc is not busy, we start a dummy call.
		 */
		if (!(fp->fp_flags & FP_PENDING) &&
					mutex_trylock(&fp->fp_lock) == 0) {
			mutex_unlock(&fp->fp_lock);
			worker_start(do_dummy);
			fp->fp_flags |= FP_DROP_WORK;
		}

		fp->fp_job.j_m_in = job_m_in;
		fp->fp_flags |= FP_PM_PENDING;

		return;
	}
    case PM_FORK:
    case PM_SRV_FORK:
	{
		endpoint_t pproc_e, proc_e;
		pid_t child_pid;
		uid_t reuid;
		gid_t regid;

		pproc_e = job_m_in.PM_PPROC;
		proc_e = job_m_in.PM_PROC;
		child_pid = job_m_in.PM_CPID;
		reuid = job_m_in.PM_REUID;
		regid = job_m_in.PM_REGID;

		pm_fork(pproc_e, proc_e, child_pid);
		m_out.m_type = PM_FORK_REPLY;

		if (job_call_nr == PM_SRV_FORK) {
			m_out.m_type = PM_SRV_FORK_REPLY;
			pm_setuid(proc_e, reuid, reuid);
			pm_setgid(proc_e, regid, regid);
		}

		m_out.PM_PROC = proc_e;
	}
	break;
    case PM_SETGROUPS:
	{
		endpoint_t proc_e;
		int group_no;
		gid_t *group_addr;

		proc_e = job_m_in.PM_PROC;
		group_no = job_m_in.PM_GROUP_NO;
		group_addr = (gid_t *) job_m_in.PM_GROUP_ADDR;

		pm_setgroups(proc_e, group_no, group_addr);

		m_out.m_type = PM_SETGROUPS_REPLY;
		m_out.PM_PROC = proc_e;
	}
	break;

    case PM_UNPAUSE:
	{
		endpoint_t proc_e;

		proc_e = job_m_in.PM_PROC;

		unpause(proc_e);

		m_out.m_type = PM_UNPAUSE_REPLY;
		m_out.PM_PROC = proc_e;
	}
	break;

    case PM_REBOOT:
	pm_reboot();

	/* Reply dummy status to PM for synchronization */
	m_out.m_type = PM_REBOOT_REPLY;

	break;

    default:
	printf("VFS: don't know how to handle PM request %d\n", job_call_nr);

	return;
  }

  r = send(PM_PROC_NR, &m_out);
  if (r != OK)
	panic("service_pm: send failed: %d", r);

}
Example #20
/*===========================================================================*
 *				sef_cb_init_fresh			     *
 *===========================================================================*/
static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *info)
{
/* Initialize the virtual file server. */
  int s, i;
  struct fproc *rfp;
  message mess;
  struct rprocpub rprocpub[NR_BOOT_PROCS];

  force_sync = 0;
  receive_from = ANY;
  self = NULL;
  verbose = 0;

  /* Initialize proc endpoints to NONE */
  for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
	rfp->fp_endpoint = NONE;
	rfp->fp_pid = PID_FREE;
  }

  /* Initialize the process table with help of the process manager messages.
   * Expect one message for each system process with its slot number and pid.
   * When no more processes follow, the magic process number NONE is sent.
   * Then, stop and synchronize with the PM.
   */
  do {
	if ((s = sef_receive(PM_PROC_NR, &mess)) != OK)
		panic("VFS: couldn't receive from PM: %d", s);

	if (mess.m_type != PM_INIT)
		panic("unexpected message from PM: %d", mess.m_type);

	if (NONE == mess.PM_PROC) break;

	rfp = &fproc[mess.PM_SLOT];
	rfp->fp_flags = FP_NOFLAGS;
	rfp->fp_pid = mess.PM_PID;
	rfp->fp_endpoint = mess.PM_PROC;
	rfp->fp_grant = GRANT_INVALID;
	rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;
	rfp->fp_realuid = (uid_t) SYS_UID;
	rfp->fp_effuid = (uid_t) SYS_UID;
	rfp->fp_realgid = (gid_t) SYS_GID;
	rfp->fp_effgid = (gid_t) SYS_GID;
	rfp->fp_umask = ~0;
  } while (TRUE);			/* continue until process NONE */
  mess.m_type = OK;			/* tell PM that we succeeded */
  s = send(PM_PROC_NR, &mess);		/* send synchronization message */

  /* All process table entries have been set. Continue with initialization. */
  fp = &fproc[_ENDPOINT_P(VFS_PROC_NR)];/* During init all communication with
					 * FSes is on behalf of myself */
  init_dmap();			/* Initialize device table. */
  system_hz = sys_hz();

  /* Map all the services in the boot image. */
  if ((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
			    (vir_bytes) rprocpub, sizeof(rprocpub), S)) != OK){
	panic("sys_safecopyfrom failed: %d", s);
  }
  for (i = 0; i < NR_BOOT_PROCS; i++) {
	if (rprocpub[i].in_use) {
		if ((s = map_service(&rprocpub[i])) != OK) {
			panic("VFS: unable to map service: %d", s);
		}
	}
  }

  /* Subscribe to block and character driver events. */
  s = ds_subscribe("drv\\.[bc]..\\..*", DSF_INITIAL | DSF_OVERWRITE);
  if (s != OK) panic("VFS: can't subscribe to driver events (%d)", s);

  /* Initialize worker threads */
  for (i = 0; i < NR_WTHREADS; i++)  {
	worker_init(&workers[i]);
  }
  worker_init(&sys_worker); /* exclusive system worker thread */
  worker_init(&dl_worker); /* exclusive worker thread to resolve deadlocks */

  /* Initialize global locks */
  if (mthread_mutex_init(&pm_lock, NULL) != 0)
	panic("VFS: couldn't initialize pm lock mutex");
  if (mthread_mutex_init(&exec_lock, NULL) != 0)
	panic("VFS: couldn't initialize exec lock");
  if (mthread_mutex_init(&bsf_lock, NULL) != 0)
	panic("VFS: couldn't initialize block special file lock");

  /* Initialize event resources for boot procs and locks for all procs */
  for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
	if (mutex_init(&rfp->fp_lock, NULL) != 0)
		panic("unable to initialize fproc lock");
#if LOCK_DEBUG
	rfp->fp_vp_rdlocks = 0;
	rfp->fp_vmnt_rdlocks = 0;
#endif
  }

  init_vnodes();		/* init vnodes */
  init_vmnts();			/* init vmnt structures */
  init_select();		/* init select() structures */
  init_filps();			/* Init filp structures */
  mount_pfs();			/* mount Pipe File Server */
  worker_start(do_init_root);	/* mount initial ramdisk as file system root */
  yield();			/* force do_init_root to start */
  self = NULL;

  return(OK);
}
Example #21
/*===========================================================================*
 *				sef_cb_init_fresh			     *
 *===========================================================================*/
static int sef_cb_init_fresh(int UNUSED(type), sef_init_info_t *info)
{
/* Initialize the virtual file server. */
  int s, i;
  struct fproc *rfp;
  message mess;
  struct rprocpub rprocpub[NR_BOOT_PROCS];

  self = NULL;
  verbose = 0;

  /* Initialize proc endpoints to NONE */
  for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
	rfp->fp_endpoint = NONE;
	rfp->fp_pid = PID_FREE;
  }

  /* Initialize the process table with help of the process manager messages.
   * Expect one message for each system process with its slot number and pid.
   * When no more processes follow, the magic process number NONE is sent.
   * Then, stop and synchronize with the PM.
   */
  do {
	if ((s = sef_receive(PM_PROC_NR, &mess)) != OK)
		panic("VFS: couldn't receive from PM: %d", s);

	if (mess.m_type != VFS_PM_INIT)
		panic("unexpected message from PM: %d", mess.m_type);

	if (NONE == mess.VFS_PM_ENDPT) break;

	rfp = &fproc[mess.VFS_PM_SLOT];
	rfp->fp_flags = FP_NOFLAGS;
	rfp->fp_pid = mess.VFS_PM_PID;
	rfp->fp_endpoint = mess.VFS_PM_ENDPT;
	rfp->fp_grant = GRANT_INVALID;
	rfp->fp_blocked_on = FP_BLOCKED_ON_NONE;
	rfp->fp_realuid = (uid_t) SYS_UID;
	rfp->fp_effuid = (uid_t) SYS_UID;
	rfp->fp_realgid = (gid_t) SYS_GID;
	rfp->fp_effgid = (gid_t) SYS_GID;
	rfp->fp_umask = ~0;
  } while (TRUE);			/* continue until process NONE */
  mess.m_type = OK;			/* tell PM that we succeeded */
  s = ipc_send(PM_PROC_NR, &mess);		/* send synchronization message */

  system_hz = sys_hz();

  /* Subscribe to block and character driver events. */
  s = ds_subscribe("drv\\.[bc]..\\..*", DSF_INITIAL | DSF_OVERWRITE);
  if (s != OK) panic("VFS: can't subscribe to driver events (%d)", s);

  /* Initialize worker threads */
  worker_init();

  /* Initialize global locks */
  if (mthread_mutex_init(&bsf_lock, NULL) != 0)
	panic("VFS: couldn't initialize block special file lock");

  init_dmap();			/* Initialize device table. */

  /* Map all the services in the boot image. */
  if ((s = sys_safecopyfrom(RS_PROC_NR, info->rproctab_gid, 0,
			    (vir_bytes) rprocpub, sizeof(rprocpub))) != OK){
	panic("sys_safecopyfrom failed: %d", s);
  }
  for (i = 0; i < NR_BOOT_PROCS; i++) {
	if (rprocpub[i].in_use) {
		if ((s = map_service(&rprocpub[i])) != OK) {
			panic("VFS: unable to map service: %d", s);
		}
	}
  }

  /* Initialize locks and initial values for all processes. */
  for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++) {
	if (mutex_init(&rfp->fp_lock, NULL) != 0)
		panic("unable to initialize fproc lock");
	rfp->fp_worker = NULL;
#if LOCK_DEBUG
	rfp->fp_vp_rdlocks = 0;
	rfp->fp_vmnt_rdlocks = 0;
#endif

	/* Initialize process directories. mount_fs will set them to the
	 * correct values.
	 */
	for (i = 0; i < OPEN_MAX; i++)
		rfp->fp_filp[i] = NULL;
	rfp->fp_rd = NULL;
	rfp->fp_wd = NULL;
  }

  init_vnodes();		/* init vnodes */
  init_vmnts();			/* init vmnt structures */
  init_select();		/* init select() structures */
  init_filps();			/* Init filp structures */

  /* Mount PFS and initial file system root. */
  worker_start(fproc_addr(VFS_PROC_NR), do_init_root, &mess /*unused*/,
	FALSE /*use_spare*/);

  return(OK);
}
Example #22
int
main(int argc, char *argv[])
{
    char *unixctl_path = NULL;
    struct unixctl_server *unixctl;
    struct signal *sighup;
    char *remote;
    bool exiting;
    int retval;

    proctitle_init(argc, argv);
    set_program_name(argv[0]);
    stress_init_command();
    remote = parse_options(argc, argv, &unixctl_path);
    signal(SIGPIPE, SIG_IGN);
    sighup = signal_register(SIGHUP);
    process_init();
    ovsrec_init();

    daemonize_start();

    if (want_mlockall) {
#ifdef HAVE_MLOCKALL
        if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
            VLOG_ERR("mlockall failed: %s", ovs_strerror(errno));
        }
#else
        VLOG_ERR("mlockall not supported on this system");
#endif
    }

    worker_start();

    retval = unixctl_server_create(unixctl_path, &unixctl);
    if (retval) {
        exit(EXIT_FAILURE);
    }
    unixctl_command_register("exit", "", 0, 0, ovs_vswitchd_exit, &exiting);

    bridge_init(remote);
    free(remote);

    exiting = false;
    while (!exiting) {
        worker_run();
        if (signal_poll(sighup)) {
            vlog_reopen_log_file();
        }
        memory_run();
        if (memory_should_report()) {
            struct simap usage;

            simap_init(&usage);
            bridge_get_memory_usage(&usage);
            memory_report(&usage);
            simap_destroy(&usage);
        }
        bridge_run_fast();
        bridge_run();
        bridge_run_fast();
        unixctl_server_run(unixctl);
        netdev_run();

        worker_wait();
        signal_wait(sighup);
        memory_wait();
        bridge_wait();
        unixctl_server_wait(unixctl);
        netdev_wait();
        if (exiting) {
            poll_immediate_wake();
        }
        poll_block();
    }
    bridge_exit();
    unixctl_server_destroy(unixctl);

    return 0;
}
Example #23
static int run_paxos(int duelling_proposers)
{
	int i;
	struct timespec ts;

	if (sem_init(&g_sem_accept_leader, 0, 0))
		abort();
	if (pthread_mutex_init(&g_start_lock, 0))
		abort();
	if (pthread_cond_init(&g_start_cond, 0))
		abort();
	g_start = 0;
	memset(g_nodes, 0, sizeof(g_nodes));
	memset(g_node_data, 0, sizeof(g_node_data));
	for (i = 0; i < g_num_nodes;  ++i) {
		char name[WORKER_NAME_MAX];

		snprintf(name, WORKER_NAME_MAX, "node_%3d", i);
		g_node_data[i].id = i;
		g_node_data[i].state = NODE_STATE_INIT;
		reset_remotes(g_node_data[i].remotes);
		g_node_data[i].seen_pseq = 0;
		g_node_data[i].prop_pseq = 0;
		g_node_data[i].prop_leader = -1;
		g_node_data[i].leader = -1;
		g_nodes[i] = worker_start(name, paxos_handle_msg,
				NULL, &g_node_data[i]);
		if (!g_nodes[i]) {
			fprintf(stderr, "failed to allocate node %d\n", i);
			abort();
		}
	}
	if (g_num_nodes < 3)
		abort();
	send_do_propose(2);
	if (duelling_proposers) {
		send_do_propose(0);
		send_do_propose(1);
	}
	/* start acceptors */
	pthread_mutex_lock(&g_start_lock);
	g_start = 1;
	pthread_cond_broadcast(&g_start_cond);
	pthread_mutex_unlock(&g_start_lock);
	/* Wait for consensus.
	 * We only actually need more than half the nodes.  However, to make
	 * debugging a little nicer, we'll wait 10 seconds for all the remaining
	 * nodes rather than exiting immediately after we get half. */
	for (i = 0; i < 1 + (g_num_nodes / 2); ++i) {
		TEMP_FAILURE_RETRY(sem_wait(&g_sem_accept_leader));
	}
	if (clock_gettime(CLOCK_REALTIME, &ts) == -1)
		abort();
	ts.tv_sec += 10;
	for (; i < g_num_nodes; ++i) {
		TEMP_FAILURE_RETRY(sem_timedwait(&g_sem_accept_leader, &ts));
	}
	/* cleanup */
	for (i = 0; i < g_num_nodes; ++i) {
		worker_stop(g_nodes[i]);
	}
	for (i = 0; i < g_num_nodes; ++i) {
		worker_join(g_nodes[i]);
	}
	pthread_cond_destroy(&g_start_cond);
	g_start = 0;
	pthread_mutex_destroy(&g_start_lock);
	sem_destroy(&g_sem_accept_leader);

	return check_leaders();
}
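run_paxos() releases the acceptors through the g_start_lock/g_start_cond pair after all nodes have been created with worker_start(). A hedged sketch of the start gate a node handler could check before processing its first message, reusing the globals shown above:

/* Hedged sketch of the start gate used by node workers; reuses the
 * g_start/g_start_lock/g_start_cond globals from run_paxos() above. */
static void wait_for_start(void)
{
	pthread_mutex_lock(&g_start_lock);
	while (!g_start)
		pthread_cond_wait(&g_start_cond, &g_start_lock);
	pthread_mutex_unlock(&g_start_lock);
}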
Example #24
int 
main(int len,char** args)
{
    /* char p[80]; */
    int i=0;
    char ch[80];
    pxy_worker_t *w;

    if(pxy_init_master() < 0){
	D("master initialize failed");
	return -1;
    }
 
    D("master initialized");

    /*spawn worker*/

    for(;i<config->worker_count;i++){
  
	w = (pxy_worker_t*)(master->workers + i);

	if(socketpair(AF_UNIX,SOCK_STREAM,0,w->socket_pair) < 0) {
	    D("create socket pair error"); continue;
	}

	if(setnonblocking(w->socket_pair[0]) < 0) {
	    D("setnonblocking error fd:#%d",w->socket_pair[0]); continue;
	}
 
	if(setnonblocking(w->socket_pair[1]) < 0) {
	    D("setnonblocking error fd:#%d",w->socket_pair[1]); continue;
	}
    
	pid_t p = fork();

	if(p < 0) {
	    D("%s","forkerror");
	}
	else if(p == 0){/*child*/

	    if(worker_init() < 0){
		D("worker #%d initialization failed", getpid());
		return -1;
	    }
	    D("worker #%d initialized successfully", getpid());


	    close (w->socket_pair[0]); /*child should close the pair[0]*/
	    ev_file_item_t *f = ev_file_item_new(w->socket_pair[1],
						 worker,
						 worker_recv_cmd,
						 NULL,
						 EV_READABLE | EPOLLET);
	    if(!f){ 
		D("new file item error"); return -1; 
	    }
	    if(ev_add_file_item(worker->ev,f) < 0) {
		D("add event error"); return -1;
	    }


	    if(!worker_start()) {
		D("worker #%d started failed", getpid()); return -1;
	    }
	}
	else{ /*parent*/
	    w->pid = p;
	    close(w->socket_pair[1]); /*parent close the pair[1]*/
	}

    }


    while(scanf("%s",ch) >= 0 && strcmp(ch,"quit") !=0){ 
    }

    w = (pxy_worker_t*)master->workers;
    pxy_send_command(w,PXY_CMD_QUIT,-1);

    sleep(5);
    pxy_master_close();
    return 1;
}
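In the master process above, commands reach a worker over its end of the socketpair (pxy_send_command() on the parent side, worker_recv_cmd() registered on the child side). The protocol itself is not shown; below is a hedged sketch of what the receiving callback might do with a single command byte.

#include <unistd.h>

/* Hedged sketch of the worker-side command read; the real worker_recv_cmd()
 * and command layout are not shown above, so this is an assumption. */
static void worker_recv_cmd_sketch(int fd)
{
	unsigned char cmd;
	if (read(fd, &cmd, 1) == 1 && cmd == PXY_CMD_QUIT) {
		/* stop the worker's event loop and let the child process exit */
	}
}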
Example #25
/*===========================================================================*
 *				service_pm				     *
 *===========================================================================*/
static void service_pm(void)
{
/* Process a request from PM. This function is called from the main thread, and
 * may therefore not block. Any requests that may require blocking the calling
 * thread must be executed in a separate thread. Aside from VFS_PM_REBOOT, all
 * requests from PM involve another, target process: for example, PM tells VFS
 * that a process is performing a setuid() call. For some requests however,
 * that other process may not be idle, and in that case VFS must serialize the
 * PM request handling with any operation is it handling for that target
 * process. As it happens, the requests that may require blocking are also the
 * ones where the target process may not be idle. For both these reasons, such
 * requests are run in worker threads associated to the target process.
 */
  struct fproc *rfp;
  int r, slot;
  message m_out;

  memset(&m_out, 0, sizeof(m_out));

  switch (call_nr) {
  case VFS_PM_SETUID:
	{
		endpoint_t proc_e;
		uid_t euid, ruid;

		proc_e = m_in.VFS_PM_ENDPT;
		euid = m_in.VFS_PM_EID;
		ruid = m_in.VFS_PM_RID;

		pm_setuid(proc_e, euid, ruid);

		m_out.m_type = VFS_PM_SETUID_REPLY;
		m_out.VFS_PM_ENDPT = proc_e;
	}
	break;

  case VFS_PM_SETGID:
	{
		endpoint_t proc_e;
		gid_t egid, rgid;

		proc_e = m_in.VFS_PM_ENDPT;
		egid = m_in.VFS_PM_EID;
		rgid = m_in.VFS_PM_RID;

		pm_setgid(proc_e, egid, rgid);

		m_out.m_type = VFS_PM_SETGID_REPLY;
		m_out.VFS_PM_ENDPT = proc_e;
	}
	break;

  case VFS_PM_SETSID:
	{
		endpoint_t proc_e;

		proc_e = m_in.VFS_PM_ENDPT;
		pm_setsid(proc_e);

		m_out.m_type = VFS_PM_SETSID_REPLY;
		m_out.VFS_PM_ENDPT = proc_e;
	}
	break;

  case VFS_PM_EXEC:
  case VFS_PM_EXIT:
  case VFS_PM_DUMPCORE:
  case VFS_PM_UNPAUSE:
	{
		endpoint_t proc_e = m_in.VFS_PM_ENDPT;

		if(isokendpt(proc_e, &slot) != OK) {
			printf("VFS: proc ep %d not ok\n", proc_e);
			return;
		}

		rfp = &fproc[slot];

		/* PM requests on behalf of a proc are handled after the
		 * system call that might be in progress for that proc has
		 * finished. If the proc is not busy, we start a new thread.
		 */
		worker_start(rfp, NULL, &m_in, FALSE /*use_spare*/);

		return;
	}
  case VFS_PM_FORK:
  case VFS_PM_SRV_FORK:
	{
		endpoint_t pproc_e, proc_e;
		pid_t child_pid;
		uid_t reuid;
		gid_t regid;

		pproc_e = m_in.VFS_PM_PENDPT;
		proc_e = m_in.VFS_PM_ENDPT;
		child_pid = m_in.VFS_PM_CPID;
		reuid = m_in.VFS_PM_REUID;
		regid = m_in.VFS_PM_REGID;

		pm_fork(pproc_e, proc_e, child_pid);
		m_out.m_type = VFS_PM_FORK_REPLY;

		if (call_nr == VFS_PM_SRV_FORK) {
			m_out.m_type = VFS_PM_SRV_FORK_REPLY;
			pm_setuid(proc_e, reuid, reuid);
			pm_setgid(proc_e, regid, regid);
		}

		m_out.VFS_PM_ENDPT = proc_e;
	}
	break;
  case VFS_PM_SETGROUPS:
	{
		endpoint_t proc_e;
		int group_no;
		gid_t *group_addr;

		proc_e = m_in.VFS_PM_ENDPT;
		group_no = m_in.VFS_PM_GROUP_NO;
		group_addr = (gid_t *) m_in.VFS_PM_GROUP_ADDR;

		pm_setgroups(proc_e, group_no, group_addr);

		m_out.m_type = VFS_PM_SETGROUPS_REPLY;
		m_out.VFS_PM_ENDPT = proc_e;
	}
	break;

  case VFS_PM_REBOOT:
	/* Reboot requests are not considered postponed PM work and are instead
	 * handled from a separate worker thread that is associated with PM's
	 * process. PM makes no regular VFS calls, and thus, from VFS's
	 * perspective, PM is always idle. Therefore, we can safely do this.
	 * We do assume that PM sends us only one VFS_PM_REBOOT message at
	 * once, or ever for that matter. :)
	 */
	worker_start(fproc_addr(PM_PROC_NR), pm_reboot, &m_in,
		FALSE /*use_spare*/);

	return;

    default:
	printf("VFS: don't know how to handle PM request %d\n", call_nr);

	return;
  }

  r = ipc_send(PM_PROC_NR, &m_out);
  if (r != OK)
	panic("service_pm: ipc_send failed: %d", r);
}
Example #26
int main(int argc, char** argv)
{
	log_file = stdout;
	init_log();
	D("process start");
	char ch[80];

	D("Start init settings!");

	if (argc > 1) {
		strcpy(conf_file, argv[1]);
	}
	else {
		strcpy(conf_file, "mspc.conf");
	}

	if (pxy_setting_init(conf_file) != 0) {
		D("settings initialization failed!\n");
		return -1;
	}

	if (pxy_init_logdb()) {
		E("init logdb failed!\n");
		return -1;
	}

	char time[32];
	char loggername[64];
	db_gettimestr(time, sizeof(time));
	snprintf(loggername, sizeof(loggername), "%s:%s:%d", __FILE__, __FUNCTION__, __LINE__);
	db_insert_log(30000,
		      0,
		      getpid(),
		      time,
		      loggername,
		      "mspc start...",
		      "",
		      "00000",
		      "",
		      "mspc",
		      setting.ip);
	
	if (pxy_init_master() < 0) {
		E("master initialize failed");
		return -1;
	}
	D("master initialized");


	/* TODO: Maybe we need remove the master-worker mode,
	   seems useless
	 */

	if (worker_init() < 0) {
		E("worker #%d initialization failed", getpid());
		return -1;
	}
	D("worker initialized");

	if (worker_start() < 0) {
		E("worker #%d start failed", getpid());
		return -1;
	}
	D("worker started");

	while(scanf("%s",ch) >= 0 && strcmp(ch,"quit") !=0) { 
	}

	sleep(5);
	D("pxy_master_close");
	pxy_master_close();
	return 1;
}
Example #27
/*===========================================================================*
 *				main					     *
 *===========================================================================*/
int main(void)
{
/* This is the main program of the file system.  The main loop consists of
 * three major activities: getting new work, processing the work, and sending
 * the reply.  This loop never terminates as long as the file system runs.
 */
  int transid;
  struct job *job;

  /* SEF local startup. */
  sef_local_startup();

  printf("Started VFS: %d worker thread(s)\n", NR_WTHREADS);

  if (OK != (sys_getkinfo(&kinfo)))
	panic("couldn't get kernel kinfo");

  /* This is the main loop that gets work, processes it, and sends replies. */
  while (TRUE) {
	yield_all();	/* let other threads run */
	self = NULL;
	job = NULL;
	send_work();
	get_work();

	transid = TRNS_GET_ID(m_in.m_type);
	if (IS_VFS_FS_TRANSID(transid)) {
		job = worker_getjob( (thread_t) transid - VFS_TRANSID);
		if (job == NULL) {
			printf("VFS: spurious message %d from endpoint %d\n",
				m_in.m_type, m_in.m_source);
			continue;
		}
		m_in.m_type = TRNS_DEL_ID(m_in.m_type);
	}

	if (job != NULL) {
		do_fs_reply(job);
		continue;
	} else if (who_e == PM_PROC_NR) { /* Calls from PM */
		/* Special control messages from PM */
		sys_worker_start(do_pm);
		continue;
	} else if (is_notify(call_nr)) {
		/* A task notify()ed us */
		if (who_e == DS_PROC_NR)
			worker_start(ds_event);
		else
			sys_worker_start(do_control_msgs);
		continue;
	} else if (who_p < 0) { /* i.e., message comes from a task */
		/* We're going to ignore this message. Tasks should
		 * send notify()s only.
		 */
		 printf("VFS: ignoring message from %d (%d)\n", who_e, call_nr);
		 continue;
	}

	/* At this point we either have results from an asynchronous device
	 * or a new system call. In both cases a new worker thread has to be
	 * started and there might not be one available from the pool. This is
	 * not a problem (requests/replies are simply queued), except when
	 * they're from an FS endpoint, because these can cause a deadlock.
	 * handle_work() takes care of the details. */
	if (IS_DRV_REPLY(call_nr)) {
		/* We've got results for a device request */

		struct dmap *dp;

		dp = get_dmap(who_e);
		if (dp != NULL) {
			if (dev_style_asyn(dp->dmap_style)) {
				handle_work(do_async_dev_result);

			} else {
				if (dp->dmap_servicing == NONE) {
					printf("Got spurious dev reply from %d",
					who_e);
				} else {
					dev_reply(dp);
				}
			}
			continue;
		}
		printf("VFS: ignoring dev reply from unknown driver %d\n",
			who_e);
	} else {
		/* Normal syscall. */
		handle_work(do_work);
	}
  }
  return(OK);				/* shouldn't come here */
}