Example #1
/*===========================================================================*
 *			       do_reply				             *
 *===========================================================================*/
static void do_reply(struct worker_thread *wp)
{
  struct vmnt *vmp = NULL;

  if(who_e != VM_PROC_NR && (vmp = find_vmnt(who_e)) == NULL)
	panic("Couldn't find vmnt for endpoint %d", who_e);

  if (wp->w_task != who_e) {
	printf("VFS: tid %d: expected %d to reply, not %d\n",
		wp->w_tid, wp->w_task, who_e);
	return;
  }
  /* It should be impossible to trigger the following case, but it is here for
   * consistency reasons: worker_stop() resets w_sendrec but not w_task.
   */
  if (wp->w_sendrec == NULL) {
	printf("VFS: tid %d: late reply from %d ignored\n", wp->w_tid, who_e);
	return;
  }
  *wp->w_sendrec = m_in;
  wp->w_sendrec = NULL;
  wp->w_task = NONE;
  if(vmp) vmp->m_comm.c_cur_reqs--; /* We've got our reply, make room for others */
  worker_signal(wp); /* Continue this thread */
}
Example #2
/*===========================================================================*
 *			       do_fs_reply				     *
 *===========================================================================*/
static void *do_fs_reply(struct job *job)
{
  struct vmnt *vmp;
  struct fproc *rfp;

  if ((vmp = find_vmnt(who_e)) == NULL)
	panic("Couldn't find vmnt for endpoint %d", who_e);

  rfp = job->j_fp;

  if (rfp == NULL || rfp->fp_endpoint == NONE) {
	printf("VFS: spurious reply from %d\n", who_e);
	return(NULL);
  }

  if (rfp->fp_task != who_e)
	printf("VFS: expected %d to reply, not %d\n", rfp->fp_task, who_e);
  *rfp->fp_sendrec = m_in;
  rfp->fp_task = NONE;
  vmp->m_comm.c_cur_reqs--; /* We've got our reply, make room for others */
  if (rfp->fp_wtid != invalid_thread_id)
	worker_signal(worker_get(rfp->fp_wtid)); /* Continue this thread */
  else
	printf("VFS: consistency error: reply for finished job\n");

  return(NULL);
}
Example #3
/*===========================================================================*
 *				fs_sendrec				     *
 *===========================================================================*/
int fs_sendrec(endpoint_t fs_e, message *reqmp)
{
  struct vmnt *vmp;
  int r;

  if ((vmp = find_vmnt(fs_e)) == NULL) {
	printf("Trying to talk to non-existent FS endpoint %d\n", fs_e);
	return(EIO);
  }
  if (fs_e == fp->fp_endpoint) return(EDEADLK);

  self->w_sendrec = reqmp;	/* Where to store request and reply */

  /* Find out whether we can send right away or have to enqueue */
  if (	!(vmp->m_flags & VMNT_CALLBACK) &&
	vmp->m_comm.c_cur_reqs < vmp->m_comm.c_max_reqs) {
	/* There's still room to send more and no proc is queued */
	r = sendmsg(vmp, vmp->m_fs_e, self);
  } else {
	r = queuemsg(vmp);
  }
  self->w_next = NULL;	/* End of list */

  if (r != OK) return(r);

  worker_wait();	/* Yield execution until we've received the reply. */

  return(reqmp->m_type);
}
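
The blocking send/receive shown above is normally driven by a small request wrapper. The sketch below is illustrative only: req_example and REQ_EXAMPLE are hypothetical names, and the only assumption about the message layout is that the reply's m_type carries the result code, which is what fs_sendrec() itself returns.

static int req_example(endpoint_t fs_e)
{
/* Hypothetical wrapper showing how a VFS request helper might drive
 * fs_sendrec(): build a request message, block until the FS replies,
 * and hand back the result code taken from the reply.
 */
  message m;

  memset(&m, 0, sizeof(m));
  m.m_type = REQ_EXAMPLE;	/* placeholder request code */

  /* fs_sendrec() stores the reply over 'm' and returns its m_type
   * (or an error such as EIO/EDEADLK before anything was sent).
   */
  return fs_sendrec(fs_e, &m);
}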
Example #4
/*===========================================================================*
 *			       do_fs_reply				     *
 *===========================================================================*/
static void *do_fs_reply(struct job *job)
{
  struct vmnt *vmp;
  struct worker_thread *wp;

  if ((vmp = find_vmnt(who_e)) == NULL)
	panic("Couldn't find vmnt for endpoint %d", who_e);

  wp = worker_get(job->j_fp->fp_wtid);

  if (wp == NULL) {
	printf("VFS: spurious reply from %d\n", who_e);
	return(NULL);
  }

  if (wp->w_task != who_e) {
	printf("VFS: expected %d to reply, not %d\n", wp->w_task, who_e);
	return(NULL);
  }
  *wp->w_fs_sendrec = m_in;
  wp->w_task = NONE;
  vmp->m_comm.c_cur_reqs--; /* We've got our reply, make room for others */
  worker_signal(wp); /* Continue this thread */
  return(NULL);
}
Example #5
/*===========================================================================*
 *			      vmnt_unmap_by_endpt			     *
 *===========================================================================*/
PUBLIC void vmnt_unmap_by_endpt(endpoint_t proc_e)
{
  struct vmnt *vmp;

  if ((vmp = find_vmnt(proc_e)) != NULL)
	clear_vmnt(vmp);

}
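
Every example in this listing resolves an endpoint to a mount-table entry through find_vmnt(). A minimal sketch of such a lookup, assuming a global vmnt[NR_MNTS] table whose in-use entries carry the managing FS endpoint in m_fs_e (the field names mirror those used in the examples; the exact in-use test is an assumption):

struct vmnt *find_vmnt(endpoint_t fs_e)
{
/* Sketch: return the in-use vmnt managed by FS endpoint 'fs_e', or NULL. */
  struct vmnt *vp;

  for (vp = &vmnt[0]; vp < &vmnt[NR_MNTS]; ++vp)
	if (vp->m_fs_e == fs_e && vp->m_dev != NO_DEV)	/* in-use check assumed */
		return(vp);

  return(NULL);
}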
Example #6
/*===========================================================================*
 *			       do_async_dev_result			     *
 *===========================================================================*/
static void *do_async_dev_result(void *arg)
{
  endpoint_t endpt;
  struct job my_job;

  my_job = *((struct job *) arg);
  fp = my_job.j_fp;

  /* An asynchronous character driver has results for us */
  if (job_call_nr == DEV_REVIVE) {
	endpt = job_m_in.REP_ENDPT;
	if (endpt == VFS_PROC_NR)
		endpt = find_suspended_ep(job_m_in.m_source,
					  job_m_in.REP_IO_GRANT);

	if (endpt == NONE) {
		printf("VFS: proc with grant %d from %d not found\n",
			job_m_in.REP_IO_GRANT, job_m_in.m_source);
	} else if (job_m_in.REP_STATUS == SUSPEND) {
		printf("VFS: got SUSPEND on DEV_REVIVE: not reviving proc\n");
	} else
		revive(endpt, job_m_in.REP_STATUS);
  }
  else if (job_call_nr == DEV_OPEN_REPL) open_reply();
  else if (job_call_nr == DEV_REOPEN_REPL) reopen_reply();
  else if (job_call_nr == DEV_CLOSE_REPL) close_reply();
  else if (job_call_nr == DEV_SEL_REPL1)
	select_reply1(job_m_in.m_source, job_m_in.DEV_MINOR,
		      job_m_in.DEV_SEL_OPS);
  else if (job_call_nr == DEV_SEL_REPL2)
	select_reply2(job_m_in.m_source, job_m_in.DEV_MINOR,
		      job_m_in.DEV_SEL_OPS);

  if (deadlock_resolving) {
	if (fp != NULL && fp->fp_wtid == dl_worker.w_tid)
		deadlock_resolving = 0;
  }

  if (fp != NULL && (fp->fp_flags & FP_SYS_PROC)) {
	struct vmnt *vmp;

	if ((vmp = find_vmnt(fp->fp_endpoint)) != NULL)
		vmp->m_flags &= ~VMNT_CALLBACK;
  }

  thread_cleanup(NULL);
  return(NULL);
}
Example #7
/*===========================================================================*
 *			       handle_work				     *
 *===========================================================================*/
static void handle_work(void *(*func)(void *arg))
{
/* Handle asynchronous device replies and new system calls. If the originating
 * endpoint is an FS endpoint, take extra care not to get in deadlock. */
  struct vmnt *vmp = NULL;
  endpoint_t proc_e;

  proc_e = m_in.m_source;

  if (fp->fp_flags & FP_SYS_PROC) {
	if (worker_available() == 0) {
		if (!deadlock_resolving) {
			if ((vmp = find_vmnt(proc_e)) != NULL) {
				/* A call back or dev result from an FS
				 * endpoint. Set call back flag. Can do only
				 * one call back at a time.
				 */
				if (vmp->m_flags & VMNT_CALLBACK) {
					reply(proc_e, EAGAIN);
					return;
				}
				vmp->m_flags |= VMNT_CALLBACK;

				/* When an FS endpoint has to make a call back
				 * in order to mount, force its device to a
				 * "none device" so block reads/writes will be
				 * handled by ROOT_FS_E.
				 */
				if (vmp->m_flags & VMNT_MOUNTING)
					vmp->m_flags |= VMNT_FORCEROOTBSF;
			}
			deadlock_resolving = 1;
			dl_worker_start(func);
			return;
		}
		/* Already trying to resolve a deadlock, can't
		 * handle more, sorry */

		reply(proc_e, EAGAIN);
		return;
	}
  }

  worker_start(func);
}
Example #8
/*===========================================================================*
 *				thread_cleanup				     *
 *===========================================================================*/
void thread_cleanup(void)
{
/* Perform cleanup actions for a worker thread. */

#if LOCK_DEBUG
  check_filp_locks_by_me();
  check_vnode_locks_by_me(fp);
  check_vmnt_locks_by_me(fp);
#endif

  if (fp->fp_flags & FP_SRV_PROC) {
	struct vmnt *vmp;

	if ((vmp = find_vmnt(fp->fp_endpoint)) != NULL) {
		vmp->m_flags &= ~VMNT_CALLBACK;
	}
  }
}
Example #9
/*===========================================================================*
 *				pm_reboot				     *
 *===========================================================================*/
void pm_reboot()
{
/* Perform the VFS side of the reboot call. */
  int i;
  struct fproc *rfp;

  do_sync();

  /* Do exit processing for all leftover processes and servers, but don't
   * actually exit them (if they were really gone, PM will tell us about it).
   * Skip processes that handle parts of the file system; we first need to give
   * them the chance to unmount (which should be possible as all normal
   * processes have no open files anymore).
   */
  for (i = 0; i < NR_PROCS; i++) {
	rfp = &fproc[i];

	/* Don't just free the proc right away, but let it finish what it was
	 * doing first */
	lock_proc(rfp, 0);
	if (rfp->fp_endpoint != NONE && find_vmnt(rfp->fp_endpoint) == NULL)
		free_proc(rfp, 0);
	unlock_proc(rfp);
  }

  do_sync();
  unmount_all(0 /* Don't force */);

  /* Try to exit all processes again including File Servers */
  for (i = 0; i < NR_PROCS; i++) {
	rfp = &fproc[i];

	/* Don't just free the proc right away, but let it finish what it was
	 * doing first */
	lock_proc(rfp, 0);
	if (rfp->fp_endpoint != NONE)
		free_proc(rfp, 0);
	unlock_proc(rfp);
  }

  do_sync();
  unmount_all(1 /* Force */);

}
Example #10
/*===========================================================================*
 *			       handle_work				     *
 *===========================================================================*/
static void handle_work(void (*func)(void))
{
/* Handle asynchronous device replies and new system calls. If the originating
 * endpoint is an FS endpoint, take extra care not to get in deadlock. */
  struct vmnt *vmp = NULL;
  endpoint_t proc_e;
  int use_spare = FALSE;

  proc_e = m_in.m_source;

  if (fp->fp_flags & FP_SRV_PROC) {
	vmp = find_vmnt(proc_e);
	if (vmp != NULL) {
		/* A callback from an FS endpoint. Can do only one at once. */
		if (vmp->m_flags & VMNT_CALLBACK) {
			replycode(proc_e, EAGAIN);
			return;
		}
		/* Already trying to resolve a deadlock? Can't handle more. */
		if (worker_available() == 0) {
			replycode(proc_e, EAGAIN);
			return;
		}
		/* A thread is available. Set callback flag. */
		vmp->m_flags |= VMNT_CALLBACK;
		if (vmp->m_flags & VMNT_MOUNTING) {
			vmp->m_flags |= VMNT_FORCEROOTBSF;
		}
	}

	/* Use the spare thread to handle this request if needed. */
	use_spare = TRUE;
  }

  worker_start(fp, func, &m_in, use_spare);
}
Example #11
/*===========================================================================*
 *			       do_work					     *
 *===========================================================================*/
static void *do_work(void *arg)
{
  int error;
  struct job my_job;

  my_job = *((struct job *) arg);
  fp = my_job.j_fp;

  lock_proc(fp, 0); /* This proc is busy */

  if (job_call_nr == MAPDRIVER) {
	error = do_mapdriver();
  } else if (job_call_nr == COMMON_GETSYSINFO) {
	error = do_getsysinfo();
  } else if (IS_PFS_VFS_RQ(job_call_nr)) {
	if (who_e != PFS_PROC_NR) {
		printf("VFS: only PFS is allowed to make nested VFS calls\n");
		error = ENOSYS;
	} else if (job_call_nr <= PFS_BASE ||
		   job_call_nr >= PFS_BASE + PFS_NREQS) {
		error = ENOSYS;
	} else {
		job_call_nr -= PFS_BASE;
		error = (*pfs_call_vec[job_call_nr])();
	}
  } else {
	/* We're dealing with a POSIX system call from a normal
	 * process. Call the internal function that does the work.
	 */
	if (job_call_nr < 0 || job_call_nr >= NCALLS) {
		error = ENOSYS;
	} else if (fp->fp_pid == PID_FREE) {
		/* Process vanished before we were able to handle request.
		 * Replying has no use. Just drop it. */
		error = SUSPEND;
	} else {
#if ENABLE_SYSCALL_STATS
		calls_stats[job_call_nr]++;
#endif
		error = (*call_vec[job_call_nr])();
	}
  }

  /* Copy the results back to the user and send reply. */
  if (error != SUSPEND) {

	if ((fp->fp_flags & FP_SYS_PROC)) {
		struct vmnt *vmp;

		if ((vmp = find_vmnt(fp->fp_endpoint)) != NULL)
			vmp->m_flags &= ~VMNT_CALLBACK;
	}

	if (deadlock_resolving) {
		if (fp->fp_wtid == dl_worker.w_tid)
			deadlock_resolving = 0;
	}

	reply(fp->fp_endpoint, error);
  }

  thread_cleanup(fp);
  return(NULL);
}
Example #12
/*===========================================================================*
 *				pm_reboot				     *
 *===========================================================================*/
void pm_reboot()
{
/* Perform the VFS side of the reboot call. This call is performed from the PM
 * process context.
 */
  message m_out;
  int i, r;
  struct fproc *rfp, *pmfp;

  pmfp = fp;

  do_sync();

  /* Do exit processing for all leftover processes and servers, but don't
   * actually exit them (if they were really gone, PM will tell us about it).
   * Skip processes that handle parts of the file system; we first need to give
   * them the chance to unmount (which should be possible as all normal
   * processes have no open files anymore).
   */
  /* This is the only place where we allow special modification of "fp". The
   * reboot procedure should really be implemented as a PM message broadcasted
   * to all processes, so that each process will be shut down cleanly by a
   * thread operating on its behalf. Doing everything here is simpler, but it
   * requires an exception to the strict model of having "fp" be the process
   * that owns the current worker thread.
   */
  for (i = 0; i < NR_PROCS; i++) {
	rfp = &fproc[i];

	/* Don't just free the proc right away, but let it finish what it was
	 * doing first */
	if (rfp != fp) lock_proc(rfp);
	if (rfp->fp_endpoint != NONE && find_vmnt(rfp->fp_endpoint) == NULL) {
		worker_set_proc(rfp);	/* temporarily fake process context */
		free_proc(0);
		worker_set_proc(pmfp);	/* restore original process context */
	}
	if (rfp != fp) unlock_proc(rfp);
  }

  do_sync();
  unmount_all(0 /* Don't force */);

  /* Try to exit all processes again including File Servers */
  for (i = 0; i < NR_PROCS; i++) {
	rfp = &fproc[i];

	/* Don't just free the proc right away, but let it finish what it was
	 * doing first */
	if (rfp != fp) lock_proc(rfp);
	if (rfp->fp_endpoint != NONE) {
		worker_set_proc(rfp);	/* temporarily fake process context */
		free_proc(0);
		worker_set_proc(pmfp);	/* restore original process context */
	}
	if (rfp != fp) unlock_proc(rfp);
  }

  do_sync();
  unmount_all(1 /* Force */);

  /* Reply to PM for synchronization */
  memset(&m_out, 0, sizeof(m_out));

  m_out.m_type = VFS_PM_REBOOT_REPLY;

  if ((r = ipc_send(PM_PROC_NR, &m_out)) != OK)
	panic("pm_reboot: ipc_send failed: %d", r);
}
Example #13
/*===========================================================================*
 *				do_pipe					     *
 *===========================================================================*/
int do_pipe()
{
/* Perform the pipe(fil_des) system call. */

  register struct fproc *rfp;
  int r;
  struct filp *fil_ptr0, *fil_ptr1;
  int fil_des[2];		/* reply goes here */
  struct vnode *vp;
  struct vmnt *vmp;
  struct node_details res;

  /* Get a lock on PFS */
  if ((vmp = find_vmnt(PFS_PROC_NR)) == NULL) panic("PFS gone");
  if ((r = lock_vmnt(vmp, VMNT_READ)) != OK) return(r);

  /* See if a free vnode is available */
  if ((vp = get_free_vnode()) == NULL) {
	unlock_vmnt(vmp);
	return(err_code);
  }
  lock_vnode(vp, VNODE_OPCL);

  /* Acquire two file descriptors. */
  rfp = fp;
  if ((r = get_fd(0, R_BIT, &fil_des[0], &fil_ptr0)) != OK) {
	unlock_vnode(vp);
	unlock_vmnt(vmp);
	return(r);
  }
  rfp->fp_filp[fil_des[0]] = fil_ptr0;
  FD_SET(fil_des[0], &rfp->fp_filp_inuse);
  fil_ptr0->filp_count = 1;		/* mark filp in use */
  if ((r = get_fd(0, W_BIT, &fil_des[1], &fil_ptr1)) != OK) {
	rfp->fp_filp[fil_des[0]] = NULL;
	FD_CLR(fil_des[0], &rfp->fp_filp_inuse);
	fil_ptr0->filp_count = 0;	/* mark filp free */
	unlock_filp(fil_ptr0);
	unlock_vnode(vp);
	unlock_vmnt(vmp);
	return(r);
  }
  rfp->fp_filp[fil_des[1]] = fil_ptr1;
  FD_SET(fil_des[1], &rfp->fp_filp_inuse);
  fil_ptr1->filp_count = 1;

  /* Create a named pipe inode on PipeFS */
  r = req_newnode(PFS_PROC_NR, fp->fp_effuid, fp->fp_effgid, I_NAMED_PIPE,
		  NO_DEV, &res);

  if (r != OK) {
	rfp->fp_filp[fil_des[0]] = NULL;
	FD_CLR(fil_des[0], &rfp->fp_filp_inuse);
	fil_ptr0->filp_count = 0;
	rfp->fp_filp[fil_des[1]] = NULL;
	FD_CLR(fil_des[1], &rfp->fp_filp_inuse);
	fil_ptr1->filp_count = 0;
	unlock_filp(fil_ptr1);
	unlock_filp(fil_ptr0);
	unlock_vnode(vp);
	unlock_vmnt(vmp);
	return(r);
  }

  /* Fill in vnode */
  vp->v_fs_e = res.fs_e;
  vp->v_mapfs_e = res.fs_e;
  vp->v_inode_nr = res.inode_nr;
  vp->v_mapinode_nr = res.inode_nr;
  vp->v_mode = res.fmode;
  vp->v_fs_count = 1;
  vp->v_mapfs_count = 1;
  vp->v_ref_count = 1;
  vp->v_size = 0;
  vp->v_vmnt = NULL;
  vp->v_dev = NO_DEV;

  /* Fill in filp objects */
  fil_ptr0->filp_vno = vp;
  dup_vnode(vp);
  fil_ptr1->filp_vno = vp;
  fil_ptr0->filp_flags = O_RDONLY;
  fil_ptr1->filp_flags = O_WRONLY;

  m_out.reply_i1 = fil_des[0];
  m_out.reply_i2 = fil_des[1];

  unlock_filps(fil_ptr0, fil_ptr1);
  unlock_vmnt(vmp);

  return(OK);
}
Example #14
/*===========================================================================*
 *				create_pipe				     *
 *===========================================================================*/
static int create_pipe(int fil_des[2], int flags)
{
  register struct fproc *rfp;
  int r;
  struct filp *fil_ptr0, *fil_ptr1;
  struct vnode *vp;
  struct vmnt *vmp;
  struct node_details res;

  /* Get a lock on PFS */
  if ((vmp = find_vmnt(PFS_PROC_NR)) == NULL) panic("PFS gone");
  if ((r = lock_vmnt(vmp, VMNT_READ)) != OK) return(r);

  /* See if a free vnode is available */
  if ((vp = get_free_vnode()) == NULL) {
	unlock_vmnt(vmp);
	return(err_code);
  }
  lock_vnode(vp, VNODE_OPCL);

  /* Acquire two file descriptors. */
  rfp = fp;
  if ((r = get_fd(fp, 0, R_BIT, &fil_des[0], &fil_ptr0)) != OK) {
	unlock_vnode(vp);
	unlock_vmnt(vmp);
	return(r);
  }
  rfp->fp_filp[fil_des[0]] = fil_ptr0;
  fil_ptr0->filp_count = 1;		/* mark filp in use */
  if ((r = get_fd(fp, 0, W_BIT, &fil_des[1], &fil_ptr1)) != OK) {
	rfp->fp_filp[fil_des[0]] = NULL;
	fil_ptr0->filp_count = 0;	/* mark filp free */
	unlock_filp(fil_ptr0);
	unlock_vnode(vp);
	unlock_vmnt(vmp);
	return(r);
  }
  rfp->fp_filp[fil_des[1]] = fil_ptr1;
  fil_ptr1->filp_count = 1;

  /* Create a named pipe inode on PipeFS */
  r = req_newnode(PFS_PROC_NR, fp->fp_effuid, fp->fp_effgid, I_NAMED_PIPE,
		  NO_DEV, &res);

  if (r != OK) {
	rfp->fp_filp[fil_des[0]] = NULL;
	fil_ptr0->filp_count = 0;
	rfp->fp_filp[fil_des[1]] = NULL;
	fil_ptr1->filp_count = 0;
	unlock_filp(fil_ptr1);
	unlock_filp(fil_ptr0);
	unlock_vnode(vp);
	unlock_vmnt(vmp);
	return(r);
  }

  /* Fill in vnode */
  vp->v_fs_e = res.fs_e;
  vp->v_mapfs_e = res.fs_e;
  vp->v_inode_nr = res.inode_nr;
  vp->v_mapinode_nr = res.inode_nr;
  vp->v_mode = res.fmode;
  vp->v_fs_count = 1;
  vp->v_mapfs_count = 1;
  vp->v_ref_count = 1;
  vp->v_size = 0;
  vp->v_vmnt = NULL;
  vp->v_dev = NO_DEV;

  /* Fill in filp objects */
  fil_ptr0->filp_vno = vp;
  dup_vnode(vp);
  fil_ptr1->filp_vno = vp;
  fil_ptr0->filp_flags = O_RDONLY | (flags & ~O_ACCMODE);
  fil_ptr1->filp_flags = O_WRONLY | (flags & ~O_ACCMODE);
  if (flags & O_CLOEXEC) {
	FD_SET(fil_des[0], &rfp->fp_cloexec_set);
	FD_SET(fil_des[1], &rfp->fp_cloexec_set);
  }

  unlock_filps(fil_ptr0, fil_ptr1);
  unlock_vmnt(vmp);

  return(OK);
}
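
create_pipe() above only builds the in-VFS objects; its caller still has to pull the flags out of the request and hand the two descriptors back. A hypothetical pipe2()-style entry point might look like the sketch below; job_m_out and the message field names follow the job_m_in naming seen in the other examples but are placeholders, not the real VFS message layout.

int do_pipe2(void)
{
/* Hypothetical caller of create_pipe(), for illustration only. */
  int r, flags, fil_des[2];

  flags = job_m_in.pipe_flags;			/* placeholder field name */

  if ((r = create_pipe(fil_des, flags)) != OK)
	return(r);

  job_m_out.pipe_fd0 = fil_des[0];		/* placeholder field names */
  job_m_out.pipe_fd1 = fil_des[1];

  return(OK);
}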