Example #1
int setup_child_process(job_t *j, process_t *p, const io_chain_t &io_chain)
{
    bool ok=true;

    if (p)
    {
        ok = (0 == set_child_group(j, p, 1));
    }

    if (ok)
    {
        ok = (0 == handle_child_io(io_chain));
        if (p != 0 && ! ok)
        {
            exit_without_destructors(1);
        }
    }

    /* Set the handling for job control signals back to the default.  */
    if (ok)
    {
        signal_reset_handlers();
    }

    /* Remove all signal blocks */
    signal_unblock();

    return ok ? 0 : -1;
}
Example #2
File: exec.c Project: CodeMonk/fish
/**
   Initialize a new child process. This should be called right away
   after forking in the child process. If job control is enabled for
   this job, the process is put in the process group of the job, all
   signal handlers are reset, signals are unblocked (this function may
   only be called inside the exec function, which blocks all signals),
   and all IO redirections and other file descriptor actions are
   performed.

   \param j the job to set up the IO for
   \param p the child process to set up

   \return 0 on success, -1 on failure. When this function returns,
   signals are always unblocked. On failure, the signal handlers, IO
   redirections and process group of the process are undefined.
*/
static int setup_child_process( job_t *j, process_t *p )
{
	int res=0;
	
	if( p )
	{
		res = set_child_group( j, p, 1 );
	}
	
	if( !res )	
	{
		res = handle_child_io( j->io );
		if( p != 0 && res )
		{
			exit( 1 );
		}
	}
	
	/* Set the handling for job control signals back to the default.  */
	if( !res )
	{
		signal_reset_handlers();
	}
	
	/* Remove all signal blocks */
	signal_unblock();	
	
	return res;
	
}
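
The contract in the comment above is easiest to see from a call site. The sketch below mirrors the EXTERNAL-command path in Example #4: the parent blocks signals before forking, the child calls setup_child_process() immediately after fork (which applies the redirections and unblocks signals), and the parent records the pid, assigns the process group and drops its own signal block. It reuses the helpers named in these examples (exec_fork, launch_process, set_child_group, signal_unblock); the wrapper name spawn_external is made up for illustration, and error handling is omitted.

static void spawn_external( job_t *j, process_t *p )
{
	/* Signals must be blocked across the fork; the child and the
	   parent each unblock them on their own side. */
	signal_block();

	pid_t pid = exec_fork();
	if( pid == 0 )
	{
		/* Child: record our pid, then let setup_child_process set the
		   process group, reset signal handlers, unblock signals and
		   apply the IO redirections before exec. */
		p->pid = getpid();
		setup_child_process( j, p );
		launch_process( p ); /* never returns */
	}

	/* Parent: store the child's pid, put it in the job's process
	   group, and remove our own signal block. */
	p->pid = pid;
	set_child_group( j, p, 0 );
	signal_unblock();
}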
Example #3
/// Call fork() as part of executing a process \p p in a job \p j. Execute \p child_action in the
/// context of the child. Returns true if fork succeeded, false if fork failed.
static bool fork_child_for_process(job_t *j, process_t *p, const io_chain_t &io_chain,
                                   bool drain_threads, const char *fork_type,
                                   const std::function<void()> &child_action) {
    pid_t pid = execute_fork(drain_threads);
    if (pid == 0) {
        // This is the child process. Setup redirections, print correct output to
        // stdout and stderr, and then exit.
        p->pid = getpid();
        child_set_group(j, p);
        setup_child_process(p, io_chain);
        child_action();
        DIE("Child process returned control to fork_child lambda!");
    }

    if (pid < 0) {
        debug(1, L"Failed to fork %s!\n", fork_type);
        job_mark_process_as_failed(j, p);
        return false;
    }

    // This is the parent process. Store away information on the child, and
    // possibly give it control over the terminal.
    debug(4, L"Fork #%d, pid %d: %s for '%ls'", g_fork_count, pid, fork_type, p->argv0());

    p->pid = pid;
    on_process_created(j, p->pid);
    set_child_group(j, p->pid);
    maybe_assign_terminal(j);
    return true;
}
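
A typical call site packages the child-side work in the child_action lambda, as Example #5 below does when launching an external command. A minimal sketch of that pattern follows; argv, envv and actual_cmd are assumed to have been prepared before the call, as in that example.

// Hypothetical call site mirroring Example #5: the lambda runs only in the
// child, after redirections and process-group setup have been performed.
if (!fork_child_for_process(j, p, io_chain, false /* drain_threads */, "external command",
                            [&] { safe_launch_process(p, actual_cmd, argv, envv); })) {
    return false;  // fork failed; the process has already been marked as failed
}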
Example #4
File: exec.c Project: CodeMonk/fish
void exec( job_t *j )
{
	process_t *p;
	pid_t pid;
	int mypipe[2];
	sigset_t chldset; 
	int skip_fork;
	
	io_data_t pipe_read, pipe_write;
	io_data_t *tmp;

	io_data_t *io_buffer =0;

	/*
	  Set to 1 if something goes wrong while executing the job, in
	  which case the cleanup code will kick in.
	*/
	int exec_error=0;

	int needs_keepalive = 0;
	process_t keepalive;
	

	CHECK( j, );
	CHECK_BLOCK();
	
	if( no_exec )
		return;
	
	sigemptyset( &chldset );
	sigaddset( &chldset, SIGCHLD );
	
	debug( 4, L"Exec job '%ls' with id %d", j->command, j->job_id );	
	
	if( block_io )
	{
		if( j->io )
		{
			j->io = io_add( io_duplicate( j, block_io), j->io );
		}
		else
		{
			j->io=io_duplicate( j, block_io);				
		}
	}

	
	io_data_t *input_redirect;

	for( input_redirect = j->io; input_redirect; input_redirect = input_redirect->next )
	{
		if( (input_redirect->io_mode == IO_BUFFER) && 
			input_redirect->is_input )
		{
			/*
			  Input redirection - create a new go-between process to take
			  care of buffering
			*/
			process_t *fake = halloc( j, sizeof(process_t) );
			fake->type = INTERNAL_BUFFER;
			fake->pipe_write_fd = 1;
			j->first_process->pipe_read_fd = input_redirect->fd;
			fake->next = j->first_process;
			j->first_process = fake;
			break;
		}
	}
	
	if( j->first_process->type==INTERNAL_EXEC )
	{
		/*
		  Do a regular launch -  but without forking first...
		*/
		signal_block();

		/*
		  setup_child_process makes sure signals are properly set
		  up. It will also call signal_unblock
		*/
		if( !setup_child_process( j, 0 ) )
		{
			/*
			  launch_process _never_ returns
			*/
			launch_process( j->first_process );
		}
		else
		{
			job_set_flag( j, JOB_CONSTRUCTED, 1 );
			j->first_process->completed=1;
			return;
		}

	}	

	pipe_read.fd=0;
	pipe_write.fd=1;
	pipe_read.io_mode=IO_PIPE;
	pipe_read.param1.pipe_fd[0] = -1;
	pipe_read.param1.pipe_fd[1] = -1;
	pipe_read.is_input = 1;

	pipe_write.io_mode=IO_PIPE;
	pipe_write.is_input = 0;
	pipe_read.next=0;
	pipe_write.next=0;
	pipe_write.param1.pipe_fd[0]=pipe_write.param1.pipe_fd[1]=-1;
	
	j->io = io_add( j->io, &pipe_write );
	
	signal_block();

	/*
	  See if we need to create a group keepalive process. This is
	  a process that we create to make sure that the process group
	  doesn't die accidentally, and is often needed when a
	  builtin/block/function is inside a pipeline, since that
	  usually means we have to wait for one program to exit before
	  continuing in the pipeline, causing the group leader to
	  exit.
	*/
	
	if( job_get_flag( j, JOB_CONTROL ) )
	{
		for( p=j->first_process; p; p = p->next )
		{
			if( p->type != EXTERNAL )
			{
				if( p->next )
				{
					needs_keepalive = 1;
					break;
				}
				if( p != j->first_process )
				{
					needs_keepalive = 1;
					break;
				}
				
			}
			
		}
	}
		
	if( needs_keepalive )
	{
		keepalive.pid = exec_fork();

		if( keepalive.pid == 0 )
		{
			keepalive.pid = getpid();
			set_child_group( j, &keepalive, 1 );
			pause();			
			exit(0);
		}
		else
		{
			set_child_group( j, &keepalive, 0 );			
		}
	}
	
	/*
	  This loop loops over every process_t in the job, starting it as
	  appropriate. This turns out to be rather complex, since a
	  process_t can be one of many rather different things.

	  The loop also has to handle pipelining between the jobs.
	*/

	for( p=j->first_process; p; p = p->next )
	{
		mypipe[1]=-1;
		skip_fork=0;
		
		pipe_write.fd = p->pipe_write_fd;
		pipe_read.fd = p->pipe_read_fd;
//		debug( 0, L"Pipe created from fd %d to fd %d", pipe_write.fd, pipe_read.fd );
		

		/* 
		   This call is used so the global environment variable array
		   is regenerated, if needed, before the fork. That way, we
		   avoid a lot of duplicate work where EVERY child would need
		   to generate it, since that result would not get written
		   back to the parent. This call could be safely removed, but
		   it would result in slightly lower performance - at least on
		   uniprocessor systems.
		*/
		if( p->type == EXTERNAL )
			env_export_arr( 1 );
		
		
		/*
		  Set up the fds that will be used in the pipe
		*/
		
		if( p == j->first_process->next )
		{
			j->io = io_add( j->io, &pipe_read );
		}
		
		if( p->next )
		{
//			debug( 1, L"%ls|%ls" , p->argv[0], p->next->argv[0]);
			
			if( exec_pipe( mypipe ) == -1 )
			{
				debug( 1, PIPE_ERROR );
				wperror (L"pipe");
				exec_error=1;
				break;
			}

			memcpy( pipe_write.param1.pipe_fd, mypipe, sizeof(int)*2);
		}
		else
		{
			/*
			  This is the last element of the pipeline.
			  Remove the io redirection for pipe output.
			*/
			j->io = io_remove( j->io, &pipe_write );
			
		}

		switch( p->type )
		{
			case INTERNAL_FUNCTION:
			{
				const wchar_t * orig_def;
				wchar_t * def=0;
				array_list_t *named_arguments;
				int shadows;
				

				/*
				  Calls to function_get_definition might need to
				  source a file as a part of autoloading, hence there
				  must be no blocks.
				*/

				signal_unblock();
				orig_def = function_get_definition( p->argv[0] );
				named_arguments = function_get_named_arguments( p->argv[0] );
				shadows = function_get_shadows( p->argv[0] );

				signal_block();
				
				if( orig_def )
				{
					def = halloc_register( j, wcsdup(orig_def) );
				}
				if( def == 0 )
				{
					debug( 0, _( L"Unknown function '%ls'" ), p->argv[0] );
					break;
				}

				parser_push_block( shadows?FUNCTION_CALL:FUNCTION_CALL_NO_SHADOW );
				
				current_block->param2.function_call_process = p;
				current_block->param1.function_call_name = halloc_register( current_block, wcsdup( p->argv[0] ) );
						

				/*
				  set_argv might trigger an event
				  handler, hence we need to unblock
				  signals.
				*/
				signal_unblock();
				parse_util_set_argv( p->argv+1, named_arguments );
				signal_block();
								
				parser_forbid_function( p->argv[0] );

				if( p->next )
				{
					io_buffer = io_buffer_create( 0 );					
					j->io = io_add( j->io, io_buffer );
				}
				
				internal_exec_helper( def, TOP, j->io );
				
				parser_allow_function();
				parser_pop_block();
				
				break;				
			}
			
			case INTERNAL_BLOCK:
			{
				if( p->next )
				{
					io_buffer = io_buffer_create( 0 );					
					j->io = io_add( j->io, io_buffer );
				}
								
				internal_exec_helper( p->argv[0], TOP, j->io );			
				break;
				
			}

			case INTERNAL_BUILTIN:
			{
				int builtin_stdin=0;
				int fg;
				int close_stdin=0;

				/*
				  If this is the first process, check the io
				  redirections and see where we should be reading
				  from.
				*/
				if( p == j->first_process )
				{
					io_data_t *in = io_get( j->io, 0 );
					
					if( in )
					{
						switch( in->io_mode )
						{
							
							case IO_FD:
							{
								builtin_stdin = in->param1.old_fd;
								break;
							}
							case IO_PIPE:
							{
								builtin_stdin = in->param1.pipe_fd[0];
								break;
							}
							
							case IO_FILE:
							{
								builtin_stdin=wopen( in->param1.filename,
                                              in->param2.flags, OPEN_MASK );
								if( builtin_stdin == -1 )
								{
									debug( 1, 
										   FILE_ERROR,
										   in->param1.filename );
									wperror( L"open" );
								}
								else
								{
									close_stdin = 1;
								}
								
								break;
							}
	
							case IO_CLOSE:
							{
								/*
								  FIXME: When requesting that stdin be
								  closed, we don't really do anything.
								  How should this be handled?
								 */
								builtin_stdin = -1;
								
								break;
							}
							
							default:
							{
								builtin_stdin=-1;
								debug( 1, 
									   _( L"Unknown input redirection type %d" ),
									   in->io_mode);
								break;
							}
						
						}
					}
				}
				else
				{
					builtin_stdin = pipe_read.param1.pipe_fd[0];
				}

				if( builtin_stdin == -1 )
				{
					exec_error=1;
					break;
				}
				else
				{
					int old_out = builtin_out_redirect;
					int old_err = builtin_err_redirect;

					/* 
					   Since this may be the foreground job, and since
					   a builtin may execute another foreground job,
					   we need to pretend to suspend this job while
					   running the builtin, in order to avoid a
					   situation where two jobs are running at once.

					   The reason this is done here, and not by the
					   relevant builtins, is that this way, the
					   builtin does not need to know what job it is
					   part of. It could probably figure that out by
					   walking the job list, but it seems more robust
					   to make exec handle things.
					*/
					
					builtin_push_io( builtin_stdin );
					
					builtin_out_redirect = has_fd( j->io, 1 );
					builtin_err_redirect = has_fd( j->io, 2 );		

					fg = job_get_flag( j, JOB_FOREGROUND );
					job_set_flag( j, JOB_FOREGROUND, 0 );
					
					signal_unblock();
					
					p->status = builtin_run( p->argv, j->io );
					
					builtin_out_redirect=old_out;
					builtin_err_redirect=old_err;
					
					signal_block();
					
					/*
					  Restore the fg flag, which is temporarily set to
					  false during builtin execution so as not to confuse
					  some job-handling builtins.
					*/
					job_set_flag( j, JOB_FOREGROUND, fg );
				}
				
				/*
				  If stdin has been redirected, close the redirection
				  stream.
				*/
				if( close_stdin )
				{
					exec_close( builtin_stdin );
				}				
				break;				
			}
		}
		
		if( exec_error )
		{
			break;
		}
		
		switch( p->type )
		{

			case INTERNAL_BLOCK:
			case INTERNAL_FUNCTION:
			{
				int status = proc_get_last_status();
						
				/*
				  Handle output from a block or function. This usually
				  means do nothing, but in the case of pipes, we have
				  to buffer such io, since otherwise the internal pipe
				  buffer might overflow.
				*/
				if( !io_buffer )
				{
					/*
					  No buffer, so we exit directly. This means we
					  have to manually set the exit status.
					*/
					if( p->next == 0 )
					{
						proc_set_last_status( job_get_flag( j, JOB_NEGATE )?(!status):status);
					}
					p->completed = 1;
					break;
				}

				j->io = io_remove( j->io, io_buffer );
				
				io_buffer_read( io_buffer );
				
				if( io_buffer->param2.out_buffer->used != 0 )
				{
					pid = exec_fork();

					if( pid == 0 )
					{
						
						/*
						  This is the child process. Write out the contents of the pipeline.
						*/
						p->pid = getpid();
						setup_child_process( j, p );

						exec_write_and_exit(io_buffer->fd, 
											io_buffer->param2.out_buffer->buff,
											io_buffer->param2.out_buffer->used,
											status);
					}
					else
					{
						/* 
						   This is the parent process. Store away
						   information on the child, and possibly give
						   it control over the terminal.
						*/
						p->pid = pid;						
						set_child_group( j, p, 0 );
												
					}					
					
				}
				else
				{
					if( p->next == 0 )
					{
						proc_set_last_status( job_get_flag( j, JOB_NEGATE )?(!status):status);
					}
					p->completed = 1;
				}
				
				io_buffer_destroy( io_buffer );
				
				io_buffer=0;
				break;
				
			}


			case INTERNAL_BUFFER:
			{
		
				pid = exec_fork();
				
				if( pid == 0 )
				{
					/*
					  This is the child process. Write out the
					  contents of the pipeline.
					*/
					p->pid = getpid();
					setup_child_process( j, p );
					
					exec_write_and_exit( 1,
										 input_redirect->param2.out_buffer->buff, 
										 input_redirect->param2.out_buffer->used,
										 0);
				}
				else
				{
					/* 
					   This is the parent process. Store away
					   information on the child, and possibly give
					   it control over the terminal.
					*/
					p->pid = pid;						
					set_child_group( j, p, 0 );	
				}	

				break;				
			}
			
			case INTERNAL_BUILTIN:
			{
				int skip_fork;
				
				/*
				  Handle output from builtin commands. In the general
				  case, this means forking off a worker process that
				  will write out the contents of the stdout and stderr
				  buffers to the correct file descriptor. Since
				  forking is expensive, fish tries to avoid it when
				  possible.
				*/

				/*
				  If a builtin didn't produce any output, and it is
				  not inside a pipeline, there is no need to fork
				*/
				skip_fork =
					( !sb_out->used ) &&
					( !sb_err->used ) &&
					( !p->next );
	
				/*
				  If the output of a builtin is to be sent to an internal
				  buffer, there is no need to fork. This helps out the
				  performance quite a bit in complex completion code.
				*/

				io_data_t *io = io_get( j->io, 1 );
				int buffer_stdout = io && io->io_mode == IO_BUFFER;
				
				if( ( !sb_err->used ) && 
					( !p->next ) &&
					( sb_out->used ) && 
					( buffer_stdout ) )
				{
					char *res = wcs2str( (wchar_t *)sb_out->buff );
					b_append( io->param2.out_buffer, res, strlen( res ) );
					skip_fork = 1;
					free( res );
				}

				for( io = j->io; io; io=io->next )
				{
					if( io->io_mode == IO_FILE && wcscmp(io->param1.filename, L"/dev/null" ))
					{
						skip_fork = 0;
					}
				}
				
				if( skip_fork )
				{
					p->completed=1;
					if( p->next == 0 )
					{
						debug( 3, L"Set status of %ls to %d using short circuit", j->command, p->status );
						
						int status = proc_format_status(p->status);
						proc_set_last_status( job_get_flag( j, JOB_NEGATE )?(!status):status );
					}
					break;
				}

				/*
				  Ok, unfortunately, we have to do a real fork. Bummer.
				*/
								
				pid = exec_fork();
				if( pid == 0 )
				{

					/*
					  This is the child process. Setup redirections,
					  print correct output to stdout and stderr, and
					  then exit.
					*/
					p->pid = getpid();
					setup_child_process( j, p );
					do_builtin_io( sb_out->used ? (wchar_t *)sb_out->buff : 0, sb_err->used ? (wchar_t *)sb_err->buff : 0 );
					
					exit( p->status );
						
				}
				else
				{
					/* 
					   This is the parent process. Store away
					   information on the child, and possibly give
					   it control over the terminal.
					*/
					p->pid = pid;
						
					set_child_group( j, p, 0 );
										
				}					
				
				break;
			}
			
			case EXTERNAL:
			{
				pid = exec_fork();
				if( pid == 0 )
				{
					/*
					  This is the child process. 
					*/
					p->pid = getpid();
					setup_child_process( j, p );
					launch_process( p );
					
					/*
					  launch_process _never_ returns...
					*/
				}
				else
				{
					/* 
					   This is the parent process. Store away
					   information on the child, and possibly give
					   it control over the terminal.
					*/
					p->pid = pid;

					set_child_group( j, p, 0 );
															
				}
				break;
			}
			
		}

		if( p->type == INTERNAL_BUILTIN )
			builtin_pop_io();
				
		/* 
		   Close the pipe the current process uses to read from the
		   previous process_t
		*/
		if( pipe_read.param1.pipe_fd[0] >= 0 )
			exec_close( pipe_read.param1.pipe_fd[0] );
		/* 
		   Set up the pipe the next process uses to read from the
		   current process_t
		*/
		if( p->next )
			pipe_read.param1.pipe_fd[0] = mypipe[0];
		
		/* 
		   If there is a next process in the pipeline, close the
		   output end of the current pipe (the current child
		   subprocess already has a copy of the pipe - this makes sure
		   we don't leak file descriptors either in the shell or in
		   the children).
		*/
		if( p->next )
		{
			exec_close(mypipe[1]);
		}		
	}

	/*
	  The keepalive process is no longer needed, so we terminate it
	  with extreme prejudice
	*/
	if( needs_keepalive )
	{
		kill( keepalive.pid, SIGKILL );
	}
	
	signal_unblock();	

	debug( 3, L"Job is constructed" );

	j->io = io_remove( j->io, &pipe_read );

	for( tmp = block_io; tmp; tmp=tmp->next )
		j->io = io_remove( j->io, tmp );
	
	job_set_flag( j, JOB_CONSTRUCTED, 1 );

	if( !job_get_flag( j, JOB_FOREGROUND ) )
	{
		proc_last_bg_pid = j->pgid;
	}

	if( !exec_error )
	{
		job_continue (j, 0);
	}
	
}
Example #5
/// Executes an external command.
/// \return true on success, false if there is an exec error.
static bool exec_external_command(job_t *j, process_t *p, const io_chain_t &proc_io_chain) {
    assert(p->type == EXTERNAL && "Process is not external");
    // Get argv and envv before we fork.
    null_terminated_array_t<char> argv_array;
    convert_wide_array_to_narrow(p->get_argv_array(), &argv_array);

    // Ensure that stdin is blocking before we hand it off (see issue #176). It's a
    // little strange that we only do this with stdin and not with stdout or stderr.
    // However in practice, setting or clearing O_NONBLOCK on stdin also sets it for the
    // other two fds, presumably because they refer to the same underlying file
    // (/dev/tty?).
    make_fd_blocking(STDIN_FILENO);

    const char *const *argv = argv_array.get();
    const char *const *envv = env_export_arr();

    std::string actual_cmd_str = wcs2string(p->actual_cmd);
    const char *actual_cmd = actual_cmd_str.c_str();
    const wchar_t *file = reader_current_filename();

#if FISH_USE_POSIX_SPAWN
    // Prefer to use posix_spawn, since it's faster on some systems like OS X.
    bool use_posix_spawn = g_use_posix_spawn && can_use_posix_spawn_for_job(j, p);
    if (use_posix_spawn) {
        g_fork_count++;  // spawn counts as a fork+exec
        // Create posix spawn attributes and actions.
        pid_t pid = 0;
        posix_spawnattr_t attr = posix_spawnattr_t();
        posix_spawn_file_actions_t actions = posix_spawn_file_actions_t();
        bool made_it = fork_actions_make_spawn_properties(&attr, &actions, j, p, proc_io_chain);
        if (made_it) {
            // We successfully made the attributes and actions; actually call
            // posix_spawn.
            int spawn_ret =
                posix_spawn(&pid, actual_cmd, &actions, &attr, const_cast<char *const *>(argv),
                            const_cast<char *const *>(envv));

            // This usleep can be used to test for various race conditions
            // (https://github.com/fish-shell/fish-shell/issues/360).
            // usleep(10000);

            if (spawn_ret != 0) {
                safe_report_exec_error(spawn_ret, actual_cmd, argv, envv);
                // Make sure our pid isn't set.
                pid = 0;
            }

            // Clean up our actions.
            posix_spawn_file_actions_destroy(&actions);
            posix_spawnattr_destroy(&attr);
        }

        // A 0 pid means we failed to posix_spawn. Since we have no pid, we'll never get
        // told when it's exited, so we have to mark the process as failed.
        debug(4, L"Fork #%d, pid %d: spawn external command '%s' from '%ls'", g_fork_count, pid,
              actual_cmd, file ? file : L"<no file>");
        if (pid == 0) {
            job_mark_process_as_failed(j, p);
            return false;
        }

        // these are all things do_fork() takes care of normally (for forked processes):
        p->pid = pid;
        on_process_created(j, p->pid);

        // We explicitly don't call set_child_group() for spawned processes because that
        // a) isn't necessary, and b) causes issues like fish-shell/fish-shell#4715

#if defined(__GLIBC__)
        // Unfortunately, using posix_spawn() is not the panacea it would appear to be;
        // glibc has a penchant for using fork() instead of vfork() when posix_spawn() is
        // called, meaning that atomicity is not guaranteed and we can get here before the
        // child group has been set. See discussion here:
        // https://github.com/Microsoft/WSL/issues/2997 And confirmation that this persists
        // past glibc 2.24+ here: https://github.com/fish-shell/fish-shell/issues/4715
        if (j->get_flag(job_flag_t::JOB_CONTROL) && getpgid(p->pid) != j->pgid) {
            set_child_group(j, p->pid);
        }
#else
        // In do_fork, the pid of the child process is used as the group leader if j->pgid is
        // invalid. posix_spawn assigns the new group a pgid equal to the child's own pid when
        // j->pgid is invalid, so this is what we do instead of calling set_child_group.
        if (j->pgid == INVALID_PID) {
            j->pgid = pid;
        }
#endif

        maybe_assign_terminal(j);
    } else
#endif
    {
        if (!fork_child_for_process(j, p, proc_io_chain, false, "external command",
                                    [&] { safe_launch_process(p, actual_cmd, argv, envv); })) {
            return false;
        }
    }

    return true;
}