Example #1
File: srv_samr.c  Project: hajuuk/R7000
static BOOL api_samr_set_userinfo(pipes_struct *p)
{
	SAMR_Q_SET_USERINFO q_u;
	SAMR_R_SET_USERINFO r_u;
	prs_struct *data = &p->in_data.data;
	prs_struct *rdata = &p->out_data.rdata;

	ZERO_STRUCT(q_u);
	ZERO_STRUCT(r_u);

	if (!samr_io_q_set_userinfo("", &q_u, data, 0)) {
		DEBUG(0,("api_samr_set_userinfo: Unable to unmarshall SAMR_Q_SET_USERINFO.\n"));
		/* Fix for W2K SP2 */
		/* what is that status-code ? - gd */
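		/* (0x1c000006 appears to be nca_s_fault_invalid_tag, i.e. DCERPC_FAULT_INVALID_TAG.) */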
		if (q_u.switch_value == 0x1a) {
			setup_fault_pdu(p, NT_STATUS(0x1c000006));
			return True;
		}
		return False;
	}

	r_u.status = _samr_set_userinfo(p, &q_u, &r_u);

	if(!samr_io_r_set_userinfo("", &r_u, rdata, 0)) {
		DEBUG(0,("api_samr_set_userinfo: Unable to marshall SAMR_R_SET_USERINFO.\n"));
		return False;
	}

	return True;
}
Example #2
bool create_next_pdu(struct pipes_struct *p)
{
	size_t pdu_size = 0;
	NTSTATUS status;

	/*
	 * If we're in the fault state, keep returning fault PDUs until
	 * the pipe gets closed. JRA.
	 */
	if (p->fault_state) {
		setup_fault_pdu(p, NT_STATUS(p->fault_state));
		return true;
	}

	status = create_next_packet(p->mem_ctx, &p->auth,
				    p->call_id, &p->out_data.rdata,
				    p->out_data.data_sent_length,
				    &p->out_data.frag, &pdu_size);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("Failed to create packet with error %s, "
			  "(auth level %u / type %u)\n",
			  nt_errstr(status),
			  (unsigned int)p->auth.auth_level,
			  (unsigned int)p->auth.auth_type));
		return false;
	}

	/* Setup the counts for this PDU. */
	p->out_data.data_sent_length += pdu_size;
	p->out_data.current_pdu_sent = 0;
	return true;
}
Example #3
static bool api_spoolss_addprinterdriver(pipes_struct *p)
{
	SPOOL_Q_ADDPRINTERDRIVER q_u;
	SPOOL_R_ADDPRINTERDRIVER r_u;
	prs_struct *data = &p->in_data.data;
	prs_struct *rdata = &p->out_data.rdata;
	
	ZERO_STRUCT(q_u);
	ZERO_STRUCT(r_u);
	
	if(!spoolss_io_q_addprinterdriver("", &q_u, data, 0)) {
		if (q_u.level != 3 && q_u.level != 6) {
			/* Clever hack from Martin Zielinski <*****@*****.**>
			 * to allow downgrade from level 8 (Vista).
			 */
			DEBUG(3,("api_spoolss_addprinterdriver: unknown SPOOL_Q_ADDPRINTERDRIVER level %u.\n",
				(unsigned int)q_u.level ));
			setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_INVALID_TAG));
			return True;
		}
		DEBUG(0,("spoolss_io_q_addprinterdriver: unable to unmarshall SPOOL_Q_ADDPRINTERDRIVER.\n"));
		return False;
	}
	
	r_u.status = _spoolss_addprinterdriver(p, &q_u, &r_u);
				
	if(!spoolss_io_r_addprinterdriver("", &r_u, rdata, 0)) {
		DEBUG(0,("spoolss_io_r_addprinterdriver: unable to marshall SPOOL_R_ADDPRINTERDRIVER.\n"));
		return False;
	}
	
	return True;
}
Example #4
static void process_complete_pdu(pipes_struct *p)
{
    prs_struct rpc_in;
    size_t data_len = p->in_data.pdu_received_len - RPC_HEADER_LEN;
    char *data_p = (char *)&p->in_data.current_in_pdu[RPC_HEADER_LEN];
    bool reply = False;

    if(p->fault_state) {
        DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
                  get_pipe_name_from_iface(&p->syntax)));
        set_incoming_fault(p);
        setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
        return;
    }

    prs_init_empty( &rpc_in, p->mem_ctx, UNMARSHALL);

    /*
     * Ensure we're using the correct endianness for both the
     * RPC header flags and the raw data we will be reading from.
     */

    prs_set_endian_data( &rpc_in, p->endian);
    prs_set_endian_data( &p->in_data.data, p->endian);

    prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);

    DEBUG(10,("process_complete_pdu: processing packet type %u\n",
              (unsigned int)p->hdr.pkt_type ));

    switch (p->hdr.pkt_type) {
    case RPC_REQUEST:
        reply = process_request_pdu(p, &rpc_in);
        break;

    case RPC_PING: /* CL request - ignore... */
        DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
                 (unsigned int)p->hdr.pkt_type,
                 get_pipe_name_from_iface(&p->syntax)));
        break;

    case RPC_RESPONSE: /* No responses here. */
        DEBUG(0,("process_complete_pdu: Error. RPC_RESPONSE received from client on pipe %s.\n",
                 get_pipe_name_from_iface(&p->syntax)));
        break;

    case RPC_FAULT:
    case RPC_WORKING: /* CL request - reply to a ping when a call in process. */
    case RPC_NOCALL: /* CL - server reply to a ping call. */
    case RPC_REJECT:
    case RPC_ACK:
    case RPC_CL_CANCEL:
    case RPC_FACK:
    case RPC_CANCEL_ACK:
        DEBUG(0,("process_complete_pdu: Error. Connectionless packet type %u received on pipe %s.\n",
                 (unsigned int)p->hdr.pkt_type,
                 get_pipe_name_from_iface(&p->syntax)));
        break;

    case RPC_BIND:
        /*
         * We assume that a pipe bind is only in one pdu.
         */
        if(pipe_init_outgoing_data(p)) {
            reply = api_pipe_bind_req(p, &rpc_in);
        }
        break;

    case RPC_BINDACK:
    case RPC_BINDNACK:
        DEBUG(0,("process_complete_pdu: Error. RPC_BINDACK/RPC_BINDNACK packet type %u received on pipe %s.\n",
                 (unsigned int)p->hdr.pkt_type,
                 get_pipe_name_from_iface(&p->syntax)));
        break;


    case RPC_ALTCONT:
        /*
         * We assume that a pipe bind is only in one pdu.
         */
        if(pipe_init_outgoing_data(p)) {
            reply = api_pipe_alter_context(p, &rpc_in);
        }
        break;

    case RPC_ALTCONTRESP:
        DEBUG(0,("process_complete_pdu: Error. RPC_ALTCONTRESP on pipe %s: Should only be server -> client.\n",
                 get_pipe_name_from_iface(&p->syntax)));
        break;

    case RPC_AUTH3:
        /*
         * The third packet in an NTLMSSP auth exchange.
         */
        if(pipe_init_outgoing_data(p)) {
            reply = api_pipe_bind_auth3(p, &rpc_in);
        }
        break;

    case RPC_SHUTDOWN:
        DEBUG(0,("process_complete_pdu: Error. RPC_SHUTDOWN on pipe %s: Should only be server -> client.\n",
                 get_pipe_name_from_iface(&p->syntax)));
        break;

    case RPC_CO_CANCEL:
        /* For now just free all client data and continue processing. */
        DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
        /* As we never do asynchronous RPC serving, we can never cancel a
           call (as far as I know). If we ever did we'd have to send a cancel_ack
           reply. For now, just free all client data and continue processing. */
        reply = True;
        break;
#if 0
        /* Enable this if we're doing async rpc. */
        /* We must check the call-id matches the outstanding callid. */
        if(pipe_init_outgoing_data(p)) {
            /* Send a cancel_ack PDU reply. */
            /* We should probably check the auth-verifier here. */
            reply = setup_cancel_ack_reply(p, &rpc_in);
        }
        break;
#endif

    case RPC_ORPHANED:
        /* We should probably check the auth-verifier here.
           For now just free all client data and continue processing. */
        DEBUG(3,("process_complete_pdu: RPC_ORPHANED. Abandoning rpc call.\n"));
        reply = True;
        break;

    default:
        DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
        break;
    }

    /* Reset to little endian. Probably don't need this but it won't hurt. */
    prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);

    if (!reply) {
        DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on "
                 "pipe %s\n", get_pipe_name_from_iface(&p->syntax)));
        set_incoming_fault(p);
        setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
        prs_mem_free(&rpc_in);
    } else {
        /*
         * Reset the lengths. We're ready for a new pdu.
         */
        TALLOC_FREE(p->in_data.current_in_pdu);
        p->in_data.pdu_needed_len = 0;
        p->in_data.pdu_received_len = 0;
    }

    prs_mem_free(&rpc_in);
}
Example #5
File: srv_pipe.c  Project: hajuuk/R7000
BOOL create_next_pdu(pipes_struct *p)
{
	RPC_HDR_RESP hdr_resp;
	BOOL auth_verify = ((p->ntlmssp_chal_flags & NTLMSSP_NEGOTIATE_SIGN) != 0);
	BOOL auth_seal   = ((p->ntlmssp_chal_flags & NTLMSSP_NEGOTIATE_SEAL) != 0);
	uint32 ss_padding_len = 0;
	uint32 data_len;
	uint32 data_space_available;
	uint32 data_len_left;
	prs_struct outgoing_pdu;
	uint32 data_pos;

	/*
	 * If we're in the fault state, keep returning fault PDUs until
	 * the pipe gets closed. JRA.
	 */

	if(p->fault_state) {
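		/* 0x1c010002 is nca_s_op_rng_error (DCERPC_FAULT_OP_RNG_ERROR). */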
		setup_fault_pdu(p, NT_STATUS(0x1c010002));
		return True;
	}

	memset((char *)&hdr_resp, '\0', sizeof(hdr_resp));

	/* Change the incoming request header to a response. */
	p->hdr.pkt_type = RPC_RESPONSE;

	/* Set up rpc header flags. */
	if (p->out_data.data_sent_length == 0) {
		p->hdr.flags = RPC_FLG_FIRST;
	} else {
		p->hdr.flags = 0;
	}

	/*
	 * Work out how much we can fit in a single PDU.
	 */

	data_space_available = sizeof(p->out_data.current_pdu) - RPC_HEADER_LEN - RPC_HDR_RESP_LEN;
	if(p->ntlmssp_auth_validated) {
		data_space_available -= (RPC_HDR_AUTH_LEN + RPC_AUTH_NTLMSSP_CHK_LEN);
	} else if(p->netsec_auth_validated) {
		data_space_available -= (RPC_HDR_AUTH_LEN + RPC_AUTH_NETSEC_SIGN_OR_SEAL_CHK_LEN);
	}

	/*
	 * The amount we send is the minimum of the available
	 * space and the amount left to send.
	 */

	data_len_left = prs_offset(&p->out_data.rdata) - p->out_data.data_sent_length;

	/*
	 * Ensure there really is data left to send.
	 */

	if(!data_len_left) {
		DEBUG(0,("create_next_pdu: no data left to send !\n"));
		return False;
	}

	data_len = MIN(data_len_left, data_space_available);

	/*
	 * Set up the alloc hint. This should be the data left to
	 * send.
	 */

	hdr_resp.alloc_hint = data_len_left;

	/*
	 * Work out if this PDU will be the last.
	 */

	if(p->out_data.data_sent_length + data_len >= prs_offset(&p->out_data.rdata)) {
		p->hdr.flags |= RPC_FLG_LAST;
		if ((auth_seal || auth_verify) && (data_len_left % 8)) {
			ss_padding_len = 8 - (data_len_left % 8);
			DEBUG(10,("create_next_pdu: adding sign/seal padding of %u\n",
				ss_padding_len ));
		}
	}

	/*
	 * Set up the header lengths.
	 */

	if (p->ntlmssp_auth_validated) {
		p->hdr.frag_len = RPC_HEADER_LEN + RPC_HDR_RESP_LEN +
			data_len + ss_padding_len +
			RPC_HDR_AUTH_LEN + RPC_AUTH_NTLMSSP_CHK_LEN;
		p->hdr.auth_len = RPC_AUTH_NTLMSSP_CHK_LEN;
	} else if (p->netsec_auth_validated) {
		p->hdr.frag_len = RPC_HEADER_LEN + RPC_HDR_RESP_LEN +
			data_len + ss_padding_len +
			RPC_HDR_AUTH_LEN + RPC_AUTH_NETSEC_SIGN_OR_SEAL_CHK_LEN;
		p->hdr.auth_len = RPC_AUTH_NETSEC_SIGN_OR_SEAL_CHK_LEN;
	} else {
		p->hdr.frag_len = RPC_HEADER_LEN + RPC_HDR_RESP_LEN + data_len;
		p->hdr.auth_len = 0;
	}

	/*
	 * Init the parse struct to point at the outgoing
	 * data.
	 */

	prs_init( &outgoing_pdu, 0, p->mem_ctx, MARSHALL);
	prs_give_memory( &outgoing_pdu, (char *)p->out_data.current_pdu, sizeof(p->out_data.current_pdu), False);

	/* Store the header in the data stream. */
	if(!smb_io_rpc_hdr("hdr", &p->hdr, &outgoing_pdu, 0)) {
		DEBUG(0,("create_next_pdu: failed to marshall RPC_HDR.\n"));
		prs_mem_free(&outgoing_pdu);
		return False;
	}

	if(!smb_io_rpc_hdr_resp("resp", &hdr_resp, &outgoing_pdu, 0)) {
		DEBUG(0,("create_next_pdu: failed to marshall RPC_HDR_RESP.\n"));
		prs_mem_free(&outgoing_pdu);
		return False;
	}

	/* Store the current offset. */
	data_pos = prs_offset(&outgoing_pdu);

	/* Copy the data into the PDU. */

	if(!prs_append_some_prs_data(&outgoing_pdu, &p->out_data.rdata, p->out_data.data_sent_length, data_len)) {
		DEBUG(0,("create_next_pdu: failed to copy %u bytes of data.\n", (unsigned int)data_len));
		prs_mem_free(&outgoing_pdu);
		return False;
	}

	/* Copy the sign/seal padding data. */
	if (ss_padding_len) {
		char pad[8];
		memset(pad, '\0', 8);
		if (!prs_copy_data_in(&outgoing_pdu, pad, ss_padding_len)) {
			DEBUG(0,("create_next_pdu: failed to add %u bytes of pad data.\n", (unsigned int)ss_padding_len));
			prs_mem_free(&outgoing_pdu);
			return False;
		}
	}

	if (p->ntlmssp_auth_validated) {
		/*
		 * NTLMSSP processing. Mutually exclusive with Schannel.
		 */
		uint32 crc32 = 0;
		char *data;

		DEBUG(5,("create_next_pdu: sign: %s seal: %s data %d auth %d\n",
			 BOOLSTR(auth_verify), BOOLSTR(auth_seal), data_len + ss_padding_len, p->hdr.auth_len));

		/*
		 * Set data to point to where we copied the data into.
		 */

		data = prs_data_p(&outgoing_pdu) + data_pos;

		if (auth_seal) {
			crc32 = crc32_calc_buffer(data, data_len + ss_padding_len);
			NTLMSSPcalc_p(p, (uchar*)data, data_len + ss_padding_len);
		}

		if (auth_seal || auth_verify) {
			RPC_HDR_AUTH auth_info;

			init_rpc_hdr_auth(&auth_info, NTLMSSP_AUTH_TYPE,
					auth_seal ? RPC_PIPE_AUTH_SEAL_LEVEL : RPC_PIPE_AUTH_SIGN_LEVEL,
					(auth_verify ? ss_padding_len : 0), (auth_verify ? 1 : 0));
			if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, &outgoing_pdu, 0)) {
				DEBUG(0,("create_next_pdu: failed to marshall RPC_HDR_AUTH.\n"));
				prs_mem_free(&outgoing_pdu);
				return False;
			}
		}

		if (auth_verify) {
			RPC_AUTH_NTLMSSP_CHK ntlmssp_chk;
			char *auth_data = prs_data_p(&outgoing_pdu);

			p->ntlmssp_seq_num++;
			init_rpc_auth_ntlmssp_chk(&ntlmssp_chk, NTLMSSP_SIGN_VERSION,
					crc32, p->ntlmssp_seq_num++);
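			/* Point 4 bytes past the start of the verifier so the sign/seal pass below skips the version field. */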
			auth_data = prs_data_p(&outgoing_pdu) + prs_offset(&outgoing_pdu) + 4;
			if(!smb_io_rpc_auth_ntlmssp_chk("auth_sign", &ntlmssp_chk, &outgoing_pdu, 0)) {
				DEBUG(0,("create_next_pdu: failed to marshall RPC_AUTH_NTLMSSP_CHK.\n"));
				prs_mem_free(&outgoing_pdu);
				return False;
			}
			NTLMSSPcalc_p(p, (uchar*)auth_data, RPC_AUTH_NTLMSSP_CHK_LEN - 4);
		}
	} else if (p->netsec_auth_validated) {
		/*
		 * Schannel processing. Mutually exclusive with NTLMSSP.
		 */
		int auth_type, auth_level;
		char *data;
		RPC_HDR_AUTH auth_info;

		RPC_AUTH_NETSEC_CHK verf;
		prs_struct rverf;
		prs_struct rauth;

		data = prs_data_p(&outgoing_pdu) + data_pos;
		/* Check it's the type of reply we were expecting to decode */

		get_auth_type_level(p->netsec_auth.auth_flags, &auth_type, &auth_level);
		init_rpc_hdr_auth(&auth_info, auth_type, auth_level, 
				  ss_padding_len, 1);

		if(!smb_io_rpc_hdr_auth("hdr_auth", &auth_info, &outgoing_pdu, 0)) {
			DEBUG(0,("create_next_pdu: failed to marshall RPC_HDR_AUTH.\n"));
			prs_mem_free(&outgoing_pdu);
			return False;
		}

		prs_init(&rverf, 0, p->mem_ctx, MARSHALL);
		prs_init(&rauth, 0, p->mem_ctx, MARSHALL);

		netsec_encode(&p->netsec_auth, 
			      p->netsec_auth.auth_flags,
			      SENDER_IS_ACCEPTOR,
			      &verf, data, data_len + ss_padding_len);

		smb_io_rpc_auth_netsec_chk("", RPC_AUTH_NETSEC_SIGN_OR_SEAL_CHK_LEN, 
			&verf, &outgoing_pdu, 0);

		p->netsec_auth.seq_num++;
	}

	/*
	 * Setup the counts for this PDU.
	 */

	p->out_data.data_sent_length += data_len;
	p->out_data.current_pdu_len = p->hdr.frag_len;
	p->out_data.current_pdu_sent = 0;

	prs_mem_free(&outgoing_pdu);
	return True;
}
Example #6
void process_complete_pdu(struct pipes_struct *p, struct ncacn_packet *pkt)
{
	bool reply = false;

	/* Store the call_id */
	p->call_id = pkt->call_id;

	DEBUG(10, ("Processing packet type %u\n", (unsigned int)pkt->ptype));

	if (!pipe_init_outgoing_data(p)) {
		goto done;
	}

	switch (pkt->ptype) {
	case DCERPC_PKT_REQUEST:
		reply = process_request_pdu(p, pkt);
		break;

	case DCERPC_PKT_PING: /* CL request - ignore... */
		DEBUG(0, ("Error - Connectionless packet type %u received\n",
			  (unsigned int)pkt->ptype));
		break;

	case DCERPC_PKT_RESPONSE: /* No responses here. */
		DEBUG(0, ("Error - DCERPC_PKT_RESPONSE received from client"));
		break;

	case DCERPC_PKT_FAULT:
	case DCERPC_PKT_WORKING:
		/* CL request - reply to a ping when a call in process. */
	case DCERPC_PKT_NOCALL:
		/* CL - server reply to a ping call. */
	case DCERPC_PKT_REJECT:
	case DCERPC_PKT_ACK:
	case DCERPC_PKT_CL_CANCEL:
	case DCERPC_PKT_FACK:
	case DCERPC_PKT_CANCEL_ACK:
		DEBUG(0, ("Error - Connectionless packet type %u received\n",
			  (unsigned int)pkt->ptype));
		break;

	case DCERPC_PKT_BIND:
		/*
		 * We assume that a pipe bind is only in one pdu.
		 */
		reply = api_pipe_bind_req(p, pkt);
		break;

	case DCERPC_PKT_BIND_ACK:
	case DCERPC_PKT_BIND_NAK:
		DEBUG(0, ("Error - DCERPC_PKT_BINDACK/DCERPC_PKT_BINDNACK "
			  "packet type %u received.\n",
			  (unsigned int)pkt->ptype));
		break;


	case DCERPC_PKT_ALTER:
		/*
		 * We assume that a pipe bind is only in one pdu.
		 */
		reply = api_pipe_alter_context(p, pkt);
		break;

	case DCERPC_PKT_ALTER_RESP:
		DEBUG(0, ("Error - DCERPC_PKT_ALTER_RESP received: "
			  "Should only be server -> client.\n"));
		break;

	case DCERPC_PKT_AUTH3:
		/*
		 * The third packet in an auth exchange.
		 */
		reply = api_pipe_bind_auth3(p, pkt);
		break;

	case DCERPC_PKT_SHUTDOWN:
		DEBUG(0, ("Error - DCERPC_PKT_SHUTDOWN received: "
			  "Should only be server -> client.\n"));
		break;

	case DCERPC_PKT_CO_CANCEL:
		/* For now just free all client data and continue
		 * processing. */
		DEBUG(3,("process_complete_pdu: DCERPC_PKT_CO_CANCEL."
			 " Abandoning rpc call.\n"));
		/* As we never do asynchronous RPC serving, we can
		 * never cancel a call (as far as I know).
		 * If we ever did we'd have to send a cancel_ack reply.
		 * For now, just free all client data and continue
		 * processing. */
		reply = True;
		break;

#if 0
		/* Enable this if we're doing async rpc. */
		/* We must check the outstanding callid matches. */
		if (pipe_init_outgoing_data(p)) {
			/* Send a cancel_ack PDU reply. */
			/* We should probably check the auth-verifier here. */
			reply = setup_cancel_ack_reply(p, pkt);
		}
		break;
#endif

	case DCERPC_PKT_ORPHANED:
		/* We should probably check the auth-verifier here.
		 * For now just free all client data and continue
		 * processing. */
		DEBUG(3, ("process_complete_pdu: DCERPC_PKT_ORPHANED."
			  " Abandoning rpc call.\n"));
		reply = True;
		break;

	default:
		DEBUG(0, ("process_complete_pdu: "
			  "Unknown rpc type = %u received.\n",
			  (unsigned int)pkt->ptype));
		break;
	}

done:
	if (!reply) {
		DEBUG(3,("DCE/RPC fault sent!"));
		set_incoming_fault(p);
		setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
	}
	/* pkt and p->in_data.pdu.data freed by caller */
}
Example #7
static bool api_rpcTNP(struct pipes_struct *p, struct ncacn_packet *pkt,
		       const struct api_struct *api_rpc_cmds, int n_cmds,
		       const struct ndr_syntax_id *syntax)
{
	int fn_num;
	uint32_t offset1;
	const struct ndr_interface_table *table;

	/* interpret the command */
	DEBUG(4,("api_rpcTNP: %s op 0x%x - ",
		 ndr_interface_name(&syntax->uuid, syntax->if_version),
		 pkt->u.request.opnum));

	table = ndr_table_by_uuid(&syntax->uuid);
	if (table == NULL) {
		DEBUG(0,("unknown interface\n"));
		return false;
	}

	if (DEBUGLEVEL >= 50) {
		fstring name;
		slprintf(name, sizeof(name)-1, "in_%s",
			 dcerpc_default_transport_endpoint(pkt, NCACN_NP, table));
		dump_pdu_region(name, pkt->u.request.opnum,
				&p->in_data.data, 0,
				p->in_data.data.length);
	}

	for (fn_num = 0; fn_num < n_cmds; fn_num++) {
		if (api_rpc_cmds[fn_num].opnum == pkt->u.request.opnum &&
		    api_rpc_cmds[fn_num].fn != NULL) {
			DEBUG(3, ("api_rpcTNP: rpc command: %s\n",
				  api_rpc_cmds[fn_num].name));
			break;
		}
	}

	if (fn_num == n_cmds) {
		/*
		 * For an unknown RPC just return a fault PDU but
		 * return True to allow RPCs on the pipe to continue
		 * and not put the pipe into fault state. JRA.
		 */
		DEBUG(4, ("unknown\n"));
		setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_OP_RNG_ERROR));
		return True;
	}

	offset1 = p->out_data.rdata.length;
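	/* offset1 marks where this call's output begins; used for the "out_" debug dump below. */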

        DEBUG(6, ("api_rpc_cmds[%d].fn == %p\n", 
                fn_num, api_rpc_cmds[fn_num].fn));
	/* do the actual command */
	if(!api_rpc_cmds[fn_num].fn(p)) {
		DEBUG(0,("api_rpcTNP: %s: %s failed.\n",
			 ndr_interface_name(&syntax->uuid, syntax->if_version),
			 api_rpc_cmds[fn_num].name));
		data_blob_free(&p->out_data.rdata);
		return False;
	}

	if (p->fault_state) {
		DEBUG(4,("api_rpcTNP: fault(%d) return.\n", p->fault_state));
		setup_fault_pdu(p, NT_STATUS(p->fault_state));
		p->fault_state = 0;
		return true;
	}

	if (DEBUGLEVEL >= 50) {
		fstring name;
		slprintf(name, sizeof(name)-1, "out_%s",
			 dcerpc_default_transport_endpoint(pkt, NCACN_NP, table));
		dump_pdu_region(name, pkt->u.request.opnum,
				&p->out_data.rdata, offset1,
				p->out_data.rdata.length);
	}

	DEBUG(5,("api_rpcTNP: called %s successfully\n",
		 ndr_interface_name(&syntax->uuid, syntax->if_version)));

	/* Check for buffer underflow in rpc parsing */
	if ((DEBUGLEVEL >= 10) &&
	    (pkt->frag_length < p->in_data.data.length)) {
		DEBUG(10, ("api_rpcTNP: rpc input buffer underflow (parse error?)\n"));
		dump_data(10, p->in_data.data.data + pkt->frag_length,
			      p->in_data.data.length - pkt->frag_length);
	}

	return True;
}
Example #8
static bool srv_pipe_check_verification_trailer(struct pipes_struct *p,
						struct ncacn_packet *pkt,
						struct pipe_rpc_fns *pipe_fns)
{
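	/* Pull the DCE/RPC security verification trailer from the request payload and check it against this context's negotiated syntaxes. */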
	TALLOC_CTX *frame = talloc_stackframe();
	struct dcerpc_sec_verification_trailer *vt = NULL;
	const uint32_t bitmask1 =
		p->auth.client_hdr_signing ? DCERPC_SEC_VT_CLIENT_SUPPORTS_HEADER_SIGNING : 0;
	const struct dcerpc_sec_vt_pcontext pcontext = {
		.abstract_syntax = pipe_fns->syntax,
		.transfer_syntax = ndr_transfer_syntax_ndr,
	};
	const struct dcerpc_sec_vt_header2 header2 =
	       dcerpc_sec_vt_header2_from_ncacn_packet(pkt);
	struct ndr_pull *ndr;
	enum ndr_err_code ndr_err;
	bool ret = false;

	ndr = ndr_pull_init_blob(&p->in_data.data, frame);
	if (ndr == NULL) {
		goto done;
	}

	ndr_err = ndr_pop_dcerpc_sec_verification_trailer(ndr, frame, &vt);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		goto done;
	}

	ret = dcerpc_sec_verification_trailer_check(vt, &bitmask1,
						    &pcontext, &header2);
done:
	TALLOC_FREE(frame);
	return ret;
}

/****************************************************************************
 Find the correct RPC function to call for this request.
 If the pipe is authenticated then become the correct UNIX user
 before doing the call.
****************************************************************************/

static bool api_pipe_request(struct pipes_struct *p,
				struct ncacn_packet *pkt)
{
	TALLOC_CTX *frame = talloc_stackframe();
	bool ret = False;
	struct pipe_rpc_fns *pipe_fns;

	if (!p->pipe_bound) {
		DEBUG(1, ("Pipe not bound!\n"));
		data_blob_free(&p->out_data.rdata);
		TALLOC_FREE(frame);
		return false;
	}

	/* get the set of RPC functions for this context */
	pipe_fns = find_pipe_fns_by_context(p->contexts,
					    pkt->u.request.context_id);
	if (pipe_fns == NULL) {
		DEBUG(0, ("No rpc function table associated with context "
			  "[%d]\n",
			  pkt->u.request.context_id));
		data_blob_free(&p->out_data.rdata);
		TALLOC_FREE(frame);
		return false;
	}

	if (!srv_pipe_check_verification_trailer(p, pkt, pipe_fns)) {
		DEBUG(1, ("srv_pipe_check_verification_trailer: failed\n"));
		setup_fault_pdu(p, NT_STATUS(DCERPC_FAULT_ACCESS_DENIED));
		data_blob_free(&p->out_data.rdata);
		TALLOC_FREE(frame);
		return true;
	}

	if (!become_authenticated_pipe_user(p->session_info)) {
		DEBUG(1, ("Failed to become pipe user!\n"));
		data_blob_free(&p->out_data.rdata);
		TALLOC_FREE(frame);
		return false;
	}

	DEBUG(5, ("Requested %s rpc service\n",
		  ndr_interface_name(&pipe_fns->syntax.uuid,
				     pipe_fns->syntax.if_version)));

	ret = api_rpcTNP(p, pkt, pipe_fns->cmds, pipe_fns->n_cmds,
			 &pipe_fns->syntax);
	unbecome_authenticated_pipe_user();

	TALLOC_FREE(frame);
	return ret;
}
Example #9
static ssize_t process_complete_pdu(pipes_struct *p)
{
	prs_struct rpc_in;
	size_t data_len = p->in_data.pdu_received_len;
	char *data_p = (char *)&p->in_data.current_in_pdu[0];
	BOOL reply = False;

	if(p->fault_state) {
		DEBUG(10,("process_complete_pdu: pipe %s in fault state.\n",
			p->name ));
		set_incoming_fault(p);
		setup_fault_pdu(p, NT_STATUS(0x1c010002));
		return (ssize_t)data_len;
	}

	prs_init( &rpc_in, 0, p->mem_ctx, UNMARSHALL);

	/*
	 * Ensure we're using the correct endianness for both the
	 * RPC header flags and the raw data we will be reading from.
	 */

	prs_set_endian_data( &rpc_in, p->endian);
	prs_set_endian_data( &p->in_data.data, p->endian);

	prs_give_memory( &rpc_in, data_p, (uint32)data_len, False);

	DEBUG(10,("process_complete_pdu: processing packet type %u\n",
			(unsigned int)p->hdr.pkt_type ));

	switch (p->hdr.pkt_type) {
		case RPC_BIND:
		case RPC_ALTCONT:
			/*
			 * We assume that a pipe bind is only in one pdu.
			 */
			if(pipe_init_outgoing_data(p))
				reply = api_pipe_bind_req(p, &rpc_in);
			break;
		case RPC_BINDRESP:
			/*
			 * We assume that a pipe bind_resp is only in one pdu.
			 */
			if(pipe_init_outgoing_data(p))
				reply = api_pipe_bind_auth_resp(p, &rpc_in);
			break;
		case RPC_REQUEST:
			reply = process_request_pdu(p, &rpc_in);
			break;
		default:
			DEBUG(0,("process_complete_pdu: Unknown rpc type = %u received.\n", (unsigned int)p->hdr.pkt_type ));
			break;
	}

	/* Reset to little endian. Probably don't need this but it won't hurt. */
	prs_set_endian_data( &p->in_data.data, RPC_LITTLE_ENDIAN);

	if (!reply) {
		DEBUG(3,("process_complete_pdu: DCE/RPC fault sent on pipe %s\n", p->pipe_srv_name));
		set_incoming_fault(p);
		setup_fault_pdu(p, NT_STATUS(0x1c010002));
		prs_mem_free(&rpc_in);
	} else {
		/*
		 * Reset the lengths. We're ready for a new pdu.
		 */
		p->in_data.pdu_needed_len = 0;
		p->in_data.pdu_received_len = 0;
	}

	prs_mem_free(&rpc_in);
	return (ssize_t)data_len;
}