static bool test_handles_random_assoc(struct torture_context *torture) { NTSTATUS status; struct dcerpc_pipe *p1, *p2, *p3; TALLOC_CTX *mem_ctx = talloc_new(torture); enum dcerpc_transport_t transport; uint32_t assoc_group_id; torture_comment(torture, "RPC-HANDLE-RANDOM-ASSOC\n"); torture_comment(torture, "connect samr pipe1\n"); status = torture_rpc_connection(torture, &p1, &ndr_table_samr); torture_assert_ntstatus_ok(torture, status, "opening samr pipe1"); torture_comment(torture, "pipe1 uses assoc_group_id[0x%08X]\n", dcerpc_binding_get_assoc_group_id(p1->binding)); transport = p1->conn->transport.transport; /* * We use ~p1->assoc_group_id instead of p1->assoc_group_id, because * this way we are less likely to use an id which is already in use. */ assoc_group_id = dcerpc_binding_get_assoc_group_id(p1->binding); assoc_group_id = ~assoc_group_id; torture_comment(torture, "connect samr pipe2 with assoc_group_id[0x%08X]- should fail\n", ++assoc_group_id); status = torture_rpc_connection_transport(torture, &p2, &ndr_table_samr, transport, assoc_group_id); torture_assert_ntstatus_equal(torture, status, NT_STATUS_UNSUCCESSFUL, "opening samr pipe2"); torture_comment(torture, "connect samr pipe3 with assoc_group_id[0x%08X]- should fail\n", ++assoc_group_id); status = torture_rpc_connection_transport(torture, &p3, &ndr_table_samr, transport, assoc_group_id); torture_assert_ntstatus_equal(torture, status, NT_STATUS_UNSUCCESSFUL, "opening samr pipe3"); talloc_free(mem_ctx); return true; }
/*
 * Test policy-handle visibility across two different interfaces (samr and
 * lsarpc) that share one association group, and verify the group dies once
 * all of its connections are gone.
 */
static bool test_handles_mixed_shared(struct torture_context *torture)
{
	NTSTATUS status;
	struct dcerpc_pipe *p1, *p2, *p3, *p4, *p5, *p6;
	struct dcerpc_binding_handle *b1, *b2;
	struct policy_handle handle;
	struct policy_handle handle2;
	struct samr_Connect r;
	struct lsa_Close lc;
	struct samr_Close sc;
	TALLOC_CTX *mem_ctx = talloc_new(torture);
	enum dcerpc_transport_t transport;
	uint32_t assoc_group_id;

	torture_comment(torture, "RPC-HANDLE-MIXED-SHARED\n");

	/* First connection (samr) establishes the association group. */
	torture_comment(torture, "connect samr pipe1\n");
	status = torture_rpc_connection(torture, &p1, &ndr_table_samr);
	torture_assert_ntstatus_ok(torture, status, "opening samr pipe1");
	b1 = p1->binding_handle;

	transport = p1->conn->transport.transport;
	assoc_group_id = dcerpc_binding_get_assoc_group_id(p1->binding);

	torture_comment(torture, "use assoc_group_id[0x%08X] for new connections\n",
			assoc_group_id);

	/* Second connection (lsarpc) joins the same association group. */
	torture_comment(torture, "connect lsa pipe2\n");
	status = torture_rpc_connection_transport(torture, &p2, &ndr_table_lsarpc,
						  transport, assoc_group_id);
	torture_assert_ntstatus_ok(torture, status, "opening lsa pipe2");
	b2 = p2->binding_handle;

	torture_comment(torture, "got assoc_group_id[0x%08X] for p2\n",
			dcerpc_binding_get_assoc_group_id(p2->binding));

	/* Open a samr policy handle on p1. */
	r.in.system_name = 0;
	r.in.access_mask = SEC_FLAG_MAXIMUM_ALLOWED;
	r.out.connect_handle = &handle;

	torture_comment(torture, "samr_Connect to open a policy handle on samr p1\n");
	torture_assert_ntstatus_ok(torture,
				   dcerpc_samr_Connect_r(b1, mem_ctx, &r),
				   "Connect failed");
	torture_assert_ntstatus_ok(torture, r.out.result,
				   "opening policy handle on p1");

	lc.in.handle = &handle;
	lc.out.handle = &handle2;
	sc.in.handle = &handle;
	sc.out.handle = &handle2;

	/*
	 * A samr handle must not be usable via the lsarpc interface even
	 * though both connections share the association group.
	 */
	torture_comment(torture, "use policy handle on lsa p2 - should fail\n");
	status = dcerpc_lsa_Close_r(b2, mem_ctx, &lc);
	torture_assert_ntstatus_equal(torture, status,
				      NT_STATUS_RPC_SS_CONTEXT_MISMATCH,
				      "closing handle on lsa p2");

	torture_comment(torture, "closing policy handle on samr p1\n");
	torture_assert_ntstatus_ok(torture,
				   dcerpc_samr_Close_r(b1, mem_ctx, &sc),
				   "Close failed");
	torture_assert_ntstatus_ok(torture, sc.out.result,
				   "closing policy handle on p1");

	/* Drop both connections; the association group should disappear. */
	talloc_free(p1);
	talloc_free(p2);
	/* give the server a moment to tear the association group down */
	smb_msleep(10);

	torture_comment(torture, "connect samr pipe3 - should fail\n");
	status = torture_rpc_connection_transport(torture, &p3, &ndr_table_samr,
						  transport, assoc_group_id);
	torture_assert_ntstatus_equal(torture, status, NT_STATUS_UNSUCCESSFUL,
				      "opening samr pipe3");

	torture_comment(torture, "connect lsa pipe4 - should fail\n");
	status = torture_rpc_connection_transport(torture, &p4, &ndr_table_lsarpc,
						  transport, assoc_group_id);
	torture_assert_ntstatus_equal(torture, status, NT_STATUS_UNSUCCESSFUL,
				      "opening lsa pipe4");

	/*
	 * We use ~assoc_group_id instead of p1->assoc_group_id, because
	 * this way we are less likely to use an id which is already in use.
	 */
	assoc_group_id = ~assoc_group_id;

	torture_comment(torture, "connect samr pipe5 with assoc_group_id[0x%08X]- should fail\n",
			++assoc_group_id);
	status = torture_rpc_connection_transport(torture, &p5, &ndr_table_samr,
						  transport, assoc_group_id);
	torture_assert_ntstatus_equal(torture, status, NT_STATUS_UNSUCCESSFUL,
				      "opening samr pipe5");

	torture_comment(torture, "connect lsa pipe6 with assoc_group_id[0x%08X]- should fail\n",
			++assoc_group_id);
	status = torture_rpc_connection_transport(torture, &p6, &ndr_table_lsarpc,
						  transport, assoc_group_id);
	torture_assert_ntstatus_equal(torture, status, NT_STATUS_UNSUCCESSFUL,
				      "opening lsa pipe6");

	talloc_free(mem_ctx);
	return true;
}
/* test the TestSleep interface */ static bool test_sleep(struct torture_context *tctx, struct dcerpc_pipe *p) { int i; #define ASYNC_COUNT 3 struct tevent_req *req[ASYNC_COUNT]; struct echo_TestSleep r[ASYNC_COUNT]; bool done1[ASYNC_COUNT]; bool done2[ASYNC_COUNT]; struct timeval snd[ASYNC_COUNT]; struct timeval rcv[ASYNC_COUNT]; struct timeval diff[ASYNC_COUNT]; int total_done = 0; struct dcerpc_binding_handle *b = p->binding_handle; enum dcerpc_transport_t transport; uint32_t assoc_group_id; struct dcerpc_pipe *p2 = NULL; NTSTATUS status; if (torture_setting_bool(tctx, "quick", false)) { torture_skip(tctx, "TestSleep disabled - use \"torture:quick=no\" to enable\n"); } torture_comment(tctx, "Testing TestSleep - use \"torture:quick=yes\" to disable\n"); transport = dcerpc_binding_get_transport(p->binding); assoc_group_id = dcerpc_binding_get_assoc_group_id(p->binding); torture_comment(tctx, "connect echo connection 2 with " "DCERPC_CONCURRENT_MULTIPLEX\n"); status = torture_rpc_connection_transport(tctx, &p2, &ndr_table_rpcecho, transport, assoc_group_id, DCERPC_CONCURRENT_MULTIPLEX); torture_assert_ntstatus_ok(tctx, status, "opening echo connection 2"); b = p2->binding_handle; for (i=0;i<ASYNC_COUNT;i++) { done1[i] = false; done2[i] = false; snd[i] = timeval_current(); rcv[i] = timeval_zero(); r[i].in.seconds = ASYNC_COUNT-i; req[i] = dcerpc_echo_TestSleep_r_send(tctx, tctx->ev, b, &r[i]); torture_assert(tctx, req[i], "Failed to send async sleep request\n"); tevent_req_set_callback(req[i], test_sleep_done, &done1[i]); } while (total_done < ASYNC_COUNT) { torture_assert(tctx, tevent_loop_once(tctx->ev) == 0, "Event context loop failed"); for (i=0;i<ASYNC_COUNT;i++) { if (done2[i] == false && done1[i] == true) { int rounded_tdiff; total_done++; done2[i] = true; rcv[i] = timeval_current(); diff[i] = timeval_until(&snd[i], &rcv[i]); rounded_tdiff = (int)(0.5 + diff[i].tv_sec + (1.0e-6*diff[i].tv_usec)); torture_comment(tctx, "rounded_tdiff=%d\n", rounded_tdiff); 
torture_assert_ntstatus_ok(tctx, dcerpc_echo_TestSleep_r_recv(req[i], tctx), talloc_asprintf(tctx, "TestSleep(%d) failed", i)); torture_assert(tctx, r[i].out.result == r[i].in.seconds, talloc_asprintf(tctx, "Failed - Asked to sleep for %u seconds (server replied with %u seconds and the reply takes only %u seconds)", r[i].out.result, r[i].in.seconds, (unsigned int)diff[i].tv_sec)); torture_assert(tctx, r[i].out.result <= rounded_tdiff, talloc_asprintf(tctx, "Failed - Slept for %u seconds (but reply takes only %u.%06u seconds)", r[i].out.result, (unsigned int)diff[i].tv_sec, (unsigned int)diff[i].tv_usec)); if (r[i].out.result+1 == rounded_tdiff) { torture_comment(tctx, "Slept for %u seconds (but reply takes %u.%06u seconds - busy server?)\n", r[i].out.result, (unsigned int)diff[i].tv_sec, (unsigned int)diff[i].tv_usec); } else if (r[i].out.result == rounded_tdiff) { torture_comment(tctx, "Slept for %u seconds (reply takes %u.%06u seconds - ok)\n", r[i].out.result, (unsigned int)diff[i].tv_sec, (unsigned int)diff[i].tv_usec); } else { torture_fail(tctx, talloc_asprintf(tctx, "(Failed) - Not async - Slept for %u seconds (but reply takes %u.%06u seconds)\n", r[i].out.result, (unsigned int)diff[i].tv_sec, (unsigned int)diff[i].tv_usec)); } } } } torture_comment(tctx, "\n"); return true; }
static bool test_handles_lsa_shared(struct torture_context *torture) { NTSTATUS status; struct dcerpc_pipe *p1, *p2, *p3, *p4, *p5; struct dcerpc_binding_handle *b1, *b2, *b3, *b4; struct policy_handle handle; struct policy_handle handle2; struct lsa_ObjectAttribute attr; struct lsa_QosInfo qos; struct lsa_OpenPolicy r; struct lsa_Close c; struct lsa_QuerySecurity qsec; struct sec_desc_buf *sdbuf = NULL; uint16_t system_name = '\\'; TALLOC_CTX *mem_ctx = talloc_new(torture); enum dcerpc_transport_t transport; uint32_t assoc_group_id; torture_comment(torture, "RPC-HANDLE-LSARPC-SHARED\n"); torture_comment(torture, "connect lsa pipe1\n"); status = torture_rpc_connection(torture, &p1, &ndr_table_lsarpc); torture_assert_ntstatus_ok(torture, status, "opening lsa pipe1"); b1 = p1->binding_handle; transport = p1->conn->transport.transport; assoc_group_id = dcerpc_binding_get_assoc_group_id(p1->binding); torture_comment(torture, "use assoc_group_id[0x%08X] for new connections\n", assoc_group_id); torture_comment(torture, "connect lsa pipe2\n"); status = torture_rpc_connection_transport(torture, &p2, &ndr_table_lsarpc, transport, assoc_group_id); torture_assert_ntstatus_ok(torture, status, "opening lsa pipe2"); b2 = p2->binding_handle; torture_comment(torture, "got assoc_group_id[0x%08X] for p2\n", dcerpc_binding_get_assoc_group_id(p2->binding)); qos.len = 0; qos.impersonation_level = 2; qos.context_mode = 1; qos.effective_only = 0; attr.len = 0; attr.root_dir = NULL; attr.object_name = NULL; attr.attributes = 0; attr.sec_desc = NULL; attr.sec_qos = &qos; r.in.system_name = &system_name; r.in.attr = &attr; r.in.access_mask = SEC_FLAG_MAXIMUM_ALLOWED; r.out.handle = &handle; torture_comment(torture, "open lsa policy handle\n"); torture_assert_ntstatus_ok(torture, dcerpc_lsa_OpenPolicy_r(b1, mem_ctx, &r), "OpenPolicy failed"); if (!NT_STATUS_IS_OK(r.out.result)) { torture_comment(torture, "lsa_OpenPolicy not supported - skipping\n"); talloc_free(mem_ctx); return true; } /* * 
connect p3 after the policy handle is opened */ torture_comment(torture, "connect lsa pipe3 after the policy handle is opened\n"); status = torture_rpc_connection_transport(torture, &p3, &ndr_table_lsarpc, transport, assoc_group_id); torture_assert_ntstatus_ok(torture, status, "opening lsa pipe3"); b3 = p3->binding_handle; qsec.in.handle = &handle; qsec.in.sec_info = 0; qsec.out.sdbuf = &sdbuf; c.in.handle = &handle; c.out.handle = &handle2; /* * use policy handle on all 3 connections */ torture_comment(torture, "use the policy handle on p1,p2,p3\n"); torture_assert_ntstatus_ok(torture, dcerpc_lsa_QuerySecurity_r(b1, mem_ctx, &qsec), "QuerySecurity failed"); torture_assert_ntstatus_equal(torture, qsec.out.result, NT_STATUS_OK, "use policy handle on p1"); torture_assert_ntstatus_ok(torture, dcerpc_lsa_QuerySecurity_r(b2, mem_ctx, &qsec), "QuerySecurity failed"); torture_assert_ntstatus_equal(torture, qsec.out.result, NT_STATUS_OK, "use policy handle on p2"); torture_assert_ntstatus_ok(torture, dcerpc_lsa_QuerySecurity_r(b3, mem_ctx, &qsec), "QuerySecurity failed"); torture_assert_ntstatus_equal(torture, qsec.out.result, NT_STATUS_OK, "use policy handle on p3"); /* * close policy handle on connection 2 and the others get a fault */ torture_comment(torture, "close the policy handle on p2 others get a fault\n"); torture_assert_ntstatus_ok(torture, dcerpc_lsa_Close_r(b2, mem_ctx, &c), "Close failed"); torture_assert_ntstatus_equal(torture, c.out.result, NT_STATUS_OK, "closing policy handle on p2"); status = dcerpc_lsa_Close_r(b1, mem_ctx, &c); torture_assert_ntstatus_equal(torture, status, NT_STATUS_RPC_SS_CONTEXT_MISMATCH, "closing policy handle on p1 again"); status = dcerpc_lsa_Close_r(b3, mem_ctx, &c); torture_assert_ntstatus_equal(torture, status, NT_STATUS_RPC_SS_CONTEXT_MISMATCH, "closing policy handle on p3"); status = dcerpc_lsa_Close_r(b2, mem_ctx, &c); torture_assert_ntstatus_equal(torture, status, NT_STATUS_RPC_SS_CONTEXT_MISMATCH, "closing policy handle on 
p2 again"); /* * open a new policy handle on p3 */ torture_comment(torture, "open a new policy handle on p3\n"); torture_assert_ntstatus_ok(torture, dcerpc_lsa_OpenPolicy_r(b3, mem_ctx, &r), "OpenPolicy failed"); torture_assert_ntstatus_equal(torture, r.out.result, NT_STATUS_OK, "open policy handle on p3"); /* * use policy handle on all 3 connections */ torture_comment(torture, "use the policy handle on p1,p2,p3\n"); torture_assert_ntstatus_ok(torture, dcerpc_lsa_QuerySecurity_r(b1, mem_ctx, &qsec), "Query Security failed"); torture_assert_ntstatus_equal(torture, status, NT_STATUS_OK, "use policy handle on p1"); torture_assert_ntstatus_ok(torture, dcerpc_lsa_QuerySecurity_r(b2, mem_ctx, &qsec), "Query Security failed"); torture_assert_ntstatus_equal(torture, status, NT_STATUS_OK, "use policy handle on p2"); torture_assert_ntstatus_ok(torture, dcerpc_lsa_QuerySecurity_r(b3, mem_ctx, &qsec), "Query Security failed"); torture_assert_ntstatus_equal(torture, status, NT_STATUS_OK, "use policy handle on p3"); /* * close policy handle on connection 2 and the others get a fault */ torture_comment(torture, "close the policy handle on p2 others get a fault\n"); torture_assert_ntstatus_ok(torture, dcerpc_lsa_Close_r(b2, mem_ctx, &c), "Close failed"); torture_assert_ntstatus_equal(torture, c.out.result, NT_STATUS_OK, "closing policy handle on p2"); status = dcerpc_lsa_Close_r(b1, mem_ctx, &c); torture_assert_ntstatus_equal(torture, status, NT_STATUS_RPC_SS_CONTEXT_MISMATCH, "closing policy handle on p1 again"); status = dcerpc_lsa_Close_r(b3, mem_ctx, &c); torture_assert_ntstatus_equal(torture, status, NT_STATUS_RPC_SS_CONTEXT_MISMATCH, "closing policy handle on p3"); status = dcerpc_lsa_Close_r(b2, mem_ctx, &c); torture_assert_ntstatus_equal(torture, status, NT_STATUS_RPC_SS_CONTEXT_MISMATCH, "closing policy handle on p2 again"); /* * open a new policy handle */ torture_comment(torture, "open a new policy handle on p1 and use it\n"); torture_assert_ntstatus_ok(torture, 
dcerpc_lsa_OpenPolicy_r(b1, mem_ctx, &r), "OpenPolicy failed"); torture_assert_ntstatus_equal(torture, r.out.result, NT_STATUS_OK, "open 2nd policy handle on p1"); torture_assert_ntstatus_ok(torture, dcerpc_lsa_QuerySecurity_r(b1, mem_ctx, &qsec), "QuerySecurity failed"); torture_assert_ntstatus_equal(torture, qsec.out.result, NT_STATUS_OK, "QuerySecurity handle on p1"); /* close first connection */ torture_comment(torture, "disconnect p1\n"); talloc_free(p1); smb_msleep(5); /* * and it's still available on p2,p3 */ torture_comment(torture, "use policy handle on p2,p3\n"); torture_assert_ntstatus_ok(torture, dcerpc_lsa_QuerySecurity_r(b2, mem_ctx, &qsec), "QuerySecurity failed"); torture_assert_ntstatus_equal(torture, qsec.out.result, NT_STATUS_OK, "QuerySecurity handle on p2 after p1 was disconnected"); torture_assert_ntstatus_ok(torture, dcerpc_lsa_QuerySecurity_r(b3, mem_ctx, &qsec), "QuerySecurity failed"); torture_assert_ntstatus_equal(torture, qsec.out.result, NT_STATUS_OK, "QuerySecurity handle on p3 after p1 was disconnected"); /* * now open p4 * and use the handle on it */ torture_comment(torture, "connect lsa pipe4 and use policy handle\n"); status = torture_rpc_connection_transport(torture, &p4, &ndr_table_lsarpc, transport, assoc_group_id); torture_assert_ntstatus_ok(torture, status, "opening lsa pipe4"); b4 = p4->binding_handle; torture_assert_ntstatus_ok(torture, dcerpc_lsa_QuerySecurity_r(b4, mem_ctx, &qsec), "QuerySecurity failed"); torture_assert_ntstatus_equal(torture, qsec.out.result, NT_STATUS_OK, "using policy handle on p4"); /* * now close p2,p3,p4 * without closing the policy handle */ torture_comment(torture, "disconnect p2,p3,p4\n"); talloc_free(p2); talloc_free(p3); talloc_free(p4); smb_msleep(10); /* * now open p5 */ torture_comment(torture, "connect lsa pipe5 - should fail\n"); status = torture_rpc_connection_transport(torture, &p5, &ndr_table_lsarpc, transport, assoc_group_id); torture_assert_ntstatus_equal(torture, status, 
NT_STATUS_UNSUCCESSFUL, "opening lsa pipe5"); talloc_free(mem_ctx); return true; }
/*
 * Bind handler for the RPC proxy: establish the outgoing client
 * connection described by the "dcerpc_remote:binding" smb.conf option,
 * authenticating with either explicitly configured credentials, the
 * machine account, or delegated credentials from the inbound session.
 *
 * Fixes:
 *  - "dceprc_remote" typo: the domain parameter was read from a
 *    misspelled config section and therefore never picked up.
 *  - cli_credentials_init() result is now checked in the machine-account
 *    branch (it already was in the user/password branch).
 *  - removed a duplicated !NT_STATUS_IS_OK(status) check at the tail.
 */
static NTSTATUS remote_op_bind(struct dcesrv_call_state *dce_call, const struct dcesrv_interface *iface, uint32_t if_version)
{
        NTSTATUS status;
	const struct ndr_interface_table *table;
        struct dcesrv_remote_private *priv;
	const char *binding = lpcfg_parm_string(dce_call->conn->dce_ctx->lp_ctx, NULL, "dcerpc_remote", "binding");
	const char *user, *pass, *domain;
	struct cli_credentials *credentials;
	bool must_free_credentials = true;
	bool machine_account;
	struct dcerpc_binding		*b;
	struct composite_context	*pipe_conn_req;

	machine_account = lpcfg_parm_bool(dce_call->conn->dce_ctx->lp_ctx, NULL, "dcerpc_remote", "use_machine_account", false);

	/* priv hangs off the connection, so talloc frees it on disconnect */
	priv = talloc(dce_call->conn, struct dcesrv_remote_private);
	if (!priv) {
		return NT_STATUS_NO_MEMORY;
	}

	priv->c_pipe = NULL;
	dce_call->context->private_data = priv;

	if (!binding) {
		DEBUG(0,("You must specify a DCE/RPC binding string\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}

	user = lpcfg_parm_string(dce_call->conn->dce_ctx->lp_ctx, NULL, "dcerpc_remote", "user");
	pass = lpcfg_parm_string(dce_call->conn->dce_ctx->lp_ctx, NULL, "dcerpc_remote", "password");
	/* fixed section name: was misspelled "dceprc_remote" */
	domain = lpcfg_parm_string(dce_call->conn->dce_ctx->lp_ctx, NULL, "dcerpc_remote", "domain");

	table = ndr_table_by_syntax(&iface->syntax_id);
	if (!table) {
		dce_call->fault_code = DCERPC_FAULT_UNK_IF;
		return NT_STATUS_NET_WRITE_FAULT;
	}

	if (user && pass) {
		/* explicitly configured account */
		DEBUG(5, ("dcerpc_remote: RPC Proxy: Using specified account\n"));
		credentials = cli_credentials_init(priv);
		if (!credentials) {
			return NT_STATUS_NO_MEMORY;
		}
		cli_credentials_set_conf(credentials, dce_call->conn->dce_ctx->lp_ctx);
		cli_credentials_set_username(credentials, user, CRED_SPECIFIED);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		cli_credentials_set_password(credentials, pass, CRED_SPECIFIED);
	} else if (machine_account) {
		/* authenticate as this host's machine account */
		DEBUG(5, ("dcerpc_remote: RPC Proxy: Using machine account\n"));
		credentials = cli_credentials_init(priv);
		if (!credentials) {
			return NT_STATUS_NO_MEMORY;
		}
		cli_credentials_set_conf(credentials, dce_call->conn->dce_ctx->lp_ctx);
		if (domain) {
			cli_credentials_set_domain(credentials, domain, CRED_SPECIFIED);
		}
		status = cli_credentials_set_machine_account(credentials, dce_call->conn->dce_ctx->lp_ctx);
		if (!NT_STATUS_IS_OK(status)) {
			return status;
		}
	} else if (dce_call->conn->auth_state.session_info->credentials) {
		/* forward the caller's delegated credentials; not ours to free */
		DEBUG(5, ("dcerpc_remote: RPC Proxy: Using delegated credentials\n"));
		credentials = dce_call->conn->auth_state.session_info->credentials;
		must_free_credentials = false;
	} else {
		DEBUG(1,("dcerpc_remote: RPC Proxy: You must supply binding, user and password or have delegated credentials\n"));
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* parse binding string to the structure */
	status = dcerpc_parse_binding(dce_call->context, binding, &b);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("Failed to parse dcerpc binding '%s'\n", binding));
		return status;
	}

	/* If we already have a remote association group ID, then use that */
	if (dce_call->context->assoc_group->proxied_id != 0) {
		status = dcerpc_binding_set_assoc_group_id(b,
			dce_call->context->assoc_group->proxied_id);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("dcerpc_binding_set_assoc_group_id() - %s'\n",
				  nt_errstr(status)));
			return status;
		}
	}

	status = dcerpc_binding_set_abstract_syntax(b, &iface->syntax_id);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0, ("dcerpc_binding_set_abstract_syntax() - %s'\n",
			  nt_errstr(status)));
		return status;
	}

	DEBUG(3, ("Using binding %s\n", dcerpc_binding_string(dce_call->context, b)));

	pipe_conn_req = dcerpc_pipe_connect_b_send(dce_call->context, b, table,
						   credentials, dce_call->event_ctx,
						   dce_call->conn->dce_ctx->lp_ctx);
	status = dcerpc_pipe_connect_b_recv(pipe_conn_req, dce_call->context,
					    &(priv->c_pipe));

	if (must_free_credentials) {
		talloc_free(credentials);
	}

	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	/* remember the remote association group id for later binds */
	if (dce_call->context->assoc_group->proxied_id == 0) {
		dce_call->context->assoc_group->proxied_id =
			dcerpc_binding_get_assoc_group_id(priv->c_pipe->binding);
	}

	return NT_STATUS_OK;
}