Example No. 1
START_TEST(test_group_both_set)
{
    struct tevent_req *req;

    test_ctx->ctx->allow_groups = discard_const(ulist_1);
    test_ctx->ctx->deny_groups = discard_const(ulist_1);

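    /* u1 is covered by the deny list, so this request must be refused. */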
    req = simple_access_check_send(test_ctx, test_ctx->ev,
                                   test_ctx->ctx, "u1");
    fail_unless(req != NULL, "Cannot create request\n");
    tevent_req_set_callback(req, simple_access_check_done, test_ctx);

    test_loop(test_ctx);
    test_ctx->done = false;

    fail_unless(test_ctx->error == EOK, "access_simple_check failed.");
    fail_unless(test_ctx->access_granted == false,
                "Access granted while user is in deny list.");

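    /* u3 is not in the allow list, so this request must be refused as well. */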
    req = simple_access_check_send(test_ctx, test_ctx->ev,
                                   test_ctx->ctx, "u3");
    fail_unless(req != NULL, "Cannot create request\n");
    tevent_req_set_callback(req, simple_access_check_done, test_ctx);

    test_loop(test_ctx);

    fail_unless(test_ctx->error == EOK, "access_simple_check failed.");
    fail_unless(test_ctx->access_granted == false,
                "Access granted while user is not in allow list.");
}
END_TEST
Example No. 2
static void wb_next_pwent_fetch_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct wb_next_pwent_state *state = tevent_req_data(
		req, struct wb_next_pwent_state);
	NTSTATUS status;

	status = wb_query_user_list_recv(subreq, state->gstate,
					 &state->gstate->num_users,
					 &state->gstate->users);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		/* Ignore errors here, just log them */
		DEBUG(10, ("query_user_list for domain %s returned %s\n",
			   state->gstate->domain->name,
			   nt_errstr(status)));
		state->gstate->num_users = 0;
	}

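	/* The current domain returned no users: advance to the next domain
	 * and query it, or finish with NT_STATUS_NO_MORE_ENTRIES. */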
	if (state->gstate->num_users == 0) {
		state->gstate->domain = state->gstate->domain->next;

		if ((state->gstate->domain != NULL)
		    && sid_check_is_domain(&state->gstate->domain->sid)) {
			state->gstate->domain = state->gstate->domain->next;
		}

		if (state->gstate->domain == NULL) {
			tevent_req_nterror(req, NT_STATUS_NO_MORE_ENTRIES);
			return;
		}
		subreq = wb_query_user_list_send(state, state->ev,
						 state->gstate->domain);
		if (tevent_req_nomem(subreq, req)) {
			return;
		}
		tevent_req_set_callback(subreq, wb_next_pwent_fetch_done, req);
		return;
	}

	state->gstate->next_user = 0;

	subreq = wb_fill_pwent_send(
		state, state->ev,
		&state->gstate->users[state->gstate->next_user],
		state->pw);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, wb_next_pwent_fill_done, req);
}
Example No. 3
static void wb_next_pwent_fill_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct wb_next_pwent_state *state = tevent_req_data(
		req, struct wb_next_pwent_state);
	NTSTATUS status;

	status = wb_fill_pwent_recv(subreq);
	TALLOC_FREE(subreq);
	/*
	 * When you try to enumerate users with 'getent passwd' and the user
	 * doesn't have a uid set, we should just move on.
	 */
	if (NT_STATUS_EQUAL(status, NT_STATUS_NONE_MAPPED)) {
		state->gstate->next_user += 1;

		if (state->gstate->next_user >= state->gstate->num_users) {
			TALLOC_FREE(state->gstate->users);

			state->gstate->domain = wb_next_find_domain(state->gstate->domain);
			if (state->gstate->domain == NULL) {
				tevent_req_nterror(req, NT_STATUS_NO_MORE_ENTRIES);
				return;
			}

			subreq = wb_query_user_list_send(state, state->ev,
					state->gstate->domain);
			if (tevent_req_nomem(subreq, req)) {
				return;
			}
			tevent_req_set_callback(subreq, wb_next_pwent_fetch_done, req);
			return;
		}

		subreq = wb_fill_pwent_send(state,
					    state->ev,
					    &state->gstate->users[state->gstate->next_user],
					    state->pw);
		if (tevent_req_nomem(subreq, req)) {
			return;
		}
		tevent_req_set_callback(subreq, wb_next_pwent_fill_done, req);

		return;
	} else if (tevent_req_nterror(req, status)) {
		return;
	}
	state->gstate->next_user += 1;
	tevent_req_done(req);
}
Example No. 4
int ifp_groups_list_by_domain_and_name(struct sbus_request *sbus_req,
                                       void *data,
                                       const char *domain,
                                       const char *filter,
                                       uint32_t limit)
{
    struct tevent_req *req;
    struct ifp_ctx *ctx;
    struct ifp_list_ctx *list_ctx;

    ctx = talloc_get_type(data, struct ifp_ctx);
    if (ctx == NULL) {
        DEBUG(SSSDBG_CRIT_FAILURE, "Invalid pointer!\n");
        return ERR_INTERNAL;
    }

    list_ctx = ifp_list_ctx_new(sbus_req, ctx, filter, limit);
    if (list_ctx == NULL) {
        return ENOMEM;
    }

    req = cache_req_group_by_filter_send(list_ctx, ctx->rctx->ev, ctx->rctx,
                                         domain, filter);
    if (req == NULL) {
        return ENOMEM;
    }
    tevent_req_set_callback(req,
                            ifp_groups_list_by_domain_and_name_done, list_ctx);

    return EOK;
}
Example No. 5
void sdap_pam_access_handler(struct be_req *breq)
{
    struct be_ctx *be_ctx = be_req_get_be_ctx(breq);
    struct pam_data *pd;
    struct tevent_req *req;
    struct sdap_access_ctx *access_ctx;
    struct sss_domain_info *dom;

    pd = talloc_get_type(be_req_get_data(breq), struct pam_data);

    access_ctx =
            talloc_get_type(be_ctx->bet_info[BET_ACCESS].pvt_bet_data,
                            struct sdap_access_ctx);

    dom = be_ctx->domain;
    if (strcasecmp(pd->domain, be_ctx->domain->name) != 0) {
        /* Subdomain request, verify subdomain */
        dom = find_subdomain_by_name(be_ctx->domain, pd->domain, true);
    }

    req = sdap_access_send(breq, be_ctx->ev, be_ctx,
                           dom, access_ctx,
                           access_ctx->id_ctx->conn,
                           pd);
    if (req == NULL) {
        DEBUG(SSSDBG_CRIT_FAILURE, ("Unable to start sdap_access request\n"));
        sdap_access_reply(breq, PAM_SYSTEM_ERR);
        return;
    }

    tevent_req_set_callback(req, sdap_access_done, breq);
}
Example No. 6
static bool cldap_recvfrom_setup(struct cldap_socket *c)
{
	struct tevent_context *ev;

	if (c->recv_subreq) {
		return true;
	}

	if (!c->searches.list && !c->incoming.handler) {
		return true;
	}

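	/* Use the event context of the incoming handler if one is set,
	 * otherwise fall back to the one of the first pending search. */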
	ev = c->incoming.ev;
	if (ev == NULL) {
		ev = c->searches.list->caller.ev;
	}

	c->recv_subreq = tdgram_recvfrom_send(c, ev, c->sock);
	if (!c->recv_subreq) {
		return false;
	}
	tevent_req_set_callback(c->recv_subreq, cldap_recvfrom_done, c);

	return true;
}
Example No. 7
NTSTATUS smbd_smb2_request_process_tcon(struct smbd_smb2_request *req)
{
	const uint8_t *inbody;
	uint16_t in_path_offset;
	uint16_t in_path_length;
	DATA_BLOB in_path_buffer;
	char *in_path_string;
	size_t in_path_string_size;
	NTSTATUS status;
	bool ok;
	struct tevent_req *subreq;

	status = smbd_smb2_request_verify_sizes(req, 0x09);
	if (!NT_STATUS_IS_OK(status)) {
		return smbd_smb2_request_error(req, status);
	}
	inbody = SMBD_SMB2_IN_BODY_PTR(req);

	in_path_offset = SVAL(inbody, 0x04);
	in_path_length = SVAL(inbody, 0x06);

	if (in_path_offset != (SMB2_HDR_BODY + SMBD_SMB2_IN_BODY_LEN(req))) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (in_path_length > SMBD_SMB2_IN_DYN_LEN(req)) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

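	/* The share path is carried as UTF-16 in the dynamic part of the
	 * request; convert it to the unix charset before connecting. */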
	in_path_buffer.data = SMBD_SMB2_IN_DYN_PTR(req);
	in_path_buffer.length = in_path_length;

	ok = convert_string_talloc(req, CH_UTF16, CH_UNIX,
				   in_path_buffer.data,
				   in_path_buffer.length,
				   &in_path_string,
				   &in_path_string_size);
	if (!ok) {
		return smbd_smb2_request_error(req, NT_STATUS_ILLEGAL_CHARACTER);
	}

	if (in_path_buffer.length == 0) {
		in_path_string_size = 0;
	}

	if (strlen(in_path_string) != in_path_string_size) {
		return smbd_smb2_request_error(req, NT_STATUS_BAD_NETWORK_NAME);
	}

	subreq = smbd_smb2_tree_connect_send(req,
					     req->sconn->ev_ctx,
					     req,
					     in_path_string);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_tcon_done, req);

	return smbd_smb2_request_pending_queue(req, subreq, 500);
}
Example No. 8
static NTSTATUS remote_op_dispatch(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx, void *r)
{
	struct dcesrv_remote_private *priv = talloc_get_type_abort(dce_call->context->private_data,
								   struct dcesrv_remote_private);
	uint16_t opnum = dce_call->pkt.u.request.opnum;
	const struct ndr_interface_table *table = dce_call->context->iface->private_data;
	const struct ndr_interface_call *call;
	const char *name;
	struct tevent_req *subreq;

	name = table->calls[opnum].name;
	call = &table->calls[opnum];

	if (priv->c_pipe->conn->flags & DCERPC_DEBUG_PRINT_IN) {
		ndr_print_function_debug(call->ndr_print, name, NDR_IN | NDR_SET_VALUES, r);		
	}

	priv->c_pipe->conn->flags |= DCERPC_NDR_REF_ALLOC;

	/* we do not use the return code of this function; we only check the last_fault_code */
	subreq = dcerpc_binding_handle_call_send(dce_call, dce_call->event_ctx,
						 priv->c_pipe->binding_handle,
						 NULL, table,
						 opnum, mem_ctx, r);
	if (subreq == NULL) {
		DEBUG(0,("dcesrv_remote: call[%s] dcerpc_binding_handle_call_send() failed!\n", name));
		return NT_STATUS_NO_MEMORY;
	}
	tevent_req_set_callback(subreq, remote_op_dispatch_done, dce_call);

	dce_call->state_flags |= DCESRV_CALL_STATE_FLAG_ASYNC;
	return NT_STATUS_OK;
}
Example No. 9
static void init_domain_recv_lsa_policy(struct tevent_req *subreq)
{
	struct init_domain_state *state =
		tevent_req_callback_data(subreq,
		struct init_domain_state);

	state->ctx->status = dcerpc_lsa_OpenPolicy2_r_recv(subreq, state);
	TALLOC_FREE(subreq);
	if ((!NT_STATUS_IS_OK(state->ctx->status)
	      || !NT_STATUS_IS_OK(state->lsa_openpolicy.out.result))) {
		if (retry_with_schannel(state, state->domain->lsa_binding, 
					&ndr_table_lsarpc,
					init_domain_recv_lsa_pipe)) {
			return;
		}
	}
	if (!composite_is_ok(state->ctx)) return;
	state->ctx->status = state->lsa_openpolicy.out.result;
	if (!composite_is_ok(state->ctx)) return;

	state->info = talloc_zero(state->ctx, union lsa_PolicyInformation);
	if (composite_nomem(state->info, state->ctx)) return;

	state->queryinfo.in.handle = &state->domain->libnet_ctx->lsa.handle;
	state->queryinfo.in.level = LSA_POLICY_INFO_ACCOUNT_DOMAIN;
	state->queryinfo.out.info = &state->info;

	subreq = dcerpc_lsa_QueryInfoPolicy_r_send(state,
						   state->ctx->event_ctx,
						   state->domain->libnet_ctx->lsa.pipe->binding_handle,
						   &state->queryinfo);
	if (composite_nomem(subreq, state->ctx)) return;
	tevent_req_set_callback(subreq, init_domain_recv_queryinfo, state);
}
Example No. 10
void ipa_access_handler(struct be_req *be_req)
{
    struct pam_data *pd;
    struct ipa_access_ctx *ipa_access_ctx;
    struct tevent_req *req;
    struct sss_domain_info *dom;
    struct be_ctx *be_ctx = be_req_get_be_ctx(be_req);

    pd = talloc_get_type(be_req_get_data(be_req), struct pam_data);

    ipa_access_ctx = talloc_get_type(be_ctx->bet_info[BET_ACCESS].pvt_bet_data,
                                     struct ipa_access_ctx);

    dom = be_ctx->domain;
    if (strcasecmp(pd->domain, be_ctx->domain->name) != 0) {
        /* Subdomain request, verify subdomain */
        dom = find_subdomain_by_name(be_ctx->domain, pd->domain, true);
    }

    /* First, verify that this account isn't locked.
     * We need to do this in case the auth phase was
     * skipped (such as during GSSAPI single-sign-on
     * or SSH public key exchange).
     */
    req = sdap_access_send(be_req, be_ctx->ev, be_ctx, dom,
                           ipa_access_ctx->sdap_access_ctx,
                           ipa_access_ctx->sdap_access_ctx->id_ctx->conn,
                           pd);
    if (!req) {
        be_req_terminate(be_req, DP_ERR_FATAL, PAM_SYSTEM_ERR, NULL);
        return;
    }
    tevent_req_set_callback(req, ipa_hbac_check, be_req);
}
Example No. 11
static void hbac_get_service_info_step(struct tevent_req *req)
{
    errno_t ret;
    struct hbac_ctx *hbac_ctx =
            tevent_req_callback_data(req, struct hbac_ctx);
    struct be_ctx *be_ctx = be_req_get_be_ctx(hbac_ctx->be_req);

    ret = ipa_host_info_recv(req, hbac_ctx,
                             &hbac_ctx->host_count,
                             &hbac_ctx->hosts,
                             &hbac_ctx->hostgroup_count,
                             &hbac_ctx->hostgroups);
    talloc_zfree(req);
    if (!hbac_check_step_result(hbac_ctx, ret)) {
        return;
    }

    /* Get services and service groups */
    req = ipa_hbac_service_info_send(hbac_ctx, be_ctx->ev,
                                     sdap_id_op_handle(hbac_ctx->sdap_op),
                                     hbac_ctx->sdap_ctx->opts,
                                     hbac_ctx->search_bases);
    if (req == NULL) {
        DEBUG(1,("Could not get service info\n"));
        goto fail;
    }
    tevent_req_set_callback(req, hbac_get_rule_info_step, hbac_ctx);
    return;

fail:
    ipa_access_reply(hbac_ctx, PAM_SYSTEM_ERR);
}
Example No. 12
void sdap_pam_access_handler(struct be_req *breq)
{
    struct pam_data *pd;
    struct tevent_req *req;
    struct sdap_access_ctx *access_ctx;

    pd = talloc_get_type(breq->req_data, struct pam_data);

    access_ctx =
            talloc_get_type(breq->be_ctx->bet_info[BET_ACCESS].pvt_bet_data,
                            struct sdap_access_ctx);

    req = sdap_access_send(breq,
                           breq->be_ctx->ev,
                           breq,
                           access_ctx,
                           pd);
    if (req == NULL) {
        DEBUG(1, ("Unable to start sdap_access request\n"));
        sdap_access_reply(breq, PAM_SYSTEM_ERR);
        return;
    }

    tevent_req_set_callback(req, sdap_access_done, breq);
}
Example No. 13
static int sdap_sudo_schedule_smart_refresh(struct sdap_sudo_ctx *sudo_ctx,
                                            time_t delay)
{
    struct sdap_id_ctx *id_ctx = sudo_ctx->id_ctx;
    struct tevent_req *req = NULL;
    struct timeval tv;

    /* schedule new refresh */
    tv = tevent_timeval_current_ofs(delay, 0);
    req = sdap_sudo_timer_send(sudo_ctx, id_ctx->be->ev, sudo_ctx,
                               tv, delay, sdap_sudo_smart_refresh_send);
    if (req == NULL) {
        DEBUG(SSSDBG_OP_FAILURE, ("Unable to schedule smart refresh of sudo "
              "rules!\n"));
        return ENOMEM;
    }

    tevent_req_set_callback(req, sdap_sudo_periodical_smart_refresh_done,
                            sudo_ctx);

    DEBUG(SSSDBG_TRACE_FUNC, ("Smart refresh scheduled at: %lld\n",
                              (long long)tv.tv_sec));

    return EOK;
}
Example No. 14
void dyndns_test_error(void **state)
{
    struct tevent_req *req;
    errno_t ret;
    TALLOC_CTX *tmp_ctx;

    tmp_ctx = talloc_new(global_talloc_context);
    assert_non_null(tmp_ctx);
    check_leaks_push(tmp_ctx);

    dyndns_test_ctx->state = MOCK_NSUPDATE_ERR;

    req = be_nsupdate_send(tmp_ctx, dyndns_test_ctx->tctx->ev,
                           BE_NSUPDATE_AUTH_GSS_TSIG,
                           discard_const("test message"), false);
    assert_non_null(req);
    tevent_req_set_callback(req, dyndns_test_done, dyndns_test_ctx);

    /* Wait until the test finishes with ERR_DYNDNS_FAILED (child error) */
    ret = test_ev_loop(dyndns_test_ctx->tctx);
    DEBUG(SSSDBG_TRACE_LIBS,
          "Child request returned [%d]: %s\n", ret, strerror(ret));
    assert_int_equal(ret, ERR_DYNDNS_FAILED);

    assert_true(WIFEXITED(dyndns_test_ctx->child_status));
    assert_int_equal(WEXITSTATUS(dyndns_test_ctx->child_status), 1);

    assert_true(check_leaks_pop(tmp_ctx) == true);
    talloc_free(tmp_ctx);
}
Example No. 15
/*
  startup a copy of smbd as a child daemon
*/
static void s3fs_task_init(struct task_server *task)
{
	const char *fileserver_conf;
	struct tevent_req *req;
	const char *smbd_path;
	const char *smbd_cmd[2] = { NULL, NULL };

	task_server_set_title(task, "task[s3fs_parent]");

	/* create a smb.conf for smbd to use */
	fileserver_conf = generate_smb_conf(task);

	smbd_path = talloc_asprintf(task, "%s/smbd", dyn_SBINDIR);
	smbd_cmd[0] = smbd_path;

	/* start it as a child process */
	req = samba_runcmd_send(task, task->event_ctx, timeval_zero(), 1, 0,
				smbd_cmd,
				"--configfile", fileserver_conf,
				"--foreground",
				debug_get_output_is_stdout() ? "--log-stdout" : NULL,
				NULL);
	if (req == NULL) {
		DEBUG(0, ("Failed to start smbd as child daemon\n"));
		goto failed;
	}

	tevent_req_set_callback(req, file_server_smbd_done, task);

	DEBUG(1,("Started file server smbd with config %s\n", fileserver_conf));
	return;
failed:
	task_server_terminate(task, "Failed to startup s3fs smb task", true);
}
Example No. 16
NTSTATUS smbd_smb2_request_process_close(struct smbd_smb2_request *req)
{
	const uint8_t *inbody;
	uint16_t in_flags;
	uint64_t in_file_id_persistent;
	uint64_t in_file_id_volatile;
	struct files_struct *in_fsp;
	NTSTATUS status;
	struct tevent_req *subreq;

	status = smbd_smb2_request_verify_sizes(req, 0x18);
	if (!NT_STATUS_IS_OK(status)) {
		return smbd_smb2_request_error(req, status);
	}
	inbody = SMBD_SMB2_IN_BODY_PTR(req);

	in_flags		= SVAL(inbody, 0x02);
	in_file_id_persistent	= BVAL(inbody, 0x08);
	in_file_id_volatile	= BVAL(inbody, 0x10);

	in_fsp = file_fsp_smb2(req, in_file_id_persistent, in_file_id_volatile);
	if (in_fsp == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
	}

	subreq = smbd_smb2_close_send(req, req->sconn->ev_ctx,
				      req, in_fsp, in_flags);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_close_done, req);

	return smbd_smb2_request_pending_queue(req, subreq, 500);
}
Example No. 17
/*
 * queue a wbsrv_call reply on a wbsrv_connection
 * NOTE: this implies talloc_free(call),
 *       use talloc_reference(call) if you need it after
 *       calling wbsrv_queue_reply
 */
NTSTATUS wbsrv_samba3_send_reply(struct wbsrv_samba3_call *call)
{
	struct wbsrv_connection *wbsrv_conn = call->wbconn;
	struct tevent_req *subreq;
	NTSTATUS status;

	status = wbsrv_samba3_push_reply(call);
	NT_STATUS_NOT_OK_RETURN(status);

	call->out_iov[0].iov_base = (char *) call->out.data;
	call->out_iov[0].iov_len = call->out.length;

	subreq = tstream_writev_queue_send(call,
					   wbsrv_conn->conn->event.ctx,
					   wbsrv_conn->tstream,
					   wbsrv_conn->send_queue,
					   call->out_iov, 1);
	if (subreq == NULL) {
		wbsrv_terminate_connection(wbsrv_conn, "wbsrv_call_loop: "
				"no memory for tstream_writev_queue_send");
		return NT_STATUS_NO_MEMORY;
	}
	tevent_req_set_callback(subreq, wbsrv_samba3_send_reply_done, call);

	return status;
}
Example No. 18
static NTSTATUS wreplsrv_pull_cycle_next_owner_wrapper(struct wreplsrv_pull_cycle_state *state)
{
	NTSTATUS status;

	status = wreplsrv_pull_cycle_next_owner_do_work(state);
	if (NT_STATUS_IS_OK(status)) {
		state->stage = WREPLSRV_PULL_CYCLE_STAGE_DONE;
	} else if (NT_STATUS_EQUAL(STATUS_MORE_ENTRIES, status)) {
		state->stage = WREPLSRV_PULL_CYCLE_STAGE_WAIT_SEND_REPLIES;
		status = NT_STATUS_OK;
	}

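	/* The cycle is complete and we still hold a connection to the
	 * partner: ask it to stop the association before finishing. */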
	if (state->stage == WREPLSRV_PULL_CYCLE_STAGE_DONE && state->io->in.wreplconn) {
		state->assoc_stop_io.in.assoc_ctx	= state->io->in.wreplconn->assoc_ctx.peer_ctx;
		state->assoc_stop_io.in.reason		= 0;
		state->subreq = wrepl_associate_stop_send(state,
							  state->io->in.wreplconn->service->task->event_ctx,
							  state->io->in.wreplconn->sock,
							  &state->assoc_stop_io);
		NT_STATUS_HAVE_NO_MEMORY(state->subreq);

		tevent_req_set_callback(state->subreq,
					wreplsrv_pull_cycle_handler_treq,
					state);

		state->stage = WREPLSRV_PULL_CYCLE_STAGE_WAIT_STOP_ASSOC;
	}

	return status;
}
Example No. 19
static NTSTATUS wreplsrv_push_notify_inform(struct wreplsrv_push_notify_state *state)
{
	struct wreplsrv_service *service = state->io->in.partner->service;
	struct wrepl_packet *req = &state->req_packet;
	struct wrepl_replication *repl_out = &state->req_packet.message.replication;
	struct wrepl_table *table_out = &state->req_packet.message.replication.info.table;
	NTSTATUS status;

	req->opcode	= WREPL_OPCODE_BITS;
	req->assoc_ctx	= state->wreplconn->assoc_ctx.peer_ctx;
	req->mess_type	= WREPL_REPLICATION;

	repl_out->command = state->command;

	status = wreplsrv_fill_wrepl_table(service, state, table_out,
					   service->wins_db->local_owner, state->full_table);
	NT_STATUS_NOT_OK_RETURN(status);

	/* we won't get a reply to an inform message */
	state->ctrl.send_only		= true;

	state->subreq = wrepl_request_send(state,
					   state->wreplconn->service->task->event_ctx,
					   state->wreplconn->sock, req, &state->ctrl);
	NT_STATUS_HAVE_NO_MEMORY(state->subreq);

	tevent_req_set_callback(state->subreq,
				wreplsrv_push_notify_handler_treq,
				state);

	state->stage = WREPLSRV_PUSH_NOTIFY_STAGE_WAIT_INFORM;

	return NT_STATUS_OK;
}
Example No. 20
static void named_pipe_accept(struct stream_connection *conn)
{
	struct tstream_context *plain_tstream;
	int fd;
	struct tevent_req *subreq;
	int ret;

	/* Let tstream take over fd operations */

	fd = socket_get_fd(conn->socket);
	socket_set_flags(conn->socket, SOCKET_FLAG_NOCLOSE);
	TALLOC_FREE(conn->event.fde);
	TALLOC_FREE(conn->socket);

	ret = tstream_bsd_existing_socket(conn, fd, &plain_tstream);
	if (ret != 0) {
		stream_terminate_connection(conn,
				"named_pipe_accept: out of memory");
		return;
	}

	subreq = tstream_npa_accept_existing_send(conn, conn->event.ctx,
						  plain_tstream,
						  FILE_TYPE_MESSAGE_MODE_PIPE,
						  0xff | 0x0400 | 0x0100,
						  4096);
	if (subreq == NULL) {
		stream_terminate_connection(conn,
			"named_pipe_accept: "
			"no memory for tstream_npa_accept_existing_send");
		return;
	}
	tevent_req_set_callback(subreq, named_pipe_accept_done, conn);
}
Example No. 21
NTSTATUS smbd_smb2_request_process_read(struct smbd_smb2_request *req)
{
	NTSTATUS status;
	const uint8_t *inhdr;
	const uint8_t *inbody;
	int i = req->current_idx;
	uint32_t in_smbpid;
	uint32_t in_length;
	uint64_t in_offset;
	uint64_t in_file_id_persistent;
	uint64_t in_file_id_volatile;
	uint32_t in_minimum_count;
	uint32_t in_remaining_bytes;
	struct tevent_req *subreq;

	status = smbd_smb2_request_verify_sizes(req, 0x31);
	if (!NT_STATUS_IS_OK(status)) {
		return smbd_smb2_request_error(req, status);
	}
	inhdr = (const uint8_t *)req->in.vector[i+0].iov_base;
	inbody = (const uint8_t *)req->in.vector[i+1].iov_base;

	in_smbpid = IVAL(inhdr, SMB2_HDR_PID);

	in_length		= IVAL(inbody, 0x04);
	in_offset		= BVAL(inbody, 0x08);
	in_file_id_persistent	= BVAL(inbody, 0x10);
	in_file_id_volatile	= BVAL(inbody, 0x18);
	in_minimum_count	= IVAL(inbody, 0x20);
	in_remaining_bytes	= IVAL(inbody, 0x28);

	/* check the max read size */
	if (in_length > lp_smb2_max_read()) {
		DEBUG(0,("here:%s: 0x%08X: 0x%08X\n",
			__location__, in_length, lp_smb2_max_read()));
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (req->compat_chain_fsp) {
		/* skip check */
	} else if (in_file_id_persistent != in_file_id_volatile) {
		return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
	}

	subreq = smbd_smb2_read_send(req,
				     req->sconn->smb2.event_ctx,
				     req,
				     in_smbpid,
				     in_file_id_volatile,
				     in_length,
				     in_offset,
				     in_minimum_count,
				     in_remaining_bytes);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_read_done, req);

	return smbd_smb2_request_pending_queue(req, subreq);
}
Example No. 22
NTSTATUS smbd_smb2_request_process_flush(struct smbd_smb2_request *req)
{
	NTSTATUS status;
	const uint8_t *inbody;
	int i = req->current_idx;
	uint64_t in_file_id_persistent;
	uint64_t in_file_id_volatile;
	struct tevent_req *subreq;

	status = smbd_smb2_request_verify_sizes(req, 0x18);
	if (!NT_STATUS_IS_OK(status)) {
		return smbd_smb2_request_error(req, status);
	}
	inbody = (const uint8_t *)req->in.vector[i+1].iov_base;

	in_file_id_persistent	= BVAL(inbody, 0x08);
	in_file_id_volatile	= BVAL(inbody, 0x10);

	if (req->compat_chain_fsp) {
		/* skip check */
	} else if (in_file_id_persistent != in_file_id_volatile) {
		return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
	}

	subreq = smbd_smb2_flush_send(req,
				      req->sconn->ev_ctx,
				      req,
				      in_file_id_volatile);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_flush_done, req);

	return smbd_smb2_request_pending_queue(req, subreq, 500);
}
Example No. 23
static void unbecomeDC_drsuapi_bind_send(struct libnet_UnbecomeDC_state *s)
{
	struct composite_context *c = s->creq;
	struct drsuapi_DsBindInfo28 *bind_info28;
	struct tevent_req *subreq;

	GUID_from_string(DRSUAPI_DS_BIND_GUID, &s->drsuapi.bind_guid);

	bind_info28				= &s->drsuapi.local_info28;
	bind_info28->supported_extensions	= 0;
	bind_info28->site_guid			= GUID_zero();
	bind_info28->pid			= 0;
	bind_info28->repl_epoch			= 0;

	s->drsuapi.bind_info_ctr.length		= 28;
	s->drsuapi.bind_info_ctr.info.info28	= *bind_info28;

	s->drsuapi.bind_r.in.bind_guid = &s->drsuapi.bind_guid;
	s->drsuapi.bind_r.in.bind_info = &s->drsuapi.bind_info_ctr;
	s->drsuapi.bind_r.out.bind_handle = &s->drsuapi.bind_handle;

	subreq = dcerpc_drsuapi_DsBind_r_send(s, c->event_ctx,
					      s->drsuapi.drsuapi_handle,
					      &s->drsuapi.bind_r);
	if (composite_nomem(subreq, c)) return;
	tevent_req_set_callback(subreq, unbecomeDC_drsuapi_bind_recv, s);
}
Example No. 24
static void unbecomeDC_send_cldap(struct libnet_UnbecomeDC_state *s)
{
	struct composite_context *c = s->creq;
	struct tevent_req *req;
	struct tsocket_address *dest_address;
	int ret;

	s->cldap.io.in.dest_address	= NULL;
	s->cldap.io.in.dest_port	= 0;
	s->cldap.io.in.realm		= s->domain.dns_name;
	s->cldap.io.in.host		= s->dest_dsa.netbios_name;
	s->cldap.io.in.user		= NULL;
	s->cldap.io.in.domain_guid	= NULL;
	s->cldap.io.in.domain_sid	= NULL;
	s->cldap.io.in.acct_control	= -1;
	s->cldap.io.in.version		= NETLOGON_NT_VERSION_5 | NETLOGON_NT_VERSION_5EX;
	s->cldap.io.in.map_response	= true;

	ret = tsocket_address_inet_from_strings(s, "ip",
						s->source_dsa.address,
						lpcfg_cldap_port(s->libnet->lp_ctx),
						&dest_address);
	if (ret != 0) {
		c->status = map_nt_error_from_unix_common(errno);
		if (!composite_is_ok(c)) return;
	}

	c->status = cldap_socket_init(s, NULL, dest_address, &s->cldap.sock);
	if (!composite_is_ok(c)) return;

	req = cldap_netlogon_send(s, s->libnet->event_ctx,
				  s->cldap.sock, &s->cldap.io);
	if (composite_nomem(req, c)) return;
	tevent_req_set_callback(req, unbecomeDC_recv_cldap, s);
}
Example No. 25
static void ldap_id_enumerate_timer(struct tevent_context *ev,
                                    struct tevent_timer *tt,
                                    struct timeval tv, void *pvt)
{
    struct sdap_id_ctx *ctx = talloc_get_type(pvt, struct sdap_id_ctx);
    struct tevent_timer *timeout;
    struct tevent_req *req;
    int delay;
    errno_t ret;

    if (be_is_offline(ctx->be)) {
        DEBUG(4, ("Backend is marked offline, retry later!\n"));
        /* schedule starting from now, not the last run */
        delay = dp_opt_get_int(ctx->opts->basic, SDAP_ENUM_REFRESH_TIMEOUT);
        tv = tevent_timeval_current_ofs(delay, 0);
        ldap_id_enumerate_set_timer(ctx, tv);
        return;
    }

    req = ldap_id_enumerate_send(ev, ctx);
    if (!req) {
        DEBUG(1, ("Failed to schedule enumeration, retrying later!\n"));
        /* schedule starting from now, not the last run */
        delay = dp_opt_get_int(ctx->opts->basic, SDAP_ENUM_REFRESH_TIMEOUT);
        tv = tevent_timeval_current_ofs(delay, 0);
        ret = ldap_id_enumerate_set_timer(ctx, tv);
        if (ret != EOK) {
            DEBUG(1, ("Error setting up enumerate timer\n"));
        }
        return;
    }
    tevent_req_set_callback(req, ldap_id_enumerate_reschedule, ctx);

    /* If enumeration takes this long, either we are enumerating too
     * frequently or something went seriously wrong */
    delay = dp_opt_get_int(ctx->opts->basic, SDAP_ENUM_REFRESH_TIMEOUT);
    tv = tevent_timeval_current_ofs(delay, 0);
    timeout = tevent_add_timer(ctx->be->ev, req, tv,
                               ldap_id_enumerate_timeout, req);
    if (timeout == NULL) {
        /* If we can't guarantee a timeout, we
         * need to cancel the request, to avoid
         * the possibility of starting another
         * concurrently
         */
        talloc_zfree(req);

        DEBUG(1, ("Failed to schedule enumeration, retrying later!\n"));
        /* schedule starting from now, not the last run */
        delay = dp_opt_get_int(ctx->opts->basic, SDAP_ENUM_REFRESH_TIMEOUT);
        tv = tevent_timeval_current_ofs(delay, 0);
        ret = ldap_id_enumerate_set_timer(ctx, tv);
        if (ret != EOK) {
            DEBUG(1, ("Error setting up enumerate timer\n"));
        }
        return;
    }
    return;
}
Example No. 26
static void named_pipe_accept_done(struct tevent_req *subreq)
{
	struct auth_session_info_transport *session_info_transport;
	struct named_pipe_client *npc =
		tevent_req_callback_data(subreq, struct named_pipe_client);
	int error;
	int ret;

	ret = tstream_npa_accept_existing_recv(subreq, &error, npc,
						&npc->tstream,
						&npc->client,
						&npc->client_name,
						&npc->server,
						&npc->server_name,
						&session_info_transport);

	npc->session_info = talloc_move(npc, &session_info_transport->session_info);

	TALLOC_FREE(subreq);
	if (ret != 0) {
		DEBUG(2, ("Failed to accept named pipe connection! (%s)\n",
			  strerror(error)));
		TALLOC_FREE(npc);
		return;
	}

	ret = make_server_pipes_struct(npc,
				       npc->msg_ctx,
				       npc->pipe_name, NCACN_NP,
				       false, npc->server, npc->client, npc->session_info,
				       &npc->p, &error);
	if (ret != 0) {
		DEBUG(2, ("Failed to create pipes_struct! (%s)\n",
			  strerror(error)));
		goto fail;
	}

	npc->write_queue = tevent_queue_create(npc, "np_server_write_queue");
	if (!npc->write_queue) {
		DEBUG(2, ("Failed to set up write queue!\n"));
		goto fail;
	}

	/* And now start receiving and processing packets */
	subreq = dcerpc_read_ncacn_packet_send(npc, npc->ev, npc->tstream);
	if (!subreq) {
		DEBUG(2, ("Failed to start receving packets\n"));
		goto fail;
	}
	tevent_req_set_callback(subreq, named_pipe_packet_process, npc);
	return;

fail:
	DEBUG(2, ("Fatal error. Terminating client(%s) connection!\n",
		  npc->client_name));
	/* terminate client connection */
	talloc_free(npc);
	return;
}
Example No. 27
static void nested_groups_test_one_group_dup_group_members(void **state)
{
    struct nested_groups_test_ctx *test_ctx = NULL;
    struct sysdb_attrs *rootgroup = NULL;
    struct tevent_req *req = NULL;
    TALLOC_CTX *req_mem_ctx = NULL;
    errno_t ret;
    const char *groups[] = { "cn=emptygroup1,"GROUP_BASE_DN,
                             "cn=emptygroup1,"GROUP_BASE_DN,
                             NULL };
    const struct sysdb_attrs *group1_reply[2] = { NULL };
    const struct sysdb_attrs *group2_reply[2] = { NULL };
    const char * expected[] = { "rootgroup",
                                "emptygroup1" };

    test_ctx = talloc_get_type_abort(*state, struct nested_groups_test_ctx);

    /* mock return values */
    rootgroup = mock_sysdb_group_rfc2307bis(test_ctx, GROUP_BASE_DN, 1000,
                                            "rootgroup", groups);

    group1_reply[0] = mock_sysdb_group_rfc2307bis(test_ctx, GROUP_BASE_DN,
                                                  1001, "emptygroup1", NULL);
    assert_non_null(group1_reply[0]);
    will_return(sdap_get_generic_recv, 1);
    will_return(sdap_get_generic_recv, group1_reply);

    group2_reply[0] = mock_sysdb_group_rfc2307bis(test_ctx, GROUP_BASE_DN,
                                                  1001, "emptygroup1", NULL);
    assert_non_null(group2_reply[0]);
    will_return(sdap_get_generic_recv, 1);
    will_return(sdap_get_generic_recv, group2_reply);

    sss_will_return_always(sdap_has_deref_support, false);

    /* run test, check for memory leaks */
    req_mem_ctx = talloc_new(global_talloc_context);
    assert_non_null(req_mem_ctx);
    check_leaks_push(req_mem_ctx);

    req = sdap_nested_group_send(req_mem_ctx, test_ctx->tctx->ev,
                                 test_ctx->sdap_domain, test_ctx->sdap_opts,
                                 test_ctx->sdap_handle, rootgroup);
    assert_non_null(req);
    tevent_req_set_callback(req, nested_groups_test_done, test_ctx);

    ret = test_ev_loop(test_ctx->tctx);
    assert_true(check_leaks_pop(req_mem_ctx) == true);
    talloc_zfree(req_mem_ctx);

    /* check return code */
    assert_int_equal(ret, ERR_OK);

    assert_int_equal(test_ctx->num_users, 0);
    assert_int_equal(test_ctx->num_groups, N_ELEMENTS(expected));

    compare_sysdb_string_array_noorder(test_ctx->groups,
                                       expected, N_ELEMENTS(expected));
}
Example No. 28
static void open_socket_out_connected(struct tevent_req *subreq)
{
	struct tevent_req *req =
		tevent_req_callback_data(subreq, struct tevent_req);
	struct open_socket_out_state *state =
		tevent_req_data(req, struct open_socket_out_state);
	int ret;
	int sys_errno;

	ret = async_connect_recv(subreq, &sys_errno);
	TALLOC_FREE(subreq);
	if (ret == 0) {
		tevent_req_done(req);
		return;
	}

	if (
#ifdef ETIMEDOUT
		(sys_errno == ETIMEDOUT) ||
#endif
		(sys_errno == EINPROGRESS) ||
		(sys_errno == EALREADY) ||
		(sys_errno == EAGAIN)) {

		/*
		 * retry
		 */

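		/* Increase the delay between attempts by 50% each time while
		 * it is still below 250 milliseconds. */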
		if (state->wait_usec < 250000) {
			state->wait_usec *= 1.5;
		}

		subreq = async_connect_send(state, state->ev, state->fd,
					    (struct sockaddr *)&state->ss,
					    state->salen);
		if (tevent_req_nomem(subreq, req)) {
			return;
		}
		if (!tevent_req_set_endtime(
			    subreq, state->ev,
			    timeval_current_ofs_usec(state->wait_usec))) {
			tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
			return;
		}
		tevent_req_set_callback(subreq, open_socket_out_connected, req);
		return;
	}

#ifdef EISCONN
	if (sys_errno == EISCONN) {
		tevent_req_done(req);
		return;
	}
#endif

	/* real error */
	tevent_req_nterror(req, map_nt_error_from_unix(sys_errno));
}
Example No. 29
static void nested_groups_test_one_group_dup_users(void **state)
{
    struct nested_groups_test_ctx *test_ctx = NULL;
    struct sysdb_attrs *rootgroup = NULL;
    struct tevent_req *req = NULL;
    TALLOC_CTX *req_mem_ctx = NULL;
    errno_t ret;
    const char *name;
    const char *users[] = { "cn=user1,"USER_BASE_DN,
                            "cn=user1,"USER_BASE_DN,
                            NULL };
    const struct sysdb_attrs *user1_reply[2] = { NULL };
    const struct sysdb_attrs *user2_reply[2] = { NULL };

    test_ctx = talloc_get_type_abort(*state, struct nested_groups_test_ctx);

    /* mock return values */
    rootgroup = mock_sysdb_group_rfc2307bis(test_ctx, GROUP_BASE_DN, 1000,
                                            "rootgroup", users);

    user1_reply[0] = mock_sysdb_user(test_ctx, USER_BASE_DN, 2001, "user1");
    assert_non_null(user1_reply[0]);
    will_return(sdap_get_generic_recv, 1);
    will_return(sdap_get_generic_recv, user1_reply);

    user2_reply[0] = mock_sysdb_user(test_ctx, USER_BASE_DN, 2001, "user1");
    assert_non_null(user2_reply[0]);
    will_return(sdap_get_generic_recv, 1);
    will_return(sdap_get_generic_recv, user2_reply);

    sss_will_return_always(sdap_has_deref_support, false);

    /* run test, check for memory leaks */
    req_mem_ctx = talloc_new(global_talloc_context);
    assert_non_null(req_mem_ctx);
    check_leaks_push(req_mem_ctx);

    req = sdap_nested_group_send(req_mem_ctx, test_ctx->tctx->ev,
                                 test_ctx->sdap_domain, test_ctx->sdap_opts,
                                 test_ctx->sdap_handle, rootgroup);
    assert_non_null(req);
    tevent_req_set_callback(req, nested_groups_test_done, test_ctx);

    ret = test_ev_loop(test_ctx->tctx);
    assert_true(check_leaks_pop(req_mem_ctx) == true);
    talloc_zfree(req_mem_ctx);

    /* check return code */
    assert_int_equal(ret, ERR_OK);

    /* Check the users */
    assert_int_equal(test_ctx->num_users, 1);
    assert_int_equal(test_ctx->num_groups, 1);

    ret = sysdb_attrs_get_string(test_ctx->users[0], SYSDB_NAME, &name);
    assert_int_equal(ret, ERR_OK);
    assert_string_equal(name, "user1");
}
Example No. 30
static NTSTATUS smbXsrv_session_table_init(struct smbXsrv_connection *conn,
					   uint32_t lowest_id,
					   uint32_t highest_id,
					   uint32_t max_sessions)
{
	struct smbXsrv_client *client = conn->client;
	struct smbXsrv_session_table *table;
	NTSTATUS status;
	struct tevent_req *subreq;
	uint64_t max_range;

	if (lowest_id > highest_id) {
		return NT_STATUS_INTERNAL_ERROR;
	}

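	/* Number of session IDs available in the [lowest_id, highest_id]
	 * range. */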
	max_range = highest_id;
	max_range -= lowest_id;
	max_range += 1;

	if (max_sessions > max_range) {
		return NT_STATUS_INTERNAL_ERROR;
	}

	table = talloc_zero(client, struct smbXsrv_session_table);
	if (table == NULL) {
		return NT_STATUS_NO_MEMORY;
	}

	table->local.db_ctx = db_open_rbt(table);
	if (table->local.db_ctx == NULL) {
		TALLOC_FREE(table);
		return NT_STATUS_NO_MEMORY;
	}
	table->local.lowest_id = lowest_id;
	table->local.highest_id = highest_id;
	table->local.max_sessions = max_sessions;

	status = smbXsrv_session_global_init();
	if (!NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(table);
		return status;
	}

	table->global.db_ctx = smbXsrv_session_global_db_ctx;

	dbwrap_watch_db(table->global.db_ctx, client->msg_ctx);

	subreq = messaging_read_send(table, client->ev_ctx, client->msg_ctx,
				     MSG_SMBXSRV_SESSION_CLOSE);
	if (subreq == NULL) {
		TALLOC_FREE(table);
		return NT_STATUS_NO_MEMORY;
	}
	tevent_req_set_callback(subreq, smbXsrv_session_close_loop, client);

	client->session_table = table;
	return NT_STATUS_OK;
}