Example #1
File: ctdb_io.c Project: AIdrifter/samba
/*
  setup a packet queue on a socket
 */
struct ctdb_queue *ctdb_queue_setup(struct ctdb_context *ctdb,
				    TALLOC_CTX *mem_ctx, int fd, int alignment,
				    ctdb_queue_cb_fn_t callback,
				    void *private_data, const char *fmt, ...)
{
	struct ctdb_queue *queue;
	va_list ap;

	queue = talloc_zero(mem_ctx, struct ctdb_queue);
	CTDB_NO_MEMORY_NULL(ctdb, queue);
	va_start(ap, fmt);
	queue->name = talloc_vasprintf(mem_ctx, fmt, ap);
	va_end(ap);
	CTDB_NO_MEMORY_NULL(ctdb, queue->name);

	queue->im = tevent_create_immediate(queue);
	CTDB_NO_MEMORY_NULL(ctdb, queue->im);

	queue->ctdb = ctdb;
	queue->fd = fd;
	queue->alignment = alignment;
	queue->private_data = private_data;
	queue->callback = callback;
	if (fd != -1) {
		if (ctdb_queue_set_fd(queue, fd) != 0) {
			talloc_free(queue);
			return NULL;
		}
	}
	talloc_set_destructor(queue, queue_destructor);

	return queue;
}
Example #2
struct tevent_queue *_tevent_queue_create(TALLOC_CTX *mem_ctx,
					  const char *name,
					  const char *location)
{
	struct tevent_queue *queue;

	queue = talloc_zero(mem_ctx, struct tevent_queue);
	if (!queue) {
		return NULL;
	}

	queue->name = talloc_strdup(queue, name);
	if (!queue->name) {
		talloc_free(queue);
		return NULL;
	}
	queue->immediate = tevent_create_immediate(queue);
	if (!queue->immediate) {
		talloc_free(queue);
		return NULL;
	}

	queue->location = location;

	/* queue is running by default */
	queue->running = true;

	talloc_set_destructor(queue, tevent_queue_destructor);
	return queue;
}
Example #3
int sss_dbus_conn_send(DBusConnection *dbus_conn,
                       DBusMessage *msg,
                       int timeout_ms,
                       DBusPendingCallNotifyFunction reply_handler,
                       void *pvt,
                       DBusPendingCall **pending)
{
    struct tevent_immediate *imm;

    global_test_ctx->reply_pvt = pvt;
    global_test_ctx->reply_handler = reply_handler;

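    /* Test stub: schedule fake_sbus_msg_done() via an immediate event so
     * the fake reply is delivered from the event loop rather than over a
     * real D-Bus connection. */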
    imm = tevent_create_immediate(global_test_ctx->stc->ev);
    assert_non_null(imm);
    tevent_schedule_immediate(imm, global_test_ctx->stc->ev, fake_sbus_msg_done, global_test_ctx);

    return EOK;
}
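Every example on this page creates the event with tevent_create_immediate(), and example #3 also shows the second half of the pattern, tevent_schedule_immediate(), which runs a handler on the next event-loop iteration. The following is a minimal, self-contained sketch of that create/schedule/fire lifecycle, assuming only the public talloc/tevent API (tevent_context_init, tevent_schedule_immediate, tevent_loop_once); it is an illustration, not code taken from any of the projects above.

#include <stdio.h>
#include <talloc.h>
#include <tevent.h>

/* Handler run from the event loop on the next iteration. */
static void print_message(struct tevent_context *ev,
			  struct tevent_immediate *im,
			  void *private_data)
{
	char *msg = (char *)private_data;
	printf("%s\n", msg);
}

int main(void)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	struct tevent_context *ev;
	struct tevent_immediate *im;
	static char msg[] = "immediate fired";

	ev = tevent_context_init(mem_ctx);
	if (ev == NULL) {
		talloc_free(mem_ctx);
		return 1;
	}

	/* The immediate is a talloc child of mem_ctx; freeing that
	 * parent before the event fires also cancels it. */
	im = tevent_create_immediate(mem_ctx);
	if (im == NULL) {
		talloc_free(mem_ctx);
		return 1;
	}

	/* Queue the handler; it fires on the next loop iteration. */
	tevent_schedule_immediate(im, ev, print_message, msg);

	tevent_loop_once(ev);

	talloc_free(mem_ctx);
	return 0;
}

The real examples follow the same shape: the immediate is parented to the structure that owns it (queue, request, proxy or service), so tearing that structure down with talloc_free() also cancels any event that has not fired yet.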
Example #4
File: ctdb_io.c Project: urisimchoni/samba
/*
  setup a packet queue on a socket
 */
struct ctdb_queue *ctdb_queue_setup(struct ctdb_context *ctdb,
				    TALLOC_CTX *mem_ctx, int fd, int alignment,
				    ctdb_queue_cb_fn_t callback,
				    void *private_data, const char *fmt, ...)
{
	struct ctdb_queue *queue;
	va_list ap;

	queue = talloc_zero(mem_ctx, struct ctdb_queue);
	CTDB_NO_MEMORY_NULL(ctdb, queue);
	va_start(ap, fmt);
	queue->name = talloc_vasprintf(mem_ctx, fmt, ap);
	va_end(ap);
	CTDB_NO_MEMORY_NULL(ctdb, queue->name);

	queue->im = tevent_create_immediate(queue);
	CTDB_NO_MEMORY_NULL(ctdb, queue->im);

	queue->ctdb = ctdb;
	queue->fd = fd;
	queue->alignment = alignment;
	queue->private_data = private_data;
	queue->callback = callback;
	if (fd != -1) {
		if (ctdb_queue_set_fd(queue, fd) != 0) {
			talloc_free(queue);
			return NULL;
		}
	}
	talloc_set_destructor(queue, queue_destructor);

	queue->buffer_size = ctdb->tunable.queue_buffer_size;
	/* In client code, ctdb->tunable is not initialized.
	 * This does not affect recovery daemon.
	 */
	if (queue->buffer_size == 0) {
		queue->buffer_size = 1024;
	}

	return queue;
}
Example #5
struct tevent_req *_tevent_req_create(TALLOC_CTX *mem_ctx,
				    void *pdata,
				    size_t data_size,
				    const char *type,
				    const char *location)
{
	struct tevent_req *req;
	void **ppdata = (void **)pdata;
	void *data;

	req = talloc_zero(mem_ctx, struct tevent_req);
	if (req == NULL) {
		return NULL;
	}
	req->internal.private_type	= type;
	req->internal.create_location	= location;
	req->internal.finish_location	= NULL;
	req->internal.state		= TEVENT_REQ_IN_PROGRESS;
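	/* trigger is the immediate used by tevent_req_post() to deliver
	 * the request's callback from the event loop */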
	req->internal.trigger		= tevent_create_immediate(req);
	if (!req->internal.trigger) {
		talloc_free(req);
		return NULL;
	}
	req->internal.defer_callback_ev	= NULL;

	data = talloc_zero_size(req, data_size);
	if (data == NULL) {
		talloc_free(req);
		return NULL;
	}
	talloc_set_name_const(data, type);

	req->data = data;

	*ppdata = data;
	return req;
}
Example #6
static struct tevent_req *smbd_smb2_notify_send(TALLOC_CTX *mem_ctx,
						struct tevent_context *ev,
						struct smbd_smb2_request *smb2req,
						struct files_struct *fsp,
						uint16_t in_flags,
						uint32_t in_output_buffer_length,
						uint64_t in_completion_filter)
{
	struct tevent_req *req;
	struct smbd_smb2_notify_state *state;
	struct smb_request *smbreq;
	connection_struct *conn = smb2req->tcon->compat_conn;
	bool recursive = (in_flags & 0x0001) ? true : false;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state,
				struct smbd_smb2_notify_state);
	if (req == NULL) {
		return NULL;
	}
	state->smb2req = smb2req;
	state->status = NT_STATUS_INTERNAL_ERROR;
	state->out_output_buffer = data_blob_null;
	state->im = NULL;

	DEBUG(10,("smbd_smb2_notify_send: %s - fnum[%d]\n",
		  fsp_str_dbg(fsp), fsp->fnum));

	smbreq = smbd_smb2_fake_smb_request(smb2req);
	if (tevent_req_nomem(smbreq, req)) {
		return tevent_req_post(req, ev);
	}

	state->smbreq = smbreq;
	smbreq->async_priv = (void *)req;

	{
		char *filter_string;

		filter_string = notify_filter_string(NULL, in_completion_filter);
		if (tevent_req_nomem(filter_string, req)) {
			return tevent_req_post(req, ev);
		}

		DEBUG(3,("smbd_smb2_notify_send: notify change "
			 "called on %s, filter = %s, recursive = %d\n",
			 fsp_str_dbg(fsp), filter_string, recursive));

		TALLOC_FREE(filter_string);
	}

	if ((!fsp->is_directory) || (conn != fsp->conn)) {
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return tevent_req_post(req, ev);
	}

	if (fsp->notify == NULL) {

		status = change_notify_create(fsp,
					      in_completion_filter,
					      recursive);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(10, ("change_notify_create returned %s\n",
				   nt_errstr(status)));
			tevent_req_nterror(req, status);
			return tevent_req_post(req, ev);
		}
	}

	if (fsp->notify->num_changes != 0) {

		/*
		 * We've got changes pending, respond immediately
		 */

		/*
		 * TODO: write a torture test to check the filtering behaviour
		 * here.
		 */

		change_notify_reply(smbreq,
				    NT_STATUS_OK,
				    in_output_buffer_length,
				    fsp->notify,
				    smbd_smb2_notify_reply);

		/*
		 * change_notify_reply() above has independently
		 * called tevent_req_done().
		 */
		return tevent_req_post(req, ev);
	}

	state->im = tevent_create_immediate(state);
	if (tevent_req_nomem(state->im, req)) {
		return tevent_req_post(req, ev);
	}

	/*
	 * No changes pending, queue the request
	 */

	status = change_notify_add_request(smbreq,
			in_output_buffer_length,
			in_completion_filter,
			recursive, fsp,
			smbd_smb2_notify_reply);
	if (!NT_STATUS_IS_OK(status)) {
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}

	/* allow this request to be canceled */
	tevent_req_set_cancel_fn(req, smbd_smb2_notify_cancel);

	return req;
}
Example #7
File: tevent_threads.c Project: GSam/samba
struct tevent_thread_proxy *tevent_thread_proxy_create(
		struct tevent_context *dest_ev_ctx)
{
	int ret;
	int pipefds[2];
	struct tevent_thread_proxy *tp;

	tp = talloc_zero(dest_ev_ctx, struct tevent_thread_proxy);
	if (tp == NULL) {
		return NULL;
	}

	ret = pthread_mutex_init(&tp->mutex, NULL);
	if (ret != 0) {
		goto fail;
	}

	tp->dest_ev_ctx = dest_ev_ctx;
	tp->read_fd = -1;
	tp->write_fd = -1;

	talloc_set_destructor(tp, tevent_thread_proxy_destructor);

	ret = pipe(pipefds);
	if (ret == -1) {
		goto fail;
	}

	tp->read_fd = pipefds[0];
	tp->write_fd = pipefds[1];

	ret = ev_set_blocking(pipefds[0], false);
	if (ret != 0) {
		goto fail;
	}
	ret = ev_set_blocking(pipefds[1], false);
	if (ret != 0) {
		goto fail;
	}
	if (!ev_set_close_on_exec(pipefds[0])) {
		goto fail;
	}
	if (!ev_set_close_on_exec(pipefds[1])) {
		goto fail;
	}

	tp->pipe_read_fde = tevent_add_fd(dest_ev_ctx,
				tp,
				tp->read_fd,
				TEVENT_FD_READ,
				pipe_read_handler,
				tp);
	if (tp->pipe_read_fde == NULL) {
		goto fail;
	}

	/*
	 * Create an immediate event to free
	 * completed lists.
	 */
	tp->free_im = tevent_create_immediate(tp);
	if (tp->free_im == NULL) {
		goto fail;
	}

	return tp;

  fail:

	TALLOC_FREE(tp);
	return NULL;
}
Example #8
/*
  startup the dsdb replicator service task
*/
static void dreplsrv_task_init(struct task_server *task)
{
	WERROR status;
	struct dreplsrv_service *service;
	uint32_t periodic_startup_interval;

	switch (lpcfg_server_role(task->lp_ctx)) {
	case ROLE_STANDALONE:
		task_server_terminate(task, "dreplsrv: no DSDB replication required in standalone configuration",
				      false);
		return;
	case ROLE_DOMAIN_MEMBER:
		task_server_terminate(task, "dreplsrv: no DSDB replication required in domain member configuration",
				      false);
		return;
	case ROLE_ACTIVE_DIRECTORY_DC:
		/* Yes, we want DSDB replication */
		break;
	}

	task_server_set_title(task, "task[dreplsrv]");

	service = talloc_zero(task, struct dreplsrv_service);
	if (!service) {
		task_server_terminate(task, "dreplsrv_task_init: out of memory", true);
		return;
	}
	service->task		= task;
	service->startup_time	= timeval_current();
	task->private_data	= service;

	status = dreplsrv_init_creds(service);
	if (!W_ERROR_IS_OK(status)) {
		task_server_terminate(task, talloc_asprintf(task,
				      "dreplsrv: Failed to obtain server credentials: %s\n",
							    win_errstr(status)), true);
		return;
	}

	status = dreplsrv_connect_samdb(service, task->lp_ctx);
	if (!W_ERROR_IS_OK(status)) {
		task_server_terminate(task, talloc_asprintf(task,
				      "dreplsrv: Failed to connect to local samdb: %s\n",
							    win_errstr(status)), true);
		return;
	}

	status = dreplsrv_load_partitions(service);
	if (!W_ERROR_IS_OK(status)) {
		task_server_terminate(task, talloc_asprintf(task,
				      "dreplsrv: Failed to load partitions: %s\n",
							    win_errstr(status)), true);
		return;
	}

	periodic_startup_interval	= lpcfg_parm_int(task->lp_ctx, NULL, "dreplsrv", "periodic_startup_interval", 15); /* in seconds */
	service->periodic.interval	= lpcfg_parm_int(task->lp_ctx, NULL, "dreplsrv", "periodic_interval", 300); /* in seconds */

	status = dreplsrv_periodic_schedule(service, periodic_startup_interval);
	if (!W_ERROR_IS_OK(status)) {
		task_server_terminate(task, talloc_asprintf(task,
				      "dreplsrv: Failed to periodic schedule: %s\n",
							    win_errstr(status)), true);
		return;
	}

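	/* immediate event used later to kick off pending DsReplicaSync requests */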
	service->pending.im = tevent_create_immediate(service);
	if (service->pending.im == NULL) {
		task_server_terminate(task,
				      "dreplsrv: Failed to create immediate "
				      "task for future DsReplicaSync\n",
				      true);
		return;
	}

	/* if we are a RODC then we do not send DSReplicaSync*/
	if (!service->am_rodc) {
		service->notify.interval = lpcfg_parm_int(task->lp_ctx, NULL, "dreplsrv",
							   "notify_interval", 5); /* in seconds */
		status = dreplsrv_notify_schedule(service, service->notify.interval);
		if (!W_ERROR_IS_OK(status)) {
			task_server_terminate(task, talloc_asprintf(task,
						  "dreplsrv: Failed to setup notify schedule: %s\n",
									win_errstr(status)), true);
			return;
		}
	}

	irpc_add_name(task->msg_ctx, "dreplsrv");

	IRPC_REGISTER(task->msg_ctx, irpc, DREPLSRV_REFRESH, dreplsrv_refresh, service);
	IRPC_REGISTER(task->msg_ctx, drsuapi, DRSUAPI_DSREPLICASYNC, drepl_replica_sync, service);
	IRPC_REGISTER(task->msg_ctx, drsuapi, DRSUAPI_DSREPLICAADD, dreplsrv_replica_add, service);
	IRPC_REGISTER(task->msg_ctx, drsuapi, DRSUAPI_DSREPLICADEL, dreplsrv_replica_del, service);
	IRPC_REGISTER(task->msg_ctx, drsuapi, DRSUAPI_DSREPLICAMOD, dreplsrv_replica_mod, service);
	IRPC_REGISTER(task->msg_ctx, irpc, DREPL_TAKEFSMOROLE, drepl_take_FSMO_role, service);
	IRPC_REGISTER(task->msg_ctx, irpc, DREPL_TRIGGER_REPL_SECRET, drepl_trigger_repl_secret, service);
	imessaging_register(task->msg_ctx, service, MSG_DREPL_ALLOCATE_RID, dreplsrv_allocate_rid);
}