Example #1
static void schedule_immediate_functions(struct tevent_thread_proxy *tp)
{
	struct tevent_immediate_list *im_entry = NULL;
	struct tevent_immediate_list *im_next = NULL;

	for (im_entry = tp->im_list; im_entry; im_entry = im_next) {
		im_next = im_entry->next;
		DLIST_REMOVE(tp->im_list, im_entry);

		tevent_schedule_immediate(im_entry->im,
					tp->dest_ev_ctx,
					im_entry->handler,
					im_entry->private_ptr);

		/* Move from pending list to free list. */
		DLIST_ADD(tp->tofree_im_list, im_entry);
	}
	if (tp->tofree_im_list != NULL) {
		/*
		 * Once the current immediate events
		 * are processed, we need to reschedule
		 * ourselves to free them. This works
		 * as tevent_schedule_immediate()
		 * always adds events to the *END* of
		 * the immediate events list.
		 */
		tevent_schedule_immediate(tp->free_im,
					tp->dest_ev_ctx,
					free_list_handler,
					tp);
	}
}
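Example #1 relies on the guarantee spelled out in its comment: tevent_schedule_immediate() appends events to the *end* of the pending immediate list, so the free handler only runs after every handler scheduled before it. For reference, here is a minimal self-contained sketch of the basic immediate-event API; hello_handler, the "hello" payload and the main() scaffolding are invented for illustration and are not taken from any of the examples.

/*
 * Minimal sketch of the immediate-event API used throughout these
 * examples; hello_handler and its payload are invented names.
 */
#include <stdio.h>
#include <talloc.h>
#include <tevent.h>

static void hello_handler(struct tevent_context *ev,
			  struct tevent_immediate *im,
			  void *private_data)
{
	const char *msg = (const char *)private_data;
	printf("immediate fired: %s\n", msg);
}

int main(void)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	struct tevent_context *ev = tevent_context_init(mem_ctx);
	struct tevent_immediate *im = tevent_create_immediate(mem_ctx);

	if (ev == NULL || im == NULL) {
		talloc_free(mem_ctx);
		return 1;
	}

	/*
	 * Immediates are appended to the end of the pending list,
	 * which is exactly what Example #1 depends on for free_im.
	 */
	tevent_schedule_immediate(im, ev, hello_handler, "hello");

	tevent_loop_once(ev);	/* runs hello_handler */

	talloc_free(mem_ctx);
	return 0;
}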
Example #2
struct tevent_req *tevent_req_post(struct tevent_req *req,
				   struct tevent_context *ev)
{
	tevent_schedule_immediate(req->internal.trigger,
				  ev, tevent_req_trigger, req);
	return req;
}
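Example #2 is the helper behind the usual tevent_req "early completion" pattern: when a _send() function finishes (or fails) without waiting for I/O, it marks the request done and posts it through an immediate event, so the caller's callback still fires from the event loop instead of from inside _send(). Below is a hedged sketch of such a caller; demo_send(), demo_recv() and struct demo_state are invented names, not part of the examples above.

/*
 * Sketch of a typical caller of tevent_req_post(); demo_send/demo_recv
 * and struct demo_state are invented names.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <talloc.h>
#include <tevent.h>

struct demo_state {
	int result;
};

static struct tevent_req *demo_send(TALLOC_CTX *mem_ctx,
				    struct tevent_context *ev,
				    bool fail_now)
{
	struct tevent_req *req;
	struct demo_state *state;

	req = tevent_req_create(mem_ctx, &state, struct demo_state);
	if (req == NULL) {
		return NULL;
	}

	if (fail_now) {
		/* Complete right away, but deliver the completion via
		 * the event loop (this is what Example #2 provides). */
		tevent_req_error(req, EINVAL);
		return tevent_req_post(req, ev);
	}

	state->result = 42;
	tevent_req_done(req);
	return tevent_req_post(req, ev);
}

static int demo_recv(struct tevent_req *req, int *result)
{
	struct demo_state *state =
		tevent_req_data(req, struct demo_state);
	enum tevent_req_state req_state;
	uint64_t error;

	if (tevent_req_is_error(req, &req_state, &error)) {
		return (int)error;
	}

	*result = state->result;
	return 0;
}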
Example #3
static int tevent_queue_entry_destructor(struct tevent_queue_entry *e)
{
	struct tevent_queue *q = e->queue;

	if (!q) {
		return 0;
	}

	DLIST_REMOVE(q->list, e);
	q->length--;

	if (!q->running) {
		return 0;
	}

	if (!q->list) {
		return 0;
	}

	if (q->list->triggered) {
		return 0;
	}

	tevent_schedule_immediate(q->immediate,
				  q->list->ev,
				  tevent_queue_immediate_trigger,
				  q);

	return 0;
}
Example #4
/*
 * This function is used to process data in queue buffer.
 *
 * The queue callback function can end up freeing the queue, so packets must
 * not be processed from the queue buffer in a loop.  Instead, schedule an
 * immediate event to process the remaining packets from the buffer.
 */
static void queue_process(struct ctdb_queue *queue)
{
	uint32_t pkt_size;
	uint8_t *data;

	if (queue->buffer.length < sizeof(pkt_size)) {
		return;
	}

	pkt_size = *(uint32_t *)queue->buffer.data;
	if (pkt_size == 0) {
		DEBUG(DEBUG_CRIT, ("Invalid packet of length 0\n"));
		goto failed;
	}

	if (queue->buffer.length < pkt_size) {
		if (pkt_size > QUEUE_BUFFER_SIZE) {
			queue->buffer.extend = pkt_size;
		}
		return;
	}

	/* Extract complete packet */
	data = talloc_size(queue, pkt_size);
	if (data == NULL) {
		DEBUG(DEBUG_ERR, ("read error alloc failed for %u\n", pkt_size));
		return;
	}
	memcpy(data, queue->buffer.data, pkt_size);

	/* Shift packet out from buffer */
	if (queue->buffer.length > pkt_size) {
		memmove(queue->buffer.data,
			queue->buffer.data + pkt_size,
			queue->buffer.length - pkt_size);
	}
	queue->buffer.length -= pkt_size;

	if (queue->buffer.length > 0) {
		/* There is more data to be processed, schedule an event */
		tevent_schedule_immediate(queue->im, queue->ctdb->ev,
					  queue_process_event, queue);
	} else {
		if (queue->buffer.size > QUEUE_BUFFER_SIZE) {
			TALLOC_FREE(queue->buffer.data);
			queue->buffer.size = 0;
		}
	}

	/* It is the responsibility of the callback to free 'data' */
	queue->callback(data, pkt_size, queue->private_data);
	return;

failed:
	queue->callback(NULL, 0, queue->private_data);

}
Example #5
int sss_dbus_conn_send(DBusConnection *dbus_conn,
                       DBusMessage *msg,
                       int timeout_ms,
                       DBusPendingCallNotifyFunction reply_handler,
                       void *pvt,
                       DBusPendingCall **pending)
{
    struct tevent_immediate *imm;

    global_test_ctx->reply_pvt = pvt;
    global_test_ctx->reply_handler = reply_handler;

    imm = tevent_create_immediate(global_test_ctx->stc->ev);
    assert_non_null(imm);
    tevent_schedule_immediate(imm, global_test_ctx->stc->ev,
                              fake_sbus_msg_done, global_test_ctx);

    return EOK;
}
Example #6
/*
  called when an incoming connection is writeable
*/
static void queue_io_write(struct ctdb_queue *queue)
{
	while (queue->out_queue) {
		struct ctdb_queue_pkt *pkt = queue->out_queue;
		ssize_t n;
		if (queue->ctdb->flags & CTDB_FLAG_TORTURE) {
			n = write(queue->fd, pkt->data, 1);
		} else {
			n = write(queue->fd, pkt->data, pkt->length);
		}

		if (n == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
			if (pkt->length != pkt->full_length) {
				/* partial packet sent - we have to drop it */
				DLIST_REMOVE(queue->out_queue, pkt);
				queue->out_queue_length--;
				talloc_free(pkt);
			}
			talloc_free(queue->fde);
			queue->fde = NULL;
			queue->fd = -1;
			tevent_schedule_immediate(queue->im, queue->ctdb->ev,
						  queue_dead, queue);
			return;
		}
		if (n <= 0) return;
		
		if (n != pkt->length) {
			pkt->length -= n;
			pkt->data += n;
			return;
		}

		DLIST_REMOVE(queue->out_queue, pkt);
		queue->out_queue_length--;
		talloc_free(pkt);
	}

	EVENT_FD_NOT_WRITEABLE(queue->fde);
}
Example #7
void tevent_queue_start(struct tevent_queue *queue)
{
	if (queue->running) {
		/* already started */
		return;
	}

	queue->running = true;

	if (!queue->list) {
		return;
	}

	if (queue->list->triggered) {
		return;
	}

	tevent_schedule_immediate(queue->immediate,
				  queue->list->ev,
				  tevent_queue_immediate_trigger,
				  queue);
}
Example #8
static void smbd_smb2_notify_reply(struct smb_request *smbreq,
				   NTSTATUS error_code,
				   uint8_t *buf, size_t len)
{
	struct tevent_req *req = talloc_get_type_abort(smbreq->async_priv,
						       struct tevent_req);
	struct smbd_smb2_notify_state *state = tevent_req_data(req,
					       struct smbd_smb2_notify_state);

	state->status = error_code;
	if (!NT_STATUS_IS_OK(error_code)) {
		/* nothing */
	} else if (len == 0) {
		state->status = STATUS_NOTIFY_ENUM_DIR;
	} else {
		state->out_output_buffer = data_blob_talloc(state, buf, len);
		if (state->out_output_buffer.data == NULL) {
			state->status = NT_STATUS_NO_MEMORY;
		}
	}

	if (state->im == NULL) {
		smbd_smb2_notify_reply_trigger(NULL, NULL, req);
		return;
	}

	/*
	 * if this is called async, we need to go via an immediate event
	 * because the caller relies on the smb_request (a child of req)
	 * being around after calling this function
	 */
	tevent_schedule_immediate(state->im,
				  state->smb2req->sconn->smb2.event_ctx,
				  smbd_smb2_notify_reply_trigger,
				  req);
}
Example #9
/*
  queue a packet for sending
*/
int ctdb_queue_send(struct ctdb_queue *queue, uint8_t *data, uint32_t length)
{
	struct ctdb_queue_pkt *pkt;
	uint32_t length2, full_length;

	if (queue->alignment) {
		/* enforce the length and alignment rules from the tcp packet allocator */
		length2 = (length+(queue->alignment-1)) & ~(queue->alignment-1);
		*(uint32_t *)data = length2;
	} else {
		length2 = length;
	}

	if (length2 != length) {
		memset(data+length, 0, length2-length);
	}

	full_length = length2;
	
	/* if the queue is empty then try an immediate write, avoiding
	   queue overhead. This relies on non-blocking sockets */
	if (queue->out_queue == NULL && queue->fd != -1 &&
	    !(queue->ctdb->flags & CTDB_FLAG_TORTURE)) {
		ssize_t n = write(queue->fd, data, length2);
		if (n == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
			talloc_free(queue->fde);
			queue->fde = NULL;
			queue->fd = -1;
			tevent_schedule_immediate(queue->im, queue->ctdb->ev,
						  queue_dead, queue);
			/* yes, we report success, as the dead node is 
			   handled via a separate event */
			return 0;
		}
		if (n > 0) {
			data += n;
			length2 -= n;
		}
		if (length2 == 0) return 0;
	}

	pkt = talloc(queue, struct ctdb_queue_pkt);
	CTDB_NO_MEMORY(queue->ctdb, pkt);

	pkt->data = talloc_memdup(pkt, data, length2);
	CTDB_NO_MEMORY(queue->ctdb, pkt->data);

	pkt->length = length2;
	pkt->full_length = full_length;

	if (queue->out_queue == NULL && queue->fd != -1) {
		EVENT_FD_WRITEABLE(queue->fde);
	}

	DLIST_ADD_END(queue->out_queue, pkt, NULL);

	queue->out_queue_length++;

	if (queue->ctdb->tunable.verbose_memory_names != 0) {
		struct ctdb_req_header *hdr = (struct ctdb_req_header *)pkt->data;
		switch (hdr->operation) {
		case CTDB_REQ_CONTROL: {
			struct ctdb_req_control *c = (struct ctdb_req_control *)hdr;
			talloc_set_name(pkt, "ctdb_queue_pkt: %s control opcode=%u srvid=%llu datalen=%u",
					queue->name, (unsigned)c->opcode, (unsigned long long)c->srvid, (unsigned)c->datalen);
			break;
		}
		case CTDB_REQ_MESSAGE: {
			struct ctdb_req_message *m = (struct ctdb_req_message *)hdr;
			talloc_set_name(pkt, "ctdb_queue_pkt: %s message srvid=%llu datalen=%u",
					queue->name, (unsigned long long)m->srvid, (unsigned)m->datalen);
			break;
		}
		default:
			talloc_set_name(pkt, "ctdb_queue_pkt: %s operation=%u length=%u src=%u dest=%u",
					queue->name, (unsigned)hdr->operation, (unsigned)hdr->length,
					(unsigned)hdr->srcnode, (unsigned)hdr->destnode);
			break;
		}
	}

	return 0;
}
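In Examples #6 and #9 a dead socket is never handled inside the write path itself: the code frees the fd event, invalidates the descriptor and schedules queue_dead() as an immediate event, so the teardown runs on a fresh stack from the event loop ("the dead node is handled via a separate event"). The following is a generic sketch of that deferred-teardown idiom; struct conn, conn_mark_dead() and the dead_cb callback are invented for illustration.

/*
 * Sketch of the "defer fatal-error handling to an immediate event"
 * idiom from Examples #6 and #9; struct conn and its members are
 * invented names.
 */
#include <unistd.h>
#include <talloc.h>
#include <tevent.h>

struct conn {
	int fd;
	struct tevent_fd *fde;
	struct tevent_context *ev;
	struct tevent_immediate *im;
	void (*dead_cb)(struct conn *c);
};

static void conn_dead_handler(struct tevent_context *ev,
			      struct tevent_immediate *im,
			      void *private_data)
{
	struct conn *c = talloc_get_type_abort(private_data, struct conn);

	/* Runs from the event loop, on a fresh stack, outside the
	 * failed write path, so freeing the connection is safe here. */
	c->dead_cb(c);
}

static void conn_mark_dead(struct conn *c)
{
	/* Stop all further fd events and invalidate the descriptor... */
	TALLOC_FREE(c->fde);
	close(c->fd);
	c->fd = -1;

	/* ...then handle the error via a separate event, as the ctdb
	 * queue code does with queue_dead(). */
	tevent_schedule_immediate(c->im, c->ev, conn_dead_handler, c);
}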
Example #10
static struct tevent_queue_entry *tevent_queue_add_internal(
					struct tevent_queue *queue,
					struct tevent_context *ev,
					struct tevent_req *req,
					tevent_queue_trigger_fn_t trigger,
					void *private_data,
					bool allow_direct)
{
	struct tevent_queue_entry *e;

	e = talloc_zero(req, struct tevent_queue_entry);
	if (e == NULL) {
		return NULL;
	}

	e->queue = queue;
	e->req = req;
	e->ev = ev;
	e->trigger = trigger;
	e->private_data = private_data;

	/*
	 * if there is no trigger, it is just a blocker
	 */
	if (trigger == NULL) {
		e->triggered = true;
	}

	if (queue->length > 0) {
		/*
		 * if there are already entries in the
		 * queue do not optimize.
		 */
		allow_direct = false;
	}

	if (req->async.fn != NULL) {
		/*
		 * If the caller wants to optimize for the
		 * empty queue case, call the trigger only
		 * if there is no callback defined for the
		 * request yet.
		 */
		allow_direct = false;
	}

	DLIST_ADD_END(queue->list, e);
	queue->length++;
	talloc_set_destructor(e, tevent_queue_entry_destructor);

	if (!queue->running) {
		return e;
	}

	if (queue->list->triggered) {
		return e;
	}

	/*
	 * If allowed we directly call the trigger
	 * avoiding possible delays caused by
	 * an immediate event.
	 */
	if (allow_direct) {
		queue->list->triggered = true;
		queue->list->trigger(queue->list->req,
				     queue->list->private_data);
		return e;
	}

	tevent_schedule_immediate(queue->immediate,
				  queue->list->ev,
				  tevent_queue_immediate_trigger,
				  queue);

	return e;
}
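Examples #3, #7 and #10 all converge on the same step: when the queue is running and its head entry has not been triggered yet, the trigger is delivered through the queue's immediate event (or directly, when allow_direct permits in Example #10). From the caller's side this machinery is reached through tevent_queue_create(), tevent_queue_add() and tevent_queue_start(). The sketch below shows that public-side usage; demo_trigger(), demo_enqueue() and the request plumbing are invented names.

/*
 * Sketch of the public tevent_queue API whose internals appear in
 * Examples #3, #7 and #10; demo_trigger, demo_enqueue and
 * struct demo_state are invented.
 */
#include <stdio.h>
#include <stdbool.h>
#include <talloc.h>
#include <tevent.h>

struct demo_state {
	int dummy;
};

static void demo_trigger(struct tevent_req *req, void *private_data)
{
	/* Called once this entry reaches the head of a running queue,
	 * either directly or via the queue's immediate event. */
	printf("queue entry triggered: %s\n", (const char *)private_data);
	tevent_req_done(req);
}

static bool demo_enqueue(TALLOC_CTX *mem_ctx,
			 struct tevent_context *ev,
			 struct tevent_queue *queue)
{
	struct tevent_req *req;
	struct demo_state *state;

	req = tevent_req_create(mem_ctx, &state, struct demo_state);
	if (req == NULL) {
		return false;
	}

	/* Queues the request; demo_trigger fires when the entry
	 * reaches the head of the queue. */
	return tevent_queue_add(queue, ev, req, demo_trigger, "payload");
}

int main(void)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	struct tevent_context *ev = tevent_context_init(mem_ctx);
	struct tevent_queue *queue = tevent_queue_create(mem_ctx, "demo queue");

	if (ev == NULL || queue == NULL || !demo_enqueue(mem_ctx, ev, queue)) {
		talloc_free(mem_ctx);
		return 1;
	}

	/* Queues are created in the running state; tevent_queue_start()
	 * (Example #7) is only needed after tevent_queue_stop(). */
	tevent_loop_once(ev);	/* fires demo_trigger */

	talloc_free(mem_ctx);
	return 0;
}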