Example #1
static void ctdb_run_startup(struct event_context *ev, struct timed_event *te,
			     struct timeval t, void *private_data)
{
	struct ctdb_context *ctdb = talloc_get_type(private_data,
						    struct ctdb_context);
	int ret;

	/* This is necessary to avoid the "startup" event colliding
	 * with the "ipreallocated" event from the takeover run
	 * following the first recovery.  We might as well serialise
	 * these things if we can.
	 */
	if (ctdb->runstate < CTDB_RUNSTATE_STARTUP) {
		DEBUG(DEBUG_NOTICE,
		      ("Not yet in startup runstate. Wait one more second\n"));
		event_add_timed(ctdb->ev, ctdb->monitor->monitor_context,
				timeval_current_ofs(1, 0),
				ctdb_run_startup, ctdb);
		return;
	}

	DEBUG(DEBUG_NOTICE,("Running the \"startup\" event.\n"));
	ret = ctdb_event_script_callback(ctdb,
					 ctdb->monitor->monitor_context,
					 ctdb_startup_callback,
					 ctdb, CTDB_EVENT_STARTUP, "%s", "");

	if (ret != 0) {
		DEBUG(DEBUG_ERR,("Unable to launch startup event script\n"));
		event_add_timed(ctdb->ev, ctdb->monitor->monitor_context,
				timeval_current_ofs(5, 0),
				ctdb_run_startup, ctdb);
	}
}
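
The pattern worth noting here is that the handler re-arms itself: if the daemon is not yet in CTDB_RUNSTATE_STARTUP it schedules another ctdb_run_startup one second later, and if the event script fails to launch it retries after five seconds. Below is a minimal, self-contained sketch of that self-rescheduling timer idiom against the public tevent API (event_add_timed is the legacy spelling of tevent_add_timer); the tick counter and the build command are illustrative assumptions, not part of CTDB.

/* Self-rescheduling timer sketch using plain tevent/talloc.
 * Build (assumption): cc tick.c -ltevent -ltalloc */
#include <stdio.h>
#include <sys/time.h>
#include <talloc.h>
#include <tevent.h>

static void tick(struct tevent_context *ev, struct tevent_timer *te,
		 struct timeval now, void *private_data)
{
	int *count = (int *)private_data;

	printf("tick %d\n", ++(*count));

	/* re-arm: each firing schedules the next one, one second out */
	tevent_add_timer(ev, ev, tevent_timeval_current_ofs(1, 0),
			 tick, count);
}

int main(void)
{
	TALLOC_CTX *mem_ctx = talloc_new(NULL);
	struct tevent_context *ev = tevent_context_init(mem_ctx);
	int count = 0;

	tevent_add_timer(ev, ev, tevent_timeval_current_ofs(1, 0),
			 tick, &count);

	while (count < 3) {
		tevent_loop_once(ev);
	}

	talloc_free(mem_ctx);
	return 0;
}
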
Example #2
/*
  see if the event scripts think we are healthy
 */
static void ctdb_check_health(struct event_context *ev, struct timed_event *te, 
			      struct timeval t, void *private_data)
{
	struct ctdb_context *ctdb = talloc_get_type(private_data, struct ctdb_context);
	int ret = 0;

	if (ctdb->recovery_mode != CTDB_RECOVERY_NORMAL ||
	    (ctdb->monitor->monitoring_mode == CTDB_MONITORING_DISABLED && ctdb->done_startup)) {
		event_add_timed(ctdb->ev, ctdb->monitor->monitor_context,
				timeval_current_ofs(ctdb->monitor->next_interval, 0), 
				ctdb_check_health, ctdb);
		return;
	}
	
	if (!ctdb->done_startup) {
		ret = ctdb_event_script_callback(ctdb, 
						 ctdb->monitor->monitor_context, ctdb_startup_callback, 
						 ctdb, false,
						 CTDB_EVENT_STARTUP, "%s", "");
	} else {
		int i;
		int skip_monitoring = 0;
		
		if (ctdb->recovery_mode != CTDB_RECOVERY_NORMAL) {
			skip_monitoring = 1;
			DEBUG(DEBUG_ERR,("Skip monitoring during recovery\n"));
		}
		for (i=1; i<=NUM_DB_PRIORITIES; i++) {
			if (ctdb->freeze_handles[i] != NULL) {
				DEBUG(DEBUG_ERR,("Skip monitoring since databases are frozen\n"));
				skip_monitoring = 1;
				break;
			}
		}
		if (skip_monitoring != 0) {
			event_add_timed(ctdb->ev, ctdb->monitor->monitor_context,
					timeval_current_ofs(ctdb->monitor->next_interval, 0), 
					ctdb_check_health, ctdb);
			return;
		} else {
			ret = ctdb_event_script_callback(ctdb, 
					ctdb->monitor->monitor_context, ctdb_health_callback,
					ctdb, false,
					CTDB_EVENT_MONITOR, "%s", "");
		}
	}

	if (ret != 0) {
		DEBUG(DEBUG_ERR,("Unable to launch monitor event script\n"));
		ctdb->monitor->next_interval = 5;
		event_add_timed(ctdb->ev, ctdb->monitor->monitor_context, 
			timeval_current_ofs(5, 0), 
			ctdb_check_health, ctdb);
	}
}
Example #3
static void report_rate(struct tevent_context *ev, struct tevent_timer *te, 
			struct timeval t, void *private_data)
{
	struct benchopen_state *state = talloc_get_type(private_data, 
							struct benchopen_state);
	int i;
	for (i=0;i<nprocs;i++) {
		printf("%5u ", (unsigned)(state[i].count - state[i].lastcount));
		state[i].lastcount = state[i].count;
	}
	printf("\r");
	fflush(stdout);
	report_te = event_add_timed(ev, state, timeval_current_ofs(1, 0), 
				    report_rate, state);

	/* send an echo on each interface to ensure it stays alive - this helps
	   with IP takeover */
	for (i=0;i<nprocs;i++) {
		struct smb_echo p;
		struct smbcli_request *req;

		if (!state[i].tree) {
			continue;
		}

		p.in.repeat_count = 1;
		p.in.size = 0;
		p.in.data = NULL;
		req = smb_raw_echo_send(state[i].tree->session->transport, &p);
		req->async.private_data = &state[i];
		req->async.fn      = echo_completion;
	}
}
Example #4
/*
  complete an async reconnect
 */
static void reopen_connection_complete(struct composite_context *ctx)
{
	struct benchopen_state *state = (struct benchopen_state *)ctx->async.private_data;
	NTSTATUS status;
	struct smb_composite_connect *io = &state->reconnect;

	status = smb_composite_connect_recv(ctx, state->mem_ctx);
	if (!NT_STATUS_IS_OK(status)) {
		talloc_free(state->te);
		state->te = event_add_timed(state->ev, state->mem_ctx, 
					    timeval_current_ofs(1,0), 
					    reopen_connection, state);
		return;
	}

	state->tree = io->out.tree;

	num_connected++;

	DEBUG(0,("[%u] reconnect to %s finished (%u connected)\n",
		 state->client_num, state->dest_host, num_connected));

	state->open_fnum = -1;
	state->close_fnum = -1;
	next_open(state);
}
Example #5
/*
  construct an event driven local ctdb_call

  this is used so that locally processed ctdb_call requests are processed
  in an event driven manner
*/
struct ctdb_call_state *ctdb_call_local_send(struct ctdb_db_context *ctdb_db, 
					     struct ctdb_call *call,
					     struct ctdb_ltdb_header *header,
					     TDB_DATA *data)
{
	struct ctdb_call_state *state;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	int ret;

	state = talloc_zero(ctdb_db, struct ctdb_call_state);
	CTDB_NO_MEMORY_NULL(ctdb, state);

	talloc_steal(state, data->dptr);

	state->state = CTDB_CALL_DONE;
	state->node = ctdb->nodes[ctdb->vnn];
	state->call = *call;
	state->ctdb_db = ctdb_db;

	ret = ctdb_call_local(ctdb_db, &state->call, header, data, ctdb->vnn);
	talloc_steal(state, state->call.reply_data.dptr);

	event_add_timed(ctdb->ev, state, timeval_zero(), call_local_trigger, state);

	return state;
}
Example #6
/*
  establish a new connection to the web server
*/
static void websrv_accept(struct stream_connection *conn)
{
    struct web_server_data *wdata = talloc_get_type(conn->private_data, struct web_server_data);
    struct websrv_context *web;
    struct socket_context *tls_socket;

    web = talloc_zero(conn, struct websrv_context);
    if (web == NULL) goto failed;

    web->task = wdata->task;
    web->conn = conn;
    conn->private_data = web;
    talloc_set_destructor(web, websrv_destructor);

    event_add_timed(conn->event.ctx, web,
                    timeval_current_ofs(HTTP_TIMEOUT, 0),
                    websrv_timeout, web);

    /* Overwrite the socket with a (possibly) TLS socket */
    tls_socket = tls_init_server(wdata->tls_params, conn->socket,
                                 conn->event.fde, "GPHO");
    /* We might not have TLS, or it might not have initialised */
    if (tls_socket) {
        talloc_unlink(conn, conn->socket);
        talloc_steal(conn, tls_socket);
        conn->socket = tls_socket;
    } else {
        DEBUG(3, ("TLS not available for web_server connections\n"));
    }

    return;

failed:
    talloc_free(conn);
}
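
The socket swap at the end relies on talloc ownership: the old socket is dropped from conn with talloc_unlink and the TLS wrapper is reparented onto conn with talloc_steal, so whichever socket ends up in conn->socket is released together with the connection. A tiny sketch of that reparenting idiom, using only the public talloc API (swap_child and its arguments are illustrative names, not Samba API):

/* Swap one talloc child for another under the same parent,
 * as done above for conn->socket. */
#include <talloc.h>

static void *swap_child(TALLOC_CTX *parent, void *old_child, void *new_child)
{
	/* drop the parent's reference (frees old_child if it was the last one) */
	talloc_unlink(parent, old_child);
	/* reparent the replacement onto the same parent */
	return talloc_steal(parent, new_child);
}
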
Example #7
/*
  handle a socket write event
*/
static void messaging_send_handler(struct messaging_context *msg)
{
	while (msg->pending) {
		struct messaging_rec *rec = msg->pending;
		NTSTATUS status;
		status = try_send(rec);
		if (NT_STATUS_EQUAL(status, STATUS_MORE_ENTRIES)) {
			rec->retries++;
			if (rec->retries > 3) {
				/* we're getting continuous write errors -
				   backoff this record */
				DLIST_REMOVE(msg->pending, rec);
				DLIST_ADD_END(msg->retry_queue, rec, 
					      struct messaging_rec *);
				if (msg->retry_te == NULL) {
					msg->retry_te = 
						event_add_timed(msg->event.ev, msg, 
								timeval_current_ofs(1, 0), 
								msg_retry_timer, msg);
				}
			}
			break;
		}
		rec->retries = 0;
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(1,("messaging: Lost message from %s to %s of type %u - %s\n", 
				 cluster_id_string(debug_ctx(), rec->header->from), 
				 cluster_id_string(debug_ctx(), rec->header->to), 
				 rec->header->msg_type, 
				 nt_errstr(status)));
		}
		DLIST_REMOVE(msg->pending, rec);
		talloc_free(rec);
	}
}
Example #8
/*
  perform the send side of a async dcerpc request
*/
static struct rpc_request *dcerpc_request_send(struct dcerpc_pipe *p, 
					       const struct GUID *object,
					       uint16_t opnum,
					       bool async,
					       DATA_BLOB *stub_data)
{
	struct rpc_request *req;

	p->conn->transport.recv_data = dcerpc_recv_data;

	req = talloc(p, struct rpc_request);
	if (req == NULL) {
		return NULL;
	}

	req->p = p;
	req->call_id = next_call_id(p->conn);
	req->status = NT_STATUS_OK;
	req->state = RPC_REQUEST_QUEUED;
	req->payload = data_blob(NULL, 0);
	req->flags = 0;
	req->fault_code = 0;
	req->async_call = async;
	req->ignore_timeout = false;
	req->async.callback = NULL;
	req->async.private_data = NULL;
	req->recv_handler = NULL;

	if (object != NULL) {
		req->object = (struct GUID *)talloc_memdup(req, (const void *)object, sizeof(*object));
		if (req->object == NULL) {
			talloc_free(req);
			return NULL;
		}
	} else {
		req->object = NULL;
	}

	req->opnum = opnum;
	req->request_data.length = stub_data->length;
	req->request_data.data = talloc_reference(req, stub_data->data);
	if (req->request_data.length && req->request_data.data == NULL) {
		return NULL;
	}

	DLIST_ADD_END(p->conn->request_queue, req, struct rpc_request *);
	talloc_set_destructor(req, dcerpc_req_dequeue);

	dcerpc_ship_next_request(p->conn);

	if (p->request_timeout) {
		event_add_timed(dcerpc_event_context(p), req, 
				timeval_current_ofs(p->request_timeout, 0), 
				dcerpc_timeout_handler, req);
	}

	return req;
}
Example #9
static void each_second(struct event_context *ev, struct timed_event *te,
					 struct timeval t, void *private_data)
{
	struct ctdb_context *ctdb = talloc_get_type(private_data, struct ctdb_context);

	print_counters();

	event_add_timed(ev, ctdb, timeval_current_ofs(1, 0), each_second, ctdb);
}
Example #10
int32_t ctdb_control_set_ban_state(struct ctdb_context *ctdb, TDB_DATA indata)
{
	struct ctdb_ban_time *bantime = (struct ctdb_ban_time *)indata.dptr;

	DEBUG(DEBUG_INFO,("SET BAN STATE\n"));

	if (bantime->pnn != ctdb->pnn) {
		if (bantime->pnn < 0 || bantime->pnn >= ctdb->num_nodes) {
			DEBUG(DEBUG_ERR,(__location__ " ERROR: Invalid ban request. PNN:%d is invalid. Max nodes %d\n", bantime->pnn, ctdb->num_nodes));
			return -1;
		}
		if (bantime->time == 0) {
			DEBUG(DEBUG_INFO,("unbanning node %d\n", bantime->pnn));
			ctdb->nodes[bantime->pnn]->flags &= ~NODE_FLAGS_BANNED;
		} else {
			DEBUG(DEBUG_INFO,("banning node %d\n", bantime->pnn));
			if (ctdb->tunable.enable_bans == 0) {
				DEBUG(DEBUG_INFO,("Bans are disabled - ignoring ban of node %u\n", bantime->pnn));
				return 0;
			}

			ctdb->nodes[bantime->pnn]->flags |= NODE_FLAGS_BANNED;
		}
		return 0;
	}

	if (ctdb->banning_ctx != NULL) {
		talloc_free(ctdb->banning_ctx);
		ctdb->banning_ctx = NULL;
	}

	if (bantime->time == 0) {
		DEBUG(DEBUG_ERR,("Unbanning this node\n"));
		ctdb->nodes[bantime->pnn]->flags &= ~NODE_FLAGS_BANNED;
		return 0;
	}

	if (ctdb->tunable.enable_bans == 0) {
		DEBUG(DEBUG_ERR,("Bans are disabled - ignoring ban of node %u\n", bantime->pnn));
		return 0;
	}

	ctdb->banning_ctx = talloc(ctdb, struct ctdb_ban_time);
	if (ctdb->banning_ctx == NULL) {
		DEBUG(DEBUG_CRIT,(__location__ " ERROR Failed to allocate new banning state\n"));
		return -1;
	}
	*((struct ctdb_ban_time *)(ctdb->banning_ctx)) = *bantime;


	DEBUG(DEBUG_ERR,("Banning this node for %d seconds\n", bantime->time));
	ctdb->nodes[bantime->pnn]->flags |= NODE_FLAGS_BANNED;

	event_add_timed(ctdb->ev, ctdb->banning_ctx, timeval_current_ofs(bantime->time,0), ctdb_ban_node_event, ctdb);
	
	return 0;
}
Example #11
/*
  called when a wins name register has completed
*/
static void nbtd_wins_register_handler(struct composite_context *c)
{
	NTSTATUS status;
	struct nbt_name_register_wins io;
	struct nbtd_iface_name *iname = talloc_get_type(c->async.private_data, 
							struct nbtd_iface_name);
	TALLOC_CTX *tmp_ctx = talloc_new(iname);

	status = nbt_name_register_wins_recv(c, tmp_ctx, &io);
	if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
		/* none of the WINS servers responded - try again 
		   periodically */
		int wins_retry_time = lp_parm_int(iname->iface->nbtsrv->task->lp_ctx, NULL, "nbtd", "wins_retry", 300);
		event_add_timed(iname->iface->nbtsrv->task->event_ctx, 
				iname,
				timeval_current_ofs(wins_retry_time, 0),
				nbtd_wins_register_retry,
				iname);
		talloc_free(tmp_ctx);
		return;
	}

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(1,("Name register failure with WINS for %s - %s\n", 
			 nbt_name_string(tmp_ctx, &iname->name), nt_errstr(status)));
		talloc_free(tmp_ctx);
		return;
	}	

	if (io.out.rcode != 0) {
		DEBUG(1,("WINS server %s rejected name register of %s - %s\n", 
			 io.out.wins_server, nbt_name_string(tmp_ctx, &iname->name), 
			 nt_errstr(nbt_rcode_to_ntstatus(io.out.rcode))));
		iname->nb_flags |= NBT_NM_CONFLICT;
		talloc_free(tmp_ctx);
		return;
	}	

	/* success - start a periodic name refresh */
	iname->nb_flags |= NBT_NM_ACTIVE;
	if (iname->wins_server) {
		/*
		 * talloc_free() would generate a warning,
		 * so steal it into the tmp context
		 */
		talloc_steal(tmp_ctx, iname->wins_server);
	}
	iname->wins_server = talloc_steal(iname, io.out.wins_server);

	iname->registration_time = timeval_current();
	nbtd_wins_start_refresh_timer(iname);

	DEBUG(3,("Registered %s with WINS server %s\n",
		 nbt_name_string(tmp_ctx, &iname->name), iname->wins_server));

	talloc_free(tmp_ctx);
}
Example #12
/*
  set up a timer to destroy an open search after an inactivity period
*/
static void pvfs_search_setup_timer(struct pvfs_search_state *search)
{
	struct tevent_context *ev = search->pvfs->ntvfs->ctx->event_ctx;
	if (search->handle == INVALID_SEARCH_HANDLE) return;
	talloc_free(search->te);
	search->te = event_add_timed(ev, search, 
				     timeval_current_ofs(search->pvfs->search.inactivity_time, 0), 
				     pvfs_search_timer, search);
}
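
Each call here cancels the pending timer and arms a fresh one, so the search is only torn down after a full inactivity_time with no activity in between. A minimal sketch of that reset-on-activity idiom with placeholder names (struct session, session_touch and idle_expired are hypothetical; only the tevent/talloc calls are real API):

/* Reset-on-activity idle timer sketch. */
#include <sys/time.h>
#include <talloc.h>
#include <tevent.h>

struct session {
	struct tevent_context *ev;
	struct tevent_timer *te;
	unsigned int idle_secs;
};

static void idle_expired(struct tevent_context *ev, struct tevent_timer *te,
			 struct timeval now, void *private_data)
{
	struct session *s = (struct session *)private_data;

	s->te = NULL;	/* tevent frees a one-shot timer after its handler runs */
	/* ... tear the idle session down here ... */
}

static void session_touch(struct session *s)
{
	talloc_free(s->te);	/* cancel any pending expiry; NULL is a no-op */
	s->te = tevent_add_timer(s->ev, s,
				 tevent_timeval_current_ofs(s->idle_secs, 0),
				 idle_expired, s);
}
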
Example #13
static void add_krb5_ticket_gain_handler_event(struct WINBINDD_CCACHE_ENTRY *entry,
				     struct timeval t)
{
	entry->refresh_time = 0;
	entry->event = event_add_timed(winbind_event_context(),
				       entry,
				       t,
				       krb5_ticket_gain_handler,
				       entry);
}
Example #14
static bool recalc_brl_timeout(void)
{
	struct blocking_lock_record *blr;
	struct timeval next_timeout;

	TALLOC_FREE(brl_timeout);

	next_timeout = timeval_zero();	

	for (blr = blocking_lock_queue; blr; blr = blr->next) {
		if (timeval_is_zero(&blr->expire_time)) {
			/*
			 * If we're blocked on pid 0xFFFFFFFF this is
			 * a POSIX lock, so calculate a timeout of
			 * 10 seconds into the future.
			 */
			if (blr->blocking_pid == 0xFFFFFFFF) {
				struct timeval psx_to = timeval_current_ofs(10, 0);
				next_timeout = timeval_min(&next_timeout, &psx_to);
			}

			continue;
		}

		if (timeval_is_zero(&next_timeout)) {
			next_timeout = blr->expire_time;
		}
		else {
			next_timeout = timeval_min(&next_timeout,
						   &blr->expire_time);
		}
	}

	if (timeval_is_zero(&next_timeout)) {
		DEBUG(10, ("Next timeout = Infinite.\n"));
		return True;
	}

	if (DEBUGLVL(10)) {
		struct timeval cur, from_now;

		cur = timeval_current();
		from_now = timeval_until(&cur, &next_timeout);
		DEBUG(10, ("Next timeout = %d.%d seconds from now.\n",
		    (int)from_now.tv_sec, (int)from_now.tv_usec));
	}

	if (!(brl_timeout = event_add_timed(smbd_event_context(), NULL,
					    next_timeout,
					    brl_timeout_fn, NULL))) {
		return False;
	}

	return True;
}
Example #15
static void report_rate(struct tevent_context *ev, struct tevent_timer *te, 
			struct timeval t, void *private_data)
{
	struct offline_state *state = talloc_get_type(private_data, 
							struct offline_state);
	int i;
	uint32_t total=0, total_offline=0, total_online=0;
	for (i=0;i<numstates;i++) {
		total += state[i].count - state[i].lastcount;
		if (timeval_elapsed(&state[i].tv_start) > latencies[state[i].op]) {
			latencies[state[i].op] = timeval_elapsed(&state[i].tv_start);
		}
		state[i].lastcount = state[i].count;		
		total_online += state[i].online_count;
		total_offline += state[i].offline_count;
	}
	printf("ops/s=%4u  offline=%5u online=%4u  set_lat=%.1f/%.1f get_lat=%.1f/%.1f save_lat=%.1f/%.1f load_lat=%.1f/%.1f\n",
	       total, total_offline, total_online,
	       latencies[OP_SETOFFLINE],
	       worst_latencies[OP_SETOFFLINE],
	       latencies[OP_GETOFFLINE],
	       worst_latencies[OP_GETOFFLINE],
	       latencies[OP_SAVEFILE],
	       worst_latencies[OP_SAVEFILE],
	       latencies[OP_LOADFILE],
	       worst_latencies[OP_LOADFILE]);
	fflush(stdout);
	event_add_timed(ev, state, timeval_current_ofs(1, 0), report_rate, state);

	for (i=0;i<OP_ENDOFLIST;i++) {
		if (latencies[i] > worst_latencies[i]) {
			worst_latencies[i] = latencies[i];
		}
		latencies[i] = 0;
	}

	/* send an echo on each interface to ensure it stays alive - this helps
	   with IP takeover */
	for (i=0;i<numstates;i++) {
		struct smb_echo p;
		struct smbcli_request *req;

		if (!state[i].tree) {
			continue;
		}

		p.in.repeat_count = 1;
		p.in.size = 0;
		p.in.data = NULL;
		req = smb_raw_echo_send(state[i].tree->session->transport, &p);
		req->async.private_data = &state[i];
		req->async.fn      = echo_completion;
	}
}
Example #16
/*
  called when the startup event script finishes
 */
static void ctdb_startup_callback(struct ctdb_context *ctdb, int status, void *p)
{
	if (status != 0) {
		DEBUG(DEBUG_ERR,("startup event failed\n"));
		event_add_timed(ctdb->ev, ctdb->monitor->monitor_context,
				timeval_current_ofs(5, 0),
				ctdb_run_startup, ctdb);
		return;
	}

	DEBUG(DEBUG_NOTICE,("startup event OK - enabling monitoring\n"));
	ctdb_set_runstate(ctdb, CTDB_RUNSTATE_RUNNING);
	ctdb->monitor->next_interval = 2;
	ctdb_run_notification_script(ctdb, "startup");

	ctdb->monitor->monitoring_mode = CTDB_MONITORING_ACTIVE;

	event_add_timed(ctdb->ev, ctdb->monitor->monitor_context,
			timeval_current_ofs(ctdb->monitor->next_interval, 0),
			ctdb_check_health, ctdb);
}
Example #17
/*
  start a timer to refresh this name
*/
static void nbtd_wins_start_refresh_timer(struct nbtd_iface_name *iname)
{
	uint32_t refresh_time;
	uint32_t max_refresh_time = lp_parm_int(iname->iface->nbtsrv->task->lp_ctx, NULL, "nbtd", "max_refresh_time", 7200);

	refresh_time = MIN(max_refresh_time, iname->ttl/2);
	
	event_add_timed(iname->iface->nbtsrv->task->event_ctx, 
			iname, 
			timeval_add(&iname->registration_time, refresh_time, 0),
			nbtd_wins_refresh, iname);
}
Example #18
static void idle_handler(struct tevent_context *ev, 
			 struct tevent_timer *te, struct timeval t, void *private_data)
{
	struct smbcli_transport *transport = talloc_get_type(private_data,
							     struct smbcli_transport);
	struct timeval next = timeval_add(&t, 0, transport->idle.period);
	transport->socket->event.te = event_add_timed(transport->socket->event.ctx, 
						      transport,
						      next,
						      idle_handler, transport);
	transport->idle.func(transport, transport->idle.private_data);
}
Example #19
/*
  complete an async reconnect
 */
static void reopen_connection_complete(struct composite_context *ctx)
{
	struct benchlock_state *state = (struct benchlock_state *)ctx->async.private_data;
	NTSTATUS status;
	struct smb_composite_connect *io = &state->reconnect;

	status = smb_composite_connect_recv(ctx, state->mem_ctx);
	if (!NT_STATUS_IS_OK(status)) {
		talloc_free(state->te);
		state->te = event_add_timed(state->ev, state->mem_ctx, 
					    timeval_current_ofs(1,0), 
					    reopen_connection, state);
		return;
	}

	talloc_free(state->tree);
	state->tree = io->out.tree;

	/* do the reopen as a separate event */
	event_add_timed(state->ev, state->mem_ctx, timeval_zero(), reopen_file, state);
}
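
Note how the actual reopen is pushed into a separate timed event with timeval_zero(): the work is queued to run on the next pass through the event loop rather than directly inside the connect completion callback. A hedged sketch of that zero-timeout deferral idiom in plain tevent (deferred_work and schedule_deferred_work are placeholder names):

/* Defer work to the next event-loop iteration with a zero timeout,
 * as the reconnect code above does with timeval_zero(). */
#include <sys/time.h>
#include <talloc.h>
#include <tevent.h>

static void deferred_work(struct tevent_context *ev, struct tevent_timer *te,
			  struct timeval now, void *private_data)
{
	/* runs from the main loop, outside the completion callback's stack */
}

static void schedule_deferred_work(struct tevent_context *ev,
				   TALLOC_CTX *mem_ctx, void *private_data)
{
	/* a zero timeval means "as soon as the loop comes back around" */
	tevent_add_timer(ev, mem_ctx, tevent_timeval_zero(),
			 deferred_work, private_data);
}
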
Example #20
static void ctdb_time_tick(struct event_context *ev, struct timed_event *te, 
				  struct timeval t, void *private_data)
{
	struct ctdb_context *ctdb = talloc_get_type(private_data, struct ctdb_context);

	if (getpid() != ctdb->ctdbd_pid) {
		return;
	}

	event_add_timed(ctdb->ev, ctdb, 
			timeval_current_ofs(1, 0), 
			ctdb_time_tick, ctdb);
}
Example #21
/*
  start watching for nodes that might be dead
 */
void ctdb_wait_for_first_recovery(struct ctdb_context *ctdb)
{
	ctdb_set_runstate(ctdb, CTDB_RUNSTATE_FIRST_RECOVERY);

	ctdb->monitor = talloc(ctdb, struct ctdb_monitor_state);
	CTDB_NO_MEMORY_FATAL(ctdb, ctdb->monitor);

	ctdb->monitor->monitor_context = talloc_new(ctdb->monitor);
	CTDB_NO_MEMORY_FATAL(ctdb, ctdb->monitor->monitor_context);

	event_add_timed(ctdb->ev, ctdb->monitor->monitor_context,
			timeval_current_ofs(1, 0),
			ctdb_wait_until_recovered, ctdb);
}
Example #22
/*
  make a remote ctdb call - async send. Called in daemon context.

  This constructs a ctdb_call request and queues it for processing. 
  This call never blocks.
*/
struct ctdb_call_state *ctdb_daemon_call_send_remote(struct ctdb_db_context *ctdb_db, 
						     struct ctdb_call *call, 
						     struct ctdb_ltdb_header *header)
{
	uint32_t len;
	struct ctdb_call_state *state;
	struct ctdb_context *ctdb = ctdb_db->ctdb;

	state = talloc_zero(ctdb_db, struct ctdb_call_state);
	CTDB_NO_MEMORY_NULL(ctdb, state);

	len = offsetof(struct ctdb_req_call, data) + call->key.dsize + call->call_data.dsize;
	state->c = ctdb->methods->allocate_pkt(state, len);
	CTDB_NO_MEMORY_NULL(ctdb, state->c);
	talloc_set_name_const(state->c, "req_call packet");

	state->c->hdr.length    = len;
	state->c->hdr.ctdb_magic = CTDB_MAGIC;
	state->c->hdr.ctdb_version = CTDB_VERSION;
	state->c->hdr.operation = CTDB_REQ_CALL;
	state->c->hdr.destnode  = header->dmaster;
	state->c->hdr.srcnode   = ctdb->vnn;
	/* this limits us to 16k outstanding messages - not unreasonable */
	state->c->hdr.reqid     = idr_get_new(ctdb->idr, state, 0xFFFF);
	state->c->flags         = call->flags;
	state->c->db_id         = ctdb_db->db_id;
	state->c->callid        = call->call_id;
	state->c->keylen        = call->key.dsize;
	state->c->calldatalen   = call->call_data.dsize;
	memcpy(&state->c->data[0], call->key.dptr, call->key.dsize);
	memcpy(&state->c->data[call->key.dsize], 
	       call->call_data.dptr, call->call_data.dsize);
	state->call                = *call;
	state->call.call_data.dptr = &state->c->data[call->key.dsize];
	state->call.key.dptr       = &state->c->data[0];

	state->node   = ctdb->nodes[header->dmaster];
	state->state  = CTDB_CALL_WAIT;
	state->header = *header;
	state->ctdb_db = ctdb_db;

	talloc_set_destructor(state, ctdb_call_destructor);

	ctdb_queue_packet(ctdb, &state->c->hdr);

	event_add_timed(ctdb->ev, state, timeval_current_ofs(CTDB_REQ_TIMEOUT, 0), 
			ctdb_call_timeout, state);
	return state;
}
Example #23
/*
  called when a complete packet has come in - should not happen on this socket
  unless the other side closes the connection with RST or FIN
 */
void ctdb_tcp_tnode_cb(uint8_t *data, size_t cnt, void *private_data)
{
	struct ctdb_node *node = talloc_get_type(private_data, struct ctdb_node);
	struct ctdb_tcp_node *tnode = talloc_get_type(
		node->private_data, struct ctdb_tcp_node);

	if (data == NULL) {
		node->ctdb->upcalls->node_dead(node);
	}

	ctdb_tcp_stop_connection(node);
	tnode->connect_te = event_add_timed(node->ctdb->ev, tnode,
					    timeval_current_ofs(3, 0),
					    ctdb_tcp_node_connect, node);
}
Example #24
void ccache_regain_all_now(void)
{
	struct WINBINDD_CCACHE_ENTRY *cur;
	struct timeval t = timeval_current();

	for (cur = ccache_list; cur; cur = cur->next) {
		struct timed_event *new_event;

		/*
		 * if refresh_time is 0, we know that
		 * the event has the krb5_ticket_gain_handler
		 */
		if (cur->refresh_time == 0) {
			new_event = event_add_timed(winbind_event_context(),
						    cur,
						    t,
						    krb5_ticket_gain_handler,
						    cur);
		} else {
			new_event = event_add_timed(winbind_event_context(),
						    cur,
						    t,
						    krb5_ticket_refresh_handler,
						    cur);
		}

		if (!new_event) {
			continue;
		}

		TALLOC_FREE(cur->event);
		cur->event = new_event;
	}

	return;
}
Example #25
/*
  called when the startup event script finishes
 */
static void ctdb_startup_callback(struct ctdb_context *ctdb, int status, void *p)
{
	if (status != 0) {
		DEBUG(DEBUG_ERR,("startup event failed\n"));
	} else if (status == 0) {
		DEBUG(DEBUG_NOTICE,("startup event OK - enabling monitoring\n"));
		ctdb->done_startup = true;
		ctdb->monitor->next_interval = 5;
		ctdb_run_notification_script(ctdb, "startup");
	}

	event_add_timed(ctdb->ev, ctdb->monitor->monitor_context, 
			timeval_current_ofs(ctdb->monitor->next_interval, 0),
			ctdb_check_health, ctdb);
}
Example #26
static void schedule_dns_register_smbd_retry(struct dns_reg_state *dns_state,
		struct timeval *timeout)
{
	struct timed_event * event;

	dns_state->srv_ref = NULL;
	event= event_add_timed(smbd_event_context(),
			NULL,
			timeval_current_ofs(DNS_REG_RETRY_INTERVAL, 0),
			dns_register_smbd_retry,
			dns_state);

	dns_state->retry_handler = event;
	get_timed_events_timeout(smbd_event_context(), timeout);
}
Example #27
static void remove_child_pid(struct tevent_context *ev_ctx,
			     pid_t pid,
			     bool unclean_shutdown)
{
	struct child_pid *child;
	static struct timed_event *cleanup_te;
	struct server_id child_id;

	if (unclean_shutdown) {
		/* a child terminated uncleanly so tickle all
		   processes to see if they can grab any of the
		   pending locks
                */
		DEBUG(3,(__location__ " Unclean shutdown of pid %u\n",
			(unsigned int)pid));
		if (!cleanup_te) {
			/* call the cleanup timer, but not too often */
			int cleanup_time = lp_parm_int(-1, "smbd", "cleanuptime", 20);
			cleanup_te = event_add_timed(ev_ctx, NULL,
						timeval_current_ofs(cleanup_time, 0),
						cleanup_timeout_fn,
						&cleanup_te);
			DEBUG(1,("Scheduled cleanup of brl and lock database after unclean shutdown\n"));
		}
	}

	child_id = procid_self(); /* Just initialize pid and potentially vnn */
	child_id.pid = pid;

	if (!serverid_deregister(child_id)) {
		DEBUG(1, ("Could not remove pid %d from serverid.tdb\n",
			  (int)pid));
	}

	for (child = children; child != NULL; child = child->next) {
		if (child->pid == pid) {
			struct child_pid *tmp = child;
			DLIST_REMOVE(children, child);
			SAFE_FREE(tmp);
			num_children -= 1;
			return;
		}
	}

	/* not all forked child processes are added to the children list */
	DEBUG(1, ("Could not find child %d -- ignoring\n", (int)pid));
}
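
The static cleanup_te pointer is what keeps this from flooding the event queue: a cleanup is only scheduled when none is pending, and the handler receives &cleanup_te so it can clear the pointer once it has run. A self-contained sketch of that coalescing idiom (all names are illustrative; only the tevent calls are real API):

/* Coalesce bursts of triggers into a single pending timer. */
#include <stddef.h>
#include <sys/time.h>
#include <talloc.h>
#include <tevent.h>

static struct tevent_timer *pending_cleanup;

static void cleanup_fn(struct tevent_context *ev, struct tevent_timer *te,
		       struct timeval now, void *private_data)
{
	struct tevent_timer **pending = (struct tevent_timer **)private_data;

	/* ... do the expensive cleanup once for the whole burst ... */

	*pending = NULL;	/* let the next trigger schedule a new one */
}

static void trigger_cleanup(struct tevent_context *ev, int delay_secs)
{
	if (pending_cleanup != NULL) {
		return;		/* a cleanup is already queued */
	}
	pending_cleanup = tevent_add_timer(ev, ev,
					   tevent_timeval_current_ofs(delay_secs, 0),
					   cleanup_fn, &pending_cleanup);
}
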
Example #28
static void echo_completion(struct smbcli_request *req)
{
	struct benchlock_state *state = (struct benchlock_state *)req->async.private_data;
	NTSTATUS status = smbcli_request_simple_recv(req);
	if (NT_STATUS_EQUAL(status, NT_STATUS_END_OF_FILE) ||
	    NT_STATUS_EQUAL(status, NT_STATUS_LOCAL_DISCONNECT) ||
	    NT_STATUS_EQUAL(status, NT_STATUS_CONNECTION_RESET)) {
		talloc_free(state->tree);
		state->tree = NULL;
		num_connected--;	
		DEBUG(0,("reopening connection to %s\n", state->dest_host));
		talloc_free(state->te);
		state->te = event_add_timed(state->ev, state->mem_ctx, 
					    timeval_current_ofs(1,0), 
					    reopen_connection, state);
	}
}
Example #29
/*
  put a request into the send queue
*/
void smbcli_transport_send(struct smbcli_request *req)
{
	DATA_BLOB blob;
	NTSTATUS status;

	/* check if the transport is dead */
	if (req->transport->socket->sock == NULL) {
		req->state = SMBCLI_REQUEST_ERROR;
		req->status = NT_STATUS_NET_WRITE_FAULT;
		return;
	}

	blob = data_blob_const(req->out.buffer, req->out.size);
	status = packet_send(req->transport->packet, blob);
	if (!NT_STATUS_IS_OK(status)) {
		req->state = SMBCLI_REQUEST_ERROR;
		req->status = status;
		return;
	}

	packet_queue_run(req->transport->packet);
	if (req->transport->socket->sock == NULL) {
		req->state = SMBCLI_REQUEST_ERROR;
		req->status = NT_STATUS_NET_WRITE_FAULT;
		return;
	}

	if (req->one_way_request) {
		req->state = SMBCLI_REQUEST_DONE;
		smbcli_request_destroy(req);
		return;
	}

	req->state = SMBCLI_REQUEST_RECV;
	DLIST_ADD(req->transport->pending_recv, req);

	/* add a timeout */
	if (req->transport->options.request_timeout) {
		event_add_timed(req->transport->socket->event.ctx, req, 
				timeval_current_ofs(req->transport->options.request_timeout, 0), 
				smbcli_timeout_handler, req);
	}

	talloc_set_destructor(req, smbcli_request_destructor);
}
Example #30
/*
  setup the idle handler for a transport
  the period is in microseconds
*/
_PUBLIC_ void smbcli_transport_idle_handler(struct smbcli_transport *transport, 
				   void (*idle_func)(struct smbcli_transport *, void *),
				   uint64_t period,
				   void *private_data)
{
	transport->idle.func = idle_func;
	transport->idle.private_data = private_data;
	transport->idle.period = period;

	if (transport->socket->event.te != NULL) {
		talloc_free(transport->socket->event.te);
	}

	transport->socket->event.te = event_add_timed(transport->socket->event.ctx, 
						      transport,
						      timeval_current_ofs(0, period),
						      idle_handler, transport);
}
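
Since the period is given in microseconds, a caller that wants its idle function run roughly twice a second would register it along these lines (the transport and my_idle are assumed to come from an already established connection; only smbcli_transport_idle_handler itself is real API here):

/* Hypothetical caller of the function above. */
static void my_idle(struct smbcli_transport *transport, void *private_data)
{
	/* e.g. send a keepalive or update progress counters */
}

static void install_idle(struct smbcli_transport *transport)
{
	/* 500,000 microseconds = half a second between idle calls */
	smbcli_transport_idle_handler(transport, my_idle, 500 * 1000, NULL);
}
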