static int _handle_sasl_result(xmpp_conn_t * const conn,
                               xmpp_stanza_t * const stanza,
                               void * const userdata)
{
    char *name;

    name = xmpp_stanza_get_name(stanza);

    /* the server should send a <success> or <failure> stanza */
    if (strcmp(name, "failure") == 0) {
        xmpp_debug(conn->ctx, "xmpp", "SASL %s auth failed",
                   (char *)userdata);

        /* fall back to next auth method */
        _auth(conn);
    } else if (strcmp(name, "success") == 0) {
        /* SASL auth successful, we need to restart the stream */
        xmpp_debug(conn->ctx, "xmpp", "SASL %s auth successful",
                   (char *)userdata);

        /* reset parser */
        conn_prepare_reset(conn, _handle_open_sasl);

        /* send stream tag */
        conn_open_stream(conn);
    } else {
        /* got unexpected reply */
        xmpp_error(conn->ctx, "xmpp", "Got unexpected reply to SASL %s "
                   "authentication.", (char *)userdata);
        xmpp_disconnect(conn);
    }

    return 0;
}
DBClientConnection* DBClientReplicaSet::selectNodeUsingTags(
        shared_ptr<ReadPreferenceSetting> readPref) {
    if (!shouldReevaluate() && checkLastHost(readPref.get())) {
        LOG( 3 ) << "dbclient_rs selecting compatible last used node "
                 << _lastSlaveOkHost << endl;
        return _lastSlaveOkConn.get();
    }

    ReplicaSetMonitorPtr monitor = _getMonitor();
    _lastSlaveOkHost = monitor->getHostOrRefresh(*readPref);

    if ( _lastSlaveOkHost.empty() ){
        LOG( 3 ) << "dbclient_rs no compatible node found" << endl;
        return NULL;
    }

    _lastReadPref = readPref;

    // Primary connection is special because it is the only connection that is
    // versioned in mongos. Therefore, we have to make sure that this object
    // maintains only one connection to the primary and use that connection
    // every time we need to talk to the primary.
    if (monitor->isPrimary(_lastSlaveOkHost)) {
        checkMaster();

        _lastSlaveOkConn = _master;
        _lastSlaveOkHost = _masterHost; // implied, but still assign just to be safe

        LOG( 3 ) << "dbclient_rs selecting primary node "
                 << _lastSlaveOkHost << endl;

        return _master.get();
    }

    string errmsg;
    ConnectionString connStr(_lastSlaveOkHost);

    // Needs to perform a dynamic_cast because we need to set the replSet
    // callback. We should eventually not need this after we remove the
    // callback.
    DBClientConnection* newConn = dynamic_cast<DBClientConnection*>(
            connStr.connect(errmsg, _so_timeout));

    // Assert here instead of returning NULL since the contract of this method is such
    // that returning NULL means none of the nodes were good, which is not the case here.
    uassert(16532, str::stream() << "Failed to connect to "
            << _lastSlaveOkHost.toString(), newConn != NULL);

    _lastSlaveOkConn.reset(newConn);
    _lastSlaveOkConn->setReplSetClientCallback(this);
    _lastSlaveOkConn->setRunCommandHook(_runCommandHook);
    _lastSlaveOkConn->setPostRunCommandHook(_postRunCommandHook);

    _auth(_lastSlaveOkConn.get());

    LOG( 3 ) << "dbclient_rs selecting node " << _lastSlaveOkHost << endl;

    return _lastSlaveOkConn.get();
}
static int _handle_register(xmpp_conn_t * const conn,
                            xmpp_stanza_t * const stanza,
                            void * const userdata)
{
    char *type;

    /* delete missing handler */
    xmpp_timed_handler_delete(conn, _handle_missing_register);

    /* server responded to register request */
    type = xmpp_stanza_get_type(stanza);
    if (!type) {
        xmpp_error(conn->ctx, "xmpp", "Server sent us an unexpected response "
                   "to register request.");
        xmpp_disconnect(conn);
    } else if (strcmp(type, "error") == 0) {
        /* registration failed, no more fallbacks */
        xmpp_error(conn->ctx, "xmpp", "Register client failed.");
        xmpp_disconnect(conn);
    } else if (strcmp(type, "result") == 0) {
        /* registration succeeded, continue with authentication */
        xmpp_debug(conn->ctx, "xmpp", "Register succeeded.");
        _auth(conn);
    } else {
        xmpp_error(conn->ctx, "xmpp", "Server sent us a register "
                   "response with a bad type.");
        xmpp_disconnect(conn);
    }

    return 0;
}
/* stream:features handlers */
static int _handle_missing_features(xmpp_conn_t * const conn,
                                    void * const userdata)
{
    xmpp_debug(conn->ctx, "xmpp", "didn't get stream features");

    /* legacy auth will be attempted */
    _auth(conn);

    return 0;
}
Client::Context::Context( string ns , Database * db, bool doauth )
    : _client( currentClient.get() ) , _oldContext( _client->_context ) ,
      _path( dbpath ) , _lock(0) , _justCreated(false) {
    assert( db && db->isOk() );
    _ns = ns;
    _db = db;
    _client->_context = this;
    if ( doauth )
        _auth();
}
void Client::Context::_finishInit( bool doauth ){
    int lockState = dbMutex.getState();
    assert( lockState );

    _db = dbHolder.get( _ns , _path );
    if ( _db ){
        _justCreated = false;
    }
    else if ( dbMutex.getState() > 0 ){
        // already in a write lock
        _db = dbHolder.getOrCreate( _ns , _path , _justCreated );
        assert( _db );
    }
    else if ( dbMutex.getState() < -1 ){
        // nested read lock :(
        assert( _lock );
        _lock->releaseAndWriteLock();
        _db = dbHolder.getOrCreate( _ns , _path , _justCreated );
        assert( _db );
    }
    else {
        // we have a read lock, but need to get a write lock for a bit
        // we need to be in a write lock since we're going to create the DB object
        // to do that, we're going to unlock, then get a write lock
        // this is so that if this is the first query and it's long, it doesn't block the db
        // we just have to check that the db wasn't closed in the interim where we unlock
        for ( int x=0; x<2; x++ ){
            {
                dbtemprelease unlock;
                writelock lk( _ns );
                dbHolder.getOrCreate( _ns , _path , _justCreated );
            }

            _db = dbHolder.get( _ns , _path );
            if ( _db )
                break;

            log() << "db was closed on us right after we opened it: " << _ns << endl;
        }
        uassert( 13005 , "can't create db, keeps getting closed" , _db );
    }

    _client->_context = this;
    _client->_curOp->enter( this );
    if ( doauth )
        _auth( lockState );
}
DBClientConnection * DBClientReplicaSet::checkSlave() {
    HostAndPort h = _monitor->getSlave( _slaveHost );

    if ( h == _slaveHost && _slave ) {
        if ( ! _slave->isFailed() )
            return _slave.get();
        _monitor->notifySlaveFailure( _slaveHost );
    }

    _slaveHost = _monitor->getSlave();
    _slave.reset( new DBClientConnection( true , this ) );
    _slave->connect( _slaveHost );
    _auth( _slave.get() );
    return _slave.get();
}
DBClientConnection * DBClientReplicaSet::checkMaster() {
    HostAndPort h = _monitor->getMaster();

    if ( h == _masterHost ) {
        // a master is selected.  let's just make sure connection didn't die
        if ( ! _master->isFailed() )
            return _master.get();
        _monitor->notifyFailure( _masterHost );
    }

    _masterHost = _monitor->getMaster();
    _master.reset( new DBClientConnection( true ) );
    _master->connect( _masterHost );
    _auth( _master.get() );
    return _master.get();
}
static int _handle_features(xmpp_conn_t * const conn,
                            xmpp_stanza_t * const stanza,
                            void * const userdata)
{
    xmpp_stanza_t *child, *mech;
    char *text;

    /* remove the handler that detects missing stream:features */
    xmpp_timed_handler_delete(conn, _handle_missing_features);

    /* check for TLS */
    if (!conn->secured) {
        if (!conn->tls_disabled) {
            child = xmpp_stanza_get_child_by_name(stanza, "starttls");
            if (child && (strcmp(xmpp_stanza_get_ns(child), XMPP_NS_TLS) == 0))
                conn->tls_support = 1;
        } else {
            conn->tls_support = 0;
        }
    }

    /* check for SASL */
    child = xmpp_stanza_get_child_by_name(stanza, "mechanisms");
    if (child && (strcmp(xmpp_stanza_get_ns(child), XMPP_NS_SASL) == 0)) {
        for (mech = xmpp_stanza_get_children(child); mech;
             mech = xmpp_stanza_get_next(mech)) {
            if (xmpp_stanza_get_name(mech) &&
                strcmp(xmpp_stanza_get_name(mech), "mechanism") == 0) {
                text = xmpp_stanza_get_text(mech);
                if (strcasecmp(text, "PLAIN") == 0)
                    conn->sasl_support |= SASL_MASK_PLAIN;
                else if (strcasecmp(text, "DIGEST-MD5") == 0)
                    conn->sasl_support |= SASL_MASK_DIGESTMD5;
                else if (strcasecmp(text, "SCRAM-SHA-1") == 0)
                    conn->sasl_support |= SASL_MASK_SCRAMSHA1;
                else if (strcasecmp(text, "ANONYMOUS") == 0)
                    conn->sasl_support |= SASL_MASK_ANONYMOUS;
                xmpp_free(conn->ctx, text);
            }
        }
    }

    _auth(conn);

    return 0;
}
DBClientConnection * DBClientReplicaSet::checkMaster() {
    ReplicaSetMonitorPtr monitor = _getMonitor();
    HostAndPort h = monitor->getMasterOrUassert();

    if ( h == _masterHost && _master ) {
        // a master is selected.  let's just make sure connection didn't die
        if ( ! _master->isFailed() )
            return _master.get();

        monitor->failedHost( _masterHost );
        h = monitor->getMasterOrUassert(); // old master failed, try again.
    }

    _masterHost = h;

    ConnectionString connStr(_masterHost);

    string errmsg;
    DBClientConnection* newConn = NULL;

    try {
        // Needs to perform a dynamic_cast because we need to set the replSet
        // callback. We should eventually not need this after we remove the
        // callback.
        newConn = dynamic_cast<DBClientConnection*>(
                connStr.connect(errmsg, _so_timeout));
    }
    catch (const AssertionException& ex) {
        errmsg = ex.toString();
    }

    if (newConn == NULL || !errmsg.empty()) {
        monitor->failedHost(_masterHost);
        uasserted(13639, str::stream() << "can't connect to new replica set master ["
                  << _masterHost.toString() << "]"
                  << (errmsg.empty() ? "" : ", err: ") << errmsg);
    }

    _master.reset(newConn);
    _master->setReplSetClientCallback(this);
    _master->setRunCommandHook(_runCommandHook);
    _master->setPostRunCommandHook(_postRunCommandHook);

    _auth( _master.get() );
    return _master.get();
}
DBClientConnection * DBClientReplicaSet::checkMaster() {
    HostAndPort h = _monitor->getMaster();

    if ( h == _masterHost && _master ) {
        // a master is selected.  let's just make sure connection didn't die
        if ( ! _master->isFailed() )
            return _master.get();
        _monitor->notifyFailure( _masterHost );
    }

    _masterHost = _monitor->getMaster();
    _master.reset( new DBClientConnection( true , this ) );
    string errmsg;
    if ( ! _master->connect( _masterHost , errmsg ) ) {
        _monitor->notifyFailure( _masterHost );
        uasserted( 13639 , str::stream() << "can't connect to new replica set master ["
                   << _masterHost.toString() << "] err: " << errmsg );
    }

    _auth( _master.get() );
    return _master.get();
}
bool Connection::forward_out(const std::string& packet) {
    if ( ! _authenticated)
        _auth(packet);
    else
        _incoming(packet);
    return true;
}
/* authenticate the connection
 * this may get called multiple times.  if any auth method fails,
 * this will get called again until one auth method succeeds or every
 * method fails
 */
static void _auth(xmpp_conn_t * const conn)
{
    xmpp_stanza_t *auth, *authdata, *query, *child, *iq;
    char *str, *authid;
    char *scram_init;
    int anonjid;

    /* if there is no node in conn->jid, we assume anonymous connect */
    str = xmpp_jid_node(conn->ctx, conn->jid);
    if (str == NULL) {
        anonjid = 1;
    } else {
        xmpp_free(conn->ctx, str);
        anonjid = 0;
    }

    if (conn->tls_support) {
        tls_t *tls = tls_new(conn->ctx, conn->sock);

        /* If we couldn't init tls, it isn't there, so go on */
        if (!tls) {
            conn->tls_support = 0;
            _auth(conn);
            return;
        } else {
            tls_free(tls);
        }

        auth = _make_starttls(conn);
        if (!auth) {
            disconnect_mem_error(conn);
            return;
        }

        handler_add(conn, _handle_proceedtls_default,
                    XMPP_NS_TLS, NULL, NULL, NULL);

        xmpp_send(conn, auth);
        xmpp_stanza_release(auth);

        /* TLS was tried, unset flag */
        conn->tls_support = 0;
    } else if (anonjid && conn->sasl_support & SASL_MASK_ANONYMOUS) {
        /* anonymous login via SASL ANONYMOUS */
        auth = _make_sasl_auth(conn, "ANONYMOUS");
        if (!auth) {
            disconnect_mem_error(conn);
            return;
        }

        handler_add(conn, _handle_sasl_result, XMPP_NS_SASL,
                    NULL, NULL, "ANONYMOUS");

        xmpp_send(conn, auth);
        xmpp_stanza_release(auth);

        /* SASL ANONYMOUS was tried, unset flag */
        conn->sasl_support &= ~SASL_MASK_ANONYMOUS;
    } else if (anonjid) {
        xmpp_error(conn->ctx, "auth",
                   "No node in JID, and SASL ANONYMOUS unsupported.");
        xmpp_disconnect(conn);
    } else if (conn->sasl_support & SASL_MASK_SCRAMSHA1) {
        auth = _make_sasl_auth(conn, "SCRAM-SHA-1");
        if (!auth) {
            disconnect_mem_error(conn);
            return;
        }

        /* don't free scram_init on success */
        scram_init = _make_scram_sha1_init_msg(conn);
        if (!scram_init) {
            xmpp_stanza_release(auth);
            disconnect_mem_error(conn);
            return;
        }

        str = (char *)base64_encode(conn->ctx, (unsigned char *)scram_init,
                                    strlen(scram_init));
        if (!str) {
            xmpp_free(conn->ctx, scram_init);
            xmpp_stanza_release(auth);
            disconnect_mem_error(conn);
            return;
        }

        authdata = xmpp_stanza_new(conn->ctx);
        if (!authdata) {
            xmpp_free(conn->ctx, str);
            xmpp_free(conn->ctx, scram_init);
            xmpp_stanza_release(auth);
            disconnect_mem_error(conn);
            return;
        }
        xmpp_stanza_set_text(authdata, str);
        xmpp_free(conn->ctx, str);
        xmpp_stanza_add_child(auth, authdata);
        xmpp_stanza_release(authdata);

        handler_add(conn, _handle_scram_sha1_challenge, XMPP_NS_SASL,
                    NULL, NULL, (void *)scram_init);

        xmpp_send(conn, auth);
        xmpp_stanza_release(auth);

        /* SASL SCRAM-SHA-1 was tried, unset flag */
        conn->sasl_support &= ~SASL_MASK_SCRAMSHA1;
    } else if (conn->sasl_support & SASL_MASK_DIGESTMD5) {
        auth = _make_sasl_auth(conn, "DIGEST-MD5");
        if (!auth) {
            disconnect_mem_error(conn);
            return;
        }

        handler_add(conn, _handle_digestmd5_challenge, XMPP_NS_SASL,
                    NULL, NULL, NULL);

        xmpp_send(conn, auth);
        xmpp_stanza_release(auth);

        /* SASL DIGEST-MD5 was tried, unset flag */
        conn->sasl_support &= ~SASL_MASK_DIGESTMD5;
    } else if (conn->sasl_support & SASL_MASK_PLAIN) {
        auth = _make_sasl_auth(conn, "PLAIN");
        if (!auth) {
            disconnect_mem_error(conn);
            return;
        }
        authdata = xmpp_stanza_new(conn->ctx);
        if (!authdata) {
            disconnect_mem_error(conn);
            return;
        }
        authid = _get_authid(conn);
        if (!authid) {
            disconnect_mem_error(conn);
            return;
        }
        str = sasl_plain(conn->ctx, authid, conn->pass);
        if (!str) {
            disconnect_mem_error(conn);
            return;
        }
        xmpp_stanza_set_text(authdata, str);
        xmpp_free(conn->ctx, str);
        xmpp_free(conn->ctx, authid);
        xmpp_stanza_add_child(auth, authdata);
        xmpp_stanza_release(authdata);

        handler_add(conn, _handle_sasl_result, XMPP_NS_SASL,
                    NULL, NULL, "PLAIN");

        xmpp_send(conn, auth);
        xmpp_stanza_release(auth);

        /* SASL PLAIN was tried, unset flag */
        conn->sasl_support &= ~SASL_MASK_PLAIN;
    } else if (conn->type == XMPP_CLIENT) {
        /* legacy client authentication */

        iq = xmpp_stanza_new(conn->ctx);
        if (!iq) {
            disconnect_mem_error(conn);
            return;
        }
        xmpp_stanza_set_name(iq, "iq");
        xmpp_stanza_set_type(iq, "set");
        xmpp_stanza_set_id(iq, "_xmpp_auth1");

        query = xmpp_stanza_new(conn->ctx);
        if (!query) {
            xmpp_stanza_release(iq);
            disconnect_mem_error(conn);
            return;
        }
        xmpp_stanza_set_name(query, "query");
        xmpp_stanza_set_ns(query, XMPP_NS_AUTH);
        xmpp_stanza_add_child(iq, query);
        xmpp_stanza_release(query);

        child = xmpp_stanza_new(conn->ctx);
        if (!child) {
            xmpp_stanza_release(iq);
            disconnect_mem_error(conn);
            return;
        }
        xmpp_stanza_set_name(child, "username");
        xmpp_stanza_add_child(query, child);
        xmpp_stanza_release(child);

        authdata = xmpp_stanza_new(conn->ctx);
        if (!authdata) {
            xmpp_stanza_release(iq);
            disconnect_mem_error(conn);
            return;
        }
        str = xmpp_jid_node(conn->ctx, conn->jid);
        xmpp_stanza_set_text(authdata, str);
        xmpp_free(conn->ctx, str);
        xmpp_stanza_add_child(child, authdata);
        xmpp_stanza_release(authdata);

        child = xmpp_stanza_new(conn->ctx);
        if (!child) {
            xmpp_stanza_release(iq);
            disconnect_mem_error(conn);
            return;
        }
        xmpp_stanza_set_name(child, "password");
        xmpp_stanza_add_child(query, child);
        xmpp_stanza_release(child);

        authdata = xmpp_stanza_new(conn->ctx);
        if (!authdata) {
            xmpp_stanza_release(iq);
            disconnect_mem_error(conn);
            return;
        }
        xmpp_stanza_set_text(authdata, conn->pass);
        xmpp_stanza_add_child(child, authdata);
        xmpp_stanza_release(authdata);

        child = xmpp_stanza_new(conn->ctx);
        if (!child) {
            xmpp_stanza_release(iq);
            disconnect_mem_error(conn);
            return;
        }
        xmpp_stanza_set_name(child, "resource");
        xmpp_stanza_add_child(query, child);
        xmpp_stanza_release(child);

        authdata = xmpp_stanza_new(conn->ctx);
        if (!authdata) {
            xmpp_stanza_release(iq);
            disconnect_mem_error(conn);
            return;
        }
        str = xmpp_jid_resource(conn->ctx, conn->jid);
        if (str) {
            xmpp_stanza_set_text(authdata, str);
            xmpp_free(conn->ctx, str);
        } else {
            xmpp_stanza_release(authdata);
            xmpp_stanza_release(iq);
            xmpp_error(conn->ctx, "auth",
                       "Cannot authenticate without resource");
            xmpp_disconnect(conn);
            return;
        }
        xmpp_stanza_add_child(child, authdata);
        xmpp_stanza_release(authdata);

        handler_add_id(conn, _handle_legacy, "_xmpp_auth1", NULL);
        handler_add_timed(conn, _handle_missing_legacy,
                          LEGACY_TIMEOUT, NULL);

        xmpp_send(conn, iq);
        xmpp_stanza_release(iq);
    }
}
/* main */
int main(void)
{
    return (_auth() == 0) ? 0 : 2;
}