/*
 * ha_replica_set_shutdown --
 *
 *    Terminate every node process belonging to @replica_set.
 *    Node memory is not released here; see ha_node_destroy.
 */
void
ha_replica_set_shutdown (ha_replica_set_t *replica_set)
{
   ha_node_t *iter = replica_set->nodes;

   while (iter) {
      ha_node_kill (iter);
      iter = iter->next;
   }
}
/*
 * ha_node_destroy --
 *
 *    Kill the process backing @node, then release every string the
 *    node owns and finally the node itself.
 */
static void
ha_node_destroy (ha_node_t *node)
{
   ha_node_kill (node);

   bson_free (node->name);
   bson_free (node->repl_set);
   bson_free (node->dbpath);
   bson_free (node->configopt);
   bson_free (node);
}
/*
 * ha_sharded_cluster_shutdown --
 *
 *    Bring down an entire sharded cluster: each replica set shard,
 *    then every config server, then every mongos router.
 */
void
ha_sharded_cluster_shutdown (ha_sharded_cluster_t *cluster)
{
   ha_node_t *node;
   int idx;

   bson_return_if_fail (cluster);

   /* NOTE(review): 12 presumably matches the declared size of
    * cluster->replicas — confirm against the struct definition. */
   for (idx = 0; idx < 12; idx++) {
      if (cluster->replicas[idx]) {
         ha_replica_set_shutdown (cluster->replicas[idx]);
      }
   }

   for (node = cluster->configs; node; node = node->next) {
      ha_node_kill (node);
   }

   for (node = cluster->routers; node; node = node->next) {
      ha_node_kill (node);
   }
}
/*
 * ha_node_restart --
 *
 *    Kill and relaunch the server process backing @node.
 *
 *    An argv is built according to the node's role:
 *      - neither router nor config: a replica set mongod (--replSet),
 *        optionally with SSL flags when MONGOC_ENABLE_SSL is defined;
 *      - config server: mongod --configsvr;
 *      - otherwise: a mongos router pointed at node->configopt.
 *
 *    The child chdirs into node->dbpath, removes a stale mongod.lock
 *    if present, redirects stdio to /dev/null, and execs the binary.
 *    The parent logs the full command line to stderr and stores the
 *    child's pid in node->pid.
 */
void
ha_node_restart (ha_node_t *node)
{
   struct stat st;
   pid_t pid;
   char portstr[12];
   char *argv[30];
   int i = 0;

   snprintf(portstr, sizeof portstr, "%hu", node->port);
   /* snprintf already NUL-terminates; this is belt-and-suspenders. */
   portstr[sizeof portstr - 1] = '\0';

   /* Make sure any previous incarnation is gone before relaunching. */
   ha_node_kill(node);

   if (!node->is_router && !node->is_config) {
      /* Ordinary replica set member. */
      argv[i++] = (char *) "mongod";
      argv[i++] = (char *) "--dbpath";
      argv[i++] = (char *) ".";
      argv[i++] = (char *) "--port";
      argv[i++] = portstr;
      argv[i++] = (char *) "--nojournal";
      argv[i++] = (char *) "--noprealloc";
      argv[i++] = (char *) "--smallfiles";
      argv[i++] = (char *) "--nohttpinterface";
      argv[i++] = (char *) "--bind_ip";
      argv[i++] = (char *) "127.0.0.1";
      argv[i++] = (char *) "--replSet";
      argv[i++] = node->repl_set;

#ifdef MONGOC_ENABLE_SSL
      if (node->ssl_opt) {
         if (node->ssl_opt->pem_file) {
            argv[i++] = (char *) "--sslPEMKeyFile";
            argv[i++] = (char *)(node->ssl_opt->pem_file);
            /* The same PEM file doubles as the intra-cluster cert. */
            argv[i++] = (char *) "--sslClusterFile";
            argv[i++] = (char *)(node->ssl_opt->pem_file);
         }
         if (node->ssl_opt->ca_file) {
            argv[i++] = (char *) "--sslCAFile";
            argv[i++] = (char *)(node->ssl_opt->ca_file);
         }
         argv[i++] = (char *) "--sslOnNormalPorts";
      }
#endif

      argv[i++] = "--logpath";
      argv[i++] = "log";
      argv[i++] = NULL;
   } else if (node->is_config) {
      /* Config server for a sharded cluster. */
      argv[i++] = (char *) "mongod";
      argv[i++] = (char *) "--configsvr";
      argv[i++] = (char *) "--dbpath";
      argv[i++] = (char *) ".";
      argv[i++] = (char *) "--port";
      argv[i++] = (char *) portstr;
      argv[i++] = NULL;
   } else {
      /* mongos router; node->configopt lists the config servers. */
      argv[i++] = (char *) "mongos";
      argv[i++] = (char *) "--bind_ip";
      argv[i++] = (char *) "127.0.0.1";
      argv[i++] = (char *) "--nohttpinterface";
      argv[i++] = (char *) "--port";
      argv[i++] = (char *) portstr;
      argv[i++] = (char *) "--configdb";
      argv[i++] = node->configopt;
      argv[i++] = NULL;
   }

   pid = fork();
   if (pid < 0) {
      perror("Failed to fork process");
      abort();
   }

   if (!pid) {
      /* Child: prepare the environment, then exec the server. */
      int fd;

#ifdef __linux
      /* Have the kernel send SIGTERM (15) if the parent dies. */
      prctl (PR_SET_PDEATHSIG, 15);
#endif

      if (0 != chdir(node->dbpath)) {
         perror("Failed to chdir");
         abort();
      }

      /* Remove a stale lock file left behind by the killed process. */
      if (0 == stat("mongod.lock", &st)) {
         unlink("mongod.lock");
      }

      /* Silence the server's stdio. */
      fd = open("/dev/null", O_RDWR);
      if (fd == -1) {
         perror("Failed to open /dev/null");
         abort();
      }

      dup2(fd, STDIN_FILENO);
      dup2(fd, STDOUT_FILENO);
      dup2(fd, STDERR_FILENO);
      close(fd);

      if (-1 == execvp(argv[0], argv)) {
         perror("Failed to spawn process");
         abort();
      }
   }

   /* Parent: log the command line and remember the child's pid. */
   fprintf(stderr, "[%d]: ", (int)pid);
   for (i = 0; argv[i]; i++)
      fprintf(stderr, "%s ", argv[i]);
   fprintf(stderr, "\n");

   node->pid = pid;
}
static void test2 (void) { mongoc_read_prefs_t *read_prefs; mongoc_collection_t *collection; mongoc_cursor_t *cursor; mongoc_client_t *client; mongoc_client_pool_t *pool = NULL; const bson_t *doc; bson_error_t error; bool r; bson_t q; bson_init(&q); /* * Start by killing 2 of the replica set nodes. */ ha_node_kill(r1); ha_node_kill(r2); if (use_pool) { pool = ha_replica_set_create_client_pool(replica_set); client = mongoc_client_pool_pop (pool); } else { client = ha_replica_set_create_client(replica_set); } collection = mongoc_client_get_collection(client, "test2", "test2"); /* * Perform a query and ensure it fails with no nodes available. */ read_prefs = mongoc_read_prefs_new(MONGOC_READ_SECONDARY_PREFERRED); cursor = mongoc_collection_find(collection, MONGOC_QUERY_NONE, 0, 100, 0, &q, NULL, read_prefs); /* * Try to submit OP_QUERY. Since it is SECONDARY PREFERRED, it should * succeed if there is any node up (which r3 is up). */ r = mongoc_cursor_next(cursor, &doc); BSON_ASSERT(!r); /* No docs */ /* No error, slaveOk was set */ ASSERT_OR_PRINT (!mongoc_cursor_error(cursor, &error), error); mongoc_read_prefs_destroy(read_prefs); mongoc_cursor_destroy(cursor); mongoc_collection_destroy(collection); if (use_pool) { mongoc_client_pool_push (pool, client); mongoc_client_pool_destroy (pool); } else { mongoc_client_destroy(client); } bson_destroy(&q); ha_node_restart(r1); ha_node_restart(r2); }
/*
 * test1 --
 *
 *    Insert test documents, query a SECONDARY, drain the first
 *    OP_REPLY batch (100 documents), then kill the node the cursor is
 *    attached to and verify the next fetch fails with a cursor error.
 *    The killed node is restarted before returning.
 */
static void
test1 (void)
{
   mongoc_server_description_t *description;
   mongoc_collection_t *collection;
   mongoc_read_prefs_t *read_prefs;
   mongoc_cursor_t *cursor;
   mongoc_client_t *client;
   mongoc_client_pool_t *pool = NULL;
   const bson_t *doc;
   bson_error_t error;
   bool r;
   ha_node_t *replica;
   bson_t q;
   int i;

   bson_init(&q);

   if (use_pool) {
      pool = ha_replica_set_create_client_pool(replica_set);
      client = mongoc_client_pool_pop (pool);
   } else {
      client = ha_replica_set_create_client(replica_set);
   }

   collection = mongoc_client_get_collection(client, "test1", "test1");

   MONGOC_DEBUG("Inserting test documents.");
   insert_test_docs(collection);
   MONGOC_INFO("Test documents inserted.");

   read_prefs = mongoc_read_prefs_new(MONGOC_READ_SECONDARY);

   MONGOC_DEBUG("Sending query to a SECONDARY.");
   /* skip=0, limit=0, batch_size=100: first reply holds 100 docs. */
   cursor = mongoc_collection_find(collection,
                                   MONGOC_QUERY_NONE,
                                   0,
                                   0,
                                   100,
                                   &q,
                                   NULL,
                                   read_prefs);

   BSON_ASSERT(cursor);
   /* No server has been selected until the first iteration. */
   BSON_ASSERT(!cursor->server_id);

   /*
    * Send OP_QUERY to server and get first document back.
    */
   MONGOC_INFO("Sending OP_QUERY.");
   r = mongoc_cursor_next(cursor, &doc);
   BSON_ASSERT(r);
   BSON_ASSERT(cursor->server_id);
   BSON_ASSERT(cursor->sent);
   BSON_ASSERT(!cursor->done);
   /* The whole first batch (100 docs) came back in one OP_REPLY. */
   BSON_ASSERT(cursor->rpc.reply.n_returned == 100);
   BSON_ASSERT(!cursor->end_of_event);

   /*
    * Make sure we queried a secondary.
    */
   description = mongoc_topology_server_by_id(client->topology,
                                              cursor->server_id,
                                              &error);
   ASSERT_OR_PRINT (description, error);
   BSON_ASSERT (description->type != MONGOC_SERVER_RS_PRIMARY);
   mongoc_server_description_destroy(description);

   /*
    * Exhaust the items in our first OP_REPLY.
    */
   MONGOC_DEBUG("Exhausting OP_REPLY.");
   /* 98 more iterations leaves exactly one document in the batch. */
   for (i = 0; i < 98; i++) {
      r = mongoc_cursor_next(cursor, &doc);
      BSON_ASSERT(r);
      BSON_ASSERT(cursor->server_id);
      BSON_ASSERT(!cursor->done);
      BSON_ASSERT(!cursor->end_of_event);
   }

   /*
    * Finish off the last item in this OP_REPLY.
    */
   MONGOC_INFO("Fetcing last doc from OP_REPLY.");
   r = mongoc_cursor_next(cursor, &doc);
   BSON_ASSERT(r);
   BSON_ASSERT(cursor->server_id);
   BSON_ASSERT(cursor->sent);
   BSON_ASSERT(!cursor->done);
   BSON_ASSERT(!cursor->end_of_event);

   /*
    * Determine which node we queried by using the server_id to
    * get the cluster information.
    */
   BSON_ASSERT(cursor->server_id);
   replica = get_replica(client, cursor->server_id);

   /*
    * Kill the node we are communicating with.
    */
   MONGOC_INFO("Killing replicaSet node to synthesize failure.");
   ha_node_kill(replica);

   /*
    * Try to fetch the next result set, expect failure.
    */
   MONGOC_DEBUG("Checking for expected failure.");
   r = mongoc_cursor_next(cursor, &doc);
   BSON_ASSERT(!r);

   r = mongoc_cursor_error(cursor, &error);
   BSON_ASSERT(r);
   MONGOC_WARNING("%s", error.message);

   mongoc_cursor_destroy(cursor);
   mongoc_read_prefs_destroy(read_prefs);
   mongoc_collection_destroy(collection);

   if (use_pool) {
      mongoc_client_pool_push (pool, client);
      mongoc_client_pool_destroy (pool);
   } else {
      mongoc_client_destroy(client);
   }

   bson_destroy(&q);

   /* Bring the killed node back for any subsequent tests. */
   ha_node_restart(replica);
}