Example #1
void testGroupByDay(CuTest *tc) {
    struct NameValuePair fromParam  = {"from", "0", NULL};
    struct NameValuePair toParam    = {"to",   "1258281927", &fromParam};
    struct NameValuePair groupParam = {"group", "2", &toParam};
    struct Request req = {"GET", "/query", &groupParam, NULL};

    emptyDb();
    addDbRow(makeTs("2009-11-01 10:00:00"), 3600, NULL,  1,  1, "");
    addDbRow(makeTs("2009-11-01 11:00:00"), 3600, NULL,  2,  2, "");
    addDbRow(makeTs("2009-11-01 12:00:00"), 3600, NULL,  4,  4, "");
    addDbRow(makeTs("2009-11-02 09:00:00"), 3600, NULL,  8,  8, "");
    addDbRow(makeTs("2009-11-02 23:00:00"), 3600, NULL, 16, 16, "");

    int tmpFd = makeTmpFile();
    processQueryRequest(tmpFd, &req);

    char* result = readTmpFile();

    CuAssertStrEquals(tc,
        "HTTP/1.0 200 OK" HTTP_EOL
        "Content-Type: application/json" HTTP_EOL
        "Server: BitMeterOS " VERSION " Web Server" HTTP_EOL
        "Date: Sun, 08 Nov 2009 10:00:00 +0000" HTTP_EOL
        "Connection: Close" HTTP_EOL HTTP_EOL
        "[{\"dl\": 7,\"ul\": 7,\"ts\": 1257120000,\"dr\": 54000},{\"dl\": 24,\"ul\": 24,\"ts\": 1257206400,\"dr\": 86400}]"
    , result);
}
Example #2
void testCompressSecMultiAdapters(CuTest *tc) {
    // Check that database second->minute compression is performed correctly for multiple adapters
    int now = 7200;
    setTime(now);
    emptyDb();
    addDbRow(3601, 1, "eth0",  1,  1, "");
    addDbRow(3601, 1, "eth1",  2,  2, "");
    addDbRow(3601, 1, "eth2",  4,  4, "");
    addDbRow(3600, 1, "eth0",  8,  8, "");
    addDbRow(3600, 1, "eth1", 16, 16, "");
    addDbRow(3600, 1, "eth2", 32, 32, "");
    addDbRow(3599, 1, "eth0", 64, 64, "");
    addDbRow(3598, 1, "eth1", 128, 128, "");
    addDbRow(3597, 1, "eth2", 256, 256, "");
    compressDb();

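    // After compression the ts=3601 rows are still per-second, while the older rows are combined
    // into one minute row (dr=60) per adapter: eth0 8+64=72, eth1 16+128=144, eth2 32+256=288.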
    struct Data row1 = {3601, 1,    1,   1, "eth0", "", NULL};
    struct Data row2 = {3601, 1,    2,   2, "eth1", "", NULL};
    struct Data row3 = {3601, 1,    4,   4, "eth2", "", NULL};
    struct Data row4 = {3600, 60,  72,  72, "eth0", "", NULL};
    struct Data row5 = {3600, 60, 144, 144, "eth1", "", NULL};
    struct Data row6 = {3600, 60, 288, 288, "eth2", "", NULL};

    checkTableContents(tc, 6, row1, row2, row3, row4, row5, row6);
}
Example #3
void testGroupByDayCsv(CuTest *tc) {
    struct NameValuePair fromParam  = {"from", "0", NULL};
    struct NameValuePair toParam    = {"to",   "1258281927", &fromParam};
    struct NameValuePair groupParam = {"group", "2", &toParam};
    struct NameValuePair csvParam   = {"csv", "1", &groupParam};
    struct Request req = {"GET", "/query", &csvParam, NULL};

    emptyDb();
    addDbRow(makeTs("2009-11-01 10:00:00"), 3600, NULL,  1,  1, "");
    addDbRow(makeTs("2009-11-01 11:00:00"), 3600, NULL,  2,  2, "");
    addDbRow(makeTs("2009-11-01 12:00:00"), 3600, NULL,  4,  4, "");
    addDbRow(makeTs("2009-11-02 09:00:00"), 3600, NULL,  8,  8, "");
    addDbRow(makeTs("2009-11-02 23:00:00"), 3600, NULL, 16, 16, "");

    int tmpFd = makeTmpFile();
    processQueryRequest(tmpFd, &req);

    char* result = readTmpFile();

    CuAssertStrEquals(tc,
        "HTTP/1.0 200 OK" HTTP_EOL
        "Content-Type: text/csv" HTTP_EOL
        "Server: BitMeterOS " VERSION " Web Server" HTTP_EOL
        "Date: Sun, 08 Nov 2009 10:00:00 +0000" HTTP_EOL
        "Connection: Close" HTTP_EOL
        "Content-Disposition: attachment;filename=bitmeterOsQuery.csv" HTTP_EOL HTTP_EOL
        "2009-11-01 09:00:00,7,7\n"
		"2009-11-02 00:00:00,24,24\n"
    , result);
}
Example #4
void testParamsOk(CuTest *tc) {
    struct NameValuePair fromParam  = {"from", "1257120000", NULL};       // 2009-11-02
    struct NameValuePair toParam    = {"to",   "1257292800", &fromParam}; // 2009-11-04
    struct NameValuePair groupParam = {"group", "5", &toParam};           // Total
    struct Request req = {"GET", "/query", &groupParam, NULL};

    // The query range covers the second, third and fourth rows only
    emptyDb();
    addDbRow(makeTs("2009-11-01 12:00:00"), 3600, NULL,  1,  1, "");
    addDbRow(makeTs("2009-11-02 12:00:00"), 3600, NULL,  2,  2, ""); // Match
    addDbRow(makeTs("2009-11-03 12:00:00"), 3600, NULL,  4,  4, ""); // Match
    addDbRow(makeTs("2009-11-04 12:00:00"), 3600, NULL,  8,  8, ""); // Match
    addDbRow(makeTs("2009-11-05 12:00:00"), 3600, NULL, 16, 16, "");

    int tmpFd = makeTmpFile();
    processQueryRequest(tmpFd, &req);

    char* result = readTmpFile();

    // The 'ts' value = 2009-11-05 00:00:00, ie the end of the date range covered by the query
    // The 'dr' value = 3 * 24 * 3600, ie the number of seconds in 3 days
    CuAssertStrEquals(tc,
        "HTTP/1.0 200 OK" HTTP_EOL
        "Content-Type: application/json" HTTP_EOL
        "Server: BitMeterOS " VERSION " Web Server" HTTP_EOL
        "Date: Sun, 08 Nov 2009 10:00:00 +0000" HTTP_EOL
        "Connection: Close" HTTP_EOL HTTP_EOL
        "[{\"dl\": 14,\"ul\": 14,\"ts\": 1257379200,\"dr\": 259200}]"
    , result);
}
Example #5
void testSummaryWithHostAdapterParams(CuTest *tc) {
    emptyDb();
    addDbRow(makeTs("2008-11-02 12:00:00"), 3600, "eth0",  1,  1, "host1"); // Last year
    addDbRow(makeTs("2008-11-02 12:00:00"), 3600, "eth0",  1,  1, "host2"); // Last year
    addDbRow(makeTs("2009-10-01 12:00:00"), 3600, "eth0",  2,  2, "host1"); // Earlier this year
    addDbRow(makeTs("2009-10-01 12:00:00"), 3600, "eth0",  2,  2, ""); // Earlier this year
    addDbRow(makeTs("2009-11-04 12:00:00"), 3600, "eth0",  4,  4, "host1"); // Earlier this month
    addDbRow(makeTs("2009-11-04 12:00:00"), 3600, "eth0",  4,  4, ""); // Earlier this month
    addDbRow(makeTs("2009-11-08 01:00:00"), 3600, "eth0",  8,  8, "host1"); // Today
    addDbRow(makeTs("2009-11-08 01:00:00"), 3600, "eth0",  8,  8, ""); // Today

    struct NameValuePair param = {"ha", "host1:eth0", NULL};
    struct Request req = {"GET", "/summary", &param, NULL};

    time_t now = makeTs("2009-11-08 10:00:00");
    setTime(now);

    int tmpFd = makeTmpFile();
    processSummaryRequest(tmpFd, &req);

    char* result = readTmpFile();

    CuAssertStrEquals(tc,
        "HTTP/1.0 200 OK" HTTP_EOL
        "Content-Type: application/json" HTTP_EOL
        "Server: BitMeterOS " VERSION " Web Server" HTTP_EOL
        "Date: Sun, 08 Nov 2009 10:00:00 +0000" HTTP_EOL
        "Connection: Close" HTTP_EOL HTTP_EOL
        "{\"today\": {\"dl\": 8,\"ul\": 8,\"ts\": 0,\"dr\": 0}, \"month\": {\"dl\": 12,\"ul\": 12,\"ts\": 0,\"dr\": 0}, \"year\": {\"dl\": 14,\"ul\": 14,\"ts\": 0,\"dr\": 0}, \"total\": {\"dl\": 15,\"ul\": 15,\"ts\": 0,\"dr\": 0}"
        ", \"hosts\": null, \"since\": 1225627200}"
    , result);
}
Example #6
File: db.c Project: Xwuming/misc
/* FLUSHDB [ASYNC]
 *
 * Flushes the currently SELECTed Redis DB. */
void flushdbCommand(client *c) {
    int flags;

    if (getFlushCommandFlags(c,&flags) == C_ERR) return;
    signalFlushedDb(c->db->id);
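    /* emptyDb() returns the number of keys removed, which counts towards the dirty changes. */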
    server.dirty += emptyDb(c->db->id,flags,NULL);
    addReply(c,shared.ok);
}
Example #7
void flushallCommand(redisClient *c) {
    touchWatchedKeysOnFlush(-1);
    server.dirty += emptyDb();
    addReply(c,shared.ok);
    if (server.bgsavechildpid != -1) {
        kill(server.bgsavechildpid,SIGKILL);
        rdbRemoveTempFile(server.bgsavechildpid);
    }
    rdbSave(server.dbfilename);
    server.dirty++;
}
Example #8
File: db.c Project: andmej/redis
void flushallCommand(redisClient *c) {
    signalFlushedDb(-1);
    server.dirty += emptyDb();
    addReply(c,shared.ok);
    if (server.bgsavechildpid != -1) {
        kill(server.bgsavechildpid,SIGKILL);
        rdbRemoveTempFile(server.bgsavechildpid);
    }
    if (server.ds_enabled)
        dsFlushDb(-1);
    else
        rdbSave(server.dbfilename);
    server.dirty++;
}
Example #9
void flushallCommand(redisClient *c) {
    signalFlushedDb(-1);
    server.dirty += emptyDb();
    addReply(c,shared.ok);
    if (server.rdb_child_pid != -1) {
        kill(server.rdb_child_pid,SIGUSR1);
        rdbRemoveTempFile(server.rdb_child_pid);
    }
    if (server.saveparamslen > 0) {
        /* Normally rdbSave() will reset dirty, but we don't want this here
         * as otherwise FLUSHALL will not be replicated nor put into the AOF. */
        int saved_dirty = server.dirty;
        rdbSave(server.rdb_filename);
        server.dirty = saved_dirty;
    }
    server.dirty++;
}
Example #10
void testCompressSec1Adapter(CuTest *tc) {
    // Check that database second->minute compression is performed correctly for a single adapter
    int now = 7200;
    setTime(now);
    emptyDb();
    addDbRow(3601, 1, "eth0",  1,  1, "");
    addDbRow(3600, 1, "eth0",  2,  2, "");
    addDbRow(3599, 1, "eth0",  4,  4, "");
    addDbRow(3598, 1, "eth0",  8,  8, "");
    addDbRow(3597, 1, "eth0", 16, 16, "");
    compressDb();

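    // Only the ts=3601 row should remain per-second; the other four rows roll up into
    // a single minute row: 2+4+8+16=30.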
    struct Data row1 = {3601, 1,   1,  1, "eth0", "", NULL};
    struct Data row2 = {3600, 60, 30, 30, "eth0", "", NULL};

    checkTableContents(tc, 2, row1, row2);
}
Example #11
void testCompressSecMultiIterations(CuTest *tc) {
    /* Check that database second->minute compression is performed correctly for multiple adapters where
       the data is spread out over time such that multiple compressed rows for each
       adapter will result. */
    int now = 7200;
    setTime(now);
    emptyDb();
    addDbRow(3601, 1, "eth0",    1,    1, "");
    addDbRow(3601, 1, "eth1",    2,    2, "");
    addDbRow(3601, 1, "eth2",    4,    4, "");
    addDbRow(3600, 1, "eth0",    8,    8, "");
    addDbRow(3600, 1, "eth1",   16,   16, "");
    addDbRow(3600, 1, "eth2",   32,   32, "");
    addDbRow(3599, 1, "eth0",   64,   64, "");
    addDbRow(3598, 1, "eth1",  128,  128, "");
    addDbRow(3597, 1, "eth2",  256,  256, "");
    addDbRow(3540, 1, "eth0",  512,  512, "");
    addDbRow(3540, 1, "eth1", 1024, 1024, "");
    addDbRow(3540, 1, "eth2", 2048, 2048, "");
    addDbRow(3539, 1, "eth0", 4096, 4096, "");
    addDbRow(3538, 1, "eth1", 8192, 8192, "");
    addDbRow(3537, 1, "eth2", 16384, 16384, "");
    compressDb();

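    // Two compressed minute rows per adapter are expected: one ending at 3600
    // (eth0 8+64=72, eth1 16+128=144, eth2 32+256=288) and one ending at 3540
    // (eth0 512+4096=4608, eth1 1024+8192=9216, eth2 2048+16384=18432);
    // the ts=3601 rows stay per-second.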
    struct Data row1 = {3601, 1,     1,    1, "eth0", "", NULL};
    struct Data row2 = {3601, 1,     2,    2, "eth1", "", NULL};
    struct Data row3 = {3601, 1,     4,    4, "eth2", "", NULL};
    struct Data row4 = {3600, 60,   72,   72, "eth0", "", NULL};
    struct Data row5 = {3600, 60,  144,  144, "eth1", "", NULL};
    struct Data row6 = {3600, 60,  288,  288, "eth2", "", NULL};
    struct Data row7 = {3540, 60, 4608, 4608, "eth0", "", NULL};
    struct Data row8 = {3540, 60, 9216, 9216, "eth1", "", NULL};
    struct Data row9 = {3540, 60, 18432, 18432, "eth2", "", NULL};

    checkTableContents(tc, 9, row1, row2, row3, row4, row5, row6, row7, row8, row9);
}
Example #12
/* Asynchronously read the SYNC payload we receive from a master */
void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) {
    char buf[4096];
    ssize_t nread, readlen;
    REDIS_NOTUSED(el);
    REDIS_NOTUSED(privdata);
    REDIS_NOTUSED(mask);

    /* If repl_transfer_left == -1 we still have to read the bulk length
     * from the master reply. */
    if (server.repl_transfer_left == -1) {
        if (syncReadLine(fd,buf,1024,3600) == -1) {
            redisLog(REDIS_WARNING,
                "I/O error reading bulk count from MASTER: %s",
                strerror(errno));
            replicationAbortSyncTransfer();
            return;
        }
        if (buf[0] == '-') {
            redisLog(REDIS_WARNING,
                "MASTER aborted replication with an error: %s",
                buf+1);
            replicationAbortSyncTransfer();
            return;
        } else if (buf[0] == '\0') {
            /* At this stage just a newline works as a PING in order to take
             * the connection live. So we refresh our last interaction
             * timestamp. */
            server.repl_transfer_lastio = time(NULL);
            return;
        } else if (buf[0] != '$') {
            redisLog(REDIS_WARNING,"Bad protocol from MASTER, the first byte is not '$', are you sure the host and port are right?");
            replicationAbortSyncTransfer();
            return;
        }
        server.repl_transfer_left = strtol(buf+1,NULL,10);
        redisLog(REDIS_NOTICE,
            "MASTER <-> SLAVE sync: receiving %ld bytes from master",
            server.repl_transfer_left);
        return;
    }

    /* Read bulk data */
    readlen = (server.repl_transfer_left < (signed)sizeof(buf)) ?
        server.repl_transfer_left : (signed)sizeof(buf);
    nread = read(fd,buf,readlen);
    if (nread <= 0) {
        redisLog(REDIS_WARNING,"I/O error trying to sync with MASTER: %s",
            (nread == -1) ? strerror(errno) : "connection lost");
        replicationAbortSyncTransfer();
        return;
    }
    server.repl_transfer_lastio = time(NULL);
    if (write(server.repl_transfer_fd,buf,nread) != nread) {
        redisLog(REDIS_WARNING,"Write error or short write writing to the DB dump file needed for MASTER <-> SLAVE synchrnonization: %s", strerror(errno));
        replicationAbortSyncTransfer();
        return;
    }
    server.repl_transfer_left -= nread;
    /* Check if the transfer is now complete */
    if (server.repl_transfer_left == 0) {
        if (rename(server.repl_transfer_tmpfile,server.dbfilename) == -1) {
            redisLog(REDIS_WARNING,"Failed trying to rename the temp DB into dump.rdb in MASTER <-> SLAVE synchronization: %s", strerror(errno));
            replicationAbortSyncTransfer();
            return;
        }
        redisLog(REDIS_NOTICE, "MASTER <-> SLAVE sync: Loading DB in memory");
        emptyDb();
        /* Before loading the DB into memory we need to delete the readable
         * handler, otherwise it will get called recursively since
         * rdbLoad() will call the event loop to process events from time to
         * time for non blocking loading. */
        aeDeleteFileEvent(server.el,server.repl_transfer_s,AE_READABLE);
        if (rdbLoad(server.dbfilename) != REDIS_OK) {
            redisLog(REDIS_WARNING,"Failed trying to load the MASTER synchronization DB from disk");
            replicationAbortSyncTransfer();
            return;
        }
        /* Final setup of the connected slave <- master link */
        zfree(server.repl_transfer_tmpfile);
        close(server.repl_transfer_fd);
        server.master = createClient(server.repl_transfer_s);
        server.master->flags |= REDIS_MASTER;
        server.master->authenticated = 1;
        server.replstate = REDIS_REPL_CONNECTED;
        redisLog(REDIS_NOTICE, "MASTER <-> SLAVE sync: Finished with success");
    }
}
Example #13
/* Asynchronously read the SYNC payload we receive from a master */
#define REPL_MAX_WRITTEN_BEFORE_FSYNC (1024*1024*8) /* 8 MB */
void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) {
    char buf[4096];
    ssize_t nread, readlen;
    off_t left;
    REDIS_NOTUSED(el);
    REDIS_NOTUSED(privdata);
    REDIS_NOTUSED(mask);

    /* If repl_transfer_size == -1 we still have to read the bulk length
     * from the master reply. */
    if (server.repl_transfer_size == -1) {
        if (syncReadLine(fd,buf,1024,server.repl_syncio_timeout*1000) == -1) {
            redisLog(REDIS_WARNING,
                "I/O error reading bulk count from MASTER: %s",
                strerror(errno));
            goto error;
        }

        if (buf[0] == '-') {
            redisLog(REDIS_WARNING,
                "MASTER aborted replication with an error: %s",
                buf+1);
            goto error;
        } else if (buf[0] == '\0') {
            /* At this stage just a newline works as a PING in order to take
             * the connection live. So we refresh our last interaction
             * timestamp. */
            server.repl_transfer_lastio = server.unixtime;
            return;
        } else if (buf[0] != '$') {
            redisLog(REDIS_WARNING,"Bad protocol from MASTER, the first byte is not '$', are you sure the host and port are right?");
            goto error;
        }
        server.repl_transfer_size = strtol(buf+1,NULL,10);
        redisLog(REDIS_NOTICE,
            "MASTER <-> SLAVE sync: receiving %ld bytes from master",
            server.repl_transfer_size);
        return;
    }

    /* Read bulk data */
    left = server.repl_transfer_size - server.repl_transfer_read;
    readlen = (left < (signed)sizeof(buf)) ? left : (signed)sizeof(buf);
    nread = read(fd,buf,readlen);
    if (nread <= 0) {
        redisLog(REDIS_WARNING,"I/O error trying to sync with MASTER: %s",
            (nread == -1) ? strerror(errno) : "connection lost");
        replicationAbortSyncTransfer();
        return;
    }
    server.repl_transfer_lastio = server.unixtime;
    if (write(server.repl_transfer_fd,buf,nread) != nread) {
        redisLog(REDIS_WARNING,"Write error or short write writing to the DB dump file needed for MASTER <-> SLAVE synchronization: %s", strerror(errno));
        goto error;
    }
    server.repl_transfer_read += nread;

    /* Sync data on disk from time to time, otherwise at the end of the transfer
     * we may suffer a big delay as the memory buffers are copied into the
     * actual disk. */
    if (server.repl_transfer_read >=
        server.repl_transfer_last_fsync_off + REPL_MAX_WRITTEN_BEFORE_FSYNC)
    {
        off_t sync_size = server.repl_transfer_read -
                          server.repl_transfer_last_fsync_off;
        rdb_fsync_range(server.repl_transfer_fd,
            server.repl_transfer_last_fsync_off, sync_size);
        server.repl_transfer_last_fsync_off += sync_size;
    }

    /* Check if the transfer is now complete */
    if (server.repl_transfer_read == server.repl_transfer_size) {
        if (rename(server.repl_transfer_tmpfile,server.rdb_filename) == -1) {
            redisLog(REDIS_WARNING,"Failed trying to rename the temp DB into dump.rdb in MASTER <-> SLAVE synchronization: %s", strerror(errno));
            replicationAbortSyncTransfer();
            return;
        }
        redisLog(REDIS_NOTICE, "MASTER <-> SLAVE sync: Loading DB in memory");
        emptyDb();
        /* Before loading the DB into memory we need to delete the readable
         * handler, otherwise it will get called recursively since
         * rdbLoad() will call the event loop to process events from time to
         * time for non blocking loading. */
        aeDeleteFileEvent(server.el,server.repl_transfer_s,AE_READABLE);
        if (rdbLoad(server.rdb_filename) != REDIS_OK) {
            redisLog(REDIS_WARNING,"Failed trying to load the MASTER synchronization DB from disk");
            replicationAbortSyncTransfer();
            return;
        }
        /* Final setup of the connected slave <- master link */
        zfree(server.repl_transfer_tmpfile);
        close(server.repl_transfer_fd);
        server.master = createClient(server.repl_transfer_s);
        server.master->flags |= REDIS_MASTER;
        server.master->authenticated = 1;
        server.repl_state = REDIS_REPL_CONNECTED;
        redisLog(REDIS_NOTICE, "MASTER <-> SLAVE sync: Finished with success");
        /* Restart the AOF subsystem now that we finished the sync. This
         * will trigger an AOF rewrite, and when done will start appending
         * to the new file. */
        if (server.aof_state != REDIS_AOF_OFF) {
            int retry = 10;

            stopAppendOnly();
            while (retry-- && startAppendOnly() == REDIS_ERR) {
                redisLog(REDIS_WARNING,"Failed enabling the AOF after successful master synchrnization! Trying it again in one second.");
                sleep(1);
            }
            if (!retry) {
                redisLog(REDIS_WARNING,"FATAL: this slave instance finished the synchronization with its master, but the AOF can't be turned on. Exiting now.");
                exit(1);
            }
        }
    }

    return;

error:
    replicationAbortSyncTransfer();
    return;
}
Example #14
int syncWithMaster(void) {
    char buf[1024], tmpfile[256], authcmd[1024];
    long dumpsize;
    int fd = anetTcpConnect(NULL,server.masterhost,server.masterport);
    int dfd, maxtries = 5;

    if (fd == -1) {
        redisLog(REDIS_WARNING,"Unable to connect to MASTER: %s",
            strerror(errno));
        return REDIS_ERR;
    }

    /* AUTH with the master if required. */
    if (server.masterauth) {
        snprintf(authcmd, 1024, "AUTH %s\r\n", server.masterauth);
        if (syncWrite(fd, authcmd, strlen(server.masterauth)+7, 5) == -1) {
            close(fd);
            redisLog(REDIS_WARNING,"Unable to AUTH to MASTER: %s",
                strerror(errno));
            return REDIS_ERR;
        }
        /* Read the AUTH result.  */
        if (syncReadLine(fd,buf,1024,3600) == -1) {
            close(fd);
            redisLog(REDIS_WARNING,"I/O error reading auth result from MASTER: %s",
                strerror(errno));
            return REDIS_ERR;
        }
        if (buf[0] != '+') {
            close(fd);
            redisLog(REDIS_WARNING,"Cannot AUTH to MASTER, is the masterauth password correct?");
            return REDIS_ERR;
        }
    }

    /* Issue the SYNC command */
    if (syncWrite(fd,"SYNC \r\n",7,5) == -1) {
        close(fd);
        redisLog(REDIS_WARNING,"I/O error writing to MASTER: %s",
            strerror(errno));
        return REDIS_ERR;
    }
    /* Read the bulk write count */
    if (syncReadLine(fd,buf,1024,3600) == -1) {
        close(fd);
        redisLog(REDIS_WARNING,"I/O error reading bulk count from MASTER: %s",
            strerror(errno));
        return REDIS_ERR;
    }
    if (buf[0] == '-') {
        close(fd);
        redisLog(REDIS_WARNING,"MASTER aborted replication with an error: %s",
            buf+1);
        return REDIS_ERR;
    } else if (buf[0] != '$') {
        close(fd);
        redisLog(REDIS_WARNING,"Bad protocol from MASTER, the first byte is not '$', are you sure the host and port are right?");
        return REDIS_ERR;
    }
    dumpsize = strtol(buf+1,NULL,10);
    redisLog(REDIS_NOTICE,"Receiving %ld bytes data dump from MASTER",dumpsize);
    /* Read the bulk write data on a temp file */
    while(maxtries--) {
        snprintf(tmpfile,256,
            "temp-%d.%ld.rdb",(int)time(NULL),(long int)getpid());
        dfd = open(tmpfile,O_CREAT|O_WRONLY|O_EXCL,0644);
        if (dfd != -1) break;
        sleep(1);
    }
    if (dfd == -1) {
        close(fd);
        redisLog(REDIS_WARNING,"Opening the temp file needed for MASTER <-> SLAVE synchronization: %s",strerror(errno));
        return REDIS_ERR;
    }
    while(dumpsize) {
        int nread, nwritten;

        nread = read(fd,buf,(dumpsize < 1024)?dumpsize:1024);
        if (nread <= 0) {
            redisLog(REDIS_WARNING,"I/O error trying to sync with MASTER: %s",
                (nread == -1) ? strerror(errno) : "connection lost");
            close(fd);
            close(dfd);
            return REDIS_ERR;
        }
        nwritten = write(dfd,buf,nread);
        if (nwritten == -1) {
            redisLog(REDIS_WARNING,"Write error writing to the DB dump file needed for MASTER <-> SLAVE synchrnonization: %s", strerror(errno));
            close(fd);
            close(dfd);
            return REDIS_ERR;
        }
        dumpsize -= nread;
    }
    close(dfd);
    if (rename(tmpfile,server.dbfilename) == -1) {
        redisLog(REDIS_WARNING,"Failed trying to rename the temp DB into dump.rdb in MASTER <-> SLAVE synchronization: %s", strerror(errno));
        unlink(tmpfile);
        close(fd);
        return REDIS_ERR;
    }
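    /* Discard the current in-memory dataset before loading the dump just received from the master. */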
    emptyDb();
    if (rdbLoad(server.dbfilename) != REDIS_OK) {
        redisLog(REDIS_WARNING,"Failed trying to load the MASTER synchronization DB from disk");
        close(fd);
        return REDIS_ERR;
    }
    server.master = createClient(fd);
    server.master->flags |= REDIS_MASTER;
    server.master->authenticated = 1;
    server.replstate = REDIS_REPL_CONNECTED;
    return REDIS_OK;
}
Example #15
/* Main */
int
clusterMain (int argc, char **argv)
{
  long long teid;
  redisLog (REDIS_WARNING, "Server started, Redis version " REDIS_VERSION);
#ifdef __linux__
  linuxOvercommitMemoryWarning ();
#endif
  loadDataFromDisk ();
  server.last_bgsave_seqnum = server.smr_seqnum;

  /* Warning the user about suspicious maxmemory setting. */
  if (server.maxmemory > 0 && server.maxmemory < 1024 * 1024)
    {
      redisLog (REDIS_WARNING,
		"WARNING: You specified a maxmemory value that is less than 1MB (current value is %llu bytes). Are you sure this is what you really want?",
		server.maxmemory);
    }

  smrConnect ();

  /* initializeCron for handling sigterm */
  teid = aeCreateTimeEvent (server.el, 1, initializeCron, NULL, NULL);
  aeMain (server.el);
  aeDeleteTimeEvent (server.el, teid);

  if (server.need_rckpt)
    {
      redisLog (REDIS_NOTICE,
		"Need more checkpoint from %s:%d", server.ckpt_host,
		server.ckpt_port);
      smrDisconnect ();
      server.smr_init_flags = SMR_INIT_RCKPT;
      if (getdump
	  (server.ckpt_host, server.ckpt_port, server.rdb_filename,
	   "0-8191", REDIS_GETDUMP_DEFAULT_NET_LIMIT_MB) != REDIS_OK)
	{
	  exit (1);
	}

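      /* Drop the current dataset and reload from the checkpoint dump fetched above. */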
      emptyDb (NULL);
      loadDataFromDisk ();
      server.last_bgsave_seqnum = server.smr_seqnum;

      smrConnect ();
      aeMain (server.el);
    }

  if (!server.is_ready)
    {
      redisLog (REDIS_WARNING, "Invalid initialization state");
      exit (1);
    }

  if (server.last_bgsave_seqnum)
    {
      if (smr_seq_ckpted (server.smr_conn, server.last_bgsave_seqnum) != 0)
	{
	  redisLog (REDIS_WARNING,
		    "Failed to notify checkpointed sequence to smr");
	  exit (1);
	}
      else
	{
	  redisLog (REDIS_NOTICE,
		    "Checkpointed sequence is sent to SMR, seqnum:%lld",
		    server.last_bgsave_seqnum);
	}
    }

  server.smr_seqnum_reset = 0;
  server.last_catchup_check_mstime = mstime ();
  server.smr_init_flags = SMR_INIT_CATCHUP_PHASE1;
  if (smr_catchup (server.smr_conn) == -1)
    {
      redisLog (REDIS_WARNING, "Failed to catchup errno(%d)", errno);
      exit (1);
    }
  server.smr_init_flags = SMR_INIT_CATCHUP_PHASE2;
  checkSmrCatchup ();
  aeSetBeforeSleepProc (server.el, beforeSleep);
  aeMain (server.el);
  aeDeleteEventLoop (server.el);
  exit (0);
}