Beispiel #1
0
/* rrd_create_fn -- write a freshly built in-core rrd_t out to file_name.
 *
 * Writes the header sections (stat_head, ds_def, rra_def, live_head),
 * allocates and writes the pdp_prep / cdp_prep / rra_ptr bookkeeping
 * records, then seeds every data cell of every RRA with DNAN.
 *
 * file_name - path of the file to create; RRD_EXCL is added when
 *             opt_no_overwrite is set, refusing to clobber an
 *             existing file
 * rrd       - fully populated in-core RRD; rrd_free2() is called on
 *             every path, success or failure
 *
 * Returns 0 on success or a negative RRD_ERR_* code. */
int rrd_create_fn(const char *file_name, rrd_t *rrd)
{
	unsigned long i, ii;
	rrd_value_t *unknown;
	int       unkn_cnt;
	rrd_file_t *rrd_file_dn;
	rrd_t     rrd_dn;
	unsigned  rrd_flags = RRD_READWRITE | RRD_CREAT;
	int       ret = 0;

	if (opt_no_overwrite) {
		rrd_flags |= RRD_EXCL;
	}

	/* total number of data cells across all RRAs; they all start out
	 * as unknown (DNAN) */
	unkn_cnt = 0;
	for (i = 0; i < rrd->stat_head->rra_cnt; i++)
		unkn_cnt += rrd->stat_head->ds_cnt * rrd->rra_def[i].row_cnt;

	if ((rrd_file_dn = rrd_open(file_name, rrd, rrd_flags, &ret)) == NULL) {
		rrd_free2(rrd);
		return ret;
	}

	if (rrd_write(rrd_file_dn, rrd->stat_head, sizeof(stat_head_t)) < 0) {
		ret = -RRD_ERR_WRITE5;
		goto err_close;
	}

	/* BUGFIX(review): these three header writes were previously
	 * unchecked; a short write silently produced a corrupt file. */
	if (rrd_write(rrd_file_dn, rrd->ds_def,
				sizeof(ds_def_t) * rrd->stat_head->ds_cnt) < 0 ||
			rrd_write(rrd_file_dn, rrd->rra_def,
				sizeof(rra_def_t) * rrd->stat_head->rra_cnt) < 0 ||
			rrd_write(rrd_file_dn, rrd->live_head, sizeof(live_head_t)) < 0) {
		ret = -RRD_ERR_WRITE5;
		goto err_close;
	}

	if ((rrd->pdp_prep = (pdp_prep_t *) calloc(1, sizeof(pdp_prep_t))) == NULL) {
		ret = -RRD_ERR_ALLOC;
		goto err_close;
	}

	/* no previous sample seen yet */
	strcpy(rrd->pdp_prep->last_ds, "U");

	rrd->pdp_prep->scratch[PDP_val].u_val = 0.0;
	/* seconds of the current step that lie before last_up and are
	 * therefore unknown */
	rrd->pdp_prep->scratch[PDP_unkn_sec_cnt].u_cnt =
		rrd->live_head->last_up % rrd->stat_head->pdp_step;

	/* one (identical) pdp_prep record per data source */
	for (i = 0; i < rrd->stat_head->ds_cnt; i++)
		rrd_write(rrd_file_dn, rrd->pdp_prep, sizeof(pdp_prep_t));

	if ((rrd->cdp_prep = (cdp_prep_t *) calloc(1, sizeof(cdp_prep_t))) == NULL) {
		ret = -RRD_ERR_ALLOC;
		goto err_close;
	}

	for (i = 0; i < rrd->stat_head->rra_cnt; i++) {
		/* the single scratch record is re-initialized per RRA according
		 * to its consolidation function, then written once per DS */
		switch (cf_conv(rrd->rra_def[i].cf_nam)) {
			case CF_HWPREDICT:
			case CF_MHWPREDICT:
				init_hwpredict_cdp(rrd->cdp_prep);
				break;
			case CF_SEASONAL:
			case CF_DEVSEASONAL:
				init_seasonal_cdp(rrd->cdp_prep);
				break;
			case CF_FAILURES:
				/* initialize violation history to 0 */
				for (ii = 0; ii < MAX_CDP_PAR_EN; ii++) {
					rrd->cdp_prep->scratch[ii].u_val = 0.0;
				}
				break;
			default:
				/* can not be zero because we don't know anything ... */
				rrd->cdp_prep->scratch[CDP_val].u_val = DNAN;
				/* startup missing pdp count */
				rrd->cdp_prep->scratch[CDP_unkn_pdp_cnt].u_cnt =
					((rrd->live_head->last_up -
					  rrd->pdp_prep->scratch[PDP_unkn_sec_cnt].u_cnt)
					 % (rrd->stat_head->pdp_step
						 * rrd->rra_def[i].pdp_cnt)) / rrd->stat_head->pdp_step;
				break;
		}

		for (ii = 0; ii < rrd->stat_head->ds_cnt; ii++) {
			rrd_write(rrd_file_dn, rrd->cdp_prep, sizeof(cdp_prep_t));
		}
	}

	/* now, we must make sure that the rest of the rrd
	   struct is properly initialized */

	if ((rrd->rra_ptr = (rra_ptr_t *) calloc(1, sizeof(rra_ptr_t))) == NULL) {
		ret = -RRD_ERR_ALLOC;
		goto err_close;
	}

	/* changed this initialization to be consistent with
	 * rrd_restore. With the old value (0), the first update
	 * would occur for cur_row = 1 because rrd_update increments
	 * the pointer a priori. */
	for (i = 0; i < rrd->stat_head->rra_cnt; i++) {
		rrd->rra_ptr->cur_row =
			rrd_select_initial_row(rrd_file_dn, i, &rrd->rra_def[i]);
		rrd_write(rrd_file_dn, rrd->rra_ptr, sizeof(rra_ptr_t));
	}

	/* write the empty data area in chunks of 512 DNAN values */
	if ((unknown = (rrd_value_t *) malloc(512 * sizeof(rrd_value_t))) == NULL) {
		ret = -RRD_ERR_ALLOC;
		goto err_close;
	}
	for (i = 0; i < 512; ++i)
		unknown[i] = DNAN;

	while (unkn_cnt > 0) {
		if (rrd_write(rrd_file_dn, unknown,
					sizeof(rrd_value_t) * min(unkn_cnt, 512)) < 0) {
			/* BUGFIX(review): this path used to return directly,
			 * leaking 'unknown', the rrd members and the open file */
			ret = -RRD_ERR_CREATE_WRITE;
			goto err_free_unknown;
		}
		unkn_cnt -= 512;
	}
	free(unknown);
	rrd_free2(rrd);
	if (rrd_close(rrd_file_dn) == -1) {
		return -RRD_ERR_CREATE_WRITE;
	}
	/* flush all we don't need out of the cache: reopen read-only and
	 * advise the OS that the freshly written pages are not needed */
	rrd_init(&rrd_dn);
	if ((rrd_file_dn = rrd_open(file_name, &rrd_dn, RRD_READONLY, &ret)) != NULL) {
		rrd_dontneed(rrd_file_dn, &rrd_dn);
		/* rrd_free(&rrd_dn); */
		rrd_close(rrd_file_dn);
	}
	return ret;

  err_free_unknown:
	free(unknown);
  err_close:
	rrd_free2(rrd);
	rrd_close(rrd_file_dn);
	return ret;
}
Beispiel #2
0
/* Reset aberrant behavior model coefficients, including intercept, slope,
 * seasonal, and seasonal deviation for the specified data source.
 *
 * rrd      - in-core RRD; its cdp_prep entries for ds_idx are reset in
 *            memory and then flushed to disk
 * rrd_file - open handle to the on-disk file; seasonal/deviation rows
 *            and the whole cdp_prep area are rewritten through it
 * ds_idx   - zero-based index of the data source to reset
 *
 * On a failed data write this reports via rrd_set_error() and returns
 * early, leaving the file in a partially reset state. */
void reset_aberrant_coefficients(
    rrd_t *rrd,
    rrd_file_t *rrd_file,
    unsigned long ds_idx)
{
    unsigned long cdp_idx, rra_idx, i;
    unsigned long cdp_start, rra_start;
    rrd_value_t nan_buffer = DNAN;  /* single DNAN cell written per row */

    /* compute the offset for the cdp area: it follows the static header,
     * one ds_def per DS, one rra_def per RRA, the live head, and one
     * pdp_prep per DS */
    cdp_start = sizeof(stat_head_t) +
        rrd->stat_head->ds_cnt * sizeof(ds_def_t) +
        rrd->stat_head->rra_cnt * sizeof(rra_def_t) +
        sizeof(live_head_t) + rrd->stat_head->ds_cnt * sizeof(pdp_prep_t);
    /* compute the offset for the first rra: after the cdp_prep area
     * (ds_cnt * rra_cnt entries) and one rra_ptr per RRA */
    rra_start = cdp_start +
        (rrd->stat_head->ds_cnt) * (rrd->stat_head->rra_cnt) *
        sizeof(cdp_prep_t) + rrd->stat_head->rra_cnt * sizeof(rra_ptr_t);

    /* loop over the RRAs */
    for (rra_idx = 0; rra_idx < rrd->stat_head->rra_cnt; rra_idx++) {
        /* cdp_prep entries are laid out RRA-major, DS-minor */
        cdp_idx = rra_idx * (rrd->stat_head->ds_cnt) + ds_idx;
        switch (cf_conv(rrd->rra_def[rra_idx].cf_nam)) {
        case CF_HWPREDICT:
        case CF_MHWPREDICT:
            init_hwpredict_cdp(&(rrd->cdp_prep[cdp_idx]));
            break;
        case CF_SEASONAL:
        case CF_DEVSEASONAL:
            /* don't use init_seasonal because it will reset burn-in, which
             * means different data sources will be calling for the smoother
             * at different times. */
            rrd->cdp_prep[cdp_idx].scratch[CDP_hw_seasonal].u_val = DNAN;
            rrd->cdp_prep[cdp_idx].scratch[CDP_hw_last_seasonal].u_val = DNAN;
            /* move to first entry of data source for this rra */
            /* NOTE(review): rrd_seek return values are unchecked here and
             * below; a failed seek would make subsequent writes land at
             * the wrong offset. */
            rrd_seek(rrd_file, rra_start + ds_idx * sizeof(rrd_value_t),
                     SEEK_SET);
            /* entries for the same data source are not contiguous,
             * temporal entries are contiguous: write one DNAN cell, then
             * skip the other ds_cnt-1 cells to reach the next row */
            for (i = 0; i < rrd->rra_def[rra_idx].row_cnt; ++i) {
                if (rrd_write(rrd_file, &nan_buffer, sizeof(rrd_value_t) * 1)
                    != sizeof(rrd_value_t) * 1) {
                    rrd_set_error
                        ("reset_aberrant_coefficients: write failed data source %lu rra %s",
                         ds_idx, rrd->rra_def[rra_idx].cf_nam);
                    return;
                }
                rrd_seek(rrd_file, (rrd->stat_head->ds_cnt - 1) *
                         sizeof(rrd_value_t), SEEK_CUR);
            }
            break;
        case CF_FAILURES:
            /* clear the recorded violation history for this DS/RRA pair */
            erase_violations(rrd, cdp_idx, rra_idx);
            break;
        default:
            break;
        }
        /* move offset to the next rra */
        rra_start += rrd->rra_def[rra_idx].row_cnt * rrd->stat_head->ds_cnt *
            sizeof(rrd_value_t);
    }
    /* flush the (possibly modified) cdp_prep area back to disk in one write */
    rrd_seek(rrd_file, cdp_start, SEEK_SET);
    if (rrd_write(rrd_file, rrd->cdp_prep,
                  sizeof(cdp_prep_t) *
                  (rrd->stat_head->rra_cnt) * rrd->stat_head->ds_cnt)
        != (ssize_t) (sizeof(cdp_prep_t) * (rrd->stat_head->rra_cnt) *
                      (rrd->stat_head->ds_cnt))) {
        rrd_set_error("reset_aberrant_coefficients: cdp_prep write failed");
    }
}